diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 99c93b116..b4df4b315 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,73 +1,24 @@ { "ImportPath": "github.com/skippbox/kubewatch", - "GoVersion": "go1.6", - "GodepVersion": "v74", + "GoVersion": "go1.8", + "GodepVersion": "v79", "Packages": [ "./..." ], "Deps": [ { - "ImportPath": "github.com/Sirupsen/logrus", - "Comment": "v0.10.0-21-g32055c3", - "Rev": "32055c351ea8b00b96d70f28db48d9840feaf0ec" - }, - { - "ImportPath": "github.com/beorn7/perks/quantile", - "Rev": "3ac7bf7a47d159a033b107610db8a1b6575507a4" - }, - { - "ImportPath": "github.com/blang/semver", - "Comment": "v3.3.0", - "Rev": "60ec3488bfea7cca02b021d106d9911120d25fe9" - }, - { - "ImportPath": "github.com/cloudfoundry-incubator/candiedyaml", - "Rev": "99c3df83b51532e3615f851d8c2dbb638f5313bf" - }, - { - "ImportPath": "github.com/coreos/go-oidc/http", - "Rev": "48c41f8e5a608ae49cbff1d977dd060815a8bb9f" - }, - { - "ImportPath": "github.com/coreos/go-oidc/jose", - "Rev": "48c41f8e5a608ae49cbff1d977dd060815a8bb9f" - }, - { - "ImportPath": "github.com/coreos/go-oidc/key", - "Rev": "48c41f8e5a608ae49cbff1d977dd060815a8bb9f" - }, - { - "ImportPath": "github.com/coreos/go-oidc/oauth2", - "Rev": "48c41f8e5a608ae49cbff1d977dd060815a8bb9f" - }, - { - "ImportPath": "github.com/coreos/go-oidc/oidc", - "Rev": "48c41f8e5a608ae49cbff1d977dd060815a8bb9f" - }, - { - "ImportPath": "github.com/coreos/go-systemd/journal", - "Comment": "v10-9-g5f8a27b", - "Rev": "5f8a27bd032fac5e5bbdcbf614a9e95e06cdbdbb" + "ImportPath": "github.com/PuerkitoBio/purell", + "Comment": "v1.0.0", + "Rev": "8a290539e2e8629dbc4e6bad948158f790ec31f4" }, { - "ImportPath": "github.com/coreos/pkg/capnslog", - "Comment": "v2-10-ga48e304", - "Rev": "a48e304ff9331be6d5df352b6b47bd1395ab5dd7" + "ImportPath": "github.com/PuerkitoBio/urlesc", + "Rev": "5bd2802263f21d8788851d5305584c82a5c75d7e" }, { - "ImportPath": "github.com/coreos/pkg/health", - "Comment": "v2-10-ga48e304", - "Rev": "a48e304ff9331be6d5df352b6b47bd1395ab5dd7" - }, - { - "ImportPath": "github.com/coreos/pkg/httputil", - "Comment": "v2-10-ga48e304", - "Rev": "a48e304ff9331be6d5df352b6b47bd1395ab5dd7" - }, - { - "ImportPath": "github.com/coreos/pkg/timeutil", - "Comment": "v2-10-ga48e304", - "Rev": "a48e304ff9331be6d5df352b6b47bd1395ab5dd7" + "ImportPath": "github.com/Sirupsen/logrus", + "Comment": "v0.10.0-21-g32055c3", + "Rev": "32055c351ea8b00b96d70f28db48d9840feaf0ec" }, { "ImportPath": "github.com/davecgh/go-spew/spew", @@ -75,74 +26,14 @@ }, { "ImportPath": "github.com/docker/distribution/digest", - "Comment": "v2.5.0-rc.1-32-g07f32ac", + "Comment": "docs-v2.4.1-2016-06-28-16-g07f32ac", "Rev": "07f32ac1831ed0fc71960b7da5d6bb83cb6881b5" }, { "ImportPath": "github.com/docker/distribution/reference", - "Comment": "v2.5.0-rc.1-32-g07f32ac", + "Comment": "docs-v2.4.1-2016-06-28-16-g07f32ac", "Rev": "07f32ac1831ed0fc71960b7da5d6bb83cb6881b5" }, - { - "ImportPath": "github.com/docker/docker/pkg/mount", - "Comment": "v1.4.1-13592-g9d236d8", - "Rev": "9d236d88b78b02ff11f76043522525a9d35e6372" - }, - { - "ImportPath": "github.com/docker/engine-api/types", - "Comment": "v0.3.1-268-g603ec41", - "Rev": "603ec41824c63d1e6498a22271987fa1f268c0c0" - }, - { - "ImportPath": "github.com/docker/engine-api/types/blkiodev", - "Comment": "v0.3.1-268-g603ec41", - "Rev": "603ec41824c63d1e6498a22271987fa1f268c0c0" - }, - { - "ImportPath": "github.com/docker/engine-api/types/container", - "Comment": "v0.3.1-268-g603ec41", - "Rev": 
"603ec41824c63d1e6498a22271987fa1f268c0c0" - }, - { - "ImportPath": "github.com/docker/engine-api/types/filters", - "Comment": "v0.3.1-268-g603ec41", - "Rev": "603ec41824c63d1e6498a22271987fa1f268c0c0" - }, - { - "ImportPath": "github.com/docker/engine-api/types/network", - "Comment": "v0.3.1-268-g603ec41", - "Rev": "603ec41824c63d1e6498a22271987fa1f268c0c0" - }, - { - "ImportPath": "github.com/docker/engine-api/types/registry", - "Comment": "v0.3.1-268-g603ec41", - "Rev": "603ec41824c63d1e6498a22271987fa1f268c0c0" - }, - { - "ImportPath": "github.com/docker/engine-api/types/strslice", - "Comment": "v0.3.1-268-g603ec41", - "Rev": "603ec41824c63d1e6498a22271987fa1f268c0c0" - }, - { - "ImportPath": "github.com/docker/engine-api/types/swarm", - "Comment": "v0.3.1-268-g603ec41", - "Rev": "603ec41824c63d1e6498a22271987fa1f268c0c0" - }, - { - "ImportPath": "github.com/docker/engine-api/types/versions", - "Comment": "v0.3.1-268-g603ec41", - "Rev": "603ec41824c63d1e6498a22271987fa1f268c0c0" - }, - { - "ImportPath": "github.com/docker/go-connections/nat", - "Comment": "v0.2.1", - "Rev": "990a1a1a70b0da4c4cb70e117971a4f0babfbf1a" - }, - { - "ImportPath": "github.com/docker/go-units", - "Comment": "v0.3.1", - "Rev": "f2d77a61e3c169b43402a0a1e84f06daf29b8190" - }, { "ImportPath": "github.com/emicklei/go-restful", "Comment": "v1.2-75-gb14c3a9", @@ -158,10 +49,6 @@ "Comment": "v1.2-75-gb14c3a9", "Rev": "b14c3a95fc27c52959d2eddc85066da3c14bf269" }, - { - "ImportPath": "github.com/evanphx/json-patch", - "Rev": "b49dc1f728861356c47692618c6b832fb00af335" - }, { "ImportPath": "github.com/fsnotify/fsnotify", "Comment": "v1.4.0-2-gc07fd86", @@ -172,26 +59,34 @@ "Rev": "aa0c862057666179de291b67d9f093d12b5a8473" }, { - "ImportPath": "github.com/gogo/protobuf/proto", - "Comment": "v0.2-22-g2752d97", - "Rev": "2752d97bbd91927dd1c43296dbf8700e50e2708c" + "ImportPath": "github.com/go-openapi/jsonpointer", + "Rev": "46af16f9f7b149af66e5d1bd010e3574dc06de98" }, { - "ImportPath": "github.com/golang/glog", - "Rev": "23def4e6c14b4da8ac2ed8007337bc5eb5007998" + "ImportPath": "github.com/go-openapi/jsonreference", + "Rev": "13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272" + }, + { + "ImportPath": "github.com/go-openapi/spec", + "Rev": "6aced65f8501fe1217321abf0749d354824ba2ff" + }, + { + "ImportPath": "github.com/go-openapi/swag", + "Rev": "1d0bd113de87027671077d3c71eb3ac5d7dbba72" }, { - "ImportPath": "github.com/golang/groupcache/lru", - "Rev": "02826c3e79038b59d737d3b1c0a1d937f71a4433" + "ImportPath": "github.com/gogo/protobuf/proto", + "Comment": "v0.2-22-g2752d97", + "Rev": "2752d97bbd91927dd1c43296dbf8700e50e2708c" }, { - "ImportPath": "github.com/golang/protobuf/proto", - "Rev": "874264fbbb43f4d91e999fecb4b40143ed611400" + "ImportPath": "github.com/gogo/protobuf/sortkeys", + "Comment": "v0.2-22-g2752d97", + "Rev": "2752d97bbd91927dd1c43296dbf8700e50e2708c" }, { - "ImportPath": "github.com/google/cadvisor/info/v1", - "Comment": "v0.23.2-49-g2290707", - "Rev": "2290707a3b79a9c9a58d3fb7abd0923be3d88a7b" + "ImportPath": "github.com/golang/glog", + "Rev": "23def4e6c14b4da8ac2ed8007337bc5eb5007998" }, { "ImportPath": "github.com/google/gofuzz", @@ -233,6 +128,10 @@ "ImportPath": "github.com/hashicorp/hcl/json/token", "Rev": "ef8133da8cda503718a74741312bf50821e6de79" }, + { + "ImportPath": "github.com/howeyc/gopass", + "Rev": "3ca23474a7c7203e0a0a070fd33508f6efdb9b3d" + }, { "ImportPath": "github.com/imdario/mergo", "Comment": "0.1.4-8-g9f1127a", @@ -242,11 +141,6 @@ "ImportPath": "github.com/inconshreveable/mousetrap", "Rev": 
"76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" }, - { - "ImportPath": "github.com/jonboulle/clockwork", - "Comment": "v0.1.0", - "Rev": "2eee05ed794112d45db504eb05aa693efd2b8b09" - }, { "ImportPath": "github.com/juju/ratelimit", "Rev": "77ed1c8a01217656d2080ad51981f6e99adaa177" @@ -261,9 +155,16 @@ "Rev": "0723e352fa358f9322c938cc2dadda874e9151a9" }, { - "ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil", - "Comment": "v1.0.0-2-gc12348c", - "Rev": "c12348ce28de40eed0136aa2b644d0ee0650e56c" + "ImportPath": "github.com/mailru/easyjson/buffer", + "Rev": "d5b7844b561a7bc640052f1b935f7b800330d7e0" + }, + { + "ImportPath": "github.com/mailru/easyjson/jlexer", + "Rev": "d5b7844b561a7bc640052f1b935f7b800330d7e0" + }, + { + "ImportPath": "github.com/mailru/easyjson/jwriter", + "Rev": "d5b7844b561a7bc640052f1b935f7b800330d7e0" }, { "ImportPath": "github.com/mitchellh/mapstructure", @@ -274,38 +175,9 @@ "Comment": "v0.0.1-118-g4feee83", "Rev": "4feee83bb2b31d790977ce727a028c6a542c72c7" }, - { - "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups", - "Comment": "v1.0.0-rc1-57-g4eb8c2f", - "Rev": "4eb8c2fb1dcb10fa3bf9bd7031f3a25a8ce2fef6" - }, - { - "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/fs", - "Comment": "v1.0.0-rc1-57-g4eb8c2f", - "Rev": "4eb8c2fb1dcb10fa3bf9bd7031f3a25a8ce2fef6" - }, - { - "ImportPath": "github.com/opencontainers/runc/libcontainer/configs", - "Comment": "v1.0.0-rc1-57-g4eb8c2f", - "Rev": "4eb8c2fb1dcb10fa3bf9bd7031f3a25a8ce2fef6" - }, - { - "ImportPath": "github.com/opencontainers/runc/libcontainer/system", - "Comment": "v1.0.0-rc1-57-g4eb8c2f", - "Rev": "4eb8c2fb1dcb10fa3bf9bd7031f3a25a8ce2fef6" - }, - { - "ImportPath": "github.com/opencontainers/runc/libcontainer/utils", - "Comment": "v1.0.0-rc1-57-g4eb8c2f", - "Rev": "4eb8c2fb1dcb10fa3bf9bd7031f3a25a8ce2fef6" - }, - { - "ImportPath": "github.com/pborman/uuid", - "Comment": "v1.0-11-gc55201b", - "Rev": "c55201b036063326c5b1b89ccfe45a184973d073" - }, { "ImportPath": "github.com/pelletier/go-buffruneio", + "Comment": "v0.1.0", "Rev": "df1e16fde7fc330a0ca68167c23bf7ed6ac31d6d" }, { @@ -322,32 +194,6 @@ "ImportPath": "github.com/pkg/sftp", "Rev": "4d0e916071f68db74f8a73926335f809396d6b42" }, - { - "ImportPath": "github.com/prometheus/client_golang/prometheus", - "Comment": "0.7.0-87-g28be158", - "Rev": "28be15864ef9ba05d74fa6fd13b928fd250e8f01" - }, - { - "ImportPath": "github.com/prometheus/client_model/go", - "Comment": "model-0.0.2-12-gfa8ad6f", - "Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6" - }, - { - "ImportPath": "github.com/prometheus/common/expfmt", - "Rev": "4402f4e5ea79ec15f3c574773b6a5198fbea215f" - }, - { - "ImportPath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg", - "Rev": "4402f4e5ea79ec15f3c574773b6a5198fbea215f" - }, - { - "ImportPath": "github.com/prometheus/common/model", - "Rev": "4402f4e5ea79ec15f3c574773b6a5198fbea215f" - }, - { - "ImportPath": "github.com/prometheus/procfs", - "Rev": "abf152e5f3e97f2fafac028d2cc06c1feb87ffa5" - }, { "ImportPath": "github.com/spf13/afero", "Rev": "52e4a6cfac46163658bd4f123c49b6ee7dc75f78" @@ -401,19 +247,19 @@ "Rev": "484eb34681af59703e639b971bc307019182c41f" }, { - "ImportPath": "golang.org/x/net/context", - "Rev": "f841c39de738b1d0df95b5a7187744f0e03d8112" + "ImportPath": "golang.org/x/crypto/ssh/terminal", + "Rev": "484eb34681af59703e639b971bc307019182c41f" }, { - "ImportPath": "golang.org/x/net/context/ctxhttp", + "ImportPath": "golang.org/x/net/http2", "Rev": 
"f841c39de738b1d0df95b5a7187744f0e03d8112" }, { - "ImportPath": "golang.org/x/net/http2", + "ImportPath": "golang.org/x/net/http2/hpack", "Rev": "f841c39de738b1d0df95b5a7187744f0e03d8112" }, { - "ImportPath": "golang.org/x/net/http2/hpack", + "ImportPath": "golang.org/x/net/idna", "Rev": "f841c39de738b1d0df95b5a7187744f0e03d8112" }, { @@ -424,77 +270,49 @@ "ImportPath": "golang.org/x/net/websocket", "Rev": "f841c39de738b1d0df95b5a7187744f0e03d8112" }, - { - "ImportPath": "golang.org/x/oauth2", - "Rev": "7357e96168422ba1e6e7f21075713b72d58764e7" - }, - { - "ImportPath": "golang.org/x/oauth2/google", - "Rev": "7357e96168422ba1e6e7f21075713b72d58764e7" - }, - { - "ImportPath": "golang.org/x/oauth2/internal", - "Rev": "7357e96168422ba1e6e7f21075713b72d58764e7" - }, - { - "ImportPath": "golang.org/x/oauth2/jws", - "Rev": "7357e96168422ba1e6e7f21075713b72d58764e7" - }, - { - "ImportPath": "golang.org/x/oauth2/jwt", - "Rev": "7357e96168422ba1e6e7f21075713b72d58764e7" - }, { "ImportPath": "golang.org/x/sys/unix", "Rev": "b518c298ac9dc94b6ac0757394f50d10c5dfa25a" }, { - "ImportPath": "golang.org/x/text/transform", + "ImportPath": "golang.org/x/text/cases", "Rev": "098f51fb687dbaba1f6efabeafbb6461203f9e21" }, { - "ImportPath": "golang.org/x/text/unicode/norm", + "ImportPath": "golang.org/x/text/internal/tag", "Rev": "098f51fb687dbaba1f6efabeafbb6461203f9e21" }, { - "ImportPath": "google.golang.org/appengine", - "Rev": "267c27e7492265b84fc6719503b14a1e17975d79" - }, - { - "ImportPath": "google.golang.org/appengine/internal", - "Rev": "267c27e7492265b84fc6719503b14a1e17975d79" - }, - { - "ImportPath": "google.golang.org/appengine/internal/app_identity", - "Rev": "267c27e7492265b84fc6719503b14a1e17975d79" + "ImportPath": "golang.org/x/text/language", + "Rev": "098f51fb687dbaba1f6efabeafbb6461203f9e21" }, { - "ImportPath": "google.golang.org/appengine/internal/base", - "Rev": "267c27e7492265b84fc6719503b14a1e17975d79" + "ImportPath": "golang.org/x/text/runes", + "Rev": "098f51fb687dbaba1f6efabeafbb6461203f9e21" }, { - "ImportPath": "google.golang.org/appengine/internal/datastore", - "Rev": "267c27e7492265b84fc6719503b14a1e17975d79" + "ImportPath": "golang.org/x/text/secure/bidirule", + "Rev": "098f51fb687dbaba1f6efabeafbb6461203f9e21" }, { - "ImportPath": "google.golang.org/appengine/internal/log", - "Rev": "267c27e7492265b84fc6719503b14a1e17975d79" + "ImportPath": "golang.org/x/text/secure/precis", + "Rev": "098f51fb687dbaba1f6efabeafbb6461203f9e21" }, { - "ImportPath": "google.golang.org/appengine/internal/modules", - "Rev": "267c27e7492265b84fc6719503b14a1e17975d79" + "ImportPath": "golang.org/x/text/transform", + "Rev": "098f51fb687dbaba1f6efabeafbb6461203f9e21" }, { - "ImportPath": "google.golang.org/appengine/internal/remote_api", - "Rev": "267c27e7492265b84fc6719503b14a1e17975d79" + "ImportPath": "golang.org/x/text/unicode/bidi", + "Rev": "098f51fb687dbaba1f6efabeafbb6461203f9e21" }, { - "ImportPath": "google.golang.org/cloud/compute/metadata", - "Rev": "abc55035ece67841a0419d4b66017317a232015d" + "ImportPath": "golang.org/x/text/unicode/norm", + "Rev": "098f51fb687dbaba1f6efabeafbb6461203f9e21" }, { - "ImportPath": "google.golang.org/cloud/internal", - "Rev": "abc55035ece67841a0419d4b66017317a232015d" + "ImportPath": "golang.org/x/text/width", + "Rev": "098f51fb687dbaba1f6efabeafbb6461203f9e21" }, { "ImportPath": "gopkg.in/inf.v0", @@ -506,689 +324,570 @@ "Rev": "a83829b6f1293c91addabc89d0571c246397bbf4" }, { - "ImportPath": "k8s.io/kubernetes/federation/apis/federation", - "Comment": "v1.3.4", 
- "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" - }, - { - "ImportPath": "k8s.io/kubernetes/federation/apis/federation/install", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" - }, - { - "ImportPath": "k8s.io/kubernetes/federation/apis/federation/v1beta1", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" - }, - { - "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" - }, - { - "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" - }, - { - "ImportPath": "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/annotations", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/endpoints", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/errors", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/install", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/meta", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/meta/metatypes", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/pod", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/resource", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/rest", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/api/service", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/api/errors", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/unversioned", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/api/meta", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/unversioned/validation", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/api/resource", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/util", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/apimachinery", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/v1", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/apimachinery/announced", + "Rev": 
"75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/api/validation", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/apimachinery/registered", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apimachinery", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apimachinery/registered", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/apps", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/conversion", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/install", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/conversion/queryparams", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/v1alpha1", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/fields", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication.k8s.io", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/labels", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/openapi", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/runtime", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/runtime/schema", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/install", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/json", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/protobuf", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/install", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": 
"k8s.io/apimachinery/pkg/runtime/serializer/recognizer", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/v1", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/streaming", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/batch", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/runtime/serializer/versioning", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/install", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/selection", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v1", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/types", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/util/diff", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/util/errors", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig/install", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/util/framer", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/util/intstr", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/util/json", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/install", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/util/net", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/util/rand", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/validation", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/util/runtime", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/policy", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/util/sets", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/install", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": 
"k8s.io/apimachinery/pkg/util/validation", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/v1alpha1", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/util/validation/field", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/util/wait", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/install", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/util/yaml", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/version", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/auth/user", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/pkg/watch", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/capabilities", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/reflect", + "Rev": "75b8dd260ef0469d96d578705a87cffd0e09dab8" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/cache", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/discovery", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/kubernetes", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/kubernetes/scheme", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/kubernetes/typed/apps/v1beta1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/kubernetes/typed/authentication/v1beta1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": 
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/metrics", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/kubernetes/typed/authorization/v1beta1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/record", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/restclient", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/transport", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/typed/discovery", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/kubernetes/typed/batch/v2alpha1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/kubernetes/typed/certificates/v1beta1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/kubernetes/typed/core/v1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/auth", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/kubernetes/typed/extensions/v1beta1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/kubernetes/typed/policy/v1beta1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/kubernetes/typed/rbac/v1beta1", + "Comment": "v3.0.0-beta.0", + "Rev": 
"3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/kubernetes/typed/settings/v1alpha1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/controller", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/controller/framework", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/kubernetes/typed/storage/v1beta1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/conversion", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/api", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/conversion/queryparams", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/api/install", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/api/v1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/fieldpath", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/apps", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/fields", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/apps/install", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubectl", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/apps/v1beta1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/util", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/authentication", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubectl/resource", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/authentication/install", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubelet/qos", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/authentication/v1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubelet/qos/util", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": 
"k8s.io/client-go/pkg/apis/authentication/v1beta1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/labels", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/authorization", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/master/ports", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/authorization/install", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/registry/generic", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/authorization/v1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/authorization/v1beta1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/runtime", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/autoscaling", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/autoscaling/install", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/json", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/autoscaling/v1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/protobuf", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/autoscaling/v2alpha1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/recognizer", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/batch", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/streaming", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/batch/install", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/versioning", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/batch/v1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/batch/v2alpha1", + "Comment": "v3.0.0-beta.0", + "Rev": 
"3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/storage", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/certificates", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/types", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/certificates/install", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/certificates/v1beta1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/crypto", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/extensions", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/deployment", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/extensions/install", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/errors", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/extensions/v1beta1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/flag", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/policy", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/flowcontrol", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/policy/install", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/framer", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/policy/v1beta1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/hash", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/rbac", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/homedir", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/rbac/install", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/integer", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/rbac/v1alpha1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/intstr", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/rbac/v1beta1", + "Comment": "v3.0.0-beta.0", + 
"Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/json", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/settings", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/jsonpath", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/settings/install", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/labels", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/settings/v1alpha1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/net", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/storage", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/net/sets", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/storage/install", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/parsers", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/storage/v1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/pod", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/apis/storage/v1beta1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/rand", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/util", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/replicaset", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/util/parsers", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/runtime", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/pkg/version", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/sets", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/rest", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/slice", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/rest/watch", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/strategicpatch", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/tools/auth", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - 
"ImportPath": "k8s.io/kubernetes/pkg/util/validation", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/tools/cache", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/validation/field", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/tools/clientcmd", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/wait", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/tools/clientcmd/api", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/util/yaml", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/tools/clientcmd/api/latest", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/version", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/tools/clientcmd/api/v1", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/watch", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/tools/metrics", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/pkg/watch/versioned", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/transport", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/client/auth", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/util/cert", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/client/auth/gcp", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/util/clock", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/client/auth/oidc", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/util/flowcontrol", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/third_party/forked/json", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/util/homedir", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/third_party/forked/reflect", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/util/integer", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" }, { - "ImportPath": "k8s.io/kubernetes/third_party/golang/template", - "Comment": "v1.3.4", - "Rev": "dd6b458ef8dbf24aff55795baa68f83383c9b3a9" + "ImportPath": "k8s.io/client-go/util/workqueue", + "Comment": "v3.0.0-beta.0", + "Rev": "3627aeb7d4f6ade38f995d2c923e459146493c7e" } ] } diff --git 
a/pkg/client/run.go b/pkg/client/run.go index 82253624e..654068862 100644 --- a/pkg/client/run.go +++ b/pkg/client/run.go @@ -41,5 +41,5 @@ func Run(conf *config.Config) { log.Fatal(err) } - controller.Controller(conf, eventHandler) + controller.Start(conf, eventHandler) } diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 9efa80447..76c566509 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -17,196 +17,315 @@ limitations under the License. package controller import ( - "net/http" + "fmt" + "os" + "os/signal" + "syscall" "time" "github.com/Sirupsen/logrus" "github.com/skippbox/kubewatch/config" "github.com/skippbox/kubewatch/pkg/handlers" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" - "k8s.io/kubernetes/pkg/client/cache" - client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/controller/framework" - "k8s.io/kubernetes/pkg/fields" - cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - "k8s.io/kubernetes/pkg/util/wait" + "github.com/skippbox/kubewatch/pkg/utils" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/apimachinery/pkg/util/wait" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + api_v1 "k8s.io/client-go/pkg/api/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/client-go/kubernetes" ) -func Controller(conf *config.Config, eventHandler handlers.Handler) { +const maxRetries = 5 - factory := cmdutil.NewFactory(nil) - kubeConfig, err := factory.ClientConfig() - if err != nil { - logrus.Fatal(err) - } +// Controller object +type Controller struct { + logger *logrus.Entry + clientset kubernetes.Interface + queue workqueue.RateLimitingInterface + informer cache.SharedIndexInformer + eventHandler handlers.Handler +} - kubeClient := client.NewOrDie(kubeConfig) - kubeExtensionsClient := client.NewExtensionsOrDie(kubeConfig) +func Start(conf *config.Config, eventHandler handlers.Handler) { + kubeClient := utils.GetClientOutOfCluster() if conf.Resource.Pod { - watchPods(kubeClient, eventHandler) - } - - if conf.Resource.Services { - watchServices(kubeClient, eventHandler) - } + c := newControllerPod(kubeClient, eventHandler) + stopCh := make(chan struct{}) + defer close(stopCh) - if conf.Resource.ReplicationController { - watchReplicationControllers(kubeClient, eventHandler) - } - - if conf.Resource.Deployment { - watchDeployments(kubeExtensionsClient, eventHandler) - } + go c.Run(stopCh) - if conf.Resource.Job { - watchJobs(kubeExtensionsClient, eventHandler) + sigterm := make(chan os.Signal, 1) + signal.Notify(sigterm, syscall.SIGTERM) + signal.Notify(sigterm, syscall.SIGINT) + <-sigterm } - if conf.Resource.PersistentVolume { - var servicesStore cache.Store - servicesStore = watchPersistenVolumes(kubeClient, servicesStore, eventHandler) - } - - logrus.Fatal(http.ListenAndServe(":8081", nil)) + //if conf.Resource.Services { + // watchServices(kubeClient, eventHandler) + //} + // + //if conf.Resource.ReplicationController { + // watchReplicationControllers(kubeClient, eventHandler) + //} + // + //if conf.Resource.Deployment { + // watchDeployments(kubeExtensionsClient, eventHandler) + //} + // + //if conf.Resource.Job { + // watchJobs(kubeExtensionsClient, eventHandler) + //} + // + //if conf.Resource.PersistentVolume { + // var servicesStore cache.Store + // servicesStore = watchPersistenVolumes(kubeClient, servicesStore, eventHandler) + //} + + 
//logrus.Fatal(http.ListenAndServe(":8081", nil)) } -func watchPods(client *client.Client, eventHandler handlers.Handler) cache.Store { - //Define what we want to look for (Pods) - watchlist := cache.NewListWatchFromClient(client, "pods", api.NamespaceAll, fields.Everything()) - - resyncPeriod := 30 * time.Minute - - //Setup an informer to call functions when the watchlist changes - eStore, eController := framework.NewInformer( - watchlist, - &api.Pod{}, - resyncPeriod, - framework.ResourceEventHandlerFuncs{ - AddFunc: eventHandler.ObjectCreated, - DeleteFunc: eventHandler.ObjectDeleted, +func newControllerPod(client kubernetes.Interface, eventHandler handlers.Handler) *Controller { + queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) + + informer := cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + return client.CoreV1().Pods(meta_v1.NamespaceAll).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + return client.CoreV1().Pods(meta_v1.NamespaceAll).Watch(options) + }, }, + &api_v1.Pod{}, + 0, //Skip resync + cache.Indexers{}, ) - //Run the controller as a goroutine - go eController.Run(wait.NeverStop) - - return eStore -} - -func watchServices(client *client.Client, eventHandler handlers.Handler) cache.Store { - //Define what we want to look for (Services) - watchlist := cache.NewListWatchFromClient(client, "services", api.NamespaceAll, fields.Everything()) - - resyncPeriod := 30 * time.Minute - - //Setup an informer to call functions when the watchlist changes - eStore, eController := framework.NewInformer( - watchlist, - &api.Service{}, - resyncPeriod, - framework.ResourceEventHandlerFuncs{ - AddFunc: eventHandler.ObjectCreated, - DeleteFunc: eventHandler.ObjectDeleted, - UpdateFunc: eventHandler.ObjectUpdated, + informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + key, err := cache.MetaNamespaceKeyFunc(obj) + if err == nil { + queue.Add(key) + } }, - ) - - //Run the controller as a goroutine - go eController.Run(wait.NeverStop) - - return eStore -} - -func watchReplicationControllers(client *client.Client, eventHandler handlers.Handler) cache.Store { - //Define what we want to look for (ReplicationControllers) - watchlist := cache.NewListWatchFromClient(client, "replicationcontrollers", api.NamespaceAll, fields.Everything()) - - resyncPeriod := 30 * time.Minute - - //Setup an informer to call functions when the watchlist changes - eStore, eController := framework.NewInformer( - watchlist, - &api.ReplicationController{}, - resyncPeriod, - framework.ResourceEventHandlerFuncs{ - AddFunc: eventHandler.ObjectCreated, - DeleteFunc: eventHandler.ObjectDeleted, + UpdateFunc: func(old, new interface{}) { + key, err := cache.MetaNamespaceKeyFunc(new) + if err == nil { + queue.Add(key) + } }, - ) - - //Run the controller as a goroutine - go eController.Run(wait.NeverStop) - - return eStore + DeleteFunc: func(obj interface{}) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err == nil { + queue.Add(key) + } + }, + }) + + return &Controller{ + logger: logrus.WithField("pkg", "kubewatch-pod"), + clientset: client, + informer: informer, + queue: queue, + eventHandler: eventHandler, + } } -func watchDeployments(client *client.ExtensionsClient, eventHandler handlers.Handler) cache.Store { - //Define what we want to look for (Deployments) - watchlist := cache.NewListWatchFromClient(client, 
"deployments", api.NamespaceAll, fields.Everything()) +// Run starts the kubewatch controller +func (c *Controller) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() - resyncPeriod := 30 * time.Minute + c.logger.Info("Starting kubewatch controller") - //Setup an informer to call functions when the watchlist changes - eStore, eController := framework.NewInformer( - watchlist, - &v1beta1.Deployment{}, - resyncPeriod, - framework.ResourceEventHandlerFuncs{ - AddFunc: eventHandler.ObjectCreated, - DeleteFunc: eventHandler.ObjectDeleted, - }, - ) + go c.informer.Run(stopCh) - //Run the controller as a goroutine - go eController.Run(wait.NeverStop) + if !cache.WaitForCacheSync(stopCh, c.HasSynced) { + utilruntime.HandleError(fmt.Errorf("Timed out waiting for caches to sync")) + return + } + + c.logger.Info("Kubewatch controller synced and ready") - return eStore + wait.Until(c.runWorker, time.Second, stopCh) } -func watchJobs(client *client.ExtensionsClient, eventHandler handlers.Handler) cache.Store { - //Define what we want to look for (Jobs) - watchlist := cache.NewListWatchFromClient(client, "jobs", api.NamespaceAll, fields.Everything()) +// HasSynced is required for the cache.Controller interface. +func (c *Controller) HasSynced() bool { + return c.informer.HasSynced() +} - resyncPeriod := 30 * time.Minute +// LastSyncResourceVersion is required for the cache.Controller interface. +func (c *Controller) LastSyncResourceVersion() string { + return c.informer.LastSyncResourceVersion() +} - //Setup an informer to call functions when the watchlist changes - eStore, eController := framework.NewInformer( - watchlist, - &v1beta1.Job{}, - resyncPeriod, - framework.ResourceEventHandlerFuncs{ - AddFunc: eventHandler.ObjectCreated, - DeleteFunc: eventHandler.ObjectDeleted, - }, - ) +func (c *Controller) runWorker() { + for c.processNextItem() { + // continue looping + } +} - //Run the controller as a goroutine - go eController.Run(wait.NeverStop) +func (c *Controller) processNextItem() bool { + key, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(key) + + err := c.processItem(key.(string)) + if err == nil { + // No error, reset the ratelimit counters + c.queue.Forget(key) + } else if c.queue.NumRequeues(key) < maxRetries { + c.logger.Errorf("Error processing %s (will retry): %v", key, err) + c.queue.AddRateLimited(key) + } else { + // err != nil and too many retries + c.logger.Errorf("Error processing %s (giving up): %v", key, err) + c.queue.Forget(key) + utilruntime.HandleError(err) + } - return eStore + return true } -func watchPersistenVolumes(client *client.Client, store cache.Store, eventHandler handlers.Handler) cache.Store { - //Define what we want to look for (PersistenVolumes) - watchlist := cache.NewListWatchFromClient(client, "persistentvolumes", api.NamespaceAll, fields.Everything()) - - resyncPeriod := 30 * time.Minute +func (c *Controller) processItem(key string) error { + c.logger.Infof("Processing change to Pod %s", key) - //Setup an informer to call functions when the watchlist changes - eStore, eController := framework.NewInformer( - watchlist, - &api.PersistentVolume{}, - resyncPeriod, - framework.ResourceEventHandlerFuncs{ - AddFunc: eventHandler.ObjectCreated, - DeleteFunc: eventHandler.ObjectDeleted, - }, - ) + obj, exists, err := c.informer.GetIndexer().GetByKey(key) + if err != nil { + return fmt.Errorf("Error fetching object with key %s from store: %v", key, err) + } - //Run the controller as a goroutine - go 
eController.Run(wait.NeverStop) + if !exists { + c.eventHandler.ObjectDeleted(obj) + return nil + } - return eStore + c.eventHandler.ObjectCreated(obj) + return nil } + +// +//func watchServices(client *client.Client, eventHandler handlers.Handler) cache.Store { +// //Define what we want to look for (Services) +// watchlist := cache.NewListWatchFromClient(client, "services", api.NamespaceAll, fields.Everything()) +// +// resyncPeriod := 30 * time.Minute +// +// //Setup an informer to call functions when the watchlist changes +// eStore, eController := framework.NewInformer( +// watchlist, +// &api.Service{}, +// resyncPeriod, +// framework.ResourceEventHandlerFuncs{ +// AddFunc: eventHandler.ObjectCreated, +// DeleteFunc: eventHandler.ObjectDeleted, +// UpdateFunc: eventHandler.ObjectUpdated, +// }, +// ) +// +// //Run the controller as a goroutine +// go eController.Run(wait.NeverStop) +// +// return eStore +//} +// +//func watchReplicationControllers(client *client.Client, eventHandler handlers.Handler) cache.Store { +// //Define what we want to look for (ReplicationControllers) +// watchlist := cache.NewListWatchFromClient(client, "replicationcontrollers", api.NamespaceAll, fields.Everything()) +// +// resyncPeriod := 30 * time.Minute +// +// //Setup an informer to call functions when the watchlist changes +// eStore, eController := framework.NewInformer( +// watchlist, +// &api.ReplicationController{}, +// resyncPeriod, +// framework.ResourceEventHandlerFuncs{ +// AddFunc: eventHandler.ObjectCreated, +// DeleteFunc: eventHandler.ObjectDeleted, +// }, +// ) +// +// //Run the controller as a goroutine +// go eController.Run(wait.NeverStop) +// +// return eStore +//} +// +//func watchDeployments(client *client.ExtensionsClient, eventHandler handlers.Handler) cache.Store { +// //Define what we want to look for (Deployments) +// watchlist := cache.NewListWatchFromClient(client, "deployments", api.NamespaceAll, fields.Everything()) +// +// resyncPeriod := 30 * time.Minute +// +// //Setup an informer to call functions when the watchlist changes +// eStore, eController := framework.NewInformer( +// watchlist, +// &v1beta1.Deployment{}, +// resyncPeriod, +// framework.ResourceEventHandlerFuncs{ +// AddFunc: eventHandler.ObjectCreated, +// DeleteFunc: eventHandler.ObjectDeleted, +// }, +// ) +// +// //Run the controller as a goroutine +// go eController.Run(wait.NeverStop) +// +// return eStore +//} +// +//func watchJobs(client *client.ExtensionsClient, eventHandler handlers.Handler) cache.Store { +// //Define what we want to look for (Jobs) +// watchlist := cache.NewListWatchFromClient(client, "jobs", api.NamespaceAll, fields.Everything()) +// +// resyncPeriod := 30 * time.Minute +// +// //Setup an informer to call functions when the watchlist changes +// eStore, eController := framework.NewInformer( +// watchlist, +// &v1beta1.Job{}, +// resyncPeriod, +// framework.ResourceEventHandlerFuncs{ +// AddFunc: eventHandler.ObjectCreated, +// DeleteFunc: eventHandler.ObjectDeleted, +// }, +// ) +// +// //Run the controller as a goroutine +// go eController.Run(wait.NeverStop) +// +// return eStore +//} +// +//func watchPersistenVolumes(client *client.Client, store cache.Store, eventHandler handlers.Handler) cache.Store { +// //Define what we want to look for (PersistenVolumes) +// watchlist := cache.NewListWatchFromClient(client, "persistentvolumes", api.NamespaceAll, fields.Everything()) +// +// resyncPeriod := 30 * time.Minute +// +// //Setup an informer to call functions when the watchlist changes 
+// eStore, eController := framework.NewInformer( +// watchlist, +// &api.PersistentVolume{}, +// resyncPeriod, +// framework.ResourceEventHandlerFuncs{ +// AddFunc: eventHandler.ObjectCreated, +// DeleteFunc: eventHandler.ObjectDeleted, +// }, +// ) +// +// //Run the controller as a goroutine +// go eController.Run(wait.NeverStop) +// +// return eStore +//} diff --git a/pkg/event/event.go b/pkg/event/event.go index 83e0cef82..d2569d31c 100644 --- a/pkg/event/event.go +++ b/pkg/event/event.go @@ -17,9 +17,9 @@ limitations under the License. package event import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/pkg/apis/extensions/v1beta1" + "k8s.io/client-go/pkg/apis/batch" ) // Event represent an event got from k8s api server @@ -44,39 +44,39 @@ var m = map[string]string{ // New create new KubewatchEvent func New(obj interface{}, action string) Event { var namespace, kind, component, host, reason, status, name string - if apiService, ok := obj.(*api.Service); ok { + if apiService, ok := obj.(*v1.Service); ok { namespace = apiService.ObjectMeta.Namespace name = apiService.Name kind = "service" component = string(apiService.Spec.Type) reason = action status = m[action] - } else if apiPod, ok := obj.(*api.Pod); ok { + } else if apiPod, ok := obj.(*v1.Pod); ok { namespace = apiPod.ObjectMeta.Namespace name = apiPod.Name kind = "pod" reason = action host = apiPod.Spec.NodeName status = m[action] - } else if apiRC, ok := obj.(*api.ReplicationController); ok { + } else if apiRC, ok := obj.(*v1.ReplicationController); ok { namespace = apiRC.ObjectMeta.Namespace name = apiRC.Name kind = "replication controller" reason = action status = m[action] - } else if apiDeployment, ok := obj.(*extensions.Deployment); ok { + } else if apiDeployment, ok := obj.(*v1beta1.Deployment); ok { namespace = apiDeployment.ObjectMeta.Namespace name = apiDeployment.Name kind = "deployment" reason = action status = m[action] - } else if apiJob, ok := obj.(*v1beta1.Job); ok { + } else if apiJob, ok := obj.(*batch.Job); ok { namespace = apiJob.ObjectMeta.Namespace name = apiJob.Name kind = "job" reason = action status = m[action] - } else if apiPV, ok := obj.(*api.PersistentVolume); ok { + } else if apiPV, ok := obj.(*v1.PersistentVolume); ok { name = apiPV.Name kind = "persistent volume" reason = action diff --git a/pkg/utils/k8sutil.go b/pkg/utils/k8sutil.go new file mode 100644 index 000000000..83adb1acb --- /dev/null +++ b/pkg/utils/k8sutil.go @@ -0,0 +1,45 @@ +package utils + +import ( + "os" + + "github.com/Sirupsen/logrus" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +// GetClient returns a k8s clientset to the request from inside of cluster +func GetClient() kubernetes.Interface { + config, err := rest.InClusterConfig() + if err != nil { + logrus.Fatalf("Can not get kubernetes config: %v", err) + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + logrus.Fatalf("Can not create kubernetes client: %v", err) + } + + return clientset +} + +func buildOutOfClusterConfig() (*rest.Config, error) { + kubeconfigPath := os.Getenv("KUBECONFIG") + if kubeconfigPath == "" { + kubeconfigPath = os.Getenv("HOME") + "/.kube/config" + } + return clientcmd.BuildConfigFromFlags("", kubeconfigPath) +} + +// GetClientOutOfCluster returns a k8s clientset to the request from outside of cluster +func GetClientOutOfCluster() 
kubernetes.Interface { + config, err := buildOutOfClusterConfig() + if err != nil { + logrus.Fatalf("Can not get kubernetes config: %v", err) + } + + clientset, err := kubernetes.NewForConfig(config) + + return clientset +} \ No newline at end of file diff --git a/vendor/github.com/PuerkitoBio/purell/.gitignore b/vendor/github.com/PuerkitoBio/purell/.gitignore new file mode 100644 index 000000000..748e4c807 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/.gitignore @@ -0,0 +1,5 @@ +*.sublime-* +.DS_Store +*.swp +*.swo +tags diff --git a/vendor/github.com/PuerkitoBio/purell/.travis.yml b/vendor/github.com/PuerkitoBio/purell/.travis.yml new file mode 100644 index 000000000..facfc91c6 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.4 + - 1.5 + - 1.6 + - tip diff --git a/vendor/github.com/PuerkitoBio/purell/LICENSE b/vendor/github.com/PuerkitoBio/purell/LICENSE new file mode 100644 index 000000000..4b9986dea --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/LICENSE @@ -0,0 +1,12 @@ +Copyright (c) 2012, Martin Angers +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md new file mode 100644 index 000000000..a78a3df65 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/README.md @@ -0,0 +1,185 @@ +# Purell + +Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know... + +Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc]. + +[![build status](https://secure.travis-ci.org/PuerkitoBio/purell.png)](http://travis-ci.org/PuerkitoBio/purell) + +## Install + +`go get github.com/PuerkitoBio/purell` + +## Changelog + +* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich). +* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]). +* **v0.2.0** : Add benchmarks, Attempt IDN support. +* **v0.1.0** : Initial release. 
+ +## Examples + +From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."): + +```go +package purell + +import ( + "fmt" + "net/url" +) + +func ExampleNormalizeURLString() { + if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/", + FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil { + panic(err) + } else { + fmt.Print(normalized) + } + // Output: http://somewebsite.com:80/Amazing%3F/url/ +} + +func ExampleMustNormalizeURLString() { + normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/", + FlagsUnsafeGreedy) + fmt.Print(normalized) + + // Output: http://somewebsite.com/Amazing%FA/url +} + +func ExampleNormalizeURL() { + if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil { + panic(err) + } else { + normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment) + fmt.Print(normalized) + } + + // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0 +} +``` + +## API + +As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags: + +```go +const ( + // Safe normalizations + FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 + FlagLowercaseHost // http://HOST -> http://host + FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF + FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA + FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ + FlagRemoveDefaultPort // http://host:80 -> http://host + FlagRemoveEmptyQuerySeparator // http://host/path? 
-> http://host/path + + // Usually safe normalizations + FlagRemoveTrailingSlash // http://host/path/ -> http://host/path + FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) + FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c + + // Unsafe normalizations + FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ + FlagRemoveFragment // http://host/path#fragment -> http://host/path + FlagForceHTTP // https://host -> http://host + FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b + FlagRemoveWWW // http://www.host/ -> http://host/ + FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) + FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 + + // Normalizations not in the wikipedia article, required to cover tests cases + // submitted by jehiah + FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 + FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 + FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 + FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path + FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path + + // Convenience set of safe normalizations + FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator + + // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, + // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". + + // Convenience set of usually safe normalizations (includes FlagsSafe) + FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments + FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments + + // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) + FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery + FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery + + // Convenience set of all available flags + FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator + FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator +) +``` + +For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set. + +The [full godoc reference is available on gopkgdoc][godoc]. 
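
As a quick sketch of that composition (an editorial illustration added here, not part of the upstream README; it uses only the exported names listed above and does not assume any other API):

```go
package main

import (
	"fmt"

	"github.com/PuerkitoBio/purell"
)

func main() {
	// Build a custom flag set: start from the usually-safe greedy set,
	// add query sorting with |, and drop trailing-slash removal with &^.
	flags := (purell.FlagsUsuallySafeGreedy | purell.FlagSortQuery) &^ purell.FlagRemoveTrailingSlash

	normalized, err := purell.NormalizeURLString("HTTP://Example.com:80/a/./b/?b=2&a=1", flags)
	if err != nil {
		panic(err)
	}
	fmt.Println(normalized)
}
```

Because the convenience sets are plain bit masks over `NormalizationFlags`, this kind of composition needs no extra API surface.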
+ +Some things to note: + +* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it. + +* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*): + - %24 -> $ + - %26 -> & + - %2B-%3B -> +,-./0123456789:; + - %3D -> = + - %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ + - %5F -> _ + - %61-%7A -> abcdefghijklmnopqrstuvwxyz + - %7E -> ~ + + +* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization). + +* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell. + +* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object. + +### Safe vs Usually Safe vs Unsafe + +Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between. + +Consider the following URL: + +`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` + +Normalizing with the `FlagsSafe` gives: + +`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` + +With the `FlagsUsuallySafeGreedy`: + +`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid` + +And with `FlagsUnsafeGreedy`: + +`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3` + +## TODOs + +* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`. + +## Thanks / Contributions + +@rogpeppe +@jehiah +@opennota +@pchristopher1275 +@zenovich + +## License + +The [BSD 3-Clause license][bsd]. + +[bsd]: http://opensource.org/licenses/BSD-3-Clause +[wiki]: http://en.wikipedia.org/wiki/URL_normalization +[rfc]: http://tools.ietf.org/html/rfc3986#section-6 +[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell +[pr5]: https://github.com/PuerkitoBio/purell/pull/5 +[iss7]: https://github.com/PuerkitoBio/purell/issues/7 diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go new file mode 100644 index 000000000..b79da64b3 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/purell.go @@ -0,0 +1,375 @@ +/* +Package purell offers URL normalization as described on the wikipedia page: +http://en.wikipedia.org/wiki/URL_normalization +*/ +package purell + +import ( + "bytes" + "fmt" + "net/url" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/PuerkitoBio/urlesc" + "golang.org/x/net/idna" + "golang.org/x/text/secure/precis" + "golang.org/x/text/unicode/norm" +) + +// A set of normalization flags determines how a URL will +// be normalized. 
+type NormalizationFlags uint + +const ( + // Safe normalizations + FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 + FlagLowercaseHost // http://HOST -> http://host + FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF + FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA + FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ + FlagRemoveDefaultPort // http://host:80 -> http://host + FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path + + // Usually safe normalizations + FlagRemoveTrailingSlash // http://host/path/ -> http://host/path + FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) + FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c + + // Unsafe normalizations + FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ + FlagRemoveFragment // http://host/path#fragment -> http://host/path + FlagForceHTTP // https://host -> http://host + FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b + FlagRemoveWWW // http://www.host/ -> http://host/ + FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) + FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 + + // Normalizations not in the wikipedia article, required to cover tests cases + // submitted by jehiah + FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 + FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 + FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 + FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path + FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path + + // Convenience set of safe normalizations + FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator + + // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, + // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". 
+ + // Convenience set of usually safe normalizations (includes FlagsSafe) + FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments + FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments + + // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) + FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery + FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery + + // Convenience set of all available flags + FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator + FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator +) + +const ( + defaultHttpPort = ":80" + defaultHttpsPort = ":443" +) + +// Regular expressions used by the normalizations +var rxPort = regexp.MustCompile(`(:\d+)/?$`) +var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`) +var rxDupSlashes = regexp.MustCompile(`/{2,}`) +var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`) +var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`) +var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`) +var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`) +var rxEmptyPort = regexp.MustCompile(`:+$`) + +// Map of flags to implementation function. +// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically +// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator. + +// Since maps have undefined traversing order, make a slice of ordered keys +var flagsOrder = []NormalizationFlags{ + FlagLowercaseScheme, + FlagLowercaseHost, + FlagRemoveDefaultPort, + FlagRemoveDirectoryIndex, + FlagRemoveDotSegments, + FlagRemoveFragment, + FlagForceHTTP, // Must be after remove default port (because https=443/http=80) + FlagRemoveDuplicateSlashes, + FlagRemoveWWW, + FlagAddWWW, + FlagSortQuery, + FlagDecodeDWORDHost, + FlagDecodeOctalHost, + FlagDecodeHexHost, + FlagRemoveUnnecessaryHostDots, + FlagRemoveEmptyPortSeparator, + FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last + FlagAddTrailingSlash, +} + +// ... 
and then the map, where order is unimportant +var flags = map[NormalizationFlags]func(*url.URL){ + FlagLowercaseScheme: lowercaseScheme, + FlagLowercaseHost: lowercaseHost, + FlagRemoveDefaultPort: removeDefaultPort, + FlagRemoveDirectoryIndex: removeDirectoryIndex, + FlagRemoveDotSegments: removeDotSegments, + FlagRemoveFragment: removeFragment, + FlagForceHTTP: forceHTTP, + FlagRemoveDuplicateSlashes: removeDuplicateSlashes, + FlagRemoveWWW: removeWWW, + FlagAddWWW: addWWW, + FlagSortQuery: sortQuery, + FlagDecodeDWORDHost: decodeDWORDHost, + FlagDecodeOctalHost: decodeOctalHost, + FlagDecodeHexHost: decodeHexHost, + FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots, + FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator, + FlagRemoveTrailingSlash: removeTrailingSlash, + FlagAddTrailingSlash: addTrailingSlash, +} + +// MustNormalizeURLString returns the normalized string, and panics if an error occurs. +// It takes an URL string as input, as well as the normalization flags. +func MustNormalizeURLString(u string, f NormalizationFlags) string { + result, e := NormalizeURLString(u, f) + if e != nil { + panic(e) + } + return result +} + +// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object. +// It takes an URL string as input, as well as the normalization flags. +func NormalizeURLString(u string, f NormalizationFlags) (string, error) { + if parsed, e := url.Parse(u); e != nil { + return "", e + } else { + options := make([]precis.Option, 1, 3) + options[0] = precis.IgnoreCase + if f&FlagLowercaseHost == FlagLowercaseHost { + options = append(options, precis.FoldCase()) + } + options = append(options, precis.Norm(norm.NFC)) + profile := precis.NewFreeform(options...) + if parsed.Host, e = idna.ToASCII(profile.NewTransformer().String(parsed.Host)); e != nil { + return "", e + } + return NormalizeURL(parsed, f), nil + } + panic("Unreachable code.") +} + +// NormalizeURL returns the normalized string. +// It takes a parsed URL object as input, as well as the normalization flags. +func NormalizeURL(u *url.URL, f NormalizationFlags) string { + for _, k := range flagsOrder { + if f&k == k { + flags[k](u) + } + } + return urlesc.Escape(u) +} + +func lowercaseScheme(u *url.URL) { + if len(u.Scheme) > 0 { + u.Scheme = strings.ToLower(u.Scheme) + } +} + +func lowercaseHost(u *url.URL) { + if len(u.Host) > 0 { + u.Host = strings.ToLower(u.Host) + } +} + +func removeDefaultPort(u *url.URL) { + if len(u.Host) > 0 { + scheme := strings.ToLower(u.Scheme) + u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { + if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { + return "" + } + return val + }) + } +} + +func removeTrailingSlash(u *url.URL) { + if l := len(u.Path); l > 0 { + if strings.HasSuffix(u.Path, "/") { + u.Path = u.Path[:l-1] + } + } else if l = len(u.Host); l > 0 { + if strings.HasSuffix(u.Host, "/") { + u.Host = u.Host[:l-1] + } + } +} + +func addTrailingSlash(u *url.URL) { + if l := len(u.Path); l > 0 { + if !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + } + } else if l = len(u.Host); l > 0 { + if !strings.HasSuffix(u.Host, "/") { + u.Host += "/" + } + } +} + +func removeDotSegments(u *url.URL) { + if len(u.Path) > 0 { + var dotFree []string + var lastIsDot bool + + sections := strings.Split(u.Path, "/") + for _, s := range sections { + if s == ".." { + if len(dotFree) > 0 { + dotFree = dotFree[:len(dotFree)-1] + } + } else if s != "." 
{ + dotFree = append(dotFree, s) + } + lastIsDot = (s == "." || s == "..") + } + // Special case if host does not end with / and new path does not begin with / + u.Path = strings.Join(dotFree, "/") + if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") { + u.Path = "/" + u.Path + } + // Special case if the last segment was a dot, make sure the path ends with a slash + if lastIsDot && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + } + } +} + +func removeDirectoryIndex(u *url.URL) { + if len(u.Path) > 0 { + u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1") + } +} + +func removeFragment(u *url.URL) { + u.Fragment = "" +} + +func forceHTTP(u *url.URL) { + if strings.ToLower(u.Scheme) == "https" { + u.Scheme = "http" + } +} + +func removeDuplicateSlashes(u *url.URL) { + if len(u.Path) > 0 { + u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") + } +} + +func removeWWW(u *url.URL) { + if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") { + u.Host = u.Host[4:] + } +} + +func addWWW(u *url.URL) { + if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") { + u.Host = "www." + u.Host + } +} + +func sortQuery(u *url.URL) { + q := u.Query() + + if len(q) > 0 { + arKeys := make([]string, len(q)) + i := 0 + for k, _ := range q { + arKeys[i] = k + i++ + } + sort.Strings(arKeys) + buf := new(bytes.Buffer) + for _, k := range arKeys { + sort.Strings(q[k]) + for _, v := range q[k] { + if buf.Len() > 0 { + buf.WriteRune('&') + } + buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v))) + } + } + + // Rebuild the raw query string + u.RawQuery = buf.String() + } +} + +func decodeDWORDHost(u *url.URL) { + if len(u.Host) > 0 { + if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 { + var parts [4]int64 + + dword, _ := strconv.ParseInt(matches[1], 10, 0) + for i, shift := range []uint{24, 16, 8, 0} { + parts[i] = dword >> shift & 0xFF + } + u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2]) + } + } +} + +func decodeOctalHost(u *url.URL) { + if len(u.Host) > 0 { + if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 { + var parts [4]int64 + + for i := 1; i <= 4; i++ { + parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0) + } + u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5]) + } + } +} + +func decodeHexHost(u *url.URL) { + if len(u.Host) > 0 { + if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 { + // Conversion is safe because of regex validation + parsed, _ := strconv.ParseInt(matches[1], 16, 0) + // Set host as DWORD (base 10) encoded host + u.Host = fmt.Sprintf("%d%s", parsed, matches[2]) + // The rest is the same as decoding a DWORD host + decodeDWORDHost(u) + } + } +} + +func removeUnncessaryHostDots(u *url.URL) { + if len(u.Host) > 0 { + if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 { + // Trim the leading and trailing dots + u.Host = strings.Trim(matches[1], ".") + if len(matches) > 2 { + u.Host += matches[2] + } + } + } +} + +func removeEmptyPortSeparator(u *url.URL) { + if len(u.Host) > 0 { + u.Host = rxEmptyPort.ReplaceAllString(u.Host, "") + } +} diff --git a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml new file mode 100644 index 000000000..478630e50 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml @@ -0,0 +1,11 @@ +language: go + +go: + - 1.4 + - tip + +install: + - go build . 
+ +script: + - go test -v diff --git a/vendor/k8s.io/kubernetes/third_party/forked/json/LICENSE b/vendor/github.com/PuerkitoBio/urlesc/LICENSE similarity index 100% rename from vendor/k8s.io/kubernetes/third_party/forked/json/LICENSE rename to vendor/github.com/PuerkitoBio/urlesc/LICENSE diff --git a/vendor/github.com/PuerkitoBio/urlesc/README.md b/vendor/github.com/PuerkitoBio/urlesc/README.md new file mode 100644 index 000000000..bebe305e0 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/urlesc/README.md @@ -0,0 +1,16 @@ +urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.png?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc) +====== + +Package urlesc implements query escaping as per RFC 3986. + +It contains some parts of the net/url package, modified so as to allow +some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)). + +## Install + + go get github.com/PuerkitoBio/urlesc + +## License + +Go license (BSD-3-Clause) + diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go new file mode 100644 index 000000000..1b8462459 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go @@ -0,0 +1,180 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package urlesc implements query escaping as per RFC 3986. +// It contains some parts of the net/url package, modified so as to allow +// some reserved characters incorrectly escaped by net/url. +// See https://github.com/golang/go/issues/5684 +package urlesc + +import ( + "bytes" + "net/url" + "strings" +) + +type encoding int + +const ( + encodePath encoding = 1 + iota + encodeUserPassword + encodeQueryComponent + encodeFragment +) + +// Return true if the specified character should be escaped when +// appearing in a URL string, according to RFC 3986. +func shouldEscape(c byte, mode encoding) bool { + // §2.3 Unreserved characters (alphanum) + if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { + return false + } + + switch c { + case '-', '.', '_', '~': // §2.3 Unreserved characters (mark) + return false + + // §2.2 Reserved characters (reserved) + case ':', '/', '?', '#', '[', ']', '@', // gen-delims + '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims + // Different sections of the URL allow a few of + // the reserved characters to appear unescaped. + switch mode { + case encodePath: // §3.3 + // The RFC allows sub-delims and : @. + // '/', '[' and ']' can be used to assign meaning to individual path + // segments. This package only manipulates the path as a whole, + // so we allow those as well. That leaves only ? and # to escape. + return c == '?' || c == '#' + + case encodeUserPassword: // §3.2.1 + // The RFC allows : and sub-delims in + // userinfo. The parsing of userinfo treats ':' as special so we must escape + // all the gen-delims. + return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@' + + case encodeQueryComponent: // §3.4 + // The RFC allows / and ?. + return c != '/' && c != '?' + + case encodeFragment: // §4.1 + // The RFC text is silent but the grammar allows + // everything, so escape nothing but # + return c == '#' + } + } + + // Everything else must be escaped. 
+ return true +} + +// QueryEscape escapes the string so it can be safely placed +// inside a URL query. +func QueryEscape(s string) string { + return escape(s, encodeQueryComponent) +} + +func escape(s string, mode encoding) string { + spaceCount, hexCount := 0, 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(c, mode) { + if c == ' ' && mode == encodeQueryComponent { + spaceCount++ + } else { + hexCount++ + } + } + } + + if spaceCount == 0 && hexCount == 0 { + return s + } + + t := make([]byte, len(s)+2*hexCount) + j := 0 + for i := 0; i < len(s); i++ { + switch c := s[i]; { + case c == ' ' && mode == encodeQueryComponent: + t[j] = '+' + j++ + case shouldEscape(c, mode): + t[j] = '%' + t[j+1] = "0123456789ABCDEF"[c>>4] + t[j+2] = "0123456789ABCDEF"[c&15] + j += 3 + default: + t[j] = s[i] + j++ + } + } + return string(t) +} + +var uiReplacer = strings.NewReplacer( + "%21", "!", + "%27", "'", + "%28", "(", + "%29", ")", + "%2A", "*", +) + +// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986. +func unescapeUserinfo(s string) string { + return uiReplacer.Replace(s) +} + +// Escape reassembles the URL into a valid URL string. +// The general form of the result is one of: +// +// scheme:opaque +// scheme://userinfo@host/path?query#fragment +// +// If u.Opaque is non-empty, String uses the first form; +// otherwise it uses the second form. +// +// In the second form, the following rules apply: +// - if u.Scheme is empty, scheme: is omitted. +// - if u.User is nil, userinfo@ is omitted. +// - if u.Host is empty, host/ is omitted. +// - if u.Scheme and u.Host are empty and u.User is nil, +// the entire scheme://userinfo@host/ is omitted. +// - if u.Host is non-empty and u.Path begins with a /, +// the form host/path does not add its own /. +// - if u.RawQuery is empty, ?query is omitted. +// - if u.Fragment is empty, #fragment is omitted. +func Escape(u *url.URL) string { + var buf bytes.Buffer + if u.Scheme != "" { + buf.WriteString(u.Scheme) + buf.WriteByte(':') + } + if u.Opaque != "" { + buf.WriteString(u.Opaque) + } else { + if u.Scheme != "" || u.Host != "" || u.User != nil { + buf.WriteString("//") + if ui := u.User; ui != nil { + buf.WriteString(unescapeUserinfo(ui.String())) + buf.WriteByte('@') + } + if h := u.Host; h != "" { + buf.WriteString(h) + } + } + if u.Path != "" && u.Path[0] != '/' && u.Host != "" { + buf.WriteByte('/') + } + buf.WriteString(escape(u.Path, encodePath)) + } + if u.RawQuery != "" { + buf.WriteByte('?') + buf.WriteString(u.RawQuery) + } + if u.Fragment != "" { + buf.WriteByte('#') + buf.WriteString(escape(u.Fragment, encodeFragment)) + } + return buf.String() +} diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE deleted file mode 100644 index 339177be6..000000000 --- a/vendor/github.com/beorn7/perks/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt deleted file mode 100644 index 1602287d7..000000000 --- a/vendor/github.com/beorn7/perks/quantile/exampledata.txt +++ /dev/null @@ -1,2388 +0,0 @@ -8 -5 -26 -12 -5 -235 -13 -6 -28 -30 -3 -3 -3 -3 -5 -2 -33 -7 -2 -4 -7 -12 -14 -5 -8 -3 -10 -4 -5 -3 -6 -6 -209 -20 -3 -10 -14 -3 -4 -6 -8 -5 -11 -7 -3 -2 -3 -3 -212 -5 -222 -4 -10 -10 -5 -6 -3 -8 -3 -10 -254 -220 -2 -3 -5 -24 -5 -4 -222 -7 -3 -3 -223 -8 -15 -12 -14 -14 -3 -2 -2 -3 -13 -3 -11 -4 -4 -6 -5 -7 -13 -5 -3 -5 -2 -5 -3 -5 -2 -7 -15 -17 -14 -3 -6 -6 -3 -17 -5 -4 -7 -6 -4 -4 -8 -6 -8 -3 -9 -3 -6 -3 -4 -5 -3 -3 -660 -4 -6 -10 -3 -6 -3 -2 -5 -13 -2 -4 -4 -10 -4 -8 -4 -3 -7 -9 -9 -3 -10 -37 -3 -13 -4 -12 -3 -6 -10 -8 -5 -21 -2 -3 -8 -3 -2 -3 -3 -4 -12 -2 -4 -8 -8 -4 -3 -2 -20 -1 -6 -32 -2 -11 -6 -18 -3 -8 -11 -3 -212 -3 -4 -2 -6 -7 -12 -11 -3 -2 -16 -10 -6 -4 -6 -3 -2 -7 -3 -2 -2 -2 -2 -5 -6 -4 -3 -10 -3 -4 -6 -5 -3 -4 -4 -5 -6 -4 -3 -4 -4 -5 -7 -5 -5 -3 -2 -7 -2 -4 -12 -4 -5 -6 -2 -4 -4 -8 -4 -15 -13 -7 -16 -5 -3 -23 -5 -5 -7 -3 -2 -9 -8 -7 -5 -8 -11 -4 -10 -76 -4 -47 -4 -3 -2 -7 -4 -2 -3 -37 -10 -4 -2 -20 -5 -4 -4 -10 -10 -4 -3 -7 -23 -240 -7 -13 -5 -5 -3 -3 -2 -5 -4 -2 -8 -7 -19 -2 -23 -8 -7 -2 -5 -3 -8 -3 -8 -13 -5 -5 -5 -2 -3 -23 -4 -9 -8 -4 -3 -3 -5 -220 -2 -3 -4 -6 -14 -3 -53 -6 -2 -5 -18 -6 -3 -219 -6 -5 -2 -5 -3 -6 -5 -15 -4 -3 -17 -3 -2 -4 -7 -2 -3 -3 -4 -4 -3 -2 -664 -6 -3 -23 -5 -5 -16 -5 -8 -2 -4 -2 -24 -12 -3 -2 -3 -5 -8 -3 -5 -4 -3 -14 -3 -5 -8 -2 -3 -7 -9 -4 -2 -3 -6 -8 -4 -3 -4 -6 -5 -3 -3 -6 -3 -19 -4 -4 -6 -3 -6 -3 -5 -22 -5 -4 -4 -3 -8 -11 -4 -9 -7 -6 -13 -4 -4 -4 -6 -17 -9 -3 -3 -3 -4 -3 -221 -5 -11 -3 -4 -2 -12 -6 -3 -5 -7 -5 -7 -4 -9 -7 -14 -37 -19 -217 -16 -3 -5 -2 -2 -7 -19 -7 -6 -7 -4 -24 -5 -11 -4 -7 -7 -9 -13 -3 -4 -3 -6 -28 -4 -4 -5 -5 -2 -5 -6 -4 -4 -6 -10 -5 -4 -3 -2 -3 -3 -6 -5 -5 -4 -3 -2 -3 -7 -4 -6 -18 -16 -8 -16 -4 -5 -8 -6 -9 -13 -1545 -6 -215 -6 -5 -6 -3 -45 -31 -5 -2 -2 -4 -3 -3 -2 -5 -4 -3 -5 -7 -7 -4 -5 -8 -5 -4 -749 -2 -31 -9 -11 -2 -11 -5 -4 -4 -7 -9 -11 -4 -5 -4 -7 -3 -4 -6 -2 -15 -3 -4 -3 -4 -3 -5 -2 -13 -5 -5 -3 -3 -23 -4 -4 -5 -7 -4 -13 -2 -4 -3 -4 -2 -6 -2 -7 -3 -5 -5 -3 -29 -5 -4 -4 -3 -10 -2 -3 -79 -16 -6 -6 -7 -7 -3 -5 -5 -7 -4 -3 -7 -9 -5 -6 -5 -9 -6 -3 -6 -4 -17 -2 -10 -9 -3 -6 -2 -3 -21 -22 -5 -11 -4 -2 -17 -2 -224 -2 -14 -3 -4 -4 -2 -4 -4 -4 -4 -5 -3 -4 -4 -10 -2 -6 -3 -3 -5 -7 -2 -7 -5 -6 -3 -218 -2 -2 -5 -2 -6 -3 -5 -222 -14 -6 -33 -3 -2 -5 -3 -3 -3 -9 -5 -3 -3 -2 -7 -4 -3 -4 -3 -5 -6 -5 -26 -4 -13 -9 -7 -3 -221 -3 -3 -4 -4 -4 -4 -2 -18 -5 -3 -7 -9 -6 -8 -3 -10 -3 -11 -9 -5 -4 -17 -5 -5 -6 -6 -3 -2 -4 -12 -17 -6 -7 -218 -4 -2 -4 -10 -3 -5 -15 -3 -9 -4 -3 -3 -6 -29 -3 -3 -4 -5 -5 -3 -8 -5 -6 -6 -7 -5 -3 -5 -3 -29 -2 -31 -5 -15 -24 -16 -5 -207 -4 -3 -3 -2 -15 -4 -4 -13 -5 -5 -4 -6 -10 -2 -7 -8 -4 -6 -20 -5 -3 -4 -3 -12 -12 -5 -17 -7 -3 -3 -3 -6 -10 -3 -5 -25 -80 -4 -9 -3 -2 -11 -3 -3 -2 -3 -8 -7 -5 -5 -19 -5 -3 -3 -12 -11 -2 -6 -5 -5 -5 -3 -3 -3 -4 -209 -14 -3 -2 -5 -19 -4 -4 -3 -4 -14 -5 -6 -4 -13 -9 
-7 -4 -7 -10 -2 -9 -5 -7 -2 -8 -4 -6 -5 -5 -222 -8 -7 -12 -5 -216 -3 -4 -4 -6 -3 -14 -8 -7 -13 -4 -3 -3 -3 -3 -17 -5 -4 -3 -33 -6 -6 -33 -7 -5 -3 -8 -7 -5 -2 -9 -4 -2 -233 -24 -7 -4 -8 -10 -3 -4 -15 -2 -16 -3 -3 -13 -12 -7 -5 -4 -207 -4 -2 -4 -27 -15 -2 -5 -2 -25 -6 -5 -5 -6 -13 -6 -18 -6 -4 -12 -225 -10 -7 -5 -2 -2 -11 -4 -14 -21 -8 -10 -3 -5 -4 -232 -2 -5 -5 -3 -7 -17 -11 -6 -6 -23 -4 -6 -3 -5 -4 -2 -17 -3 -6 -5 -8 -3 -2 -2 -14 -9 -4 -4 -2 -5 -5 -3 -7 -6 -12 -6 -10 -3 -6 -2 -2 -19 -5 -4 -4 -9 -2 -4 -13 -3 -5 -6 -3 -6 -5 -4 -9 -6 -3 -5 -7 -3 -6 -6 -4 -3 -10 -6 -3 -221 -3 -5 -3 -6 -4 -8 -5 -3 -6 -4 -4 -2 -54 -5 -6 -11 -3 -3 -4 -4 -4 -3 -7 -3 -11 -11 -7 -10 -6 -13 -223 -213 -15 -231 -7 -3 -7 -228 -2 -3 -4 -4 -5 -6 -7 -4 -13 -3 -4 -5 -3 -6 -4 -6 -7 -2 -4 -3 -4 -3 -3 -6 -3 -7 -3 -5 -18 -5 -6 -8 -10 -3 -3 -3 -2 -4 -2 -4 -4 -5 -6 -6 -4 -10 -13 -3 -12 -5 -12 -16 -8 -4 -19 -11 -2 -4 -5 -6 -8 -5 -6 -4 -18 -10 -4 -2 -216 -6 -6 -6 -2 -4 -12 -8 -3 -11 -5 -6 -14 -5 -3 -13 -4 -5 -4 -5 -3 -28 -6 -3 -7 -219 -3 -9 -7 -3 -10 -6 -3 -4 -19 -5 -7 -11 -6 -15 -19 -4 -13 -11 -3 -7 -5 -10 -2 -8 -11 -2 -6 -4 -6 -24 -6 -3 -3 -3 -3 -6 -18 -4 -11 -4 -2 -5 -10 -8 -3 -9 -5 -3 -4 -5 -6 -2 -5 -7 -4 -4 -14 -6 -4 -4 -5 -5 -7 -2 -4 -3 -7 -3 -3 -6 -4 -5 -4 -4 -4 -3 -3 -3 -3 -8 -14 -2 -3 -5 -3 -2 -4 -5 -3 -7 -3 -3 -18 -3 -4 -4 -5 -7 -3 -3 -3 -13 -5 -4 -8 -211 -5 -5 -3 -5 -2 -5 -4 -2 -655 -6 -3 -5 -11 -2 -5 -3 -12 -9 -15 -11 -5 -12 -217 -2 -6 -17 -3 -3 -207 -5 -5 -4 -5 -9 -3 -2 -8 -5 -4 -3 -2 -5 -12 -4 -14 -5 -4 -2 -13 -5 -8 -4 -225 -4 -3 -4 -5 -4 -3 -3 -6 -23 -9 -2 -6 -7 -233 -4 -4 -6 -18 -3 -4 -6 -3 -4 -4 -2 -3 -7 -4 -13 -227 -4 -3 -5 -4 -2 -12 -9 -17 -3 -7 -14 -6 -4 -5 -21 -4 -8 -9 -2 -9 -25 -16 -3 -6 -4 -7 -8 -5 -2 -3 -5 -4 -3 -3 -5 -3 -3 -3 -2 -3 -19 -2 -4 -3 -4 -2 -3 -4 -4 -2 -4 -3 -3 -3 -2 -6 -3 -17 -5 -6 -4 -3 -13 -5 -3 -3 -3 -4 -9 -4 -2 -14 -12 -4 -5 -24 -4 -3 -37 -12 -11 -21 -3 -4 -3 -13 -4 -2 -3 -15 -4 -11 -4 -4 -3 -8 -3 -4 -4 -12 -8 -5 -3 -3 -4 -2 -220 -3 -5 -223 -3 -3 -3 -10 -3 -15 -4 -241 -9 -7 -3 -6 -6 -23 -4 -13 -7 -3 -4 -7 -4 -9 -3 -3 -4 -10 -5 -5 -1 -5 -24 -2 -4 -5 -5 -6 -14 -3 -8 -2 -3 -5 -13 -13 -3 -5 -2 -3 -15 -3 -4 -2 -10 -4 -4 -4 -5 -5 -3 -5 -3 -4 -7 -4 -27 -3 -6 -4 -15 -3 -5 -6 -6 -5 -4 -8 -3 -9 -2 -6 -3 -4 -3 -7 -4 -18 -3 -11 -3 -3 -8 -9 -7 -24 -3 -219 -7 -10 -4 -5 -9 -12 -2 -5 -4 -4 -4 -3 -3 -19 -5 -8 -16 -8 -6 -22 -3 -23 -3 -242 -9 -4 -3 -3 -5 -7 -3 -3 -5 -8 -3 -7 -5 -14 -8 -10 -3 -4 -3 -7 -4 -6 -7 -4 -10 -4 -3 -11 -3 -7 -10 -3 -13 -6 -8 -12 -10 -5 -7 -9 -3 -4 -7 -7 -10 -8 -30 -9 -19 -4 -3 -19 -15 -4 -13 -3 -215 -223 -4 -7 -4 -8 -17 -16 -3 -7 -6 -5 -5 -4 -12 -3 -7 -4 -4 -13 -4 -5 -2 -5 -6 -5 -6 -6 -7 -10 -18 -23 -9 -3 -3 -6 -5 -2 -4 -2 -7 -3 -3 -2 -5 -5 -14 -10 -224 -6 -3 -4 -3 -7 -5 -9 -3 -6 -4 -2 -5 -11 -4 -3 -3 -2 -8 -4 -7 -4 -10 -7 -3 -3 -18 -18 -17 -3 -3 -3 -4 -5 -3 -3 -4 -12 -7 -3 -11 -13 -5 -4 -7 -13 -5 -4 -11 -3 -12 -3 -6 -4 -4 -21 -4 -6 -9 -5 -3 -10 -8 -4 -6 -4 -4 -6 -5 -4 -8 -6 -4 -6 -4 -4 -5 -9 -6 -3 -4 -2 -9 -3 -18 -2 -4 -3 -13 -3 -6 -6 -8 -7 -9 -3 -2 -16 -3 -4 -6 -3 -2 -33 -22 -14 -4 -9 -12 -4 -5 -6 -3 -23 -9 -4 -3 -5 -5 -3 -4 -5 -3 -5 -3 -10 -4 -5 -5 -8 -4 -4 -6 -8 -5 -4 -3 -4 -6 -3 -3 -3 -5 -9 -12 -6 -5 -9 -3 -5 -3 -2 -2 -2 -18 -3 -2 -21 -2 -5 -4 -6 -4 -5 -10 -3 -9 -3 -2 -10 -7 -3 -6 -6 -4 -4 -8 -12 -7 -3 -7 -3 -3 -9 -3 -4 -5 -4 -4 -5 -5 -10 -15 -4 -4 -14 -6 -227 -3 -14 -5 -216 -22 -5 -4 -2 -2 -6 -3 -4 -2 -9 -9 -4 -3 -28 -13 -11 -4 -5 -3 -3 -2 -3 -3 -5 -3 -4 -3 -5 -23 -26 -3 -4 -5 -6 -4 -6 -3 -5 -5 -3 -4 -3 -2 -2 -2 -7 -14 -3 -6 -7 -17 -2 -2 -15 -14 -16 -4 -6 -7 -13 -6 -4 -5 -6 -16 -3 
-3 -28 -3 -6 -15 -3 -9 -2 -4 -6 -3 -3 -22 -4 -12 -6 -7 -2 -5 -4 -10 -3 -16 -6 -9 -2 -5 -12 -7 -5 -5 -5 -5 -2 -11 -9 -17 -4 -3 -11 -7 -3 -5 -15 -4 -3 -4 -211 -8 -7 -5 -4 -7 -6 -7 -6 -3 -6 -5 -6 -5 -3 -4 -4 -26 -4 -6 -10 -4 -4 -3 -2 -3 -3 -4 -5 -9 -3 -9 -4 -4 -5 -5 -8 -2 -4 -2 -3 -8 -4 -11 -19 -5 -8 -6 -3 -5 -6 -12 -3 -2 -4 -16 -12 -3 -4 -4 -8 -6 -5 -6 -6 -219 -8 -222 -6 -16 -3 -13 -19 -5 -4 -3 -11 -6 -10 -4 -7 -7 -12 -5 -3 -3 -5 -6 -10 -3 -8 -2 -5 -4 -7 -2 -4 -4 -2 -12 -9 -6 -4 -2 -40 -2 -4 -10 -4 -223 -4 -2 -20 -6 -7 -24 -5 -4 -5 -2 -20 -16 -6 -5 -13 -2 -3 -3 -19 -3 -2 -4 -5 -6 -7 -11 -12 -5 -6 -7 -7 -3 -5 -3 -5 -3 -14 -3 -4 -4 -2 -11 -1 -7 -3 -9 -6 -11 -12 -5 -8 -6 -221 -4 -2 -12 -4 -3 -15 -4 -5 -226 -7 -218 -7 -5 -4 -5 -18 -4 -5 -9 -4 -4 -2 -9 -18 -18 -9 -5 -6 -6 -3 -3 -7 -3 -5 -4 -4 -4 -12 -3 -6 -31 -5 -4 -7 -3 -6 -5 -6 -5 -11 -2 -2 -11 -11 -6 -7 -5 -8 -7 -10 -5 -23 -7 -4 -3 -5 -34 -2 -5 -23 -7 -3 -6 -8 -4 -4 -4 -2 -5 -3 -8 -5 -4 -8 -25 -2 -3 -17 -8 -3 -4 -8 -7 -3 -15 -6 -5 -7 -21 -9 -5 -6 -6 -5 -3 -2 -3 -10 -3 -6 -3 -14 -7 -4 -4 -8 -7 -8 -2 -6 -12 -4 -213 -6 -5 -21 -8 -2 -5 -23 -3 -11 -2 -3 -6 -25 -2 -3 -6 -7 -6 -6 -4 -4 -6 -3 -17 -9 -7 -6 -4 -3 -10 -7 -2 -3 -3 -3 -11 -8 -3 -7 -6 -4 -14 -36 -3 -4 -3 -3 -22 -13 -21 -4 -2 -7 -4 -4 -17 -15 -3 -7 -11 -2 -4 -7 -6 -209 -6 -3 -2 -2 -24 -4 -9 -4 -3 -3 -3 -29 -2 -2 -4 -3 -3 -5 -4 -6 -3 -3 -2 -4 diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go deleted file mode 100644 index 587b1fc5b..000000000 --- a/vendor/github.com/beorn7/perks/quantile/stream.go +++ /dev/null @@ -1,292 +0,0 @@ -// Package quantile computes approximate quantiles over an unbounded data -// stream within low memory and CPU bounds. -// -// A small amount of accuracy is traded to achieve the above properties. -// -// Multiple streams can be merged before calling Query to generate a single set -// of results. This is meaningful when the streams represent the same type of -// data. See Merge and Samples. -// -// For more detailed information about the algorithm used, see: -// -// Effective Computation of Biased Quantiles over Data Streams -// -// http://www.cs.rutgers.edu/~muthu/bquant.pdf -package quantile - -import ( - "math" - "sort" -) - -// Sample holds an observed value and meta information for compression. JSON -// tags have been added for convenience. -type Sample struct { - Value float64 `json:",string"` - Width float64 `json:",string"` - Delta float64 `json:",string"` -} - -// Samples represents a slice of samples. It implements sort.Interface. -type Samples []Sample - -func (a Samples) Len() int { return len(a) } -func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } -func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type invariant func(s *stream, r float64) float64 - -// NewLowBiased returns an initialized Stream for low-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the lower ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. 
-func NewLowBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * r - } - return newStream(ƒ) -} - -// NewHighBiased returns an initialized Stream for high-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the higher ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewHighBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * (s.n - r) - } - return newStream(ƒ) -} - -// NewTargeted returns an initialized Stream concerned with a particular set of -// quantile values that are supplied a priori. Knowing these a priori reduces -// space and computation time. The targets map maps the desired quantiles to -// their absolute errors, i.e. the true quantile of a value returned by a query -// is guaranteed to be within (Quantile±Epsilon). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targets map[float64]float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - var m = math.MaxFloat64 - var f float64 - for quantile, epsilon := range targets { - if quantile*s.n <= r { - f = (2 * epsilon * r) / quantile - } else { - f = (2 * epsilon * (s.n - r)) / (1 - quantile) - } - if f < m { - m = f - } - } - return m - } - return newStream(ƒ) -} - -// Stream computes quantiles for a stream of float64s. It is not thread-safe by -// design. Take care when using across multiple goroutines. -type Stream struct { - *stream - b Samples - sorted bool -} - -func newStream(ƒ invariant) *Stream { - x := &stream{ƒ: ƒ} - return &Stream{x, make(Samples, 0, 500), true} -} - -// Insert inserts v into the stream. -func (s *Stream) Insert(v float64) { - s.insert(Sample{Value: v, Width: 1}) -} - -func (s *Stream) insert(sample Sample) { - s.b = append(s.b, sample) - s.sorted = false - if len(s.b) == cap(s.b) { - s.flush() - } -} - -// Query returns the computed qth percentiles value. If s was created with -// NewTargeted, and q is not in the set of quantiles provided a priori, Query -// will return an unspecified result. -func (s *Stream) Query(q float64) float64 { - if !s.flushed() { - // Fast path when there hasn't been enough data for a flush; - // this also yields better accuracy for small sets of data. - l := len(s.b) - if l == 0 { - return 0 - } - i := int(float64(l) * q) - if i > 0 { - i -= 1 - } - s.maybeSort() - return s.b[i].Value - } - s.flush() - return s.stream.query(q) -} - -// Merge merges samples into the underlying streams samples. This is handy when -// merging multiple streams from separate threads, database shards, etc. -// -// ATTENTION: This method is broken and does not yield correct results. The -// underlying algorithm is not capable of merging streams correctly. -func (s *Stream) Merge(samples Samples) { - sort.Sort(samples) - s.stream.merge(samples) -} - -// Reset reinitializes and clears the list reusing the samples buffer memory. -func (s *Stream) Reset() { - s.stream.reset() - s.b = s.b[:0] -} - -// Samples returns stream samples held by s. 
-func (s *Stream) Samples() Samples { - if !s.flushed() { - return s.b - } - s.flush() - return s.stream.samples() -} - -// Count returns the total number of samples observed in the stream -// since initialization. -func (s *Stream) Count() int { - return len(s.b) + s.stream.count() -} - -func (s *Stream) flush() { - s.maybeSort() - s.stream.merge(s.b) - s.b = s.b[:0] -} - -func (s *Stream) maybeSort() { - if !s.sorted { - s.sorted = true - sort.Sort(s.b) - } -} - -func (s *Stream) flushed() bool { - return len(s.stream.l) > 0 -} - -type stream struct { - n float64 - l []Sample - ƒ invariant -} - -func (s *stream) reset() { - s.l = s.l[:0] - s.n = 0 -} - -func (s *stream) insert(v float64) { - s.merge(Samples{{v, 1, 0}}) -} - -func (s *stream) merge(samples Samples) { - // TODO(beorn7): This tries to merge not only individual samples, but - // whole summaries. The paper doesn't mention merging summaries at - // all. Unittests show that the merging is inaccurate. Find out how to - // do merges properly. - var r float64 - i := 0 - for _, sample := range samples { - for ; i < len(s.l); i++ { - c := s.l[i] - if c.Value > sample.Value { - // Insert at position i. - s.l = append(s.l, Sample{}) - copy(s.l[i+1:], s.l[i:]) - s.l[i] = Sample{ - sample.Value, - sample.Width, - math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), - // TODO(beorn7): How to calculate delta correctly? - } - i++ - goto inserted - } - r += c.Width - } - s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) - i++ - inserted: - s.n += sample.Width - r += sample.Width - } - s.compress() -} - -func (s *stream) count() int { - return int(s.n) -} - -func (s *stream) query(q float64) float64 { - t := math.Ceil(q * s.n) - t += math.Ceil(s.ƒ(s, t) / 2) - p := s.l[0] - var r float64 - for _, c := range s.l[1:] { - r += p.Width - if r+c.Width+c.Delta > t { - return p.Value - } - p = c - } - return p.Value -} - -func (s *stream) compress() { - if len(s.l) < 2 { - return - } - x := s.l[len(s.l)-1] - xi := len(s.l) - 1 - r := s.n - 1 - x.Width - - for i := len(s.l) - 2; i >= 0; i-- { - c := s.l[i] - if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { - x.Width += c.Width - s.l[xi] = x - // Remove element at i. - copy(s.l[i:], s.l[i+1:]) - s.l = s.l[:len(s.l)-1] - xi -= 1 - } else { - x = c - xi = i - } - r -= c.Width - } -} - -func (s *stream) samples() Samples { - samples := make(Samples, len(s.l)) - copy(samples, s.l) - return samples -} diff --git a/vendor/github.com/blang/semver/LICENSE b/vendor/github.com/blang/semver/LICENSE deleted file mode 100644 index 5ba5c86fc..000000000 --- a/vendor/github.com/blang/semver/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License - -Copyright (c) 2014 Benedikt Lang - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md deleted file mode 100644 index 4399639e2..000000000 --- a/vendor/github.com/blang/semver/README.md +++ /dev/null @@ -1,191 +0,0 @@ -semver for golang [![Build Status](https://drone.io/github.com/blang/semver/status.png)](https://drone.io/github.com/blang/semver/latest) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master) -====== - -semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`. - -Usage ------ -```bash -$ go get github.com/blang/semver -``` -Note: Always vendor your dependencies or fix on a specific version tag. - -```go -import github.com/blang/semver -v1, err := semver.Make("1.0.0-beta") -v2, err := semver.Make("2.0.0-beta") -v1.Compare(v2) -``` - -Also check the [GoDocs](http://godoc.org/github.com/blang/semver). - -Why should I use this lib? ------ - -- Fully spec compatible -- No reflection -- No regex -- Fully tested (Coverage >99%) -- Readable parsing/validation errors -- Fast (See [Benchmarks](#benchmarks)) -- Only Stdlib -- Uses values instead of pointers -- Many features, see below - - -Features ------ - -- Parsing and validation at all levels -- Comparator-like comparisons -- Compare Helper Methods -- InPlace manipulation -- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1` -- Sortable (implements sort.Interface) -- database/sql compatible (sql.Scanner/Valuer) -- encoding/json compatible (json.Marshaler/Unmarshaler) - -Ranges ------- - -A `Range` is a set of conditions which specify which versions satisfy the range. - -A condition is composed of an operator and a version. The supported operators are: - -- `<1.0.0` Less than `1.0.0` -- `<=1.0.0` Less than or equal to `1.0.0` -- `>1.0.0` Greater than `1.0.0` -- `>=1.0.0` Greater than or equal to `1.0.0` -- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0` -- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`. - -A `Range` can link multiple `Ranges` separated by space: - -Ranges can be linked by logical AND: - - - `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0` - - `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2` - -Ranges can also be linked by logical OR: - - - `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x` - -AND has a higher precedence than OR. It's not possible to use brackets. 
- -Ranges can be combined by both AND and OR - - - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` - -Range usage: - -``` -v, err := semver.Parse("1.2.3") -range, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0") -if range(v) { - //valid -} - -``` - -Example ------ - -Have a look at full examples in [examples/main.go](examples/main.go) - -```go -import github.com/blang/semver - -v, err := semver.Make("0.0.1-alpha.preview+123.github") -fmt.Printf("Major: %d\n", v.Major) -fmt.Printf("Minor: %d\n", v.Minor) -fmt.Printf("Patch: %d\n", v.Patch) -fmt.Printf("Pre: %s\n", v.Pre) -fmt.Printf("Build: %s\n", v.Build) - -// Prerelease versions array -if len(v.Pre) > 0 { - fmt.Println("Prerelease versions:") - for i, pre := range v.Pre { - fmt.Printf("%d: %q\n", i, pre) - } -} - -// Build meta data array -if len(v.Build) > 0 { - fmt.Println("Build meta data:") - for i, build := range v.Build { - fmt.Printf("%d: %q\n", i, build) - } -} - -v001, err := semver.Make("0.0.1") -// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE -v001.GT(v) == true -v.LT(v001) == true -v.GTE(v) == true -v.LTE(v) == true - -// Or use v.Compare(v2) for comparisons (-1, 0, 1): -v001.Compare(v) == 1 -v.Compare(v001) == -1 -v.Compare(v) == 0 - -// Manipulate Version in place: -v.Pre[0], err = semver.NewPRVersion("beta") -if err != nil { - fmt.Printf("Error parsing pre release version: %q", err) -} - -fmt.Println("\nValidate versions:") -v.Build[0] = "?" - -err = v.Validate() -if err != nil { - fmt.Printf("Validation failed: %s\n", err) -} -``` - - -Benchmarks ------ - - BenchmarkParseSimple-4 5000000 390 ns/op 48 B/op 1 allocs/op - BenchmarkParseComplex-4 1000000 1813 ns/op 256 B/op 7 allocs/op - BenchmarkParseAverage-4 1000000 1171 ns/op 163 B/op 4 allocs/op - BenchmarkStringSimple-4 20000000 119 ns/op 16 B/op 1 allocs/op - BenchmarkStringLarger-4 10000000 206 ns/op 32 B/op 2 allocs/op - BenchmarkStringComplex-4 5000000 324 ns/op 80 B/op 3 allocs/op - BenchmarkStringAverage-4 5000000 273 ns/op 53 B/op 2 allocs/op - BenchmarkValidateSimple-4 200000000 9.33 ns/op 0 B/op 0 allocs/op - BenchmarkValidateComplex-4 3000000 469 ns/op 0 B/op 0 allocs/op - BenchmarkValidateAverage-4 5000000 256 ns/op 0 B/op 0 allocs/op - BenchmarkCompareSimple-4 100000000 11.8 ns/op 0 B/op 0 allocs/op - BenchmarkCompareComplex-4 50000000 30.8 ns/op 0 B/op 0 allocs/op - BenchmarkCompareAverage-4 30000000 41.5 ns/op 0 B/op 0 allocs/op - BenchmarkSort-4 3000000 419 ns/op 256 B/op 2 allocs/op - BenchmarkRangeParseSimple-4 2000000 850 ns/op 192 B/op 5 allocs/op - BenchmarkRangeParseAverage-4 1000000 1677 ns/op 400 B/op 10 allocs/op - BenchmarkRangeParseComplex-4 300000 5214 ns/op 1440 B/op 30 allocs/op - BenchmarkRangeMatchSimple-4 50000000 25.6 ns/op 0 B/op 0 allocs/op - BenchmarkRangeMatchAverage-4 30000000 56.4 ns/op 0 B/op 0 allocs/op - BenchmarkRangeMatchComplex-4 10000000 153 ns/op 0 B/op 0 allocs/op - -See benchmark cases at [semver_test.go](semver_test.go) - - -Motivation ------ - -I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex which i don't like. - - -Contribution ------ - -Feel free to make a pull request. For bigger changes create a issue first to discuss about it. - - -License ------ - -See [LICENSE](LICENSE) file. 
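Note on the removed README above: its "Range usage" snippet names a variable `range`, which is a reserved word in Go, so that snippet would not compile as written. Below is a minimal, compile-clean sketch of the same usage, relying only on the exported API visible in the removed sources (`Parse`, `MustParse`, `Compare`, `GT`, `ParseRange`); it is illustrative only, since this change drops the package from vendor/.

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// Parse returns a validated Version or an error.
	v, err := semver.Parse("1.2.3")
	if err != nil {
		panic(err)
	}

	// Comparison helpers mirror Compare's -1/0/1 result.
	fmt.Println(v.GT(semver.MustParse("1.0.0")))      // true
	fmt.Println(v.Compare(semver.MustParse("2.0.0"))) // -1

	// ParseRange yields a Range, i.e. a func(Version) bool.
	// The variable is deliberately not named `range` (a Go keyword).
	inRange, err := semver.ParseRange(">=1.0.0 <2.0.0 || >=3.0.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(inRange(v))                         // true: 1.2.3 lies in [1.0.0, 2.0.0)
	fmt.Println(inRange(semver.MustParse("2.1.0"))) // false: outside both alternatives
}
```

As the removed range.go shows, a `Range` is just a closure over comparators, and `ParseRange` composes the space-separated conditions with `AND` and the `||`-separated groups with `OR`, which is why the parsed range can be called directly as a function.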
diff --git a/vendor/github.com/blang/semver/json.go b/vendor/github.com/blang/semver/json.go deleted file mode 100644 index a74bf7c44..000000000 --- a/vendor/github.com/blang/semver/json.go +++ /dev/null @@ -1,23 +0,0 @@ -package semver - -import ( - "encoding/json" -) - -// MarshalJSON implements the encoding/json.Marshaler interface. -func (v Version) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements the encoding/json.Unmarshaler interface. -func (v *Version) UnmarshalJSON(data []byte) (err error) { - var versionString string - - if err = json.Unmarshal(data, &versionString); err != nil { - return - } - - *v, err = Parse(versionString) - - return -} diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go deleted file mode 100644 index 238e1312d..000000000 --- a/vendor/github.com/blang/semver/range.go +++ /dev/null @@ -1,233 +0,0 @@ -package semver - -import ( - "fmt" - "strings" - "unicode" -) - -type comparator func(Version, Version) bool - -var ( - compEQ comparator = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == 0 - } - compNE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) != 0 - } - compGT = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == 1 - } - compGE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) >= 0 - } - compLT = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == -1 - } - compLE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) <= 0 - } -) - -type versionRange struct { - v Version - c comparator -} - -// rangeFunc creates a Range from the given versionRange. -func (vr *versionRange) rangeFunc() Range { - return Range(func(v Version) bool { - return vr.c(v, vr.v) - }) -} - -// Range represents a range of versions. -// A Range can be used to check if a Version satisfies it: -// -// range, err := semver.ParseRange(">1.0.0 <2.0.0") -// range(semver.MustParse("1.1.1") // returns true -type Range func(Version) bool - -// OR combines the existing Range with another Range using logical OR. -func (rf Range) OR(f Range) Range { - return Range(func(v Version) bool { - return rf(v) || f(v) - }) -} - -// AND combines the existing Range with another Range using logical AND. -func (rf Range) AND(f Range) Range { - return Range(func(v Version) bool { - return rf(v) && f(v) - }) -} - -// ParseRange parses a range and returns a Range. -// If the range could not be parsed an error is returned. -// -// Valid ranges are: -// - "<1.0.0" -// - "<=1.0.0" -// - ">1.0.0" -// - ">=1.0.0" -// - "1.0.0", "=1.0.0", "==1.0.0" -// - "!1.0.0", "!=1.0.0" -// -// A Range can consist of multiple ranges separated by space: -// Ranges can be linked by logical AND: -// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0" -// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2 -// -// Ranges can also be linked by logical OR: -// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x" -// -// AND has a higher precedence than OR. It's not possible to use brackets. 
-// -// Ranges can be combined by both AND and OR -// -// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` -func ParseRange(s string) (Range, error) { - parts := splitAndTrim(s) - orParts, err := splitORParts(parts) - if err != nil { - return nil, err - } - var orFn Range - for _, p := range orParts { - var andFn Range - for _, ap := range p { - opStr, vStr, err := splitComparatorVersion(ap) - if err != nil { - return nil, err - } - vr, err := buildVersionRange(opStr, vStr) - if err != nil { - return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err) - } - rf := vr.rangeFunc() - - // Set function - if andFn == nil { - andFn = rf - } else { // Combine with existing function - andFn = andFn.AND(rf) - } - } - if orFn == nil { - orFn = andFn - } else { - orFn = orFn.OR(andFn) - } - - } - return orFn, nil -} - -// splitORParts splits the already cleaned parts by '||'. -// Checks for invalid positions of the operator and returns an -// error if found. -func splitORParts(parts []string) ([][]string, error) { - var ORparts [][]string - last := 0 - for i, p := range parts { - if p == "||" { - if i == 0 { - return nil, fmt.Errorf("First element in range is '||'") - } - ORparts = append(ORparts, parts[last:i]) - last = i + 1 - } - } - if last == len(parts) { - return nil, fmt.Errorf("Last element in range is '||'") - } - ORparts = append(ORparts, parts[last:]) - return ORparts, nil -} - -// buildVersionRange takes a slice of 2: operator and version -// and builds a versionRange, otherwise an error. -func buildVersionRange(opStr, vStr string) (*versionRange, error) { - c := parseComparator(opStr) - if c == nil { - return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, "")) - } - v, err := Parse(vStr) - if err != nil { - return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err) - } - - return &versionRange{ - v: v, - c: c, - }, nil - -} - -// splitAndTrim splits a range string by spaces and cleans leading and trailing spaces -func splitAndTrim(s string) (result []string) { - last := 0 - for i := 0; i < len(s); i++ { - if s[i] == ' ' { - if last < i-1 { - result = append(result, s[last:i]) - } - last = i + 1 - } - } - if last < len(s)-1 { - result = append(result, s[last:]) - } - // parts := strings.Split(s, " ") - // for _, x := range parts { - // if s := strings.TrimSpace(x); len(s) != 0 { - // result = append(result, s) - // } - // } - return -} - -// splitComparatorVersion splits the comparator from the version. -// Spaces between the comparator and the version are not allowed. -// Input must be free of leading or trailing spaces. -func splitComparatorVersion(s string) (string, string, error) { - i := strings.IndexFunc(s, unicode.IsDigit) - if i == -1 { - return "", "", fmt.Errorf("Could not get version from string: %q", s) - } - return strings.TrimSpace(s[0:i]), s[i:], nil -} - -func parseComparator(s string) comparator { - switch s { - case "==": - fallthrough - case "": - fallthrough - case "=": - return compEQ - case ">": - return compGT - case ">=": - return compGE - case "<": - return compLT - case "<=": - return compLE - case "!": - fallthrough - case "!=": - return compNE - } - - return nil -} - -// MustParseRange is like ParseRange but panics if the range cannot be parsed. 
-func MustParseRange(s string) Range { - r, err := ParseRange(s) - if err != nil { - panic(`semver: ParseRange(` + s + `): ` + err.Error()) - } - return r -} diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go deleted file mode 100644 index 8ee0842e6..000000000 --- a/vendor/github.com/blang/semver/semver.go +++ /dev/null @@ -1,418 +0,0 @@ -package semver - -import ( - "errors" - "fmt" - "strconv" - "strings" -) - -const ( - numbers string = "0123456789" - alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" - alphanum = alphas + numbers -) - -// SpecVersion is the latest fully supported spec version of semver -var SpecVersion = Version{ - Major: 2, - Minor: 0, - Patch: 0, -} - -// Version represents a semver compatible version -type Version struct { - Major uint64 - Minor uint64 - Patch uint64 - Pre []PRVersion - Build []string //No Precendence -} - -// Version to string -func (v Version) String() string { - b := make([]byte, 0, 5) - b = strconv.AppendUint(b, v.Major, 10) - b = append(b, '.') - b = strconv.AppendUint(b, v.Minor, 10) - b = append(b, '.') - b = strconv.AppendUint(b, v.Patch, 10) - - if len(v.Pre) > 0 { - b = append(b, '-') - b = append(b, v.Pre[0].String()...) - - for _, pre := range v.Pre[1:] { - b = append(b, '.') - b = append(b, pre.String()...) - } - } - - if len(v.Build) > 0 { - b = append(b, '+') - b = append(b, v.Build[0]...) - - for _, build := range v.Build[1:] { - b = append(b, '.') - b = append(b, build...) - } - } - - return string(b) -} - -// Equals checks if v is equal to o. -func (v Version) Equals(o Version) bool { - return (v.Compare(o) == 0) -} - -// EQ checks if v is equal to o. -func (v Version) EQ(o Version) bool { - return (v.Compare(o) == 0) -} - -// NE checks if v is not equal to o. -func (v Version) NE(o Version) bool { - return (v.Compare(o) != 0) -} - -// GT checks if v is greater than o. -func (v Version) GT(o Version) bool { - return (v.Compare(o) == 1) -} - -// GTE checks if v is greater than or equal to o. -func (v Version) GTE(o Version) bool { - return (v.Compare(o) >= 0) -} - -// GE checks if v is greater than or equal to o. -func (v Version) GE(o Version) bool { - return (v.Compare(o) >= 0) -} - -// LT checks if v is less than o. -func (v Version) LT(o Version) bool { - return (v.Compare(o) == -1) -} - -// LTE checks if v is less than or equal to o. -func (v Version) LTE(o Version) bool { - return (v.Compare(o) <= 0) -} - -// LE checks if v is less than or equal to o. 
-func (v Version) LE(o Version) bool { - return (v.Compare(o) <= 0) -} - -// Compare compares Versions v to o: -// -1 == v is less than o -// 0 == v is equal to o -// 1 == v is greater than o -func (v Version) Compare(o Version) int { - if v.Major != o.Major { - if v.Major > o.Major { - return 1 - } - return -1 - } - if v.Minor != o.Minor { - if v.Minor > o.Minor { - return 1 - } - return -1 - } - if v.Patch != o.Patch { - if v.Patch > o.Patch { - return 1 - } - return -1 - } - - // Quick comparison if a version has no prerelease versions - if len(v.Pre) == 0 && len(o.Pre) == 0 { - return 0 - } else if len(v.Pre) == 0 && len(o.Pre) > 0 { - return 1 - } else if len(v.Pre) > 0 && len(o.Pre) == 0 { - return -1 - } - - i := 0 - for ; i < len(v.Pre) && i < len(o.Pre); i++ { - if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 { - continue - } else if comp == 1 { - return 1 - } else { - return -1 - } - } - - // If all pr versions are the equal but one has further prversion, this one greater - if i == len(v.Pre) && i == len(o.Pre) { - return 0 - } else if i == len(v.Pre) && i < len(o.Pre) { - return -1 - } else { - return 1 - } - -} - -// Validate validates v and returns error in case -func (v Version) Validate() error { - // Major, Minor, Patch already validated using uint64 - - for _, pre := range v.Pre { - if !pre.IsNum { //Numeric prerelease versions already uint64 - if len(pre.VersionStr) == 0 { - return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr) - } - if !containsOnly(pre.VersionStr, alphanum) { - return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr) - } - } - } - - for _, build := range v.Build { - if len(build) == 0 { - return fmt.Errorf("Build meta data can not be empty %q", build) - } - if !containsOnly(build, alphanum) { - return fmt.Errorf("Invalid character(s) found in build meta data %q", build) - } - } - - return nil -} - -// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error -func New(s string) (vp *Version, err error) { - v, err := Parse(s) - vp = &v - return -} - -// Make is an alias for Parse, parses version string and returns a validated Version or error -func Make(s string) (Version, error) { - return Parse(s) -} - -// ParseTolerant allows for certain version specifications that do not strictly adhere to semver -// specs to be parsed by this library. It does so by normalizing versions before passing them to -// Parse(). 
It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions -// with only major and minor components specified -func ParseTolerant(s string) (Version, error) { - s = strings.TrimSpace(s) - s = strings.TrimPrefix(s, "v") - - // Split into major.minor.(patch+pr+meta) - parts := strings.SplitN(s, ".", 3) - if len(parts) < 3 { - if strings.ContainsAny(parts[len(parts)-1], "+-") { - return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data") - } - for len(parts) < 3 { - parts = append(parts, "0") - } - s = strings.Join(parts, ".") - } - - return Parse(s) -} - -// Parse parses version string and returns a validated Version or error -func Parse(s string) (Version, error) { - if len(s) == 0 { - return Version{}, errors.New("Version string empty") - } - - // Split into major.minor.(patch+pr+meta) - parts := strings.SplitN(s, ".", 3) - if len(parts) != 3 { - return Version{}, errors.New("No Major.Minor.Patch elements found") - } - - // Major - if !containsOnly(parts[0], numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0]) - } - if hasLeadingZeroes(parts[0]) { - return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0]) - } - major, err := strconv.ParseUint(parts[0], 10, 64) - if err != nil { - return Version{}, err - } - - // Minor - if !containsOnly(parts[1], numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1]) - } - if hasLeadingZeroes(parts[1]) { - return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1]) - } - minor, err := strconv.ParseUint(parts[1], 10, 64) - if err != nil { - return Version{}, err - } - - v := Version{} - v.Major = major - v.Minor = minor - - var build, prerelease []string - patchStr := parts[2] - - if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 { - build = strings.Split(patchStr[buildIndex+1:], ".") - patchStr = patchStr[:buildIndex] - } - - if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 { - prerelease = strings.Split(patchStr[preIndex+1:], ".") - patchStr = patchStr[:preIndex] - } - - if !containsOnly(patchStr, numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr) - } - if hasLeadingZeroes(patchStr) { - return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr) - } - patch, err := strconv.ParseUint(patchStr, 10, 64) - if err != nil { - return Version{}, err - } - - v.Patch = patch - - // Prerelease - for _, prstr := range prerelease { - parsedPR, err := NewPRVersion(prstr) - if err != nil { - return Version{}, err - } - v.Pre = append(v.Pre, parsedPR) - } - - // Build meta data - for _, str := range build { - if len(str) == 0 { - return Version{}, errors.New("Build meta data is empty") - } - if !containsOnly(str, alphanum) { - return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str) - } - v.Build = append(v.Build, str) - } - - return v, nil -} - -// MustParse is like Parse but panics if the version cannot be parsed. 
-func MustParse(s string) Version { - v, err := Parse(s) - if err != nil { - panic(`semver: Parse(` + s + `): ` + err.Error()) - } - return v -} - -// PRVersion represents a PreRelease Version -type PRVersion struct { - VersionStr string - VersionNum uint64 - IsNum bool -} - -// NewPRVersion creates a new valid prerelease version -func NewPRVersion(s string) (PRVersion, error) { - if len(s) == 0 { - return PRVersion{}, errors.New("Prerelease is empty") - } - v := PRVersion{} - if containsOnly(s, numbers) { - if hasLeadingZeroes(s) { - return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) - } - num, err := strconv.ParseUint(s, 10, 64) - - // Might never be hit, but just in case - if err != nil { - return PRVersion{}, err - } - v.VersionNum = num - v.IsNum = true - } else if containsOnly(s, alphanum) { - v.VersionStr = s - v.IsNum = false - } else { - return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s) - } - return v, nil -} - -// IsNumeric checks if prerelease-version is numeric -func (v PRVersion) IsNumeric() bool { - return v.IsNum -} - -// Compare compares two PreRelease Versions v and o: -// -1 == v is less than o -// 0 == v is equal to o -// 1 == v is greater than o -func (v PRVersion) Compare(o PRVersion) int { - if v.IsNum && !o.IsNum { - return -1 - } else if !v.IsNum && o.IsNum { - return 1 - } else if v.IsNum && o.IsNum { - if v.VersionNum == o.VersionNum { - return 0 - } else if v.VersionNum > o.VersionNum { - return 1 - } else { - return -1 - } - } else { // both are Alphas - if v.VersionStr == o.VersionStr { - return 0 - } else if v.VersionStr > o.VersionStr { - return 1 - } else { - return -1 - } - } -} - -// PreRelease version to string -func (v PRVersion) String() string { - if v.IsNum { - return strconv.FormatUint(v.VersionNum, 10) - } - return v.VersionStr -} - -func containsOnly(s string, set string) bool { - return strings.IndexFunc(s, func(r rune) bool { - return !strings.ContainsRune(set, r) - }) == -1 -} - -func hasLeadingZeroes(s string) bool { - return len(s) > 1 && s[0] == '0' -} - -// NewBuildVersion creates a new valid build version -func NewBuildVersion(s string) (string, error) { - if len(s) == 0 { - return "", errors.New("Buildversion is empty") - } - if !containsOnly(s, alphanum) { - return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) - } - return s, nil -} diff --git a/vendor/github.com/blang/semver/sort.go b/vendor/github.com/blang/semver/sort.go deleted file mode 100644 index e18f88082..000000000 --- a/vendor/github.com/blang/semver/sort.go +++ /dev/null @@ -1,28 +0,0 @@ -package semver - -import ( - "sort" -) - -// Versions represents multiple versions. 
-type Versions []Version - -// Len returns length of version collection -func (s Versions) Len() int { - return len(s) -} - -// Swap swaps two versions inside the collection by its indices -func (s Versions) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Less checks if version at index i is less than version at index j -func (s Versions) Less(i, j int) bool { - return s[i].LT(s[j]) -} - -// Sort sorts a slice of versions -func Sort(versions []Version) { - sort.Sort(Versions(versions)) -} diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/sql.go deleted file mode 100644 index eb4d80266..000000000 --- a/vendor/github.com/blang/semver/sql.go +++ /dev/null @@ -1,30 +0,0 @@ -package semver - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements the database/sql.Scanner interface. -func (v *Version) Scan(src interface{}) (err error) { - var str string - switch src := src.(type) { - case string: - str = src - case []byte: - str = string(src) - default: - return fmt.Errorf("Version.Scan: cannot convert %T to string.", src) - } - - if t, err := Parse(str); err == nil { - *v = t - } - - return -} - -// Value implements the database/sql/driver.Valuer interface. -func (v Version) Value() (driver.Value, error) { - return v.String(), nil -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/.gitignore b/vendor/github.com/cloudfoundry-incubator/candiedyaml/.gitignore deleted file mode 100644 index 782377890..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.coverprofile diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/.travis.yml b/vendor/github.com/cloudfoundry-incubator/candiedyaml/.travis.yml deleted file mode 100644 index b39955e53..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - 1.4.1 - -install: - - go get -t -v ./... - - go install github.com/onsi/ginkgo/ginkgo - -script: - - export PATH=$HOME/gopath/bin:$PATH - - ginkgo -r -failOnPending -randomizeAllSpecs -race diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/LICENSE b/vendor/github.com/cloudfoundry-incubator/candiedyaml/LICENSE deleted file mode 100644 index d9a10c0d8..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/LICENSE +++ /dev/null @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/README.md b/vendor/github.com/cloudfoundry-incubator/candiedyaml/README.md deleted file mode 100644 index 94077994b..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/README.md +++ /dev/null @@ -1,59 +0,0 @@ -[![Build Status](https://travis-ci.org/cloudfoundry-incubator/candiedyaml.svg)](https://travis-ci.org/cloudfoundry-incubator/candiedyaml) -[![GoDoc](https://godoc.org/github.com/cloudfoundry-incubator/candiedyaml?status.svg)](https://godoc.org/github.com/cloudfoundry-incubator/candiedyaml) - - -candiedyaml -=========== - -YAML for Go - -A YAML 1.1 parser with support for YAML 1.2 features - -Usage ------ - -```go -package myApp - -import ( - "github.com/cloudfoundry-incubator/candiedyaml" - "fmt" - "os" -) - -func main() { - file, err := os.Open("path/to/some/file.yml") - if err != nil { - println("File does not exist:", err.Error()) - os.Exit(1) - } - defer file.Close() - - document := new(interface{}) - decoder := candiedyaml.NewDecoder(file) - err = decoder.Decode(document) - - if err != nil { - println("Failed to decode document:", err.Error()) - } - - println("parsed yml into interface:", fmt.Sprintf("%#v", document)) - - fileToWrite, err := os.Create("path/to/some/new/file.yml") - if err != nil { - println("Failed to open file for writing:", err.Error()) - os.Exit(1) - } - defer fileToWrite.Close() - - encoder := candiedyaml.NewEncoder(fileToWrite) - err = encoder.Encode(document) - - if err != nil { - println("Failed to encode document:", err.Error()) - os.Exit(1) - } - - return -} -``` diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/api.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/api.go deleted file mode 100644 index 87c1043ea..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/api.go +++ /dev/null @@ -1,834 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "io" -) - -/* - * Create a new parser object. - */ - -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, INPUT_RAW_BUFFER_SIZE), - buffer: make([]byte, 0, INPUT_BUFFER_SIZE), - } - - return true -} - -/* - * Destroy a parser object. - */ -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -/* - * String read handler. - */ - -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - - n := copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -/* - * File read handler. - */ - -func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) { - return parser.input_reader.Read(buffer) -} - -/* - * Set a string input. 
- */ - -func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("input already set") - } - - parser.read_handler = yaml_string_read_handler - - parser.input = input - parser.input_pos = 0 -} - -/* - * Set a reader input - */ -func yaml_parser_set_input_reader(parser *yaml_parser_t, reader io.Reader) { - if parser.read_handler != nil { - panic("input already set") - } - - parser.read_handler = yaml_file_read_handler - parser.input_reader = reader -} - -/* - * Set a generic input. - */ - -func yaml_parser_set_input(parser *yaml_parser_t, handler yaml_read_handler_t) { - if parser.read_handler != nil { - panic("input already set") - } - - parser.read_handler = handler -} - -/* - * Set the source encoding. - */ - -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("encoding already set") - } - - parser.encoding = encoding -} - -/* - * Create a new emitter object. - */ - -func yaml_emitter_initialize(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{ - buffer: make([]byte, OUTPUT_BUFFER_SIZE), - raw_buffer: make([]byte, 0, OUTPUT_RAW_BUFFER_SIZE), - states: make([]yaml_emitter_state_t, 0, INITIAL_STACK_SIZE), - events: make([]yaml_event_t, 0, INITIAL_QUEUE_SIZE), - } -} - -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -/* - * String write handler. - */ - -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -/* - * File write handler. - */ - -func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_writer.Write(buffer) - return err -} - -/* - * Set a string output. - */ - -func yaml_emitter_set_output_string(emitter *yaml_emitter_t, buffer *[]byte) { - if emitter.write_handler != nil { - panic("output already set") - } - - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = buffer -} - -/* - * Set a file output. - */ - -func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { - if emitter.write_handler != nil { - panic("output already set") - } - - emitter.write_handler = yaml_writer_write_handler - emitter.output_writer = w -} - -/* - * Set a generic output handler. - */ - -func yaml_emitter_set_output(emitter *yaml_emitter_t, handler yaml_write_handler_t) { - if emitter.write_handler != nil { - panic("output already set") - } - - emitter.write_handler = handler -} - -/* - * Set the output encoding. - */ - -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("encoding already set") - } - - emitter.encoding = encoding -} - -/* - * Set the canonical output style. - */ - -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -/* - * Set the indentation increment. - */ - -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -/* - * Set the preferred line width. - */ - -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -/* - * Set if unescaped non-ASCII characters are allowed. 
- */ - -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -/* - * Set the preferred line break character. - */ - -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -/* - * Destroy a token object. - */ - -// yaml_DECLARE(void) -// yaml_token_delete(yaml_token_t *token) -// { -// assert(token); /* Non-NULL token object expected. */ -// -// switch (token.type) -// { -// case yaml_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case yaml_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case yaml_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case yaml_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case yaml_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -// } - -/* - * Check if a string is a valid UTF-8 sequence. - * - * Check 'reader.c' for more details on UTF-8 encoding. - */ - -// static int -// yaml_check_utf8(yaml_char_t *start, size_t length) -// { -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -// } - -/* - * Create STREAM-START. - */ - -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { - *event = yaml_event_t{ - event_type: yaml_STREAM_START_EVENT, - encoding: encoding, - } -} - -/* - * Create STREAM-END. - */ - -func yaml_stream_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - event_type: yaml_STREAM_END_EVENT, - } -} - -/* - * Create DOCUMENT-START. - */ - -func yaml_document_start_event_initialize(event *yaml_event_t, - version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, - implicit bool) { - *event = yaml_event_t{ - event_type: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } -} - -/* - * Create DOCUMENT-END. - */ - -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { - *event = yaml_event_t{ - event_type: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } -} - -/* - * Create ALIAS. - */ - -func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) { - *event = yaml_event_t{ - event_type: yaml_ALIAS_EVENT, - anchor: anchor, - } -} - -/* - * Create SCALAR. 
- */ - -func yaml_scalar_event_initialize(event *yaml_event_t, - anchor []byte, tag []byte, - value []byte, - plain_implicit bool, quoted_implicit bool, - style yaml_scalar_style_t) { - - *event = yaml_event_t{ - event_type: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } -} - -/* - * Create SEQUENCE-START. - */ - -func yaml_sequence_start_event_initialize(event *yaml_event_t, - anchor []byte, tag []byte, implicit bool, style yaml_sequence_style_t) { - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } -} - -/* - * Create SEQUENCE-END. - */ - -func yaml_sequence_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_END_EVENT, - } -} - -/* - * Create MAPPING-START. - */ - -func yaml_mapping_start_event_initialize(event *yaml_event_t, - anchor []byte, tag []byte, implicit bool, style yaml_mapping_style_t) { - *event = yaml_event_t{ - event_type: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } -} - -/* - * Create MAPPING-END. - */ - -func yaml_mapping_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - event_type: yaml_MAPPING_END_EVENT, - } -} - -/* - * Destroy an event object. - */ - -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -// /* -// * Create a document object. -// */ -// -// func yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives []yaml_tag_directive_t, -// start_implicit, end_implicit bool) bool { -// -// -// { -// struct { -// YAML_error_type_t error; -// } context; -// struct { -// yaml_node_t *start; -// yaml_node_t *end; -// yaml_node_t *top; -// } nodes = { NULL, NULL, NULL }; -// yaml_version_directive_t *version_directive_copy = NULL; -// struct { -// yaml_tag_directive_t *start; -// yaml_tag_directive_t *end; -// yaml_tag_directive_t *top; -// } tag_directives_copy = { NULL, NULL, NULL }; -// yaml_tag_directive_t value = { NULL, NULL }; -// YAML_mark_t mark = { 0, 0, 0 }; -// -// assert(document); /* Non-NULL document object is expected. */ -// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)); -// /* Valid tag directives are expected. 
*/ -// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error; -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)); -// if (!version_directive_copy) goto error; -// version_directive_copy.major = version_directive.major; -// version_directive_copy.minor = version_directive.minor; -// } -// -// if (tag_directives_start != tag_directives_end) { -// yaml_tag_directive_t *tag_directive; -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error; -// for (tag_directive = tag_directives_start; -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle); -// assert(tag_directive.prefix); -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error; -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error; -// value.handle = yaml_strdup(tag_directive.handle); -// value.prefix = yaml_strdup(tag_directive.prefix); -// if (!value.handle || !value.prefix) goto error; -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error; -// value.handle = NULL; -// value.prefix = NULL; -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark); -// -// return 1; -// -// error: -// STACK_DEL(&context, nodes); -// yaml_free(version_directive_copy); -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// yaml_tag_directive_t value = POP(&context, tag_directives_copy); -// yaml_free(value.handle); -// yaml_free(value.prefix); -// } -// STACK_DEL(&context, tag_directives_copy); -// yaml_free(value.handle); -// yaml_free(value.prefix); -// -// return 0; -// } -// -// /* -// * Destroy a document object. -// */ -// -// yaml_DECLARE(void) -// yaml_document_delete(document *yaml_document_t) -// { -// struct { -// YAML_error_type_t error; -// } context; -// yaml_tag_directive_t *tag_directive; -// -// context.error = yaml_NO_ERROR; /* Eliminate a compliler warning. */ -// -// assert(document); /* Non-NULL document object is expected. */ -// -// while (!STACK_EMPTY(&context, document.nodes)) { -// yaml_node_t node = POP(&context, document.nodes); -// yaml_free(node.tag); -// switch (node.type) { -// case yaml_SCALAR_NODE: -// yaml_free(node.data.scalar.value); -// break; -// case yaml_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items); -// break; -// case yaml_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs); -// break; -// default: -// assert(0); /* Should not happen. */ -// } -// } -// STACK_DEL(&context, document.nodes); -// -// yaml_free(document.version_directive); -// for (tag_directive = document.tag_directives.start; -// tag_directive != document.tag_directives.end; -// tag_directive++) { -// yaml_free(tag_directive.handle); -// yaml_free(tag_directive.prefix); -// } -// yaml_free(document.tag_directives.start); -// -// memset(document, 0, sizeof(yaml_document_t)); -// } -// -// /** -// * Get a document node. -// */ -// -// yaml_DECLARE(yaml_node_t *) -// yaml_document_get_node(document *yaml_document_t, int index) -// { -// assert(document); /* Non-NULL document object is expected. */ -// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1; -// } -// return NULL; -// } -// -// /** -// * Get the root object. 
-// */ -// -// yaml_DECLARE(yaml_node_t *) -// yaml_document_get_root_node(document *yaml_document_t) -// { -// assert(document); /* Non-NULL document object is expected. */ -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start; -// } -// return NULL; -// } -// -// /* -// * Add a scalar node to a document. -// */ -// -// yaml_DECLARE(int) -// yaml_document_add_scalar(document *yaml_document_t, -// yaml_char_t *tag, yaml_char_t *value, int length, -// yaml_scalar_style_t style) -// { -// struct { -// YAML_error_type_t error; -// } context; -// YAML_mark_t mark = { 0, 0, 0 }; -// yaml_char_t *tag_copy = NULL; -// yaml_char_t *value_copy = NULL; -// yaml_node_t node; -// -// assert(document); /* Non-NULL document object is expected. */ -// assert(value); /* Non-NULL value is expected. */ -// -// if (!tag) { -// tag = (yaml_char_t *)yaml_DEFAULT_SCALAR_TAG; -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error; -// tag_copy = yaml_strdup(tag); -// if (!tag_copy) goto error; -// -// if (length < 0) { -// length = strlen((char *)value); -// } -// -// if (!yaml_check_utf8(value, length)) goto error; -// value_copy = yaml_malloc(length+1); -// if (!value_copy) goto error; -// memcpy(value_copy, value, length); -// value_copy[length] = '\0'; -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark); -// if (!PUSH(&context, document.nodes, node)) goto error; -// -// return document.nodes.top - document.nodes.start; -// -// error: -// yaml_free(tag_copy); -// yaml_free(value_copy); -// -// return 0; -// } -// -// /* -// * Add a sequence node to a document. -// */ -// -// yaml_DECLARE(int) -// yaml_document_add_sequence(document *yaml_document_t, -// yaml_char_t *tag, yaml_sequence_style_t style) -// { -// struct { -// YAML_error_type_t error; -// } context; -// YAML_mark_t mark = { 0, 0, 0 }; -// yaml_char_t *tag_copy = NULL; -// struct { -// yaml_node_item_t *start; -// yaml_node_item_t *end; -// yaml_node_item_t *top; -// } items = { NULL, NULL, NULL }; -// yaml_node_t node; -// -// assert(document); /* Non-NULL document object is expected. */ -// -// if (!tag) { -// tag = (yaml_char_t *)yaml_DEFAULT_SEQUENCE_TAG; -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error; -// tag_copy = yaml_strdup(tag); -// if (!tag_copy) goto error; -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error; -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark); -// if (!PUSH(&context, document.nodes, node)) goto error; -// -// return document.nodes.top - document.nodes.start; -// -// error: -// STACK_DEL(&context, items); -// yaml_free(tag_copy); -// -// return 0; -// } -// -// /* -// * Add a mapping node to a document. -// */ -// -// yaml_DECLARE(int) -// yaml_document_add_mapping(document *yaml_document_t, -// yaml_char_t *tag, yaml_mapping_style_t style) -// { -// struct { -// YAML_error_type_t error; -// } context; -// YAML_mark_t mark = { 0, 0, 0 }; -// yaml_char_t *tag_copy = NULL; -// struct { -// yaml_node_pair_t *start; -// yaml_node_pair_t *end; -// yaml_node_pair_t *top; -// } pairs = { NULL, NULL, NULL }; -// yaml_node_t node; -// -// assert(document); /* Non-NULL document object is expected. 
*/ -// -// if (!tag) { -// tag = (yaml_char_t *)yaml_DEFAULT_MAPPING_TAG; -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error; -// tag_copy = yaml_strdup(tag); -// if (!tag_copy) goto error; -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error; -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark); -// if (!PUSH(&context, document.nodes, node)) goto error; -// -// return document.nodes.top - document.nodes.start; -// -// error: -// STACK_DEL(&context, pairs); -// yaml_free(tag_copy); -// -// return 0; -// } -// -// /* -// * Append an item to a sequence node. -// */ -// -// yaml_DECLARE(int) -// yaml_document_append_sequence_item(document *yaml_document_t, -// int sequence, int item) -// { -// struct { -// YAML_error_type_t error; -// } context; -// -// assert(document); /* Non-NULL document is required. */ -// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top); -// /* Valid sequence id is required. */ -// assert(document.nodes.start[sequence-1].type == yaml_SEQUENCE_NODE); -// /* A sequence node is required. */ -// assert(item > 0 && document.nodes.start + item <= document.nodes.top); -// /* Valid item id is required. */ -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0; -// -// return 1; -// } -// -// /* -// * Append a pair of a key and a value to a mapping node. -// */ -// -// yaml_DECLARE(int) -// yaml_document_append_mapping_pair(document *yaml_document_t, -// int mapping, int key, int value) -// { -// struct { -// YAML_error_type_t error; -// } context; -// -// yaml_node_pair_t pair; -// -// assert(document); /* Non-NULL document is required. */ -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top); -// /* Valid mapping id is required. */ -// assert(document.nodes.start[mapping-1].type == yaml_MAPPING_NODE); -// /* A mapping node is required. */ -// assert(key > 0 && document.nodes.start + key <= document.nodes.top); -// /* Valid key id is required. */ -// assert(value > 0 && document.nodes.start + value <= document.nodes.top); -// /* Valid value id is required. */ -// -// pair.key = key; -// pair.value = value; -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0; -// -// return 1; -// } -// diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/decode.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/decode.go deleted file mode 100644 index dcc1b89cf..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/decode.go +++ /dev/null @@ -1,622 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "bytes" - "errors" - "fmt" - "io" - "reflect" - "runtime" - "strconv" - "strings" -) - -type Unmarshaler interface { - UnmarshalYAML(tag string, value interface{}) error -} - -// A Number represents a JSON number literal. -type Number string - -// String returns the literal text of the number. 
-func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -type Decoder struct { - parser yaml_parser_t - event yaml_event_t - replay_events []yaml_event_t - useNumber bool - - anchors map[string][]yaml_event_t - tracking_anchors [][]yaml_event_t -} - -type ParserError struct { - ErrorType YAML_error_type_t - Context string - ContextMark YAML_mark_t - Problem string - ProblemMark YAML_mark_t -} - -func (e *ParserError) Error() string { - return fmt.Sprintf("yaml: [%s] %s at line %d, column %d", e.Context, e.Problem, e.ProblemMark.line+1, e.ProblemMark.column+1) -} - -type UnexpectedEventError struct { - Value string - EventType yaml_event_type_t - At YAML_mark_t -} - -func (e *UnexpectedEventError) Error() string { - return fmt.Sprintf("yaml: Unexpect event [%d]: '%s' at line %d, column %d", e.EventType, e.Value, e.At.line+1, e.At.column+1) -} - -func recovery(err *error) { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - - var tmpError error - switch r := r.(type) { - case error: - tmpError = r - case string: - tmpError = errors.New(r) - default: - tmpError = errors.New("Unknown panic: " + reflect.ValueOf(r).String()) - } - - *err = tmpError - } -} - -func Unmarshal(data []byte, v interface{}) error { - d := NewDecoder(bytes.NewBuffer(data)) - return d.Decode(v) -} - -func NewDecoder(r io.Reader) *Decoder { - d := &Decoder{ - anchors: make(map[string][]yaml_event_t), - tracking_anchors: make([][]yaml_event_t, 1), - } - yaml_parser_initialize(&d.parser) - yaml_parser_set_input_reader(&d.parser, r) - return d -} - -func (d *Decoder) Decode(v interface{}) (err error) { - defer recovery(&err) - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - return fmt.Errorf("Expected a pointer or nil but was a %s at %s", rv.String(), d.event.start_mark) - } - - if d.event.event_type == yaml_NO_EVENT { - d.nextEvent() - - if d.event.event_type != yaml_STREAM_START_EVENT { - return errors.New("Invalid stream") - } - - d.nextEvent() - } - - d.document(rv) - return nil -} - -func (d *Decoder) UseNumber() { d.useNumber = true } - -func (d *Decoder) error(err error) { - panic(err) -} - -func (d *Decoder) nextEvent() { - if d.event.event_type == yaml_STREAM_END_EVENT { - d.error(errors.New("The stream is closed")) - } - - if d.replay_events != nil { - d.event = d.replay_events[0] - if len(d.replay_events) == 1 { - d.replay_events = nil - } else { - d.replay_events = d.replay_events[1:] - } - } else { - if !yaml_parser_parse(&d.parser, &d.event) { - yaml_event_delete(&d.event) - - d.error(&ParserError{ - ErrorType: d.parser.error, - Context: d.parser.context, - ContextMark: d.parser.context_mark, - Problem: d.parser.problem, - ProblemMark: d.parser.problem_mark, - }) - } - } - - last := len(d.tracking_anchors) - // skip aliases when tracking an anchor - if last > 0 && d.event.event_type != yaml_ALIAS_EVENT { - d.tracking_anchors[last-1] = append(d.tracking_anchors[last-1], d.event) - } -} - -func (d *Decoder) document(rv reflect.Value) { - if d.event.event_type != yaml_DOCUMENT_START_EVENT { - d.error(fmt.Errorf("Expected document start at %s", d.event.start_mark)) - } - - d.nextEvent() - d.parse(rv) - - if d.event.event_type != yaml_DOCUMENT_END_EVENT { - d.error(fmt.Errorf("Expected 
document end at %s", d.event.start_mark)) - } - - d.nextEvent() -} - -func (d *Decoder) parse(rv reflect.Value) { - if !rv.IsValid() { - // skip ahead since we cannot store - d.valueInterface() - return - } - - anchor := string(d.event.anchor) - switch d.event.event_type { - case yaml_SEQUENCE_START_EVENT: - d.begin_anchor(anchor) - d.sequence(rv) - d.end_anchor(anchor) - case yaml_MAPPING_START_EVENT: - d.begin_anchor(anchor) - d.mapping(rv) - d.end_anchor(anchor) - case yaml_SCALAR_EVENT: - d.begin_anchor(anchor) - d.scalar(rv) - d.end_anchor(anchor) - case yaml_ALIAS_EVENT: - d.alias(rv) - case yaml_DOCUMENT_END_EVENT: - default: - d.error(&UnexpectedEventError{ - Value: string(d.event.value), - EventType: d.event.event_type, - At: d.event.start_mark, - }) - } -} - -func (d *Decoder) begin_anchor(anchor string) { - if anchor != "" { - events := []yaml_event_t{d.event} - d.tracking_anchors = append(d.tracking_anchors, events) - } -} - -func (d *Decoder) end_anchor(anchor string) { - if anchor != "" { - events := d.tracking_anchors[len(d.tracking_anchors)-1] - d.tracking_anchors = d.tracking_anchors[0 : len(d.tracking_anchors)-1] - // remove the anchor, replaying events shouldn't have anchors - events[0].anchor = nil - // we went one too many, remove the extra event - events = events[:len(events)-1] - // if nested, append to all the other anchors - for i, e := range d.tracking_anchors { - d.tracking_anchors[i] = append(e, events...) - } - d.anchors[anchor] = events - } -} - -func (d *Decoder) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(Unmarshaler); ok { - var temp interface{} - return u, reflect.ValueOf(&temp) - } - } - - v = v.Elem() - } - - return nil, v -} - -func (d *Decoder) sequence(v reflect.Value) { - if d.event.event_type != yaml_SEQUENCE_START_EVENT { - d.error(fmt.Errorf("Expected sequence start at %s", d.event.start_mark)) - } - - u, pv := d.indirect(v, false) - if u != nil { - defer func() { - if err := u.UnmarshalYAML(yaml_SEQ_TAG, pv.Interface()); err != nil { - d.error(err) - } - }() - _, pv = d.indirect(pv, false) - } - - v = pv - - // Check type of target. - switch v.Kind() { - case reflect.Interface: - if v.NumMethod() == 0 { - // Decoding into nil interface? Switch to non-reflect code. - v.Set(reflect.ValueOf(d.sequenceInterface())) - return - } - // Otherwise it's invalid. - fallthrough - default: - d.error(fmt.Errorf("Expected an array, slice or interface{} but was a %s at %s", v, d.event.start_mark)) - case reflect.Array: - case reflect.Slice: - break - } - - d.nextEvent() - - i := 0 -done: - for { - switch d.event.event_type { - case yaml_SEQUENCE_END_EVENT, yaml_DOCUMENT_END_EVENT: - break done - } - - // Get element of array, growing if necessary. 
- if v.Kind() == reflect.Slice { - // Grow slice if necessary - if i >= v.Cap() { - newcap := v.Cap() + v.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) - reflect.Copy(newv, v) - v.Set(newv) - } - if i >= v.Len() { - v.SetLen(i + 1) - } - } - - if i < v.Len() { - // Decode into element. - d.parse(v.Index(i)) - } else { - // Ran out of fixed array: skip. - d.parse(reflect.Value{}) - } - i++ - } - - if i < v.Len() { - if v.Kind() == reflect.Array { - // Array. Zero the rest. - z := reflect.Zero(v.Type().Elem()) - for ; i < v.Len(); i++ { - v.Index(i).Set(z) - } - } else { - v.SetLen(i) - } - } - if i == 0 && v.Kind() == reflect.Slice { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - } - - if d.event.event_type != yaml_DOCUMENT_END_EVENT { - d.nextEvent() - } -} - -func (d *Decoder) mapping(v reflect.Value) { - u, pv := d.indirect(v, false) - if u != nil { - defer func() { - if err := u.UnmarshalYAML(yaml_MAP_TAG, pv.Interface()); err != nil { - d.error(err) - } - }() - _, pv = d.indirect(pv, false) - } - v = pv - - // Decoding into nil interface? Switch to non-reflect code. - if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - v.Set(reflect.ValueOf(d.mappingInterface())) - return - } - - // Check type of target: struct or map[X]Y - switch v.Kind() { - case reflect.Struct: - d.mappingStruct(v) - return - case reflect.Map: - default: - d.error(fmt.Errorf("Expected a struct or map but was a %s at %s ", v, d.event.start_mark)) - } - - mapt := v.Type() - if v.IsNil() { - v.Set(reflect.MakeMap(mapt)) - } - - d.nextEvent() - - keyt := mapt.Key() - mapElemt := mapt.Elem() - - var mapElem reflect.Value -done: - for { - switch d.event.event_type { - case yaml_MAPPING_END_EVENT: - break done - case yaml_DOCUMENT_END_EVENT: - return - } - - key := reflect.New(keyt) - d.parse(key.Elem()) - - if !mapElem.IsValid() { - mapElem = reflect.New(mapElemt).Elem() - } else { - mapElem.Set(reflect.Zero(mapElemt)) - } - - d.parse(mapElem) - - v.SetMapIndex(key.Elem(), mapElem) - } - - d.nextEvent() -} - -func (d *Decoder) mappingStruct(v reflect.Value) { - - structt := v.Type() - fields := cachedTypeFields(structt) - - d.nextEvent() - -done: - for { - switch d.event.event_type { - case yaml_MAPPING_END_EVENT: - break done - case yaml_DOCUMENT_END_EVENT: - return - } - - key := "" - d.parse(reflect.ValueOf(&key)) - - // Figure out field corresponding to key. 
- var subv reflect.Value - - var f *field - for i := range fields { - ff := &fields[i] - if ff.name == key { - f = ff - break - } - - if f == nil && strings.EqualFold(ff.name, key) { - f = ff - } - } - - if f != nil { - subv = v - for _, i := range f.index { - if subv.Kind() == reflect.Ptr { - if subv.IsNil() { - subv.Set(reflect.New(subv.Type().Elem())) - } - subv = subv.Elem() - } - subv = subv.Field(i) - } - } - d.parse(subv) - } - - d.nextEvent() -} - -func (d *Decoder) scalar(v reflect.Value) { - val := string(d.event.value) - wantptr := null_values[val] - - u, pv := d.indirect(v, wantptr) - - var tag string - if u != nil { - defer func() { - if err := u.UnmarshalYAML(tag, pv.Interface()); err != nil { - d.error(err) - } - }() - - _, pv = d.indirect(pv, wantptr) - } - v = pv - - var err error - tag, err = resolve(d.event, v, d.useNumber) - if err != nil { - d.error(err) - } - - d.nextEvent() -} - -func (d *Decoder) alias(rv reflect.Value) { - val, ok := d.anchors[string(d.event.anchor)] - if !ok { - d.error(fmt.Errorf("missing anchor: '%s' at %s", d.event.anchor, d.event.start_mark)) - } - - d.replay_events = val - d.nextEvent() - d.parse(rv) -} - -func (d *Decoder) valueInterface() interface{} { - var v interface{} - - anchor := string(d.event.anchor) - switch d.event.event_type { - case yaml_SEQUENCE_START_EVENT: - d.begin_anchor(anchor) - v = d.sequenceInterface() - case yaml_MAPPING_START_EVENT: - d.begin_anchor(anchor) - v = d.mappingInterface() - case yaml_SCALAR_EVENT: - d.begin_anchor(anchor) - v = d.scalarInterface() - case yaml_ALIAS_EVENT: - rv := reflect.ValueOf(&v) - d.alias(rv) - return v - case yaml_DOCUMENT_END_EVENT: - d.error(&UnexpectedEventError{ - Value: string(d.event.value), - EventType: d.event.event_type, - At: d.event.start_mark, - }) - - } - d.end_anchor(anchor) - - return v -} - -func (d *Decoder) scalarInterface() interface{} { - _, v := resolveInterface(d.event, d.useNumber) - - d.nextEvent() - return v -} - -// sequenceInterface is like sequence but returns []interface{}. -func (d *Decoder) sequenceInterface() []interface{} { - var v = make([]interface{}, 0) - - d.nextEvent() - -done: - for { - switch d.event.event_type { - case yaml_SEQUENCE_END_EVENT, yaml_DOCUMENT_END_EVENT: - break done - } - - v = append(v, d.valueInterface()) - } - - if d.event.event_type != yaml_DOCUMENT_END_EVENT { - d.nextEvent() - } - - return v -} - -// mappingInterface is like mapping but returns map[interface{}]interface{}. -func (d *Decoder) mappingInterface() map[interface{}]interface{} { - m := make(map[interface{}]interface{}) - - d.nextEvent() - -done: - for { - switch d.event.event_type { - case yaml_MAPPING_END_EVENT, yaml_DOCUMENT_END_EVENT: - break done - } - - key := d.valueInterface() - - // Read value. - m[key] = d.valueInterface() - } - - if d.event.event_type != yaml_DOCUMENT_END_EVENT { - d.nextEvent() - } - - return m -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/emitter.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/emitter.go deleted file mode 100644 index bd2014f34..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/emitter.go +++ /dev/null @@ -1,2072 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "bytes" -) - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -/* - * Flush the buffer if needed. - */ - -func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -/* - * Put a character to the output buffer. - */ -func put(emitter *yaml_emitter_t, value byte) bool { - if !flush(emitter) { - return false - } - - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -/* - * Put a line break to the output buffer. - */ - -func put_break(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos++ - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos++ - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 2 - default: - return false - } - emitter.column = 0 - emitter.line++ - return true -} - -/* - * Copy a character from a string into buffer. - */ -func write(emitter *yaml_emitter_t, src []byte, src_pos *int) bool { - if !flush(emitter) { - return false - } - copy_bytes(emitter.buffer, &emitter.buffer_pos, src, src_pos) - emitter.column++ - return true -} - -/* - * Copy a line break character from a string into buffer. - */ - -func write_break(emitter *yaml_emitter_t, src []byte, src_pos *int) bool { - if src[*src_pos] == '\n' { - if !put_break(emitter) { - return false - } - *src_pos++ - } else { - if !write(emitter, src, src_pos) { - return false - } - emitter.column = 0 - emitter.line++ - } - - return true -} - -/* - * Set an emitter error and return 0. - */ - -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -/* - * Emit an event. - */ - -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -/* - * Check if we need to accumulate more events before emitting. 
- * - * We accumulate extra - * - 1 event for DOCUMENT-START - * - 2 events for SEQUENCE-START - * - 3 events for MAPPING-START - */ - -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - - accumulate := 0 - switch emitter.events[emitter.events_head].event_type { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - case yaml_MAPPING_START_EVENT: - accumulate = 3 - default: - return false - } - - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - - level := 0 - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].event_type { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - - if level == 0 { - return false - } - } - return true -} - -/* - * Append a directive to the directives stack. - */ - -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, - value *yaml_tag_directive_t, allow_duplicates bool) bool { - - for i := range emitter.tag_directives { - - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicat %TAG directive") - } - } - - tag_copy := yaml_tag_directive_t{ - handle: value.handle, - prefix: value.prefix, - } - - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - - return true -} - -/* - * Increase the indentation level. - */ - -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow bool, indentless bool) bool { - - emitter.indents = append(emitter.indents, emitter.indent) - - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.best_indent - } - - return true -} - -/* - * State dispatcher. 
- */ - -func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, - "expected nothing after STREAM-END") - - } - - panic("invalid state") -} - -/* - * Expect STREAM-START. - */ - -func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - if event.event_type != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, - "expected STREAM-START") - } - - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - - return true -} - -/* - * Expect DOCUMENT-START or STREAM-END. 
- */ - -func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, - event *yaml_event_t, first bool) bool { - - if event.event_type == yaml_DOCUMENT_START_EVENT { - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, - *event.version_directive) { - return false - } - } - - for i := range event.tag_directives { - tag_directive := &event.tag_directives[i] - - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := range default_tag_directives { - if !yaml_emitter_append_tag_directive(emitter, &default_tag_directives[i], true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if (event.version_directive != nil || len(event.tag_directives) > 0) && - emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := range event.tag_directives { - tag_directive := &event.tag_directives[i] - - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if yaml_emitter_check_empty_document(emitter) { - implicit = false - } - - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - - if emitter.canonical { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - - return true - } else if event.event_type == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !yaml_emitter_flush(emitter) { - return false - } - - emitter.state = yaml_EMIT_END_STATE - - return true - } - - return yaml_emitter_set_emitter_error(emitter, - "expected DOCUMENT-START or STREAM-END") -} - -/* - * Expect the root node. - */ - -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - - return yaml_emitter_emit_node(emitter, event, true, false, false, false) -} - -/* - * Expect DOCUMENT-END. 
- */ - -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - if event.event_type != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, - "expected DOCUMENT-END") - } - - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -/* - * - * Expect a flow item node. - */ - -func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte("["), true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.event_type == yaml_SEQUENCE_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte(","), false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte("]"), false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte(","), false, false, false) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -/* - * Expect a flow key node. 
- */ - -func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, - event *yaml_event_t, first bool) bool { - - if first { - - if !yaml_emitter_write_indicator(emitter, []byte("{"), true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.event_type == yaml_MAPPING_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte(","), false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte("}"), false, false, false) { - return false - } - - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte(","), false, false, false) { - return false - } - } - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } else { - if !yaml_emitter_write_indicator(emitter, []byte("?"), true, false, false) { - return false - } - - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) - } -} - -/* - * Expect a flow value node. - */ - -func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, - event *yaml_event_t, simple bool) bool { - - if simple { - if !yaml_emitter_write_indicator(emitter, []byte(":"), false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte(":"), true, false, false) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -/* - * Expect a block item node. - */ - -func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, - event *yaml_event_t, first bool) bool { - - if first { - if !yaml_emitter_increase_indent(emitter, false, - (emitter.mapping_context && !emitter.indention)) { - return false - } - } - - if event.event_type == yaml_SEQUENCE_END_EVENT { - - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("-"), true, false, true) { - return false - } - - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -/* - * Expect a block key node. 
- */ - -func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, - event *yaml_event_t, first bool) bool { - - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - - if event.event_type == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !yaml_emitter_write_indent(emitter) { - return false - } - - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } else { - if !yaml_emitter_write_indicator(emitter, []byte("?"), true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - - return yaml_emitter_emit_node(emitter, event, false, false, true, false) - } -} - -/* - * Expect a block value node. - */ - -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, - event *yaml_event_t, simple bool) bool { - - if simple { - if !yaml_emitter_write_indicator(emitter, []byte(":"), false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte(":"), true, false, true) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -/* - * Expect a node. - */ - -func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.event_type { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - - default: - return yaml_emitter_set_emitter_error(emitter, - "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") - } - - return false -} - -/* - * Expect ALIAS. - */ - -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true -} - -/* - * Expect SCALAR. 
- */ - -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true -} - -/* - * Expect SEQUENCE-START. - */ - -func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - - if emitter.flow_level > 0 || emitter.canonical || - event.style == yaml_style_t(yaml_FLOW_SEQUENCE_STYLE) || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - - return true -} - -/* - * Expect MAPPING-START. - */ - -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - - if emitter.flow_level > 0 || emitter.canonical || - event.style == yaml_style_t(yaml_FLOW_MAPPING_STYLE) || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - - return true -} - -/* - * Check if the document content is an empty scalar. - */ - -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false -} - -/* - * Check if the next events represent an empty sequence. - */ - -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - - return (emitter.events[emitter.events_head].event_type == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].event_type == yaml_SEQUENCE_END_EVENT) -} - -/* - * Check if the next events represent an empty mapping. - */ - -func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - - return (emitter.events[emitter.events_head].event_type == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].event_type == yaml_MAPPING_END_EVENT) -} - -/* - * Check if the next node can be expressed as a simple key. 
- */ - -func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { - length := 0 - - switch emitter.events[emitter.events_head].event_type { - case yaml_ALIAS_EVENT: - length += len(emitter.anchor_data.anchor) - - case yaml_SCALAR_EVENT: - if emitter.scalar_data.multiline { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) + - len(emitter.scalar_data.value) - - case yaml_SEQUENCE_START_EVENT: - if !yaml_emitter_check_empty_sequence(emitter) { - return false - } - - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - - case yaml_MAPPING_START_EVENT: - if !yaml_emitter_check_empty_mapping(emitter) { - return false - } - - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - - default: - return false - } - - if length > 128 { - return false - } - - return true -} - -/* - * Determine an acceptable scalar style. - */ - -func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { - no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 - - if no_tag && !event.implicit && !event.quoted_implicit { - return yaml_emitter_set_emitter_error(emitter, - "neither tag nor implicit flags are specified") - } - - style := yaml_scalar_style_t(event.style) - - if style == yaml_ANY_SCALAR_STYLE { - style = yaml_PLAIN_SCALAR_STYLE - } - - if emitter.canonical { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if emitter.simple_key_context && emitter.scalar_data.multiline { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if style == yaml_PLAIN_SCALAR_STYLE { - if (emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed) || - (emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if len(emitter.scalar_data.value) == 0 && - (emitter.flow_level > 0 || emitter.simple_key_context) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if no_tag && !event.implicit { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - } - - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { - if !emitter.scalar_data.single_quoted_allowed { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { - if !emitter.scalar_data.block_allowed || - emitter.flow_level > 0 || emitter.simple_key_context { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if no_tag && !event.quoted_implicit && - style != yaml_PLAIN_SCALAR_STYLE { - emitter.tag_data.handle = []byte("!") - } - - emitter.scalar_data.style = style - - return true -} - -/* - * Write an achor. - */ - -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { - if emitter.anchor_data.anchor == nil { - return true - } - - indicator := "*" - if !emitter.anchor_data.alias { - indicator = "&" - } - if !yaml_emitter_write_indicator(emitter, []byte(indicator), true, false, false) { - return false - } - - return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) -} - -/* - * Write a tag. 
- */ - -func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { - if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { - return true - } - - if len(emitter.tag_data.handle) > 0 { - if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { - return false - } - - if len(emitter.tag_data.suffix) > 0 { - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - - } - } else { - if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { - return false - } - - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - - if !yaml_emitter_write_indicator(emitter, []byte(">"), false, false, false) { - return false - } - - } - - return true -} - -/* - * Write a scalar. - */ - -func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { - switch emitter.scalar_data.style { - case yaml_PLAIN_SCALAR_STYLE: - return yaml_emitter_write_plain_scalar(emitter, - emitter.scalar_data.value, - !emitter.simple_key_context) - - case yaml_SINGLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_single_quoted_scalar(emitter, - emitter.scalar_data.value, - !emitter.simple_key_context) - - case yaml_DOUBLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_double_quoted_scalar(emitter, - emitter.scalar_data.value, - !emitter.simple_key_context) - - case yaml_LITERAL_SCALAR_STYLE: - return yaml_emitter_write_literal_scalar(emitter, - emitter.scalar_data.value) - - case yaml_FOLDED_SCALAR_STYLE: - return yaml_emitter_write_folded_scalar(emitter, - emitter.scalar_data.value) - - default: - panic("unknown scalar") - } - - return false -} - -/* - * Check if a %YAML directive is valid. - */ - -func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, - version_directive yaml_version_directive_t) bool { - if version_directive.major != 1 || version_directive.minor != 1 { - return yaml_emitter_set_emitter_error(emitter, - "incompatible %YAML directive") - } - - return true -} - -/* - * Check if a %TAG directive is valid. - */ - -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, - tag_directive *yaml_tag_directive_t) bool { - handle := tag_directive.handle - prefix := tag_directive.prefix - - if len(handle) == 0 { - return yaml_emitter_set_emitter_error(emitter, - "tag handle must not be empty") - } - - if handle[0] != '!' { - return yaml_emitter_set_emitter_error(emitter, - "tag handle must start with '!'") - } - - if handle[len(handle)-1] != '!' { - return yaml_emitter_set_emitter_error(emitter, - "tag handle must end with '!'") - } - - for i := 1; i < len(handle)-1; width(handle[i]) { - if !is_alpha(handle[i]) { - return yaml_emitter_set_emitter_error(emitter, - "tag handle must contain alphanumerical characters only") - } - } - - if len(prefix) == 0 { - return yaml_emitter_set_emitter_error(emitter, - "tag prefix must not be empty") - } - - return true -} - -/* - * Check if an anchor is valid. 
- */ - -func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, - anchor []byte, alias bool) bool { - if len(anchor) == 0 { - errmsg := "alias value must not be empty" - if !alias { - errmsg = "anchor value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, errmsg) - } - - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor[i]) { - errmsg := "alias value must contain alphanumerical characters only" - if !alias { - errmsg = "anchor value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, errmsg) - } - } - - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - - return true -} - -/* - * Check if a tag is valid. - */ - -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, - "tag value must not be empty") - } - - for i := range emitter.tag_directives { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - - emitter.tag_data.suffix = tag - - return true -} - -/* - * Check if a scalar is valid. - */ - -func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - block_indicators := false - flow_indicators := false - line_breaks := false - special_characters := false - - leading_space := false - leading_break := false - trailing_space := false - trailing_break := false - break_space := false - space_break := false - - preceeded_by_whitespace := false - followed_by_whitespace := false - previous_space := false - previous_break := false - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || - (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceeded_by_whitespace = true - - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followed_by_whitespace = i+w >= len(value) || is_blankz_at(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceeded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if !is_printable_at(value, i) || (!is_ascii(value[i]) && !emitter.unicode) { - special_characters = true - } - - if is_break_at(value, i) { - line_breaks = true - } - - if is_space(value[i]) { - if i == 0 { - leading_space = true - } - if i+w == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break_at(value, i) { - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - preceeded_by_whitespace = is_blankz_at(value, i) - } - - emitter.scalar_data.multiline = line_breaks - - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - - if trailing_space { - emitter.scalar_data.block_allowed = false - } - - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - - if space_break || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - emitter.scalar_data.block_allowed = false - } - - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - - return true -} - -/* - * Check if the event data is valid. 
- */ - -func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - switch event.event_type { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, - event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, - event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || - (!event.implicit && - !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, - event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || - !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, - event.tag) { - return false - } - } - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, - event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || - !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, - event.tag) { - return false - } - } - - } - return true -} - -/* - * Write the BOM character. - */ - -func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - - pos := emitter.buffer_pos - emitter.buffer[pos] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - - if !emitter.indention || emitter.column > indent || - (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - - emitter.whitespace = true - emitter.indention = true - - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, - indicator []byte, need_whitespace bool, - is_whitespace bool, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - ind_pos := 0 - for ind_pos < len(indicator) { - if !write(emitter, indicator, &ind_pos) { - return false - } - } - - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - pos := 0 - for pos < len(value) { - if !write(emitter, value, &pos) { - return false - } - } - - emitter.whitespace = false - emitter.indention = false - - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - pos := 0 - for pos < len(value) { - if !write(emitter, value, &pos) { - return false - } - } - - emitter.whitespace = false - emitter.indention = false - - return true -} - -func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, - need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - for i := 0; i < len(value); { - write_it := false - switch value[i] { - case ';', '/', '?', 
':', '@', '&', '=', '+', '$', ',', '_', - '.', '!', '~', '*', '\'', '(', ')', '[', ']': - write_it = true - default: - write_it = is_alpha(value[i]) - } - if write_it { - if !write(emitter, value, &i) { - return false - } - } else { - w := width(value[i]) - for j := 0; j < w; j++ { - val := value[i] - i++ - - if !put(emitter, '%') { - return false - } - c := val >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = val & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - } - } - } - - emitter.whitespace = false - emitter.indention = false - - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, - allow_breaks bool) bool { - spaces := false - breaks := false - - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - for i := 0; i < len(value); { - if is_space(value[i]) { - if allow_breaks && !spaces && - emitter.column > emitter.best_width && - !is_space(value[i+1]) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break_at(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - emitter.whitespace = false - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, - allow_breaks bool) bool { - spaces := false - breaks := false - - if !yaml_emitter_write_indicator(emitter, []byte("'"), true, false, false) { - return false - } - - for i := 0; i < len(value); { - if is_space(value[i]) { - if allow_breaks && !spaces && - emitter.column > emitter.best_width && - i > 0 && i < len(value)-1 && - !is_space(value[i+1]) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break_at(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - if !yaml_emitter_write_indicator(emitter, []byte("'"), false, false, false) { - return false - } - - emitter.whitespace = false - emitter.indention = false - - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, - allow_breaks bool) bool { - - spaces := false - - if !yaml_emitter_write_indicator(emitter, []byte("\""), true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable_at(value, i) || (!emitter.unicode && !is_ascii(value[i])) || - is_bom_at(value, i) || is_break_at(value, i) || - value[i] == '"' || value[i] == '\\' { - octet := 
value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, rune(octet&0x07) - } - - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - switch v { - case 0x00: - if !put(emitter, '0') { - return false - } - case 0x07: - if !put(emitter, 'a') { - return false - } - case 0x08: - if !put(emitter, 'b') { - return false - } - case 0x09: - if !put(emitter, 't') { - return false - } - - case 0x0A: - if !put(emitter, 'n') { - return false - } - - case 0x0B: - if !put(emitter, 'v') { - return false - } - - case 0x0C: - if !put(emitter, 'f') { - return false - } - - case 0x0D: - if !put(emitter, 'r') { - return false - } - - case 0x1B: - if !put(emitter, 'e') { - return false - } - case 0x22: - if !put(emitter, '"') { - return false - } - case 0x5C: - if !put(emitter, '\\') { - return false - } - case 0x85: - if !put(emitter, 'N') { - return false - } - - case 0xA0: - if !put(emitter, '_') { - return false - } - - case 0x2028: - if !put(emitter, 'L') { - return false - } - - case 0x2029: - if !put(emitter, 'P') { - return false - } - default: - if v <= 0xFF { - if !put(emitter, 'x') { - return false - } - w = 2 - } else if v <= 0xFFFF { - if !put(emitter, 'u') { - return false - } - w = 4 - } else { - if !put(emitter, 'U') { - return false - } - w = 8 - } - for k := (w - 1) * 4; k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - c := digit + '0' - if c > 9 { - c = digit + 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - spaces = false - } else if is_space(value[i]) { - if allow_breaks && !spaces && - emitter.column > emitter.best_width && - i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value[i+1]) { - if !put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - - if !yaml_emitter_write_indicator(emitter, []byte("\""), false, false, false) { - return false - } - - emitter.whitespace = false - emitter.indention = false - - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - - if is_space(value[0]) || is_break_at(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - - if !is_break_at(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - for value[i]&0xC0 == 0x80 { - i-- - } - - if is_break_at(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - - breaks := true - - if !yaml_emitter_write_indicator(emitter, []byte("|"), true, false, false) { - return false - } - - if 
!yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - - emitter.indention = true - emitter.whitespace = true - - for i := 0; i < len(value); { - if is_break_at(value, i) { - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - breaks := true - leading_spaces := true - - if !yaml_emitter_write_indicator(emitter, []byte(">"), true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - - for i := 0; i < len(value); { - if is_break_at(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := i - for is_break_at(value, k) { - k += width(value[k]) - } - if !is_blankz_at(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value[i]) - } - if !breaks && is_space(value[i]) && !is_space(value[i+1]) && - emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - - return true -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go deleted file mode 100644 index fd9918089..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go +++ /dev/null @@ -1,395 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "bytes" - "encoding/base64" - "io" - "math" - "reflect" - "regexp" - "sort" - "strconv" - "time" -) - -var ( - timeTimeType = reflect.TypeOf(time.Time{}) - marshalerType = reflect.TypeOf(new(Marshaler)).Elem() - numberType = reflect.TypeOf(Number("")) - nonPrintable = regexp.MustCompile("[^\t\n\r\u0020-\u007E\u0085\u00A0-\uD7FF\uE000-\uFFFD]") - multiline = regexp.MustCompile("\n|\u0085|\u2028|\u2029") - - shortTags = map[string]string{ - yaml_NULL_TAG: "!!null", - yaml_BOOL_TAG: "!!bool", - yaml_STR_TAG: "!!str", - yaml_INT_TAG: "!!int", - yaml_FLOAT_TAG: "!!float", - yaml_TIMESTAMP_TAG: "!!timestamp", - yaml_SEQ_TAG: "!!seq", - yaml_MAP_TAG: "!!map", - yaml_BINARY_TAG: "!!binary", - } -) - -type Marshaler interface { - MarshalYAML() (tag string, value interface{}, err error) -} - -// An Encoder writes JSON objects to an output stream. 
-type Encoder struct { - w io.Writer - emitter yaml_emitter_t - event yaml_event_t - flow bool - err error -} - -func Marshal(v interface{}) ([]byte, error) { - b := bytes.Buffer{} - e := NewEncoder(&b) - err := e.Encode(v) - return b.Bytes(), err -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - e := &Encoder{w: w} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_writer(&e.emitter, e.w) - yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) - e.emit() - yaml_document_start_event_initialize(&e.event, nil, nil, true) - e.emit() - - return e -} - -func (e *Encoder) Encode(v interface{}) (err error) { - defer recovery(&err) - - if e.err != nil { - return e.err - } - - e.marshal("", reflect.ValueOf(v), true) - - yaml_document_end_event_initialize(&e.event, true) - e.emit() - e.emitter.open_ended = false - yaml_stream_end_event_initialize(&e.event) - e.emit() - - return nil -} - -func (e *Encoder) emit() { - if !yaml_emitter_emit(&e.emitter, &e.event) { - panic("bad emit") - } -} - -func (e *Encoder) marshal(tag string, v reflect.Value, allowAddr bool) { - vt := v.Type() - - if vt.Implements(marshalerType) { - e.emitMarshaler(tag, v) - return - } - - if vt.Kind() != reflect.Ptr && allowAddr { - if reflect.PtrTo(vt).Implements(marshalerType) { - e.emitAddrMarshaler(tag, v) - return - } - } - - switch v.Kind() { - case reflect.Interface: - if v.IsNil() { - e.emitNil() - } else { - e.marshal(tag, v.Elem(), allowAddr) - } - case reflect.Map: - e.emitMap(tag, v) - case reflect.Ptr: - if v.IsNil() { - e.emitNil() - } else { - e.marshal(tag, v.Elem(), true) - } - case reflect.Struct: - e.emitStruct(tag, v) - case reflect.Slice: - e.emitSlice(tag, v) - case reflect.String: - e.emitString(tag, v) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - e.emitInt(tag, v) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.emitUint(tag, v) - case reflect.Float32, reflect.Float64: - e.emitFloat(tag, v) - case reflect.Bool: - e.emitBool(tag, v) - default: - panic("Can't marshal type yet: " + v.Type().String()) - } -} - -func (e *Encoder) emitMap(tag string, v reflect.Value) { - e.mapping(tag, func() { - var keys stringValues = v.MapKeys() - sort.Sort(keys) - for _, k := range keys { - e.marshal("", k, true) - e.marshal("", v.MapIndex(k), true) - } - }) -} - -func (e *Encoder) emitStruct(tag string, v reflect.Value) { - if v.Type() == timeTimeType { - e.emitTime(tag, v) - return - } - - fields := cachedTypeFields(v.Type()) - - e.mapping(tag, func() { - for _, f := range fields { - fv := fieldByIndex(v, f.index) - if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) { - continue - } - - e.marshal("", reflect.ValueOf(f.name), true) - e.flow = f.flow - e.marshal("", fv, true) - } - }) -} - -func (e *Encoder) emitTime(tag string, v reflect.Value) { - t := v.Interface().(time.Time) - bytes, _ := t.MarshalText() - e.emitScalar(string(bytes), "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case 
reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func (e *Encoder) mapping(tag string, f func()) { - implicit := tag == "" - style := yaml_BLOCK_MAPPING_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_MAPPING_STYLE - } - yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) - e.emit() - - f() - - yaml_mapping_end_event_initialize(&e.event) - e.emit() -} - -func (e *Encoder) emitSlice(tag string, v reflect.Value) { - if v.Type() == byteSliceType { - e.emitBase64(tag, v) - return - } - - implicit := tag == "" - style := yaml_BLOCK_SEQUENCE_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_SEQUENCE_STYLE - } - yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) - e.emit() - - n := v.Len() - for i := 0; i < n; i++ { - e.marshal("", v.Index(i), true) - } - - yaml_sequence_end_event_initialize(&e.event) - e.emit() -} - -func (e *Encoder) emitBase64(tag string, v reflect.Value) { - if v.IsNil() { - e.emitNil() - return - } - - s := v.Bytes() - - dst := make([]byte, base64.StdEncoding.EncodedLen(len(s))) - - base64.StdEncoding.Encode(dst, s) - e.emitScalar(string(dst), "", yaml_BINARY_TAG, yaml_DOUBLE_QUOTED_SCALAR_STYLE) -} - -func (e *Encoder) emitString(tag string, v reflect.Value) { - var style yaml_scalar_style_t - s := v.String() - - if nonPrintable.MatchString(s) { - e.emitBase64(tag, v) - return - } - - if v.Type() == numberType { - style = yaml_PLAIN_SCALAR_STYLE - } else { - event := yaml_event_t{ - implicit: true, - value: []byte(s), - } - - rtag, _ := resolveInterface(event, false) - if tag == "" && rtag != yaml_STR_TAG { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } else if multiline.MatchString(s) { - style = yaml_LITERAL_SCALAR_STYLE - } else { - style = yaml_PLAIN_SCALAR_STYLE - } - } - - e.emitScalar(s, "", tag, style) -} - -func (e *Encoder) emitBool(tag string, v reflect.Value) { - s := strconv.FormatBool(v.Bool()) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *Encoder) emitInt(tag string, v reflect.Value) { - s := strconv.FormatInt(v.Int(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *Encoder) emitUint(tag string, v reflect.Value) { - s := strconv.FormatUint(v.Uint(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *Encoder) emitFloat(tag string, v reflect.Value) { - f := v.Float() - - var s string - switch { - case math.IsNaN(f): - s = ".nan" - case math.IsInf(f, 1): - s = "+.inf" - case math.IsInf(f, -1): - s = "-.inf" - default: - s = strconv.FormatFloat(f, 'g', -1, v.Type().Bits()) - } - - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *Encoder) emitNil() { - e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) -} - -func (e *Encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { - implicit := tag == "" - if !implicit { - style = yaml_PLAIN_SCALAR_STYLE - } - - stag := shortTags[tag] - if stag == "" { - stag = tag - } - - yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(stag), []byte(value), implicit, implicit, style) - e.emit() -} - -func (e *Encoder) emitMarshaler(tag string, v reflect.Value) { - if v.Kind() == reflect.Ptr && v.IsNil() { - e.emitNil() - return - } - - m := v.Interface().(Marshaler) - if m == nil { - e.emitNil() - return - } - t, val, err := m.MarshalYAML() - if err != nil { - panic(err) - } - if val == nil { - e.emitNil() - return - } - - e.marshal(t, reflect.ValueOf(val), false) -} - -func (e *Encoder) emitAddrMarshaler(tag string, v 
reflect.Value) { - if !v.CanAddr() { - e.marshal(tag, v, false) - return - } - - va := v.Addr() - if va.IsNil() { - e.emitNil() - return - } - - m := v.Interface().(Marshaler) - t, val, err := m.MarshalYAML() - if err != nil { - panic(err) - } - - if val == nil { - e.emitNil() - return - } - - e.marshal(t, reflect.ValueOf(val), false) -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/libyaml-LICENSE b/vendor/github.com/cloudfoundry-incubator/candiedyaml/libyaml-LICENSE deleted file mode 100644 index 050ced23f..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/libyaml-LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2006 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/parser.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/parser.go deleted file mode 100644 index 8d38e3065..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/parser.go +++ /dev/null @@ -1,1230 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "bytes" -) - -/* - * The parser implements the following grammar: - * - * stream ::= STREAM-START implicit_document? explicit_document* STREAM-END - * implicit_document ::= block_node DOCUMENT-END* - * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* - * block_node_or_indentless_sequence ::= - * ALIAS - * | properties (block_content | indentless_block_sequence)? - * | block_content - * | indentless_block_sequence - * block_node ::= ALIAS - * | properties block_content? - * | block_content - * flow_node ::= ALIAS - * | properties flow_content? - * | flow_content - * properties ::= TAG ANCHOR? | ANCHOR TAG? 
- * block_content ::= block_collection | flow_collection | SCALAR - * flow_content ::= flow_collection | SCALAR - * block_collection ::= block_sequence | block_mapping - * flow_collection ::= flow_sequence | flow_mapping - * block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END - * indentless_sequence ::= (BLOCK-ENTRY block_node?)+ - * block_mapping ::= BLOCK-MAPPING_START - * ((KEY block_node_or_indentless_sequence?)? - * (VALUE block_node_or_indentless_sequence?)?)* - * BLOCK-END - * flow_sequence ::= FLOW-SEQUENCE-START - * (flow_sequence_entry FLOW-ENTRY)* - * flow_sequence_entry? - * FLOW-SEQUENCE-END - * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - * flow_mapping ::= FLOW-MAPPING-START - * (flow_mapping_entry FLOW-ENTRY)* - * flow_mapping_entry? - * FLOW-MAPPING-END - * flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - */ - -/* - * Peek the next token in the token queue. - */ -func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - return &parser.tokens[parser.tokens_head] - } - return nil -} - -/* - * Remove the next token from the queue (must be called after peek_token). - */ -func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].token_type == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -/* - * Get the next event. - */ - -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - /* Erase the event object. */ - *event = yaml_event_t{} - - /* No events after the end of the stream or error. */ - - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || - parser.state == yaml_PARSE_END_STATE { - return true - } - - /* Generate the next event. */ - - return yaml_parser_state_machine(parser, event) -} - -/* - * Set parser error. - */ - -func yaml_parser_set_parser_error(parser *yaml_parser_t, - problem string, problem_mark YAML_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, - context string, context_mark YAML_mark_t, - problem string, problem_mark YAML_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - - return false -} - -/* - * State dispatcher. 
- */ - -func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, true) - } - - panic("invalid parser state") -} - -/* - * Parse the production: - * stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END - * ************ - */ - -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type != yaml_STREAM_START_TOKEN { - return yaml_parser_set_parser_error(parser, - "did not find expected ", token.start_mark) - } - - parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - *event = yaml_event_t{ - event_type: yaml_STREAM_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - encoding: token.encoding, - } - skip_token(parser) - - return true -} - -/* - * Parse the productions: - * implicit_document ::= block_node DOCUMENT-END* - * * - * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* - * ************************* - */ - -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, - implicit bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - /* Parse extra document end indicators. */ - - if !implicit { - for token.token_type == yaml_DOCUMENT_END_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - /* Parse an implicit document. */ - - if implicit && token.token_type != yaml_VERSION_DIRECTIVE_TOKEN && - token.token_type != yaml_TAG_DIRECTIVE_TOKEN && - token.token_type != yaml_DOCUMENT_START_TOKEN && - token.token_type != yaml_STREAM_END_TOKEN { - if !yaml_parser_process_directives(parser, nil, nil) { - return false - } - - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_BLOCK_NODE_STATE - - *event = yaml_event_t{ - event_type: yaml_DOCUMENT_START_EVENT, - implicit: true, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - } else if token.token_type != yaml_STREAM_END_TOKEN { - /* Parse an explicit document. */ - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - start_mark := token.start_mark - if !yaml_parser_process_directives(parser, &version_directive, - &tag_directives) { - return false - } - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_DOCUMENT_START_TOKEN { - yaml_parser_set_parser_error(parser, - "did not find expected ", token.start_mark) - return false - } - - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE - - end_mark := token.end_mark - - *event = yaml_event_t{ - event_type: yaml_DOCUMENT_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: false, - } - skip_token(parser) - } else { - /* Parse the stream end. */ - parser.state = yaml_PARSE_END_STATE - - *event = yaml_event_t{ - event_type: yaml_STREAM_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - } - return true -} - -/* - * Parse the productions: - * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* - * *********** - */ - -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type == yaml_VERSION_DIRECTIVE_TOKEN || - token.token_type == yaml_TAG_DIRECTIVE_TOKEN || - token.token_type == yaml_DOCUMENT_START_TOKEN || - token.token_type == yaml_DOCUMENT_END_TOKEN || - token.token_type == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } else { - return yaml_parser_parse_node(parser, event, true, false) - } -} - -/* - * Parse the productions: - * implicit_document ::= block_node DOCUMENT-END* - * ************* - * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* - * ************* - */ - -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - implicit := true - - token := peek_token(parser) - if token == nil { - return false - } - - start_mark, end_mark := token.start_mark, token.start_mark - - if token.token_type == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - event_type: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - - return true -} - -/* - * Parse the productions: - * block_node_or_indentless_sequence ::= - * ALIAS - * ***** - * | properties (block_content | indentless_block_sequence)? - * ********** * - * | block_content | indentless_block_sequence - * * - * block_node ::= ALIAS - * ***** - * | properties block_content? - * ********** * - * | block_content - * * - * flow_node ::= ALIAS - * ***** - * | properties flow_content? - * ********** * - * | flow_content - * * - * properties ::= TAG ANCHOR? | ANCHOR TAG? 
- * ************************* - * block_content ::= block_collection | flow_collection | SCALAR - * ****** - * flow_content ::= flow_collection | SCALAR - * ****** - */ - -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, - block bool, indentless_sequence bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - event_type: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - skip_token(parser) - return true - } else { - start_mark, end_mark := token.start_mark, token.start_mark - - var tag_handle []byte - var tag_suffix, anchor []byte - var tag_mark YAML_mark_t - if token.token_type == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type == yaml_TAG_TOKEN { - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.token_type == yaml_TAG_TOKEN { - tag_handle = token.value - tag_suffix = token.suffix - start_mark, tag_mark = token.start_mark, token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - - } - } - - var tag []byte - if tag_handle != nil { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_handle = nil - tag_suffix = nil - } else { - for i := range parser.tag_directives { - tag_directive := &parser.tag_directives[i] - if bytes.Equal(tag_directive.handle, tag_handle) { - tag = append([]byte(nil), tag_directive.prefix...) - tag = append(tag, tag_suffix...) 
- tag_handle = nil - tag_suffix = nil - break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.token_type == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - - return true - } else { - if token.token_type == yaml_SCALAR_TOKEN { - plain_implicit := false - quoted_implicit := false - end_mark = token.end_mark - if (token.style == yaml_PLAIN_SCALAR_STYLE && len(tag) == 0) || - (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - event_type: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - - skip_token(parser) - return true - } else if token.token_type == yaml_FLOW_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - - return true - } else if token.token_type == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - - *event = yaml_event_t{ - event_type: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - - return true - } else if block && token.token_type == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - - return true - } else if block && token.token_type == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - - *event = yaml_event_t{ - event_type: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - return true - } else if len(anchor) > 0 || len(tag) > 0 { - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - event_type: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } else { - msg := "while parsing a block node" - if !block { - msg = "while parsing a flow node" - } - yaml_parser_set_parser_error_context(parser, msg, start_mark, - "did not find 
expected node content", token.start_mark) - return false - } - } - } - - return false -} - -/* - * Parse the productions: - * block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END - * ******************** *********** * ********* - */ - -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, - event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_BLOCK_ENTRY_TOKEN && - token.token_type != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.token_type == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } else { - mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", mark, - "did not find expected '-' indicator", token.start_mark) - } -} - -/* - * Parse the productions: - * indentless_sequence ::= (BLOCK-ENTRY block_node?)+ - * *********** * - */ - -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, - event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_BLOCK_ENTRY_TOKEN && - token.token_type != yaml_KEY_TOKEN && - token.token_type != yaml_VALUE_TOKEN && - token.token_type != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, - } - return true - } -} - -/* - * Parse the productions: - * block_mapping ::= BLOCK-MAPPING_START - * ******************* - * ((KEY block_node_or_indentless_sequence?)? 
- * *** * - * (VALUE block_node_or_indentless_sequence?)?)* - * - * BLOCK-END - * ********* - */ - -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, - event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_KEY_TOKEN && - token.token_type != yaml_VALUE_TOKEN && - token.token_type != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.token_type == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - event_type: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true - } else { - mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", mark, - "did not find expected key", token.start_mark) - } -} - -/* - * Parse the productions: - * block_mapping ::= BLOCK-MAPPING_START - * - * ((KEY block_node_or_indentless_sequence?)? - * - * (VALUE block_node_or_indentless_sequence?)?)* - * ***** * - * BLOCK-END - * - */ - -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, - event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_KEY_TOKEN && - token.token_type != yaml_VALUE_TOKEN && - token.token_type != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } -} - -/* - * Parse the productions: - * flow_sequence ::= FLOW-SEQUENCE-START - * ******************* - * (flow_sequence_entry FLOW-ENTRY)* - * * ********** - * flow_sequence_entry? - * * - * FLOW-SEQUENCE-END - * ***************** - * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
- * * - */ - -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, - event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.token_type == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.token_type == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - event_type: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - - skip_token(parser) - return true - } else if token.token_type != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -/* - * Parse the productions: - * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - * *** * - */ - -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, - event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type != yaml_VALUE_TOKEN && - token.token_type != yaml_FLOW_ENTRY_TOKEN && - token.token_type != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } -} - -/* - * Parse the productions: - * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - * ***** * - */ - -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, - event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_FLOW_ENTRY_TOKEN && - token.token_type != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -/* - * Parse the productions: - * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
- * * - */ - -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, - event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - event_type: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, - } - - return true -} - -/* - * Parse the productions: - * flow_mapping ::= FLOW-MAPPING-START - * ****************** - * (flow_mapping_entry FLOW-ENTRY)* - * * ********** - * flow_mapping_entry? - * ****************** - * FLOW-MAPPING-END - * **************** - * flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - * * *** * - */ - -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, - event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.token_type == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.token_type == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_VALUE_TOKEN && - token.token_type != yaml_FLOW_ENTRY_TOKEN && - token.token_type != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - } else if token.token_type != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - event_type: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -/* - * Parse the productions: - * flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
- * * ***** * - */ - -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, - event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - - if token.token_type == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_FLOW_ENTRY_TOKEN && - token.token_type != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -/* - * Generate an empty scalar event. - */ - -func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, - mark YAML_mark_t) bool { - *event = yaml_event_t{ - event_type: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - - return true -} - -/* - * Parse directives. - */ - -func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - for token.token_type == yaml_VERSION_DIRECTIVE_TOKEN || - token.token_type == yaml_TAG_DIRECTIVE_TOKEN { - if token.token_type == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || - token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.token_type == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - - if !yaml_parser_append_tag_directive(parser, value, false, - token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - - return true -} - -/* - * Append a tag directive to the directives stack. 
- */ - -func yaml_parser_append_tag_directive(parser *yaml_parser_t, - value yaml_tag_directive_t, allow_duplicates bool, mark YAML_mark_t) bool { - for i := range parser.tag_directives { - tag := &parser.tag_directives[i] - if bytes.Equal(value.handle, tag.handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - parser.tag_directives = append(parser.tag_directives, value) - return true -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/reader.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/reader.go deleted file mode 100644 index 5631da2dc..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/reader.go +++ /dev/null @@ -1,465 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "io" -) - -/* - * Set the reader error and return 0. - */ - -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, - offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - - return false -} - -/* - * Byte order marks. - */ -const ( - BOM_UTF8 = "\xef\xbb\xbf" - BOM_UTF16LE = "\xff\xfe" - BOM_UTF16BE = "\xfe\xff" -) - -/* - * Determine the input stream encoding by checking the BOM symbol. If no BOM is - * found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. - */ - -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - /* Ensure that we had enough bytes in the raw buffer. */ - for !parser.eof && - len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - /* Determine the encoding. */ - raw := parser.raw_buffer - pos := parser.raw_buffer_pos - remaining := len(raw) - pos - if remaining >= 2 && - raw[pos] == BOM_UTF16LE[0] && raw[pos+1] == BOM_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if remaining >= 2 && - raw[pos] == BOM_UTF16BE[0] && raw[pos+1] == BOM_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if remaining >= 3 && - raw[pos] == BOM_UTF8[0] && raw[pos+1] == BOM_UTF8[1] && raw[pos+2] == BOM_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - - return true -} - -/* - * Update the raw buffer. - */ - -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - /* Return if the raw buffer is full. */ - if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - /* Return on EOF. */ - - if parser.eof { - return true - } - - /* Move the remaining bytes in the raw buffer to the beginning. 
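As a rough sketch (standard library only, names invented for this example), the duplicate-handle check that the directive-appending helper above performs before pushing a %TAG directive onto the parser's stack:

package main

import (
	"bytes"
	"fmt"
)

type tagDirective struct {
	handle, prefix []byte
}

// appendTagDirective refuses a second directive for the same handle unless
// duplicates are explicitly allowed (the parser allows them only for the
// built-in defaults).
func appendTagDirective(dirs []tagDirective, d tagDirective, allowDup bool) ([]tagDirective, error) {
	for _, existing := range dirs {
		if bytes.Equal(existing.handle, d.handle) {
			if allowDup {
				return dirs, nil
			}
			return dirs, fmt.Errorf("found duplicate %%TAG directive for handle %q", d.handle)
		}
	}
	return append(dirs, d), nil
}

func main() {
	dirs, _ := appendTagDirective(nil, tagDirective{[]byte("!"), []byte("!")}, false)
	_, err := appendTagDirective(dirs, tagDirective{[]byte("!"), []byte("tag:example.com,2014:app/")}, false)
	fmt.Println(err) // found duplicate %TAG directive for handle "!"
}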
*/ - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - /* Call the read handler to fill the buffer. */ - size_read, err := parser.read_handler(parser, - parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), - parser.offset, -1) - } - - return true -} - -/* - * Ensure that the buffer contains at least `length` characters. - * Return 1 on success, 0 on failure. - * - * The length is supposed to be significantly less that the buffer size. - */ - -func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - /* Read handler must be set. */ - if parser.read_handler == nil { - panic("read handler must be set") - } - - /* If the EOF flag is set and the raw buffer is empty, do nothing. */ - - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - return true - } - - /* Return if the buffer contains enough characters. */ - - if parser.unread >= length { - return true - } - - /* Determine the input encoding if it is not known yet. */ - - if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - /* Move the unread characters to the beginning of the buffer. */ - buffer_end := len(parser.buffer) - if 0 < parser.buffer_pos && - parser.buffer_pos < buffer_end { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_end -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_end { - buffer_end = 0 - parser.buffer_pos = 0 - } - - parser.buffer = parser.buffer[:cap(parser.buffer)] - - /* Fill the buffer until it has enough characters. */ - first := true - for parser.unread < length { - /* Fill the raw buffer if necessary. */ - - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_end] - return false - } - } - first = false - - /* Decode the raw buffer. */ - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var w int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - incomplete := false - - /* Decode the next character. */ - - switch parser.encoding { - case yaml_UTF8_ENCODING: - - /* - * Decode a UTF-8 character. Check RFC 3629 - * (http://www.ietf.org/rfc/rfc3629.txt) for more details. - * - * The following table (taken from the RFC) is used for - * decoding. - * - * Char. number range | UTF-8 octet sequence - * (hexadecimal) | (binary) - * --------------------+------------------------------------ - * 0000 0000-0000 007F | 0xxxxxxx - * 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - * 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - * 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - * - * Additionally, the characters in the range 0xD800-0xDFFF - * are prohibited as they are reserved for use with UTF-16 - * surrogate pairs. - */ - - /* Determine the length of the UTF-8 sequence. */ - - octet := parser.raw_buffer[parser.raw_buffer_pos] - w = width(octet) - - /* Check if the leading octet is valid. 
*/ - - if w == 0 { - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - /* Check if the raw buffer contains an incomplete character. */ - - if w > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - incomplete = true - break - } - - /* Decode the leading octet. */ - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - /* Check and decode the trailing octets. */ - - for k := 1; k < w; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - /* Check if the octet is valid. */ - - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - /* Decode the octet. */ - - value = (value << 6) + rune(octet&0x3F) - } - - /* Check the length of the sequence against the value. */ - switch { - case w == 1: - case w == 2 && value >= 0x80: - case w == 3 && value >= 0x800: - case w == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - /* Check the range of the value. */ - - if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - case yaml_UTF16LE_ENCODING, - yaml_UTF16BE_ENCODING: - - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - high, low = 1, 0 - } - - /* - * The UTF-16 encoding is not as simple as one might - * naively think. Check RFC 2781 - * (http://www.ietf.org/rfc/rfc2781.txt). - * - * Normally, two subsequent bytes describe a Unicode - * character. However a special technique (called a - * surrogate pair) is used for specifying character - * values larger than 0xFFFF. - * - * A surrogate pair consists of two pseudo-characters: - * high surrogate area (0xD800-0xDBFF) - * low surrogate area (0xDC00-0xDFFF) - * - * The following formulas are used for decoding - * and encoding characters using surrogate pairs: - * - * U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - * U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - * W1 = 110110yyyyyyyyyy - * W2 = 110111xxxxxxxxxx - * - * where U is the character value, W1 is the high surrogate - * area, W2 is the low surrogate area. - */ - - /* Check for incomplete UTF-16 character. */ - - if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - incomplete = true - break - } - - /* Get the character. */ - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - /* Check for unexpected low surrogate area. */ - - if (value & 0xFC00) == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - /* Check for a high surrogate area. */ - - if (value & 0xFC00) == 0xD800 { - - w = 4 - - /* Check for incomplete surrogate pair. */ - - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - incomplete = true - break - } - - /* Get the next character. 
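The surrogate-pair formula quoted in the removed comment can be checked against the standard unicode/utf16 package; a small self-contained sketch:

package main

import (
	"fmt"
	"unicode/utf16"
)

func main() {
	// U+1F600 encoded in UTF-16 is the surrogate pair D83D DE00.
	w1, w2 := rune(0xD83D), rune(0xDE00)

	// U = 0x10000 + (yyyyyyyyyy << 10) + xxxxxxxxxx, exactly as described above.
	manual := 0x10000 + ((w1 & 0x3FF) << 10) + (w2 & 0x3FF)

	fmt.Printf("%#x %#x\n", manual, utf16.DecodeRune(w1, w2)) // 0x1f600 0x1f600
}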
*/ - - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - /* Check for a low surrogate area. */ - - if (value2 & 0xFC00) != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - /* Generate the value of the surrogate pair. */ - - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - w = 2 - } - - break - - default: - panic("Impossible") /* Impossible. */ - } - - /* Check if the raw buffer contains enough bytes to form a character. */ - - if incomplete { - break - } - - /* - * Check if the character is in the allowed range: - * #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - * | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - * | [#x10000-#x10FFFF] (32 bit) - */ - - if !(value == 0x09 || value == 0x0A || value == 0x0D || - (value >= 0x20 && value <= 0x7E) || - (value == 0x85) || (value >= 0xA0 && value <= 0xD7FF) || - (value >= 0xE000 && value <= 0xFFFD) || - (value >= 0x10000 && value <= 0x10FFFF)) { - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - /* Move the raw pointers. */ - - parser.raw_buffer_pos += w - parser.offset += w - - /* Finally put the character into the buffer. */ - - /* 0000 0000-0000 007F . 0xxxxxxx */ - if value <= 0x7F { - parser.buffer[buffer_end] = byte(value) - } else if value <= 0x7FF { - /* 0000 0080-0000 07FF . 110xxxxx 10xxxxxx */ - parser.buffer[buffer_end] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_end+1] = byte(0x80 + (value & 0x3F)) - } else if value <= 0xFFFF { - /* 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx */ - parser.buffer[buffer_end] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_end+2] = byte(0x80 + (value & 0x3F)) - } else { - /* 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */ - parser.buffer[buffer_end] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_end+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_end+3] = byte(0x80 + (value & 0x3F)) - } - - buffer_end += w - parser.unread++ - } - - /* On EOF, put NUL into the buffer and return. */ - - if parser.eof { - parser.buffer[buffer_end] = 0 - buffer_end++ - parser.buffer = parser.buffer[:buffer_end] - parser.unread++ - return true - } - - } - - parser.buffer = parser.buffer[:buffer_end] - return true -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go deleted file mode 100644 index fb9e8be89..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go +++ /dev/null @@ -1,449 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
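A brief sketch of the manual UTF-8 re-encoding branches used at the end of the removed reader, compared against utf8.EncodeRune from the standard library (the function name below is illustrative only, and a valid Unicode scalar value is assumed):

package main

import (
	"fmt"
	"unicode/utf8"
)

// encode writes r using the same four ranges as the removed code:
// 1 byte up to 0x7F, 2 bytes up to 0x7FF, 3 bytes up to 0xFFFF, else 4 bytes.
func encode(r rune) []byte {
	switch {
	case r <= 0x7F:
		return []byte{byte(r)}
	case r <= 0x7FF:
		return []byte{byte(0xC0 | r>>6), byte(0x80 | r&0x3F)}
	case r <= 0xFFFF:
		return []byte{byte(0xE0 | r>>12), byte(0x80 | r>>6&0x3F), byte(0x80 | r&0x3F)}
	default:
		return []byte{byte(0xF0 | r>>18), byte(0x80 | r>>12&0x3F), byte(0x80 | r>>6&0x3F), byte(0x80 | r&0x3F)}
	}
}

func main() {
	buf := make([]byte, 4)
	n := utf8.EncodeRune(buf, '€')
	fmt.Printf("% x\n% x\n", encode('€'), buf[:n]) // e2 82 ac, twice
}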
-*/ - -package candiedyaml - -import ( - "bytes" - "encoding/base64" - "fmt" - "math" - "reflect" - "regexp" - "strconv" - "strings" - "time" -) - -var byteSliceType = reflect.TypeOf([]byte(nil)) - -var binary_tags = [][]byte{[]byte("!binary"), []byte(yaml_BINARY_TAG)} -var bool_values map[string]bool -var null_values map[string]bool - -var signs = []byte{'-', '+'} -var nulls = []byte{'~', 'n', 'N'} -var bools = []byte{'t', 'T', 'f', 'F', 'y', 'Y', 'n', 'N', 'o', 'O'} - -var timestamp_regexp *regexp.Regexp -var ymd_regexp *regexp.Regexp - -func init() { - bool_values = make(map[string]bool) - bool_values["y"] = true - bool_values["yes"] = true - bool_values["n"] = false - bool_values["no"] = false - bool_values["true"] = true - bool_values["false"] = false - bool_values["on"] = true - bool_values["off"] = false - - null_values = make(map[string]bool) - null_values["~"] = true - null_values["null"] = true - null_values["Null"] = true - null_values["NULL"] = true - - timestamp_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)(?:(?:[Tt]|[ \t]+)([0-9][0-9]?):([0-9][0-9]):([0-9][0-9])(?:\\.([0-9]*))?(?:[ \t]*(?:Z|([-+][0-9][0-9]?)(?::([0-9][0-9])?)?))?)?$") - ymd_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)$") -} - -func resolve(event yaml_event_t, v reflect.Value, useNumber bool) (string, error) { - val := string(event.value) - - if null_values[val] { - v.Set(reflect.Zero(v.Type())) - return yaml_NULL_TAG, nil - } - - switch v.Kind() { - case reflect.String: - if useNumber && v.Type() == numberType { - tag, i := resolveInterface(event, useNumber) - if n, ok := i.(Number); ok { - v.Set(reflect.ValueOf(n)) - return tag, nil - } - return "", fmt.Errorf("Not a number: '%s' at %s", event.value, event.start_mark) - } - - return resolve_string(val, v, event) - case reflect.Bool: - return resolve_bool(val, v, event) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return resolve_int(val, v, useNumber, event) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return resolve_uint(val, v, useNumber, event) - case reflect.Float32, reflect.Float64: - return resolve_float(val, v, useNumber, event) - case reflect.Interface: - _, i := resolveInterface(event, useNumber) - if i != nil { - v.Set(reflect.ValueOf(i)) - } else { - v.Set(reflect.Zero(v.Type())) - } - - case reflect.Struct: - return resolve_time(val, v, event) - case reflect.Slice: - if v.Type() != byteSliceType { - return "", fmt.Errorf("Cannot resolve %s into %s at %s", val, v.String(), event.start_mark) - } - b, err := decode_binary(event.value, event) - if err != nil { - return "", err - } - - v.Set(reflect.ValueOf(b)) - default: - return "", fmt.Errorf("Unknown resolution for '%s' using %s at %s", val, v.String(), event.start_mark) - } - - return yaml_STR_TAG, nil -} - -func hasBinaryTag(event yaml_event_t) bool { - for _, tag := range binary_tags { - if bytes.Equal(event.tag, tag) { - return true - } - } - return false -} - -func decode_binary(value []byte, event yaml_event_t) ([]byte, error) { - b := make([]byte, base64.StdEncoding.DecodedLen(len(value))) - n, err := base64.StdEncoding.Decode(b, value) - if err != nil { - return nil, fmt.Errorf("Invalid base64 text: '%s' at %s", string(b), event.start_mark) - } - return b[:n], nil -} - -func resolve_string(val string, v reflect.Value, event yaml_event_t) (string, error) { - if len(event.tag) > 0 { - if hasBinaryTag(event) { - b, err := decode_binary(event.value, 
event) - if err != nil { - return "", err - } - val = string(b) - } - } - v.SetString(val) - return yaml_STR_TAG, nil -} - -func resolve_bool(val string, v reflect.Value, event yaml_event_t) (string, error) { - b, found := bool_values[strings.ToLower(val)] - if !found { - return "", fmt.Errorf("Invalid boolean: '%s' at %s", val, event.start_mark) - } - - v.SetBool(b) - return yaml_BOOL_TAG, nil -} - -func resolve_int(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) { - original := val - val = strings.Replace(val, "_", "", -1) - var value uint64 - - isNumberValue := v.Type() == numberType - - sign := int64(1) - if val[0] == '-' { - sign = -1 - val = val[1:] - } else if val[0] == '+' { - val = val[1:] - } - - base := 0 - if val == "0" { - if isNumberValue { - v.SetString("0") - } else { - v.Set(reflect.Zero(v.Type())) - } - - return yaml_INT_TAG, nil - } - - if strings.HasPrefix(val, "0o") { - base = 8 - val = val[2:] - } - - value, err := strconv.ParseUint(val, base, 64) - if err != nil { - return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark) - } - - var val64 int64 - if value <= math.MaxInt64 { - val64 = int64(value) - if sign == -1 { - val64 = -val64 - } - } else if sign == -1 && value == uint64(math.MaxInt64)+1 { - val64 = math.MinInt64 - } else { - return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark) - } - - if isNumberValue { - v.SetString(strconv.FormatInt(val64, 10)) - } else { - if v.OverflowInt(val64) { - return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark) - } - v.SetInt(val64) - } - - return yaml_INT_TAG, nil -} - -func resolve_uint(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) { - original := val - val = strings.Replace(val, "_", "", -1) - var value uint64 - - isNumberValue := v.Type() == numberType - - if val[0] == '-' { - return "", fmt.Errorf("Unsigned int with negative value: '%s' at %s", original, event.start_mark) - } - - if val[0] == '+' { - val = val[1:] - } - - base := 0 - if val == "0" { - if isNumberValue { - v.SetString("0") - } else { - v.Set(reflect.Zero(v.Type())) - } - - return yaml_INT_TAG, nil - } - - if strings.HasPrefix(val, "0o") { - base = 8 - val = val[2:] - } - - value, err := strconv.ParseUint(val, base, 64) - if err != nil { - return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark) - } - - if isNumberValue { - v.SetString(strconv.FormatUint(value, 10)) - } else { - if v.OverflowUint(value) { - return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark) - } - - v.SetUint(value) - } - - return yaml_INT_TAG, nil -} - -func resolve_float(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) { - val = strings.Replace(val, "_", "", -1) - var value float64 - - isNumberValue := v.Type() == numberType - typeBits := 64 - if !isNumberValue { - typeBits = v.Type().Bits() - } - - sign := 1 - if val[0] == '-' { - sign = -1 - val = val[1:] - } else if val[0] == '+' { - val = val[1:] - } - - valLower := strings.ToLower(val) - if valLower == ".inf" { - value = math.Inf(sign) - } else if valLower == ".nan" { - value = math.NaN() - } else { - var err error - value, err = strconv.ParseFloat(val, typeBits) - value *= float64(sign) - - if err != nil { - return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark) - } - } - - if isNumberValue { - v.SetString(strconv.FormatFloat(value, 'g', -1, typeBits)) - } else { - if 
v.OverflowFloat(value) { - return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark) - } - - v.SetFloat(value) - } - - return yaml_FLOAT_TAG, nil -} - -func resolve_time(val string, v reflect.Value, event yaml_event_t) (string, error) { - var parsedTime time.Time - matches := ymd_regexp.FindStringSubmatch(val) - if len(matches) > 0 { - year, _ := strconv.Atoi(matches[1]) - month, _ := strconv.Atoi(matches[2]) - day, _ := strconv.Atoi(matches[3]) - parsedTime = time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC) - } else { - matches = timestamp_regexp.FindStringSubmatch(val) - if len(matches) == 0 { - return "", fmt.Errorf("Invalid timestamp: '%s' at %s", val, event.start_mark) - } - - year, _ := strconv.Atoi(matches[1]) - month, _ := strconv.Atoi(matches[2]) - day, _ := strconv.Atoi(matches[3]) - hour, _ := strconv.Atoi(matches[4]) - min, _ := strconv.Atoi(matches[5]) - sec, _ := strconv.Atoi(matches[6]) - - nsec := 0 - if matches[7] != "" { - millis, _ := strconv.Atoi(matches[7]) - nsec = int(time.Duration(millis) * time.Millisecond) - } - - loc := time.UTC - if matches[8] != "" { - sign := matches[8][0] - hr, _ := strconv.Atoi(matches[8][1:]) - min := 0 - if matches[9] != "" { - min, _ = strconv.Atoi(matches[9]) - } - - zoneOffset := (hr*60 + min) * 60 - if sign == '-' { - zoneOffset = -zoneOffset - } - - loc = time.FixedZone("", zoneOffset) - } - parsedTime = time.Date(year, time.Month(month), day, hour, min, sec, nsec, loc) - } - - v.Set(reflect.ValueOf(parsedTime)) - return "", nil -} - -func resolveInterface(event yaml_event_t, useNumber bool) (string, interface{}) { - val := string(event.value) - if len(event.tag) == 0 && !event.implicit { - return "", val - } - - if len(val) == 0 { - return yaml_NULL_TAG, nil - } - - var result interface{} - - sign := false - c := val[0] - switch { - case bytes.IndexByte(signs, c) != -1: - sign = true - fallthrough - case c >= '0' && c <= '9': - i := int64(0) - result = &i - if useNumber { - var n Number - result = &n - } - - v := reflect.ValueOf(result).Elem() - if _, err := resolve_int(val, v, useNumber, event); err == nil { - return yaml_INT_TAG, v.Interface() - } - - f := float64(0) - result = &f - if useNumber { - var n Number - result = &n - } - - v = reflect.ValueOf(result).Elem() - if _, err := resolve_float(val, v, useNumber, event); err == nil { - return yaml_FLOAT_TAG, v.Interface() - } - - if !sign { - t := time.Time{} - if _, err := resolve_time(val, reflect.ValueOf(&t).Elem(), event); err == nil { - return "", t - } - } - case bytes.IndexByte(nulls, c) != -1: - if null_values[val] { - return yaml_NULL_TAG, nil - } - b := false - if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil { - return yaml_BOOL_TAG, b - } - case c == '.': - f := float64(0) - result = &f - if useNumber { - var n Number - result = &n - } - - v := reflect.ValueOf(result).Elem() - if _, err := resolve_float(val, v, useNumber, event); err == nil { - return yaml_FLOAT_TAG, v.Interface() - } - case bytes.IndexByte(bools, c) != -1: - b := false - if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil { - return yaml_BOOL_TAG, b - } - } - - if hasBinaryTag(event) { - bytes, err := decode_binary(event.value, event) - if err == nil { - return yaml_BINARY_TAG, bytes - } - } - - return yaml_STR_TAG, val -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go deleted file mode 100644 index 
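The resolution.go helpers deleted above (resolve_int, resolve_float, resolve_time) implement YAML 1.1 scalar typing: underscores are stripped, a `0o` prefix selects octal, and `.inf`/`.nan` map to the IEEE special values. A minimal stand-alone sketch of those rules, with simplified sign handling and no tag logic (function name and structure are illustrative only, not the vendored code):

```go
package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

// resolveScalar sketches the integer/float resolution removed above:
// strip '_' separators, honor a "0o" octal prefix, and map ".inf"/".nan"
// to IEEE special values. Illustrative only.
func resolveScalar(val string) (interface{}, error) {
	s := strings.Replace(val, "_", "", -1)
	sign := 1.0
	if strings.HasPrefix(s, "-") {
		sign, s = -1.0, s[1:]
	} else if strings.HasPrefix(s, "+") {
		s = s[1:]
	}

	switch strings.ToLower(s) {
	case ".inf":
		return math.Inf(int(sign)), nil
	case ".nan":
		return math.NaN(), nil
	}

	base := 0 // base 0 lets strconv handle decimal and 0x prefixes
	if strings.HasPrefix(s, "0o") {
		base, s = 8, s[2:]
	}
	if u, err := strconv.ParseUint(s, base, 64); err == nil {
		return int64(sign) * int64(u), nil
	}
	if f, err := strconv.ParseFloat(s, 64); err == nil {
		return sign * f, nil
	}
	return nil, fmt.Errorf("cannot resolve scalar %q", val)
}

func main() {
	for _, v := range []string{"1_000", "0o777", "-3.14", ".inf"} {
		fmt.Println(resolveScalar(v))
	}
}
```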
25c29816e..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "fmt" - "os" -) - -func Run_parser(cmd string, args []string) { - for i := 0; i < len(args); i++ { - fmt.Printf("[%d] Scanning '%s'", i, args[i]) - file, err := os.Open(args[i]) - if err != nil { - panic(fmt.Sprintf("Invalid file '%s': %s", args[i], err.Error())) - } - - parser := yaml_parser_t{} - yaml_parser_initialize(&parser) - yaml_parser_set_input_reader(&parser, file) - - failed := false - token := yaml_token_t{} - count := 0 - for { - if !yaml_parser_scan(&parser, &token) { - failed = true - break - } - - if token.token_type == yaml_STREAM_END_TOKEN { - break - } - count++ - } - - file.Close() - - msg := "SUCCESS" - if failed { - msg = "FAILED" - if parser.error != yaml_NO_ERROR { - m := parser.problem_mark - fmt.Printf("ERROR: (%s) %s @ line: %d col: %d\n", - parser.context, parser.problem, m.line, m.column) - } - } - fmt.Printf("%s (%d tokens)\n", msg, count) - } -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner.go deleted file mode 100644 index 5c080a063..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner.go +++ /dev/null @@ -1,3318 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "bytes" -) - -/* - * Introduction - * ************ - * - * The following notes assume that you are familiar with the YAML specification - * (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in - * some cases we are less restrictive that it requires. - * - * The process of transforming a YAML stream into a sequence of events is - * divided on two steps: Scanning and Parsing. - * - * The Scanner transforms the input stream into a sequence of tokens, while the - * parser transform the sequence of tokens produced by the Scanner into a - * sequence of parsing events. - * - * The Scanner is rather clever and complicated. The Parser, on the contrary, - * is a straightforward implementation of a recursive-descendant parser (or, - * LL(1) parser, as it is usually called). - * - * Actually there are two issues of Scanning that might be called "clever", the - * rest is quite straightforward. The issues are "block collection start" and - * "simple keys". Both issues are explained below in details. - * - * Here the Scanning step is explained and implemented. 
We start with the list - * of all the tokens produced by the Scanner together with short descriptions. - * - * Now, tokens: - * - * STREAM-START(encoding) # The stream start. - * STREAM-END # The stream end. - * VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. - * TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. - * DOCUMENT-START # '---' - * DOCUMENT-END # '...' - * BLOCK-SEQUENCE-START # Indentation increase denoting a block - * BLOCK-MAPPING-START # sequence or a block mapping. - * BLOCK-END # Indentation decrease. - * FLOW-SEQUENCE-START # '[' - * FLOW-SEQUENCE-END # ']' - * BLOCK-SEQUENCE-START # '{' - * BLOCK-SEQUENCE-END # '}' - * BLOCK-ENTRY # '-' - * FLOW-ENTRY # ',' - * KEY # '?' or nothing (simple keys). - * VALUE # ':' - * ALIAS(anchor) # '*anchor' - * ANCHOR(anchor) # '&anchor' - * TAG(handle,suffix) # '!handle!suffix' - * SCALAR(value,style) # A scalar. - * - * The following two tokens are "virtual" tokens denoting the beginning and the - * end of the stream: - * - * STREAM-START(encoding) - * STREAM-END - * - * We pass the information about the input stream encoding with the - * STREAM-START token. - * - * The next two tokens are responsible for tags: - * - * VERSION-DIRECTIVE(major,minor) - * TAG-DIRECTIVE(handle,prefix) - * - * Example: - * - * %YAML 1.1 - * %TAG ! !foo - * %TAG !yaml! tag:yaml.org,2002: - * --- - * - * The correspoding sequence of tokens: - * - * STREAM-START(utf-8) - * VERSION-DIRECTIVE(1,1) - * TAG-DIRECTIVE("!","!foo") - * TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") - * DOCUMENT-START - * STREAM-END - * - * Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole - * line. - * - * The document start and end indicators are represented by: - * - * DOCUMENT-START - * DOCUMENT-END - * - * Note that if a YAML stream contains an implicit document (without '---' - * and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be - * produced. - * - * In the following examples, we present whole documents together with the - * produced tokens. - * - * 1. An implicit document: - * - * 'a scalar' - * - * Tokens: - * - * STREAM-START(utf-8) - * SCALAR("a scalar",single-quoted) - * STREAM-END - * - * 2. An explicit document: - * - * --- - * 'a scalar' - * ... - * - * Tokens: - * - * STREAM-START(utf-8) - * DOCUMENT-START - * SCALAR("a scalar",single-quoted) - * DOCUMENT-END - * STREAM-END - * - * 3. Several documents in a stream: - * - * 'a scalar' - * --- - * 'another scalar' - * --- - * 'yet another scalar' - * - * Tokens: - * - * STREAM-START(utf-8) - * SCALAR("a scalar",single-quoted) - * DOCUMENT-START - * SCALAR("another scalar",single-quoted) - * DOCUMENT-START - * SCALAR("yet another scalar",single-quoted) - * STREAM-END - * - * We have already introduced the SCALAR token above. The following tokens are - * used to describe aliases, anchors, tag, and scalars: - * - * ALIAS(anchor) - * ANCHOR(anchor) - * TAG(handle,suffix) - * SCALAR(value,style) - * - * The following series of examples illustrate the usage of these tokens: - * - * 1. A recursive sequence: - * - * &A [ *A ] - * - * Tokens: - * - * STREAM-START(utf-8) - * ANCHOR("A") - * FLOW-SEQUENCE-START - * ALIAS("A") - * FLOW-SEQUENCE-END - * STREAM-END - * - * 2. A tagged scalar: - * - * !!float "3.14" # A good approximation. - * - * Tokens: - * - * STREAM-START(utf-8) - * TAG("!!","float") - * SCALAR("3.14",double-quoted) - * STREAM-END - * - * 3. Various scalar styles: - * - * --- # Implicit empty plain scalars do not produce tokens. 
- * --- a plain scalar - * --- 'a single-quoted scalar' - * --- "a double-quoted scalar" - * --- |- - * a literal scalar - * --- >- - * a folded - * scalar - * - * Tokens: - * - * STREAM-START(utf-8) - * DOCUMENT-START - * DOCUMENT-START - * SCALAR("a plain scalar",plain) - * DOCUMENT-START - * SCALAR("a single-quoted scalar",single-quoted) - * DOCUMENT-START - * SCALAR("a double-quoted scalar",double-quoted) - * DOCUMENT-START - * SCALAR("a literal scalar",literal) - * DOCUMENT-START - * SCALAR("a folded scalar",folded) - * STREAM-END - * - * Now it's time to review collection-related tokens. We will start with - * flow collections: - * - * FLOW-SEQUENCE-START - * FLOW-SEQUENCE-END - * FLOW-MAPPING-START - * FLOW-MAPPING-END - * FLOW-ENTRY - * KEY - * VALUE - * - * The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and - * FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' - * correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the - * indicators '?' and ':', which are used for denoting mapping keys and values, - * are represented by the KEY and VALUE tokens. - * - * The following examples show flow collections: - * - * 1. A flow sequence: - * - * [item 1, item 2, item 3] - * - * Tokens: - * - * STREAM-START(utf-8) - * FLOW-SEQUENCE-START - * SCALAR("item 1",plain) - * FLOW-ENTRY - * SCALAR("item 2",plain) - * FLOW-ENTRY - * SCALAR("item 3",plain) - * FLOW-SEQUENCE-END - * STREAM-END - * - * 2. A flow mapping: - * - * { - * a simple key: a value, # Note that the KEY token is produced. - * ? a complex key: another value, - * } - * - * Tokens: - * - * STREAM-START(utf-8) - * FLOW-MAPPING-START - * KEY - * SCALAR("a simple key",plain) - * VALUE - * SCALAR("a value",plain) - * FLOW-ENTRY - * KEY - * SCALAR("a complex key",plain) - * VALUE - * SCALAR("another value",plain) - * FLOW-ENTRY - * FLOW-MAPPING-END - * STREAM-END - * - * A simple key is a key which is not denoted by the '?' indicator. Note that - * the Scanner still produce the KEY token whenever it encounters a simple key. - * - * For scanning block collections, the following tokens are used (note that we - * repeat KEY and VALUE here): - * - * BLOCK-SEQUENCE-START - * BLOCK-MAPPING-START - * BLOCK-END - * BLOCK-ENTRY - * KEY - * VALUE - * - * The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation - * increase that precedes a block collection (cf. the INDENT token in Python). - * The token BLOCK-END denote indentation decrease that ends a block collection - * (cf. the DEDENT token in Python). However YAML has some syntax pecularities - * that makes detections of these tokens more complex. - * - * The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators - * '-', '?', and ':' correspondingly. - * - * The following examples show how the tokens BLOCK-SEQUENCE-START, - * BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: - * - * 1. 
Block sequences: - * - * - item 1 - * - item 2 - * - - * - item 3.1 - * - item 3.2 - * - - * key 1: value 1 - * key 2: value 2 - * - * Tokens: - * - * STREAM-START(utf-8) - * BLOCK-SEQUENCE-START - * BLOCK-ENTRY - * SCALAR("item 1",plain) - * BLOCK-ENTRY - * SCALAR("item 2",plain) - * BLOCK-ENTRY - * BLOCK-SEQUENCE-START - * BLOCK-ENTRY - * SCALAR("item 3.1",plain) - * BLOCK-ENTRY - * SCALAR("item 3.2",plain) - * BLOCK-END - * BLOCK-ENTRY - * BLOCK-MAPPING-START - * KEY - * SCALAR("key 1",plain) - * VALUE - * SCALAR("value 1",plain) - * KEY - * SCALAR("key 2",plain) - * VALUE - * SCALAR("value 2",plain) - * BLOCK-END - * BLOCK-END - * STREAM-END - * - * 2. Block mappings: - * - * a simple key: a value # The KEY token is produced here. - * ? a complex key - * : another value - * a mapping: - * key 1: value 1 - * key 2: value 2 - * a sequence: - * - item 1 - * - item 2 - * - * Tokens: - * - * STREAM-START(utf-8) - * BLOCK-MAPPING-START - * KEY - * SCALAR("a simple key",plain) - * VALUE - * SCALAR("a value",plain) - * KEY - * SCALAR("a complex key",plain) - * VALUE - * SCALAR("another value",plain) - * KEY - * SCALAR("a mapping",plain) - * BLOCK-MAPPING-START - * KEY - * SCALAR("key 1",plain) - * VALUE - * SCALAR("value 1",plain) - * KEY - * SCALAR("key 2",plain) - * VALUE - * SCALAR("value 2",plain) - * BLOCK-END - * KEY - * SCALAR("a sequence",plain) - * VALUE - * BLOCK-SEQUENCE-START - * BLOCK-ENTRY - * SCALAR("item 1",plain) - * BLOCK-ENTRY - * SCALAR("item 2",plain) - * BLOCK-END - * BLOCK-END - * STREAM-END - * - * YAML does not always require to start a new block collection from a new - * line. If the current line contains only '-', '?', and ':' indicators, a new - * block collection may start at the current line. The following examples - * illustrate this case: - * - * 1. Collections in a sequence: - * - * - - item 1 - * - item 2 - * - key 1: value 1 - * key 2: value 2 - * - ? complex key - * : complex value - * - * Tokens: - * - * STREAM-START(utf-8) - * BLOCK-SEQUENCE-START - * BLOCK-ENTRY - * BLOCK-SEQUENCE-START - * BLOCK-ENTRY - * SCALAR("item 1",plain) - * BLOCK-ENTRY - * SCALAR("item 2",plain) - * BLOCK-END - * BLOCK-ENTRY - * BLOCK-MAPPING-START - * KEY - * SCALAR("key 1",plain) - * VALUE - * SCALAR("value 1",plain) - * KEY - * SCALAR("key 2",plain) - * VALUE - * SCALAR("value 2",plain) - * BLOCK-END - * BLOCK-ENTRY - * BLOCK-MAPPING-START - * KEY - * SCALAR("complex key") - * VALUE - * SCALAR("complex value") - * BLOCK-END - * BLOCK-END - * STREAM-END - * - * 2. Collections in a mapping: - * - * ? a sequence - * : - item 1 - * - item 2 - * ? a mapping - * : key 1: value 1 - * key 2: value 2 - * - * Tokens: - * - * STREAM-START(utf-8) - * BLOCK-MAPPING-START - * KEY - * SCALAR("a sequence",plain) - * VALUE - * BLOCK-SEQUENCE-START - * BLOCK-ENTRY - * SCALAR("item 1",plain) - * BLOCK-ENTRY - * SCALAR("item 2",plain) - * BLOCK-END - * KEY - * SCALAR("a mapping",plain) - * VALUE - * BLOCK-MAPPING-START - * KEY - * SCALAR("key 1",plain) - * VALUE - * SCALAR("value 1",plain) - * KEY - * SCALAR("key 2",plain) - * VALUE - * SCALAR("value 2",plain) - * BLOCK-END - * BLOCK-END - * STREAM-END - * - * YAML also permits non-indented sequences if they are included into a block - * mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: - * - * key: - * - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
- * - item 2 - * - * Tokens: - * - * STREAM-START(utf-8) - * BLOCK-MAPPING-START - * KEY - * SCALAR("key",plain) - * VALUE - * BLOCK-ENTRY - * SCALAR("item 1",plain) - * BLOCK-ENTRY - * SCALAR("item 2",plain) - * BLOCK-END - */ - -/* - * Ensure that the buffer contains the required number of characters. - * Return 1 on success, 0 on failure (reader error or memory error). - */ -func cache(parser *yaml_parser_t, length int) bool { - if parser.unread >= length { - return true - } - - return yaml_parser_update_buffer(parser, length) -} - -/* - * Advance the buffer pointer. - */ -func skip(parser *yaml_parser_t) { - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf_at(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - } else if is_break_at(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - } -} - -/* - * Copy a character to a string buffer and advance pointers. - */ - -func read(parser *yaml_parser_t, s []byte) []byte { - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) - parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -/* - * Copy a line break character to a string buffer and advance pointers. - */ -func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - if buf[pos] == '\r' && buf[pos+1] == '\n' { - /* CR LF . LF */ - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - } else if buf[pos] == '\r' || buf[pos] == '\n' { - /* CR|LF . LF */ - s = append(s, '\n') - parser.buffer_pos += 1 - } else if buf[pos] == '\xC2' && buf[pos+1] == '\x85' { - /* NEL . LF */ - s = append(s, '\n') - parser.buffer_pos += 2 - } else if buf[pos] == '\xE2' && buf[pos+1] == '\x80' && - (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9') { - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) - parser.buffer_pos += 3 - } else { - return s - } - - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - return s -} - -/* - * Get the next token. - */ - -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - /* Erase the token object. */ - *token = yaml_token_t{} - - /* No tokens after STREAM-END or error. */ - - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - /* Ensure that the tokens queue contains enough tokens. */ - - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - /* Fetch the next token from the queue. */ - - *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.token_available = false - parser.tokens_parsed++ - - if token.token_type == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - - return true -} - -/* - * Set the scanner error and return 0. 
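The comment above documents the token stream the scanner produces, e.g. a flow sequence `[item 1, item 2, item 3]` becomes STREAM-START, FLOW-SEQUENCE-START, SCALAR/FLOW-ENTRY pairs, FLOW-SEQUENCE-END, STREAM-END. As a rough illustration of that taxonomy only (not the removed implementation), a toy tokenizer for one-line flow sequences of plain scalars:

```go
package main

import (
	"fmt"
	"strings"
)

// scanFlowSequence emits the token names documented above for a single-line
// flow sequence of plain scalars. It is a toy: no nesting, no quoting.
func scanFlowSequence(in string) []string {
	tokens := []string{"STREAM-START(utf-8)"}
	rest := strings.TrimSpace(in)
	if strings.HasPrefix(rest, "[") {
		tokens = append(tokens, "FLOW-SEQUENCE-START")
		body := strings.TrimSuffix(strings.TrimPrefix(rest, "["), "]")
		for i, item := range strings.Split(body, ",") {
			if i > 0 {
				tokens = append(tokens, "FLOW-ENTRY")
			}
			tokens = append(tokens, fmt.Sprintf("SCALAR(%q,plain)", strings.TrimSpace(item)))
		}
		tokens = append(tokens, "FLOW-SEQUENCE-END")
	}
	return append(tokens, "STREAM-END")
}

func main() {
	for _, t := range scanFlowSequence("[item 1, item 2, item 3]") {
		fmt.Println(t)
	}
}
```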
- */ - -func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, - context_mark YAML_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark YAML_mark_t, problem string) bool { - context := "while parsing a %TAG directive" - if directive { - context = "while parsing a tag" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet") -} - -/* - * Ensure that the tokens queue contains at least one token which can be - * returned to the Parser. - */ - -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - /* While we need more tokens to fetch, do it. */ - - for { - /* - * Check if we really need to fetch more tokens. - */ - - need_more_tokens := false - - if parser.tokens_head == len(parser.tokens) { - /* Queue is empty. */ - - need_more_tokens = true - } else { - - /* Check if any potential simple key may occupy the head position. */ - - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - - if simple_key.possible && - simple_key.token_number == parser.tokens_parsed { - need_more_tokens = true - break - } - } - } - if len(parser.simple_keys) > 0 { - - } - /* We are finished. */ - - if !need_more_tokens { - break - } - - /* Fetch the next token. */ - - if !yaml_parser_fetch_next_token(parser) { - return false - } - - } - - parser.token_available = true - - return true -} - -/* - * The dispatcher for token fetchers. - */ - -func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { - /* Ensure that the buffer is initialized. */ - - if !cache(parser, 1) { - return false - } - - /* Check if we just started scanning. Fetch STREAM-START then. */ - - if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - /* Eat whitespaces and comments until we reach the next token. */ - - if !yaml_parser_scan_to_next_token(parser) { - return false - } - - /* Remove obsolete potential simple keys. */ - - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - /* Check the indentation level against the current column. */ - - if !yaml_parser_unroll_indent(parser, parser.mark.column) { - return false - } - - /* - * Ensure that the buffer contains at least 4 characters. 4 is the length - * of the longest indicators ('--- ' and '... '). - */ - - if !cache(parser, 4) { - return false - } - - /* Is it the end of the stream? */ - buf := parser.buffer - pos := parser.buffer_pos - - if is_z(buf[pos]) { - return yaml_parser_fetch_stream_end(parser) - } - - /* Is it a directive? */ - - if parser.mark.column == 0 && buf[pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - /* Is it the document start indicator? */ - - if parser.mark.column == 0 && - buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && - is_blankz_at(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, - yaml_DOCUMENT_START_TOKEN) - } - - /* Is it the document end indicator? */ - - if parser.mark.column == 0 && - buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && - is_blankz_at(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, - yaml_DOCUMENT_END_TOKEN) - } - - /* Is it the flow sequence start indicator? 
*/ - - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, - yaml_FLOW_SEQUENCE_START_TOKEN) - } - - /* Is it the flow mapping start indicator? */ - - if buf[pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, - yaml_FLOW_MAPPING_START_TOKEN) - } - - /* Is it the flow sequence end indicator? */ - - if buf[pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - /* Is it the flow mapping end indicator? */ - - if buf[pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - /* Is it the flow entry indicator? */ - - if buf[pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - /* Is it the block entry indicator? */ - if buf[pos] == '-' && is_blankz_at(buf, pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - /* Is it the key indicator? */ - if buf[pos] == '?' && - (parser.flow_level > 0 || is_blankz_at(buf, pos+1)) { - return yaml_parser_fetch_key(parser) - } - - /* Is it the value indicator? */ - if buf[pos] == ':' && - (parser.flow_level > 0 || is_blankz_at(buf, pos+1)) { - return yaml_parser_fetch_value(parser) - } - - /* Is it an alias? */ - if buf[pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - /* Is it an anchor? */ - - if buf[pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - /* Is it a tag? */ - - if buf[pos] == '!' { - return yaml_parser_fetch_tag(parser) - } - - /* Is it a literal scalar? */ - if buf[pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - /* Is it a folded scalar? */ - if buf[pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - /* Is it a single-quoted scalar? */ - - if buf[pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - /* Is it a double-quoted scalar? */ - if buf[pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - /* - * Is it a plain scalar? - * - * A plain scalar may start with any non-blank characters except - * - * '-', '?', ':', ',', '[', ']', '{', '}', - * '#', '&', '*', '!', '|', '>', '\'', '\"', - * '%', '@', '`'. - * - * In the block context (and, for the '-' indicator, in the flow context - * too), it may also start with the characters - * - * '-', '?', ':' - * - * if it is followed by a non-space character. - * - * The last rule is more restrictive than the specification requires. - */ - - b := buf[pos] - if !(is_blankz_at(buf, pos) || b == '-' || - b == '?' || b == ':' || - b == ',' || b == '[' || - b == ']' || b == '{' || - b == '}' || b == '#' || - b == '&' || b == '*' || - b == '!' || b == '|' || - b == '>' || b == '\'' || - b == '"' || b == '%' || - b == '@' || b == '`') || - (b == '-' && !is_blank(buf[pos+1])) || - (parser.flow_level == 0 && - (buf[pos] == '?' || buf[pos] == ':') && - !is_blank(buf[pos+1])) { - return yaml_parser_fetch_plain_scalar(parser) - } - - /* - * If we don't determine the token type so far, it is an error. - */ - - return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -/* - * Check the list of potential simple keys and remove the positions that - * cannot contain simple keys anymore. - */ - -func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { - /* Check for a potential simple key for each flow level. 
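The dispatcher above ends with the rule for what may start a plain scalar: any non-blank character except the YAML indicators, with '-', '?' and ':' readmitted when not followed by a blank. A stand-alone restatement of that check (the helper name and the simplified blank test are assumptions; the removed code also treats line breaks and end-of-input as blanks):

```go
package main

import (
	"fmt"
	"strings"
)

// canStartPlainScalar restates the dispatcher's plain-scalar rule in isolation.
func canStartPlainScalar(b, next byte, flowLevel int) bool {
	const indicators = "-?:,[]{}#&*!|>'\"%@`"
	isBlank := func(c byte) bool { return c == ' ' || c == '\t' || c == 0 }

	if isBlank(b) {
		return false
	}
	if !strings.ContainsRune(indicators, rune(b)) {
		return true // ordinary character: a plain scalar may start here
	}
	if b == '-' && !isBlank(next) {
		return true // '-' followed by a non-blank is not a block entry
	}
	if flowLevel == 0 && (b == '?' || b == ':') && !isBlank(next) {
		return true // block context only
	}
	return false
}

func main() {
	fmt.Println(canStartPlainScalar('a', ' ', 0)) // true
	fmt.Println(canStartPlainScalar('-', '1', 0)) // true: "-1" is a scalar
	fmt.Println(canStartPlainScalar('-', ' ', 0)) // false: block entry indicator
	fmt.Println(canStartPlainScalar('{', 'a', 0)) // false: flow mapping start
}
```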
*/ - - for i := range parser.simple_keys { - /* - * The specification requires that a simple key - * - * - is limited to a single line, - * - is shorter than 1024 characters. - */ - - simple_key := &parser.simple_keys[i] - if simple_key.possible && - (simple_key.mark.line < parser.mark.line || - simple_key.mark.index+1024 < parser.mark.index) { - - /* Check if the potential simple key to be removed is required. */ - - if simple_key.required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - - simple_key.possible = false - } - } - - return true -} - -/* - * Check if a simple key may start at the current position and add it if - * needed. - */ - -func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - /* - * A simple key is required at the current position if the scanner is in - * the block context and the current column coincides with the indentation - * level. - */ - - required := (parser.flow_level == 0 && - parser.indent == parser.mark.column) - - /* - * A simple key is required only when it is the first token in the current - * line. Therefore it is always allowed. But we add a check anyway. - */ - if required && !parser.simple_key_allowed { - panic("impossible") /* Impossible. */ - } - - /* - * If the current position may start a simple key, save it. - */ - - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - } - simple_key.mark = parser.mark - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - } - - return true -} - -/* - * Remove a potential simple key at the current flow level. - */ - -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - if simple_key.possible { - /* If the key is required, it is an error. */ - - if simple_key.required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - } - - /* Remove the key from the stack. */ - - simple_key.possible = false - - return true -} - -/* - * Increase the flow level and resize the simple key list if needed. - */ - -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { - /* Reset the simple key on the next level. */ - - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - /* Increase the flow level. */ - - parser.flow_level++ - - return true -} - -/* - * Decrease the flow level. - */ - -func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { - if parser.flow_level > 0 { - parser.flow_level-- - parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] - } - - return true -} - -/* - * Push the current indentation level to the stack and set the new level - * the current column is greater than the indentation level. In this case, - * append or insert the specified token into the token queue. - * - */ - -func yaml_parser_roll_indent(parser *yaml_parser_t, column int, - number int, token_type yaml_token_type_t, mark YAML_mark_t) bool { - /* In the flow context, do nothing. */ - - if parser.flow_level > 0 { - return true - } - - if parser.indent == -1 || parser.indent < column { - /* - * Push the current indentation level to the stack and set the new - * indentation level. 
- */ - - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - - /* Create a token and insert it into the queue. */ - token := yaml_token_t{ - token_type: token_type, - start_mark: mark, - end_mark: mark, - } - - // number == -1 -> enqueue otherwise insert - if number > -1 { - number -= parser.tokens_parsed - } - insert_token(parser, number, &token) - } - - return true -} - -/* - * Pop indentation levels from the indents stack until the current level - * becomes less or equal to the column. For each indentation level, append - * the BLOCK-END token. - */ - -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { - /* In the flow context, do nothing. */ - - if parser.flow_level > 0 { - return true - } - - /* - * column is unsigned and parser->indent is signed, so if - * parser->indent is less than zero the conditional in the while - * loop below is incorrect. Guard against that. - */ - - if parser.indent < 0 { - return true - } - - /* Loop through the indentation levels in the stack. */ - - for parser.indent > column { - /* Create a token and append it to the queue. */ - token := yaml_token_t{ - token_type: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - insert_token(parser, -1, &token) - - /* Pop the indentation level. */ - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - - } - - return true -} - -/* - * Pop indentation levels from the indents stack until the current - * level resets to -1. For each indentation level, append the - * BLOCK-END token. - */ - -func yaml_parser_reset_indent(parser *yaml_parser_t) bool { - /* In the flow context, do nothing. */ - - if parser.flow_level > 0 { - return true - } - - /* Loop through the indentation levels in the stack. */ - - for parser.indent > -1 { - /* Create a token and append it to the queue. */ - - token := yaml_token_t{ - token_type: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - insert_token(parser, -1, &token) - - /* Pop the indentation level. */ - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - - return true -} - -/* - * Initialize the scanner and produce the STREAM-START token. - */ - -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - /* Set the initial indentation. */ - - parser.indent = -1 - - /* Initialize the simple key stack. */ - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - /* A simple key is allowed at the beginning of the stream. */ - - parser.simple_key_allowed = true - - /* We have started. */ - - parser.stream_start_produced = true - - /* Create the STREAM-START token and append it to the queue. */ - token := yaml_token_t{ - token_type: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the STREAM-END token and shut down the scanner. - */ - -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - /* Force new line. */ - - if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - /* Reset the indentation level. */ - - if !yaml_parser_reset_indent(parser) { - return false - } - - /* Reset simple keys. */ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - /* Create the STREAM-END token and append it to the queue. 
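yaml_parser_roll_indent and yaml_parser_unroll_indent above maintain a stack of indentation columns: a deeper column pushes the previous indent (and would enqueue a BLOCK-*-START token), and moving back left pops levels, emitting one BLOCK-END per level. A minimal sketch of that discipline with hypothetical names:

```go
package main

import "fmt"

// indenter mimics the roll/unroll stack discipline described above.
type indenter struct {
	indent  int
	indents []int
}

func newIndenter() *indenter { return &indenter{indent: -1} }

// roll pushes the previous indent when a block collection starts deeper.
func (in *indenter) roll(column int) bool {
	if in.indent < column {
		in.indents = append(in.indents, in.indent)
		in.indent = column
		return true // caller would enqueue BLOCK-SEQUENCE/MAPPING-START
	}
	return false
}

// unroll pops levels until indent <= column, one BLOCK-END per pop.
func (in *indenter) unroll(column int) (blockEnds int) {
	for in.indent > column {
		blockEnds++
		in.indent = in.indents[len(in.indents)-1]
		in.indents = in.indents[:len(in.indents)-1]
	}
	return blockEnds
}

func main() {
	in := newIndenter()
	in.roll(0)                 // top-level block mapping
	in.roll(2)                 // nested mapping at column 2
	fmt.Println(in.unroll(0))  // 1: one BLOCK-END closing the nested mapping
	fmt.Println(in.unroll(-1)) // 1: one more BLOCK-END at end of stream
}
```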
*/ - token := yaml_token_t{ - token_type: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. - */ - -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - /* Reset the indentation level. */ - - if !yaml_parser_reset_indent(parser) { - return false - } - - /* Reset simple keys. */ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - /* Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. */ - var token yaml_token_t - if !yaml_parser_scan_directive(parser, &token) { - return false - } - - /* Append the token to the queue. */ - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the DOCUMENT-START or DOCUMENT-END token. - */ - -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, - token_type yaml_token_type_t) bool { - - /* Reset the indentation level. */ - - if !yaml_parser_reset_indent(parser) { - return false - } - - /* Reset simple keys. */ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - /* Consume the token. */ - - start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - /* Create the DOCUMENT-START or DOCUMENT-END token. */ - - token := yaml_token_t{ - token_type: token_type, - start_mark: start_mark, - end_mark: end_mark, - } - - /* Append the token to the queue. */ - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. - */ - -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, - token_type yaml_token_type_t) bool { - - /* The indicators '[' and '{' may start a simple key. */ - - if !yaml_parser_save_simple_key(parser) { - return false - } - - /* Increase the flow level. */ - - if !yaml_parser_increase_flow_level(parser) { - return false - } - - /* A simple key may follow the indicators '[' and '{'. */ - - parser.simple_key_allowed = true - - /* Consume the token. */ - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - /* Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. */ - - token := yaml_token_t{ - token_type: token_type, - start_mark: start_mark, - end_mark: end_mark, - } - - /* Append the token to the queue. */ - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. - */ - -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, - token_type yaml_token_type_t) bool { - - /* Reset any potential simple key on the current flow level. */ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - /* Decrease the flow level. */ - - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - /* No simple keys after the indicators ']' and '}'. */ - - parser.simple_key_allowed = false - - /* Consume the token. */ - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - /* Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. */ - - token := yaml_token_t{ - token_type: token_type, - start_mark: start_mark, - end_mark: end_mark, - } - - /* Append the token to the queue. */ - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the FLOW-ENTRY token. - */ - -func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - - /* Reset any potential simple keys on the current flow level. 
*/ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - /* Simple keys are allowed after ','. */ - - parser.simple_key_allowed = true - - /* Consume the token. */ - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - /* Create the FLOW-ENTRY token and append it to the queue. */ - - token := yaml_token_t{ - token_type: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the BLOCK-ENTRY token. - */ - -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - - /* Check if the scanner is in the block context. */ - - if parser.flow_level == 0 { - /* Check if we are allowed to start a new entry. */ - - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - - /* Add the BLOCK-SEQUENCE-START token if needed. */ - - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, - yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - /* - * It is an error for the '-' indicator to occur in the flow context, - * but we let the Parser detect and report about it because the Parser - * is able to point to the context. - */ - } - - /* Reset any potential simple keys on the current flow level. */ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - /* Simple keys are allowed after '-'. */ - - parser.simple_key_allowed = true - - /* Consume the token. */ - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - /* Create the BLOCK-ENTRY token and append it to the queue. */ - - token := yaml_token_t{ - token_type: yaml_BLOCK_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the KEY token. - */ - -func yaml_parser_fetch_key(parser *yaml_parser_t) bool { - /* In the block context, additional checks are required. */ - - if parser.flow_level == 0 { - /* Check if we are allowed to start a new key (not nessesary simple). */ - - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping keys are not allowed in this context") - } - - /* Add the BLOCK-MAPPING-START token if needed. */ - - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, - yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - /* Reset any potential simple keys on the current flow level. */ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - /* Simple keys are allowed after '?' in the block context. */ - - parser.simple_key_allowed = (parser.flow_level == 0) - - /* Consume the token. */ - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - /* Create the KEY token and append it to the queue. */ - - token := yaml_token_t{ - token_type: yaml_KEY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the VALUE token. - */ - -func yaml_parser_fetch_value(parser *yaml_parser_t) bool { - - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - /* Have we found a simple key? */ - - if simple_key.possible { - - /* Create the KEY token and insert it into the queue. 
*/ - - token := yaml_token_t{ - token_type: yaml_KEY_TOKEN, - start_mark: simple_key.mark, - end_mark: simple_key.mark, - } - - insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) - - /* In the block context, we may need to add the BLOCK-MAPPING-START token. */ - - if !yaml_parser_roll_indent(parser, simple_key.mark.column, - simple_key.token_number, - yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { - return false - } - - /* Remove the simple key. */ - - simple_key.possible = false - - /* A simple key cannot follow another simple key. */ - - parser.simple_key_allowed = false - } else { - /* The ':' indicator follows a complex key. */ - - /* In the block context, extra checks are required. */ - - if parser.flow_level == 0 { - /* Check if we are allowed to start a complex value. */ - - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - /* Add the BLOCK-MAPPING-START token if needed. */ - - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, - yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - /* Simple keys after ':' are allowed in the block context. */ - - parser.simple_key_allowed = (parser.flow_level == 0) - } - - /* Consume the token. */ - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - /* Create the VALUE token and append it to the queue. */ - - token := yaml_token_t{ - token_type: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the ALIAS or ANCHOR token. - */ - -func yaml_parser_fetch_anchor(parser *yaml_parser_t, token_type yaml_token_type_t) bool { - - /* An anchor or an alias could be a simple key. */ - - if !yaml_parser_save_simple_key(parser) { - return false - } - - /* A simple key cannot follow an anchor or an alias. */ - - parser.simple_key_allowed = false - - /* Create the ALIAS or ANCHOR token and append it to the queue. */ - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, token_type) { - return false - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the TAG token. - */ - -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - /* A tag could be a simple key. */ - - if !yaml_parser_save_simple_key(parser) { - return false - } - - /* A simple key cannot follow a tag. */ - - parser.simple_key_allowed = false - - /* Create the TAG token and append it to the queue. */ - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. - */ - -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - /* Remove any potential simple keys. */ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - /* A simple key may follow a block scalar. */ - - parser.simple_key_allowed = true - - /* Create the SCALAR token and append it to the queue. */ - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. - */ - -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - - /* A plain scalar could be a simple key. 
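yaml_parser_fetch_value above is where the "simple key" trick pays off: the scanner remembers the queue position where a simple key could have started, and when a ':' arrives it inserts a KEY token retroactively at that position, ahead of the already-queued SCALAR. A stand-alone sketch of that queue manipulation (type and method names are hypothetical):

```go
package main

import "fmt"

// simpleKeyQueue sketches retroactive KEY insertion for simple keys.
type simpleKeyQueue struct {
	tokens   []string
	keyIndex int  // queue position saved for a possible simple key
	keySaved bool
}

func (q *simpleKeyQueue) saveSimpleKey()       { q.keyIndex, q.keySaved = len(q.tokens), true }
func (q *simpleKeyQueue) appendToken(t string) { q.tokens = append(q.tokens, t) }

// fetchValue handles the ':' indicator: if a simple key was possible,
// insert KEY at the saved position, then append VALUE.
func (q *simpleKeyQueue) fetchValue() {
	if q.keySaved {
		q.tokens = append(q.tokens[:q.keyIndex],
			append([]string{"KEY"}, q.tokens[q.keyIndex:]...)...)
		q.keySaved = false
	}
	q.tokens = append(q.tokens, "VALUE")
}

func main() {
	q := &simpleKeyQueue{}
	q.saveSimpleKey()
	q.appendToken(`SCALAR("a simple key",plain)`)
	q.fetchValue() // the ':' indicator arrives
	q.appendToken(`SCALAR("a value",plain)`)
	fmt.Println(q.tokens)
	// [KEY SCALAR("a simple key",plain) VALUE SCALAR("a value",plain)]
	// (BLOCK-MAPPING-START handling is omitted in this sketch.)
}
```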
*/ - - if !yaml_parser_save_simple_key(parser) { - return false - } - - /* A simple key cannot follow a flow scalar. */ - - parser.simple_key_allowed = false - - /* Create the SCALAR token and append it to the queue. */ - var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the SCALAR(...,plain) token. - */ - -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - /* A plain scalar could be a simple key. */ - - if !yaml_parser_save_simple_key(parser) { - return false - } - - /* A simple key cannot follow a flow scalar. */ - - parser.simple_key_allowed = false - - /* Create the SCALAR token and append it to the queue. */ - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Eat whitespaces and comments until the next token is found. - */ - -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - /* Until the next token is not found. */ - - for { - /* Allow the BOM mark to start a line. */ - - if !cache(parser, 1) { - return false - } - - if parser.mark.column == 0 && is_bom_at(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - /* - * Eat whitespaces. - * - * Tabs are allowed: - * - * - in the flow context; - * - in the block context, but not at the beginning of the line or - * after '-', '?', or ':' (complex value). - */ - - if !cache(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || - ((parser.flow_level > 0 || !parser.simple_key_allowed) && - parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if !cache(parser, 1) { - return false - } - } - - /* Eat a comment until a line break. */ - - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz_at(parser.buffer, parser.buffer_pos) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - } - - /* If it is a line break, eat it. */ - - if is_break_at(parser.buffer, parser.buffer_pos) { - if !cache(parser, 2) { - return false - } - skip_line(parser) - - /* In the block context, a new line may start a simple key. */ - - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - /* We have found a token. */ - - break - } - } - - return true -} - -/* - * Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. - * - * Scope: - * %YAML 1.1 # a comment \n - * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - * %TAG !yaml! tag:yaml.org,2002: \n - * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - */ - -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - /* Eat '%'. */ - - start_mark := parser.mark - - skip(parser) - - /* Scan the directive name. */ - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - /* Is it a YAML directive? */ - var major, minor int - if bytes.Equal(name, []byte("YAML")) { - /* Scan the VERSION directive value. */ - - if !yaml_parser_scan_version_directive_value(parser, start_mark, - &major, &minor) { - return false - } - - end_mark := parser.mark - - /* Create a VERSION-DIRECTIVE token. */ - - *token = yaml_token_t{ - token_type: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - } else if bytes.Equal(name, []byte("TAG")) { - /* Is it a TAG directive? */ - /* Scan the TAG directive value. 
*/ - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, - &handle, &prefix) { - return false - } - - end_mark := parser.mark - - /* Create a TAG-DIRECTIVE token. */ - - *token = yaml_token_t{ - token_type: yaml_TAG_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - prefix: prefix, - } - } else { - /* Unknown directive. */ - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found uknown directive name") - return false - } - - /* Eat the rest of the line including any comments. */ - - if !cache(parser, 1) { - return false - } - - for is_blank(parser.buffer[parser.buffer_pos]) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz_at(parser.buffer, parser.buffer_pos) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - } - - /* Check if we are at the end of the line. */ - - if !is_breakz_at(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "did not find expected comment or line break") - return false - } - - /* Eat a line break. */ - - if is_break_at(parser.buffer, parser.buffer_pos) { - if !cache(parser, 2) { - return false - } - skip_line(parser) - } - - return true -} - -/* - * Scan the directive name. - * - * Scope: - * %YAML 1.1 # a comment \n - * ^^^^ - * %TAG !yaml! tag:yaml.org,2002: \n - * ^^^ - */ - -func yaml_parser_scan_directive_name(parser *yaml_parser_t, - start_mark YAML_mark_t, name *[]byte) bool { - - /* Consume the directive name. */ - - if !cache(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer[parser.buffer_pos]) { - s = read(parser, s) - if !cache(parser, 1) { - return false - } - } - - /* Check if the name is empty. */ - - if len(s) == 0 { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "could not find expected directive name") - return false - } - - /* Check for an blank character after the name. */ - - if !is_blankz_at(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unexpected non-alphabetical character") - return false - } - - *name = s - - return true -} - -/* - * Scan the value of VERSION-DIRECTIVE. - * - * Scope: - * %YAML 1.1 # a comment \n - * ^^^^^^ - */ - -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, - start_mark YAML_mark_t, major *int, minor *int) bool { - /* Eat whitespaces. */ - - if !cache(parser, 1) { - return false - } - - for is_blank(parser.buffer[parser.buffer_pos]) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - - /* Consume the major version number. */ - - if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { - return false - } - - /* Eat '.'. */ - - if parser.buffer[parser.buffer_pos] != '.' { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected digit or '.' character") - } - - skip(parser) - - /* Consume the minor version number. */ - - if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { - return false - } - - return true -} - -const MAX_NUMBER_LENGTH = 9 - -/* - * Scan the version number of VERSION-DIRECTIVE. 
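The %YAML directive scan above reads "major.minor", each component being a digit run capped at MAX_NUMBER_LENGTH (9) characters. A small stand-alone sketch of that value parse, assuming a whole-line input and skipping the scanner's character-by-character buffering:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseYAMLDirective sketches the %YAML value scan: "major.minor",
// each component at most nine digits (the MAX_NUMBER_LENGTH guard).
func parseYAMLDirective(line string) (major, minor int, err error) {
	s := strings.TrimSpace(strings.TrimPrefix(strings.TrimSpace(line), "%YAML"))
	parts := strings.SplitN(s, ".", 2)
	if len(parts) != 2 {
		return 0, 0, fmt.Errorf("did not find expected digit or '.' character")
	}
	nums := [2]int{}
	for i, p := range parts {
		if p == "" || len(p) > 9 {
			return 0, 0, fmt.Errorf("bad version number %q", p)
		}
		n, convErr := strconv.Atoi(p)
		if convErr != nil {
			return 0, 0, convErr
		}
		nums[i] = n
	}
	return nums[0], nums[1], nil
}

func main() {
	fmt.Println(parseYAMLDirective("%YAML 1.1")) // 1 1 <nil>
}
```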
- * - * Scope: - * %YAML 1.1 # a comment \n - * ^ - * %YAML 1.1 # a comment \n - * ^ - */ - -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, - start_mark YAML_mark_t, number *int) bool { - - /* Repeat while the next character is digit. */ - - if !cache(parser, 1) { - return false - } - - value := 0 - length := 0 - for is_digit(parser.buffer[parser.buffer_pos]) { - /* Check if the number is too long. */ - - length++ - if length > MAX_NUMBER_LENGTH { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - - value = value*10 + as_digit(parser.buffer[parser.buffer_pos]) - - skip(parser) - - if !cache(parser, 1) { - return false - } - } - - /* Check if the number was present. */ - - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - - *number = value - - return true -} - -/* - * Scan the value of a TAG-DIRECTIVE token. - * - * Scope: - * %TAG !yaml! tag:yaml.org,2002: \n - * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - */ - -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, - start_mark YAML_mark_t, handle, prefix *[]byte) bool { - - /* Eat whitespaces. */ - - if !cache(parser, 1) { - return false - } - - for is_blank(parser.buffer[parser.buffer_pos]) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - - /* Scan a handle. */ - var handle_value []byte - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - /* Expect a whitespace. */ - - if !cache(parser, 1) { - return false - } - - if !is_blank(parser.buffer[parser.buffer_pos]) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - /* Eat whitespaces. */ - - for is_blank(parser.buffer[parser.buffer_pos]) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - - /* Scan a prefix. */ - var prefix_value []byte - if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - /* Expect a whitespace or line break. */ - - if !cache(parser, 1) { - return false - } - - if !is_blankz_at(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, - token_type yaml_token_type_t) bool { - - /* Eat the indicator character. */ - - start_mark := parser.mark - - skip(parser) - - /* Consume the value. */ - - if !cache(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer[parser.buffer_pos]) { - s = read(parser, s) - if !cache(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - b := parser.buffer[parser.buffer_pos] - if len(s) == 0 || !(is_blankz_at(parser.buffer, parser.buffer_pos) || b == '?' 
|| - b == ':' || b == ',' || - b == ']' || b == '}' || - b == '%' || b == '@' || - b == '`') { - context := "while scanning an anchor" - if token_type != yaml_ANCHOR_TOKEN { - context = "while scanning an alias" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - /* Create a token. */ - *token = yaml_token_t{ - token_type: token_type, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. - */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - start_mark := parser.mark - - /* Check if the tag is in the canonical form. */ - - if !cache(parser, 2) { - return false - } - - var handle []byte - var suffix []byte - if parser.buffer[parser.buffer_pos+1] == '<' { - /* Set the handle to '' */ - - /* Eat '!<' */ - - skip(parser) - skip(parser) - - /* Consume the tag value. */ - - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - /* Check for '>' and eat it. */ - - if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else if is_blank(parser.buffer[parser.buffer_pos+1]) { - // NON-SPECIFIED - skip(parser) - } else { - /* The tag has either the '!suffix' or the '!handle!suffix' form. */ - - /* First, try to scan a handle. */ - - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - /* Check if it is, indeed, handle. */ - - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - /* Scan the suffix now. */ - - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - /* It wasn't a handle after all. Scan the rest of the tag. */ - - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - /* Set the handle to '!'. */ - - handle = []byte{'!'} - - /* - * A special case: the '!' tag. Set the handle to '' and the - * suffix to '!'. - */ - - if len(suffix) == 0 { - handle, suffix = suffix, handle - } - - } - } - - /* Check the character which ends the tag. */ - - if !cache(parser, 1) { - return false - } - - if !is_blankz_at(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - /* Create a token. */ - - *token = yaml_token_t{ - token_type: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - - return true -} - -/* - * Scan a tag handle. - */ - -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, - start_mark YAML_mark_t, handle *[]byte) bool { - - /* Check the initial '!' character. */ - - if !cache(parser, 1) { - return false - } - - if parser.buffer[parser.buffer_pos] != '!' { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - /* Copy the '!' character. */ - var s []byte - s = read(parser, s) - - /* Copy all subsequent alphabetical and numerical characters. */ - - if !cache(parser, 1) { - return false - } - - for is_alpha(parser.buffer[parser.buffer_pos]) { - s = read(parser, s) - if !cache(parser, 1) { - return false - } - } - - /* Check if the trailing character is '!' and copy it. 
*/ - - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - /* - * It's either the '!' tag or not really a tag handle. If it's a %TAG - * directive, it's an error. If it's a tag token, it must be a part of - * URI. - */ - - if directive && !(s[0] == '!' && len(s) == 1) { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - - return true -} - -/* - * Scan a tag. - */ - -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, - head []byte, start_mark YAML_mark_t, uri *[]byte) bool { - - var s []byte - /* - * Copy the head if needed. - * - * Note that we don't copy the leading '!' character. - */ - if len(head) > 1 { - s = append(s, head[1:]...) - } - - /* Scan the tag. */ - if !cache(parser, 1) { - return false - } - - /* - * The set of characters that may appear in URI is as follows: - * - * '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - * '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - * '%'. - */ - - b := parser.buffer[parser.buffer_pos] - for is_alpha(b) || b == ';' || - b == '/' || b == '?' || - b == ':' || b == '@' || - b == '&' || b == '=' || - b == '+' || b == '$' || - b == ',' || b == '.' || - b == '!' || b == '~' || - b == '*' || b == '\'' || - b == '(' || b == ')' || - b == '[' || b == ']' || - b == '%' { - /* Check if it is a URI-escape sequence. */ - - if b == '%' { - if !yaml_parser_scan_uri_escapes(parser, - directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - - if !cache(parser, 1) { - return false - } - b = parser.buffer[parser.buffer_pos] - } - - /* Check if the tag is non-empty. */ - - if len(s) == 0 { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - - *uri = s - - return true -} - -/* - * Decode an URI-escape sequence corresponding to a single UTF-8 character. - */ - -func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, - start_mark YAML_mark_t, s *[]byte) bool { - - /* Decode the required number of characters. */ - w := 10 - for w > 0 { - - /* Check for a URI-escaped octet. */ - - if !cache(parser, 3) { - return false - } - - if !(parser.buffer[parser.buffer_pos] == '%' && - is_hex(parser.buffer[parser.buffer_pos+1]) && - is_hex(parser.buffer[parser.buffer_pos+2])) { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find URI escaped octet") - } - - /* Get the octet. */ - octet := byte((as_hex(parser.buffer[parser.buffer_pos+1]) << 4) + - as_hex(parser.buffer[parser.buffer_pos+2])) - - /* If it is the leading octet, determine the length of the UTF-8 sequence. */ - - if w == 10 { - w = width(octet) - if w == 0 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect leading UTF-8 octet") - } - } else { - /* Check if the trailing octet is correct. */ - - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect trailing UTF-8 octet") - } - } - - /* Copy the octet and move the pointers. */ - - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - - return true -} - -/* - * Scan a block scalar. - */ - -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, - literal bool) bool { - - /* Eat the indicator '|' or '>'. 
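// Editor's note (illustration, not part of the patch): the deleted
// yaml_parser_scan_uri_escapes above decodes a %XX-escaped UTF-8 character one
// octet at a time, checking that the leading octet announces a valid sequence
// length and that every trailing octet has the 10xxxxxx form. A minimal
// standalone sketch of the same checks, under the illustrative names
// utf8Width and decodeURIEscapes, might look like this:
package main

import (
	"errors"
	"fmt"
	"strconv"
)

// utf8Width mirrors the deleted width() helper: the sequence length implied by
// a leading octet, or 0 if the octet cannot start a UTF-8 sequence.
func utf8Width(b byte) int {
	switch {
	case b&0x80 == 0x00:
		return 1
	case b&0xE0 == 0xC0:
		return 2
	case b&0xF0 == 0xE0:
		return 3
	case b&0xF8 == 0xF0:
		return 4
	}
	return 0
}

// decodeURIEscapes consumes one percent-escaped character from s and returns
// its raw UTF-8 octets, validating them the way the deleted scanner does.
func decodeURIEscapes(s string) ([]byte, error) {
	var out []byte
	want := 0
	for i := 0; want == 0 || len(out) < want; i += 3 {
		if i+2 >= len(s) || s[i] != '%' {
			return nil, errors.New("did not find URI escaped octet")
		}
		v, err := strconv.ParseUint(s[i+1:i+3], 16, 8)
		if err != nil {
			return nil, errors.New("did not find URI escaped octet")
		}
		octet := byte(v)
		if want == 0 {
			if want = utf8Width(octet); want == 0 {
				return nil, errors.New("found an incorrect leading UTF-8 octet")
			}
		} else if octet&0xC0 != 0x80 {
			return nil, errors.New("found an incorrect trailing UTF-8 octet")
		}
		out = append(out, octet)
	}
	return out, nil
}

func main() {
	b, err := decodeURIEscapes("%C3%A9")
	fmt.Println(string(b), err) // é <nil>
}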
*/ - - start_mark := parser.mark - - skip(parser) - - /* Scan the additional block scalar indicators. */ - - if !cache(parser, 1) { - return false - } - - /* Check for a chomping indicator. */ - chomping := 0 - increment := 0 - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - /* Set the chomping method and eat the indicator. */ - - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - - skip(parser) - - /* Check for an indentation indicator. */ - - if !cache(parser, 1) { - return false - } - - if is_digit(parser.buffer[parser.buffer_pos]) { - /* Check that the indentation is greater than 0. */ - - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - /* Get the indentation level and eat the indicator. */ - - increment = as_digit(parser.buffer[parser.buffer_pos]) - - skip(parser) - } - } else if is_digit(parser.buffer[parser.buffer_pos]) { - - /* Do the same as above, but in the opposite order. */ - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - increment = as_digit(parser.buffer[parser.buffer_pos]) - - skip(parser) - - if !cache(parser, 1) { - return false - } - - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - - skip(parser) - } - } - - /* Eat whitespaces and comments to the end of the line. */ - - if !cache(parser, 1) { - return false - } - - for is_blank(parser.buffer[parser.buffer_pos]) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz_at(parser.buffer, parser.buffer_pos) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - } - - /* Check if we are at the end of the line. */ - - if !is_breakz_at(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "did not find expected comment or line break") - return false - } - - /* Eat a line break. */ - - if is_break_at(parser.buffer, parser.buffer_pos) { - if !cache(parser, 2) { - return false - } - - skip_line(parser) - } - - end_mark := parser.mark - - /* Set the indentation level if it was specified. */ - indent := 0 - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - /* Scan the leading line breaks and determine the indentation level if needed. */ - var trailing_breaks []byte - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, - start_mark, &end_mark) { - return false - } - - /* Scan the block scalar content. */ - - if !cache(parser, 1) { - return false - } - - var s []byte - var leading_break []byte - leading_blank := false - trailing_blank := false - for parser.mark.column == indent && !is_z(parser.buffer[parser.buffer_pos]) { - - /* - * We are at the beginning of a non-empty line. - */ - - /* Is it a trailing whitespace? */ - - trailing_blank = is_blank(parser.buffer[parser.buffer_pos]) - - /* Check if we need to fold the leading line break. 
*/ - - if !literal && len(leading_break) > 0 && leading_break[0] == '\n' && - !leading_blank && !trailing_blank { - /* Do we need to join the lines by space? */ - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } - leading_break = leading_break[:0] - } else { - s = append(s, leading_break...) - leading_break = leading_break[:0] - } - - /* Append the remaining line breaks. */ - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - - /* Is it a leading whitespace? */ - - leading_blank = is_blank(parser.buffer[parser.buffer_pos]) - - /* Consume the current line. */ - - for !is_breakz_at(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if !cache(parser, 1) { - return false - } - } - - /* Consume the line break. */ - - if !cache(parser, 2) { - return false - } - - leading_break = read_line(parser, leading_break) - - /* Eat the following indentation spaces and line breaks. */ - - if !yaml_parser_scan_block_scalar_breaks(parser, - &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - } - - /* Chomp the tail. */ - - if chomping != -1 { - s = append(s, leading_break...) - } - if chomping == 1 { - s = append(s, trailing_breaks...) - } - - /* Create a token. */ - - *token = yaml_token_t{ - token_type: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_LITERAL_SCALAR_STYLE, - } - if !literal { - token.style = yaml_FOLDED_SCALAR_STYLE - } - - return true -} - -/* - * Scan indentation spaces and line breaks for a block scalar. Determine the - * indentation level if needed. - */ - -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, - indent *int, breaks *[]byte, - start_mark YAML_mark_t, end_mark *YAML_mark_t) bool { - - *end_mark = parser.mark - - /* Eat the indentation spaces and line breaks. */ - max_indent := 0 - for { - /* Eat the indentation spaces. */ - - if !cache(parser, 1) { - return false - } - - for (*indent == 0 || parser.mark.column < *indent) && - is_space(parser.buffer[parser.buffer_pos]) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - if parser.mark.column > max_indent { - max_indent = parser.mark.column - } - - /* Check for a tab character messing the indentation. */ - - if (*indent == 0 || parser.mark.column < *indent) && - is_tab(parser.buffer[parser.buffer_pos]) { - return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an indentation space is expected") - } - - /* Have we found a non-empty line? */ - - if !is_break_at(parser.buffer, parser.buffer_pos) { - break - } - - /* Consume the line break. */ - - if !cache(parser, 2) { - return false - } - - *breaks = read_line(parser, *breaks) - *end_mark = parser.mark - } - - /* Determine the indentation level if needed. */ - - if *indent == 0 { - *indent = max_indent - if *indent < parser.indent+1 { - *indent = parser.indent + 1 - } - if *indent < 1 { - *indent = 1 - } - } - - return true -} - -/* - * Scan a quoted scalar. - */ - -func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, - single bool) bool { - - /* Eat the left quote. */ - - start_mark := parser.mark - - skip(parser) - - /* Consume the content of the quoted scalar. */ - var s []byte - var leading_break []byte - var trailing_breaks []byte - var whitespaces []byte - for { - /* Check that there are no document indicators at the beginning of the line. 
*/ - - if !cache(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz_at(parser.buffer, parser.buffer_pos+3) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected document indicator") - return false - } - - /* Check for EOF. */ - - if is_z(parser.buffer[parser.buffer_pos]) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected end of stream") - return false - } - - /* Consume non-blank characters. */ - - if !cache(parser, 2) { - return false - } - - leading_blanks := false - - for !is_blankz_at(parser.buffer, parser.buffer_pos) { - /* Check for an escaped single quote. */ - - if single && parser.buffer[parser.buffer_pos] == '\'' && - parser.buffer[parser.buffer_pos+1] == '\'' { - // Is is an escaped single quote. - s = append(s, '\'') - skip(parser) - skip(parser) - } else if single && parser.buffer[parser.buffer_pos] == '\'' { - /* Check for the right quote. */ - break - } else if !single && parser.buffer[parser.buffer_pos] == '"' { - /* Check for the right quote. */ - break - } else if !single && parser.buffer[parser.buffer_pos] == '\\' && - is_break_at(parser.buffer, parser.buffer_pos+1) { - - /* Check for an escaped line break. */ - if !cache(parser, 3) { - return false - } - - skip(parser) - skip_line(parser) - leading_blanks = true - break - } else if !single && parser.buffer[parser.buffer_pos] == '\\' { - - /* Check for an escape sequence. */ - - code_length := 0 - - /* Check the escape character. */ - - switch parser.buffer[parser.buffer_pos+1] { - case '0': - s = append(s, 0) - case 'a': - s = append(s, '\x07') - case 'b': - s = append(s, '\x08') - case 't', '\t': - s = append(s, '\x09') - case 'n': - s = append(s, '\x0A') - case 'v': - s = append(s, '\x0B') - case 'f': - s = append(s, '\x0C') - case 'r': - s = append(s, '\x0D') - case 'e': - s = append(s, '\x1B') - case ' ': - s = append(s, '\x20') - case '"': - s = append(s, '"') - case '/': - s = append(s, '/') - case '\\': - s = append(s, '\\') - case 'N': /* NEL (#x85) */ - s = append(s, '\xC2') - s = append(s, '\x85') - case '_': /* #xA0 */ - s = append(s, '\xC2') - s = append(s, '\xA0') - case 'L': /* LS (#x2028) */ - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA8') - case 'P': /* PS (#x2029) */ - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA9') - case 'x': - code_length = 2 - case 'u': - code_length = 4 - case 'U': - code_length = 8 - default: - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found unknown escape character") - return false - } - - skip(parser) - skip(parser) - - /* Consume an arbitrary escape code. */ - - if code_length > 0 { - value := 0 - - /* Scan the character value. */ - - if !cache(parser, code_length) { - return false - } - - for k := 0; k < code_length; k++ { - if !is_hex(parser.buffer[parser.buffer_pos+k]) { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "did not find expected hexdecimal number") - return false - } - value = (value << 4) + as_hex(parser.buffer[parser.buffer_pos+k]) - } - - /* Check the value and write the character. 
*/ - - if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found invalid Unicode character escape code") - return false - } - - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - /* Advance the pointer. */ - - for k := 0; k < code_length; k++ { - skip(parser) - } - } - } else { - /* It is a non-escaped non-blank character. */ - - s = read(parser, s) - } - - if !cache(parser, 2) { - return false - } - } - - /* Check if we are at the end of the scalar. */ - b := parser.buffer[parser.buffer_pos] - if single { - if b == '\'' { - break - } - } else if b == '"' { - break - } - - /* Consume blank characters. */ - - if !cache(parser, 1) { - return false - } - - for is_blank(parser.buffer[parser.buffer_pos]) || is_break_at(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer[parser.buffer_pos]) { - /* Consume a space or a tab character. */ - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if !cache(parser, 2) { - return false - } - - /* Check if it is a first line break. */ - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - - if !cache(parser, 1) { - return false - } - } - - /* Join the whitespaces or fold line breaks. */ - - if leading_blanks { - /* Do we need to fold line breaks? */ - - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - } - - leading_break = leading_break[:0] - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - leading_break = leading_break[:0] - trailing_breaks = trailing_breaks[:0] - } - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - /* Eat the right quote. */ - - skip(parser) - - end_mark := parser.mark - - /* Create a token. */ - - *token = yaml_token_t{ - token_type: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - return true -} - -/* - * Scan a plain scalar. - */ - -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - var s []byte - var leading_break []byte - var trailing_breaks []byte - var whitespaces []byte - leading_blanks := false - indent := parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - /* Consume the content of the plain scalar. */ - - for { - /* Check for a document indicator. */ - - if !cache(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos] == '.' 
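// Editor's note (illustration, not part of the patch): when the deleted
// flow-scalar scanner above sees \xNN, \uNNNN or \UNNNNNNNN it rejects
// surrogate and out-of-range code points and then writes the value as UTF-8
// by hand, using the same 1/2/3/4-byte branches shown in the diff. A
// standalone sketch of that encoding step, under the illustrative name
// encodeEscapeValue:
package main

import "fmt"

func encodeEscapeValue(value int) ([]byte, error) {
	// Reject UTF-16 surrogates and values beyond the Unicode range, as the
	// deleted scanner does before emitting any bytes.
	if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
		return nil, fmt.Errorf("invalid Unicode character escape code: %#x", value)
	}
	switch {
	case value <= 0x7F:
		return []byte{byte(value)}, nil
	case value <= 0x7FF:
		return []byte{byte(0xC0 + (value >> 6)), byte(0x80 + (value & 0x3F))}, nil
	case value <= 0xFFFF:
		return []byte{
			byte(0xE0 + (value >> 12)),
			byte(0x80 + ((value >> 6) & 0x3F)),
			byte(0x80 + (value & 0x3F)),
		}, nil
	default:
		return []byte{
			byte(0xF0 + (value >> 18)),
			byte(0x80 + ((value >> 12) & 0x3F)),
			byte(0x80 + ((value >> 6) & 0x3F)),
			byte(0x80 + (value & 0x3F)),
		}, nil
	}
}

func main() {
	b, _ := encodeEscapeValue(0x1F600) // \U0001F600
	fmt.Println(string(b))             // 😀
}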
&& - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz_at(parser.buffer, parser.buffer_pos+3) { - break - } - - /* Check for a comment. */ - - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - /* Consume non-blank characters. */ - - for !is_blankz_at(parser.buffer, parser.buffer_pos) { - /* Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". */ - - if parser.flow_level > 0 && - parser.buffer[parser.buffer_pos] == ':' && - !is_blankz_at(parser.buffer, parser.buffer_pos+1) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found unexpected ':'") - return false - } - - /* Check for indicators that may end a plain scalar. */ - b := parser.buffer[parser.buffer_pos] - if (b == ':' && is_blankz_at(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (b == ',' || b == ':' || - b == '?' || b == '[' || - b == ']' || b == '{' || - b == '}')) { - break - } - - /* Check if we need to join whitespaces and breaks. */ - - if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - /* Do we need to fold line breaks? */ - - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - } - leading_break = leading_break[:0] - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - leading_break = leading_break[:0] - trailing_breaks = trailing_breaks[:0] - } - - leading_blanks = false - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - /* Copy the character. */ - - s = read(parser, s) - end_mark = parser.mark - - if !cache(parser, 2) { - return false - } - } - - /* Is it the end? */ - - if !(is_blank(parser.buffer[parser.buffer_pos]) || - is_break_at(parser.buffer, parser.buffer_pos)) { - break - } - - /* Consume blank characters. */ - - if !cache(parser, 1) { - return false - } - - for is_blank(parser.buffer[parser.buffer_pos]) || - is_break_at(parser.buffer, parser.buffer_pos) { - - if is_blank(parser.buffer[parser.buffer_pos]) { - /* Check for tab character that abuse indentation. */ - - if leading_blanks && parser.mark.column < indent && - is_tab(parser.buffer[parser.buffer_pos]) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violate indentation") - return false - } - - /* Consume a space or a tab character. */ - - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if !cache(parser, 2) { - return false - } - - /* Check if it is a first line break. */ - - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if !cache(parser, 1) { - return false - } - } - - /* Check indentation level. */ - - if parser.flow_level == 0 && parser.mark.column < indent { - break - } - } - - /* Create a token. */ - - *token = yaml_token_t{ - token_type: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_PLAIN_SCALAR_STYLE, - } - - /* Note that we change the 'simple_key_allowed' flag. 
*/ - - if leading_blanks { - parser.simple_key_allowed = true - } - - return true -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go deleted file mode 100644 index f153aee46..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go +++ /dev/null @@ -1,360 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "reflect" - "sort" - "strings" - "sync" - "unicode" -) - -// A field represents a single field found in a struct. -type field struct { - name string - tag bool - index []int - typ reflect.Type - omitEmpty bool - flow bool -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from json tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that JSON should recognize for the given type. -// The algorithm is breadth-first search over the set of structs to include - the top struct -// and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - tag := sf.Tag.Get("yaml") - if tag == "-" { - continue - } - name, opts := parseTag(tag) - if !isValidTag(name) { - name = "" - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. 
- if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = sf.Name - } - fields = append(fields, field{name, tagged, index, ft, - opts.Contains("omitempty"), opts.Contains("flow")}) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - next = append(next, field{name: ft.Name(), index: index, typ: ft}) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with JSON tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// JSON tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. -func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. 
- f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} - -// tagOptions is the string following a comma in a struct field's "json" -// tag, or the empty string. It does not include the leading comma. -type tagOptions string - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -func fieldByIndex(v reflect.Value, index []int) reflect.Value { - for _, i := range index { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - return reflect.Value{} - } - v = v.Elem() - } - v = v.Field(i) - } - return v -} - -func typeByIndex(t reflect.Type, index []int) reflect.Type { - for _, i := range index { - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - t = t.Field(i).Type - } - return t -} - -// stringValues is a slice of reflect.Value holding *reflect.StringValue. -// It implements the methods to sort by string. -type stringValues []reflect.Value - -func (sv stringValues) Len() int { return len(sv) } -func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv stringValues) Less(i, j int) bool { - av, ak := getElem(sv[i]) - bv, bk := getElem(sv[j]) - if ak == reflect.String && bk == reflect.String { - return av.String() < bv.String() - } - - return ak < bk -} - -func getElem(v reflect.Value) (reflect.Value, reflect.Kind) { - k := v.Kind() - for k == reflect.Interface || k == reflect.Ptr && !v.IsNil() { - v = v.Elem() - k = v.Kind() - } - - return v, k -} - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. -func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/writer.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/writer.go deleted file mode 100644 index a76b63363..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/writer.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -/* - * Set the writer error and return 0. 
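// Editor's note (illustration, not part of the patch): the deleted tags.go
// above resolves struct fields for encoding by splitting a
// `yaml:"name,opt1,opt2"` tag into a name and a comma-separated option list
// (parseTag and tagOptions.Contains). A small standalone sketch of that
// behaviour, using the illustrative helper splitTag and the hypothetical type
// exampleSpec:
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// splitTag mirrors parseTag: everything before the first comma is the field
// name, the remainder is the option list.
func splitTag(tag string) (name string, opts []string) {
	parts := strings.Split(tag, ",")
	return parts[0], parts[1:]
}

// contains reports whether the option list includes opt, like
// tagOptions.Contains in the deleted file.
func contains(opts []string, opt string) bool {
	for _, o := range opts {
		if o == opt {
			return true
		}
	}
	return false
}

type exampleSpec struct {
	Name     string            `yaml:"name"`
	Replicas int               `yaml:"replicas,omitempty"`
	Labels   map[string]string `yaml:"labels,omitempty,flow"`
}

func main() {
	t := reflect.TypeOf(exampleSpec{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		name, opts := splitTag(f.Tag.Get("yaml"))
		fmt.Printf("%-8s -> name=%q omitempty=%v flow=%v\n",
			f.Name, name, contains(opts, "omitempty"), contains(opts, "flow"))
	}
}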
- */ - -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - - return false -} - -/* - * Flush the output buffer. - */ - -func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("Write handler must be set") /* Write handler must be set. */ - } - if emitter.encoding == yaml_ANY_ENCODING { - panic("Encoding must be set") /* Output encoding must be set. */ - } - - /* Check if the buffer is empty. */ - - if emitter.buffer_pos == 0 { - return true - } - - /* If the output encoding is UTF-8, we don't need to recode the buffer. */ - - if emitter.encoding == yaml_UTF8_ENCODING { - if err := emitter.write_handler(emitter, - emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true - } - - /* Recode the buffer into the raw buffer. */ - - var low, high int - if emitter.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - high, low = 1, 0 - } - - pos := 0 - for pos < emitter.buffer_pos { - - /* - * See the "reader.c" code for more details on UTF-8 encoding. Note - * that we assume that the buffer contains a valid UTF-8 sequence. - */ - - /* Read the next UTF-8 character. */ - - octet := emitter.buffer[pos] - - var w int - var value rune - switch { - case octet&0x80 == 0x00: - w, value = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, value = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, value = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, value = 4, rune(octet&0x07) - } - - for k := 1; k < w; k++ { - octet = emitter.buffer[pos+k] - value = (value << 6) + (rune(octet) & 0x3F) - } - - pos += w - - /* Write the character. */ - - if value < 0x10000 { - var b [2]byte - b[high] = byte(value >> 8) - b[low] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) - } else { - /* Write the character using a surrogate pair (check "reader.c"). */ - - var b [4]byte - value -= 0x10000 - b[high] = byte(0xD8 + (value >> 18)) - b[low] = byte((value >> 10) & 0xFF) - b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) - b[low+2] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) - } - } - - /* Write the raw buffer. */ - - // Write the raw buffer. - if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - - emitter.buffer_pos = 0 - emitter.raw_buffer = emitter.raw_buffer[:0] - return true -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go deleted file mode 100644 index de4c05ad8..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
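// Editor's note (illustration, not part of the patch): yaml_emitter_flush
// above recodes the UTF-8 output buffer into UTF-16 (LE or BE) and writes
// characters outside the Basic Multilingual Plane as a surrogate pair. The
// sketch below shows the standard surrogate arithmetic that the deleted
// writer performs byte by byte; encodeUTF16 is an illustrative helper, not
// part of the original emitter.
package main

import "fmt"

// encodeUTF16 returns the UTF-16 code units for r: one unit for BMP
// characters, or a high/low surrogate pair (0xD800 + top ten bits,
// 0xDC00 + low ten bits of r-0x10000) for code points >= 0x10000.
func encodeUTF16(r rune) []uint16 {
	if r < 0x10000 {
		return []uint16{uint16(r)}
	}
	v := r - 0x10000
	high := uint16(0xD800 + (v >> 10))
	low := uint16(0xDC00 + (v & 0x3FF))
	return []uint16{high, low}
}

func main() {
	for _, r := range []rune{'é', '😀'} {
		fmt.Printf("U+%04X -> %#v\n", r, encodeUTF16(r))
	}
}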
-*/ - -package candiedyaml - -const ( - yaml_VERSION_MAJOR = 0 - yaml_VERSION_MINOR = 1 - yaml_VERSION_PATCH = 6 - yaml_VERSION_STRING = "0.1.6" -) diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_privateh.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_privateh.go deleted file mode 100644 index 2b3b7d749..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_privateh.go +++ /dev/null @@ -1,891 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -const ( - INPUT_RAW_BUFFER_SIZE = 1024 - - /* - * The size of the input buffer. - * - * It should be possible to decode the whole raw buffer. - */ - INPUT_BUFFER_SIZE = (INPUT_RAW_BUFFER_SIZE * 3) - - /* - * The size of the output buffer. - */ - - OUTPUT_BUFFER_SIZE = 512 - - /* - * The size of the output raw buffer. - * - * It should be possible to encode the whole output buffer. - */ - - OUTPUT_RAW_BUFFER_SIZE = (OUTPUT_BUFFER_SIZE*2 + 2) - - INITIAL_STACK_SIZE = 16 - INITIAL_QUEUE_SIZE = 16 -) - -func width(b byte) int { - if b&0x80 == 0 { - return 1 - } - - if b&0xE0 == 0xC0 { - return 2 - } - - if b&0xF0 == 0xE0 { - return 3 - } - - if b&0xF8 == 0xF0 { - return 4 - } - - return 0 -} - -func copy_bytes(dest []byte, dest_pos *int, src []byte, src_pos *int) { - w := width(src[*src_pos]) - switch w { - case 4: - dest[*dest_pos+3] = src[*src_pos+3] - fallthrough - case 3: - dest[*dest_pos+2] = src[*src_pos+2] - fallthrough - case 2: - dest[*dest_pos+1] = src[*src_pos+1] - fallthrough - case 1: - dest[*dest_pos] = src[*src_pos] - default: - panic("invalid width") - } - *dest_pos += w - *src_pos += w -} - -// /* -// * Check if the character at the specified position is an alphabetical -// * character, a digit, '_', or '-'. -// */ - -func is_alpha(b byte) bool { - return (b >= '0' && b <= '9') || - (b >= 'A' && b <= 'Z') || - (b >= 'a' && b <= 'z') || - b == '_' || b == '-' -} - -// /* -// * Check if the character at the specified position is a digit. -// */ -// -func is_digit(b byte) bool { - return b >= '0' && b <= '9' -} - -// /* -// * Get the value of a digit. -// */ -// -func as_digit(b byte) int { - return int(b) - '0' -} - -// /* -// * Check if the character at the specified position is a hex-digit. -// */ -// -func is_hex(b byte) bool { - return (b >= '0' && b <= '9') || - (b >= 'A' && b <= 'F') || - (b >= 'a' && b <= 'f') -} - -// -// /* -// * Get the value of a hex-digit. -// */ -// -func as_hex(b byte) int { - if b >= 'A' && b <= 'F' { - return int(b) - 'A' + 10 - } else if b >= 'a' && b <= 'f' { - return int(b) - 'a' + 10 - } - return int(b) - '0' -} - -// #define AS_HEX_AT(string,offset) \ -// (((string).pointer[offset] >= (yaml_char_t) 'A' && \ -// (string).pointer[offset] <= (yaml_char_t) 'F') ? \ -// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \ -// ((string).pointer[offset] >= (yaml_char_t) 'a' && \ -// (string).pointer[offset] <= (yaml_char_t) 'f') ? 
\ -// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \ -// ((string).pointer[offset] - (yaml_char_t) '0')) - -// /* -// * Check if the character is a line break, space, tab, or NUL. -// */ -func is_blankz_at(b []byte, i int) bool { - return is_blank(b[i]) || is_breakz_at(b, i) -} - -// /* -// * Check if the character at the specified position is a line break. -// */ -func is_break_at(b []byte, i int) bool { - return b[i] == '\r' || /* CR (#xD)*/ - b[i] == '\n' || /* LF (#xA) */ - (b[i] == 0xC2 && b[i+1] == 0x85) || /* NEL (#x85) */ - (b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8) || /* LS (#x2028) */ - (b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) /* PS (#x2029) */ -} - -func is_breakz_at(b []byte, i int) bool { - return is_break_at(b, i) || is_z(b[i]) -} - -func is_crlf_at(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// /* -// * Check if the character at the specified position is NUL. -// */ -func is_z(b byte) bool { - return b == 0x0 -} - -// /* -// * Check if the character at the specified position is space. -// */ -func is_space(b byte) bool { - return b == ' ' -} - -// -// /* -// * Check if the character at the specified position is tab. -// */ -func is_tab(b byte) bool { - return b == '\t' -} - -// /* -// * Check if the character at the specified position is blank (space or tab). -// */ -func is_blank(b byte) bool { - return is_space(b) || is_tab(b) -} - -// /* -// * Check if the character is ASCII. -// */ -func is_ascii(b byte) bool { - return b <= '\x7f' -} - -// /* -// * Check if the character can be printed unescaped. -// */ -func is_printable_at(b []byte, i int) bool { - return ((b[i] == 0x0A) || /* . == #x0A */ - (b[i] >= 0x20 && b[i] <= 0x7E) || /* #x20 <= . <= #x7E */ - (b[i] == 0xC2 && b[i+1] >= 0xA0) || /* #0xA0 <= . <= #xD7FF */ - (b[i] > 0xC2 && b[i] < 0xED) || - (b[i] == 0xED && b[i+1] < 0xA0) || - (b[i] == 0xEE) || - (b[i] == 0xEF && /* && . != #xFEFF */ - !(b[i+1] == 0xBB && b[i+2] == 0xBF) && - !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) -} - -func insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - // collapse the slice - if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - // move the tokens down - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - // readjust the length - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// /* -// * Check if the character at the specified position is BOM. -// */ -// -func is_bom_at(b []byte, i int) bool { - return b[i] == 0xEF && b[i+1] == 0xBB && b[i+2] == 0xBF -} - -// -// #ifdef HAVE_CONFIG_H -// #include -// #endif -// -// #include "./yaml.h" -// -// #include -// #include -// -// /* -// * Memory management. -// */ -// -// yaml_DECLARE(void *) -// yaml_malloc(size_t size); -// -// yaml_DECLARE(void *) -// yaml_realloc(void *ptr, size_t size); -// -// yaml_DECLARE(void) -// yaml_free(void *ptr); -// -// yaml_DECLARE(yaml_char_t *) -// yaml_strdup(const yaml_char_t *); -// -// /* -// * Reader: Ensure that the buffer contains at least `length` characters. 
-// */ -// -// yaml_DECLARE(int) -// yaml_parser_update_buffer(yaml_parser_t *parser, size_t length); -// -// /* -// * Scanner: Ensure that the token stack contains at least one token ready. -// */ -// -// yaml_DECLARE(int) -// yaml_parser_fetch_more_tokens(yaml_parser_t *parser); -// -// /* -// * The size of the input raw buffer. -// */ -// -// #define INPUT_RAW_BUFFER_SIZE 16384 -// -// /* -// * The size of the input buffer. -// * -// * It should be possible to decode the whole raw buffer. -// */ -// -// #define INPUT_BUFFER_SIZE (INPUT_RAW_BUFFER_SIZE*3) -// -// /* -// * The size of the output buffer. -// */ -// -// #define OUTPUT_BUFFER_SIZE 16384 -// -// /* -// * The size of the output raw buffer. -// * -// * It should be possible to encode the whole output buffer. -// */ -// -// #define OUTPUT_RAW_BUFFER_SIZE (OUTPUT_BUFFER_SIZE*2+2) -// -// /* -// * The size of other stacks and queues. -// */ -// -// #define INITIAL_STACK_SIZE 16 -// #define INITIAL_QUEUE_SIZE 16 -// #define INITIAL_STRING_SIZE 16 -// -// /* -// * Buffer management. -// */ -// -// #define BUFFER_INIT(context,buffer,size) \ -// (((buffer).start = yaml_malloc(size)) ? \ -// ((buffer).last = (buffer).pointer = (buffer).start, \ -// (buffer).end = (buffer).start+(size), \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// #define BUFFER_DEL(context,buffer) \ -// (yaml_free((buffer).start), \ -// (buffer).start = (buffer).pointer = (buffer).end = 0) -// -// /* -// * String management. -// */ -// -// typedef struct { -// yaml_char_t *start; -// yaml_char_t *end; -// yaml_char_t *pointer; -// } yaml_string_t; -// -// yaml_DECLARE(int) -// yaml_string_extend(yaml_char_t **start, -// yaml_char_t **pointer, yaml_char_t **end); -// -// yaml_DECLARE(int) -// yaml_string_join( -// yaml_char_t **a_start, yaml_char_t **a_pointer, yaml_char_t **a_end, -// yaml_char_t **b_start, yaml_char_t **b_pointer, yaml_char_t **b_end); -// -// #define NULL_STRING { NULL, NULL, NULL } -// -// #define STRING(string,length) { (string), (string)+(length), (string) } -// -// #define STRING_ASSIGN(value,string,length) \ -// ((value).start = (string), \ -// (value).end = (string)+(length), \ -// (value).pointer = (string)) -// -// #define STRING_INIT(context,string,size) \ -// (((string).start = yaml_malloc(size)) ? \ -// ((string).pointer = (string).start, \ -// (string).end = (string).start+(size), \ -// memset((string).start, 0, (size)), \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// #define STRING_DEL(context,string) \ -// (yaml_free((string).start), \ -// (string).start = (string).pointer = (string).end = 0) -// -// #define STRING_EXTEND(context,string) \ -// (((string).pointer+5 < (string).end) \ -// || yaml_string_extend(&(string).start, \ -// &(string).pointer, &(string).end)) -// -// #define CLEAR(context,string) \ -// ((string).pointer = (string).start, \ -// memset((string).start, 0, (string).end-(string).start)) -// -// #define JOIN(context,string_a,string_b) \ -// ((yaml_string_join(&(string_a).start, &(string_a).pointer, \ -// &(string_a).end, &(string_b).start, \ -// &(string_b).pointer, &(string_b).end)) ? \ -// ((string_b).pointer = (string_b).start, \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// /* -// * String check operations. -// */ -// -// /* -// * Check the octet at the specified position. -// */ -// -// #define CHECK_AT(string,octet,offset) \ -// ((string).pointer[offset] == (yaml_char_t)(octet)) -// -// /* -// * Check the current octet in the buffer. 
-// */ -// -// #define CHECK(string,octet) CHECK_AT((string),(octet),0) -// -// /* -// * Check if the character at the specified position is an alphabetical -// * character, a digit, '_', or '-'. -// */ -// -// #define IS_ALPHA_AT(string,offset) \ -// (((string).pointer[offset] >= (yaml_char_t) '0' && \ -// (string).pointer[offset] <= (yaml_char_t) '9') || \ -// ((string).pointer[offset] >= (yaml_char_t) 'A' && \ -// (string).pointer[offset] <= (yaml_char_t) 'Z') || \ -// ((string).pointer[offset] >= (yaml_char_t) 'a' && \ -// (string).pointer[offset] <= (yaml_char_t) 'z') || \ -// (string).pointer[offset] == '_' || \ -// (string).pointer[offset] == '-') -// -// #define IS_ALPHA(string) IS_ALPHA_AT((string),0) -// -// /* -// * Check if the character at the specified position is a digit. -// */ -// -// #define IS_DIGIT_AT(string,offset) \ -// (((string).pointer[offset] >= (yaml_char_t) '0' && \ -// (string).pointer[offset] <= (yaml_char_t) '9')) -// -// #define IS_DIGIT(string) IS_DIGIT_AT((string),0) -// -// /* -// * Get the value of a digit. -// */ -// -// #define AS_DIGIT_AT(string,offset) \ -// ((string).pointer[offset] - (yaml_char_t) '0') -// -// #define AS_DIGIT(string) AS_DIGIT_AT((string),0) -// -// /* -// * Check if the character at the specified position is a hex-digit. -// */ -// -// #define IS_HEX_AT(string,offset) \ -// (((string).pointer[offset] >= (yaml_char_t) '0' && \ -// (string).pointer[offset] <= (yaml_char_t) '9') || \ -// ((string).pointer[offset] >= (yaml_char_t) 'A' && \ -// (string).pointer[offset] <= (yaml_char_t) 'F') || \ -// ((string).pointer[offset] >= (yaml_char_t) 'a' && \ -// (string).pointer[offset] <= (yaml_char_t) 'f')) -// -// #define IS_HEX(string) IS_HEX_AT((string),0) -// -// /* -// * Get the value of a hex-digit. -// */ -// -// #define AS_HEX_AT(string,offset) \ -// (((string).pointer[offset] >= (yaml_char_t) 'A' && \ -// (string).pointer[offset] <= (yaml_char_t) 'F') ? \ -// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \ -// ((string).pointer[offset] >= (yaml_char_t) 'a' && \ -// (string).pointer[offset] <= (yaml_char_t) 'f') ? \ -// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \ -// ((string).pointer[offset] - (yaml_char_t) '0')) -// -// #define AS_HEX(string) AS_HEX_AT((string),0) -// -// /* -// * Check if the character is ASCII. -// */ -// -// #define IS_ASCII_AT(string,offset) \ -// ((string).pointer[offset] <= (yaml_char_t) '\x7F') -// -// #define IS_ASCII(string) IS_ASCII_AT((string),0) -// -// /* -// * Check if the character can be printed unescaped. -// */ -// -// #define IS_PRINTABLE_AT(string,offset) \ -// (((string).pointer[offset] == 0x0A) /* . == #x0A */ \ -// || ((string).pointer[offset] >= 0x20 /* #x20 <= . <= #x7E */ \ -// && (string).pointer[offset] <= 0x7E) \ -// || ((string).pointer[offset] == 0xC2 /* #0xA0 <= . <= #xD7FF */ \ -// && (string).pointer[offset+1] >= 0xA0) \ -// || ((string).pointer[offset] > 0xC2 \ -// && (string).pointer[offset] < 0xED) \ -// || ((string).pointer[offset] == 0xED \ -// && (string).pointer[offset+1] < 0xA0) \ -// || ((string).pointer[offset] == 0xEE) \ -// || ((string).pointer[offset] == 0xEF /* #xE000 <= . <= #xFFFD */ \ -// && !((string).pointer[offset+1] == 0xBB /* && . 
!= #xFEFF */ \ -// && (string).pointer[offset+2] == 0xBF) \ -// && !((string).pointer[offset+1] == 0xBF \ -// && ((string).pointer[offset+2] == 0xBE \ -// || (string).pointer[offset+2] == 0xBF)))) -// -// #define IS_PRINTABLE(string) IS_PRINTABLE_AT((string),0) -// -// /* -// * Check if the character at the specified position is NUL. -// */ -// -// #define IS_Z_AT(string,offset) CHECK_AT((string),'\0',(offset)) -// -// #define IS_Z(string) IS_Z_AT((string),0) -// -// /* -// * Check if the character at the specified position is BOM. -// */ -// -// #define IS_BOM_AT(string,offset) \ -// (CHECK_AT((string),'\xEF',(offset)) \ -// && CHECK_AT((string),'\xBB',(offset)+1) \ -// && CHECK_AT((string),'\xBF',(offset)+2)) /* BOM (#xFEFF) */ -// -// #define IS_BOM(string) IS_BOM_AT(string,0) -// -// /* -// * Check if the character at the specified position is space. -// */ -// -// #define IS_SPACE_AT(string,offset) CHECK_AT((string),' ',(offset)) -// -// #define IS_SPACE(string) IS_SPACE_AT((string),0) -// -// /* -// * Check if the character at the specified position is tab. -// */ -// -// #define IS_TAB_AT(string,offset) CHECK_AT((string),'\t',(offset)) -// -// #define IS_TAB(string) IS_TAB_AT((string),0) -// -// /* -// * Check if the character at the specified position is blank (space or tab). -// */ -// -// #define IS_BLANK_AT(string,offset) \ -// (IS_SPACE_AT((string),(offset)) || IS_TAB_AT((string),(offset))) -// -// #define IS_BLANK(string) IS_BLANK_AT((string),0) -// -// /* -// * Check if the character at the specified position is a line break. -// */ -// -// #define IS_BREAK_AT(string,offset) \ -// (CHECK_AT((string),'\r',(offset)) /* CR (#xD)*/ \ -// || CHECK_AT((string),'\n',(offset)) /* LF (#xA) */ \ -// || (CHECK_AT((string),'\xC2',(offset)) \ -// && CHECK_AT((string),'\x85',(offset)+1)) /* NEL (#x85) */ \ -// || (CHECK_AT((string),'\xE2',(offset)) \ -// && CHECK_AT((string),'\x80',(offset)+1) \ -// && CHECK_AT((string),'\xA8',(offset)+2)) /* LS (#x2028) */ \ -// || (CHECK_AT((string),'\xE2',(offset)) \ -// && CHECK_AT((string),'\x80',(offset)+1) \ -// && CHECK_AT((string),'\xA9',(offset)+2))) /* PS (#x2029) */ -// -// #define IS_BREAK(string) IS_BREAK_AT((string),0) -// -// #define IS_CRLF_AT(string,offset) \ -// (CHECK_AT((string),'\r',(offset)) && CHECK_AT((string),'\n',(offset)+1)) -// -// #define IS_CRLF(string) IS_CRLF_AT((string),0) -// -// /* -// * Check if the character is a line break or NUL. -// */ -// -// #define IS_BREAKZ_AT(string,offset) \ -// (IS_BREAK_AT((string),(offset)) || IS_Z_AT((string),(offset))) -// -// #define IS_BREAKZ(string) IS_BREAKZ_AT((string),0) -// -// /* -// * Check if the character is a line break, space, or NUL. -// */ -// -// #define IS_SPACEZ_AT(string,offset) \ -// (IS_SPACE_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset))) -// -// #define IS_SPACEZ(string) IS_SPACEZ_AT((string),0) -// -// /* -// * Check if the character is a line break, space, tab, or NUL. -// */ -// -// #define IS_BLANKZ_AT(string,offset) \ -// (IS_BLANK_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset))) -// -// #define IS_BLANKZ(string) IS_BLANKZ_AT((string),0) -// -// /* -// * Determine the width of the character. -// */ -// -// #define WIDTH_AT(string,offset) \ -// (((string).pointer[offset] & 0x80) == 0x00 ? 1 : \ -// ((string).pointer[offset] & 0xE0) == 0xC0 ? 2 : \ -// ((string).pointer[offset] & 0xF0) == 0xE0 ? 3 : \ -// ((string).pointer[offset] & 0xF8) == 0xF0 ? 
4 : 0) -// -// #define WIDTH(string) WIDTH_AT((string),0) -// -// /* -// * Move the string pointer to the next character. -// */ -// -// #define MOVE(string) ((string).pointer += WIDTH((string))) -// -// /* -// * Copy a character and move the pointers of both strings. -// */ -// -// #define COPY(string_a,string_b) \ -// ((*(string_b).pointer & 0x80) == 0x00 ? \ -// (*((string_a).pointer++) = *((string_b).pointer++)) : \ -// (*(string_b).pointer & 0xE0) == 0xC0 ? \ -// (*((string_a).pointer++) = *((string_b).pointer++), \ -// *((string_a).pointer++) = *((string_b).pointer++)) : \ -// (*(string_b).pointer & 0xF0) == 0xE0 ? \ -// (*((string_a).pointer++) = *((string_b).pointer++), \ -// *((string_a).pointer++) = *((string_b).pointer++), \ -// *((string_a).pointer++) = *((string_b).pointer++)) : \ -// (*(string_b).pointer & 0xF8) == 0xF0 ? \ -// (*((string_a).pointer++) = *((string_b).pointer++), \ -// *((string_a).pointer++) = *((string_b).pointer++), \ -// *((string_a).pointer++) = *((string_b).pointer++), \ -// *((string_a).pointer++) = *((string_b).pointer++)) : 0) -// -// /* -// * Stack and queue management. -// */ -// -// yaml_DECLARE(int) -// yaml_stack_extend(void **start, void **top, void **end); -// -// yaml_DECLARE(int) -// yaml_queue_extend(void **start, void **head, void **tail, void **end); -// -// #define STACK_INIT(context,stack,size) \ -// (((stack).start = yaml_malloc((size)*sizeof(*(stack).start))) ? \ -// ((stack).top = (stack).start, \ -// (stack).end = (stack).start+(size), \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// #define STACK_DEL(context,stack) \ -// (yaml_free((stack).start), \ -// (stack).start = (stack).top = (stack).end = 0) -// -// #define STACK_EMPTY(context,stack) \ -// ((stack).start == (stack).top) -// -// #define PUSH(context,stack,value) \ -// (((stack).top != (stack).end \ -// || yaml_stack_extend((void **)&(stack).start, \ -// (void **)&(stack).top, (void **)&(stack).end)) ? \ -// (*((stack).top++) = value, \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// #define POP(context,stack) \ -// (*(--(stack).top)) -// -// #define QUEUE_INIT(context,queue,size) \ -// (((queue).start = yaml_malloc((size)*sizeof(*(queue).start))) ? \ -// ((queue).head = (queue).tail = (queue).start, \ -// (queue).end = (queue).start+(size), \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// #define QUEUE_DEL(context,queue) \ -// (yaml_free((queue).start), \ -// (queue).start = (queue).head = (queue).tail = (queue).end = 0) -// -// #define QUEUE_EMPTY(context,queue) \ -// ((queue).head == (queue).tail) -// -// #define ENQUEUE(context,queue,value) \ -// (((queue).tail != (queue).end \ -// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \ -// (void **)&(queue).tail, (void **)&(queue).end)) ? \ -// (*((queue).tail++) = value, \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// #define DEQUEUE(context,queue) \ -// (*((queue).head++)) -// -// #define QUEUE_INSERT(context,queue,index,value) \ -// (((queue).tail != (queue).end \ -// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \ -// (void **)&(queue).tail, (void **)&(queue).end)) ? \ -// (memmove((queue).head+(index)+1,(queue).head+(index), \ -// ((queue).tail-(queue).head-(index))*sizeof(*(queue).start)), \ -// *((queue).head+(index)) = value, \ -// (queue).tail++, \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// /* -// * Token initializers. 
-// */ -// -// #define TOKEN_INIT(token,token_type,token_start_mark,token_end_mark) \ -// (memset(&(token), 0, sizeof(yaml_token_t)), \ -// (token).type = (token_type), \ -// (token).start_mark = (token_start_mark), \ -// (token).end_mark = (token_end_mark)) -// -// #define STREAM_START_TOKEN_INIT(token,token_encoding,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_STREAM_START_TOKEN,(start_mark),(end_mark)), \ -// (token).data.stream_start.encoding = (token_encoding)) -// -// #define STREAM_END_TOKEN_INIT(token,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_STREAM_END_TOKEN,(start_mark),(end_mark))) -// -// #define ALIAS_TOKEN_INIT(token,token_value,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_ALIAS_TOKEN,(start_mark),(end_mark)), \ -// (token).data.alias.value = (token_value)) -// -// #define ANCHOR_TOKEN_INIT(token,token_value,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_ANCHOR_TOKEN,(start_mark),(end_mark)), \ -// (token).data.anchor.value = (token_value)) -// -// #define TAG_TOKEN_INIT(token,token_handle,token_suffix,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_TAG_TOKEN,(start_mark),(end_mark)), \ -// (token).data.tag.handle = (token_handle), \ -// (token).data.tag.suffix = (token_suffix)) -// -// #define SCALAR_TOKEN_INIT(token,token_value,token_length,token_style,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_SCALAR_TOKEN,(start_mark),(end_mark)), \ -// (token).data.scalar.value = (token_value), \ -// (token).data.scalar.length = (token_length), \ -// (token).data.scalar.style = (token_style)) -// -// #define VERSION_DIRECTIVE_TOKEN_INIT(token,token_major,token_minor,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_VERSION_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \ -// (token).data.version_directive.major = (token_major), \ -// (token).data.version_directive.minor = (token_minor)) -// -// #define TAG_DIRECTIVE_TOKEN_INIT(token,token_handle,token_prefix,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_TAG_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \ -// (token).data.tag_directive.handle = (token_handle), \ -// (token).data.tag_directive.prefix = (token_prefix)) -// -// /* -// * Event initializers. 
-// */ -// -// #define EVENT_INIT(event,event_type,event_start_mark,event_end_mark) \ -// (memset(&(event), 0, sizeof(yaml_event_t)), \ -// (event).type = (event_type), \ -// (event).start_mark = (event_start_mark), \ -// (event).end_mark = (event_end_mark)) -// -// #define STREAM_START_EVENT_INIT(event,event_encoding,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_STREAM_START_EVENT,(start_mark),(end_mark)), \ -// (event).data.stream_start.encoding = (event_encoding)) -// -// #define STREAM_END_EVENT_INIT(event,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_STREAM_END_EVENT,(start_mark),(end_mark))) -// -// #define DOCUMENT_START_EVENT_INIT(event,event_version_directive, \ -// event_tag_directives_start,event_tag_directives_end,event_implicit,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_DOCUMENT_START_EVENT,(start_mark),(end_mark)), \ -// (event).data.document_start.version_directive = (event_version_directive), \ -// (event).data.document_start.tag_directives.start = (event_tag_directives_start), \ -// (event).data.document_start.tag_directives.end = (event_tag_directives_end), \ -// (event).data.document_start.implicit = (event_implicit)) -// -// #define DOCUMENT_END_EVENT_INIT(event,event_implicit,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_DOCUMENT_END_EVENT,(start_mark),(end_mark)), \ -// (event).data.document_end.implicit = (event_implicit)) -// -// #define ALIAS_EVENT_INIT(event,event_anchor,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_ALIAS_EVENT,(start_mark),(end_mark)), \ -// (event).data.alias.anchor = (event_anchor)) -// -// #define SCALAR_EVENT_INIT(event,event_anchor,event_tag,event_value,event_length, \ -// event_plain_implicit, event_quoted_implicit,event_style,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_SCALAR_EVENT,(start_mark),(end_mark)), \ -// (event).data.scalar.anchor = (event_anchor), \ -// (event).data.scalar.tag = (event_tag), \ -// (event).data.scalar.value = (event_value), \ -// (event).data.scalar.length = (event_length), \ -// (event).data.scalar.plain_implicit = (event_plain_implicit), \ -// (event).data.scalar.quoted_implicit = (event_quoted_implicit), \ -// (event).data.scalar.style = (event_style)) -// -// #define SEQUENCE_START_EVENT_INIT(event,event_anchor,event_tag, \ -// event_implicit,event_style,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_SEQUENCE_START_EVENT,(start_mark),(end_mark)), \ -// (event).data.sequence_start.anchor = (event_anchor), \ -// (event).data.sequence_start.tag = (event_tag), \ -// (event).data.sequence_start.implicit = (event_implicit), \ -// (event).data.sequence_start.style = (event_style)) -// -// #define SEQUENCE_END_EVENT_INIT(event,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_SEQUENCE_END_EVENT,(start_mark),(end_mark))) -// -// #define MAPPING_START_EVENT_INIT(event,event_anchor,event_tag, \ -// event_implicit,event_style,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_MAPPING_START_EVENT,(start_mark),(end_mark)), \ -// (event).data.mapping_start.anchor = (event_anchor), \ -// (event).data.mapping_start.tag = (event_tag), \ -// (event).data.mapping_start.implicit = (event_implicit), \ -// (event).data.mapping_start.style = (event_style)) -// -// #define MAPPING_END_EVENT_INIT(event,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_MAPPING_END_EVENT,(start_mark),(end_mark))) -// -// /* -// * Document initializer. 
-// */ -// -// #define DOCUMENT_INIT(document,document_nodes_start,document_nodes_end, \ -// document_version_directive,document_tag_directives_start, \ -// document_tag_directives_end,document_start_implicit, \ -// document_end_implicit,document_start_mark,document_end_mark) \ -// (memset(&(document), 0, sizeof(yaml_document_t)), \ -// (document).nodes.start = (document_nodes_start), \ -// (document).nodes.end = (document_nodes_end), \ -// (document).nodes.top = (document_nodes_start), \ -// (document).version_directive = (document_version_directive), \ -// (document).tag_directives.start = (document_tag_directives_start), \ -// (document).tag_directives.end = (document_tag_directives_end), \ -// (document).start_implicit = (document_start_implicit), \ -// (document).end_implicit = (document_end_implicit), \ -// (document).start_mark = (document_start_mark), \ -// (document).end_mark = (document_end_mark)) -// -// /* -// * Node initializers. -// */ -// -// #define NODE_INIT(node,node_type,node_tag,node_start_mark,node_end_mark) \ -// (memset(&(node), 0, sizeof(yaml_node_t)), \ -// (node).type = (node_type), \ -// (node).tag = (node_tag), \ -// (node).start_mark = (node_start_mark), \ -// (node).end_mark = (node_end_mark)) -// -// #define SCALAR_NODE_INIT(node,node_tag,node_value,node_length, \ -// node_style,start_mark,end_mark) \ -// (NODE_INIT((node),yaml_SCALAR_NODE,(node_tag),(start_mark),(end_mark)), \ -// (node).data.scalar.value = (node_value), \ -// (node).data.scalar.length = (node_length), \ -// (node).data.scalar.style = (node_style)) -// -// #define SEQUENCE_NODE_INIT(node,node_tag,node_items_start,node_items_end, \ -// node_style,start_mark,end_mark) \ -// (NODE_INIT((node),yaml_SEQUENCE_NODE,(node_tag),(start_mark),(end_mark)), \ -// (node).data.sequence.items.start = (node_items_start), \ -// (node).data.sequence.items.end = (node_items_end), \ -// (node).data.sequence.items.top = (node_items_start), \ -// (node).data.sequence.style = (node_style)) -// -// #define MAPPING_NODE_INIT(node,node_tag,node_pairs_start,node_pairs_end, \ -// node_style,start_mark,end_mark) \ -// (NODE_INIT((node),yaml_MAPPING_NODE,(node_tag),(start_mark),(end_mark)), \ -// (node).data.mapping.pairs.start = (node_pairs_start), \ -// (node).data.mapping.pairs.end = (node_pairs_end), \ -// (node).data.mapping.pairs.top = (node_pairs_start), \ -// (node).data.mapping.style = (node_style)) -// diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/yamlh.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/yamlh.go deleted file mode 100644 index d608dbb36..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/yamlh.go +++ /dev/null @@ -1,953 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "fmt" - "io" -) - -/** The version directive data. */ -type yaml_version_directive_t struct { - major int // The major version number - minor int // The minor version number -} - -/** The tag directive data. 
*/ -type yaml_tag_directive_t struct { - handle []byte // The tag handle - prefix []byte // The tag prefix -} - -/** The stream encoding. */ -type yaml_encoding_t int - -const ( - /** Let the parser choose the encoding. */ - yaml_ANY_ENCODING yaml_encoding_t = iota - /** The defau lt UTF-8 encoding. */ - yaml_UTF8_ENCODING - /** The UTF-16-LE encoding with BOM. */ - yaml_UTF16LE_ENCODING - /** The UTF-16-BE encoding with BOM. */ - yaml_UTF16BE_ENCODING -) - -/** Line break types. */ -type yaml_break_t int - -const ( - yaml_ANY_BREAK yaml_break_t = iota /** Let the parser choose the break type. */ - yaml_CR_BREAK /** Use CR for line breaks (Mac style). */ - yaml_LN_BREAK /** Use LN for line breaks (Unix style). */ - yaml_CRLN_BREAK /** Use CR LN for line breaks (DOS style). */ -) - -/** Many bad things could happen with the parser and emitter. */ -type YAML_error_type_t int - -const ( - /** No error is produced. */ - yaml_NO_ERROR YAML_error_type_t = iota - - /** Cannot allocate or reallocate a block of memory. */ - yaml_MEMORY_ERROR - - /** Cannot read or decode the input stream. */ - yaml_READER_ERROR - /** Cannot scan the input stream. */ - yaml_SCANNER_ERROR - /** Cannot parse the input stream. */ - yaml_PARSER_ERROR - /** Cannot compose a YAML document. */ - yaml_COMPOSER_ERROR - - /** Cannot write to the output stream. */ - yaml_WRITER_ERROR - /** Cannot emit a YAML stream. */ - yaml_EMITTER_ERROR -) - -/** The pointer position. */ -type YAML_mark_t struct { - /** The position index. */ - index int - - /** The position line. */ - line int - - /** The position column. */ - column int -} - -func (m YAML_mark_t) String() string { - return fmt.Sprintf("line %d, column %d", m.line, m.column) -} - -/** @} */ - -/** - * @defgroup styles Node Styles - * @{ - */ - -type yaml_style_t int - -/** Scalar styles. */ -type yaml_scalar_style_t yaml_style_t - -const ( - /** Let the emitter choose the style. */ - yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota - - /** The plain scalar style. */ - yaml_PLAIN_SCALAR_STYLE - - /** The single-quoted scalar style. */ - yaml_SINGLE_QUOTED_SCALAR_STYLE - /** The double-quoted scalar style. */ - yaml_DOUBLE_QUOTED_SCALAR_STYLE - - /** The literal scalar style. */ - yaml_LITERAL_SCALAR_STYLE - /** The folded scalar style. */ - yaml_FOLDED_SCALAR_STYLE -) - -/** Sequence styles. */ -type yaml_sequence_style_t yaml_style_t - -const ( - /** Let the emitter choose the style. */ - yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota - - /** The block sequence style. */ - yaml_BLOCK_SEQUENCE_STYLE - /** The flow sequence style. */ - yaml_FLOW_SEQUENCE_STYLE -) - -/** Mapping styles. */ -type yaml_mapping_style_t yaml_style_t - -const ( - /** Let the emitter choose the style. */ - yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota - - /** The block mapping style. */ - yaml_BLOCK_MAPPING_STYLE - /** The flow mapping style. */ - yaml_FLOW_MAPPING_STYLE - -/* yaml_FLOW_SET_MAPPING_STYLE */ -) - -/** @} */ - -/** - * @defgroup tokens Tokens - * @{ - */ - -/** Token types. */ -type yaml_token_type_t int - -const ( - /** An empty token. */ - yaml_NO_TOKEN yaml_token_type_t = iota - - /** A STREAM-START token. */ - yaml_STREAM_START_TOKEN - /** A STREAM-END token. */ - yaml_STREAM_END_TOKEN - - /** A VERSION-DIRECTIVE token. */ - yaml_VERSION_DIRECTIVE_TOKEN - /** A TAG-DIRECTIVE token. */ - yaml_TAG_DIRECTIVE_TOKEN - /** A DOCUMENT-START token. */ - yaml_DOCUMENT_START_TOKEN - /** A DOCUMENT-END token. */ - yaml_DOCUMENT_END_TOKEN - - /** A BLOCK-SEQUENCE-START token. 
*/ - yaml_BLOCK_SEQUENCE_START_TOKEN - /** A BLOCK-SEQUENCE-END token. */ - yaml_BLOCK_MAPPING_START_TOKEN - /** A BLOCK-END token. */ - yaml_BLOCK_END_TOKEN - - /** A FLOW-SEQUENCE-START token. */ - yaml_FLOW_SEQUENCE_START_TOKEN - /** A FLOW-SEQUENCE-END token. */ - yaml_FLOW_SEQUENCE_END_TOKEN - /** A FLOW-MAPPING-START token. */ - yaml_FLOW_MAPPING_START_TOKEN - /** A FLOW-MAPPING-END token. */ - yaml_FLOW_MAPPING_END_TOKEN - - /** A BLOCK-ENTRY token. */ - yaml_BLOCK_ENTRY_TOKEN - /** A FLOW-ENTRY token. */ - yaml_FLOW_ENTRY_TOKEN - /** A KEY token. */ - yaml_KEY_TOKEN - /** A VALUE token. */ - yaml_VALUE_TOKEN - - /** An ALIAS token. */ - yaml_ALIAS_TOKEN - /** An ANCHOR token. */ - yaml_ANCHOR_TOKEN - /** A TAG token. */ - yaml_TAG_TOKEN - /** A SCALAR token. */ - yaml_SCALAR_TOKEN -) - -/** The token structure. */ -type yaml_token_t struct { - - /** The token type. */ - token_type yaml_token_type_t - - /** The token data. */ - /** The stream start (for @c yaml_STREAM_START_TOKEN). */ - encoding yaml_encoding_t - - /** The alias (for @c yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN,yaml_TAG_TOKEN ). */ - /** The anchor (for @c ). */ - /** The scalar value (for @c ). */ - value []byte - - /** The tag suffix. */ - suffix []byte - - /** The scalar value (for @c yaml_SCALAR_TOKEN). */ - /** The scalar style. */ - style yaml_scalar_style_t - - /** The version directive (for @c yaml_VERSION_DIRECTIVE_TOKEN). */ - version_directive yaml_version_directive_t - - /** The tag directive (for @c yaml_TAG_DIRECTIVE_TOKEN). */ - prefix []byte - - /** The beginning of the token. */ - start_mark YAML_mark_t - /** The end of the token. */ - end_mark YAML_mark_t - - major, minor int -} - -/** - * @defgroup events Events - * @{ - */ - -/** Event types. */ -type yaml_event_type_t int - -const ( - /** An empty event. */ - yaml_NO_EVENT yaml_event_type_t = iota - - /** A STREAM-START event. */ - yaml_STREAM_START_EVENT - /** A STREAM-END event. */ - yaml_STREAM_END_EVENT - - /** A DOCUMENT-START event. */ - yaml_DOCUMENT_START_EVENT - /** A DOCUMENT-END event. */ - yaml_DOCUMENT_END_EVENT - - /** An ALIAS event. */ - yaml_ALIAS_EVENT - /** A SCALAR event. */ - yaml_SCALAR_EVENT - - /** A SEQUENCE-START event. */ - yaml_SEQUENCE_START_EVENT - /** A SEQUENCE-END event. */ - yaml_SEQUENCE_END_EVENT - - /** A MAPPING-START event. */ - yaml_MAPPING_START_EVENT - /** A MAPPING-END event. */ - yaml_MAPPING_END_EVENT -) - -/** The event structure. */ -type yaml_event_t struct { - - /** The event type. */ - event_type yaml_event_type_t - - /** The stream parameters (for @c yaml_STREAM_START_EVENT). */ - encoding yaml_encoding_t - - /** The document parameters (for @c yaml_DOCUMENT_START_EVENT). */ - version_directive *yaml_version_directive_t - - /** The beginning and end of the tag directives list. */ - tag_directives []yaml_tag_directive_t - - /** The document parameters (for @c yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT,yaml_MAPPING_START_EVENT). */ - /** Is the document indicator implicit? */ - implicit bool - - /** The alias parameters (for @c yaml_ALIAS_EVENT,yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */ - /** The anchor. */ - anchor []byte - - /** The scalar parameters (for @c yaml_SCALAR_EVENT,yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */ - /** The tag. */ - tag []byte - /** The scalar value. */ - value []byte - - /** Is the tag optional for the plain style? 
*/ - plain_implicit bool - /** Is the tag optional for any non-plain style? */ - quoted_implicit bool - - /** The sequence parameters (for @c yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */ - /** The sequence style. */ - /** The scalar style. */ - style yaml_style_t - - /** The beginning of the event. */ - start_mark, end_mark YAML_mark_t -} - -/** - * @defgroup nodes Nodes - * @{ - */ - -const ( - /** The tag @c !!null with the only possible value: @c null. */ - yaml_NULL_TAG = "tag:yaml.org,2002:null" - /** The tag @c !!bool with the values: @c true and @c falce. */ - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" - /** The tag @c !!str for string values. */ - yaml_STR_TAG = "tag:yaml.org,2002:str" - /** The tag @c !!int for integer values. */ - yaml_INT_TAG = "tag:yaml.org,2002:int" - /** The tag @c !!float for float values. */ - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" - /** The tag @c !!timestamp for date and time values. */ - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" - - /** The tag @c !!seq is used to denote sequences. */ - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" - /** The tag @c !!map is used to denote mapping. */ - yaml_MAP_TAG = "tag:yaml.org,2002:map" - - /** The default scalar tag is @c !!str. */ - yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG - /** The default sequence tag is @c !!seq. */ - yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG - /** The default mapping tag is @c !!map. */ - yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG - - yaml_BINARY_TAG = "tag:yaml.org,2002:binary" -) - -/** Node types. */ -type yaml_node_type_t int - -const ( - /** An empty node. */ - yaml_NO_NODE yaml_node_type_t = iota - - /** A scalar node. */ - yaml_SCALAR_NODE - /** A sequence node. */ - yaml_SEQUENCE_NODE - /** A mapping node. */ - yaml_MAPPING_NODE -) - -/** An element of a sequence node. */ -type yaml_node_item_t int - -/** An element of a mapping node. */ -type yaml_node_pair_t struct { - /** The key of the element. */ - key int - /** The value of the element. */ - value int -} - -/** The node structure. */ -type yaml_node_t struct { - - /** The node type. */ - node_type yaml_node_type_t - - /** The node tag. */ - tag []byte - - /** The scalar parameters (for @c yaml_SCALAR_NODE). */ - scalar struct { - /** The scalar value. */ - value []byte - /** The scalar style. */ - style yaml_scalar_style_t - } - - /** The sequence parameters (for @c yaml_SEQUENCE_NODE). */ - sequence struct { - /** The stack of sequence items. */ - items []yaml_node_item_t - /** The sequence style. */ - style yaml_sequence_style_t - } - - /** The mapping parameters (for @c yaml_MAPPING_NODE). */ - mapping struct { - /** The stack of mapping pairs (key, value). */ - pairs []yaml_node_pair_t - /** The mapping style. */ - style yaml_mapping_style_t - } - - /** The beginning of the node. */ - start_mark YAML_mark_t - /** The end of the node. */ - end_mark YAML_mark_t -} - -/** The document structure. */ -type yaml_document_t struct { - - /** The document nodes. */ - nodes []yaml_node_t - - /** The version directive. */ - version_directive *yaml_version_directive_t - - /** The list of tag directives. */ - tags []yaml_tag_directive_t - - /** Is the document start indicator implicit? */ - start_implicit bool - /** Is the document end indicator implicit? */ - end_implicit bool - - /** The beginning of the document. */ - start_mark YAML_mark_t - /** The end of the document. */ - end_mark YAML_mark_t -} - -/** - * The prototype of a read handler. 
- * - * The read handler is called when the parser needs to read more bytes from the - * source. The handler should write not more than @a size bytes to the @a - * buffer. The number of written bytes should be set to the @a length variable. - * - * @param[in,out] data A pointer to an application data specified by - * yaml_parser_set_input(). - * @param[out] buffer The buffer to write the data from the source. - * @param[in] size The size of the buffer. - * @param[out] size_read The actual number of bytes read from the source. - * - * @returns On success, the handler should return @c 1. If the handler failed, - * the returned value should be @c 0. On EOF, the handler should set the - * @a size_read to @c 0 and return @c 1. - */ - -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) - -/** - * This structure holds information about a potential simple key. - */ - -type yaml_simple_key_t struct { - /** Is a simple key possible? */ - possible bool - - /** Is a simple key required? */ - required bool - - /** The number of the token. */ - token_number int - - /** The position mark. */ - mark YAML_mark_t -} - -/** - * The states of the parser. - */ -type yaml_parser_state_t int - -const ( - /** Expect STREAM-START. */ - yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota - /** Expect the beginning of an implicit document. */ - yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - /** Expect DOCUMENT-START. */ - yaml_PARSE_DOCUMENT_START_STATE - /** Expect the content of a document. */ - yaml_PARSE_DOCUMENT_CONTENT_STATE - /** Expect DOCUMENT-END. */ - yaml_PARSE_DOCUMENT_END_STATE - /** Expect a block node. */ - yaml_PARSE_BLOCK_NODE_STATE - /** Expect a block node or indentless sequence. */ - yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE - /** Expect a flow node. */ - yaml_PARSE_FLOW_NODE_STATE - /** Expect the first entry of a block sequence. */ - yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - /** Expect an entry of a block sequence. */ - yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - /** Expect an entry of an indentless sequence. */ - yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - /** Expect the first key of a block mapping. */ - yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - /** Expect a block mapping key. */ - yaml_PARSE_BLOCK_MAPPING_KEY_STATE - /** Expect a block mapping value. */ - yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - /** Expect the first entry of a flow sequence. */ - yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - /** Expect an entry of a flow sequence. */ - yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - /** Expect a key of an ordered mapping. */ - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - /** Expect a value of an ordered mapping. */ - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - /** Expect the and of an ordered mapping entry. */ - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - /** Expect the first key of a flow mapping. */ - yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - /** Expect a key of a flow mapping. */ - yaml_PARSE_FLOW_MAPPING_KEY_STATE - /** Expect a value of a flow mapping. */ - yaml_PARSE_FLOW_MAPPING_VALUE_STATE - /** Expect an empty value of a flow mapping. */ - yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE - /** Expect nothing. */ - yaml_PARSE_END_STATE -) - -/** - * This structure holds aliases data. - */ - -type yaml_alias_data_t struct { - /** The anchor. */ - anchor []byte - /** The node id. */ - index int - /** The anchor mark. */ - mark YAML_mark_t -} - -/** - * The parser structure. - * - * All members are internal. 
Manage the structure using the @c yaml_parser_ - * family of functions. - */ - -type yaml_parser_t struct { - - /** - * @name Error handling - * @{ - */ - - /** Error type. */ - error YAML_error_type_t - /** Error description. */ - problem string - /** The byte about which the problem occured. */ - problem_offset int - /** The problematic value (@c -1 is none). */ - problem_value int - /** The problem position. */ - problem_mark YAML_mark_t - /** The error context. */ - context string - /** The context position. */ - context_mark YAML_mark_t - - /** - * @} - */ - - /** - * @name Reader stuff - * @{ - */ - - /** Read handler. */ - read_handler yaml_read_handler_t - - /** Reader input data. */ - input_reader io.Reader - input []byte - input_pos int - - /** EOF flag */ - eof bool - - /** The working buffer. */ - buffer []byte - buffer_pos int - - /* The number of unread characters in the buffer. */ - unread int - - /** The raw buffer. */ - raw_buffer []byte - raw_buffer_pos int - - /** The input encoding. */ - encoding yaml_encoding_t - - /** The offset of the current position (in bytes). */ - offset int - - /** The mark of the current position. */ - mark YAML_mark_t - - /** - * @} - */ - - /** - * @name Scanner stuff - * @{ - */ - - /** Have we started to scan the input stream? */ - stream_start_produced bool - - /** Have we reached the end of the input stream? */ - stream_end_produced bool - - /** The number of unclosed '[' and '{' indicators. */ - flow_level int - - /** The tokens queue. */ - tokens []yaml_token_t - tokens_head int - - /** The number of tokens fetched from the queue. */ - tokens_parsed int - - /* Does the tokens queue contain a token ready for dequeueing. */ - token_available bool - - /** The indentation levels stack. */ - indents []int - - /** The current indentation level. */ - indent int - - /** May a simple key occur at the current position? */ - simple_key_allowed bool - - /** The stack of simple keys. */ - simple_keys []yaml_simple_key_t - - /** - * @} - */ - - /** - * @name Parser stuff - * @{ - */ - - /** The parser states stack. */ - states []yaml_parser_state_t - - /** The current parser state. */ - state yaml_parser_state_t - - /** The stack of marks. */ - marks []YAML_mark_t - - /** The list of TAG directives. */ - tag_directives []yaml_tag_directive_t - - /** - * @} - */ - - /** - * @name Dumper stuff - * @{ - */ - - /** The alias data. */ - aliases []yaml_alias_data_t - - /** The currently parsed document. */ - document *yaml_document_t - - /** - * @} - */ - -} - -/** - * The prototype of a write handler. - * - * The write handler is called when the emitter needs to flush the accumulated - * characters to the output. The handler should write @a size bytes of the - * @a buffer to the output. - * - * @param[in,out] data A pointer to an application data specified by - * yaml_emitter_set_output(). - * @param[in] buffer The buffer with bytes to be written. - * @param[in] size The size of the buffer. - * - * @returns On success, the handler should return @c 1. If the handler failed, - * the returned value should be @c 0. - */ - -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error - -/** The emitter states. */ -type yaml_emitter_state_t int - -const ( - /** Expect STREAM-START. */ - yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota - /** Expect the first DOCUMENT-START or STREAM-END. */ - yaml_EMIT_FIRST_DOCUMENT_START_STATE - /** Expect DOCUMENT-START or STREAM-END. 
*/ - yaml_EMIT_DOCUMENT_START_STATE - /** Expect the content of a document. */ - yaml_EMIT_DOCUMENT_CONTENT_STATE - /** Expect DOCUMENT-END. */ - yaml_EMIT_DOCUMENT_END_STATE - /** Expect the first item of a flow sequence. */ - yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - /** Expect an item of a flow sequence. */ - yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE - /** Expect the first key of a flow mapping. */ - yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - /** Expect a key of a flow mapping. */ - yaml_EMIT_FLOW_MAPPING_KEY_STATE - /** Expect a value for a simple key of a flow mapping. */ - yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE - /** Expect a value of a flow mapping. */ - yaml_EMIT_FLOW_MAPPING_VALUE_STATE - /** Expect the first item of a block sequence. */ - yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - /** Expect an item of a block sequence. */ - yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE - /** Expect the first key of a block mapping. */ - yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - /** Expect the key of a block mapping. */ - yaml_EMIT_BLOCK_MAPPING_KEY_STATE - /** Expect a value for a simple key of a block mapping. */ - yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE - /** Expect a value of a block mapping. */ - yaml_EMIT_BLOCK_MAPPING_VALUE_STATE - /** Expect nothing. */ - yaml_EMIT_END_STATE -) - -/** - * The emitter structure. - * - * All members are internal. Manage the structure using the @c yaml_emitter_ - * family of functions. - */ - -type yaml_emitter_t struct { - - /** - * @name Error handling - * @{ - */ - - /** Error type. */ - error YAML_error_type_t - /** Error description. */ - problem string - - /** - * @} - */ - - /** - * @name Writer stuff - * @{ - */ - - /** Write handler. */ - write_handler yaml_write_handler_t - - /** Standard (string or file) output data. */ - output_buffer *[]byte - output_writer io.Writer - - /** The working buffer. */ - buffer []byte - buffer_pos int - - /** The raw buffer. */ - raw_buffer []byte - raw_buffer_pos int - - /** The stream encoding. */ - encoding yaml_encoding_t - - /** - * @} - */ - - /** - * @name Emitter stuff - * @{ - */ - - /** If the output is in the canonical style? */ - canonical bool - /** The number of indentation spaces. */ - best_indent int - /** The preferred width of the output lines. */ - best_width int - /** Allow unescaped non-ASCII characters? */ - unicode bool - /** The preferred line break. */ - line_break yaml_break_t - - /** The stack of states. */ - states []yaml_emitter_state_t - - /** The current emitter state. */ - state yaml_emitter_state_t - - /** The event queue. */ - events []yaml_event_t - events_head int - - /** The stack of indentation levels. */ - indents []int - - /** The list of tag directives. */ - tag_directives []yaml_tag_directive_t - - /** The current indentation level. */ - indent int - - /** The current flow level. */ - flow_level int - - /** Is it the document root context? */ - root_context bool - /** Is it a sequence context? */ - sequence_context bool - /** Is it a mapping context? */ - mapping_context bool - /** Is it a simple mapping key context? */ - simple_key_context bool - - /** The current line. */ - line int - /** The current column. */ - column int - /** If the last character was a whitespace? */ - whitespace bool - /** If the last character was an indentation character (' ', '-', '?', ':')? */ - indention bool - /** If an explicit document end is required? */ - open_ended bool - - /** Anchor analysis. */ - anchor_data struct { - /** The anchor value. */ - anchor []byte - /** Is it an alias? 
*/ - alias bool - } - - /** Tag analysis. */ - tag_data struct { - /** The tag handle. */ - handle []byte - /** The tag suffix. */ - suffix []byte - } - - /** Scalar analysis. */ - scalar_data struct { - /** The scalar value. */ - value []byte - /** Does the scalar contain line breaks? */ - multiline bool - /** Can the scalar be expessed in the flow plain style? */ - flow_plain_allowed bool - /** Can the scalar be expressed in the block plain style? */ - block_plain_allowed bool - /** Can the scalar be expressed in the single quoted style? */ - single_quoted_allowed bool - /** Can the scalar be expressed in the literal or folded styles? */ - block_allowed bool - /** The output style. */ - style yaml_scalar_style_t - } - - /** - * @} - */ - - /** - * @name Dumper stuff - * @{ - */ - - /** If the stream was already opened? */ - opened bool - /** If the stream was already closed? */ - closed bool - - /** The information associated with the document nodes. */ - anchors *struct { - /** The number of references. */ - references int - /** The anchor id. */ - anchor int - /** If the node has been emitted? */ - serialized bool - } - - /** The last assigned anchor id. */ - last_anchor_id int - - /** The currently emitted document. */ - document *yaml_document_t - - /** - * @} - */ - -} diff --git a/vendor/github.com/coreos/go-oidc/LICENSE b/vendor/github.com/coreos/go-oidc/LICENSE deleted file mode 100644 index e06d20818..000000000 --- a/vendor/github.com/coreos/go-oidc/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/vendor/github.com/coreos/go-oidc/NOTICE b/vendor/github.com/coreos/go-oidc/NOTICE deleted file mode 100644 index b39ddfa5c..000000000 --- a/vendor/github.com/coreos/go-oidc/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2014 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). 
diff --git a/vendor/github.com/coreos/go-oidc/http/client.go b/vendor/github.com/coreos/go-oidc/http/client.go deleted file mode 100644 index fd079b495..000000000 --- a/vendor/github.com/coreos/go-oidc/http/client.go +++ /dev/null @@ -1,7 +0,0 @@ -package http - -import "net/http" - -type Client interface { - Do(*http.Request) (*http.Response, error) -} diff --git a/vendor/github.com/coreos/go-oidc/http/http.go b/vendor/github.com/coreos/go-oidc/http/http.go deleted file mode 100644 index f0d051b5f..000000000 --- a/vendor/github.com/coreos/go-oidc/http/http.go +++ /dev/null @@ -1,159 +0,0 @@ -package http - -import ( - "encoding/base64" - "encoding/json" - "errors" - "net/http" - "net/url" - "path" - "strconv" - "strings" - "time" - - "github.com/coreos/pkg/capnslog" -) - -var ( - log = capnslog.NewPackageLogger("github.com/coreos/go-oidc", "http") -) - -func WriteError(w http.ResponseWriter, code int, msg string) { - e := struct { - Error string `json:"error"` - }{ - Error: msg, - } - b, err := json.Marshal(e) - if err != nil { - log.Errorf("Failed marshaling %#v to JSON: %v", e, err) - } - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(code) - w.Write(b) -} - -// BasicAuth parses a username and password from the request's -// Authorization header. This was pulled from golang master: -// https://codereview.appspot.com/76540043 -func BasicAuth(r *http.Request) (username, password string, ok bool) { - auth := r.Header.Get("Authorization") - if auth == "" { - return - } - - if !strings.HasPrefix(auth, "Basic ") { - return - } - c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic ")) - if err != nil { - return - } - cs := string(c) - s := strings.IndexByte(cs, ':') - if s < 0 { - return - } - return cs[:s], cs[s+1:], true -} - -func cacheControlMaxAge(hdr string) (time.Duration, bool, error) { - for _, field := range strings.Split(hdr, ",") { - parts := strings.SplitN(strings.TrimSpace(field), "=", 2) - k := strings.ToLower(strings.TrimSpace(parts[0])) - if k != "max-age" { - continue - } - - if len(parts) == 1 { - return 0, false, errors.New("max-age has no value") - } - - v := strings.TrimSpace(parts[1]) - if v == "" { - return 0, false, errors.New("max-age has empty value") - } - - age, err := strconv.Atoi(v) - if err != nil { - return 0, false, err - } - - if age <= 0 { - return 0, false, nil - } - - return time.Duration(age) * time.Second, true, nil - } - - return 0, false, nil -} - -func expires(date, expires string) (time.Duration, bool, error) { - if date == "" || expires == "" { - return 0, false, nil - } - - te, err := time.Parse(time.RFC1123, expires) - if err != nil { - return 0, false, err - } - - td, err := time.Parse(time.RFC1123, date) - if err != nil { - return 0, false, err - } - - ttl := te.Sub(td) - - // headers indicate data already expired, caller should not - // have to care about this case - if ttl <= 0 { - return 0, false, nil - } - - return ttl, true, nil -} - -func Cacheable(hdr http.Header) (time.Duration, bool, error) { - ttl, ok, err := cacheControlMaxAge(hdr.Get("Cache-Control")) - if err != nil || ok { - return ttl, ok, err - } - - return expires(hdr.Get("Date"), hdr.Get("Expires")) -} - -// MergeQuery appends additional query values to an existing URL. -func MergeQuery(u url.URL, q url.Values) url.URL { - uv := u.Query() - for k, vs := range q { - for _, v := range vs { - uv.Add(k, v) - } - } - u.RawQuery = uv.Encode() - return u -} - -// NewResourceLocation appends a resource id to the end of the requested URL path. 
-func NewResourceLocation(reqURL *url.URL, id string) string { - var u url.URL - u = *reqURL - u.Path = path.Join(u.Path, id) - u.RawQuery = "" - u.Fragment = "" - return u.String() -} - -// CopyRequest returns a clone of the provided *http.Request. -// The returned object is a shallow copy of the struct and a -// deep copy of its Header field. -func CopyRequest(r *http.Request) *http.Request { - r2 := *r - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return &r2 -} diff --git a/vendor/github.com/coreos/go-oidc/http/middleware.go b/vendor/github.com/coreos/go-oidc/http/middleware.go deleted file mode 100644 index 270b3bc08..000000000 --- a/vendor/github.com/coreos/go-oidc/http/middleware.go +++ /dev/null @@ -1,14 +0,0 @@ -package http - -import ( - "net/http" -) - -type LoggingMiddleware struct { - Next http.Handler -} - -func (l *LoggingMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) { - log.Infof("HTTP %s %v", r.Method, r.URL) - l.Next.ServeHTTP(w, r) -} diff --git a/vendor/github.com/coreos/go-oidc/http/url.go b/vendor/github.com/coreos/go-oidc/http/url.go deleted file mode 100644 index df60eb1a6..000000000 --- a/vendor/github.com/coreos/go-oidc/http/url.go +++ /dev/null @@ -1,29 +0,0 @@ -package http - -import ( - "errors" - "net/url" -) - -// ParseNonEmptyURL checks that a string is a parsable URL which is also not empty -// since `url.Parse("")` does not return an error. Must contian a scheme and a host. -func ParseNonEmptyURL(u string) (*url.URL, error) { - if u == "" { - return nil, errors.New("url is empty") - } - - ur, err := url.Parse(u) - if err != nil { - return nil, err - } - - if ur.Scheme == "" { - return nil, errors.New("url scheme is empty") - } - - if ur.Host == "" { - return nil, errors.New("url host is empty") - } - - return ur, nil -} diff --git a/vendor/github.com/coreos/go-oidc/jose/claims.go b/vendor/github.com/coreos/go-oidc/jose/claims.go deleted file mode 100644 index 8b48bfd23..000000000 --- a/vendor/github.com/coreos/go-oidc/jose/claims.go +++ /dev/null @@ -1,126 +0,0 @@ -package jose - -import ( - "encoding/json" - "fmt" - "math" - "time" -) - -type Claims map[string]interface{} - -func (c Claims) Add(name string, value interface{}) { - c[name] = value -} - -func (c Claims) StringClaim(name string) (string, bool, error) { - cl, ok := c[name] - if !ok { - return "", false, nil - } - - v, ok := cl.(string) - if !ok { - return "", false, fmt.Errorf("unable to parse claim as string: %v", name) - } - - return v, true, nil -} - -func (c Claims) StringsClaim(name string) ([]string, bool, error) { - cl, ok := c[name] - if !ok { - return nil, false, nil - } - - if v, ok := cl.([]string); ok { - return v, true, nil - } - - // When unmarshaled, []string will become []interface{}. 
- if v, ok := cl.([]interface{}); ok { - var ret []string - for _, vv := range v { - str, ok := vv.(string) - if !ok { - return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name) - } - ret = append(ret, str) - } - return ret, true, nil - } - - return nil, false, fmt.Errorf("unable to parse claim as string array: %v", name) -} - -func (c Claims) Int64Claim(name string) (int64, bool, error) { - cl, ok := c[name] - if !ok { - return 0, false, nil - } - - v, ok := cl.(int64) - if !ok { - vf, ok := cl.(float64) - if !ok { - return 0, false, fmt.Errorf("unable to parse claim as int64: %v", name) - } - v = int64(vf) - } - - return v, true, nil -} - -func (c Claims) Float64Claim(name string) (float64, bool, error) { - cl, ok := c[name] - if !ok { - return 0, false, nil - } - - v, ok := cl.(float64) - if !ok { - vi, ok := cl.(int64) - if !ok { - return 0, false, fmt.Errorf("unable to parse claim as float64: %v", name) - } - v = float64(vi) - } - - return v, true, nil -} - -func (c Claims) TimeClaim(name string) (time.Time, bool, error) { - v, ok, err := c.Float64Claim(name) - if !ok || err != nil { - return time.Time{}, ok, err - } - - s := math.Trunc(v) - ns := (v - s) * math.Pow(10, 9) - return time.Unix(int64(s), int64(ns)).UTC(), true, nil -} - -func decodeClaims(payload []byte) (Claims, error) { - var c Claims - if err := json.Unmarshal(payload, &c); err != nil { - return nil, fmt.Errorf("malformed JWT claims, unable to decode: %v", err) - } - return c, nil -} - -func marshalClaims(c Claims) ([]byte, error) { - b, err := json.Marshal(c) - if err != nil { - return nil, err - } - return b, nil -} - -func encodeClaims(c Claims) (string, error) { - b, err := marshalClaims(c) - if err != nil { - return "", err - } - - return encodeSegment(b), nil -} diff --git a/vendor/github.com/coreos/go-oidc/jose/jose.go b/vendor/github.com/coreos/go-oidc/jose/jose.go deleted file mode 100644 index 620992659..000000000 --- a/vendor/github.com/coreos/go-oidc/jose/jose.go +++ /dev/null @@ -1,112 +0,0 @@ -package jose - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "strings" -) - -const ( - HeaderMediaType = "typ" - HeaderKeyAlgorithm = "alg" - HeaderKeyID = "kid" -) - -const ( - // Encryption Algorithm Header Parameter Values for JWS - // See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-6 - AlgHS256 = "HS256" - AlgHS384 = "HS384" - AlgHS512 = "HS512" - AlgRS256 = "RS256" - AlgRS384 = "RS384" - AlgRS512 = "RS512" - AlgES256 = "ES256" - AlgES384 = "ES384" - AlgES512 = "ES512" - AlgPS256 = "PS256" - AlgPS384 = "PS384" - AlgPS512 = "PS512" - AlgNone = "none" -) - -const ( - // Algorithm Header Parameter Values for JWE - // See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#section-4.1 - AlgRSA15 = "RSA1_5" - AlgRSAOAEP = "RSA-OAEP" - AlgRSAOAEP256 = "RSA-OAEP-256" - AlgA128KW = "A128KW" - AlgA192KW = "A192KW" - AlgA256KW = "A256KW" - AlgDir = "dir" - AlgECDHES = "ECDH-ES" - AlgECDHESA128KW = "ECDH-ES+A128KW" - AlgECDHESA192KW = "ECDH-ES+A192KW" - AlgECDHESA256KW = "ECDH-ES+A256KW" - AlgA128GCMKW = "A128GCMKW" - AlgA192GCMKW = "A192GCMKW" - AlgA256GCMKW = "A256GCMKW" - AlgPBES2HS256A128KW = "PBES2-HS256+A128KW" - AlgPBES2HS384A192KW = "PBES2-HS384+A192KW" - AlgPBES2HS512A256KW = "PBES2-HS512+A256KW" -) - -const ( - // Encryption Algorithm Header Parameter Values for JWE - // See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-22 - EncA128CBCHS256 = "A128CBC-HS256" - EncA128CBCHS384 = "A128CBC-HS384" - 
EncA256CBCHS512 = "A256CBC-HS512" - EncA128GCM = "A128GCM" - EncA192GCM = "A192GCM" - EncA256GCM = "A256GCM" -) - -type JOSEHeader map[string]string - -func (j JOSEHeader) Validate() error { - if _, exists := j[HeaderKeyAlgorithm]; !exists { - return fmt.Errorf("header missing %q parameter", HeaderKeyAlgorithm) - } - - return nil -} - -func decodeHeader(seg string) (JOSEHeader, error) { - b, err := decodeSegment(seg) - if err != nil { - return nil, err - } - - var h JOSEHeader - err = json.Unmarshal(b, &h) - if err != nil { - return nil, err - } - - return h, nil -} - -func encodeHeader(h JOSEHeader) (string, error) { - b, err := json.Marshal(h) - if err != nil { - return "", err - } - - return encodeSegment(b), nil -} - -// Decode JWT specific base64url encoding with padding stripped -func decodeSegment(seg string) ([]byte, error) { - if l := len(seg) % 4; l != 0 { - seg += strings.Repeat("=", 4-l) - } - return base64.URLEncoding.DecodeString(seg) -} - -// Encode JWT specific base64url encoding with padding stripped -func encodeSegment(seg []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") -} diff --git a/vendor/github.com/coreos/go-oidc/jose/jwk.go b/vendor/github.com/coreos/go-oidc/jose/jwk.go deleted file mode 100644 index b7a8e2355..000000000 --- a/vendor/github.com/coreos/go-oidc/jose/jwk.go +++ /dev/null @@ -1,135 +0,0 @@ -package jose - -import ( - "bytes" - "encoding/base64" - "encoding/binary" - "encoding/json" - "math/big" - "strings" -) - -// JSON Web Key -// https://tools.ietf.org/html/draft-ietf-jose-json-web-key-36#page-5 -type JWK struct { - ID string - Type string - Alg string - Use string - Exponent int - Modulus *big.Int - Secret []byte -} - -type jwkJSON struct { - ID string `json:"kid"` - Type string `json:"kty"` - Alg string `json:"alg"` - Use string `json:"use"` - Exponent string `json:"e"` - Modulus string `json:"n"` -} - -func (j *JWK) MarshalJSON() ([]byte, error) { - t := jwkJSON{ - ID: j.ID, - Type: j.Type, - Alg: j.Alg, - Use: j.Use, - Exponent: encodeExponent(j.Exponent), - Modulus: encodeModulus(j.Modulus), - } - - return json.Marshal(&t) -} - -func (j *JWK) UnmarshalJSON(data []byte) error { - var t jwkJSON - err := json.Unmarshal(data, &t) - if err != nil { - return err - } - - e, err := decodeExponent(t.Exponent) - if err != nil { - return err - } - - n, err := decodeModulus(t.Modulus) - if err != nil { - return err - } - - j.ID = t.ID - j.Type = t.Type - j.Alg = t.Alg - j.Use = t.Use - j.Exponent = e - j.Modulus = n - - return nil -} - -type JWKSet struct { - Keys []JWK `json:"keys"` -} - -func decodeExponent(e string) (int, error) { - decE, err := decodeBase64URLPaddingOptional(e) - if err != nil { - return 0, err - } - var eBytes []byte - if len(decE) < 8 { - eBytes = make([]byte, 8-len(decE), 8) - eBytes = append(eBytes, decE...) - } else { - eBytes = decE - } - eReader := bytes.NewReader(eBytes) - var E uint64 - err = binary.Read(eReader, binary.BigEndian, &E) - if err != nil { - return 0, err - } - return int(E), nil -} - -func encodeExponent(e int) string { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, uint64(e)) - var idx int - for ; idx < 8; idx++ { - if b[idx] != 0x0 { - break - } - } - return base64.URLEncoding.EncodeToString(b[idx:]) -} - -// Turns a URL encoded modulus of a key into a big int. 
-func decodeModulus(n string) (*big.Int, error) { - decN, err := decodeBase64URLPaddingOptional(n) - if err != nil { - return nil, err - } - N := big.NewInt(0) - N.SetBytes(decN) - return N, nil -} - -func encodeModulus(n *big.Int) string { - return base64.URLEncoding.EncodeToString(n.Bytes()) -} - -// decodeBase64URLPaddingOptional decodes Base64 whether there is padding or not. -// The stdlib version currently doesn't handle this. -// We can get rid of this is if this bug: -// https://github.com/golang/go/issues/4237 -// ever closes. -func decodeBase64URLPaddingOptional(e string) ([]byte, error) { - if m := len(e) % 4; m != 0 { - e += strings.Repeat("=", 4-m) - } - return base64.URLEncoding.DecodeString(e) -} diff --git a/vendor/github.com/coreos/go-oidc/jose/jws.go b/vendor/github.com/coreos/go-oidc/jose/jws.go deleted file mode 100644 index 1049ece83..000000000 --- a/vendor/github.com/coreos/go-oidc/jose/jws.go +++ /dev/null @@ -1,51 +0,0 @@ -package jose - -import ( - "fmt" - "strings" -) - -type JWS struct { - RawHeader string - Header JOSEHeader - RawPayload string - Payload []byte - Signature []byte -} - -// Given a raw encoded JWS token parses it and verifies the structure. -func ParseJWS(raw string) (JWS, error) { - parts := strings.Split(raw, ".") - if len(parts) != 3 { - return JWS{}, fmt.Errorf("malformed JWS, only %d segments", len(parts)) - } - - rawSig := parts[2] - jws := JWS{ - RawHeader: parts[0], - RawPayload: parts[1], - } - - header, err := decodeHeader(jws.RawHeader) - if err != nil { - return JWS{}, fmt.Errorf("malformed JWS, unable to decode header, %s", err) - } - if err = header.Validate(); err != nil { - return JWS{}, fmt.Errorf("malformed JWS, %s", err) - } - jws.Header = header - - payload, err := decodeSegment(jws.RawPayload) - if err != nil { - return JWS{}, fmt.Errorf("malformed JWS, unable to decode payload: %s", err) - } - jws.Payload = payload - - sig, err := decodeSegment(rawSig) - if err != nil { - return JWS{}, fmt.Errorf("malformed JWS, unable to decode signature: %s", err) - } - jws.Signature = sig - - return jws, nil -} diff --git a/vendor/github.com/coreos/go-oidc/jose/jwt.go b/vendor/github.com/coreos/go-oidc/jose/jwt.go deleted file mode 100644 index 3b3e9634b..000000000 --- a/vendor/github.com/coreos/go-oidc/jose/jwt.go +++ /dev/null @@ -1,82 +0,0 @@ -package jose - -import "strings" - -type JWT JWS - -func ParseJWT(token string) (jwt JWT, err error) { - jws, err := ParseJWS(token) - if err != nil { - return - } - - return JWT(jws), nil -} - -func NewJWT(header JOSEHeader, claims Claims) (jwt JWT, err error) { - jwt = JWT{} - - jwt.Header = header - jwt.Header[HeaderMediaType] = "JWT" - - claimBytes, err := marshalClaims(claims) - if err != nil { - return - } - jwt.Payload = claimBytes - - eh, err := encodeHeader(header) - if err != nil { - return - } - jwt.RawHeader = eh - - ec, err := encodeClaims(claims) - if err != nil { - return - } - jwt.RawPayload = ec - - return -} - -func (j *JWT) KeyID() (string, bool) { - kID, ok := j.Header[HeaderKeyID] - return kID, ok -} - -func (j *JWT) Claims() (Claims, error) { - return decodeClaims(j.Payload) -} - -// Encoded data part of the token which may be signed. 
-func (j *JWT) Data() string { - return strings.Join([]string{j.RawHeader, j.RawPayload}, ".") -} - -// Full encoded JWT token string in format: header.claims.signature -func (j *JWT) Encode() string { - d := j.Data() - s := encodeSegment(j.Signature) - return strings.Join([]string{d, s}, ".") -} - -func NewSignedJWT(claims Claims, s Signer) (*JWT, error) { - header := JOSEHeader{ - HeaderKeyAlgorithm: s.Alg(), - HeaderKeyID: s.ID(), - } - - jwt, err := NewJWT(header, claims) - if err != nil { - return nil, err - } - - sig, err := s.Sign([]byte(jwt.Data())) - if err != nil { - return nil, err - } - jwt.Signature = sig - - return &jwt, nil -} diff --git a/vendor/github.com/coreos/go-oidc/jose/sig.go b/vendor/github.com/coreos/go-oidc/jose/sig.go deleted file mode 100755 index 7b2b253cc..000000000 --- a/vendor/github.com/coreos/go-oidc/jose/sig.go +++ /dev/null @@ -1,24 +0,0 @@ -package jose - -import ( - "fmt" -) - -type Verifier interface { - ID() string - Alg() string - Verify(sig []byte, data []byte) error -} - -type Signer interface { - Verifier - Sign(data []byte) (sig []byte, err error) -} - -func NewVerifier(jwk JWK) (Verifier, error) { - if jwk.Type != "RSA" { - return nil, fmt.Errorf("unsupported key type %q", jwk.Type) - } - - return NewVerifierRSA(jwk) -} diff --git a/vendor/github.com/coreos/go-oidc/jose/sig_hmac.go b/vendor/github.com/coreos/go-oidc/jose/sig_hmac.go deleted file mode 100755 index b3ca3ef3d..000000000 --- a/vendor/github.com/coreos/go-oidc/jose/sig_hmac.go +++ /dev/null @@ -1,67 +0,0 @@ -package jose - -import ( - "bytes" - "crypto" - "crypto/hmac" - _ "crypto/sha256" - "errors" - "fmt" -) - -type VerifierHMAC struct { - KeyID string - Hash crypto.Hash - Secret []byte -} - -type SignerHMAC struct { - VerifierHMAC -} - -func NewVerifierHMAC(jwk JWK) (*VerifierHMAC, error) { - if jwk.Alg != "" && jwk.Alg != "HS256" { - return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg) - } - - v := VerifierHMAC{ - KeyID: jwk.ID, - Secret: jwk.Secret, - Hash: crypto.SHA256, - } - - return &v, nil -} - -func (v *VerifierHMAC) ID() string { - return v.KeyID -} - -func (v *VerifierHMAC) Alg() string { - return "HS256" -} - -func (v *VerifierHMAC) Verify(sig []byte, data []byte) error { - h := hmac.New(v.Hash.New, v.Secret) - h.Write(data) - if !bytes.Equal(sig, h.Sum(nil)) { - return errors.New("invalid hmac signature") - } - return nil -} - -func NewSignerHMAC(kid string, secret []byte) *SignerHMAC { - return &SignerHMAC{ - VerifierHMAC: VerifierHMAC{ - KeyID: kid, - Secret: secret, - Hash: crypto.SHA256, - }, - } -} - -func (s *SignerHMAC) Sign(data []byte) ([]byte, error) { - h := hmac.New(s.Hash.New, s.Secret) - h.Write(data) - return h.Sum(nil), nil -} diff --git a/vendor/github.com/coreos/go-oidc/jose/sig_rsa.go b/vendor/github.com/coreos/go-oidc/jose/sig_rsa.go deleted file mode 100755 index 004e45dd8..000000000 --- a/vendor/github.com/coreos/go-oidc/jose/sig_rsa.go +++ /dev/null @@ -1,67 +0,0 @@ -package jose - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "fmt" -) - -type VerifierRSA struct { - KeyID string - Hash crypto.Hash - PublicKey rsa.PublicKey -} - -type SignerRSA struct { - PrivateKey rsa.PrivateKey - VerifierRSA -} - -func NewVerifierRSA(jwk JWK) (*VerifierRSA, error) { - if jwk.Alg != "" && jwk.Alg != "RS256" { - return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg) - } - - v := VerifierRSA{ - KeyID: jwk.ID, - PublicKey: rsa.PublicKey{ - N: jwk.Modulus, - E: jwk.Exponent, - }, - Hash: crypto.SHA256, - } - - return &v, nil -} - -func 
NewSignerRSA(kid string, key rsa.PrivateKey) *SignerRSA { - return &SignerRSA{ - PrivateKey: key, - VerifierRSA: VerifierRSA{ - KeyID: kid, - PublicKey: key.PublicKey, - Hash: crypto.SHA256, - }, - } -} - -func (v *VerifierRSA) ID() string { - return v.KeyID -} - -func (v *VerifierRSA) Alg() string { - return "RS256" -} - -func (v *VerifierRSA) Verify(sig []byte, data []byte) error { - h := v.Hash.New() - h.Write(data) - return rsa.VerifyPKCS1v15(&v.PublicKey, v.Hash, h.Sum(nil), sig) -} - -func (s *SignerRSA) Sign(data []byte) ([]byte, error) { - h := s.Hash.New() - h.Write(data) - return rsa.SignPKCS1v15(rand.Reader, &s.PrivateKey, s.Hash, h.Sum(nil)) -} diff --git a/vendor/github.com/coreos/go-oidc/key/key.go b/vendor/github.com/coreos/go-oidc/key/key.go deleted file mode 100644 index 208c1fc14..000000000 --- a/vendor/github.com/coreos/go-oidc/key/key.go +++ /dev/null @@ -1,153 +0,0 @@ -package key - -import ( - "crypto/rand" - "crypto/rsa" - "encoding/hex" - "encoding/json" - "io" - "time" - - "github.com/coreos/go-oidc/jose" -) - -func NewPublicKey(jwk jose.JWK) *PublicKey { - return &PublicKey{jwk: jwk} -} - -type PublicKey struct { - jwk jose.JWK -} - -func (k *PublicKey) MarshalJSON() ([]byte, error) { - return json.Marshal(&k.jwk) -} - -func (k *PublicKey) UnmarshalJSON(data []byte) error { - var jwk jose.JWK - if err := json.Unmarshal(data, &jwk); err != nil { - return err - } - k.jwk = jwk - return nil -} - -func (k *PublicKey) ID() string { - return k.jwk.ID -} - -func (k *PublicKey) Verifier() (jose.Verifier, error) { - return jose.NewVerifierRSA(k.jwk) -} - -type PrivateKey struct { - KeyID string - PrivateKey *rsa.PrivateKey -} - -func (k *PrivateKey) ID() string { - return k.KeyID -} - -func (k *PrivateKey) Signer() jose.Signer { - return jose.NewSignerRSA(k.ID(), *k.PrivateKey) -} - -func (k *PrivateKey) JWK() jose.JWK { - return jose.JWK{ - ID: k.KeyID, - Type: "RSA", - Alg: "RS256", - Use: "sig", - Exponent: k.PrivateKey.PublicKey.E, - Modulus: k.PrivateKey.PublicKey.N, - } -} - -type KeySet interface { - ExpiresAt() time.Time -} - -type PublicKeySet struct { - keys []PublicKey - index map[string]*PublicKey - expiresAt time.Time -} - -func NewPublicKeySet(jwks []jose.JWK, exp time.Time) *PublicKeySet { - keys := make([]PublicKey, len(jwks)) - index := make(map[string]*PublicKey) - for i, jwk := range jwks { - keys[i] = *NewPublicKey(jwk) - index[keys[i].ID()] = &keys[i] - } - return &PublicKeySet{ - keys: keys, - index: index, - expiresAt: exp, - } -} - -func (s *PublicKeySet) ExpiresAt() time.Time { - return s.expiresAt -} - -func (s *PublicKeySet) Keys() []PublicKey { - return s.keys -} - -func (s *PublicKeySet) Key(id string) *PublicKey { - return s.index[id] -} - -type PrivateKeySet struct { - keys []*PrivateKey - ActiveKeyID string - expiresAt time.Time -} - -func NewPrivateKeySet(keys []*PrivateKey, exp time.Time) *PrivateKeySet { - return &PrivateKeySet{ - keys: keys, - ActiveKeyID: keys[0].ID(), - expiresAt: exp.UTC(), - } -} - -func (s *PrivateKeySet) Keys() []*PrivateKey { - return s.keys -} - -func (s *PrivateKeySet) ExpiresAt() time.Time { - return s.expiresAt -} - -func (s *PrivateKeySet) Active() *PrivateKey { - for i, k := range s.keys { - if k.ID() == s.ActiveKeyID { - return s.keys[i] - } - } - - return nil -} - -type GeneratePrivateKeyFunc func() (*PrivateKey, error) - -func GeneratePrivateKey() (*PrivateKey, error) { - pk, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return nil, err - } - keyID := make([]byte, 20) - if _, err := 
io.ReadFull(rand.Reader, keyID); err != nil { - return nil, err - } - - k := PrivateKey{ - KeyID: hex.EncodeToString(keyID), - PrivateKey: pk, - } - - return &k, nil -} diff --git a/vendor/github.com/coreos/go-oidc/key/manager.go b/vendor/github.com/coreos/go-oidc/key/manager.go deleted file mode 100644 index 476ab6a8d..000000000 --- a/vendor/github.com/coreos/go-oidc/key/manager.go +++ /dev/null @@ -1,99 +0,0 @@ -package key - -import ( - "errors" - "time" - - "github.com/jonboulle/clockwork" - - "github.com/coreos/go-oidc/jose" - "github.com/coreos/pkg/health" -) - -type PrivateKeyManager interface { - ExpiresAt() time.Time - Signer() (jose.Signer, error) - JWKs() ([]jose.JWK, error) - PublicKeys() ([]PublicKey, error) - - WritableKeySetRepo - health.Checkable -} - -func NewPrivateKeyManager() PrivateKeyManager { - return &privateKeyManager{ - clock: clockwork.NewRealClock(), - } -} - -type privateKeyManager struct { - keySet *PrivateKeySet - clock clockwork.Clock -} - -func (m *privateKeyManager) ExpiresAt() time.Time { - if m.keySet == nil { - return m.clock.Now().UTC() - } - - return m.keySet.ExpiresAt() -} - -func (m *privateKeyManager) Signer() (jose.Signer, error) { - if err := m.Healthy(); err != nil { - return nil, err - } - - return m.keySet.Active().Signer(), nil -} - -func (m *privateKeyManager) JWKs() ([]jose.JWK, error) { - if err := m.Healthy(); err != nil { - return nil, err - } - - keys := m.keySet.Keys() - jwks := make([]jose.JWK, len(keys)) - for i, k := range keys { - jwks[i] = k.JWK() - } - return jwks, nil -} - -func (m *privateKeyManager) PublicKeys() ([]PublicKey, error) { - jwks, err := m.JWKs() - if err != nil { - return nil, err - } - keys := make([]PublicKey, len(jwks)) - for i, jwk := range jwks { - keys[i] = *NewPublicKey(jwk) - } - return keys, nil -} - -func (m *privateKeyManager) Healthy() error { - if m.keySet == nil { - return errors.New("private key manager uninitialized") - } - - if len(m.keySet.Keys()) == 0 { - return errors.New("private key manager zero keys") - } - - if m.keySet.ExpiresAt().Before(m.clock.Now().UTC()) { - return errors.New("private key manager keys expired") - } - - return nil -} - -func (m *privateKeyManager) Set(keySet KeySet) error { - privKeySet, ok := keySet.(*PrivateKeySet) - if !ok { - return errors.New("unable to cast to PrivateKeySet") - } - - m.keySet = privKeySet - return nil -} diff --git a/vendor/github.com/coreos/go-oidc/key/repo.go b/vendor/github.com/coreos/go-oidc/key/repo.go deleted file mode 100644 index 1acdeb361..000000000 --- a/vendor/github.com/coreos/go-oidc/key/repo.go +++ /dev/null @@ -1,55 +0,0 @@ -package key - -import ( - "errors" - "sync" -) - -var ErrorNoKeys = errors.New("no keys found") - -type WritableKeySetRepo interface { - Set(KeySet) error -} - -type ReadableKeySetRepo interface { - Get() (KeySet, error) -} - -type PrivateKeySetRepo interface { - WritableKeySetRepo - ReadableKeySetRepo -} - -func NewPrivateKeySetRepo() PrivateKeySetRepo { - return &memPrivateKeySetRepo{} -} - -type memPrivateKeySetRepo struct { - mu sync.RWMutex - pks PrivateKeySet -} - -func (r *memPrivateKeySetRepo) Set(ks KeySet) error { - pks, ok := ks.(*PrivateKeySet) - if !ok { - return errors.New("unable to cast to PrivateKeySet") - } else if pks == nil { - return errors.New("nil KeySet") - } - - r.mu.Lock() - defer r.mu.Unlock() - - r.pks = *pks - return nil -} - -func (r *memPrivateKeySetRepo) Get() (KeySet, error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.pks.keys == nil { - return nil, ErrorNoKeys - } - return 
KeySet(&r.pks), nil -} diff --git a/vendor/github.com/coreos/go-oidc/key/rotate.go b/vendor/github.com/coreos/go-oidc/key/rotate.go deleted file mode 100644 index 9c5508bc1..000000000 --- a/vendor/github.com/coreos/go-oidc/key/rotate.go +++ /dev/null @@ -1,165 +0,0 @@ -package key - -import ( - "errors" - "time" - - "github.com/coreos/pkg/capnslog" - ptime "github.com/coreos/pkg/timeutil" - "github.com/jonboulle/clockwork" -) - -var ( - log = capnslog.NewPackageLogger("github.com/coreos/go-oidc", "key") - - ErrorPrivateKeysExpired = errors.New("private keys have expired") -) - -func NewPrivateKeyRotator(repo PrivateKeySetRepo, ttl time.Duration) *PrivateKeyRotator { - return &PrivateKeyRotator{ - repo: repo, - ttl: ttl, - - keep: 2, - generateKey: GeneratePrivateKey, - clock: clockwork.NewRealClock(), - } -} - -type PrivateKeyRotator struct { - repo PrivateKeySetRepo - generateKey GeneratePrivateKeyFunc - clock clockwork.Clock - keep int - ttl time.Duration -} - -func (r *PrivateKeyRotator) expiresAt() time.Time { - return r.clock.Now().UTC().Add(r.ttl) -} - -func (r *PrivateKeyRotator) Healthy() error { - pks, err := r.privateKeySet() - if err != nil { - return err - } - - if r.clock.Now().After(pks.ExpiresAt()) { - return ErrorPrivateKeysExpired - } - - return nil -} - -func (r *PrivateKeyRotator) privateKeySet() (*PrivateKeySet, error) { - ks, err := r.repo.Get() - if err != nil { - return nil, err - } - - pks, ok := ks.(*PrivateKeySet) - if !ok { - return nil, errors.New("unable to cast to PrivateKeySet") - } - return pks, nil -} - -func (r *PrivateKeyRotator) nextRotation() (time.Duration, error) { - pks, err := r.privateKeySet() - if err == ErrorNoKeys { - log.Infof("No keys in private key set; must rotate immediately") - return 0, nil - } - if err != nil { - return 0, err - } - - now := r.clock.Now() - - // Ideally, we want to rotate after half the TTL has elapsed. - idealRotationTime := pks.ExpiresAt().Add(-r.ttl / 2) - - // If we are past the ideal rotation time, rotate immediatly. - return max(0, idealRotationTime.Sub(now)), nil -} - -func max(a, b time.Duration) time.Duration { - if a > b { - return a - } - return b -} - -func (r *PrivateKeyRotator) Run() chan struct{} { - attempt := func() { - k, err := r.generateKey() - if err != nil { - log.Errorf("Failed generating signing key: %v", err) - return - } - - exp := r.expiresAt() - if err := rotatePrivateKeys(r.repo, k, r.keep, exp); err != nil { - log.Errorf("Failed key rotation: %v", err) - return - } - - log.Infof("Rotated signing keys: id=%s expiresAt=%s", k.ID(), exp) - } - - stop := make(chan struct{}) - go func() { - for { - var nextRotation time.Duration - var sleep time.Duration - var err error - for { - if nextRotation, err = r.nextRotation(); err == nil { - break - } - sleep = ptime.ExpBackoff(sleep, time.Minute) - log.Errorf("error getting nextRotation, retrying in %v: %v", sleep, err) - time.Sleep(sleep) - } - - log.Infof("will rotate keys in %v", nextRotation) - select { - case <-r.clock.After(nextRotation): - attempt() - case <-stop: - return - } - } - }() - - return stop -} - -func rotatePrivateKeys(repo PrivateKeySetRepo, k *PrivateKey, keep int, exp time.Time) error { - ks, err := repo.Get() - if err != nil && err != ErrorNoKeys { - return err - } - - var keys []*PrivateKey - if ks != nil { - pks, ok := ks.(*PrivateKeySet) - if !ok { - return errors.New("unable to cast to PrivateKeySet") - } - keys = pks.Keys() - } - - keys = append([]*PrivateKey{k}, keys...) 
- if l := len(keys); l > keep { - keys = keys[0:keep] - } - - nks := PrivateKeySet{ - keys: keys, - ActiveKeyID: k.ID(), - expiresAt: exp, - } - - return repo.Set(KeySet(&nks)) -} diff --git a/vendor/github.com/coreos/go-oidc/key/sync.go b/vendor/github.com/coreos/go-oidc/key/sync.go deleted file mode 100644 index e8d5d03d8..000000000 --- a/vendor/github.com/coreos/go-oidc/key/sync.go +++ /dev/null @@ -1,91 +0,0 @@ -package key - -import ( - "errors" - "time" - - "github.com/jonboulle/clockwork" - - "github.com/coreos/pkg/timeutil" -) - -func NewKeySetSyncer(r ReadableKeySetRepo, w WritableKeySetRepo) *KeySetSyncer { - return &KeySetSyncer{ - readable: r, - writable: w, - clock: clockwork.NewRealClock(), - } -} - -type KeySetSyncer struct { - readable ReadableKeySetRepo - writable WritableKeySetRepo - clock clockwork.Clock -} - -func (s *KeySetSyncer) Run() chan struct{} { - stop := make(chan struct{}) - go func() { - var failing bool - var next time.Duration - for { - exp, err := syncKeySet(s.readable, s.writable, s.clock) - if err != nil || exp == 0 { - if !failing { - failing = true - next = time.Second - } else { - next = timeutil.ExpBackoff(next, time.Minute) - } - if exp == 0 { - log.Errorf("Synced to already expired key set, retrying in %v: %v", next, err) - - } else { - log.Errorf("Failed syncing key set, retrying in %v: %v", next, err) - } - } else { - failing = false - next = exp / 2 - log.Infof("Synced key set, checking again in %v", next) - } - - select { - case <-s.clock.After(next): - continue - case <-stop: - return - } - } - }() - - return stop -} - -func Sync(r ReadableKeySetRepo, w WritableKeySetRepo) (time.Duration, error) { - return syncKeySet(r, w, clockwork.NewRealClock()) -} - -// syncKeySet copies the keyset from r to the KeySet at w and returns the duration in which the KeySet will expire. -// If keyset has already expired, returns a zero duration. 
-func syncKeySet(r ReadableKeySetRepo, w WritableKeySetRepo, clock clockwork.Clock) (exp time.Duration, err error) { - var ks KeySet - ks, err = r.Get() - if err != nil { - return - } - - if ks == nil { - err = errors.New("no source KeySet") - return - } - - if err = w.Set(ks); err != nil { - return - } - - now := clock.Now() - if ks.ExpiresAt().After(now) { - exp = ks.ExpiresAt().Sub(now) - } - return -} diff --git a/vendor/github.com/coreos/go-oidc/oauth2/error.go b/vendor/github.com/coreos/go-oidc/oauth2/error.go deleted file mode 100644 index 50d890949..000000000 --- a/vendor/github.com/coreos/go-oidc/oauth2/error.go +++ /dev/null @@ -1,29 +0,0 @@ -package oauth2 - -const ( - ErrorAccessDenied = "access_denied" - ErrorInvalidClient = "invalid_client" - ErrorInvalidGrant = "invalid_grant" - ErrorInvalidRequest = "invalid_request" - ErrorServerError = "server_error" - ErrorUnauthorizedClient = "unauthorized_client" - ErrorUnsupportedGrantType = "unsupported_grant_type" - ErrorUnsupportedResponseType = "unsupported_response_type" -) - -type Error struct { - Type string `json:"error"` - Description string `json:"error_description,omitempty"` - State string `json:"state,omitempty"` -} - -func (e *Error) Error() string { - if e.Description != "" { - return e.Type + ": " + e.Description - } - return e.Type -} - -func NewError(typ string) *Error { - return &Error{Type: typ} -} diff --git a/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go b/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go deleted file mode 100644 index 72d1d6715..000000000 --- a/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go +++ /dev/null @@ -1,416 +0,0 @@ -package oauth2 - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "mime" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - - phttp "github.com/coreos/go-oidc/http" -) - -// ResponseTypesEqual compares two response_type values. If either -// contains a space, it is treated as an unordered list. For example, -// comparing "code id_token" and "id_token code" would evaluate to true. -func ResponseTypesEqual(r1, r2 string) bool { - if !strings.Contains(r1, " ") || !strings.Contains(r2, " ") { - // fast route, no split needed - return r1 == r2 - } - - // split, sort, and compare - r1Fields := strings.Fields(r1) - r2Fields := strings.Fields(r2) - if len(r1Fields) != len(r2Fields) { - return false - } - sort.Strings(r1Fields) - sort.Strings(r2Fields) - for i, r1Field := range r1Fields { - if r1Field != r2Fields[i] { - return false - } - } - return true -} - -const ( - // OAuth2.0 response types registered by OIDC. 
- // - // See: https://openid.net/specs/oauth-v2-multiple-response-types-1_0.html#RegistryContents - ResponseTypeCode = "code" - ResponseTypeCodeIDToken = "code id_token" - ResponseTypeCodeIDTokenToken = "code id_token token" - ResponseTypeIDToken = "id_token" - ResponseTypeIDTokenToken = "id_token token" - ResponseTypeToken = "token" - ResponseTypeNone = "none" -) - -const ( - GrantTypeAuthCode = "authorization_code" - GrantTypeClientCreds = "client_credentials" - GrantTypeUserCreds = "password" - GrantTypeImplicit = "implicit" - GrantTypeRefreshToken = "refresh_token" - - AuthMethodClientSecretPost = "client_secret_post" - AuthMethodClientSecretBasic = "client_secret_basic" - AuthMethodClientSecretJWT = "client_secret_jwt" - AuthMethodPrivateKeyJWT = "private_key_jwt" -) - -type Config struct { - Credentials ClientCredentials - Scope []string - RedirectURL string - AuthURL string - TokenURL string - - // Must be one of the AuthMethodXXX methods above. Right now, only - // AuthMethodClientSecretPost and AuthMethodClientSecretBasic are supported. - AuthMethod string -} - -type Client struct { - hc phttp.Client - creds ClientCredentials - scope []string - authURL *url.URL - redirectURL *url.URL - tokenURL *url.URL - authMethod string -} - -type ClientCredentials struct { - ID string - Secret string -} - -func NewClient(hc phttp.Client, cfg Config) (c *Client, err error) { - if len(cfg.Credentials.ID) == 0 { - err = errors.New("missing client id") - return - } - - if len(cfg.Credentials.Secret) == 0 { - err = errors.New("missing client secret") - return - } - - if cfg.AuthMethod == "" { - cfg.AuthMethod = AuthMethodClientSecretBasic - } else if cfg.AuthMethod != AuthMethodClientSecretPost && cfg.AuthMethod != AuthMethodClientSecretBasic { - err = fmt.Errorf("auth method %q is not supported", cfg.AuthMethod) - return - } - - au, err := phttp.ParseNonEmptyURL(cfg.AuthURL) - if err != nil { - return - } - - tu, err := phttp.ParseNonEmptyURL(cfg.TokenURL) - if err != nil { - return - } - - // Allow empty redirect URL in the case where the client - // only needs to verify a given token. - ru, err := url.Parse(cfg.RedirectURL) - if err != nil { - return - } - - c = &Client{ - creds: cfg.Credentials, - scope: cfg.Scope, - redirectURL: ru, - authURL: au, - tokenURL: tu, - hc: hc, - authMethod: cfg.AuthMethod, - } - - return -} - -// Return the embedded HTTP client -func (c *Client) HttpClient() phttp.Client { - return c.hc -} - -// Generate the url for initial redirect to oauth provider. 
-func (c *Client) AuthCodeURL(state, accessType, prompt string) string { - v := c.commonURLValues() - v.Set("state", state) - if strings.ToLower(accessType) == "offline" { - v.Set("access_type", "offline") - } - - if prompt != "" { - v.Set("prompt", prompt) - } - v.Set("response_type", "code") - - q := v.Encode() - u := *c.authURL - if u.RawQuery == "" { - u.RawQuery = q - } else { - u.RawQuery += "&" + q - } - return u.String() -} - -func (c *Client) commonURLValues() url.Values { - return url.Values{ - "redirect_uri": {c.redirectURL.String()}, - "scope": {strings.Join(c.scope, " ")}, - "client_id": {c.creds.ID}, - } -} - -func (c *Client) newAuthenticatedRequest(urlToken string, values url.Values) (*http.Request, error) { - var req *http.Request - var err error - switch c.authMethod { - case AuthMethodClientSecretPost: - values.Set("client_secret", c.creds.Secret) - req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode())) - if err != nil { - return nil, err - } - case AuthMethodClientSecretBasic: - req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode())) - if err != nil { - return nil, err - } - encodedID := url.QueryEscape(c.creds.ID) - encodedSecret := url.QueryEscape(c.creds.Secret) - req.SetBasicAuth(encodedID, encodedSecret) - default: - panic("misconfigured client: auth method not supported") - } - - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - return req, nil - -} - -// ClientCredsToken posts the client id and secret to obtain a token scoped to the OAuth2 client via the "client_credentials" grant type. -// May not be supported by all OAuth2 servers. -func (c *Client) ClientCredsToken(scope []string) (result TokenResponse, err error) { - v := url.Values{ - "scope": {strings.Join(scope, " ")}, - "grant_type": {GrantTypeClientCreds}, - } - - req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v) - if err != nil { - return - } - - resp, err := c.hc.Do(req) - if err != nil { - return - } - defer resp.Body.Close() - - return parseTokenResponse(resp) -} - -// UserCredsToken posts the username and password to obtain a token scoped to the OAuth2 client via the "password" grant_type -// May not be supported by all OAuth2 servers. -func (c *Client) UserCredsToken(username, password string) (result TokenResponse, err error) { - v := url.Values{ - "scope": {strings.Join(c.scope, " ")}, - "grant_type": {GrantTypeUserCreds}, - "username": {username}, - "password": {password}, - } - - req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v) - if err != nil { - return - } - - resp, err := c.hc.Do(req) - if err != nil { - return - } - defer resp.Body.Close() - - return parseTokenResponse(resp) -} - -// RequestToken requests a token from the Token Endpoint with the specified grantType. -// If 'grantType' == GrantTypeAuthCode, then 'value' should be the authorization code. -// If 'grantType' == GrantTypeRefreshToken, then 'value' should be the refresh token. 
-func (c *Client) RequestToken(grantType, value string) (result TokenResponse, err error) { - v := c.commonURLValues() - - v.Set("grant_type", grantType) - v.Set("client_secret", c.creds.Secret) - switch grantType { - case GrantTypeAuthCode: - v.Set("code", value) - case GrantTypeRefreshToken: - v.Set("refresh_token", value) - default: - err = fmt.Errorf("unsupported grant_type: %v", grantType) - return - } - - req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v) - if err != nil { - return - } - - resp, err := c.hc.Do(req) - if err != nil { - return - } - defer resp.Body.Close() - - return parseTokenResponse(resp) -} - -func parseTokenResponse(resp *http.Response) (result TokenResponse, err error) { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return - } - badStatusCode := resp.StatusCode < 200 || resp.StatusCode > 299 - - contentType, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) - if err != nil { - return - } - - result = TokenResponse{ - RawBody: body, - } - - newError := func(typ, desc, state string) error { - if typ == "" { - return fmt.Errorf("unrecognized error %s", body) - } - return &Error{typ, desc, state} - } - - if contentType == "application/x-www-form-urlencoded" || contentType == "text/plain" { - var vals url.Values - vals, err = url.ParseQuery(string(body)) - if err != nil { - return - } - if error := vals.Get("error"); error != "" || badStatusCode { - err = newError(error, vals.Get("error_description"), vals.Get("state")) - return - } - e := vals.Get("expires_in") - if e == "" { - e = vals.Get("expires") - } - if e != "" { - result.Expires, err = strconv.Atoi(e) - if err != nil { - return - } - } - result.AccessToken = vals.Get("access_token") - result.TokenType = vals.Get("token_type") - result.IDToken = vals.Get("id_token") - result.RefreshToken = vals.Get("refresh_token") - result.Scope = vals.Get("scope") - } else { - var r struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - IDToken string `json:"id_token"` - RefreshToken string `json:"refresh_token"` - Scope string `json:"scope"` - State string `json:"state"` - ExpiresIn json.Number `json:"expires_in"` // Azure AD returns string - Expires int `json:"expires"` - Error string `json:"error"` - Desc string `json:"error_description"` - } - if err = json.Unmarshal(body, &r); err != nil { - return - } - if r.Error != "" || badStatusCode { - err = newError(r.Error, r.Desc, r.State) - return - } - result.AccessToken = r.AccessToken - result.TokenType = r.TokenType - result.IDToken = r.IDToken - result.RefreshToken = r.RefreshToken - result.Scope = r.Scope - if expiresIn, err := r.ExpiresIn.Int64(); err != nil { - result.Expires = r.Expires - } else { - result.Expires = int(expiresIn) - } - } - return -} - -type TokenResponse struct { - AccessToken string - TokenType string - Expires int - IDToken string - RefreshToken string // OPTIONAL. - Scope string // OPTIONAL, if identical to the scope requested by the client, otherwise, REQUIRED. 
- RawBody []byte // In case callers need some other non-standard info from the token response -} - -type AuthCodeRequest struct { - ResponseType string - ClientID string - RedirectURL *url.URL - Scope []string - State string -} - -func ParseAuthCodeRequest(q url.Values) (AuthCodeRequest, error) { - acr := AuthCodeRequest{ - ResponseType: q.Get("response_type"), - ClientID: q.Get("client_id"), - State: q.Get("state"), - Scope: make([]string, 0), - } - - qs := strings.TrimSpace(q.Get("scope")) - if qs != "" { - acr.Scope = strings.Split(qs, " ") - } - - err := func() error { - if acr.ClientID == "" { - return NewError(ErrorInvalidRequest) - } - - redirectURL := q.Get("redirect_uri") - if redirectURL != "" { - ru, err := url.Parse(redirectURL) - if err != nil { - return NewError(ErrorInvalidRequest) - } - acr.RedirectURL = ru - } - - return nil - }() - - return acr, err -} diff --git a/vendor/github.com/coreos/go-oidc/oidc/client.go b/vendor/github.com/coreos/go-oidc/oidc/client.go deleted file mode 100644 index 7a3cb40f6..000000000 --- a/vendor/github.com/coreos/go-oidc/oidc/client.go +++ /dev/null @@ -1,846 +0,0 @@ -package oidc - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "net/mail" - "net/url" - "sync" - "time" - - phttp "github.com/coreos/go-oidc/http" - "github.com/coreos/go-oidc/jose" - "github.com/coreos/go-oidc/key" - "github.com/coreos/go-oidc/oauth2" -) - -const ( - // amount of time that must pass after the last key sync - // completes before another attempt may begin - keySyncWindow = 5 * time.Second -) - -var ( - DefaultScope = []string{"openid", "email", "profile"} - - supportedAuthMethods = map[string]struct{}{ - oauth2.AuthMethodClientSecretBasic: struct{}{}, - oauth2.AuthMethodClientSecretPost: struct{}{}, - } -) - -type ClientCredentials oauth2.ClientCredentials - -type ClientIdentity struct { - Credentials ClientCredentials - Metadata ClientMetadata -} - -type JWAOptions struct { - // SigningAlg specifies an JWA alg for signing JWTs. - // - // Specifying this field implies different actions depending on the context. It may - // require objects be serialized and signed as a JWT instead of plain JSON, or - // require an existing JWT object use the specified alg. - // - // See: http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata - SigningAlg string - // EncryptionAlg, if provided, specifies that the returned or sent object be stored - // (or nested) within a JWT object and encrypted with the provided JWA alg. - EncryptionAlg string - // EncryptionEnc specifies the JWA enc algorithm to use with EncryptionAlg. If - // EncryptionAlg is provided and EncryptionEnc is omitted, this field defaults - // to A128CBC-HS256. - // - // If EncryptionEnc is provided EncryptionAlg must also be specified. - EncryptionEnc string -} - -func (opt JWAOptions) valid() error { - if opt.EncryptionEnc != "" && opt.EncryptionAlg == "" { - return errors.New("encryption encoding provided with no encryption algorithm") - } - return nil -} - -func (opt JWAOptions) defaults() JWAOptions { - if opt.EncryptionAlg != "" && opt.EncryptionEnc == "" { - opt.EncryptionEnc = jose.EncA128CBCHS256 - } - return opt -} - -var ( - // Ensure ClientMetadata satisfies these interfaces. - _ json.Marshaler = &ClientMetadata{} - _ json.Unmarshaler = &ClientMetadata{} -) - -// ClientMetadata holds metadata that the authorization server associates -// with a client identifier. 
The fields range from human-facing display -// strings such as client name, to items that impact the security of the -// protocol, such as the list of valid redirect URIs. -// -// See http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata -// -// TODO: support language specific claim representations -// http://openid.net/specs/openid-connect-registration-1_0.html#LanguagesAndScripts -type ClientMetadata struct { - RedirectURIs []url.URL // Required - - // A list of OAuth 2.0 "response_type" values that the client wishes to restrict - // itself to. Either "code", "token", or another registered extension. - // - // If omitted, only "code" will be used. - ResponseTypes []string - // A list of OAuth 2.0 grant types the client wishes to restrict itself to. - // The grant type values used by OIDC are "authorization_code", "implicit", - // and "refresh_token". - // - // If ommitted, only "authorization_code" will be used. - GrantTypes []string - // "native" or "web". If omitted, "web". - ApplicationType string - - // List of email addresses. - Contacts []mail.Address - // Name of client to be presented to the end-user. - ClientName string - // URL that references a logo for the Client application. - LogoURI *url.URL - // URL of the home page of the Client. - ClientURI *url.URL - // Profile data policies and terms of use to be provided to the end user. - PolicyURI *url.URL - TermsOfServiceURI *url.URL - - // URL to or the value of the client's JSON Web Key Set document. - JWKSURI *url.URL - JWKS *jose.JWKSet - - // URL referencing a flie with a single JSON array of redirect URIs. - SectorIdentifierURI *url.URL - - SubjectType string - - // Options to restrict the JWS alg and enc values used for server responses and requests. - IDTokenResponseOptions JWAOptions - UserInfoResponseOptions JWAOptions - RequestObjectOptions JWAOptions - - // Client requested authorization method and signing options for the token endpoint. - // - // Defaults to "client_secret_basic" - TokenEndpointAuthMethod string - TokenEndpointAuthSigningAlg string - - // DefaultMaxAge specifies the maximum amount of time in seconds before an authorized - // user must reauthroize. - // - // If 0, no limitation is placed on the maximum. - DefaultMaxAge int64 - // RequireAuthTime specifies if the auth_time claim in the ID token is required. - RequireAuthTime bool - - // Default Authentication Context Class Reference values for authentication requests. - DefaultACRValues []string - - // URI that a third party can use to initiate a login by the relaying party. - // - // See: http://openid.net/specs/openid-connect-core-1_0.html#ThirdPartyInitiatedLogin - InitiateLoginURI *url.URL - // Pre-registered request_uri values that may be cached by the server. - RequestURIs []url.URL -} - -// Defaults returns a shallow copy of ClientMetadata with default -// values replacing omitted fields. 
-func (m ClientMetadata) Defaults() ClientMetadata { - if len(m.ResponseTypes) == 0 { - m.ResponseTypes = []string{oauth2.ResponseTypeCode} - } - if len(m.GrantTypes) == 0 { - m.GrantTypes = []string{oauth2.GrantTypeAuthCode} - } - if m.ApplicationType == "" { - m.ApplicationType = "web" - } - if m.TokenEndpointAuthMethod == "" { - m.TokenEndpointAuthMethod = oauth2.AuthMethodClientSecretBasic - } - m.IDTokenResponseOptions = m.IDTokenResponseOptions.defaults() - m.UserInfoResponseOptions = m.UserInfoResponseOptions.defaults() - m.RequestObjectOptions = m.RequestObjectOptions.defaults() - return m -} - -func (m *ClientMetadata) MarshalJSON() ([]byte, error) { - e := m.toEncodableStruct() - return json.Marshal(&e) -} - -func (m *ClientMetadata) UnmarshalJSON(data []byte) error { - var e encodableClientMetadata - if err := json.Unmarshal(data, &e); err != nil { - return err - } - meta, err := e.toStruct() - if err != nil { - return err - } - if err := meta.Valid(); err != nil { - return err - } - *m = meta - return nil -} - -type encodableClientMetadata struct { - RedirectURIs []string `json:"redirect_uris"` // Required - ResponseTypes []string `json:"response_types,omitempty"` - GrantTypes []string `json:"grant_types,omitempty"` - ApplicationType string `json:"application_type,omitempty"` - Contacts []string `json:"contacts,omitempty"` - ClientName string `json:"client_name,omitempty"` - LogoURI string `json:"logo_uri,omitempty"` - ClientURI string `json:"client_uri,omitempty"` - PolicyURI string `json:"policy_uri,omitempty"` - TermsOfServiceURI string `json:"tos_uri,omitempty"` - JWKSURI string `json:"jwks_uri,omitempty"` - JWKS *jose.JWKSet `json:"jwks,omitempty"` - SectorIdentifierURI string `json:"sector_identifier_uri,omitempty"` - SubjectType string `json:"subject_type,omitempty"` - IDTokenSignedResponseAlg string `json:"id_token_signed_response_alg,omitempty"` - IDTokenEncryptedResponseAlg string `json:"id_token_encrypted_response_alg,omitempty"` - IDTokenEncryptedResponseEnc string `json:"id_token_encrypted_response_enc,omitempty"` - UserInfoSignedResponseAlg string `json:"userinfo_signed_response_alg,omitempty"` - UserInfoEncryptedResponseAlg string `json:"userinfo_encrypted_response_alg,omitempty"` - UserInfoEncryptedResponseEnc string `json:"userinfo_encrypted_response_enc,omitempty"` - RequestObjectSigningAlg string `json:"request_object_signing_alg,omitempty"` - RequestObjectEncryptionAlg string `json:"request_object_encryption_alg,omitempty"` - RequestObjectEncryptionEnc string `json:"request_object_encryption_enc,omitempty"` - TokenEndpointAuthMethod string `json:"token_endpoint_auth_method,omitempty"` - TokenEndpointAuthSigningAlg string `json:"token_endpoint_auth_signing_alg,omitempty"` - DefaultMaxAge int64 `json:"default_max_age,omitempty"` - RequireAuthTime bool `json:"require_auth_time,omitempty"` - DefaultACRValues []string `json:"default_acr_values,omitempty"` - InitiateLoginURI string `json:"initiate_login_uri,omitempty"` - RequestURIs []string `json:"request_uris,omitempty"` -} - -func (c *encodableClientMetadata) toStruct() (ClientMetadata, error) { - p := stickyErrParser{} - m := ClientMetadata{ - RedirectURIs: p.parseURIs(c.RedirectURIs, "redirect_uris"), - ResponseTypes: c.ResponseTypes, - GrantTypes: c.GrantTypes, - ApplicationType: c.ApplicationType, - Contacts: p.parseEmails(c.Contacts, "contacts"), - ClientName: c.ClientName, - LogoURI: p.parseURI(c.LogoURI, "logo_uri"), - ClientURI: p.parseURI(c.ClientURI, "client_uri"), - PolicyURI: p.parseURI(c.PolicyURI, 
"policy_uri"), - TermsOfServiceURI: p.parseURI(c.TermsOfServiceURI, "tos_uri"), - JWKSURI: p.parseURI(c.JWKSURI, "jwks_uri"), - JWKS: c.JWKS, - SectorIdentifierURI: p.parseURI(c.SectorIdentifierURI, "sector_identifier_uri"), - SubjectType: c.SubjectType, - TokenEndpointAuthMethod: c.TokenEndpointAuthMethod, - TokenEndpointAuthSigningAlg: c.TokenEndpointAuthSigningAlg, - DefaultMaxAge: c.DefaultMaxAge, - RequireAuthTime: c.RequireAuthTime, - DefaultACRValues: c.DefaultACRValues, - InitiateLoginURI: p.parseURI(c.InitiateLoginURI, "initiate_login_uri"), - RequestURIs: p.parseURIs(c.RequestURIs, "request_uris"), - IDTokenResponseOptions: JWAOptions{ - c.IDTokenSignedResponseAlg, - c.IDTokenEncryptedResponseAlg, - c.IDTokenEncryptedResponseEnc, - }, - UserInfoResponseOptions: JWAOptions{ - c.UserInfoSignedResponseAlg, - c.UserInfoEncryptedResponseAlg, - c.UserInfoEncryptedResponseEnc, - }, - RequestObjectOptions: JWAOptions{ - c.RequestObjectSigningAlg, - c.RequestObjectEncryptionAlg, - c.RequestObjectEncryptionEnc, - }, - } - if p.firstErr != nil { - return ClientMetadata{}, p.firstErr - } - return m, nil -} - -// stickyErrParser parses URIs and email addresses. Once it encounters -// a parse error, subsequent calls become no-op. -type stickyErrParser struct { - firstErr error -} - -func (p *stickyErrParser) parseURI(s, field string) *url.URL { - if p.firstErr != nil || s == "" { - return nil - } - u, err := url.Parse(s) - if err == nil { - if u.Host == "" { - err = errors.New("no host in URI") - } else if u.Scheme != "http" && u.Scheme != "https" { - err = errors.New("invalid URI scheme") - } - } - if err != nil { - p.firstErr = fmt.Errorf("failed to parse %s: %v", field, err) - return nil - } - return u -} - -func (p *stickyErrParser) parseURIs(s []string, field string) []url.URL { - if p.firstErr != nil || len(s) == 0 { - return nil - } - uris := make([]url.URL, len(s)) - for i, val := range s { - if val == "" { - p.firstErr = fmt.Errorf("invalid URI in field %s", field) - return nil - } - if u := p.parseURI(val, field); u != nil { - uris[i] = *u - } - } - return uris -} - -func (p *stickyErrParser) parseEmails(s []string, field string) []mail.Address { - if p.firstErr != nil || len(s) == 0 { - return nil - } - addrs := make([]mail.Address, len(s)) - for i, addr := range s { - if addr == "" { - p.firstErr = fmt.Errorf("invalid email in field %s", field) - return nil - } - a, err := mail.ParseAddress(addr) - if err != nil { - p.firstErr = fmt.Errorf("invalid email in field %s: %v", field, err) - return nil - } - addrs[i] = *a - } - return addrs -} - -func (m *ClientMetadata) toEncodableStruct() encodableClientMetadata { - return encodableClientMetadata{ - RedirectURIs: urisToStrings(m.RedirectURIs), - ResponseTypes: m.ResponseTypes, - GrantTypes: m.GrantTypes, - ApplicationType: m.ApplicationType, - Contacts: emailsToStrings(m.Contacts), - ClientName: m.ClientName, - LogoURI: uriToString(m.LogoURI), - ClientURI: uriToString(m.ClientURI), - PolicyURI: uriToString(m.PolicyURI), - TermsOfServiceURI: uriToString(m.TermsOfServiceURI), - JWKSURI: uriToString(m.JWKSURI), - JWKS: m.JWKS, - SectorIdentifierURI: uriToString(m.SectorIdentifierURI), - SubjectType: m.SubjectType, - IDTokenSignedResponseAlg: m.IDTokenResponseOptions.SigningAlg, - IDTokenEncryptedResponseAlg: m.IDTokenResponseOptions.EncryptionAlg, - IDTokenEncryptedResponseEnc: m.IDTokenResponseOptions.EncryptionEnc, - UserInfoSignedResponseAlg: m.UserInfoResponseOptions.SigningAlg, - UserInfoEncryptedResponseAlg: 
m.UserInfoResponseOptions.EncryptionAlg, - UserInfoEncryptedResponseEnc: m.UserInfoResponseOptions.EncryptionEnc, - RequestObjectSigningAlg: m.RequestObjectOptions.SigningAlg, - RequestObjectEncryptionAlg: m.RequestObjectOptions.EncryptionAlg, - RequestObjectEncryptionEnc: m.RequestObjectOptions.EncryptionEnc, - TokenEndpointAuthMethod: m.TokenEndpointAuthMethod, - TokenEndpointAuthSigningAlg: m.TokenEndpointAuthSigningAlg, - DefaultMaxAge: m.DefaultMaxAge, - RequireAuthTime: m.RequireAuthTime, - DefaultACRValues: m.DefaultACRValues, - InitiateLoginURI: uriToString(m.InitiateLoginURI), - RequestURIs: urisToStrings(m.RequestURIs), - } -} - -func uriToString(u *url.URL) string { - if u == nil { - return "" - } - return u.String() -} - -func urisToStrings(urls []url.URL) []string { - if len(urls) == 0 { - return nil - } - sli := make([]string, len(urls)) - for i, u := range urls { - sli[i] = u.String() - } - return sli -} - -func emailsToStrings(addrs []mail.Address) []string { - if len(addrs) == 0 { - return nil - } - sli := make([]string, len(addrs)) - for i, addr := range addrs { - sli[i] = addr.String() - } - return sli -} - -// Valid determines if a ClientMetadata conforms with the OIDC specification. -// -// Valid is called by UnmarshalJSON. -// -// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for -// URLs fields where the OIDC spec requires it. This may change in future releases -// of this package. See: https://github.com/coreos/go-oidc/issues/34 -func (m *ClientMetadata) Valid() error { - if len(m.RedirectURIs) == 0 { - return errors.New("zero redirect URLs") - } - - validURI := func(u *url.URL, fieldName string) error { - if u.Host == "" { - return fmt.Errorf("no host for uri field %s", fieldName) - } - if u.Scheme != "http" && u.Scheme != "https" { - return fmt.Errorf("uri field %s scheme is not http or https", fieldName) - } - return nil - } - - uris := []struct { - val *url.URL - name string - }{ - {m.LogoURI, "logo_uri"}, - {m.ClientURI, "client_uri"}, - {m.PolicyURI, "policy_uri"}, - {m.TermsOfServiceURI, "tos_uri"}, - {m.JWKSURI, "jwks_uri"}, - {m.SectorIdentifierURI, "sector_identifier_uri"}, - {m.InitiateLoginURI, "initiate_login_uri"}, - } - - for _, uri := range uris { - if uri.val == nil { - continue - } - if err := validURI(uri.val, uri.name); err != nil { - return err - } - } - - uriLists := []struct { - vals []url.URL - name string - }{ - {m.RedirectURIs, "redirect_uris"}, - {m.RequestURIs, "request_uris"}, - } - for _, list := range uriLists { - for _, uri := range list.vals { - if err := validURI(&uri, list.name); err != nil { - return err - } - } - } - - options := []struct { - option JWAOptions - name string - }{ - {m.IDTokenResponseOptions, "id_token response"}, - {m.UserInfoResponseOptions, "userinfo response"}, - {m.RequestObjectOptions, "request_object"}, - } - for _, option := range options { - if err := option.option.valid(); err != nil { - return fmt.Errorf("invalid JWA values for %s: %v", option.name, err) - } - } - return nil -} - -type ClientRegistrationResponse struct { - ClientID string // Required - ClientSecret string - RegistrationAccessToken string - RegistrationClientURI string - // If IsZero is true, unspecified. - ClientIDIssuedAt time.Time - // Time at which the client_secret will expire. - // If IsZero is true, it will not expire. 
- ClientSecretExpiresAt time.Time - - ClientMetadata -} - -type encodableClientRegistrationResponse struct { - ClientID string `json:"client_id"` // Required - ClientSecret string `json:"client_secret,omitempty"` - RegistrationAccessToken string `json:"registration_access_token,omitempty"` - RegistrationClientURI string `json:"registration_client_uri,omitempty"` - ClientIDIssuedAt int64 `json:"client_id_issued_at,omitempty"` - // Time at which the client_secret will expire, in seconds since the epoch. - // If 0 it will not expire. - ClientSecretExpiresAt int64 `json:"client_secret_expires_at"` // Required - - encodableClientMetadata -} - -func unixToSec(t time.Time) int64 { - if t.IsZero() { - return 0 - } - return t.Unix() -} - -func (c *ClientRegistrationResponse) MarshalJSON() ([]byte, error) { - e := encodableClientRegistrationResponse{ - ClientID: c.ClientID, - ClientSecret: c.ClientSecret, - RegistrationAccessToken: c.RegistrationAccessToken, - RegistrationClientURI: c.RegistrationClientURI, - ClientIDIssuedAt: unixToSec(c.ClientIDIssuedAt), - ClientSecretExpiresAt: unixToSec(c.ClientSecretExpiresAt), - encodableClientMetadata: c.ClientMetadata.toEncodableStruct(), - } - return json.Marshal(&e) -} - -func secToUnix(sec int64) time.Time { - if sec == 0 { - return time.Time{} - } - return time.Unix(sec, 0) -} - -func (c *ClientRegistrationResponse) UnmarshalJSON(data []byte) error { - var e encodableClientRegistrationResponse - if err := json.Unmarshal(data, &e); err != nil { - return err - } - if e.ClientID == "" { - return errors.New("no client_id in client registration response") - } - metadata, err := e.encodableClientMetadata.toStruct() - if err != nil { - return err - } - *c = ClientRegistrationResponse{ - ClientID: e.ClientID, - ClientSecret: e.ClientSecret, - RegistrationAccessToken: e.RegistrationAccessToken, - RegistrationClientURI: e.RegistrationClientURI, - ClientIDIssuedAt: secToUnix(e.ClientIDIssuedAt), - ClientSecretExpiresAt: secToUnix(e.ClientSecretExpiresAt), - ClientMetadata: metadata, - } - return nil -} - -type ClientConfig struct { - HTTPClient phttp.Client - Credentials ClientCredentials - Scope []string - RedirectURL string - ProviderConfig ProviderConfig - KeySet key.PublicKeySet -} - -func NewClient(cfg ClientConfig) (*Client, error) { - // Allow empty redirect URL in the case where the client - // only needs to verify a given token. 
- ru, err := url.Parse(cfg.RedirectURL) - if err != nil { - return nil, fmt.Errorf("invalid redirect URL: %v", err) - } - - c := Client{ - credentials: cfg.Credentials, - httpClient: cfg.HTTPClient, - scope: cfg.Scope, - redirectURL: ru.String(), - providerConfig: newProviderConfigRepo(cfg.ProviderConfig), - keySet: cfg.KeySet, - } - - if c.httpClient == nil { - c.httpClient = http.DefaultClient - } - - if c.scope == nil { - c.scope = make([]string, len(DefaultScope)) - copy(c.scope, DefaultScope) - } - - return &c, nil -} - -type Client struct { - httpClient phttp.Client - providerConfig *providerConfigRepo - credentials ClientCredentials - redirectURL string - scope []string - keySet key.PublicKeySet - providerSyncer *ProviderConfigSyncer - - keySetSyncMutex sync.RWMutex - lastKeySetSync time.Time -} - -func (c *Client) Healthy() error { - now := time.Now().UTC() - - cfg := c.providerConfig.Get() - - if cfg.Empty() { - return errors.New("oidc client provider config empty") - } - - if !cfg.ExpiresAt.IsZero() && cfg.ExpiresAt.Before(now) { - return errors.New("oidc client provider config expired") - } - - return nil -} - -func (c *Client) OAuthClient() (*oauth2.Client, error) { - cfg := c.providerConfig.Get() - authMethod, err := chooseAuthMethod(cfg) - if err != nil { - return nil, err - } - - ocfg := oauth2.Config{ - Credentials: oauth2.ClientCredentials(c.credentials), - RedirectURL: c.redirectURL, - AuthURL: cfg.AuthEndpoint.String(), - TokenURL: cfg.TokenEndpoint.String(), - Scope: c.scope, - AuthMethod: authMethod, - } - - return oauth2.NewClient(c.httpClient, ocfg) -} - -func chooseAuthMethod(cfg ProviderConfig) (string, error) { - if len(cfg.TokenEndpointAuthMethodsSupported) == 0 { - return oauth2.AuthMethodClientSecretBasic, nil - } - - for _, authMethod := range cfg.TokenEndpointAuthMethodsSupported { - if _, ok := supportedAuthMethods[authMethod]; ok { - return authMethod, nil - } - } - - return "", errors.New("no supported auth methods") -} - -// SyncProviderConfig starts the provider config syncer -func (c *Client) SyncProviderConfig(discoveryURL string) chan struct{} { - r := NewHTTPProviderConfigGetter(c.httpClient, discoveryURL) - s := NewProviderConfigSyncer(r, c.providerConfig) - stop := s.Run() - s.WaitUntilInitialSync() - return stop -} - -func (c *Client) maybeSyncKeys() error { - tooSoon := func() bool { - return time.Now().UTC().Before(c.lastKeySetSync.Add(keySyncWindow)) - } - - // ignore request to sync keys if a sync operation has been - // attempted too recently - if tooSoon() { - return nil - } - - c.keySetSyncMutex.Lock() - defer c.keySetSyncMutex.Unlock() - - // check again, as another goroutine may have been holding - // the lock while updating the keys - if tooSoon() { - return nil - } - - cfg := c.providerConfig.Get() - r := NewRemotePublicKeyRepo(c.httpClient, cfg.KeysEndpoint.String()) - w := &clientKeyRepo{client: c} - _, err := key.Sync(r, w) - c.lastKeySetSync = time.Now().UTC() - - return err -} - -type clientKeyRepo struct { - client *Client -} - -func (r *clientKeyRepo) Set(ks key.KeySet) error { - pks, ok := ks.(*key.PublicKeySet) - if !ok { - return errors.New("unable to cast to PublicKey") - } - r.client.keySet = *pks - return nil -} - -func (c *Client) ClientCredsToken(scope []string) (jose.JWT, error) { - cfg := c.providerConfig.Get() - - if !cfg.SupportsGrantType(oauth2.GrantTypeClientCreds) { - return jose.JWT{}, fmt.Errorf("%v grant type is not supported", oauth2.GrantTypeClientCreds) - } - - oac, err := c.OAuthClient() - if err != nil { - 
return jose.JWT{}, err - } - - t, err := oac.ClientCredsToken(scope) - if err != nil { - return jose.JWT{}, err - } - - jwt, err := jose.ParseJWT(t.IDToken) - if err != nil { - return jose.JWT{}, err - } - - return jwt, c.VerifyJWT(jwt) -} - -// ExchangeAuthCode exchanges an OAuth2 auth code for an OIDC JWT ID token. -func (c *Client) ExchangeAuthCode(code string) (jose.JWT, error) { - oac, err := c.OAuthClient() - if err != nil { - return jose.JWT{}, err - } - - t, err := oac.RequestToken(oauth2.GrantTypeAuthCode, code) - if err != nil { - return jose.JWT{}, err - } - - jwt, err := jose.ParseJWT(t.IDToken) - if err != nil { - return jose.JWT{}, err - } - - return jwt, c.VerifyJWT(jwt) -} - -// RefreshToken uses a refresh token to exchange for a new OIDC JWT ID Token. -func (c *Client) RefreshToken(refreshToken string) (jose.JWT, error) { - oac, err := c.OAuthClient() - if err != nil { - return jose.JWT{}, err - } - - t, err := oac.RequestToken(oauth2.GrantTypeRefreshToken, refreshToken) - if err != nil { - return jose.JWT{}, err - } - - jwt, err := jose.ParseJWT(t.IDToken) - if err != nil { - return jose.JWT{}, err - } - - return jwt, c.VerifyJWT(jwt) -} - -func (c *Client) VerifyJWT(jwt jose.JWT) error { - var keysFunc func() []key.PublicKey - if kID, ok := jwt.KeyID(); ok { - keysFunc = c.keysFuncWithID(kID) - } else { - keysFunc = c.keysFuncAll() - } - - v := NewJWTVerifier( - c.providerConfig.Get().Issuer.String(), - c.credentials.ID, - c.maybeSyncKeys, keysFunc) - - return v.Verify(jwt) -} - -// keysFuncWithID returns a function that retrieves at most unexpired -// public key from the Client that matches the provided ID -func (c *Client) keysFuncWithID(kID string) func() []key.PublicKey { - return func() []key.PublicKey { - c.keySetSyncMutex.RLock() - defer c.keySetSyncMutex.RUnlock() - - if c.keySet.ExpiresAt().Before(time.Now()) { - return []key.PublicKey{} - } - - k := c.keySet.Key(kID) - if k == nil { - return []key.PublicKey{} - } - - return []key.PublicKey{*k} - } -} - -// keysFuncAll returns a function that retrieves all unexpired public -// keys from the Client -func (c *Client) keysFuncAll() func() []key.PublicKey { - return func() []key.PublicKey { - c.keySetSyncMutex.RLock() - defer c.keySetSyncMutex.RUnlock() - - if c.keySet.ExpiresAt().Before(time.Now()) { - return []key.PublicKey{} - } - - return c.keySet.Keys() - } -} - -type providerConfigRepo struct { - mu sync.RWMutex - config ProviderConfig // do not access directly, use Get() -} - -func newProviderConfigRepo(pc ProviderConfig) *providerConfigRepo { - return &providerConfigRepo{sync.RWMutex{}, pc} -} - -// returns an error to implement ProviderConfigSetter -func (r *providerConfigRepo) Set(cfg ProviderConfig) error { - r.mu.Lock() - defer r.mu.Unlock() - r.config = cfg - return nil -} - -func (r *providerConfigRepo) Get() ProviderConfig { - r.mu.RLock() - defer r.mu.RUnlock() - return r.config -} diff --git a/vendor/github.com/coreos/go-oidc/oidc/identity.go b/vendor/github.com/coreos/go-oidc/oidc/identity.go deleted file mode 100644 index 9bfa8e343..000000000 --- a/vendor/github.com/coreos/go-oidc/oidc/identity.go +++ /dev/null @@ -1,44 +0,0 @@ -package oidc - -import ( - "errors" - "time" - - "github.com/coreos/go-oidc/jose" -) - -type Identity struct { - ID string - Name string - Email string - ExpiresAt time.Time -} - -func IdentityFromClaims(claims jose.Claims) (*Identity, error) { - if claims == nil { - return nil, errors.New("nil claim set") - } - - var ident Identity - var err error - var ok bool - - if 
ident.ID, ok, err = claims.StringClaim("sub"); err != nil { - return nil, err - } else if !ok { - return nil, errors.New("missing required claim: sub") - } - - if ident.Email, _, err = claims.StringClaim("email"); err != nil { - return nil, err - } - - exp, ok, err := claims.TimeClaim("exp") - if err != nil { - return nil, err - } else if ok { - ident.ExpiresAt = exp - } - - return &ident, nil -} diff --git a/vendor/github.com/coreos/go-oidc/oidc/interface.go b/vendor/github.com/coreos/go-oidc/oidc/interface.go deleted file mode 100644 index 248cac0b4..000000000 --- a/vendor/github.com/coreos/go-oidc/oidc/interface.go +++ /dev/null @@ -1,3 +0,0 @@ -package oidc - -type LoginFunc func(ident Identity, sessionKey string) (redirectURL string, err error) diff --git a/vendor/github.com/coreos/go-oidc/oidc/key.go b/vendor/github.com/coreos/go-oidc/oidc/key.go deleted file mode 100755 index 82a0f567d..000000000 --- a/vendor/github.com/coreos/go-oidc/oidc/key.go +++ /dev/null @@ -1,67 +0,0 @@ -package oidc - -import ( - "encoding/json" - "errors" - "net/http" - "time" - - phttp "github.com/coreos/go-oidc/http" - "github.com/coreos/go-oidc/jose" - "github.com/coreos/go-oidc/key" -) - -// DefaultPublicKeySetTTL is the default TTL set on the PublicKeySet if no -// Cache-Control header is provided by the JWK Set document endpoint. -const DefaultPublicKeySetTTL = 24 * time.Hour - -// NewRemotePublicKeyRepo is responsible for fetching the JWK Set document. -func NewRemotePublicKeyRepo(hc phttp.Client, ep string) *remotePublicKeyRepo { - return &remotePublicKeyRepo{hc: hc, ep: ep} -} - -type remotePublicKeyRepo struct { - hc phttp.Client - ep string -} - -// Get returns a PublicKeySet fetched from the JWK Set document endpoint. A TTL -// is set on the Key Set to avoid it having to be re-retrieved for every -// encryption event. This TTL is typically controlled by the endpoint returning -// a Cache-Control header, but defaults to 24 hours if no Cache-Control header -// is found. -func (r *remotePublicKeyRepo) Get() (key.KeySet, error) { - req, err := http.NewRequest("GET", r.ep, nil) - if err != nil { - return nil, err - } - - resp, err := r.hc.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var d struct { - Keys []jose.JWK `json:"keys"` - } - if err := json.NewDecoder(resp.Body).Decode(&d); err != nil { - return nil, err - } - - if len(d.Keys) == 0 { - return nil, errors.New("zero keys in response") - } - - ttl, ok, err := phttp.Cacheable(resp.Header) - if err != nil { - return nil, err - } - if !ok { - ttl = DefaultPublicKeySetTTL - } - - exp := time.Now().UTC().Add(ttl) - ks := key.NewPublicKeySet(d.Keys, exp) - return ks, nil -} diff --git a/vendor/github.com/coreos/go-oidc/oidc/provider.go b/vendor/github.com/coreos/go-oidc/oidc/provider.go deleted file mode 100644 index cd67baa45..000000000 --- a/vendor/github.com/coreos/go-oidc/oidc/provider.go +++ /dev/null @@ -1,698 +0,0 @@ -package oidc - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "strings" - "sync" - "time" - - "github.com/coreos/pkg/capnslog" - "github.com/coreos/pkg/timeutil" - "github.com/jonboulle/clockwork" - - phttp "github.com/coreos/go-oidc/http" - "github.com/coreos/go-oidc/oauth2" -) - -var ( - log = capnslog.NewPackageLogger("github.com/coreos/go-oidc", "http") -) - -const ( - // Subject Identifier types defined by the OIDC spec. Specifies if the provider - // should provide the same sub claim value to all clients (public) or a unique - // value for each client (pairwise). 
- // - // See: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes - SubjectTypePublic = "public" - SubjectTypePairwise = "pairwise" -) - -var ( - // Default values for omitted provider config fields. - // - // Use ProviderConfig's Defaults method to fill a provider config with these values. - DefaultGrantTypesSupported = []string{oauth2.GrantTypeAuthCode, oauth2.GrantTypeImplicit} - DefaultResponseModesSupported = []string{"query", "fragment"} - DefaultTokenEndpointAuthMethodsSupported = []string{oauth2.AuthMethodClientSecretBasic} - DefaultClaimTypesSupported = []string{"normal"} -) - -const ( - MaximumProviderConfigSyncInterval = 24 * time.Hour - MinimumProviderConfigSyncInterval = time.Minute - - discoveryConfigPath = "/.well-known/openid-configuration" -) - -// internally configurable for tests -var minimumProviderConfigSyncInterval = MinimumProviderConfigSyncInterval - -var ( - // Ensure ProviderConfig satisfies these interfaces. - _ json.Marshaler = &ProviderConfig{} - _ json.Unmarshaler = &ProviderConfig{} -) - -// ProviderConfig represents the OpenID Provider Metadata specifying what -// configurations a provider supports. -// -// See: http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata -type ProviderConfig struct { - Issuer *url.URL // Required - AuthEndpoint *url.URL // Required - TokenEndpoint *url.URL // Required if grant types other than "implicit" are supported - UserInfoEndpoint *url.URL - KeysEndpoint *url.URL // Required - RegistrationEndpoint *url.URL - EndSessionEndpoint *url.URL - CheckSessionIFrame *url.URL - - // Servers MAY choose not to advertise some supported scope values even when this - // parameter is used, although those defined in OpenID Core SHOULD be listed, if supported. - ScopesSupported []string - // OAuth2.0 response types supported. - ResponseTypesSupported []string // Required - // OAuth2.0 response modes supported. - // - // If omitted, defaults to DefaultResponseModesSupported. - ResponseModesSupported []string - // OAuth2.0 grant types supported. - // - // If omitted, defaults to DefaultGrantTypesSupported. - GrantTypesSupported []string - ACRValuesSupported []string - // SubjectTypesSupported specifies strategies for providing values for the sub claim. - SubjectTypesSupported []string // Required - - // JWA signing and encryption algorith values supported for ID tokens. - IDTokenSigningAlgValues []string // Required - IDTokenEncryptionAlgValues []string - IDTokenEncryptionEncValues []string - - // JWA signing and encryption algorith values supported for user info responses. - UserInfoSigningAlgValues []string - UserInfoEncryptionAlgValues []string - UserInfoEncryptionEncValues []string - - // JWA signing and encryption algorith values supported for request objects. - ReqObjSigningAlgValues []string - ReqObjEncryptionAlgValues []string - ReqObjEncryptionEncValues []string - - TokenEndpointAuthMethodsSupported []string - TokenEndpointAuthSigningAlgValuesSupported []string - DisplayValuesSupported []string - ClaimTypesSupported []string - ClaimsSupported []string - ServiceDocs *url.URL - ClaimsLocalsSupported []string - UILocalsSupported []string - ClaimsParameterSupported bool - RequestParameterSupported bool - RequestURIParamaterSupported bool - RequireRequestURIRegistration bool - - Policy *url.URL - TermsOfService *url.URL - - // Not part of the OpenID Provider Metadata - ExpiresAt time.Time -} - -// Defaults returns a shallow copy of ProviderConfig with default -// values replacing omitted fields. 
-// -// var cfg oidc.ProviderConfig -// // Fill provider config with default values for omitted fields. -// cfg = cfg.Defaults() -// -func (p ProviderConfig) Defaults() ProviderConfig { - setDefault := func(val *[]string, defaultVal []string) { - if len(*val) == 0 { - *val = defaultVal - } - } - setDefault(&p.GrantTypesSupported, DefaultGrantTypesSupported) - setDefault(&p.ResponseModesSupported, DefaultResponseModesSupported) - setDefault(&p.TokenEndpointAuthMethodsSupported, DefaultTokenEndpointAuthMethodsSupported) - setDefault(&p.ClaimTypesSupported, DefaultClaimTypesSupported) - return p -} - -func (p *ProviderConfig) MarshalJSON() ([]byte, error) { - e := p.toEncodableStruct() - return json.Marshal(&e) -} - -func (p *ProviderConfig) UnmarshalJSON(data []byte) error { - var e encodableProviderConfig - if err := json.Unmarshal(data, &e); err != nil { - return err - } - conf, err := e.toStruct() - if err != nil { - return err - } - if err := conf.Valid(); err != nil { - return err - } - *p = conf - return nil -} - -type encodableProviderConfig struct { - Issuer string `json:"issuer"` - AuthEndpoint string `json:"authorization_endpoint"` - TokenEndpoint string `json:"token_endpoint"` - UserInfoEndpoint string `json:"userinfo_endpoint,omitempty"` - KeysEndpoint string `json:"jwks_uri"` - RegistrationEndpoint string `json:"registration_endpoint,omitempty"` - EndSessionEndpoint string `json:"end_session_endpoint,omitempty"` - CheckSessionIFrame string `json:"check_session_iframe,omitempty"` - - // Use 'omitempty' for all slices as per OIDC spec: - // "Claims that return multiple values are represented as JSON arrays. - // Claims with zero elements MUST be omitted from the response." - // http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse - - ScopesSupported []string `json:"scopes_supported,omitempty"` - ResponseTypesSupported []string `json:"response_types_supported,omitempty"` - ResponseModesSupported []string `json:"response_modes_supported,omitempty"` - GrantTypesSupported []string `json:"grant_types_supported,omitempty"` - ACRValuesSupported []string `json:"acr_values_supported,omitempty"` - SubjectTypesSupported []string `json:"subject_types_supported,omitempty"` - - IDTokenSigningAlgValues []string `json:"id_token_signing_alg_values_supported,omitempty"` - IDTokenEncryptionAlgValues []string `json:"id_token_encryption_alg_values_supported,omitempty"` - IDTokenEncryptionEncValues []string `json:"id_token_encryption_enc_values_supported,omitempty"` - UserInfoSigningAlgValues []string `json:"userinfo_signing_alg_values_supported,omitempty"` - UserInfoEncryptionAlgValues []string `json:"userinfo_encryption_alg_values_supported,omitempty"` - UserInfoEncryptionEncValues []string `json:"userinfo_encryption_enc_values_supported,omitempty"` - ReqObjSigningAlgValues []string `json:"request_object_signing_alg_values_supported,omitempty"` - ReqObjEncryptionAlgValues []string `json:"request_object_encryption_alg_values_supported,omitempty"` - ReqObjEncryptionEncValues []string `json:"request_object_encryption_enc_values_supported,omitempty"` - - TokenEndpointAuthMethodsSupported []string `json:"token_endpoint_auth_methods_supported,omitempty"` - TokenEndpointAuthSigningAlgValuesSupported []string `json:"token_endpoint_auth_signing_alg_values_supported,omitempty"` - - DisplayValuesSupported []string `json:"display_values_supported,omitempty"` - ClaimTypesSupported []string `json:"claim_types_supported,omitempty"` - ClaimsSupported []string 
`json:"claims_supported,omitempty"` - ServiceDocs string `json:"service_documentation,omitempty"` - ClaimsLocalsSupported []string `json:"claims_locales_supported,omitempty"` - UILocalsSupported []string `json:"ui_locales_supported,omitempty"` - ClaimsParameterSupported bool `json:"claims_parameter_supported,omitempty"` - RequestParameterSupported bool `json:"request_parameter_supported,omitempty"` - RequestURIParamaterSupported bool `json:"request_uri_parameter_supported,omitempty"` - RequireRequestURIRegistration bool `json:"require_request_uri_registration,omitempty"` - - Policy string `json:"op_policy_uri,omitempty"` - TermsOfService string `json:"op_tos_uri,omitempty"` -} - -func (cfg ProviderConfig) toEncodableStruct() encodableProviderConfig { - return encodableProviderConfig{ - Issuer: uriToString(cfg.Issuer), - AuthEndpoint: uriToString(cfg.AuthEndpoint), - TokenEndpoint: uriToString(cfg.TokenEndpoint), - UserInfoEndpoint: uriToString(cfg.UserInfoEndpoint), - KeysEndpoint: uriToString(cfg.KeysEndpoint), - RegistrationEndpoint: uriToString(cfg.RegistrationEndpoint), - EndSessionEndpoint: uriToString(cfg.EndSessionEndpoint), - CheckSessionIFrame: uriToString(cfg.CheckSessionIFrame), - ScopesSupported: cfg.ScopesSupported, - ResponseTypesSupported: cfg.ResponseTypesSupported, - ResponseModesSupported: cfg.ResponseModesSupported, - GrantTypesSupported: cfg.GrantTypesSupported, - ACRValuesSupported: cfg.ACRValuesSupported, - SubjectTypesSupported: cfg.SubjectTypesSupported, - IDTokenSigningAlgValues: cfg.IDTokenSigningAlgValues, - IDTokenEncryptionAlgValues: cfg.IDTokenEncryptionAlgValues, - IDTokenEncryptionEncValues: cfg.IDTokenEncryptionEncValues, - UserInfoSigningAlgValues: cfg.UserInfoSigningAlgValues, - UserInfoEncryptionAlgValues: cfg.UserInfoEncryptionAlgValues, - UserInfoEncryptionEncValues: cfg.UserInfoEncryptionEncValues, - ReqObjSigningAlgValues: cfg.ReqObjSigningAlgValues, - ReqObjEncryptionAlgValues: cfg.ReqObjEncryptionAlgValues, - ReqObjEncryptionEncValues: cfg.ReqObjEncryptionEncValues, - TokenEndpointAuthMethodsSupported: cfg.TokenEndpointAuthMethodsSupported, - TokenEndpointAuthSigningAlgValuesSupported: cfg.TokenEndpointAuthSigningAlgValuesSupported, - DisplayValuesSupported: cfg.DisplayValuesSupported, - ClaimTypesSupported: cfg.ClaimTypesSupported, - ClaimsSupported: cfg.ClaimsSupported, - ServiceDocs: uriToString(cfg.ServiceDocs), - ClaimsLocalsSupported: cfg.ClaimsLocalsSupported, - UILocalsSupported: cfg.UILocalsSupported, - ClaimsParameterSupported: cfg.ClaimsParameterSupported, - RequestParameterSupported: cfg.RequestParameterSupported, - RequestURIParamaterSupported: cfg.RequestURIParamaterSupported, - RequireRequestURIRegistration: cfg.RequireRequestURIRegistration, - Policy: uriToString(cfg.Policy), - TermsOfService: uriToString(cfg.TermsOfService), - } -} - -func (e encodableProviderConfig) toStruct() (ProviderConfig, error) { - p := stickyErrParser{} - conf := ProviderConfig{ - Issuer: p.parseURI(e.Issuer, "issuer"), - AuthEndpoint: p.parseURI(e.AuthEndpoint, "authorization_endpoint"), - TokenEndpoint: p.parseURI(e.TokenEndpoint, "token_endpoint"), - UserInfoEndpoint: p.parseURI(e.UserInfoEndpoint, "userinfo_endpoint"), - KeysEndpoint: p.parseURI(e.KeysEndpoint, "jwks_uri"), - RegistrationEndpoint: p.parseURI(e.RegistrationEndpoint, "registration_endpoint"), - EndSessionEndpoint: p.parseURI(e.EndSessionEndpoint, "end_session_endpoint"), - CheckSessionIFrame: p.parseURI(e.CheckSessionIFrame, "check_session_iframe"), - ScopesSupported: e.ScopesSupported, - 
ResponseTypesSupported: e.ResponseTypesSupported, - ResponseModesSupported: e.ResponseModesSupported, - GrantTypesSupported: e.GrantTypesSupported, - ACRValuesSupported: e.ACRValuesSupported, - SubjectTypesSupported: e.SubjectTypesSupported, - IDTokenSigningAlgValues: e.IDTokenSigningAlgValues, - IDTokenEncryptionAlgValues: e.IDTokenEncryptionAlgValues, - IDTokenEncryptionEncValues: e.IDTokenEncryptionEncValues, - UserInfoSigningAlgValues: e.UserInfoSigningAlgValues, - UserInfoEncryptionAlgValues: e.UserInfoEncryptionAlgValues, - UserInfoEncryptionEncValues: e.UserInfoEncryptionEncValues, - ReqObjSigningAlgValues: e.ReqObjSigningAlgValues, - ReqObjEncryptionAlgValues: e.ReqObjEncryptionAlgValues, - ReqObjEncryptionEncValues: e.ReqObjEncryptionEncValues, - TokenEndpointAuthMethodsSupported: e.TokenEndpointAuthMethodsSupported, - TokenEndpointAuthSigningAlgValuesSupported: e.TokenEndpointAuthSigningAlgValuesSupported, - DisplayValuesSupported: e.DisplayValuesSupported, - ClaimTypesSupported: e.ClaimTypesSupported, - ClaimsSupported: e.ClaimsSupported, - ServiceDocs: p.parseURI(e.ServiceDocs, "service_documentation"), - ClaimsLocalsSupported: e.ClaimsLocalsSupported, - UILocalsSupported: e.UILocalsSupported, - ClaimsParameterSupported: e.ClaimsParameterSupported, - RequestParameterSupported: e.RequestParameterSupported, - RequestURIParamaterSupported: e.RequestURIParamaterSupported, - RequireRequestURIRegistration: e.RequireRequestURIRegistration, - Policy: p.parseURI(e.Policy, "op_policy-uri"), - TermsOfService: p.parseURI(e.TermsOfService, "op_tos_uri"), - } - if p.firstErr != nil { - return ProviderConfig{}, p.firstErr - } - return conf, nil -} - -// Empty returns if a ProviderConfig holds no information. -// -// This case generally indicates a ProviderConfigGetter has experienced an error -// and has nothing to report. -func (p ProviderConfig) Empty() bool { - return p.Issuer == nil -} - -func contains(sli []string, ele string) bool { - for _, s := range sli { - if s == ele { - return true - } - } - return false -} - -// Valid determines if a ProviderConfig conforms with the OIDC specification. -// If Valid returns successfully it guarantees required field are non-nil and -// URLs are well formed. -// -// Valid is called by UnmarshalJSON. -// -// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for -// URLs fields where the OIDC spec requires it. This may change in future releases -// of this package. 
See: https://github.com/coreos/go-oidc/issues/34 -func (p ProviderConfig) Valid() error { - grantTypes := p.GrantTypesSupported - if len(grantTypes) == 0 { - grantTypes = DefaultGrantTypesSupported - } - implicitOnly := true - for _, grantType := range grantTypes { - if grantType != oauth2.GrantTypeImplicit { - implicitOnly = false - break - } - } - - if len(p.SubjectTypesSupported) == 0 { - return errors.New("missing required field subject_types_supported") - } - if len(p.IDTokenSigningAlgValues) == 0 { - return errors.New("missing required field id_token_signing_alg_values_supported") - } - - if len(p.ScopesSupported) != 0 && !contains(p.ScopesSupported, "openid") { - return errors.New("scoped_supported must be unspecified or include 'openid'") - } - - if !contains(p.IDTokenSigningAlgValues, "RS256") { - return errors.New("id_token_signing_alg_values_supported must include 'RS256'") - } - if contains(p.TokenEndpointAuthMethodsSupported, "none") { - return errors.New("token_endpoint_auth_signing_alg_values_supported cannot include 'none'") - } - - uris := []struct { - val *url.URL - name string - required bool - }{ - {p.Issuer, "issuer", true}, - {p.AuthEndpoint, "authorization_endpoint", true}, - {p.TokenEndpoint, "token_endpoint", !implicitOnly}, - {p.UserInfoEndpoint, "userinfo_endpoint", false}, - {p.KeysEndpoint, "jwks_uri", true}, - {p.RegistrationEndpoint, "registration_endpoint", false}, - {p.EndSessionEndpoint, "end_session_endpoint", false}, - {p.CheckSessionIFrame, "check_session_iframe", false}, - {p.ServiceDocs, "service_documentation", false}, - {p.Policy, "op_policy_uri", false}, - {p.TermsOfService, "op_tos_uri", false}, - } - - for _, uri := range uris { - if uri.val == nil { - if !uri.required { - continue - } - return fmt.Errorf("empty value for required uri field %s", uri.name) - } - if uri.val.Host == "" { - return fmt.Errorf("no host for uri field %s", uri.name) - } - if uri.val.Scheme != "http" && uri.val.Scheme != "https" { - return fmt.Errorf("uri field %s schemeis not http or https", uri.name) - } - } - return nil -} - -// Supports determines if provider supports a client given their respective metadata. -func (p ProviderConfig) Supports(c ClientMetadata) error { - if err := p.Valid(); err != nil { - return fmt.Errorf("invalid provider config: %v", err) - } - if err := c.Valid(); err != nil { - return fmt.Errorf("invalid client config: %v", err) - } - - // Fill default values for omitted fields - c = c.Defaults() - p = p.Defaults() - - // Do the supported values list the requested one? 
- supports := []struct { - supported []string - requested string - name string - }{ - {p.IDTokenSigningAlgValues, c.IDTokenResponseOptions.SigningAlg, "id_token_signed_response_alg"}, - {p.IDTokenEncryptionAlgValues, c.IDTokenResponseOptions.EncryptionAlg, "id_token_encryption_response_alg"}, - {p.IDTokenEncryptionEncValues, c.IDTokenResponseOptions.EncryptionEnc, "id_token_encryption_response_enc"}, - {p.UserInfoSigningAlgValues, c.UserInfoResponseOptions.SigningAlg, "userinfo_signed_response_alg"}, - {p.UserInfoEncryptionAlgValues, c.UserInfoResponseOptions.EncryptionAlg, "userinfo_encryption_response_alg"}, - {p.UserInfoEncryptionEncValues, c.UserInfoResponseOptions.EncryptionEnc, "userinfo_encryption_response_enc"}, - {p.ReqObjSigningAlgValues, c.RequestObjectOptions.SigningAlg, "request_object_signing_alg"}, - {p.ReqObjEncryptionAlgValues, c.RequestObjectOptions.EncryptionAlg, "request_object_encryption_alg"}, - {p.ReqObjEncryptionEncValues, c.RequestObjectOptions.EncryptionEnc, "request_object_encryption_enc"}, - } - for _, field := range supports { - if field.requested == "" { - continue - } - if !contains(field.supported, field.requested) { - return fmt.Errorf("provider does not support requested value for field %s", field.name) - } - } - - stringsEqual := func(s1, s2 string) bool { return s1 == s2 } - - // For lists, are the list of requested values a subset of the supported ones? - supportsAll := []struct { - supported []string - requested []string - name string - // OAuth2.0 response_type can be space separated lists where order doesn't matter. - // For example "id_token token" is the same as "token id_token" - // Support a custom compare method. - comp func(s1, s2 string) bool - }{ - {p.GrantTypesSupported, c.GrantTypes, "grant_types", stringsEqual}, - {p.ResponseTypesSupported, c.ResponseTypes, "response_type", oauth2.ResponseTypesEqual}, - } - for _, field := range supportsAll { - requestLoop: - for _, req := range field.requested { - for _, sup := range field.supported { - if field.comp(req, sup) { - continue requestLoop - } - } - return fmt.Errorf("provider does not support requested value for field %s", field.name) - } - } - - // TODO(ericchiang): Are there more checks we feel comfortable with begin strict about? 
- - return nil -} - -func (p ProviderConfig) SupportsGrantType(grantType string) bool { - var supported []string - if len(p.GrantTypesSupported) == 0 { - supported = DefaultGrantTypesSupported - } else { - supported = p.GrantTypesSupported - } - - for _, t := range supported { - if t == grantType { - return true - } - } - return false -} - -type ProviderConfigGetter interface { - Get() (ProviderConfig, error) -} - -type ProviderConfigSetter interface { - Set(ProviderConfig) error -} - -type ProviderConfigSyncer struct { - from ProviderConfigGetter - to ProviderConfigSetter - clock clockwork.Clock - - initialSyncDone bool - initialSyncWait sync.WaitGroup -} - -func NewProviderConfigSyncer(from ProviderConfigGetter, to ProviderConfigSetter) *ProviderConfigSyncer { - return &ProviderConfigSyncer{ - from: from, - to: to, - clock: clockwork.NewRealClock(), - } -} - -func (s *ProviderConfigSyncer) Run() chan struct{} { - stop := make(chan struct{}) - - var next pcsStepper - next = &pcsStepNext{aft: time.Duration(0)} - - s.initialSyncWait.Add(1) - go func() { - for { - select { - case <-s.clock.After(next.after()): - next = next.step(s.sync) - case <-stop: - return - } - } - }() - - return stop -} - -func (s *ProviderConfigSyncer) WaitUntilInitialSync() { - s.initialSyncWait.Wait() -} - -func (s *ProviderConfigSyncer) sync() (time.Duration, error) { - cfg, err := s.from.Get() - if err != nil { - return 0, err - } - - if err = s.to.Set(cfg); err != nil { - return 0, fmt.Errorf("error setting provider config: %v", err) - } - - if !s.initialSyncDone { - s.initialSyncWait.Done() - s.initialSyncDone = true - } - - log.Debugf("Updating provider config: config=%#v", cfg) - - return nextSyncAfter(cfg.ExpiresAt, s.clock), nil -} - -type pcsStepFunc func() (time.Duration, error) - -type pcsStepper interface { - after() time.Duration - step(pcsStepFunc) pcsStepper -} - -type pcsStepNext struct { - aft time.Duration -} - -func (n *pcsStepNext) after() time.Duration { - return n.aft -} - -func (n *pcsStepNext) step(fn pcsStepFunc) (next pcsStepper) { - ttl, err := fn() - if err == nil { - next = &pcsStepNext{aft: ttl} - log.Debugf("Synced provider config, next attempt in %v", next.after()) - } else { - next = &pcsStepRetry{aft: time.Second} - log.Errorf("Provider config sync failed, retrying in %v: %v", next.after(), err) - } - return -} - -type pcsStepRetry struct { - aft time.Duration -} - -func (r *pcsStepRetry) after() time.Duration { - return r.aft -} - -func (r *pcsStepRetry) step(fn pcsStepFunc) (next pcsStepper) { - ttl, err := fn() - if err == nil { - next = &pcsStepNext{aft: ttl} - log.Infof("Provider config sync no longer failing") - } else { - next = &pcsStepRetry{aft: timeutil.ExpBackoff(r.aft, time.Minute)} - log.Errorf("Provider config sync still failing, retrying in %v: %v", next.after(), err) - } - return -} - -func nextSyncAfter(exp time.Time, clock clockwork.Clock) time.Duration { - if exp.IsZero() { - return MaximumProviderConfigSyncInterval - } - - t := exp.Sub(clock.Now()) / 2 - if t > MaximumProviderConfigSyncInterval { - t = MaximumProviderConfigSyncInterval - } else if t < minimumProviderConfigSyncInterval { - t = minimumProviderConfigSyncInterval - } - - return t -} - -type httpProviderConfigGetter struct { - hc phttp.Client - issuerURL string - clock clockwork.Clock -} - -func NewHTTPProviderConfigGetter(hc phttp.Client, issuerURL string) *httpProviderConfigGetter { - return &httpProviderConfigGetter{ - hc: hc, - issuerURL: issuerURL, - clock: clockwork.NewRealClock(), - } -} - 
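For orientation while reviewing this removal, the following is a minimal sketch (not part of the diff) of how a consumer might have driven the provider-config discovery and sync machinery deleted above. It uses only APIs visible in this file (NewHTTPProviderConfigGetter, NewProviderConfigSyncer, ProviderConfigSetter, WaitUntilInitialSync); the issuer URL and the memorySetter helper are hypothetical stand-ins for the unexported providerConfigRepo the real Client uses.

package main

import (
	"fmt"
	"net/http"
	"sync"

	"github.com/coreos/go-oidc/oidc"
)

// memorySetter is a hypothetical ProviderConfigSetter that just stores the
// most recently synced config.
type memorySetter struct {
	mu  sync.Mutex
	cfg oidc.ProviderConfig
}

func (m *memorySetter) Set(cfg oidc.ProviderConfig) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.cfg = cfg
	return nil
}

func main() {
	// Fetch <issuer>/.well-known/openid-configuration and keep it refreshed
	// on a TTL derived from the endpoint's Cache-Control header.
	getter := oidc.NewHTTPProviderConfigGetter(http.DefaultClient, "https://accounts.example.com")
	setter := &memorySetter{}

	syncer := oidc.NewProviderConfigSyncer(getter, setter)
	stop := syncer.Run()
	defer close(stop)

	syncer.WaitUntilInitialSync() // blocks until the first successful fetch

	setter.mu.Lock()
	fmt.Println("issuer:", setter.cfg.Issuer)
	setter.mu.Unlock()
}

Retry backoff and the resync interval are handled by the pcsStepNext/pcsStepRetry steppers and nextSyncAfter shown in the deleted source above; the sketch only wires the pieces together.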
-func (r *httpProviderConfigGetter) Get() (cfg ProviderConfig, err error) { - // If the Issuer value contains a path component, any terminating / MUST be removed before - // appending /.well-known/openid-configuration. - // https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationRequest - discoveryURL := strings.TrimSuffix(r.issuerURL, "/") + discoveryConfigPath - req, err := http.NewRequest("GET", discoveryURL, nil) - if err != nil { - return - } - - resp, err := r.hc.Do(req) - if err != nil { - return - } - defer resp.Body.Close() - - if err = json.NewDecoder(resp.Body).Decode(&cfg); err != nil { - return - } - - var ttl time.Duration - var ok bool - ttl, ok, err = phttp.Cacheable(resp.Header) - if err != nil { - return - } else if ok { - cfg.ExpiresAt = r.clock.Now().UTC().Add(ttl) - } - - // The issuer value returned MUST be identical to the Issuer URL that was directly used to retrieve the configuration information. - // http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationValidation - if !urlEqual(cfg.Issuer.String(), r.issuerURL) { - err = fmt.Errorf(`"issuer" in config (%v) does not match provided issuer URL (%v)`, cfg.Issuer, r.issuerURL) - return - } - - return -} - -func FetchProviderConfig(hc phttp.Client, issuerURL string) (ProviderConfig, error) { - if hc == nil { - hc = http.DefaultClient - } - - g := NewHTTPProviderConfigGetter(hc, issuerURL) - return g.Get() -} - -func WaitForProviderConfig(hc phttp.Client, issuerURL string) (pcfg ProviderConfig) { - return waitForProviderConfig(hc, issuerURL, clockwork.NewRealClock()) -} - -func waitForProviderConfig(hc phttp.Client, issuerURL string, clock clockwork.Clock) (pcfg ProviderConfig) { - var sleep time.Duration - var err error - for { - pcfg, err = FetchProviderConfig(hc, issuerURL) - if err == nil { - break - } - - sleep = timeutil.ExpBackoff(sleep, time.Minute) - fmt.Printf("Failed fetching provider config, trying again in %v: %v\n", sleep, err) - time.Sleep(sleep) - } - - return -} diff --git a/vendor/github.com/coreos/go-oidc/oidc/transport.go b/vendor/github.com/coreos/go-oidc/oidc/transport.go deleted file mode 100644 index 61c926d7f..000000000 --- a/vendor/github.com/coreos/go-oidc/oidc/transport.go +++ /dev/null @@ -1,88 +0,0 @@ -package oidc - -import ( - "fmt" - "net/http" - "sync" - - phttp "github.com/coreos/go-oidc/http" - "github.com/coreos/go-oidc/jose" -) - -type TokenRefresher interface { - // Verify checks if the provided token is currently valid or not. - Verify(jose.JWT) error - - // Refresh attempts to authenticate and retrieve a new token. 
- Refresh() (jose.JWT, error) -} - -type ClientCredsTokenRefresher struct { - Issuer string - OIDCClient *Client -} - -func (c *ClientCredsTokenRefresher) Verify(jwt jose.JWT) (err error) { - _, err = VerifyClientClaims(jwt, c.Issuer) - return -} - -func (c *ClientCredsTokenRefresher) Refresh() (jwt jose.JWT, err error) { - if err = c.OIDCClient.Healthy(); err != nil { - err = fmt.Errorf("unable to authenticate, unhealthy OIDC client: %v", err) - return - } - - jwt, err = c.OIDCClient.ClientCredsToken([]string{"openid"}) - if err != nil { - err = fmt.Errorf("unable to verify auth code with issuer: %v", err) - return - } - - return -} - -type AuthenticatedTransport struct { - TokenRefresher - http.RoundTripper - - mu sync.Mutex - jwt jose.JWT -} - -func (t *AuthenticatedTransport) verifiedJWT() (jose.JWT, error) { - t.mu.Lock() - defer t.mu.Unlock() - - if t.TokenRefresher.Verify(t.jwt) == nil { - return t.jwt, nil - } - - jwt, err := t.TokenRefresher.Refresh() - if err != nil { - return jose.JWT{}, fmt.Errorf("unable to acquire valid JWT: %v", err) - } - - t.jwt = jwt - return t.jwt, nil -} - -// SetJWT sets the JWT held by the Transport. -// This is useful for cases in which you want to set an initial JWT. -func (t *AuthenticatedTransport) SetJWT(jwt jose.JWT) { - t.mu.Lock() - defer t.mu.Unlock() - - t.jwt = jwt -} - -func (t *AuthenticatedTransport) RoundTrip(r *http.Request) (*http.Response, error) { - jwt, err := t.verifiedJWT() - if err != nil { - return nil, err - } - - req := phttp.CopyRequest(r) - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", jwt.Encode())) - return t.RoundTripper.RoundTrip(req) -} diff --git a/vendor/github.com/coreos/go-oidc/oidc/util.go b/vendor/github.com/coreos/go-oidc/oidc/util.go deleted file mode 100644 index f2a5a195e..000000000 --- a/vendor/github.com/coreos/go-oidc/oidc/util.go +++ /dev/null @@ -1,109 +0,0 @@ -package oidc - -import ( - "crypto/rand" - "encoding/base64" - "errors" - "fmt" - "net" - "net/http" - "net/url" - "strings" - "time" - - "github.com/coreos/go-oidc/jose" -) - -// RequestTokenExtractor funcs extract a raw encoded token from a request. -type RequestTokenExtractor func(r *http.Request) (string, error) - -// ExtractBearerToken is a RequestTokenExtractor which extracts a bearer token from a request's -// Authorization header. -func ExtractBearerToken(r *http.Request) (string, error) { - ah := r.Header.Get("Authorization") - if ah == "" { - return "", errors.New("missing Authorization header") - } - - if len(ah) <= 6 || strings.ToUpper(ah[0:6]) != "BEARER" { - return "", errors.New("should be a bearer token") - } - - val := ah[7:] - if len(val) == 0 { - return "", errors.New("bearer token is empty") - } - - return val, nil -} - -// CookieTokenExtractor returns a RequestTokenExtractor which extracts a token from the named cookie in a request. 
-func CookieTokenExtractor(cookieName string) RequestTokenExtractor { - return func(r *http.Request) (string, error) { - ck, err := r.Cookie(cookieName) - if err != nil { - return "", fmt.Errorf("token cookie not found in request: %v", err) - } - - if ck.Value == "" { - return "", errors.New("token cookie found but is empty") - } - - return ck.Value, nil - } -} - -func NewClaims(iss, sub string, aud interface{}, iat, exp time.Time) jose.Claims { - return jose.Claims{ - // required - "iss": iss, - "sub": sub, - "aud": aud, - "iat": iat.Unix(), - "exp": exp.Unix(), - } -} - -func GenClientID(hostport string) (string, error) { - b, err := randBytes(32) - if err != nil { - return "", err - } - - var host string - if strings.Contains(hostport, ":") { - host, _, err = net.SplitHostPort(hostport) - if err != nil { - return "", err - } - } else { - host = hostport - } - - return fmt.Sprintf("%s@%s", base64.URLEncoding.EncodeToString(b), host), nil -} - -func randBytes(n int) ([]byte, error) { - b := make([]byte, n) - got, err := rand.Read(b) - if err != nil { - return nil, err - } else if n != got { - return nil, errors.New("unable to generate enough random data") - } - return b, nil -} - -// urlEqual checks two urls for equality using only the host and path portions. -func urlEqual(url1, url2 string) bool { - u1, err := url.Parse(url1) - if err != nil { - return false - } - u2, err := url.Parse(url2) - if err != nil { - return false - } - - return strings.ToLower(u1.Host+u1.Path) == strings.ToLower(u2.Host+u2.Path) -} diff --git a/vendor/github.com/coreos/go-oidc/oidc/verification.go b/vendor/github.com/coreos/go-oidc/oidc/verification.go deleted file mode 100644 index 002413047..000000000 --- a/vendor/github.com/coreos/go-oidc/oidc/verification.go +++ /dev/null @@ -1,188 +0,0 @@ -package oidc - -import ( - "errors" - "fmt" - "time" - - "github.com/jonboulle/clockwork" - - "github.com/coreos/go-oidc/jose" - "github.com/coreos/go-oidc/key" -) - -func VerifySignature(jwt jose.JWT, keys []key.PublicKey) (bool, error) { - jwtBytes := []byte(jwt.Data()) - for _, k := range keys { - v, err := k.Verifier() - if err != nil { - return false, err - } - if v.Verify(jwt.Signature, jwtBytes) == nil { - return true, nil - } - } - return false, nil -} - -// containsString returns true if the given string(needle) is found -// in the string array(haystack). -func containsString(needle string, haystack []string) bool { - for _, v := range haystack { - if v == needle { - return true - } - } - return false -} - -// Verify claims in accordance with OIDC spec -// http://openid.net/specs/openid-connect-basic-1_0.html#IDTokenValidation -func VerifyClaims(jwt jose.JWT, issuer, clientID string) error { - now := time.Now().UTC() - - claims, err := jwt.Claims() - if err != nil { - return err - } - - ident, err := IdentityFromClaims(claims) - if err != nil { - return err - } - - if ident.ExpiresAt.Before(now) { - return errors.New("token is expired") - } - - // iss REQUIRED. Issuer Identifier for the Issuer of the response. - // The iss value is a case sensitive URL using the https scheme that contains scheme, - // host, and optionally, port number and path components and no query or fragment components. - if iss, exists := claims["iss"].(string); exists { - if !urlEqual(iss, issuer) { - return fmt.Errorf("invalid claim value: 'iss'. expected=%s, found=%s.", issuer, iss) - } - } else { - return errors.New("missing claim: 'iss'") - } - - // iat REQUIRED. Time at which the JWT was issued. 
- // Its value is a JSON number representing the number of seconds from 1970-01-01T0:0:0Z - // as measured in UTC until the date/time. - if _, exists := claims["iat"].(float64); !exists { - return errors.New("missing claim: 'iat'") - } - - // aud REQUIRED. Audience(s) that this ID Token is intended for. - // It MUST contain the OAuth 2.0 client_id of the Relying Party as an audience value. - // It MAY also contain identifiers for other audiences. In the general case, the aud - // value is an array of case sensitive strings. In the common special case when there - // is one audience, the aud value MAY be a single case sensitive string. - if aud, ok, err := claims.StringClaim("aud"); err == nil && ok { - if aud != clientID { - return fmt.Errorf("invalid claims, 'aud' claim and 'client_id' do not match, aud=%s, client_id=%s", aud, clientID) - } - } else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok { - if !containsString(clientID, aud) { - return fmt.Errorf("invalid claims, cannot find 'client_id' in 'aud' claim, aud=%v, client_id=%s", aud, clientID) - } - } else { - return errors.New("invalid claim value: 'aud' is required, and should be either string or string array") - } - - return nil -} - -// VerifyClientClaims verifies all the required claims are valid for a "client credentials" JWT. -// Returns the client ID if valid, or an error if invalid. -func VerifyClientClaims(jwt jose.JWT, issuer string) (string, error) { - claims, err := jwt.Claims() - if err != nil { - return "", fmt.Errorf("failed to parse JWT claims: %v", err) - } - - iss, ok, err := claims.StringClaim("iss") - if err != nil { - return "", fmt.Errorf("failed to parse 'iss' claim: %v", err) - } else if !ok { - return "", errors.New("missing required 'iss' claim") - } else if !urlEqual(iss, issuer) { - return "", fmt.Errorf("'iss' claim does not match expected issuer, iss=%s", iss) - } - - sub, ok, err := claims.StringClaim("sub") - if err != nil { - return "", fmt.Errorf("failed to parse 'sub' claim: %v", err) - } else if !ok { - return "", errors.New("missing required 'sub' claim") - } - - if aud, ok, err := claims.StringClaim("aud"); err == nil && ok { - if aud != sub { - return "", fmt.Errorf("invalid claims, 'aud' claim and 'sub' claim do not match, aud=%s, sub=%s", aud, sub) - } - } else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok { - if !containsString(sub, aud) { - return "", fmt.Errorf("invalid claims, cannot find 'sud' in 'aud' claim, aud=%v, sub=%s", aud, sub) - } - } else { - return "", errors.New("invalid claim value: 'aud' is required, and should be either string or string array") - } - - now := time.Now().UTC() - exp, ok, err := claims.TimeClaim("exp") - if err != nil { - return "", fmt.Errorf("failed to parse 'exp' claim: %v", err) - } else if !ok { - return "", errors.New("missing required 'exp' claim") - } else if exp.Before(now) { - return "", fmt.Errorf("token already expired at: %v", exp) - } - - return sub, nil -} - -type JWTVerifier struct { - issuer string - clientID string - syncFunc func() error - keysFunc func() []key.PublicKey - clock clockwork.Clock -} - -func NewJWTVerifier(issuer, clientID string, syncFunc func() error, keysFunc func() []key.PublicKey) JWTVerifier { - return JWTVerifier{ - issuer: issuer, - clientID: clientID, - syncFunc: syncFunc, - keysFunc: keysFunc, - clock: clockwork.NewRealClock(), - } -} - -func (v *JWTVerifier) Verify(jwt jose.JWT) error { - ok, err := VerifySignature(jwt, v.keysFunc()) - if ok { - goto SignatureVerified - } else if 
err != nil { - return fmt.Errorf("oidc: JWT signature verification failed: %v", err) - } - - if err = v.syncFunc(); err != nil { - return fmt.Errorf("oidc: failed syncing KeySet: %v", err) - } - - ok, err = VerifySignature(jwt, v.keysFunc()) - if err != nil { - return fmt.Errorf("oidc: JWT signature verification failed: %v", err) - } else if !ok { - return errors.New("oidc: unable to verify JWT signature: no matching keys") - } - -SignatureVerified: - if err := VerifyClaims(jwt, v.issuer, v.clientID); err != nil { - return fmt.Errorf("oidc: JWT claims invalid: %v", err) - } - - return nil -} diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE deleted file mode 100644 index 37ec93a14..000000000 --- a/vendor/github.com/coreos/go-systemd/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. 
For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. 
You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/coreos/go-systemd/journal/journal.go b/vendor/github.com/coreos/go-systemd/journal/journal.go deleted file mode 100644 index 7f434990d..000000000 --- a/vendor/github.com/coreos/go-systemd/journal/journal.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package journal provides write bindings to the local systemd journal. -// It is implemented in pure Go and connects to the journal directly over its -// unix socket. -// -// To read from the journal, see the "sdjournal" package, which wraps the -// sd-journal a C API. -// -// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html -package journal - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "strconv" - "strings" - "syscall" -) - -// Priority of a journal message -type Priority int - -const ( - PriEmerg Priority = iota - PriAlert - PriCrit - PriErr - PriWarning - PriNotice - PriInfo - PriDebug -) - -var conn net.Conn - -func init() { - var err error - conn, err = net.Dial("unixgram", "/run/systemd/journal/socket") - if err != nil { - conn = nil - } -} - -// Enabled returns true if the local systemd journal is available for logging -func Enabled() bool { - return conn != nil -} - -// Send a message to the local systemd journal. vars is a map of journald -// fields to values. Fields must be composed of uppercase letters, numbers, -// and underscores, but must not start with an underscore. Within these -// restrictions, any arbitrary field name may be used. Some names have special -// significance: see the journalctl documentation -// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) -// for more details. vars may be nil. 
-func Send(message string, priority Priority, vars map[string]string) error { - if conn == nil { - return journalError("could not connect to journald socket") - } - - data := new(bytes.Buffer) - appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) - appendVariable(data, "MESSAGE", message) - for k, v := range vars { - appendVariable(data, k, v) - } - - _, err := io.Copy(conn, data) - if err != nil && isSocketSpaceError(err) { - file, err := tempFd() - if err != nil { - return journalError(err.Error()) - } - defer file.Close() - _, err = io.Copy(file, data) - if err != nil { - return journalError(err.Error()) - } - - rights := syscall.UnixRights(int(file.Fd())) - - /* this connection should always be a UnixConn, but better safe than sorry */ - unixConn, ok := conn.(*net.UnixConn) - if !ok { - return journalError("can't send file through non-Unix connection") - } - unixConn.WriteMsgUnix([]byte{}, rights, nil) - } else if err != nil { - return journalError(err.Error()) - } - return nil -} - -// Print prints a message to the local systemd journal using Send(). -func Print(priority Priority, format string, a ...interface{}) error { - return Send(fmt.Sprintf(format, a...), priority, nil) -} - -func appendVariable(w io.Writer, name, value string) { - if !validVarName(name) { - journalError("variable name contains invalid character, ignoring") - } - if strings.ContainsRune(value, '\n') { - /* When the value contains a newline, we write: - * - the variable name, followed by a newline - * - the size (in 64bit little endian format) - * - the data, followed by a newline - */ - fmt.Fprintln(w, name) - binary.Write(w, binary.LittleEndian, uint64(len(value))) - fmt.Fprintln(w, value) - } else { - /* just write the variable and value all on one line */ - fmt.Fprintf(w, "%s=%s\n", name, value) - } -} - -func validVarName(name string) bool { - /* The variable name must be in uppercase and consist only of characters, - * numbers and underscores, and may not begin with an underscore. (from the docs) - */ - - valid := name[0] != '_' - for _, c := range name { - valid = valid && ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_' - } - return valid -} - -func isSocketSpaceError(err error) bool { - opErr, ok := err.(*net.OpError) - if !ok { - return false - } - - sysErr, ok := opErr.Err.(syscall.Errno) - if !ok { - return false - } - - return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS -} - -func tempFd() (*os.File, error) { - file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") - if err != nil { - return nil, err - } - syscall.Unlink(file.Name()) - if err != nil { - return nil, err - } - return file, nil -} - -func journalError(s string) error { - s = "journal error: " + s - fmt.Fprintln(os.Stderr, s) - return errors.New(s) -} diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE deleted file mode 100644 index e06d20818..000000000 --- a/vendor/github.com/coreos/pkg/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE deleted file mode 100644 index b39ddfa5c..000000000 --- a/vendor/github.com/coreos/pkg/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2014 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/pkg/capnslog/README.md b/vendor/github.com/coreos/pkg/capnslog/README.md deleted file mode 100644 index 81efb1fb6..000000000 --- a/vendor/github.com/coreos/pkg/capnslog/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# capnslog, the CoreOS logging package - -There are far too many logging packages out there, with varying degrees of licenses, far too many features (colorization, all sorts of log frameworks) or are just a pain to use (lack of `Fatalln()`?). -capnslog provides a simple but consistent logging interface suitable for all kinds of projects. - -### Design Principles - -##### `package main` is the place where logging gets turned on and routed - -A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak. - -##### All log options are runtime-configurable. - -Still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly. - -##### There is one log object per package. It is registered under its repository and package name. - -`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs. - -##### There is *one* output stream, and it is an `io.Writer` composed with a formatter. - -Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer. - -Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependant. These are, at best, provided as options, but more likely, provided by your application. - -##### Log objects are an interface - -An object knows best how to print itself. Log objects can collect more interesting metadata if they wish, however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed. - -##### Log levels have specific meanings: - - * Critical: Unrecoverable. Must fail. - * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost - * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning. - * Notice: Normal, but important (uncommon) log information. - * Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations. - * Debug: Everything is still fine, but even common operations may be logged, and less helpful but more quantity of notices. - * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query. 
- diff --git a/vendor/github.com/coreos/pkg/capnslog/formatters.go b/vendor/github.com/coreos/pkg/capnslog/formatters.go deleted file mode 100644 index b305a845f..000000000 --- a/vendor/github.com/coreos/pkg/capnslog/formatters.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import ( - "bufio" - "fmt" - "io" - "log" - "runtime" - "strings" - "time" -) - -type Formatter interface { - Format(pkg string, level LogLevel, depth int, entries ...interface{}) - Flush() -} - -func NewStringFormatter(w io.Writer) Formatter { - return &StringFormatter{ - w: bufio.NewWriter(w), - } -} - -type StringFormatter struct { - w *bufio.Writer -} - -func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) { - now := time.Now().UTC() - s.w.WriteString(now.Format(time.RFC3339)) - s.w.WriteByte(' ') - writeEntries(s.w, pkg, l, i, entries...) - s.Flush() -} - -func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) { - if pkg != "" { - w.WriteString(pkg + ": ") - } - str := fmt.Sprint(entries...) - endsInNL := strings.HasSuffix(str, "\n") - w.WriteString(str) - if !endsInNL { - w.WriteString("\n") - } -} - -func (s *StringFormatter) Flush() { - s.w.Flush() -} - -func NewPrettyFormatter(w io.Writer, debug bool) Formatter { - return &PrettyFormatter{ - w: bufio.NewWriter(w), - debug: debug, - } -} - -type PrettyFormatter struct { - w *bufio.Writer - debug bool -} - -func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) { - now := time.Now() - ts := now.Format("2006-01-02 15:04:05") - c.w.WriteString(ts) - ms := now.Nanosecond() / 1000 - c.w.WriteString(fmt.Sprintf(".%06d", ms)) - if c.debug { - _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. - if !ok { - file = "???" - line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } - } - if line < 0 { - line = 0 // not a real line number - } - c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line)) - } - c.w.WriteString(fmt.Sprint(" ", l.Char(), " | ")) - writeEntries(c.w, pkg, l, depth, entries...) - c.Flush() -} - -func (c *PrettyFormatter) Flush() { - c.w.Flush() -} - -// LogFormatter emulates the form of the traditional built-in logger. -type LogFormatter struct { - logger *log.Logger - prefix string -} - -// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the -// golang log package to actually do the logging work so that logs look similar. -func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter { - return &LogFormatter{ - logger: log.New(w, "", flag), // don't use prefix here - prefix: prefix, // save it instead - } -} - -// Format builds a log message for the LogFormatter. The LogLevel is ignored. -func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) { - str := fmt.Sprint(entries...) 
- prefix := lf.prefix - if pkg != "" { - prefix = fmt.Sprintf("%s%s: ", prefix, pkg) - } - lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5 -} - -// Flush is included so that the interface is complete, but is a no-op. -func (lf *LogFormatter) Flush() { - // noop -} - -// NilFormatter is a no-op log formatter that does nothing. -type NilFormatter struct { -} - -// NewNilFormatter is a helper to produce a new LogFormatter struct. It logs no -// messages so that you can cause part of your logging to be silent. -func NewNilFormatter() Formatter { - return &NilFormatter{} -} - -// Format does nothing. -func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) { - // noop -} - -// Flush is included so that the interface is complete, but is a no-op. -func (_ *NilFormatter) Flush() { - // noop -} diff --git a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go deleted file mode 100644 index 426603ef3..000000000 --- a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import ( - "bufio" - "bytes" - "io" - "os" - "runtime" - "strconv" - "strings" - "time" -) - -var pid = os.Getpid() - -type GlogFormatter struct { - StringFormatter -} - -func NewGlogFormatter(w io.Writer) *GlogFormatter { - g := &GlogFormatter{} - g.w = bufio.NewWriter(w) - return g -} - -func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) { - g.w.Write(GlogHeader(level, depth+1)) - g.StringFormatter.Format(pkg, level, depth+1, entries...) -} - -func GlogHeader(level LogLevel, depth int) []byte { - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - now := time.Now().UTC() - _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. - if !ok { - file = "???" 
- line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } - } - if line < 0 { - line = 0 // not a real line number - } - buf := &bytes.Buffer{} - buf.Grow(30) - _, month, day := now.Date() - hour, minute, second := now.Clock() - buf.WriteString(level.Char()) - twoDigits(buf, int(month)) - twoDigits(buf, day) - buf.WriteByte(' ') - twoDigits(buf, hour) - buf.WriteByte(':') - twoDigits(buf, minute) - buf.WriteByte(':') - twoDigits(buf, second) - buf.WriteByte('.') - buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000)) - buf.WriteByte('Z') - buf.WriteByte(' ') - buf.WriteString(strconv.Itoa(pid)) - buf.WriteByte(' ') - buf.WriteString(file) - buf.WriteByte(':') - buf.WriteString(strconv.Itoa(line)) - buf.WriteByte(']') - buf.WriteByte(' ') - return buf.Bytes() -} - -const digits = "0123456789" - -func twoDigits(b *bytes.Buffer, d int) { - c2 := digits[d%10] - d /= 10 - c1 := digits[d%10] - b.WriteByte(c1) - b.WriteByte(c2) -} diff --git a/vendor/github.com/coreos/pkg/capnslog/init.go b/vendor/github.com/coreos/pkg/capnslog/init.go deleted file mode 100644 index 44b8cd361..000000000 --- a/vendor/github.com/coreos/pkg/capnslog/init.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// +build !windows - -package capnslog - -import ( - "io" - "os" - "syscall" -) - -// Here's where the opinionation comes in. We need some sensible defaults, -// especially after taking over the log package. Your project (whatever it may -// be) may see things differently. That's okay; there should be no defaults in -// the main package that cannot be controlled or overridden programatically, -// otherwise it's a bug. Doing so is creating your own init_log.go file much -// like this one. - -func init() { - initHijack() - - // Go `log` pacakge uses os.Stderr. - SetFormatter(NewDefaultFormatter(os.Stderr)) - SetGlobalLogLevel(INFO) -} - -func NewDefaultFormatter(out io.Writer) Formatter { - if syscall.Getppid() == 1 { - // We're running under init, which may be systemd. - f, err := NewJournaldFormatter() - if err == nil { - return f - } - } - return NewPrettyFormatter(out, false) -} diff --git a/vendor/github.com/coreos/pkg/capnslog/init_windows.go b/vendor/github.com/coreos/pkg/capnslog/init_windows.go deleted file mode 100644 index 455305065..000000000 --- a/vendor/github.com/coreos/pkg/capnslog/init_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import "os" - -func init() { - initHijack() - - // Go `log` package uses os.Stderr. - SetFormatter(NewPrettyFormatter(os.Stderr, false)) - SetGlobalLogLevel(INFO) -} diff --git a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go deleted file mode 100644 index 72e05207c..000000000 --- a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// +build !windows - -package capnslog - -import ( - "errors" - "fmt" - "os" - "path/filepath" - - "github.com/coreos/go-systemd/journal" -) - -func NewJournaldFormatter() (Formatter, error) { - if !journal.Enabled() { - return nil, errors.New("No systemd detected") - } - return &journaldFormatter{}, nil -} - -type journaldFormatter struct{} - -func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { - var pri journal.Priority - switch l { - case CRITICAL: - pri = journal.PriCrit - case ERROR: - pri = journal.PriErr - case WARNING: - pri = journal.PriWarning - case NOTICE: - pri = journal.PriNotice - case INFO: - pri = journal.PriInfo - case DEBUG: - pri = journal.PriDebug - case TRACE: - pri = journal.PriDebug - default: - panic("Unhandled loglevel") - } - msg := fmt.Sprint(entries...) - tags := map[string]string{ - "PACKAGE": pkg, - "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]), - } - err := journal.Send(msg, pri, tags) - if err != nil { - fmt.Fprintln(os.Stderr, err) - } -} - -func (j *journaldFormatter) Flush() {} diff --git a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go deleted file mode 100644 index 970086b9f..000000000 --- a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package capnslog - -import ( - "log" -) - -func initHijack() { - pkg := NewPackageLogger("log", "") - w := packageWriter{pkg} - log.SetFlags(0) - log.SetPrefix("") - log.SetOutput(w) -} - -type packageWriter struct { - pl *PackageLogger -} - -func (p packageWriter) Write(b []byte) (int, error) { - if p.pl.level < INFO { - return 0, nil - } - p.pl.internalLog(calldepth+2, INFO, string(b)) - return len(b), nil -} diff --git a/vendor/github.com/coreos/pkg/capnslog/logmap.go b/vendor/github.com/coreos/pkg/capnslog/logmap.go deleted file mode 100644 index 849544883..000000000 --- a/vendor/github.com/coreos/pkg/capnslog/logmap.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import ( - "errors" - "strings" - "sync" -) - -// LogLevel is the set of all log levels. -type LogLevel int8 - -const ( - // CRITICAL is the lowest log level; only errors which will end the program will be propagated. - CRITICAL LogLevel = iota - 1 - // ERROR is for errors that are not fatal but lead to troubling behavior. - ERROR - // WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations. - WARNING - // NOTICE is for normal but significant conditions. - NOTICE - // INFO is a log level for common, everyday log updates. - INFO - // DEBUG is the default hidden level for more verbose updates about internal processes. - DEBUG - // TRACE is for (potentially) call by call tracing of programs. - TRACE -) - -// Char returns a single-character representation of the log level. -func (l LogLevel) Char() string { - switch l { - case CRITICAL: - return "C" - case ERROR: - return "E" - case WARNING: - return "W" - case NOTICE: - return "N" - case INFO: - return "I" - case DEBUG: - return "D" - case TRACE: - return "T" - default: - panic("Unhandled loglevel") - } -} - -// String returns a multi-character representation of the log level. -func (l LogLevel) String() string { - switch l { - case CRITICAL: - return "CRITICAL" - case ERROR: - return "ERROR" - case WARNING: - return "WARNING" - case NOTICE: - return "NOTICE" - case INFO: - return "INFO" - case DEBUG: - return "DEBUG" - case TRACE: - return "TRACE" - default: - panic("Unhandled loglevel") - } -} - -// Update using the given string value. Fulfills the flag.Value interface. -func (l *LogLevel) Set(s string) error { - value, err := ParseLevel(s) - if err != nil { - return err - } - - *l = value - return nil -} - -// ParseLevel translates some potential loglevel strings into their corresponding levels. 
-func ParseLevel(s string) (LogLevel, error) { - switch s { - case "CRITICAL", "C": - return CRITICAL, nil - case "ERROR", "0", "E": - return ERROR, nil - case "WARNING", "1", "W": - return WARNING, nil - case "NOTICE", "2", "N": - return NOTICE, nil - case "INFO", "3", "I": - return INFO, nil - case "DEBUG", "4", "D": - return DEBUG, nil - case "TRACE", "5", "T": - return TRACE, nil - } - return CRITICAL, errors.New("couldn't parse log level " + s) -} - -type RepoLogger map[string]*PackageLogger - -type loggerStruct struct { - sync.Mutex - repoMap map[string]RepoLogger - formatter Formatter -} - -// logger is the global logger -var logger = new(loggerStruct) - -// SetGlobalLogLevel sets the log level for all packages in all repositories -// registered with capnslog. -func SetGlobalLogLevel(l LogLevel) { - logger.Lock() - defer logger.Unlock() - for _, r := range logger.repoMap { - r.setRepoLogLevelInternal(l) - } -} - -// GetRepoLogger may return the handle to the repository's set of packages' loggers. -func GetRepoLogger(repo string) (RepoLogger, error) { - logger.Lock() - defer logger.Unlock() - r, ok := logger.repoMap[repo] - if !ok { - return nil, errors.New("no packages registered for repo " + repo) - } - return r, nil -} - -// MustRepoLogger returns the handle to the repository's packages' loggers. -func MustRepoLogger(repo string) RepoLogger { - r, err := GetRepoLogger(repo) - if err != nil { - panic(err) - } - return r -} - -// SetRepoLogLevel sets the log level for all packages in the repository. -func (r RepoLogger) SetRepoLogLevel(l LogLevel) { - logger.Lock() - defer logger.Unlock() - r.setRepoLogLevelInternal(l) -} - -func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) { - for _, v := range r { - v.level = l - } -} - -// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in -// order, and returns a map of the results, for use in SetLogLevel. -func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) { - setlist := strings.Split(conf, ",") - out := make(map[string]LogLevel) - for _, setstring := range setlist { - setting := strings.Split(setstring, "=") - if len(setting) != 2 { - return nil, errors.New("oddly structured `pkg=level` option: " + setstring) - } - l, err := ParseLevel(setting[1]) - if err != nil { - return nil, err - } - out[setting[0]] = l - } - return out, nil -} - -// SetLogLevel takes a map of package names within a repository to their desired -// loglevel, and sets the levels appropriately. Unknown packages are ignored. -// "*" is a special package name that corresponds to all packages, and will be -// processed first. -func (r RepoLogger) SetLogLevel(m map[string]LogLevel) { - logger.Lock() - defer logger.Unlock() - if l, ok := m["*"]; ok { - r.setRepoLogLevelInternal(l) - } - for k, v := range m { - l, ok := r[k] - if !ok { - continue - } - l.level = v - } -} - -// SetFormatter sets the formatting function for all logs. -func SetFormatter(f Formatter) { - logger.Lock() - defer logger.Unlock() - logger.formatter = f -} - -// NewPackageLogger creates a package logger object. -// This should be defined as a global var in your package, referencing your repo. 
-func NewPackageLogger(repo string, pkg string) (p *PackageLogger) { - logger.Lock() - defer logger.Unlock() - if logger.repoMap == nil { - logger.repoMap = make(map[string]RepoLogger) - } - r, rok := logger.repoMap[repo] - if !rok { - logger.repoMap[repo] = make(RepoLogger) - r = logger.repoMap[repo] - } - p, pok := r[pkg] - if !pok { - r[pkg] = &PackageLogger{ - pkg: pkg, - level: INFO, - } - p = r[pkg] - } - return -} diff --git a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go deleted file mode 100644 index 612d55c66..000000000 --- a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import ( - "fmt" - "os" -) - -type PackageLogger struct { - pkg string - level LogLevel -} - -const calldepth = 2 - -func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) { - logger.Lock() - defer logger.Unlock() - if inLevel != CRITICAL && p.level < inLevel { - return - } - if logger.formatter != nil { - logger.formatter.Format(p.pkg, inLevel, depth+1, entries...) - } -} - -func (p *PackageLogger) LevelAt(l LogLevel) bool { - logger.Lock() - defer logger.Unlock() - return p.level >= l -} - -// Log a formatted string at any level between ERROR and TRACE -func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) { - p.internalLog(calldepth, l, fmt.Sprintf(format, args...)) -} - -// Log a message at any level between ERROR and TRACE -func (p *PackageLogger) Log(l LogLevel, args ...interface{}) { - p.internalLog(calldepth, l, fmt.Sprint(args...)) -} - -// log stdlib compatibility - -func (p *PackageLogger) Println(args ...interface{}) { - p.internalLog(calldepth, INFO, fmt.Sprintln(args...)) -} - -func (p *PackageLogger) Printf(format string, args ...interface{}) { - p.Logf(INFO, format, args...) -} - -func (p *PackageLogger) Print(args ...interface{}) { - p.internalLog(calldepth, INFO, fmt.Sprint(args...)) -} - -// Panic and fatal - -func (p *PackageLogger) Panicf(format string, args ...interface{}) { - s := fmt.Sprintf(format, args...) - p.internalLog(calldepth, CRITICAL, s) - panic(s) -} - -func (p *PackageLogger) Panic(args ...interface{}) { - s := fmt.Sprint(args...) - p.internalLog(calldepth, CRITICAL, s) - panic(s) -} - -func (p *PackageLogger) Fatalf(format string, args ...interface{}) { - p.Logf(CRITICAL, format, args...) - os.Exit(1) -} - -func (p *PackageLogger) Fatal(args ...interface{}) { - s := fmt.Sprint(args...) - p.internalLog(calldepth, CRITICAL, s) - os.Exit(1) -} - -func (p *PackageLogger) Fatalln(args ...interface{}) { - s := fmt.Sprintln(args...) - p.internalLog(calldepth, CRITICAL, s) - os.Exit(1) -} - -// Error Functions - -func (p *PackageLogger) Errorf(format string, args ...interface{}) { - p.Logf(ERROR, format, args...) -} - -func (p *PackageLogger) Error(entries ...interface{}) { - p.internalLog(calldepth, ERROR, entries...) 
-} - -// Warning Functions - -func (p *PackageLogger) Warningf(format string, args ...interface{}) { - p.Logf(WARNING, format, args...) -} - -func (p *PackageLogger) Warning(entries ...interface{}) { - p.internalLog(calldepth, WARNING, entries...) -} - -// Notice Functions - -func (p *PackageLogger) Noticef(format string, args ...interface{}) { - p.Logf(NOTICE, format, args...) -} - -func (p *PackageLogger) Notice(entries ...interface{}) { - p.internalLog(calldepth, NOTICE, entries...) -} - -// Info Functions - -func (p *PackageLogger) Infof(format string, args ...interface{}) { - p.Logf(INFO, format, args...) -} - -func (p *PackageLogger) Info(entries ...interface{}) { - p.internalLog(calldepth, INFO, entries...) -} - -// Debug Functions - -func (p *PackageLogger) Debugf(format string, args ...interface{}) { - if p.level < DEBUG { - return - } - p.Logf(DEBUG, format, args...) -} - -func (p *PackageLogger) Debug(entries ...interface{}) { - if p.level < DEBUG { - return - } - p.internalLog(calldepth, DEBUG, entries...) -} - -// Trace Functions - -func (p *PackageLogger) Tracef(format string, args ...interface{}) { - if p.level < TRACE { - return - } - p.Logf(TRACE, format, args...) -} - -func (p *PackageLogger) Trace(entries ...interface{}) { - if p.level < TRACE { - return - } - p.internalLog(calldepth, TRACE, entries...) -} - -func (p *PackageLogger) Flush() { - logger.Lock() - defer logger.Unlock() - logger.formatter.Flush() -} diff --git a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go deleted file mode 100644 index 4be5a1f2d..000000000 --- a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// +build !windows - -package capnslog - -import ( - "fmt" - "log/syslog" -) - -func NewSyslogFormatter(w *syslog.Writer) Formatter { - return &syslogFormatter{w} -} - -func NewDefaultSyslogFormatter(tag string) (Formatter, error) { - w, err := syslog.New(syslog.LOG_DEBUG, tag) - if err != nil { - return nil, err - } - return NewSyslogFormatter(w), nil -} - -type syslogFormatter struct { - w *syslog.Writer -} - -func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { - for _, entry := range entries { - str := fmt.Sprint(entry) - switch l { - case CRITICAL: - s.w.Crit(str) - case ERROR: - s.w.Err(str) - case WARNING: - s.w.Warning(str) - case NOTICE: - s.w.Notice(str) - case INFO: - s.w.Info(str) - case DEBUG: - s.w.Debug(str) - case TRACE: - s.w.Debug(str) - default: - panic("Unhandled loglevel") - } - } -} - -func (s *syslogFormatter) Flush() { -} diff --git a/vendor/github.com/coreos/pkg/health/README.md b/vendor/github.com/coreos/pkg/health/README.md deleted file mode 100644 index 5ec34c21e..000000000 --- a/vendor/github.com/coreos/pkg/health/README.md +++ /dev/null @@ -1,11 +0,0 @@ -health -==== - -A simple framework for implementing an HTTP health check endpoint on servers. - -Users implement their `health.Checkable` types, and create a `health.Checker`, from which they can get an `http.HandlerFunc` using `health.Checker.MakeHealthHandlerFunc`. - -### Documentation - -For more details, visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/health) - diff --git a/vendor/github.com/coreos/pkg/health/health.go b/vendor/github.com/coreos/pkg/health/health.go deleted file mode 100644 index a1c3610fa..000000000 --- a/vendor/github.com/coreos/pkg/health/health.go +++ /dev/null @@ -1,127 +0,0 @@ -package health - -import ( - "expvar" - "fmt" - "log" - "net/http" - - "github.com/coreos/pkg/httputil" -) - -// Checkables should return nil when the thing they are checking is healthy, and an error otherwise. -type Checkable interface { - Healthy() error -} - -// Checker provides a way to make an endpoint which can be probed for system health. -type Checker struct { - // Checks are the Checkables to be checked when probing. - Checks []Checkable - - // Unhealthyhandler is called when one or more of the checks are unhealthy. - // If not provided DefaultUnhealthyHandler is called. - UnhealthyHandler UnhealthyHandler - - // HealthyHandler is called when all checks are healthy. - // If not provided, DefaultHealthyHandler is called. 
- HealthyHandler http.HandlerFunc -} - -func (c Checker) ServeHTTP(w http.ResponseWriter, r *http.Request) { - unhealthyHandler := c.UnhealthyHandler - if unhealthyHandler == nil { - unhealthyHandler = DefaultUnhealthyHandler - } - - successHandler := c.HealthyHandler - if successHandler == nil { - successHandler = DefaultHealthyHandler - } - - if r.Method != "GET" { - w.Header().Set("Allow", "GET") - w.WriteHeader(http.StatusMethodNotAllowed) - return - } - - if err := Check(c.Checks); err != nil { - unhealthyHandler(w, r, err) - return - } - - successHandler(w, r) -} - -type UnhealthyHandler func(w http.ResponseWriter, r *http.Request, err error) - -type StatusResponse struct { - Status string `json:"status"` - Details *StatusResponseDetails `json:"details,omitempty"` -} - -type StatusResponseDetails struct { - Code int `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -func Check(checks []Checkable) (err error) { - errs := []error{} - for _, c := range checks { - if e := c.Healthy(); e != nil { - errs = append(errs, e) - } - } - - switch len(errs) { - case 0: - err = nil - case 1: - err = errs[0] - default: - err = fmt.Errorf("multiple health check failure: %v", errs) - } - - return -} - -func DefaultHealthyHandler(w http.ResponseWriter, r *http.Request) { - err := httputil.WriteJSONResponse(w, http.StatusOK, StatusResponse{ - Status: "ok", - }) - if err != nil { - // TODO(bobbyrullo): replace with logging from new logging pkg, - // once it lands. - log.Printf("Failed to write JSON response: %v", err) - } -} - -func DefaultUnhealthyHandler(w http.ResponseWriter, r *http.Request, err error) { - writeErr := httputil.WriteJSONResponse(w, http.StatusInternalServerError, StatusResponse{ - Status: "error", - Details: &StatusResponseDetails{ - Code: http.StatusInternalServerError, - Message: err.Error(), - }, - }) - if writeErr != nil { - // TODO(bobbyrullo): replace with logging from new logging pkg, - // once it lands. - log.Printf("Failed to write JSON response: %v", err) - } -} - -// ExpvarHandler is copied from https://golang.org/src/expvar/expvar.go, where it's sadly unexported. -func ExpvarHandler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintf(w, "{\n") - first := true - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprintf(w, ",\n") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprintf(w, "\n}\n") -} diff --git a/vendor/github.com/coreos/pkg/httputil/README.md b/vendor/github.com/coreos/pkg/httputil/README.md deleted file mode 100644 index 44fa751c4..000000000 --- a/vendor/github.com/coreos/pkg/httputil/README.md +++ /dev/null @@ -1,13 +0,0 @@ -httputil -==== - -Common code for dealing with HTTP. - -Includes: - -* Code for returning JSON responses. - -### Documentation - -Visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/httputil) - diff --git a/vendor/github.com/coreos/pkg/httputil/cookie.go b/vendor/github.com/coreos/pkg/httputil/cookie.go deleted file mode 100644 index c37a37bb2..000000000 --- a/vendor/github.com/coreos/pkg/httputil/cookie.go +++ /dev/null @@ -1,21 +0,0 @@ -package httputil - -import ( - "net/http" - "time" -) - -// DeleteCookies effectively deletes all named cookies -// by wiping all data and setting to expire immediately. 
-func DeleteCookies(w http.ResponseWriter, cookieNames ...string) { - for _, n := range cookieNames { - c := &http.Cookie{ - Name: n, - Value: "", - Path: "/", - MaxAge: -1, - Expires: time.Time{}, - } - http.SetCookie(w, c) - } -} diff --git a/vendor/github.com/coreos/pkg/httputil/json.go b/vendor/github.com/coreos/pkg/httputil/json.go deleted file mode 100644 index 0b0923503..000000000 --- a/vendor/github.com/coreos/pkg/httputil/json.go +++ /dev/null @@ -1,27 +0,0 @@ -package httputil - -import ( - "encoding/json" - "net/http" -) - -const ( - JSONContentType = "application/json" -) - -func WriteJSONResponse(w http.ResponseWriter, code int, resp interface{}) error { - enc, err := json.Marshal(resp) - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - return err - } - - w.Header().Set("Content-Type", JSONContentType) - w.WriteHeader(code) - - _, err = w.Write(enc) - if err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/coreos/pkg/timeutil/backoff.go b/vendor/github.com/coreos/pkg/timeutil/backoff.go deleted file mode 100644 index b34fb4966..000000000 --- a/vendor/github.com/coreos/pkg/timeutil/backoff.go +++ /dev/null @@ -1,15 +0,0 @@ -package timeutil - -import ( - "time" -) - -func ExpBackoff(prev, max time.Duration) time.Duration { - if prev == 0 { - return time.Second - } - if prev > max/2 { - return max - } - return 2 * prev -} diff --git a/vendor/github.com/docker/distribution/AUTHORS b/vendor/github.com/docker/distribution/AUTHORS new file mode 100644 index 000000000..9e80e062b --- /dev/null +++ b/vendor/github.com/docker/distribution/AUTHORS @@ -0,0 +1,147 @@ +Aaron Lehmann +Aaron Schlesinger +Aaron Vinson +Adam Enger +Adrian Mouat +Ahmet Alp Balkan +Alex Chan +Alex Elman +Alexey Gladkov +allencloud +amitshukla +Amy Lindburg +Andrew Hsu +Andrew Meredith +Andrew T Nguyen +Andrey Kostov +Andy Goldstein +Anis Elleuch +Anton Tiurin +Antonio Mercado +Antonio Murdaca +Arien Holthuizen +Arnaud Porterie +Arthur Baars +Asuka Suzuki +Avi Miller +Ayose Cazorla +BadZen +Ben Firshman +bin liu +Brian Bland +burnettk +Carson A +Chris Dillon +cyli +Daisuke Fujita +Daniel Huhn +Darren Shepherd +Dave Trombley +Dave Tucker +David Lawrence +David Verhasselt +David Xia +davidli +Dejan Golja +Derek McGowan +Diogo Mónica +DJ Enriquez +Donald Huang +Doug Davis +Eric Yang +Fabio Huser +farmerworking +Felix Yan +Florentin Raud +Frederick F. 
Kautz IV +gabriell nascimento +Gleb Schukin +harche +Henri Gomez +Hu Keping +Hua Wang +HuKeping +Ian Babrou +igayoso +Jack Griffin +Jason Freidman +Jeff Nickoloff +Jessie Frazelle +jhaohai +Jianqing Wang +John Starks +Jon Johnson +Jon Poler +Jonathan Boulle +Jordan Liggitt +Josh Hawn +Julien Fernandez +Ke Xu +Keerthan Mala +Kelsey Hightower +Kenneth Lim +Kenny Leung +Li Yi +Liu Hua +liuchang0812 +Louis Kottmann +Luke Carpenter +Mary Anthony +Matt Bentley +Matt Duch +Matt Moore +Matt Robenolt +Michael Prokop +Michal Minar +Miquel Sabaté +Morgan Bauer +moxiegirl +Nathan Sullivan +nevermosby +Nghia Tran +Nikita Tarasov +Nuutti Kotivuori +Oilbeater +Olivier Gambier +Olivier Jacques +Omer Cohen +Patrick Devine +Phil Estes +Philip Misiowiec +Richard Scothern +Rodolfo Carvalho +Rusty Conover +Sean Boran +Sebastiaan van Stijn +Serge Dubrouski +Sharif Nassar +Shawn Falkner-Horine +Shreyas Karnik +Simon Thulbourn +Spencer Rinehart +Stefan Majewsky +Stefan Weil +Stephen J Day +Sungho Moon +Sven Dowideit +Sylvain Baubeau +Ted Reed +tgic +Thomas Sjögren +Tianon Gravi +Tibor Vass +Tonis Tiigi +Tony Holdstock-Brown +Trevor Pounds +Troels Thomsen +Vincent Batts +Vincent Demeester +Vincent Giersch +W. Trevor King +weiyuan.yl +xg.song +xiekeyang +Yann ROBERT +yuzou +zhouhaibing089 +姜继忠 diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE deleted file mode 100644 index 8f3fee627..000000000 --- a/vendor/github.com/docker/docker/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE deleted file mode 100644 index 8a37c1c7b..000000000 --- a/vendor/github.com/docker/docker/NOTICE +++ /dev/null @@ -1,19 +0,0 @@ -Docker -Copyright 2012-2016 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -This product contains software (https://github.com/kr/pty) developed -by Keith Rarick, licensed under the MIT License. - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags.go b/vendor/github.com/docker/docker/pkg/mount/flags.go deleted file mode 100644 index 607dbed43..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/flags.go +++ /dev/null @@ -1,149 +0,0 @@ -package mount - -import ( - "fmt" - "strings" -) - -var flags = map[string]struct { - clear bool - flag int -}{ - "defaults": {false, 0}, - "ro": {false, RDONLY}, - "rw": {true, RDONLY}, - "suid": {true, NOSUID}, - "nosuid": {false, NOSUID}, - "dev": {true, NODEV}, - "nodev": {false, NODEV}, - "exec": {true, NOEXEC}, - "noexec": {false, NOEXEC}, - "sync": {false, SYNCHRONOUS}, - "async": {true, SYNCHRONOUS}, - "dirsync": {false, DIRSYNC}, - "remount": {false, REMOUNT}, - "mand": {false, MANDLOCK}, - "nomand": {true, MANDLOCK}, - "atime": {true, NOATIME}, - "noatime": {false, NOATIME}, - "diratime": {true, NODIRATIME}, - "nodiratime": {false, NODIRATIME}, - "bind": {false, BIND}, - "rbind": {false, RBIND}, - "unbindable": {false, UNBINDABLE}, - "runbindable": {false, RUNBINDABLE}, - "private": {false, PRIVATE}, - "rprivate": {false, RPRIVATE}, - "shared": {false, SHARED}, - "rshared": {false, RSHARED}, - "slave": {false, SLAVE}, - "rslave": {false, RSLAVE}, - "relatime": {false, RELATIME}, - "norelatime": {true, RELATIME}, - "strictatime": {false, STRICTATIME}, - "nostrictatime": {true, STRICTATIME}, -} - -var validFlags = map[string]bool{ - "": true, - "size": true, - "mode": true, - "uid": true, - "gid": true, - "nr_inodes": true, - "nr_blocks": true, - "mpol": true, -} - -var propagationFlags = map[string]bool{ - "bind": true, - "rbind": true, - "unbindable": true, - "runbindable": true, - "private": true, - "rprivate": true, - "shared": true, - "rshared": true, - "slave": true, - "rslave": true, -} - -// MergeTmpfsOptions merge mount options to make sure there is no duplicate. -func MergeTmpfsOptions(options []string) ([]string, error) { - // We use collisions maps to remove duplicates. - // For flag, the key is the flag value (the key for propagation flag is -1) - // For data=value, the key is the data - flagCollisions := map[int]bool{} - dataCollisions := map[string]bool{} - - var newOptions []string - // We process in reverse order - for i := len(options) - 1; i >= 0; i-- { - option := options[i] - if option == "defaults" { - continue - } - if f, ok := flags[option]; ok && f.flag != 0 { - // There is only one propagation mode - key := f.flag - if propagationFlags[option] { - key = -1 - } - // Check to see if there is collision for flag - if !flagCollisions[key] { - // We prepend the option and add to collision map - newOptions = append([]string{option}, newOptions...) - flagCollisions[key] = true - } - continue - } - opt := strings.SplitN(option, "=", 2) - if len(opt) != 2 || !validFlags[opt[0]] { - return nil, fmt.Errorf("Invalid tmpfs option %q", opt) - } - if !dataCollisions[opt[0]] { - // We prepend the option and add to collision map - newOptions = append([]string{option}, newOptions...) 
- dataCollisions[opt[0]] = true - } - } - - return newOptions, nil -} - -// Parse fstab type mount options into mount() flags -// and device specific data -func parseOptions(options string) (int, string) { - var ( - flag int - data []string - ) - - for _, o := range strings.Split(options, ",") { - // If the option does not exist in the flags table or the flag - // is not supported on the platform, - // then it is a data value for a specific fs type - if f, exists := flags[o]; exists && f.flag != 0 { - if f.clear { - flag &= ^f.flag - } else { - flag |= f.flag - } - } else { - data = append(data, o) - } - } - return flag, strings.Join(data, ",") -} - -// ParseTmpfsOptions parse fstab type mount options into flags and data -func ParseTmpfsOptions(options string) (int, string, error) { - flags, data := parseOptions(options) - for _, o := range strings.Split(data, ",") { - opt := strings.SplitN(o, "=", 2) - if !validFlags[opt[0]] { - return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) - } - } - return flags, data, nil -} diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go deleted file mode 100644 index f166cb2f7..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build freebsd,cgo - -package mount - -/* -#include -*/ -import "C" - -const ( - // RDONLY will mount the filesystem as read-only. - RDONLY = C.MNT_RDONLY - - // NOSUID will not allow set-user-identifier or set-group-identifier bits to - // take effect. - NOSUID = C.MNT_NOSUID - - // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = C.MNT_NOEXEC - - // SYNCHRONOUS will allow any I/O to the file system to be done synchronously. - SYNCHRONOUS = C.MNT_SYNCHRONOUS - - // NOATIME will not update the file access time when reading from a file. - NOATIME = C.MNT_NOATIME -) - -// These flags are unsupported. -const ( - BIND = 0 - DIRSYNC = 0 - MANDLOCK = 0 - NODEV = 0 - NODIRATIME = 0 - UNBINDABLE = 0 - RUNBINDABLE = 0 - PRIVATE = 0 - RPRIVATE = 0 - SHARED = 0 - RSHARED = 0 - SLAVE = 0 - RSLAVE = 0 - RBIND = 0 - RELATIVE = 0 - RELATIME = 0 - REMOUNT = 0 - STRICTATIME = 0 -) diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go deleted file mode 100644 index dc696dce9..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go +++ /dev/null @@ -1,85 +0,0 @@ -package mount - -import ( - "syscall" -) - -const ( - // RDONLY will mount the file system read-only. - RDONLY = syscall.MS_RDONLY - - // NOSUID will not allow set-user-identifier or set-group-identifier bits to - // take effect. - NOSUID = syscall.MS_NOSUID - - // NODEV will not interpret character or block special devices on the file - // system. - NODEV = syscall.MS_NODEV - - // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = syscall.MS_NOEXEC - - // SYNCHRONOUS will allow I/O to the file system to be done synchronously. - SYNCHRONOUS = syscall.MS_SYNCHRONOUS - - // DIRSYNC will force all directory updates within the file system to be done - // synchronously. This affects the following system calls: create, link, - // unlink, symlink, mkdir, rmdir, mknod and rename. - DIRSYNC = syscall.MS_DIRSYNC - - // REMOUNT will attempt to remount an already-mounted file system. 
This is - // commonly used to change the mount flags for a file system, especially to - // make a readonly file system writeable. It does not change device or mount - // point. - REMOUNT = syscall.MS_REMOUNT - - // MANDLOCK will force mandatory locks on a filesystem. - MANDLOCK = syscall.MS_MANDLOCK - - // NOATIME will not update the file access time when reading from a file. - NOATIME = syscall.MS_NOATIME - - // NODIRATIME will not update the directory access time. - NODIRATIME = syscall.MS_NODIRATIME - - // BIND remounts a subtree somewhere else. - BIND = syscall.MS_BIND - - // RBIND remounts a subtree and all possible submounts somewhere else. - RBIND = syscall.MS_BIND | syscall.MS_REC - - // UNBINDABLE creates a mount which cannot be cloned through a bind operation. - UNBINDABLE = syscall.MS_UNBINDABLE - - // RUNBINDABLE marks the entire mount tree as UNBINDABLE. - RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC - - // PRIVATE creates a mount which carries no propagation abilities. - PRIVATE = syscall.MS_PRIVATE - - // RPRIVATE marks the entire mount tree as PRIVATE. - RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC - - // SLAVE creates a mount which receives propagation from its master, but not - // vice versa. - SLAVE = syscall.MS_SLAVE - - // RSLAVE marks the entire mount tree as SLAVE. - RSLAVE = syscall.MS_SLAVE | syscall.MS_REC - - // SHARED creates a mount which provides the ability to create mirrors of - // that mount such that mounts and unmounts within any of the mirrors - // propagate to the other mirrors. - SHARED = syscall.MS_SHARED - - // RSHARED marks the entire mount tree as SHARED. - RSHARED = syscall.MS_SHARED | syscall.MS_REC - - // RELATIME updates inode access times relative to modify or change time. - RELATIME = syscall.MS_RELATIME - - // STRICTATIME allows to explicitly request full atime updates. This makes - // it possible for the kernel to default to relatime or noatime but still - // allow userspace to override it. - STRICTATIME = syscall.MS_STRICTATIME -) diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go deleted file mode 100644 index 5564f7b3c..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build !linux,!freebsd freebsd,!cgo solaris,!cgo - -package mount - -// These flags are unsupported. -const ( - BIND = 0 - DIRSYNC = 0 - MANDLOCK = 0 - NOATIME = 0 - NODEV = 0 - NODIRATIME = 0 - NOEXEC = 0 - NOSUID = 0 - UNBINDABLE = 0 - RUNBINDABLE = 0 - PRIVATE = 0 - RPRIVATE = 0 - SHARED = 0 - RSHARED = 0 - SLAVE = 0 - RSLAVE = 0 - RBIND = 0 - RELATIME = 0 - RELATIVE = 0 - REMOUNT = 0 - STRICTATIME = 0 - SYNCHRONOUS = 0 - RDONLY = 0 -) diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go deleted file mode 100644 index 66ac4bf47..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mount.go +++ /dev/null @@ -1,74 +0,0 @@ -package mount - -import ( - "time" -) - -// GetMounts retrieves a list of mounts for the current running process. -func GetMounts() ([]*Info, error) { - return parseMountTable() -} - -// Mounted determines if a specified mountpoint has been mounted. -// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab. 
-func Mounted(mountpoint string) (bool, error) { - entries, err := parseMountTable() - if err != nil { - return false, err - } - - // Search the table for the mountpoint - for _, e := range entries { - if e.Mountpoint == mountpoint { - return true, nil - } - } - return false, nil -} - -// Mount will mount filesystem according to the specified configuration, on the -// condition that the target path is *not* already mounted. Options must be -// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See -// flags.go for supported option flags. -func Mount(device, target, mType, options string) error { - flag, _ := parseOptions(options) - if flag&REMOUNT != REMOUNT { - if mounted, err := Mounted(target); err != nil || mounted { - return err - } - } - return ForceMount(device, target, mType, options) -} - -// ForceMount will mount a filesystem according to the specified configuration, -// *regardless* if the target path is not already mounted. Options must be -// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See -// flags.go for supported option flags. -func ForceMount(device, target, mType, options string) error { - flag, data := parseOptions(options) - if err := mount(device, target, mType, uintptr(flag), data); err != nil { - return err - } - return nil -} - -// Unmount will unmount the target filesystem, so long as it is mounted. -func Unmount(target string) error { - if mounted, err := Mounted(target); err != nil || !mounted { - return err - } - return ForceUnmount(target) -} - -// ForceUnmount will force an unmount of the target filesystem, regardless if -// it is mounted or not. -func ForceUnmount(target string) (err error) { - // Simple retry logic for unmount - for i := 0; i < 10; i++ { - if err = unmount(target, 0); err == nil { - return nil - } - time.Sleep(100 * time.Millisecond) - } - return -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go deleted file mode 100644 index bb870e6f5..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go +++ /dev/null @@ -1,59 +0,0 @@ -package mount - -/* -#include -#include -#include -#include -#include -#include -*/ -import "C" - -import ( - "fmt" - "strings" - "syscall" - "unsafe" -) - -func allocateIOVecs(options []string) []C.struct_iovec { - out := make([]C.struct_iovec, len(options)) - for i, option := range options { - out[i].iov_base = unsafe.Pointer(C.CString(option)) - out[i].iov_len = C.size_t(len(option) + 1) - } - return out -} - -func mount(device, target, mType string, flag uintptr, data string) error { - isNullFS := false - - xs := strings.Split(data, ",") - for _, x := range xs { - if x == "bind" { - isNullFS = true - } - } - - options := []string{"fspath", target} - if isNullFS { - options = append(options, "fstype", "nullfs", "target", device) - } else { - options = append(options, "fstype", mType, "from", device) - } - rawOptions := allocateIOVecs(options) - for _, rawOption := range rawOptions { - defer C.free(rawOption.iov_base) - } - - if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { - reason := C.GoString(C.strerror(*C.__error())) - return fmt.Errorf("Failed to call nmount: %s", reason) - } - return nil -} - -func unmount(target string, flag int) error { - return syscall.Unmount(target, flag) -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go deleted 
file mode 100644 index dd4280c77..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go +++ /dev/null @@ -1,21 +0,0 @@ -package mount - -import ( - "syscall" -) - -func mount(device, target, mType string, flag uintptr, data string) error { - if err := syscall.Mount(device, target, mType, flag, data); err != nil { - return err - } - - // If we have a bind mount or remount, remount... - if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY { - return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data) - } - return nil -} - -func unmount(target string, flag int) error { - return syscall.Unmount(target, flag) -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go deleted file mode 100644 index c684aa81f..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build solaris,cgo - -package mount - -import ( - "golang.org/x/sys/unix" - "unsafe" -) - -// #include -// #include -// #include -// int Mount(const char *spec, const char *dir, int mflag, -// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) { -// return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen); -// } -import "C" - -func mount(device, target, mType string, flag uintptr, data string) error { - spec := C.CString(device) - dir := C.CString(target) - fstype := C.CString(mType) - _, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0) - C.free(unsafe.Pointer(spec)) - C.free(unsafe.Pointer(dir)) - C.free(unsafe.Pointer(fstype)) - return err -} - -func unmount(target string, flag int) error { - err := unix.Unmount(target, flag) - return err -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go deleted file mode 100644 index a2a3bb457..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo - -package mount - -func mount(device, target, mType string, flag uintptr, data string) error { - panic("Not implemented") -} - -func unmount(target string, flag int) error { - panic("Not implemented") -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go deleted file mode 100644 index e3fc3535e..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go +++ /dev/null @@ -1,40 +0,0 @@ -package mount - -// Info reveals information about a particular mounted filesystem. This -// struct is populated from the content in the /proc//mountinfo file. -type Info struct { - // ID is a unique identifier of the mount (may be reused after umount). - ID int - - // Parent indicates the ID of the mount parent (or of self for the top of the - // mount tree). - Parent int - - // Major indicates one half of the device ID which identifies the device class. - Major int - - // Minor indicates one half of the device ID which identifies a specific - // instance of device. - Minor int - - // Root of the mount within the filesystem. - Root string - - // Mountpoint indicates the mount point relative to the process's root. - Mountpoint string - - // Opts represents mount-specific options. - Opts string - - // Optional represents optional fields. - Optional string - - // Fstype indicates the type of filesystem, such as EXT3. 
- Fstype string - - // Source indicates filesystem specific information or "none". - Source string - - // VfsOpts represents per super block options. - VfsOpts string -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go deleted file mode 100644 index 4f32edcd9..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go +++ /dev/null @@ -1,41 +0,0 @@ -package mount - -/* -#include -#include -#include -*/ -import "C" - -import ( - "fmt" - "reflect" - "unsafe" -) - -// Parse /proc/self/mountinfo because comparing Dev and ino does not work from -// bind mounts. -func parseMountTable() ([]*Info, error) { - var rawEntries *C.struct_statfs - - count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) - if count == 0 { - return nil, fmt.Errorf("Failed to call getmntinfo") - } - - var entries []C.struct_statfs - header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) - header.Cap = count - header.Len = count - header.Data = uintptr(unsafe.Pointer(rawEntries)) - - var out []*Info - for _, entry := range entries { - var mountinfo Info - mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) - mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) - mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) - out = append(out, &mountinfo) - } - return out, nil -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go deleted file mode 100644 index be69fee1d..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go +++ /dev/null @@ -1,95 +0,0 @@ -// +build linux - -package mount - -import ( - "bufio" - "fmt" - "io" - "os" - "strings" -) - -const ( - /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue - (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) - - (1) mount ID: unique identifier of the mount (may be reused after umount) - (2) parent ID: ID of parent (or of self for the top of the mount tree) - (3) major:minor: value of st_dev for files on filesystem - (4) root: root of the mount within the filesystem - (5) mount point: mount point relative to the process's root - (6) mount options: per mount options - (7) optional fields: zero or more fields of the form "tag[:value]" - (8) separator: marks the end of the optional fields - (9) filesystem type: name of filesystem of the form "type[.subtype]" - (10) mount source: filesystem specific information or "none" - (11) super options: per super block options*/ - mountinfoFormat = "%d %d %d:%d %s %s %s %s" -) - -// Parse /proc/self/mountinfo because comparing Dev and ino does not work from -// bind mounts -func parseMountTable() ([]*Info, error) { - f, err := os.Open("/proc/self/mountinfo") - if err != nil { - return nil, err - } - defer f.Close() - - return parseInfoFile(f) -} - -func parseInfoFile(r io.Reader) ([]*Info, error) { - var ( - s = bufio.NewScanner(r) - out = []*Info{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - var ( - p = &Info{} - text = s.Text() - optionalFields string - ) - - if _, err := fmt.Sscanf(text, mountinfoFormat, - &p.ID, &p.Parent, &p.Major, &p.Minor, - &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { - return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) - } - // Safe as mountinfo encodes mountpoints with spaces as \040. 
- index := strings.Index(text, " - ") - postSeparatorFields := strings.Fields(text[index+3:]) - if len(postSeparatorFields) < 3 { - return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) - } - - if optionalFields != "-" { - p.Optional = optionalFields - } - - p.Fstype = postSeparatorFields[0] - p.Source = postSeparatorFields[1] - p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") - out = append(out, p) - } - return out, nil -} - -// PidMountInfo collects the mounts for a specific process ID. If the process -// ID is unknown, it is better to use `GetMounts` which will inspect -// "/proc/self/mountinfo" instead. -func PidMountInfo(pid int) ([]*Info, error) { - f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) - if err != nil { - return nil, err - } - defer f.Close() - - return parseInfoFile(f) -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go deleted file mode 100644 index ad9ab57f8..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build solaris,cgo - -package mount - -/* -#include -#include -*/ -import "C" - -import ( - "fmt" -) - -func parseMountTable() ([]*Info, error) { - mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r")) - if mnttab == nil { - return nil, fmt.Errorf("Failed to open %s", C.MNTTAB) - } - - var out []*Info - var mp C.struct_mnttab - - ret := C.getmntent(mnttab, &mp) - for ret == 0 { - var mountinfo Info - mountinfo.Mountpoint = C.GoString(mp.mnt_mountp) - mountinfo.Source = C.GoString(mp.mnt_special) - mountinfo.Fstype = C.GoString(mp.mnt_fstype) - mountinfo.Opts = C.GoString(mp.mnt_mntopts) - out = append(out, &mountinfo) - ret = C.getmntent(mnttab, &mp) - } - - C.fclose(mnttab) - return out, nil -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go deleted file mode 100644 index 7fbcf1921..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo - -package mount - -import ( - "fmt" - "runtime" -) - -func parseMountTable() ([]*Info, error) { - return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go deleted file mode 100644 index dab8a37ed..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package mount - -func parseMountTable() ([]*Info, error) { - // Do NOT return an error! - return nil, nil -} diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go deleted file mode 100644 index 8ceec84bc..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go +++ /dev/null @@ -1,69 +0,0 @@ -// +build linux - -package mount - -// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "shared") -} - -// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. -// See the supported options in flags.go for further reference. 
-func MakeRShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "rshared") -} - -// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. -// See the supported options in flags.go for further reference. -func MakePrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "private") -} - -// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeRPrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "rprivate") -} - -// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "slave") -} - -// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "rslave") -} - -// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "unbindable") -} - -// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount -// option enabled. See the supported options in flags.go for further reference. -func MakeRUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "runbindable") -} - -func ensureMountedAs(mountPoint, options string) error { - mounted, err := Mounted(mountPoint) - if err != nil { - return err - } - - if !mounted { - if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { - return err - } - } - if _, err = Mounted(mountPoint); err != nil { - return err - } - - return ForceMount("", mountPoint, "none", options) -} diff --git a/vendor/github.com/docker/engine-api/LICENSE b/vendor/github.com/docker/engine-api/LICENSE deleted file mode 100644 index c157bff96..000000000 --- a/vendor/github.com/docker/engine-api/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015-2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/engine-api/types/auth.go b/vendor/github.com/docker/engine-api/types/auth.go deleted file mode 100644 index 056af6b84..000000000 --- a/vendor/github.com/docker/engine-api/types/auth.go +++ /dev/null @@ -1,22 +0,0 @@ -package types - -// AuthConfig contains authorization information for connecting to a Registry -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth,omitempty"` - - // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. - Email string `json:"email,omitempty"` - - ServerAddress string `json:"serveraddress,omitempty"` - - // IdentityToken is used to authenticate the user and get - // an access token for the registry. 
- IdentityToken string `json:"identitytoken,omitempty"` - - // RegistryToken is a bearer token to be sent to a registry - RegistryToken string `json:"registrytoken,omitempty"` -} diff --git a/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go b/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go deleted file mode 100644 index 931ae10ab..000000000 --- a/vendor/github.com/docker/engine-api/types/blkiodev/blkio.go +++ /dev/null @@ -1,23 +0,0 @@ -package blkiodev - -import "fmt" - -// WeightDevice is a structure that holds device:weight pair -type WeightDevice struct { - Path string - Weight uint16 -} - -func (w *WeightDevice) String() string { - return fmt.Sprintf("%s:%d", w.Path, w.Weight) -} - -// ThrottleDevice is a structure that holds device:rate_per_second pair -type ThrottleDevice struct { - Path string - Rate uint64 -} - -func (t *ThrottleDevice) String() string { - return fmt.Sprintf("%s:%d", t.Path, t.Rate) -} diff --git a/vendor/github.com/docker/engine-api/types/client.go b/vendor/github.com/docker/engine-api/types/client.go deleted file mode 100644 index 1820f8bc8..000000000 --- a/vendor/github.com/docker/engine-api/types/client.go +++ /dev/null @@ -1,296 +0,0 @@ -package types - -import ( - "bufio" - "io" - "net" - - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/filters" - "github.com/docker/go-units" -) - -// CheckpointCreateOptions holds parameters to create a checkpoint from a container -type CheckpointCreateOptions struct { - CheckpointID string - Exit bool -} - -// ContainerAttachOptions holds parameters to attach to a container. -type ContainerAttachOptions struct { - Stream bool - Stdin bool - Stdout bool - Stderr bool - DetachKeys string -} - -// ContainerCommitOptions holds parameters to commit changes into a container. -type ContainerCommitOptions struct { - Reference string - Comment string - Author string - Changes []string - Pause bool - Config *container.Config -} - -// ContainerExecInspect holds information returned by exec inspect. -type ContainerExecInspect struct { - ExecID string - ContainerID string - Running bool - ExitCode int -} - -// ContainerListOptions holds parameters to list containers with. -type ContainerListOptions struct { - Quiet bool - Size bool - All bool - Latest bool - Since string - Before string - Limit int - Filter filters.Args -} - -// ContainerLogsOptions holds parameters to filter logs with. -type ContainerLogsOptions struct { - ShowStdout bool - ShowStderr bool - Since string - Timestamps bool - Follow bool - Tail string - Details bool -} - -// ContainerRemoveOptions holds parameters to remove containers. -type ContainerRemoveOptions struct { - RemoveVolumes bool - RemoveLinks bool - Force bool -} - -// ContainerStartOptions holds parameters to start containers. -type ContainerStartOptions struct { - CheckpointID string -} - -// CopyToContainerOptions holds information -// about files to copy into a container -type CopyToContainerOptions struct { - AllowOverwriteDirWithFile bool -} - -// EventsOptions hold parameters to filter events with. -type EventsOptions struct { - Since string - Until string - Filters filters.Args -} - -// NetworkListOptions holds parameters to filter the list of networks with. -type NetworkListOptions struct { - Filters filters.Args -} - -// HijackedResponse holds connection information for a hijacked request. -type HijackedResponse struct { - Conn net.Conn - Reader *bufio.Reader -} - -// Close closes the hijacked connection and reader. 
-func (h *HijackedResponse) Close() { - h.Conn.Close() -} - -// CloseWriter is an interface that implements structs -// that close input streams to prevent from writing. -type CloseWriter interface { - CloseWrite() error -} - -// CloseWrite closes a readWriter for writing. -func (h *HijackedResponse) CloseWrite() error { - if conn, ok := h.Conn.(CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - -// ImageBuildOptions holds the information -// necessary to build images. -type ImageBuildOptions struct { - Tags []string - SuppressOutput bool - RemoteContext string - NoCache bool - Remove bool - ForceRemove bool - PullParent bool - Isolation container.Isolation - CPUSetCPUs string - CPUSetMems string - CPUShares int64 - CPUQuota int64 - CPUPeriod int64 - Memory int64 - MemorySwap int64 - CgroupParent string - ShmSize int64 - Dockerfile string - Ulimits []*units.Ulimit - BuildArgs map[string]string - AuthConfigs map[string]AuthConfig - Context io.Reader - Labels map[string]string -} - -// ImageBuildResponse holds information -// returned by a server after building -// an image. -type ImageBuildResponse struct { - Body io.ReadCloser - OSType string -} - -// ImageCreateOptions holds information to create images. -type ImageCreateOptions struct { - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry -} - -// ImageImportSource holds source information for ImageImport -type ImageImportSource struct { - Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName) - SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source) -} - -// ImageImportOptions holds information to import images from the client host. -type ImageImportOptions struct { - Tag string // Tag is the name to tag this image with. This attribute is deprecated. - Message string // Message is the message to tag the image with - Changes []string // Changes are the raw changes to apply to this image -} - -// ImageListOptions holds parameters to filter the list of images with. -type ImageListOptions struct { - MatchName string - All bool - Filters filters.Args -} - -// ImageLoadResponse returns information to the client about a load process. -type ImageLoadResponse struct { - // Body must be closed to avoid a resource leak - Body io.ReadCloser - JSON bool -} - -// ImagePullOptions holds information to pull images. -type ImagePullOptions struct { - All bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - PrivilegeFunc RequestPrivilegeFunc -} - -// RequestPrivilegeFunc is a function interface that -// clients can supply to retry operations after -// getting an authorization error. -// This function returns the registry authentication -// header value in base 64 format, or an error -// if the privilege request fails. -type RequestPrivilegeFunc func() (string, error) - -//ImagePushOptions holds information to push images. -type ImagePushOptions ImagePullOptions - -// ImageRemoveOptions holds parameters to remove images. -type ImageRemoveOptions struct { - Force bool - PruneChildren bool -} - -// ImageSearchOptions holds parameters to search images with. -type ImageSearchOptions struct { - RegistryAuth string - PrivilegeFunc RequestPrivilegeFunc - Filters filters.Args - Limit int -} - -// ResizeOptions holds parameters to resize a tty. -// It can be used to resize container ttys and -// exec process ttys too. 
-type ResizeOptions struct { - Height int - Width int -} - -// VersionResponse holds version information for the client and the server -type VersionResponse struct { - Client *Version - Server *Version -} - -// ServerOK returns true when the client could connect to the docker server -// and parse the information received. It returns false otherwise. -func (v VersionResponse) ServerOK() bool { - return v.Server != nil -} - -// NodeListOptions holds parameters to list nodes with. -type NodeListOptions struct { - Filter filters.Args -} - -// NodeRemoveOptions holds parameters to remove nodes with. -type NodeRemoveOptions struct { - Force bool -} - -// ServiceCreateOptions contains the options to use when creating a service. -type ServiceCreateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string -} - -// ServiceCreateResponse contains the information returned to a client -// on the creation of a new service. -type ServiceCreateResponse struct { - // ID is the ID of the created service. - ID string -} - -// ServiceUpdateOptions contains the options to be used for updating services. -type ServiceUpdateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate - // into this field. While it does open API users up to racy writes, most - // users may not need that level of consistency in practice. -} - -// ServiceListOptions holds parameters to list services with. -type ServiceListOptions struct { - Filter filters.Args -} - -// TaskListOptions holds parameters to list tasks with. -type TaskListOptions struct { - Filter filters.Args -} - -// PluginRemoveOptions holds parameters to remove plugins. -type PluginRemoveOptions struct { - Force bool -} diff --git a/vendor/github.com/docker/engine-api/types/configs.go b/vendor/github.com/docker/engine-api/types/configs.go deleted file mode 100644 index 13e73cb9b..000000000 --- a/vendor/github.com/docker/engine-api/types/configs.go +++ /dev/null @@ -1,60 +0,0 @@ -package types - -import ( - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/network" -) - -// configs holds structs used for internal communication between the -// frontend (such as an http server) and the backend (such as the -// docker daemon). - -// ContainerCreateConfig is the parameter set to ContainerCreate() -type ContainerCreateConfig struct { - Name string - Config *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig - AdjustCPUShares bool -} - -// ContainerRmConfig holds arguments for the container remove -// operation. This struct is used to tell the backend what operations -// to perform. -type ContainerRmConfig struct { - ForceRemove, RemoveVolume, RemoveLink bool -} - -// ContainerCommitConfig contains build configs for commit operation, -// and is used when making a commit with the current state of the container. 
-type ContainerCommitConfig struct { - Pause bool - Repo string - Tag string - Author string - Comment string - // merge container config into commit config before commit - MergeConfigs bool - Config *container.Config -} - -// ExecConfig is a small subset of the Config struct that holds the configuration -// for the exec feature of docker. -type ExecConfig struct { - User string // User that will run the command - Privileged bool // Is the container in privileged mode - Tty bool // Attach standard streams to a tty. - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStderr bool // Attach the standard error - AttachStdout bool // Attach the standard output - Detach bool // Execute in detach mode - DetachKeys string // Escape keys for detach - Cmd []string // Execution commands and args -} - -// PluginRmConfig holds arguments for the plugin remove -// operation. This struct is used to tell the backend what operations -// to perform. -type PluginRmConfig struct { - ForceRemove bool -} diff --git a/vendor/github.com/docker/engine-api/types/container/config.go b/vendor/github.com/docker/engine-api/types/container/config.go deleted file mode 100644 index e300e119e..000000000 --- a/vendor/github.com/docker/engine-api/types/container/config.go +++ /dev/null @@ -1,62 +0,0 @@ -package container - -import ( - "time" - - "github.com/docker/engine-api/types/strslice" - "github.com/docker/go-connections/nat" -) - -// HealthConfig holds configuration settings for the HEALTHCHECK feature. -type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// Config contains the configuration data about a container. -// It should hold only portable information about the container. -// Here, "portable" means "independent from the host we are running on". -// Non-portable information *should* appear in HostConfig. -// All fields added to this struct must be marked `omitempty` to keep getting -// predictable hashes from the old `v1Compatibility` configuration. -type Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container, also support user:group - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. 
- Env []string // List of environment variable to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) - Image string // Name of the image as it was passed by the operator (eg. could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} diff --git a/vendor/github.com/docker/engine-api/types/container/host_config.go b/vendor/github.com/docker/engine-api/types/container/host_config.go deleted file mode 100644 index a9ff755b0..000000000 --- a/vendor/github.com/docker/engine-api/types/container/host_config.go +++ /dev/null @@ -1,320 +0,0 @@ -package container - -import ( - "strings" - - "github.com/docker/engine-api/types/blkiodev" - "github.com/docker/engine-api/types/strslice" - "github.com/docker/go-connections/nat" - "github.com/docker/go-units" -) - -// NetworkMode represents the container network stack. -type NetworkMode string - -// Isolation represents the isolation technology of a container. The supported -// values are platform specific -type Isolation string - -// IsDefault indicates the default isolation technology of a container. On Linux this -// is the native driver. On Windows, this is a Windows Server Container. -func (i Isolation) IsDefault() bool { - return strings.ToLower(string(i)) == "default" || string(i) == "" -} - -// IpcMode represents the container ipc stack. -type IpcMode string - -// IsPrivate indicates whether the container uses its private ipc stack. -func (n IpcMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsHost indicates whether the container uses the host's ipc stack. -func (n IpcMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether the container uses a container's ipc stack. -func (n IpcMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the ipc stack is valid. -func (n IpcMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - case "container": - if len(parts) != 2 || parts[1] == "" { - return false - } - default: - return false - } - return true -} - -// Container returns the name of the container ipc stack is going to be used. -func (n IpcMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// UsernsMode represents userns mode in the container. -type UsernsMode string - -// IsHost indicates whether the container uses the host's userns. 
-func (n UsernsMode) IsHost() bool { - return n == "host" -} - -// IsPrivate indicates whether the container uses the a private userns. -func (n UsernsMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// Valid indicates whether the userns is valid. -func (n UsernsMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// CgroupSpec represents the cgroup to use for the container. -type CgroupSpec string - -// IsContainer indicates whether the container is using another container cgroup -func (c CgroupSpec) IsContainer() bool { - parts := strings.SplitN(string(c), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the cgroup spec is valid. -func (c CgroupSpec) Valid() bool { - return c.IsContainer() || c == "" -} - -// Container returns the name of the container whose cgroup will be used. -func (c CgroupSpec) Container() string { - parts := strings.SplitN(string(c), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// UTSMode represents the UTS namespace of the container. -type UTSMode string - -// IsPrivate indicates whether the container uses its private UTS namespace. -func (n UTSMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// IsHost indicates whether the container uses the host's UTS namespace. -func (n UTSMode) IsHost() bool { - return n == "host" -} - -// Valid indicates whether the UTS namespace is valid. -func (n UTSMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// PidMode represents the pid namespace of the container. -type PidMode string - -// IsPrivate indicates whether the container uses its own new pid namespace. -func (n PidMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsHost indicates whether the container uses the host's pid namespace. -func (n PidMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether the container uses a container's pid namespace. -func (n PidMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the pid namespace is valid. -func (n PidMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - case "container": - if len(parts) != 2 || parts[1] == "" { - return false - } - default: - return false - } - return true -} - -// Container returns the name of the container whose pid namespace is going to be used. -func (n PidMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// DeviceMapping represents the device mapping between the host and the container. -type DeviceMapping struct { - PathOnHost string - PathInContainer string - CgroupPermissions string -} - -// RestartPolicy represents the restart policies of the container. -type RestartPolicy struct { - Name string - MaximumRetryCount int -} - -// IsNone indicates whether the container has the "no" restart policy. -// This means the container will not automatically restart when exiting. -func (rp *RestartPolicy) IsNone() bool { - return rp.Name == "no" || rp.Name == "" -} - -// IsAlways indicates whether the container has the "always" restart policy. 
-// This means the container will automatically restart regardless of the exit status. -func (rp *RestartPolicy) IsAlways() bool { - return rp.Name == "always" -} - -// IsOnFailure indicates whether the container has the "on-failure" restart policy. -// This means the container will automatically restart of exiting with a non-zero exit status. -func (rp *RestartPolicy) IsOnFailure() bool { - return rp.Name == "on-failure" -} - -// IsUnlessStopped indicates whether the container has the -// "unless-stopped" restart policy. This means the container will -// automatically restart unless user has put it to stopped state. -func (rp *RestartPolicy) IsUnlessStopped() bool { - return rp.Name == "unless-stopped" -} - -// IsSame compares two RestartPolicy to see if they are the same -func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { - return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount -} - -// LogConfig represents the logging configuration of the container. -type LogConfig struct { - Type string - Config map[string]string -} - -// Resources contains container's resources (cgroups config, ulimits...) -type Resources struct { - // Applicable to all platforms - CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) - Memory int64 // Memory limit (in bytes) - - // Applicable to UNIX platforms - CgroupParent string // Parent cgroup. - BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) - BlkioWeightDevice []*blkiodev.WeightDevice - BlkioDeviceReadBps []*blkiodev.ThrottleDevice - BlkioDeviceWriteBps []*blkiodev.ThrottleDevice - BlkioDeviceReadIOps []*blkiodev.ThrottleDevice - BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice - CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period - CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota - CpusetCpus string // CpusetCpus 0-2, 0,1 - CpusetMems string // CpusetMems 0-2, 0,1 - Devices []DeviceMapping // List of devices to map inside the container - DiskQuota int64 // Disk limit (in bytes) - KernelMemory int64 // Kernel memory limit (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit int64 // Setting pids limit for a container - Ulimits []*units.Ulimit // List of ulimits to be set in the container - - // Applicable to Windows - CPUCount int64 `json:"CpuCount"` // CPU count - CPUPercent int64 `json:"CpuPercent"` // CPU percent - IOMaximumIOps uint64 // Maximum IOps for the container system drive - IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive -} - -// UpdateConfig holds the mutable attributes of a Container. -// Those attributes can be updated at runtime. -type UpdateConfig struct { - // Contains container's resources (cgroups, ulimits) - Resources - RestartPolicy RestartPolicy -} - -// HostConfig the non-portable Config structure of a container. -// Here, "non-portable" means "dependent of the host we are running on". -// Portable information *should* appear in Config. 
-type HostConfig struct { - // Applicable to all platforms - Binds []string // List of volume bindings for this container - ContainerIDFile string // File (path) where the containerId is written - LogConfig LogConfig // Configuration of the logs for this container - NetworkMode NetworkMode // Network mode to use for the container - PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host - RestartPolicy RestartPolicy // Restart policy to be used for the container - AutoRemove bool // Automatically remove container when it exits - VolumeDriver string // Name of the volume driver used to mount volumes - VolumesFrom []string // List of volumes to take from other container - - // Applicable to UNIX platforms - CapAdd strslice.StrSlice // List of kernel capabilities to add to the container - CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container - DNS []string `json:"Dns"` // List of DNS server to lookup - DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for - DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for - ExtraHosts []string // List of extra hosts - GroupAdd []string // List of additional groups that the container process will run as - IpcMode IpcMode // IPC namespace to use for the container - Cgroup CgroupSpec // Cgroup to use for the container - Links []string // List of links (in the name:alias form) - OomScoreAdj int // Container preference for OOM-killing - PidMode PidMode // PID namespace to use for the container - Privileged bool // Is the container in privileged mode - PublishAllPorts bool // Should docker publish all exposed port for the container - ReadonlyRootfs bool // Is the container root filesystem in read-only - SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. - StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. - Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container - UTSMode UTSMode // UTS namespace to use for the container - UsernsMode UsernsMode // The user namespace to use for the container - ShmSize int64 // Total shm memory usage - Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container - Runtime string `json:",omitempty"` // Runtime to use with this container - - // Applicable to Windows - ConsoleSize [2]int // Initial console size - Isolation Isolation // Isolation technology of the container (eg default, hyperv) - - // Contains container's resources (cgroups, ulimits) - Resources -} diff --git a/vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go b/vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go deleted file mode 100644 index 4171059a4..000000000 --- a/vendor/github.com/docker/engine-api/types/container/hostconfig_unix.go +++ /dev/null @@ -1,81 +0,0 @@ -// +build !windows - -package container - -import "strings" - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() -} - -// IsPrivate indicates whether container uses it's private network stack. -func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsDefault indicates whether container uses the default network stack. -func (n NetworkMode) IsDefault() bool { - return n == "default" -} - -// NetworkName returns the name of the network stack. 
-func (n NetworkMode) NetworkName() string { - if n.IsBridge() { - return "bridge" - } else if n.IsHost() { - return "host" - } else if n.IsContainer() { - return "container" - } else if n.IsNone() { - return "none" - } else if n.IsDefault() { - return "default" - } else if n.IsUserDefined() { - return n.UserDefined() - } - return "" -} - -// IsBridge indicates whether container uses the bridge network stack -func (n NetworkMode) IsBridge() bool { - return n == "bridge" -} - -// IsHost indicates whether container uses the host network stack. -func (n NetworkMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether container uses a container network stack. -func (n NetworkMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// IsNone indicates whether container isn't using a network stack. -func (n NetworkMode) IsNone() bool { - return n == "none" -} - -// ConnectedContainer is the id of the container which network this container is connected to. -func (n NetworkMode) ConnectedContainer() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() -} - -//UserDefined indicates user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} diff --git a/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go b/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go deleted file mode 100644 index 0ee332ba6..000000000 --- a/vendor/github.com/docker/engine-api/types/container/hostconfig_windows.go +++ /dev/null @@ -1,87 +0,0 @@ -package container - -import ( - "strings" -) - -// IsDefault indicates whether container uses the default network stack. -func (n NetworkMode) IsDefault() bool { - return n == "default" -} - -// IsNone indicates whether container isn't using a network stack. -func (n NetworkMode) IsNone() bool { - return n == "none" -} - -// IsContainer indicates whether container uses a container network stack. -// Returns false as windows doesn't support this mode -func (n NetworkMode) IsContainer() bool { - return false -} - -// IsBridge indicates whether container uses the bridge network stack -// in windows it is given the name NAT -func (n NetworkMode) IsBridge() bool { - return n == "nat" -} - -// IsHost indicates whether container uses the host network stack. -// returns false as this is not supported by windows -func (n NetworkMode) IsHost() bool { - return false -} - -// IsPrivate indicates whether container uses its private network stack. -func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// ConnectedContainer is the id of the container which network this container is connected to. 
-// Returns blank string on windows -func (n NetworkMode) ConnectedContainer() string { - return "" -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsNone() && !n.IsBridge() -} - -// IsHyperV indicates the use of a Hyper-V partition for isolation -func (i Isolation) IsHyperV() bool { - return strings.ToLower(string(i)) == "hyperv" -} - -// IsProcess indicates the use of process isolation -func (i Isolation) IsProcess() bool { - return strings.ToLower(string(i)) == "process" -} - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() || i.IsHyperV() || i.IsProcess() -} - -// NetworkName returns the name of the network stack. -func (n NetworkMode) NetworkName() string { - if n.IsDefault() { - return "default" - } else if n.IsBridge() { - return "nat" - } else if n.IsNone() { - return "none" - } else if n.IsUserDefined() { - return n.UserDefined() - } - - return "" -} - -//UserDefined indicates user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} diff --git a/vendor/github.com/docker/engine-api/types/errors.go b/vendor/github.com/docker/engine-api/types/errors.go deleted file mode 100644 index 649ab9513..000000000 --- a/vendor/github.com/docker/engine-api/types/errors.go +++ /dev/null @@ -1,6 +0,0 @@ -package types - -// ErrorResponse is the response body of API errors. -type ErrorResponse struct { - Message string `json:"message"` -} diff --git a/vendor/github.com/docker/engine-api/types/filters/parse.go b/vendor/github.com/docker/engine-api/types/filters/parse.go deleted file mode 100644 index dc2c48b89..000000000 --- a/vendor/github.com/docker/engine-api/types/filters/parse.go +++ /dev/null @@ -1,307 +0,0 @@ -// Package filters provides helper function to parse and handle command line -// filter, used for example in docker ps or docker images commands. -package filters - -import ( - "encoding/json" - "errors" - "fmt" - "regexp" - "strings" - - "github.com/docker/engine-api/types/versions" -) - -// Args stores filter arguments as map key:{map key: bool}. -// It contains an aggregation of the map of arguments (which are in the form -// of -f 'key=value') based on the key, and stores values for the same key -// in a map with string keys and boolean values. -// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' -// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}} -type Args struct { - fields map[string]map[string]bool -} - -// NewArgs initializes a new Args struct. -func NewArgs() Args { - return Args{fields: map[string]map[string]bool{}} -} - -// ParseFlag parses the argument to the filter flag. Like -// -// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` -// -// If prev map is provided, then it is appended to, and returned. By default a new -// map is created. -func ParseFlag(arg string, prev Args) (Args, error) { - filters := prev - if len(arg) == 0 { - return filters, nil - } - - if !strings.Contains(arg, "=") { - return filters, ErrBadFormat - } - - f := strings.SplitN(arg, "=", 2) - - name := strings.ToLower(strings.TrimSpace(f[0])) - value := strings.TrimSpace(f[1]) - - filters.Add(name, value) - - return filters, nil -} - -// ErrBadFormat is an error returned in case of bad format for a filter. 
-var ErrBadFormat = errors.New("bad format of filter (expected name=value)") - -// ToParam packs the Args into a string for easy transport from client to server. -func ToParam(a Args) (string, error) { - // this way we don't URL encode {}, just empty space - if a.Len() == 0 { - return "", nil - } - - buf, err := json.Marshal(a.fields) - if err != nil { - return "", err - } - return string(buf), nil -} - -// ToParamWithVersion packs the Args into a string for easy transport from client to server. -// The generated string will depend on the specified version (corresponding to the API version). -func ToParamWithVersion(version string, a Args) (string, error) { - // this way we don't URL encode {}, just empty space - if a.Len() == 0 { - return "", nil - } - - // for daemons older than v1.10, filter must be of the form map[string][]string - buf := []byte{} - err := errors.New("") - if version != "" && versions.LessThan(version, "1.22") { - buf, err = json.Marshal(convertArgsToSlice(a.fields)) - } else { - buf, err = json.Marshal(a.fields) - } - if err != nil { - return "", err - } - return string(buf), nil -} - -// FromParam unpacks the filter Args. -func FromParam(p string) (Args, error) { - if len(p) == 0 { - return NewArgs(), nil - } - - r := strings.NewReader(p) - d := json.NewDecoder(r) - - m := map[string]map[string]bool{} - if err := d.Decode(&m); err != nil { - r.Seek(0, 0) - - // Allow parsing old arguments in slice format. - // Because other libraries might be sending them in this format. - deprecated := map[string][]string{} - if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil { - m = deprecatedArgs(deprecated) - } else { - return NewArgs(), err - } - } - return Args{m}, nil -} - -// Get returns the list of values associates with a field. -// It returns a slice of strings to keep backwards compatibility with old code. -func (filters Args) Get(field string) []string { - values := filters.fields[field] - if values == nil { - return make([]string, 0) - } - slice := make([]string, 0, len(values)) - for key := range values { - slice = append(slice, key) - } - return slice -} - -// Add adds a new value to a filter field. -func (filters Args) Add(name, value string) { - if _, ok := filters.fields[name]; ok { - filters.fields[name][value] = true - } else { - filters.fields[name] = map[string]bool{value: true} - } -} - -// Del removes a value from a filter field. -func (filters Args) Del(name, value string) { - if _, ok := filters.fields[name]; ok { - delete(filters.fields[name], value) - } -} - -// Len returns the number of fields in the arguments. -func (filters Args) Len() int { - return len(filters.fields) -} - -// MatchKVList returns true if the values for the specified field matches the ones -// from the sources. -// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, -// field is 'label' and sources are {'label1': '1', 'label2': '2'} -// it returns true. 
-func (filters Args) MatchKVList(field string, sources map[string]string) bool { - fieldValues := filters.fields[field] - - //do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - - if sources == nil || len(sources) == 0 { - return false - } - - for name2match := range fieldValues { - testKV := strings.SplitN(name2match, "=", 2) - - v, ok := sources[testKV[0]] - if !ok { - return false - } - if len(testKV) == 2 && testKV[1] != v { - return false - } - } - - return true -} - -// Match returns true if the values for the specified field matches the source string -// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, -// field is 'image.name' and source is 'ubuntu' -// it returns true. -func (filters Args) Match(field, source string) bool { - if filters.ExactMatch(field, source) { - return true - } - - fieldValues := filters.fields[field] - for name2match := range fieldValues { - match, err := regexp.MatchString(name2match, source) - if err != nil { - continue - } - if match { - return true - } - } - return false -} - -// ExactMatch returns true if the source matches exactly one of the filters. -func (filters Args) ExactMatch(field, source string) bool { - fieldValues, ok := filters.fields[field] - //do not filter if there is no filter set or cannot determine filter - if !ok || len(fieldValues) == 0 { - return true - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one. -func (filters Args) UniqueExactMatch(field, source string) bool { - fieldValues := filters.fields[field] - //do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - if len(filters.fields[field]) != 1 { - return false - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// FuzzyMatch returns true if the source matches exactly one of the filters, -// or the source has one of the filters as a prefix. -func (filters Args) FuzzyMatch(field, source string) bool { - if filters.ExactMatch(field, source) { - return true - } - - fieldValues := filters.fields[field] - for prefix := range fieldValues { - if strings.HasPrefix(source, prefix) { - return true - } - } - return false -} - -// Include returns true if the name of the field to filter is in the filters. -func (filters Args) Include(field string) bool { - _, ok := filters.fields[field] - return ok -} - -// Validate ensures that all the fields in the filter are valid. -// It returns an error as soon as it finds an invalid field. -func (filters Args) Validate(accepted map[string]bool) error { - for name := range filters.fields { - if !accepted[name] { - return fmt.Errorf("Invalid filter '%s'", name) - } - } - return nil -} - -// WalkValues iterates over the list of filtered values for a field. -// It stops the iteration if it finds an error and it returns that error. 
-func (filters Args) WalkValues(field string, op func(value string) error) error { - if _, ok := filters.fields[field]; !ok { - return nil - } - for v := range filters.fields[field] { - if err := op(v); err != nil { - return err - } - } - return nil -} - -func deprecatedArgs(d map[string][]string) map[string]map[string]bool { - m := map[string]map[string]bool{} - for k, v := range d { - values := map[string]bool{} - for _, vv := range v { - values[vv] = true - } - m[k] = values - } - return m -} - -func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { - m := map[string][]string{} - for k, v := range f { - values := []string{} - for kk := range v { - if v[kk] { - values = append(values, kk) - } - } - m[k] = values - } - return m -} diff --git a/vendor/github.com/docker/engine-api/types/network/network.go b/vendor/github.com/docker/engine-api/types/network/network.go deleted file mode 100644 index 47080b652..000000000 --- a/vendor/github.com/docker/engine-api/types/network/network.go +++ /dev/null @@ -1,53 +0,0 @@ -package network - -// Address represents an IP address -type Address struct { - Addr string - PrefixLen int -} - -// IPAM represents IP Address Management -type IPAM struct { - Driver string - Options map[string]string //Per network IPAM driver options - Config []IPAMConfig -} - -// IPAMConfig represents IPAM configurations -type IPAMConfig struct { - Subnet string `json:",omitempty"` - IPRange string `json:",omitempty"` - Gateway string `json:",omitempty"` - AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` -} - -// EndpointIPAMConfig represents IPAM configurations for the endpoint -type EndpointIPAMConfig struct { - IPv4Address string `json:",omitempty"` - IPv6Address string `json:",omitempty"` - LinkLocalIPs []string `json:",omitempty"` -} - -// EndpointSettings stores the network endpoint details -type EndpointSettings struct { - // Configurations - IPAMConfig *EndpointIPAMConfig - Links []string - Aliases []string - // Operational data - NetworkID string - EndpointID string - Gateway string - IPAddress string - IPPrefixLen int - IPv6Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - MacAddress string -} - -// NetworkingConfig represents the container's networking configuration for each of its interfaces -// Carries the networking configs specified in the `docker run` and `docker network connect` commands -type NetworkingConfig struct { - EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network -} diff --git a/vendor/github.com/docker/engine-api/types/plugin.go b/vendor/github.com/docker/engine-api/types/plugin.go deleted file mode 100644 index 05030ff3d..000000000 --- a/vendor/github.com/docker/engine-api/types/plugin.go +++ /dev/null @@ -1,169 +0,0 @@ -// +build experimental - -package types - -import ( - "encoding/json" - "fmt" -) - -// PluginInstallOptions holds parameters to install a plugin. 
-type PluginInstallOptions struct { - Disabled bool - AcceptAllPermissions bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - PrivilegeFunc RequestPrivilegeFunc - AcceptPermissionsFunc func(PluginPrivileges) (bool, error) -} - -// PluginConfig represents the values of settings potentially modifiable by a user -type PluginConfig struct { - Mounts []PluginMount - Env []string - Args []string - Devices []PluginDevice -} - -// Plugin represents a Docker plugin for the remote API -type Plugin struct { - ID string `json:"Id,omitempty"` - Name string - Tag string - Active bool - Config PluginConfig - Manifest PluginManifest -} - -// PluginsListResponse contains the response for the remote API -type PluginsListResponse []*Plugin - -const ( - authzDriver = "AuthzDriver" - graphDriver = "GraphDriver" - ipamDriver = "IpamDriver" - networkDriver = "NetworkDriver" - volumeDriver = "VolumeDriver" -) - -// PluginInterfaceType represents a type that a plugin implements. -type PluginInterfaceType struct { - Prefix string // This is always "docker" - Capability string // Capability should be validated against the above list. - Version string // Plugin API version. Depends on the capability -} - -// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType -func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { - versionIndex := len(p) - prefixIndex := 0 - if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { - return fmt.Errorf("%q is not a plugin interface type", p) - } - p = p[1 : len(p)-1] -loop: - for i, b := range p { - switch b { - case '.': - prefixIndex = i - case '/': - versionIndex = i - break loop - } - } - t.Prefix = string(p[:prefixIndex]) - t.Capability = string(p[prefixIndex+1 : versionIndex]) - if versionIndex < len(p) { - t.Version = string(p[versionIndex+1:]) - } - return nil -} - -// MarshalJSON implements json.Marshaler for PluginInterfaceType -func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) -} - -// String implements fmt.Stringer for PluginInterfaceType -func (t PluginInterfaceType) String() string { - return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) -} - -// PluginInterface describes the interface between Docker and plugin -type PluginInterface struct { - Types []PluginInterfaceType - Socket string -} - -// PluginSetting is to be embedded in other structs, if they are supposed to be -// modifiable by the user. 
-type PluginSetting struct { - Name string - Description string - Settable []string -} - -// PluginNetwork represents the network configuration for a plugin -type PluginNetwork struct { - Type string -} - -// PluginMount represents the mount configuration for a plugin -type PluginMount struct { - PluginSetting - Source *string - Destination string - Type string - Options []string -} - -// PluginEnv represents an environment variable for a plugin -type PluginEnv struct { - PluginSetting - Value *string -} - -// PluginArgs represents the command line arguments for a plugin -type PluginArgs struct { - PluginSetting - Value []string -} - -// PluginDevice represents a device for a plugin -type PluginDevice struct { - PluginSetting - Path *string -} - -// PluginUser represents the user for the plugin's process -type PluginUser struct { - UID uint32 `json:"Uid,omitempty"` - GID uint32 `json:"Gid,omitempty"` -} - -// PluginManifest represents the manifest of a plugin -type PluginManifest struct { - ManifestVersion string - Description string - Documentation string - Interface PluginInterface - Entrypoint []string - Workdir string - User PluginUser `json:",omitempty"` - Network PluginNetwork - Capabilities []string - Mounts []PluginMount - Devices []PluginDevice - Env []PluginEnv - Args PluginArgs -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -type PluginPrivilege struct { - Name string - Description string - Value []string -} - -// PluginPrivileges is a list of PluginPrivilege -type PluginPrivileges []PluginPrivilege diff --git a/vendor/github.com/docker/engine-api/types/registry/registry.go b/vendor/github.com/docker/engine-api/types/registry/registry.go deleted file mode 100644 index d2aca6f02..000000000 --- a/vendor/github.com/docker/engine-api/types/registry/registry.go +++ /dev/null @@ -1,99 +0,0 @@ -package registry - -import ( - "encoding/json" - "net" -) - -// ServiceConfig stores daemon registry services configuration. 
-type ServiceConfig struct { - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string -} - -// NetIPNet is the net.IPNet type, which can be marshalled and -// unmarshalled to JSON -type NetIPNet net.IPNet - -// MarshalJSON returns the JSON representation of the IPNet -func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { - return json.Marshal((*net.IPNet)(ipnet).String()) -} - -// UnmarshalJSON sets the IPNet from a byte array of JSON -func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { - var ipnetStr string - if err = json.Unmarshal(b, &ipnetStr); err == nil { - var cidr *net.IPNet - if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { - *ipnet = NetIPNet(*cidr) - } - } - return -} - -// IndexInfo contains information about a registry -// -// RepositoryInfo Examples: -// { -// "Index" : { -// "Name" : "docker.io", -// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], -// "Secure" : true, -// "Official" : true, -// }, -// "RemoteName" : "library/debian", -// "LocalName" : "debian", -// "CanonicalName" : "docker.io/debian" -// "Official" : true, -// } -// -// { -// "Index" : { -// "Name" : "127.0.0.1:5000", -// "Mirrors" : [], -// "Secure" : false, -// "Official" : false, -// }, -// "RemoteName" : "user/repo", -// "LocalName" : "127.0.0.1:5000/user/repo", -// "CanonicalName" : "127.0.0.1:5000/user/repo", -// "Official" : false, -// } -type IndexInfo struct { - // Name is the name of the registry, such as "docker.io" - Name string - // Mirrors is a list of mirrors, expressed as URIs - Mirrors []string - // Secure is set to false if the registry is part of the list of - // insecure registries. Insecure registries accept HTTP and/or accept - // HTTPS with certificates from unknown CAs. - Secure bool - // Official indicates whether this is an official registry - Official bool -} - -// SearchResult describes a search result returned from a registry -type SearchResult struct { - // StarCount indicates the number of stars this repository has - StarCount int `json:"star_count"` - // IsOfficial is true if the result is from an official repository. - IsOfficial bool `json:"is_official"` - // Name is the name of the repository - Name string `json:"name"` - // IsAutomated indicates whether the result is automated - IsAutomated bool `json:"is_automated"` - // Description is a textual description of the repository - Description string `json:"description"` -} - -// SearchResults lists a collection search results returned from a registry -type SearchResults struct { - // Query contains the query string that generated the search results - Query string `json:"query"` - // NumResults indicates the number of results the query returned - NumResults int `json:"num_results"` - // Results is a slice containing the actual results for the search - Results []SearchResult `json:"results"` -} diff --git a/vendor/github.com/docker/engine-api/types/seccomp.go b/vendor/github.com/docker/engine-api/types/seccomp.go deleted file mode 100644 index 854f1c453..000000000 --- a/vendor/github.com/docker/engine-api/types/seccomp.go +++ /dev/null @@ -1,73 +0,0 @@ -package types - -// Seccomp represents the config for a seccomp profile for syscall restriction. 
-type Seccomp struct { - DefaultAction Action `json:"defaultAction"` - Architectures []Arch `json:"architectures"` - Syscalls []*Syscall `json:"syscalls"` -} - -// Arch used for additional architectures -type Arch string - -// Additional architectures permitted to be used for system calls -// By default only the native architecture of the kernel is permitted -const ( - ArchX86 Arch = "SCMP_ARCH_X86" - ArchX86_64 Arch = "SCMP_ARCH_X86_64" - ArchX32 Arch = "SCMP_ARCH_X32" - ArchARM Arch = "SCMP_ARCH_ARM" - ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" - ArchMIPS Arch = "SCMP_ARCH_MIPS" - ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" - ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" - ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" - ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" - ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" - ArchPPC Arch = "SCMP_ARCH_PPC" - ArchPPC64 Arch = "SCMP_ARCH_PPC64" - ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE" - ArchS390 Arch = "SCMP_ARCH_S390" - ArchS390X Arch = "SCMP_ARCH_S390X" -) - -// Action taken upon Seccomp rule match -type Action string - -// Define actions for Seccomp rules -const ( - ActKill Action = "SCMP_ACT_KILL" - ActTrap Action = "SCMP_ACT_TRAP" - ActErrno Action = "SCMP_ACT_ERRNO" - ActTrace Action = "SCMP_ACT_TRACE" - ActAllow Action = "SCMP_ACT_ALLOW" -) - -// Operator used to match syscall arguments in Seccomp -type Operator string - -// Define operators for syscall arguments in Seccomp -const ( - OpNotEqual Operator = "SCMP_CMP_NE" - OpLessThan Operator = "SCMP_CMP_LT" - OpLessEqual Operator = "SCMP_CMP_LE" - OpEqualTo Operator = "SCMP_CMP_EQ" - OpGreaterEqual Operator = "SCMP_CMP_GE" - OpGreaterThan Operator = "SCMP_CMP_GT" - OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ" -) - -// Arg used for matching specific syscall arguments in Seccomp -type Arg struct { - Index uint `json:"index"` - Value uint64 `json:"value"` - ValueTwo uint64 `json:"valueTwo"` - Op Operator `json:"op"` -} - -// Syscall is used to match a syscall in Seccomp -type Syscall struct { - Name string `json:"name"` - Action Action `json:"action"` - Args []*Arg `json:"args"` -} diff --git a/vendor/github.com/docker/engine-api/types/stats.go b/vendor/github.com/docker/engine-api/types/stats.go deleted file mode 100644 index b420ebe7f..000000000 --- a/vendor/github.com/docker/engine-api/types/stats.go +++ /dev/null @@ -1,115 +0,0 @@ -// Package types is used for API stability in the types and response to the -// consumers of the API stats endpoint. -package types - -import "time" - -// ThrottlingData stores CPU throttling stats of one running container -type ThrottlingData struct { - // Number of periods with throttling active - Periods uint64 `json:"periods"` - // Number of periods when the container hits its throttling limit. - ThrottledPeriods uint64 `json:"throttled_periods"` - // Aggregate time the container was throttled for in nanoseconds. - ThrottledTime uint64 `json:"throttled_time"` -} - -// CPUUsage stores All CPU stats aggregated since container inception. -type CPUUsage struct { - // Total CPU time consumed. - // Units: nanoseconds. - TotalUsage uint64 `json:"total_usage"` - // Total CPU time consumed per core. - // Units: nanoseconds. - PercpuUsage []uint64 `json:"percpu_usage"` - // Time spent by tasks of the cgroup in kernel mode. - // Units: nanoseconds. - UsageInKernelmode uint64 `json:"usage_in_kernelmode"` - // Time spent by tasks of the cgroup in user mode. - // Units: nanoseconds. 
- UsageInUsermode uint64 `json:"usage_in_usermode"` -} - -// CPUStats aggregates and wraps all CPU related info of container -type CPUStats struct { - CPUUsage CPUUsage `json:"cpu_usage"` - SystemUsage uint64 `json:"system_cpu_usage"` - ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` -} - -// MemoryStats aggregates All memory stats since container inception -type MemoryStats struct { - // current res_counter usage for memory - Usage uint64 `json:"usage"` - // maximum usage ever recorded. - MaxUsage uint64 `json:"max_usage"` - // TODO(vishh): Export these as stronger types. - // all the stats exported via memory.stat. - Stats map[string]uint64 `json:"stats"` - // number of times memory usage hits limits. - Failcnt uint64 `json:"failcnt"` - Limit uint64 `json:"limit"` -} - -// BlkioStatEntry is one small entity to store a piece of Blkio stats -// TODO Windows: This can be factored out -type BlkioStatEntry struct { - Major uint64 `json:"major"` - Minor uint64 `json:"minor"` - Op string `json:"op"` - Value uint64 `json:"value"` -} - -// BlkioStats stores All IO service stats for data read and write -// TODO Windows: This can be factored out -type BlkioStats struct { - // number of bytes transferred to and from the block device - IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` - IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` - IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` - IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` - IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` - IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` - IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` - SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` -} - -// NetworkStats aggregates All network stats of one container -// TODO Windows: This will require refactoring -type NetworkStats struct { - RxBytes uint64 `json:"rx_bytes"` - RxPackets uint64 `json:"rx_packets"` - RxErrors uint64 `json:"rx_errors"` - RxDropped uint64 `json:"rx_dropped"` - TxBytes uint64 `json:"tx_bytes"` - TxPackets uint64 `json:"tx_packets"` - TxErrors uint64 `json:"tx_errors"` - TxDropped uint64 `json:"tx_dropped"` -} - -// PidsStats contains the stats of a container's pids -type PidsStats struct { - // Current is the number of pids in the cgroup - Current uint64 `json:"current,omitempty"` - // Limit is the hard limit on the number of pids in the cgroup. - // A "Limit" of 0 means that there is no limit. 
- Limit uint64 `json:"limit,omitempty"` -} - -// Stats is Ultimate struct aggregating all types of stats of one container -type Stats struct { - Read time.Time `json:"read"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` - CPUStats CPUStats `json:"cpu_stats,omitempty"` - MemoryStats MemoryStats `json:"memory_stats,omitempty"` - BlkioStats BlkioStats `json:"blkio_stats,omitempty"` - PidsStats PidsStats `json:"pids_stats,omitempty"` -} - -// StatsJSON is newly used Networks -type StatsJSON struct { - Stats - - // Networks request version >=1.21 - Networks map[string]NetworkStats `json:"networks,omitempty"` -} diff --git a/vendor/github.com/docker/engine-api/types/strslice/strslice.go b/vendor/github.com/docker/engine-api/types/strslice/strslice.go deleted file mode 100644 index bad493fb8..000000000 --- a/vendor/github.com/docker/engine-api/types/strslice/strslice.go +++ /dev/null @@ -1,30 +0,0 @@ -package strslice - -import "encoding/json" - -// StrSlice represents a string or an array of strings. -// We need to override the json decoder to accept both options. -type StrSlice []string - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of -// strings. This method is needed to implement json.Unmarshaler. -func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - // With no input, we preserve the existing value by returning nil and - // leaving the target alone. This allows defining default values for - // the type. - return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/vendor/github.com/docker/engine-api/types/swarm/common.go b/vendor/github.com/docker/engine-api/types/swarm/common.go deleted file mode 100644 index b87f54536..000000000 --- a/vendor/github.com/docker/engine-api/types/swarm/common.go +++ /dev/null @@ -1,21 +0,0 @@ -package swarm - -import "time" - -// Version represent the internal object version. -type Version struct { - Index uint64 `json:",omitempty"` -} - -// Meta is base object inherited by most of the other once. -type Meta struct { - Version Version `json:",omitempty"` - CreatedAt time.Time `json:",omitempty"` - UpdatedAt time.Time `json:",omitempty"` -} - -// Annotations represents how to describe an object. -type Annotations struct { - Name string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/engine-api/types/swarm/container.go b/vendor/github.com/docker/engine-api/types/swarm/container.go deleted file mode 100644 index 29f2e8a64..000000000 --- a/vendor/github.com/docker/engine-api/types/swarm/container.go +++ /dev/null @@ -1,67 +0,0 @@ -package swarm - -import "time" - -// ContainerSpec represents the spec of a container. -type ContainerSpec struct { - Image string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Command []string `json:",omitempty"` - Args []string `json:",omitempty"` - Env []string `json:",omitempty"` - Dir string `json:",omitempty"` - User string `json:",omitempty"` - Mounts []Mount `json:",omitempty"` - StopGracePeriod *time.Duration `json:",omitempty"` -} - -// MountType represents the type of a mount. -type MountType string - -const ( - // MountTypeBind BIND - MountTypeBind MountType = "bind" - // MountTypeVolume VOLUME - MountTypeVolume MountType = "volume" -) - -// Mount represents a mount (volume). 
-type Mount struct { - Type MountType `json:",omitempty"` - Source string `json:",omitempty"` - Target string `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - - BindOptions *BindOptions `json:",omitempty"` - VolumeOptions *VolumeOptions `json:",omitempty"` -} - -// MountPropagation represents the propagation of a mount. -type MountPropagation string - -const ( - // MountPropagationRPrivate RPRIVATE - MountPropagationRPrivate MountPropagation = "rprivate" - // MountPropagationPrivate PRIVATE - MountPropagationPrivate MountPropagation = "private" - // MountPropagationRShared RSHARED - MountPropagationRShared MountPropagation = "rshared" - // MountPropagationShared SHARED - MountPropagationShared MountPropagation = "shared" - // MountPropagationRSlave RSLAVE - MountPropagationRSlave MountPropagation = "rslave" - // MountPropagationSlave SLAVE - MountPropagationSlave MountPropagation = "slave" -) - -// BindOptions defines options specific to mounts of type "bind". -type BindOptions struct { - Propagation MountPropagation `json:",omitempty"` -} - -// VolumeOptions represents the options for a mount of type volume. -type VolumeOptions struct { - NoCopy bool `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - DriverConfig *Driver `json:",omitempty"` -} diff --git a/vendor/github.com/docker/engine-api/types/swarm/network.go b/vendor/github.com/docker/engine-api/types/swarm/network.go deleted file mode 100644 index 84804da2f..000000000 --- a/vendor/github.com/docker/engine-api/types/swarm/network.go +++ /dev/null @@ -1,99 +0,0 @@ -package swarm - -// Endpoint represents an endpoint. -type Endpoint struct { - Spec EndpointSpec `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` - VirtualIPs []EndpointVirtualIP `json:",omitempty"` -} - -// EndpointSpec represents the spec of an endpoint. -type EndpointSpec struct { - Mode ResolutionMode `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` -} - -// ResolutionMode represents a resolution mode. -type ResolutionMode string - -const ( - // ResolutionModeVIP VIP - ResolutionModeVIP ResolutionMode = "vip" - // ResolutionModeDNSRR DNSRR - ResolutionModeDNSRR ResolutionMode = "dnsrr" -) - -// PortConfig represents the config of a port. -type PortConfig struct { - Name string `json:",omitempty"` - Protocol PortConfigProtocol `json:",omitempty"` - TargetPort uint32 `json:",omitempty"` - PublishedPort uint32 `json:",omitempty"` -} - -// PortConfigProtocol represents the protocol of a port. -type PortConfigProtocol string - -const ( - // TODO(stevvooe): These should be used generally, not just for PortConfig. - - // PortConfigProtocolTCP TCP - PortConfigProtocolTCP PortConfigProtocol = "tcp" - // PortConfigProtocolUDP UDP - PortConfigProtocolUDP PortConfigProtocol = "udp" -) - -// EndpointVirtualIP represents the virtual ip of a port. -type EndpointVirtualIP struct { - NetworkID string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Network represents a network. -type Network struct { - ID string - Meta - Spec NetworkSpec `json:",omitempty"` - DriverState Driver `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` -} - -// NetworkSpec represents the spec of a network. -type NetworkSpec struct { - Annotations - DriverConfiguration *Driver `json:",omitempty"` - IPv6Enabled bool `json:",omitempty"` - Internal bool `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` -} - -// NetworkAttachmentConfig represents the configuration of a network attachment. 
-type NetworkAttachmentConfig struct { - Target string `json:",omitempty"` - Aliases []string `json:",omitempty"` -} - -// NetworkAttachment represents a network attachment. -type NetworkAttachment struct { - Network Network `json:",omitempty"` - Addresses []string `json:",omitempty"` -} - -// IPAMOptions represents ipam options. -type IPAMOptions struct { - Driver Driver `json:",omitempty"` - Configs []IPAMConfig `json:",omitempty"` -} - -// IPAMConfig represents ipam configuration. -type IPAMConfig struct { - Subnet string `json:",omitempty"` - Range string `json:",omitempty"` - Gateway string `json:",omitempty"` -} - -// Driver represents a driver (network/volume). -type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/engine-api/types/swarm/node.go b/vendor/github.com/docker/engine-api/types/swarm/node.go deleted file mode 100644 index 9987662a5..000000000 --- a/vendor/github.com/docker/engine-api/types/swarm/node.go +++ /dev/null @@ -1,107 +0,0 @@ -package swarm - -// Node represents a node. -type Node struct { - ID string - Meta - - Spec NodeSpec `json:",omitempty"` - Description NodeDescription `json:",omitempty"` - Status NodeStatus `json:",omitempty"` - ManagerStatus *ManagerStatus `json:",omitempty"` -} - -// NodeSpec represents the spec of a node. -type NodeSpec struct { - Annotations - Role NodeRole `json:",omitempty"` - Availability NodeAvailability `json:",omitempty"` -} - -// NodeRole represents the role of a node. -type NodeRole string - -const ( - // NodeRoleWorker WORKER - NodeRoleWorker NodeRole = "worker" - // NodeRoleManager MANAGER - NodeRoleManager NodeRole = "manager" -) - -// NodeAvailability represents the availability of a node. -type NodeAvailability string - -const ( - // NodeAvailabilityActive ACTIVE - NodeAvailabilityActive NodeAvailability = "active" - // NodeAvailabilityPause PAUSE - NodeAvailabilityPause NodeAvailability = "pause" - // NodeAvailabilityDrain DRAIN - NodeAvailabilityDrain NodeAvailability = "drain" -) - -// NodeDescription represents the description of a node. -type NodeDescription struct { - Hostname string `json:",omitempty"` - Platform Platform `json:",omitempty"` - Resources Resources `json:",omitempty"` - Engine EngineDescription `json:",omitempty"` -} - -// Platform represents the platfrom (Arch/OS). -type Platform struct { - Architecture string `json:",omitempty"` - OS string `json:",omitempty"` -} - -// EngineDescription represents the description of an engine. -type EngineDescription struct { - EngineVersion string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Plugins []PluginDescription `json:",omitempty"` -} - -// PluginDescription represents the description of an engine plugin. -type PluginDescription struct { - Type string `json:",omitempty"` - Name string `json:",omitempty"` -} - -// NodeStatus represents the status of a node. -type NodeStatus struct { - State NodeState `json:",omitempty"` - Message string `json:",omitempty"` -} - -// Reachability represents the reachability of a node. -type Reachability string - -const ( - // ReachabilityUnknown UNKNOWN - ReachabilityUnknown Reachability = "unknown" - // ReachabilityUnreachable UNREACHABLE - ReachabilityUnreachable Reachability = "unreachable" - // ReachabilityReachable REACHABLE - ReachabilityReachable Reachability = "reachable" -) - -// ManagerStatus represents the status of a manager. 
-type ManagerStatus struct { - Leader bool `json:",omitempty"` - Reachability Reachability `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// NodeState represents the state of a node. -type NodeState string - -const ( - // NodeStateUnknown UNKNOWN - NodeStateUnknown NodeState = "unknown" - // NodeStateDown DOWN - NodeStateDown NodeState = "down" - // NodeStateReady READY - NodeStateReady NodeState = "ready" - // NodeStateDisconnected DISCONNECTED - NodeStateDisconnected NodeState = "disconnected" -) diff --git a/vendor/github.com/docker/engine-api/types/swarm/service.go b/vendor/github.com/docker/engine-api/types/swarm/service.go deleted file mode 100644 index 676fc0e0b..000000000 --- a/vendor/github.com/docker/engine-api/types/swarm/service.go +++ /dev/null @@ -1,73 +0,0 @@ -package swarm - -import "time" - -// Service represents a service. -type Service struct { - ID string - Meta - Spec ServiceSpec `json:",omitempty"` - Endpoint Endpoint `json:",omitempty"` - UpdateStatus UpdateStatus `json:",omitempty"` -} - -// ServiceSpec represents the spec of a service. -type ServiceSpec struct { - Annotations - - // TaskTemplate defines how the service should construct new tasks when - // orchestrating this service. - TaskTemplate TaskSpec `json:",omitempty"` - Mode ServiceMode `json:",omitempty"` - UpdateConfig *UpdateConfig `json:",omitempty"` - Networks []NetworkAttachmentConfig `json:",omitempty"` - EndpointSpec *EndpointSpec `json:",omitempty"` -} - -// ServiceMode represents the mode of a service. -type ServiceMode struct { - Replicated *ReplicatedService `json:",omitempty"` - Global *GlobalService `json:",omitempty"` -} - -// UpdateState is the state of a service update. -type UpdateState string - -const ( - // UpdateStateUpdating is the updating state. - UpdateStateUpdating UpdateState = "updating" - // UpdateStatePaused is the paused state. - UpdateStatePaused UpdateState = "paused" - // UpdateStateCompleted is the completed state. - UpdateStateCompleted UpdateState = "completed" -) - -// UpdateStatus reports the status of a service update. -type UpdateStatus struct { - State UpdateState `json:",omitempty"` - StartedAt time.Time `json:",omitempty"` - CompletedAt time.Time `json:",omitempty"` - Message string `json:",omitempty"` -} - -// ReplicatedService is a kind of ServiceMode. -type ReplicatedService struct { - Replicas *uint64 `json:",omitempty"` -} - -// GlobalService is a kind of ServiceMode. -type GlobalService struct{} - -const ( - // UpdateFailureActionPause PAUSE - UpdateFailureActionPause = "pause" - // UpdateFailureActionContinue CONTINUE - UpdateFailureActionContinue = "continue" -) - -// UpdateConfig represents the update configuration. -type UpdateConfig struct { - Parallelism uint64 `json:",omitempty"` - Delay time.Duration `json:",omitempty"` - FailureAction string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/engine-api/types/swarm/swarm.go b/vendor/github.com/docker/engine-api/types/swarm/swarm.go deleted file mode 100644 index 0a5414101..000000000 --- a/vendor/github.com/docker/engine-api/types/swarm/swarm.go +++ /dev/null @@ -1,141 +0,0 @@ -package swarm - -import "time" - -// ClusterInfo represents info about a the cluster for outputing in "info" -// it contains the same information as "Swarm", but without the JoinTokens -type ClusterInfo struct { - ID string - Meta - Spec Spec -} - -// Swarm represents a swarm. 
-type Swarm struct { - ClusterInfo - JoinTokens JoinTokens -} - -// JoinTokens contains the tokens workers and managers need to join the swarm. -type JoinTokens struct { - Worker string - Manager string -} - -// Spec represents the spec of a swarm. -type Spec struct { - Annotations - - Orchestration OrchestrationConfig `json:",omitempty"` - Raft RaftConfig `json:",omitempty"` - Dispatcher DispatcherConfig `json:",omitempty"` - CAConfig CAConfig `json:",omitempty"` - TaskDefaults TaskDefaults `json:",omitempty"` -} - -// OrchestrationConfig represents orchestration configuration. -type OrchestrationConfig struct { - TaskHistoryRetentionLimit int64 `json:",omitempty"` -} - -// TaskDefaults parameterizes cluster-level task creation with default values. -type TaskDefaults struct { - // LogDriver selects the log driver to use for tasks created in the - // orchestrator if unspecified by a service. - // - // Updating this value will only have an affect on new tasks. Old tasks - // will continue use their previously configured log driver until - // recreated. - LogDriver *Driver `json:",omitempty"` -} - -// RaftConfig represents raft configuration. -type RaftConfig struct { - SnapshotInterval uint64 `json:",omitempty"` - KeepOldSnapshots uint64 `json:",omitempty"` - LogEntriesForSlowFollowers uint64 `json:",omitempty"` - HeartbeatTick uint32 `json:",omitempty"` - ElectionTick uint32 `json:",omitempty"` -} - -// DispatcherConfig represents dispatcher configuration. -type DispatcherConfig struct { - HeartbeatPeriod uint64 `json:",omitempty"` -} - -// CAConfig represents CA configuration. -type CAConfig struct { - NodeCertExpiry time.Duration `json:",omitempty"` - ExternalCAs []*ExternalCA `json:",omitempty"` -} - -// ExternalCAProtocol represents type of external CA. -type ExternalCAProtocol string - -// ExternalCAProtocolCFSSL CFSSL -const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" - -// ExternalCA defines external CA to be used by the cluster. -type ExternalCA struct { - Protocol ExternalCAProtocol - URL string - Options map[string]string `json:",omitempty"` -} - -// InitRequest is the request used to init a swarm. -type InitRequest struct { - ListenAddr string - AdvertiseAddr string - ForceNewCluster bool - Spec Spec -} - -// JoinRequest is the request used to join a swarm. -type JoinRequest struct { - ListenAddr string - AdvertiseAddr string - RemoteAddrs []string - JoinToken string // accept by secret -} - -// LocalNodeState represents the state of the local node. -type LocalNodeState string - -const ( - // LocalNodeStateInactive INACTIVE - LocalNodeStateInactive LocalNodeState = "inactive" - // LocalNodeStatePending PENDING - LocalNodeStatePending LocalNodeState = "pending" - // LocalNodeStateActive ACTIVE - LocalNodeStateActive LocalNodeState = "active" - // LocalNodeStateError ERROR - LocalNodeStateError LocalNodeState = "error" -) - -// Info represents generic information about swarm. -type Info struct { - NodeID string - NodeAddr string - - LocalNodeState LocalNodeState - ControlAvailable bool - Error string - - RemoteManagers []Peer - Nodes int - Managers int - - Cluster ClusterInfo -} - -// Peer represents a peer. -type Peer struct { - NodeID string - Addr string -} - -// UpdateFlags contains flags for SwarmUpdate. 
-type UpdateFlags struct { - RotateWorkerToken bool - RotateManagerToken bool -} diff --git a/vendor/github.com/docker/engine-api/types/swarm/task.go b/vendor/github.com/docker/engine-api/types/swarm/task.go deleted file mode 100644 index fa8228a49..000000000 --- a/vendor/github.com/docker/engine-api/types/swarm/task.go +++ /dev/null @@ -1,115 +0,0 @@ -package swarm - -import "time" - -// TaskState represents the state of a task. -type TaskState string - -const ( - // TaskStateNew NEW - TaskStateNew TaskState = "new" - // TaskStateAllocated ALLOCATED - TaskStateAllocated TaskState = "allocated" - // TaskStatePending PENDING - TaskStatePending TaskState = "pending" - // TaskStateAssigned ASSIGNED - TaskStateAssigned TaskState = "assigned" - // TaskStateAccepted ACCEPTED - TaskStateAccepted TaskState = "accepted" - // TaskStatePreparing PREPARING - TaskStatePreparing TaskState = "preparing" - // TaskStateReady READY - TaskStateReady TaskState = "ready" - // TaskStateStarting STARTING - TaskStateStarting TaskState = "starting" - // TaskStateRunning RUNNING - TaskStateRunning TaskState = "running" - // TaskStateComplete COMPLETE - TaskStateComplete TaskState = "complete" - // TaskStateShutdown SHUTDOWN - TaskStateShutdown TaskState = "shutdown" - // TaskStateFailed FAILED - TaskStateFailed TaskState = "failed" - // TaskStateRejected REJECTED - TaskStateRejected TaskState = "rejected" -) - -// Task represents a task. -type Task struct { - ID string - Meta - - Spec TaskSpec `json:",omitempty"` - ServiceID string `json:",omitempty"` - Slot int `json:",omitempty"` - NodeID string `json:",omitempty"` - Status TaskStatus `json:",omitempty"` - DesiredState TaskState `json:",omitempty"` - NetworksAttachments []NetworkAttachment `json:",omitempty"` -} - -// TaskSpec represents the spec of a task. -type TaskSpec struct { - ContainerSpec ContainerSpec `json:",omitempty"` - Resources *ResourceRequirements `json:",omitempty"` - RestartPolicy *RestartPolicy `json:",omitempty"` - Placement *Placement `json:",omitempty"` - - // LogDriver specifies the LogDriver to use for tasks created from this - // spec. If not present, the one on cluster default on swarm.Spec will be - // used, finally falling back to the engine default if not specified. - LogDriver *Driver `json:",omitempty"` -} - -// Resources represents resources (CPU/Memory). -type Resources struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` -} - -// ResourceRequirements represents resources requirements. -type ResourceRequirements struct { - Limits *Resources `json:",omitempty"` - Reservations *Resources `json:",omitempty"` -} - -// Placement represents orchestration parameters. -type Placement struct { - Constraints []string `json:",omitempty"` -} - -// RestartPolicy represents the restart policy. -type RestartPolicy struct { - Condition RestartPolicyCondition `json:",omitempty"` - Delay *time.Duration `json:",omitempty"` - MaxAttempts *uint64 `json:",omitempty"` - Window *time.Duration `json:",omitempty"` -} - -// RestartPolicyCondition represents when to restart. -type RestartPolicyCondition string - -const ( - // RestartPolicyConditionNone NONE - RestartPolicyConditionNone RestartPolicyCondition = "none" - // RestartPolicyConditionOnFailure ON_FAILURE - RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" - // RestartPolicyConditionAny ANY - RestartPolicyConditionAny RestartPolicyCondition = "any" -) - -// TaskStatus represents the status of a task. 
-type TaskStatus struct { - Timestamp time.Time `json:",omitempty"` - State TaskState `json:",omitempty"` - Message string `json:",omitempty"` - Err string `json:",omitempty"` - ContainerStatus ContainerStatus `json:",omitempty"` -} - -// ContainerStatus represents the status of a container. -type ContainerStatus struct { - ContainerID string `json:",omitempty"` - PID int `json:",omitempty"` - ExitCode int `json:",omitempty"` -} diff --git a/vendor/github.com/docker/engine-api/types/types.go b/vendor/github.com/docker/engine-api/types/types.go deleted file mode 100644 index b6f912592..000000000 --- a/vendor/github.com/docker/engine-api/types/types.go +++ /dev/null @@ -1,515 +0,0 @@ -package types - -import ( - "os" - "time" - - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/network" - "github.com/docker/engine-api/types/registry" - "github.com/docker/engine-api/types/swarm" - "github.com/docker/go-connections/nat" -) - -// ContainerCreateResponse contains the information returned to a client on the -// creation of a new container. -type ContainerCreateResponse struct { - // ID is the ID of the created container. - ID string `json:"Id"` - - // Warnings are any warnings encountered during the creation of the container. - Warnings []string `json:"Warnings"` -} - -// ContainerExecCreateResponse contains response of Remote API: -// POST "/containers/{name:.*}/exec" -type ContainerExecCreateResponse struct { - // ID is the exec ID. - ID string `json:"Id"` -} - -// ContainerUpdateResponse contains response of Remote API: -// POST "/containers/{name:.*}/update" -type ContainerUpdateResponse struct { - // Warnings are any warnings encountered during the updating of the container. - Warnings []string `json:"Warnings"` -} - -// AuthResponse contains response of Remote API: -// POST "/auth" -type AuthResponse struct { - // Status is the authentication status - Status string `json:"Status"` - - // IdentityToken is an opaque token used for authenticating - // a user after a successful login. 
- IdentityToken string `json:"IdentityToken,omitempty"` -} - -// ContainerWaitResponse contains response of Remote API: -// POST "/containers/"+containerID+"/wait" -type ContainerWaitResponse struct { - // StatusCode is the status code of the wait job - StatusCode int `json:"StatusCode"` -} - -// ContainerCommitResponse contains response of Remote API: -// POST "/commit?container="+containerID -type ContainerCommitResponse struct { - ID string `json:"Id"` -} - -// ContainerChange contains response of Remote API: -// GET "/containers/{name:.*}/changes" -type ContainerChange struct { - Kind int - Path string -} - -// ImageHistory contains response of Remote API: -// GET "/images/{name:.*}/history" -type ImageHistory struct { - ID string `json:"Id"` - Created int64 - CreatedBy string - Tags []string - Size int64 - Comment string -} - -// ImageDelete contains response of Remote API: -// DELETE "/images/{name:.*}" -type ImageDelete struct { - Untagged string `json:",omitempty"` - Deleted string `json:",omitempty"` -} - -// Image contains response of Remote API: -// GET "/images/json" -type Image struct { - ID string `json:"Id"` - ParentID string `json:"ParentId"` - RepoTags []string - RepoDigests []string - Created int64 - Size int64 - VirtualSize int64 - Labels map[string]string -} - -// GraphDriverData returns Image's graph driver config info -// when calling inspect command -type GraphDriverData struct { - Name string - Data map[string]string -} - -// RootFS returns Image's RootFS description including the layer IDs. -type RootFS struct { - Type string - Layers []string `json:",omitempty"` - BaseLayer string `json:",omitempty"` -} - -// ImageInspect contains response of Remote API: -// GET "/images/{name:.*}/json" -type ImageInspect struct { - ID string `json:"Id"` - RepoTags []string - RepoDigests []string - Parent string - Comment string - Created string - Container string - ContainerConfig *container.Config - DockerVersion string - Author string - Config *container.Config - Architecture string - Os string - Size int64 - VirtualSize int64 - GraphDriver GraphDriverData - RootFS RootFS -} - -// Port stores open ports info of container -// e.g. {"PrivatePort": 8080, "PublicPort": 80, "Type": "tcp"} -type Port struct { - IP string `json:",omitempty"` - PrivatePort int - PublicPort int `json:",omitempty"` - Type string -} - -// Container contains response of Remote API: -// GET "/containers/json" -type Container struct { - ID string `json:"Id"` - Names []string - Image string - ImageID string - Command string - Created int64 - Ports []Port - SizeRw int64 `json:",omitempty"` - SizeRootFs int64 `json:",omitempty"` - Labels map[string]string - State string - Status string - HostConfig struct { - NetworkMode string `json:",omitempty"` - } - NetworkSettings *SummaryNetworkSettings - Mounts []MountPoint -} - -// CopyConfig contains request body of Remote API: -// POST "/containers/"+containerID+"/copy" -type CopyConfig struct { - Resource string -} - -// ContainerPathStat is used to encode the header from -// GET "/containers/{name:.*}/archive" -// "Name" is the file or directory name. 
-type ContainerPathStat struct { - Name string `json:"name"` - Size int64 `json:"size"` - Mode os.FileMode `json:"mode"` - Mtime time.Time `json:"mtime"` - LinkTarget string `json:"linkTarget"` -} - -// ContainerProcessList contains response of Remote API: -// GET "/containers/{name:.*}/top" -type ContainerProcessList struct { - Processes [][]string - Titles []string -} - -// Version contains response of Remote API: -// GET "/version" -type Version struct { - Version string - APIVersion string `json:"ApiVersion"` - GitCommit string - GoVersion string - Os string - Arch string - KernelVersion string `json:",omitempty"` - Experimental bool `json:",omitempty"` - BuildTime string `json:",omitempty"` -} - -// Info contains response of Remote API: -// GET "/info" -type Info struct { - ID string - Containers int - ContainersRunning int - ContainersPaused int - ContainersStopped int - Images int - Driver string - DriverStatus [][2]string - SystemStatus [][2]string - Plugins PluginsInfo - MemoryLimit bool - SwapLimit bool - KernelMemory bool - CPUCfsPeriod bool `json:"CpuCfsPeriod"` - CPUCfsQuota bool `json:"CpuCfsQuota"` - CPUShares bool - CPUSet bool - IPv4Forwarding bool - BridgeNfIptables bool - BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` - Debug bool - NFd int - OomKillDisable bool - NGoroutines int - SystemTime string - ExecutionDriver string - LoggingDriver string - CgroupDriver string - NEventsListener int - KernelVersion string - OperatingSystem string - OSType string - Architecture string - IndexServerAddress string - RegistryConfig *registry.ServiceConfig - NCPU int - MemTotal int64 - DockerRootDir string - HTTPProxy string `json:"HttpProxy"` - HTTPSProxy string `json:"HttpsProxy"` - NoProxy string - Name string - Labels []string - ExperimentalBuild bool - ServerVersion string - ClusterStore string - ClusterAdvertise string - SecurityOptions []string - Runtimes map[string]Runtime - DefaultRuntime string - Swarm swarm.Info - // LiveRestoreEnabled determines whether containers should be kept - // running when the daemon is shutdown or upon daemon start if - // running containers are detected - LiveRestoreEnabled bool -} - -// PluginsInfo is a temp struct holding Plugins name -// registered with docker daemon. 
It is used by Info struct -type PluginsInfo struct { - // List of Volume plugins registered - Volume []string - // List of Network plugins registered - Network []string - // List of Authorization plugins registered - Authorization []string -} - -// ExecStartCheck is a temp struct used by execStart -// Config fields is part of ExecConfig in runconfig package -type ExecStartCheck struct { - // ExecStart will first check if it's detached - Detach bool - // Check if there's a tty - Tty bool -} - -// HealthcheckResult stores information about a single run of a healthcheck probe -type HealthcheckResult struct { - Start time.Time // Start is the time this check started - End time.Time // End is the time this check ended - ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe - Output string // Output from last check -} - -// Health states -const ( - Starting = "starting" // Starting indicates that the container is not yet ready - Healthy = "healthy" // Healthy indicates that the container is running correctly - Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem -) - -// Health stores information about the container's healthcheck results -type Health struct { - Status string // Status is one of Starting, Healthy or Unhealthy - FailingStreak int // FailingStreak is the number of consecutive failures - Log []*HealthcheckResult // Log contains the last few results (oldest first) -} - -// ContainerState stores container's running state -// it's part of ContainerJSONBase and will return by "inspect" command -type ContainerState struct { - Status string - Running bool - Paused bool - Restarting bool - OOMKilled bool - Dead bool - Pid int - ExitCode int - Error string - StartedAt string - FinishedAt string - Health *Health `json:",omitempty"` -} - -// ContainerNode stores information about the node that a container -// is running on. It's only available in Docker Swarm -type ContainerNode struct { - ID string - IPAddress string `json:"IP"` - Addr string - Name string - Cpus int - Memory int64 - Labels map[string]string -} - -// ContainerJSONBase contains response of Remote API: -// GET "/containers/{name:.*}/json" -type ContainerJSONBase struct { - ID string `json:"Id"` - Created string - Path string - Args []string - State *ContainerState - Image string - ResolvConfPath string - HostnamePath string - HostsPath string - LogPath string - Node *ContainerNode `json:",omitempty"` - Name string - RestartCount int - Driver string - MountLabel string - ProcessLabel string - AppArmorProfile string - ExecIDs []string - HostConfig *container.HostConfig - GraphDriver GraphDriverData - SizeRw *int64 `json:",omitempty"` - SizeRootFs *int64 `json:",omitempty"` -} - -// ContainerJSON is newly used struct along with MountPoint -type ContainerJSON struct { - *ContainerJSONBase - Mounts []MountPoint - Config *container.Config - NetworkSettings *NetworkSettings -} - -// NetworkSettings exposes the network settings in the api -type NetworkSettings struct { - NetworkSettingsBase - DefaultNetworkSettings - Networks map[string]*network.EndpointSettings -} - -// SummaryNetworkSettings provides a summary of container's networks -// in /containers/json -type SummaryNetworkSettings struct { - Networks map[string]*network.EndpointSettings -} - -// NetworkSettingsBase holds basic information about networks -type NetworkSettingsBase struct { - Bridge string // Bridge is the Bridge name the network uses(e.g. 
`docker0`) - SandboxID string // SandboxID uniquely represents a container's network stack - HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface - LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix - LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address - Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port - SandboxKey string // SandboxKey identifies the sandbox - SecondaryIPAddresses []network.Address - SecondaryIPv6Addresses []network.Address -} - -// DefaultNetworkSettings holds network information -// during the 2 release deprecation period. -// It will be removed in Docker 1.11. -type DefaultNetworkSettings struct { - EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox - Gateway string // Gateway holds the gateway address for the network - GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address - GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address - IPAddress string // IPAddress holds the IPv4 address for the network - IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address - IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 - MacAddress string // MacAddress holds the MAC address for the network -} - -// MountPoint represents a mount point configuration inside the container. -type MountPoint struct { - Name string `json:",omitempty"` - Source string - Destination string - Driver string `json:",omitempty"` - Mode string - RW bool - Propagation string -} - -// Volume represents the configuration of a volume for the remote API -type Volume struct { - Name string // Name is the name of the volume - Driver string // Driver is the Driver name used to create the volume - Mountpoint string // Mountpoint is the location on disk of the volume - Status map[string]interface{} `json:",omitempty"` // Status provides low-level status information about the volume - Labels map[string]string // Labels is metadata specific to the volume - Scope string // Scope describes the level at which the volume exists (e.g. `global` for cluster-wide or `local` for machine level) -} - -// VolumesListResponse contains the response for the remote API: -// GET "/volumes" -type VolumesListResponse struct { - Volumes []*Volume // Volumes is the list of volumes being returned - Warnings []string // Warnings is a list of warnings that occurred when getting the list from the volume drivers -} - -// VolumeCreateRequest contains the response for the remote API: -// POST "/volumes/create" -type VolumeCreateRequest struct { - Name string // Name is the requested name of the volume - Driver string // Driver is the name of the driver that should be used to create the volume - DriverOpts map[string]string // DriverOpts holds the driver specific options to use for when creating the volume. - Labels map[string]string // Labels holds metadata specific to the volume being created. -} - -// NetworkResource is the body of the "get network" http response message -type NetworkResource struct { - Name string // Name is the requested name of the network - ID string `json:"Id"` // ID uniquely identifies a network on a single machine - Scope string // Scope describes the level at which the network exists (e.g. 
`global` for cluster-wide or `local` for machine level) - Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) - EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 - IPAM network.IPAM // IPAM is the network's IP Address Management - Internal bool // Internal represents if the network is used internal only - Containers map[string]EndpointResource // Containers contains endpoints belonging to the network - Options map[string]string // Options holds the network specific options to use for when creating the network - Labels map[string]string // Labels holds metadata specific to the network being created -} - -// EndpointResource contains network resources allocated and used for a container in a network -type EndpointResource struct { - Name string - EndpointID string - MacAddress string - IPv4Address string - IPv6Address string -} - -// NetworkCreate is the expected body of the "create network" http request message -type NetworkCreate struct { - CheckDuplicate bool - Driver string - EnableIPv6 bool - IPAM network.IPAM - Internal bool - Options map[string]string - Labels map[string]string -} - -// NetworkCreateRequest is the request message sent to the server for network create call. -type NetworkCreateRequest struct { - NetworkCreate - Name string -} - -// NetworkCreateResponse is the response message sent by the server for network create call -type NetworkCreateResponse struct { - ID string `json:"Id"` - Warning string -} - -// NetworkConnect represents the data to be used to connect a container to the network -type NetworkConnect struct { - Container string - EndpointConfig *network.EndpointSettings `json:",omitempty"` -} - -// NetworkDisconnect represents the data to be used to disconnect a container from the network -type NetworkDisconnect struct { - Container string - Force bool -} - -// Checkpoint represents the details of a checkpoint -type Checkpoint struct { - Name string // Name is the name of the checkpoint -} - -// Runtime describes an OCI runtime -type Runtime struct { - Path string `json:"path"` - Args []string `json:"runtimeArgs,omitempty"` -} diff --git a/vendor/github.com/docker/engine-api/types/versions/README.md b/vendor/github.com/docker/engine-api/types/versions/README.md deleted file mode 100644 index cdac50a53..000000000 --- a/vendor/github.com/docker/engine-api/types/versions/README.md +++ /dev/null @@ -1,14 +0,0 @@ -## Legacy API type versions - -This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. - -Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. - -### Package name conventions - -The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: - -1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. -2. We cannot use `_` because golint complains about it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`. 
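The versions README above explains the `vNpM` package-naming convention, and the compare.go hunk just below removes the helpers (`LessThan`, `GreaterThanOrEqualTo`, `Equal`, ...) used to compare API version strings. A hedged sketch of typical usage follows; the import path is the one shown in the diff and the version literals are arbitrary examples.

```go
// Illustrative only: calling the version-comparison helpers from the
// vendored package removed in this change. Version strings are arbitrary.
package main

import (
	"fmt"

	"github.com/docker/engine-api/types/versions"
)

func main() {
	fmt.Println(versions.LessThan("1.19", "1.21"))             // true
	fmt.Println(versions.GreaterThanOrEqualTo("1.22", "1.22")) // true
	fmt.Println(versions.Equal("1.20", "1.20.0"))              // true: missing components compare as 0
}
```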
- -For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`. diff --git a/vendor/github.com/docker/engine-api/types/versions/compare.go b/vendor/github.com/docker/engine-api/types/versions/compare.go deleted file mode 100644 index 611d4fed6..000000000 --- a/vendor/github.com/docker/engine-api/types/versions/compare.go +++ /dev/null @@ -1,62 +0,0 @@ -package versions - -import ( - "strconv" - "strings" -) - -// compare compares two version strings -// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. -func compare(v1, v2 string) int { - var ( - currTab = strings.Split(v1, ".") - otherTab = strings.Split(v2, ".") - ) - - max := len(currTab) - if len(otherTab) > max { - max = len(otherTab) - } - for i := 0; i < max; i++ { - var currInt, otherInt int - - if len(currTab) > i { - currInt, _ = strconv.Atoi(currTab[i]) - } - if len(otherTab) > i { - otherInt, _ = strconv.Atoi(otherTab[i]) - } - if currInt > otherInt { - return 1 - } - if otherInt > currInt { - return -1 - } - } - return 0 -} - -// LessThan checks if a version is less than another -func LessThan(v, other string) bool { - return compare(v, other) == -1 -} - -// LessThanOrEqualTo checks if a version is less than or equal to another -func LessThanOrEqualTo(v, other string) bool { - return compare(v, other) <= 0 -} - -// GreaterThan checks if a version is greater than another -func GreaterThan(v, other string) bool { - return compare(v, other) == 1 -} - -// GreaterThanOrEqualTo checks if a version is greater than or equal to another -func GreaterThanOrEqualTo(v, other string) bool { - return compare(v, other) >= 0 -} - -// Equal checks if a version is equal to another -func Equal(v, other string) bool { - return compare(v, other) == 0 -} diff --git a/vendor/github.com/docker/go-connections/LICENSE b/vendor/github.com/docker/go-connections/LICENSE deleted file mode 100644 index b55b37bc3..000000000 --- a/vendor/github.com/docker/go-connections/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go deleted file mode 100644 index bca3c2c99..000000000 --- a/vendor/github.com/docker/go-connections/nat/nat.go +++ /dev/null @@ -1,243 +0,0 @@ -// Package nat is a convenience package for manipulation of strings describing network ports. -package nat - -import ( - "fmt" - "net" - "strconv" - "strings" -) - -const ( - // portSpecTemplate is the expected format for port specifications - portSpecTemplate = "ip:hostPort:containerPort" -) - -// PortBinding represents a binding between a Host IP address and a Host Port -type PortBinding struct { - // HostIP is the host IP Address - HostIP string `json:"HostIp"` - // HostPort is the host port number - HostPort string -} - -// PortMap is a collection of PortBinding indexed by Port -type PortMap map[Port][]PortBinding - -// PortSet is a collection of structs indexed by Port -type PortSet map[Port]struct{} - -// Port is a string containing port number and protocol in the format "80/tcp" -type Port string - -// NewPort creates a new instance of a Port given a protocol and port number or port range -func NewPort(proto, port string) (Port, error) { - // Check for parsing issues on "port" now so we can avoid having - // to check it later on. 
- - portStartInt, portEndInt, err := ParsePortRangeToInt(port) - if err != nil { - return "", err - } - - if portStartInt == portEndInt { - return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil - } - return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil -} - -// ParsePort parses the port number string and returns an int -func ParsePort(rawPort string) (int, error) { - if len(rawPort) == 0 { - return 0, nil - } - port, err := strconv.ParseUint(rawPort, 10, 16) - if err != nil { - return 0, err - } - return int(port), nil -} - -// ParsePortRangeToInt parses the port range string and returns start/end ints -func ParsePortRangeToInt(rawPort string) (int, int, error) { - if len(rawPort) == 0 { - return 0, 0, nil - } - start, end, err := ParsePortRange(rawPort) - if err != nil { - return 0, 0, err - } - return int(start), int(end), nil -} - -// Proto returns the protocol of a Port -func (p Port) Proto() string { - proto, _ := SplitProtoPort(string(p)) - return proto -} - -// Port returns the port number of a Port -func (p Port) Port() string { - _, port := SplitProtoPort(string(p)) - return port -} - -// Int returns the port number of a Port as an int -func (p Port) Int() int { - portStr := p.Port() - if len(portStr) == 0 { - return 0 - } - - // We don't need to check for an error because we're going to - // assume that any error would have been found, and reported, in NewPort() - port, _ := strconv.ParseUint(portStr, 10, 16) - return int(port) -} - -// Range returns the start/end port numbers of a Port range as ints -func (p Port) Range() (int, int, error) { - return ParsePortRangeToInt(p.Port()) -} - -// SplitProtoPort splits a port in the format of proto/port -func SplitProtoPort(rawPort string) (string, string) { - parts := strings.Split(rawPort, "/") - l := len(parts) - if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { - return "", "" - } - if l == 1 { - return "tcp", rawPort - } - if len(parts[1]) == 0 { - return "tcp", parts[0] - } - return parts[1], parts[0] -} - -func validateProto(proto string) bool { - for _, availableProto := range []string{"tcp", "udp"} { - if availableProto == proto { - return true - } - } - return false -} - -// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses -// these in to the internal types -func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { - var ( - exposedPorts = make(map[Port]struct{}, len(ports)) - bindings = make(map[Port][]PortBinding) - ) - for _, rawPort := range ports { - portMappings, err := ParsePortSpec(rawPort) - if err != nil { - return nil, nil, err - } - - for _, portMapping := range portMappings { - port := portMapping.Port - if _, exists := exposedPorts[port]; !exists { - exposedPorts[port] = struct{}{} - } - bslice, exists := bindings[port] - if !exists { - bslice = []PortBinding{} - } - bindings[port] = append(bslice, portMapping.Binding) - } - } - return exposedPorts, bindings, nil -} - -// PortMapping is a data object mapping a Port to a PortBinding -type PortMapping struct { - Port Port - Binding PortBinding -} - -// ParsePortSpec parses a port specification string into a slice of PortMappings -func ParsePortSpec(rawPort string) ([]PortMapping, error) { - proto := "tcp" - - if i := strings.LastIndex(rawPort, "/"); i != -1 { - proto = rawPort[i+1:] - rawPort = rawPort[:i] - } - if !strings.Contains(rawPort, ":") { - rawPort = fmt.Sprintf("::%s", rawPort) - } else if len(strings.Split(rawPort, ":")) == 2 { - rawPort = 
fmt.Sprintf(":%s", rawPort) - } - - parts, err := PartParser(portSpecTemplate, rawPort) - if err != nil { - return nil, err - } - - var ( - containerPort = parts["containerPort"] - rawIP = parts["ip"] - hostPort = parts["hostPort"] - ) - - if rawIP != "" && net.ParseIP(rawIP) == nil { - return nil, fmt.Errorf("Invalid ip address: %s", rawIP) - } - if containerPort == "" { - return nil, fmt.Errorf("No port specified: %s", rawPort) - } - - startPort, endPort, err := ParsePortRange(containerPort) - if err != nil { - return nil, fmt.Errorf("Invalid containerPort: %s", containerPort) - } - - var startHostPort, endHostPort uint64 = 0, 0 - if len(hostPort) > 0 { - startHostPort, endHostPort, err = ParsePortRange(hostPort) - if err != nil { - return nil, fmt.Errorf("Invalid hostPort: %s", hostPort) - } - } - - if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { - // Allow host port range iff containerPort is not a range. - // In this case, use the host port range as the dynamic - // host port range to allocate into. - if endPort != startPort { - return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) - } - } - - if !validateProto(strings.ToLower(proto)) { - return nil, fmt.Errorf("Invalid proto: %s", proto) - } - - ports := []PortMapping{} - for i := uint64(0); i <= (endPort - startPort); i++ { - containerPort = strconv.FormatUint(startPort+i, 10) - if len(hostPort) > 0 { - hostPort = strconv.FormatUint(startHostPort+i, 10) - } - // Set hostPort to a range only if there is a single container port - // and a dynamic host port. - if startPort == endPort && startHostPort != endHostPort { - hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) - } - port, err := NewPort(strings.ToLower(proto), containerPort) - if err != nil { - return nil, err - } - - binding := PortBinding{ - HostIP: rawIP, - HostPort: hostPort, - } - ports = append(ports, PortMapping{Port: port, Binding: binding}) - } - return ports, nil -} diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go deleted file mode 100644 index 872050205..000000000 --- a/vendor/github.com/docker/go-connections/nat/parse.go +++ /dev/null @@ -1,56 +0,0 @@ -package nat - -import ( - "fmt" - "strconv" - "strings" -) - -// PartParser parses and validates the specified string (data) using the specified template -// e.g. ip:public:private -> 192.168.0.1:80:8000 -func PartParser(template, data string) (map[string]string, error) { - // ip:public:private - var ( - templateParts = strings.Split(template, ":") - parts = strings.Split(data, ":") - out = make(map[string]string, len(templateParts)) - ) - if len(parts) != len(templateParts) { - return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) - } - - for i, t := range templateParts { - value := "" - if len(parts) > i { - value = parts[i] - } - out[t] = value - } - return out, nil -} - -// ParsePortRange parses and validates the specified string as a port-range (8000-9000) -func ParsePortRange(ports string) (uint64, uint64, error) { - if ports == "" { - return 0, 0, fmt.Errorf("Empty string specified for ports.") - } - if !strings.Contains(ports, "-") { - start, err := strconv.ParseUint(ports, 10, 16) - end := start - return start, end, err - } - - parts := strings.Split(ports, "-") - start, err := strconv.ParseUint(parts[0], 10, 16) - if err != nil { - return 0, 0, err - } - end, err := strconv.ParseUint(parts[1], 10, 16) - if err != nil { - return 0, 0, err - } - if end < start { - return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) - } - return start, end, nil -} diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go deleted file mode 100644 index ce950171e..000000000 --- a/vendor/github.com/docker/go-connections/nat/sort.go +++ /dev/null @@ -1,96 +0,0 @@ -package nat - -import ( - "sort" - "strings" -) - -type portSorter struct { - ports []Port - by func(i, j Port) bool -} - -func (s *portSorter) Len() int { - return len(s.ports) -} - -func (s *portSorter) Swap(i, j int) { - s.ports[i], s.ports[j] = s.ports[j], s.ports[i] -} - -func (s *portSorter) Less(i, j int) bool { - ip := s.ports[i] - jp := s.ports[j] - - return s.by(ip, jp) -} - -// Sort sorts a list of ports using the provided predicate -// This function should compare `i` and `j`, returning true if `i` is -// considered to be less than `j` -func Sort(ports []Port, predicate func(i, j Port) bool) { - s := &portSorter{ports, predicate} - sort.Sort(s) -} - -type portMapEntry struct { - port Port - binding PortBinding -} - -type portMapSorter []portMapEntry - -func (s portMapSorter) Len() int { return len(s) } -func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// sort the port so that the order is: -// 1. port with larger specified bindings -// 2. larger port -// 3. port with tcp protocol -func (s portMapSorter) Less(i, j int) bool { - pi, pj := s[i].port, s[j].port - hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) - return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") -} - -// SortPortMap sorts the list of ports and their respected mapping. The ports -// will explicit HostPort will be placed first. 
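The nat hunks above remove the port-spec parsing helpers (`NewPort`, `ParsePortSpecs`, `PartParser`, `ParsePortRange`). A hedged sketch of how they are typically used; the spec string `ip:hostPort:containerPort` is an arbitrary example, not part of this change.

```go
// Illustrative only: parsing a Docker-style port specification with the
// removed go-connections/nat helpers.
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// "ip:hostPort:containerPort/proto" — an arbitrary example spec.
	exposed, bindings, err := nat.ParsePortSpecs([]string{"127.0.0.1:8080:80/tcp"})
	if err != nil {
		panic(err)
	}

	port, _ := nat.NewPort("tcp", "80")
	fmt.Println(port.Proto(), port.Int()) // tcp 80

	_, ok := exposed[port]
	fmt.Println(ok) // true: 80/tcp is exposed

	b := bindings[port][0]
	fmt.Println(b.HostIP, b.HostPort) // 127.0.0.1 8080
}
```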
-func SortPortMap(ports []Port, bindings PortMap) { - s := portMapSorter{} - for _, p := range ports { - if binding, ok := bindings[p]; ok { - for _, b := range binding { - s = append(s, portMapEntry{port: p, binding: b}) - } - bindings[p] = []PortBinding{} - } else { - s = append(s, portMapEntry{port: p}) - } - } - - sort.Sort(s) - var ( - i int - pm = make(map[Port]struct{}) - ) - // reorder ports - for _, entry := range s { - if _, ok := pm[entry.port]; !ok { - ports[i] = entry.port - pm[entry.port] = struct{}{} - i++ - } - // reorder bindings for this port - if _, ok := bindings[entry.port]; ok { - bindings[entry.port] = append(bindings[entry.port], entry.binding) - } - } -} - -func toInt(s string) uint64 { - i, _, err := ParsePortRange(s) - if err != nil { - i = 0 - } - return i -} diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md deleted file mode 100644 index 9ea86d784..000000000 --- a/vendor/github.com/docker/go-units/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# Contributing to go-units - -Want to hack on go-units? Awesome! Here are instructions to get you started. - -go-units is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. 
-``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE deleted file mode 100644 index b55b37bc3..000000000 --- a/vendor/github.com/docker/go-units/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS deleted file mode 100644 index 477be8b21..000000000 --- a/vendor/github.com/docker/go-units/MAINTAINERS +++ /dev/null @@ -1,27 +0,0 @@ -# go-connections maintainers file -# -# This file describes who runs the docker/go-connections project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "calavera", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. - - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - [people.calavera] - Name = "David Calavera" - Email = "david.calavera@gmail.com" - GitHub = "calavera" diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md deleted file mode 100644 index 4f70a4e13..000000000 --- a/vendor/github.com/docker/go-units/README.md +++ /dev/null @@ -1,16 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) - -# Introduction - -go-units is a library to transform human friendly measurements into machine friendly values. - -## Usage - -See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. - -## Copyright and license - -Copyright © 2015 Docker, Inc. - -go-units is licensed under the Apache License, Version 2.0. -See [LICENSE](LICENSE) for the full text of the license. diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml deleted file mode 100644 index 9043b3547..000000000 --- a/vendor/github.com/docker/go-units/circle.yml +++ /dev/null @@ -1,11 +0,0 @@ -dependencies: - post: - # install golint - - go get github.com/golang/lint/golint - -test: - pre: - # run analysis before tests - - go vet ./... - - test -z "$(golint ./... | tee /dev/stderr)" - - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go deleted file mode 100644 index c219a8a96..000000000 --- a/vendor/github.com/docker/go-units/duration.go +++ /dev/null @@ -1,33 +0,0 @@ -// Package units provides helper function to parse and print size and time units -// in human-readable format. -package units - -import ( - "fmt" - "time" -) - -// HumanDuration returns a human-readable approximation of a duration -// (eg. "About a minute", "4 hours ago", etc.). 
-func HumanDuration(d time.Duration) string { - if seconds := int(d.Seconds()); seconds < 1 { - return "Less than a second" - } else if seconds < 60 { - return fmt.Sprintf("%d seconds", seconds) - } else if minutes := int(d.Minutes()); minutes == 1 { - return "About a minute" - } else if minutes < 60 { - return fmt.Sprintf("%d minutes", minutes) - } else if hours := int(d.Hours()); hours == 1 { - return "About an hour" - } else if hours < 48 { - return fmt.Sprintf("%d hours", hours) - } else if hours < 24*7*2 { - return fmt.Sprintf("%d days", hours/24) - } else if hours < 24*30*3 { - return fmt.Sprintf("%d weeks", hours/24/7) - } else if hours < 24*365*2 { - return fmt.Sprintf("%d months", hours/24/30) - } - return fmt.Sprintf("%d years", int(d.Hours())/24/365) -} diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go deleted file mode 100644 index f5b82ea24..000000000 --- a/vendor/github.com/docker/go-units/size.go +++ /dev/null @@ -1,96 +0,0 @@ -package units - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -// See: http://en.wikipedia.org/wiki/Binary_prefix -const ( - // Decimal - - KB = 1000 - MB = 1000 * KB - GB = 1000 * MB - TB = 1000 * GB - PB = 1000 * TB - - // Binary - - KiB = 1024 - MiB = 1024 * KiB - GiB = 1024 * MiB - TiB = 1024 * GiB - PiB = 1024 * TiB -) - -type unitMap map[string]int64 - -var ( - decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} - binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} - sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`) -) - -var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} -var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} - -// CustomSize returns a human-readable approximation of a size -// using custom format. -func CustomSize(format string, size float64, base float64, _map []string) string { - i := 0 - unitsLimit := len(_map) - 1 - for size >= base && i < unitsLimit { - size = size / base - i++ - } - return fmt.Sprintf(format, size, _map[i]) -} - -// HumanSize returns a human-readable approximation of a size -// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). -func HumanSize(size float64) string { - return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs) -} - -// BytesSize returns a human-readable size in bytes, kibibytes, -// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). -func BytesSize(size float64) string { - return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs) -} - -// FromHumanSize returns an integer from a human-readable specification of a -// size using SI standard (eg. "44kB", "17MB"). -func FromHumanSize(size string) (int64, error) { - return parseSize(size, decimalMap) -} - -// RAMInBytes parses a human-readable string representing an amount of RAM -// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and -// returns the number of bytes, or -1 if the string is unparseable. -// Units are case-insensitive, and the 'b' suffix is optional. -func RAMInBytes(size string) (int64, error) { - return parseSize(size, binaryMap) -} - -// Parses the human-readable size string into the amount it represents. 
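The duration.go and size.go hunks above remove go-units' human-readable formatting and parsing helpers. A hedged sketch of typical calls to `HumanDuration`, `HumanSize`, `BytesSize`, and `RAMInBytes`; the input values are arbitrary examples.

```go
// Illustrative only: the removed go-units helpers, with arbitrary inputs.
package main

import (
	"fmt"
	"time"

	"github.com/docker/go-units"
)

func main() {
	fmt.Println(units.HumanDuration(90 * time.Second)) // "About a minute"
	fmt.Println(units.HumanSize(2746000))              // "2.746 MB" (decimal, base 1000)
	fmt.Println(units.BytesSize(67108864))             // "64 MiB" (binary, base 1024)

	n, err := units.RAMInBytes("64m") // binary units, case-insensitive suffix
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 67108864
}
```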
-func parseSize(sizeStr string, uMap unitMap) (int64, error) { - matches := sizeRegex.FindStringSubmatch(sizeStr) - if len(matches) != 4 { - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - - size, err := strconv.ParseFloat(matches[1], 64) - if err != nil { - return -1, err - } - - unitPrefix := strings.ToLower(matches[3]) - if mul, ok := uMap[unitPrefix]; ok { - size *= float64(mul) - } - - return int64(size), nil -} diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go deleted file mode 100644 index 5ac7fd825..000000000 --- a/vendor/github.com/docker/go-units/ulimit.go +++ /dev/null @@ -1,118 +0,0 @@ -package units - -import ( - "fmt" - "strconv" - "strings" -) - -// Ulimit is a human friendly version of Rlimit. -type Ulimit struct { - Name string - Hard int64 - Soft int64 -} - -// Rlimit specifies the resource limits, such as max open files. -type Rlimit struct { - Type int `json:"type,omitempty"` - Hard uint64 `json:"hard,omitempty"` - Soft uint64 `json:"soft,omitempty"` -} - -const ( - // magic numbers for making the syscall - // some of these are defined in the syscall package, but not all. - // Also since Windows client doesn't get access to the syscall package, need to - // define these here - rlimitAs = 9 - rlimitCore = 4 - rlimitCPU = 0 - rlimitData = 2 - rlimitFsize = 1 - rlimitLocks = 10 - rlimitMemlock = 8 - rlimitMsgqueue = 12 - rlimitNice = 13 - rlimitNofile = 7 - rlimitNproc = 6 - rlimitRss = 5 - rlimitRtprio = 14 - rlimitRttime = 15 - rlimitSigpending = 11 - rlimitStack = 3 -) - -var ulimitNameMapping = map[string]int{ - //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. - "core": rlimitCore, - "cpu": rlimitCPU, - "data": rlimitData, - "fsize": rlimitFsize, - "locks": rlimitLocks, - "memlock": rlimitMemlock, - "msgqueue": rlimitMsgqueue, - "nice": rlimitNice, - "nofile": rlimitNofile, - "nproc": rlimitNproc, - "rss": rlimitRss, - "rtprio": rlimitRtprio, - "rttime": rlimitRttime, - "sigpending": rlimitSigpending, - "stack": rlimitStack, -} - -// ParseUlimit parses and returns a Ulimit from the specified string. -func ParseUlimit(val string) (*Ulimit, error) { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid ulimit argument: %s", val) - } - - if _, exists := ulimitNameMapping[parts[0]]; !exists { - return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) - } - - var ( - soft int64 - hard = &soft // default to soft in case no hard was set - temp int64 - err error - ) - switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { - case 2: - temp, err = strconv.ParseInt(limitVals[1], 10, 64) - if err != nil { - return nil, err - } - hard = &temp - fallthrough - case 1: - soft, err = strconv.ParseInt(limitVals[0], 10, 64) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) - } - - if soft > *hard { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) - } - - return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil -} - -// GetRlimit returns the RLimit corresponding to Ulimit. 
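The ulimit.go hunk above removes the `Ulimit`/`Rlimit` helpers. A hedged sketch of parsing a `name=soft[:hard]` limit string with `ParseUlimit` and converting it via `GetRlimit`; the limit value is an arbitrary example.

```go
// Illustrative only: parsing a ulimit flag value with the removed helpers.
package main

import (
	"fmt"

	"github.com/docker/go-units"
)

func main() {
	u, err := units.ParseUlimit("nofile=1024:2048") // name=soft:hard
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String()) // "nofile=1024:2048"

	r, err := u.GetRlimit() // numeric rlimit type plus soft/hard as uint64
	if err != nil {
		panic(err)
	}
	fmt.Println(r.Type, r.Soft, r.Hard) // 7 1024 2048
}
```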
-func (u *Ulimit) GetRlimit() (*Rlimit, error) { - t, exists := ulimitNameMapping[u.Name] - if !exists { - return nil, fmt.Errorf("invalid ulimit name %s", u.Name) - } - - return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil -} - -func (u *Ulimit) String() string { - return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) -} diff --git a/vendor/github.com/evanphx/json-patch/.travis.yml b/vendor/github.com/evanphx/json-patch/.travis.yml deleted file mode 100644 index ed5cb244c..000000000 --- a/vendor/github.com/evanphx/json-patch/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go - -go: - - 1.4 - - 1.3 - -install: - - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi - -script: - - go test -cover ./... - -notifications: - email: false diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE deleted file mode 100644 index 0eb9b72d8..000000000 --- a/vendor/github.com/evanphx/json-patch/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014, Evan Phoenix -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. -* Neither the name of the Evan Phoenix nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md deleted file mode 100644 index d0d826bac..000000000 --- a/vendor/github.com/evanphx/json-patch/README.md +++ /dev/null @@ -1,29 +0,0 @@ -## JSON-Patch - -Provides the ability to modify and test a JSON according to a -[RFC6902 JSON patch](http://tools.ietf.org/html/rfc6902) and [RFC7396 JSON Merge Patch](https://tools.ietf.org/html/rfc7396). 
- -*Version*: **1.0** - -[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) - -[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) - -### API Usage - -* Given a `[]byte`, obtain a Patch object - - `obj, err := jsonpatch.DecodePatch(patch)` - -* Apply the patch and get a new document back - - `out, err := obj.Apply(doc)` - -* Create a JSON Merge Patch document based on two json documents (a to b): - - `mergeDoc, err := jsonpatch.CreateMergePatch(a, b)` - -* Bonus API: compare documents for structural equality - - `jsonpatch.Equal(doca, docb)` - diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go deleted file mode 100644 index 330b9b528..000000000 --- a/vendor/github.com/evanphx/json-patch/merge.go +++ /dev/null @@ -1,306 +0,0 @@ -package jsonpatch - -import ( - "encoding/json" - "fmt" - "reflect" - "strings" -) - -func merge(cur, patch *lazyNode) *lazyNode { - curDoc, err := cur.intoDoc() - - if err != nil { - pruneNulls(patch) - return patch - } - - patchDoc, err := patch.intoDoc() - - if err != nil { - return patch - } - - mergeDocs(curDoc, patchDoc) - - return cur -} - -func mergeDocs(doc, patch *partialDoc) { - for k, v := range *patch { - k := decodePatchKey(k) - if v == nil { - delete(*doc, k) - } else { - cur, ok := (*doc)[k] - - if !ok || cur == nil { - pruneNulls(v) - (*doc)[k] = v - } else { - (*doc)[k] = merge(cur, v) - } - } - } -} - -func pruneNulls(n *lazyNode) { - sub, err := n.intoDoc() - - if err == nil { - pruneDocNulls(sub) - } else { - ary, err := n.intoAry() - - if err == nil { - pruneAryNulls(ary) - } - } -} - -func pruneDocNulls(doc *partialDoc) *partialDoc { - for k, v := range *doc { - if v == nil { - delete(*doc, k) - } else { - pruneNulls(v) - } - } - - return doc -} - -func pruneAryNulls(ary *partialArray) *partialArray { - newAry := []*lazyNode{} - - for _, v := range *ary { - if v != nil { - pruneNulls(v) - newAry = append(newAry, v) - } - } - - *ary = newAry - - return ary -} - -var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") -var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") - -// MergePatch merges the patchData into the docData. -func MergePatch(docData, patchData []byte) ([]byte, error) { - doc := &partialDoc{} - - docErr := json.Unmarshal(docData, doc) - - patch := &partialDoc{} - - patchErr := json.Unmarshal(patchData, patch) - - if _, ok := docErr.(*json.SyntaxError); ok { - return nil, errBadJSONDoc - } - - if _, ok := patchErr.(*json.SyntaxError); ok { - return nil, errBadJSONPatch - } - - if docErr == nil && *doc == nil { - return nil, errBadJSONDoc - } - - if patchErr == nil && *patch == nil { - return nil, errBadJSONPatch - } - - if docErr != nil || patchErr != nil { - // Not an error, just not a doc, so we turn straight into the patch - if patchErr == nil { - doc = pruneDocNulls(patch) - } else { - patchAry := &partialArray{} - patchErr = json.Unmarshal(patchData, patchAry) - - if patchErr != nil { - return nil, errBadJSONPatch - } - - pruneAryNulls(patchAry) - - out, patchErr := json.Marshal(patchAry) - - if patchErr != nil { - return nil, errBadJSONPatch - } - - return out, nil - } - } else { - mergeDocs(doc, patch) - } - - return json.Marshal(doc) -} - -// CreateMergePatch creates a merge patch as specified in http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 -// -// 'a' is original, 'b' is the modified document. 
Both are to be given as json encoded content. -// The function will return a mergeable json document with differences from a to b. -// -// An error will be returned if any of the two documents are invalid. -func CreateMergePatch(a, b []byte) ([]byte, error) { - aI := map[string]interface{}{} - bI := map[string]interface{}{} - err := json.Unmarshal(a, &aI) - if err != nil { - return nil, errBadJSONDoc - } - err = json.Unmarshal(b, &bI) - if err != nil { - return nil, errBadJSONDoc - } - dest, err := getDiff(aI, bI) - if err != nil { - return nil, err - } - return json.Marshal(dest) -} - -// Returns true if the array matches (must be json types). -// As is idiomatic for go, an empty array is not the same as a nil array. -func matchesArray(a, b []interface{}) bool { - if len(a) != len(b) { - return false - } - if (a == nil && b != nil) || (a != nil && b == nil) { - return false - } - for i := range a { - if !matchesValue(a[i], b[i]) { - return false - } - } - return true -} - -// Returns true if the values matches (must be json types) -// The types of the values must match, otherwise it will always return false -// If two map[string]interface{} are given, all elements must match. -func matchesValue(av, bv interface{}) bool { - if reflect.TypeOf(av) != reflect.TypeOf(bv) { - return false - } - switch at := av.(type) { - case string: - bt := bv.(string) - if bt == at { - return true - } - case float64: - bt := bv.(float64) - if bt == at { - return true - } - case bool: - bt := bv.(bool) - if bt == at { - return true - } - case map[string]interface{}: - bt := bv.(map[string]interface{}) - for key := range at { - if !matchesValue(at[key], bt[key]) { - return false - } - } - for key := range bt { - if !matchesValue(at[key], bt[key]) { - return false - } - } - return true - case []interface{}: - bt := bv.([]interface{}) - return matchesArray(at, bt) - } - return false -} - -// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. -func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { - into := map[string]interface{}{} - for key, bv := range b { - escapedKey := encodePatchKey(key) - av, ok := a[key] - // value was added - if !ok { - into[escapedKey] = bv - continue - } - // If types have changed, replace completely - if reflect.TypeOf(av) != reflect.TypeOf(bv) { - into[escapedKey] = bv - continue - } - // Types are the same, compare values - switch at := av.(type) { - case map[string]interface{}: - bt := bv.(map[string]interface{}) - dst := make(map[string]interface{}, len(bt)) - dst, err := getDiff(at, bt) - if err != nil { - return nil, err - } - if len(dst) > 0 { - into[escapedKey] = dst - } - case string, float64, bool: - if !matchesValue(av, bv) { - into[escapedKey] = bv - } - case []interface{}: - bt := bv.([]interface{}) - if !matchesArray(at, bt) { - into[escapedKey] = bv - } - case nil: - switch bv.(type) { - case nil: - // Both nil, fine. - default: - into[escapedKey] = bv - } - default: - panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) - } - } - // Now add all deleted values as nil - for key := range a { - _, found := b[key] - if !found { - into[key] = nil - } - } - return into, nil -} - -// From http://tools.ietf.org/html/rfc6901#section-4 : -// -// Evaluation of each reference token begins by decoding any escaped -// character sequence. This is performed by first transforming any -// occurrence of the sequence '~1' to '/', and then transforming any -// occurrence of the sequence '~0' to '~'. 
- -var ( - rfc6901Encoder = strings.NewReplacer("~", "~0", "/", "~1") - rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") -) - -func decodePatchKey(k string) string { - return rfc6901Decoder.Replace(k) -} - -func encodePatchKey(k string) string { - return rfc6901Encoder.Replace(k) -} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go deleted file mode 100644 index 415b6532f..000000000 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ /dev/null @@ -1,579 +0,0 @@ -package jsonpatch - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" -) - -const ( - eRaw = iota - eDoc - eAry -) - -type lazyNode struct { - raw *json.RawMessage - doc partialDoc - ary partialArray - which int -} - -type operation map[string]*json.RawMessage - -// Patch is an ordered collection of operations. -type Patch []operation - -type partialDoc map[string]*lazyNode -type partialArray []*lazyNode - -type container interface { - get(key string) (*lazyNode, error) - set(key string, val *lazyNode) error - add(key string, val *lazyNode) error - remove(key string) error -} - -func newLazyNode(raw *json.RawMessage) *lazyNode { - return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} -} - -func (n *lazyNode) MarshalJSON() ([]byte, error) { - switch n.which { - case eRaw: - return json.Marshal(n.raw) - case eDoc: - return json.Marshal(n.doc) - case eAry: - return json.Marshal(n.ary) - default: - return nil, fmt.Errorf("Unknown type") - } -} - -func (n *lazyNode) UnmarshalJSON(data []byte) error { - dest := make(json.RawMessage, len(data)) - copy(dest, data) - n.raw = &dest - n.which = eRaw - return nil -} - -func (n *lazyNode) intoDoc() (*partialDoc, error) { - if n.which == eDoc { - return &n.doc, nil - } - - err := json.Unmarshal(*n.raw, &n.doc) - - if err != nil { - return nil, err - } - - n.which = eDoc - return &n.doc, nil -} - -func (n *lazyNode) intoAry() (*partialArray, error) { - if n.which == eAry { - return &n.ary, nil - } - - err := json.Unmarshal(*n.raw, &n.ary) - - if err != nil { - return nil, err - } - - n.which = eAry - return &n.ary, nil -} - -func (n *lazyNode) compact() []byte { - buf := &bytes.Buffer{} - - err := json.Compact(buf, *n.raw) - - if err != nil { - return *n.raw - } - - return buf.Bytes() -} - -func (n *lazyNode) tryDoc() bool { - err := json.Unmarshal(*n.raw, &n.doc) - - if err != nil { - return false - } - - n.which = eDoc - return true -} - -func (n *lazyNode) tryAry() bool { - err := json.Unmarshal(*n.raw, &n.ary) - - if err != nil { - return false - } - - n.which = eAry - return true -} - -func (n *lazyNode) equal(o *lazyNode) bool { - if n.which == eRaw { - if !n.tryDoc() && !n.tryAry() { - if o.which != eRaw { - return false - } - - return bytes.Equal(n.compact(), o.compact()) - } - } - - if n.which == eDoc { - if o.which == eRaw { - if !o.tryDoc() { - return false - } - } - - if o.which != eDoc { - return false - } - - for k, v := range n.doc { - ov, ok := o.doc[k] - - if !ok { - return false - } - - if v == nil && ov == nil { - continue - } - - if !v.equal(ov) { - return false - } - } - - return true - } - - if o.which != eAry && !o.tryAry() { - return false - } - - if len(n.ary) != len(o.ary) { - return false - } - - for idx, val := range n.ary { - if !val.equal(o.ary[idx]) { - return false - } - } - - return true -} - -func (o operation) kind() string { - if obj, ok := o["op"]; ok { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown" - } - - return op - } 
- - return "unknown" -} - -func (o operation) path() string { - if obj, ok := o["path"]; ok { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown" - } - - return op - } - - return "unknown" -} - -func (o operation) from() string { - if obj, ok := o["from"]; ok { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown" - } - - return op - } - - return "unknown" -} - -func (o operation) value() *lazyNode { - if obj, ok := o["value"]; ok { - return newLazyNode(obj) - } - - return nil -} - -func isArray(buf []byte) bool { -Loop: - for _, c := range buf { - switch c { - case ' ': - case '\n': - case '\t': - continue - case '[': - return true - default: - break Loop - } - } - - return false -} - -func findObject(pd *partialDoc, path string) (container, string) { - doc := container(pd) - - split := strings.Split(path, "/") - - parts := split[1 : len(split)-1] - - key := split[len(split)-1] - - var err error - - for _, part := range parts { - - next, ok := doc.get(decodePatchKey(part)) - - if next == nil || ok != nil { - return nil, "" - } - - if isArray(*next.raw) { - doc, err = next.intoAry() - - if err != nil { - return nil, "" - } - } else { - doc, err = next.intoDoc() - - if err != nil { - return nil, "" - } - } - } - - return doc, decodePatchKey(key) -} - -func (d *partialDoc) set(key string, val *lazyNode) error { - (*d)[key] = val - return nil -} - -func (d *partialDoc) add(key string, val *lazyNode) error { - (*d)[key] = val - return nil -} - -func (d *partialDoc) get(key string) (*lazyNode, error) { - return (*d)[key], nil -} - -func (d *partialDoc) remove(key string) error { - _, ok := (*d)[key] - if !ok { - return fmt.Errorf("Unable to remove nonexistent key: %s", key) - } - - delete(*d, key) - return nil -} - -func (d *partialArray) set(key string, val *lazyNode) error { - if key == "-" { - *d = append(*d, val) - return nil - } - - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - sz := len(*d) - if idx+1 > sz { - sz = idx + 1 - } - - ary := make([]*lazyNode, sz) - - cur := *d - - copy(ary, cur) - - if idx >= len(ary) { - return fmt.Errorf("Unable to access invalid index: %d", idx) - } - - ary[idx] = val - - *d = ary - return nil -} - -func (d *partialArray) add(key string, val *lazyNode) error { - if key == "-" { - *d = append(*d, val) - return nil - } - - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - ary := make([]*lazyNode, len(*d)+1) - - cur := *d - - copy(ary[0:idx], cur[0:idx]) - ary[idx] = val - copy(ary[idx+1:], cur[idx:]) - - *d = ary - return nil -} - -func (d *partialArray) get(key string) (*lazyNode, error) { - idx, err := strconv.Atoi(key) - - if err != nil { - return nil, err - } - - if idx >= len(*d) { - return nil, fmt.Errorf("Unable to access invalid index: %d", idx) - } - - return (*d)[idx], nil -} - -func (d *partialArray) remove(key string) error { - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - cur := *d - - if idx >= len(cur) { - return fmt.Errorf("Unable to remove invalid index: %d", idx) - } - - ary := make([]*lazyNode, len(cur)-1) - - copy(ary[0:idx], cur[0:idx]) - copy(ary[idx:], cur[idx+1:]) - - *d = ary - return nil - -} - -func (p Patch) add(doc *partialDoc, op operation) error { - path := op.path() - - con, key := findObject(doc, path) - - if con == nil { - return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: %s", path) - } - - return con.add(key, op.value()) -} - -func (p Patch) remove(doc 
*partialDoc, op operation) error { - path := op.path() - - con, key := findObject(doc, path) - - if con == nil { - return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: %s", path) - } - - return con.remove(key) -} - -func (p Patch) replace(doc *partialDoc, op operation) error { - path := op.path() - - con, key := findObject(doc, path) - - if con == nil { - return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", path) - } - - return con.set(key, op.value()) -} - -func (p Patch) move(doc *partialDoc, op operation) error { - from := op.from() - - con, key := findObject(doc, from) - - if con == nil { - return fmt.Errorf("jsonpatch move operation does not apply: doc is missing from path: %s", from) - } - - val, err := con.get(key) - if err != nil { - return err - } - - err = con.remove(key) - if err != nil { - return err - } - - path := op.path() - - con, key = findObject(doc, path) - - if con == nil { - return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path) - } - - return con.set(key, val) -} - -func (p Patch) test(doc *partialDoc, op operation) error { - path := op.path() - - con, key := findObject(doc, path) - - if con == nil { - return fmt.Errorf("jsonpatch test operation does not apply: is missing path: %s", path) - } - - val, err := con.get(key) - - if err != nil { - return err - } - - if val == nil { - if op.value().raw == nil { - return nil - } else { - return fmt.Errorf("Testing value %s failed", path) - } - } - - if val.equal(op.value()) { - return nil - } - - return fmt.Errorf("Testing value %s failed", path) -} - -// Equal indicates if 2 JSON documents have the same structural equality. -func Equal(a, b []byte) bool { - ra := make(json.RawMessage, len(a)) - copy(ra, a) - la := newLazyNode(&ra) - - rb := make(json.RawMessage, len(b)) - copy(rb, b) - lb := newLazyNode(&rb) - - return la.equal(lb) -} - -// DecodePatch decodes the passed JSON document as an RFC 6902 patch. -func DecodePatch(buf []byte) (Patch, error) { - var p Patch - - err := json.Unmarshal(buf, &p) - - if err != nil { - return nil, err - } - - return p, nil -} - -// Apply mutates a JSON document according to the patch, and returns the new -// document. -func (p Patch) Apply(doc []byte) ([]byte, error) { - return p.ApplyIndent(doc, "") -} - -// ApplyIndent mutates a JSON document according to the patch, and returns the new -// document indented. 
-func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { - pd := &partialDoc{} - - err := json.Unmarshal(doc, pd) - - if err != nil { - return nil, err - } - - err = nil - - for _, op := range p { - switch op.kind() { - case "add": - err = p.add(pd, op) - case "remove": - err = p.remove(pd, op) - case "replace": - err = p.replace(pd, op) - case "move": - err = p.move(pd, op) - case "test": - err = p.test(pd, op) - default: - err = fmt.Errorf("Unexpected kind: %s", op.kind()) - } - - if err != nil { - return nil, err - } - } - - if indent != "" { - return json.MarshalIndent(pd, "", indent) - } - - return json.Marshal(pd) -} diff --git a/vendor/github.com/go-openapi/jsonpointer/.drone.sec b/vendor/github.com/go-openapi/jsonpointer/.drone.sec new file mode 100644 index 000000000..a1d7bbe07 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.drone.sec @@ -0,0 +1 @@ +eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.pDqezepze0YqRx4u6M8GFaWtnVR-utTWZic-GX-RvMATAoYpG4H2sc9tlnGNCxa44dbRY0vY10qfBU7Sno8vkp21fsK42ofGLfen_suum_0ilm0sFS0X-kAwk7TIq5L5lPPKiChPMUiGp5oJW-g5MqMFX1jNiI-4fP-vSM3B3-eyZtJD_O517TgfIRLnblCzqwIkyRmAfPNopi-Fe8Y31TmO2Vd0nFc1Aqro_VaJSACzEVxOHTNpjETcMjlYzwgMXLeiAfLV-5hM0f6DXgHMlLSuMkB_Ndnw25dkB7hreGk4x0tHQ3X9mUfTgLq1hIDoyeeKDIM83Tqw4LBRph20BQ.qd_pNuyi23B0PlWz.JtpO7kqOm0SWOGzWDalkWheHuNd-eDpVbqI9WPAEFDOIBvz7TbsYMBlIYVWEGWbat4mkx_ejxnMn1L1l996NJnyP7eY-QE82cfPJbjx94d0Ob70KZ4DCm_UxcY2t-OKFiPJqxW7MA5jKyDuGD16bdxpjLEoe_cMSEr8FNu-MVG6wcchPcyYyRkqTQSl4mb09KikkAzHjwjo-DcO0f8ps4Uzsoc0aqAAWdE-ocG0YqierLoemjusYMiLH-eLF6MvaLRvHSte-cLzPuYCeZURnBDgxu3i3UApgddnX7g1c7tdGGBGvgCl-tEEDW58Vxgdjksim2S7y3lfoJ8FFzSWeRH2y7Kq04hgew3b2J_RiDB9ejzIopzG8ZGjJa3EO1-i9ORTl12nXK1RdlLGqu604ENaeVOPCIHL-0C8e6_wHdUGHydLZImSxKYSrNvy8resP1D_9t4B-3q2mkS9mhnMONrXbPDVw5QY5mvXlWs0Db99ARwzsl-Qlu0A_tsZwMjWT2I1QMvWPyTRScmMm0FJSv9zStjzxWa_q2GL7Naz1fI4Dd6ZgNJWYYq-mHN5chEeBdIcwb_zMPHczMQXXNL5nmfRGM1aPffkToFWCDpIlI8IXec83ZC6_POxZegS6n9Drrvc.6Nz8EXxs1lWX3ASaCeNElA \ No newline at end of file diff --git a/vendor/github.com/go-openapi/jsonpointer/.drone.yml b/vendor/github.com/go-openapi/jsonpointer/.drone.yml new file mode 100644 index 000000000..cb8c7b50a --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.drone.yml @@ -0,0 +1,32 @@ +clone: + path: github.com/go-openapi/jsonpointer + +matrix: + GO_VERSION: + - "1.6" + +build: + integration: + image: golang:$$GO_VERSION + pull: true + commands: + - go get -u github.com/stretchr/testify/assert + - go get -u github.com/go-openapi/swag + - go test -race + - go test -v -cover -coverprofile=coverage.out -covermode=count ./... 
+ +notify: + slack: + channel: bots + webhook_url: $$SLACK_URL + username: drone + +publish: + coverage: + server: https://coverage.vmware.run + token: $$GITHUB_TOKEN + # threshold: 70 + # must_increase: true + when: + matrix: + GO_VERSION: "1.6" diff --git a/vendor/github.com/go-openapi/jsonpointer/.gitignore b/vendor/github.com/go-openapi/jsonpointer/.gitignore new file mode 100644 index 000000000..769c24400 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.gitignore @@ -0,0 +1 @@ +secrets.yml diff --git a/vendor/github.com/go-openapi/jsonpointer/.pullapprove.yml b/vendor/github.com/go-openapi/jsonpointer/.pullapprove.yml new file mode 100644 index 000000000..5ec183e22 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.pullapprove.yml @@ -0,0 +1,13 @@ +approve_by_comment: true +approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)' +reject_regex: ^[Rr]ejected +reset_on_push: false +reviewers: + members: + - casualjim + - chancez + - frapposelli + - vburenin + - pytlesk4 + name: pullapprove + required: 1 diff --git a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..9322b065e --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. 
Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/github.com/go-openapi/jsonpointer/LICENSE similarity index 100% rename from vendor/google.golang.org/appengine/LICENSE rename to vendor/github.com/go-openapi/jsonpointer/LICENSE diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md new file mode 100644 index 000000000..9c9b1fd48 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/README.md @@ -0,0 +1,15 @@ +# gojsonpointer [![Build Status](https://ci.vmware.run/api/badges/go-openapi/jsonpointer/status.svg)](https://ci.vmware.run/go-openapi/jsonpointer) [![Coverage](https://coverage.vmware.run/badges/go-openapi/jsonpointer/coverage.svg)](https://coverage.vmware.run/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer) +An implementation of JSON Pointer - Go language + +## Status +Completed YES + +Tested YES + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +### Note +The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go new file mode 100644 index 000000000..39dd012c2 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -0,0 +1,238 @@ +// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author sigu-399 +// author-github https://github.com/sigu-399 +// author-mail sigu.399@gmail.com +// +// repository-name jsonpointer +// repository-desc An implementation of JSON Pointer - Go language +// +// description Main and unique file. +// +// created 25-02-2013 + +package jsonpointer + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/go-openapi/swag" +) + +const ( + emptyPointer = `` + pointerSeparator = `/` + + invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator +) + +var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() + +// JSONPointable is an interface for structs to implement when they need to customize the +// json pointer process +type JSONPointable interface { + JSONLookup(string) (interface{}, error) +} + +type implStruct struct { + mode string // "SET" or "GET" + + inDocument interface{} + + setInValue interface{} + + getOutNode interface{} + getOutKind reflect.Kind + outError error +} + +// New creates a new json pointer for the given string +func New(jsonPointerString string) (Pointer, error) { + + var p Pointer + err := p.parse(jsonPointerString) + return p, err + +} + +// Pointer the json pointer reprsentation +type Pointer struct { + referenceTokens []string +} + +// "Constructor", parses the given string JSON pointer +func (p *Pointer) parse(jsonPointerString string) error { + + var err error + + if jsonPointerString != emptyPointer { + if !strings.HasPrefix(jsonPointerString, pointerSeparator) { + err = errors.New(invalidStart) + } else { + referenceTokens := strings.Split(jsonPointerString, pointerSeparator) + for _, referenceToken := range referenceTokens[1:] { + p.referenceTokens = append(p.referenceTokens, referenceToken) + } + } + } + + return err +} + +// Get uses the pointer to retrieve a value from a JSON document +func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) { + return p.get(document, swag.DefaultJSONNameProvider) +} + +// GetForToken gets a value for a json pointer token 1 level deep +func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) { + return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) +} + +func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { + kind := reflect.Invalid + rValue := reflect.Indirect(reflect.ValueOf(node)) + kind = rValue.Kind() + switch kind { + + case reflect.Struct: + if rValue.Type().Implements(jsonPointableType) { + r, err := node.(JSONPointable).JSONLookup(decodedToken) + if err != nil { + return nil, kind, err + } + return r, kind, nil + } + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return nil, kind, fmt.Errorf("object has no field %q", decodedToken) + } + fld := rValue.FieldByName(nm) + return fld.Interface(), kind, nil + + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + mv := rValue.MapIndex(kv) + if mv.IsValid() && !swag.IsZero(mv) { + return mv.Interface(), kind, nil + } + return nil, kind, fmt.Errorf("object 
has no key %q", decodedToken) + + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return nil, kind, err + } + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) + } + + elem := rValue.Index(tokenIndex) + return elem.Interface(), kind, nil + + default: + return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken) + } + +} + +func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { + + if nameProvider == nil { + nameProvider = swag.DefaultJSONNameProvider + } + + kind := reflect.Invalid + + // Full document when empty + if len(p.referenceTokens) == 0 { + return node, kind, nil + } + + for _, token := range p.referenceTokens { + + decodedToken := Unescape(token) + + r, knd, err := getSingleImpl(node, decodedToken, nameProvider) + if err != nil { + return nil, knd, err + } + node, kind = r, knd + + } + + rValue := reflect.ValueOf(node) + kind = rValue.Kind() + + return node, kind, nil +} + +// DecodedTokens returns the decoded tokens +func (p *Pointer) DecodedTokens() []string { + result := make([]string, 0, len(p.referenceTokens)) + for _, t := range p.referenceTokens { + result = append(result, Unescape(t)) + } + return result +} + +// IsEmpty returns true if this is an empty json pointer +// this indicates that it points to the root document +func (p *Pointer) IsEmpty() bool { + return len(p.referenceTokens) == 0 +} + +// Pointer to string representation function +func (p *Pointer) String() string { + + if len(p.referenceTokens) == 0 { + return emptyPointer + } + + pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator) + + return pointerString +} + +// Specific JSON pointer encoding here +// ~0 => ~ +// ~1 => / +// ... 
and vice versa + +const ( + encRefTok0 = `~0` + encRefTok1 = `~1` + decRefTok0 = `~` + decRefTok1 = `/` +) + +// Unescape unescapes a json pointer reference token string to the original representation +func Unescape(token string) string { + step1 := strings.Replace(token, encRefTok1, decRefTok1, -1) + step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1) + return step2 +} + +// Escape escapes a pointer reference token string +func Escape(token string) string { + step1 := strings.Replace(token, decRefTok0, encRefTok0, -1) + step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1) + return step2 +} diff --git a/vendor/github.com/go-openapi/jsonreference/.drone.sec b/vendor/github.com/go-openapi/jsonreference/.drone.sec new file mode 100644 index 000000000..5ff54fb9c --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/.drone.sec @@ -0,0 +1 @@ +eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.Xe40Wx6g5Y-iN0JVMhKyFfubtOId3zAVE564szw_yYGzFNhc_cGZO9F3BtAcJ55CfHG9C_ozn9dpnUDl_zYZoy_6cPCq13Ekb95z8NAC3ekDtbAATsc9HZwRNwI7UfkhstdwxljEouGB01qoLcUn6lFutrou-Ho21COHeDb2caemnPSA-rEAnXkOiBFu0RQ1MIwMygzvHXIHHYNpNwAtXqmiggM10miSjqBM3JmRPxCi7VK6_Rxij5p6LlhmK1BDi8Y6oBh-9BX3--5GAJeWZ6Vof5TnP-Enioia18j8c8KFtfY4q0y6Ednjb-AarLZ12gj695ppkBNJUdTJQmwGwA.fVcz_RiLrUB5fgMS.rjWllDYC6m_NB-ket_LizNEy9mlJ27odBTZQcMKaUqqXZBtWUCmPrOoMXGq-_cc-c7chg7D-WMh9SPQ23pV0P-DY-jsDpbOqHG2STOMEfW9ZREoaOLJXQaWcuBldLjRyWFcq0HGj97LgE6szD1Zlou3bmdHS_Q-U9Up9YQ_8_YnDcESD_cj1w5FZom7HjchKJFeGjQjfDQpoCKCQNMJaavUqy9jHQEeQ_uVocSrETg3GpewDcUF2tuv8uGq7ZZWu7Vl8zmnY1MFTynaGBWzTCSRmCkAXjcsaUheDP_NT5D7k-xUS6LwtqEUiXAXV07SNFraorFj5lnBQZRDlZMYcA3NWR6zHiOxekR9LBYPofst6w1rIqUchj_5m1tDpVTBMPir1eAaFcnJtPgo4ch17OF-kmcmQGLhJI3U7n8wv4sTrmP1dewtRRKrvlJe5r3_6eDiK4xZ8K0rnK1D4g6zuQqU1gA8KaU7pmZkKpFx3Bew4v-6DH32YwQBvAI7Lbb8afou9WsCNB_iswz5XGimP4bifiJRwpWBEz9VGhZFdiw-hZpYWgbxzVb5gtqfTDLIvpbLDmFz1vge16uUQHHVFpo1pSozyr7A60X8qsh9pmmO3RcJ-ZGZBWqiRC-Kl5ejz7WQ.LFoK4Ibi11B2lWQ5WcPSag \ No newline at end of file diff --git a/vendor/github.com/go-openapi/jsonreference/.drone.yml b/vendor/github.com/go-openapi/jsonreference/.drone.yml new file mode 100644 index 000000000..157ffe579 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/.drone.yml @@ -0,0 +1,33 @@ +clone: + path: github.com/go-openapi/jsonreference + +matrix: + GO_VERSION: + - "1.6" + +build: + integration: + image: golang:$$GO_VERSION + pull: true + commands: + - go get -u github.com/stretchr/testify/assert + - go get -u github.com/PuerkitoBio/purell + - go get -u github.com/go-openapi/jsonpointer + - go test -race + - go test -v -cover -coverprofile=coverage.out -covermode=count ./... 
+ +notify: + slack: + channel: bots + webhook_url: $$SLACK_URL + username: drone + +publish: + coverage: + server: https://coverage.vmware.run + token: $$GITHUB_TOKEN + # threshold: 70 + # must_increase: true + when: + matrix: + GO_VERSION: "1.6" diff --git a/vendor/github.com/go-openapi/jsonreference/.gitignore b/vendor/github.com/go-openapi/jsonreference/.gitignore new file mode 100644 index 000000000..769c24400 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/.gitignore @@ -0,0 +1 @@ +secrets.yml diff --git a/vendor/github.com/go-openapi/jsonreference/.pullapprove.yml b/vendor/github.com/go-openapi/jsonreference/.pullapprove.yml new file mode 100644 index 000000000..5ec183e22 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/.pullapprove.yml @@ -0,0 +1,13 @@ +approve_by_comment: true +approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)' +reject_regex: ^[Rr]ejected +reset_on_push: false +reviewers: + members: + - casualjim + - chancez + - frapposelli + - vburenin + - pytlesk4 + name: pullapprove + required: 1 diff --git a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..9322b065e --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. 
Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/jsonreference/LICENSE b/vendor/github.com/go-openapi/jsonreference/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
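Before the jsonreference README and source that follow, here is a small, hypothetical sketch of how the newly vendored go-openapi/jsonpointer package (added above) is typically used; the sample document and pointer strings are illustrative and not taken from this repository. The go-openapi/jsonreference package below builds on it by pairing a normalized URL with such a pointer.

```go
// Minimal sketch (not part of this change) of the go-openapi/jsonpointer API;
// the document and pointers below are illustrative only.
package main

import (
	"fmt"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	// Plain Go maps and slices are traversed by key and index.
	doc := map[string]interface{}{
		"definitions": map[string]interface{}{
			"Pod": map[string]interface{}{"type": "object"},
		},
		"tags": []interface{}{"events", "pods"},
	}

	p, err := jsonpointer.New("/definitions/Pod/type")
	if err != nil {
		panic(err)
	}
	value, kind, err := p.Get(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(value, kind) // object string

	// Index into an array with a numeric token.
	q, _ := jsonpointer.New("/tags/1")
	tag, _, _ := q.Get(doc)
	fmt.Println(tag) // pods

	// Reference tokens are escaped per RFC 6901: "/" <-> "~1", "~" <-> "~0".
	fmt.Println(jsonpointer.Escape("a/b")) // a~1b
}
```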
diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md new file mode 100644 index 000000000..5f7881274 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/README.md @@ -0,0 +1,15 @@ +# gojsonreference [![Build Status](https://ci.vmware.run/api/badges/go-openapi/jsonreference/status.svg)](https://ci.vmware.run/go-openapi/jsonreference) [![Coverage](https://coverage.vmware.run/badges/go-openapi/jsonreference/coverage.svg)](https://coverage.vmware.run/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference) +An implementation of JSON Reference - Go language + +## Status +Work in progress ( 90% done ) + +## Dependencies +https://github.com/xeipuuv/gojsonpointer + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go new file mode 100644 index 000000000..3bc0a6e26 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/reference.go @@ -0,0 +1,156 @@ +// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author sigu-399 +// author-github https://github.com/sigu-399 +// author-mail sigu.399@gmail.com +// +// repository-name jsonreference +// repository-desc An implementation of JSON Reference - Go language +// +// description Main and unique file. +// +// created 26-02-2013 + +package jsonreference + +import ( + "errors" + "net/url" + "strings" + + "github.com/PuerkitoBio/purell" + "github.com/go-openapi/jsonpointer" +) + +const ( + fragmentRune = `#` +) + +// New creates a new reference for the given string +func New(jsonReferenceString string) (Ref, error) { + + var r Ref + err := r.parse(jsonReferenceString) + return r, err + +} + +// MustCreateRef parses the ref string and panics when it's invalid. 
+// Use the New method for a version that returns an error +func MustCreateRef(ref string) Ref { + r, err := New(ref) + if err != nil { + panic(err) + } + return r +} + +// Ref represents a json reference object +type Ref struct { + referenceURL *url.URL + referencePointer jsonpointer.Pointer + + HasFullURL bool + HasURLPathOnly bool + HasFragmentOnly bool + HasFileScheme bool + HasFullFilePath bool +} + +// GetURL gets the URL for this reference +func (r *Ref) GetURL() *url.URL { + return r.referenceURL +} + +// GetPointer gets the json pointer for this reference +func (r *Ref) GetPointer() *jsonpointer.Pointer { + return &r.referencePointer +} + +// String returns the best version of the url for this reference +func (r *Ref) String() string { + + if r.referenceURL != nil { + return r.referenceURL.String() + } + + if r.HasFragmentOnly { + return fragmentRune + r.referencePointer.String() + } + + return r.referencePointer.String() +} + +// IsRoot returns true if this reference is a root document +func (r *Ref) IsRoot() bool { + return r.referenceURL != nil && + !r.IsCanonical() && + !r.HasURLPathOnly && + r.referenceURL.Fragment == "" +} + +// IsCanonical returns true when this pointer starts with http(s):// or file:// +func (r *Ref) IsCanonical() bool { + return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL) +} + +// "Constructor", parses the given string JSON reference +func (r *Ref) parse(jsonReferenceString string) error { + + parsed, err := url.Parse(jsonReferenceString) + if err != nil { + return err + } + + r.referenceURL, _ = url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) + refURL := r.referenceURL + + if refURL.Scheme != "" && refURL.Host != "" { + r.HasFullURL = true + } else { + if refURL.Path != "" { + r.HasURLPathOnly = true + } else if refURL.RawQuery == "" && refURL.Fragment != "" { + r.HasFragmentOnly = true + } + } + + r.HasFileScheme = refURL.Scheme == "file" + r.HasFullFilePath = strings.HasPrefix(refURL.Path, "/") + + // invalid json-pointer error means url has no json-pointer fragment. 
simply ignore error + r.referencePointer, _ = jsonpointer.New(refURL.Fragment) + + return nil +} + +// Inherits creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *Ref) Inherits(child Ref) (*Ref, error) { + childURL := child.GetURL() + parentURL := r.GetURL() + if childURL == nil { + return nil, errors.New("child url is nil") + } + if parentURL == nil { + return &child, nil + } + + ref, err := New(parentURL.ResolveReference(childURL).String()) + if err != nil { + return nil, err + } + return &ref, nil +} diff --git a/vendor/github.com/go-openapi/spec/.drone.sec b/vendor/github.com/go-openapi/spec/.drone.sec new file mode 100644 index 000000000..60c5ebe38 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.drone.sec @@ -0,0 +1 @@ +eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.Epk8dDFH8U1RPYIPDpajZO26L5zFJ1wnQNGWxVHHo5cXrWF148kENoZzh35FT9cAxxPS_4CeVVpf59EgvCc8bem1puuj0gBZptn-lYa7iXZdI-ESN2Te7nF5VbZfwbnI62nEikYGyxz-ozL_IFuMl-qWek4iLerF8Z_xh0MZOJ_w8Nog7qb2WQov72d997TJv5ZKjWcRYPbnsAy1q60-Cqxq3a6enhcSPXqpK46nYSXGKfHvognWBJ_pxwkEqIBPN6hE4EfNtJjMf2LFKEdYy02nbHz78d-2YZ8wIUSJ-IWIwn3GTzObdGqRed20Qf3JtWTsOespmexDrLSeo3HW6A.7XaHW-Y1jjRAWt_W.S1Adut62RLOYZc-lN02M0MGczEucch3zIr4J1UPBPnZooWzntiE5UaUz0UdhjHVszQE5hTfG-yocKD1rDQGER6qrLtnJVrCm9J3n4lHglM-xOz1eZln1XKrWcAgZnAKaKSzuAa5scPG4iTHW6RwbWi_PWm04tBJ1yazdjaVo3uvuhflwvU9if7uMPMtscrDesbBVvpG89xmeudiFjX-wjsV5oGBIjz6ukEBAMKzNDMqikNoG4SnGenpxUpjUjMkDXxiC3BC8oL2_myeIfFeEOF066DqEN3CLkqBVO25zdpWAF4Ou2jKv--mgGEb_E1aMgiSoAVBnybene0TKn2IJ8rtkyRdmWlLIRKZdDT3v775C1FPK6-tYzS7NVg9nnuvpta5PhzYNkqI1Ie74Sl0I-RFClhsdx9dLDhoFEKCx2etC4UDX9jhj2u0Y2MrL76dRGE9kEV1hL1fh6HMvS4ZAAWw3Qce4skCjcL-2YyIOHzKjgLGkZsR5cTUQwCJyacVkdHUOUKFdDGZaUzWkFyeZ1oyrlG2d52svaplpU5-vCOVbWkqUN9rOALGPTC51Ur0L7DFx29aDImhaxZqTe2t9mcdqY7VLcO3JgUiD3JKsEet7s2EDeN44MqITv9KBS8wqJW4.sRv4ov0wB0IxTHw90kJy-A \ No newline at end of file diff --git a/vendor/github.com/go-openapi/spec/.drone.yml b/vendor/github.com/go-openapi/spec/.drone.yml new file mode 100644 index 000000000..6d0442737 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.drone.yml @@ -0,0 +1,35 @@ +clone: + path: github.com/go-openapi/spec + +matrix: + GO_VERSION: + - "1.6" + +build: + integration: + image: golang:$$GO_VERSION + pull: true + commands: + - go get -u github.com/stretchr/testify/assert + - go get -u gopkg.in/yaml.v2 + - go get -u github.com/go-openapi/swag + - go get -u github.com/go-openapi/jsonpointer + - go get -u github.com/go-openapi/jsonreference + - go test -race + - go test -v -cover -coverprofile=coverage.out -covermode=count ./... 
+ +notify: + slack: + channel: bots + webhook_url: $$SLACK_URL + username: drone + +publish: + coverage: + server: https://coverage.vmware.run + token: $$GITHUB_TOKEN + # threshold: 70 + # must_increase: true + when: + matrix: + GO_VERSION: "1.6" diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore new file mode 100644 index 000000000..dd91ed6a0 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +coverage.out diff --git a/vendor/github.com/go-openapi/spec/.pullapprove.yml b/vendor/github.com/go-openapi/spec/.pullapprove.yml new file mode 100644 index 000000000..5ec183e22 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/.pullapprove.yml @@ -0,0 +1,13 @@ +approve_by_comment: true +approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)' +reject_regex: ^[Rr]ejected +reset_on_push: false +reviewers: + members: + - casualjim + - chancez + - frapposelli + - vburenin + - pytlesk4 + name: pullapprove + required: 1 diff --git a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..9322b065e --- /dev/null +++ b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/spec/LICENSE b/vendor/github.com/go-openapi/spec/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md new file mode 100644 index 000000000..4b2af124a --- /dev/null +++ b/vendor/github.com/go-openapi/spec/README.md @@ -0,0 +1,5 @@ +# OAI object model [![Build Status](https://ci.vmware.run/api/badges/go-openapi/spec/status.svg)](https://ci.vmware.run/go-openapi/spec) [![Coverage](https://coverage.vmware.run/badges/go-openapi/spec/coverage.svg)](https://coverage.vmware.run/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/spec?status.svg)](http://godoc.org/github.com/go-openapi/spec) + +The object model for OpenAPI specification documents \ No newline at end of file diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go new file mode 100644 index 000000000..294cbccf7 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/bindata.go @@ -0,0 +1,274 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by go-bindata. +// sources: +// schemas/jsonschema-draft-04.json +// schemas/v2/schema.json +// DO NOT EDIT! 
+ +package spec + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _jsonschemaDraft04JSON = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xcc\x57\x3b\x6f\xdb\x30\x10\xde\xfd\x2b\x04\xa5\x63\x52\xb9\x40\xa7\x6c\x45\xbb\x18\x68\xd1\x0c\xdd\x0c\x0f\xb4\x75\xb2\x19\x50\xa4\x42\x51\x85\x0d\x43\xff\xbd\xa4\xa8\x07\x29\x91\x92\x2d\xbb\x48\xb4\xc4\xe1\xbd\xbe\x3b\xde\x8b\xe7\x45\x20\xbf\x10\xc7\xe1\x73\x10\x1e\x84\xc8\x9e\xa3\xe8\x35\x67\xf4\x29\xdf\x1d\x20\x45\x9f\x19\xdf\x47\x31\x47\x89\x78\x5a\x7e\x8d\xf4\xd9\x43\xf8\xa8\x85\x3e\xe9\xff\x67\x48\xc6\x90\xef\x38\xce\x04\x66\x54\x49\x7f\x67\x1c\x02\xcd\x12\xa4\x20\x50\xad\xa2\xe3\x4e\x30\xc5\x8a\x39\x97\xdc\x1a\x71\x45\xd0\x6c\xdf\x38\x47\x27\x8b\x50\x11\xc5\x29\x03\xa5\x1c\x55\xe4\x47\x9b\x98\x62\xba\x12\x90\x2a\x7d\x5f\x7a\x24\x5c\x9f\x9f\xa5\x83\x1c\x12\xa5\xe2\x21\x0c\xca\x96\xa9\xec\xf8\xc3\x8c\xe5\x12\xd7\x5f\x58\x51\x01\x7b\xe0\x7e\x10\xb8\x66\x18\xc2\xc0\x69\x91\x4a\x8e\xe5\x25\xfa\x7f\x40\x82\x0a\x22\x96\x43\x3b\x88\x90\xdf\x0a\xea\xda\x82\x1d\x19\x91\x8b\xfa\x58\xa5\x21\xc5\x1c\x6b\x9d\x0a\x42\x50\x06\x1b\x27\x8c\x1c\xa7\x19\x81\x3f\xd2\x97\x7c\x68\x1a\x68\xe5\xc0\xba\x8d\x74\x10\x6e\x19\x23\x80\xa8\xfa\xd9\x3a\x1e\x84\xb4\x20\x44\xff\x4d\xb7\xfa\x84\x6d\x5f\x61\x27\xd4\xaf\x5c\x70\x4c\xf7\xa1\xcf\x7e\x45\x9d\x73\xcf\xc6\x65\x36\x7c\x8d\xa9\xf2\xf2\x94\x28\x28\x7e\x2b\xa0\xa1\x0a\x5e\x40\x07\x73\x61\x80\x6d\x6d\x34\x8e\xe9\xd3\x8c\xb3\x0c\xb8\xc0\xbd\xe8\xe9\xa2\xf3\x78\x53\xa3\xec\x01\x49\x18\x4f\x91\xba\xab\xb0\xe0\x38\x74\xc6\xaa\x2b\xca\x7b\x6b\x16\x58\x10\x98\xd4\xeb\x14\xb5\xeb\x7d\x96\x82\x26\x4b\xcf\xe6\x71\x2a\xcf\xb0\x4c\xcd\x2a\xf7\x3d\x6a\x9b\x74\xf3\x56\x5e\x8f\x02\xc7\x1d\x29\x72\x59\x28\xbf\x5a\x16\xfb\xc6\x4d\xfb\xe8\x58\xb3\x8c\x1b\x77\x0a\x77\x86\xa6\xb4\xb4\xf5\x64\x93\xbb\xa0\x24\x88\xe4\x1e\x84\xad\x13\x37\x21\x9c\xd2\x72\x0b\x42\x74\xfc\x09\x74\x2f\x0e\xbd\x9e\x3b\xd5\xbc\x2c\x1f\xaf\xd6\xd0\xb6\x52\xbb\xdf\x22\x21\x80\x4f\xe7\xa8\xb7\x78\xb8\xd4\x7d\x74\x07\x13\xc5\x71\x05\x05\x91\xa6\x91\xf4\x7b\x38\x3d\xe9\x1e\x6e\x1d\xab\xef\x3c\x0c\x74\xbf\x7d\xd5\x6c\xce\x89\xa5\xbe\x8d\xf7\x66\xce\xee\xd1\x86\x67\x80\x34\xad\x8f\xc3\xb3\xae\xc6\x1c\xe3\xb7\xc2\x96\xd9\xb4\x72\x0c\xf0\xab\x92\xe9\x5a\x05\xee\x5c\xb2\x87\xc6\x7f\xa9\x9b\x17\x6b\xb0\xcc\x75\x77\x96\x16\xb7\xcf\x1c\xde\x0a\xcc\x21\x1e\x53\x64\x0e\x73\x4f\x81\xbc\xb8\x07\xa6\xe6\xfa\x50\x55\xe2\x5b\x4d\xad\x4b\xb6\xb6\x81\x49\x77\xc7\xca\x68\x1a\x90\x67\xd7\x78\x3f\x3c\xba\xa3\x8e\xdd\xe8\x7b\xc0\x8a\x21\x03\x1a\x03\xdd\xdd\x11\xd1\x20\xd3\x46\
x72\x55\x7d\x93\x0d\xb3\xcf\x34\x52\x46\x03\xd9\x8d\x75\xe2\x0e\x42\xbd\xb9\xdf\xe9\xdd\x34\xb6\x24\x9b\x5b\xa4\x56\x3f\x6b\xac\xd8\x01\x30\x1e\x25\xce\x3a\x77\xc6\x73\xd4\xbd\x96\xc9\xf5\x06\xbc\xca\xf8\x44\xb0\x2e\x09\x5a\xf3\xf5\x3a\x94\x7b\xb7\xa8\x9f\x7f\x17\x8e\x58\x53\xb2\x0e\xfc\xf5\x92\x8c\xc2\x4c\x49\xca\x84\xe7\x7d\x5d\xb6\x2f\x7e\x4f\x79\xba\x96\xe6\x75\xb7\x87\x9b\x0d\xdc\xb5\xbd\xae\xbb\x85\xb8\x8e\x64\x67\xd1\xe8\x18\xe5\xe2\x5f\x00\x00\x00\xff\xff\x4e\x9b\x8d\xdf\x17\x11\x00\x00") + +func jsonschemaDraft04JSONBytes() ([]byte, error) { + return bindataRead( + _jsonschemaDraft04JSON, + "jsonschema-draft-04.json", + ) +} + +func jsonschemaDraft04JSON() (*asset, error) { + bytes, err := jsonschemaDraft04JSONBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4375, mode: os.FileMode(420), modTime: time.Unix(1441640690, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _v2SchemaJSON = []byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x5d\xdf\x73\xdc\xb6\xf1\x7f\xcf\x5f\x81\xb9\x78\x46\xf6\x24\xd6\x39\xfe\x7e\x5f\xea\x97\x8c\x1a\x39\x89\x5a\xbb\xd2\xf8\x9c\xf6\xc1\x95\x67\x70\x24\x4e\x87\x84\x3f\x2e\x04\x29\xe9\xea\xea\x7f\xef\x02\xfc\x71\x04\x01\x90\x20\x89\x3b\x9d\x6d\x7a\xa6\x8d\x8e\x04\x16\x8b\xc5\x62\xf7\xb3\x0b\x10\xf8\xf4\x0d\x42\xb3\x94\xa6\x01\x99\xbd\x42\xb3\x33\xf4\xb7\xc5\xe5\x3f\xd0\xc2\x5b\x93\x10\xa3\x55\x9c\xa0\xc5\x1d\xbe\xb9\x21\x09\x7a\x79\xfa\x02\x9d\x5d\x5d\x9c\xce\xbe\xe7\x15\xa8\xcf\x4b\xaf\xd3\x74\xf3\x6a\x3e\x67\x79\x91\x53\x1a\xcf\x6f\x5f\xce\x99\xa8\x7b\xfa\x3b\x8b\xa3\x6f\xf3\xc2\x4f\xf2\x47\xb5\x1a\xfc\xe5\xf3\xa2\x60\x9c\xdc\xcc\xfd\x04\xaf\xd2\xe7\x2f\xfe\xbf\xa8\x5c\xd4\x4b\xb7\x1b\xc1\x54\xbc\xfc\x9d\x78\x69\xfe\x2c\x21\x7f\x66\x34\x21\xbc\xf9\x0f\xf0\x1b\x9e\x14\xad\x8b\xd7\x9c\xb3\x68\x15\x97\x7f\x6f\x70\xba\x66\x33\xf8\xfb\x5a\xd4\xc5\xbe\x4f\x53\x1a\x47\x38\xb8\x4a\xe2\x0d\x49\x52\x4a\x18\xd0\x59\xe1\x80\x11\x51\x00\xca\xa7\x24\x89\xa4\xb7\x9f\x72\x52\x1f\xef\x9f\x57\x3f\x78\x97\x12\xb2\xe2\xac\x7d\x3b\xf7\xc9\x8a\x46\x82\x2c\x9b\xdf\x92\xc8\x8f\x93\xd7\xf7\x29\x89\x18\x3c\x98\x89\xd2\x0f\xf0\xff\x0f\x39\x79\x0d\xdd\x92\xfb\x1a\xed\xb2\xdb\x2c\x4d\x68\x74\x53\xf4\x05\x9e\x93\x28\x0b\xab\x6e\x8b\x27\x30\x26\xb3\xe2\xd7\x75\x55\xcc\x27\xcc\x4b\xe8\x86\x73\xc4\xa9\xbc\x5f\x93\x6a\x0c\x6f\x49\xc2\xf9\x42\xf1\x0a\xa5\x6b\xca\x90\x1f\x7b\x59\x48\xa2\xf4\xb4\xe0\xb4\x2e\xc2\xce\xce\x8a\x52\x52\xbd\x75\xcc\x52\x9b\x8e\x14\x62\xe6\xaf\x3e\x7e\xf8\xf8\xe9\x61\x8e\x5e\xfd\x1b\xfe\x5d\x7f\xf7\xf4\xc7\x57\xf0\x97\xff\xdd\xb3\x1f\x9f\xcc\xda\xfa\xc3\x1b\x42\x4f\x23\x1c\x12\x04\x1a\x4a\x37\xcf\xf2\x1e\x11\xa1\xa0\xe8\xf5\x3d\x0e\x37\x01\x79\x85\x4e\x76\x8a\x79\x22\x73\xba\xc4\x8c\x5c\x81\x72\xf4\xe5\x76\xde\xca\x16\xa7\x8a\xb8\xce\xa1\x34\xd6\xb1\x33\xc7\x1b\x7a\xd2\x90\xb5\x50\xf8\x9a\x42\x18\xc5\x5d\x14\x7c\x43\x41\xc6\x12\x05\x0f\xde\x66\x0d\x12\x0d\xe6\xce\x50\x00\xd5\xb8\x90\xde\x5e\xbc\x7d\x8d\x78\x4f\x19\xc2\x9e\x47\x36\x29\xf1\xd1\x72\x5b\x31\xbb\xeb\x9e\x9e\x89\x90\xf8\x14\xbf\x87\xea\x2a\x1b\xa0\xdc\x7e\xe6\xf5\x67\xa3\x68\x1a\x79\x38\x42\x05\x8d\x51\x6c\x88\x29\xdf\x29\xcd\xca\x32\xec\x6a\xd6\x5e\x77\xd7\xaf\x17\x6e\xb4\x9f\x80\x5a\x82\xc2\x58\x31\x51\x94\x3d\x37\x51\x4b\x08\xdb\xc0\x43\x1b\xfd\x28\x8b\x1a\x69\x31\xe2\x65\x09\x4d\xb7\x16\xaa\x56\x96\xd4\xd6\x3f\xef\x23\x27\x5d\x25\x89\x6a\x8a\x6f\x98\x6e\x16\xe2\x24\xc1\xdb\x9d\x1e\xd0\x94\x84\xf5\x72\xc6\x06\x81\x5e\x69\x12\x1f\xaa\xda\x59\x44\xff\xcc\xc8\x45\x41\x23\x4d\x32\x22\xf1\x40\xee\xf9\x04\xc7\xc1\x79\xe
c\x59\x74\x49\x2a\xdd\xb0\xf0\x3a\x1d\x52\xcc\xa9\xc6\xad\xe9\x66\xcb\x2f\x24\x22\x09\x0e\x10\xaf\x9e\x84\x98\x3f\x46\x78\x19\x67\xa9\x66\xb6\x2a\x5e\x51\x3c\x2d\xcc\x7d\x55\xac\x72\xf4\x8a\xcf\xe8\xf2\x8c\xe5\xd4\x32\x78\x47\xf1\x5a\xf6\x90\x2d\x02\xd4\x7a\xc9\x52\x8e\xf2\xc0\x69\x3c\x66\xad\x1b\x8d\xd6\x0c\x06\x5c\x27\xdb\x33\x94\xab\x04\xc2\x91\x0f\x56\x87\x78\x14\x2c\xb7\x20\x5a\xf7\x24\x35\xce\xbe\x57\xa5\x3a\xa6\x75\x06\x20\x27\x4a\xa9\x57\x79\x64\x70\xed\x4b\x70\xd0\x9d\x8d\xcb\x94\x86\x33\x10\xc4\x11\x07\x04\xb5\xe7\x92\x0b\x5d\xac\xe3\x2c\x00\xcf\x40\x90\x4f\x57\x2b\x92\x00\x46\x40\xab\x24\x0e\x45\x09\x21\xa7\x53\x84\x7e\xa1\xe9\xaf\xd9\x12\xfd\x1c\xe0\xdb\x18\x74\x0f\xbd\xc5\xc9\x1f\x7e\x7c\x17\x21\x40\x16\x38\x08\xe2\x3b\xe2\x1b\x7a\x01\x6a\x14\xb2\xcb\xd5\x82\x24\xb7\xd4\x1b\x33\x8e\xdc\xeb\x0a\x62\x9c\x7b\x96\x93\x13\xa8\xb5\x5d\x8a\xe0\x32\x53\xec\xa5\x76\xea\x5a\x16\xd6\x52\x0a\xa0\x41\x30\xba\x76\x94\xca\xc2\xaa\xc2\x37\x1d\x7a\x83\x3b\x5b\x93\xf1\x53\x5e\x53\x32\x19\xa5\x34\x60\x60\x40\xd7\x24\x0d\xeb\x39\xfd\x0d\x73\x91\xc3\xb0\x91\x43\x48\x7d\x50\x30\xba\xda\x42\x59\x94\xa3\xba\x9c\xcb\x42\x12\x08\xda\x85\x80\x61\x0e\x91\x02\x8e\xe8\x7f\x44\xbf\x0c\x23\x9b\x25\xc1\x48\x5e\x7e\x7b\xf7\x06\x6d\x62\x0a\xfc\x00\x33\x05\x8e\xf3\x54\xb9\x9e\xca\x84\xf2\xe7\x9c\x06\xb8\x3b\x3d\x6b\x30\xe5\xe9\x58\xe6\x04\x0d\x04\xc3\x05\xde\x9e\x59\x49\xc9\xc0\x65\xce\x4c\x9b\xe5\x3d\x98\xb1\x97\x74\x5f\x9d\x4f\x46\xdd\xd7\xfb\x3c\xa1\x8d\x03\xfd\xdb\xfe\x14\xbc\xae\xd4\x45\x17\x05\xfc\x3d\x45\x17\xe9\x09\x43\x24\xf2\xe2\x2c\xc1\x37\x60\x44\x41\xe3\x32\xc6\xfd\x12\xba\x5c\x00\x28\x8e\x43\x18\x08\xba\x0c\xaa\x6a\x07\xd5\xfb\xaa\x4d\x2b\x5d\x3f\x16\x1d\x52\x42\x00\x4b\xeb\xf9\x8e\x04\x20\xeb\xdb\x3c\x84\x63\xa5\x0c\x68\xe4\xd3\x5b\xea\x67\x80\xc4\x80\x0d\x21\x21\x76\x8a\x40\x62\x5b\x14\x66\x10\xcd\x80\x8f\x4c\xca\x8a\x45\x95\x93\x32\xbc\x3c\x39\x55\xc2\xc8\x3d\x0a\xa3\xa6\x0e\x10\xa8\x5a\x11\xe3\x3d\xe5\xb0\xb8\x6d\x14\xdb\xe6\x8e\x4d\x00\x65\x92\xbe\x81\x6e\x27\xc2\x2f\x92\x49\x0a\x9f\x8d\xd1\xbc\x8c\x44\x72\x20\x04\x68\x92\xe7\xb4\xf2\xf6\x59\x81\x79\x96\x42\xcd\x61\xb0\x72\x72\x0c\xc6\x91\x3f\x29\x82\x69\xbf\x00\x86\x22\x1c\x95\x23\xe4\x86\xaa\x69\x22\xb8\x3d\xf6\xbd\x6a\xaf\x7f\xf7\x13\x02\x38\x97\x81\x9f\x15\x8e\x81\x09\x5c\x50\x0b\x56\xa5\x6e\xe9\x62\xc9\x3d\xf6\xaa\x6c\x6e\xbf\x9d\x32\x45\x79\x3d\x7b\x23\xfb\x8c\x06\x83\x6a\xac\x56\xb6\x5a\xe5\xda\xc4\xcb\x2e\x2f\xc6\xcd\xb9\xe2\xc4\x4c\xfe\xc9\x3e\x26\x70\xe1\x3a\x8e\xdd\xfa\x93\x3c\xdd\x36\x66\x88\x95\x04\x41\x48\x43\xf2\x3e\xa7\xd1\x99\x2e\xd4\xb8\xd6\x2a\xdb\x55\x42\x80\x5f\xdf\xbf\xbf\x42\x21\x40\x38\x70\xf9\x0d\x8b\xc2\xd9\xc0\x8d\xa1\xec\x09\x81\x76\x49\xa3\x81\x38\xe8\x88\xe2\x7c\x39\x3b\x24\x09\x43\xce\x10\x89\x57\x6a\x96\x48\x37\x54\xb5\x97\x0f\x52\x75\x43\x9a\xa8\x51\x70\x06\x0e\x22\xc4\xc9\x76\x54\xfc\xbd\x4c\x28\x81\x88\x35\xa7\x54\xaa\x45\x35\xf6\x8f\x16\xfc\x57\x1c\x7c\x3f\x22\xba\x37\x18\x5a\xf1\xce\x36\xa5\xd6\xa4\x59\x31\x76\xe1\xbb\x48\xfb\x14\x01\x27\xdd\xa5\x5c\xba\x64\xaf\x49\x6f\x1b\x84\xdb\x33\xc5\xdd\x22\x16\x4d\x9a\xbb\xc9\x96\x26\xf9\x3f\x88\xad\x82\x8e\x2b\xb6\xb4\x59\xf0\x16\x92\xbb\xf2\x66\x9a\xba\x5c\x78\x0b\x49\xc5\x0a\x36\x26\xb1\xb2\xee\xd2\x42\x4b\x59\x7b\x69\x52\xf3\x39\x0e\xf1\x70\x4a\x8c\xda\xb9\x8c\xe3\x80\xe0\xa8\xa9\x9e\x2b\x9c\x05\xa9\x84\xa6\x15\x46\xd5\xb4\x7d\x1b\xa7\x52\xea\x5e\xd0\x32\xc6\x48\x02\xf8\xbb\x02\x42\x47\xe4\x34\x0a\xc2\xbd\x81\xd0\x0d\xb1\xcc\x08\xee\x7c\xb4\x5e\xf9\x33\x47\x74\xe4\xe5\xd4\xe1\x84\x7c\x12\xc0\xdc\x72\x42\x2a\xde\x34\xa3\x81\xe1\xb4\xd6\x04\x2b\xd3\x65\x98\xa0\x70\x
ea\xad\x1d\x51\x72\x64\xb7\xb4\x93\x4e\xbb\x9a\x67\x9d\x9c\xc8\xeb\x56\x61\x2c\x4f\x29\x31\x61\xbb\x09\x05\x4b\x9e\xf0\x44\x04\x8e\xb6\xe8\x16\x07\xd4\xcf\x11\x26\x83\x60\x23\x83\x32\xb1\x2f\xc2\xa6\x93\xc2\xdc\xd4\xb3\x12\x21\x95\xa7\xec\x0f\x6e\x67\xfd\xd3\x0f\x2f\x9e\xff\xe5\xfa\xd3\xff\x3d\x3c\x7b\xf2\xdf\x8f\x4f\x8b\xf6\x9f\x3d\xe9\x67\xc1\xff\x89\x83\x8c\x18\xf2\x1c\x7b\x30\x2b\x51\x9c\x36\x40\xa8\x7e\x84\x2c\x65\xd4\x29\x25\x6d\x37\xfa\x77\x64\xd7\x95\x2e\xf5\xcb\xe5\x59\x53\xc1\x38\x22\x97\x2b\x29\x86\xe8\x31\x3a\xda\x81\xb1\xa8\xcf\xb7\x00\xbd\x23\x62\x6d\xc9\xd3\x2c\x89\x5c\x6b\x59\x1f\x1e\x14\xd5\xa7\x53\xd9\xc4\xbe\x23\xeb\x6a\xdb\x93\x54\x53\x95\x76\x53\x62\x2d\x52\x93\x93\x5f\x4a\x93\x3d\x28\xad\x68\x40\x16\x3a\x6a\xb5\x5f\xd7\x46\xbb\x6d\x6d\x21\xcb\xc2\x86\x48\x41\x89\xd5\x5b\x48\x55\xa5\x5b\x26\xef\x91\x61\x15\x49\x89\x55\xb9\x39\xcf\xa4\xe5\x4d\xcc\x5a\x9a\x77\x06\xf8\xf4\xd3\x4c\x90\xb4\x9e\x5f\xa9\x9c\x53\x91\x98\xd2\x85\x73\xca\x0e\x38\xf1\x54\x53\x92\x9b\x71\xb1\xa2\xde\x7c\x4a\xa3\x94\xdc\xa8\x8f\x75\xe8\x1c\x95\x29\x86\xce\x09\x51\xa5\xc4\x7a\x5b\x08\x5d\xc2\xc2\x04\x35\x12\x1a\x52\xbe\xca\xc0\xf2\x04\x85\x96\x9e\x17\x07\x01\x0c\x25\x54\xf8\x59\xcb\x93\x69\x85\xbb\x51\xcb\x80\x22\xcb\x60\xc5\x82\x64\x59\x58\x4b\x29\xc4\xf7\x34\xcc\x42\x3b\x4a\x65\x61\x83\x01\xf1\x82\x8c\x81\x50\xde\xf6\x21\xa9\xd4\xd2\x73\x09\xe5\xed\xb9\x2c\x0a\x77\x70\xd9\x87\xa4\x52\xcb\x24\xcb\x37\x24\xba\x49\x2d\xf1\xef\xae\xb8\xa9\xcf\xbd\xa8\x55\xc5\x4d\xb8\xbc\xd8\x39\x69\xb7\x14\x25\x0a\x9b\x7a\x79\x61\x3f\x55\xaa\xd2\xa6\x3e\xf6\xa1\x55\x96\xd6\xd2\x92\x33\x86\x16\xe4\xea\x15\xf4\xba\x12\x59\xeb\x47\x64\xd4\x09\x98\x79\x14\x3c\xe5\xa5\x12\x06\x1b\xfa\xb8\x2b\x6f\x98\xf9\xfd\x61\x90\xe2\x99\x1f\xc9\xe9\x36\x8b\xb7\xec\x4e\x85\xe0\xa9\x70\x54\x5b\x1e\x3a\x25\x62\x25\xfc\x0e\x82\x2b\x74\xff\x9c\x67\x3d\x45\x64\xd5\xbd\x6b\x86\xe7\x8d\x35\x65\x8c\xbb\x0f\x97\xb1\xbf\xbd\xaa\xd6\xf5\xc6\x6d\x7c\xa8\xbb\x16\x69\xdf\x9f\x8c\x1b\xaf\x8f\x31\x6d\xe3\x2a\xbb\x9d\xa7\xd6\x35\xc9\xed\x2a\x58\xe7\xcb\xf7\x94\xc7\xc5\x7c\x8f\x9b\xd8\x3d\x43\x21\x8a\x2e\xd0\x25\x2f\x9d\xb1\x71\xfb\xdb\x1c\xef\x18\xd9\x31\x6e\x40\x11\x63\x04\x76\xce\x09\x83\x95\x2b\x12\xc2\x41\xec\x61\xbd\xd0\x6c\xa0\x18\xd7\xe5\x6e\xbc\x54\x53\xe0\x3e\xb9\x52\x13\xdb\x77\x6b\x22\x12\x20\x71\x82\x20\x76\xcf\xbf\x6c\xa8\xd8\xe6\x83\x55\xb6\xc7\x4b\xe4\x09\x2c\x1c\x9c\x0e\xc8\xc4\x6a\xc3\x39\xbb\x38\x6d\xc4\xb6\x8a\x1c\xb7\x57\x16\x62\x91\x2d\x17\x4d\x46\x8e\x2d\xec\xe9\x9c\xeb\x9f\xa9\x06\x1c\xcf\x44\x93\x03\x3d\xfe\x4f\x3f\xd5\x26\xa3\x3a\xd4\xa8\x1e\x3e\x36\x35\x04\xa1\x86\x90\x75\x8a\x4d\xa7\xd8\x74\x8a\x4d\x5b\x7b\x3d\xc5\xa6\x5f\x68\x6c\xfa\x4d\xfd\xbf\x25\x4e\x02\xde\x93\xed\x04\x93\x26\x98\x54\x7b\x2a\x74\x62\x42\x49\xfb\x43\x49\x82\x99\xd7\xe1\x26\xdd\x36\x57\x15\xa5\x96\x6d\x76\xbf\xb4\xb1\x25\x9a\x61\x88\xc1\x94\xe2\x49\x19\x5c\x53\xdb\xe5\xb6\x60\x38\x0a\xb6\x5c\x6f\x45\xc2\x86\xaf\x8a\x73\xa6\x78\xce\x26\x33\x7d\x33\x31\x21\xbc\xa3\x44\x78\xff\x82\x01\x7c\xcb\xad\xfe\x04\xf5\xd0\x04\xf5\x26\xa8\x37\x41\x3d\xd4\x84\x7a\xdc\xe4\x9d\xe3\x14\x4f\x68\x6f\x42\x7b\xb5\xa7\xa5\x5a\x4c\x80\x6f\x02\x7c\x3a\xde\x3f\x0f\xc0\xd7\x78\xc8\xf7\x69\x4d\x20\x10\x4d\x20\x70\x02\x81\x5d\xbd\x9e\x40\xe0\xd7\x04\x02\xf9\x27\x2c\x9f\x27\x00\x34\x7d\xb6\x59\x3c\x2d\x1e\x75\x6f\x9f\x1c\x04\x18\xb5\x4e\x4d\xfa\xd6\xb1\xd6\xb4\xa8\xe1\x1c\x62\x1e\x39\x8c\xe4\x8a\x35\x41\xc8\x69\x65\xb5\xfa\xf7\x75\x40\xae\x09\x69\xa1\x09\x69\x4d\x48\x6b\x42\x5a\xa8\x89\xb4\xa2\x38\xfa\xeb\x21\x36\xa9\xea\x3f\x1e\x19\xf4\x75\x9a\x71\xd3\x9c\x4e\x74\x16\xf4\x5a\x32\x8e\x03\
x29\x9a\x96\xab\x07\x92\x33\xa0\x61\x65\x64\xaf\x1b\x18\x5a\x33\xa4\x83\x04\x2e\xef\x62\x1e\xd8\x09\x45\xd1\x3a\xd8\x57\x76\x64\xda\x7e\x4d\x7b\x06\x80\x2b\x47\x8c\x94\xd5\x8f\xe1\x04\x04\x83\xa5\x13\xd0\x73\x3a\x07\x3d\xc6\x4b\x09\x17\x5c\x7f\xe7\x3e\x1c\xb8\x68\x8f\x5d\x2d\xfb\x67\x79\xb4\xfb\x7c\xd7\x9d\xb9\x74\x1a\xad\x35\xbc\x1e\xd1\xa0\xe6\x2b\xd0\x5e\x70\x67\x50\x93\x6d\x98\xa8\xd3\x66\x0f\x68\xb1\xeb\x73\x8e\x0e\x20\x36\xa4\x45\x17\x68\x6d\x40\xbb\x4e\x20\xdd\x90\xfe\xba\xc0\x7d\xa3\xfa\x3b\x0a\x1c\xda\xb6\x2c\xf9\x97\x98\x89\x38\xe4\xa2\x88\x99\x86\x01\x49\x07\x2d\x9f\xe7\xf3\xe9\xc5\x20\xf0\x39\x40\xe6\xa3\x10\xea\x3e\x25\xbd\xef\x86\xdb\x05\x6d\x81\x81\x07\x08\xbb\x13\x28\x83\xc0\xcd\xc7\x28\x1c\x42\xea\x07\x69\xbd\x5d\xf4\xa6\xb4\xdf\x18\x06\x72\xaf\x7f\x26\x67\x3e\x6c\x23\x94\x21\x96\xcc\x18\xc6\x74\x7e\xfa\xce\xcb\x44\xdb\xc3\x1e\xa1\xd0\xcc\xa8\xca\x95\x6b\xbf\x9a\x99\xd3\x0a\x4d\x3c\xe8\x01\x95\x26\x15\x36\x06\x4f\xd5\x02\x28\x8b\x94\xd3\xa3\x89\x51\xb7\xd0\x29\x1b\xb5\x1f\x94\x97\xfa\xb3\xfd\x7a\x32\x28\x0f\xd6\xa8\xa1\xc3\x41\xa0\xa2\xb6\x96\xb3\x09\x4d\x9d\x33\x1d\x5a\x68\xdb\x29\x3d\x77\x86\x98\xc1\x1c\x21\xe5\x7d\xea\x9e\x7b\x7d\x38\xeb\x27\x50\x9f\x72\x6c\x0e\x62\xc2\x69\x9c\x0c\x89\x4e\x12\x88\xf9\x2f\xa3\xc0\x78\x30\xe3\xe0\x23\xd8\xee\x43\xe5\x84\x54\xbd\x0c\x78\x41\x03\x26\x74\x7f\x24\x62\xb1\x02\x50\x17\xe8\x2e\x66\xb4\xfe\xba\xb0\x76\x0c\xcc\x17\x1e\x46\xbb\x38\xb0\x64\x0a\x9d\x8f\x26\x74\x7e\x14\x14\xe4\x66\xe9\xca\x6e\x4b\xcf\xde\xcc\xd9\xf1\x1a\xa3\xe6\x6a\x97\x85\x45\x1a\x75\xb3\xc3\x74\xfe\xd0\xb4\x12\xd9\x45\x69\x5a\x89\x9c\x56\x22\xa7\x95\xc8\xc7\x5b\x89\x7c\x04\xc8\x28\xf9\x24\xdd\xb5\x89\x63\x2f\x29\x2c\x69\xbe\xcb\x31\x0c\xbf\x16\x62\xa6\xf4\xb7\xe3\xd2\x42\x1d\x8d\xe1\xfe\x52\x75\x8a\x4a\x0c\xab\x77\x16\x56\x37\x4c\x98\xce\xd5\x97\x25\x2d\x87\x59\xfb\xf1\xf8\x16\x5b\x9d\xb4\x5d\x10\xa7\xed\x6e\x70\xe7\x65\x75\x7a\x3b\x04\x4a\x40\xef\x87\xd4\x04\x95\x4f\xe8\x32\x53\x0f\x6f\x1e\x0d\x02\xef\x12\xbc\xd9\xb8\x3a\xae\xfc\x58\xe6\x2a\xbf\xfc\xd3\x95\x06\xf5\xb9\x5e\xcc\xb5\xb6\x8d\x3c\x77\xd6\x19\xc0\x3f\x96\x71\xed\xb8\x7a\x76\xb8\xad\xd3\x9d\xc5\x6b\x95\xed\x5a\x62\x46\xbd\xb3\x2c\x5d\xf3\x7b\x24\xf2\xcd\xa6\x0b\xe5\xe8\xfd\x46\x0a\xcc\x8a\x30\xde\xd0\xbf\x93\xad\x1b\x5a\x31\x06\x06\x5f\x5e\x40\x60\x46\x3d\x9a\xba\xa4\x79\x85\x19\xbb\x8b\x13\xdf\x25\xcd\xb3\x0d\xe7\xd3\xa1\x28\x0b\xb2\x9e\x47\x18\xfb\x29\xf6\x89\x96\x6a\xf5\xf7\xb5\x56\xf3\xda\xc6\x79\xbf\x96\xe6\x31\x4e\xd2\x15\xbd\x75\xb9\xf5\xf9\xf8\x4c\x49\x63\x7e\x1d\x60\x0c\x1b\x28\xa2\xb1\xfd\xed\xc0\x23\x9c\x77\xbf\x7b\x88\x87\x7a\xae\x7e\xbb\xf8\x5b\xcf\x36\x6b\xe4\x42\x9c\x1f\xe5\x71\x7c\xba\x69\xb0\xd7\x87\xd5\xd1\x55\x10\xdf\x49\x77\x1c\x00\x4f\x71\x52\xdc\x27\xfb\x5b\x9f\x7b\xe9\xdc\x68\x6c\x2e\x14\x8b\x24\x18\xe7\x7b\x74\x6b\xb4\x10\x7e\x77\x7b\xcc\x83\x5e\xdb\x5e\x84\x22\xfa\xb0\xc8\x6b\x68\xa9\x29\x52\xee\xd1\x13\x8b\xbb\x87\x3f\xff\x59\xa1\x20\x8e\xc7\x9d\x15\x69\xfc\x07\xf9\xf2\x67\xc3\xa6\x10\xfa\xa1\x67\x43\x25\xdd\x69\x16\xc8\xb3\x40\x87\x91\xa7\x89\x50\xb4\xbc\xc7\x89\x80\x77\x72\x9f\xe6\xc2\xb1\xcc\x05\x35\xb0\x3b\x32\xa4\xf4\xf5\x4d\x93\x6a\x48\xbe\x30\xfc\x34\x4d\x42\xa4\x9f\x84\x8b\xe6\x28\x3a\x58\x78\x90\xbb\x2c\xb7\x2a\xdf\x3d\xea\x70\x49\xa6\xba\xe2\x59\x91\x6f\xc7\x3a\x4c\xe3\x0a\xc0\x6e\x96\x34\x5f\x4f\xef\x68\xa0\x88\x10\x9f\xf8\x28\x8d\xc5\xd9\x37\x08\x17\xf7\xf9\xe5\xf7\xb4\x06\x81\xf6\xfa\x89\x92\x37\xd9\x88\x69\xba\x3e\x38\xdd\xa9\xdc\x3b\x2f\x89\xc8\x9a\x8c\xe1\xce\x37\x6d\x1a\xce\x7a\x1d\xac\x76\x37\xeb\x20\xe1\xa7\x09\x8e\x18\xf0\xc4\x2f\xff\x48\x63\x2f\x0e\xca\xef\xd8
\xc5\x75\xff\x6d\xe2\x34\xce\x7e\x9d\x79\x14\x1b\x92\x64\x0b\xc1\x9f\x30\xf9\xd1\x9d\xf2\xbb\x66\xf0\x7a\xcb\xa6\x65\x3b\x86\x89\x79\x95\xf5\x99\xc7\x6e\xa5\xab\x44\xe4\x9f\xa9\xfc\x73\x43\x37\xba\x8b\xc7\x77\x4b\x47\x82\x5c\x2b\x97\xbb\xb3\x7f\x0e\xc5\x6e\xed\x41\x58\x3f\x74\xc8\x8e\xff\xe6\xd6\x3e\x47\xdb\xfa\x4a\xf2\x7a\x27\xe1\x74\x2b\xdf\xae\xa9\xe6\x16\x1b\x67\xdb\xf7\x2a\x03\xae\xdb\x1b\xe0\xf2\x6b\xb7\xaa\x21\x65\x47\x8e\xb3\x2f\xdc\xca\x26\x5a\x76\xff\xb8\xff\xaa\xad\xea\x97\xb2\x87\xc7\xd9\x97\x6c\x6a\xbf\x9c\xb6\xa5\xdf\x50\x54\x1b\x2f\x65\xeb\x8f\xfb\x2f\x7a\x6a\x52\xdc\x6b\x6b\xf2\x17\x3c\x3b\xac\xd0\xdc\x90\xe4\xec\xcb\xb4\x9a\x18\x95\xbd\x93\xfb\x94\xe2\x3e\x1b\xd3\x0b\x51\xbf\xe7\xc9\xe9\x57\x67\xd5\x44\x88\xdc\x29\x7f\xd4\x54\x78\x19\x0c\x59\x20\x68\x7d\x54\x2a\x78\x52\xfc\xd5\xa8\x4d\x32\xbd\x3e\x2c\x97\x61\xfa\x37\xfc\x7f\x0f\xff\x0b\x00\x00\xff\xff\x31\x8b\xeb\xb6\x54\x9c\x00\x00") + +func v2SchemaJSONBytes() ([]byte, error) { + return bindataRead( + _v2SchemaJSON, + "v2/schema.json", + ) +} + +func v2SchemaJSON() (*asset, error) { + bytes, err := v2SchemaJSONBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "v2/schema.json", size: 40020, mode: os.FileMode(420), modTime: time.Unix(1446147817, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "jsonschema-draft-04.json": jsonschemaDraft04JSON, + "v2/schema.json": v2SchemaJSON, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "jsonschema-draft-04.json": &bintree{jsonschemaDraft04JSON, map[string]*bintree{}}, + "v2": &bintree{nil, map[string]*bintree{ + "schema.json": &bintree{v2SchemaJSON, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) +} diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go new file mode 100644 index 000000000..f285970aa --- /dev/null +++ b/vendor/github.com/go-openapi/spec/contact_info.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// ContactInfo contact information for the exposed API. 
+// +// For more information: http://goo.gl/8us55a#contactObject +type ContactInfo struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` + Email string `json:"email,omitempty"` +} diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go new file mode 100644 index 000000000..eb1490b05 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -0,0 +1,626 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "net/url" + "reflect" + "strings" + "sync" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// ResolutionCache a cache for resolving urls +type ResolutionCache interface { + Get(string) (interface{}, bool) + Set(string, interface{}) +} + +type simpleCache struct { + lock sync.Mutex + store map[string]interface{} +} + +var resCache = initResolutionCache() + +func initResolutionCache() ResolutionCache { + return &simpleCache{store: map[string]interface{}{ + "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), + "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), + }} +} + +func (s *simpleCache) Get(uri string) (interface{}, bool) { + s.lock.Lock() + v, ok := s.store[uri] + s.lock.Unlock() + return v, ok +} + +func (s *simpleCache) Set(uri string, data interface{}) { + s.lock.Lock() + s.store[uri] = data + s.lock.Unlock() +} + +// ResolveRef resolves a reference against a context root +func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { + resolver, err := defaultSchemaLoader(root, nil, nil) + if err != nil { + return nil, err + } + + result := new(Schema) + if err := resolver.Resolve(ref, result); err != nil { + return nil, err + } + return result, nil +} + +// ResolveParameter resolves a paramter reference against a context root +func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) { + resolver, err := defaultSchemaLoader(root, nil, nil) + if err != nil { + return nil, err + } + + result := new(Parameter) + if err := resolver.Resolve(&ref, result); err != nil { + return nil, err + } + return result, nil +} + +// ResolveResponse resolves response a reference against a context root +func ResolveResponse(root interface{}, ref Ref) (*Response, error) { + resolver, err := defaultSchemaLoader(root, nil, nil) + if err != nil { + return nil, err + } + + result := new(Response) + if err := resolver.Resolve(&ref, result); err != nil { + return nil, err + } + return result, nil +} + +type schemaLoader struct { + loadingRef *Ref + startingRef *Ref + currentRef *Ref + root interface{} + cache ResolutionCache + loadDoc func(string) (json.RawMessage, error) +} + +var idPtr, _ = jsonpointer.New("/id") +var schemaPtr, _ = jsonpointer.New("/$schema") +var refPtr, _ = jsonpointer.New("/$ref") + +func defaultSchemaLoader(root interface{}, ref *Ref, cache ResolutionCache) (*schemaLoader, error) { + if cache == nil { + cache = resCache + } 
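+	// if a loading ref was supplied, its JSON pointer is used below to walk the root document and determine the ref currently in scope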
+ + var ptr *jsonpointer.Pointer + if ref != nil { + ptr = ref.GetPointer() + } + + currentRef := nextRef(root, ref, ptr) + + return &schemaLoader{ + root: root, + loadingRef: ref, + startingRef: ref, + cache: cache, + loadDoc: func(path string) (json.RawMessage, error) { + data, err := swag.LoadFromFileOrHTTP(path) + if err != nil { + return nil, err + } + return json.RawMessage(data), nil + }, + currentRef: currentRef, + }, nil +} + +func idFromNode(node interface{}) (*Ref, error) { + if idValue, _, err := idPtr.Get(node); err == nil { + if refStr, ok := idValue.(string); ok && refStr != "" { + idRef, err := NewRef(refStr) + if err != nil { + return nil, err + } + return &idRef, nil + } + } + return nil, nil +} + +func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointer) *Ref { + if startingRef == nil { + return nil + } + if ptr == nil { + return startingRef + } + + ret := startingRef + var idRef *Ref + node := startingNode + + for _, tok := range ptr.DecodedTokens() { + node, _, _ = jsonpointer.GetForToken(node, tok) + if node == nil { + break + } + + idRef, _ = idFromNode(node) + if idRef != nil { + nw, err := ret.Inherits(*idRef) + if err != nil { + break + } + ret = nw + } + + refRef, _, _ := refPtr.Get(node) + if refRef != nil { + rf, _ := NewRef(refRef.(string)) + nw, err := ret.Inherits(rf) + if err != nil { + break + } + ret = nw + } + + } + return ret +} + +func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}) error { + tgt := reflect.ValueOf(target) + if tgt.Kind() != reflect.Ptr { + return fmt.Errorf("resolve ref: target needs to be a pointer") + } + + oldRef := currentRef + if currentRef != nil { + var err error + currentRef, err = currentRef.Inherits(*nextRef(node, ref, currentRef.GetPointer())) + if err != nil { + return err + } + } + if currentRef == nil { + currentRef = ref + } + + refURL := currentRef.GetURL() + if refURL == nil { + return nil + } + if currentRef.IsRoot() { + nv := reflect.ValueOf(node) + reflect.Indirect(tgt).Set(reflect.Indirect(nv)) + return nil + } + + if strings.HasPrefix(refURL.String(), "#") { + res, _, err := ref.GetPointer().Get(node) + if err != nil { + res, _, err = ref.GetPointer().Get(r.root) + if err != nil { + return err + } + } + rv := reflect.Indirect(reflect.ValueOf(res)) + tgtType := reflect.Indirect(tgt).Type() + if rv.Type().AssignableTo(tgtType) { + reflect.Indirect(tgt).Set(reflect.Indirect(reflect.ValueOf(res))) + } else { + if err := swag.DynamicJSONToStruct(rv.Interface(), target); err != nil { + return err + } + } + + return nil + } + + if refURL.Scheme != "" && refURL.Host != "" { + // most definitely take the red pill + data, _, _, err := r.load(refURL) + if err != nil { + return err + } + + if ((oldRef == nil && currentRef != nil) || + (oldRef != nil && currentRef == nil) || + oldRef.String() != currentRef.String()) && + ((oldRef == nil && ref != nil) || + (oldRef != nil && ref == nil) || + (oldRef.String() != ref.String())) { + + return r.resolveRef(currentRef, ref, data, target) + } + + var res interface{} + if currentRef.String() != "" { + res, _, err = currentRef.GetPointer().Get(data) + if err != nil { + return err + } + } else { + res = data + } + + if err := swag.DynamicJSONToStruct(res, target); err != nil { + return err + } + + } + return nil +} + +func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { + toFetch := *refURL + toFetch.Fragment = "" + + data, fromCache := r.cache.Get(toFetch.String()) + if !fromCache { + b, err := 
r.loadDoc(toFetch.String()) + if err != nil { + return nil, url.URL{}, false, err + } + + if err := json.Unmarshal(b, &data); err != nil { + return nil, url.URL{}, false, err + } + r.cache.Set(toFetch.String(), data) + } + + return data, toFetch, fromCache, nil +} +func (r *schemaLoader) Resolve(ref *Ref, target interface{}) error { + if err := r.resolveRef(r.currentRef, ref, r.root, target); err != nil { + return err + } + + return nil +} + +type specExpander struct { + spec *Swagger + resolver *schemaLoader +} + +// ExpandSpec expands the references in a swagger spec +func ExpandSpec(spec *Swagger) error { + resolver, err := defaultSchemaLoader(spec, nil, nil) + if err != nil { + return err + } + + for key, defintition := range spec.Definitions { + var def *Schema + var err error + if def, err = expandSchema(defintition, []string{"#/definitions/" + key}, resolver); err != nil { + return err + } + spec.Definitions[key] = *def + } + + for key, parameter := range spec.Parameters { + if err := expandParameter(¶meter, resolver); err != nil { + return err + } + spec.Parameters[key] = parameter + } + + for key, response := range spec.Responses { + if err := expandResponse(&response, resolver); err != nil { + return err + } + spec.Responses[key] = response + } + + if spec.Paths != nil { + for key, path := range spec.Paths.Paths { + if err := expandPathItem(&path, resolver); err != nil { + return err + } + spec.Paths.Paths[key] = path + } + } + + return nil +} + +// ExpandSchema expands the refs in the schema object +func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { + + if schema == nil { + return nil + } + if root == nil { + root = schema + } + + nrr, _ := NewRef(schema.ID) + var rrr *Ref + if nrr.String() != "" { + switch root.(type) { + case *Schema: + rid, _ := NewRef(root.(*Schema).ID) + rrr, _ = rid.Inherits(nrr) + case *Swagger: + rid, _ := NewRef(root.(*Swagger).ID) + rrr, _ = rid.Inherits(nrr) + } + + } + + resolver, err := defaultSchemaLoader(root, rrr, cache) + if err != nil { + return err + } + + refs := []string{""} + if rrr != nil { + refs[0] = rrr.String() + } + var s *Schema + if s, err = expandSchema(*schema, refs, resolver); err != nil { + return nil + } + *schema = *s + return nil +} + +func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*Schema, error) { + if target.Items != nil { + if target.Items.Schema != nil { + t, err := expandSchema(*target.Items.Schema, parentRefs, resolver) + if err != nil { + return nil, err + } + *target.Items.Schema = *t + } + for i := range target.Items.Schemas { + t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver) + if err != nil { + return nil, err + } + target.Items.Schemas[i] = *t + } + } + return &target, nil +} + +func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (schema *Schema, err error) { + defer func() { + schema = &target + }() + if target.Ref.String() == "" && target.Ref.IsRoot() { + target = *resolver.root.(*Schema) + return + } + + // t is the new expanded schema + var t *Schema + for target.Ref.String() != "" { + // var newTarget Schema + pRefs := strings.Join(parentRefs, ",") + pRefs += "," + if strings.Contains(pRefs, target.Ref.String()+",") { + err = nil + return + } + + if err = resolver.Resolve(&target.Ref, &t); err != nil { + return + } + parentRefs = append(parentRefs, target.Ref.String()) + target = *t + } + + if t, err = expandItems(target, parentRefs, resolver); err != nil { + return + } + target = *t + + for i := 
range target.AllOf { + if t, err = expandSchema(target.AllOf[i], parentRefs, resolver); err != nil { + return + } + target.AllOf[i] = *t + } + for i := range target.AnyOf { + if t, err = expandSchema(target.AnyOf[i], parentRefs, resolver); err != nil { + return + } + target.AnyOf[i] = *t + } + for i := range target.OneOf { + if t, err = expandSchema(target.OneOf[i], parentRefs, resolver); err != nil { + return + } + target.OneOf[i] = *t + } + if target.Not != nil { + if t, err = expandSchema(*target.Not, parentRefs, resolver); err != nil { + return + } + *target.Not = *t + } + for k, _ := range target.Properties { + if t, err = expandSchema(target.Properties[k], parentRefs, resolver); err != nil { + return + } + target.Properties[k] = *t + } + if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { + if t, err = expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver); err != nil { + return + } + *target.AdditionalProperties.Schema = *t + } + for k, _ := range target.PatternProperties { + if t, err = expandSchema(target.PatternProperties[k], parentRefs, resolver); err != nil { + return + } + target.PatternProperties[k] = *t + } + for k, _ := range target.Dependencies { + if target.Dependencies[k].Schema != nil { + if t, err = expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver); err != nil { + return + } + *target.Dependencies[k].Schema = *t + } + } + if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { + if t, err = expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver); err != nil { + return + } + *target.AdditionalItems.Schema = *t + } + for k, _ := range target.Definitions { + if t, err = expandSchema(target.Definitions[k], parentRefs, resolver); err != nil { + return + } + target.Definitions[k] = *t + } + return +} + +func expandPathItem(pathItem *PathItem, resolver *schemaLoader) error { + if pathItem == nil { + return nil + } + if pathItem.Ref.String() != "" { + if err := resolver.Resolve(&pathItem.Ref, &pathItem); err != nil { + return err + } + } + + for idx := range pathItem.Parameters { + if err := expandParameter(&(pathItem.Parameters[idx]), resolver); err != nil { + return err + } + } + if err := expandOperation(pathItem.Get, resolver); err != nil { + return err + } + if err := expandOperation(pathItem.Head, resolver); err != nil { + return err + } + if err := expandOperation(pathItem.Options, resolver); err != nil { + return err + } + if err := expandOperation(pathItem.Put, resolver); err != nil { + return err + } + if err := expandOperation(pathItem.Post, resolver); err != nil { + return err + } + if err := expandOperation(pathItem.Patch, resolver); err != nil { + return err + } + if err := expandOperation(pathItem.Delete, resolver); err != nil { + return err + } + return nil +} + +func expandOperation(op *Operation, resolver *schemaLoader) error { + if op == nil { + return nil + } + for i, param := range op.Parameters { + if err := expandParameter(¶m, resolver); err != nil { + return err + } + op.Parameters[i] = param + } + + if op.Responses != nil { + responses := op.Responses + if err := expandResponse(responses.Default, resolver); err != nil { + return err + } + for code, response := range responses.StatusCodeResponses { + if err := expandResponse(&response, resolver); err != nil { + return err + } + responses.StatusCodeResponses[code] = response + } + } + return nil +} + +func expandResponse(response *Response, resolver *schemaLoader) error { + if response == nil { + return nil + 
} + + if response.Ref.String() != "" { + if err := resolver.Resolve(&response.Ref, response); err != nil { + return err + } + } + + if response.Schema != nil { + parentRefs := []string{response.Schema.Ref.String()} + if err := resolver.Resolve(&response.Schema.Ref, &response.Schema); err != nil { + return err + } + if s, err := expandSchema(*response.Schema, parentRefs, resolver); err != nil { + return err + } else { + *response.Schema = *s + } + } + return nil +} + +func expandParameter(parameter *Parameter, resolver *schemaLoader) error { + if parameter == nil { + return nil + } + if parameter.Ref.String() != "" { + if err := resolver.Resolve(¶meter.Ref, parameter); err != nil { + return err + } + } + if parameter.Schema != nil { + parentRefs := []string{parameter.Schema.Ref.String()} + if err := resolver.Resolve(¶meter.Schema.Ref, ¶meter.Schema); err != nil { + return err + } + if s, err := expandSchema(*parameter.Schema, parentRefs, resolver); err != nil { + return err + } else { + *parameter.Schema = *s + } + } + return nil +} diff --git a/vendor/github.com/go-openapi/spec/external_docs.go b/vendor/github.com/go-openapi/spec/external_docs.go new file mode 100644 index 000000000..88add91b2 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/external_docs.go @@ -0,0 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// ExternalDocumentation allows referencing an external resource for +// extended documentation. +// +// For more information: http://goo.gl/8us55a#externalDocumentationObject +type ExternalDocumentation struct { + Description string `json:"description,omitempty"` + URL string `json:"url,omitempty"` +} diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go new file mode 100644 index 000000000..758b84531 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/header.go @@ -0,0 +1,165 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" +) + +type HeaderProps struct { + Description string `json:"description,omitempty"` +} + +// Header describes a header for a response of the API +// +// For more information: http://goo.gl/8us55a#headerObject +type Header struct { + CommonValidations + SimpleSchema + HeaderProps +} + +// ResponseHeader creates a new header instance for use in a response +func ResponseHeader() *Header { + return new(Header) +} + +// WithDescription sets the description on this response, allows for chaining +func (h *Header) WithDescription(description string) *Header { + h.Description = description + return h +} + +// Typed a fluent builder method for the type of parameter +func (h *Header) Typed(tpe, format string) *Header { + h.Type = tpe + h.Format = format + return h +} + +// CollectionOf a fluent builder method for an array item +func (h *Header) CollectionOf(items *Items, format string) *Header { + h.Type = "array" + h.Items = items + h.CollectionFormat = format + return h +} + +// WithDefault sets the default value on this item +func (h *Header) WithDefault(defaultValue interface{}) *Header { + h.Default = defaultValue + return h +} + +// WithMaxLength sets a max length value +func (h *Header) WithMaxLength(max int64) *Header { + h.MaxLength = &max + return h +} + +// WithMinLength sets a min length value +func (h *Header) WithMinLength(min int64) *Header { + h.MinLength = &min + return h +} + +// WithPattern sets a pattern value +func (h *Header) WithPattern(pattern string) *Header { + h.Pattern = pattern + return h +} + +// WithMultipleOf sets a multiple of value +func (h *Header) WithMultipleOf(number float64) *Header { + h.MultipleOf = &number + return h +} + +// WithMaximum sets a maximum number value +func (h *Header) WithMaximum(max float64, exclusive bool) *Header { + h.Maximum = &max + h.ExclusiveMaximum = exclusive + return h +} + +// WithMinimum sets a minimum number value +func (h *Header) WithMinimum(min float64, exclusive bool) *Header { + h.Minimum = &min + h.ExclusiveMinimum = exclusive + return h +} + +// WithEnum sets a the enum values (replace) +func (h *Header) WithEnum(values ...interface{}) *Header { + h.Enum = append([]interface{}{}, values...) 
+ return h +} + +// WithMaxItems sets the max items +func (h *Header) WithMaxItems(size int64) *Header { + h.MaxItems = &size + return h +} + +// WithMinItems sets the min items +func (h *Header) WithMinItems(size int64) *Header { + h.MinItems = &size + return h +} + +// UniqueValues dictates that this array can only have unique items +func (h *Header) UniqueValues() *Header { + h.UniqueItems = true + return h +} + +// AllowDuplicates this array can have duplicates +func (h *Header) AllowDuplicates() *Header { + h.UniqueItems = false + return h +} + +// MarshalJSON marshal this to JSON +func (h Header) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(h.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(h.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(h.HeaderProps) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2, b3), nil +} + +// UnmarshalJSON marshal this from JSON +func (h *Header) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &h.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &h.HeaderProps); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go new file mode 100644 index 000000000..fb8b7c4ac --- /dev/null +++ b/vendor/github.com/go-openapi/spec/info.go @@ -0,0 +1,168 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// Extensions vendor specific extensions +type Extensions map[string]interface{} + +// Add adds a value to these extensions +func (e Extensions) Add(key string, value interface{}) { + realKey := strings.ToLower(key) + e[realKey] = value +} + +// GetString gets a string value from the extensions +func (e Extensions) GetString(key string) (string, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + str, ok := v.(string) + return str, ok + } + return "", false +} + +// GetBool gets a string value from the extensions +func (e Extensions) GetBool(key string) (bool, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + str, ok := v.(bool) + return str, ok + } + return false, false +} + +// GetStringSlice gets a string value from the extensions +func (e Extensions) GetStringSlice(key string) ([]string, bool) { + if v, ok := e[strings.ToLower(key)]; ok { + arr, ok := v.([]interface{}) + if !ok { + return nil, false + } + var strs []string + for _, iface := range arr { + str, ok := iface.(string) + if !ok { + return nil, false + } + strs = append(strs, str) + } + return strs, ok + } + return nil, false +} + +// VendorExtensible composition block. 
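Since header.go is made up of fluent builders whose pieces are concatenated into a single JSON object via swag.ConcatJSON, here is a short illustrative sketch (the description and bounds are hypothetical) of assembling and serializing such a header:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// Hypothetical response header: an int32 count with a lower bound.
	h := spec.ResponseHeader().
		WithDescription("number of items returned").
		Typed("integer", "int32").
		WithMinimum(0, false)

	b, err := json.Marshal(h)
	if err != nil {
		panic(err)
	}
	// CommonValidations, SimpleSchema and HeaderProps flatten into one object.
	fmt.Println(string(b))
}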
+type VendorExtensible struct { + Extensions Extensions +} + +// AddExtension adds an extension to this extensible object +func (v *VendorExtensible) AddExtension(key string, value interface{}) { + if value == nil { + return + } + if v.Extensions == nil { + v.Extensions = make(map[string]interface{}) + } + v.Extensions.Add(key, value) +} + +// MarshalJSON marshals the extensions to json +func (v VendorExtensible) MarshalJSON() ([]byte, error) { + toser := make(map[string]interface{}) + for k, v := range v.Extensions { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + toser[k] = v + } + } + return json.Marshal(toser) +} + +// UnmarshalJSON for this extensible object +func (v *VendorExtensible) UnmarshalJSON(data []byte) error { + var d map[string]interface{} + if err := json.Unmarshal(data, &d); err != nil { + return err + } + for k, vv := range d { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + if v.Extensions == nil { + v.Extensions = map[string]interface{}{} + } + v.Extensions[k] = vv + } + } + return nil +} + +// InfoProps the properties for an info definition +type InfoProps struct { + Description string `json:"description,omitempty"` + Title string `json:"title,omitempty"` + TermsOfService string `json:"termsOfService,omitempty"` + Contact *ContactInfo `json:"contact,omitempty"` + License *License `json:"license,omitempty"` + Version string `json:"version,omitempty"` +} + +// Info object provides metadata about the API. +// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience. +// +// For more information: http://goo.gl/8us55a#infoObject +type Info struct { + VendorExtensible + InfoProps +} + +// JSONLookup look up a value by the json property name +func (i Info) JSONLookup(token string) (interface{}, error) { + if ex, ok := i.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(i.InfoProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (i Info) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.InfoProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (i *Info) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &i.InfoProps); err != nil { + return err + } + if err := json.Unmarshal(data, &i.VendorExtensible); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go new file mode 100644 index 000000000..4d57ea5ca --- /dev/null +++ b/vendor/github.com/go-openapi/spec/items.go @@ -0,0 +1,199 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
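The Extensions/VendorExtensible pair above only round-trips keys prefixed with "x-" (keys are lower-cased by Add). A small sketch with a hypothetical extension key on an Info object:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	info := spec.Info{InfoProps: spec.InfoProps{Title: "Example API", Version: "1.0.0"}}
	info.AddExtension("X-Audience", "internal") // stored as "x-audience"

	b, err := json.Marshal(info)
	if err != nil {
		panic(err)
	}
	// Only extension keys starting with "x-" are emitted next to the InfoProps fields.
	fmt.Println(string(b))
}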
+ +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" +) + +type SimpleSchema struct { + Type string `json:"type,omitempty"` + Format string `json:"format,omitempty"` + Items *Items `json:"items,omitempty"` + CollectionFormat string `json:"collectionFormat,omitempty"` + Default interface{} `json:"default,omitempty"` +} + +func (s *SimpleSchema) TypeName() string { + if s.Format != "" { + return s.Format + } + return s.Type +} + +func (s *SimpleSchema) ItemsTypeName() string { + if s.Items == nil { + return "" + } + return s.Items.TypeName() +} + +type CommonValidations struct { + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` +} + +// Items a limited subset of JSON-Schema's items object. +// It is used by parameter definitions that are not located in "body". +// +// For more information: http://goo.gl/8us55a#items-object- +type Items struct { + Refable + CommonValidations + SimpleSchema +} + +// NewItems creates a new instance of items +func NewItems() *Items { + return &Items{} +} + +// Typed a fluent builder method for the type of item +func (i *Items) Typed(tpe, format string) *Items { + i.Type = tpe + i.Format = format + return i +} + +// CollectionOf a fluent builder method for an array item +func (i *Items) CollectionOf(items *Items, format string) *Items { + i.Type = "array" + i.Items = items + i.CollectionFormat = format + return i +} + +// WithDefault sets the default value on this item +func (i *Items) WithDefault(defaultValue interface{}) *Items { + i.Default = defaultValue + return i +} + +// WithMaxLength sets a max length value +func (i *Items) WithMaxLength(max int64) *Items { + i.MaxLength = &max + return i +} + +// WithMinLength sets a min length value +func (i *Items) WithMinLength(min int64) *Items { + i.MinLength = &min + return i +} + +// WithPattern sets a pattern value +func (i *Items) WithPattern(pattern string) *Items { + i.Pattern = pattern + return i +} + +// WithMultipleOf sets a multiple of value +func (i *Items) WithMultipleOf(number float64) *Items { + i.MultipleOf = &number + return i +} + +// WithMaximum sets a maximum number value +func (i *Items) WithMaximum(max float64, exclusive bool) *Items { + i.Maximum = &max + i.ExclusiveMaximum = exclusive + return i +} + +// WithMinimum sets a minimum number value +func (i *Items) WithMinimum(min float64, exclusive bool) *Items { + i.Minimum = &min + i.ExclusiveMinimum = exclusive + return i +} + +// WithEnum sets a the enum values (replace) +func (i *Items) WithEnum(values ...interface{}) *Items { + i.Enum = append([]interface{}{}, values...) 
+ return i +} + +// WithMaxItems sets the max items +func (i *Items) WithMaxItems(size int64) *Items { + i.MaxItems = &size + return i +} + +// WithMinItems sets the min items +func (i *Items) WithMinItems(size int64) *Items { + i.MinItems = &size + return i +} + +// UniqueValues dictates that this array can only have unique items +func (i *Items) UniqueValues() *Items { + i.UniqueItems = true + return i +} + +// AllowDuplicates this array can have duplicates +func (i *Items) AllowDuplicates() *Items { + i.UniqueItems = false + return i +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (i *Items) UnmarshalJSON(data []byte) error { + var validations CommonValidations + if err := json.Unmarshal(data, &validations); err != nil { + return err + } + var ref Refable + if err := json.Unmarshal(data, &ref); err != nil { + return err + } + var simpleSchema SimpleSchema + if err := json.Unmarshal(data, &simpleSchema); err != nil { + return err + } + i.Refable = ref + i.CommonValidations = validations + i.SimpleSchema = simpleSchema + return nil +} + +// MarshalJSON converts this items object to JSON +func (i Items) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(i.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(i.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(i.Refable) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b3, b1, b2), nil +} diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go new file mode 100644 index 000000000..f20961b4f --- /dev/null +++ b/vendor/github.com/go-openapi/spec/license.go @@ -0,0 +1,23 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// License information for the exposed API. +// +// For more information: http://goo.gl/8us55a#licenseObject +type License struct { + Name string `json:"name,omitempty"` + URL string `json:"url,omitempty"` +} diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go new file mode 100644 index 000000000..de1db6f02 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/operation.go @@ -0,0 +1,233 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
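items.go mirrors the header builders for non-body parameter items. A brief sketch of composing a csv-formatted array item (the enum values are illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// An array of strings restricted to two values, serialized as csv.
	inner := spec.NewItems().Typed("string", "").WithEnum("asc", "desc")
	outer := spec.NewItems().CollectionOf(inner, "csv")

	b, err := json.Marshal(outer)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // Refable, CommonValidations and SimpleSchema are concatenated
}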
+ +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +type OperationProps struct { + Description string `json:"description,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` // the scheme, when present must be from [http, https, ws, wss] + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + ID string `json:"operationId,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` + Responses *Responses `json:"responses,omitempty"` +} + +// Operation describes a single API operation on a path. +// +// For more information: http://goo.gl/8us55a#operationObject +type Operation struct { + VendorExtensible + OperationProps +} + +// SuccessResponse gets a success response model +func (o *Operation) SuccessResponse() (*Response, int, bool) { + if o.Responses == nil { + return nil, 0, false + } + + for k, v := range o.Responses.StatusCodeResponses { + if k/100 == 2 { + return &v, k, true + } + } + + return o.Responses.Default, 0, false +} + +// JSONLookup look up a value by the json property name +func (o Operation) JSONLookup(token string) (interface{}, error) { + if ex, ok := o.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(o.OperationProps, token) + return r, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (o *Operation) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &o.OperationProps); err != nil { + return err + } + if err := json.Unmarshal(data, &o.VendorExtensible); err != nil { + return err + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (o Operation) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(o.OperationProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(o.VendorExtensible) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +// NewOperation creates a new operation instance. +// It expects an ID as parameter but not passing an ID is also valid. +func NewOperation(id string) *Operation { + op := new(Operation) + op.ID = id + return op +} + +// WithID sets the ID property on this operation, allows for chaining. +func (o *Operation) WithID(id string) *Operation { + o.ID = id + return o +} + +// WithDescription sets the description on this operation, allows for chaining +func (o *Operation) WithDescription(description string) *Operation { + o.Description = description + return o +} + +// WithSummary sets the summary on this operation, allows for chaining +func (o *Operation) WithSummary(summary string) *Operation { + o.Summary = summary + return o +} + +// WithExternalDocs sets/removes the external docs for/from this operation. +// When you pass empty strings as params the external documents will be removed. +// When you pass non-empty string as one value then those values will be used on the external docs object. +// So when you pass a non-empty description, you should also pass the url and vice versa. 
+func (o *Operation) WithExternalDocs(description, url string) *Operation { + if description == "" && url == "" { + o.ExternalDocs = nil + return o + } + + if o.ExternalDocs == nil { + o.ExternalDocs = &ExternalDocumentation{} + } + o.ExternalDocs.Description = description + o.ExternalDocs.URL = url + return o +} + +// Deprecate marks the operation as deprecated +func (o *Operation) Deprecate() *Operation { + o.Deprecated = true + return o +} + +// Undeprecate marks the operation as not deprected +func (o *Operation) Undeprecate() *Operation { + o.Deprecated = false + return o +} + +// WithConsumes adds media types for incoming body values +func (o *Operation) WithConsumes(mediaTypes ...string) *Operation { + o.Consumes = append(o.Consumes, mediaTypes...) + return o +} + +// WithProduces adds media types for outgoing body values +func (o *Operation) WithProduces(mediaTypes ...string) *Operation { + o.Produces = append(o.Produces, mediaTypes...) + return o +} + +// WithTags adds tags for this operation +func (o *Operation) WithTags(tags ...string) *Operation { + o.Tags = append(o.Tags, tags...) + return o +} + +// AddParam adds a parameter to this operation, when a parameter for that location +// and with that name already exists it will be replaced +func (o *Operation) AddParam(param *Parameter) *Operation { + if param == nil { + return o + } + + for i, p := range o.Parameters { + if p.Name == param.Name && p.In == param.In { + params := append(o.Parameters[:i], *param) + params = append(params, o.Parameters[i+1:]...) + o.Parameters = params + return o + } + } + + o.Parameters = append(o.Parameters, *param) + return o +} + +// RemoveParam removes a parameter from the operation +func (o *Operation) RemoveParam(name, in string) *Operation { + for i, p := range o.Parameters { + if p.Name == name && p.In == name { + o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...) + return o + } + } + return o +} + +// SecuredWith adds a security scope to this operation. +func (o *Operation) SecuredWith(name string, scopes ...string) *Operation { + o.Security = append(o.Security, map[string][]string{name: scopes}) + return o +} + +// WithDefaultResponse adds a default response to the operation. +// Passing a nil value will remove the response +func (o *Operation) WithDefaultResponse(response *Response) *Operation { + return o.RespondsWith(0, response) +} + +// RespondsWith adds a status code response to the operation. +// When the code is 0 the value of the response will be used as default response value. +// When the value of the response is nil it will be removed from the operation +func (o *Operation) RespondsWith(code int, response *Response) *Operation { + if o.Responses == nil { + o.Responses = new(Responses) + } + if code == 0 { + o.Responses.Default = response + return o + } + if response == nil { + delete(o.Responses.StatusCodeResponses, code) + return o + } + if o.Responses.StatusCodeResponses == nil { + o.Responses.StatusCodeResponses = make(map[int]Response) + } + o.Responses.StatusCodeResponses[code] = *response + return o +} diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go new file mode 100644 index 000000000..8fb66d12a --- /dev/null +++ b/vendor/github.com/go-openapi/spec/parameter.go @@ -0,0 +1,299 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// QueryParam creates a query parameter +func QueryParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}} +} + +// HeaderParam creates a header parameter, this is always required by default +func HeaderParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}} +} + +// PathParam creates a path parameter, this is always required +func PathParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}} +} + +// BodyParam creates a body parameter +func BodyParam(name string, schema *Schema) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}, SimpleSchema: SimpleSchema{Type: "object"}} +} + +// FormDataParam creates a body parameter +func FormDataParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}} +} + +// FileParam creates a body parameter +func FileParam(name string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, SimpleSchema: SimpleSchema{Type: "file"}} +} + +// SimpleArrayParam creates a param for a simple array (string, int, date etc) +func SimpleArrayParam(name, tpe, fmt string) *Parameter { + return &Parameter{ParamProps: ParamProps{Name: name}, SimpleSchema: SimpleSchema{Type: "array", CollectionFormat: "csv", Items: &Items{SimpleSchema: SimpleSchema{Type: "string", Format: fmt}}}} +} + +// ParamRef creates a parameter that's a json reference +func ParamRef(uri string) *Parameter { + p := new(Parameter) + p.Ref = MustCreateRef(uri) + return p +} + +type ParamProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Required bool `json:"required,omitempty"` + Schema *Schema `json:"schema,omitempty"` // when in == "body" + AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` // when in == "query" || "formData" +} + +// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). +// +// There are five possible parameter types. +// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, the path parameter is `itemId`. +// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. +// * Header - Custom headers that are expected as part of the request. +// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be *one* body parameter. The name of the body parameter has no effect on the parameter itself and is used for documentation purposes only. 
Since Form parameters are also in the payload, body and form parameters cannot exist together for the same operation. +// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or `multipart/form-data` are used as the content type of the request (in Swagger's definition, the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be declared together with a body parameter for the same operation. Form parameters have a different format based on the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4): +// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple parameters that are being transferred. +// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is `submit-name`. This type of form parameters is more commonly used for file transfers. +// +// For more information: http://goo.gl/8us55a#parameterObject +type Parameter struct { + Refable + CommonValidations + SimpleSchema + VendorExtensible + ParamProps +} + +// JSONLookup look up a value by the json property name +func (p Parameter) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == "$ref" { + return &p.Ref, nil + } + r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) + if err != nil { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) + if err != nil { + return nil, err + } + if r != nil { + return r, nil + } + r, _, err = jsonpointer.GetForToken(p.ParamProps, token) + return r, err +} + +// WithDescription a fluent builder method for the description of the parameter +func (p *Parameter) WithDescription(description string) *Parameter { + p.Description = description + return p +} + +// Named a fluent builder method to override the name of the parameter +func (p *Parameter) Named(name string) *Parameter { + p.Name = name + return p +} + +// WithLocation a fluent builder method to override the location of the parameter +func (p *Parameter) WithLocation(in string) *Parameter { + p.In = in + return p +} + +// Typed a fluent builder method for the type of the parameter value +func (p *Parameter) Typed(tpe, format string) *Parameter { + p.Type = tpe + p.Format = format + return p +} + +// CollectionOf a fluent builder method for an array parameter +func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { + p.Type = "array" + p.Items = items + p.CollectionFormat = format + return p +} + +// WithDefault sets the default value on this parameter +func (p *Parameter) WithDefault(defaultValue interface{}) *Parameter { + p.AsOptional() // with default implies optional + p.Default = defaultValue + return p +} + +// AllowsEmptyValues flags this parameter as being ok with empty values +func (p *Parameter) AllowsEmptyValues() *Parameter { + p.AllowEmptyValue = true + return p +} + +// NoEmptyValues flags this parameter as not liking empty values +func (p *Parameter) NoEmptyValues() *Parameter { + p.AllowEmptyValue = false 
+ return p +} + +// AsOptional flags this parameter as optional +func (p *Parameter) AsOptional() *Parameter { + p.Required = false + return p +} + +// AsRequired flags this parameter as required +func (p *Parameter) AsRequired() *Parameter { + if p.Default != nil { // with a default required makes no sense + return p + } + p.Required = true + return p +} + +// WithMaxLength sets a max length value +func (p *Parameter) WithMaxLength(max int64) *Parameter { + p.MaxLength = &max + return p +} + +// WithMinLength sets a min length value +func (p *Parameter) WithMinLength(min int64) *Parameter { + p.MinLength = &min + return p +} + +// WithPattern sets a pattern value +func (p *Parameter) WithPattern(pattern string) *Parameter { + p.Pattern = pattern + return p +} + +// WithMultipleOf sets a multiple of value +func (p *Parameter) WithMultipleOf(number float64) *Parameter { + p.MultipleOf = &number + return p +} + +// WithMaximum sets a maximum number value +func (p *Parameter) WithMaximum(max float64, exclusive bool) *Parameter { + p.Maximum = &max + p.ExclusiveMaximum = exclusive + return p +} + +// WithMinimum sets a minimum number value +func (p *Parameter) WithMinimum(min float64, exclusive bool) *Parameter { + p.Minimum = &min + p.ExclusiveMinimum = exclusive + return p +} + +// WithEnum sets a the enum values (replace) +func (p *Parameter) WithEnum(values ...interface{}) *Parameter { + p.Enum = append([]interface{}{}, values...) + return p +} + +// WithMaxItems sets the max items +func (p *Parameter) WithMaxItems(size int64) *Parameter { + p.MaxItems = &size + return p +} + +// WithMinItems sets the min items +func (p *Parameter) WithMinItems(size int64) *Parameter { + p.MinItems = &size + return p +} + +// UniqueValues dictates that this array can only have unique items +func (p *Parameter) UniqueValues() *Parameter { + p.UniqueItems = true + return p +} + +// AllowDuplicates this array can have duplicates +func (p *Parameter) AllowDuplicates() *Parameter { + p.UniqueItems = false + return p +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Parameter) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.CommonValidations); err != nil { + return err + } + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.SimpleSchema); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + if err := json.Unmarshal(data, &p.ParamProps); err != nil { + return err + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (p Parameter) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.CommonValidations) + if err != nil { + return nil, err + } + b2, err := json.Marshal(p.SimpleSchema) + if err != nil { + return nil, err + } + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.ParamProps) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b3, b1, b2, b4, b5), nil +} diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go new file mode 100644 index 000000000..9ab3ec538 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/path_item.go @@ -0,0 +1,90 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// pathItemProps the path item specific properties +type PathItemProps struct { + Get *Operation `json:"get,omitempty"` + Put *Operation `json:"put,omitempty"` + Post *Operation `json:"post,omitempty"` + Delete *Operation `json:"delete,omitempty"` + Options *Operation `json:"options,omitempty"` + Head *Operation `json:"head,omitempty"` + Patch *Operation `json:"patch,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` +} + +// PathItem describes the operations available on a single path. +// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). +// The path itself is still exposed to the documentation viewer but they will +// not know which operations and parameters are available. +// +// For more information: http://goo.gl/8us55a#pathItemObject +type PathItem struct { + Refable + VendorExtensible + PathItemProps +} + +// JSONLookup look up a value by the json property name +func (p PathItem) JSONLookup(token string) (interface{}, error) { + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + if token == "$ref" { + return &p.Ref, nil + } + r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) + return r, err +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *PathItem) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &p.Refable); err != nil { + return err + } + if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { + return err + } + if err := json.Unmarshal(data, &p.PathItemProps); err != nil { + return err + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (p PathItem) MarshalJSON() ([]byte, error) { + b3, err := json.Marshal(p.Refable) + if err != nil { + return nil, err + } + b4, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + b5, err := json.Marshal(p.PathItemProps) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b3, b4, b5) + return concated, nil +} diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go new file mode 100644 index 000000000..9dc82a290 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/paths.go @@ -0,0 +1,97 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
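Putting path_item.go and operation.go together, a minimal sketch of registering one GET operation plus a query parameter on a path item (names such as "listItems" and "limit" are purely illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	op := spec.NewOperation("listItems").
		WithSummary("List items").
		WithTags("items").
		RespondsWith(200, spec.NewResponse().WithDescription("OK"))

	var pi spec.PathItem
	pi.Get = op
	pi.Parameters = append(pi.Parameters, *spec.QueryParam("limit").Typed("integer", "int32"))

	b, err := json.Marshal(pi)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}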
+ +package spec + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-openapi/swag" +) + +// Paths holds the relative paths to the individual endpoints. +// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order +// to construct the full URL. +// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). +// +// For more information: http://goo.gl/8us55a#pathsObject +type Paths struct { + VendorExtensible + Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" +} + +// JSONLookup look up a value by the json property name +func (p Paths) JSONLookup(token string) (interface{}, error) { + if pi, ok := p.Paths[token]; ok { + return &pi, nil + } + if ex, ok := p.Extensions[token]; ok { + return &ex, nil + } + return nil, fmt.Errorf("object has no field %q", token) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (p *Paths) UnmarshalJSON(data []byte) error { + var res map[string]json.RawMessage + if err := json.Unmarshal(data, &res); err != nil { + return err + } + for k, v := range res { + if strings.HasPrefix(strings.ToLower(k), "x-") { + if p.Extensions == nil { + p.Extensions = make(map[string]interface{}) + } + var d interface{} + if err := json.Unmarshal(v, &d); err != nil { + return err + } + p.Extensions[k] = d + } + if strings.HasPrefix(k, "/") { + if p.Paths == nil { + p.Paths = make(map[string]PathItem) + } + var pi PathItem + if err := json.Unmarshal(v, &pi); err != nil { + return err + } + p.Paths[k] = pi + } + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (p Paths) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(p.VendorExtensible) + if err != nil { + return nil, err + } + + pths := make(map[string]PathItem) + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + pths[k] = v + } + } + b2, err := json.Marshal(pths) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go new file mode 100644 index 000000000..68631df8b --- /dev/null +++ b/vendor/github.com/go-openapi/spec/ref.go @@ -0,0 +1,167 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
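paths.go flattens the path map during (un)marshaling. A small sketch showing where "/..." keys and "x-..." keys end up; the JSON payload is hypothetical:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	data := []byte(`{"/items":{"get":{"operationId":"listItems"}},"x-internal":true}`)

	var paths spec.Paths
	if err := json.Unmarshal(data, &paths); err != nil {
		panic(err)
	}
	// Keys starting with "/" are collected in Paths; "x-" keys land in Extensions.
	fmt.Println(len(paths.Paths), paths.Extensions["x-internal"])
}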
+ +package spec + +import ( + "encoding/json" + "net/http" + "os" + "path/filepath" + + "github.com/go-openapi/jsonreference" +) + +// Refable is a struct for things that accept a $ref property +type Refable struct { + Ref Ref +} + +// MarshalJSON marshals the ref to json +func (r Refable) MarshalJSON() ([]byte, error) { + return r.Ref.MarshalJSON() +} + +// UnmarshalJSON unmarshalss the ref from json +func (r *Refable) UnmarshalJSON(d []byte) error { + return json.Unmarshal(d, &r.Ref) +} + +// Ref represents a json reference that is potentially resolved +type Ref struct { + jsonreference.Ref +} + +// RemoteURI gets the remote uri part of the ref +func (r *Ref) RemoteURI() string { + if r.String() == "" { + return r.String() + } + + u := *r.GetURL() + u.Fragment = "" + return u.String() +} + +// IsValidURI returns true when the url the ref points to can be found +func (r *Ref) IsValidURI() bool { + if r.String() == "" { + return true + } + + v := r.RemoteURI() + if v == "" { + return true + } + + if r.HasFullURL { + rr, err := http.Get(v) + if err != nil { + return false + } + + return rr.StatusCode/100 == 2 + } + + if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) { + return false + } + + // check for local file + pth := v + if r.HasURLPathOnly { + p, e := filepath.Abs(pth) + if e != nil { + return false + } + pth = p + } + + fi, err := os.Stat(pth) + if err != nil { + return false + } + + return !fi.IsDir() +} + +// Inherits creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *Ref) Inherits(child Ref) (*Ref, error) { + ref, err := r.Ref.Inherits(child.Ref) + if err != nil { + return nil, err + } + return &Ref{Ref: *ref}, nil +} + +// NewRef creates a new instance of a ref object +// returns an error when the reference uri is an invalid uri +func NewRef(refURI string) (Ref, error) { + ref, err := jsonreference.New(refURI) + if err != nil { + return Ref{}, err + } + return Ref{Ref: ref}, nil +} + +// MustCreateRef creates a ref object but +func MustCreateRef(refURI string) Ref { + return Ref{Ref: jsonreference.MustCreateRef(refURI)} +} + +// // NewResolvedRef creates a resolved ref +// func NewResolvedRef(refURI string, data interface{}) Ref { +// return Ref{ +// Ref: jsonreference.MustCreateRef(refURI), +// Resolved: data, +// } +// } + +// MarshalJSON marshals this ref into a JSON object +func (r Ref) MarshalJSON() ([]byte, error) { + str := r.String() + if str == "" { + if r.IsRoot() { + return []byte(`{"$ref":"#"}`), nil + } + return []byte("{}"), nil + } + v := map[string]interface{}{"$ref": str} + return json.Marshal(v) +} + +// UnmarshalJSON unmarshals this ref from a JSON object +func (r *Ref) UnmarshalJSON(d []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(d, &v); err != nil { + return err + } + + if v == nil { + return nil + } + + if vv, ok := v["$ref"]; ok { + if str, ok := vv.(string); ok { + ref, err := jsonreference.New(str) + if err != nil { + return err + } + *r = Ref{Ref: ref} + } + } + + return nil +} diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go new file mode 100644 index 000000000..308cc8478 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/response.go @@ -0,0 +1,113 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/swag" +) + +// ResponseProps properties specific to a response +type ResponseProps struct { + Description string `json:"description,omitempty"` + Schema *Schema `json:"schema,omitempty"` + Headers map[string]Header `json:"headers,omitempty"` + Examples map[string]interface{} `json:"examples,omitempty"` +} + +// Response describes a single response from an API Operation. +// +// For more information: http://goo.gl/8us55a#responseObject +type Response struct { + Refable + ResponseProps +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (r *Response) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &r.ResponseProps); err != nil { + return err + } + if err := json.Unmarshal(data, &r.Refable); err != nil { + return err + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (r Response) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(r.ResponseProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(r.Refable) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// NewResponse creates a new response instance +func NewResponse() *Response { + return new(Response) +} + +// ResponseRef creates a response as a json reference +func ResponseRef(url string) *Response { + resp := NewResponse() + resp.Ref = MustCreateRef(url) + return resp +} + +// WithDescription sets the description on this response, allows for chaining +func (r *Response) WithDescription(description string) *Response { + r.Description = description + return r +} + +// WithSchema sets the schema on this response, allows for chaining. +// Passing a nil argument removes the schema from this response +func (r *Response) WithSchema(schema *Schema) *Response { + r.Schema = schema + return r +} + +// AddHeader adds a header to this response +func (r *Response) AddHeader(name string, header *Header) *Response { + if header == nil { + return r.RemoveHeader(name) + } + if r.Headers == nil { + r.Headers = make(map[string]Header) + } + r.Headers[name] = *header + return r +} + +// RemoveHeader removes a header from this response +func (r *Response) RemoveHeader(name string) *Response { + delete(r.Headers, name) + return r +} + +// AddExample adds an example to this response +func (r *Response) AddExample(mediaType string, example interface{}) *Response { + if r.Examples == nil { + r.Examples = make(map[string]interface{}) + } + r.Examples[mediaType] = example + return r +} diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go new file mode 100644 index 000000000..ea071ca63 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/responses.go @@ -0,0 +1,122 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + + "github.com/go-openapi/swag" +) + +// Responses is a container for the expected responses of an operation. +// The container maps a HTTP response code to the expected response. +// It is not expected from the documentation to necessarily cover all possible HTTP response codes, +// since they may not be known in advance. However, it is expected from the documentation to cover +// a successful operation response and any known errors. +// +// The `default` can be used a default response object for all HTTP codes that are not covered +// individually by the specification. +// +// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response +// for a successful operation call. +// +// For more information: http://goo.gl/8us55a#responsesObject +type Responses struct { + VendorExtensible + ResponsesProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (r Responses) JSONLookup(token string) (interface{}, error) { + if token == "default" { + return r.Default, nil + } + if ex, ok := r.Extensions[token]; ok { + return &ex, nil + } + if i, err := strconv.Atoi(token); err == nil { + if scr, ok := r.StatusCodeResponses[i]; ok { + return &scr, nil + } + } + return nil, fmt.Errorf("object has no field %q", token) +} + +// UnmarshalJSON hydrates this items instance with the data from JSON +func (r *Responses) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { + return err + } + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { + return err + } + if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) { + r.ResponsesProps = ResponsesProps{} + } + return nil +} + +// MarshalJSON converts this items object to JSON +func (r Responses) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(r.ResponsesProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(r.VendorExtensible) + if err != nil { + return nil, err + } + concated := swag.ConcatJSON(b1, b2) + return concated, nil +} + +type ResponsesProps struct { + Default *Response + StatusCodeResponses map[int]Response +} + +func (r ResponsesProps) MarshalJSON() ([]byte, error) { + toser := map[string]Response{} + if r.Default != nil { + toser["default"] = *r.Default + } + for k, v := range r.StatusCodeResponses { + toser[strconv.Itoa(k)] = v + } + return json.Marshal(toser) +} + +func (r *ResponsesProps) UnmarshalJSON(data []byte) error { + var res map[string]Response + if err := json.Unmarshal(data, &res); err != nil { + return nil + } + if v, ok := res["default"]; ok { + r.Default = &v + delete(res, "default") + } + for k, v := range res { + if nk, err := strconv.Atoi(k); err == nil { + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]Response{} + } + r.StatusCodeResponses[nk] = v + } + } + return nil +} diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go new file mode 100644 index 000000000..eb88f005c --- /dev/null +++ 
b/vendor/github.com/go-openapi/spec/schema.go @@ -0,0 +1,628 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// BooleanProperty creates a boolean property +func BooleanProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} +} + +// BoolProperty creates a boolean property +func BoolProperty() *Schema { return BooleanProperty() } + +// StringProperty creates a string property +func StringProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// CharProperty creates a string property +func CharProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} +} + +// Float64Property creates a float64/double property +func Float64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} +} + +// Float32Property creates a float32/float property +func Float32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} +} + +// Int8Property creates an int8 property +func Int8Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} +} + +// Int16Property creates an int16 property +func Int16Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} +} + +// Int32Property creates an int32 property +func Int32Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} +} + +// Int64Property creates an int64 property +func Int64Property() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} +} + +// StrFmtProperty creates a property for the named string format +func StrFmtProperty(format string) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} +} + +// DateProperty creates a date property +func DateProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} +} + +// DateTimeProperty creates a date time property +func DateTimeProperty() *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} +} + +// MapProperty creates a map property +func MapProperty(property *Schema) *Schema { + return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} +} + +// RefProperty creates a ref property +func RefProperty(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// RefSchema creates a ref property +func RefSchema(name string) *Schema { + return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} +} + +// ArrayProperty creates an array property +func 
ArrayProperty(items *Schema) *Schema { + if items == nil { + return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} + } + return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} +} + +// ComposedSchema creates a schema with allOf +func ComposedSchema(schemas ...Schema) *Schema { + s := new(Schema) + s.AllOf = schemas + return s +} + +// SchemaURL represents a schema url +type SchemaURL string + +// MarshalJSON marshal this to JSON +func (r SchemaURL) MarshalJSON() ([]byte, error) { + if r == "" { + return []byte("{}"), nil + } + v := map[string]interface{}{"$schema": string(r)} + return json.Marshal(v) +} + +// UnmarshalJSON unmarshal this from JSON +func (r *SchemaURL) UnmarshalJSON(data []byte) error { + var v map[string]interface{} + if err := json.Unmarshal(data, &v); err != nil { + return err + } + if v == nil { + return nil + } + if vv, ok := v["$schema"]; ok { + if str, ok := vv.(string); ok { + u, err := url.Parse(str) + if err != nil { + return err + } + + *r = SchemaURL(u.String()) + } + } + return nil +} + +// type ExtraSchemaProps map[string]interface{} + +// // JSONSchema represents a structure that is a json schema draft 04 +// type JSONSchema struct { +// SchemaProps +// ExtraSchemaProps +// } + +// // MarshalJSON marshal this to JSON +// func (s JSONSchema) MarshalJSON() ([]byte, error) { +// b1, err := json.Marshal(s.SchemaProps) +// if err != nil { +// return nil, err +// } +// b2, err := s.Ref.MarshalJSON() +// if err != nil { +// return nil, err +// } +// b3, err := s.Schema.MarshalJSON() +// if err != nil { +// return nil, err +// } +// b4, err := json.Marshal(s.ExtraSchemaProps) +// if err != nil { +// return nil, err +// } +// return swag.ConcatJSON(b1, b2, b3, b4), nil +// } + +// // UnmarshalJSON marshal this from JSON +// func (s *JSONSchema) UnmarshalJSON(data []byte) error { +// var sch JSONSchema +// if err := json.Unmarshal(data, &sch.SchemaProps); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.Ref); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.Schema); err != nil { +// return err +// } +// if err := json.Unmarshal(data, &sch.ExtraSchemaProps); err != nil { +// return err +// } +// *s = sch +// return nil +// } + +type SchemaProps struct { + ID string `json:"id,omitempty"` + Ref Ref `json:"-,omitempty"` + Schema SchemaURL `json:"-,omitempty"` + Description string `json:"description,omitempty"` + Type StringOrArray `json:"type,omitempty"` + Format string `json:"format,omitempty"` + Title string `json:"title,omitempty"` + Default interface{} `json:"default,omitempty"` + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitempty"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` + Required []string `json:"required,omitempty"` + Items *SchemaOrArray `json:"items,omitempty"` + AllOf []Schema `json:"allOf,omitempty"` + OneOf []Schema `json:"oneOf,omitempty"` + AnyOf 
[]Schema `json:"anyOf,omitempty"` + Not *Schema `json:"not,omitempty"` + Properties map[string]Schema `json:"properties,omitempty"` + AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"` + PatternProperties map[string]Schema `json:"patternProperties,omitempty"` + Dependencies Dependencies `json:"dependencies,omitempty"` + AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"` + Definitions Definitions `json:"definitions,omitempty"` +} + +type SwaggerSchemaProps struct { + Discriminator string `json:"discriminator,omitempty"` + ReadOnly bool `json:"readOnly,omitempty"` + XML *XMLObject `json:"xml,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` + Example interface{} `json:"example,omitempty"` +} + +// Schema the schema object allows the definition of input and output data types. +// These types can be objects, but also primitives and arrays. +// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) +// and uses a predefined subset of it. +// On top of this subset, there are extensions provided by this specification to allow for more complete documentation. +// +// For more information: http://goo.gl/8us55a#schemaObject +type Schema struct { + VendorExtensible + SchemaProps + SwaggerSchemaProps + ExtraProps map[string]interface{} `json:"-"` +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s Schema) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + + if ex, ok := s.ExtraProps[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(s.SchemaProps, token) + if r != nil || err != nil { + return r, err + } + r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token) + return r, err +} + +// WithID sets the id for this schema, allows for chaining +func (s *Schema) WithID(id string) *Schema { + s.ID = id + return s +} + +// WithTitle sets the title for this schema, allows for chaining +func (s *Schema) WithTitle(title string) *Schema { + s.Title = title + return s +} + +// WithDescription sets the description for this schema, allows for chaining +func (s *Schema) WithDescription(description string) *Schema { + s.Description = description + return s +} + +// WithProperties sets the properties for this schema +func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { + s.Properties = schemas + return s +} + +// SetProperty sets a property on this schema +func (s *Schema) SetProperty(name string, schema Schema) *Schema { + if s.Properties == nil { + s.Properties = make(map[string]Schema) + } + s.Properties[name] = schema + return s +} + +// WithAllOf sets the all of property +func (s *Schema) WithAllOf(schemas ...Schema) *Schema { + s.AllOf = schemas + return s +} + +// WithMaxProperties sets the max number of properties an object can have +func (s *Schema) WithMaxProperties(max int64) *Schema { + s.MaxProperties = &max + return s +} + +// WithMinProperties sets the min number of properties an object must have +func (s *Schema) WithMinProperties(min int64) *Schema { + s.MinProperties = &min + return s +} + +// Typed sets the type of this schema for a single value item +func (s *Schema) Typed(tpe, format string) *Schema { + s.Type = []string{tpe} + s.Format = format + return s +} + +// AddType adds a type with potential format to the types for this schema +func (s *Schema) AddType(tpe, format string) *Schema { + s.Type = append(s.Type, tpe) + if format != "" { + s.Format = 
format + } + return s +} + +// CollectionOf a fluent builder method for an array parameter +func (s *Schema) CollectionOf(items Schema) *Schema { + s.Type = []string{"array"} + s.Items = &SchemaOrArray{Schema: &items} + return s +} + +// WithDefault sets the default value on this parameter +func (s *Schema) WithDefault(defaultValue interface{}) *Schema { + s.Default = defaultValue + return s +} + +// WithRequired flags this parameter as required +func (s *Schema) WithRequired(items ...string) *Schema { + s.Required = items + return s +} + +// AddRequired adds field names to the required properties array +func (s *Schema) AddRequired(items ...string) *Schema { + s.Required = append(s.Required, items...) + return s +} + +// WithMaxLength sets a max length value +func (s *Schema) WithMaxLength(max int64) *Schema { + s.MaxLength = &max + return s +} + +// WithMinLength sets a min length value +func (s *Schema) WithMinLength(min int64) *Schema { + s.MinLength = &min + return s +} + +// WithPattern sets a pattern value +func (s *Schema) WithPattern(pattern string) *Schema { + s.Pattern = pattern + return s +} + +// WithMultipleOf sets a multiple of value +func (s *Schema) WithMultipleOf(number float64) *Schema { + s.MultipleOf = &number + return s +} + +// WithMaximum sets a maximum number value +func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema { + s.Maximum = &max + s.ExclusiveMaximum = exclusive + return s +} + +// WithMinimum sets a minimum number value +func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema { + s.Minimum = &min + s.ExclusiveMinimum = exclusive + return s +} + +// WithEnum sets a the enum values (replace) +func (s *Schema) WithEnum(values ...interface{}) *Schema { + s.Enum = append([]interface{}{}, values...) + return s +} + +// WithMaxItems sets the max items +func (s *Schema) WithMaxItems(size int64) *Schema { + s.MaxItems = &size + return s +} + +// WithMinItems sets the min items +func (s *Schema) WithMinItems(size int64) *Schema { + s.MinItems = &size + return s +} + +// UniqueValues dictates that this array can only have unique items +func (s *Schema) UniqueValues() *Schema { + s.UniqueItems = true + return s +} + +// AllowDuplicates this array can have duplicates +func (s *Schema) AllowDuplicates() *Schema { + s.UniqueItems = false + return s +} + +// AddToAllOf adds a schema to the allOf property +func (s *Schema) AddToAllOf(schemas ...Schema) *Schema { + s.AllOf = append(s.AllOf, schemas...) + return s +} + +// WithDiscriminator sets the name of the discriminator field +func (s *Schema) WithDiscriminator(discriminator string) *Schema { + s.Discriminator = discriminator + return s +} + +// AsReadOnly flags this schema as readonly +func (s *Schema) AsReadOnly() *Schema { + s.ReadOnly = true + return s +} + +// AsWritable flags this schema as writeable (not read-only) +func (s *Schema) AsWritable() *Schema { + s.ReadOnly = false + return s +} + +// WithExample sets the example for this schema +func (s *Schema) WithExample(example interface{}) *Schema { + s.Example = example + return s +} + +// WithExternalDocs sets/removes the external docs for/from this schema. +// When you pass empty strings as params the external documents will be removed. +// When you pass non-empty string as one value then those values will be used on the external docs object. +// So when you pass a non-empty description, you should also pass the url and vice versa. 
+func (s *Schema) WithExternalDocs(description, url string) *Schema { + if description == "" && url == "" { + s.ExternalDocs = nil + return s + } + + if s.ExternalDocs == nil { + s.ExternalDocs = &ExternalDocumentation{} + } + s.ExternalDocs.Description = description + s.ExternalDocs.URL = url + return s +} + +// WithXMLName sets the xml name for the object +func (s *Schema) WithXMLName(name string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Name = name + return s +} + +// WithXMLNamespace sets the xml namespace for the object +func (s *Schema) WithXMLNamespace(namespace string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Namespace = namespace + return s +} + +// WithXMLPrefix sets the xml prefix for the object +func (s *Schema) WithXMLPrefix(prefix string) *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Prefix = prefix + return s +} + +// AsXMLAttribute flags this object as xml attribute +func (s *Schema) AsXMLAttribute() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Attribute = true + return s +} + +// AsXMLElement flags this object as an xml node +func (s *Schema) AsXMLElement() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Attribute = false + return s +} + +// AsWrappedXML flags this object as wrapped, this is mostly useful for array types +func (s *Schema) AsWrappedXML() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Wrapped = true + return s +} + +// AsUnwrappedXML flags this object as an xml node +func (s *Schema) AsUnwrappedXML() *Schema { + if s.XML == nil { + s.XML = new(XMLObject) + } + s.XML.Wrapped = false + return s +} + +// MarshalJSON marshal this to JSON +func (s Schema) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SchemaProps) + if err != nil { + return nil, fmt.Errorf("schema props %v", err) + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, fmt.Errorf("vendor props %v", err) + } + b3, err := s.Ref.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("ref prop %v", err) + } + b4, err := s.Schema.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("schema prop %v", err) + } + b5, err := json.Marshal(s.SwaggerSchemaProps) + if err != nil { + return nil, fmt.Errorf("common validations %v", err) + } + var b6 []byte + if s.ExtraProps != nil { + jj, err := json.Marshal(s.ExtraProps) + if err != nil { + return nil, fmt.Errorf("extra props %v", err) + } + b6 = jj + } + return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil +} + +// UnmarshalJSON marshal this from JSON +func (s *Schema) UnmarshalJSON(data []byte) error { + var sch Schema + if err := json.Unmarshal(data, &sch.SchemaProps); err != nil { + return err + } + if err := json.Unmarshal(data, &sch.Ref); err != nil { + return err + } + if err := json.Unmarshal(data, &sch.Schema); err != nil { + return err + } + if err := json.Unmarshal(data, &sch.SwaggerSchemaProps); err != nil { + return err + } + + var d map[string]interface{} + if err := json.Unmarshal(data, &d); err != nil { + return err + } + + delete(d, "$ref") + delete(d, "$schema") + for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) { + delete(d, pn) + } + + for k, vv := range d { + lk := strings.ToLower(k) + if strings.HasPrefix(lk, "x-") { + if sch.Extensions == nil { + sch.Extensions = map[string]interface{}{} + } + sch.Extensions[k] = vv + continue + } + if sch.ExtraProps == nil { + sch.ExtraProps = map[string]interface{}{} + } + sch.ExtraProps[k] = vv + } + + *s = sch + 
+ return nil +} diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go new file mode 100644 index 000000000..22d4f10af --- /dev/null +++ b/vendor/github.com/go-openapi/spec/security_scheme.go @@ -0,0 +1,142 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +const ( + basic = "basic" + apiKey = "apiKey" + oauth2 = "oauth2" + implicit = "implicit" + password = "password" + application = "application" + accessCode = "accessCode" +) + +// BasicAuth creates a basic auth security scheme +func BasicAuth() *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}} +} + +// APIKeyAuth creates an api key auth security scheme +func APIKeyAuth(fieldName, valueSource string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}} +} + +// OAuth2Implicit creates an implicit flow oauth2 security scheme +func OAuth2Implicit(authorizationURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: implicit, + AuthorizationURL: authorizationURL, + }} +} + +// OAuth2Password creates a password flow oauth2 security scheme +func OAuth2Password(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: password, + TokenURL: tokenURL, + }} +} + +// OAuth2Application creates an application flow oauth2 security scheme +func OAuth2Application(tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: application, + TokenURL: tokenURL, + }} +} + +// OAuth2AccessToken creates an access token flow oauth2 security scheme +func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { + return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ + Type: oauth2, + Flow: accessCode, + AuthorizationURL: authorizationURL, + TokenURL: tokenURL, + }} +} + +type SecuritySchemeProps struct { + Description string `json:"description,omitempty"` + Type string `json:"type"` + Name string `json:"name,omitempty"` // api key + In string `json:"in,omitempty"` // api key + Flow string `json:"flow,omitempty"` // oauth2 + AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2 + TokenURL string `json:"tokenUrl,omitempty"` // oauth2 + Scopes map[string]string `json:"scopes,omitempty"` // oauth2 +} + +// AddScope adds a scope to this security scheme +func (s *SecuritySchemeProps) AddScope(scope, description string) { + if s.Scopes == nil { + s.Scopes = make(map[string]string) + } + s.Scopes[scope] = description +} + +// SecurityScheme allows the definition of a security scheme that can be used by the operations. 
+// Supported schemes are basic authentication, an API key (either as a header or as a query parameter) +// and OAuth2's common flows (implicit, password, application and access code). +// +// For more information: http://goo.gl/8us55a#securitySchemeObject +type SecurityScheme struct { + VendorExtensible + SecuritySchemeProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SecurityScheme) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (s SecurityScheme) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SecuritySchemeProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (s *SecurityScheme) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { + return err + } + if err := json.Unmarshal(data, &s.VendorExtensible); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go new file mode 100644 index 000000000..cc2ae56b2 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/spec.go @@ -0,0 +1,79 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import "encoding/json" + +//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/... 
+//go:generate perl -pi -e s,Json,JSON,g bindata.go + +const ( + // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs + SwaggerSchemaURL = "http://swagger.io/v2/schema.json#" + // JSONSchemaURL the url for the json schema schema + JSONSchemaURL = "http://json-schema.org/draft-04/schema#" +) + +var ( + jsonSchema = MustLoadJSONSchemaDraft04() + swaggerSchema = MustLoadSwagger20Schema() +) + +// MustLoadJSONSchemaDraft04 panics when JSONSchemaDraft04 returns an error +func MustLoadJSONSchemaDraft04() *Schema { + d, e := JSONSchemaDraft04() + if e != nil { + panic(e) + } + return d +} + +// JSONSchemaDraft04 loads the json schema document for json schema draft04 +func JSONSchemaDraft04() (*Schema, error) { + b, err := Asset("jsonschema-draft-04.json") + if err != nil { + return nil, err + } + + schema := new(Schema) + if err := json.Unmarshal(b, schema); err != nil { + return nil, err + } + return schema, nil +} + +// MustLoadSwagger20Schema panics when Swagger20Schema returns an error +func MustLoadSwagger20Schema() *Schema { + d, e := Swagger20Schema() + if e != nil { + panic(e) + } + return d +} + +// Swagger20Schema loads the swagger 2.0 schema from the embedded assets +func Swagger20Schema() (*Schema, error) { + + b, err := Asset("v2/schema.json") + if err != nil { + return nil, err + } + + schema := new(Schema) + if err := json.Unmarshal(b, schema); err != nil { + return nil, err + } + return schema, nil +} diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go new file mode 100644 index 000000000..ff3ef875e --- /dev/null +++ b/vendor/github.com/go-openapi/spec/swagger.go @@ -0,0 +1,317 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + "fmt" + "strconv" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +// Swagger this is the root document object for the API specification. +// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) together into one document.
+// +// For more information: http://goo.gl/8us55a#swagger-object- +type Swagger struct { + VendorExtensible + SwaggerProps +} + +// JSONLookup look up a value by the json property name +func (s Swagger) JSONLookup(token string) (interface{}, error) { + if ex, ok := s.Extensions[token]; ok { + return &ex, nil + } + r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token) + return r, err +} + +// MarshalJSON marshals this swagger structure to json +func (s Swagger) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(s.SwaggerProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(s.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON unmarshals a swagger spec from json +func (s *Swagger) UnmarshalJSON(data []byte) error { + var sw Swagger + if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { + return err + } + if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil { + return err + } + *s = sw + return nil +} + +type SwaggerProps struct { + ID string `json:"id,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` // the scheme, when present must be from [http, https, ws, wss] + Swagger string `json:"swagger,omitempty"` + Info *Info `json:"info,omitempty"` + Host string `json:"host,omitempty"` + BasePath string `json:"basePath,omitempty"` // must start with a leading "/" + Paths *Paths `json:"paths"` // required + Definitions Definitions `json:"definitions"` + Parameters map[string]Parameter `json:"parameters,omitempty"` + Responses map[string]Response `json:"responses,omitempty"` + SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` + Security []map[string][]string `json:"security,omitempty"` + Tags []Tag `json:"tags,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +// Dependencies represent a dependencies property +type Dependencies map[string]SchemaOrStringArray + +// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property +type SchemaOrBool struct { + Allows bool + Schema *Schema +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrBool) JSONLookup(token string) (interface{}, error) { + if token == "allows" { + return s.Allows, nil + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +var jsTrue = []byte("true") +var jsFalse = []byte("false") + +// MarshalJSON convert this object to JSON +func (s SchemaOrBool) MarshalJSON() ([]byte, error) { + if s.Schema != nil { + return json.Marshal(s.Schema) + } + + if s.Schema == nil && !s.Allows { + return jsFalse, nil + } + return jsTrue, nil +} + +// UnmarshalJSON converts this bool or schema object from a JSON structure +func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { + var nw SchemaOrBool + if len(data) >= 4 { + if data[0] == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') + } + *s = nw + return nil +} + +// SchemaOrStringArray represents a schema or a string array +type SchemaOrStringArray struct { + Schema *Schema + Property []string +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrStringArray) JSONLookup(token string) (interface{}, 
error) { + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { + if len(s.Property) > 0 { + return json.Marshal(s.Property) + } + if s.Schema != nil { + return json.Marshal(s.Schema) + } + return nil, nil +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + var nw SchemaOrStringArray + if first == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Property); err != nil { + return err + } + } + *s = nw + return nil +} + +// Definitions contains the models explicitly defined in this spec +// An object to hold data types that can be consumed and produced by operations. +// These data types can be primitives, arrays or models. +// +// For more information: http://goo.gl/8us55a#definitionsObject +type Definitions map[string]Schema + +// SecurityDefinitions a declaration of the security schemes available to be used in the specification. +// This does not enforce the security schemes on the operations and only serves to provide +// the relevant details for each scheme. +// +// For more information: http://goo.gl/8us55a#securityDefinitionsObject +type SecurityDefinitions map[string]*SecurityScheme + +// StringOrArray represents a value that can either be a string +// or an array of strings. Mainly here for serialization purposes +type StringOrArray []string + +// Contains returns true when the value is contained in the slice +func (s StringOrArray) Contains(value string) bool { + for _, str := range s { + if str == value { + return true + } + } + return false +} + +// JSONLookup implements an interface to customize json pointer lookup +func (s SchemaOrArray) JSONLookup(token string) (interface{}, error) { + if _, err := strconv.Atoi(token); err == nil { + r, _, err := jsonpointer.GetForToken(s.Schemas, token) + return r, err + } + r, _, err := jsonpointer.GetForToken(s.Schema, token) + return r, err +} + +// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string +func (s *StringOrArray) UnmarshalJSON(data []byte) error { + var first byte + if len(data) > 1 { + first = data[0] + } + + if first == '[' { + var parsed []string + if err := json.Unmarshal(data, &parsed); err != nil { + return err + } + *s = StringOrArray(parsed) + return nil + } + + var single interface{} + if err := json.Unmarshal(data, &single); err != nil { + return err + } + if single == nil { + return nil + } + switch single.(type) { + case string: + *s = StringOrArray([]string{single.(string)}) + return nil + default: + return fmt.Errorf("only string or array is allowed, not %T", single) + } +} + +// MarshalJSON converts this string or array to a JSON array or JSON string +func (s StringOrArray) MarshalJSON() ([]byte, error) { + if len(s) == 1 { + return json.Marshal([]string(s)[0]) + } + return json.Marshal([]string(s)) +} + +// SchemaOrArray represents a value that can either be a Schema +// or an array of Schema. 
Mainly here for serialization purposes +type SchemaOrArray struct { + Schema *Schema + Schemas []Schema +} + +// Len returns the number of schemas in this property +func (s SchemaOrArray) Len() int { + if s.Schema != nil { + return 1 + } + return len(s.Schemas) +} + +// ContainsType returns true when one of the schemas is of the specified type +func (s *SchemaOrArray) ContainsType(name string) bool { + if s.Schema != nil { + return s.Schema.Type != nil && s.Schema.Type.Contains(name) + } + return false +} + +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrArray) MarshalJSON() ([]byte, error) { + if len(s.Schemas) > 0 { + return json.Marshal(s.Schemas) + } + return json.Marshal(s.Schema) +} + +// UnmarshalJSON converts this schema object or array from a JSON structure +func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { + var nw SchemaOrArray + var first byte + if len(data) > 1 { + first = data[0] + } + if first == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err + } + nw.Schema = &sch + } + if first == '[' { + if err := json.Unmarshal(data, &nw.Schemas); err != nil { + return err + } + } + *s = nw + return nil +} + +// vim:set ft=go noet sts=2 sw=2 ts=2: diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go new file mode 100644 index 000000000..97f555840 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/tag.go @@ -0,0 +1,73 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +import ( + "encoding/json" + + "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/swag" +) + +type TagProps struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` +} + +// NewTag creates a new tag +func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { + return Tag{TagProps: TagProps{description, name, externalDocs}} +} + +// Tag allows adding meta data to a single tag that is used by the [Operation Object](http://goo.gl/8us55a#operationObject). +// It is not mandatory to have a Tag Object per tag used there. 
+// +// For more information: http://goo.gl/8us55a#tagObject +type Tag struct { + VendorExtensible + TagProps +} + +// JSONLookup implements an interface to customize json pointer lookup +func (t Tag) JSONLookup(token string) (interface{}, error) { + if ex, ok := t.Extensions[token]; ok { + return &ex, nil + } + + r, _, err := jsonpointer.GetForToken(t.TagProps, token) + return r, err +} + +// MarshalJSON marshal this to JSON +func (t Tag) MarshalJSON() ([]byte, error) { + b1, err := json.Marshal(t.TagProps) + if err != nil { + return nil, err + } + b2, err := json.Marshal(t.VendorExtensible) + if err != nil { + return nil, err + } + return swag.ConcatJSON(b1, b2), nil +} + +// UnmarshalJSON marshal this from JSON +func (t *Tag) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &t.TagProps); err != nil { + return err + } + return json.Unmarshal(data, &t.VendorExtensible) +} diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go new file mode 100644 index 000000000..945a46703 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/xml_object.go @@ -0,0 +1,68 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package spec + +// XMLObject a metadata object that allows for more fine-tuned XML model definitions. 
+// +// For more information: http://goo.gl/8us55a#xmlObject +type XMLObject struct { + Name string `json:"name,omitempty"` + Namespace string `json:"namespace,omitempty"` + Prefix string `json:"prefix,omitempty"` + Attribute bool `json:"attribute,omitempty"` + Wrapped bool `json:"wrapped,omitempty"` +} + +// WithName sets the xml name for the object +func (x *XMLObject) WithName(name string) *XMLObject { + x.Name = name + return x +} + +// WithNamespace sets the xml namespace for the object +func (x *XMLObject) WithNamespace(namespace string) *XMLObject { + x.Namespace = namespace + return x +} + +// WithPrefix sets the xml prefix for the object +func (x *XMLObject) WithPrefix(prefix string) *XMLObject { + x.Prefix = prefix + return x +} + +// AsAttribute flags this object as xml attribute +func (x *XMLObject) AsAttribute() *XMLObject { + x.Attribute = true + return x +} + +// AsElement flags this object as an xml node +func (x *XMLObject) AsElement() *XMLObject { + x.Attribute = false + return x +} + +// AsWrapped flags this object as wrapped, this is mostly useful for array types +func (x *XMLObject) AsWrapped() *XMLObject { + x.Wrapped = true + return x +} + +// AsUnwrapped flags this object as an xml node +func (x *XMLObject) AsUnwrapped() *XMLObject { + x.Wrapped = false + return x +} diff --git a/vendor/github.com/go-openapi/swag/.drone.sec b/vendor/github.com/go-openapi/swag/.drone.sec new file mode 100644 index 000000000..9cc7e64a2 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.drone.sec @@ -0,0 +1 @@ +eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.darMHuSYnLhsknrnqjdXLGcyEJ5kM13yQbLGi2UWcKXpddHvPT3dMCnP5y7e27c76R2HFnvr56BqDMI-x0zyuQtjYKzSFUGjOcEhOH3OL1Hxu-Cm2z2443BS8asXNA3sEveAGQvG68jffm7CvtEtMo57wBpggI7UHbfIdLMK47s1EDpC4xanjiS1BJe5NC_Ikf0jfa6vf18oggbjxuoqSEvSNVdNRyZwG_npFaZFJzvdtehTG7GqunWjGiqBb81qNcEdzSdIZW7A_Esv4U-nOL5gGr55E9jKhv3bX4Z9ygGcSrJ3NCgR_3zRhYKkPEAOXIqQKfL6-h82BY--cHq9uw.NHL3X-1tjb8a8zF7.eRmLvOG32e7260K8rkI-HmUGG5Gb6Hu-obKKxBqHd-vVzsKnwTVJavLWktPqlXGMsnDt7MimSysNqsWemMUEviW2p3godnvjOOXTDb-RAtQ-39rvxnZ2bN8qwUVFrdiKTZD06l60yTeLW7L1psyLj50NxklFObhkpUcK5uukxLXT1SzGM9aY6_3dzW4HU9pZGQrIH1pj1UzvWIjz7iIzE1a37DHBN-FiYSASsw01v1SSIFr34gwlGcqzGfJBonffVrM4ordm3IiVm50Zvr25DrmYTKrQpJRB-KOvYxBNYDzjCaanHDyWGUGN44FUx38azHHEVBTaiOM7xwPeyCc-xTTv8WXGnL1xrhL3M_jNuwnbAjzL9X_li7KUSeYajwhGihdMZaHLYaqxh3NNnbPfYhR6sBxu8vaT1Sc4eE84QC4dV4OaAglPvrPdWL-DC7OYQyoPU8u9ggwUQHpFUzJyD549T_Tlgn-2Cw7kTe41VonH9HkoXGANDGtQCGTqTIEeFQJ3MDDucf5VteFP8_SJPfyJYxpStFt5U1AuULV9sXmpGQL_-GGFXowd0X0bHxFeo_eu1vm-oTqQQNbKRnyt5V3n4U9jhOUGnnIBy3JOG3DA2YhVJsHdlLZ9vaDpFYcxts4.SqYfES30FqVSufGbPZ6YXA \ No newline at end of file diff --git a/vendor/github.com/go-openapi/swag/.drone.yml b/vendor/github.com/go-openapi/swag/.drone.yml new file mode 100644 index 000000000..acf10fdfb --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.drone.yml @@ -0,0 +1,32 @@ +clone: + path: github.com/go-openapi/swag + +matrix: + GO_VERSION: + - "1.6" + +build: + integration: + image: golang:$$GO_VERSION + pull: true + commands: + - go get -u github.com/stretchr/testify + - go get -u github.com/mailru/easyjson + - go test -race + - go test -v -cover -coverprofile=coverage.out -covermode=count ./... 
+ +notify: + slack: + channel: bots + webhook_url: $$SLACK_URL + username: drone + +publish: + coverage: + server: https://coverage.vmware.run + token: $$GITHUB_TOKEN + # threshold: 70 + # must_increase: true + when: + matrix: + GO_VERSION: "1.6" diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore new file mode 100644 index 000000000..769c24400 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.gitignore @@ -0,0 +1 @@ +secrets.yml diff --git a/vendor/github.com/go-openapi/swag/.pullapprove.yml b/vendor/github.com/go-openapi/swag/.pullapprove.yml new file mode 100644 index 000000000..5ec183e22 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.pullapprove.yml @@ -0,0 +1,13 @@ +approve_by_comment: true +approve_regex: '^(:shipit:|:\+1:|\+1|LGTM|lgtm|Approved)' +reject_regex: ^[Rr]ejected +reset_on_push: false +reviewers: + members: + - casualjim + - chancez + - frapposelli + - vburenin + - pytlesk4 + name: pullapprove + required: 1 diff --git a/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..9322b065e --- /dev/null +++ b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at ivan+abuse@flanders.co.nz. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/swag/LICENSE b/vendor/github.com/go-openapi/swag/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md new file mode 100644 index 000000000..c1d3c196c --- /dev/null +++ b/vendor/github.com/go-openapi/swag/README.md @@ -0,0 +1,12 @@ +# Swag [![Build Status](https://ci.vmware.run/api/badges/go-openapi/swag/status.svg)](https://ci.vmware.run/go-openapi/swag) [![Coverage](https://coverage.vmware.run/badges/go-openapi/swag/coverage.svg)](https://coverage.vmware.run/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) + +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) + +Contains a bunch of helper functions: + +* convert between value and pointers for builtins +* convert from string to builtin +* fast json concatenation +* search in path +* load from file or http +* name mangling \ No newline at end of file diff --git a/vendor/github.com/go-openapi/swag/convert.go b/vendor/github.com/go-openapi/swag/convert.go new file mode 100644 index 000000000..28d912410 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/convert.go @@ -0,0 +1,188 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package swag + +import ( + "math" + "strconv" + "strings" +) + +// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER +const ( + maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 + minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 +) + +// IsFloat64AJSONInteger allow for integers [-2^53, 2^53-1] inclusive +func IsFloat64AJSONInteger(f float64) bool { + if math.IsNaN(f) || math.IsInf(f, 0) || f < minJSONFloat || f > maxJSONFloat { + return false + } + + return f == float64(int64(f)) || f == float64(uint64(f)) +} + +var evaluatesAsTrue = map[string]struct{}{ + "true": struct{}{}, + "1": struct{}{}, + "yes": struct{}{}, + "ok": struct{}{}, + "y": struct{}{}, + "on": struct{}{}, + "selected": struct{}{}, + "checked": struct{}{}, + "t": struct{}{}, + "enabled": struct{}{}, +} + +// ConvertBool turn a string into a boolean +func ConvertBool(str string) (bool, error) { + _, ok := evaluatesAsTrue[strings.ToLower(str)] + return ok, nil +} + +// ConvertFloat32 turn a string into a float32 +func ConvertFloat32(str string) (float32, error) { + f, err := strconv.ParseFloat(str, 32) + if err != nil { + return 0, err + } + return float32(f), nil +} + +// ConvertFloat64 turn a string into a float64 +func ConvertFloat64(str string) (float64, error) { + return strconv.ParseFloat(str, 64) +} + +// ConvertInt8 turn a string into int8 boolean +func ConvertInt8(str string) (int8, error) { + i, err := strconv.ParseInt(str, 10, 8) + if err != nil { + return 0, err + } + return int8(i), nil +} + +// ConvertInt16 turn a string into a int16 +func ConvertInt16(str string) (int16, error) { + i, err := strconv.ParseInt(str, 10, 16) + if err != nil { + return 0, err + } + return int16(i), nil +} + +// ConvertInt32 turn a string into a int32 +func ConvertInt32(str string) (int32, error) { + i, err := strconv.ParseInt(str, 10, 32) + if err != nil { + return 0, err + } + return int32(i), nil +} + +// ConvertInt64 turn a string into a int64 +func ConvertInt64(str string) (int64, error) { + return strconv.ParseInt(str, 10, 64) +} + +// ConvertUint8 turn a string into a uint8 +func ConvertUint8(str string) (uint8, error) { + i, err := strconv.ParseUint(str, 10, 8) + if err != nil { + return 0, err + } + return uint8(i), nil +} + +// ConvertUint16 turn a string into a uint16 +func ConvertUint16(str string) (uint16, error) { + i, err := strconv.ParseUint(str, 10, 16) + if err != nil { + return 0, err + } + return uint16(i), nil +} + +// ConvertUint32 turn a string into a uint32 +func ConvertUint32(str string) (uint32, error) { + i, err := strconv.ParseUint(str, 10, 32) + if err != nil { + return 0, err + } + return uint32(i), nil +} + +// ConvertUint64 turn a string into a uint64 +func ConvertUint64(str string) (uint64, error) { + return strconv.ParseUint(str, 10, 64) +} + +// FormatBool turns a boolean into a string +func FormatBool(value bool) string { + return strconv.FormatBool(value) +} + +// FormatFloat32 turns a float32 into a string +func FormatFloat32(value float32) string { + return strconv.FormatFloat(float64(value), 'f', -1, 32) +} + +// FormatFloat64 turns a float64 into a string +func FormatFloat64(value float64) string { + return strconv.FormatFloat(value, 'f', -1, 64) +} + +// FormatInt8 turns an int8 into a string +func FormatInt8(value int8) string { + return strconv.FormatInt(int64(value), 10) +} + +// FormatInt16 turns an int16 into a string +func FormatInt16(value int16) string { + return strconv.FormatInt(int64(value), 10) +} + +// FormatInt32 turns an int32 
into a string +func FormatInt32(value int32) string { + return strconv.FormatInt(int64(value), 10) +} + +// FormatInt64 turns an int64 into a string +func FormatInt64(value int64) string { + return strconv.FormatInt(value, 10) +} + +// FormatUint8 turns an uint8 into a string +func FormatUint8(value uint8) string { + return strconv.FormatUint(uint64(value), 10) +} + +// FormatUint16 turns an uint16 into a string +func FormatUint16(value uint16) string { + return strconv.FormatUint(uint64(value), 10) +} + +// FormatUint32 turns an uint32 into a string +func FormatUint32(value uint32) string { + return strconv.FormatUint(uint64(value), 10) +} + +// FormatUint64 turns an uint64 into a string +func FormatUint64(value uint64) string { + return strconv.FormatUint(value, 10) +} diff --git a/vendor/github.com/go-openapi/swag/convert_types.go b/vendor/github.com/go-openapi/swag/convert_types.go new file mode 100644 index 000000000..c95e4e78b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/convert_types.go @@ -0,0 +1,595 @@ +package swag + +import "time" + +// This file was taken from the aws go sdk + +// String returns a pointer to of the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to of the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. 
+func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to of the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. +func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int32 returns a pointer to of the int64 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. 
+func Int32Value(v *int32) int32 { + if v != nil { + return *v + } + return 0 +} + +// Int32Slice converts a slice of int64 values into a slice of +// int32 pointers +func Int32Slice(src []int32) []*int32 { + dst := make([]*int32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int32ValueSlice converts a slice of int32 pointers into a slice of +// int32 values +func Int32ValueSlice(src []*int32) []int32 { + dst := make([]int32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int32Map converts a string map of int32 values into a string +// map of int32 pointers +func Int32Map(src map[string]int32) map[string]*int32 { + dst := make(map[string]*int32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int32ValueMap converts a string map of int32 pointers into a string +// map of int32 values +func Int32ValueMap(src map[string]*int32) map[string]int32 { + dst := make(map[string]int32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to of the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. +func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint returns a pouinter to of the uint value passed in. +func Uint(v uint) *uint { + return &v +} + +// UintValue returns the value of the uint pouinter passed in or +// 0 if the pouinter is nil. 
+func UintValue(v *uint) uint { + if v != nil { + return *v + } + return 0 +} + +// UintSlice converts a slice of uint values uinto a slice of +// uint pouinters +func UintSlice(src []uint) []*uint { + dst := make([]*uint, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// UintValueSlice converts a slice of uint pouinters uinto a slice of +// uint values +func UintValueSlice(src []*uint) []uint { + dst := make([]uint, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// UintMap converts a string map of uint values uinto a string +// map of uint pouinters +func UintMap(src map[string]uint) map[string]*uint { + dst := make(map[string]*uint) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// UintValueMap converts a string map of uint pouinters uinto a string +// map of uint values +func UintValueMap(src map[string]*uint) map[string]uint { + dst := make(map[string]uint) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint32 returns a pouinter to of the uint64 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Value returns the value of the uint64 pouinter passed in or +// 0 if the pouinter is nil. +func Uint32Value(v *uint32) uint32 { + if v != nil { + return *v + } + return 0 +} + +// Uint32Slice converts a slice of uint64 values uinto a slice of +// uint32 pouinters +func Uint32Slice(src []uint32) []*uint32 { + dst := make([]*uint32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint32ValueSlice converts a slice of uint32 pouinters uinto a slice of +// uint32 values +func Uint32ValueSlice(src []*uint32) []uint32 { + dst := make([]uint32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint32Map converts a string map of uint32 values uinto a string +// map of uint32 pouinters +func Uint32Map(src map[string]uint32) map[string]*uint32 { + dst := make(map[string]*uint32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint32ValueMap converts a string map of uint32 pouinters uinto a string +// map of uint32 values +func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { + dst := make(map[string]uint32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint64 returns a pouinter to of the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Value returns the value of the uint64 pouinter passed in or +// 0 if the pouinter is nil. 
+func Uint64Value(v *uint64) uint64 { + if v != nil { + return *v + } + return 0 +} + +// Uint64Slice converts a slice of uint64 values uinto a slice of +// uint64 pouinters +func Uint64Slice(src []uint64) []*uint64 { + dst := make([]*uint64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint64ValueSlice converts a slice of uint64 pouinters uinto a slice of +// uint64 values +func Uint64ValueSlice(src []*uint64) []uint64 { + dst := make([]uint64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint64Map converts a string map of uint64 values uinto a string +// map of uint64 pouinters +func Uint64Map(src map[string]uint64) map[string]*uint64 { + dst := make(map[string]*uint64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint64ValueMap converts a string map of uint64 pouinters uinto a string +// map of uint64 values +func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { + dst := make(map[string]uint64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to of the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. +func Float64Value(v *float64) float64 { + if v != nil { + return *v + } + return 0 +} + +// Float64Slice converts a slice of float64 values into a slice of +// float64 pointers +func Float64Slice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float64ValueSlice converts a slice of float64 pointers into a slice of +// float64 values +func Float64ValueSlice(src []*float64) []float64 { + dst := make([]float64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float64Map converts a string map of float64 values into a string +// map of float64 pointers +func Float64Map(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float64ValueMap converts a string map of float64 pointers into a string +// map of float64 values +func Float64ValueMap(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time returns a pointer to of the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the time.Time pointer passed in or +// time.Time{} if the pointer is nil. 
+func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go new file mode 100644 index 000000000..6e9ec20fc --- /dev/null +++ b/vendor/github.com/go-openapi/swag/json.go @@ -0,0 +1,270 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "bytes" + "encoding/json" + "reflect" + "strings" + "sync" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// DefaultJSONNameProvider the default cache for types +var DefaultJSONNameProvider = NewNameProvider() + +const comma = byte(',') + +var closers = map[byte]byte{ + '{': '}', + '[': ']', +} + +type ejMarshaler interface { + MarshalEasyJSON(w *jwriter.Writer) +} + +type ejUnmarshaler interface { + UnmarshalEasyJSON(w *jlexer.Lexer) +} + +// WriteJSON writes json data, prefers finding an appropriate interface to short-circuit the marshaller +// so it takes the fastest option available. 
+func WriteJSON(data interface{}) ([]byte, error) { + if d, ok := data.(ejMarshaler); ok { + jw := new(jwriter.Writer) + d.MarshalEasyJSON(jw) + return jw.BuildBytes() + } + if d, ok := data.(json.Marshaler); ok { + return d.MarshalJSON() + } + return json.Marshal(data) +} + +// ReadJSON reads json data, prefers finding an appropriate interface to short-circuit the unmarshaller +// so it takes the fastes option available +func ReadJSON(data []byte, value interface{}) error { + if d, ok := value.(ejUnmarshaler); ok { + jl := &jlexer.Lexer{Data: data} + d.UnmarshalEasyJSON(jl) + return jl.Error() + } + if d, ok := value.(json.Unmarshaler); ok { + return d.UnmarshalJSON(data) + } + return json.Unmarshal(data, value) +} + +// DynamicJSONToStruct converts an untyped json structure into a struct +func DynamicJSONToStruct(data interface{}, target interface{}) error { + // TODO: convert straight to a json typed map (mergo + iterate?) + b, err := WriteJSON(data) + if err != nil { + return err + } + if err := ReadJSON(b, target); err != nil { + return err + } + return nil +} + +// ConcatJSON concatenates multiple json objects efficiently +func ConcatJSON(blobs ...[]byte) []byte { + if len(blobs) == 0 { + return nil + } + if len(blobs) == 1 { + return blobs[0] + } + + last := len(blobs) - 1 + var opening, closing byte + a := 0 + idx := 0 + buf := bytes.NewBuffer(nil) + + for i, b := range blobs { + if len(b) > 0 && opening == 0 { // is this an array or an object? + opening, closing = b[0], closers[b[0]] + } + + if opening != '{' && opening != '[' { + continue // don't know how to concatenate non container objects + } + + if len(b) < 3 { // yep empty but also the last one, so closing this thing + if i == last && a > 0 { + buf.WriteByte(closing) + } + continue + } + + idx = 0 + if a > 0 { // we need to join with a comma for everything beyond the first non-empty item + buf.WriteByte(comma) + idx = 1 // this is not the first or the last so we want to drop the leading bracket + } + + if i != last { // not the last one, strip brackets + buf.Write(b[idx : len(b)-1]) + } else { // last one, strip only the leading bracket + buf.Write(b[idx:]) + } + a++ + } + // somehow it ended up being empty, so provide a default value + if buf.Len() == 0 { + buf.WriteByte(opening) + buf.WriteByte(closing) + } + return buf.Bytes() +} + +// ToDynamicJSON turns an object into a properly JSON typed structure +func ToDynamicJSON(data interface{}) interface{} { + // TODO: convert straight to a json typed map (mergo + iterate?) + b, _ := json.Marshal(data) + var res interface{} + json.Unmarshal(b, &res) + return res +} + +// FromDynamicJSON turns an object into a properly JSON typed structure +func FromDynamicJSON(data, target interface{}) error { + b, _ := json.Marshal(data) + return json.Unmarshal(b, target) +} + +// NameProvider represents an object capabale of translating from go property names +// to json property names +// This type is thread-safe. 
+type NameProvider struct { + lock *sync.Mutex + index map[reflect.Type]nameIndex +} + +type nameIndex struct { + jsonNames map[string]string + goNames map[string]string +} + +// NewNameProvider creates a new name provider +func NewNameProvider() *NameProvider { + return &NameProvider{ + lock: &sync.Mutex{}, + index: make(map[reflect.Type]nameIndex), + } +} + +func buildnameIndex(tpe reflect.Type, idx, reverseIdx map[string]string) { + for i := 0; i < tpe.NumField(); i++ { + targetDes := tpe.Field(i) + + if targetDes.PkgPath != "" { // unexported + continue + } + + if targetDes.Anonymous { // walk embedded structures tree down first + buildnameIndex(targetDes.Type, idx, reverseIdx) + continue + } + + if tag := targetDes.Tag.Get("json"); tag != "" { + + parts := strings.Split(tag, ",") + if len(parts) == 0 { + continue + } + + nm := parts[0] + if nm == "-" { + continue + } + if nm == "" { // empty string means we want to use the Go name + nm = targetDes.Name + } + + idx[nm] = targetDes.Name + reverseIdx[targetDes.Name] = nm + } + } +} + +func newNameIndex(tpe reflect.Type) nameIndex { + var idx = make(map[string]string, tpe.NumField()) + var reverseIdx = make(map[string]string, tpe.NumField()) + + buildnameIndex(tpe, idx, reverseIdx) + return nameIndex{jsonNames: idx, goNames: reverseIdx} +} + +// GetJSONNames gets all the json property names for a type +func (n *NameProvider) GetJSONNames(subject interface{}) []string { + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + + var res []string + for k := range names.jsonNames { + res = append(res, k) + } + return res +} + +// GetJSONName gets the json name for a go property name +func (n *NameProvider) GetJSONName(subject interface{}, name string) (string, bool) { + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + return n.GetJSONNameForType(tpe, name) +} + +// GetJSONNameForType gets the json name for a go property name on a given type +func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string, bool) { + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + nme, ok := names.goNames[name] + return nme, ok +} + +func (n *NameProvider) makeNameIndex(tpe reflect.Type) nameIndex { + n.lock.Lock() + defer n.lock.Unlock() + names := newNameIndex(tpe) + n.index[tpe] = names + return names +} + +// GetGoName gets the go name for a json property name +func (n *NameProvider) GetGoName(subject interface{}, name string) (string, bool) { + tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() + return n.GetGoNameForType(tpe, name) +} + +// GetGoNameForType gets the go name for a given type for a json property name +func (n *NameProvider) GetGoNameForType(tpe reflect.Type, name string) (string, bool) { + names, ok := n.index[tpe] + if !ok { + names = n.makeNameIndex(tpe) + } + nme, ok := names.jsonNames[name] + return nme, ok +} diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go new file mode 100644 index 000000000..6dbc31330 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -0,0 +1,49 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "fmt" + "io/ioutil" + "net/http" + "strings" +) + +// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in +func LoadFromFileOrHTTP(path string) ([]byte, error) { + return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes)(path) +} + +// LoadStrategy returns a loader function for a given path or uri +func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { + if strings.HasPrefix(path, "http") { + return remote + } + return local +} + +func loadHTTPBytes(path string) ([]byte, error) { + resp, err := http.Get(path) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status) + } + + return ioutil.ReadAll(resp.Body) +} diff --git a/vendor/github.com/go-openapi/swag/net.go b/vendor/github.com/go-openapi/swag/net.go new file mode 100644 index 000000000..8323fa37b --- /dev/null +++ b/vendor/github.com/go-openapi/swag/net.go @@ -0,0 +1,24 @@ +package swag + +import ( + "net" + "strconv" +) + +// SplitHostPort splits a network address into a host and a port. +// The port is -1 when there is no port to be found +func SplitHostPort(addr string) (host string, port int, err error) { + h, p, err := net.SplitHostPort(addr) + if err != nil { + return "", -1, err + } + if p == "" { + return "", -1, &net.AddrError{Err: "missing port in address", Addr: addr} + } + + pi, err := strconv.Atoi(p) + if err != nil { + return "", -1, err + } + return h, pi, nil +} diff --git a/vendor/github.com/go-openapi/swag/path.go b/vendor/github.com/go-openapi/swag/path.go new file mode 100644 index 000000000..273e9fbed --- /dev/null +++ b/vendor/github.com/go-openapi/swag/path.go @@ -0,0 +1,56 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package swag + +import ( + "os" + "path/filepath" + "runtime" + "strings" +) + +const ( + // GOPATHKey represents the env key for gopath + GOPATHKey = "GOPATH" +) + +// FindInSearchPath finds a package in a provided lists of paths +func FindInSearchPath(searchPath, pkg string) string { + pathsList := filepath.SplitList(searchPath) + for _, path := range pathsList { + if evaluatedPath, err := filepath.EvalSymlinks(filepath.Join(path, "src", pkg)); err == nil { + if _, err := os.Stat(evaluatedPath); err == nil { + return evaluatedPath + } + } + } + return "" +} + +// FindInGoSearchPath finds a package in the $GOPATH:$GOROOT +func FindInGoSearchPath(pkg string) string { + return FindInSearchPath(FullGoSearchPath(), pkg) +} + +// FullGoSearchPath gets the search paths for finding packages +func FullGoSearchPath() string { + allPaths := os.Getenv(GOPATHKey) + if allPaths != "" { + allPaths = strings.Join([]string{allPaths, runtime.GOROOT()}, ":") + } else { + allPaths = runtime.GOROOT() + } + return allPaths +} diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go new file mode 100644 index 000000000..d8b54ee61 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/util.go @@ -0,0 +1,318 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package swag + +import ( + "math" + "reflect" + "regexp" + "sort" + "strings" +) + +// Taken from https://github.com/golang/lint/blob/1fab560e16097e5b69afb66eb93aab843ef77845/lint.go#L663-L698 +var commonInitialisms = map[string]bool{ + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTPS": true, + "HTTP": true, + "ID": true, + "IP": true, + "JSON": true, + "LHS": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UUID": true, + "UID": true, + "UI": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XSRF": true, + "XSS": true, +} +var initialisms []string + +func init() { + for k := range commonInitialisms { + initialisms = append(initialisms, k) + } + sort.Sort(sort.Reverse(byLength(initialisms))) +} + +// JoinByFormat joins a string array by a known format: +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) +func JoinByFormat(data []string, format string) []string { + if len(data) == 0 { + return data + } + var sep string + switch format { + case "ssv": + sep = " " + case "tsv": + sep = "\t" + case "pipes": + sep = "|" + case "multi": + return data + default: + sep = "," + } + return []string{strings.Join(data, sep)} +} + +// SplitByFormat splits a string by a known format: +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) +func SplitByFormat(data, format string) []string { + if data == "" { + return nil + } + var sep string + switch format { + case "ssv": + sep = " " + case "tsv": + sep = "\t" + case "pipes": + sep = "|" + case "multi": + return nil + default: + sep = "," + } + var result []string + for _, s := range strings.Split(data, sep) { + if ts := strings.TrimSpace(s); ts != "" { + result = append(result, ts) + } + } + return result +} + +type byLength []string + +func (s byLength) Len() int { + return len(s) +} +func (s byLength) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s byLength) Less(i, j int) bool { + return len(s[i]) < len(s[j]) +} + +// Prepares strings by splitting by caps, spaces, dashes, and underscore +func split(str string) (words []string) { + repl := strings.NewReplacer( + "@", "At ", + "&", "And ", + "|", "Pipe ", + "$", "Dollar ", + "!", "Bang ", + "-", " ", + "_", " ", + ) + + rex1 := regexp.MustCompile(`(\p{Lu})`) + rex2 := regexp.MustCompile(`(\pL|\pM|\pN|\p{Pc})+`) + + str = trim(str) + + // Convert dash and underscore to spaces + str = repl.Replace(str) + + // Split when uppercase is found (needed for Snake) + str = rex1.ReplaceAllString(str, " $1") + // check if consecutive single char things make up an initialism + + for _, k := range initialisms { + str = strings.Replace(str, rex1.ReplaceAllString(k, " $1"), " "+k, -1) + } + // Get the final list of words + words = rex2.FindAllString(str, -1) + + return +} + +// Removes leading whitespaces +func trim(str string) string { + return strings.Trim(str, " ") +} + +// Shortcut to strings.ToUpper() +func upper(str string) string { + return strings.ToUpper(trim(str)) +} + +// Shortcut to strings.ToLower() +func lower(str string) string { + return strings.ToLower(trim(str)) +} + +// ToFileName lowercases and underscores a go type name +func ToFileName(name string) string { + var out []string + for _, 
w := range split(name) { + out = append(out, lower(w)) + } + return strings.Join(out, "_") +} + +// ToCommandName lowercases and underscores a go type name +func ToCommandName(name string) string { + var out []string + for _, w := range split(name) { + out = append(out, lower(w)) + } + return strings.Join(out, "-") +} + +// ToHumanNameLower represents a code name as a human series of words +func ToHumanNameLower(name string) string { + var out []string + for _, w := range split(name) { + if !commonInitialisms[upper(w)] { + out = append(out, lower(w)) + } else { + out = append(out, w) + } + } + return strings.Join(out, " ") +} + +// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized +func ToHumanNameTitle(name string) string { + var out []string + for _, w := range split(name) { + uw := upper(w) + if !commonInitialisms[uw] { + out = append(out, upper(w[:1])+lower(w[1:])) + } else { + out = append(out, w) + } + } + return strings.Join(out, " ") +} + +// ToJSONName camelcases a name which can be underscored or pascal cased +func ToJSONName(name string) string { + var out []string + for i, w := range split(name) { + if i == 0 { + out = append(out, lower(w)) + continue + } + out = append(out, upper(w[:1])+lower(w[1:])) + } + return strings.Join(out, "") +} + +// ToVarName camelcases a name which can be underscored or pascal cased +func ToVarName(name string) string { + res := ToGoName(name) + if len(res) <= 1 { + return lower(res) + } + return lower(res[:1]) + res[1:] +} + +// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes +func ToGoName(name string) string { + var out []string + for _, w := range split(name) { + uw := upper(w) + mod := int(math.Min(float64(len(uw)), 2)) + if !commonInitialisms[uw] && !commonInitialisms[uw[:len(uw)-mod]] { + uw = upper(w[:1]) + lower(w[1:]) + } + out = append(out, uw) + } + return strings.Join(out, "") +} + +// ContainsStringsCI searches a slice of strings for a case-insensitive match +func ContainsStringsCI(coll []string, item string) bool { + for _, a := range coll { + if strings.EqualFold(a, item) { + return true + } + } + return false +} + +type zeroable interface { + IsZero() bool +} + +// IsZero returns true when the value passed into the function is a zero value. +// This allows for safer checking of interface values. 
+func IsZero(data interface{}) bool { + // check for things that have an IsZero method instead + if vv, ok := data.(zeroable); ok { + return vv.IsZero() + } + // continue with slightly more complex reflection + v := reflect.ValueOf(data) + switch v.Kind() { + case reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + case reflect.Struct, reflect.Array: + return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) + case reflect.Invalid: + return true + } + return false +} + +// CommandLineOptionsGroup represents a group of user-defined command line options +type CommandLineOptionsGroup struct { + ShortDescription string + LongDescription string + Options interface{} +} diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS new file mode 100644 index 000000000..d2c3b418f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/CONTRIBUTORS @@ -0,0 +1,15 @@ +Anton Povarov +Clayton Coleman +Denis Smirnov +DongYun Kang +Dwayne Schultz +Georg Apitz +Gustav Paul +John Tuley +Laurent +Patrick Lee +Stephen J Day +Tamir Duberstein +Todd Eisenberger +Tormod Erevik Lea +Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go new file mode 100644 index 000000000..c52878dd5 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go @@ -0,0 +1,99 @@ +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package sortkeys + +import ( + "sort" +) + +func Strings(l []string) { + sort.Strings(l) +} + +func Float64s(l []float64) { + sort.Float64s(l) +} + +func Float32s(l []float32) { + sort.Sort(Float32Slice(l)) +} + +func Int64s(l []int64) { + sort.Sort(Int64Slice(l)) +} + +func Int32s(l []int32) { + sort.Sort(Int32Slice(l)) +} + +func Uint64s(l []uint64) { + sort.Sort(Uint64Slice(l)) +} + +func Uint32s(l []uint32) { + sort.Sort(Uint32Slice(l)) +} + +func Bools(l []bool) { + sort.Sort(BoolSlice(l)) +} + +type BoolSlice []bool + +func (p BoolSlice) Len() int { return len(p) } +func (p BoolSlice) Less(i, j int) bool { return p[j] } +func (p BoolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Int64Slice []int64 + +func (p Int64Slice) Len() int { return len(p) } +func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Int32Slice []int32 + +func (p Int32Slice) Len() int { return len(p) } +func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Uint64Slice []uint64 + +func (p Uint64Slice) Len() int { return len(p) } +func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Uint32Slice []uint32 + +func (p Uint32Slice) Len() int { return len(p) } +func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Float32Slice []float32 + +func (p Float32Slice) Len() int { return len(p) } +func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE deleted file mode 100644 index 37ec93a14..000000000 --- a/vendor/github.com/golang/groupcache/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. 
- -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go deleted file mode 100644 index cdfe2991f..000000000 --- a/vendor/github.com/golang/groupcache/lru/lru.go +++ /dev/null @@ -1,121 +0,0 @@ -/* -Copyright 2013 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package lru implements an LRU cache. -package lru - -import "container/list" - -// Cache is an LRU cache. It is not safe for concurrent access. 
-type Cache struct { - // MaxEntries is the maximum number of cache entries before - // an item is evicted. Zero means no limit. - MaxEntries int - - // OnEvicted optionally specificies a callback function to be - // executed when an entry is purged from the cache. - OnEvicted func(key Key, value interface{}) - - ll *list.List - cache map[interface{}]*list.Element -} - -// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators -type Key interface{} - -type entry struct { - key Key - value interface{} -} - -// New creates a new Cache. -// If maxEntries is zero, the cache has no limit and it's assumed -// that eviction is done by the caller. -func New(maxEntries int) *Cache { - return &Cache{ - MaxEntries: maxEntries, - ll: list.New(), - cache: make(map[interface{}]*list.Element), - } -} - -// Add adds a value to the cache. -func (c *Cache) Add(key Key, value interface{}) { - if c.cache == nil { - c.cache = make(map[interface{}]*list.Element) - c.ll = list.New() - } - if ee, ok := c.cache[key]; ok { - c.ll.MoveToFront(ee) - ee.Value.(*entry).value = value - return - } - ele := c.ll.PushFront(&entry{key, value}) - c.cache[key] = ele - if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { - c.RemoveOldest() - } -} - -// Get looks up a key's value from the cache. -func (c *Cache) Get(key Key) (value interface{}, ok bool) { - if c.cache == nil { - return - } - if ele, hit := c.cache[key]; hit { - c.ll.MoveToFront(ele) - return ele.Value.(*entry).value, true - } - return -} - -// Remove removes the provided key from the cache. -func (c *Cache) Remove(key Key) { - if c.cache == nil { - return - } - if ele, hit := c.cache[key]; hit { - c.removeElement(ele) - } -} - -// RemoveOldest removes the oldest item from the cache. -func (c *Cache) RemoveOldest() { - if c.cache == nil { - return - } - ele := c.ll.Back() - if ele != nil { - c.removeElement(ele) - } -} - -func (c *Cache) removeElement(e *list.Element) { - c.ll.Remove(e) - kv := e.Value.(*entry) - delete(c.cache, kv.key) - if c.OnEvicted != nil { - c.OnEvicted(kv.key, kv.value) - } -} - -// Len returns the number of items in the cache. -func (c *Cache) Len() int { - if c.cache == nil { - return 0 - } - return c.ll.Len() -} diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 1b1b1921e..000000000 --- a/vendor/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,31 +0,0 @@ -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile deleted file mode 100644 index e2e0651a9..000000000 --- a/vendor/github.com/golang/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C testdata - protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto - make diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index e392575b3..000000000 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,229 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) - if in.IsNil() { - return pb - } - - out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. - mergeStruct(out.Elem(), in.Elem()) - return out.Interface().(Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") - } - if in.IsNil() { - // Merging nil into non-nil is a quiet no-op - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := extendable(in.Addr().Interface()); ok { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). 
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. 
- out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index 04dcb8813..000000000 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,874 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" - "os" - "reflect" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. 
-var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - // x, n already 0 - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - // x, err already 0 - - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. 
-func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - return UnmarshalMerge(buf, pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. 
- if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err - } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. -func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - extmap := e.extensionsWrite() - ext := extmap[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - extmap[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. 
- err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. - return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. -const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. 
-func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). -func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. -func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. 
-func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. - for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. - tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. 
-func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - - return err -} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index 8c1b8fd1f..000000000 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,1363 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "fmt" - "reflect" - "sort" -) - -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. -// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. 
If the field cannot be precisely determined, it is reported as -// "{Unknown}". -type RequiredNotSetError struct { - field string -} - -func (e *RequiredNotSetError) Error() string { - return fmt.Sprintf("proto: required field %q not set", e.field) -} - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// maxMarshalSize is the largest allowed size of an encoded protobuf, -// since C++ and Java use signed int32s for the size. -const maxMarshalSize = 1<<31 - 1 - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - return sizeVarint(x) -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -func sizeFixed64(x uint64) int { - return 8 -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -func sizeFixed32(x uint64) int { - return 4 -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. 
- return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - var state errorState - if err != nil && !state.shouldContinue(err, nil) { - return nil, err - } - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - if err != nil { - return err - } - p.buf = append(p.buf, data...) - return nil - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - stats.Encode++ - } - - if len(p.buf) > maxMarshalSize { - return ErrTooLarge - } - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - stats.Size++ - } - - return -} - -// Individual type encoders. - -// Encode a bool. 
-func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. 
-func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? 
- if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += len(p.tagcode) - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. -func (o *Buffer) enc_map(p *Properties, base structPointer) error { - exts := structPointer_ExtMap(base, p.field) - if err := encodeExtensionsMap(*exts); err != nil { - return err - } - - return o.enc_map_body(*exts) -} - -func (o *Buffer) enc_exts(p *Properties, base structPointer) error { - exts := structPointer_Extensions(base, p.field) - if err := encodeExtensions(exts); err != nil { - return err - } - v, _ := exts.extensionsRead() - - return o.enc_map_body(v) -} - -func (o *Buffer) enc_map_body(v map[int32]Extension) error { - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := structPointer_ExtMap(base, p.field) - return extensionsMapSize(*v) -} - -func size_exts(p *Properties, base structPointer) int { - v := structPointer_Extensions(base, p.field) - return extensionsSize(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? - - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { - return err - } - return nil - } - - // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. 
- for _, key := range v.MapKeys() { - val := v.MapIndex(key) - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. - - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. - // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - if len(o.buf) > maxMarshalSize { - return ErrTooLarge - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. 
- if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(o.buf)+len(v) > maxMarshalSize { - return ErrTooLarge - } - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. - reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index 8b16f951c..000000000 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,296 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal (a "bytes" field, - although represented by []byte, is not a repeated field) - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. 
-func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go deleted file mode 100644 index 482f3e97e..000000000 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ /dev/null @@ -1,583 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, bool) { - if ep, ok := p.(extendableProto); ok { - return ep, ok - } - if ep, ok := p.(extendableProtoV1); ok { - return extensionAdapter{ep}, ok - } - return nil, false -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. 
- p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() -var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - epb, ok := extendable(base) - if !ok { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. 
-type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensions(e *XXX_InternalExtensions) error { - m, mu := e.extensionsRead() - if m == nil { - return nil // fast path - } - mu.Lock() - defer mu.Unlock() - return encodeExtensionsMap(m) -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensionsMap(m map[int32]Extension) error { - for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - m[k] = e - } - return nil -} - -func extensionsSize(e *XXX_InternalExtensions) (n int) { - m, mu := e.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - defer mu.Unlock() - return extensionsMapSize(m) -} - -func extensionsMapSize(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - epb, ok := extendable(pb) - if !ok { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok = extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, ok := extendable(pb) - if !ok { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, extension.Field) -} - -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. 
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") - } - - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - - t := reflect.TypeOf(extension.ExtensionType) - - props := extensionProperties(extension) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. - value := reflect.New(t).Elem() - - for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { - return nil, err - } - - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { - return nil, err - } - - if o.index >= len(o.buf) { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. 
-func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, ok := extendable(pb) - if !ok { - return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, ok := extendable(pb) - if !ok { - return errors.New("proto: not an extendable proto") - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - epb, ok := extendable(pb) - if !ok { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. 
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index 170b8e87d..000000000 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,898 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. 
Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Getters are only generated for message and oneof fields. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. -Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string 
`protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. -type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // write point - - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
-func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. 
-func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. 
- switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. 
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. 
- if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion2 = true - -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion1 = true diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index fd982decd..000000000 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,311 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. 
- */ - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - if err := encodeExtensions(exts); err != nil { - return nil, err - } - m, _ = exts.extensionsRead() - case map[int32]Extension: - if err := encodeExtensionsMap(exts); err != nil { - return nil, err - } - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. 
- ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m, _ = exts.extensionsRead() - case map[int32]Extension: - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - if i > 0 { - b.WriteByte(',') - } - - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. 
- return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index fb512e2e1..000000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,484 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "math" - "reflect" -) - -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} - -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. 
-// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. - if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) -} - -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) -} - -// Extensions returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return structPointer_ifield(p, f).(*XXX_InternalExtensions) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() -} - -// SetStructPointer writes a *struct field in the struct. 
-func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} -} - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value -} - -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) -} - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value -} - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() -} - -// Set sets p to point at a newly allocated word with bits set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} -} - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value -} - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. 
-func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} -} - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. -type word32Slice struct { - v reflect.Value -} - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } -} - -func (p word32Slice) Len() int { - return p.v.Len() -} - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} -} - -// word64 is like word32 but for 64-bit values. -type word64 struct { - v reflect.Value -} - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") -} - -func word64_IsNil(p word64) bool { - return p.v.IsNil() -} - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} -} - -// word64Val is like word32Val but for 64-bit values. 
-type word64Val struct { - v reflect.Value -} - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) - return - } - panic("unreachable") -} - -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} -} - -type word64Slice struct { - v reflect.Value -} - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } -} - -func (p word64Slice) Len() int { - return p.v.Len() -} - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") -} - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index 6b5567d47..000000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,270 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "unsafe" -) - -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. - -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. -func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != ^field(0) -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringVal returns the address of a string field in the struct. 
-func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil -} - -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] -} - -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p -} - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x -} - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p -} - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// A word32Slice is a slice of 32-bit values. 
-type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -// word64 is like word32 but for 64-bit values. -type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] -} - -func word64_IsNil(p word64) bool { - return *p == nil -} - -func word64_Get(p word64) uint64 { - return **p -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x -} - -func word64Val_Get(p word64Val) uint64 { - return *p -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 - -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } - -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go deleted file mode 100644 index 69ddda8d4..000000000 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ /dev/null @@ -1,864 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. -type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. -type oneofSizer func(Message) int - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. 
-type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. -type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - def_uint64 uint64 - - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool - - mtype reflect.Type // set for map types only - mkeyprop *Properties // set for map types only - mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder -} - -// String formats the properties in the protobuf struct field tag style. 
-func (p *Properties) String() string { - s := p.Wire - s = "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint - case "fixed32": - p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 - case "fixed64": - p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 - case "zigzag32": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 - case "zigzag64": - p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break - } - } - } -} - -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// Initialize the fields for encoding and decoding. 
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil - - switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - case reflect.Int32: - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.String: - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: - p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } - } - - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 
reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.dec = (*Buffer).dec_slice_byte - if p.proto3 { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } else { - p.enc = (*Buffer).enc_slice_byte - p.size = size_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break - case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte - } - } - - case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map - - p.mtype = t1 - p.mkeyprop = &Properties{} - p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.mvalprop = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() -) - -// isMarshaler reports whether type t implements Marshaler. 
-func isMarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isMarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isMarshaler") - } - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isUnmarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isUnmarshaler") - } - return t.Implements(unmarshalerType) -} - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if f != nil { - p.field = toField(f) - } - if tag == "" { - return - } - p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - if collectStats { - stats.Chit++ - } - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } - return prop - } - if collectStats { - stats.Cmiss++ - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || - reflect.PtrTo(t).Implements(extendableProtoV1Type) - prop.unrecField = invalidField - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - if f.Name == "XXX_InternalExtensions" { // special case - p.enc = (*Buffer).enc_exts - p.dec = nil // not needed - p.size = size_exts - } else if f.Name == "XXX_extensions" { // special case - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } else if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - // Oneof fields don't use the traditional protobuf tag. 
- p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { - var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t - - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// Return the Properties object for the x[0]'th field of the structure. -func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). 
-var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] } - -// MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index 965876bf0..000000000 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,854 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. 
- -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - gtNewline = []byte(">\n") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. 
because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. -func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.mkeyprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.mvalprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } - - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if _, ok := extendable(pv.Interface()); ok { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeRaw writes an uninterpreted raw message. 
-func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else if err := tm.writeStruct(w, v); err != nil { - return err - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. 
- switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep, _ := extendable(pv.Interface()) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m, mu := ep.extensionsRead() - if m == nil { - return nil - } - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
- if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index 0b8c59f74..000000000 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,880 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. 
All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - 
return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) 
- s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - base := 8 - ss := s[:2] - s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) - if err != nil { - return "", "", err - } - return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b - } - s = s[n:] - return string(bs), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. - cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. 
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. - messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. 
- for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - sv.Field(oop.Field).Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.mkeyprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.mvalprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. 
- fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. -func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // Either "true", "false", 1 or 0. - switch tok.value { - case "true", "1": - fv.SetBool(true) - return nil - case "false", "0": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". 
- if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. -func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err - } - pb.Reset() - v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil -} diff --git a/vendor/github.com/google/cadvisor/LICENSE b/vendor/github.com/google/cadvisor/LICENSE deleted file mode 100644 index 97cec18e8..000000000 --- a/vendor/github.com/google/cadvisor/LICENSE +++ /dev/null @@ -1,190 +0,0 @@ - Copyright 2014 The cAdvisor Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/google/cadvisor/info/v1/container.go b/vendor/github.com/google/cadvisor/info/v1/container.go deleted file mode 100644 index 6e7e65897..000000000 --- a/vendor/github.com/google/cadvisor/info/v1/container.go +++ /dev/null @@ -1,583 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "reflect" - "time" -) - -type CpuSpec struct { - Limit uint64 `json:"limit"` - MaxLimit uint64 `json:"max_limit"` - Mask string `json:"mask,omitempty"` - Quota uint64 `json:"quota,omitempty"` - Period uint64 `json:"period,omitempty"` -} - -type MemorySpec struct { - // The amount of memory requested. Default is unlimited (-1). 
- // Units: bytes. - Limit uint64 `json:"limit,omitempty"` - - // The amount of guaranteed memory. Default is 0. - // Units: bytes. - Reservation uint64 `json:"reservation,omitempty"` - - // The amount of swap space requested. Default is unlimited (-1). - // Units: bytes. - SwapLimit uint64 `json:"swap_limit,omitempty"` -} - -type ContainerSpec struct { - // Time at which the container was created. - CreationTime time.Time `json:"creation_time,omitempty"` - - // Metadata labels associated with this container. - Labels map[string]string `json:"labels,omitempty"` - // Metadata envs associated with this container. Only whitelisted envs are added. - Envs map[string]string `json:"envs,omitempty"` - - HasCpu bool `json:"has_cpu"` - Cpu CpuSpec `json:"cpu,omitempty"` - - HasMemory bool `json:"has_memory"` - Memory MemorySpec `json:"memory,omitempty"` - - HasNetwork bool `json:"has_network"` - - HasFilesystem bool `json:"has_filesystem"` - - // HasDiskIo when true, indicates that DiskIo stats will be available. - HasDiskIo bool `json:"has_diskio"` - - HasCustomMetrics bool `json:"has_custom_metrics"` - CustomMetrics []MetricSpec `json:"custom_metrics,omitempty"` - - // Image name used for this container. - Image string `json:"image,omitempty"` -} - -// Container reference contains enough information to uniquely identify a container -type ContainerReference struct { - // The container id - Id string `json:"id,omitempty"` - - // The absolute name of the container. This is unique on the machine. - Name string `json:"name"` - - // Other names by which the container is known within a certain namespace. - // This is unique within that namespace. - Aliases []string `json:"aliases,omitempty"` - - // Namespace under which the aliases of a container are unique. - // An example of a namespace is "docker" for Docker containers. - Namespace string `json:"namespace,omitempty"` - - Labels map[string]string `json:"labels,omitempty"` -} - -// Sorts by container name. -type ContainerReferenceSlice []ContainerReference - -func (self ContainerReferenceSlice) Len() int { return len(self) } -func (self ContainerReferenceSlice) Swap(i, j int) { self[i], self[j] = self[j], self[i] } -func (self ContainerReferenceSlice) Less(i, j int) bool { return self[i].Name < self[j].Name } - -// ContainerInfoRequest is used when users check a container info from the REST API. -// It specifies how much data users want to get about a container -type ContainerInfoRequest struct { - // Max number of stats to return. Specify -1 for all stats currently available. - // Default: 60 - NumStats int `json:"num_stats,omitempty"` - - // Start time for which to query information. - // If ommitted, the beginning of time is assumed. - Start time.Time `json:"start,omitempty"` - - // End time for which to query information. - // If ommitted, current time is assumed. - End time.Time `json:"end,omitempty"` -} - -// Returns a ContainerInfoRequest with all default values specified. -func DefaultContainerInfoRequest() ContainerInfoRequest { - return ContainerInfoRequest{ - NumStats: 60, - } -} - -func (self *ContainerInfoRequest) Equals(other ContainerInfoRequest) bool { - return self.NumStats == other.NumStats && - self.Start.Equal(other.Start) && - self.End.Equal(other.End) -} - -type ContainerInfo struct { - ContainerReference - - // The direct subcontainers of the current container. - Subcontainers []ContainerReference `json:"subcontainers,omitempty"` - - // The isolation used in the container. 
- Spec ContainerSpec `json:"spec,omitempty"` - - // Historical statistics gathered from the container. - Stats []*ContainerStats `json:"stats,omitempty"` -} - -// TODO(vmarmol): Refactor to not need this equality comparison. -// ContainerInfo may be (un)marshaled by json or other en/decoder. In that -// case, the Timestamp field in each stats/sample may not be precisely -// en/decoded. This will lead to small but acceptable differences between a -// ContainerInfo and its encode-then-decode version. Eq() is used to compare -// two ContainerInfo accepting small difference (<10ms) of Time fields. -func (self *ContainerInfo) Eq(b *ContainerInfo) bool { - - // If both self and b are nil, then Eq() returns true - if self == nil { - return b == nil - } - if b == nil { - return self == nil - } - - // For fields other than time.Time, we will compare them precisely. - // This would require that any slice should have same order. - if !reflect.DeepEqual(self.ContainerReference, b.ContainerReference) { - return false - } - if !reflect.DeepEqual(self.Subcontainers, b.Subcontainers) { - return false - } - if !self.Spec.Eq(&b.Spec) { - return false - } - - for i, expectedStats := range b.Stats { - selfStats := self.Stats[i] - if !expectedStats.Eq(selfStats) { - return false - } - } - - return true -} - -func (self *ContainerSpec) Eq(b *ContainerSpec) bool { - // Creation within 1s of each other. - diff := self.CreationTime.Sub(b.CreationTime) - if (diff > time.Second) || (diff < -time.Second) { - return false - } - - if self.HasCpu != b.HasCpu { - return false - } - if !reflect.DeepEqual(self.Cpu, b.Cpu) { - return false - } - if self.HasMemory != b.HasMemory { - return false - } - if !reflect.DeepEqual(self.Memory, b.Memory) { - return false - } - if self.HasNetwork != b.HasNetwork { - return false - } - if self.HasFilesystem != b.HasFilesystem { - return false - } - if self.HasDiskIo != b.HasDiskIo { - return false - } - if self.HasCustomMetrics != b.HasCustomMetrics { - return false - } - return true -} - -func (self *ContainerInfo) StatsAfter(ref time.Time) []*ContainerStats { - n := len(self.Stats) + 1 - for i, s := range self.Stats { - if s.Timestamp.After(ref) { - n = i - break - } - } - if n > len(self.Stats) { - return nil - } - return self.Stats[n:] -} - -func (self *ContainerInfo) StatsStartTime() time.Time { - var ret time.Time - for _, s := range self.Stats { - if s.Timestamp.Before(ret) || ret.IsZero() { - ret = s.Timestamp - } - } - return ret -} - -func (self *ContainerInfo) StatsEndTime() time.Time { - var ret time.Time - for i := len(self.Stats) - 1; i >= 0; i-- { - s := self.Stats[i] - if s.Timestamp.After(ret) { - ret = s.Timestamp - } - } - return ret -} - -// This mirrors kernel internal structure. -type LoadStats struct { - // Number of sleeping tasks. - NrSleeping uint64 `json:"nr_sleeping"` - - // Number of running tasks. - NrRunning uint64 `json:"nr_running"` - - // Number of tasks in stopped state - NrStopped uint64 `json:"nr_stopped"` - - // Number of tasks in uninterruptible state - NrUninterruptible uint64 `json:"nr_uninterruptible"` - - // Number of tasks waiting on IO - NrIoWait uint64 `json:"nr_io_wait"` -} - -// CPU usage time statistics. -type CpuUsage struct { - // Total CPU usage. - // Units: nanoseconds - Total uint64 `json:"total"` - - // Per CPU/core usage of the container. - // Unit: nanoseconds. - PerCpu []uint64 `json:"per_cpu_usage,omitempty"` - - // Time spent in user space. - // Unit: nanoseconds - User uint64 `json:"user"` - - // Time spent in kernel space. 
- // Unit: nanoseconds - System uint64 `json:"system"` -} - -// All CPU usage metrics are cumulative from the creation of the container -type CpuStats struct { - Usage CpuUsage `json:"usage"` - // Smoothed average of number of runnable threads x 1000. - // We multiply by thousand to avoid using floats, but preserving precision. - // Load is smoothed over the last 10 seconds. Instantaneous value can be read - // from LoadStats.NrRunning. - LoadAverage int32 `json:"load_average"` -} - -type PerDiskStats struct { - Major uint64 `json:"major"` - Minor uint64 `json:"minor"` - Stats map[string]uint64 `json:"stats"` -} - -type DiskIoStats struct { - IoServiceBytes []PerDiskStats `json:"io_service_bytes,omitempty"` - IoServiced []PerDiskStats `json:"io_serviced,omitempty"` - IoQueued []PerDiskStats `json:"io_queued,omitempty"` - Sectors []PerDiskStats `json:"sectors,omitempty"` - IoServiceTime []PerDiskStats `json:"io_service_time,omitempty"` - IoWaitTime []PerDiskStats `json:"io_wait_time,omitempty"` - IoMerged []PerDiskStats `json:"io_merged,omitempty"` - IoTime []PerDiskStats `json:"io_time,omitempty"` -} - -type MemoryStats struct { - // Current memory usage, this includes all memory regardless of when it was - // accessed. - // Units: Bytes. - Usage uint64 `json:"usage"` - - // Number of bytes of page cache memory. - // Units: Bytes. - Cache uint64 `json:"cache"` - - // The amount of anonymous and swap cache memory (includes transparent - // hugepages). - // Units: Bytes. - RSS uint64 `json:"rss"` - - // The amount of working set memory, this includes recently accessed memory, - // dirty memory, and kernel memory. Working set is <= "usage". - // Units: Bytes. - WorkingSet uint64 `json:"working_set"` - - Failcnt uint64 `json:"failcnt"` - - ContainerData MemoryStatsMemoryData `json:"container_data,omitempty"` - HierarchicalData MemoryStatsMemoryData `json:"hierarchical_data,omitempty"` -} - -type MemoryStatsMemoryData struct { - Pgfault uint64 `json:"pgfault"` - Pgmajfault uint64 `json:"pgmajfault"` -} - -type InterfaceStats struct { - // The name of the interface. - Name string `json:"name"` - // Cumulative count of bytes received. - RxBytes uint64 `json:"rx_bytes"` - // Cumulative count of packets received. - RxPackets uint64 `json:"rx_packets"` - // Cumulative count of receive errors encountered. - RxErrors uint64 `json:"rx_errors"` - // Cumulative count of packets dropped while receiving. - RxDropped uint64 `json:"rx_dropped"` - // Cumulative count of bytes transmitted. - TxBytes uint64 `json:"tx_bytes"` - // Cumulative count of packets transmitted. - TxPackets uint64 `json:"tx_packets"` - // Cumulative count of transmit errors encountered. - TxErrors uint64 `json:"tx_errors"` - // Cumulative count of packets dropped while transmitting. - TxDropped uint64 `json:"tx_dropped"` -} - -type NetworkStats struct { - InterfaceStats `json:",inline"` - Interfaces []InterfaceStats `json:"interfaces,omitempty"` - // TCP connection stats (Established, Listen...) - Tcp TcpStat `json:"tcp"` - // TCP6 connection stats (Established, Listen...) 
- Tcp6 TcpStat `json:"tcp6"` -} - -type TcpStat struct { - //Count of TCP connections in state "Established" - Established uint64 - //Count of TCP connections in state "Syn_Sent" - SynSent uint64 - //Count of TCP connections in state "Syn_Recv" - SynRecv uint64 - //Count of TCP connections in state "Fin_Wait1" - FinWait1 uint64 - //Count of TCP connections in state "Fin_Wait2" - FinWait2 uint64 - //Count of TCP connections in state "Time_Wait - TimeWait uint64 - //Count of TCP connections in state "Close" - Close uint64 - //Count of TCP connections in state "Close_Wait" - CloseWait uint64 - //Count of TCP connections in state "Listen_Ack" - LastAck uint64 - //Count of TCP connections in state "Listen" - Listen uint64 - //Count of TCP connections in state "Closing" - Closing uint64 -} - -type FsStats struct { - // The block device name associated with the filesystem. - Device string `json:"device,omitempty"` - - // Type of the filesytem. - Type string `json:"type"` - - // Number of bytes that can be consumed by the container on this filesystem. - Limit uint64 `json:"capacity"` - - // Number of bytes that is consumed by the container on this filesystem. - Usage uint64 `json:"usage"` - - // Base Usage that is consumed by the container's writable layer. - // This field is only applicable for docker container's as of now. - BaseUsage uint64 `json:"base_usage"` - - // Number of bytes available for non-root user. - Available uint64 `json:"available"` - - // Number of available Inodes - InodesFree uint64 `json:"inodes_free"` - - // Number of reads completed - // This is the total number of reads completed successfully. - ReadsCompleted uint64 `json:"reads_completed"` - - // Number of reads merged - // Reads and writes which are adjacent to each other may be merged for - // efficiency. Thus two 4K reads may become one 8K read before it is - // ultimately handed to the disk, and so it will be counted (and queued) - // as only one I/O. This field lets you know how often this was done. - ReadsMerged uint64 `json:"reads_merged"` - - // Number of sectors read - // This is the total number of sectors read successfully. - SectorsRead uint64 `json:"sectors_read"` - - // Number of milliseconds spent reading - // This is the total number of milliseconds spent by all reads (as - // measured from __make_request() to end_that_request_last()). - ReadTime uint64 `json:"read_time"` - - // Number of writes completed - // This is the total number of writes completed successfully. - WritesCompleted uint64 `json:"writes_completed"` - - // Number of writes merged - // See the description of reads merged. - WritesMerged uint64 `json:"writes_merged"` - - // Number of sectors written - // This is the total number of sectors written successfully. - SectorsWritten uint64 `json:"sectors_written"` - - // Number of milliseconds spent writing - // This is the total number of milliseconds spent by all writes (as - // measured from __make_request() to end_that_request_last()). - WriteTime uint64 `json:"write_time"` - - // Number of I/Os currently in progress - // The only field that should go to zero. Incremented as requests are - // given to appropriate struct request_queue and decremented as they finish. - IoInProgress uint64 `json:"io_in_progress"` - - // Number of milliseconds spent doing I/Os - // This field increases so long as field 9 is nonzero. 
- IoTime uint64 `json:"io_time"` - - // weighted number of milliseconds spent doing I/Os - // This field is incremented at each I/O start, I/O completion, I/O - // merge, or read of these stats by the number of I/Os in progress - // (field 9) times the number of milliseconds spent doing I/O since the - // last update of this field. This can provide an easy measure of both - // I/O completion time and the backlog that may be accumulating. - WeightedIoTime uint64 `json:"weighted_io_time"` -} - -type ContainerStats struct { - // The time of this stat point. - Timestamp time.Time `json:"timestamp"` - Cpu CpuStats `json:"cpu,omitempty"` - DiskIo DiskIoStats `json:"diskio,omitempty"` - Memory MemoryStats `json:"memory,omitempty"` - Network NetworkStats `json:"network,omitempty"` - - // Filesystem statistics - Filesystem []FsStats `json:"filesystem,omitempty"` - - // Task load stats - TaskStats LoadStats `json:"task_stats,omitempty"` - - //Custom metrics from all collectors - CustomMetrics map[string][]MetricVal `json:"custom_metrics,omitempty"` -} - -func timeEq(t1, t2 time.Time, tolerance time.Duration) bool { - // t1 should not be later than t2 - if t1.After(t2) { - t1, t2 = t2, t1 - } - diff := t2.Sub(t1) - if diff <= tolerance { - return true - } - return false -} - -const ( - // 10ms, i.e. 0.01s - timePrecision time.Duration = 10 * time.Millisecond -) - -// This function is useful because we do not require precise time -// representation. -func (a *ContainerStats) Eq(b *ContainerStats) bool { - if !timeEq(a.Timestamp, b.Timestamp, timePrecision) { - return false - } - return a.StatsEq(b) -} - -// Checks equality of the stats values. -func (a *ContainerStats) StatsEq(b *ContainerStats) bool { - // TODO(vmarmol): Consider using this through reflection. - if !reflect.DeepEqual(a.Cpu, b.Cpu) { - return false - } - if !reflect.DeepEqual(a.Memory, b.Memory) { - return false - } - if !reflect.DeepEqual(a.DiskIo, b.DiskIo) { - return false - } - if !reflect.DeepEqual(a.Network, b.Network) { - return false - } - if !reflect.DeepEqual(a.Filesystem, b.Filesystem) { - return false - } - return true -} - -// Event contains information general to events such as the time at which they -// occurred, their specific type, and the actual event. Event types are -// differentiated by the EventType field of Event. -type Event struct { - // the absolute container name for which the event occurred - ContainerName string `json:"container_name"` - - // the time at which the event occurred - Timestamp time.Time `json:"timestamp"` - - // the type of event. EventType is an enumerated type - EventType EventType `json:"event_type"` - - // the original event object and all of its extraneous data, ex. an - // OomInstance - EventData EventData `json:"event_data,omitempty"` -} - -// EventType is an enumerated type which lists the categories under which -// events may fall. The Event field EventType is populated by this enum. -type EventType string - -const ( - EventOom EventType = "oom" - EventOomKill = "oomKill" - EventContainerCreation = "containerCreation" - EventContainerDeletion = "containerDeletion" -) - -// Extra information about an event. Only one type will be set. -type EventData struct { - // Information about an OOM kill event. 
- OomKill *OomKillEventData `json:"oom,omitempty"` -} - -// Information related to an OOM kill instance -type OomKillEventData struct { - // process id of the killed process - Pid int `json:"pid"` - - // The name of the killed process - ProcessName string `json:"process_name"` -} diff --git a/vendor/github.com/google/cadvisor/info/v1/docker.go b/vendor/github.com/google/cadvisor/info/v1/docker.go deleted file mode 100644 index 2703c5342..000000000 --- a/vendor/github.com/google/cadvisor/info/v1/docker.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Types used for docker containers. -package v1 - -type DockerStatus struct { - Version string `json:"version"` - KernelVersion string `json:"kernel_version"` - OS string `json:"os"` - Hostname string `json:"hostname"` - RootDir string `json:"root_dir"` - Driver string `json:"driver"` - DriverStatus map[string]string `json:"driver_status"` - ExecDriver string `json:"exec_driver"` - NumImages int `json:"num_images"` - NumContainers int `json:"num_containers"` -} - -type DockerImage struct { - ID string `json:"id"` - RepoTags []string `json:"repo_tags"` // repository name and tags. - Created int64 `json:"created"` // unix time since creation. - VirtualSize int64 `json:"virtual_size"` - Size int64 `json:"size"` -} diff --git a/vendor/github.com/google/cadvisor/info/v1/machine.go b/vendor/github.com/google/cadvisor/info/v1/machine.go deleted file mode 100644 index 74a5df49c..000000000 --- a/vendor/github.com/google/cadvisor/info/v1/machine.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -type FsInfo struct { - // Block device associated with the filesystem. - Device string `json:"device"` - - // Total number of bytes available on the filesystem. - Capacity uint64 `json:"capacity"` - - // Type of device. - Type string `json:"type"` - - // Total number of inodes available on the filesystem. - Inodes uint64 `json:"inodes"` -} - -type Node struct { - Id int `json:"node_id"` - // Per-node memory - Memory uint64 `json:"memory"` - Cores []Core `json:"cores"` - Caches []Cache `json:"caches"` -} - -type Core struct { - Id int `json:"core_id"` - Threads []int `json:"thread_ids"` - Caches []Cache `json:"caches"` -} - -type Cache struct { - // Size of memory cache in bytes. 
- Size uint64 `json:"size"` - // Type of memory cache: data, instruction, or unified. - Type string `json:"type"` - // Level (distance from cpus) in a multi-level cache hierarchy. - Level int `json:"level"` -} - -func (self *Node) FindCore(id int) (bool, int) { - for i, n := range self.Cores { - if n.Id == id { - return true, i - } - } - return false, -1 -} - -func (self *Node) AddThread(thread int, core int) { - var coreIdx int - if core == -1 { - // Assume one hyperthread per core when topology data is missing. - core = thread - } - ok, coreIdx := self.FindCore(core) - - if !ok { - // New core - core := Core{Id: core} - self.Cores = append(self.Cores, core) - coreIdx = len(self.Cores) - 1 - } - self.Cores[coreIdx].Threads = append(self.Cores[coreIdx].Threads, thread) -} - -func (self *Node) AddNodeCache(c Cache) { - self.Caches = append(self.Caches, c) -} - -func (self *Node) AddPerCoreCache(c Cache) { - for idx := range self.Cores { - self.Cores[idx].Caches = append(self.Cores[idx].Caches, c) - } -} - -type DiskInfo struct { - // device name - Name string `json:"name"` - - // Major number - Major uint64 `json:"major"` - - // Minor number - Minor uint64 `json:"minor"` - - // Size in bytes - Size uint64 `json:"size"` - - // I/O Scheduler - one of "none", "noop", "cfq", "deadline" - Scheduler string `json:"scheduler"` -} - -type NetInfo struct { - // Device name - Name string `json:"name"` - - // Mac Address - MacAddress string `json:"mac_address"` - - // Speed in MBits/s - Speed int64 `json:"speed"` - - // Maximum Transmission Unit - Mtu int64 `json:"mtu"` -} - -type CloudProvider string - -const ( - GCE CloudProvider = "GCE" - AWS = "AWS" - Azure = "Azure" - Baremetal = "Baremetal" - UnknownProvider = "Unknown" -) - -type InstanceType string - -const ( - NoInstance InstanceType = "None" - UnknownInstance = "Unknown" -) - -type InstanceID string - -const ( - UnNamedInstance InstanceID = "None" -) - -type MachineInfo struct { - // The number of cores in this machine. - NumCores int `json:"num_cores"` - - // Maximum clock speed for the cores, in KHz. - CpuFrequency uint64 `json:"cpu_frequency_khz"` - - // The amount of memory (in bytes) in this machine - MemoryCapacity uint64 `json:"memory_capacity"` - - // The machine id - MachineID string `json:"machine_id"` - - // The system uuid - SystemUUID string `json:"system_uuid"` - - // The boot id - BootID string `json:"boot_id"` - - // Filesystems on this machine. - Filesystems []FsInfo `json:"filesystems"` - - // Disk map - DiskMap map[string]DiskInfo `json:"disk_map"` - - // Network devices - NetworkDevices []NetInfo `json:"network_devices"` - - // Machine Topology - // Describes cpu/memory layout and hierarchy. - Topology []Node `json:"topology"` - - // Cloud provider the machine belongs to. - CloudProvider CloudProvider `json:"cloud_provider"` - - // Type of cloud instance (e.g. GCE standard) the machine is. - InstanceType InstanceType `json:"instance_type"` - - // ID of cloud instance (e.g. instance-1) given to it by the cloud provider. - InstanceID InstanceID `json:"instance_id"` -} - -type VersionInfo struct { - // Kernel version. - KernelVersion string `json:"kernel_version"` - - // OS image being used for cadvisor container, or host image if running on host directly. - ContainerOsVersion string `json:"container_os_version"` - - // Docker version. - DockerVersion string `json:"docker_version"` - - // cAdvisor version. - CadvisorVersion string `json:"cadvisor_version"` - // cAdvisor git revision. 
- CadvisorRevision string `json:"cadvisor_revision"` -} - -type MachineInfoFactory interface { - GetMachineInfo() (*MachineInfo, error) - GetVersionInfo() (*VersionInfo, error) -} diff --git a/vendor/github.com/google/cadvisor/info/v1/metric.go b/vendor/github.com/google/cadvisor/info/v1/metric.go deleted file mode 100644 index 90fd9e493..000000000 --- a/vendor/github.com/google/cadvisor/info/v1/metric.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "time" -) - -// Type of metric being exported. -type MetricType string - -const ( - // Instantaneous value. May increase or decrease. - MetricGauge MetricType = "gauge" - - // A counter-like value that is only expected to increase. - MetricCumulative = "cumulative" - - // Rate over a time period. - MetricDelta = "delta" -) - -// DataType for metric being exported. -type DataType string - -const ( - IntType DataType = "int" - FloatType = "float" -) - -// Spec for custom metric. -type MetricSpec struct { - // The name of the metric. - Name string `json:"name"` - - // Type of the metric. - Type MetricType `json:"type"` - - // Data Type for the stats. - Format DataType `json:"format"` - - // Display Units for the stats. - Units string `json:"units"` -} - -// An exported metric. -type MetricValBasic struct { - // Time at which the metric was queried - Timestamp time.Time `json:"timestamp"` - - // The value of the metric at this point. - IntValue int64 `json:"int_value,omitempty"` - FloatValue float64 `json:"float_value,omitempty"` -} - -// An exported metric. -type MetricVal struct { - // Label associated with a metric - Label string `json:"label,omitempty"` - - // Time at which the metric was queried - Timestamp time.Time `json:"timestamp"` - - // The value of the metric at this point. - IntValue int64 `json:"int_value,omitempty"` - FloatValue float64 `json:"float_value,omitempty"` -} diff --git a/vendor/github.com/howeyc/gopass/.travis.yml b/vendor/github.com/howeyc/gopass/.travis.yml new file mode 100644 index 000000000..cc5d509fd --- /dev/null +++ b/vendor/github.com/howeyc/gopass/.travis.yml @@ -0,0 +1,11 @@ +language: go + +os: + - linux + - osx + +go: + - 1.3 + - 1.4 + - 1.5 + - tip diff --git a/vendor/github.com/howeyc/gopass/LICENSE.txt b/vendor/github.com/howeyc/gopass/LICENSE.txt new file mode 100644 index 000000000..14f74708a --- /dev/null +++ b/vendor/github.com/howeyc/gopass/LICENSE.txt @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012 Chris Howey + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/howeyc/gopass/OPENSOLARIS.LICENSE b/vendor/github.com/howeyc/gopass/OPENSOLARIS.LICENSE new file mode 100644 index 000000000..da23621dc --- /dev/null +++ b/vendor/github.com/howeyc/gopass/OPENSOLARIS.LICENSE @@ -0,0 +1,384 @@ +Unless otherwise noted, all files in this distribution are released +under the Common Development and Distribution License (CDDL). +Exceptions are noted within the associated source files. + +-------------------------------------------------------------------- + + +COMMON DEVELOPMENT AND DISTRIBUTION LICENSE Version 1.0 + +1. Definitions. + + 1.1. "Contributor" means each individual or entity that creates + or contributes to the creation of Modifications. + + 1.2. "Contributor Version" means the combination of the Original + Software, prior Modifications used by a Contributor (if any), + and the Modifications made by that particular Contributor. + + 1.3. "Covered Software" means (a) the Original Software, or (b) + Modifications, or (c) the combination of files containing + Original Software with files containing Modifications, in + each case including portions thereof. + + 1.4. "Executable" means the Covered Software in any form other + than Source Code. + + 1.5. "Initial Developer" means the individual or entity that first + makes Original Software available under this License. + + 1.6. "Larger Work" means a work which combines Covered Software or + portions thereof with code not governed by the terms of this + License. + + 1.7. "License" means this document. + + 1.8. "Licensable" means having the right to grant, to the maximum + extent possible, whether at the time of the initial grant or + subsequently acquired, any and all of the rights conveyed + herein. + + 1.9. "Modifications" means the Source Code and Executable form of + any of the following: + + A. Any file that results from an addition to, deletion from or + modification of the contents of a file containing Original + Software or previous Modifications; + + B. Any new file that contains any part of the Original + Software or previous Modifications; or + + C. Any new file that is contributed or otherwise made + available under the terms of this License. + + 1.10. "Original Software" means the Source Code and Executable + form of computer software code that is originally released + under this License. + + 1.11. "Patent Claims" means any patent claim(s), now owned or + hereafter acquired, including without limitation, method, + process, and apparatus claims, in any patent Licensable by + grantor. + + 1.12. "Source Code" means (a) the common form of computer software + code in which modifications are made and (b) associated + documentation included in or with such code. + + 1.13. "You" (or "Your") means an individual or a legal entity + exercising rights under, and complying with all of the terms + of, this License. For legal entities, "You" includes any + entity which controls, is controlled by, or is under common + control with You. 
For purposes of this definition, + "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by + contract or otherwise, or (b) ownership of more than fifty + percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants. + + 2.1. The Initial Developer Grant. + + Conditioned upon Your compliance with Section 3.1 below and + subject to third party intellectual property claims, the Initial + Developer hereby grants You a world-wide, royalty-free, + non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Initial Developer, to use, + reproduce, modify, display, perform, sublicense and + distribute the Original Software (or portions thereof), + with or without Modifications, and/or as part of a Larger + Work; and + + (b) under Patent Claims infringed by the making, using or + selling of Original Software, to make, have made, use, + practice, sell, and offer for sale, and/or otherwise + dispose of the Original Software (or portions thereof). + + (c) The licenses granted in Sections 2.1(a) and (b) are + effective on the date Initial Developer first distributes + or otherwise makes the Original Software available to a + third party under the terms of this License. + + (d) Notwithstanding Section 2.1(b) above, no patent license is + granted: (1) for code that You delete from the Original + Software, or (2) for infringements caused by: (i) the + modification of the Original Software, or (ii) the + combination of the Original Software with other software + or devices. + + 2.2. Contributor Grant. + + Conditioned upon Your compliance with Section 3.1 below and + subject to third party intellectual property claims, each + Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + (a) under intellectual property rights (other than patent or + trademark) Licensable by Contributor to use, reproduce, + modify, display, perform, sublicense and distribute the + Modifications created by such Contributor (or portions + thereof), either on an unmodified basis, with other + Modifications, as Covered Software and/or as part of a + Larger Work; and + + (b) under Patent Claims infringed by the making, using, or + selling of Modifications made by that Contributor either + alone and/or in combination with its Contributor Version + (or portions of such combination), to make, use, sell, + offer for sale, have made, and/or otherwise dispose of: + (1) Modifications made by that Contributor (or portions + thereof); and (2) the combination of Modifications made by + that Contributor with its Contributor Version (or portions + of such combination). + + (c) The licenses granted in Sections 2.2(a) and 2.2(b) are + effective on the date Contributor first distributes or + otherwise makes the Modifications available to a third + party. + + (d) Notwithstanding Section 2.2(b) above, no patent license is + granted: (1) for any code that Contributor has deleted + from the Contributor Version; (2) for infringements caused + by: (i) third party modifications of Contributor Version, + or (ii) the combination of Modifications made by that + Contributor with other software (except as part of the + Contributor Version) or other devices; or (3) under Patent + Claims infringed by Covered Software in the absence of + Modifications made by that Contributor. + +3. Distribution Obligations. + + 3.1. Availability of Source Code. 
+ + Any Covered Software that You distribute or otherwise make + available in Executable form must also be made available in Source + Code form and that Source Code form must be distributed only under + the terms of this License. You must include a copy of this + License with every copy of the Source Code form of the Covered + Software You distribute or otherwise make available. You must + inform recipients of any such Covered Software in Executable form + as to how they can obtain such Covered Software in Source Code + form in a reasonable manner on or through a medium customarily + used for software exchange. + + 3.2. Modifications. + + The Modifications that You create or to which You contribute are + governed by the terms of this License. You represent that You + believe Your Modifications are Your original creation(s) and/or + You have sufficient rights to grant the rights conveyed by this + License. + + 3.3. Required Notices. + + You must include a notice in each of Your Modifications that + identifies You as the Contributor of the Modification. You may + not remove or alter any copyright, patent or trademark notices + contained within the Covered Software, or any notices of licensing + or any descriptive text giving attribution to any Contributor or + the Initial Developer. + + 3.4. Application of Additional Terms. + + You may not offer or impose any terms on any Covered Software in + Source Code form that alters or restricts the applicable version + of this License or the recipients' rights hereunder. You may + choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of + Covered Software. However, you may do so only on Your own behalf, + and not on behalf of the Initial Developer or any Contributor. + You must make it absolutely clear that any such warranty, support, + indemnity or liability obligation is offered by You alone, and You + hereby agree to indemnify the Initial Developer and every + Contributor for any liability incurred by the Initial Developer or + such Contributor as a result of warranty, support, indemnity or + liability terms You offer. + + 3.5. Distribution of Executable Versions. + + You may distribute the Executable form of the Covered Software + under the terms of this License or under the terms of a license of + Your choice, which may contain terms different from this License, + provided that You are in compliance with the terms of this License + and that the license for the Executable form does not attempt to + limit or alter the recipient's rights in the Source Code form from + the rights set forth in this License. If You distribute the + Covered Software in Executable form under a different license, You + must make it absolutely clear that any terms which differ from + this License are offered by You alone, not by the Initial + Developer or Contributor. You hereby agree to indemnify the + Initial Developer and every Contributor for any liability incurred + by the Initial Developer or such Contributor as a result of any + such terms You offer. + + 3.6. Larger Works. + + You may create a Larger Work by combining Covered Software with + other code not governed by the terms of this License and + distribute the Larger Work as a single product. In such a case, + You must make sure the requirements of this License are fulfilled + for the Covered Software. + +4. Versions of the License. + + 4.1. New Versions. + + Sun Microsystems, Inc. 
is the initial license steward and may + publish revised and/or new versions of this License from time to + time. Each version will be given a distinguishing version number. + Except as provided in Section 4.3, no one other than the license + steward has the right to modify this License. + + 4.2. Effect of New Versions. + + You may always continue to use, distribute or otherwise make the + Covered Software available under the terms of the version of the + License under which You originally received the Covered Software. + If the Initial Developer includes a notice in the Original + Software prohibiting it from being distributed or otherwise made + available under any subsequent version of the License, You must + distribute and make the Covered Software available under the terms + of the version of the License under which You originally received + the Covered Software. Otherwise, You may also choose to use, + distribute or otherwise make the Covered Software available under + the terms of any subsequent version of the License published by + the license steward. + + 4.3. Modified Versions. + + When You are an Initial Developer and You want to create a new + license for Your Original Software, You may create and use a + modified version of this License if You: (a) rename the license + and remove any references to the name of the license steward + (except to note that the license differs from this License); and + (b) otherwise make it clear that the license contains terms which + differ from this License. + +5. DISCLAIMER OF WARRANTY. + + COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" + BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, + INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED + SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR + PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND + PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY + COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE + INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY + NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF + WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF + ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS + DISCLAIMER. + +6. TERMINATION. + + 6.1. This License and the rights granted hereunder will terminate + automatically if You fail to comply with terms herein and fail to + cure such breach within 30 days of becoming aware of the breach. + Provisions which, by their nature, must remain in effect beyond + the termination of this License shall survive. + + 6.2. 
If You assert a patent infringement claim (excluding + declaratory judgment actions) against Initial Developer or a + Contributor (the Initial Developer or Contributor against whom You + assert such claim is referred to as "Participant") alleging that + the Participant Software (meaning the Contributor Version where + the Participant is a Contributor or the Original Software where + the Participant is the Initial Developer) directly or indirectly + infringes any patent, then any and all rights granted directly or + indirectly to You by such Participant, the Initial Developer (if + the Initial Developer is not the Participant) and all Contributors + under Sections 2.1 and/or 2.2 of this License shall, upon 60 days + notice from Participant terminate prospectively and automatically + at the expiration of such 60 day notice period, unless if within + such 60 day period You withdraw Your claim with respect to the + Participant Software against such Participant either unilaterally + or pursuant to a written agreement with Participant. + + 6.3. In the event of termination under Sections 6.1 or 6.2 above, + all end user licenses that have been validly granted by You or any + distributor hereunder prior to termination (excluding licenses + granted to You by any distributor) shall survive termination. + +7. LIMITATION OF LIABILITY. + + UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT + (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE + INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF + COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE + LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR + CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT + LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK + STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER + COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN + INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF + LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL + INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT + APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO + NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR + CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT + APPLY TO YOU. + +8. U.S. GOVERNMENT END USERS. + + The Covered Software is a "commercial item," as that term is + defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial + computer software" (as that term is defined at 48 + C.F.R. 252.227-7014(a)(1)) and "commercial computer software + documentation" as such terms are used in 48 C.F.R. 12.212 + (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 + C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all + U.S. Government End Users acquire Covered Software with only those + rights set forth herein. This U.S. Government Rights clause is in + lieu of, and supersedes, any other FAR, DFAR, or other clause or + provision that addresses Government rights in computer software + under this License. + +9. MISCELLANEOUS. + + This License represents the complete agreement concerning subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. 
This License shall be governed + by the law of the jurisdiction specified in a notice contained + within the Original Software (except to the extent applicable law, + if any, provides otherwise), excluding such jurisdiction's + conflict-of-law provisions. Any litigation relating to this + License shall be subject to the jurisdiction of the courts located + in the jurisdiction and venue specified in a notice contained + within the Original Software, with the losing party responsible + for costs, including, without limitation, court costs and + reasonable attorneys' fees and expenses. The application of the + United Nations Convention on Contracts for the International Sale + of Goods is expressly excluded. Any law or regulation which + provides that the language of a contract shall be construed + against the drafter shall not apply to this License. You agree + that You alone are responsible for compliance with the United + States export administration regulations (and the export control + laws and regulation of any other countries) when You use, + distribute or otherwise make available any Covered Software. + +10. RESPONSIBILITY FOR CLAIMS. + + As between Initial Developer and the Contributors, each party is + responsible for claims and damages arising, directly or + indirectly, out of its utilization of rights under this License + and You agree to work with Initial Developer and Contributors to + distribute such responsibility on an equitable basis. Nothing + herein is intended or shall be deemed to constitute any admission + of liability. + +-------------------------------------------------------------------- + +NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND +DISTRIBUTION LICENSE (CDDL) + +For Covered Software in this distribution, this License shall +be governed by the laws of the State of California (excluding +conflict-of-law provisions). + +Any litigation relating to this License shall be subject to the +jurisdiction of the Federal Courts of the Northern District of +California and the state courts of the State of California, with +venue lying in Santa Clara County, California. diff --git a/vendor/github.com/howeyc/gopass/README.md b/vendor/github.com/howeyc/gopass/README.md new file mode 100644 index 000000000..2d6a4e72c --- /dev/null +++ b/vendor/github.com/howeyc/gopass/README.md @@ -0,0 +1,27 @@ +# getpasswd in Go [![GoDoc](https://godoc.org/github.com/howeyc/gopass?status.svg)](https://godoc.org/github.com/howeyc/gopass) [![Build Status](https://secure.travis-ci.org/howeyc/gopass.png?branch=master)](http://travis-ci.org/howeyc/gopass) + +Retrieve password from user terminal or piped input without echo. + +Verified on BSD, Linux, and Windows. + +Example: +```go +package main + +import "fmt" +import "github.com/howeyc/gopass" + +func main() { + fmt.Printf("Password: ") + + // Silent. For printing *'s use gopass.GetPasswdMasked() + pass, err := gopass.GetPasswd() + if err != nil { + // Handle gopass.ErrInterrupted or getch() read error + } + + // Do something with pass +} +``` + +Caution: Multi-byte characters not supported! 
diff --git a/vendor/github.com/howeyc/gopass/pass.go b/vendor/github.com/howeyc/gopass/pass.go new file mode 100644 index 000000000..31c853ae2 --- /dev/null +++ b/vendor/github.com/howeyc/gopass/pass.go @@ -0,0 +1,91 @@ +package gopass + +import ( + "errors" + "fmt" + "io" + "os" +) + +var defaultGetCh = func() (byte, error) { + buf := make([]byte, 1) + if n, err := os.Stdin.Read(buf); n == 0 || err != nil { + if err != nil { + return 0, err + } + return 0, io.EOF + } + return buf[0], nil +} + +var ( + maxLength = 512 + ErrInterrupted = errors.New("interrupted") + ErrMaxLengthExceeded = fmt.Errorf("maximum byte limit (%v) exceeded", maxLength) + + // Provide variable so that tests can provide a mock implementation. + getch = defaultGetCh +) + +// getPasswd returns the input read from terminal. +// If masked is true, typing will be matched by asterisks on the screen. +// Otherwise, typing will echo nothing. +func getPasswd(masked bool) ([]byte, error) { + var err error + var pass, bs, mask []byte + if masked { + bs = []byte("\b \b") + mask = []byte("*") + } + + if isTerminal(os.Stdin.Fd()) { + if oldState, err := makeRaw(os.Stdin.Fd()); err != nil { + return pass, err + } else { + defer restore(os.Stdin.Fd(), oldState) + } + } + + // Track total bytes read, not just bytes in the password. This ensures any + // errors that might flood the console with nil or -1 bytes infinitely are + // capped. + var counter int + for counter = 0; counter <= maxLength; counter++ { + if v, e := getch(); e != nil { + err = e + break + } else if v == 127 || v == 8 { + if l := len(pass); l > 0 { + pass = pass[:l-1] + fmt.Print(string(bs)) + } + } else if v == 13 || v == 10 { + break + } else if v == 3 { + err = ErrInterrupted + break + } else if v != 0 { + pass = append(pass, v) + fmt.Print(string(mask)) + } + } + + if counter > maxLength { + err = ErrMaxLengthExceeded + } + + fmt.Println() + return pass, err +} + +// GetPasswd returns the password read from the terminal without echoing input. +// The returned byte array does not include end-of-line characters. +func GetPasswd() ([]byte, error) { + return getPasswd(false) +} + +// GetPasswdMasked returns the password read from the terminal, echoing asterisks. +// The returned byte array does not include end-of-line characters. +func GetPasswdMasked() ([]byte, error) { + return getPasswd(true) +} diff --git a/vendor/github.com/howeyc/gopass/terminal.go b/vendor/github.com/howeyc/gopass/terminal.go new file mode 100644 index 000000000..083564146 --- /dev/null +++ b/vendor/github.com/howeyc/gopass/terminal.go @@ -0,0 +1,25 @@ +// +build !solaris + +package gopass + +import "golang.org/x/crypto/ssh/terminal" + +type terminalState struct { + state *terminal.State +} + +func isTerminal(fd uintptr) bool { + return terminal.IsTerminal(int(fd)) +} + +func makeRaw(fd uintptr) (*terminalState, error) { + state, err := terminal.MakeRaw(int(fd)) + + return &terminalState{ + state: state, + }, err +} + +func restore(fd uintptr, oldState *terminalState) error { + return terminal.Restore(int(fd), oldState.state) +} diff --git a/vendor/github.com/howeyc/gopass/terminal_solaris.go b/vendor/github.com/howeyc/gopass/terminal_solaris.go new file mode 100644 index 000000000..257e1b4e8 --- /dev/null +++ b/vendor/github.com/howeyc/gopass/terminal_solaris.go @@ -0,0 +1,69 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License, Version 1.0 only + * (the "License"). 
You may not use this file except in compliance + * with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +// Below is derived from Solaris source, so CDDL license is included. + +package gopass + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +type terminalState struct { + state *unix.Termios +} + +// isTerminal returns true if there is a terminal attached to the given +// file descriptor. +// Source: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func isTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} + +// makeRaw puts the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +// Source: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c +func makeRaw(fd uintptr) (*terminalState, error) { + oldTermiosPtr, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + if err != nil { + return nil, err + } + oldTermios := *oldTermiosPtr + + newTermios := oldTermios + newTermios.Lflag &^= syscall.ECHO | syscall.ECHOE | syscall.ECHOK | syscall.ECHONL + if err := unix.IoctlSetTermios(int(fd), unix.TCSETS, &newTermios); err != nil { + return nil, err + } + + return &terminalState{ + state: oldTermiosPtr, + }, nil +} + +func restore(fd uintptr, oldState *terminalState) error { + return unix.IoctlSetTermios(int(fd), unix.TCSETS, oldState.state) +} diff --git a/vendor/github.com/jonboulle/clockwork/.gitignore b/vendor/github.com/jonboulle/clockwork/.gitignore deleted file mode 100644 index 010c242bd..000000000 --- a/vendor/github.com/jonboulle/clockwork/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test - -*.swp diff --git a/vendor/github.com/jonboulle/clockwork/.travis.yml b/vendor/github.com/jonboulle/clockwork/.travis.yml deleted file mode 100644 index aefda90bf..000000000 --- a/vendor/github.com/jonboulle/clockwork/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go -go: - - 1.3 - -sudo: false diff --git a/vendor/github.com/jonboulle/clockwork/LICENSE b/vendor/github.com/jonboulle/clockwork/LICENSE deleted file mode 100644 index 5c304d1a4..000000000 --- a/vendor/github.com/jonboulle/clockwork/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/jonboulle/clockwork/README.md b/vendor/github.com/jonboulle/clockwork/README.md deleted file mode 100644 index d43a6c799..000000000 --- a/vendor/github.com/jonboulle/clockwork/README.md +++ /dev/null @@ -1,61 +0,0 @@ -clockwork -========= - -[![Build Status](https://travis-ci.org/jonboulle/clockwork.png?branch=master)](https://travis-ci.org/jonboulle/clockwork) -[![godoc](https://godoc.org/github.com/jonboulle/clockwork?status.svg)](http://godoc.org/github.com/jonboulle/clockwork) - -a simple fake clock for golang - -# Usage - -Replace uses of the `time` package with the `clockwork.Clock` interface instead. - -For example, instead of using `time.Sleep` directly: - -``` -func my_func() { - time.Sleep(3 * time.Second) - do_something() -} -``` - -inject a clock and use its `Sleep` method instead: - -``` -func my_func(clock clockwork.Clock) { - clock.Sleep(3 * time.Second) - do_something() -} -``` - -Now you can easily test `my_func` with a `FakeClock`: - -``` -func TestMyFunc(t *testing.T) { - c := clockwork.NewFakeClock() - - // Start our sleepy function - my_func(c) - - // Ensure we wait until my_func is sleeping - c.BlockUntil(1) - - assert_state() - - // Advance the FakeClock forward in time - c.Advance(3) - - assert_state() -} -``` - -and in production builds, simply inject the real clock instead: -``` -my_func(clockwork.NewRealClock()) -``` - -See [example_test.go](example_test.go) for a full example. - -# Credits - -clockwork is inspired by @wickman's [threaded fake clock](https://gist.github.com/wickman/3840816), and the [Golang playground](http://blog.golang.org/playground#Faking time) diff --git a/vendor/github.com/jonboulle/clockwork/clockwork.go b/vendor/github.com/jonboulle/clockwork/clockwork.go deleted file mode 100644 index 9ec96ed29..000000000 --- a/vendor/github.com/jonboulle/clockwork/clockwork.go +++ /dev/null @@ -1,169 +0,0 @@ -package clockwork - -import ( - "sync" - "time" -) - -// Clock provides an interface that packages can use instead of directly -// using the time module, so that chronology-related behavior can be tested -type Clock interface { - After(d time.Duration) <-chan time.Time - Sleep(d time.Duration) - Now() time.Time -} - -// FakeClock provides an interface for a clock which can be -// manually advanced through time -type FakeClock interface { - Clock - // Advance advances the FakeClock to a new point in time, ensuring any existing - // sleepers are notified appropriately before returning - Advance(d time.Duration) - // BlockUntil will block until the FakeClock has the given number of - // sleepers (callers of Sleep or After) - BlockUntil(n int) -} - -// NewRealClock returns a Clock which simply delegates calls to the actual time -// package; it should be used by packages in production. -func NewRealClock() Clock { - return &realClock{} -} - -// NewFakeClock returns a FakeClock implementation which can be -// manually advanced through time for testing. The initial time of the -// FakeClock will be an arbitrary non-zero time. -func NewFakeClock() FakeClock { - // use a fixture that does not fulfill Time.IsZero() - return NewFakeClockAt(time.Date(1984, time.April, 4, 0, 0, 0, 0, time.UTC)) -} - -// NewFakeClockAt returns a FakeClock initialised at the given time.Time. 
-func NewFakeClockAt(t time.Time) FakeClock { - return &fakeClock{ - time: t, - } -} - -type realClock struct{} - -func (rc *realClock) After(d time.Duration) <-chan time.Time { - return time.After(d) -} - -func (rc *realClock) Sleep(d time.Duration) { - time.Sleep(d) -} - -func (rc *realClock) Now() time.Time { - return time.Now() -} - -type fakeClock struct { - sleepers []*sleeper - blockers []*blocker - time time.Time - - l sync.RWMutex -} - -// sleeper represents a caller of After or Sleep -type sleeper struct { - until time.Time - done chan time.Time -} - -// blocker represents a caller of BlockUntil -type blocker struct { - count int - ch chan struct{} -} - -// After mimics time.After; it waits for the given duration to elapse on the -// fakeClock, then sends the current time on the returned channel. -func (fc *fakeClock) After(d time.Duration) <-chan time.Time { - fc.l.Lock() - defer fc.l.Unlock() - now := fc.time - done := make(chan time.Time, 1) - if d.Nanoseconds() == 0 { - // special case - trigger immediately - done <- now - } else { - // otherwise, add to the set of sleepers - s := &sleeper{ - until: now.Add(d), - done: done, - } - fc.sleepers = append(fc.sleepers, s) - // and notify any blockers - fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers)) - } - return done -} - -// notifyBlockers notifies all the blockers waiting until the -// given number of sleepers are waiting on the fakeClock. It -// returns an updated slice of blockers (i.e. those still waiting) -func notifyBlockers(blockers []*blocker, count int) (newBlockers []*blocker) { - for _, b := range blockers { - if b.count == count { - close(b.ch) - } else { - newBlockers = append(newBlockers, b) - } - } - return -} - -// Sleep blocks until the given duration has passed on the fakeClock -func (fc *fakeClock) Sleep(d time.Duration) { - <-fc.After(d) -} - -// Time returns the current time of the fakeClock -func (fc *fakeClock) Now() time.Time { - fc.l.RLock() - t := fc.time - fc.l.RUnlock() - return t -} - -// Advance advances fakeClock to a new point in time, ensuring channels from any -// previous invocations of After are notified appropriately before returning -func (fc *fakeClock) Advance(d time.Duration) { - fc.l.Lock() - defer fc.l.Unlock() - end := fc.time.Add(d) - var newSleepers []*sleeper - for _, s := range fc.sleepers { - if end.Sub(s.until) >= 0 { - s.done <- end - } else { - newSleepers = append(newSleepers, s) - } - } - fc.sleepers = newSleepers - fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers)) - fc.time = end -} - -// BlockUntil will block until the fakeClock has the given number of sleepers -// (callers of Sleep or After) -func (fc *fakeClock) BlockUntil(n int) { - fc.l.Lock() - // Fast path: current number of sleepers is what we're looking for - if len(fc.sleepers) == n { - fc.l.Unlock() - return - } - // Otherwise, set up a new blocker - b := &blocker{ - count: n, - ch: make(chan struct{}), - } - fc.blockers = append(fc.blockers, b) - fc.l.Unlock() - <-b.ch -} diff --git a/vendor/github.com/mailru/easyjson/LICENSE b/vendor/github.com/mailru/easyjson/LICENSE new file mode 100644 index 000000000..fbff658f7 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2016 Mail.Ru Group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, 
publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mailru/easyjson/buffer/pool.go b/vendor/github.com/mailru/easyjson/buffer/pool.go new file mode 100644 index 000000000..4de4a51db --- /dev/null +++ b/vendor/github.com/mailru/easyjson/buffer/pool.go @@ -0,0 +1,207 @@ +// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to +// reduce copying and to allow reuse of individual chunks. +package buffer + +import ( + "io" + "sync" +) + +// PoolConfig contains configuration for the allocation and reuse strategy. +type PoolConfig struct { + StartSize int // Minimum chunk size that is allocated. + PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead. + MaxSize int // Maximum chunk size that will be allocated. +} + +var config = PoolConfig{ + StartSize: 128, + PooledSize: 512, + MaxSize: 32768, +} + +// Reuse pool: chunk size -> pool. +var buffers = map[int]*sync.Pool{} + +func initBuffers() { + for l := config.PooledSize; l <= config.MaxSize; l *= 2 { + buffers[l] = new(sync.Pool) + } +} + +func init() { + initBuffers() +} + +// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done. +func Init(cfg PoolConfig) { + config = cfg + initBuffers() +} + +// putBuf puts a chunk to reuse pool if it can be reused. +func putBuf(buf []byte) { + size := cap(buf) + if size < config.PooledSize { + return + } + if c := buffers[size]; c != nil { + c.Put(buf[:0]) + } +} + +// getBuf gets a chunk from reuse pool or creates a new one if reuse failed. +func getBuf(size int) []byte { + if size < config.PooledSize { + return make([]byte, 0, size) + } + + if c := buffers[size]; c != nil { + v := c.Get() + if v != nil { + return v.([]byte) + } + } + return make([]byte, 0, size) +} + +// Buffer is a buffer optimized for serialization without extra copying. +type Buffer struct { + + // Buf is the current chunk that can be used for serialization. + Buf []byte + + toPool []byte + bufs [][]byte +} + +// EnsureSpace makes sure that the current chunk contains at least s free bytes, +// possibly creating a new chunk. +func (b *Buffer) EnsureSpace(s int) { + if cap(b.Buf)-len(b.Buf) >= s { + return + } + l := len(b.Buf) + if l > 0 { + if cap(b.toPool) != cap(b.Buf) { + // Chunk was reallocated, toPool can be pooled. + putBuf(b.toPool) + } + if cap(b.bufs) == 0 { + b.bufs = make([][]byte, 0, 8) + } + b.bufs = append(b.bufs, b.Buf) + l = cap(b.toPool) * 2 + } else { + l = config.StartSize + } + + if l > config.MaxSize { + l = config.MaxSize + } + b.Buf = getBuf(l) + b.toPool = b.Buf +} + +// AppendByte appends a single byte to buffer. +func (b *Buffer) AppendByte(data byte) { + if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. 
+ b.EnsureSpace(1) + } + b.Buf = append(b.Buf, data) +} + +// AppendBytes appends a byte slice to buffer. +func (b *Buffer) AppendBytes(data []byte) { + for len(data) > 0 { + if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. + b.EnsureSpace(1) + } + + sz := cap(b.Buf) - len(b.Buf) + if sz > len(data) { + sz = len(data) + } + + b.Buf = append(b.Buf, data[:sz]...) + data = data[sz:] + } +} + +// AppendBytes appends a string to buffer. +func (b *Buffer) AppendString(data string) { + for len(data) > 0 { + if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. + b.EnsureSpace(1) + } + + sz := cap(b.Buf) - len(b.Buf) + if sz > len(data) { + sz = len(data) + } + + b.Buf = append(b.Buf, data[:sz]...) + data = data[sz:] + } +} + +// Size computes the size of a buffer by adding sizes of every chunk. +func (b *Buffer) Size() int { + size := len(b.Buf) + for _, buf := range b.bufs { + size += len(buf) + } + return size +} + +// DumpTo outputs the contents of a buffer to a writer and resets the buffer. +func (b *Buffer) DumpTo(w io.Writer) (written int, err error) { + var n int + for _, buf := range b.bufs { + if err == nil { + n, err = w.Write(buf) + written += n + } + putBuf(buf) + } + + if err == nil { + n, err = w.Write(b.Buf) + written += n + } + putBuf(b.toPool) + + b.bufs = nil + b.Buf = nil + b.toPool = nil + + return +} + +// BuildBytes creates a single byte slice with all the contents of the buffer. Data is +// copied if it does not fit in a single chunk. +func (b *Buffer) BuildBytes() []byte { + if len(b.bufs) == 0 { + + ret := b.Buf + b.toPool = nil + b.Buf = nil + + return ret + } + + ret := make([]byte, 0, b.Size()) + for _, buf := range b.bufs { + ret = append(ret, buf...) + putBuf(buf) + } + + ret = append(ret, b.Buf...) + putBuf(b.toPool) + + b.bufs = nil + b.toPool = nil + b.Buf = nil + + return ret +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/error.go b/vendor/github.com/mailru/easyjson/jlexer/error.go new file mode 100644 index 000000000..e90ec40d0 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/error.go @@ -0,0 +1,15 @@ +package jlexer + +import "fmt" + +// LexerError implements the error interface and represents all possible errors that can be +// generated during parsing the JSON data. +type LexerError struct { + Reason string + Offset int + Data string +} + +func (l *LexerError) Error() string { + return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data) +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go new file mode 100644 index 000000000..d700c0a32 --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -0,0 +1,956 @@ +// Package jlexer contains a JSON lexer implementation. +// +// It is expected that it is mostly used with generated parser code, so the interface is tuned +// for a parser that knows what kind of data is expected. +package jlexer + +import ( + "fmt" + "io" + "reflect" + "strconv" + "unicode/utf8" + "unsafe" +) + +// tokenKind determines type of a token. +type tokenKind byte + +const ( + tokenUndef tokenKind = iota // No token. + tokenDelim // Delimiter: one of '{', '}', '[' or ']'. + tokenString // A string literal, e.g. "abc\u1234" + tokenNumber // Number literal, e.g. 1.5e5 + tokenBool // Boolean literal: true or false. + tokenNull // null keyword. +) + +// token describes a single token: type, position in the input and value. +type token struct { + kind tokenKind // Type of a token. 
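// Illustrative usage sketch (not part of the vendored easyjson sources in this
// diff): a minimal program exercising the buffer package vendored in above.
// The pool sizes passed to Init are arbitrary example values, not recommended
// settings.
package main

import (
	"fmt"

	"github.com/mailru/easyjson/buffer"
)

func main() {
	// Optionally tune the chunk pooling strategy before any serialization runs.
	buffer.Init(buffer.PoolConfig{StartSize: 64, PooledSize: 256, MaxSize: 16384})

	var b buffer.Buffer
	b.AppendString(`{"answer":`)
	b.AppendString("42")
	b.AppendByte('}')

	// BuildBytes concatenates all chunks, copying only if more than one chunk was used.
	fmt.Println(string(b.BuildBytes())) // {"answer":42}
}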
+ + boolValue bool // Value if a boolean literal token. + byteValue []byte // Raw value of a token. + delimValue byte +} + +// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice. +type Lexer struct { + Data []byte // Input data given to the lexer. + + start int // Start of the current token. + pos int // Current unscanned position in the input stream. + token token // Last scanned token, if token.kind != tokenUndef. + + firstElement bool // Whether current element is the first in array or an object. + wantSep byte // A comma or a colon character, which need to occur before a token. + + err error // Error encountered during lexing, if any. +} + +// fetchToken scans the input for the next token. +func (r *Lexer) fetchToken() { + r.token.kind = tokenUndef + r.start = r.pos + + // Check if r.Data has r.pos element + // If it doesn't, it mean corrupted input data + if len(r.Data) < r.pos { + r.errParse("Unexpected end of data") + return + } + // Determine the type of a token by skipping whitespace and reading the + // first character. + for _, c := range r.Data[r.pos:] { + switch c { + case ':', ',': + if r.wantSep == c { + r.pos++ + r.start++ + r.wantSep = 0 + } else { + r.errSyntax() + } + + case ' ', '\t', '\r', '\n': + r.pos++ + r.start++ + + case '"': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenString + r.fetchString() + return + + case '{', '[': + if r.wantSep != 0 { + r.errSyntax() + } + r.firstElement = true + r.token.kind = tokenDelim + r.token.delimValue = r.Data[r.pos] + r.pos++ + return + + case '}', ']': + if !r.firstElement && (r.wantSep != ',') { + r.errSyntax() + } + r.wantSep = 0 + r.token.kind = tokenDelim + r.token.delimValue = r.Data[r.pos] + r.pos++ + return + + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': + if r.wantSep != 0 { + r.errSyntax() + } + r.token.kind = tokenNumber + r.fetchNumber() + return + + case 'n': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenNull + r.fetchNull() + return + + case 't': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenBool + r.token.boolValue = true + r.fetchTrue() + return + + case 'f': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenBool + r.token.boolValue = false + r.fetchFalse() + return + + default: + r.errSyntax() + return + } + } + r.err = io.EOF + return +} + +// isTokenEnd returns true if the char can follow a non-delimiter token +func isTokenEnd(c byte) bool { + return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':' +} + +// fetchNull fetches and checks remaining bytes of null keyword. +func (r *Lexer) fetchNull() { + r.pos += 4 + if r.pos > len(r.Data) || + r.Data[r.pos-3] != 'u' || + r.Data[r.pos-2] != 'l' || + r.Data[r.pos-1] != 'l' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 4 + r.errSyntax() + } +} + +// fetchTrue fetches and checks remaining bytes of true keyword. +func (r *Lexer) fetchTrue() { + r.pos += 4 + if r.pos > len(r.Data) || + r.Data[r.pos-3] != 'r' || + r.Data[r.pos-2] != 'u' || + r.Data[r.pos-1] != 'e' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 4 + r.errSyntax() + } +} + +// fetchFalse fetches and checks remaining bytes of false keyword. 
+func (r *Lexer) fetchFalse() { + r.pos += 5 + if r.pos > len(r.Data) || + r.Data[r.pos-4] != 'a' || + r.Data[r.pos-3] != 'l' || + r.Data[r.pos-2] != 's' || + r.Data[r.pos-1] != 'e' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 5 + r.errSyntax() + } +} + +// bytesToStr creates a string pointing at the slice to avoid copying. +// +// Warning: the string returned by the function should be used with care, as the whole input data +// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data +// may be garbage-collected even when the string exists. +func bytesToStr(data []byte) string { + h := (*reflect.SliceHeader)(unsafe.Pointer(&data)) + shdr := reflect.StringHeader{h.Data, h.Len} + return *(*string)(unsafe.Pointer(&shdr)) +} + +// fetchNumber scans a number literal token. +func (r *Lexer) fetchNumber() { + hasE := false + afterE := false + hasDot := false + + r.pos++ + for i, c := range r.Data[r.pos:] { + switch { + case c >= '0' && c <= '9': + afterE = false + case c == '.' && !hasDot: + hasDot = true + case (c == 'e' || c == 'E') && !hasE: + hasE = true + hasDot = true + afterE = true + case (c == '+' || c == '-') && afterE: + afterE = false + default: + r.pos += i + if !isTokenEnd(c) { + r.errSyntax() + } else { + r.token.byteValue = r.Data[r.start:r.pos] + } + return + } + } + + r.pos = len(r.Data) + r.token.byteValue = r.Data[r.start:] +} + +// findStringLen tries to scan into the string literal for ending quote char to determine required size. +// The size will be exact if no escapes are present and may be inexact if there are escaped chars. +func findStringLen(data []byte) (hasEscapes bool, length int) { + delta := 0 + + for i := 0; i < len(data); i++ { + switch data[i] { + case '\\': + i++ + delta++ + if i < len(data) && data[i] == 'u' { + delta++ + } + case '"': + return (delta > 0), (i - delta) + } + } + + return false, len(data) +} + +// processEscape processes a single escape sequence and returns number of bytes processed. +func (r *Lexer) processEscape(data []byte) (int, error) { + if len(data) < 2 { + return 0, fmt.Errorf("syntax error at %v", string(data)) + } + + c := data[1] + switch c { + case '"', '/', '\\': + r.token.byteValue = append(r.token.byteValue, c) + return 2, nil + case 'b': + r.token.byteValue = append(r.token.byteValue, '\b') + return 2, nil + case 'f': + r.token.byteValue = append(r.token.byteValue, '\f') + return 2, nil + case 'n': + r.token.byteValue = append(r.token.byteValue, '\n') + return 2, nil + case 'r': + r.token.byteValue = append(r.token.byteValue, '\r') + return 2, nil + case 't': + r.token.byteValue = append(r.token.byteValue, '\t') + return 2, nil + case 'u': + default: + return 0, fmt.Errorf("syntax error") + } + + var val rune + + for i := 2; i < len(data) && i < 6; i++ { + var v byte + c = data[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + v = c - '0' + case 'a', 'b', 'c', 'd', 'e', 'f': + v = c - 'a' + 10 + case 'A', 'B', 'C', 'D', 'E', 'F': + v = c - 'A' + 10 + default: + return 0, fmt.Errorf("syntax error") + } + + val <<= 4 + val |= rune(v) + } + + l := utf8.RuneLen(val) + if l == -1 { + return 0, fmt.Errorf("invalid unicode escape") + } + + var d [4]byte + utf8.EncodeRune(d[:], val) + r.token.byteValue = append(r.token.byteValue, d[:l]...) + return 6, nil +} + +// fetchString scans a string literal token. 
+func (r *Lexer) fetchString() { + r.pos++ + data := r.Data[r.pos:] + + hasEscapes, length := findStringLen(data) + if !hasEscapes { + r.token.byteValue = data[:length] + r.pos += length + 1 + return + } + + r.token.byteValue = make([]byte, 0, length) + p := 0 + for i := 0; i < len(data); { + switch data[i] { + case '"': + r.pos += i + 1 + r.token.byteValue = append(r.token.byteValue, data[p:i]...) + i++ + return + + case '\\': + r.token.byteValue = append(r.token.byteValue, data[p:i]...) + off, err := r.processEscape(data[i:]) + if err != nil { + r.errParse(err.Error()) + return + } + i += off + p = i + + default: + i++ + } + } + r.errParse("unterminated string literal") +} + +// scanToken scans the next token if no token is currently available in the lexer. +func (r *Lexer) scanToken() { + if r.token.kind != tokenUndef || r.err != nil { + return + } + + r.fetchToken() +} + +// consume resets the current token to allow scanning the next one. +func (r *Lexer) consume() { + r.token.kind = tokenUndef + r.token.delimValue = 0 +} + +// Ok returns true if no error (including io.EOF) was encountered during scanning. +func (r *Lexer) Ok() bool { + return r.err == nil +} + +const maxErrorContextLen = 13 + +func (r *Lexer) errParse(what string) { + if r.err == nil { + var str string + if len(r.Data)-r.pos <= maxErrorContextLen { + str = string(r.Data) + } else { + str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..." + } + r.err = &LexerError{ + Reason: what, + Offset: r.pos, + Data: str, + } + } +} + +func (r *Lexer) errSyntax() { + r.errParse("syntax error") +} + +func (r *Lexer) errInvalidToken(expected string) { + if r.err == nil { + var str string + if len(r.token.byteValue) <= maxErrorContextLen { + str = string(r.token.byteValue) + } else { + str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..." + } + r.err = &LexerError{ + Reason: fmt.Sprintf("expected %s", expected), + Offset: r.pos, + Data: str, + } + } +} + +// Delim consumes a token and verifies that it is the given delimiter. +func (r *Lexer) Delim(c byte) { + if r.token.kind == tokenUndef && r.Ok() { + r.fetchToken() + } + if !r.Ok() || r.token.delimValue != c { + r.errInvalidToken(string([]byte{c})) + } + r.consume() +} + +// IsDelim returns true if there was no scanning error and next token is the given delimiter. +func (r *Lexer) IsDelim(c byte) bool { + if r.token.kind == tokenUndef && r.Ok() { + r.fetchToken() + } + return !r.Ok() || r.token.delimValue == c +} + +// Null verifies that the next token is null and consumes it. +func (r *Lexer) Null() { + if r.token.kind == tokenUndef && r.Ok() { + r.fetchToken() + } + if !r.Ok() || r.token.kind != tokenNull { + r.errInvalidToken("null") + } + r.consume() +} + +// IsNull returns true if the next token is a null keyword. +func (r *Lexer) IsNull() bool { + if r.token.kind == tokenUndef && r.Ok() { + r.fetchToken() + } + return r.Ok() && r.token.kind == tokenNull +} + +// Skip skips a single token. +func (r *Lexer) Skip() { + if r.token.kind == tokenUndef && r.Ok() { + r.fetchToken() + } + r.consume() +} + +// SkipRecursive skips next array or object completely, or just skips a single token if not +// an array/object. +// +// Note: no syntax validation is performed on the skipped data. 
+func (r *Lexer) SkipRecursive() { + r.scanToken() + + var start, end byte + + if r.token.delimValue == '{' { + start, end = '{', '}' + } else if r.token.delimValue == '[' { + start, end = '[', ']' + } else { + r.consume() + return + } + + r.consume() + + level := 1 + inQuotes := false + wasEscape := false + + for i, c := range r.Data[r.pos:] { + switch { + case c == start && !inQuotes: + level++ + case c == end && !inQuotes: + level-- + if level == 0 { + r.pos += i + 1 + return + } + case c == '\\' && inQuotes: + wasEscape = true + continue + case c == '"' && inQuotes: + inQuotes = wasEscape + case c == '"': + inQuotes = true + } + wasEscape = false + } + r.pos = len(r.Data) + r.err = io.EOF +} + +// Raw fetches the next item recursively as a data slice +func (r *Lexer) Raw() []byte { + r.SkipRecursive() + if !r.Ok() { + return nil + } + return r.Data[r.start:r.pos] +} + +// UnsafeString returns the string value if the token is a string literal. +// +// Warning: returned string may point to the input buffer, so the string should not outlive +// the input buffer. Intended pattern of usage is as an argument to a switch statement. +func (r *Lexer) UnsafeString() string { + if r.token.kind == tokenUndef && r.Ok() { + r.fetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return "" + } + + ret := bytesToStr(r.token.byteValue) + r.consume() + return ret +} + +// String reads a string literal. +func (r *Lexer) String() string { + if r.token.kind == tokenUndef && r.Ok() { + r.fetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return "" + + } + ret := string(r.token.byteValue) + r.consume() + return ret +} + +// Bool reads a true or false boolean keyword. +func (r *Lexer) Bool() bool { + if r.token.kind == tokenUndef && r.Ok() { + r.fetchToken() + } + if !r.Ok() || r.token.kind != tokenBool { + r.errInvalidToken("bool") + return false + + } + ret := r.token.boolValue + r.consume() + return ret +} + +func (r *Lexer) number() string { + if r.token.kind == tokenUndef && r.Ok() { + r.fetchToken() + } + if !r.Ok() || r.token.kind != tokenNumber { + r.errInvalidToken("number") + return "" + + } + ret := bytesToStr(r.token.byteValue) + r.consume() + return ret +} + +func (r *Lexer) Uint8() uint8 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 8) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return uint8(n) +} + +func (r *Lexer) Uint16() uint16 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 16) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return uint16(n) +} + +func (r *Lexer) Uint32() uint32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return uint32(n) +} + +func (r *Lexer) Uint64() uint64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return n +} + +func (r *Lexer) Uint() uint { + return uint(r.Uint64()) +} + +func (r *Lexer) Int8() int8 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 8) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return int8(n) +} + +func (r *Lexer) Int16() int16 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 16) + if 
err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return int16(n) +} + +func (r *Lexer) Int32() int32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 32) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return int32(n) +} + +func (r *Lexer) Int64() int64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return n +} + +func (r *Lexer) Int() int { + return int(r.Int64()) +} + +func (r *Lexer) Uint8Str() uint8 { + s := r.UnsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 8) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return uint8(n) +} + +func (r *Lexer) Uint16Str() uint16 { + s := r.UnsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 16) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return uint16(n) +} + +func (r *Lexer) Uint32Str() uint32 { + s := r.UnsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return uint32(n) +} + +func (r *Lexer) Uint64Str() uint64 { + s := r.UnsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return n +} + +func (r *Lexer) UintStr() uint { + return uint(r.Uint64Str()) +} + +func (r *Lexer) Int8Str() int8 { + s := r.UnsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 8) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return int8(n) +} + +func (r *Lexer) Int16Str() int16 { + s := r.UnsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 16) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return int16(n) +} + +func (r *Lexer) Int32Str() int32 { + s := r.UnsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 32) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return int32(n) +} + +func (r *Lexer) Int64Str() int64 { + s := r.UnsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return n +} + +func (r *Lexer) IntStr() int { + return int(r.Int64Str()) +} + +func (r *Lexer) Float32() float32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseFloat(s, 32) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return float32(n) +} + +func (r *Lexer) Float64() float64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseFloat(s, 64) + if err != nil { + r.err = &LexerError{ + Reason: err.Error(), + } + } + return n +} + +func (r *Lexer) Error() error { + return r.err +} + +func (r *Lexer) AddError(e error) { + if r.err == nil { + r.err = e + } +} + +// Interface fetches an interface{} analogous to the 'encoding/json' package. 
+func (r *Lexer) Interface() interface{} { + if r.token.kind == tokenUndef && r.Ok() { + r.fetchToken() + } + + if !r.Ok() { + return nil + } + switch r.token.kind { + case tokenString: + return r.String() + case tokenNumber: + return r.Float64() + case tokenBool: + return r.Bool() + case tokenNull: + r.Null() + return nil + } + + if r.token.delimValue == '{' { + r.consume() + + ret := map[string]interface{}{} + for !r.IsDelim('}') { + key := r.String() + r.WantColon() + ret[key] = r.Interface() + r.WantComma() + } + r.Delim('}') + + if r.Ok() { + return ret + } else { + return nil + } + } else if r.token.delimValue == '[' { + r.consume() + + var ret []interface{} + for !r.IsDelim(']') { + ret = append(ret, r.Interface()) + r.WantComma() + } + r.Delim(']') + + if r.Ok() { + return ret + } else { + return nil + } + } + r.errSyntax() + return nil +} + +// WantComma requires a comma to be present before fetching next token. +func (r *Lexer) WantComma() { + r.wantSep = ',' + r.firstElement = false +} + +// WantColon requires a colon to be present before fetching next token. +func (r *Lexer) WantColon() { + r.wantSep = ':' + r.firstElement = false +} diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go new file mode 100644 index 000000000..907675f9c --- /dev/null +++ b/vendor/github.com/mailru/easyjson/jwriter/writer.go @@ -0,0 +1,273 @@ +// Package jwriter contains a JSON writer. +package jwriter + +import ( + "io" + "strconv" + "unicode/utf8" + + "github.com/mailru/easyjson/buffer" +) + +// Writer is a JSON writer. +type Writer struct { + Error error + Buffer buffer.Buffer +} + +// Size returns the size of the data that was written out. +func (w *Writer) Size() int { + return w.Buffer.Size() +} + +// DumpTo outputs the data to given io.Writer, resetting the buffer. +func (w *Writer) DumpTo(out io.Writer) (written int, err error) { + return w.Buffer.DumpTo(out) +} + +// BuildBytes returns writer data as a single byte slice. +func (w *Writer) BuildBytes() ([]byte, error) { + if w.Error != nil { + return nil, w.Error + } + + return w.Buffer.BuildBytes(), nil +} + +// RawByte appends raw binary data to the buffer. +func (w *Writer) RawByte(c byte) { + w.Buffer.AppendByte(c) +} + +// RawByte appends raw binary data to the buffer. +func (w *Writer) RawString(s string) { + w.Buffer.AppendString(s) +} + +// RawByte appends raw binary data to the buffer or sets the error if it is given. Useful for +// calling with results of MarshalJSON-like functions. 
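// Illustrative usage sketch (not part of the vendored easyjson sources in this
// diff): hand-driven decoding with the jlexer.Lexer vendored in above, using the
// Delim/WantColon/WantComma pattern its methods are built around. The JSON
// payload and field names are made-up example values.
package main

import (
	"fmt"

	"github.com/mailru/easyjson/jlexer"
)

func main() {
	l := jlexer.Lexer{Data: []byte(`{"name":"kubewatch","replicas":3}`)}

	var name string
	var replicas int

	l.Delim('{')
	for !l.IsDelim('}') {
		key := l.String()
		l.WantColon()
		switch key {
		case "name":
			name = l.String()
		case "replicas":
			replicas = l.Int()
		default:
			l.SkipRecursive() // skip unknown fields without validating them
		}
		l.WantComma()
	}
	l.Delim('}')

	if err := l.Error(); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(name, replicas) // kubewatch 3
}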
+func (w *Writer) Raw(data []byte, err error) { + switch { + case w.Error != nil: + return + case err != nil: + w.Error = err + case len(data) > 0: + w.Buffer.AppendBytes(data) + default: + w.RawString("null") + } +} + +func (w *Writer) Uint8(n uint8) { + w.Buffer.EnsureSpace(3) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint16(n uint16) { + w.Buffer.EnsureSpace(5) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint32(n uint32) { + w.Buffer.EnsureSpace(10) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint(n uint) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint64(n uint64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) +} + +func (w *Writer) Int8(n int8) { + w.Buffer.EnsureSpace(4) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int16(n int16) { + w.Buffer.EnsureSpace(6) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int32(n int32) { + w.Buffer.EnsureSpace(11) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int(n int) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int64(n int64) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) +} + +func (w *Writer) Uint8Str(n uint8) { + w.Buffer.EnsureSpace(3) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint16Str(n uint16) { + w.Buffer.EnsureSpace(5) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint32Str(n uint32) { + w.Buffer.EnsureSpace(10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) UintStr(n uint) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint64Str(n uint64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int8Str(n int8) { + w.Buffer.EnsureSpace(4) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int16Str(n int16) { + w.Buffer.EnsureSpace(6) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int32Str(n int32) { + w.Buffer.EnsureSpace(11) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) IntStr(n int) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int64Str(n int64) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf 
= append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Float32(n float32) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) +} + +func (w *Writer) Float64(n float64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64) +} + +func (w *Writer) Bool(v bool) { + w.Buffer.EnsureSpace(5) + if v { + w.Buffer.Buf = append(w.Buffer.Buf, "true"...) + } else { + w.Buffer.Buf = append(w.Buffer.Buf, "false"...) + } +} + +const chars = "0123456789abcdef" + +func (w *Writer) String(s string) { + w.Buffer.AppendByte('"') + + // Portions of the string that contain no escapes are appended as + // byte slices. + + p := 0 // last non-escape symbol + + for i := 0; i < len(s); { + // single-with character + if c := s[i]; c < utf8.RuneSelf { + var escape byte + switch c { + case '\t': + escape = 't' + case '\r': + escape = 'r' + case '\n': + escape = 'n' + case '\\': + escape = '\\' + case '"': + escape = '"' + case '<', '>': + // do nothing + default: + if c >= 0x20 { + // no escaping is required + i++ + continue + } + } + if escape != 0 { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendByte('\\') + w.Buffer.AppendByte(escape) + } else { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendString(`\u00`) + w.Buffer.AppendByte(chars[c>>4]) + w.Buffer.AppendByte(chars[c&0xf]) + } + i++ + p = i + continue + } + + // broken utf + runeValue, runeWidth := utf8.DecodeRuneInString(s[i:]) + if runeValue == utf8.RuneError && runeWidth == 1 { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendString(`\ufffd`) + i++ + p = i + continue + } + + // jsonp stuff - tab separator and line separator + if runeValue == '\u2028' || runeValue == '\u2029' { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendString(`\u202`) + w.Buffer.AppendByte(chars[runeValue&0xf]) + i += runeWidth + p = i + continue + } + i += runeWidth + } + w.Buffer.AppendString(s[p:]) + w.Buffer.AppendByte('"') +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE deleted file mode 100644 index 8dada3eda..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
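// Illustrative usage sketch (not part of the vendored easyjson sources in this
// diff): building a small JSON object by hand with the jwriter.Writer vendored
// in above and dumping it to stdout. Field names and values are made-up example
// values.
package main

import (
	"fmt"
	"os"

	"github.com/mailru/easyjson/jwriter"
)

func main() {
	var w jwriter.Writer

	w.RawByte('{')
	w.String("event") // String writes a quoted, escaped JSON string
	w.RawByte(':')
	w.String("pod deleted")
	w.RawByte(',')
	w.String("count")
	w.RawByte(':')
	w.Int(2)
	w.RawByte('}')

	// DumpTo writes all buffered chunks to the writer and resets the buffer.
	if _, err := w.DumpTo(os.Stdout); err != nil {
		fmt.Fprintln(os.Stderr, "write failed:", err)
	}
	// Output: {"event":"pod deleted","count":2}
}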
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE deleted file mode 100644 index 5d8cb5b72..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE +++ /dev/null @@ -1 +0,0 @@ -Copyright 2012 Matt T. 
Proud (matt.proud@gmail.com) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore deleted file mode 100644 index e16fb946b..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore +++ /dev/null @@ -1 +0,0 @@ -cover.dat diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile deleted file mode 100644 index 81be21437..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -all: - -cover: - go test -cover -v -coverprofile=cover.dat ./... - go tool cover -func cover.dat - -.PHONY: cover diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go deleted file mode 100644 index 258c0636a..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "errors" - "io" - - "github.com/golang/protobuf/proto" -) - -var errInvalidVarint = errors.New("invalid varint32 encountered") - -// ReadDelimited decodes a message from the provided length-delimited stream, -// where the length is encoded as 32-bit varint prefix to the message body. -// It returns the total number of bytes read and any applicable error. This is -// roughly equivalent to the companion Java API's -// MessageLite#parseDelimitedFrom. As per the reader contract, this function -// calls r.Read repeatedly as required until exactly one message including its -// prefix is read and decoded (or an error has occurred). The function never -// reads more bytes from the stream than required. The function never returns -// an error if a message has been read and decoded correctly, even if the end -// of the stream has been reached in doing so. In that case, any subsequent -// calls return (0, io.EOF). -func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { - // Per AbstractParser#parsePartialDelimitedFrom with - // CodedInputStream#readRawVarint32. - var headerBuf [binary.MaxVarintLen32]byte - var bytesRead, varIntBytes int - var messageLength uint64 - for varIntBytes == 0 { // i.e. no varint has been decoded yet. - if bytesRead >= len(headerBuf) { - return bytesRead, errInvalidVarint - } - // We have to read byte by byte here to avoid reading more bytes - // than required. Each read byte is appended to what we have - // read before. - newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) - if newBytesRead == 0 { - if err != nil { - return bytesRead, err - } - // A Reader should not return (0, nil), but if it does, - // it should be treated as no-op (according to the - // Reader contract). So let's go on... 
- continue - } - bytesRead += newBytesRead - // Now present everything read so far to the varint decoder and - // see if a varint can be decoded already. - messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) - } - - messageBuf := make([]byte, messageLength) - newBytesRead, err := io.ReadFull(r, messageBuf) - bytesRead += newBytesRead - if err != nil { - return bytesRead, err - } - - return bytesRead, proto.Unmarshal(messageBuf, m) -} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go deleted file mode 100644 index c318385cb..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package pbutil provides record length-delimited Protocol Buffer streaming. -package pbutil diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go deleted file mode 100644 index 8fb59ad22..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "io" - - "github.com/golang/protobuf/proto" -) - -// WriteDelimited encodes and dumps a message to the provided writer prefixed -// with a 32-bit varint indicating the length of the encoded message, producing -// a length-delimited record stream, which can be used to chain together -// encoded messages of the same type together in a file. It returns the total -// number of bytes written and any applicable error. This is roughly -// equivalent to the companion Java API's MessageLite#writeDelimitedTo. 
-func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { - buffer, err := proto.Marshal(m) - if err != nil { - return 0, err - } - - var buf [binary.MaxVarintLen32]byte - encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) - - sync, err := w.Write(buf[:encodedLength]) - if err != nil { - return sync, err - } - - n, err = w.Write(buffer) - return n + sync, err -} diff --git a/vendor/github.com/opencontainers/runc/LICENSE b/vendor/github.com/opencontainers/runc/LICENSE deleted file mode 100644 index 27448585a..000000000 --- a/vendor/github.com/opencontainers/runc/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Docker, Inc. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/opencontainers/runc/NOTICE b/vendor/github.com/opencontainers/runc/NOTICE deleted file mode 100644 index 5c97abce4..000000000 --- a/vendor/github.com/opencontainers/runc/NOTICE +++ /dev/null @@ -1,17 +0,0 @@ -runc - -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (http://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see http://www.bis.doc.gov - -See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go deleted file mode 100644 index 274ab47dd..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go +++ /dev/null @@ -1,64 +0,0 @@ -// +build linux - -package cgroups - -import ( - "fmt" - - "github.com/opencontainers/runc/libcontainer/configs" -) - -type Manager interface { - // Applies cgroup configuration to the process with the specified pid - Apply(pid int) error - - // Returns the PIDs inside the cgroup set - GetPids() ([]int, error) - - // Returns the PIDs inside the cgroup set & all sub-cgroups - GetAllPids() ([]int, error) - - // Returns statistics for the cgroup set - GetStats() (*Stats, error) - - // Toggles the freezer cgroup according with specified state - Freeze(state configs.FreezerState) error - - // Destroys the cgroup set - Destroy() error - - // NewCgroupManager() and LoadCgroupManager() require following attributes: - // Paths map[string]string - // Cgroups *cgroups.Cgroup - // Paths maps cgroup subsystem to path at which it is mounted. - // Cgroups specifies specific cgroup settings for the various subsystems - - // Returns cgroup paths to save in a state file and to be able to - // restore the object later. - GetPaths() map[string]string - - // Set the cgroup as configured. 
- Set(container *configs.Config) error -} - -type NotFoundError struct { - Subsystem string -} - -func (e *NotFoundError) Error() string { - return fmt.Sprintf("mountpoint for %s not found", e.Subsystem) -} - -func NewNotFoundError(sub string) error { - return &NotFoundError{ - Subsystem: sub, - } -} - -func IsNotFound(err error) bool { - if err == nil { - return false - } - _, ok := err.(*NotFoundError) - return ok -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go deleted file mode 100644 index 278d507e2..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !linux - -package cgroups diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go deleted file mode 100644 index 633ab0427..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go +++ /dev/null @@ -1,402 +0,0 @@ -// +build linux - -package fs - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "sync" - - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" - libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils" -) - -var ( - subsystems = subsystemSet{ - &CpusetGroup{}, - &DevicesGroup{}, - &MemoryGroup{}, - &CpuGroup{}, - &CpuacctGroup{}, - &PidsGroup{}, - &BlkioGroup{}, - &HugetlbGroup{}, - &NetClsGroup{}, - &NetPrioGroup{}, - &PerfEventGroup{}, - &FreezerGroup{}, - &NameGroup{GroupName: "name=systemd", Join: true}, - } - CgroupProcesses = "cgroup.procs" - HugePageSizes, _ = cgroups.GetHugePageSize() -) - -var errSubsystemDoesNotExist = errors.New("cgroup: subsystem does not exist") - -type subsystemSet []subsystem - -func (s subsystemSet) Get(name string) (subsystem, error) { - for _, ss := range s { - if ss.Name() == name { - return ss, nil - } - } - return nil, errSubsystemDoesNotExist -} - -type subsystem interface { - // Name returns the name of the subsystem. - Name() string - // Returns the stats, as 'stats', corresponding to the cgroup under 'path'. - GetStats(path string, stats *cgroups.Stats) error - // Removes the cgroup represented by 'cgroupData'. - Remove(*cgroupData) error - // Creates and joins the cgroup represented by 'cgroupData'. - Apply(*cgroupData) error - // Set the cgroup represented by cgroup. - Set(path string, cgroup *configs.Cgroup) error -} - -type Manager struct { - mu sync.Mutex - Cgroups *configs.Cgroup - Paths map[string]string -} - -// The absolute path to the root of the cgroup hierarchies. -var cgroupRootLock sync.Mutex -var cgroupRoot string - -// Gets the cgroupRoot. 
-func getCgroupRoot() (string, error) { - cgroupRootLock.Lock() - defer cgroupRootLock.Unlock() - - if cgroupRoot != "" { - return cgroupRoot, nil - } - - root, err := cgroups.FindCgroupMountpointDir() - if err != nil { - return "", err - } - - if _, err := os.Stat(root); err != nil { - return "", err - } - - cgroupRoot = root - return cgroupRoot, nil -} - -type cgroupData struct { - root string - innerPath string - config *configs.Cgroup - pid int -} - -func (m *Manager) Apply(pid int) (err error) { - if m.Cgroups == nil { - return nil - } - - var c = m.Cgroups - - d, err := getCgroupData(m.Cgroups, pid) - if err != nil { - return err - } - - if c.Paths != nil { - paths := make(map[string]string) - for name, path := range c.Paths { - _, err := d.path(name) - if err != nil { - if cgroups.IsNotFound(err) { - continue - } - return err - } - paths[name] = path - } - m.Paths = paths - return cgroups.EnterPid(m.Paths, pid) - } - - m.mu.Lock() - defer m.mu.Unlock() - paths := make(map[string]string) - for _, sys := range subsystems { - if err := sys.Apply(d); err != nil { - return err - } - // TODO: Apply should, ideally, be reentrant or be broken up into a separate - // create and join phase so that the cgroup hierarchy for a container can be - // created then join consists of writing the process pids to cgroup.procs - p, err := d.path(sys.Name()) - if err != nil { - // The non-presence of the devices subsystem is - // considered fatal for security reasons. - if cgroups.IsNotFound(err) && sys.Name() != "devices" { - continue - } - return err - } - paths[sys.Name()] = p - } - m.Paths = paths - return nil -} - -func (m *Manager) Destroy() error { - if m.Cgroups.Paths != nil { - return nil - } - m.mu.Lock() - defer m.mu.Unlock() - if err := cgroups.RemovePaths(m.Paths); err != nil { - return err - } - m.Paths = make(map[string]string) - return nil -} - -func (m *Manager) GetPaths() map[string]string { - m.mu.Lock() - paths := m.Paths - m.mu.Unlock() - return paths -} - -func (m *Manager) GetStats() (*cgroups.Stats, error) { - m.mu.Lock() - defer m.mu.Unlock() - stats := cgroups.NewStats() - for name, path := range m.Paths { - sys, err := subsystems.Get(name) - if err == errSubsystemDoesNotExist || !cgroups.PathExists(path) { - continue - } - if err := sys.GetStats(path, stats); err != nil { - return nil, err - } - } - return stats, nil -} - -func (m *Manager) Set(container *configs.Config) error { - for _, sys := range subsystems { - // Generate fake cgroup data. - d, err := getCgroupData(container.Cgroups, -1) - if err != nil { - return err - } - // Get the path, but don't error out if the cgroup wasn't found. 
- path, err := d.path(sys.Name()) - if err != nil && !cgroups.IsNotFound(err) { - return err - } - - if err := sys.Set(path, container.Cgroups); err != nil { - return err - } - } - - if m.Paths["cpu"] != "" { - if err := CheckCpushares(m.Paths["cpu"], container.Cgroups.Resources.CpuShares); err != nil { - return err - } - } - return nil -} - -// Freeze toggles the container's freezer cgroup depending on the state -// provided -func (m *Manager) Freeze(state configs.FreezerState) error { - d, err := getCgroupData(m.Cgroups, 0) - if err != nil { - return err - } - dir, err := d.path("freezer") - if err != nil { - return err - } - prevState := m.Cgroups.Resources.Freezer - m.Cgroups.Resources.Freezer = state - freezer, err := subsystems.Get("freezer") - if err != nil { - return err - } - err = freezer.Set(dir, m.Cgroups) - if err != nil { - m.Cgroups.Resources.Freezer = prevState - return err - } - return nil -} - -func (m *Manager) GetPids() ([]int, error) { - dir, err := getCgroupPath(m.Cgroups) - if err != nil { - return nil, err - } - return cgroups.GetPids(dir) -} - -func (m *Manager) GetAllPids() ([]int, error) { - dir, err := getCgroupPath(m.Cgroups) - if err != nil { - return nil, err - } - return cgroups.GetAllPids(dir) -} - -func getCgroupPath(c *configs.Cgroup) (string, error) { - d, err := getCgroupData(c, 0) - if err != nil { - return "", err - } - - return d.path("devices") -} - -func getCgroupData(c *configs.Cgroup, pid int) (*cgroupData, error) { - root, err := getCgroupRoot() - if err != nil { - return nil, err - } - - if (c.Name != "" || c.Parent != "") && c.Path != "" { - return nil, fmt.Errorf("cgroup: either Path or Name and Parent should be used") - } - - // XXX: Do not remove this code. Path safety is important! -- cyphar - cgPath := libcontainerUtils.CleanPath(c.Path) - cgParent := libcontainerUtils.CleanPath(c.Parent) - cgName := libcontainerUtils.CleanPath(c.Name) - - innerPath := cgPath - if innerPath == "" { - innerPath = filepath.Join(cgParent, cgName) - } - - return &cgroupData{ - root: root, - innerPath: innerPath, - config: c, - pid: pid, - }, nil -} - -func (raw *cgroupData) parentPath(subsystem, mountpoint, root string) (string, error) { - // Use GetThisCgroupDir instead of GetInitCgroupDir, because the creating - // process could be in a container and share a pid namespace with the host, and - // /proc/1/cgroup could point to a whole other world of cgroups. - initPath, err := cgroups.GetThisCgroupDir(subsystem) - if err != nil { - return "", err - } - // This is needed for nested containers, because in /proc/self/cgroup we - // see paths from the host, which don't exist in the container. - relDir, err := filepath.Rel(root, initPath) - if err != nil { - return "", err - } - return filepath.Join(mountpoint, relDir), nil -} - -func (raw *cgroupData) path(subsystem string) (string, error) { - mnt, root, err := cgroups.FindCgroupMountpointAndRoot(subsystem) - // If we didn't mount the subsystem, there is no point in making the path. - if err != nil { - return "", err - } - - // If the cgroup name/path is absolute do not look relative to the cgroup of the init process. - if filepath.IsAbs(raw.innerPath) { - // Sometimes subsystems can be mounted together as 'cpu,cpuacct'.
- return filepath.Join(raw.root, filepath.Base(mnt), raw.innerPath), nil - } - - parentPath, err := raw.parentPath(subsystem, mnt, root) - if err != nil { - return "", err - } - - return filepath.Join(parentPath, raw.innerPath), nil -} - -func (raw *cgroupData) join(subsystem string) (string, error) { - path, err := raw.path(subsystem) - if err != nil { - return "", err - } - if err := os.MkdirAll(path, 0755); err != nil { - return "", err - } - if err := writeFile(path, CgroupProcesses, strconv.Itoa(raw.pid)); err != nil { - return "", err - } - return path, nil -} - -func writeFile(dir, file, data string) error { - // Normally dir should not be empty, one case is that cgroup subsystem - // is not mounted, we will get empty dir, and we want it fail here. - if dir == "" { - return fmt.Errorf("no such directory for %s", file) - } - if err := ioutil.WriteFile(filepath.Join(dir, file), []byte(data), 0700); err != nil { - return fmt.Errorf("failed to write %v to %v: %v", data, file, err) - } - return nil -} - -func readFile(dir, file string) (string, error) { - data, err := ioutil.ReadFile(filepath.Join(dir, file)) - return string(data), err -} - -func removePath(p string, err error) error { - if err != nil { - return err - } - if p != "" { - return os.RemoveAll(p) - } - return nil -} - -func CheckCpushares(path string, c int64) error { - var cpuShares int64 - - if c == 0 { - return nil - } - - fd, err := os.Open(filepath.Join(path, "cpu.shares")) - if err != nil { - return err - } - defer fd.Close() - - _, err = fmt.Fscanf(fd, "%d", &cpuShares) - if err != nil && err != io.EOF { - return err - } - - if c > cpuShares { - return fmt.Errorf("The maximum allowed cpu-shares is %d", cpuShares) - } else if c < cpuShares { - return fmt.Errorf("The minimum allowed cpu-shares is %d", cpuShares) - } - - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go deleted file mode 100644 index a142cb991..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/blkio.go +++ /dev/null @@ -1,237 +0,0 @@ -// +build linux - -package fs - -import ( - "bufio" - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" -) - -type BlkioGroup struct { -} - -func (s *BlkioGroup) Name() string { - return "blkio" -} - -func (s *BlkioGroup) Apply(d *cgroupData) error { - _, err := d.join("blkio") - if err != nil && !cgroups.IsNotFound(err) { - return err - } - return nil -} - -func (s *BlkioGroup) Set(path string, cgroup *configs.Cgroup) error { - if cgroup.Resources.BlkioWeight != 0 { - if err := writeFile(path, "blkio.weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioWeight), 10)); err != nil { - return err - } - } - - if cgroup.Resources.BlkioLeafWeight != 0 { - if err := writeFile(path, "blkio.leaf_weight", strconv.FormatUint(uint64(cgroup.Resources.BlkioLeafWeight), 10)); err != nil { - return err - } - } - for _, wd := range cgroup.Resources.BlkioWeightDevice { - if err := writeFile(path, "blkio.weight_device", wd.WeightString()); err != nil { - return err - } - if err := writeFile(path, "blkio.leaf_weight_device", wd.LeafWeightString()); err != nil { - return err - } - } - for _, td := range cgroup.Resources.BlkioThrottleReadBpsDevice { - if err := writeFile(path, "blkio.throttle.read_bps_device", td.String()); err != nil { - return err - } - } - for _, td := range 
cgroup.Resources.BlkioThrottleWriteBpsDevice { - if err := writeFile(path, "blkio.throttle.write_bps_device", td.String()); err != nil { - return err - } - } - for _, td := range cgroup.Resources.BlkioThrottleReadIOPSDevice { - if err := writeFile(path, "blkio.throttle.read_iops_device", td.String()); err != nil { - return err - } - } - for _, td := range cgroup.Resources.BlkioThrottleWriteIOPSDevice { - if err := writeFile(path, "blkio.throttle.write_iops_device", td.String()); err != nil { - return err - } - } - - return nil -} - -func (s *BlkioGroup) Remove(d *cgroupData) error { - return removePath(d.path("blkio")) -} - -/* -examples: - - blkio.sectors - 8:0 6792 - - blkio.io_service_bytes - 8:0 Read 1282048 - 8:0 Write 2195456 - 8:0 Sync 2195456 - 8:0 Async 1282048 - 8:0 Total 3477504 - Total 3477504 - - blkio.io_serviced - 8:0 Read 124 - 8:0 Write 104 - 8:0 Sync 104 - 8:0 Async 124 - 8:0 Total 228 - Total 228 - - blkio.io_queued - 8:0 Read 0 - 8:0 Write 0 - 8:0 Sync 0 - 8:0 Async 0 - 8:0 Total 0 - Total 0 -*/ - -func splitBlkioStatLine(r rune) bool { - return r == ' ' || r == ':' -} - -func getBlkioStat(path string) ([]cgroups.BlkioStatEntry, error) { - var blkioStats []cgroups.BlkioStatEntry - f, err := os.Open(path) - if err != nil { - if os.IsNotExist(err) { - return blkioStats, nil - } - return nil, err - } - defer f.Close() - - sc := bufio.NewScanner(f) - for sc.Scan() { - // format: dev type amount - fields := strings.FieldsFunc(sc.Text(), splitBlkioStatLine) - if len(fields) < 3 { - if len(fields) == 2 && fields[0] == "Total" { - // skip total line - continue - } else { - return nil, fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text()) - } - } - - v, err := strconv.ParseUint(fields[0], 10, 64) - if err != nil { - return nil, err - } - major := v - - v, err = strconv.ParseUint(fields[1], 10, 64) - if err != nil { - return nil, err - } - minor := v - - op := "" - valueField := 2 - if len(fields) == 4 { - op = fields[2] - valueField = 3 - } - v, err = strconv.ParseUint(fields[valueField], 10, 64) - if err != nil { - return nil, err - } - blkioStats = append(blkioStats, cgroups.BlkioStatEntry{Major: major, Minor: minor, Op: op, Value: v}) - } - - return blkioStats, nil -} - -func (s *BlkioGroup) GetStats(path string, stats *cgroups.Stats) error { - // Try to read CFQ stats available on all CFQ enabled kernels first - if blkioStats, err := getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err == nil && blkioStats != nil { - return getCFQStats(path, stats) - } - return getStats(path, stats) // Use generic stats as fallback -} - -func getCFQStats(path string, stats *cgroups.Stats) error { - var blkioStats []cgroups.BlkioStatEntry - var err error - - if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.sectors_recursive")); err != nil { - return err - } - stats.BlkioStats.SectorsRecursive = blkioStats - - if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_bytes_recursive")); err != nil { - return err - } - stats.BlkioStats.IoServiceBytesRecursive = blkioStats - - if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_serviced_recursive")); err != nil { - return err - } - stats.BlkioStats.IoServicedRecursive = blkioStats - - if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_queued_recursive")); err != nil { - return err - } - stats.BlkioStats.IoQueuedRecursive = blkioStats - - if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_service_time_recursive")); err != nil { - return err - } - 
stats.BlkioStats.IoServiceTimeRecursive = blkioStats - - if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_wait_time_recursive")); err != nil { - return err - } - stats.BlkioStats.IoWaitTimeRecursive = blkioStats - - if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.io_merged_recursive")); err != nil { - return err - } - stats.BlkioStats.IoMergedRecursive = blkioStats - - if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.time_recursive")); err != nil { - return err - } - stats.BlkioStats.IoTimeRecursive = blkioStats - - return nil -} - -func getStats(path string, stats *cgroups.Stats) error { - var blkioStats []cgroups.BlkioStatEntry - var err error - - if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_service_bytes")); err != nil { - return err - } - stats.BlkioStats.IoServiceBytesRecursive = blkioStats - - if blkioStats, err = getBlkioStat(filepath.Join(path, "blkio.throttle.io_serviced")); err != nil { - return err - } - stats.BlkioStats.IoServicedRecursive = blkioStats - - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go deleted file mode 100644 index a4ef28a60..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go +++ /dev/null @@ -1,94 +0,0 @@ -// +build linux - -package fs - -import ( - "bufio" - "os" - "path/filepath" - "strconv" - - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" -) - -type CpuGroup struct { -} - -func (s *CpuGroup) Name() string { - return "cpu" -} - -func (s *CpuGroup) Apply(d *cgroupData) error { - // We always want to join the cpu group, to allow fair cpu scheduling - // on a container basis - _, err := d.join("cpu") - if err != nil && !cgroups.IsNotFound(err) { - return err - } - return nil -} - -func (s *CpuGroup) Set(path string, cgroup *configs.Cgroup) error { - if cgroup.Resources.CpuShares != 0 { - if err := writeFile(path, "cpu.shares", strconv.FormatInt(cgroup.Resources.CpuShares, 10)); err != nil { - return err - } - } - if cgroup.Resources.CpuPeriod != 0 { - if err := writeFile(path, "cpu.cfs_period_us", strconv.FormatInt(cgroup.Resources.CpuPeriod, 10)); err != nil { - return err - } - } - if cgroup.Resources.CpuQuota != 0 { - if err := writeFile(path, "cpu.cfs_quota_us", strconv.FormatInt(cgroup.Resources.CpuQuota, 10)); err != nil { - return err - } - } - if cgroup.Resources.CpuRtPeriod != 0 { - if err := writeFile(path, "cpu.rt_period_us", strconv.FormatInt(cgroup.Resources.CpuRtPeriod, 10)); err != nil { - return err - } - } - if cgroup.Resources.CpuRtRuntime != 0 { - if err := writeFile(path, "cpu.rt_runtime_us", strconv.FormatInt(cgroup.Resources.CpuRtRuntime, 10)); err != nil { - return err - } - } - - return nil -} - -func (s *CpuGroup) Remove(d *cgroupData) error { - return removePath(d.path("cpu")) -} - -func (s *CpuGroup) GetStats(path string, stats *cgroups.Stats) error { - f, err := os.Open(filepath.Join(path, "cpu.stat")) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - defer f.Close() - - sc := bufio.NewScanner(f) - for sc.Scan() { - t, v, err := getCgroupParamKeyValue(sc.Text()) - if err != nil { - return err - } - switch t { - case "nr_periods": - stats.CpuStats.ThrottlingData.Periods = v - - case "nr_throttled": - stats.CpuStats.ThrottlingData.ThrottledPeriods = v - - case "throttled_time": - stats.CpuStats.ThrottlingData.ThrottledTime 
= v - } - } - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go deleted file mode 100644 index 53afbaddf..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuacct.go +++ /dev/null @@ -1,121 +0,0 @@ -// +build linux - -package fs - -import ( - "fmt" - "io/ioutil" - "path/filepath" - "strconv" - "strings" - - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/system" -) - -const ( - cgroupCpuacctStat = "cpuacct.stat" - nanosecondsInSecond = 1000000000 -) - -var clockTicks = uint64(system.GetClockTicks()) - -type CpuacctGroup struct { -} - -func (s *CpuacctGroup) Name() string { - return "cpuacct" -} - -func (s *CpuacctGroup) Apply(d *cgroupData) error { - // we just want to join this group even though we don't set anything - if _, err := d.join("cpuacct"); err != nil && !cgroups.IsNotFound(err) { - return err - } - - return nil -} - -func (s *CpuacctGroup) Set(path string, cgroup *configs.Cgroup) error { - return nil -} - -func (s *CpuacctGroup) Remove(d *cgroupData) error { - return removePath(d.path("cpuacct")) -} - -func (s *CpuacctGroup) GetStats(path string, stats *cgroups.Stats) error { - userModeUsage, kernelModeUsage, err := getCpuUsageBreakdown(path) - if err != nil { - return err - } - - totalUsage, err := getCgroupParamUint(path, "cpuacct.usage") - if err != nil { - return err - } - - percpuUsage, err := getPercpuUsage(path) - if err != nil { - return err - } - - stats.CpuStats.CpuUsage.TotalUsage = totalUsage - stats.CpuStats.CpuUsage.PercpuUsage = percpuUsage - stats.CpuStats.CpuUsage.UsageInUsermode = userModeUsage - stats.CpuStats.CpuUsage.UsageInKernelmode = kernelModeUsage - return nil -} - -// Returns user and kernel usage breakdown in nanoseconds. 
-func getCpuUsageBreakdown(path string) (uint64, uint64, error) { - userModeUsage := uint64(0) - kernelModeUsage := uint64(0) - const ( - userField = "user" - systemField = "system" - ) - - // Expected format: - // user - // system - data, err := ioutil.ReadFile(filepath.Join(path, cgroupCpuacctStat)) - if err != nil { - return 0, 0, err - } - fields := strings.Fields(string(data)) - if len(fields) != 4 { - return 0, 0, fmt.Errorf("failure - %s is expected to have 4 fields", filepath.Join(path, cgroupCpuacctStat)) - } - if fields[0] != userField { - return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[0], cgroupCpuacctStat, userField) - } - if fields[2] != systemField { - return 0, 0, fmt.Errorf("unexpected field %q in %q, expected %q", fields[2], cgroupCpuacctStat, systemField) - } - if userModeUsage, err = strconv.ParseUint(fields[1], 10, 64); err != nil { - return 0, 0, err - } - if kernelModeUsage, err = strconv.ParseUint(fields[3], 10, 64); err != nil { - return 0, 0, err - } - - return (userModeUsage * nanosecondsInSecond) / clockTicks, (kernelModeUsage * nanosecondsInSecond) / clockTicks, nil -} - -func getPercpuUsage(path string) ([]uint64, error) { - percpuUsage := []uint64{} - data, err := ioutil.ReadFile(filepath.Join(path, "cpuacct.usage_percpu")) - if err != nil { - return percpuUsage, err - } - for _, value := range strings.Fields(string(data)) { - value, err := strconv.ParseUint(value, 10, 64) - if err != nil { - return percpuUsage, fmt.Errorf("Unable to convert param value to uint64: %s", err) - } - percpuUsage = append(percpuUsage, value) - } - return percpuUsage, nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go deleted file mode 100644 index cbe62bd98..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpuset.go +++ /dev/null @@ -1,139 +0,0 @@ -// +build linux - -package fs - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" - libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils" -) - -type CpusetGroup struct { -} - -func (s *CpusetGroup) Name() string { - return "cpuset" -} - -func (s *CpusetGroup) Apply(d *cgroupData) error { - dir, err := d.path("cpuset") - if err != nil && !cgroups.IsNotFound(err) { - return err - } - return s.ApplyDir(dir, d.config, d.pid) -} - -func (s *CpusetGroup) Set(path string, cgroup *configs.Cgroup) error { - if cgroup.Resources.CpusetCpus != "" { - if err := writeFile(path, "cpuset.cpus", cgroup.Resources.CpusetCpus); err != nil { - return err - } - } - if cgroup.Resources.CpusetMems != "" { - if err := writeFile(path, "cpuset.mems", cgroup.Resources.CpusetMems); err != nil { - return err - } - } - return nil -} - -func (s *CpusetGroup) Remove(d *cgroupData) error { - return removePath(d.path("cpuset")) -} - -func (s *CpusetGroup) GetStats(path string, stats *cgroups.Stats) error { - return nil -} - -func (s *CpusetGroup) ApplyDir(dir string, cgroup *configs.Cgroup, pid int) error { - // This might happen if we have no cpuset cgroup mounted. - // Just do nothing and don't fail. 
- if dir == "" { - return nil - } - root, err := getCgroupRoot() - if err != nil { - return err - } - if err := s.ensureParent(dir, root); err != nil { - return err - } - // because we are not using d.join we need to place the pid into the procs file - // unlike the other subsystems - if err := writeFile(dir, "cgroup.procs", strconv.Itoa(pid)); err != nil { - return err - } - - return nil -} - -func (s *CpusetGroup) getSubsystemSettings(parent string) (cpus []byte, mems []byte, err error) { - if cpus, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.cpus")); err != nil { - return - } - if mems, err = ioutil.ReadFile(filepath.Join(parent, "cpuset.mems")); err != nil { - return - } - return cpus, mems, nil -} - -// ensureParent makes sure that the parent directory of current is created -// and populated with the proper cpus and mems files copied from -// it's parent. -func (s *CpusetGroup) ensureParent(current, root string) error { - parent := filepath.Dir(current) - if libcontainerUtils.CleanPath(parent) == root { - return nil - } - // Avoid infinite recursion. - if parent == current { - return fmt.Errorf("cpuset: cgroup parent path outside cgroup root") - } - if err := s.ensureParent(parent, root); err != nil { - return err - } - if err := os.MkdirAll(current, 0755); err != nil { - return err - } - return s.copyIfNeeded(current, parent) -} - -// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent -// directory to the current directory if the file's contents are 0 -func (s *CpusetGroup) copyIfNeeded(current, parent string) error { - var ( - err error - currentCpus, currentMems []byte - parentCpus, parentMems []byte - ) - - if currentCpus, currentMems, err = s.getSubsystemSettings(current); err != nil { - return err - } - if parentCpus, parentMems, err = s.getSubsystemSettings(parent); err != nil { - return err - } - - if s.isEmpty(currentCpus) { - if err := writeFile(current, "cpuset.cpus", string(parentCpus)); err != nil { - return err - } - } - if s.isEmpty(currentMems) { - if err := writeFile(current, "cpuset.mems", string(parentMems)); err != nil { - return err - } - } - return nil -} - -func (s *CpusetGroup) isEmpty(b []byte) bool { - return len(bytes.Trim(b, "\n")) == 0 -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go deleted file mode 100644 index 5f7833109..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/devices.go +++ /dev/null @@ -1,78 +0,0 @@ -// +build linux - -package fs - -import ( - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/system" -) - -type DevicesGroup struct { -} - -func (s *DevicesGroup) Name() string { - return "devices" -} - -func (s *DevicesGroup) Apply(d *cgroupData) error { - _, err := d.join("devices") - if err != nil { - // We will return error even it's `not found` error, devices - // cgroup is hard requirement for container's security. 
- return err - } - return nil -} - -func (s *DevicesGroup) Set(path string, cgroup *configs.Cgroup) error { - if system.RunningInUserNS() { - return nil - } - - devices := cgroup.Resources.Devices - if len(devices) > 0 { - for _, dev := range devices { - file := "devices.deny" - if dev.Allow { - file = "devices.allow" - } - if err := writeFile(path, file, dev.CgroupString()); err != nil { - return err - } - } - return nil - } - if !cgroup.Resources.AllowAllDevices { - if err := writeFile(path, "devices.deny", "a"); err != nil { - return err - } - - for _, dev := range cgroup.Resources.AllowedDevices { - if err := writeFile(path, "devices.allow", dev.CgroupString()); err != nil { - return err - } - } - return nil - } - - if err := writeFile(path, "devices.allow", "a"); err != nil { - return err - } - - for _, dev := range cgroup.Resources.DeniedDevices { - if err := writeFile(path, "devices.deny", dev.CgroupString()); err != nil { - return err - } - } - - return nil -} - -func (s *DevicesGroup) Remove(d *cgroupData) error { - return removePath(d.path("devices")) -} - -func (s *DevicesGroup) GetStats(path string, stats *cgroups.Stats) error { - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go deleted file mode 100644 index e70dfe3b9..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/freezer.go +++ /dev/null @@ -1,61 +0,0 @@ -// +build linux - -package fs - -import ( - "fmt" - "strings" - "time" - - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" -) - -type FreezerGroup struct { -} - -func (s *FreezerGroup) Name() string { - return "freezer" -} - -func (s *FreezerGroup) Apply(d *cgroupData) error { - _, err := d.join("freezer") - if err != nil && !cgroups.IsNotFound(err) { - return err - } - return nil -} - -func (s *FreezerGroup) Set(path string, cgroup *configs.Cgroup) error { - switch cgroup.Resources.Freezer { - case configs.Frozen, configs.Thawed: - if err := writeFile(path, "freezer.state", string(cgroup.Resources.Freezer)); err != nil { - return err - } - - for { - state, err := readFile(path, "freezer.state") - if err != nil { - return err - } - if strings.TrimSpace(state) == string(cgroup.Resources.Freezer) { - break - } - time.Sleep(1 * time.Millisecond) - } - case configs.Undefined: - return nil - default: - return fmt.Errorf("Invalid argument '%s' to freezer.state", string(cgroup.Resources.Freezer)) - } - - return nil -} - -func (s *FreezerGroup) Remove(d *cgroupData) error { - return removePath(d.path("freezer")) -} - -func (s *FreezerGroup) GetStats(path string, stats *cgroups.Stats) error { - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go deleted file mode 100644 index 3ef9e0315..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/fs_unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !linux - -package fs diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go deleted file mode 100644 index 2f9727719..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/hugetlb.go +++ /dev/null @@ -1,71 +0,0 @@ -// +build linux - -package fs - -import ( - "fmt" - "strconv" - "strings" - 
- "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" -) - -type HugetlbGroup struct { -} - -func (s *HugetlbGroup) Name() string { - return "hugetlb" -} - -func (s *HugetlbGroup) Apply(d *cgroupData) error { - _, err := d.join("hugetlb") - if err != nil && !cgroups.IsNotFound(err) { - return err - } - return nil -} - -func (s *HugetlbGroup) Set(path string, cgroup *configs.Cgroup) error { - for _, hugetlb := range cgroup.Resources.HugetlbLimit { - if err := writeFile(path, strings.Join([]string{"hugetlb", hugetlb.Pagesize, "limit_in_bytes"}, "."), strconv.FormatUint(hugetlb.Limit, 10)); err != nil { - return err - } - } - - return nil -} - -func (s *HugetlbGroup) Remove(d *cgroupData) error { - return removePath(d.path("hugetlb")) -} - -func (s *HugetlbGroup) GetStats(path string, stats *cgroups.Stats) error { - hugetlbStats := cgroups.HugetlbStats{} - for _, pageSize := range HugePageSizes { - usage := strings.Join([]string{"hugetlb", pageSize, "usage_in_bytes"}, ".") - value, err := getCgroupParamUint(path, usage) - if err != nil { - return fmt.Errorf("failed to parse %s - %v", usage, err) - } - hugetlbStats.Usage = value - - maxUsage := strings.Join([]string{"hugetlb", pageSize, "max_usage_in_bytes"}, ".") - value, err = getCgroupParamUint(path, maxUsage) - if err != nil { - return fmt.Errorf("failed to parse %s - %v", maxUsage, err) - } - hugetlbStats.MaxUsage = value - - failcnt := strings.Join([]string{"hugetlb", pageSize, "failcnt"}, ".") - value, err = getCgroupParamUint(path, failcnt) - if err != nil { - return fmt.Errorf("failed to parse %s - %v", failcnt, err) - } - hugetlbStats.Failcnt = value - - stats.HugetlbStats[pageSize] = hugetlbStats - } - - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go deleted file mode 100644 index b83712829..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/memory.go +++ /dev/null @@ -1,291 +0,0 @@ -// +build linux - -package fs - -import ( - "bufio" - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" -) - -type MemoryGroup struct { -} - -func (s *MemoryGroup) Name() string { - return "memory" -} - -func (s *MemoryGroup) Apply(d *cgroupData) (err error) { - path, err := d.path("memory") - if err != nil && !cgroups.IsNotFound(err) { - return err - } - // reset error. - err = nil - if path == "" { - // Invalid input. - return fmt.Errorf("invalid path for memory cgroups: %+v", d) - } - defer func() { - if err != nil { - os.RemoveAll(path) - } - }() - if !cgroups.PathExists(path) { - if err = os.MkdirAll(path, 0755); err != nil { - return err - } - } - if memoryAssigned(d.config) { - // We have to set kernel memory here, as we can't change it once - // processes have been attached to the cgroup. - if err = s.SetKernelMemory(path, d.config); err != nil { - return err - } - } - // We need to join memory cgroup after set memory limits, because - // kmem.limit_in_bytes can only be set when the cgroup is empty. 
- if _, jerr := d.join("memory"); jerr != nil && !cgroups.IsNotFound(jerr) { - err = jerr - return err - } - return nil -} - -func getModifyTime(path string) (time.Time, error) { - stat, err := os.Stat(path) - if err != nil { - return time.Time{}, fmt.Errorf("failed to get memory cgroups creation time: %v", err) - } - return stat.ModTime(), nil -} - -func (s *MemoryGroup) SetKernelMemory(path string, cgroup *configs.Cgroup) error { - // This has to be done separately because it has special - // constraints (it can only be initialized before setting up a - // hierarchy or adding a task to the cgroups. However, if - // successfully initialized, it can be updated anytime afterwards) - if cgroup.Resources.KernelMemory != 0 { - // Is kmem.limit_in_bytes already set? - // memory.kmem.max_usage_in_bytes is a read-only file. Use it to get cgroups creation time. - kmemCreationTime, err := getModifyTime(filepath.Join(path, "memory.kmem.max_usage_in_bytes")) - if err != nil { - return err - } - kmemLimitsUpdateTime, err := getModifyTime(filepath.Join(path, "memory.kmem.limit_in_bytes")) - if err != nil { - return err - } - // kmem.limit_in_bytes has already been set if its update time is after that of creation time. - // We use `!=` op instead of `>` because updates are losing precision compared to creation. - kmemInitialized := !kmemLimitsUpdateTime.Equal(kmemCreationTime) - if !kmemInitialized { - // If there are already tasks in the cgroup, we can't change the limit either - tasks, err := getCgroupParamString(path, "tasks") - if err != nil { - return err - } - if tasks != "" { - return fmt.Errorf("cannot set kmem.limit_in_bytes after task have joined this cgroup") - } - } - if err := writeFile(path, "memory.kmem.limit_in_bytes", strconv.FormatInt(cgroup.Resources.KernelMemory, 10)); err != nil { - return err - } - } - return nil -} - -func setMemoryAndSwap(path string, cgroup *configs.Cgroup) error { - // When memory and swap memory are both set, we need to handle the cases - // for updating the container. - if cgroup.Resources.Memory != 0 && cgroup.Resources.MemorySwap > 0 { - memoryUsage, err := getMemoryData(path, "") - if err != nil { - return err - } - - // When updating the memory limit, we should adapt the write sequence - // for memory and swap memory, so it won't fail because the new - // value and the old value don't fit the kernel's validation.
- if memoryUsage.Limit < uint64(cgroup.Resources.MemorySwap) { - if err := writeFile(path, "memory.memsw.limit_in_bytes", strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil { - return err - } - if err := writeFile(path, "memory.limit_in_bytes", strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil { - return err - } - } else { - if err := writeFile(path, "memory.limit_in_bytes", strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil { - return err - } - if err := writeFile(path, "memory.memsw.limit_in_bytes", strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil { - return err - } - } - } else { - if cgroup.Resources.Memory != 0 { - if err := writeFile(path, "memory.limit_in_bytes", strconv.FormatInt(cgroup.Resources.Memory, 10)); err != nil { - return err - } - } - if cgroup.Resources.MemorySwap > 0 { - if err := writeFile(path, "memory.memsw.limit_in_bytes", strconv.FormatInt(cgroup.Resources.MemorySwap, 10)); err != nil { - return err - } - } - } - - return nil -} - -func (s *MemoryGroup) Set(path string, cgroup *configs.Cgroup) error { - if err := setMemoryAndSwap(path, cgroup); err != nil { - return err - } - - if err := s.SetKernelMemory(path, cgroup); err != nil { - return err - } - - if cgroup.Resources.MemoryReservation != 0 { - if err := writeFile(path, "memory.soft_limit_in_bytes", strconv.FormatInt(cgroup.Resources.MemoryReservation, 10)); err != nil { - return err - } - } - if cgroup.Resources.KernelMemoryTCP != 0 { - if err := writeFile(path, "memory.kmem.tcp.limit_in_bytes", strconv.FormatInt(cgroup.Resources.KernelMemoryTCP, 10)); err != nil { - return err - } - } - if cgroup.Resources.OomKillDisable { - if err := writeFile(path, "memory.oom_control", "1"); err != nil { - return err - } - } - if cgroup.Resources.MemorySwappiness == nil || int64(*cgroup.Resources.MemorySwappiness) == -1 { - return nil - } else if int64(*cgroup.Resources.MemorySwappiness) >= 0 && int64(*cgroup.Resources.MemorySwappiness) <= 100 { - if err := writeFile(path, "memory.swappiness", strconv.FormatInt(*cgroup.Resources.MemorySwappiness, 10)); err != nil { - return err - } - } else { - return fmt.Errorf("invalid value:%d. valid memory swappiness range is 0-100", int64(*cgroup.Resources.MemorySwappiness)) - } - - return nil -} - -func (s *MemoryGroup) Remove(d *cgroupData) error { - return removePath(d.path("memory")) -} - -func (s *MemoryGroup) GetStats(path string, stats *cgroups.Stats) error { - // Set stats from memory.stat. 
- statsFile, err := os.Open(filepath.Join(path, "memory.stat")) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - defer statsFile.Close() - - sc := bufio.NewScanner(statsFile) - for sc.Scan() { - t, v, err := getCgroupParamKeyValue(sc.Text()) - if err != nil { - return fmt.Errorf("failed to parse memory.stat (%q) - %v", sc.Text(), err) - } - stats.MemoryStats.Stats[t] = v - } - stats.MemoryStats.Cache = stats.MemoryStats.Stats["cache"] - - memoryUsage, err := getMemoryData(path, "") - if err != nil { - return err - } - stats.MemoryStats.Usage = memoryUsage - swapUsage, err := getMemoryData(path, "memsw") - if err != nil { - return err - } - stats.MemoryStats.SwapUsage = swapUsage - kernelUsage, err := getMemoryData(path, "kmem") - if err != nil { - return err - } - stats.MemoryStats.KernelUsage = kernelUsage - kernelTCPUsage, err := getMemoryData(path, "kmem.tcp") - if err != nil { - return err - } - stats.MemoryStats.KernelTCPUsage = kernelTCPUsage - - return nil -} - -func memoryAssigned(cgroup *configs.Cgroup) bool { - return cgroup.Resources.Memory != 0 || - cgroup.Resources.MemoryReservation != 0 || - cgroup.Resources.MemorySwap > 0 || - cgroup.Resources.KernelMemory > 0 || - cgroup.Resources.KernelMemoryTCP > 0 || - cgroup.Resources.OomKillDisable || - (cgroup.Resources.MemorySwappiness != nil && *cgroup.Resources.MemorySwappiness != -1) -} - -func getMemoryData(path, name string) (cgroups.MemoryData, error) { - memoryData := cgroups.MemoryData{} - - moduleName := "memory" - if name != "" { - moduleName = strings.Join([]string{"memory", name}, ".") - } - usage := strings.Join([]string{moduleName, "usage_in_bytes"}, ".") - maxUsage := strings.Join([]string{moduleName, "max_usage_in_bytes"}, ".") - failcnt := strings.Join([]string{moduleName, "failcnt"}, ".") - limit := strings.Join([]string{moduleName, "limit_in_bytes"}, ".") - - value, err := getCgroupParamUint(path, usage) - if err != nil { - if moduleName != "memory" && os.IsNotExist(err) { - return cgroups.MemoryData{}, nil - } - return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", usage, err) - } - memoryData.Usage = value - value, err = getCgroupParamUint(path, maxUsage) - if err != nil { - if moduleName != "memory" && os.IsNotExist(err) { - return cgroups.MemoryData{}, nil - } - return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", maxUsage, err) - } - memoryData.MaxUsage = value - value, err = getCgroupParamUint(path, failcnt) - if err != nil { - if moduleName != "memory" && os.IsNotExist(err) { - return cgroups.MemoryData{}, nil - } - return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", failcnt, err) - } - memoryData.Failcnt = value - value, err = getCgroupParamUint(path, limit) - if err != nil { - if moduleName != "memory" && os.IsNotExist(err) { - return cgroups.MemoryData{}, nil - } - return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", limit, err) - } - memoryData.Limit = value - - return memoryData, nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go deleted file mode 100644 index d8cf1d87c..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/name.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build linux - -package fs - -import ( - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" -) - -type NameGroup struct { - GroupName string - Join bool -} - -func 
(s *NameGroup) Name() string { - return s.GroupName -} - -func (s *NameGroup) Apply(d *cgroupData) error { - if s.Join { - // ignore errors if the named cgroup does not exist - d.join(s.GroupName) - } - return nil -} - -func (s *NameGroup) Set(path string, cgroup *configs.Cgroup) error { - return nil -} - -func (s *NameGroup) Remove(d *cgroupData) error { - if s.Join { - removePath(d.path(s.GroupName)) - } - return nil -} - -func (s *NameGroup) GetStats(path string, stats *cgroups.Stats) error { - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go deleted file mode 100644 index 8a4054ba8..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_cls.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build linux - -package fs - -import ( - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" -) - -type NetClsGroup struct { -} - -func (s *NetClsGroup) Name() string { - return "net_cls" -} - -func (s *NetClsGroup) Apply(d *cgroupData) error { - _, err := d.join("net_cls") - if err != nil && !cgroups.IsNotFound(err) { - return err - } - return nil -} - -func (s *NetClsGroup) Set(path string, cgroup *configs.Cgroup) error { - if cgroup.Resources.NetClsClassid != "" { - if err := writeFile(path, "net_cls.classid", cgroup.Resources.NetClsClassid); err != nil { - return err - } - } - - return nil -} - -func (s *NetClsGroup) Remove(d *cgroupData) error { - return removePath(d.path("net_cls")) -} - -func (s *NetClsGroup) GetStats(path string, stats *cgroups.Stats) error { - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go deleted file mode 100644 index d0ab2af89..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/net_prio.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build linux - -package fs - -import ( - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" -) - -type NetPrioGroup struct { -} - -func (s *NetPrioGroup) Name() string { - return "net_prio" -} - -func (s *NetPrioGroup) Apply(d *cgroupData) error { - _, err := d.join("net_prio") - if err != nil && !cgroups.IsNotFound(err) { - return err - } - return nil -} - -func (s *NetPrioGroup) Set(path string, cgroup *configs.Cgroup) error { - for _, prioMap := range cgroup.Resources.NetPrioIfpriomap { - if err := writeFile(path, "net_prio.ifpriomap", prioMap.CgroupString()); err != nil { - return err - } - } - - return nil -} - -func (s *NetPrioGroup) Remove(d *cgroupData) error { - return removePath(d.path("net_prio")) -} - -func (s *NetPrioGroup) GetStats(path string, stats *cgroups.Stats) error { - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go deleted file mode 100644 index 5693676d3..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/perf_event.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build linux - -package fs - -import ( - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" -) - -type PerfEventGroup struct { -} - -func (s *PerfEventGroup) Name() string { - return "perf_event" -} - -func (s *PerfEventGroup) Apply(d *cgroupData) error { - 
// we just want to join this group even though we don't set anything - if _, err := d.join("perf_event"); err != nil && !cgroups.IsNotFound(err) { - return err - } - return nil -} - -func (s *PerfEventGroup) Set(path string, cgroup *configs.Cgroup) error { - return nil -} - -func (s *PerfEventGroup) Remove(d *cgroupData) error { - return removePath(d.path("perf_event")) -} - -func (s *PerfEventGroup) GetStats(path string, stats *cgroups.Stats) error { - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go deleted file mode 100644 index f1e372055..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/pids.go +++ /dev/null @@ -1,73 +0,0 @@ -// +build linux - -package fs - -import ( - "fmt" - "path/filepath" - "strconv" - - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/configs" -) - -type PidsGroup struct { -} - -func (s *PidsGroup) Name() string { - return "pids" -} - -func (s *PidsGroup) Apply(d *cgroupData) error { - _, err := d.join("pids") - if err != nil && !cgroups.IsNotFound(err) { - return err - } - return nil -} - -func (s *PidsGroup) Set(path string, cgroup *configs.Cgroup) error { - if cgroup.Resources.PidsLimit != 0 { - // "max" is the fallback value. - limit := "max" - - if cgroup.Resources.PidsLimit > 0 { - limit = strconv.FormatInt(cgroup.Resources.PidsLimit, 10) - } - - if err := writeFile(path, "pids.max", limit); err != nil { - return err - } - } - - return nil -} - -func (s *PidsGroup) Remove(d *cgroupData) error { - return removePath(d.path("pids")) -} - -func (s *PidsGroup) GetStats(path string, stats *cgroups.Stats) error { - current, err := getCgroupParamUint(path, "pids.current") - if err != nil { - return fmt.Errorf("failed to parse pids.current - %s", err) - } - - maxString, err := getCgroupParamString(path, "pids.max") - if err != nil { - return fmt.Errorf("failed to parse pids.max - %s", err) - } - - // Default if pids.max == "max" is 0 -- which represents "no limit". - var max uint64 - if maxString != "max" { - max, err = parseUint(maxString, 10, 64) - if err != nil { - return fmt.Errorf("failed to parse pids.max - unable to parse %q as a uint from Cgroup file %q", maxString, filepath.Join(path, "pids.max")) - } - } - - stats.PidsStats.Current = current - stats.PidsStats.Limit = max - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go deleted file mode 100644 index 5ff0a1615..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/utils.go +++ /dev/null @@ -1,78 +0,0 @@ -// +build linux - -package fs - -import ( - "errors" - "fmt" - "io/ioutil" - "path/filepath" - "strconv" - "strings" -) - -var ( - ErrNotValidFormat = errors.New("line is not a valid key value format") -) - -// Saturates negative values at zero and returns a uint64. -// Due to kernel bugs, some of the memory cgroup stats can be negative. -func parseUint(s string, base, bitSize int) (uint64, error) { - value, err := strconv.ParseUint(s, base, bitSize) - if err != nil { - intValue, intErr := strconv.ParseInt(s, base, bitSize) - // 1. Handle negative values greater than MinInt64 (and) - // 2. 
Handle negative values lesser than MinInt64 - if intErr == nil && intValue < 0 { - return 0, nil - } else if intErr != nil && intErr.(*strconv.NumError).Err == strconv.ErrRange && intValue < 0 { - return 0, nil - } - - return value, err - } - - return value, nil -} - -// Parses a cgroup param and returns as name, value -// i.e. "io_service_bytes 1234" will return as io_service_bytes, 1234 -func getCgroupParamKeyValue(t string) (string, uint64, error) { - parts := strings.Fields(t) - switch len(parts) { - case 2: - value, err := parseUint(parts[1], 10, 64) - if err != nil { - return "", 0, fmt.Errorf("unable to convert param value (%q) to uint64: %v", parts[1], err) - } - - return parts[0], value, nil - default: - return "", 0, ErrNotValidFormat - } -} - -// Gets a single uint64 value from the specified cgroup file. -func getCgroupParamUint(cgroupPath, cgroupFile string) (uint64, error) { - fileName := filepath.Join(cgroupPath, cgroupFile) - contents, err := ioutil.ReadFile(fileName) - if err != nil { - return 0, err - } - - res, err := parseUint(strings.TrimSpace(string(contents)), 10, 64) - if err != nil { - return res, fmt.Errorf("unable to parse %q as a uint from Cgroup file %q", string(contents), fileName) - } - return res, nil -} - -// Gets a string value from the specified cgroup file -func getCgroupParamString(cgroupPath, cgroupFile string) (string, error) { - contents, err := ioutil.ReadFile(filepath.Join(cgroupPath, cgroupFile)) - if err != nil { - return "", err - } - - return strings.TrimSpace(string(contents)), nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go deleted file mode 100644 index b483f1bf9..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go +++ /dev/null @@ -1,106 +0,0 @@ -// +build linux - -package cgroups - -type ThrottlingData struct { - // Number of periods with throttling active - Periods uint64 `json:"periods,omitempty"` - // Number of periods when the container hit its throttling limit. - ThrottledPeriods uint64 `json:"throttled_periods,omitempty"` - // Aggregate time the container was throttled for in nanoseconds. - ThrottledTime uint64 `json:"throttled_time,omitempty"` -} - -// CpuUsage denotes the usage of a CPU. -// All CPU stats are aggregate since container inception. -type CpuUsage struct { - // Total CPU time consumed. - // Units: nanoseconds. - TotalUsage uint64 `json:"total_usage,omitempty"` - // Total CPU time consumed per core. - // Units: nanoseconds. - PercpuUsage []uint64 `json:"percpu_usage,omitempty"` - // Time spent by tasks of the cgroup in kernel mode. - // Units: nanoseconds. - UsageInKernelmode uint64 `json:"usage_in_kernelmode"` - // Time spent by tasks of the cgroup in user mode. - // Units: nanoseconds. 
- UsageInUsermode uint64 `json:"usage_in_usermode"` -} - -type CpuStats struct { - CpuUsage CpuUsage `json:"cpu_usage,omitempty"` - ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` -} - -type MemoryData struct { - Usage uint64 `json:"usage,omitempty"` - MaxUsage uint64 `json:"max_usage,omitempty"` - Failcnt uint64 `json:"failcnt"` - Limit uint64 `json:"limit"` -} - -type MemoryStats struct { - // memory used for cache - Cache uint64 `json:"cache,omitempty"` - // usage of memory - Usage MemoryData `json:"usage,omitempty"` - // usage of memory + swap - SwapUsage MemoryData `json:"swap_usage,omitempty"` - // usage of kernel memory - KernelUsage MemoryData `json:"kernel_usage,omitempty"` - // usage of kernel TCP memory - KernelTCPUsage MemoryData `json:"kernel_tcp_usage,omitempty"` - - Stats map[string]uint64 `json:"stats,omitempty"` -} - -type PidsStats struct { - // number of pids in the cgroup - Current uint64 `json:"current,omitempty"` - // active pids hard limit - Limit uint64 `json:"limit,omitempty"` -} - -type BlkioStatEntry struct { - Major uint64 `json:"major,omitempty"` - Minor uint64 `json:"minor,omitempty"` - Op string `json:"op,omitempty"` - Value uint64 `json:"value,omitempty"` -} - -type BlkioStats struct { - // number of bytes tranferred to and from the block device - IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"` - IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive,omitempty"` - IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive,omitempty"` - IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive,omitempty"` - IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive,omitempty"` - IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive,omitempty"` - IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive,omitempty"` - SectorsRecursive []BlkioStatEntry `json:"sectors_recursive,omitempty"` -} - -type HugetlbStats struct { - // current res_counter usage for hugetlb - Usage uint64 `json:"usage,omitempty"` - // maximum usage ever recorded. - MaxUsage uint64 `json:"max_usage,omitempty"` - // number of times hugetlb usage allocation failure. 
- Failcnt uint64 `json:"failcnt"` -} - -type Stats struct { - CpuStats CpuStats `json:"cpu_stats,omitempty"` - MemoryStats MemoryStats `json:"memory_stats,omitempty"` - PidsStats PidsStats `json:"pids_stats,omitempty"` - BlkioStats BlkioStats `json:"blkio_stats,omitempty"` - // the map is in the format "size of hugepage: stats of the hugepage" - HugetlbStats map[string]HugetlbStats `json:"hugetlb_stats,omitempty"` -} - -func NewStats() *Stats { - memoryStats := MemoryStats{Stats: make(map[string]uint64)} - hugetlbStats := make(map[string]HugetlbStats) - return &Stats{MemoryStats: memoryStats, HugetlbStats: hugetlbStats} -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go deleted file mode 100644 index 1a7c4e1a0..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go +++ /dev/null @@ -1,413 +0,0 @@ -// +build linux - -package cgroups - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/docker/go-units" -) - -const cgroupNamePrefix = "name=" - -// https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt -func FindCgroupMountpoint(subsystem string) (string, error) { - // We are not using mount.GetMounts() because it's super-inefficient, - // parsing it directly sped up x10 times because of not using Sscanf. - // It was one of two major performance drawbacks in container start. - if !isSubsystemAvailable(subsystem) { - return "", NewNotFoundError(subsystem) - } - f, err := os.Open("/proc/self/mountinfo") - if err != nil { - return "", err - } - defer f.Close() - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - txt := scanner.Text() - fields := strings.Split(txt, " ") - for _, opt := range strings.Split(fields[len(fields)-1], ",") { - if opt == subsystem { - return fields[4], nil - } - } - } - if err := scanner.Err(); err != nil { - return "", err - } - - return "", NewNotFoundError(subsystem) -} - -func FindCgroupMountpointAndRoot(subsystem string) (string, string, error) { - if !isSubsystemAvailable(subsystem) { - return "", "", NewNotFoundError(subsystem) - } - f, err := os.Open("/proc/self/mountinfo") - if err != nil { - return "", "", err - } - defer f.Close() - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - txt := scanner.Text() - fields := strings.Split(txt, " ") - for _, opt := range strings.Split(fields[len(fields)-1], ",") { - if opt == subsystem { - return fields[4], fields[3], nil - } - } - } - if err := scanner.Err(); err != nil { - return "", "", err - } - - return "", "", NewNotFoundError(subsystem) -} - -func isSubsystemAvailable(subsystem string) bool { - cgroups, err := ParseCgroupFile("/proc/self/cgroup") - if err != nil { - return false - } - _, avail := cgroups[subsystem] - return avail -} - -func FindCgroupMountpointDir() (string, error) { - f, err := os.Open("/proc/self/mountinfo") - if err != nil { - return "", err - } - defer f.Close() - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - text := scanner.Text() - fields := strings.Split(text, " ") - // Safe as mountinfo encodes mountpoints with spaces as \040. 
- index := strings.Index(text, " - ") - postSeparatorFields := strings.Fields(text[index+3:]) - numPostFields := len(postSeparatorFields) - - // This is an error as we can't detect if the mount is for "cgroup" - if numPostFields == 0 { - return "", fmt.Errorf("Found no fields post '-' in %q", text) - } - - if postSeparatorFields[0] == "cgroup" { - // Check that the mount is properly formated. - if numPostFields < 3 { - return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text) - } - - return filepath.Dir(fields[4]), nil - } - } - if err := scanner.Err(); err != nil { - return "", err - } - - return "", NewNotFoundError("cgroup") -} - -type Mount struct { - Mountpoint string - Root string - Subsystems []string -} - -func (m Mount) GetThisCgroupDir(cgroups map[string]string) (string, error) { - if len(m.Subsystems) == 0 { - return "", fmt.Errorf("no subsystem for mount") - } - - return getControllerPath(m.Subsystems[0], cgroups) -} - -func getCgroupMountsHelper(ss map[string]bool, mi io.Reader) ([]Mount, error) { - res := make([]Mount, 0, len(ss)) - scanner := bufio.NewScanner(mi) - numFound := 0 - for scanner.Scan() && numFound < len(ss) { - txt := scanner.Text() - sepIdx := strings.Index(txt, " - ") - if sepIdx == -1 { - return nil, fmt.Errorf("invalid mountinfo format") - } - if txt[sepIdx+3:sepIdx+9] != "cgroup" { - continue - } - fields := strings.Split(txt, " ") - m := Mount{ - Mountpoint: fields[4], - Root: fields[3], - } - for _, opt := range strings.Split(fields[len(fields)-1], ",") { - if !ss[opt] { - continue - } - if strings.HasPrefix(opt, cgroupNamePrefix) { - m.Subsystems = append(m.Subsystems, opt[len(cgroupNamePrefix):]) - } else { - m.Subsystems = append(m.Subsystems, opt) - } - numFound++ - } - res = append(res, m) - } - if err := scanner.Err(); err != nil { - return nil, err - } - return res, nil -} - -func GetCgroupMounts() ([]Mount, error) { - f, err := os.Open("/proc/self/mountinfo") - if err != nil { - return nil, err - } - defer f.Close() - - all, err := ParseCgroupFile("/proc/self/cgroup") - if err != nil { - return nil, err - } - - allMap := make(map[string]bool) - for s := range all { - allMap[s] = true - } - return getCgroupMountsHelper(allMap, f) -} - -// GetAllSubsystems returns all the cgroup subsystems supported by the kernel -func GetAllSubsystems() ([]string, error) { - f, err := os.Open("/proc/cgroups") - if err != nil { - return nil, err - } - defer f.Close() - - subsystems := []string{} - - s := bufio.NewScanner(f) - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - text := s.Text() - if text[0] != '#' { - parts := strings.Fields(text) - if len(parts) >= 4 && parts[3] != "0" { - subsystems = append(subsystems, parts[0]) - } - } - } - return subsystems, nil -} - -// GetThisCgroupDir returns the relative path to the cgroup docker is running in. 
-func GetThisCgroupDir(subsystem string) (string, error) { - cgroups, err := ParseCgroupFile("/proc/self/cgroup") - if err != nil { - return "", err - } - - return getControllerPath(subsystem, cgroups) -} - -func GetInitCgroupDir(subsystem string) (string, error) { - - cgroups, err := ParseCgroupFile("/proc/1/cgroup") - if err != nil { - return "", err - } - - return getControllerPath(subsystem, cgroups) -} - -func readProcsFile(dir string) ([]int, error) { - f, err := os.Open(filepath.Join(dir, "cgroup.procs")) - if err != nil { - return nil, err - } - defer f.Close() - - var ( - s = bufio.NewScanner(f) - out = []int{} - ) - - for s.Scan() { - if t := s.Text(); t != "" { - pid, err := strconv.Atoi(t) - if err != nil { - return nil, err - } - out = append(out, pid) - } - } - return out, nil -} - -// ParseCgroupFile parses the given cgroup file, typically from -// /proc//cgroup, into a map of subgroups to cgroup names. -func ParseCgroupFile(path string) (map[string]string, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - return parseCgroupFromReader(f) -} - -// helper function for ParseCgroupFile to make testing easier -func parseCgroupFromReader(r io.Reader) (map[string]string, error) { - s := bufio.NewScanner(r) - cgroups := make(map[string]string) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - text := s.Text() - // from cgroups(7): - // /proc/[pid]/cgroup - // ... - // For each cgroup hierarchy ... there is one entry - // containing three colon-separated fields of the form: - // hierarchy-ID:subsystem-list:cgroup-path - parts := strings.SplitN(text, ":", 3) - if len(parts) < 3 { - return nil, fmt.Errorf("invalid cgroup entry: must contain at least two colons: %v", text) - } - - for _, subs := range strings.Split(parts[1], ",") { - cgroups[subs] = parts[2] - } - } - return cgroups, nil -} - -func getControllerPath(subsystem string, cgroups map[string]string) (string, error) { - - if p, ok := cgroups[subsystem]; ok { - return p, nil - } - - if p, ok := cgroups[cgroupNamePrefix+subsystem]; ok { - return p, nil - } - - return "", NewNotFoundError(subsystem) -} - -func PathExists(path string) bool { - if _, err := os.Stat(path); err != nil { - return false - } - return true -} - -func EnterPid(cgroupPaths map[string]string, pid int) error { - for _, path := range cgroupPaths { - if PathExists(path) { - if err := ioutil.WriteFile(filepath.Join(path, "cgroup.procs"), - []byte(strconv.Itoa(pid)), 0700); err != nil { - return err - } - } - } - return nil -} - -// RemovePaths iterates over the provided paths removing them. -// We trying to remove all paths five times with increasing delay between tries. -// If after all there are not removed cgroups - appropriate error will be -// returned. 
-func RemovePaths(paths map[string]string) (err error) { - delay := 10 * time.Millisecond - for i := 0; i < 5; i++ { - if i != 0 { - time.Sleep(delay) - delay *= 2 - } - for s, p := range paths { - os.RemoveAll(p) - // TODO: here probably should be logging - _, err := os.Stat(p) - // We need this strange way of checking cgroups existence because - // RemoveAll almost always returns error, even on already removed - // cgroups - if os.IsNotExist(err) { - delete(paths, s) - } - } - if len(paths) == 0 { - return nil - } - } - return fmt.Errorf("Failed to remove paths: %v", paths) -} - -func GetHugePageSize() ([]string, error) { - var pageSizes []string - sizeList := []string{"B", "kB", "MB", "GB", "TB", "PB"} - files, err := ioutil.ReadDir("/sys/kernel/mm/hugepages") - if err != nil { - return pageSizes, err - } - for _, st := range files { - nameArray := strings.Split(st.Name(), "-") - pageSize, err := units.RAMInBytes(nameArray[1]) - if err != nil { - return []string{}, err - } - sizeString := units.CustomSize("%g%s", float64(pageSize), 1024.0, sizeList) - pageSizes = append(pageSizes, sizeString) - } - - return pageSizes, nil -} - -// GetPids returns all pids, that were added to cgroup at path. -func GetPids(path string) ([]int, error) { - return readProcsFile(path) -} - -// GetAllPids returns all pids, that were added to cgroup at path and to all its -// subcgroups. -func GetAllPids(path string) ([]int, error) { - var pids []int - // collect pids from all sub-cgroups - err := filepath.Walk(path, func(p string, info os.FileInfo, iErr error) error { - dir, file := filepath.Split(p) - if file != "cgroup.procs" { - return nil - } - if iErr != nil { - return iErr - } - cPids, err := readProcsFile(dir) - if err != nil { - return err - } - pids = append(pids, cPids...) 
- return nil - }) - return pids, err -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go deleted file mode 100644 index e0f3ca165..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go +++ /dev/null @@ -1,61 +0,0 @@ -package configs - -import "fmt" - -// blockIODevice holds major:minor format supported in blkio cgroup -type blockIODevice struct { - // Major is the device's major number - Major int64 `json:"major"` - // Minor is the device's minor number - Minor int64 `json:"minor"` -} - -// WeightDevice struct holds a `major:minor weight`|`major:minor leaf_weight` pair -type WeightDevice struct { - blockIODevice - // Weight is the bandwidth rate for the device, range is from 10 to 1000 - Weight uint16 `json:"weight"` - // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only - LeafWeight uint16 `json:"leafWeight"` -} - -// NewWeightDevice returns a configured WeightDevice pointer -func NewWeightDevice(major, minor int64, weight, leafWeight uint16) *WeightDevice { - wd := &WeightDevice{} - wd.Major = major - wd.Minor = minor - wd.Weight = weight - wd.LeafWeight = leafWeight - return wd -} - -// WeightString formats the struct to be writable to the cgroup specific file -func (wd *WeightDevice) WeightString() string { - return fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.Weight) -} - -// LeafWeightString formats the struct to be writable to the cgroup specific file -func (wd *WeightDevice) LeafWeightString() string { - return fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.LeafWeight) -} - -// ThrottleDevice struct holds a `major:minor rate_per_second` pair -type ThrottleDevice struct { - blockIODevice - // Rate is the IO rate limit per cgroup per device - Rate uint64 `json:"rate"` -} - -// NewThrottleDevice returns a configured ThrottleDevice pointer -func NewThrottleDevice(major, minor int64, rate uint64) *ThrottleDevice { - td := &ThrottleDevice{} - td.Major = major - td.Minor = minor - td.Rate = rate - return td -} - -// String formats the struct to be writable to the cgroup specific file -func (td *ThrottleDevice) String() string { - return fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go deleted file mode 100644 index f2eff91cf..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go +++ /dev/null @@ -1,124 +0,0 @@ -// +build linux freebsd - -package configs - -type FreezerState string - -const ( - Undefined FreezerState = "" - Frozen FreezerState = "FROZEN" - Thawed FreezerState = "THAWED" -) - -type Cgroup struct { - // Deprecated, use Path instead - Name string `json:"name,omitempty"` - - // name of parent of cgroup or slice - // Deprecated, use Path instead - Parent string `json:"parent,omitempty"` - - // Path specifies the path to cgroups that are created and/or joined by the container. - // The path is assumed to be relative to the host system cgroup mountpoint. - Path string `json:"path"` - - // ScopePrefix decribes prefix for the scope name - ScopePrefix string `json:"scope_prefix"` - - // Paths represent the absolute cgroups paths to join. - // This takes precedence over Path. 
- Paths map[string]string - - // Resources contains various cgroups settings to apply - *Resources -} - -type Resources struct { - // If this is true allow access to any kind of device within the container. If false, allow access only to devices explicitly listed in the allowed_devices list. - // Deprecated - AllowAllDevices bool `json:"allow_all_devices,omitempty"` - // Deprecated - AllowedDevices []*Device `json:"allowed_devices,omitempty"` - // Deprecated - DeniedDevices []*Device `json:"denied_devices,omitempty"` - - Devices []*Device `json:"devices"` - - // Memory limit (in bytes) - Memory int64 `json:"memory"` - - // Memory reservation or soft_limit (in bytes) - MemoryReservation int64 `json:"memory_reservation"` - - // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwap int64 `json:"memory_swap"` - - // Kernel memory limit (in bytes) - KernelMemory int64 `json:"kernel_memory"` - - // Kernel memory limit for TCP use (in bytes) - KernelMemoryTCP int64 `json:"kernel_memory_tcp"` - - // CPU shares (relative weight vs. other containers) - CpuShares int64 `json:"cpu_shares"` - - // CPU hardcap limit (in usecs). Allowed cpu time in a given period. - CpuQuota int64 `json:"cpu_quota"` - - // CPU period to be used for hardcapping (in usecs). 0 to use system default. - CpuPeriod int64 `json:"cpu_period"` - - // How many time CPU will use in realtime scheduling (in usecs). - CpuRtRuntime int64 `json:"cpu_quota"` - - // CPU period to be used for realtime scheduling (in usecs). - CpuRtPeriod int64 `json:"cpu_period"` - - // CPU to use - CpusetCpus string `json:"cpuset_cpus"` - - // MEM to use - CpusetMems string `json:"cpuset_mems"` - - // Process limit; set <= `0' to disable limit. - PidsLimit int64 `json:"pids_limit"` - - // Specifies per cgroup weight, range is from 10 to 1000. - BlkioWeight uint16 `json:"blkio_weight"` - - // Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only - BlkioLeafWeight uint16 `json:"blkio_leaf_weight"` - - // Weight per cgroup per device, can override BlkioWeight. - BlkioWeightDevice []*WeightDevice `json:"blkio_weight_device"` - - // IO read rate limit per cgroup per device, bytes per second. - BlkioThrottleReadBpsDevice []*ThrottleDevice `json:"blkio_throttle_read_bps_device"` - - // IO write rate limit per cgroup per divice, bytes per second. - BlkioThrottleWriteBpsDevice []*ThrottleDevice `json:"blkio_throttle_write_bps_device"` - - // IO read rate limit per cgroup per device, IO per second. - BlkioThrottleReadIOPSDevice []*ThrottleDevice `json:"blkio_throttle_read_iops_device"` - - // IO write rate limit per cgroup per device, IO per second. 
- BlkioThrottleWriteIOPSDevice []*ThrottleDevice `json:"blkio_throttle_write_iops_device"` - - // set the freeze value for the process - Freezer FreezerState `json:"freezer"` - - // Hugetlb limit (in bytes) - HugetlbLimit []*HugepageLimit `json:"hugetlb_limit"` - - // Whether to disable OOM Killer - OomKillDisable bool `json:"oom_kill_disable"` - - // Tuning swappiness behaviour per cgroup - MemorySwappiness *int64 `json:"memory_swappiness"` - - // Set priority of network traffic for container - NetPrioIfpriomap []*IfPrioMap `json:"net_prio_ifpriomap"` - - // Set class identifier for container's network packets - NetClsClassid string `json:"net_cls_classid"` -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go deleted file mode 100644 index 95e2830a4..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !windows,!linux,!freebsd - -package configs - -type Cgroup struct { -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go deleted file mode 100644 index d74847b0d..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package configs - -// TODO Windows: This can ultimately be entirely factored out on Windows as -// cgroups are a Unix-specific construct. -type Cgroup struct { -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go deleted file mode 100644 index 806e0be96..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go +++ /dev/null @@ -1,332 +0,0 @@ -package configs - -import ( - "bytes" - "encoding/json" - "fmt" - "os/exec" - "time" - - "github.com/Sirupsen/logrus" -) - -type Rlimit struct { - Type int `json:"type"` - Hard uint64 `json:"hard"` - Soft uint64 `json:"soft"` -} - -// IDMap represents UID/GID Mappings for User Namespaces. -type IDMap struct { - ContainerID int `json:"container_id"` - HostID int `json:"host_id"` - Size int `json:"size"` -} - -// Seccomp represents syscall restrictions -// By default, only the native architecture of the kernel is allowed to be used -// for syscalls. Additional architectures can be added by specifying them in -// Architectures. -type Seccomp struct { - DefaultAction Action `json:"default_action"` - Architectures []string `json:"architectures"` - Syscalls []*Syscall `json:"syscalls"` -} - -// Action is taken upon rule match in Seccomp -type Action int - -const ( - Kill Action = iota + 1 - Errno - Trap - Allow - Trace -) - -// Operator is a comparison operator to be used when matching syscall arguments in Seccomp -type Operator int - -const ( - EqualTo Operator = iota + 1 - NotEqualTo - GreaterThan - GreaterThanOrEqualTo - LessThan - LessThanOrEqualTo - MaskEqualTo -) - -// Arg is a rule to match a specific syscall argument in Seccomp -type Arg struct { - Index uint `json:"index"` - Value uint64 `json:"value"` - ValueTwo uint64 `json:"value_two"` - Op Operator `json:"op"` -} - -// Syscall is a rule to match a syscall in Seccomp -type Syscall struct { - Name string `json:"name"` - Action Action `json:"action"` - Args []*Arg `json:"args"` -} - -// TODO Windows. 
Many of these fields should be factored out into those parts -// which are common across platforms, and those which are platform specific. - -// Config defines configuration options for executing a process inside a contained environment. -type Config struct { - // NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs - // This is a common option when the container is running in ramdisk - NoPivotRoot bool `json:"no_pivot_root"` - - // ParentDeathSignal specifies the signal that is sent to the container's process in the case - // that the parent process dies. - ParentDeathSignal int `json:"parent_death_signal"` - - // PivotDir allows a custom directory inside the container's root filesystem to be used as pivot, when NoPivotRoot is not set. - // When a custom PivotDir not set, a temporary dir inside the root filesystem will be used. The pivot dir needs to be writeable. - // This is required when using read only root filesystems. In these cases, a read/writeable path can be (bind) mounted somewhere inside the root filesystem to act as pivot. - PivotDir string `json:"pivot_dir"` - - // Path to a directory containing the container's root filesystem. - Rootfs string `json:"rootfs"` - - // Readonlyfs will remount the container's rootfs as readonly where only externally mounted - // bind mounts are writtable. - Readonlyfs bool `json:"readonlyfs"` - - // Specifies the mount propagation flags to be applied to /. - RootPropagation int `json:"rootPropagation"` - - // Mounts specify additional source and destination paths that will be mounted inside the container's - // rootfs and mount namespace if specified - Mounts []*Mount `json:"mounts"` - - // The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well! - Devices []*Device `json:"devices"` - - MountLabel string `json:"mount_label"` - - // Hostname optionally sets the container's hostname if provided - Hostname string `json:"hostname"` - - // Namespaces specifies the container's namespaces that it should setup when cloning the init process - // If a namespace is not provided that namespace is shared from the container's parent process - Namespaces Namespaces `json:"namespaces"` - - // Capabilities specify the capabilities to keep when executing the process inside the container - // All capbilities not specified will be dropped from the processes capability mask - Capabilities []string `json:"capabilities"` - - // Networks specifies the container's network setup to be created - Networks []*Network `json:"networks"` - - // Routes can be specified to create entries in the route table as the container is started - Routes []*Route `json:"routes"` - - // Cgroups specifies specific cgroup settings for the various subsystems that the container is - // placed into to limit the resources the container has available - Cgroups *Cgroup `json:"cgroups"` - - // AppArmorProfile specifies the profile to apply to the process running in the container and is - // change at the time the process is execed - AppArmorProfile string `json:"apparmor_profile,omitempty"` - - // ProcessLabel specifies the label to apply to the process running in the container. 
It is - // commonly used by selinux - ProcessLabel string `json:"process_label,omitempty"` - - // Rlimits specifies the resource limits, such as max open files, to set in the container - // If Rlimits are not set, the container will inherit rlimits from the parent process - Rlimits []Rlimit `json:"rlimits,omitempty"` - - // OomScoreAdj specifies the adjustment to be made by the kernel when calculating oom scores - // for a process. Valid values are between the range [-1000, '1000'], where processes with - // higher scores are preferred for being killed. - // More information about kernel oom score calculation here: https://lwn.net/Articles/317814/ - OomScoreAdj int `json:"oom_score_adj"` - - // AdditionalGroups specifies the gids that should be added to supplementary groups - // in addition to those that the user belongs to. - AdditionalGroups []string `json:"additional_groups"` - - // UidMappings is an array of User ID mappings for User Namespaces - UidMappings []IDMap `json:"uid_mappings"` - - // GidMappings is an array of Group ID mappings for User Namespaces - GidMappings []IDMap `json:"gid_mappings"` - - // MaskPaths specifies paths within the container's rootfs to mask over with a bind - // mount pointing to /dev/null as to prevent reads of the file. - MaskPaths []string `json:"mask_paths"` - - // ReadonlyPaths specifies paths within the container's rootfs to remount as read-only - // so that these files prevent any writes. - ReadonlyPaths []string `json:"readonly_paths"` - - // Sysctl is a map of properties and their values. It is the equivalent of using - // sysctl -w my.property.name value in Linux. - Sysctl map[string]string `json:"sysctl"` - - // Seccomp allows actions to be taken whenever a syscall is made within the container. - // A number of rules are given, each having an action to be taken if a syscall matches it. - // A default action to be taken if no rules match is also given. - Seccomp *Seccomp `json:"seccomp"` - - // NoNewPrivileges controls whether processes in the container can gain additional privileges. - NoNewPrivileges bool `json:"no_new_privileges,omitempty"` - - // Hooks are a collection of actions to perform at various container lifecycle events. - // CommandHooks are serialized to JSON, but other hooks are not. - Hooks *Hooks - - // Version is the version of opencontainer specification that is supported. - Version string `json:"version"` - - // Labels are user defined metadata that is stored in the config and populated on the state - Labels []string `json:"labels"` - - // NoNewKeyring will not allocated a new session keyring for the container. It will use the - // callers keyring in this case. - NoNewKeyring bool `json:"no_new_keyring"` -} - -type Hooks struct { - // Prestart commands are executed after the container namespaces are created, - // but before the user supplied command is executed from init. - Prestart []Hook - - // Poststart commands are executed after the container init process starts. - Poststart []Hook - - // Poststop commands are executed after the container init process exits. 
- Poststop []Hook -} - -func (hooks *Hooks) UnmarshalJSON(b []byte) error { - var state struct { - Prestart []CommandHook - Poststart []CommandHook - Poststop []CommandHook - } - - if err := json.Unmarshal(b, &state); err != nil { - return err - } - - deserialize := func(shooks []CommandHook) (hooks []Hook) { - for _, shook := range shooks { - hooks = append(hooks, shook) - } - - return hooks - } - - hooks.Prestart = deserialize(state.Prestart) - hooks.Poststart = deserialize(state.Poststart) - hooks.Poststop = deserialize(state.Poststop) - return nil -} - -func (hooks Hooks) MarshalJSON() ([]byte, error) { - serialize := func(hooks []Hook) (serializableHooks []CommandHook) { - for _, hook := range hooks { - switch chook := hook.(type) { - case CommandHook: - serializableHooks = append(serializableHooks, chook) - default: - logrus.Warnf("cannot serialize hook of type %T, skipping", hook) - } - } - - return serializableHooks - } - - return json.Marshal(map[string]interface{}{ - "prestart": serialize(hooks.Prestart), - "poststart": serialize(hooks.Poststart), - "poststop": serialize(hooks.Poststop), - }) -} - -// HookState is the payload provided to a hook on execution. -type HookState struct { - Version string `json:"ociVersion"` - ID string `json:"id"` - Pid int `json:"pid"` - Root string `json:"root"` - BundlePath string `json:"bundlePath"` -} - -type Hook interface { - // Run executes the hook with the provided state. - Run(HookState) error -} - -// NewFunctionHook will call the provided function when the hook is run. -func NewFunctionHook(f func(HookState) error) FuncHook { - return FuncHook{ - run: f, - } -} - -type FuncHook struct { - run func(HookState) error -} - -func (f FuncHook) Run(s HookState) error { - return f.run(s) -} - -type Command struct { - Path string `json:"path"` - Args []string `json:"args"` - Env []string `json:"env"` - Dir string `json:"dir"` - Timeout *time.Duration `json:"timeout"` -} - -// NewCommandHook will execute the provided command when the hook is run. -func NewCommandHook(cmd Command) CommandHook { - return CommandHook{ - Command: cmd, - } -} - -type CommandHook struct { - Command -} - -func (c Command) Run(s HookState) error { - b, err := json.Marshal(s) - if err != nil { - return err - } - cmd := exec.Cmd{ - Path: c.Path, - Args: c.Args, - Env: c.Env, - Stdin: bytes.NewReader(b), - } - errC := make(chan error, 1) - go func() { - out, err := cmd.CombinedOutput() - if err != nil { - err = fmt.Errorf("%s: %s", err, out) - } - errC <- err - }() - if c.Timeout != nil { - select { - case err := <-errC: - return err - case <-time.After(*c.Timeout): - cmd.Process.Kill() - cmd.Wait() - return fmt.Errorf("hook ran past specified timeout of %.1fs", c.Timeout.Seconds()) - } - } - return <-errC -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config_unix.go deleted file mode 100644 index a60554a7b..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config_unix.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build freebsd linux - -package configs - -import "fmt" - -// HostUID gets the root uid for the process on host which could be non-zero -// when user namespaces are enabled. 
-func (c Config) HostUID() (int, error) { - if c.Namespaces.Contains(NEWUSER) { - if c.UidMappings == nil { - return -1, fmt.Errorf("User namespaces enabled, but no user mappings found.") - } - id, found := c.hostIDFromMapping(0, c.UidMappings) - if !found { - return -1, fmt.Errorf("User namespaces enabled, but no root user mapping found.") - } - return id, nil - } - // Return default root uid 0 - return 0, nil -} - -// HostGID gets the root gid for the process on host which could be non-zero -// when user namespaces are enabled. -func (c Config) HostGID() (int, error) { - if c.Namespaces.Contains(NEWUSER) { - if c.GidMappings == nil { - return -1, fmt.Errorf("User namespaces enabled, but no gid mappings found.") - } - id, found := c.hostIDFromMapping(0, c.GidMappings) - if !found { - return -1, fmt.Errorf("User namespaces enabled, but no root group mapping found.") - } - return id, nil - } - // Return default root gid 0 - return 0, nil -} - -// Utility function that gets a host ID for a container ID from user namespace map -// if that ID is present in the map. -func (c Config) hostIDFromMapping(containerID int, uMap []IDMap) (int, bool) { - for _, m := range uMap { - if (containerID >= m.ContainerID) && (containerID <= (m.ContainerID + m.Size - 1)) { - hostID := m.HostID + (containerID - m.ContainerID) - return hostID, true - } - } - return -1, false -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go deleted file mode 100644 index 8701bb212..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go +++ /dev/null @@ -1,57 +0,0 @@ -package configs - -import ( - "fmt" - "os" -) - -const ( - Wildcard = -1 -) - -// TODO Windows: This can be factored out in the future - -type Device struct { - // Device type, block, char, etc. - Type rune `json:"type"` - - // Path to the device. - Path string `json:"path"` - - // Major is the device's major number. - Major int64 `json:"major"` - - // Minor is the device's minor number. - Minor int64 `json:"minor"` - - // Cgroup permissions format, rwm. - Permissions string `json:"permissions"` - - // FileMode permission bits for the device. - FileMode os.FileMode `json:"file_mode"` - - // Uid of the device. - Uid uint32 `json:"uid"` - - // Gid of the device. - Gid uint32 `json:"gid"` - - // Write the file to the allowed list - Allow bool `json:"allow"` -} - -func (d *Device) CgroupString() string { - return fmt.Sprintf("%c %s:%s %s", d.Type, deviceNumberString(d.Major), deviceNumberString(d.Minor), d.Permissions) -} - -func (d *Device) Mkdev() int { - return int((d.Major << 8) | (d.Minor & 0xff) | ((d.Minor & 0xfff00) << 12)) -} - -// deviceNumberString converts the device number to a string return result. -func deviceNumberString(number int64) string { - if number == Wildcard { - return "*" - } - return fmt.Sprint(number) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go deleted file mode 100644 index ba1f437f3..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go +++ /dev/null @@ -1,125 +0,0 @@ -// +build linux freebsd - -package configs - -var ( - // DefaultSimpleDevices are devices that are to be both allowed and created. 
- DefaultSimpleDevices = []*Device{ - // /dev/null and zero - { - Path: "/dev/null", - Type: 'c', - Major: 1, - Minor: 3, - Permissions: "rwm", - FileMode: 0666, - }, - { - Path: "/dev/zero", - Type: 'c', - Major: 1, - Minor: 5, - Permissions: "rwm", - FileMode: 0666, - }, - - { - Path: "/dev/full", - Type: 'c', - Major: 1, - Minor: 7, - Permissions: "rwm", - FileMode: 0666, - }, - - // consoles and ttys - { - Path: "/dev/tty", - Type: 'c', - Major: 5, - Minor: 0, - Permissions: "rwm", - FileMode: 0666, - }, - - // /dev/urandom,/dev/random - { - Path: "/dev/urandom", - Type: 'c', - Major: 1, - Minor: 9, - Permissions: "rwm", - FileMode: 0666, - }, - { - Path: "/dev/random", - Type: 'c', - Major: 1, - Minor: 8, - Permissions: "rwm", - FileMode: 0666, - }, - } - DefaultAllowedDevices = append([]*Device{ - // allow mknod for any device - { - Type: 'c', - Major: Wildcard, - Minor: Wildcard, - Permissions: "m", - }, - { - Type: 'b', - Major: Wildcard, - Minor: Wildcard, - Permissions: "m", - }, - - { - Path: "/dev/console", - Type: 'c', - Major: 5, - Minor: 1, - Permissions: "rwm", - }, - // /dev/pts/ - pts namespaces are "coming soon" - { - Path: "", - Type: 'c', - Major: 136, - Minor: Wildcard, - Permissions: "rwm", - }, - { - Path: "", - Type: 'c', - Major: 5, - Minor: 2, - Permissions: "rwm", - }, - - // tuntap - { - Path: "", - Type: 'c', - Major: 10, - Minor: 200, - Permissions: "rwm", - }, - }, DefaultSimpleDevices...) - DefaultAutoCreatedDevices = append([]*Device{ - { - // /dev/fuse is created but not allowed. - // This is to allow java to work. Because java - // Insists on there being a /dev/fuse - // https://github.com/docker/docker/issues/514 - // https://github.com/docker/docker/issues/2393 - // - Path: "/dev/fuse", - Type: 'c', - Major: 10, - Minor: 229, - Permissions: "rwm", - }, - }, DefaultSimpleDevices...) -) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go deleted file mode 100644 index d30216380..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go +++ /dev/null @@ -1,9 +0,0 @@ -package configs - -type HugepageLimit struct { - // which type of hugepage to limit. - Pagesize string `json:"page_size"` - - // usage limit for hugepage. - Limit uint64 `json:"limit"` -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go deleted file mode 100644 index 9a0395eaf..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go +++ /dev/null @@ -1,14 +0,0 @@ -package configs - -import ( - "fmt" -) - -type IfPrioMap struct { - Interface string `json:"interface"` - Priority int64 `json:"priority"` -} - -func (i *IfPrioMap) CgroupString() string { - return fmt.Sprintf("%s %d", i.Interface, i.Priority) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go deleted file mode 100644 index cc770c916..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go +++ /dev/null @@ -1,30 +0,0 @@ -package configs - -type Mount struct { - // Source path for the mount. - Source string `json:"source"` - - // Destination path for the mount inside the container. - Destination string `json:"destination"` - - // Device the mount is for. 
- Device string `json:"device"` - - // Mount flags. - Flags int `json:"flags"` - - // Propagation Flags - PropagationFlags []int `json:"propagation_flags"` - - // Mount data applied to the mount. - Data string `json:"data"` - - // Relabel source if set, "z" indicates shared, "Z" indicates unshared. - Relabel string `json:"relabel"` - - // Optional Command to be run before Source is mounted. - PremountCmds []Command `json:"premount_cmds"` - - // Optional Command to be run after Source is mounted. - PostmountCmds []Command `json:"postmount_cmds"` -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go deleted file mode 100644 index a3329a31a..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go +++ /dev/null @@ -1,5 +0,0 @@ -package configs - -type NamespaceType string - -type Namespaces []Namespace diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go deleted file mode 100644 index fb4b85222..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build linux - -package configs - -import "syscall" - -func (n *Namespace) Syscall() int { - return namespaceInfo[n.Type] -} - -var namespaceInfo = map[NamespaceType]int{ - NEWNET: syscall.CLONE_NEWNET, - NEWNS: syscall.CLONE_NEWNS, - NEWUSER: syscall.CLONE_NEWUSER, - NEWIPC: syscall.CLONE_NEWIPC, - NEWUTS: syscall.CLONE_NEWUTS, - NEWPID: syscall.CLONE_NEWPID, -} - -// CloneFlags parses the container's Namespaces options to set the correct -// flags on clone, unshare. This function returns flags only for new namespaces. -func (n *Namespaces) CloneFlags() uintptr { - var flag int - for _, v := range *n { - if v.Path != "" { - continue - } - flag |= namespaceInfo[v.Type] - } - return uintptr(flag) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go deleted file mode 100644 index 0547223a9..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !linux,!windows - -package configs - -func (n *Namespace) Syscall() int { - panic("No namespace syscall support") - return 0 -} - -// CloneFlags parses the container's Namespaces options to set the correct -// flags on clone, unshare. This function returns flags only for new namespaces. 
-func (n *Namespaces) CloneFlags() uintptr { - panic("No namespace syscall support") - return uintptr(0) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unix.go deleted file mode 100644 index b9c820d06..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unix.go +++ /dev/null @@ -1,127 +0,0 @@ -// +build linux freebsd - -package configs - -import ( - "fmt" - "os" - "sync" -) - -const ( - NEWNET NamespaceType = "NEWNET" - NEWPID NamespaceType = "NEWPID" - NEWNS NamespaceType = "NEWNS" - NEWUTS NamespaceType = "NEWUTS" - NEWIPC NamespaceType = "NEWIPC" - NEWUSER NamespaceType = "NEWUSER" -) - -var ( - nsLock sync.Mutex - supportedNamespaces = make(map[NamespaceType]bool) -) - -// nsToFile converts the namespace type to its filename -func nsToFile(ns NamespaceType) string { - switch ns { - case NEWNET: - return "net" - case NEWNS: - return "mnt" - case NEWPID: - return "pid" - case NEWIPC: - return "ipc" - case NEWUSER: - return "user" - case NEWUTS: - return "uts" - } - return "" -} - -// IsNamespaceSupported returns whether a namespace is available or -// not -func IsNamespaceSupported(ns NamespaceType) bool { - nsLock.Lock() - defer nsLock.Unlock() - supported, ok := supportedNamespaces[ns] - if ok { - return supported - } - nsFile := nsToFile(ns) - // if the namespace type is unknown, just return false - if nsFile == "" { - return false - } - _, err := os.Stat(fmt.Sprintf("/proc/self/ns/%s", nsFile)) - // a namespace is supported if it exists and we have permissions to read it - supported = err == nil - supportedNamespaces[ns] = supported - return supported -} - -func NamespaceTypes() []NamespaceType { - return []NamespaceType{ - NEWNET, - NEWPID, - NEWNS, - NEWUTS, - NEWIPC, - NEWUSER, - } -} - -// Namespace defines configuration for each namespace. It specifies an -// alternate path that is able to be joined via setns. -type Namespace struct { - Type NamespaceType `json:"type"` - Path string `json:"path"` -} - -func (n *Namespace) GetPath(pid int) string { - if n.Path != "" { - return n.Path - } - return fmt.Sprintf("/proc/%d/ns/%s", pid, nsToFile(n.Type)) -} - -func (n *Namespaces) Remove(t NamespaceType) bool { - i := n.index(t) - if i == -1 { - return false - } - *n = append((*n)[:i], (*n)[i+1:]...) - return true -} - -func (n *Namespaces) Add(t NamespaceType, path string) { - i := n.index(t) - if i == -1 { - *n = append(*n, Namespace{Type: t, Path: path}) - return - } - (*n)[i].Path = path -} - -func (n *Namespaces) index(t NamespaceType) int { - for i, ns := range *n { - if ns.Type == t { - return i - } - } - return -1 -} - -func (n *Namespaces) Contains(t NamespaceType) bool { - return n.index(t) != -1 -} - -func (n *Namespaces) PathOf(t NamespaceType) string { - i := n.index(t) - if i == -1 { - return "" - } - return (*n)[i].Path -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go deleted file mode 100644 index 9a74033ce..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !linux,!freebsd - -package configs - -// Namespace defines configuration for each namespace. It specifies an -// alternate path that is able to be joined via setns. 
-type Namespace struct { -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go deleted file mode 100644 index ccdb228e1..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go +++ /dev/null @@ -1,72 +0,0 @@ -package configs - -// Network defines configuration for a container's networking stack -// -// The network configuration can be omitted from a container causing the -// container to be setup with the host's networking stack -type Network struct { - // Type sets the networks type, commonly veth and loopback - Type string `json:"type"` - - // Name of the network interface - Name string `json:"name"` - - // The bridge to use. - Bridge string `json:"bridge"` - - // MacAddress contains the MAC address to set on the network interface - MacAddress string `json:"mac_address"` - - // Address contains the IPv4 and mask to set on the network interface - Address string `json:"address"` - - // Gateway sets the gateway address that is used as the default for the interface - Gateway string `json:"gateway"` - - // IPv6Address contains the IPv6 and mask to set on the network interface - IPv6Address string `json:"ipv6_address"` - - // IPv6Gateway sets the ipv6 gateway address that is used as the default for the interface - IPv6Gateway string `json:"ipv6_gateway"` - - // Mtu sets the mtu value for the interface and will be mirrored on both the host and - // container's interfaces if a pair is created, specifically in the case of type veth - // Note: This does not apply to loopback interfaces. - Mtu int `json:"mtu"` - - // TxQueueLen sets the tx_queuelen value for the interface and will be mirrored on both the host and - // container's interfaces if a pair is created, specifically in the case of type veth - // Note: This does not apply to loopback interfaces. - TxQueueLen int `json:"txqueuelen"` - - // HostInterfaceName is a unique name of a veth pair that resides on in the host interface of the - // container. - HostInterfaceName string `json:"host_interface_name"` - - // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface - // bridge port in the case of type veth - // Note: This is unsupported on some systems. - // Note: This does not apply to loopback interfaces. - HairpinMode bool `json:"hairpin_mode"` -} - -// Routes can be specified to create entries in the route table as the container is started -// -// All of destination, source, and gateway should be either IPv4 or IPv6. -// One of the three options must be present, and omitted entries will use their -// IP family default for the route table. For IPv4 for example, setting the -// gateway to 1.2.3.4 and the interface to eth0 will set up a standard -// destination of 0.0.0.0(or *) when viewed in the route table. -type Route struct { - // Sets the destination and mask, should be a CIDR. Accepts IPv4 and IPv6 - Destination string `json:"destination"` - - // Sets the source and mask, should be a CIDR. Accepts IPv4 and IPv6 - Source string `json:"source"` - - // Sets the gateway. 
Accepts IPv4 and IPv6 - Gateway string `json:"gateway"` - - // The device to set this route up for, for example: eth0 - InterfaceName string `json:"interface_name"` -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go deleted file mode 100644 index 1afc52b4b..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go +++ /dev/null @@ -1,143 +0,0 @@ -// +build linux - -package system - -import ( - "bufio" - "fmt" - "os" - "os/exec" - "syscall" - "unsafe" -) - -// If arg2 is nonzero, set the "child subreaper" attribute of the -// calling process; if arg2 is zero, unset the attribute. When a -// process is marked as a child subreaper, all of the children -// that it creates, and their descendants, will be marked as -// having a subreaper. In effect, a subreaper fulfills the role -// of init(1) for its descendant processes. Upon termination of -// a process that is orphaned (i.e., its immediate parent has -// already terminated) and marked as having a subreaper, the -// nearest still living ancestor subreaper will receive a SIGCHLD -// signal and be able to wait(2) on the process to discover its -// termination status. -const PR_SET_CHILD_SUBREAPER = 36 - -type ParentDeathSignal int - -func (p ParentDeathSignal) Restore() error { - if p == 0 { - return nil - } - current, err := GetParentDeathSignal() - if err != nil { - return err - } - if p == current { - return nil - } - return p.Set() -} - -func (p ParentDeathSignal) Set() error { - return SetParentDeathSignal(uintptr(p)) -} - -func Execv(cmd string, args []string, env []string) error { - name, err := exec.LookPath(cmd) - if err != nil { - return err - } - - return syscall.Exec(name, args, env) -} - -func Prlimit(pid, resource int, limit syscall.Rlimit) error { - _, _, err := syscall.RawSyscall6(syscall.SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(&limit)), uintptr(unsafe.Pointer(&limit)), 0, 0) - if err != 0 { - return err - } - return nil -} - -func SetParentDeathSignal(sig uintptr) error { - if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, sig, 0); err != 0 { - return err - } - return nil -} - -func GetParentDeathSignal() (ParentDeathSignal, error) { - var sig int - _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0) - if err != 0 { - return -1, err - } - return ParentDeathSignal(sig), nil -} - -func SetKeepCaps() error { - if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 1, 0); err != 0 { - return err - } - - return nil -} - -func ClearKeepCaps() error { - if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 0, 0); err != 0 { - return err - } - - return nil -} - -func Setctty() error { - if _, _, err := syscall.RawSyscall(syscall.SYS_IOCTL, 0, uintptr(syscall.TIOCSCTTY), 0); err != 0 { - return err - } - return nil -} - -// RunningInUserNS detects whether we are currently running in a user namespace. 
-// Copied from github.com/lxc/lxd/shared/util.go -func RunningInUserNS() bool { - file, err := os.Open("/proc/self/uid_map") - if err != nil { - // This kernel-provided file only exists if user namespaces are supported - return false - } - defer file.Close() - - buf := bufio.NewReader(file) - l, _, err := buf.ReadLine() - if err != nil { - return false - } - - line := string(l) - var a, b, c int64 - fmt.Sscanf(line, "%d %d %d", &a, &b, &c) - /* - * We assume we are in the initial user namespace if we have a full - * range - 4294967295 uids starting at uid 0. - */ - if a == 0 && b == 0 && c == 4294967295 { - return false - } - return true -} - -// SetSubreaper sets the value i as the subreaper setting for the calling process -func SetSubreaper(i int) error { - return Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0) -} - -func Prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) { - _, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go b/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go deleted file mode 100644 index 37808a29f..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go +++ /dev/null @@ -1,27 +0,0 @@ -package system - -import ( - "io/ioutil" - "path/filepath" - "strconv" - "strings" -) - -// look in /proc to find the process start time so that we can verify -// that this pid has started after ourself -func GetProcessStartTime(pid int) (string, error) { - data, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat")) - if err != nil { - return "", err - } - - parts := strings.Split(string(data), " ") - // the starttime is located at pos 22 - // from the man page - // - // starttime %llu (was %lu before Linux 2.6) - // (22) The time the process started after system boot. In kernels before Linux 2.6, this - // value was expressed in jiffies. Since Linux 2.6, the value is expressed in clock ticks - // (divide by sysconf(_SC_CLK_TCK)). 
- return parts[22-1], nil // starts at 1 -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go deleted file mode 100644 index 615ff4c82..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go +++ /dev/null @@ -1,40 +0,0 @@ -package system - -import ( - "fmt" - "runtime" - "syscall" -) - -// Via http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=7b21fddd087678a70ad64afc0f632e0f1071b092 -// -// We need different setns values for the different platforms and arch -// We are declaring the macro here because the SETNS syscall does not exist in th stdlib -var setNsMap = map[string]uintptr{ - "linux/386": 346, - "linux/arm64": 268, - "linux/amd64": 308, - "linux/arm": 375, - "linux/ppc": 350, - "linux/ppc64": 350, - "linux/ppc64le": 350, - "linux/s390x": 339, -} - -var sysSetns = setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)] - -func SysSetns() uint32 { - return uint32(sysSetns) -} - -func Setns(fd uintptr, flags uintptr) error { - ns, exists := setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)] - if !exists { - return fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH) - } - _, _, err := syscall.RawSyscall(ns, fd, flags, 0) - if err != 0 { - return err - } - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go deleted file mode 100644 index c99006518..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build linux,386 - -package system - -import ( - "syscall" -) - -// Setuid sets the uid of the calling thread to the specified uid. -func Setuid(uid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// Setgid sets the gid of the calling thread to the specified gid. -func Setgid(gid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go deleted file mode 100644 index 0816bf828..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build linux,arm64 linux,amd64 linux,ppc linux,ppc64 linux,ppc64le linux,s390x - -package system - -import ( - "syscall" -) - -// Setuid sets the uid of the calling thread to the specified uid. -func Setuid(uid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// Setgid sets the gid of the calling thread to the specified gid. 
-func Setgid(gid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID, uintptr(gid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go deleted file mode 100644 index 3f780f312..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build linux,arm - -package system - -import ( - "syscall" -) - -// Setuid sets the uid of the calling thread to the specified uid. -func Setuid(uid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID32, uintptr(uid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// Setgid sets the gid of the calling thread to the specified gid. -func Setgid(gid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go deleted file mode 100644 index b3a07cba3..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build cgo,linux cgo,freebsd - -package system - -/* -#include -*/ -import "C" - -func GetClockTicks() int { - return int(C.sysconf(C._SC_CLK_TCK)) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go deleted file mode 100644 index d93b5d5fd..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !cgo windows - -package system - -func GetClockTicks() int { - // TODO figure out a better alternative for platforms where we're missing cgo - // - // TODO Windows. This could be implemented using Win32 QueryPerformanceFrequency(). - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms644905(v=vs.85).aspx - // - // An example of its usage can be found here. 
- // https://msdn.microsoft.com/en-us/library/windows/desktop/dn553408(v=vs.85).aspx - - return 100 -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go deleted file mode 100644 index e7cfd62b2..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !linux - -package system - -// RunningInUserNS is a stub for non-Linux systems -// Always returns false -func RunningInUserNS() bool { - return false -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go deleted file mode 100644 index 30f74dfb1..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go +++ /dev/null @@ -1,99 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -var _zero uintptr - -// Returns the size of xattrs and nil error -// Requires path, takes allocated []byte or nil as last argument -func Llistxattr(path string, dest []byte) (size int, err error) { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return -1, err - } - var newpathBytes unsafe.Pointer - if len(dest) > 0 { - newpathBytes = unsafe.Pointer(&dest[0]) - } else { - newpathBytes = unsafe.Pointer(&_zero) - } - - _size, _, errno := syscall.Syscall6(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(newpathBytes), uintptr(len(dest)), 0, 0, 0) - size = int(_size) - if errno != 0 { - return -1, errno - } - - return size, nil -} - -// Returns a []byte slice if the xattr is set and nil otherwise -// Requires path and its attribute as arguments -func Lgetxattr(path string, attr string) ([]byte, error) { - var sz int - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return nil, err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return nil, err - } - - // Start with a 128 length byte array - sz = 128 - dest := make([]byte, sz) - destBytes := unsafe.Pointer(&dest[0]) - _sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - - switch { - case errno == syscall.ENODATA: - return nil, errno - case errno == syscall.ENOTSUP: - return nil, errno - case errno == syscall.ERANGE: - // 128 byte array might just not be good enough, - // A dummy buffer is used ``uintptr(0)`` to get real size - // of the xattrs on disk - _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(unsafe.Pointer(nil)), uintptr(0), 0, 0) - sz = int(_sz) - if sz < 0 { - return nil, errno - } - dest = make([]byte, sz) - destBytes := unsafe.Pointer(&dest[0]) - _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - if errno != 0 { - return nil, errno - } - case errno != 0: - return nil, errno - } - sz = int(_sz) - return dest[:sz], nil -} - -func Lsetxattr(path string, attr string, data []byte, flags int) error { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return err - } - var dataBytes unsafe.Pointer - if len(data) > 0 { - dataBytes = unsafe.Pointer(&data[0]) - 
} else { - dataBytes = unsafe.Pointer(&_zero) - } - _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) - if errno != 0 { - return errno - } - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go deleted file mode 100644 index 3466bfcea..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils.go +++ /dev/null @@ -1,121 +0,0 @@ -package utils - -import ( - "crypto/rand" - "encoding/hex" - "encoding/json" - "io" - "os" - "path/filepath" - "strings" - "syscall" -) - -const ( - exitSignalOffset = 128 -) - -// GenerateRandomName returns a new name joined with a prefix. This size -// specified is used to truncate the randomly generated value -func GenerateRandomName(prefix string, size int) (string, error) { - id := make([]byte, 32) - if _, err := io.ReadFull(rand.Reader, id); err != nil { - return "", err - } - if size > 64 { - size = 64 - } - return prefix + hex.EncodeToString(id)[:size], nil -} - -// ResolveRootfs ensures that the current working directory is -// not a symlink and returns the absolute path to the rootfs -func ResolveRootfs(uncleanRootfs string) (string, error) { - rootfs, err := filepath.Abs(uncleanRootfs) - if err != nil { - return "", err - } - return filepath.EvalSymlinks(rootfs) -} - -// ExitStatus returns the correct exit status for a process based on if it -// was signaled or exited cleanly -func ExitStatus(status syscall.WaitStatus) int { - if status.Signaled() { - return exitSignalOffset + int(status.Signal()) - } - return status.ExitStatus() -} - -// WriteJSON writes the provided struct v to w using standard json marshaling -func WriteJSON(w io.Writer, v interface{}) error { - data, err := json.Marshal(v) - if err != nil { - return err - } - _, err = w.Write(data) - return err -} - -// CleanPath makes a path safe for use with filepath.Join. This is done by not -// only cleaning the path, but also (if the path is relative) adding a leading -// '/' and cleaning it (then removing the leading '/'). This ensures that a -// path resulting from prepending another path will always resolve to lexically -// be a subdirectory of the prefixed path. This is all done lexically, so paths -// that include symlinks won't be safe as a result of using CleanPath. -func CleanPath(path string) string { - // Deal with empty strings nicely. - if path == "" { - return "" - } - - // Ensure that all paths are cleaned (especially problematic ones like - // "/../../../../../" which can cause lots of issues). - path = filepath.Clean(path) - - // If the path isn't absolute, we need to do more processing to fix paths - // such as "../../../..//some/path". We also shouldn't convert absolute - // paths to relative ones. - if !filepath.IsAbs(path) { - path = filepath.Clean(string(os.PathSeparator) + path) - // This can't fail, as (by definition) all paths are relative to root. - path, _ = filepath.Rel(string(os.PathSeparator), path) - } - - // Clean the path again for good measure. - return filepath.Clean(path) -} - -// SearchLabels searches a list of key-value pairs for the provided key and -// returns the corresponding value. The pairs must be separated with '='. 
-func SearchLabels(labels []string, query string) string { - for _, l := range labels { - parts := strings.SplitN(l, "=", 2) - if len(parts) < 2 { - continue - } - if parts[0] == query { - return parts[1] - } - } - return "" -} - -// Annotations returns the bundle path and user defined annotations from the -// libcontianer state. We need to remove the bundle because that is a label -// added by libcontainer. -func Annotations(labels []string) (bundle string, userAnnotations map[string]string) { - userAnnotations = make(map[string]string) - for _, l := range labels { - parts := strings.SplitN(l, "=", 2) - if len(parts) < 2 { - continue - } - if parts[0] == "bundle" { - bundle = parts[1] - } else { - userAnnotations[parts[0]] = parts[1] - } - } - return -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go deleted file mode 100644 index 408918f27..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build !windows - -package utils - -import ( - "io/ioutil" - "strconv" - "syscall" -) - -func CloseExecFrom(minFd int) error { - fdList, err := ioutil.ReadDir("/proc/self/fd") - if err != nil { - return err - } - for _, fi := range fdList { - fd, err := strconv.Atoi(fi.Name()) - if err != nil { - // ignore non-numeric file names - continue - } - - if fd < minFd { - // ignore descriptors lower than our specified minimum - continue - } - - // intentionally ignore errors from syscall.CloseOnExec - syscall.CloseOnExec(fd) - // the cases where this might fail are basically file descriptors that have already been closed (including and especially the one that was created when ioutil.ReadDir did the "opendir" syscall) - } - return nil -} diff --git a/vendor/github.com/pborman/uuid/.travis.yml b/vendor/github.com/pborman/uuid/.travis.yml deleted file mode 100644 index a6a98db8a..000000000 --- a/vendor/github.com/pborman/uuid/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - release - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTING.md b/vendor/github.com/pborman/uuid/CONTRIBUTING.md deleted file mode 100644 index 04fdf09f1..000000000 --- a/vendor/github.com/pborman/uuid/CONTRIBUTING.md +++ /dev/null @@ -1,10 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to this project! - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -You may have already signed it for other Google projects. diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTORS b/vendor/github.com/pborman/uuid/CONTRIBUTORS deleted file mode 100644 index b382a04ed..000000000 --- a/vendor/github.com/pborman/uuid/CONTRIBUTORS +++ /dev/null @@ -1 +0,0 @@ -Paul Borman diff --git a/vendor/github.com/pborman/uuid/LICENSE b/vendor/github.com/pborman/uuid/LICENSE deleted file mode 100644 index 5dc68268d..000000000 --- a/vendor/github.com/pborman/uuid/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009,2014 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pborman/uuid/README.md b/vendor/github.com/pborman/uuid/README.md deleted file mode 100644 index f023d47ca..000000000 --- a/vendor/github.com/pborman/uuid/README.md +++ /dev/null @@ -1,13 +0,0 @@ -This project was automatically exported from code.google.com/p/go-uuid - -# uuid ![build status](https://travis-ci.org/pborman/uuid.svg?branch=master) -The uuid package generates and inspects UUIDs based on [RFC 412](http://tools.ietf.org/html/rfc4122) and DCE 1.1: Authentication and Security Services. - -###### Install -`go get github.com/pborman/uuid` - -###### Documentation -[![GoDoc](https://godoc.org/github.com/pborman/uuid?status.svg)](http://godoc.org/github.com/pborman/uuid) - -Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: -http://godoc.org/github.com/pborman/uuid diff --git a/vendor/github.com/pborman/uuid/dce.go b/vendor/github.com/pborman/uuid/dce.go deleted file mode 100755 index 50a0f2d09..000000000 --- a/vendor/github.com/pborman/uuid/dce.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "fmt" - "os" -) - -// A Domain represents a Version 2 domain -type Domain byte - -// Domain constants for DCE Security (Version 2) UUIDs. -const ( - Person = Domain(0) - Group = Domain(1) - Org = Domain(2) -) - -// NewDCESecurity returns a DCE Security (Version 2) UUID. -// -// The domain should be one of Person, Group or Org. -// On a POSIX system the id should be the users UID for the Person -// domain and the users GID for the Group. The meaning of id for -// the domain Org or on non-POSIX systems is site defined. -// -// For a given domain/id pair the same token may be returned for up to -// 7 minutes and 10 seconds. -func NewDCESecurity(domain Domain, id uint32) UUID { - uuid := NewUUID() - if uuid != nil { - uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 - uuid[9] = byte(domain) - binary.BigEndian.PutUint32(uuid[0:], id) - } - return uuid -} - -// NewDCEPerson returns a DCE Security (Version 2) UUID in the person -// domain with the id returned by os.Getuid. 
-// -// NewDCEPerson(Person, uint32(os.Getuid())) -func NewDCEPerson() UUID { - return NewDCESecurity(Person, uint32(os.Getuid())) -} - -// NewDCEGroup returns a DCE Security (Version 2) UUID in the group -// domain with the id returned by os.Getgid. -// -// NewDCEGroup(Group, uint32(os.Getgid())) -func NewDCEGroup() UUID { - return NewDCESecurity(Group, uint32(os.Getgid())) -} - -// Domain returns the domain for a Version 2 UUID or false. -func (uuid UUID) Domain() (Domain, bool) { - if v, _ := uuid.Version(); v != 2 { - return 0, false - } - return Domain(uuid[9]), true -} - -// Id returns the id for a Version 2 UUID or false. -func (uuid UUID) Id() (uint32, bool) { - if v, _ := uuid.Version(); v != 2 { - return 0, false - } - return binary.BigEndian.Uint32(uuid[0:4]), true -} - -func (d Domain) String() string { - switch d { - case Person: - return "Person" - case Group: - return "Group" - case Org: - return "Org" - } - return fmt.Sprintf("Domain%d", int(d)) -} diff --git a/vendor/github.com/pborman/uuid/doc.go b/vendor/github.com/pborman/uuid/doc.go deleted file mode 100755 index d8bd013e6..000000000 --- a/vendor/github.com/pborman/uuid/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The uuid package generates and inspects UUIDs. -// -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services. -package uuid diff --git a/vendor/github.com/pborman/uuid/hash.go b/vendor/github.com/pborman/uuid/hash.go deleted file mode 100644 index a0420c1ef..000000000 --- a/vendor/github.com/pborman/uuid/hash.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Well known Name Space IDs and UUIDs -var ( - NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8") - NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8") - NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8") - NIL = Parse("00000000-0000-0000-0000-000000000000") -) - -// NewHash returns a new UUID derived from the hash of space concatenated with -// data generated by h. The hash should be at least 16 byte in length. The -// first 16 bytes of the hash are used to form the UUID. The version of the -// UUID will be the lower 4 bits of version. NewHash is used to implement -// NewMD5 and NewSHA1. -func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { - h.Reset() - h.Write(space) - h.Write([]byte(data)) - s := h.Sum(nil) - uuid := make([]byte, 16) - copy(uuid, s) - uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) - uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant - return uuid -} - -// NewMD5 returns a new MD5 (Version 3) UUID based on the -// supplied name space and data. -// -// NewHash(md5.New(), space, data, 3) -func NewMD5(space UUID, data []byte) UUID { - return NewHash(md5.New(), space, data, 3) -} - -// NewSHA1 returns a new SHA1 (Version 5) UUID based on the -// supplied name space and data. 
-// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} diff --git a/vendor/github.com/pborman/uuid/json.go b/vendor/github.com/pborman/uuid/json.go deleted file mode 100644 index 9dda1dfba..000000000 --- a/vendor/github.com/pborman/uuid/json.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "errors" - -func (u UUID) MarshalJSON() ([]byte, error) { - if len(u) != 16 { - return []byte(`""`), nil - } - var js [38]byte - js[0] = '"' - encodeHex(js[1:], u) - js[37] = '"' - return js[:], nil -} - -func (u *UUID) UnmarshalJSON(data []byte) error { - if string(data) == `""` { - return nil - } - if data[0] != '"' { - return errors.New("invalid UUID format") - } - data = data[1 : len(data)-1] - uu := Parse(string(data)) - if uu == nil { - return errors.New("invalid UUID format") - } - *u = uu - return nil -} diff --git a/vendor/github.com/pborman/uuid/node.go b/vendor/github.com/pborman/uuid/node.go deleted file mode 100755 index 42d60da8f..000000000 --- a/vendor/github.com/pborman/uuid/node.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "net" - "sync" -) - -var ( - nodeMu sync.Mutex - interfaces []net.Interface // cached list of interfaces - ifname string // name of interface being used - nodeID []byte // hardware for version 1 UUIDs -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. -func NodeInterface() string { - defer nodeMu.Unlock() - nodeMu.Lock() - return ifname -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. If a named interface cannot be found then false -// is returned. -// -// SetNodeInterface never fails when name is "". -func SetNodeInterface(name string) bool { - defer nodeMu.Unlock() - nodeMu.Lock() - return setNodeInterface(name) -} - -func setNodeInterface(name string) bool { - if interfaces == nil { - var err error - interfaces, err = net.Interfaces() - if err != nil && name != "" { - return false - } - } - - for _, ifs := range interfaces { - if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { - if setNodeID(ifs.HardwareAddr) { - ifname = ifs.Name - return true - } - } - } - - // We found no interfaces with a valid hardware address. If name - // does not specify a specific interface generate a random Node ID - // (section 4.1.6) - if name == "" { - if nodeID == nil { - nodeID = make([]byte, 6) - } - randomBits(nodeID) - return true - } - return false -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. -func NodeID() []byte { - defer nodeMu.Unlock() - nodeMu.Lock() - if nodeID == nil { - setNodeInterface("") - } - nid := make([]byte, 6) - copy(nid, nodeID) - return nid -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. 
-func SetNodeID(id []byte) bool { - defer nodeMu.Unlock() - nodeMu.Lock() - if setNodeID(id) { - ifname = "user" - return true - } - return false -} - -func setNodeID(id []byte) bool { - if len(id) < 6 { - return false - } - if nodeID == nil { - nodeID = make([]byte, 6) - } - copy(nodeID, id) - return true -} - -// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is -// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) NodeID() []byte { - if len(uuid) != 16 { - return nil - } - node := make([]byte, 6) - copy(node, uuid[10:]) - return node -} diff --git a/vendor/github.com/pborman/uuid/sql.go b/vendor/github.com/pborman/uuid/sql.go deleted file mode 100644 index d015bfd13..000000000 --- a/vendor/github.com/pborman/uuid/sql.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "database/sql/driver" - "errors" - "fmt" -) - -// Scan implements sql.Scanner so UUIDs can be read from databases transparently -// Currently, database types that map to string and []byte are supported. Please -// consult database-specific driver documentation for matching types. -func (uuid *UUID) Scan(src interface{}) error { - switch src.(type) { - case string: - // if an empty UUID comes from a table, we return a null UUID - if src.(string) == "" { - return nil - } - - // see uuid.Parse for required string format - parsed := Parse(src.(string)) - - if parsed == nil { - return errors.New("Scan: invalid UUID format") - } - - *uuid = parsed - case []byte: - b := src.([]byte) - - // if an empty UUID comes from a table, we return a null UUID - if len(b) == 0 { - return nil - } - - // assumes a simple slice of bytes if 16 bytes - // otherwise attempts to parse - if len(b) == 16 { - *uuid = UUID(b) - } else { - u := Parse(string(b)) - - if u == nil { - return errors.New("Scan: invalid UUID format") - } - - *uuid = u - } - - default: - return fmt.Errorf("Scan: unable to scan type %T into UUID", src) - } - - return nil -} - -// Value implements sql.Valuer so that UUIDs can be written to databases -// transparently. Currently, UUIDs map to strings. Please consult -// database-specific driver documentation for matching types. -func (uuid UUID) Value() (driver.Value, error) { - return uuid.String(), nil -} diff --git a/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/pborman/uuid/time.go deleted file mode 100755 index eedf24219..000000000 --- a/vendor/github.com/pborman/uuid/time.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "sync" - "time" -) - -// A Time represents a time as the number of 100's of nanoseconds since 15 Oct -// 1582. -type Time int64 - -const ( - lillian = 2299160 // Julian day of 15 Oct 1582 - unix = 2440587 // Julian day of 1 Jan 1970 - epoch = unix - lillian // Days between epochs - g1582 = epoch * 86400 // seconds between epochs - g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs -) - -var ( - timeMu sync.Mutex - lasttime uint64 // last time we returned - clock_seq uint16 // clock sequence for this run - - timeNow = time.Now // for testing -) - -// UnixTime converts t the number of seconds and nanoseconds using the Unix -// epoch of 1 Jan 1970. 
-func (t Time) UnixTime() (sec, nsec int64) { - sec = int64(t - g1582ns100) - nsec = (sec % 10000000) * 100 - sec /= 10000000 - return sec, nsec -} - -// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and -// clock sequence as well as adjusting the clock sequence as needed. An error -// is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { - defer timeMu.Unlock() - timeMu.Lock() - return getTime() -} - -func getTime() (Time, uint16, error) { - t := timeNow() - - // If we don't have a clock sequence already, set one. - if clock_seq == 0 { - setClockSequence(-1) - } - now := uint64(t.UnixNano()/100) + g1582ns100 - - // If time has gone backwards with this clock sequence then we - // increment the clock sequence - if now <= lasttime { - clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000 - } - lasttime = now - return Time(now), clock_seq, nil -} - -// ClockSequence returns the current clock sequence, generating one if not -// already set. The clock sequence is only used for Version 1 UUIDs. -// -// The uuid package does not use global static storage for the clock sequence or -// the last time a UUID was generated. Unless SetClockSequence a new random -// clock sequence is generated the first time a clock sequence is requested by -// ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) sequence is generated -// for -func ClockSequence() int { - defer timeMu.Unlock() - timeMu.Lock() - return clockSequence() -} - -func clockSequence() int { - if clock_seq == 0 { - setClockSequence(-1) - } - return int(clock_seq & 0x3fff) -} - -// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { - defer timeMu.Unlock() - timeMu.Lock() - setClockSequence(seq) -} - -func setClockSequence(seq int) { - if seq == -1 { - var b [2]byte - randomBits(b[:]) // clock sequence - seq = int(b[0])<<8 | int(b[1]) - } - old_seq := clock_seq - clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant - if old_seq != clock_seq { - lasttime = 0 - } -} - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. It returns false if uuid is not valid. The time is only well defined -// for version 1 and 2 UUIDs. -func (uuid UUID) Time() (Time, bool) { - if len(uuid) != 16 { - return 0, false - } - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time), true -} - -// ClockSequence returns the clock sequence encoded in uuid. It returns false -// if uuid is not valid. The clock sequence is only well defined for version 1 -// and 2 UUIDs. -func (uuid UUID) ClockSequence() (int, bool) { - if len(uuid) != 16 { - return 0, false - } - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true -} diff --git a/vendor/github.com/pborman/uuid/util.go b/vendor/github.com/pborman/uuid/util.go deleted file mode 100644 index fc8e052c7..000000000 --- a/vendor/github.com/pborman/uuid/util.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "io" -) - -// randomBits completely fills slice b with random data. 
-func randomBits(b []byte) { - if _, err := io.ReadFull(rander, b); err != nil { - panic(err.Error()) // rand should never fail - } -} - -// xvalues returns the value of a byte as a hexadecimal digit or 255. -var xvalues = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts the the first two hex bytes of x into a byte. -func xtob(x string) (byte, bool) { - b1 := xvalues[x[0]] - b2 := xvalues[x[1]] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} diff --git a/vendor/github.com/pborman/uuid/uuid.go b/vendor/github.com/pborman/uuid/uuid.go deleted file mode 100644 index 82c9e7ee7..000000000 --- a/vendor/github.com/pborman/uuid/uuid.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "fmt" - "io" - "strings" -) - -// Array is a pass-by-value UUID that can be used as an effecient key in a map. -type Array [16]byte - -// UUID converts uuid into a slice. -func (uuid Array) UUID() UUID { - return uuid[:] -} - -// String returns the string representation of uuid, -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. -func (uuid Array) String() string { - return uuid.UUID().String() -} - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID []byte - -// A Version represents a UUIDs version. -type Version byte - -// A Variant represents a UUIDs variant. -type Variant byte - -// Constants returned by Variant. -const ( - Invalid = Variant(iota) // Invalid UUID - RFC4122 // The variant specified in RFC4122 - Reserved // Reserved, NCS backward compatibility. - Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future // Reserved for future definition. -) - -var rander = rand.Reader // random function - -// New returns a new random (version 4) UUID as a string. It is a convenience -// function for NewRandom().String(). -func New() string { - return NewRandom().String() -} - -// Parse decodes s into a UUID or returns nil. Both the UUID form of -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. 
-func Parse(s string) UUID { - if len(s) == 36+9 { - if strings.ToLower(s[:9]) != "urn:uuid:" { - return nil - } - s = s[9:] - } else if len(s) != 36 { - return nil - } - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return nil - } - var uuid [16]byte - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - if v, ok := xtob(s[x:]); !ok { - return nil - } else { - uuid[i] = v - } - } - return uuid[:] -} - -// Equal returns true if uuid1 and uuid2 are equal. -func Equal(uuid1, uuid2 UUID) bool { - return bytes.Equal(uuid1, uuid2) -} - -// Array returns an array representation of uuid that can be used as a map key. -// Array panics if uuid is not valid. -func (uuid UUID) Array() Array { - if len(uuid) != 16 { - panic("invalid uuid") - } - var a Array - copy(a[:], uuid) - return a -} - -// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// , or "" if uuid is invalid. -func (uuid UUID) String() string { - if len(uuid) != 16 { - return "" - } - var buf [36]byte - encodeHex(buf[:], uuid) - return string(buf[:]) -} - -// URN returns the RFC 2141 URN form of uuid, -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. -func (uuid UUID) URN() string { - if len(uuid) != 16 { - return "" - } - var buf [36 + 9]byte - copy(buf[:], "urn:uuid:") - encodeHex(buf[9:], uuid) - return string(buf[:]) -} - -func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst[:], uuid[:4]) - dst[8] = '-' - hex.Encode(dst[9:13], uuid[4:6]) - dst[13] = '-' - hex.Encode(dst[14:18], uuid[6:8]) - dst[18] = '-' - hex.Encode(dst[19:23], uuid[8:10]) - dst[23] = '-' - hex.Encode(dst[24:], uuid[10:]) -} - -// Variant returns the variant encoded in uuid. It returns Invalid if -// uuid is invalid. -func (uuid UUID) Variant() Variant { - if len(uuid) != 16 { - return Invalid - } - switch { - case (uuid[8] & 0xc0) == 0x80: - return RFC4122 - case (uuid[8] & 0xe0) == 0xc0: - return Microsoft - case (uuid[8] & 0xe0) == 0xe0: - return Future - default: - return Reserved - } -} - -// Version returns the version of uuid. It returns false if uuid is not -// valid. -func (uuid UUID) Version() (Version, bool) { - if len(uuid) != 16 { - return 0, false - } - return Version(uuid[6] >> 4), true -} - -func (v Version) String() string { - if v > 15 { - return fmt.Sprintf("BAD_VERSION_%d", v) - } - return fmt.Sprintf("VERSION_%d", v) -} - -func (v Variant) String() string { - switch v { - case RFC4122: - return "RFC4122" - case Reserved: - return "Reserved" - case Microsoft: - return "Microsoft" - case Future: - return "Future" - case Invalid: - return "Invalid" - } - return fmt.Sprintf("BadVariant%d", int(v)) -} - -// SetRand sets the random number generator to r, which implents io.Reader. -// If r.Read returns an error when the package requests random data then -// a panic will be issued. -// -// Calling SetRand with nil sets the random number generator to the default -// generator. -func SetRand(r io.Reader) { - if r == nil { - rander = rand.Reader - return - } - rander = r -} diff --git a/vendor/github.com/pborman/uuid/version1.go b/vendor/github.com/pborman/uuid/version1.go deleted file mode 100644 index 0127eacfa..000000000 --- a/vendor/github.com/pborman/uuid/version1.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package uuid - -import ( - "encoding/binary" -) - -// NewUUID returns a Version 1 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewUUID returns nil. If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewUUID returns nil. -func NewUUID() UUID { - if nodeID == nil { - SetNodeInterface("") - } - - now, seq, err := GetTime() - if err != nil { - return nil - } - - uuid := make([]byte, 16) - - time_low := uint32(now & 0xffffffff) - time_mid := uint16((now >> 32) & 0xffff) - time_hi := uint16((now >> 48) & 0x0fff) - time_hi |= 0x1000 // Version 1 - - binary.BigEndian.PutUint32(uuid[0:], time_low) - binary.BigEndian.PutUint16(uuid[4:], time_mid) - binary.BigEndian.PutUint16(uuid[6:], time_hi) - binary.BigEndian.PutUint16(uuid[8:], seq) - copy(uuid[10:], nodeID) - - return uuid -} diff --git a/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/pborman/uuid/version4.go deleted file mode 100644 index b3d4a368d..000000000 --- a/vendor/github.com/pborman/uuid/version4.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -// Random returns a Random (Version 4) UUID or panics. -// -// The strength of the UUIDs is based on the strength of the crypto/rand -// package. -// -// A note about uniqueness derived from from the UUID Wikipedia entry: -// -// Randomly generated UUIDs have 122 random bits. One's annual risk of being -// hit by a meteorite is estimated to be one chance in 17 billion, that -// means the probability is about 0.00000000006 (6 × 10−11), -// equivalent to the odds of creating a few tens of trillions of UUIDs in a -// year and having one duplicate. -func NewRandom() UUID { - uuid := make([]byte, 16) - randomBits([]byte(uuid)) - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid -} diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/prometheus/client_golang/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE deleted file mode 100644 index 37e4a7d41..000000000 --- a/vendor/github.com/prometheus/client_golang/NOTICE +++ /dev/null @@ -1,28 +0,0 @@ -Prometheus instrumentation library for Go applications -Copyright 2012-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). - - -The following components are included in this product: - -goautoneg -http://bitbucket.org/ww/goautoneg -Copyright 2011, Open Knowledge Foundation Ltd. -See README.txt for license details. - -perks - a fork of https://github.com/bmizerany/perks -https://github.com/beorn7/perks -Copyright 2013-2015 Blake Mizerany, Björn Rabenstein -See https://github.com/beorn7/perks/blob/master/README.md for license details. - -Go support for Protocol Buffers - Google's data interchange format -http://github.com/golang/protobuf/ -Copyright 2010 The Go Authors -See source code for license details. - -Support for streaming Protocol Buffer messages for the Go language (golang). -https://github.com/matttproud/golang_protobuf_extensions -Copyright 2013 Matt T. Proud -Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore deleted file mode 100644 index 3460f0346..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -command-line-arguments.test diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md deleted file mode 100644 index 81032bed8..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# Overview -This is the [Prometheus](http://www.prometheus.io) telemetric -instrumentation client [Go](http://golang.org) client library. It -enable authors to define process-space metrics for their servers and -expose them through a web service interface for extraction, -aggregation, and a whole slew of other post processing techniques. 
- -# Installing - $ go get github.com/prometheus/client_golang/prometheus - -# Example -```go -package main - -import ( - "net/http" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - indexed = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "my_company", - Subsystem: "indexer", - Name: "documents_indexed", - Help: "The number of documents indexed.", - }) - size = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "my_company", - Subsystem: "storage", - Name: "documents_total_size_bytes", - Help: "The total size of all documents in the storage.", - }) -) - -func main() { - http.Handle("/metrics", prometheus.Handler()) - - indexed.Inc() - size.Set(5) - - http.ListenAndServe(":8080", nil) -} - -func init() { - prometheus.MustRegister(indexed) - prometheus.MustRegister(size) -} -``` - -# Documentation - -[![GoDoc](https://godoc.org/github.com/prometheus/client_golang?status.png)](https://godoc.org/github.com/prometheus/client_golang) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go deleted file mode 100644 index c04688009..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Collector is the interface implemented by anything that can be used by -// Prometheus to collect metrics. A Collector has to be registered for -// collection. See Register, MustRegister, RegisterOrGet, and MustRegisterOrGet. -// -// The stock metrics provided by this package (like Gauge, Counter, Summary) are -// also Collectors (which only ever collect one metric, namely itself). An -// implementer of Collector may, however, collect multiple metrics in a -// coordinated fashion and/or create metrics on the fly. Examples for collectors -// already implemented in this library are the metric vectors (i.e. collection -// of multiple instances of the same Metric but with different label values) -// like GaugeVec or SummaryVec, and the ExpvarCollector. -type Collector interface { - // Describe sends the super-set of all possible descriptors of metrics - // collected by this Collector to the provided channel and returns once - // the last descriptor has been sent. The sent descriptors fulfill the - // consistency and uniqueness requirements described in the Desc - // documentation. (It is valid if one and the same Collector sends - // duplicate descriptors. Those duplicates are simply ignored. However, - // two different Collectors must not send duplicate descriptors.) This - // method idempotently sends the same descriptors throughout the - // lifetime of the Collector. If a Collector encounters an error while - // executing this method, it must send an invalid descriptor (created - // with NewInvalidDesc) to signal the error to the registry. 
- Describe(chan<- *Desc) - // Collect is called by Prometheus when collecting metrics. The - // implementation sends each collected metric via the provided channel - // and returns once the last metric has been sent. The descriptor of - // each sent metric is one of those returned by Describe. Returned - // metrics that share the same descriptor must differ in their variable - // label values. This method may be called concurrently and must - // therefore be implemented in a concurrency safe way. Blocking occurs - // at the expense of total performance of rendering all registered - // metrics. Ideally, Collector implementations support concurrent - // readers. - Collect(chan<- Metric) -} - -// SelfCollector implements Collector for a single Metric so that that the -// Metric collects itself. Add it as an anonymous field to a struct that -// implements Metric, and call Init with the Metric itself as an argument. -type SelfCollector struct { - self Metric -} - -// Init provides the SelfCollector with a reference to the metric it is supposed -// to collect. It is usually called within the factory function to create a -// metric. See example. -func (c *SelfCollector) Init(self Metric) { - c.self = self -} - -// Describe implements Collector. -func (c *SelfCollector) Describe(ch chan<- *Desc) { - ch <- c.self.Desc() -} - -// Collect implements Collector. -func (c *SelfCollector) Collect(ch chan<- Metric) { - ch <- c.self -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go deleted file mode 100644 index d2a564b53..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" -) - -// Counter is a Metric that represents a single numerical value that only ever -// goes up. That implies that it cannot be used to count items whose number can -// also go down, e.g. the number of currently running goroutines. Those -// "counters" are represented by Gauges. -// -// A Counter is typically used to count requests served, tasks completed, errors -// occurred, etc. -// -// To create Counter instances, use NewCounter. -type Counter interface { - Metric - Collector - - // Set is used to set the Counter to an arbitrary value. It is only used - // if you have to transfer a value from an external counter into this - // Prometheus metric. Do not use it for regular handling of a - // Prometheus counter (as it can be used to break the contract of - // monotonically increasing values). - Set(float64) - // Inc increments the counter by 1. - Inc() - // Add adds the given value to the counter. It panics if the value is < - // 0. - Add(float64) -} - -// CounterOpts is an alias for Opts. See there for doc comments. -type CounterOpts Opts - -// NewCounter creates a new Counter based on the provided CounterOpts. 
-func NewCounter(opts CounterOpts) Counter { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ) - result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}} - result.Init(result) // Init self-collection. - return result -} - -type counter struct { - value -} - -func (c *counter) Add(v float64) { - if v < 0 { - panic(errors.New("counter cannot decrease in value")) - } - c.value.Add(v) -} - -// CounterVec is a Collector that bundles a set of Counters that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. number of HTTP requests, partitioned by response code and -// method). Create instances with NewCounterVec. -// -// CounterVec embeds MetricVec. See there for a full list of methods with -// detailed documentation. -type CounterVec struct { - MetricVec -} - -// NewCounterVec creates a new CounterVec based on the provided CounterOpts and -// partitioned by the given label names. At least one label name must be -// provided. -func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &CounterVec{ - MetricVec: MetricVec{ - children: map[uint64]Metric{}, - desc: desc, - newMetric: func(lvs ...string) Metric { - result := &counter{value: value{ - desc: desc, - valType: CounterValue, - labelPairs: makeLabelPairs(desc, lvs), - }} - result.Init(result) // Init self-collection. - return result - }, - }, - } -} - -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Counter and not a -// Metric so that no type conversion is required. -func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Counter and not a Metric so that no -// type conversion is required. -func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { - metric, err := m.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (m *CounterVec) WithLabelValues(lvs ...string) Counter { - return m.MetricVec.WithLabelValues(lvs...).(Counter) -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -func (m *CounterVec) With(labels Labels) Counter { - return m.MetricVec.With(labels).(Counter) -} - -// CounterFunc is a Counter whose value is determined at collect time by calling a -// provided function. -// -// To create CounterFunc instances, use NewCounterFunc. -type CounterFunc interface { - Metric - Collector -} - -// NewCounterFunc creates a new CounterFunc based on the provided -// CounterOpts. 
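As a quick sketch of the counter API being removed in this hunk (metric and label names are invented for illustration; the constructors and methods are the ones shown in the deleted counter.go and the README above):

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	// A plain Counter: normally only Inc and Add (non-negative) are used.
	tasksCompleted = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: "my_company",
		Subsystem: "worker",
		Name:      "tasks_completed_total",
		Help:      "Total number of completed tasks.",
	})

	// A CounterVec partitions the same counter by label values.
	httpRequests = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "HTTP requests, partitioned by status code and method.",
		},
		[]string{"code", "method"},
	)
)

func init() {
	prometheus.MustRegister(tasksCompleted)
	prometheus.MustRegister(httpRequests)
}

func main() {
	tasksCompleted.Inc()
	tasksCompleted.Add(3)

	// WithLabelValues panics on a label-count mismatch;
	// GetMetricWithLabelValues is the error-returning variant.
	httpRequests.WithLabelValues("404", "GET").Inc()
	httpRequests.With(prometheus.Labels{"code": "200", "method": "POST"}).Add(42)

	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}
```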
The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where a CounterFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. The function should also honor -// the contract for a Counter (values only go up, not down), but compliance will -// not be checked. -func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), CounterValue, function) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go deleted file mode 100644 index ee02d9b8e..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ /dev/null @@ -1,192 +0,0 @@ -package prometheus - -import ( - "errors" - "fmt" - "regexp" - "sort" - "strings" - - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -var ( - metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`) - labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") -) - -// reservedLabelPrefix is a prefix which is not legal in user-supplied -// label names. -const reservedLabelPrefix = "__" - -// Labels represents a collection of label name -> value mappings. This type is -// commonly used with the With(Labels) and GetMetricWith(Labels) methods of -// metric vector Collectors, e.g.: -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -// -// The other use-case is the specification of constant label pairs in Opts or to -// create a Desc. -type Labels map[string]string - -// Desc is the descriptor used by every Prometheus Metric. It is essentially -// the immutable meta-data of a Metric. The normal Metric implementations -// included in this package manage their Desc under the hood. Users only have to -// deal with Desc if they use advanced features like the ExpvarCollector or -// custom Collectors and Metrics. -// -// Descriptors registered with the same registry have to fulfill certain -// consistency and uniqueness criteria if they share the same fully-qualified -// name: They must have the same help string and the same label names (aka label -// dimensions) in each, constLabels and variableLabels, but they must differ in -// the values of the constLabels. -// -// Descriptors that share the same fully-qualified names and the same label -// values of their constLabels are considered equal. -// -// Use NewDesc to create new Desc instances. -type Desc struct { - // fqName has been built from Namespace, Subsystem, and Name. - fqName string - // help provides some helpful information about this metric. - help string - // constLabelPairs contains precalculated DTO label pairs based on - // the constant labels. - constLabelPairs []*dto.LabelPair - // VariableLabels contains names of labels for which the metric - // maintains variable values. - variableLabels []string - // id is a hash of the values of the ConstLabels and fqName. This - // must be unique among all registered descriptors and can therefore be - // used as an identifier of the descriptor. - id uint64 - // dimHash is a hash of the label names (preset and variable) and the - // Help string. Each Desc with the same fqName must have the same - // dimHash. 
- dimHash uint64 - // err is an error that occured during construction. It is reported on - // registration time. - err error -} - -// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc -// and will be reported on registration time. variableLabels and constLabels can -// be nil if no such labels should be set. fqName and help must not be empty. -// -// variableLabels only contain the label names. Their label values are variable -// and therefore not part of the Desc. (They are managed within the Metric.) -// -// For constLabels, the label values are constant. Therefore, they are fully -// specified in the Desc. See the Opts documentation for the implications of -// constant labels. -func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { - d := &Desc{ - fqName: fqName, - help: help, - variableLabels: variableLabels, - } - if help == "" { - d.err = errors.New("empty help string") - return d - } - if !metricNameRE.MatchString(fqName) { - d.err = fmt.Errorf("%q is not a valid metric name", fqName) - return d - } - // labelValues contains the label values of const labels (in order of - // their sorted label names) plus the fqName (at position 0). - labelValues := make([]string, 1, len(constLabels)+1) - labelValues[0] = fqName - labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) - labelNameSet := map[string]struct{}{} - // First add only the const label names and sort them... - for labelName := range constLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name", labelName) - return d - } - labelNames = append(labelNames, labelName) - labelNameSet[labelName] = struct{}{} - } - sort.Strings(labelNames) - // ... so that we can now add const label values in the order of their names. - for _, labelName := range labelNames { - labelValues = append(labelValues, constLabels[labelName]) - } - // Now add the variable label names, but prefix them with something that - // cannot be in a regular label name. That prevents matching the label - // dimension with a different mix between preset and variable labels. - for _, labelName := range variableLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name", labelName) - return d - } - labelNames = append(labelNames, "$"+labelName) - labelNameSet[labelName] = struct{}{} - } - if len(labelNames) != len(labelNameSet) { - d.err = errors.New("duplicate label names") - return d - } - vh := hashNew() - for _, val := range labelValues { - vh = hashAdd(vh, val) - vh = hashAddByte(vh, separatorByte) - } - d.id = vh - // Sort labelNames so that order doesn't matter for the hash. - sort.Strings(labelNames) - // Now hash together (in this order) the help string and the sorted - // label names. - lh := hashNew() - lh = hashAdd(lh, help) - lh = hashAddByte(lh, separatorByte) - for _, labelName := range labelNames { - lh = hashAdd(lh, labelName) - lh = hashAddByte(lh, separatorByte) - } - d.dimHash = lh - - d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) - for n, v := range constLabels { - d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ - Name: proto.String(n), - Value: proto.String(v), - }) - } - sort.Sort(LabelPairSorter(d.constLabelPairs)) - return d -} - -// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the -// provided error set. If a collector returning such a descriptor is registered, -// registration will fail with the provided error. 
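The Desc machinery removed in this file mostly matters when writing a custom Collector. A minimal sketch, assuming the NewDesc, BuildFQName, and MustNewConstMetric helpers shown in the deleted files (collector names, labels, and queue depths are invented):

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

// queueCollector reports the depth of a few work queues at scrape time.
type queueCollector struct {
	desc *prometheus.Desc
}

func newQueueCollector() *queueCollector {
	return &queueCollector{
		desc: prometheus.NewDesc(
			prometheus.BuildFQName("my_company", "worker", "queue_depth"),
			"Current number of items in each work queue.",
			[]string{"queue"},                     // variable label
			prometheus.Labels{"shard": "eu-west"}, // constant label
		),
	}
}

// Describe sends the only descriptor this collector uses.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

// Collect emits one throw-away metric per queue on every scrape.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	for queue, depth := range map[string]float64{"high": 3, "low": 17} {
		ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, depth, queue)
	}
}

func main() {
	prometheus.MustRegister(newQueueCollector())
	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}
```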
NewInvalidDesc can be used by -// a Collector to signal inability to describe itself. -func NewInvalidDesc(err error) *Desc { - return &Desc{ - err: err, - } -} - -func (d *Desc) String() string { - lpStrings := make([]string, 0, len(d.constLabelPairs)) - for _, lp := range d.constLabelPairs { - lpStrings = append( - lpStrings, - fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), - ) - } - return fmt.Sprintf( - "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", - d.fqName, - d.help, - strings.Join(lpStrings, ","), - d.variableLabels, - ) -} - -func checkLabelName(l string) bool { - return labelNameRE.MatchString(l) && - !strings.HasPrefix(l, reservedLabelPrefix) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go deleted file mode 100644 index ca56f5ede..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package prometheus provides embeddable metric primitives for servers and -// standardized exposition of telemetry through a web services interface. -// -// All exported functions and methods are safe to be used concurrently unless -// specified otherwise. -// -// To expose metrics registered with the Prometheus registry, an HTTP server -// needs to know about the Prometheus handler. The usual endpoint is "/metrics". -// -// http.Handle("/metrics", prometheus.Handler()) -// -// As a starting point a very basic usage example: -// -// package main -// -// import ( -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// ) -// -// var ( -// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ -// Name: "cpu_temperature_celsius", -// Help: "Current temperature of the CPU.", -// }) -// hdFailures = prometheus.NewCounter(prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }) -// ) -// -// func init() { -// prometheus.MustRegister(cpuTemp) -// prometheus.MustRegister(hdFailures) -// } -// -// func main() { -// cpuTemp.Set(65.3) -// hdFailures.Inc() -// -// http.Handle("/metrics", prometheus.Handler()) -// http.ListenAndServe(":8080", nil) -// } -// -// -// This is a complete program that exports two metrics, a Gauge and a Counter. -// It also exports some stats about the HTTP usage of the /metrics -// endpoint. (See the Handler function for more detail.) -// -// Two more advanced metric types are the Summary and Histogram. A more -// thorough description of metric types can be found in the prometheus docs: -// https://prometheus.io/docs/concepts/metric_types/ -// -// In addition to the fundamental metric types Gauge, Counter, Summary, and -// Histogram, a very important part of the Prometheus data model is the -// partitioning of samples along dimensions called labels, which results in -// metric vectors. 
The fundamental types are GaugeVec, CounterVec, SummaryVec, -// and HistogramVec. -// -// Those are all the parts needed for basic usage. Detailed documentation and -// examples are provided below. -// -// Everything else this package offers is essentially for "power users" only. A -// few pointers to "power user features": -// -// All the various ...Opts structs have a ConstLabels field for labels that -// never change their value (which is only useful under special circumstances, -// see documentation of the Opts type). -// -// The Untyped metric behaves like a Gauge, but signals the Prometheus server -// not to assume anything about its type. -// -// Functions to fine-tune how the metric registry works: EnableCollectChecks, -// PanicOnCollectError, Register, Unregister, SetMetricFamilyInjectionHook. -// -// For custom metric collection, there are two entry points: Custom Metric -// implementations and custom Collector implementations. A Metric is the -// fundamental unit in the Prometheus data model: a sample at a point in time -// together with its meta-data (like its fully-qualified name and any number of -// pairs of label name and label value) that knows how to marshal itself into a -// data transfer object (aka DTO, implemented as a protocol buffer). A Collector -// gets registered with the Prometheus registry and manages the collection of -// one or more Metrics. Many parts of this package are building blocks for -// Metrics and Collectors. Desc is the metric descriptor, actually used by all -// metrics under the hood, and by Collectors to describe the Metrics to be -// collected, but only to be dealt with by users if they implement their own -// Metrics or Collectors. To create a Desc, the BuildFQName function will come -// in handy. Other useful components for Metric and Collector implementation -// include: LabelPairSorter to sort the DTO version of label pairs, -// NewConstMetric and MustNewConstMetric to create "throw away" Metrics at -// collection time, MetricVec to bundle custom Metrics into a metric vector -// Collector, SelfCollector to make a custom Metric collect itself. -// -// A good example for a custom Collector is the ExpVarCollector included in this -// package, which exports variables exported via the "expvar" package as -// Prometheus metrics. -package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar.go deleted file mode 100644 index 0f7630d53..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/expvar.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "encoding/json" - "expvar" -) - -// ExpvarCollector collects metrics from the expvar interface. It provides a -// quick way to expose numeric values that are already exported via expvar as -// Prometheus metrics. 
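To make the ExpvarCollector description concrete, a small sketch that proxies one plain expvar value and one expvar map, following the exports-map rules spelled out in the deleted expvar.go (the expvar variable names and metric names are invented):

```go
package main

import (
	"expvar"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	// expvar values that some other part of the program already maintains.
	indexedDocs    = expvar.NewInt("indexed_documents")
	requestsByCode = expvar.NewMap("requests_by_code")
)

func main() {
	indexedDocs.Add(12)
	requestsByCode.Add("200", 7)
	requestsByCode.Add("404", 1)

	ec := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
		// No variable labels: the expvar value must be a number (or bool).
		"indexed_documents": prometheus.NewDesc(
			"expvar_indexed_documents",
			"Documents indexed, proxied from expvar.",
			nil, nil,
		),
		// One variable label: the expvar value must be an expvar map whose
		// keys become the label values.
		"requests_by_code": prometheus.NewDesc(
			"expvar_requests_by_code",
			"Requests by HTTP status code, proxied from expvar.",
			[]string{"code"}, nil,
		),
	})
	prometheus.MustRegister(ec)

	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}
```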
Note that the data models of expvar and Prometheus are -// fundamentally different, and that the ExpvarCollector is inherently -// slow. Thus, the ExpvarCollector is probably great for experiments and -// prototying, but you should seriously consider a more direct implementation of -// Prometheus metrics for monitoring production systems. -// -// Use NewExpvarCollector to create new instances. -type ExpvarCollector struct { - exports map[string]*Desc -} - -// NewExpvarCollector returns a newly allocated ExpvarCollector that still has -// to be registered with the Prometheus registry. -// -// The exports map has the following meaning: -// -// The keys in the map correspond to expvar keys, i.e. for every expvar key you -// want to export as Prometheus metric, you need an entry in the exports -// map. The descriptor mapped to each key describes how to export the expvar -// value. It defines the name and the help string of the Prometheus metric -// proxying the expvar value. The type will always be Untyped. -// -// For descriptors without variable labels, the expvar value must be a number or -// a bool. The number is then directly exported as the Prometheus sample -// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values -// that are not numbers or bools are silently ignored. -// -// If the descriptor has one variable label, the expvar value must be an expvar -// map. The keys in the expvar map become the various values of the one -// Prometheus label. The values in the expvar map must be numbers or bools again -// as above. -// -// For descriptors with more than one variable label, the expvar must be a -// nested expvar map, i.e. where the values of the topmost map are maps again -// etc. until a depth is reached that corresponds to the number of labels. The -// leaves of that structure must be numbers or bools as above to serve as the -// sample values. -// -// Anything that does not fit into the scheme above is silently ignored. -func NewExpvarCollector(exports map[string]*Desc) *ExpvarCollector { - return &ExpvarCollector{ - exports: exports, - } -} - -// Describe implements Collector. -func (e *ExpvarCollector) Describe(ch chan<- *Desc) { - for _, desc := range e.exports { - ch <- desc - } -} - -// Collect implements Collector. -func (e *ExpvarCollector) Collect(ch chan<- Metric) { - for name, desc := range e.exports { - var m Metric - expVar := expvar.Get(name) - if expVar == nil { - continue - } - var v interface{} - labels := make([]string, len(desc.variableLabels)) - if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { - ch <- NewInvalidMetric(desc, err) - continue - } - var processValue func(v interface{}, i int) - processValue = func(v interface{}, i int) { - if i >= len(labels) { - copiedLabels := append(make([]string, 0, len(labels)), labels...) - switch v := v.(type) { - case float64: - m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) - case bool: - if v { - m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) - } else { - m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) 
- } - default: - return - } - ch <- m - return - } - vm, ok := v.(map[string]interface{}) - if !ok { - return - } - for lv, val := range vm { - labels[i] = lv - processValue(val, i+1) - } - } - processValue(v, 0) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go deleted file mode 100644 index e3b67df8a..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go +++ /dev/null @@ -1,29 +0,0 @@ -package prometheus - -// Inline and byte-free variant of hash/fnv's fnv64a. - -const ( - offset64 = 14695981039346656037 - prime64 = 1099511628211 -) - -// hashNew initializies a new fnv64a hash value. -func hashNew() uint64 { - return offset64 -} - -// hashAdd adds a string to a fnv64a hash value, returning the updated hash. -func hashAdd(h uint64, s string) uint64 { - for i := 0; i < len(s); i++ { - h ^= uint64(s[i]) - h *= prime64 - } - return h -} - -// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. -func hashAddByte(h uint64, b byte) uint64 { - h ^= uint64(b) - h *= prime64 - return h -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go deleted file mode 100644 index 390c0746f..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Gauge is a Metric that represents a single numerical value that can -// arbitrarily go up and down. -// -// A Gauge is typically used for measured values like temperatures or current -// memory usage, but also "counts" that can go up and down, like the number of -// running goroutines. -// -// To create Gauge instances, use NewGauge. -type Gauge interface { - Metric - Collector - - // Set sets the Gauge to an arbitrary value. - Set(float64) - // Inc increments the Gauge by 1. - Inc() - // Dec decrements the Gauge by 1. - Dec() - // Add adds the given value to the Gauge. (The value can be - // negative, resulting in a decrease of the Gauge.) - Add(float64) - // Sub subtracts the given value from the Gauge. (The value can be - // negative, resulting in an increase of the Gauge.) - Sub(float64) -} - -// GaugeOpts is an alias for Opts. See there for doc comments. -type GaugeOpts Opts - -// NewGauge creates a new Gauge based on the provided GaugeOpts. -func NewGauge(opts GaugeOpts) Gauge { - return newValue(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), GaugeValue, 0) -} - -// GaugeVec is a Collector that bundles a set of Gauges that all share the same -// Desc, but have different values for their variable labels. This is used if -// you want to count the same thing partitioned by various dimensions -// (e.g. number of operations queued, partitioned by user and operation -// type). 
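A short gauge sketch matching the interface in the deleted gauge.go (the metric name and values are illustrative); unlike a counter, a gauge may move in both directions:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace: "my_company",
		Subsystem: "api",
		Name:      "in_flight_requests",
		Help:      "Requests currently being served.",
	})
	prometheus.MustRegister(inFlight)

	inFlight.Set(0) // start from a known value
	inFlight.Inc()  // a request comes in
	inFlight.Add(4) // four more arrive
	inFlight.Dec()  // one finishes
	inFlight.Sub(2) // two more finish

	fmt.Println("gauge registered; expose it via prometheus.Handler() as in the README example")
}
```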
Create instances with NewGaugeVec. -type GaugeVec struct { - MetricVec -} - -// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and -// partitioned by the given label names. At least one label name must be -// provided. -func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &GaugeVec{ - MetricVec: MetricVec{ - children: map[uint64]Metric{}, - desc: desc, - newMetric: func(lvs ...string) Metric { - return newValue(desc, GaugeValue, 0, lvs...) - }, - }, - } -} - -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Gauge and not a -// Metric so that no type conversion is required. -func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Gauge), err - } - return nil, err -} - -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Gauge and not a Metric so that no -// type conversion is required. -func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { - metric, err := m.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Gauge), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge { - return m.MetricVec.WithLabelValues(lvs...).(Gauge) -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -func (m *GaugeVec) With(labels Labels) Gauge { - return m.MetricVec.With(labels).(Gauge) -} - -// GaugeFunc is a Gauge whose value is determined at collect time by calling a -// provided function. -// -// To create GaugeFunc instances, use NewGaugeFunc. -type GaugeFunc interface { - Metric - Collector -} - -// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The -// value reported is determined by calling the given function from within the -// Write method. Take into account that metric collection may happen -// concurrently. If that results in concurrent calls to Write, like in the case -// where a GaugeFunc is directly registered with Prometheus, the provided -// function must be concurrency-safe. 
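GaugeFunc is handy when the value already exists elsewhere and only needs to be read at scrape time. A minimal sketch, using the goroutine count purely as an example value (the metric name is invented):

```go
package main

import (
	"net/http"
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// The callback runs during collection, so it must be safe for
	// concurrent use; runtime.NumGoroutine is.
	goroutines := prometheus.NewGaugeFunc(
		prometheus.GaugeOpts{
			Name: "app_goroutines",
			Help: "Number of goroutines, read at scrape time.",
		},
		func() float64 { return float64(runtime.NumGoroutine()) },
	)
	prometheus.MustRegister(goroutines)

	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}
```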
-func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), GaugeValue, function) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go deleted file mode 100644 index b0d4fb95c..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ /dev/null @@ -1,263 +0,0 @@ -package prometheus - -import ( - "fmt" - "runtime" - "runtime/debug" - "time" -) - -type goCollector struct { - goroutines Gauge - gcDesc *Desc - - // metrics to describe and collect - metrics memStatsMetrics -} - -// NewGoCollector returns a collector which exports metrics about the current -// go process. -func NewGoCollector() *goCollector { - return &goCollector{ - goroutines: NewGauge(GaugeOpts{ - Namespace: "go", - Name: "goroutines", - Help: "Number of goroutines that currently exist.", - }), - gcDesc: NewDesc( - "go_gc_duration_seconds", - "A summary of the GC invocation durations.", - nil, nil), - metrics: memStatsMetrics{ - { - desc: NewDesc( - memstatNamespace("alloc_bytes"), - "Number of bytes allocated and still in use.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated, even if freed.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("sys_bytes"), - "Number of bytes obtained by system. Sum of all system allocations.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("lookups_total"), - "Total number of pointer lookups.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("mallocs_total"), - "Total number of mallocs.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("frees_total"), - "Total number of frees.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and still in use.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_released_bytes_total"), - "Total number of heap bytes released to OS.", - nil, nil, - 
), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("heap_objects"), - "Number of allocated objects.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("stack_inuse_bytes"), - "Number of bytes in use by the stack allocator.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("last_gc_time_seconds"), - "Number of seconds since 1970 of last garbage collection.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, - valType: GaugeValue, - }, - }, - } -} - -func memstatNamespace(s string) string { - return fmt.Sprintf("go_memstats_%s", s) -} - -// Describe returns all descriptions of the collector. -func (c *goCollector) Describe(ch chan<- *Desc) { - ch <- c.goroutines.Desc() - ch <- c.gcDesc - - for _, i := range c.metrics { - ch <- i.desc - } -} - -// Collect returns the current state of all metrics of the collector. 
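The go collector deleted here only reports anything once registered. A sketch of wiring it up, assuming it is not already registered by a default registry in this version of the package:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Exposes go_goroutines, go_gc_duration_seconds and the go_memstats_*
	// series listed in the deleted go_collector.go. MustRegister panics if
	// a collector with the same descriptors is already registered.
	prometheus.MustRegister(prometheus.NewGoCollector())

	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}
```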
-func (c *goCollector) Collect(ch chan<- Metric) { - c.goroutines.Set(float64(runtime.NumGoroutine())) - ch <- c.goroutines - - var stats debug.GCStats - stats.PauseQuantiles = make([]time.Duration, 5) - debug.ReadGCStats(&stats) - - quantiles := make(map[float64]float64) - for idx, pq := range stats.PauseQuantiles[1:] { - quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() - } - quantiles[0.0] = stats.PauseQuantiles[0].Seconds() - ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) - - ms := &runtime.MemStats{} - runtime.ReadMemStats(ms) - for _, i := range c.metrics { - ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) - } -} - -// memStatsMetrics provide description, value, and value type for memstat metrics. -type memStatsMetrics []struct { - desc *Desc - eval func(*runtime.MemStats) float64 - valType ValueType -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go deleted file mode 100644 index 7a6891089..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ /dev/null @@ -1,448 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "math" - "sort" - "sync/atomic" - - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -// A Histogram counts individual observations from an event or sample stream in -// configurable buckets. Similar to a summary, it also provides a sum of -// observations and an observation count. -// -// On the Prometheus server, quantiles can be calculated from a Histogram using -// the histogram_quantile function in the query language. -// -// Note that Histograms, in contrast to Summaries, can be aggregated with the -// Prometheus query language (see the documentation for detailed -// procedures). However, Histograms require the user to pre-define suitable -// buckets, and they are in general less accurate. The Observe method of a -// Histogram has a very low performance overhead in comparison with the Observe -// method of a Summary. -// -// To create Histogram instances, use NewHistogram. -type Histogram interface { - Metric - Collector - - // Observe adds a single observation to the histogram. - Observe(float64) -} - -// bucketLabel is used for the label that defines the upper bound of a -// bucket of a histogram ("le" -> "less or equal"). -const bucketLabel = "le" - -var ( - // DefBuckets are the default Histogram buckets. The default buckets are - // tailored to broadly measure the response time (in seconds) of a - // network service. Most likely, however, you will be required to define - // buckets customized to your use case. 
- DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} - - errBucketLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in histograms", bucketLabel, - ) -) - -// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest -// bucket has an upper bound of 'start'. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. -// -// The function panics if 'count' is zero or negative. -func LinearBuckets(start, width float64, count int) []float64 { - if count < 1 { - panic("LinearBuckets needs a positive count") - } - buckets := make([]float64, count) - for i := range buckets { - buckets[i] = start - start += width - } - return buckets -} - -// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an -// upper bound of 'start' and each following bucket's upper bound is 'factor' -// times the previous bucket's upper bound. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. -// -// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, -// or if 'factor' is less than or equal 1. -func ExponentialBuckets(start, factor float64, count int) []float64 { - if count < 1 { - panic("ExponentialBuckets needs a positive count") - } - if start <= 0 { - panic("ExponentialBuckets needs a positive start value") - } - if factor <= 1 { - panic("ExponentialBuckets needs a factor greater than 1") - } - buckets := make([]float64, count) - for i := range buckets { - buckets[i] = start - start *= factor - } - return buckets -} - -// HistogramOpts bundles the options for creating a Histogram metric. It is -// mandatory to set Name and Help to a non-empty string. All other fields are -// optional and can safely be left at their zero value. -type HistogramOpts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Histogram (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the Histogram must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this Histogram. Mandatory! - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this - // Histogram. Histograms with the same fully-qualified name must have the - // same label names in their ConstLabels. - // - // Note that in most cases, labels have a value that varies during the - // lifetime of a process. Those labels are usually managed with a - // HistogramVec. ConstLabels serve only special purposes. One is for the - // special case where the value of a label does not change during the - // lifetime of a process, e.g. if the revision of the running binary is - // put into a label. Another, more advanced purpose is if more than one - // Collector needs to collect Histograms with the same fully-qualified - // name. In that case, those Summaries must differ in the values of - // their ConstLabels. See the Collector examples. - // - // If the value of a label never changes (not even between binaries), - // that label most likely should not be a label at all (but part of the - // metric name). 
- ConstLabels Labels - - // Buckets defines the buckets into which observations are counted. Each - // element in the slice is the upper inclusive bound of a bucket. The - // values must be sorted in strictly increasing order. There is no need - // to add a highest bucket with +Inf bound, it will be added - // implicitly. The default value is DefBuckets. - Buckets []float64 -} - -// NewHistogram creates a new Histogram based on the provided HistogramOpts. It -// panics if the buckets in HistogramOpts are not in strictly increasing order. -func NewHistogram(opts HistogramOpts) Histogram { - return newHistogram( - NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), - opts, - ) -} - -func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { - if len(desc.variableLabels) != len(labelValues) { - panic(errInconsistentCardinality) - } - - for _, n := range desc.variableLabels { - if n == bucketLabel { - panic(errBucketLabelNotAllowed) - } - } - for _, lp := range desc.constLabelPairs { - if lp.GetName() == bucketLabel { - panic(errBucketLabelNotAllowed) - } - } - - if len(opts.Buckets) == 0 { - opts.Buckets = DefBuckets - } - - h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - labelPairs: makeLabelPairs(desc, labelValues), - } - for i, upperBound := range h.upperBounds { - if i < len(h.upperBounds)-1 { - if upperBound >= h.upperBounds[i+1] { - panic(fmt.Errorf( - "histogram buckets must be in increasing order: %f >= %f", - upperBound, h.upperBounds[i+1], - )) - } - } else { - if math.IsInf(upperBound, +1) { - // The +Inf bucket is implicit. Remove it here. - h.upperBounds = h.upperBounds[:i] - } - } - } - // Finally we know the final length of h.upperBounds and can make counts. - h.counts = make([]uint64, len(h.upperBounds)) - - h.Init(h) // Init self-collection. - return h -} - -type histogram struct { - // sumBits contains the bits of the float64 representing the sum of all - // observations. sumBits and count have to go first in the struct to - // guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - sumBits uint64 - count uint64 - - SelfCollector - // Note that there is no mutex required. - - desc *Desc - - upperBounds []float64 - counts []uint64 - - labelPairs []*dto.LabelPair -} - -func (h *histogram) Desc() *Desc { - return h.desc -} - -func (h *histogram) Observe(v float64) { - // TODO(beorn7): For small numbers of buckets (<30), a linear search is - // slightly faster than the binary search. If we really care, we could - // switch from one search strategy to the other depending on the number - // of buckets. 
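Putting NewHistogram, HistogramOpts, and the bucket helpers together, a hedged sketch of a latency histogram (the metric name, bucket layout, and port are illustrative choices, not recommendations):

```go
package main

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
	Namespace: "my_company",
	Subsystem: "api",
	Name:      "request_duration_seconds",
	Help:      "Request latency in seconds.",
	// 10 buckets from 5ms, doubling up to ~2.56s; +Inf is added implicitly.
	Buckets: prometheus.ExponentialBuckets(0.005, 2, 10),
})

func init() {
	prometheus.MustRegister(requestDuration)
}

func handler(w http.ResponseWriter, r *http.Request) {
	start := time.Now()
	defer func() {
		requestDuration.Observe(time.Since(start).Seconds())
	}()
	w.Write([]byte("ok"))
}

func main() {
	http.HandleFunc("/", handler)
	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}
```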
- // - // Microbenchmarks (BenchmarkHistogramNoLabels): - // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op - // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op - // 300 buckets: 154 ns/op linear - binary 61.6 ns/op - i := sort.SearchFloat64s(h.upperBounds, v) - if i < len(h.counts) { - atomic.AddUint64(&h.counts[i], 1) - } - atomic.AddUint64(&h.count, 1) - for { - oldBits := atomic.LoadUint64(&h.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { - break - } - } -} - -func (h *histogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} - buckets := make([]*dto.Bucket, len(h.upperBounds)) - - his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) - his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) - var count uint64 - for i, upperBound := range h.upperBounds { - count += atomic.LoadUint64(&h.counts[i]) - buckets[i] = &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(upperBound), - } - } - his.Bucket = buckets - out.Histogram = his - out.Label = h.labelPairs - return nil -} - -// HistogramVec is a Collector that bundles a set of Histograms that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. HTTP request latencies, partitioned by status code and method). Create -// instances with NewHistogramVec. -type HistogramVec struct { - MetricVec -} - -// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and -// partitioned by the given label names. At least one label name must be -// provided. -func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &HistogramVec{ - MetricVec: MetricVec{ - children: map[uint64]Metric{}, - desc: desc, - newMetric: func(lvs ...string) Metric { - return newHistogram(desc, opts, lvs...) - }, - }, - } -} - -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Histogram and not a -// Metric so that no type conversion is required. -func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Histogram), err - } - return nil, err -} - -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Histogram and not a Metric so that no -// type conversion is required. -func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) { - metric, err := m.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Histogram), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) -func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram { - return m.MetricVec.WithLabelValues(lvs...).(Histogram) -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. 
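And the vector form, partitioned by labels in the same way as CounterVec and GaugeVec (label names and bucket layout chosen for the example):

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var handlerLatency = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name:    "handler_duration_seconds",
		Help:    "Handler latency, partitioned by handler name and method.",
		Buckets: prometheus.LinearBuckets(0.01, 0.01, 20), // 10ms .. 200ms
	},
	[]string{"handler", "method"},
)

func init() {
	prometheus.MustRegister(handlerLatency)
}

func observe(handler, method string, d time.Duration) {
	// WithLabelValues panics on a label mismatch; GetMetricWithLabelValues
	// is the error-returning variant described above.
	handlerLatency.WithLabelValues(handler, method).Observe(d.Seconds())
}

func main() {
	observe("search", "GET", 42*time.Millisecond)
}
```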
By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (m *HistogramVec) With(labels Labels) Histogram { - return m.MetricVec.With(labels).(Histogram) -} - -type constHistogram struct { - desc *Desc - count uint64 - sum float64 - buckets map[float64]uint64 - labelPairs []*dto.LabelPair -} - -func (h *constHistogram) Desc() *Desc { - return h.desc -} - -func (h *constHistogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} - buckets := make([]*dto.Bucket, 0, len(h.buckets)) - - his.SampleCount = proto.Uint64(h.count) - his.SampleSum = proto.Float64(h.sum) - - for upperBound, count := range h.buckets { - buckets = append(buckets, &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(upperBound), - }) - } - - if len(buckets) > 0 { - sort.Sort(buckSort(buckets)) - } - his.Bucket = buckets - - out.Histogram = his - out.Label = h.labelPairs - - return nil -} - -// NewConstHistogram returns a metric representing a Prometheus histogram with -// fixed values for the count, sum, and bucket counts. As those parameters -// cannot be changed, the returned value does not implement the Histogram -// interface (but only the Metric interface). Users of this package will not -// have much use for it in regular operations. However, when implementing custom -// Collectors, it is useful as a throw-away metric that is generated on the fly -// to send it to Prometheus in the Collect method. -// -// buckets is a map of upper bounds to cumulative counts, excluding the +Inf -// bucket. -// -// NewConstHistogram returns an error if the length of labelValues is not -// consistent with the variable labels in Desc. -func NewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) (Metric, error) { - if len(desc.variableLabels) != len(labelValues) { - return nil, errInconsistentCardinality - } - return &constHistogram{ - desc: desc, - count: count, - sum: sum, - buckets: buckets, - labelPairs: makeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstHistogram is a version of NewConstHistogram that panics where -// NewConstMetric would have returned an error. -func MustNewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) Metric { - m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) - if err != nil { - panic(err) - } - return m -} - -type buckSort []*dto.Bucket - -func (s buckSort) Len() int { - return len(s) -} - -func (s buckSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s buckSort) Less(i, j int) bool { - return s[i].GetUpperBound() < s[j].GetUpperBound() -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go deleted file mode 100644 index e078e3ed1..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/http.go +++ /dev/null @@ -1,381 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "bufio" - "io" - "net" - "net/http" - "strconv" - "strings" - "time" -) - -var instLabels = []string{"method", "code"} - -type nower interface { - Now() time.Time -} - -type nowFunc func() time.Time - -func (n nowFunc) Now() time.Time { - return n() -} - -var now nower = nowFunc(func() time.Time { - return time.Now() -}) - -func nowSeries(t ...time.Time) nower { - return nowFunc(func() time.Time { - defer func() { - t = t[1:] - }() - - return t[0] - }) -} - -// InstrumentHandler wraps the given HTTP handler for instrumentation. It -// registers four metric collectors (if not already done) and reports HTTP -// metrics to the (newly or already) registered collectors: http_requests_total -// (CounterVec), http_request_duration_microseconds (Summary), -// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each -// has a constant label named "handler" with the provided handlerName as -// value. http_requests_total is a metric vector partitioned by HTTP method -// (label name "method") and HTTP status code (label name "code"). -// -// Note that InstrumentHandler has several issues: -// -// - It uses Summaries rather than Histograms. Summaries are not useful if -// aggregation across multiple instances is required. -// -// - It uses microseconds as unit, which is deprecated and should be replaced by -// seconds. -// -// - The size of the request is calculated in a separate goroutine. Since this -// calculator requires access to the request header, it creates a race with -// any writes to the header performed during request handling. -// httputil.ReverseProxy is a prominent example for a handler -// performing such writes. -// -// Upcoming versions of this package will provide ways of instrumenting HTTP -// handlers that are more flexible and have fewer issues. Consider this function -// DEPRECATED and prefer direct instrumentation in the meantime. -func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) -} - -// InstrumentHandlerFunc wraps the given function for instrumentation. It -// otherwise works in the same way as InstrumentHandler (and shares the same -// issues). -func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - return InstrumentHandlerFuncWithOpts( - SummaryOpts{ - Subsystem: "http", - ConstLabels: Labels{"handler": handlerName}, - }, - handlerFunc, - ) -} - -// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same -// issues) but provides more flexibility (at the cost of a more complex call -// syntax). As InstrumentHandler, this function registers four metric -// collectors, but it uses the provided SummaryOpts to create them. However, the -// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced -// by "requests_total", "request_duration_microseconds", "request_size_bytes", -// and "response_size_bytes", respectively. "Help" is replaced by an appropriate -// help string. The names of the variable labels of the http_requests_total -// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). 
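Even though the deleted http.go itself flags InstrumentHandler as deprecated in favour of direct instrumentation, a one-call usage sketch may still help readers of this diff (the handler name "api" is invented):

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Wraps the handler and registers http_requests_total,
	// http_request_duration_microseconds, http_request_size_bytes, and
	// http_response_size_bytes, all with a constant handler="api" label.
	http.Handle("/api", prometheus.InstrumentHandler("api", api))

	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}
```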
-// -// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the -// behavior of InstrumentHandler: -// -// prometheus.InstrumentHandlerWithOpts( -// prometheus.SummaryOpts{ -// Subsystem: "http", -// ConstLabels: prometheus.Labels{"handler": handlerName}, -// }, -// handler, -// ) -// -// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it -// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, -// and all its fields are set to the equally named fields in the provided -// SummaryOpts. -func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { - return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) -} - -// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares -// the same issues) but provides more flexibility (at the cost of a more complex -// call syntax). See InstrumentHandlerWithOpts for details how the provided -// SummaryOpts are used. -func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { - reqCnt := NewCounterVec( - CounterOpts{ - Namespace: opts.Namespace, - Subsystem: opts.Subsystem, - Name: "requests_total", - Help: "Total number of HTTP requests made.", - ConstLabels: opts.ConstLabels, - }, - instLabels, - ) - - opts.Name = "request_duration_microseconds" - opts.Help = "The HTTP request latencies in microseconds." - reqDur := NewSummary(opts) - - opts.Name = "request_size_bytes" - opts.Help = "The HTTP request sizes in bytes." - reqSz := NewSummary(opts) - - opts.Name = "response_size_bytes" - opts.Help = "The HTTP response sizes in bytes." - resSz := NewSummary(opts) - - regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec) - regReqDur := MustRegisterOrGet(reqDur).(Summary) - regReqSz := MustRegisterOrGet(reqSz).(Summary) - regResSz := MustRegisterOrGet(resSz).(Summary) - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - - delegate := &responseWriterDelegator{ResponseWriter: w} - out := make(chan int) - urlLen := 0 - if r.URL != nil { - urlLen = len(r.URL.String()) - } - go computeApproximateRequestSize(r, out, urlLen) - - _, cn := w.(http.CloseNotifier) - _, fl := w.(http.Flusher) - _, hj := w.(http.Hijacker) - _, rf := w.(io.ReaderFrom) - var rw http.ResponseWriter - if cn && fl && hj && rf { - rw = &fancyResponseWriterDelegator{delegate} - } else { - rw = delegate - } - handlerFunc(rw, r) - - elapsed := float64(time.Since(now)) / float64(time.Microsecond) - - method := sanitizeMethod(r.Method) - code := sanitizeCode(delegate.status) - regReqCnt.WithLabelValues(method, code).Inc() - regReqDur.Observe(elapsed) - regResSz.Observe(float64(delegate.written)) - regReqSz.Observe(float64(<-out)) - }) -} - -func computeApproximateRequestSize(r *http.Request, out chan int, s int) { - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) - } - } - s += len(r.Host) - - // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
- - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - out <- s -} - -type responseWriterDelegator struct { - http.ResponseWriter - - handler, method string - status int - written int64 - wroteHeader bool -} - -func (r *responseWriterDelegator) WriteHeader(code int) { - r.status = code - r.wroteHeader = true - r.ResponseWriter.WriteHeader(code) -} - -func (r *responseWriterDelegator) Write(b []byte) (int, error) { - if !r.wroteHeader { - r.WriteHeader(http.StatusOK) - } - n, err := r.ResponseWriter.Write(b) - r.written += int64(n) - return n, err -} - -type fancyResponseWriterDelegator struct { - *responseWriterDelegator -} - -func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { - return f.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -func (f *fancyResponseWriterDelegator) Flush() { - f.ResponseWriter.(http.Flusher).Flush() -} - -func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return f.ResponseWriter.(http.Hijacker).Hijack() -} - -func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { - if !f.wroteHeader { - f.WriteHeader(http.StatusOK) - } - n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) - f.written += n - return n, err -} - -func sanitizeMethod(m string) string { - switch m { - case "GET", "get": - return "get" - case "PUT", "put": - return "put" - case "HEAD", "head": - return "head" - case "POST", "post": - return "post" - case "DELETE", "delete": - return "delete" - case "CONNECT", "connect": - return "connect" - case "OPTIONS", "options": - return "options" - case "NOTIFY", "notify": - return "notify" - default: - return strings.ToLower(m) - } -} - -func sanitizeCode(s int) string { - switch s { - case 100: - return "100" - case 101: - return "101" - - case 200: - return "200" - case 201: - return "201" - case 202: - return "202" - case 203: - return "203" - case 204: - return "204" - case 205: - return "205" - case 206: - return "206" - - case 300: - return "300" - case 301: - return "301" - case 302: - return "302" - case 304: - return "304" - case 305: - return "305" - case 307: - return "307" - - case 400: - return "400" - case 401: - return "401" - case 402: - return "402" - case 403: - return "403" - case 404: - return "404" - case 405: - return "405" - case 406: - return "406" - case 407: - return "407" - case 408: - return "408" - case 409: - return "409" - case 410: - return "410" - case 411: - return "411" - case 412: - return "412" - case 413: - return "413" - case 414: - return "414" - case 415: - return "415" - case 416: - return "416" - case 417: - return "417" - case 418: - return "418" - - case 500: - return "500" - case 501: - return "501" - case 502: - return "502" - case 503: - return "503" - case 504: - return "504" - case 505: - return "505" - - case 428: - return "428" - case 429: - return "429" - case 431: - return "431" - case 511: - return "511" - - default: - return strconv.Itoa(s) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go deleted file mode 100644 index 86fd81c10..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "strings" - - dto "github.com/prometheus/client_model/go" -) - -const separatorByte byte = 255 - -// A Metric models a single sample value with its meta data being exported to -// Prometheus. Implementers of Metric in this package inclued Gauge, Counter, -// Untyped, and Summary. Users can implement their own Metric types, but that -// should be rarely needed. See the example for SelfCollector, which is also an -// example for a user-implemented Metric. -type Metric interface { - // Desc returns the descriptor for the Metric. This method idempotently - // returns the same descriptor throughout the lifetime of the - // Metric. The returned descriptor is immutable by contract. A Metric - // unable to describe itself must return an invalid descriptor (created - // with NewInvalidDesc). - Desc() *Desc - // Write encodes the Metric into a "Metric" Protocol Buffer data - // transmission object. - // - // Implementers of custom Metric types must observe concurrency safety - // as reads of this metric may occur at any time, and any blocking - // occurs at the expense of total performance of rendering all - // registered metrics. Ideally Metric implementations should support - // concurrent readers. - // - // The Prometheus client library attempts to minimize memory allocations - // and will provide a pre-existing reset dto.Metric pointer. Prometheus - // may recycle the dto.Metric proto message, so Metric implementations - // should just populate the provided dto.Metric and then should not keep - // any reference to it. - // - // While populating dto.Metric, labels must be sorted lexicographically. - // (Implementers may find LabelPairSorter useful for that.) - Write(*dto.Metric) error -} - -// Opts bundles the options for creating most Metric types. Each metric -// implementation XXX has its own XXXOpts type, but in most cases, it is just be -// an alias of this type (which might change when the requirement arises.) -// -// It is mandatory to set Name and Help to a non-empty string. All other fields -// are optional and can safely be left at their zero value. -type Opts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Metric (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the metric must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this metric. Mandatory! - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // Note that in most cases, labels have a value that varies during the - // lifetime of a process. Those labels are usually managed with a metric - // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels - // serve only special purposes. 
One is for the special case where the - // value of a label does not change during the lifetime of a process, - // e.g. if the revision of the running binary is put into a - // label. Another, more advanced purpose is if more than one Collector - // needs to collect Metrics with the same fully-qualified name. In that - // case, those Metrics must differ in the values of their - // ConstLabels. See the Collector examples. - // - // If the value of a label never changes (not even between binaries), - // that label most likely should not be a label at all (but part of the - // metric name). - ConstLabels Labels -} - -// BuildFQName joins the given three name components by "_". Empty name -// components are ignored. If the name parameter itself is empty, an empty -// string is returned, no matter what. Metric implementations included in this -// library use this function internally to generate the fully-qualified metric -// name from the name component in their Opts. Users of the library will only -// need this function if they implement their own Metric or instantiate a Desc -// (with NewDesc) directly. -func BuildFQName(namespace, subsystem, name string) string { - if name == "" { - return "" - } - switch { - case namespace != "" && subsystem != "": - return strings.Join([]string{namespace, subsystem, name}, "_") - case namespace != "": - return strings.Join([]string{namespace, name}, "_") - case subsystem != "": - return strings.Join([]string{subsystem, name}, "_") - } - return name -} - -// LabelPairSorter implements sort.Interface. It is used to sort a slice of -// dto.LabelPair pointers. This is useful for implementing the Write method of -// custom metrics. -type LabelPairSorter []*dto.LabelPair - -func (s LabelPairSorter) Len() int { - return len(s) -} - -func (s LabelPairSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s LabelPairSorter) Less(i, j int) bool { - return s[i].GetName() < s[j].GetName() -} - -type hashSorter []uint64 - -func (s hashSorter) Len() int { - return len(s) -} - -func (s hashSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s hashSorter) Less(i, j int) bool { - return s[i] < s[j] -} - -type invalidMetric struct { - desc *Desc - err error -} - -// NewInvalidMetric returns a metric whose Write method always returns the -// provided error. It is useful if a Collector finds itself unable to collect -// a metric and wishes to report an error to the registry. -func NewInvalidMetric(desc *Desc, err error) Metric { - return &invalidMetric{desc, err} -} - -func (m *invalidMetric) Desc() *Desc { return m.desc } - -func (m *invalidMetric) Write(*dto.Metric) error { return m.err } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go deleted file mode 100644 index d8cf0eda3..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import "github.com/prometheus/procfs" - -type processCollector struct { - pid int - collectFn func(chan<- Metric) - pidFn func() (int, error) - cpuTotal Counter - openFDs, maxFDs Gauge - vsize, rss Gauge - startTime Gauge -} - -// NewProcessCollector returns a collector which exports the current state of -// process metrics including cpu, memory and file descriptor usage as well as -// the process start time for the given process id under the given namespace. -func NewProcessCollector(pid int, namespace string) *processCollector { - return NewProcessCollectorPIDFn( - func() (int, error) { return pid, nil }, - namespace, - ) -} - -// NewProcessCollectorPIDFn returns a collector which exports the current state -// of process metrics including cpu, memory and file descriptor usage as well -// as the process start time under the given namespace. The given pidFn is -// called on each collect and is used to determine the process to export -// metrics for. -func NewProcessCollectorPIDFn( - pidFn func() (int, error), - namespace string, -) *processCollector { - c := processCollector{ - pidFn: pidFn, - collectFn: func(chan<- Metric) {}, - - cpuTotal: NewCounter(CounterOpts{ - Namespace: namespace, - Name: "process_cpu_seconds_total", - Help: "Total user and system CPU time spent in seconds.", - }), - openFDs: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_open_fds", - Help: "Number of open file descriptors.", - }), - maxFDs: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_max_fds", - Help: "Maximum number of open file descriptors.", - }), - vsize: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_virtual_memory_bytes", - Help: "Virtual memory size in bytes.", - }), - rss: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_resident_memory_bytes", - Help: "Resident memory size in bytes.", - }), - startTime: NewGauge(GaugeOpts{ - Namespace: namespace, - Name: "process_start_time_seconds", - Help: "Start time of the process since unix epoch in seconds.", - }), - } - - // Set up process metric collection if supported by the runtime. - if _, err := procfs.NewStat(); err == nil { - c.collectFn = c.processCollect - } - - return &c -} - -// Describe returns all descriptions of the collector. -func (c *processCollector) Describe(ch chan<- *Desc) { - ch <- c.cpuTotal.Desc() - ch <- c.openFDs.Desc() - ch <- c.maxFDs.Desc() - ch <- c.vsize.Desc() - ch <- c.rss.Desc() - ch <- c.startTime.Desc() -} - -// Collect returns the current state of all metrics of the collector. -func (c *processCollector) Collect(ch chan<- Metric) { - c.collectFn(ch) -} - -// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the -// client allows users to configure the error behavior. 
-func (c *processCollector) processCollect(ch chan<- Metric) { - pid, err := c.pidFn() - if err != nil { - return - } - - p, err := procfs.NewProc(pid) - if err != nil { - return - } - - if stat, err := p.NewStat(); err == nil { - c.cpuTotal.Set(stat.CPUTime()) - ch <- c.cpuTotal - c.vsize.Set(float64(stat.VirtualMemory())) - ch <- c.vsize - c.rss.Set(float64(stat.ResidentMemory())) - ch <- c.rss - - if startTime, err := stat.StartTime(); err == nil { - c.startTime.Set(startTime) - ch <- c.startTime - } - } - - if fds, err := p.FileDescriptorsLen(); err == nil { - c.openFDs.Set(float64(fds)) - ch <- c.openFDs - } - - if limits, err := p.NewLimits(); err == nil { - c.maxFDs.Set(float64(limits.OpenFiles)) - ch <- c.maxFDs - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push.go b/vendor/github.com/prometheus/client_golang/prometheus/push.go deleted file mode 100644 index 5ec0a3ab3..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/push.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Copyright (c) 2013, The Prometheus Authors -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - -package prometheus - -// Push triggers a metric collection by the default registry and pushes all -// collected metrics to the Pushgateway specified by url. See the Pushgateway -// documentation for detailed implications of the job and instance -// parameter. instance can be left empty. You can use just host:port or ip:port -// as url, in which case 'http://' is added automatically. You can also include -// the schema in the URL. However, do not include the '/metrics/jobs/...' part. -// -// Note that all previously pushed metrics with the same job and instance will -// be replaced with the metrics pushed by this call. (It uses HTTP method 'PUT' -// to push to the Pushgateway.) -func Push(job, instance, url string) error { - return defRegistry.Push(job, instance, url, "PUT") -} - -// PushAdd works like Push, but only previously pushed metrics with the same -// name (and the same job and instance) will be replaced. (It uses HTTP method -// 'POST' to push to the Pushgateway.) -func PushAdd(job, instance, url string) error { - return defRegistry.Push(job, instance, url, "POST") -} - -// PushCollectors works like Push, but it does not collect from the default -// registry. Instead, it collects from the provided collectors. It is a -// convenient way to push only a few metrics. -func PushCollectors(job, instance, url string, collectors ...Collector) error { - return pushCollectors(job, instance, url, "PUT", collectors...) -} - -// PushAddCollectors works like PushAdd, but it does not collect from the -// default registry. Instead, it collects from the provided collectors. It is a -// convenient way to push only a few metrics. 
-func PushAddCollectors(job, instance, url string, collectors ...Collector) error { - return pushCollectors(job, instance, url, "POST", collectors...) -} - -func pushCollectors(job, instance, url, method string, collectors ...Collector) error { - r := newRegistry() - for _, collector := range collectors { - if _, err := r.Register(collector); err != nil { - return err - } - } - return r.Push(job, instance, url, method) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go deleted file mode 100644 index f6ae51bed..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ /dev/null @@ -1,741 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Copyright (c) 2013, The Prometheus Authors -// All rights reserved. -// -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - -package prometheus - -import ( - "bytes" - "compress/gzip" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "os" - "sort" - "strings" - "sync" - - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/expfmt" - - dto "github.com/prometheus/client_model/go" -) - -var ( - defRegistry = newDefaultRegistry() - errAlreadyReg = errors.New("duplicate metrics collector registration attempted") -) - -// Constants relevant to the HTTP interface. -const ( - // APIVersion is the version of the format of the exported data. This - // will match this library's version, which subscribes to the Semantic - // Versioning scheme. - APIVersion = "0.0.4" - - // DelimitedTelemetryContentType is the content type set on telemetry - // data responses in delimited protobuf format. - DelimitedTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited` - // TextTelemetryContentType is the content type set on telemetry data - // responses in text format. - TextTelemetryContentType = `text/plain; version=` + APIVersion - // ProtoTextTelemetryContentType is the content type set on telemetry - // data responses in protobuf text format. (Only used for debugging.) - ProtoTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text` - // ProtoCompactTextTelemetryContentType is the content type set on - // telemetry data responses in protobuf compact text format. (Only used - // for debugging.) - ProtoCompactTextTelemetryContentType = `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text` - - // Constants for object pools. - numBufs = 4 - numMetricFamilies = 1000 - numMetrics = 10000 - - // Capacity for the channel to collect metrics and descriptors. 
- capMetricChan = 1000 - capDescChan = 10 - - contentTypeHeader = "Content-Type" - contentLengthHeader = "Content-Length" - contentEncodingHeader = "Content-Encoding" - - acceptEncodingHeader = "Accept-Encoding" - acceptHeader = "Accept" -) - -// Handler returns the HTTP handler for the global Prometheus registry. It is -// already instrumented with InstrumentHandler (using "prometheus" as handler -// name). Usually the handler is used to handle the "/metrics" endpoint. -// -// Please note the issues described in the doc comment of InstrumentHandler. You -// might want to consider using UninstrumentedHandler instead. -func Handler() http.Handler { - return InstrumentHandler("prometheus", defRegistry) -} - -// UninstrumentedHandler works in the same way as Handler, but the returned HTTP -// handler is not instrumented. This is useful if no instrumentation is desired -// (for whatever reason) or if the instrumentation has to happen with a -// different handler name (or with a different instrumentation approach -// altogether). See the InstrumentHandler example. -func UninstrumentedHandler() http.Handler { - return defRegistry -} - -// Register registers a new Collector to be included in metrics collection. It -// returns an error if the descriptors provided by the Collector are invalid or -// if they - in combination with descriptors of already registered Collectors - -// do not fulfill the consistency and uniqueness criteria described in the Desc -// documentation. -// -// Do not register the same Collector multiple times concurrently. (Registering -// the same Collector twice would result in an error anyway, but on top of that, -// it is not safe to do so concurrently.) -func Register(m Collector) error { - _, err := defRegistry.Register(m) - return err -} - -// MustRegister works like Register but panics where Register would have -// returned an error. MustRegister is also Variadic, where Register only -// accepts a single Collector to register. -func MustRegister(m ...Collector) { - for i := range m { - if err := Register(m[i]); err != nil { - panic(err) - } - } -} - -// RegisterOrGet works like Register but does not return an error if a Collector -// is registered that equals a previously registered Collector. (Two Collectors -// are considered equal if their Describe method yields the same set of -// descriptors.) Instead, the previously registered Collector is returned (which -// is helpful if the new and previously registered Collectors are equal but not -// identical, i.e. not pointers to the same object). -// -// As for Register, it is still not safe to call RegisterOrGet with the same -// Collector multiple times concurrently. -func RegisterOrGet(m Collector) (Collector, error) { - return defRegistry.RegisterOrGet(m) -} - -// MustRegisterOrGet works like Register but panics where RegisterOrGet would -// have returned an error. -func MustRegisterOrGet(m Collector) Collector { - existing, err := RegisterOrGet(m) - if err != nil { - panic(err) - } - return existing -} - -// Unregister unregisters the Collector that equals the Collector passed in as -// an argument. (Two Collectors are considered equal if their Describe method -// yields the same set of descriptors.) The function returns whether a Collector -// was unregistered. -func Unregister(c Collector) bool { - return defRegistry.Unregister(c) -} - -// SetMetricFamilyInjectionHook sets a function that is called whenever metrics -// are collected. The hook function must be set before metrics collection begins -// (i.e. 
call SetMetricFamilyInjectionHook before setting the HTTP handler.) The -// MetricFamily protobufs returned by the hook function are merged with the -// metrics collected in the usual way. -// -// This is a way to directly inject MetricFamily protobufs managed and owned by -// the caller. The caller has full responsibility. As no registration of the -// injected metrics has happened, there is no descriptor to check against, and -// there are no registration-time checks. If collect-time checks are disabled -// (see function EnableCollectChecks), no sanity checks are performed on the -// returned protobufs at all. If collect-checks are enabled, type and uniqueness -// checks are performed, but no further consistency checks (which would require -// knowledge of a metric descriptor). -// -// Sorting concerns: The caller is responsible for sorting the label pairs in -// each metric. However, the order of metrics will be sorted by the registry as -// it is required anyway after merging with the metric families collected -// conventionally. -// -// The function must be callable at any time and concurrently. -func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) { - defRegistry.metricFamilyInjectionHook = hook -} - -// PanicOnCollectError sets the behavior whether a panic is caused upon an error -// while metrics are collected and served to the HTTP endpoint. By default, an -// internal server error (status code 500) is served with an error message. -func PanicOnCollectError(b bool) { - defRegistry.panicOnCollectError = b -} - -// EnableCollectChecks enables (or disables) additional consistency checks -// during metrics collection. These additional checks are not enabled by default -// because they inflict a performance penalty and the errors they check for can -// only happen if the used Metric and Collector types have internal programming -// errors. It can be helpful to enable these checks while working with custom -// Collectors or Metrics whose correctness is not well established yet. -func EnableCollectChecks(b bool) { - defRegistry.collectChecksEnabled = b -} - -// encoder is a function that writes a dto.MetricFamily to an io.Writer in a -// certain encoding. It returns the number of bytes written and any error -// encountered. Note that pbutil.WriteDelimited and pbutil.MetricFamilyToText -// are encoders. -type encoder func(io.Writer, *dto.MetricFamily) (int, error) - -type registry struct { - mtx sync.RWMutex - collectorsByID map[uint64]Collector // ID is a hash of the descIDs. - descIDs map[uint64]struct{} - dimHashesByName map[string]uint64 - bufPool chan *bytes.Buffer - metricFamilyPool chan *dto.MetricFamily - metricPool chan *dto.Metric - metricFamilyInjectionHook func() []*dto.MetricFamily - - panicOnCollectError, collectChecksEnabled bool -} - -func (r *registry) Register(c Collector) (Collector, error) { - descChan := make(chan *Desc, capDescChan) - go func() { - c.Describe(descChan) - close(descChan) - }() - - newDescIDs := map[uint64]struct{}{} - newDimHashesByName := map[string]uint64{} - var collectorID uint64 // Just a sum of all desc IDs. - var duplicateDescErr error - - r.mtx.Lock() - defer r.mtx.Unlock() - // Coduct various tests... - for desc := range descChan { - - // Is the descriptor valid at all? - if desc.err != nil { - return c, fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) - } - - // Is the descID unique? - // (In other words: Is the fqName + constLabel combination unique?) 
- if _, exists := r.descIDs[desc.id]; exists { - duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) - } - // If it is not a duplicate desc in this collector, add it to - // the collectorID. (We allow duplicate descs within the same - // collector, but their existence must be a no-op.) - if _, exists := newDescIDs[desc.id]; !exists { - newDescIDs[desc.id] = struct{}{} - collectorID += desc.id - } - - // Are all the label names and the help string consistent with - // previous descriptors of the same name? - // First check existing descriptors... - if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return nil, fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) - } - } else { - // ...then check the new descriptors already seen. - if dimHash, exists := newDimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return nil, fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) - } - } else { - newDimHashesByName[desc.fqName] = desc.dimHash - } - } - } - // Did anything happen at all? - if len(newDescIDs) == 0 { - return nil, errors.New("collector has no descriptors") - } - if existing, exists := r.collectorsByID[collectorID]; exists { - return existing, errAlreadyReg - } - // If the collectorID is new, but at least one of the descs existed - // before, we are in trouble. - if duplicateDescErr != nil { - return nil, duplicateDescErr - } - - // Only after all tests have passed, actually register. - r.collectorsByID[collectorID] = c - for hash := range newDescIDs { - r.descIDs[hash] = struct{}{} - } - for name, dimHash := range newDimHashesByName { - r.dimHashesByName[name] = dimHash - } - return c, nil -} - -func (r *registry) RegisterOrGet(m Collector) (Collector, error) { - existing, err := r.Register(m) - if err != nil && err != errAlreadyReg { - return nil, err - } - return existing, nil -} - -func (r *registry) Unregister(c Collector) bool { - descChan := make(chan *Desc, capDescChan) - go func() { - c.Describe(descChan) - close(descChan) - }() - - descIDs := map[uint64]struct{}{} - var collectorID uint64 // Just a sum of the desc IDs. - for desc := range descChan { - if _, exists := descIDs[desc.id]; !exists { - collectorID += desc.id - descIDs[desc.id] = struct{}{} - } - } - - r.mtx.RLock() - if _, exists := r.collectorsByID[collectorID]; !exists { - r.mtx.RUnlock() - return false - } - r.mtx.RUnlock() - - r.mtx.Lock() - defer r.mtx.Unlock() - - delete(r.collectorsByID, collectorID) - for id := range descIDs { - delete(r.descIDs, id) - } - // dimHashesByName is left untouched as those must be consistent - // throughout the lifetime of a program. 
- return true -} - -func (r *registry) Push(job, instance, pushURL, method string) error { - if !strings.Contains(pushURL, "://") { - pushURL = "http://" + pushURL - } - if strings.HasSuffix(pushURL, "/") { - pushURL = pushURL[:len(pushURL)-1] - } - pushURL = fmt.Sprintf("%s/metrics/jobs/%s", pushURL, url.QueryEscape(job)) - if instance != "" { - pushURL += "/instances/" + url.QueryEscape(instance) - } - buf := r.getBuf() - defer r.giveBuf(buf) - if err := r.writePB(expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)); err != nil { - if r.panicOnCollectError { - panic(err) - } - return err - } - req, err := http.NewRequest(method, pushURL, buf) - if err != nil { - return err - } - req.Header.Set(contentTypeHeader, DelimitedTelemetryContentType) - resp, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != 202 { - return fmt.Errorf("unexpected status code %d while pushing to %s", resp.StatusCode, pushURL) - } - return nil -} - -func (r *registry) ServeHTTP(w http.ResponseWriter, req *http.Request) { - contentType := expfmt.Negotiate(req.Header) - buf := r.getBuf() - defer r.giveBuf(buf) - writer, encoding := decorateWriter(req, buf) - if err := r.writePB(expfmt.NewEncoder(writer, contentType)); err != nil { - if r.panicOnCollectError { - panic(err) - } - http.Error(w, "An error has occurred:\n\n"+err.Error(), http.StatusInternalServerError) - return - } - if closer, ok := writer.(io.Closer); ok { - closer.Close() - } - header := w.Header() - header.Set(contentTypeHeader, string(contentType)) - header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) - if encoding != "" { - header.Set(contentEncodingHeader, encoding) - } - w.Write(buf.Bytes()) -} - -func (r *registry) writePB(encoder expfmt.Encoder) error { - var metricHashes map[uint64]struct{} - if r.collectChecksEnabled { - metricHashes = make(map[uint64]struct{}) - } - metricChan := make(chan Metric, capMetricChan) - wg := sync.WaitGroup{} - - r.mtx.RLock() - metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) - - // Scatter. - // (Collectors could be complex and slow, so we call them all at once.) - wg.Add(len(r.collectorsByID)) - go func() { - wg.Wait() - close(metricChan) - }() - for _, collector := range r.collectorsByID { - go func(collector Collector) { - defer wg.Done() - collector.Collect(metricChan) - }(collector) - } - r.mtx.RUnlock() - - // Drain metricChan in case of premature return. - defer func() { - for _ = range metricChan { - } - }() - - // Gather. - for metric := range metricChan { - // This could be done concurrently, too, but it required locking - // of metricFamiliesByName (and of metricHashes if checks are - // enabled). Most likely not worth it. - desc := metric.Desc() - metricFamily, ok := metricFamiliesByName[desc.fqName] - if !ok { - metricFamily = r.getMetricFamily() - defer r.giveMetricFamily(metricFamily) - metricFamily.Name = proto.String(desc.fqName) - metricFamily.Help = proto.String(desc.help) - metricFamiliesByName[desc.fqName] = metricFamily - } - dtoMetric := r.getMetric() - defer r.giveMetric(dtoMetric) - if err := metric.Write(dtoMetric); err != nil { - // TODO: Consider different means of error reporting so - // that a single erroneous metric could be skipped - // instead of blowing up the whole collection. - return fmt.Errorf("error collecting metric %v: %s", desc, err) - } - switch { - case metricFamily.Type != nil: - // Type already set. We are good. 
- case dtoMetric.Gauge != nil: - metricFamily.Type = dto.MetricType_GAUGE.Enum() - case dtoMetric.Counter != nil: - metricFamily.Type = dto.MetricType_COUNTER.Enum() - case dtoMetric.Summary != nil: - metricFamily.Type = dto.MetricType_SUMMARY.Enum() - case dtoMetric.Untyped != nil: - metricFamily.Type = dto.MetricType_UNTYPED.Enum() - case dtoMetric.Histogram != nil: - metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() - default: - return fmt.Errorf("empty metric collected: %s", dtoMetric) - } - if r.collectChecksEnabled { - if err := r.checkConsistency(metricFamily, dtoMetric, desc, metricHashes); err != nil { - return err - } - } - metricFamily.Metric = append(metricFamily.Metric, dtoMetric) - } - - if r.metricFamilyInjectionHook != nil { - for _, mf := range r.metricFamilyInjectionHook() { - existingMF, exists := metricFamiliesByName[mf.GetName()] - if !exists { - metricFamiliesByName[mf.GetName()] = mf - if r.collectChecksEnabled { - for _, m := range mf.Metric { - if err := r.checkConsistency(mf, m, nil, metricHashes); err != nil { - return err - } - } - } - continue - } - for _, m := range mf.Metric { - if r.collectChecksEnabled { - if err := r.checkConsistency(existingMF, m, nil, metricHashes); err != nil { - return err - } - } - existingMF.Metric = append(existingMF.Metric, m) - } - } - } - - // Now that MetricFamilies are all set, sort their Metrics - // lexicographically by their label values. - for _, mf := range metricFamiliesByName { - sort.Sort(metricSorter(mf.Metric)) - } - - // Write out MetricFamilies sorted by their name. - names := make([]string, 0, len(metricFamiliesByName)) - for name := range metricFamiliesByName { - names = append(names, name) - } - sort.Strings(names) - - for _, name := range names { - if err := encoder.Encode(metricFamiliesByName[name]); err != nil { - return err - } - } - return nil -} - -func (r *registry) checkConsistency(metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, desc *Desc, metricHashes map[uint64]struct{}) error { - - // Type consistency with metric family. - if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || - metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || - metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || - metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || - metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { - return fmt.Errorf( - "collected metric %s %s is not a %s", - metricFamily.GetName(), dtoMetric, metricFamily.GetType(), - ) - } - - // Is the metric unique (i.e. no other metric with the same name and the same label values)? - h := hashNew() - h = hashAdd(h, metricFamily.GetName()) - h = hashAddByte(h, separatorByte) - // Make sure label pairs are sorted. We depend on it for the consistency - // check. Label pairs must be sorted by contract. But the point of this - // method is to check for contract violations. So we better do the sort - // now. - sort.Sort(LabelPairSorter(dtoMetric.Label)) - for _, lp := range dtoMetric.Label { - h = hashAdd(h, lp.GetValue()) - h = hashAddByte(h, separatorByte) - } - if _, exists := metricHashes[h]; exists { - return fmt.Errorf( - "collected metric %s %s was collected before with the same name and label values", - metricFamily.GetName(), dtoMetric, - ) - } - metricHashes[h] = struct{}{} - - if desc == nil { - return nil // Nothing left to check if we have no desc. - } - - // Desc consistency with metric family. 
- if metricFamily.GetName() != desc.fqName { - return fmt.Errorf( - "collected metric %s %s has name %q but should have %q", - metricFamily.GetName(), dtoMetric, metricFamily.GetName(), desc.fqName, - ) - } - if metricFamily.GetHelp() != desc.help { - return fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, - ) - } - - // Is the desc consistent with the content of the metric? - lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) - lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) - for _, l := range desc.variableLabels { - lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ - Name: proto.String(l), - }) - } - if len(lpsFromDesc) != len(dtoMetric.Label) { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - sort.Sort(LabelPairSorter(lpsFromDesc)) - for i, lpFromDesc := range lpsFromDesc { - lpFromMetric := dtoMetric.Label[i] - if lpFromDesc.GetName() != lpFromMetric.GetName() || - lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - } - - r.mtx.RLock() // Remaining checks need the read lock. - defer r.mtx.RUnlock() - - // Is the desc registered? - if _, exist := r.descIDs[desc.id]; !exist { - return fmt.Errorf( - "collected metric %s %s with unregistered descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - - return nil -} - -func (r *registry) getBuf() *bytes.Buffer { - select { - case buf := <-r.bufPool: - return buf - default: - return &bytes.Buffer{} - } -} - -func (r *registry) giveBuf(buf *bytes.Buffer) { - buf.Reset() - select { - case r.bufPool <- buf: - default: - } -} - -func (r *registry) getMetricFamily() *dto.MetricFamily { - select { - case mf := <-r.metricFamilyPool: - return mf - default: - return &dto.MetricFamily{} - } -} - -func (r *registry) giveMetricFamily(mf *dto.MetricFamily) { - mf.Reset() - select { - case r.metricFamilyPool <- mf: - default: - } -} - -func (r *registry) getMetric() *dto.Metric { - select { - case m := <-r.metricPool: - return m - default: - return &dto.Metric{} - } -} - -func (r *registry) giveMetric(m *dto.Metric) { - m.Reset() - select { - case r.metricPool <- m: - default: - } -} - -func newRegistry() *registry { - return ®istry{ - collectorsByID: map[uint64]Collector{}, - descIDs: map[uint64]struct{}{}, - dimHashesByName: map[string]uint64{}, - bufPool: make(chan *bytes.Buffer, numBufs), - metricFamilyPool: make(chan *dto.MetricFamily, numMetricFamilies), - metricPool: make(chan *dto.Metric, numMetrics), - } -} - -func newDefaultRegistry() *registry { - r := newRegistry() - r.Register(NewProcessCollector(os.Getpid(), "")) - r.Register(NewGoCollector()) - return r -} - -// decorateWriter wraps a writer to handle gzip compression if requested. It -// returns the decorated writer and the appropriate "Content-Encoding" header -// (which is empty if no compression is enabled). 
-func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { - header := request.Header.Get(acceptEncodingHeader) - parts := strings.Split(header, ",") - for _, part := range parts { - part := strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return gzip.NewWriter(writer), "gzip" - } - } - return writer, "" -} - -type metricSorter []*dto.Metric - -func (s metricSorter) Len() int { - return len(s) -} - -func (s metricSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s metricSorter) Less(i, j int) bool { - if len(s[i].Label) != len(s[j].Label) { - // This should not happen. The metrics are - // inconsistent. However, we have to deal with the fact, as - // people might use custom collectors or metric family injection - // to create inconsistent metrics. So let's simply compare the - // number of labels in this case. That will still yield - // reproducible sorting. - return len(s[i].Label) < len(s[j].Label) - } - for n, lp := range s[i].Label { - vi := lp.GetValue() - vj := s[j].Label[n].GetValue() - if vi != vj { - return vi < vj - } - } - - // We should never arrive here. Multiple metrics with the same - // label set in the same scrape will lead to undefined ingestion - // behavior. However, as above, we have to provide stable sorting - // here, even for inconsistent metrics. So sort equal metrics - // by their timestamp, with missing timestamps (implying "now") - // coming last. - if s[i].TimestampMs == nil { - return false - } - if s[j].TimestampMs == nil { - return true - } - return s[i].GetTimestampMs() < s[j].GetTimestampMs() -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go deleted file mode 100644 index eb8496166..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ /dev/null @@ -1,538 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "math" - "sort" - "sync" - "time" - - "github.com/beorn7/perks/quantile" - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -// quantileLabel is used for the label that defines the quantile in a -// summary. -const quantileLabel = "quantile" - -// A Summary captures individual observations from an event or sample stream and -// summarizes them in a manner similar to traditional summary statistics: 1. sum -// of observations, 2. observation count, 3. rank estimations. -// -// A typical use-case is the observation of request latencies. By default, a -// Summary provides the median, the 90th and the 99th percentile of the latency -// as rank estimations. -// -// Note that the rank estimations cannot be aggregated in a meaningful way with -// the Prometheus query language (i.e. you cannot average or add them). If you -// need aggregatable quantiles (e.g. 
you want the 99th percentile latency of all -// queries served across all instances of a service), consider the Histogram -// metric type. See the Prometheus documentation for more details. -// -// To create Summary instances, use NewSummary. -type Summary interface { - Metric - Collector - - // Observe adds a single observation to the summary. - Observe(float64) -} - -var ( - // DefObjectives are the default Summary quantile values. - DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} - - errQuantileLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in summaries", quantileLabel, - ) -) - -// Default values for SummaryOpts. -const ( - // DefMaxAge is the default duration for which observations stay - // relevant. - DefMaxAge time.Duration = 10 * time.Minute - // DefAgeBuckets is the default number of buckets used to calculate the - // age of observations. - DefAgeBuckets = 5 - // DefBufCap is the standard buffer size for collecting Summary observations. - DefBufCap = 500 -) - -// SummaryOpts bundles the options for creating a Summary metric. It is -// mandatory to set Name and Help to a non-empty string. All other fields are -// optional and can safely be left at their zero value. -type SummaryOpts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Summary (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the Summary must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this Summary. Mandatory! - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this - // Summary. Summaries with the same fully-qualified name must have the - // same label names in their ConstLabels. - // - // Note that in most cases, labels have a value that varies during the - // lifetime of a process. Those labels are usually managed with a - // SummaryVec. ConstLabels serve only special purposes. One is for the - // special case where the value of a label does not change during the - // lifetime of a process, e.g. if the revision of the running binary is - // put into a label. Another, more advanced purpose is if more than one - // Collector needs to collect Summaries with the same fully-qualified - // name. In that case, those Summaries must differ in the values of - // their ConstLabels. See the Collector examples. - // - // If the value of a label never changes (not even between binaries), - // that label most likely should not be a label at all (but part of the - // metric name). - ConstLabels Labels - - // Objectives defines the quantile rank estimates with their respective - // absolute error. If Objectives[q] = e, then the value reported - // for q will be the φ-quantile value for some φ between q-e and q+e. - // The default value is DefObjectives. - Objectives map[float64]float64 - - // MaxAge defines the duration for which an observation stays relevant - // for the summary. Must be positive. The default value is DefMaxAge. - MaxAge time.Duration - - // AgeBuckets is the number of buckets used to exclude observations that - // are older than MaxAge from the summary. A higher number has a - // resource penalty, so only increase it if the higher resolution is - // really required. 
For very high observation rates, you might want to - // reduce the number of age buckets. With only one age bucket, you will - // effectively see a complete reset of the summary each time MaxAge has - // passed. The default value is DefAgeBuckets. - AgeBuckets uint32 - - // BufCap defines the default sample stream buffer size. The default - // value of DefBufCap should suffice for most uses. If there is a need - // to increase the value, a multiple of 500 is recommended (because that - // is the internal buffer size of the underlying package - // "github.com/bmizerany/perks/quantile"). - BufCap uint32 -} - -// TODO: Great fuck-up with the sliding-window decay algorithm... The Merge -// method of perk/quantile is actually not working as advertised - and it might -// be unfixable, as the underlying algorithm is apparently not capable of -// merging summaries in the first place. To avoid using Merge, we are currently -// adding observations to _each_ age bucket, i.e. the effort to add a sample is -// essentially multiplied by the number of age buckets. When rotating age -// buckets, we empty the previous head stream. On scrape time, we simply take -// the quantiles from the head stream (no merging required). Result: More effort -// on observation time, less effort on scrape time, which is exactly the -// opposite of what we try to accomplish, but at least the results are correct. -// -// The quite elegant previous contraption to merge the age buckets efficiently -// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) -// can't be used anymore. - -// NewSummary creates a new Summary based on the provided SummaryOpts. -func NewSummary(opts SummaryOpts) Summary { - return newSummary( - NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), - opts, - ) -} - -func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { - if len(desc.variableLabels) != len(labelValues) { - panic(errInconsistentCardinality) - } - - for _, n := range desc.variableLabels { - if n == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - for _, lp := range desc.constLabelPairs { - if lp.GetName() == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - - if len(opts.Objectives) == 0 { - opts.Objectives = DefObjectives - } - - if opts.MaxAge < 0 { - panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) - } - if opts.MaxAge == 0 { - opts.MaxAge = DefMaxAge - } - - if opts.AgeBuckets == 0 { - opts.AgeBuckets = DefAgeBuckets - } - - if opts.BufCap == 0 { - opts.BufCap = DefBufCap - } - - s := &summary{ - desc: desc, - - objectives: opts.Objectives, - sortedObjectives: make([]float64, 0, len(opts.Objectives)), - - labelPairs: makeLabelPairs(desc, labelValues), - - hotBuf: make([]float64, 0, opts.BufCap), - coldBuf: make([]float64, 0, opts.BufCap), - streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), - } - s.headStreamExpTime = time.Now().Add(s.streamDuration) - s.hotBufExpTime = s.headStreamExpTime - - for i := uint32(0); i < opts.AgeBuckets; i++ { - s.streams = append(s.streams, s.newStream()) - } - s.headStream = s.streams[0] - - for qu := range s.objectives { - s.sortedObjectives = append(s.sortedObjectives, qu) - } - sort.Float64s(s.sortedObjectives) - - s.Init(s) // Init self-collection. - return s -} - -type summary struct { - SelfCollector - - bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. - mtx sync.Mutex // Protects every other moving part. 
- // Lock bufMtx before mtx if both are needed. - - desc *Desc - - objectives map[float64]float64 - sortedObjectives []float64 - - labelPairs []*dto.LabelPair - - sum float64 - cnt uint64 - - hotBuf, coldBuf []float64 - - streams []*quantile.Stream - streamDuration time.Duration - headStream *quantile.Stream - headStreamIdx int - headStreamExpTime, hotBufExpTime time.Time -} - -func (s *summary) Desc() *Desc { - return s.desc -} - -func (s *summary) Observe(v float64) { - s.bufMtx.Lock() - defer s.bufMtx.Unlock() - - now := time.Now() - if now.After(s.hotBufExpTime) { - s.asyncFlush(now) - } - s.hotBuf = append(s.hotBuf, v) - if len(s.hotBuf) == cap(s.hotBuf) { - s.asyncFlush(now) - } -} - -func (s *summary) Write(out *dto.Metric) error { - sum := &dto.Summary{} - qs := make([]*dto.Quantile, 0, len(s.objectives)) - - s.bufMtx.Lock() - s.mtx.Lock() - // Swap bufs even if hotBuf is empty to set new hotBufExpTime. - s.swapBufs(time.Now()) - s.bufMtx.Unlock() - - s.flushColdBuf() - sum.SampleCount = proto.Uint64(s.cnt) - sum.SampleSum = proto.Float64(s.sum) - - for _, rank := range s.sortedObjectives { - var q float64 - if s.headStream.Count() == 0 { - q = math.NaN() - } else { - q = s.headStream.Query(rank) - } - qs = append(qs, &dto.Quantile{ - Quantile: proto.Float64(rank), - Value: proto.Float64(q), - }) - } - - s.mtx.Unlock() - - if len(qs) > 0 { - sort.Sort(quantSort(qs)) - } - sum.Quantile = qs - - out.Summary = sum - out.Label = s.labelPairs - return nil -} - -func (s *summary) newStream() *quantile.Stream { - return quantile.NewTargeted(s.objectives) -} - -// asyncFlush needs bufMtx locked. -func (s *summary) asyncFlush(now time.Time) { - s.mtx.Lock() - s.swapBufs(now) - - // Unblock the original goroutine that was responsible for the mutation - // that triggered the compaction. But hold onto the global non-buffer - // state mutex until the operation finishes. - go func() { - s.flushColdBuf() - s.mtx.Unlock() - }() -} - -// rotateStreams needs mtx AND bufMtx locked. -func (s *summary) maybeRotateStreams() { - for !s.hotBufExpTime.Equal(s.headStreamExpTime) { - s.headStream.Reset() - s.headStreamIdx++ - if s.headStreamIdx >= len(s.streams) { - s.headStreamIdx = 0 - } - s.headStream = s.streams[s.headStreamIdx] - s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) - } -} - -// flushColdBuf needs mtx locked. -func (s *summary) flushColdBuf() { - for _, v := range s.coldBuf { - for _, stream := range s.streams { - stream.Insert(v) - } - s.cnt++ - s.sum += v - } - s.coldBuf = s.coldBuf[0:0] - s.maybeRotateStreams() -} - -// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. -func (s *summary) swapBufs(now time.Time) { - if len(s.coldBuf) != 0 { - panic("coldBuf is not empty") - } - s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf - // hotBuf is now empty and gets new expiration set. - for now.After(s.hotBufExpTime) { - s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) - } -} - -type quantSort []*dto.Quantile - -func (s quantSort) Len() int { - return len(s) -} - -func (s quantSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s quantSort) Less(i, j int) bool { - return s[i].GetQuantile() < s[j].GetQuantile() -} - -// SummaryVec is a Collector that bundles a set of Summaries that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. HTTP request latencies, partitioned by status code and method). Create -// instances with NewSummaryVec. 
-type SummaryVec struct { - MetricVec -} - -// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and -// partitioned by the given label names. At least one label name must be -// provided. -func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &SummaryVec{ - MetricVec: MetricVec{ - children: map[uint64]Metric{}, - desc: desc, - newMetric: func(lvs ...string) Metric { - return newSummary(desc, opts, lvs...) - }, - }, - } -} - -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns a Summary and not a -// Metric so that no type conversion is required. -func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Summary), err - } - return nil, err -} - -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns a Summary and not a Metric so that no -// type conversion is required. -func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) { - metric, err := m.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Summary), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) -func (m *SummaryVec) WithLabelValues(lvs ...string) Summary { - return m.MetricVec.WithLabelValues(lvs...).(Summary) -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (m *SummaryVec) With(labels Labels) Summary { - return m.MetricVec.With(labels).(Summary) -} - -type constSummary struct { - desc *Desc - count uint64 - sum float64 - quantiles map[float64]float64 - labelPairs []*dto.LabelPair -} - -func (s *constSummary) Desc() *Desc { - return s.desc -} - -func (s *constSummary) Write(out *dto.Metric) error { - sum := &dto.Summary{} - qs := make([]*dto.Quantile, 0, len(s.quantiles)) - - sum.SampleCount = proto.Uint64(s.count) - sum.SampleSum = proto.Float64(s.sum) - - for rank, q := range s.quantiles { - qs = append(qs, &dto.Quantile{ - Quantile: proto.Float64(rank), - Value: proto.Float64(q), - }) - } - - if len(qs) > 0 { - sort.Sort(quantSort(qs)) - } - sum.Quantile = qs - - out.Summary = sum - out.Label = s.labelPairs - - return nil -} - -// NewConstSummary returns a metric representing a Prometheus summary with fixed -// values for the count, sum, and quantiles. As those parameters cannot be -// changed, the returned value does not implement the Summary interface (but -// only the Metric interface). Users of this package will not have much use for -// it in regular operations. However, when implementing custom Collectors, it is -// useful as a throw-away metric that is generated on the fly to send it to -// Prometheus in the Collect method. -// -// quantiles maps ranks to quantile values. 
For example, a median latency of -// 0.23s and a 99th percentile latency of 0.56s would be expressed as: -// map[float64]float64{0.5: 0.23, 0.99: 0.56} -// -// NewConstSummary returns an error if the length of labelValues is not -// consistent with the variable labels in Desc. -func NewConstSummary( - desc *Desc, - count uint64, - sum float64, - quantiles map[float64]float64, - labelValues ...string, -) (Metric, error) { - if len(desc.variableLabels) != len(labelValues) { - return nil, errInconsistentCardinality - } - return &constSummary{ - desc: desc, - count: count, - sum: sum, - quantiles: quantiles, - labelPairs: makeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstSummary is a version of NewConstSummary that panics where -// NewConstMetric would have returned an error. -func MustNewConstSummary( - desc *Desc, - count uint64, - sum float64, - quantiles map[float64]float64, - labelValues ...string, -) Metric { - m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...) - if err != nil { - panic(err) - } - return m -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go deleted file mode 100644 index 89b86ea98..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Untyped is a Metric that represents a single numerical value that can -// arbitrarily go up and down. -// -// An Untyped metric works the same as a Gauge. The only difference is that to -// no type information is implied. -// -// To create Untyped instances, use NewUntyped. -type Untyped interface { - Metric - Collector - - // Set sets the Untyped metric to an arbitrary value. - Set(float64) - // Inc increments the Untyped metric by 1. - Inc() - // Dec decrements the Untyped metric by 1. - Dec() - // Add adds the given value to the Untyped metric. (The value can be - // negative, resulting in a decrease.) - Add(float64) - // Sub subtracts the given value from the Untyped metric. (The value can - // be negative, resulting in an increase.) - Sub(float64) -} - -// UntypedOpts is an alias for Opts. See there for doc comments. -type UntypedOpts Opts - -// NewUntyped creates a new Untyped metric from the provided UntypedOpts. -func NewUntyped(opts UntypedOpts) Untyped { - return newValue(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), UntypedValue, 0) -} - -// UntypedVec is a Collector that bundles a set of Untyped metrics that all -// share the same Desc, but have different values for their variable -// labels. This is used if you want to count the same thing partitioned by -// various dimensions. Create instances with NewUntypedVec. 
-type UntypedVec struct { - MetricVec -} - -// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and -// partitioned by the given label names. At least one label name must be -// provided. -func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &UntypedVec{ - MetricVec: MetricVec{ - children: map[uint64]Metric{}, - desc: desc, - newMetric: func(lvs ...string) Metric { - return newValue(desc, UntypedValue, 0, lvs...) - }, - }, - } -} - -// GetMetricWithLabelValues replaces the method of the same name in -// MetricVec. The difference is that this method returns an Untyped and not a -// Metric so that no type conversion is required. -func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) { - metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Untyped), err - } - return nil, err -} - -// GetMetricWith replaces the method of the same name in MetricVec. The -// difference is that this method returns an Untyped and not a Metric so that no -// type conversion is required. -func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) { - metric, err := m.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Untyped), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. By not returning an -// error, WithLabelValues allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped { - return m.MetricVec.WithLabelValues(lvs...).(Untyped) -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. By not returning an error, With allows shortcuts like -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -func (m *UntypedVec) With(labels Labels) Untyped { - return m.MetricVec.With(labels).(Untyped) -} - -// UntypedFunc is an Untyped whose value is determined at collect time by -// calling a provided function. -// -// To create UntypedFunc instances, use NewUntypedFunc. -type UntypedFunc interface { - Metric - Collector -} - -// NewUntypedFunc creates a new UntypedFunc based on the provided -// UntypedOpts. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where an UntypedFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. -func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), UntypedValue, function) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go deleted file mode 100644 index b54ac11e8..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "math" - "sort" - "sync/atomic" - - dto "github.com/prometheus/client_model/go" - - "github.com/golang/protobuf/proto" -) - -// ValueType is an enumeration of metric types that represent a simple value. -type ValueType int - -// Possible values for the ValueType enum. -const ( - _ ValueType = iota - CounterValue - GaugeValue - UntypedValue -) - -var errInconsistentCardinality = errors.New("inconsistent label cardinality") - -// value is a generic metric for simple values. It implements Metric, Collector, -// Counter, Gauge, and Untyped. Its effective type is determined by -// ValueType. This is a low-level building block used by the library to back the -// implementations of Counter, Gauge, and Untyped. -type value struct { - // valBits containst the bits of the represented float64 value. It has - // to go first in the struct to guarantee alignment for atomic - // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - - SelfCollector - - desc *Desc - valType ValueType - labelPairs []*dto.LabelPair -} - -// newValue returns a newly allocated value with the given Desc, ValueType, -// sample value and label values. It panics if the number of label -// values is different from the number of variable labels in Desc. -func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value { - if len(labelValues) != len(desc.variableLabels) { - panic(errInconsistentCardinality) - } - result := &value{ - desc: desc, - valType: valueType, - valBits: math.Float64bits(val), - labelPairs: makeLabelPairs(desc, labelValues), - } - result.Init(result) - return result -} - -func (v *value) Desc() *Desc { - return v.desc -} - -func (v *value) Set(val float64) { - atomic.StoreUint64(&v.valBits, math.Float64bits(val)) -} - -func (v *value) Inc() { - v.Add(1) -} - -func (v *value) Dec() { - v.Add(-1) -} - -func (v *value) Add(val float64) { - for { - oldBits := atomic.LoadUint64(&v.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + val) - if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) { - return - } - } -} - -func (v *value) Sub(val float64) { - v.Add(val * -1) -} - -func (v *value) Write(out *dto.Metric) error { - val := math.Float64frombits(atomic.LoadUint64(&v.valBits)) - return populateMetric(v.valType, val, v.labelPairs, out) -} - -// valueFunc is a generic metric for simple values retrieved on collect time -// from a function. It implements Metric and Collector. Its effective type is -// determined by ValueType. This is a low-level building block used by the -// library to back the implementations of CounterFunc, GaugeFunc, and -// UntypedFunc. -type valueFunc struct { - SelfCollector - - desc *Desc - valType ValueType - function func() float64 - labelPairs []*dto.LabelPair -} - -// newValueFunc returns a newly allocated valueFunc with the given Desc and -// ValueType. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. 
If that results in concurrent calls to Write, like in -// the case where a valueFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. -func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { - result := &valueFunc{ - desc: desc, - valType: valueType, - function: function, - labelPairs: makeLabelPairs(desc, nil), - } - result.Init(result) - return result -} - -func (v *valueFunc) Desc() *Desc { - return v.desc -} - -func (v *valueFunc) Write(out *dto.Metric) error { - return populateMetric(v.valType, v.function(), v.labelPairs, out) -} - -// NewConstMetric returns a metric with one fixed value that cannot be -// changed. Users of this package will not have much use for it in regular -// operations. However, when implementing custom Collectors, it is useful as a -// throw-away metric that is generated on the fly to send it to Prometheus in -// the Collect method. NewConstMetric returns an error if the length of -// labelValues is not consistent with the variable labels in Desc. -func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { - if len(desc.variableLabels) != len(labelValues) { - return nil, errInconsistentCardinality - } - return &constMetric{ - desc: desc, - valType: valueType, - val: value, - labelPairs: makeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstMetric is a version of NewConstMetric that panics where -// NewConstMetric would have returned an error. -func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { - m, err := NewConstMetric(desc, valueType, value, labelValues...) - if err != nil { - panic(err) - } - return m -} - -type constMetric struct { - desc *Desc - valType ValueType - val float64 - labelPairs []*dto.LabelPair -} - -func (m *constMetric) Desc() *Desc { - return m.desc -} - -func (m *constMetric) Write(out *dto.Metric) error { - return populateMetric(m.valType, m.val, m.labelPairs, out) -} - -func populateMetric( - t ValueType, - v float64, - labelPairs []*dto.LabelPair, - m *dto.Metric, -) error { - m.Label = labelPairs - switch t { - case CounterValue: - m.Counter = &dto.Counter{Value: proto.Float64(v)} - case GaugeValue: - m.Gauge = &dto.Gauge{Value: proto.Float64(v)} - case UntypedValue: - m.Untyped = &dto.Untyped{Value: proto.Float64(v)} - default: - return fmt.Errorf("encountered unknown type %v", t) - } - return nil -} - -func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { - totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) - if totalLen == 0 { - // Super fast path. - return nil - } - if len(desc.variableLabels) == 0 { - // Moderately fast path. 
- return desc.constLabelPairs - } - labelPairs := make([]*dto.LabelPair, 0, totalLen) - for i, n := range desc.variableLabels { - labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(n), - Value: proto.String(labelValues[i]), - }) - } - for _, lp := range desc.constLabelPairs { - labelPairs = append(labelPairs, lp) - } - sort.Sort(LabelPairSorter(labelPairs)) - return labelPairs -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go deleted file mode 100644 index 68f946123..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sync" -) - -// MetricVec is a Collector to bundle metrics of the same name that -// differ in their label values. MetricVec is usually not used directly but as a -// building block for implementations of vectors of a given metric -// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already -// provided in this package. -type MetricVec struct { - mtx sync.RWMutex // Protects the children. - children map[uint64]Metric - desc *Desc - - newMetric func(labelValues ...string) Metric -} - -// Describe implements Collector. The length of the returned slice -// is always one. -func (m *MetricVec) Describe(ch chan<- *Desc) { - ch <- m.desc -} - -// Collect implements Collector. -func (m *MetricVec) Collect(ch chan<- Metric) { - m.mtx.RLock() - defer m.mtx.RUnlock() - - for _, metric := range m.children { - ch <- metric - } -} - -// GetMetricWithLabelValues returns the Metric for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Metric is created. -// -// It is possible to call this method without using the returned Metric to only -// create the new Metric but leave it at its start value (e.g. a Summary or -// Histogram without any observations). See also the SummaryVec example. -// -// Keeping the Metric for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Metric from the MetricVec. In that case, the -// Metric will still exist, but it will not be exported anymore, even if a -// Metric with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. 
For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { - h, err := m.hashLabelValues(lvs) - if err != nil { - return nil, err - } - - m.mtx.RLock() - metric, ok := m.children[h] - m.mtx.RUnlock() - if ok { - return metric, nil - } - - m.mtx.Lock() - defer m.mtx.Unlock() - return m.getOrCreateMetric(h, lvs...), nil -} - -// GetMetricWith returns the Metric for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Metric is created. Implications of -// creating a Metric without using it and keeping the Metric for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc. -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { - h, err := m.hashLabels(labels) - if err != nil { - return nil, err - } - - m.mtx.RLock() - metric, ok := m.children[h] - m.mtx.RUnlock() - if ok { - return metric, nil - } - - lvs := make([]string, len(labels)) - for i, label := range m.desc.variableLabels { - lvs[i] = labels[label] - } - m.mtx.Lock() - defer m.mtx.Unlock() - return m.getOrCreateMetric(h, lvs...), nil -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics if an error -// occurs. The method allows neat syntax like: -// httpReqs.WithLabelValues("404", "POST").Inc() -func (m *MetricVec) WithLabelValues(lvs ...string) Metric { - metric, err := m.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return metric -} - -// With works as GetMetricWith, but panics if an error occurs. The method allows -// neat syntax like: -// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc() -func (m *MetricVec) With(labels Labels) Metric { - metric, err := m.GetMetricWith(labels) - if err != nil { - panic(err) - } - return metric -} - -// DeleteLabelValues removes the metric where the variable labels are the same -// as those passed in as labels (same order as the VariableLabels in Desc). It -// returns true if a metric was deleted. -// -// It is not an error if the number of label values is not the same as the -// number of VariableLabels in Desc. However, such inconsistent label count can -// never match an actual Metric, so the method will always return false in that -// case. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider Delete(Labels) as an -// alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the CounterVec example. 
-func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - h, err := m.hashLabelValues(lvs) - if err != nil { - return false - } - if _, ok := m.children[h]; !ok { - return false - } - delete(m.children, h) - return true -} - -// Delete deletes the metric where the variable labels are the same as those -// passed in as labels. It returns true if a metric was deleted. -// -// It is not an error if the number and names of the Labels are inconsistent -// with those of the VariableLabels in the Desc of the MetricVec. However, such -// inconsistent Labels can never match an actual Metric, so the method will -// always return false in that case. -// -// This method is used for the same purpose as DeleteLabelValues(...string). See -// there for pros and cons of the two methods. -func (m *MetricVec) Delete(labels Labels) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - h, err := m.hashLabels(labels) - if err != nil { - return false - } - if _, ok := m.children[h]; !ok { - return false - } - delete(m.children, h) - return true -} - -// Reset deletes all metrics in this vector. -func (m *MetricVec) Reset() { - m.mtx.Lock() - defer m.mtx.Unlock() - - for h := range m.children { - delete(m.children, h) - } -} - -func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { - if len(vals) != len(m.desc.variableLabels) { - return 0, errInconsistentCardinality - } - h := hashNew() - for _, val := range vals { - h = hashAdd(h, val) - } - return h, nil -} - -func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { - if len(labels) != len(m.desc.variableLabels) { - return 0, errInconsistentCardinality - } - h := hashNew() - for _, label := range m.desc.variableLabels { - val, ok := labels[label] - if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label) - } - h = hashAdd(h, val) - } - return h, nil -} - -func (m *MetricVec) getOrCreateMetric(hash uint64, labelValues ...string) Metric { - metric, ok := m.children[hash] - if !ok { - // Copy labelValues. Otherwise, they would be allocated even if we don't go - // down this code path. - copiedLabelValues := append(make([]string, 0, len(labelValues)), labelValues...) - metric = m.newMetric(copiedLabelValues...) - m.children[hash] = metric - } - return metric -} diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/prometheus/client_model/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE deleted file mode 100644 index 20110e410..000000000 --- a/vendor/github.com/prometheus/client_model/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Data model artifacts for Prometheus. -Copyright 2012-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. 
(http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go deleted file mode 100644 index b065f8683..000000000 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ /dev/null @@ -1,364 +0,0 @@ -// Code generated by protoc-gen-go. -// source: metrics.proto -// DO NOT EDIT! - -/* -Package io_prometheus_client is a generated protocol buffer package. - -It is generated from these files: - metrics.proto - -It has these top-level messages: - LabelPair - Gauge - Counter - Quantile - Summary - Untyped - Histogram - Bucket - Metric - MetricFamily -*/ -package io_prometheus_client - -import proto "github.com/golang/protobuf/proto" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = math.Inf - -type MetricType int32 - -const ( - MetricType_COUNTER MetricType = 0 - MetricType_GAUGE MetricType = 1 - MetricType_SUMMARY MetricType = 2 - MetricType_UNTYPED MetricType = 3 - MetricType_HISTOGRAM MetricType = 4 -) - -var MetricType_name = map[int32]string{ - 0: "COUNTER", - 1: "GAUGE", - 2: "SUMMARY", - 3: "UNTYPED", - 4: "HISTOGRAM", -} -var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, -} - -func (x MetricType) Enum() *MetricType { - p := new(MetricType) - *p = x - return p -} -func (x MetricType) String() string { - return proto.EnumName(MetricType_name, int32(x)) -} -func (x *MetricType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") - if err != nil { - return err - } - *x = MetricType(value) - return nil -} - -type LabelPair struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (m *LabelPair) String() string { return proto.CompactTextString(m) } -func (*LabelPair) ProtoMessage() {} - -func (m *LabelPair) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *LabelPair) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type Gauge struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Gauge) Reset() { *m = Gauge{} } -func (m *Gauge) String() string { return proto.CompactTextString(m) } -func (*Gauge) ProtoMessage() {} - -func (m *Gauge) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Counter) Reset() { *m = Counter{} } -func (m *Counter) String() string { return proto.CompactTextString(m) } -func (*Counter) ProtoMessage() {} - -func (m *Counter) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Quantile struct { - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Quantile) Reset() { *m = Quantile{} } -func (m *Quantile) String() string { return proto.CompactTextString(m) } -func (*Quantile) ProtoMessage() {} - 
-func (m *Quantile) GetQuantile() float64 { - if m != nil && m.Quantile != nil { - return *m.Quantile - } - return 0 -} - -func (m *Quantile) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Summary struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Summary) Reset() { *m = Summary{} } -func (m *Summary) String() string { return proto.CompactTextString(m) } -func (*Summary) ProtoMessage() {} - -func (m *Summary) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount - } - return 0 -} - -func (m *Summary) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum - } - return 0 -} - -func (m *Summary) GetQuantile() []*Quantile { - if m != nil { - return m.Quantile - } - return nil -} - -type Untyped struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Untyped) Reset() { *m = Untyped{} } -func (m *Untyped) String() string { return proto.CompactTextString(m) } -func (*Untyped) ProtoMessage() {} - -func (m *Untyped) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Histogram struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Histogram) Reset() { *m = Histogram{} } -func (m *Histogram) String() string { return proto.CompactTextString(m) } -func (*Histogram) ProtoMessage() {} - -func (m *Histogram) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount - } - return 0 -} - -func (m *Histogram) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum - } - return 0 -} - -func (m *Histogram) GetBucket() []*Bucket { - if m != nil { - return m.Bucket - } - return nil -} - -type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Bucket) Reset() { *m = Bucket{} } -func (m *Bucket) String() string { return proto.CompactTextString(m) } -func (*Bucket) ProtoMessage() {} - -func (m *Bucket) GetCumulativeCount() uint64 { - if m != nil && m.CumulativeCount != nil { - return *m.CumulativeCount - } - return 0 -} - -func (m *Bucket) GetUpperBound() float64 { - if m != nil && m.UpperBound != nil { - return *m.UpperBound - } - return 0 -} - -type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` 
- TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} - -func (m *Metric) GetLabel() []*LabelPair { - if m != nil { - return m.Label - } - return nil -} - -func (m *Metric) GetGauge() *Gauge { - if m != nil { - return m.Gauge - } - return nil -} - -func (m *Metric) GetCounter() *Counter { - if m != nil { - return m.Counter - } - return nil -} - -func (m *Metric) GetSummary() *Summary { - if m != nil { - return m.Summary - } - return nil -} - -func (m *Metric) GetUntyped() *Untyped { - if m != nil { - return m.Untyped - } - return nil -} - -func (m *Metric) GetHistogram() *Histogram { - if m != nil { - return m.Histogram - } - return nil -} - -func (m *Metric) GetTimestampMs() int64 { - if m != nil && m.TimestampMs != nil { - return *m.TimestampMs - } - return 0 -} - -type MetricFamily struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MetricFamily) Reset() { *m = MetricFamily{} } -func (m *MetricFamily) String() string { return proto.CompactTextString(m) } -func (*MetricFamily) ProtoMessage() {} - -func (m *MetricFamily) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MetricFamily) GetHelp() string { - if m != nil && m.Help != nil { - return *m.Help - } - return "" -} - -func (m *MetricFamily) GetType() MetricType { - if m != nil && m.Type != nil { - return *m.Type - } - return MetricType_COUNTER -} - -func (m *MetricFamily) GetMetric() []*Metric { - if m != nil { - return m.Metric - } - return nil -} - -func init() { - proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) -} diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/prometheus/common/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE deleted file mode 100644 index 636a2c1a5..000000000 --- a/vendor/github.com/prometheus/common/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Common libraries shared by Prometheus Go components. -Copyright 2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. 
(http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go deleted file mode 100644 index 487fdc6cc..000000000 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ /dev/null @@ -1,412 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "fmt" - "io" - "math" - "mime" - "net/http" - - dto "github.com/prometheus/client_model/go" - - "github.com/matttproud/golang_protobuf_extensions/pbutil" - "github.com/prometheus/common/model" -) - -// Decoder types decode an input stream into metric families. -type Decoder interface { - Decode(*dto.MetricFamily) error -} - -type DecodeOptions struct { - // Timestamp is added to each value from the stream that has no explicit timestamp set. - Timestamp model.Time -} - -// ResponseFormat extracts the correct format from a HTTP response header. -// If no matching format can be found FormatUnknown is returned. -func ResponseFormat(h http.Header) Format { - ct := h.Get(hdrContentType) - - mediatype, params, err := mime.ParseMediaType(ct) - if err != nil { - return FmtUnknown - } - - const textType = "text/plain" - - switch mediatype { - case ProtoType: - if p, ok := params["proto"]; ok && p != ProtoProtocol { - return FmtUnknown - } - if e, ok := params["encoding"]; ok && e != "delimited" { - return FmtUnknown - } - return FmtProtoDelim - - case textType: - if v, ok := params["version"]; ok && v != TextVersion { - return FmtUnknown - } - return FmtText - } - - return FmtUnknown -} - -// NewDecoder returns a new decoder based on the given input format. -// If the input format does not imply otherwise, a text format decoder is returned. -func NewDecoder(r io.Reader, format Format) Decoder { - switch format { - case FmtProtoDelim: - return &protoDecoder{r: r} - } - return &textDecoder{r: r} -} - -// protoDecoder implements the Decoder interface for protocol buffers. -type protoDecoder struct { - r io.Reader -} - -// Decode implements the Decoder interface. -func (d *protoDecoder) Decode(v *dto.MetricFamily) error { - _, err := pbutil.ReadDelimited(d.r, v) - if err != nil { - return err - } - if !model.IsValidMetricName(model.LabelValue(v.GetName())) { - return fmt.Errorf("invalid metric name %q", v.GetName()) - } - for _, m := range v.GetMetric() { - if m == nil { - continue - } - for _, l := range m.GetLabel() { - if l == nil { - continue - } - if !model.LabelValue(l.GetValue()).IsValid() { - return fmt.Errorf("invalid label value %q", l.GetValue()) - } - if !model.LabelName(l.GetName()).IsValid() { - return fmt.Errorf("invalid label name %q", l.GetName()) - } - } - } - return nil -} - -// textDecoder implements the Decoder interface for the text protocol. -type textDecoder struct { - r io.Reader - p TextParser - fams []*dto.MetricFamily -} - -// Decode implements the Decoder interface. 
-func (d *textDecoder) Decode(v *dto.MetricFamily) error { - // TODO(fabxc): Wrap this as a line reader to make streaming safer. - if len(d.fams) == 0 { - // No cached metric families, read everything and parse metrics. - fams, err := d.p.TextToMetricFamilies(d.r) - if err != nil { - return err - } - if len(fams) == 0 { - return io.EOF - } - d.fams = make([]*dto.MetricFamily, 0, len(fams)) - for _, f := range fams { - d.fams = append(d.fams, f) - } - } - - *v = *d.fams[0] - d.fams = d.fams[1:] - - return nil -} - -type SampleDecoder struct { - Dec Decoder - Opts *DecodeOptions - - f dto.MetricFamily -} - -func (sd *SampleDecoder) Decode(s *model.Vector) error { - if err := sd.Dec.Decode(&sd.f); err != nil { - return err - } - *s = extractSamples(&sd.f, sd.Opts) - return nil -} - -// Extract samples builds a slice of samples from the provided metric families. -func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector { - var all model.Vector - for _, f := range fams { - all = append(all, extractSamples(f, o)...) - } - return all -} - -func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector { - switch f.GetType() { - case dto.MetricType_COUNTER: - return extractCounter(o, f) - case dto.MetricType_GAUGE: - return extractGauge(o, f) - case dto.MetricType_SUMMARY: - return extractSummary(o, f) - case dto.MetricType_UNTYPED: - return extractUntyped(o, f) - case dto.MetricType_HISTOGRAM: - return extractHistogram(o, f) - } - panic("expfmt.extractSamples: unknown metric family type") -} - -func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Counter == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Counter.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Gauge == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Gauge.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Untyped == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Untyped.GetValue()), - } - - if m.TimestampMs != nil { - 
smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Summary == nil { - continue - } - - timestamp := o.Timestamp - if m.TimestampMs != nil { - timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } - - for _, q := range m.Summary.Quantile { - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - // BUG(matt): Update other names to "quantile". - lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(q.GetValue()), - Timestamp: timestamp, - }) - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Summary.GetSampleSum()), - Timestamp: timestamp, - }) - - lset = make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Summary.GetSampleCount()), - Timestamp: timestamp, - }) - } - - return samples -} - -func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Histogram == nil { - continue - } - - timestamp := o.Timestamp - if m.TimestampMs != nil { - timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } - - infSeen := false - - for _, q := range m.Histogram.Bucket { - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") - - if math.IsInf(q.GetUpperBound(), +1) { - infSeen = true - } - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(q.GetCumulativeCount()), - Timestamp: timestamp, - }) - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Histogram.GetSampleSum()), - Timestamp: timestamp, - }) - - lset = make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") - - count := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Histogram.GetSampleCount()), - Timestamp: timestamp, - } - samples = append(samples, 
count) - - if !infSeen { - // Append an infinity bucket sample. - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: count.Value, - Timestamp: timestamp, - }) - } - } - - return samples -} diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go deleted file mode 100644 index 11839ed65..000000000 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "fmt" - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/matttproud/golang_protobuf_extensions/pbutil" - "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" - - dto "github.com/prometheus/client_model/go" -) - -// Encoder types encode metric families into an underlying wire protocol. -type Encoder interface { - Encode(*dto.MetricFamily) error -} - -type encoder func(*dto.MetricFamily) error - -func (e encoder) Encode(v *dto.MetricFamily) error { - return e(v) -} - -// Negotiate returns the Content-Type based on the given Accept header. -// If no appropriate accepted type is found, FmtText is returned. -func Negotiate(h http.Header) Format { - for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { - // Check for protocol buffer - if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { - switch ac.Params["encoding"] { - case "delimited": - return FmtProtoDelim - case "text": - return FmtProtoText - case "compact-text": - return FmtProtoCompact - } - } - // Check for text format. - ver := ac.Params["version"] - if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText - } - } - return FmtText -} - -// NewEncoder returns a new encoder based on content type negotiation. 
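A short sketch of turning decoded families into flat samples with ExtractSamples; values that carry no explicit timestamp are stamped with the DecodeOptions timestamp. The printSamples helper name is invented, and the families are assumed to come from a Decoder as in the earlier sketch.

package main

import (
    "fmt"

    dto "github.com/prometheus/client_model/go"
    "github.com/prometheus/common/expfmt"
    "github.com/prometheus/common/model"
)

// printSamples flattens metric families into model.Sample values, using
// "now" for any value without its own timestamp.
func printSamples(fams ...*dto.MetricFamily) {
    opts := &expfmt.DecodeOptions{Timestamp: model.Now()}
    for _, s := range expfmt.ExtractSamples(opts, fams...) {
        fmt.Printf("%v => %v @ %v\n", s.Metric, s.Value, s.Timestamp)
    }
}

func main() {
    printSamples() // pass families decoded as in the sketch above
}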
-func NewEncoder(w io.Writer, format Format) Encoder { - switch format { - case FmtProtoDelim: - return encoder(func(v *dto.MetricFamily) error { - _, err := pbutil.WriteDelimited(w, v) - return err - }) - case FmtProtoCompact: - return encoder(func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, v.String()) - return err - }) - case FmtProtoText: - return encoder(func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) - return err - }) - case FmtText: - return encoder(func(v *dto.MetricFamily) error { - _, err := MetricFamilyToText(w, v) - return err - }) - } - panic("expfmt.NewEncoder: unknown format") -} diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go deleted file mode 100644 index 366fbde98..000000000 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// A package for reading and writing Prometheus metrics. -package expfmt - -type Format string - -const ( - TextVersion = "0.0.4" - - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - - // The Content-Type values for the different wire protocols. - FmtUnknown Format = `` - FmtText Format = `text/plain; version=` + TextVersion - FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` - FmtProtoText Format = ProtoFmt + ` encoding=text` - FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` - - // fmtJSON2 is hidden as it is deprecated. - fmtJSON2 Format = `application/json; version=0.0.2` -) - -const ( - hdrContentType = "Content-Type" - hdrAccept = "Accept" -) diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go deleted file mode 100644 index dc2eedeef..000000000 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Build only when actually fuzzing -// +build gofuzz - -package expfmt - -import "bytes" - -// Fuzz text metric parser with with github.com/dvyukov/go-fuzz: -// -// go-fuzz-build github.com/prometheus/common/expfmt -// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz -// -// Further input samples should go in the folder fuzz/corpus. 
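On the serving side, Negotiate and NewEncoder pair naturally inside an ordinary net/http handler: the Accept header chooses the wire format, and the Format value doubles as the Content-Type. A hedged sketch; metricsHandler and the family slice are illustrative names, not part of the package.

package main

import (
    "log"
    "net/http"

    dto "github.com/prometheus/client_model/go"
    "github.com/prometheus/common/expfmt"
)

// metricsHandler writes the given families back to the client in whichever
// format its Accept header asks for (text format by default).
func metricsHandler(fams []*dto.MetricFamily) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        format := expfmt.Negotiate(r.Header)
        w.Header().Set("Content-Type", string(format))
        enc := expfmt.NewEncoder(w, format)
        for _, mf := range fams {
            if err := enc.Encode(mf); err != nil {
                log.Println("encode:", err)
                return
            }
        }
    }
}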
-func Fuzz(in []byte) int { - parser := TextParser{} - _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) - - if err != nil { - return 0 - } - - return 1 -} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go deleted file mode 100644 index 0bb9c14cc..000000000 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bytes" - "fmt" - "io" - "math" - "strings" - - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/model" -) - -// MetricFamilyToText converts a MetricFamily proto message into text format and -// writes the resulting lines to 'out'. It returns the number of bytes written -// and any error encountered. This function does not perform checks on the -// content of the metric and label names, i.e. invalid metric or label names -// will result in invalid text format output. -// This method fulfills the type 'prometheus.encoder'. -func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { - var written int - - // Fail-fast checks. - if len(in.Metric) == 0 { - return written, fmt.Errorf("MetricFamily has no metrics: %s", in) - } - name := in.GetName() - if name == "" { - return written, fmt.Errorf("MetricFamily has no name: %s", in) - } - - // Comments, first HELP, then TYPE. - if in.Help != nil { - n, err := fmt.Fprintf( - out, "# HELP %s %s\n", - name, escapeString(*in.Help, false), - ) - written += n - if err != nil { - return written, err - } - } - metricType := in.GetType() - n, err := fmt.Fprintf( - out, "# TYPE %s %s\n", - name, strings.ToLower(metricType.String()), - ) - written += n - if err != nil { - return written, err - } - - // Finally the samples, one line for each. 
- for _, metric := range in.Metric { - switch metricType { - case dto.MetricType_COUNTER: - if metric.Counter == nil { - return written, fmt.Errorf( - "expected counter in metric %s %s", name, metric, - ) - } - n, err = writeSample( - name, metric, "", "", - metric.Counter.GetValue(), - out, - ) - case dto.MetricType_GAUGE: - if metric.Gauge == nil { - return written, fmt.Errorf( - "expected gauge in metric %s %s", name, metric, - ) - } - n, err = writeSample( - name, metric, "", "", - metric.Gauge.GetValue(), - out, - ) - case dto.MetricType_UNTYPED: - if metric.Untyped == nil { - return written, fmt.Errorf( - "expected untyped in metric %s %s", name, metric, - ) - } - n, err = writeSample( - name, metric, "", "", - metric.Untyped.GetValue(), - out, - ) - case dto.MetricType_SUMMARY: - if metric.Summary == nil { - return written, fmt.Errorf( - "expected summary in metric %s %s", name, metric, - ) - } - for _, q := range metric.Summary.Quantile { - n, err = writeSample( - name, metric, - model.QuantileLabel, fmt.Sprint(q.GetQuantile()), - q.GetValue(), - out, - ) - written += n - if err != nil { - return written, err - } - } - n, err = writeSample( - name+"_sum", metric, "", "", - metric.Summary.GetSampleSum(), - out, - ) - if err != nil { - return written, err - } - written += n - n, err = writeSample( - name+"_count", metric, "", "", - float64(metric.Summary.GetSampleCount()), - out, - ) - case dto.MetricType_HISTOGRAM: - if metric.Histogram == nil { - return written, fmt.Errorf( - "expected histogram in metric %s %s", name, metric, - ) - } - infSeen := false - for _, q := range metric.Histogram.Bucket { - n, err = writeSample( - name+"_bucket", metric, - model.BucketLabel, fmt.Sprint(q.GetUpperBound()), - float64(q.GetCumulativeCount()), - out, - ) - written += n - if err != nil { - return written, err - } - if math.IsInf(q.GetUpperBound(), +1) { - infSeen = true - } - } - if !infSeen { - n, err = writeSample( - name+"_bucket", metric, - model.BucketLabel, "+Inf", - float64(metric.Histogram.GetSampleCount()), - out, - ) - if err != nil { - return written, err - } - written += n - } - n, err = writeSample( - name+"_sum", metric, "", "", - metric.Histogram.GetSampleSum(), - out, - ) - if err != nil { - return written, err - } - written += n - n, err = writeSample( - name+"_count", metric, "", "", - float64(metric.Histogram.GetSampleCount()), - out, - ) - default: - return written, fmt.Errorf( - "unexpected type in metric %s %s", name, metric, - ) - } - written += n - if err != nil { - return written, err - } - } - return written, nil -} - -// writeSample writes a single sample in text format to out, given the metric -// name, the metric proto message itself, optionally an additional label name -// and value (use empty strings if not required), and the value. The function -// returns the number of bytes written and any error encountered. 
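A small usage sketch for MetricFamilyToText, building a family by hand with the dto and proto helpers the file above already relies on; the queue_depth metric and shard label are invented for illustration.

package main

import (
    "log"
    "os"

    "github.com/golang/protobuf/proto"
    dto "github.com/prometheus/client_model/go"
    "github.com/prometheus/common/expfmt"
)

func main() {
    mf := &dto.MetricFamily{
        Name: proto.String("queue_depth"),
        Help: proto.String("Current depth of the work queue."),
        Type: dto.MetricType_GAUGE.Enum(),
        Metric: []*dto.Metric{{
            Label: []*dto.LabelPair{{
                Name:  proto.String("shard"),
                Value: proto.String("a"),
            }},
            Gauge: &dto.Gauge{Value: proto.Float64(42)},
        }},
    }
    if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
        log.Fatal(err)
    }
    // Output in the text exposition format:
    // # HELP queue_depth Current depth of the work queue.
    // # TYPE queue_depth gauge
    // queue_depth{shard="a"} 42
}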
-func writeSample( - name string, - metric *dto.Metric, - additionalLabelName, additionalLabelValue string, - value float64, - out io.Writer, -) (int, error) { - var written int - n, err := fmt.Fprint(out, name) - written += n - if err != nil { - return written, err - } - n, err = labelPairsToText( - metric.Label, - additionalLabelName, additionalLabelValue, - out, - ) - written += n - if err != nil { - return written, err - } - n, err = fmt.Fprintf(out, " %v", value) - written += n - if err != nil { - return written, err - } - if metric.TimestampMs != nil { - n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs) - written += n - if err != nil { - return written, err - } - } - n, err = out.Write([]byte{'\n'}) - written += n - if err != nil { - return written, err - } - return written, nil -} - -// labelPairsToText converts a slice of LabelPair proto messages plus the -// explicitly given additional label pair into text formatted as required by the -// text format and writes it to 'out'. An empty slice in combination with an -// empty string 'additionalLabelName' results in nothing being -// written. Otherwise, the label pairs are written, escaped as required by the -// text format, and enclosed in '{...}'. The function returns the number of -// bytes written and any error encountered. -func labelPairsToText( - in []*dto.LabelPair, - additionalLabelName, additionalLabelValue string, - out io.Writer, -) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } - var written int - separator := '{' - for _, lp := range in { - n, err := fmt.Fprintf( - out, `%c%s="%s"`, - separator, lp.GetName(), escapeString(lp.GetValue(), true), - ) - written += n - if err != nil { - return written, err - } - separator = ',' - } - if additionalLabelName != "" { - n, err := fmt.Fprintf( - out, `%c%s="%s"`, - separator, additionalLabelName, - escapeString(additionalLabelValue, true), - ) - written += n - if err != nil { - return written, err - } - } - n, err := out.Write([]byte{'}'}) - written += n - if err != nil { - return written, err - } - return written, nil -} - -// escapeString replaces '\' by '\\', new line character by '\n', and - if -// includeDoubleQuote is true - '"' by '\"'. -func escapeString(v string, includeDoubleQuote bool) string { - result := bytes.NewBuffer(make([]byte, 0, len(v))) - for _, c := range v { - switch { - case c == '\\': - result.WriteString(`\\`) - case includeDoubleQuote && c == '"': - result.WriteString(`\"`) - case c == '\n': - result.WriteString(`\n`) - default: - result.WriteRune(c) - } - } - return result.String() -} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go deleted file mode 100644 index bd170b167..000000000 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ /dev/null @@ -1,753 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
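escapeString is unexported, so the standalone helper below only mirrors the escaping rules it applies to label values (backslash, double quote, and newline); escapeLabelValue is an illustrative name, not part of the package.

package main

import (
    "bytes"
    "fmt"
)

// escapeLabelValue applies the same escaping the text format requires for
// label values: '\' -> \\, '"' -> \", newline -> \n.
func escapeLabelValue(v string) string {
    var buf bytes.Buffer
    for _, c := range v {
        switch c {
        case '\\':
            buf.WriteString(`\\`)
        case '"':
            buf.WriteString(`\"`)
        case '\n':
            buf.WriteString(`\n`)
        default:
            buf.WriteRune(c)
        }
    }
    return buf.String()
}

func main() {
    fmt.Println(escapeLabelValue("multi\nline \"quoted\" c:\\temp"))
    // Output: multi\nline \"quoted\" c:\\temp
}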
- -package expfmt - -import ( - "bufio" - "bytes" - "fmt" - "io" - "math" - "strconv" - "strings" - - dto "github.com/prometheus/client_model/go" - - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" -) - -// A stateFn is a function that represents a state in a state machine. By -// executing it, the state is progressed to the next state. The stateFn returns -// another stateFn, which represents the new state. The end state is represented -// by nil. -type stateFn func() stateFn - -// ParseError signals errors while parsing the simple and flat text-based -// exchange format. -type ParseError struct { - Line int - Msg string -} - -// Error implements the error interface. -func (e ParseError) Error() string { - return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) -} - -// TextParser is used to parse the simple and flat text-based exchange format. Its -// nil value is ready to use. -type TextParser struct { - metricFamiliesByName map[string]*dto.MetricFamily - buf *bufio.Reader // Where the parsed input is read through. - err error // Most recent error. - lineCount int // Tracks the line count for error messages. - currentByte byte // The most recent byte read. - currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. - currentMF *dto.MetricFamily - currentMetric *dto.Metric - currentLabelPair *dto.LabelPair - - // The remaining member variables are only used for summaries/histograms. - currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' - // Summary specific. - summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentQuantile float64 - // Histogram specific. - histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentBucket float64 - // These tell us if the currently processed line ends on '_count' or - // '_sum' respectively and belong to a summary/histogram, representing the sample - // count and sum of that summary/histogram. - currentIsSummaryCount, currentIsSummarySum bool - currentIsHistogramCount, currentIsHistogramSum bool -} - -// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange -// format and creates MetricFamily proto messages. It returns the MetricFamily -// proto messages in a map where the metric names are the keys, along with any -// error encountered. -// -// If the input contains duplicate metrics (i.e. lines with the same metric name -// and exactly the same label set), the resulting MetricFamily will contain -// duplicate Metric proto messages. Similar is true for duplicate label -// names. Checks for duplicates have to be performed separately, if required. -// Also note that neither the metrics within each MetricFamily are sorted nor -// the label pairs within each Metric. Sorting is not required for the most -// frequent use of this method, which is sample ingestion in the Prometheus -// server. However, for presentation purposes, you might want to sort the -// metrics, and in some cases, you must sort the labels, e.g. for consumption by -// the metric family injection hook of the Prometheus registry. -// -// Summaries and histograms are rather special beasts. You would probably not -// use them in the simple text format anyway. This method can deal with -// summaries and histograms if they are presented in exactly the way the -// text.Create function creates them. -// -// This method must not be called concurrently. 
If you want to parse different -// input concurrently, instantiate a separate Parser for each goroutine. -func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { - p.reset(in) - for nextState := p.startOfLine; nextState != nil; nextState = nextState() { - // Magic happens here... - } - // Get rid of empty metric families. - for k, mf := range p.metricFamiliesByName { - if len(mf.GetMetric()) == 0 { - delete(p.metricFamiliesByName, k) - } - } - // If p.err is io.EOF now, we have run into a premature end of the input - // stream. Turn this error into something nicer and more - // meaningful. (io.EOF is often used as a signal for the legitimate end - // of an input stream.) - if p.err == io.EOF { - p.parseError("unexpected end of input stream") - } - return p.metricFamiliesByName, p.err -} - -func (p *TextParser) reset(in io.Reader) { - p.metricFamiliesByName = map[string]*dto.MetricFamily{} - if p.buf == nil { - p.buf = bufio.NewReader(in) - } else { - p.buf.Reset(in) - } - p.err = nil - p.lineCount = 0 - if p.summaries == nil || len(p.summaries) > 0 { - p.summaries = map[uint64]*dto.Metric{} - } - if p.histograms == nil || len(p.histograms) > 0 { - p.histograms = map[uint64]*dto.Metric{} - } - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() -} - -// startOfLine represents the state where the next byte read from p.buf is the -// start of a line (or whitespace leading up to it). -func (p *TextParser) startOfLine() stateFn { - p.lineCount++ - if p.skipBlankTab(); p.err != nil { - // End of input reached. This is the only case where - // that is not an error but a signal that we are done. - p.err = nil - return nil - } - switch p.currentByte { - case '#': - return p.startComment - case '\n': - return p.startOfLine // Empty line, start the next one. - } - return p.readingMetricName -} - -// startComment represents the state where the next byte read from p.buf is the -// start of a comment (or whitespace leading up to it). -func (p *TextParser) startComment() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - return p.startOfLine - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - // If we have hit the end of line already, there is nothing left - // to do. This is not considered a syntax error. - if p.currentByte == '\n' { - return p.startOfLine - } - keyword := p.currentToken.String() - if keyword != "HELP" && keyword != "TYPE" { - // Generic comment, ignore by fast forwarding to end of line. - for p.currentByte != '\n' { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { - return nil // Unexpected end of input. - } - } - return p.startOfLine - } - // There is something. Next has to be a metric name. - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.readTokenAsMetricName(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. - return p.startOfLine - } - if !isBlankOrTab(p.currentByte) { - p.parseError("invalid metric name in comment") - return nil - } - p.setOrCreateCurrentMF() - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. 
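A minimal sketch of the parser's public entry point, TextToMetricFamilies, fed a small hand-written exposition; the metric name, label values, and numbers are invented.

package main

import (
    "fmt"
    "log"
    "strings"

    "github.com/prometheus/common/expfmt"
)

func main() {
    input := `# HELP http_requests_total Total HTTP requests.
# TYPE http_requests_total counter
http_requests_total{code="200"} 1027 1395066363000
http_requests_total{code="500"} 3
`
    var parser expfmt.TextParser
    families, err := parser.TextToMetricFamilies(strings.NewReader(input))
    if err != nil {
        log.Fatal(err)
    }
    for name, mf := range families {
        fmt.Println(name, "->", len(mf.GetMetric()), "series")
    }
}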
- return p.startOfLine - } - switch keyword { - case "HELP": - return p.readingHelp - case "TYPE": - return p.readingType - } - panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) -} - -// readingMetricName represents the state where the last byte read (now in -// p.currentByte) is the first byte of a metric name. -func (p *TextParser) readingMetricName() stateFn { - if p.readTokenAsMetricName(); p.err != nil { - return nil - } - if p.currentToken.Len() == 0 { - p.parseError("invalid metric name") - return nil - } - p.setOrCreateCurrentMF() - // Now is the time to fix the type if it hasn't happened yet. - if p.currentMF.Type == nil { - p.currentMF.Type = dto.MetricType_UNTYPED.Enum() - } - p.currentMetric = &dto.Metric{} - // Do not append the newly created currentMetric to - // currentMF.Metric right now. First wait if this is a summary, - // and the metric exists already, which we can only know after - // having read all the labels. - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingLabels -} - -// readingLabels represents the state where the last byte read (now in -// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the -// first byte of the value (otherwise). -func (p *TextParser) readingLabels() stateFn { - // Summaries/histograms are special. We have to reset the - // currentLabels map, currentQuantile and currentBucket before starting to - // read labels. - if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - p.currentLabels = map[string]string{} - p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() - } - if p.currentByte != '{' { - return p.readingValue - } - return p.startLabelName -} - -// startLabelName represents the state where the next byte read from p.buf is -// the start of a label name (or whitespace leading up to it). -func (p *TextParser) startLabelName() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '}' { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - } - if p.readTokenAsLabelName(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentToken.Len() == 0 { - p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) - return nil - } - p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} - if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { - p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) - return nil - } - // Special summary/histogram treatment. Don't add 'quantile' and 'le' - // labels to 'real' labels. - if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && - !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. 
- } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil - } - return p.startLabelValue -} - -// startLabelValue represents the state where the next byte read from p.buf is -// the start of a (quoted) label value (or whitespace leading up to it). -func (p *TextParser) startLabelValue() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '"' { - p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) - return nil - } - if p.readTokenAsLabelValue(); p.err != nil { - return nil - } - p.currentLabelPair.Value = proto.String(p.currentToken.String()) - // Special treatment of summaries: - // - Quantile labels are special, will result in dto.Quantile later. - // - Other labels have to be added to currentLabels for signature calculation. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if p.currentLabelPair.GetName() == model.QuantileLabel { - if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - // Similar special treatment of histograms. - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if p.currentLabelPair.GetName() == model.BucketLabel { - if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - switch p.currentByte { - case ',': - return p.startLabelName - - case '}': - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - default: - p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value)) - return nil - } -} - -// readingValue represents the state where the last byte read (now in -// p.currentByte) is the first byte of the sample value (i.e. a float). -func (p *TextParser) readingValue() stateFn { - // When we are here, we have read all the labels, so for the - // special case of a summary/histogram, we can finally find out - // if the metric already exists. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - signature := model.LabelsToSignature(p.currentLabels) - if summary := p.summaries[signature]; summary != nil { - p.currentMetric = summary - } else { - p.summaries[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - signature := model.LabelsToSignature(p.currentLabels) - if histogram := p.histograms[signature]; histogram != nil { - p.currentMetric = histogram - } else { - p.histograms[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - } else { - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. 
- } - value, err := strconv.ParseFloat(p.currentToken.String(), 64) - if err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) - return nil - } - switch p.currentMF.GetType() { - case dto.MetricType_COUNTER: - p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} - case dto.MetricType_GAUGE: - p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} - case dto.MetricType_UNTYPED: - p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} - case dto.MetricType_SUMMARY: - // *sigh* - if p.currentMetric.Summary == nil { - p.currentMetric.Summary = &dto.Summary{} - } - switch { - case p.currentIsSummaryCount: - p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) - case p.currentIsSummarySum: - p.currentMetric.Summary.SampleSum = proto.Float64(value) - case !math.IsNaN(p.currentQuantile): - p.currentMetric.Summary.Quantile = append( - p.currentMetric.Summary.Quantile, - &dto.Quantile{ - Quantile: proto.Float64(p.currentQuantile), - Value: proto.Float64(value), - }, - ) - } - case dto.MetricType_HISTOGRAM: - // *sigh* - if p.currentMetric.Histogram == nil { - p.currentMetric.Histogram = &dto.Histogram{} - } - switch { - case p.currentIsHistogramCount: - p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) - case p.currentIsHistogramSum: - p.currentMetric.Histogram.SampleSum = proto.Float64(value) - case !math.IsNaN(p.currentBucket): - p.currentMetric.Histogram.Bucket = append( - p.currentMetric.Histogram.Bucket, - &dto.Bucket{ - UpperBound: proto.Float64(p.currentBucket), - CumulativeCount: proto.Uint64(uint64(value)), - }, - ) - } - default: - p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) - } - if p.currentByte == '\n' { - return p.startOfLine - } - return p.startTimestamp -} - -// startTimestamp represents the state where the next byte read from p.buf is -// the start of the timestamp (or whitespace leading up to it). -func (p *TextParser) startTimestamp() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) - if err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) - return nil - } - p.currentMetric.TimestampMs = proto.Int64(timestamp) - if p.readTokenUntilNewline(false); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentToken.Len() > 0 { - p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String())) - return nil - } - return p.startOfLine -} - -// readingHelp represents the state where the last byte read (now in -// p.currentByte) is the first byte of the docstring after 'HELP'. -func (p *TextParser) readingHelp() stateFn { - if p.currentMF.Help != nil { - p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName())) - return nil - } - // Rest of line is the docstring. - if p.readTokenUntilNewline(true); p.err != nil { - return nil // Unexpected end of input. - } - p.currentMF.Help = proto.String(p.currentToken.String()) - return p.startOfLine -} - -// readingType represents the state where the last byte read (now in -// p.currentByte) is the first byte of the type hint after 'HELP'. 
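The stateFn pattern that drives this parser, reduced to a standalone toy: each state does some work and returns the next state, and nil ends the machine. The countdown example is purely illustrative.

package main

import "fmt"

type stateFn func() stateFn

func main() {
    countdown := 3
    var tick stateFn
    tick = func() stateFn {
        if countdown == 0 {
            return nil // end state
        }
        fmt.Println("tick", countdown)
        countdown--
        return tick
    }
    for state := tick; state != nil; state = state() {
    }
}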
-func (p *TextParser) readingType() stateFn { - if p.currentMF.Type != nil { - p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName())) - return nil - } - // Rest of line is the type. - if p.readTokenUntilNewline(false); p.err != nil { - return nil // Unexpected end of input. - } - metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())] - if !ok { - p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) - return nil - } - p.currentMF.Type = dto.MetricType(metricType).Enum() - return p.startOfLine -} - -// parseError sets p.err to a ParseError at the current line with the given -// message. -func (p *TextParser) parseError(msg string) { - p.err = ParseError{ - Line: p.lineCount, - Msg: msg, - } -} - -// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte -// that is neither ' ' nor '\t'. That byte is left in p.currentByte. -func (p *TextParser) skipBlankTab() { - for { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) { - return - } - } -} - -// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do -// anything if p.currentByte is neither ' ' nor '\t'. -func (p *TextParser) skipBlankTabIfCurrentBlankTab() { - if isBlankOrTab(p.currentByte) { - p.skipBlankTab() - } -} - -// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The -// first byte considered is the byte already read (now in p.currentByte). The -// first whitespace byte encountered is still copied into p.currentByte, but not -// into p.currentToken. -func (p *TextParser) readTokenUntilWhitespace() { - p.currentToken.Reset() - for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - } -} - -// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first -// byte considered is the byte already read (now in p.currentByte). The first -// newline byte encountered is still copied into p.currentByte, but not into -// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are -// recognized: '\\' tranlates into '\', and '\n' into a line-feed character. All -// other escape sequences are invalid and cause an error. -func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { - p.currentToken.Reset() - escaped := false - for p.err == nil { - if recognizeEscapeSequence && escaped { - switch p.currentByte { - case '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - return - } - escaped = false - } else { - switch p.currentByte { - case '\n': - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } - p.currentByte, p.err = p.buf.ReadByte() - } -} - -// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. -// The first byte considered is the byte already read (now in p.currentByte). -// The first byte not part of a metric name is still copied into p.currentByte, -// but not into p.currentToken. 
-func (p *TextParser) readTokenAsMetricName() { - p.currentToken.Reset() - if !isValidMetricNameStart(p.currentByte) { - return - } - for { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { - return - } - } -} - -// readTokenAsLabelName copies a label name from p.buf into p.currentToken. -// The first byte considered is the byte already read (now in p.currentByte). -// The first byte not part of a label name is still copied into p.currentByte, -// but not into p.currentToken. -func (p *TextParser) readTokenAsLabelName() { - p.currentToken.Reset() - if !isValidLabelNameStart(p.currentByte) { - return - } - for { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { - return - } - } -} - -// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. -// In contrast to the other 'readTokenAs...' functions, which start with the -// last read byte in p.currentByte, this method ignores p.currentByte and starts -// with reading a new byte from p.buf. The first byte not part of a label value -// is still copied into p.currentByte, but not into p.currentToken. -func (p *TextParser) readTokenAsLabelValue() { - p.currentToken.Reset() - escaped := false - for { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { - return - } - if escaped { - switch p.currentByte { - case '"', '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - return - } - escaped = false - continue - } - switch p.currentByte { - case '"': - return - case '\n': - p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } -} - -func (p *TextParser) setOrCreateCurrentMF() { - p.currentIsSummaryCount = false - p.currentIsSummarySum = false - p.currentIsHistogramCount = false - p.currentIsHistogramSum = false - name := p.currentToken.String() - if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { - return - } - // Try out if this is a _sum or _count for a summary/histogram. 
- summaryName := summaryMetricName(name) - if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if isCount(name) { - p.currentIsSummaryCount = true - } - if isSum(name) { - p.currentIsSummarySum = true - } - return - } - } - histogramName := histogramMetricName(name) - if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if isCount(name) { - p.currentIsHistogramCount = true - } - if isSum(name) { - p.currentIsHistogramSum = true - } - return - } - } - p.currentMF = &dto.MetricFamily{Name: proto.String(name)} - p.metricFamiliesByName[name] = p.currentMF -} - -func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' -} - -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') -} - -func isValidMetricNameStart(b byte) bool { - return isValidLabelNameStart(b) || b == ':' -} - -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' -} - -func isBlankOrTab(b byte) bool { - return b == ' ' || b == '\t' -} - -func isCount(name string) bool { - return len(name) > 6 && name[len(name)-6:] == "_count" -} - -func isSum(name string) bool { - return len(name) > 4 && name[len(name)-4:] == "_sum" -} - -func isBucket(name string) bool { - return len(name) > 7 && name[len(name)-7:] == "_bucket" -} - -func summaryMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - default: - return name - } -} - -func histogramMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - case isBucket(name): - return name[:len(name)-7] - default: - return name - } -} diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt deleted file mode 100644 index 7723656d5..000000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt +++ /dev/null @@ -1,67 +0,0 @@ -PACKAGE - -package goautoneg -import "bitbucket.org/ww/goautoneg" - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. 
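A standalone illustration of the suffix handling above: strip the _count/_sum/_bucket suffixes the text format uses for summary and histogram series to recover the family name. It mirrors summaryMetricName and histogramMetricName rather than calling them (they are unexported); familyName and the sample series names are invented.

package main

import (
    "fmt"
    "strings"
)

func familyName(series string) string {
    for _, suffix := range []string{"_count", "_sum", "_bucket"} {
        if strings.HasSuffix(series, suffix) && len(series) > len(suffix) {
            return strings.TrimSuffix(series, suffix)
        }
    }
    return series
}

func main() {
    fmt.Println(familyName("rpc_latency_seconds_bucket")) // rpc_latency_seconds
    fmt.Println(familyName("rpc_latency_seconds_sum"))    // rpc_latency_seconds
    fmt.Println(familyName("up"))                         // up
}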
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -FUNCTIONS - -func Negotiate(header string, alternatives []string) (content_type string) -Negotiate the most appropriate content_type given the accept header -and a list of alternatives. - -func ParseAccept(header string) (accept []Accept) -Parse an Accept Header string returning a sorted list -of clauses - - -TYPES - -type Accept struct { - Type, SubType string - Q float32 - Params map[string]string -} -Structure to represent a clause in an HTTP Accept Header - - -SUBDIRECTORIES - - .hg diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go deleted file mode 100644 index 648b38cb6..000000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -*/ -package goautoneg - -import ( - "sort" - "strconv" - "strings" -) - -// Structure to represent a clause in an HTTP Accept Header -type Accept struct { - Type, SubType string - Q float64 - Params map[string]string -} - -// For internal use, so that we can use the sort interface -type accept_slice []Accept - -func (accept accept_slice) Len() int { - slice := []Accept(accept) - return len(slice) -} - -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) - ai, aj := slice[i], slice[j] - if ai.Q > aj.Q { - return true - } - if ai.Type != "*" && aj.Type == "*" { - return true - } - if ai.SubType != "*" && aj.SubType == "*" { - return true - } - return false -} - -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) - slice[i], slice[j] = slice[j], slice[i] -} - -// Parse an Accept Header string returning a sorted list -// of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") - - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 - - mrp := strings.Split(part, ";") - - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") - - switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], " ") - default: - continue - } - - if len(mrp) == 1 { - accept = append(accept, a) - continue - } - - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { - continue - } - token := strings.Trim(sp[0], " ") - if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) - } else { - a.Params[token] = strings.Trim(sp[1], " ") - } - } - - accept = append(accept, a) - } - - slice := accept_slice(accept) - sort.Sort(slice) - - return -} - -// Negotiate the most appropriate content_type given the accept header -// and a list of alternatives. -func Negotiate(header string, alternatives []string) (content_type string) { - asp := make([][]string, 0, len(alternatives)) - for _, ctype := range alternatives { - asp = append(asp, strings.SplitN(ctype, "/", 2)) - } - for _, clause := range ParseAccept(header) { - for i, ctsp := range asp { - if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { - content_type = alternatives[i] - return - } - if clause.Type == ctsp[0] && clause.SubType == "*" { - content_type = alternatives[i] - return - } - if clause.Type == "*" && clause.SubType == "*" { - content_type = alternatives[i] - return - } - } - } - return -} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go deleted file mode 100644 index 35e739c7a..000000000 --- a/vendor/github.com/prometheus/common/model/alert.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
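A usage sketch for the deleted goautoneg code. Inside prometheus/common it is vendored under internal/ and cannot be imported from other projects, so the import path below assumes the original upstream location; the header and alternatives are invented examples.

package main

import (
    "fmt"

    "bitbucket.org/ww/goautoneg"
)

func main() {
    header := "text/plain;q=0.5, application/json, */*;q=0.1"
    alternatives := []string{"application/json", "text/plain"}

    // The highest-quality acceptable alternative wins.
    fmt.Println(goautoneg.Negotiate(header, alternatives)) // application/json

    for _, clause := range goautoneg.ParseAccept(header) {
        fmt.Printf("%s/%s q=%g\n", clause.Type, clause.SubType, clause.Q)
    }
}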
- -package model - -import ( - "fmt" - "time" -) - -type AlertStatus string - -const ( - AlertFiring AlertStatus = "firing" - AlertResolved AlertStatus = "resolved" -) - -// Alert is a generic representation of an alert in the Prometheus eco-system. -type Alert struct { - // Label value pairs for purpose of aggregation, matching, and disposition - // dispatching. This must minimally include an "alertname" label. - Labels LabelSet `json:"labels"` - - // Extra key/value information which does not define alert identity. - Annotations LabelSet `json:"annotations"` - - // The known time range for this alert. Both ends are optional. - StartsAt time.Time `json:"startsAt,omitempty"` - EndsAt time.Time `json:"endsAt,omitempty"` - GeneratorURL string `json:"generatorURL"` -} - -// Name returns the name of the alert. It is equivalent to the "alertname" label. -func (a *Alert) Name() string { - return string(a.Labels[AlertNameLabel]) -} - -// Fingerprint returns a unique hash for the alert. It is equivalent to -// the fingerprint of the alert's label set. -func (a *Alert) Fingerprint() Fingerprint { - return a.Labels.Fingerprint() -} - -func (a *Alert) String() string { - s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7]) - if a.Resolved() { - return s + "[resolved]" - } - return s + "[active]" -} - -// Resolved returns true iff the activity interval ended in the past. -func (a *Alert) Resolved() bool { - return a.ResolvedAt(time.Now()) -} - -// ResolvedAt returns true off the activity interval ended before -// the given timestamp. -func (a *Alert) ResolvedAt(ts time.Time) bool { - if a.EndsAt.IsZero() { - return false - } - return !a.EndsAt.After(ts) -} - -// Status returns the status of the alert. -func (a *Alert) Status() AlertStatus { - if a.Resolved() { - return AlertResolved - } - return AlertFiring -} - -// Validate checks whether the alert data is inconsistent. -func (a *Alert) Validate() error { - if a.StartsAt.IsZero() { - return fmt.Errorf("start time missing") - } - if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { - return fmt.Errorf("start time must be before end time") - } - if err := a.Labels.Validate(); err != nil { - return fmt.Errorf("invalid label set: %s", err) - } - if len(a.Labels) == 0 { - return fmt.Errorf("at least one label pair required") - } - if err := a.Annotations.Validate(); err != nil { - return fmt.Errorf("invalid annotations: %s", err) - } - return nil -} - -// Alert is a list of alerts that can be sorted in chronological order. -type Alerts []*Alert - -func (as Alerts) Len() int { return len(as) } -func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] } - -func (as Alerts) Less(i, j int) bool { - if as[i].StartsAt.Before(as[j].StartsAt) { - return true - } - if as[i].EndsAt.Before(as[j].EndsAt) { - return true - } - return as[i].Fingerprint() < as[j].Fingerprint() -} - -// HasFiring returns true iff one of the alerts is not resolved. -func (as Alerts) HasFiring() bool { - for _, a := range as { - if !a.Resolved() { - return true - } - } - return false -} - -// Status returns StatusFiring iff at least one of the alerts is firing. 
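A short sketch of the Alert model removed here: build an alert, validate it, and inspect its derived state. The alert name, labels, and generator URL are invented.

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/prometheus/common/model"
)

func main() {
    alert := &model.Alert{
        Labels: model.LabelSet{
            model.AlertNameLabel: "HighErrorRate",
            "severity":           "page",
        },
        Annotations: model.LabelSet{
            "summary": "error rate above 5% for 10m",
        },
        StartsAt:     time.Now().Add(-10 * time.Minute),
        GeneratorURL: "http://prometheus.example.com/graph", // hypothetical
    }
    if err := alert.Validate(); err != nil {
        log.Fatal(err)
    }
    // EndsAt is zero, so the alert is still firing.
    fmt.Println(alert.Name(), alert.Status(), alert.Fingerprint())
}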
-func (as Alerts) Status() AlertStatus { - if as.HasFiring() { - return AlertFiring - } - return AlertResolved -} diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go deleted file mode 100644 index fc4de4106..000000000 --- a/vendor/github.com/prometheus/common/model/fingerprinting.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "strconv" -) - -// Fingerprint provides a hash-capable representation of a Metric. -// For our purposes, FNV-1A 64-bit is used. -type Fingerprint uint64 - -// FingerprintFromString transforms a string representation into a Fingerprint. -func FingerprintFromString(s string) (Fingerprint, error) { - num, err := strconv.ParseUint(s, 16, 64) - return Fingerprint(num), err -} - -// ParseFingerprint parses the input string into a fingerprint. -func ParseFingerprint(s string) (Fingerprint, error) { - num, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return 0, err - } - return Fingerprint(num), nil -} - -func (f Fingerprint) String() string { - return fmt.Sprintf("%016x", uint64(f)) -} - -// Fingerprints represents a collection of Fingerprint subject to a given -// natural sorting scheme. It implements sort.Interface. -type Fingerprints []Fingerprint - -// Len implements sort.Interface. -func (f Fingerprints) Len() int { - return len(f) -} - -// Less implements sort.Interface. -func (f Fingerprints) Less(i, j int) bool { - return f[i] < f[j] -} - -// Swap implements sort.Interface. -func (f Fingerprints) Swap(i, j int) { - f[i], f[j] = f[j], f[i] -} - -// FingerprintSet is a set of Fingerprints. -type FingerprintSet map[Fingerprint]struct{} - -// Equal returns true if both sets contain the same elements (and not more). -func (s FingerprintSet) Equal(o FingerprintSet) bool { - if len(s) != len(o) { - return false - } - - for k := range s { - if _, ok := o[k]; !ok { - return false - } - } - - return true -} - -// Intersection returns the elements contained in both sets. -func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { - myLength, otherLength := len(s), len(o) - if myLength == 0 || otherLength == 0 { - return FingerprintSet{} - } - - subSet := s - superSet := o - - if otherLength < myLength { - subSet = o - superSet = s - } - - out := FingerprintSet{} - - for k := range subSet { - if _, ok := superSet[k]; ok { - out[k] = struct{}{} - } - } - - return out -} diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go deleted file mode 100644 index 038fc1c90..000000000 --- a/vendor/github.com/prometheus/common/model/fnv.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
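A small sketch of the fingerprinting API: fingerprints are 64-bit hashes rendered as 16 hex digits, and a label set hashes to one. The hex string and labels below are arbitrary example values.

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/common/model"
)

func main() {
    fp, err := model.ParseFingerprint("030d41b74741cfa2")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(fp.String()) // 030d41b74741cfa2

    // A label set's fingerprint is derived from its name/value pairs.
    ls := model.LabelSet{model.MetricNameLabel: "up", "job": "node"}
    fmt.Println(ls.Fingerprint())
}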
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -// Inline and byte-free variant of hash/fnv's fnv64a. - -const ( - offset64 = 14695981039346656037 - prime64 = 1099511628211 -) - -// hashNew initializies a new fnv64a hash value. -func hashNew() uint64 { - return offset64 -} - -// hashAdd adds a string to a fnv64a hash value, returning the updated hash. -func hashAdd(h uint64, s string) uint64 { - for i := 0; i < len(s); i++ { - h ^= uint64(s[i]) - h *= prime64 - } - return h -} - -// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. -func hashAddByte(h uint64, b byte) uint64 { - h ^= uint64(b) - h *= prime64 - return h -} diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go deleted file mode 100644 index 3b72e7ff8..000000000 --- a/vendor/github.com/prometheus/common/model/labels.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "regexp" - "strings" - "unicode/utf8" -) - -const ( - // AlertNameLabel is the name of the label containing the an alert's name. - AlertNameLabel = "alertname" - - // ExportedLabelPrefix is the prefix to prepend to the label names present in - // exported metrics if a label of the same name is added by the server. - ExportedLabelPrefix = "exported_" - - // MetricNameLabel is the label name indicating the metric name of a - // timeseries. - MetricNameLabel = "__name__" - - // SchemeLabel is the name of the label that holds the scheme on which to - // scrape a target. - SchemeLabel = "__scheme__" - - // AddressLabel is the name of the label that holds the address of - // a scrape target. - AddressLabel = "__address__" - - // MetricsPathLabel is the name of the label that holds the path on which to - // scrape a target. - MetricsPathLabel = "__metrics_path__" - - // ReservedLabelPrefix is a prefix which is not legal in user-supplied - // label names. - ReservedLabelPrefix = "__" - - // MetaLabelPrefix is a prefix for labels that provide meta information. - // Labels with this prefix are used for intermediate label processing and - // will not be attached to time series. - MetaLabelPrefix = "__meta_" - - // TmpLabelPrefix is a prefix for temporary labels as part of relabelling. - // Labels with this prefix are used for intermediate label processing and - // will not be attached to time series. This is reserved for use in - // Prometheus configuration files by users. 
- TmpLabelPrefix = "__tmp_" - - // ParamLabelPrefix is a prefix for labels that provide URL parameters - // used to scrape a target. - ParamLabelPrefix = "__param_" - - // JobLabel is the label name indicating the job from which a timeseries - // was scraped. - JobLabel = "job" - - // InstanceLabel is the label name used for the instance label. - InstanceLabel = "instance" - - // BucketLabel is used for the label that defines the upper bound of a - // bucket of a histogram ("le" -> "less or equal"). - BucketLabel = "le" - - // QuantileLabel is used for the label that defines the quantile in a - // summary. - QuantileLabel = "quantile" -) - -// LabelNameRE is a regular expression matching valid label names. -var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") - -// A LabelName is a key for a LabelSet or Metric. It has a value associated -// therewith. -type LabelName string - -// IsValid is true iff the label name matches the pattern of LabelNameRE. -func (ln LabelName) IsValid() bool { - if len(ln) == 0 { - return false - } - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } - return true -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - if !LabelNameRE.MatchString(s) { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (ln *LabelName) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - if !LabelNameRE.MatchString(s) { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} - -// LabelNames is a sortable LabelName slice. In implements sort.Interface. -type LabelNames []LabelName - -func (l LabelNames) Len() int { - return len(l) -} - -func (l LabelNames) Less(i, j int) bool { - return l[i] < l[j] -} - -func (l LabelNames) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} - -func (l LabelNames) String() string { - labelStrings := make([]string, 0, len(l)) - for _, label := range l { - labelStrings = append(labelStrings, string(label)) - } - return strings.Join(labelStrings, ", ") -} - -// A LabelValue is an associated value for a LabelName. -type LabelValue string - -// IsValid returns true iff the string is a valid UTF8. -func (lv LabelValue) IsValid() bool { - return utf8.ValidString(string(lv)) -} - -// LabelValues is a sortable LabelValue slice. It implements sort.Interface. -type LabelValues []LabelValue - -func (l LabelValues) Len() int { - return len(l) -} - -func (l LabelValues) Less(i, j int) bool { - return string(l[i]) < string(l[j]) -} - -func (l LabelValues) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} - -// LabelPair pairs a name with a value. -type LabelPair struct { - Name LabelName - Value LabelValue -} - -// LabelPairs is a sortable slice of LabelPair pointers. It implements -// sort.Interface. 
-type LabelPairs []*LabelPair - -func (l LabelPairs) Len() int { - return len(l) -} - -func (l LabelPairs) Less(i, j int) bool { - switch { - case l[i].Name > l[j].Name: - return false - case l[i].Name < l[j].Name: - return true - case l[i].Value > l[j].Value: - return false - case l[i].Value < l[j].Value: - return true - default: - return false - } -} - -func (l LabelPairs) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go deleted file mode 100644 index 5f931cdb9..000000000 --- a/vendor/github.com/prometheus/common/model/labelset.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "sort" - "strings" -) - -// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet -// may be fully-qualified down to the point where it may resolve to a single -// Metric in the data store or not. All operations that occur within the realm -// of a LabelSet can emit a vector of Metric entities to which the LabelSet may -// match. -type LabelSet map[LabelName]LabelValue - -// Validate checks whether all names and values in the label set -// are valid. -func (ls LabelSet) Validate() error { - for ln, lv := range ls { - if !ln.IsValid() { - return fmt.Errorf("invalid name %q", ln) - } - if !lv.IsValid() { - return fmt.Errorf("invalid value %q", lv) - } - } - return nil -} - -// Equal returns true iff both label sets have exactly the same key/value pairs. -func (ls LabelSet) Equal(o LabelSet) bool { - if len(ls) != len(o) { - return false - } - for ln, lv := range ls { - olv, ok := o[ln] - if !ok { - return false - } - if olv != lv { - return false - } - } - return true -} - -// Before compares the metrics, using the following criteria: -// -// If m has fewer labels than o, it is before o. If it has more, it is not. -// -// If the number of labels is the same, the superset of all label names is -// sorted alphanumerically. The first differing label pair found in that order -// determines the outcome: If the label does not exist at all in m, then m is -// before o, and vice versa. Otherwise the label value is compared -// alphanumerically. -// -// If m and o are equal, the method returns false. -func (ls LabelSet) Before(o LabelSet) bool { - if len(ls) < len(o) { - return true - } - if len(ls) > len(o) { - return false - } - - lns := make(LabelNames, 0, len(ls)+len(o)) - for ln := range ls { - lns = append(lns, ln) - } - for ln := range o { - lns = append(lns, ln) - } - // It's probably not worth it to de-dup lns. - sort.Sort(lns) - for _, ln := range lns { - mlv, ok := ls[ln] - if !ok { - return true - } - olv, ok := o[ln] - if !ok { - return false - } - if mlv < olv { - return true - } - if mlv > olv { - return false - } - } - return false -} - -// Clone returns a copy of the label set. 
-func (ls LabelSet) Clone() LabelSet { - lsn := make(LabelSet, len(ls)) - for ln, lv := range ls { - lsn[ln] = lv - } - return lsn -} - -// Merge is a helper function to non-destructively merge two label sets. -func (l LabelSet) Merge(other LabelSet) LabelSet { - result := make(LabelSet, len(l)) - - for k, v := range l { - result[k] = v - } - - for k, v := range other { - result[k] = v - } - - return result -} - -func (l LabelSet) String() string { - lstrs := make([]string, 0, len(l)) - for l, v := range l { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) - } - - sort.Strings(lstrs) - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} - -// Fingerprint returns the LabelSet's fingerprint. -func (ls LabelSet) Fingerprint() Fingerprint { - return labelSetToFingerprint(ls) -} - -// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing -// algorithm, which is, however, more susceptible to hash collisions. -func (ls LabelSet) FastFingerprint() Fingerprint { - return labelSetToFastFingerprint(ls) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (l *LabelSet) UnmarshalJSON(b []byte) error { - var m map[LabelName]LabelValue - if err := json.Unmarshal(b, &m); err != nil { - return err - } - // encoding/json only unmarshals maps of the form map[string]T. It treats - // LabelName as a string and does not call its UnmarshalJSON method. - // Thus, we have to replicate the behavior here. - for ln := range m { - if !LabelNameRE.MatchString(string(ln)) { - return fmt.Errorf("%q is not a valid label name", ln) - } - } - *l = LabelSet(m) - return nil -} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go deleted file mode 100644 index a5da59a50..000000000 --- a/vendor/github.com/prometheus/common/model/metric.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "regexp" - "sort" - "strings" -) - -var ( - separator = []byte{0} - MetricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`) -) - -// A Metric is similar to a LabelSet, but the key difference is that a Metric is -// a singleton and refers to one and only one stream of samples. -type Metric LabelSet - -// Equal compares the metrics. -func (m Metric) Equal(o Metric) bool { - return LabelSet(m).Equal(LabelSet(o)) -} - -// Before compares the metrics' underlying label sets. -func (m Metric) Before(o Metric) bool { - return LabelSet(m).Before(LabelSet(o)) -} - -// Clone returns a copy of the Metric. 
-func (m Metric) Clone() Metric { - clone := Metric{} - for k, v := range m { - clone[k] = v - } - return clone -} - -func (m Metric) String() string { - metricName, hasName := m[MetricNameLabel] - numLabels := len(m) - 1 - if !hasName { - numLabels = len(m) - } - labelStrings := make([]string, 0, numLabels) - for label, value := range m { - if label != MetricNameLabel { - labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) - } - } - - switch numLabels { - case 0: - if hasName { - return string(metricName) - } - return "{}" - default: - sort.Strings(labelStrings) - return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) - } -} - -// Fingerprint returns a Metric's Fingerprint. -func (m Metric) Fingerprint() Fingerprint { - return LabelSet(m).Fingerprint() -} - -// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing -// algorithm, which is, however, more susceptible to hash collisions. -func (m Metric) FastFingerprint() Fingerprint { - return LabelSet(m).FastFingerprint() -} - -// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. -func IsValidMetricName(n LabelValue) bool { - if len(n) == 0 { - return false - } - for i, b := range n { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } - return true -} diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go deleted file mode 100644 index a7b969170..000000000 --- a/vendor/github.com/prometheus/common/model/model.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package model contains common data structures that are shared across -// Prometheus components and libraries. -package model diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go deleted file mode 100644 index 8762b13c6..000000000 --- a/vendor/github.com/prometheus/common/model/signature.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "sort" -) - -// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is -// used to separate label names, label values, and other strings from each other -// when calculating their combined hash value (aka signature aka fingerprint). 
-const SeparatorByte byte = 255 - -var ( - // cache the signature of an empty label set. - emptyLabelSignature = hashNew() -) - -// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a -// given label set. (Collisions are possible but unlikely if the number of label -// sets the function is applied to is small.) -func LabelsToSignature(labels map[string]string) uint64 { - if len(labels) == 0 { - return emptyLabelSignature - } - - labelNames := make([]string, 0, len(labels)) - for labelName := range labels { - labelNames = append(labelNames, labelName) - } - sort.Strings(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, labelName) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, labels[labelName]) - sum = hashAddByte(sum, SeparatorByte) - } - return sum -} - -// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as -// parameter (rather than a label map) and returns a Fingerprint. -func labelSetToFingerprint(ls LabelSet) Fingerprint { - if len(ls) == 0 { - return Fingerprint(emptyLabelSignature) - } - - labelNames := make(LabelNames, 0, len(ls)) - for labelName := range ls { - labelNames = append(labelNames, labelName) - } - sort.Sort(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, string(labelName)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(ls[labelName])) - sum = hashAddByte(sum, SeparatorByte) - } - return Fingerprint(sum) -} - -// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a -// faster and less allocation-heavy hash function, which is more susceptible to -// create hash collisions. Therefore, collision detection should be applied. -func labelSetToFastFingerprint(ls LabelSet) Fingerprint { - if len(ls) == 0 { - return Fingerprint(emptyLabelSignature) - } - - var result uint64 - for labelName, labelValue := range ls { - sum := hashNew() - sum = hashAdd(sum, string(labelName)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(labelValue)) - result ^= sum - } - return Fingerprint(result) -} - -// SignatureForLabels works like LabelsToSignature but takes a Metric as -// parameter (rather than a label map) and only includes the labels with the -// specified LabelNames into the signature calculation. The labels passed in -// will be sorted by this function. -func SignatureForLabels(m Metric, labels ...LabelName) uint64 { - if len(labels) == 0 { - return emptyLabelSignature - } - - sort.Sort(LabelNames(labels)) - - sum := hashNew() - for _, label := range labels { - sum = hashAdd(sum, string(label)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(m[label])) - sum = hashAddByte(sum, SeparatorByte) - } - return sum -} - -// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as -// parameter (rather than a label map) and excludes the labels with any of the -// specified LabelNames from the signature calculation. 
-func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 { - if len(m) == 0 { - return emptyLabelSignature - } - - labelNames := make(LabelNames, 0, len(m)) - for labelName := range m { - if _, exclude := labels[labelName]; !exclude { - labelNames = append(labelNames, labelName) - } - } - if len(labelNames) == 0 { - return emptyLabelSignature - } - sort.Sort(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, string(labelName)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(m[labelName])) - sum = hashAddByte(sum, SeparatorByte) - } - return sum -} diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go deleted file mode 100644 index 7538e2997..000000000 --- a/vendor/github.com/prometheus/common/model/silence.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "regexp" - "time" -) - -// Matcher describes a matches the value of a given label. -type Matcher struct { - Name LabelName `json:"name"` - Value string `json:"value"` - IsRegex bool `json:"isRegex"` -} - -func (m *Matcher) UnmarshalJSON(b []byte) error { - type plain Matcher - if err := json.Unmarshal(b, (*plain)(m)); err != nil { - return err - } - - if len(m.Name) == 0 { - return fmt.Errorf("label name in matcher must not be empty") - } - if m.IsRegex { - if _, err := regexp.Compile(m.Value); err != nil { - return err - } - } - return nil -} - -// Validate returns true iff all fields of the matcher have valid values. -func (m *Matcher) Validate() error { - if !m.Name.IsValid() { - return fmt.Errorf("invalid name %q", m.Name) - } - if m.IsRegex { - if _, err := regexp.Compile(m.Value); err != nil { - return fmt.Errorf("invalid regular expression %q", m.Value) - } - } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 { - return fmt.Errorf("invalid value %q", m.Value) - } - return nil -} - -// Silence defines the representation of a silence definiton -// in the Prometheus eco-system. -type Silence struct { - ID uint64 `json:"id,omitempty"` - - Matchers []*Matcher `json:"matchers"` - - StartsAt time.Time `json:"startsAt"` - EndsAt time.Time `json:"endsAt"` - - CreatedAt time.Time `json:"createdAt,omitempty"` - CreatedBy string `json:"createdBy"` - Comment string `json:"comment,omitempty"` -} - -// Validate returns true iff all fields of the silence have valid values. 
-func (s *Silence) Validate() error { - if len(s.Matchers) == 0 { - return fmt.Errorf("at least one matcher required") - } - for _, m := range s.Matchers { - if err := m.Validate(); err != nil { - return fmt.Errorf("invalid matcher: %s", err) - } - } - if s.StartsAt.IsZero() { - return fmt.Errorf("start time missing") - } - if s.EndsAt.IsZero() { - return fmt.Errorf("end time missing") - } - if s.EndsAt.Before(s.StartsAt) { - return fmt.Errorf("start time must be before end time") - } - if s.CreatedBy == "" { - return fmt.Errorf("creator information missing") - } - if s.Comment == "" { - return fmt.Errorf("comment missing") - } - if s.CreatedAt.IsZero() { - return fmt.Errorf("creation timestamp missing") - } - return nil -} diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go deleted file mode 100644 index 548968aeb..000000000 --- a/vendor/github.com/prometheus/common/model/time.go +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "math" - "regexp" - "strconv" - "strings" - "time" -) - -const ( - // MinimumTick is the minimum supported time resolution. This has to be - // at least time.Second in order for the code below to work. - minimumTick = time.Millisecond - // second is the Time duration equivalent to one second. - second = int64(time.Second / minimumTick) - // The number of nanoseconds per minimum tick. - nanosPerTick = int64(minimumTick / time.Nanosecond) - - // Earliest is the earliest Time representable. Handy for - // initializing a high watermark. - Earliest = Time(math.MinInt64) - // Latest is the latest Time representable. Handy for initializing - // a low watermark. - Latest = Time(math.MaxInt64) -) - -// Time is the number of milliseconds since the epoch -// (1970-01-01 00:00 UTC) excluding leap seconds. -type Time int64 - -// Interval describes and interval between two timestamps. -type Interval struct { - Start, End Time -} - -// Now returns the current time as a Time. -func Now() Time { - return TimeFromUnixNano(time.Now().UnixNano()) -} - -// TimeFromUnix returns the Time equivalent to the Unix Time t -// provided in seconds. -func TimeFromUnix(t int64) Time { - return Time(t * second) -} - -// TimeFromUnixNano returns the Time equivalent to the Unix Time -// t provided in nanoseconds. -func TimeFromUnixNano(t int64) Time { - return Time(t / nanosPerTick) -} - -// Equal reports whether two Times represent the same instant. -func (t Time) Equal(o Time) bool { - return t == o -} - -// Before reports whether the Time t is before o. -func (t Time) Before(o Time) bool { - return t < o -} - -// After reports whether the Time t is after o. -func (t Time) After(o Time) bool { - return t > o -} - -// Add returns the Time t + d. -func (t Time) Add(d time.Duration) Time { - return t + Time(d/minimumTick) -} - -// Sub returns the Duration t - o. 
-func (t Time) Sub(o Time) time.Duration { - return time.Duration(t-o) * minimumTick -} - -// Time returns the time.Time representation of t. -func (t Time) Time() time.Time { - return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) -} - -// Unix returns t as a Unix time, the number of seconds elapsed -// since January 1, 1970 UTC. -func (t Time) Unix() int64 { - return int64(t) / second -} - -// UnixNano returns t as a Unix time, the number of nanoseconds elapsed -// since January 1, 1970 UTC. -func (t Time) UnixNano() int64 { - return int64(t) * nanosPerTick -} - -// The number of digits after the dot. -var dotPrecision = int(math.Log10(float64(second))) - -// String returns a string representation of the Time. -func (t Time) String() string { - return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) -} - -// MarshalJSON implements the json.Marshaler interface. -func (t Time) MarshalJSON() ([]byte, error) { - return []byte(t.String()), nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (t *Time) UnmarshalJSON(b []byte) error { - p := strings.Split(string(b), ".") - switch len(p) { - case 1: - v, err := strconv.ParseInt(string(p[0]), 10, 64) - if err != nil { - return err - } - *t = Time(v * second) - - case 2: - v, err := strconv.ParseInt(string(p[0]), 10, 64) - if err != nil { - return err - } - v *= second - - prec := dotPrecision - len(p[1]) - if prec < 0 { - p[1] = p[1][:dotPrecision] - } else if prec > 0 { - p[1] = p[1] + strings.Repeat("0", prec) - } - - va, err := strconv.ParseInt(p[1], 10, 32) - if err != nil { - return err - } - - *t = Time(v + va) - - default: - return fmt.Errorf("invalid time %q", string(b)) - } - return nil -} - -// Duration wraps time.Duration. It is used to parse the custom duration format -// from YAML. -// This type should not propagate beyond the scope of input/output processing. -type Duration time.Duration - -var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") - -// StringToDuration parses a string into a time.Duration, assuming that a year -// always has 365d, a week always has 7d, and a day always has 24h. -func ParseDuration(durationStr string) (Duration, error) { - matches := durationRE.FindStringSubmatch(durationStr) - if len(matches) != 3 { - return 0, fmt.Errorf("not a valid duration string: %q", durationStr) - } - var ( - n, _ = strconv.Atoi(matches[1]) - dur = time.Duration(n) * time.Millisecond - ) - switch unit := matches[2]; unit { - case "y": - dur *= 1000 * 60 * 60 * 24 * 365 - case "w": - dur *= 1000 * 60 * 60 * 24 * 7 - case "d": - dur *= 1000 * 60 * 60 * 24 - case "h": - dur *= 1000 * 60 * 60 - case "m": - dur *= 1000 * 60 - case "s": - dur *= 1000 - case "ms": - // Value already correct - default: - return 0, fmt.Errorf("invalid time unit in duration string: %q", unit) - } - return Duration(dur), nil -} - -func (d Duration) String() string { - var ( - ms = int64(time.Duration(d) / time.Millisecond) - unit = "ms" - ) - factors := map[string]int64{ - "y": 1000 * 60 * 60 * 24 * 365, - "w": 1000 * 60 * 60 * 24 * 7, - "d": 1000 * 60 * 60 * 24, - "h": 1000 * 60 * 60, - "m": 1000 * 60, - "s": 1000, - "ms": 1, - } - - switch int64(0) { - case ms % factors["y"]: - unit = "y" - case ms % factors["w"]: - unit = "w" - case ms % factors["d"]: - unit = "d" - case ms % factors["h"]: - unit = "h" - case ms % factors["m"]: - unit = "m" - case ms % factors["s"]: - unit = "s" - } - return fmt.Sprintf("%v%v", ms/factors[unit], unit) -} - -// MarshalYAML implements the yaml.Marshaler interface. 
-func (d Duration) MarshalYAML() (interface{}, error) { - return d.String(), nil -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - dur, err := ParseDuration(s) - if err != nil { - return err - } - *d = dur - return nil -} diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go deleted file mode 100644 index dbf5d10e4..000000000 --- a/vendor/github.com/prometheus/common/model/value.go +++ /dev/null @@ -1,403 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "math" - "sort" - "strconv" - "strings" -) - -// A SampleValue is a representation of a value for a given sample at a given -// time. -type SampleValue float64 - -// MarshalJSON implements json.Marshaler. -func (v SampleValue) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (v *SampleValue) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = SampleValue(f) - return nil -} - -// Equal returns true if the value of v and o is equal or if both are NaN. Note -// that v==o is false if both are NaN. If you want the conventional float -// behavior, use == to compare two SampleValues. -func (v SampleValue) Equal(o SampleValue) bool { - if v == o { - return true - } - return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// SamplePair pairs a SampleValue with a Timestamp. -type SamplePair struct { - Timestamp Time - Value SampleValue -} - -// MarshalJSON implements json.Marshaler. -func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *SamplePair) UnmarshalJSON(b []byte) error { - v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. The sematics of Value equality is defined by SampleValue.Equal. -func (s *SamplePair) Equal(o *SamplePair) bool { - return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) -} - -func (s SamplePair) String() string { - return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) -} - -// Sample is a sample pair associated with a metric. 
-type Sample struct { - Metric Metric `json:"metric"` - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` -} - -// Equal compares first the metrics, then the timestamp, then the value. The -// sematics of value equality is defined by SampleValue.Equal. -func (s *Sample) Equal(o *Sample) bool { - if s == o { - return true - } - - if !s.Metric.Equal(o.Metric) { - return false - } - if !s.Timestamp.Equal(o.Timestamp) { - return false - } - if s.Value.Equal(o.Value) { - return false - } - - return true -} - -func (s Sample) String() string { - return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }) -} - -// MarshalJSON implements json.Marshaler. -func (s Sample) MarshalJSON() ([]byte, error) { - v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` - }{ - Metric: s.Metric, - Value: SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }, - } - - return json.Marshal(&v) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *Sample) UnmarshalJSON(b []byte) error { - v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` - }{ - Metric: s.Metric, - Value: SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }, - } - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - s.Metric = v.Metric - s.Timestamp = v.Value.Timestamp - s.Value = v.Value.Value - - return nil -} - -// Samples is a sortable Sample slice. It implements sort.Interface. -type Samples []*Sample - -func (s Samples) Len() int { - return len(s) -} - -// Less compares first the metrics, then the timestamp. -func (s Samples) Less(i, j int) bool { - switch { - case s[i].Metric.Before(s[j].Metric): - return true - case s[j].Metric.Before(s[i].Metric): - return false - case s[i].Timestamp.Before(s[j].Timestamp): - return true - default: - return false - } -} - -func (s Samples) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Equal compares two sets of samples and returns true if they are equal. -func (s Samples) Equal(o Samples) bool { - if len(s) != len(o) { - return false - } - - for i, sample := range s { - if !sample.Equal(o[i]) { - return false - } - } - return true -} - -// SampleStream is a stream of Values belonging to an attached COWMetric. -type SampleStream struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` -} - -func (ss SampleStream) String() string { - vals := make([]string, len(ss.Values)) - for i, v := range ss.Values { - vals[i] = v.String() - } - return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) -} - -// Value is a generic interface for values resulting from a query evaluation. -type Value interface { - Type() ValueType - String() string -} - -func (Matrix) Type() ValueType { return ValMatrix } -func (Vector) Type() ValueType { return ValVector } -func (*Scalar) Type() ValueType { return ValScalar } -func (*String) Type() ValueType { return ValString } - -type ValueType int - -const ( - ValNone ValueType = iota - ValScalar - ValVector - ValMatrix - ValString -) - -// MarshalJSON implements json.Marshaler. 
-func (et ValueType) MarshalJSON() ([]byte, error) { - return json.Marshal(et.String()) -} - -func (et *ValueType) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - switch s { - case "": - *et = ValNone - case "scalar": - *et = ValScalar - case "vector": - *et = ValVector - case "matrix": - *et = ValMatrix - case "string": - *et = ValString - default: - return fmt.Errorf("unknown value type %q", s) - } - return nil -} - -func (e ValueType) String() string { - switch e { - case ValNone: - return "" - case ValScalar: - return "scalar" - case ValVector: - return "vector" - case ValMatrix: - return "matrix" - case ValString: - return "string" - } - panic("ValueType.String: unhandled value type") -} - -// Scalar is a scalar value evaluated at the set timestamp. -type Scalar struct { - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` -} - -func (s Scalar) String() string { - return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) -} - -// MarshalJSON implements json.Marshaler. -func (s Scalar) MarshalJSON() ([]byte, error) { - v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) - return json.Marshal([...]interface{}{s.Timestamp, string(v)}) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *Scalar) UnmarshalJSON(b []byte) error { - var f string - v := [...]interface{}{&s.Timestamp, &f} - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - value, err := strconv.ParseFloat(f, 64) - if err != nil { - return fmt.Errorf("error parsing sample value: %s", err) - } - s.Value = SampleValue(value) - return nil -} - -// String is a string value evaluated at the set timestamp. -type String struct { - Value string `json:"value"` - Timestamp Time `json:"timestamp"` -} - -func (s *String) String() string { - return s.Value -} - -// MarshalJSON implements json.Marshaler. -func (s String) MarshalJSON() ([]byte, error) { - return json.Marshal([]interface{}{s.Timestamp, s.Value}) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *String) UnmarshalJSON(b []byte) error { - v := [...]interface{}{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Vector is basically only an alias for Samples, but the -// contract is that in a Vector, all Samples have the same timestamp. -type Vector []*Sample - -func (vec Vector) String() string { - entries := make([]string, len(vec)) - for i, s := range vec { - entries[i] = s.String() - } - return strings.Join(entries, "\n") -} - -func (vec Vector) Len() int { return len(vec) } -func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } - -// Less compares first the metrics, then the timestamp. -func (vec Vector) Less(i, j int) bool { - switch { - case vec[i].Metric.Before(vec[j].Metric): - return true - case vec[j].Metric.Before(vec[i].Metric): - return false - case vec[i].Timestamp.Before(vec[j].Timestamp): - return true - default: - return false - } -} - -// Equal compares two sets of samples and returns true if they are equal. -func (vec Vector) Equal(o Vector) bool { - if len(vec) != len(o) { - return false - } - - for i, sample := range vec { - if !sample.Equal(o[i]) { - return false - } - } - return true -} - -// Matrix is a list of time series. 
-type Matrix []*SampleStream - -func (m Matrix) Len() int { return len(m) } -func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } -func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } - -func (mat Matrix) String() string { - matCp := make(Matrix, len(mat)) - copy(matCp, mat) - sort.Sort(matCp) - - strs := make([]string, len(matCp)) - - for i, ss := range matCp { - strs[i] = ss.String() - } - - return strings.Join(strs, "\n") -} diff --git a/vendor/github.com/prometheus/procfs/.travis.yml b/vendor/github.com/prometheus/procfs/.travis.yml deleted file mode 100644 index 2b4554da5..000000000 --- a/vendor/github.com/prometheus/procfs/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -sudo: false -language: go -go: - - 1.5 - - 1.6 diff --git a/vendor/github.com/prometheus/procfs/AUTHORS.md b/vendor/github.com/prometheus/procfs/AUTHORS.md deleted file mode 100644 index 0c802dd87..000000000 --- a/vendor/github.com/prometheus/procfs/AUTHORS.md +++ /dev/null @@ -1,20 +0,0 @@ -The Prometheus project was started by Matt T. Proud (emeritus) and -Julius Volz in 2012. - -Maintainers of this repository: - -* Tobias Schmidt - -The following individuals have contributed code to this repository -(listed in alphabetical order): - -* Armen Baghumian -* Bjoern Rabenstein -* David Cournapeau -* Ji-Hoon, Seol -* Jonas Große Sundrup -* Julius Volz -* Matthias Rampke -* Nicky Gerritsen -* Rémi Audebert -* Tobias Schmidt diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md deleted file mode 100644 index 5705f0fbe..000000000 --- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md +++ /dev/null @@ -1,18 +0,0 @@ -# Contributing - -Prometheus uses GitHub to manage reviews of pull requests. - -* If you have a trivial fix or improvement, go ahead and create a pull - request, addressing (with `@...`) one or more of the maintainers - (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. - -* If you plan to do something more involved, first discuss your ideas - on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). - This will avoid unnecessary work and surely give you and us a good deal - of inspiration. - -* Relevant coding style guidelines are the [Go Code Review - Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) - and the _Formatting and style_ section of Peter Bourgon's [Go: Best - Practices for Production - Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/prometheus/procfs/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile deleted file mode 100644 index c264a49d1..000000000 --- a/vendor/github.com/prometheus/procfs/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -ci: - ! gofmt -l *.go | read nothing - go vet - go test -v ./... - go get github.com/golang/lint/golint - golint *.go diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE deleted file mode 100644 index 53c5e9aa1..000000000 --- a/vendor/github.com/prometheus/procfs/NOTICE +++ /dev/null @@ -1,7 +0,0 @@ -procfs provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystem proc. - -Copyright 2014-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md deleted file mode 100644 index 6e7ee6b8b..000000000 --- a/vendor/github.com/prometheus/procfs/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# procfs - -This procfs package provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystem proc. - -*WARNING*: This package is a work in progress. Its API may still break in -backwards-incompatible ways without warnings. Use it at your own risk. - -[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) -[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go deleted file mode 100644 index e2acd6d40..000000000 --- a/vendor/github.com/prometheus/procfs/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2014 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package procfs provides functions to retrieve system, kernel and process -// metrics from the pseudo-filesystem proc. -// -// Example: -// -// package main -// -// import ( -// "fmt" -// "log" -// -// "github.com/prometheus/procfs" -// ) -// -// func main() { -// p, err := procfs.Self() -// if err != nil { -// log.Fatalf("could not get process: %s", err) -// } -// -// stat, err := p.NewStat() -// if err != nil { -// log.Fatalf("could not get process stat: %s", err) -// } -// -// fmt.Printf("command: %s\n", stat.Comm) -// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) -// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) -// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) -// } -// -package procfs diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go deleted file mode 100644 index 49aaab050..000000000 --- a/vendor/github.com/prometheus/procfs/fs.go +++ /dev/null @@ -1,33 +0,0 @@ -package procfs - -import ( - "fmt" - "os" - "path" -) - -// FS represents the pseudo-filesystem proc, which provides an interface to -// kernel data structures. 
-type FS string - -// DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = "/proc" - -// NewFS returns a new FS mounted under the given mountPoint. It will error -// if the mount point can't be read. -func NewFS(mountPoint string) (FS, error) { - info, err := os.Stat(mountPoint) - if err != nil { - return "", fmt.Errorf("could not read %s: %s", mountPoint, err) - } - if !info.IsDir() { - return "", fmt.Errorf("mount point %s is not a directory", mountPoint) - } - - return FS(mountPoint), nil -} - -// Path returns the path of the given subsystem relative to the procfs root. -func (fs FS) Path(p ...string) string { - return path.Join(append([]string{string(fs)}, p...)...) -} diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go deleted file mode 100644 index e7012f732..000000000 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ /dev/null @@ -1,224 +0,0 @@ -package procfs - -import ( - "bufio" - "encoding/hex" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "strconv" - "strings" -) - -// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. -type IPVSStats struct { - // Total count of connections. - Connections uint64 - // Total incoming packages processed. - IncomingPackets uint64 - // Total outgoing packages processed. - OutgoingPackets uint64 - // Total incoming traffic. - IncomingBytes uint64 - // Total outgoing traffic. - OutgoingBytes uint64 -} - -// IPVSBackendStatus holds current metrics of one virtual / real address pair. -type IPVSBackendStatus struct { - // The local (virtual) IP address. - LocalAddress net.IP - // The local (virtual) port. - LocalPort uint16 - // The transport protocol (TCP, UDP). - Proto string - // The remote (real) IP address. - RemoteAddress net.IP - // The remote (real) port. - RemotePort uint16 - // The current number of active connections for this virtual/real address pair. - ActiveConn uint64 - // The current number of inactive connections for this virtual/real address pair. - InactConn uint64 - // The current weight of this virtual/real address pair. - Weight uint64 -} - -// NewIPVSStats reads the IPVS statistics. -func NewIPVSStats() (IPVSStats, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return IPVSStats{}, err - } - - return fs.NewIPVSStats() -} - -// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. -func (fs FS) NewIPVSStats() (IPVSStats, error) { - file, err := os.Open(fs.Path("net/ip_vs_stats")) - if err != nil { - return IPVSStats{}, err - } - defer file.Close() - - return parseIPVSStats(file) -} - -// parseIPVSStats performs the actual parsing of `ip_vs_stats`. 
-func parseIPVSStats(file io.Reader) (IPVSStats, error) { - var ( - statContent []byte - statLines []string - statFields []string - stats IPVSStats - ) - - statContent, err := ioutil.ReadAll(file) - if err != nil { - return IPVSStats{}, err - } - - statLines = strings.SplitN(string(statContent), "\n", 4) - if len(statLines) != 4 { - return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") - } - - statFields = strings.Fields(statLines[2]) - if len(statFields) != 5 { - return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") - } - - stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) - if err != nil { - return IPVSStats{}, err - } - - return stats, nil -} - -// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. -func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return []IPVSBackendStatus{}, err - } - - return fs.NewIPVSBackendStatus() -} - -// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. -func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { - file, err := os.Open(fs.Path("net/ip_vs")) - if err != nil { - return nil, err - } - defer file.Close() - - return parseIPVSBackendStatus(file) -} - -func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { - var ( - status []IPVSBackendStatus - scanner = bufio.NewScanner(file) - proto string - localAddress net.IP - localPort uint16 - err error - ) - - for scanner.Scan() { - fields := strings.Fields(string(scanner.Text())) - if len(fields) == 0 { - continue - } - switch { - case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": - continue - case fields[0] == "TCP" || fields[0] == "UDP": - if len(fields) < 2 { - continue - } - proto = fields[0] - localAddress, localPort, err = parseIPPort(fields[1]) - if err != nil { - return nil, err - } - case fields[0] == "->": - if len(fields) < 6 { - continue - } - remoteAddress, remotePort, err := parseIPPort(fields[1]) - if err != nil { - return nil, err - } - weight, err := strconv.ParseUint(fields[3], 10, 64) - if err != nil { - return nil, err - } - activeConn, err := strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, err - } - inactConn, err := strconv.ParseUint(fields[5], 10, 64) - if err != nil { - return nil, err - } - status = append(status, IPVSBackendStatus{ - LocalAddress: localAddress, - LocalPort: localPort, - RemoteAddress: remoteAddress, - RemotePort: remotePort, - Proto: proto, - Weight: weight, - ActiveConn: activeConn, - InactConn: inactConn, - }) - } - } - return status, nil -} - -func parseIPPort(s string) (net.IP, uint16, error) { - tmp := strings.SplitN(s, ":", 2) - - if len(tmp) != 2 { - return nil, 0, fmt.Errorf("invalid IP:Port: %s", s) - } - - if len(tmp[0]) != 8 && len(tmp[0]) != 32 { - return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0]) - } - - ip, err := hex.DecodeString(tmp[0]) - if err != nil { - return nil, 0, err 
- } - - port, err := strconv.ParseUint(tmp[1], 16, 16) - if err != nil { - return nil, 0, err - } - - return ip, uint16(port), nil -} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go deleted file mode 100644 index d7a248c0d..000000000 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ /dev/null @@ -1,138 +0,0 @@ -package procfs - -import ( - "fmt" - "io/ioutil" - "regexp" - "strconv" - "strings" -) - -var ( - statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) - buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`) -) - -// MDStat holds info parsed from /proc/mdstat. -type MDStat struct { - // Name of the device. - Name string - // activity-state of the device. - ActivityState string - // Number of active disks. - DisksActive int64 - // Total number of disks the device consists of. - DisksTotal int64 - // Number of blocks the device holds. - BlocksTotal int64 - // Number of blocks on the device that are in sync. - BlocksSynced int64 -} - -// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. -func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { - mdStatusFilePath := fs.Path("mdstat") - content, err := ioutil.ReadFile(mdStatusFilePath) - if err != nil { - return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) - } - - mdStates := []MDStat{} - lines := strings.Split(string(content), "\n") - for i, l := range lines { - if l == "" { - continue - } - if l[0] == ' ' { - continue - } - if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { - continue - } - - mainLine := strings.Split(l, " ") - if len(mainLine) < 3 { - return mdStates, fmt.Errorf("error parsing mdline: %s", l) - } - mdName := mainLine[0] - activityState := mainLine[2] - - if len(lines) <= i+3 { - return mdStates, fmt.Errorf( - "error parsing %s: too few lines for md device %s", - mdStatusFilePath, - mdName, - ) - } - - active, total, size, err := evalStatusline(lines[i+1]) - if err != nil { - return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) - } - - // j is the line number of the syncing-line. - j := i + 2 - if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line - j = i + 3 - } - - // If device is syncing at the moment, get the number of currently - // synced bytes, otherwise that number equals the size of the device. 
- syncedBlocks := size - if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { - syncedBlocks, err = evalBuildline(lines[j]) - if err != nil { - return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) - } - } - - mdStates = append(mdStates, MDStat{ - Name: mdName, - ActivityState: activityState, - DisksActive: active, - DisksTotal: total, - BlocksTotal: size, - BlocksSynced: syncedBlocks, - }) - } - - return mdStates, nil -} - -func evalStatusline(statusline string) (active, total, size int64, err error) { - matches := statuslineRE.FindStringSubmatch(statusline) - if len(matches) != 4 { - return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) - } - - size, err = strconv.ParseInt(matches[1], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) - } - - total, err = strconv.ParseInt(matches[2], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) - } - - active, err = strconv.ParseInt(matches[3], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) - } - - return active, total, size, nil -} - -func evalBuildline(buildline string) (syncedBlocks int64, err error) { - matches := buildlineRE.FindStringSubmatch(buildline) - if len(matches) != 2 { - return 0, fmt.Errorf("unexpected buildline: %s", buildline) - } - - syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) - if err != nil { - return 0, fmt.Errorf("%s in buildline: %s", err, buildline) - } - - return syncedBlocks, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go deleted file mode 100644 index 0d0a6a90f..000000000 --- a/vendor/github.com/prometheus/procfs/proc.go +++ /dev/null @@ -1,212 +0,0 @@ -package procfs - -import ( - "fmt" - "io/ioutil" - "os" - "strconv" - "strings" -) - -// Proc provides information about a running process. -type Proc struct { - // The process ID. - PID int - - fs FS -} - -// Procs represents a list of Proc structs. -type Procs []Proc - -func (p Procs) Len() int { return len(p) } -func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } - -// Self returns a process for the current process read via /proc/self. -func Self() (Proc, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Proc{}, err - } - return fs.Self() -} - -// NewProc returns a process for the given pid under /proc. -func NewProc(pid int) (Proc, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Proc{}, err - } - return fs.NewProc(pid) -} - -// AllProcs returns a list of all currently available processes under /proc. -func AllProcs() (Procs, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Procs{}, err - } - return fs.AllProcs() -} - -// Self returns a process for the current process. -func (fs FS) Self() (Proc, error) { - p, err := os.Readlink(fs.Path("self")) - if err != nil { - return Proc{}, err - } - pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) - if err != nil { - return Proc{}, err - } - return fs.NewProc(pid) -} - -// NewProc returns a process for the given pid. -func (fs FS) NewProc(pid int) (Proc, error) { - if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { - return Proc{}, err - } - return Proc{PID: pid, fs: fs}, nil -} - -// AllProcs returns a list of all currently available processes. 
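The procfs helpers deleted above form a small, filesystem-rooted API: NewFS validates a mount point, and the IPVS and mdstat readers hang off the returned FS value. A minimal usage sketch of that API, for orientation only (this fragment is illustrative caller code and is not part of the vendored tree):

    package main

    import (
        "fmt"
        "log"

        "github.com/prometheus/procfs"
    )

    func main() {
        // NewFS validates the mount point and returns a handle rooted at it.
        fs, err := procfs.NewFS(procfs.DefaultMountPoint)
        if err != nil {
            log.Fatalf("could not open /proc: %s", err)
        }

        // IPVS statistics are parsed from net/ip_vs_stats under the FS root.
        ipvs, err := fs.NewIPVSStats()
        if err != nil {
            log.Printf("no IPVS stats: %s", err)
        } else {
            fmt.Printf("ipvs connections: %d\n", ipvs.Connections)
        }

        // ParseMDStat reads mdstat and reports software-RAID device state.
        devices, err := fs.ParseMDStat()
        if err != nil {
            log.Fatalf("could not parse mdstat: %s", err)
        }
        for _, dev := range devices {
            fmt.Printf("%s: %s (%d/%d disks active)\n",
                dev.Name, dev.ActivityState, dev.DisksActive, dev.DisksTotal)
        }
    }
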
-func (fs FS) AllProcs() (Procs, error) { - d, err := os.Open(fs.Path()) - if err != nil { - return Procs{}, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) - } - - p := Procs{} - for _, n := range names { - pid, err := strconv.ParseInt(n, 10, 64) - if err != nil { - continue - } - p = append(p, Proc{PID: int(pid), fs: fs}) - } - - return p, nil -} - -// CmdLine returns the command line of a process. -func (p Proc) CmdLine() ([]string, error) { - f, err := os.Open(p.path("cmdline")) - if err != nil { - return nil, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - - if len(data) < 1 { - return []string{}, nil - } - - return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil -} - -// Comm returns the command name of a process. -func (p Proc) Comm() (string, error) { - f, err := os.Open(p.path("comm")) - if err != nil { - return "", err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return "", err - } - - return strings.TrimSpace(string(data)), nil -} - -// Executable returns the absolute path of the executable command of a process. -func (p Proc) Executable() (string, error) { - exe, err := os.Readlink(p.path("exe")) - if os.IsNotExist(err) { - return "", nil - } - - return exe, err -} - -// FileDescriptors returns the currently open file descriptors of a process. -func (p Proc) FileDescriptors() ([]uintptr, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - fds := make([]uintptr, len(names)) - for i, n := range names { - fd, err := strconv.ParseInt(n, 10, 32) - if err != nil { - return nil, fmt.Errorf("could not parse fd %s: %s", n, err) - } - fds[i] = uintptr(fd) - } - - return fds, nil -} - -// FileDescriptorTargets returns the targets of all file descriptors of a process. -// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. -func (p Proc) FileDescriptorTargets() ([]string, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - targets := make([]string, len(names)) - - for i, name := range names { - target, err := os.Readlink(p.path("fd", name)) - if err == nil { - targets[i] = target - } - } - - return targets, nil -} - -// FileDescriptorsLen returns the number of currently open file descriptors of -// a process. -func (p Proc) FileDescriptorsLen() (int, error) { - fds, err := p.fileDescriptors() - if err != nil { - return 0, err - } - - return len(fds), nil -} - -func (p Proc) fileDescriptors() ([]string, error) { - d, err := os.Open(p.path("fd")) - if err != nil { - return nil, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return nil, fmt.Errorf("could not read %s: %s", d.Name(), err) - } - - return names, nil -} - -func (p Proc) path(pa ...string) string { - return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) -} diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go deleted file mode 100644 index b4e31d7ba..000000000 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ /dev/null @@ -1,55 +0,0 @@ -package procfs - -import ( - "fmt" - "io/ioutil" - "os" -) - -// ProcIO models the content of /proc//io. -type ProcIO struct { - // Chars read. - RChar uint64 - // Chars written. - WChar uint64 - // Read syscalls. - SyscR uint64 - // Write syscalls. 
- SyscW uint64 - // Bytes read. - ReadBytes uint64 - // Bytes written. - WriteBytes uint64 - // Bytes written, but taking into account truncation. See - // Documentation/filesystems/proc.txt in the kernel sources for - // detailed explanation. - CancelledWriteBytes int64 -} - -// NewIO creates a new ProcIO instance from a given Proc instance. -func (p Proc) NewIO() (ProcIO, error) { - pio := ProcIO{} - - f, err := os.Open(p.path("io")) - if err != nil { - return pio, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return pio, err - } - - ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + - "read_bytes: %d\nwrite_bytes: %d\n" + - "cancelled_write_bytes: %d\n" - - _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, - &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) - if err != nil { - return pio, err - } - - return pio, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go deleted file mode 100644 index 2df997ce1..000000000 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ /dev/null @@ -1,137 +0,0 @@ -package procfs - -import ( - "bufio" - "fmt" - "os" - "regexp" - "strconv" -) - -// ProcLimits represents the soft limits for each of the process's resource -// limits. For more information see getrlimit(2): -// http://man7.org/linux/man-pages/man2/getrlimit.2.html. -type ProcLimits struct { - // CPU time limit in seconds. - CPUTime int - // Maximum size of files that the process may create. - FileSize int - // Maximum size of the process's data segment (initialized data, - // uninitialized data, and heap). - DataSize int - // Maximum size of the process stack in bytes. - StackSize int - // Maximum size of a core file. - CoreFileSize int - // Limit of the process's resident set in pages. - ResidentSet int - // Maximum number of processes that can be created for the real user ID of - // the calling process. - Processes int - // Value one greater than the maximum file descriptor number that can be - // opened by this process. - OpenFiles int - // Maximum number of bytes of memory that may be locked into RAM. - LockedMemory int - // Maximum size of the process's virtual memory address space in bytes. - AddressSpace int - // Limit on the combined number of flock(2) locks and fcntl(2) leases that - // this process may establish. - FileLocks int - // Limit of signals that may be queued for the real user ID of the calling - // process. - PendingSignals int - // Limit on the number of bytes that can be allocated for POSIX message - // queues for the real user ID of the calling process. - MsqqueueSize int - // Limit of the nice priority set using setpriority(2) or nice(2). - NicePriority int - // Limit of the real-time priority set using sched_setscheduler(2) or - // sched_setparam(2). - RealtimePriority int - // Limit (in microseconds) on the amount of CPU time that a process - // scheduled under a real-time scheduling policy may consume without making - // a blocking system call. - RealtimeTimeout int -} - -const ( - limitsFields = 3 - limitsUnlimited = "unlimited" -) - -var ( - limitsDelimiter = regexp.MustCompile(" +") -) - -// NewLimits returns the current soft limits of the process. 
-func (p Proc) NewLimits() (ProcLimits, error) { - f, err := os.Open(p.path("limits")) - if err != nil { - return ProcLimits{}, err - } - defer f.Close() - - var ( - l = ProcLimits{} - s = bufio.NewScanner(f) - ) - for s.Scan() { - fields := limitsDelimiter.Split(s.Text(), limitsFields) - if len(fields) != limitsFields { - return ProcLimits{}, fmt.Errorf( - "couldn't parse %s line %s", f.Name(), s.Text()) - } - - switch fields[0] { - case "Max cpu time": - l.CPUTime, err = parseInt(fields[1]) - case "Max file size": - l.FileSize, err = parseInt(fields[1]) - case "Max data size": - l.DataSize, err = parseInt(fields[1]) - case "Max stack size": - l.StackSize, err = parseInt(fields[1]) - case "Max core file size": - l.CoreFileSize, err = parseInt(fields[1]) - case "Max resident set": - l.ResidentSet, err = parseInt(fields[1]) - case "Max processes": - l.Processes, err = parseInt(fields[1]) - case "Max open files": - l.OpenFiles, err = parseInt(fields[1]) - case "Max locked memory": - l.LockedMemory, err = parseInt(fields[1]) - case "Max address space": - l.AddressSpace, err = parseInt(fields[1]) - case "Max file locks": - l.FileLocks, err = parseInt(fields[1]) - case "Max pending signals": - l.PendingSignals, err = parseInt(fields[1]) - case "Max msgqueue size": - l.MsqqueueSize, err = parseInt(fields[1]) - case "Max nice priority": - l.NicePriority, err = parseInt(fields[1]) - case "Max realtime priority": - l.RealtimePriority, err = parseInt(fields[1]) - case "Max realtime timeout": - l.RealtimeTimeout, err = parseInt(fields[1]) - } - if err != nil { - return ProcLimits{}, err - } - } - - return l, s.Err() -} - -func parseInt(s string) (int, error) { - if s == limitsUnlimited { - return -1, nil - } - i, err := strconv.ParseInt(s, 10, 32) - if err != nil { - return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) - } - return int(i), nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go deleted file mode 100644 index 724e271b9..000000000 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ /dev/null @@ -1,175 +0,0 @@ -package procfs - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" -) - -// Originally, this USER_HZ value was dynamically retrieved via a sysconf call -// which required cgo. However, that caused a lot of problems regarding -// cross-compilation. Alternatives such as running a binary to determine the -// value, or trying to derive it in some other way were all problematic. After -// much research it was determined that USER_HZ is actually hardcoded to 100 on -// all Go-supported platforms as of the time of this writing. This is why we -// decided to hardcode it here as well. It is not impossible that there could -// be systems with exceptions, but they should be very exotic edge cases, and -// in that case, the worst outcome will be two misreported metrics. -// -// See also the following discussions: -// -// - https://github.com/prometheus/node_exporter/issues/52 -// - https://github.com/prometheus/procfs/pull/2 -// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue -const userHZ = 100 - -// ProcStat provides status information about the process, -// read from /proc/[pid]/stat. -type ProcStat struct { - // The process ID. - PID int - // The filename of the executable. - Comm string - // The process state. - State string - // The PID of the parent of this process. - PPID int - // The process group ID of the process. - PGRP int - // The session ID of the process. 
- Session int - // The controlling terminal of the process. - TTY int - // The ID of the foreground process group of the controlling terminal of - // the process. - TPGID int - // The kernel flags word of the process. - Flags uint - // The number of minor faults the process has made which have not required - // loading a memory page from disk. - MinFlt uint - // The number of minor faults that the process's waited-for children have - // made. - CMinFlt uint - // The number of major faults the process has made which have required - // loading a memory page from disk. - MajFlt uint - // The number of major faults that the process's waited-for children have - // made. - CMajFlt uint - // Amount of time that this process has been scheduled in user mode, - // measured in clock ticks. - UTime uint - // Amount of time that this process has been scheduled in kernel mode, - // measured in clock ticks. - STime uint - // Amount of time that this process's waited-for children have been - // scheduled in user mode, measured in clock ticks. - CUTime uint - // Amount of time that this process's waited-for children have been - // scheduled in kernel mode, measured in clock ticks. - CSTime uint - // For processes running a real-time scheduling policy, this is the negated - // scheduling priority, minus one. - Priority int - // The nice value, a value in the range 19 (low priority) to -20 (high - // priority). - Nice int - // Number of threads in this process. - NumThreads int - // The time the process started after system boot, the value is expressed - // in clock ticks. - Starttime uint64 - // Virtual memory size in bytes. - VSize int - // Resident set size in pages. - RSS int - - fs FS -} - -// NewStat returns the current status information of the process. -func (p Proc) NewStat() (ProcStat, error) { - f, err := os.Open(p.path("stat")) - if err != nil { - return ProcStat{}, err - } - defer f.Close() - - data, err := ioutil.ReadAll(f) - if err != nil { - return ProcStat{}, err - } - - var ( - ignore int - - s = ProcStat{PID: p.PID, fs: p.fs} - l = bytes.Index(data, []byte("(")) - r = bytes.LastIndex(data, []byte(")")) - ) - - if l < 0 || r < 0 { - return ProcStat{}, fmt.Errorf( - "unexpected format, couldn't extract comm: %s", - data, - ) - } - - s.Comm = string(data[l+1 : r]) - _, err = fmt.Fscan( - bytes.NewBuffer(data[r+2:]), - &s.State, - &s.PPID, - &s.PGRP, - &s.Session, - &s.TTY, - &s.TPGID, - &s.Flags, - &s.MinFlt, - &s.CMinFlt, - &s.MajFlt, - &s.CMajFlt, - &s.UTime, - &s.STime, - &s.CUTime, - &s.CSTime, - &s.Priority, - &s.Nice, - &s.NumThreads, - &ignore, - &s.Starttime, - &s.VSize, - &s.RSS, - ) - if err != nil { - return ProcStat{}, err - } - - return s, nil -} - -// VirtualMemory returns the virtual memory size in bytes. -func (s ProcStat) VirtualMemory() int { - return s.VSize -} - -// ResidentMemory returns the resident memory size in bytes. -func (s ProcStat) ResidentMemory() int { - return s.RSS * os.Getpagesize() -} - -// StartTime returns the unix timestamp of the process in seconds. -func (s ProcStat) StartTime() (float64, error) { - stat, err := s.fs.NewStat() - if err != nil { - return 0, err - } - return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil -} - -// CPUTime returns the total CPU user and system time in seconds. 
-func (s ProcStat) CPUTime() float64 { - return float64(s.UTime+s.STime) / userHZ -} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go deleted file mode 100644 index 1ca217e8c..000000000 --- a/vendor/github.com/prometheus/procfs/stat.go +++ /dev/null @@ -1,56 +0,0 @@ -package procfs - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" -) - -// Stat represents kernel/system statistics. -type Stat struct { - // Boot time in seconds since the Epoch. - BootTime int64 -} - -// NewStat returns kernel/system statistics read from /proc/stat. -func NewStat() (Stat, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Stat{}, err - } - - return fs.NewStat() -} - -// NewStat returns an information about current kernel/system statistics. -func (fs FS) NewStat() (Stat, error) { - f, err := os.Open(fs.Path("stat")) - if err != nil { - return Stat{}, err - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - line := s.Text() - if !strings.HasPrefix(line, "btime") { - continue - } - fields := strings.Fields(line) - if len(fields) != 2 { - return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line) - } - i, err := strconv.ParseInt(fields[1], 10, 32) - if err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err) - } - return Stat{BootTime: i}, nil - } - if err := s.Err(); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) - } - - return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name()) -} diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS similarity index 100% rename from vendor/golang.org/x/oauth2/AUTHORS rename to vendor/golang.org/x/crypto/AUTHORS diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS similarity index 100% rename from vendor/golang.org/x/oauth2/CONTRIBUTORS rename to vendor/golang.org/x/crypto/CONTRIBUTORS diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go new file mode 100644 index 000000000..741eeb13f --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go @@ -0,0 +1,892 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +import ( + "bytes" + "io" + "sync" + "unicode/utf8" +) + +// EscapeCodes contains escape sequences that can be written to the terminal in +// order to achieve different styles of text. +type EscapeCodes struct { + // Foreground colors + Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte + + // Reset all attributes + Reset []byte +} + +var vt100EscapeCodes = EscapeCodes{ + Black: []byte{keyEscape, '[', '3', '0', 'm'}, + Red: []byte{keyEscape, '[', '3', '1', 'm'}, + Green: []byte{keyEscape, '[', '3', '2', 'm'}, + Yellow: []byte{keyEscape, '[', '3', '3', 'm'}, + Blue: []byte{keyEscape, '[', '3', '4', 'm'}, + Magenta: []byte{keyEscape, '[', '3', '5', 'm'}, + Cyan: []byte{keyEscape, '[', '3', '6', 'm'}, + White: []byte{keyEscape, '[', '3', '7', 'm'}, + + Reset: []byte{keyEscape, '[', '0', 'm'}, +} + +// Terminal contains the state for running a VT100 terminal that is capable of +// reading lines of input. 
+type Terminal struct { + // AutoCompleteCallback, if non-null, is called for each keypress with + // the full input line and the current position of the cursor (in + // bytes, as an index into |line|). If it returns ok=false, the key + // press is processed normally. Otherwise it returns a replacement line + // and the new cursor position. + AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool) + + // Escape contains a pointer to the escape codes for this terminal. + // It's always a valid pointer, although the escape codes themselves + // may be empty if the terminal doesn't support them. + Escape *EscapeCodes + + // lock protects the terminal and the state in this object from + // concurrent processing of a key press and a Write() call. + lock sync.Mutex + + c io.ReadWriter + prompt []rune + + // line is the current line being entered. + line []rune + // pos is the logical position of the cursor in line + pos int + // echo is true if local echo is enabled + echo bool + // pasteActive is true iff there is a bracketed paste operation in + // progress. + pasteActive bool + + // cursorX contains the current X value of the cursor where the left + // edge is 0. cursorY contains the row number where the first row of + // the current line is 0. + cursorX, cursorY int + // maxLine is the greatest value of cursorY so far. + maxLine int + + termWidth, termHeight int + + // outBuf contains the terminal data to be sent. + outBuf []byte + // remainder contains the remainder of any partial key sequences after + // a read. It aliases into inBuf. + remainder []byte + inBuf [256]byte + + // history contains previously entered commands so that they can be + // accessed with the up and down keys. + history stRingBuffer + // historyIndex stores the currently accessed history entry, where zero + // means the immediately previous entry. + historyIndex int + // When navigating up and down the history it's possible to return to + // the incomplete, initial line. That value is stored in + // historyPending. + historyPending string +} + +// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is +// a local terminal, that terminal must first have been put into raw mode. +// prompt is a string that is written at the start of each input line (i.e. +// "> "). +func NewTerminal(c io.ReadWriter, prompt string) *Terminal { + return &Terminal{ + Escape: &vt100EscapeCodes, + c: c, + prompt: []rune(prompt), + termWidth: 80, + termHeight: 24, + echo: true, + historyIndex: -1, + } +} + +const ( + keyCtrlD = 4 + keyCtrlU = 21 + keyEnter = '\r' + keyEscape = 27 + keyBackspace = 127 + keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota + keyUp + keyDown + keyLeft + keyRight + keyAltLeft + keyAltRight + keyHome + keyEnd + keyDeleteWord + keyDeleteLine + keyClearScreen + keyPasteStart + keyPasteEnd +) + +var pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'} +var pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'} + +// bytesToKey tries to parse a key sequence from b. If successful, it returns +// the key and the remainder of the input. Otherwise it returns utf8.RuneError. 
+func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { + if len(b) == 0 { + return utf8.RuneError, nil + } + + if !pasteActive { + switch b[0] { + case 1: // ^A + return keyHome, b[1:] + case 5: // ^E + return keyEnd, b[1:] + case 8: // ^H + return keyBackspace, b[1:] + case 11: // ^K + return keyDeleteLine, b[1:] + case 12: // ^L + return keyClearScreen, b[1:] + case 23: // ^W + return keyDeleteWord, b[1:] + } + } + + if b[0] != keyEscape { + if !utf8.FullRune(b) { + return utf8.RuneError, b + } + r, l := utf8.DecodeRune(b) + return r, b[l:] + } + + if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' { + switch b[2] { + case 'A': + return keyUp, b[3:] + case 'B': + return keyDown, b[3:] + case 'C': + return keyRight, b[3:] + case 'D': + return keyLeft, b[3:] + case 'H': + return keyHome, b[3:] + case 'F': + return keyEnd, b[3:] + } + } + + if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' { + switch b[5] { + case 'C': + return keyAltRight, b[6:] + case 'D': + return keyAltLeft, b[6:] + } + } + + if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) { + return keyPasteStart, b[6:] + } + + if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) { + return keyPasteEnd, b[6:] + } + + // If we get here then we have a key that we don't recognise, or a + // partial sequence. It's not clear how one should find the end of a + // sequence without knowing them all, but it seems that [a-zA-Z~] only + // appears at the end of a sequence. + for i, c := range b[0:] { + if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' { + return keyUnknown, b[i+1:] + } + } + + return utf8.RuneError, b +} + +// queue appends data to the end of t.outBuf +func (t *Terminal) queue(data []rune) { + t.outBuf = append(t.outBuf, []byte(string(data))...) +} + +var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'} +var space = []rune{' '} + +func isPrintable(key rune) bool { + isInSurrogateArea := key >= 0xd800 && key <= 0xdbff + return key >= 32 && !isInSurrogateArea +} + +// moveCursorToPos appends data to t.outBuf which will move the cursor to the +// given, logical position in the text. 
+func (t *Terminal) moveCursorToPos(pos int) { + if !t.echo { + return + } + + x := visualLength(t.prompt) + pos + y := x / t.termWidth + x = x % t.termWidth + + up := 0 + if y < t.cursorY { + up = t.cursorY - y + } + + down := 0 + if y > t.cursorY { + down = y - t.cursorY + } + + left := 0 + if x < t.cursorX { + left = t.cursorX - x + } + + right := 0 + if x > t.cursorX { + right = x - t.cursorX + } + + t.cursorX = x + t.cursorY = y + t.move(up, down, left, right) +} + +func (t *Terminal) move(up, down, left, right int) { + movement := make([]rune, 3*(up+down+left+right)) + m := movement + for i := 0; i < up; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'A' + m = m[3:] + } + for i := 0; i < down; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'B' + m = m[3:] + } + for i := 0; i < left; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'D' + m = m[3:] + } + for i := 0; i < right; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'C' + m = m[3:] + } + + t.queue(movement) +} + +func (t *Terminal) clearLineToRight() { + op := []rune{keyEscape, '[', 'K'} + t.queue(op) +} + +const maxLineLength = 4096 + +func (t *Terminal) setLine(newLine []rune, newPos int) { + if t.echo { + t.moveCursorToPos(0) + t.writeLine(newLine) + for i := len(newLine); i < len(t.line); i++ { + t.writeLine(space) + } + t.moveCursorToPos(newPos) + } + t.line = newLine + t.pos = newPos +} + +func (t *Terminal) advanceCursor(places int) { + t.cursorX += places + t.cursorY += t.cursorX / t.termWidth + if t.cursorY > t.maxLine { + t.maxLine = t.cursorY + } + t.cursorX = t.cursorX % t.termWidth + + if places > 0 && t.cursorX == 0 { + // Normally terminals will advance the current position + // when writing a character. But that doesn't happen + // for the last character in a line. However, when + // writing a character (except a new line) that causes + // a line wrap, the position will be advanced two + // places. + // + // So, if we are stopping at the end of a line, we + // need to write a newline so that our cursor can be + // advanced to the next line. + t.outBuf = append(t.outBuf, '\n') + } +} + +func (t *Terminal) eraseNPreviousChars(n int) { + if n == 0 { + return + } + + if t.pos < n { + n = t.pos + } + t.pos -= n + t.moveCursorToPos(t.pos) + + copy(t.line[t.pos:], t.line[n+t.pos:]) + t.line = t.line[:len(t.line)-n] + if t.echo { + t.writeLine(t.line[t.pos:]) + for i := 0; i < n; i++ { + t.queue(space) + } + t.advanceCursor(n) + t.moveCursorToPos(t.pos) + } +} + +// countToLeftWord returns then number of characters from the cursor to the +// start of the previous word. +func (t *Terminal) countToLeftWord() int { + if t.pos == 0 { + return 0 + } + + pos := t.pos - 1 + for pos > 0 { + if t.line[pos] != ' ' { + break + } + pos-- + } + for pos > 0 { + if t.line[pos] == ' ' { + pos++ + break + } + pos-- + } + + return t.pos - pos +} + +// countToRightWord returns then number of characters from the cursor to the +// start of the next word. +func (t *Terminal) countToRightWord() int { + pos := t.pos + for pos < len(t.line) { + if t.line[pos] == ' ' { + break + } + pos++ + } + for pos < len(t.line) { + if t.line[pos] != ' ' { + break + } + pos++ + } + return pos - t.pos +} + +// visualLength returns the number of visible glyphs in s. 
+func visualLength(runes []rune) int { + inEscapeSeq := false + length := 0 + + for _, r := range runes { + switch { + case inEscapeSeq: + if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') { + inEscapeSeq = false + } + case r == '\x1b': + inEscapeSeq = true + default: + length++ + } + } + + return length +} + +// handleKey processes the given key and, optionally, returns a line of text +// that the user has entered. +func (t *Terminal) handleKey(key rune) (line string, ok bool) { + if t.pasteActive && key != keyEnter { + t.addKeyToLine(key) + return + } + + switch key { + case keyBackspace: + if t.pos == 0 { + return + } + t.eraseNPreviousChars(1) + case keyAltLeft: + // move left by a word. + t.pos -= t.countToLeftWord() + t.moveCursorToPos(t.pos) + case keyAltRight: + // move right by a word. + t.pos += t.countToRightWord() + t.moveCursorToPos(t.pos) + case keyLeft: + if t.pos == 0 { + return + } + t.pos-- + t.moveCursorToPos(t.pos) + case keyRight: + if t.pos == len(t.line) { + return + } + t.pos++ + t.moveCursorToPos(t.pos) + case keyHome: + if t.pos == 0 { + return + } + t.pos = 0 + t.moveCursorToPos(t.pos) + case keyEnd: + if t.pos == len(t.line) { + return + } + t.pos = len(t.line) + t.moveCursorToPos(t.pos) + case keyUp: + entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1) + if !ok { + return "", false + } + if t.historyIndex == -1 { + t.historyPending = string(t.line) + } + t.historyIndex++ + runes := []rune(entry) + t.setLine(runes, len(runes)) + case keyDown: + switch t.historyIndex { + case -1: + return + case 0: + runes := []rune(t.historyPending) + t.setLine(runes, len(runes)) + t.historyIndex-- + default: + entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1) + if ok { + t.historyIndex-- + runes := []rune(entry) + t.setLine(runes, len(runes)) + } + } + case keyEnter: + t.moveCursorToPos(len(t.line)) + t.queue([]rune("\r\n")) + line = string(t.line) + ok = true + t.line = t.line[:0] + t.pos = 0 + t.cursorX = 0 + t.cursorY = 0 + t.maxLine = 0 + case keyDeleteWord: + // Delete zero or more spaces and then one or more characters. + t.eraseNPreviousChars(t.countToLeftWord()) + case keyDeleteLine: + // Delete everything from the current cursor position to the + // end of line. + for i := t.pos; i < len(t.line); i++ { + t.queue(space) + t.advanceCursor(1) + } + t.line = t.line[:t.pos] + t.moveCursorToPos(t.pos) + case keyCtrlD: + // Erase the character under the current position. + // The EOF case when the line is empty is handled in + // readLine(). + if t.pos < len(t.line) { + t.pos++ + t.eraseNPreviousChars(1) + } + case keyCtrlU: + t.eraseNPreviousChars(t.pos) + case keyClearScreen: + // Erases the screen and moves the cursor to the home position. + t.queue([]rune("\x1b[2J\x1b[H")) + t.queue(t.prompt) + t.cursorX, t.cursorY = 0, 0 + t.advanceCursor(visualLength(t.prompt)) + t.setLine(t.line, t.pos) + default: + if t.AutoCompleteCallback != nil { + prefix := string(t.line[:t.pos]) + suffix := string(t.line[t.pos:]) + + t.lock.Unlock() + newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key) + t.lock.Lock() + + if completeOk { + t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos])) + return + } + } + if !isPrintable(key) { + return + } + if len(t.line) == maxLineLength { + return + } + t.addKeyToLine(key) + } + return +} + +// addKeyToLine inserts the given key at the current position in the current +// line. 
+func (t *Terminal) addKeyToLine(key rune) { + if len(t.line) == cap(t.line) { + newLine := make([]rune, len(t.line), 2*(1+len(t.line))) + copy(newLine, t.line) + t.line = newLine + } + t.line = t.line[:len(t.line)+1] + copy(t.line[t.pos+1:], t.line[t.pos:]) + t.line[t.pos] = key + if t.echo { + t.writeLine(t.line[t.pos:]) + } + t.pos++ + t.moveCursorToPos(t.pos) +} + +func (t *Terminal) writeLine(line []rune) { + for len(line) != 0 { + remainingOnLine := t.termWidth - t.cursorX + todo := len(line) + if todo > remainingOnLine { + todo = remainingOnLine + } + t.queue(line[:todo]) + t.advanceCursor(visualLength(line[:todo])) + line = line[todo:] + } +} + +func (t *Terminal) Write(buf []byte) (n int, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + if t.cursorX == 0 && t.cursorY == 0 { + // This is the easy case: there's nothing on the screen that we + // have to move out of the way. + return t.c.Write(buf) + } + + // We have a prompt and possibly user input on the screen. We + // have to clear it first. + t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */) + t.cursorX = 0 + t.clearLineToRight() + + for t.cursorY > 0 { + t.move(1 /* up */, 0, 0, 0) + t.cursorY-- + t.clearLineToRight() + } + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + + if n, err = t.c.Write(buf); err != nil { + return + } + + t.writeLine(t.prompt) + if t.echo { + t.writeLine(t.line) + } + + t.moveCursorToPos(t.pos) + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + return +} + +// ReadPassword temporarily changes the prompt and reads a password, without +// echo, from the terminal. +func (t *Terminal) ReadPassword(prompt string) (line string, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + oldPrompt := t.prompt + t.prompt = []rune(prompt) + t.echo = false + + line, err = t.readLine() + + t.prompt = oldPrompt + t.echo = true + + return +} + +// ReadLine returns a line of input from the terminal. 
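The Terminal type added above is normally driven through NewTerminal and ReadLine, after the underlying descriptor has been put into raw mode with MakeRaw from the same package. A minimal, illustrative sketch of such caller code (assumed usage, not part of the vendored package):

    package main

    import (
        "fmt"
        "io"
        "log"
        "os"

        "golang.org/x/crypto/ssh/terminal"
    )

    // stdio joins stdin and stdout into the single io.ReadWriter NewTerminal expects.
    type stdio struct {
        io.Reader
        io.Writer
    }

    func main() {
        fd := int(os.Stdin.Fd())

        // NewTerminal requires a local terminal to be in raw mode first.
        oldState, err := terminal.MakeRaw(fd)
        if err != nil {
            log.Fatal(err)
        }
        defer terminal.Restore(fd, oldState)

        t := terminal.NewTerminal(stdio{os.Stdin, os.Stdout}, "> ")
        for {
            line, err := t.ReadLine()
            if err != nil {
                break // io.EOF (Ctrl-D on an empty line) or a read error ends the loop.
            }
            // Terminal is itself an io.Writer, so output is repainted around the prompt.
            fmt.Fprintf(t, "you typed: %s\r\n", line)
        }
    }
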
+func (t *Terminal) ReadLine() (line string, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + return t.readLine() +} + +func (t *Terminal) readLine() (line string, err error) { + // t.lock must be held at this point + + if t.cursorX == 0 && t.cursorY == 0 { + t.writeLine(t.prompt) + t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + } + + lineIsPasted := t.pasteActive + + for { + rest := t.remainder + lineOk := false + for !lineOk { + var key rune + key, rest = bytesToKey(rest, t.pasteActive) + if key == utf8.RuneError { + break + } + if !t.pasteActive { + if key == keyCtrlD { + if len(t.line) == 0 { + return "", io.EOF + } + } + if key == keyPasteStart { + t.pasteActive = true + if len(t.line) == 0 { + lineIsPasted = true + } + continue + } + } else if key == keyPasteEnd { + t.pasteActive = false + continue + } + if !t.pasteActive { + lineIsPasted = false + } + line, lineOk = t.handleKey(key) + } + if len(rest) > 0 { + n := copy(t.inBuf[:], rest) + t.remainder = t.inBuf[:n] + } else { + t.remainder = nil + } + t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + if lineOk { + if t.echo { + t.historyIndex = -1 + t.history.Add(line) + } + if lineIsPasted { + err = ErrPasteIndicator + } + return + } + + // t.remainder is a slice at the beginning of t.inBuf + // containing a partial key sequence + readBuf := t.inBuf[len(t.remainder):] + var n int + + t.lock.Unlock() + n, err = t.c.Read(readBuf) + t.lock.Lock() + + if err != nil { + return + } + + t.remainder = t.inBuf[:n+len(t.remainder)] + } + + panic("unreachable") // for Go 1.0. +} + +// SetPrompt sets the prompt to be used when reading subsequent lines. +func (t *Terminal) SetPrompt(prompt string) { + t.lock.Lock() + defer t.lock.Unlock() + + t.prompt = []rune(prompt) +} + +func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) { + // Move cursor to column zero at the start of the line. + t.move(t.cursorY, 0, t.cursorX, 0) + t.cursorX, t.cursorY = 0, 0 + t.clearLineToRight() + for t.cursorY < numPrevLines { + // Move down a line + t.move(0, 1, 0, 0) + t.cursorY++ + t.clearLineToRight() + } + // Move back to beginning. + t.move(t.cursorY, 0, 0, 0) + t.cursorX, t.cursorY = 0, 0 + + t.queue(t.prompt) + t.advanceCursor(visualLength(t.prompt)) + t.writeLine(t.line) + t.moveCursorToPos(t.pos) +} + +func (t *Terminal) SetSize(width, height int) error { + t.lock.Lock() + defer t.lock.Unlock() + + if width == 0 { + width = 1 + } + + oldWidth := t.termWidth + t.termWidth, t.termHeight = width, height + + switch { + case width == oldWidth: + // If the width didn't change then nothing else needs to be + // done. + return nil + case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0: + // If there is nothing on current line and no prompt printed, + // just do nothing + return nil + case width < oldWidth: + // Some terminals (e.g. xterm) will truncate lines that were + // too long when shinking. Others, (e.g. gnome-terminal) will + // attempt to wrap them. For the former, repainting t.maxLine + // works great, but that behaviour goes badly wrong in the case + // of the latter because they have doubled every full line. + + // We assume that we are working on a terminal that wraps lines + // and adjust the cursor position based on every previous line + // wrapping and turning into two. This causes the prompt on + // xterms to move upwards, which isn't great, but it avoids a + // huge mess with gnome-terminal. 
+ if t.cursorX >= t.termWidth { + t.cursorX = t.termWidth - 1 + } + t.cursorY *= 2 + t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2) + case width > oldWidth: + // If the terminal expands then our position calculations will + // be wrong in the future because we think the cursor is + // |t.pos| chars into the string, but there will be a gap at + // the end of any wrapped line. + // + // But the position will actually be correct until we move, so + // we can move back to the beginning and repaint everything. + t.clearAndRepaintLinePlusNPrevious(t.maxLine) + } + + _, err := t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + return err +} + +type pasteIndicatorError struct{} + +func (pasteIndicatorError) Error() string { + return "terminal: ErrPasteIndicator not correctly handled" +} + +// ErrPasteIndicator may be returned from ReadLine as the error, in addition +// to valid line data. It indicates that bracketed paste mode is enabled and +// that the returned line consists only of pasted data. Programs may wish to +// interpret pasted data more literally than typed data. +var ErrPasteIndicator = pasteIndicatorError{} + +// SetBracketedPasteMode requests that the terminal bracket paste operations +// with markers. Not all terminals support this but, if it is supported, then +// enabling this mode will stop any autocomplete callback from running due to +// pastes. Additionally, any lines that are completely pasted will be returned +// from ReadLine with the error set to ErrPasteIndicator. +func (t *Terminal) SetBracketedPasteMode(on bool) { + if on { + io.WriteString(t.c, "\x1b[?2004h") + } else { + io.WriteString(t.c, "\x1b[?2004l") + } +} + +// stRingBuffer is a ring buffer of strings. +type stRingBuffer struct { + // entries contains max elements. + entries []string + max int + // head contains the index of the element most recently added to the ring. + head int + // size contains the number of elements in the ring. + size int +} + +func (s *stRingBuffer) Add(a string) { + if s.entries == nil { + const defaultNumEntries = 100 + s.entries = make([]string, defaultNumEntries) + s.max = defaultNumEntries + } + + s.head = (s.head + 1) % s.max + s.entries[s.head] = a + if s.size < s.max { + s.size++ + } +} + +// NthPreviousEntry returns the value passed to the nth previous call to Add. +// If n is zero then the immediately prior value is returned, if one, then the +// next most recent, and so on. If such an element doesn't exist then ok is +// false. +func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { + if n >= s.size { + return "", false + } + index := s.head - n + if index < 0 { + index += s.max + } + return s.entries[index], true +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go new file mode 100644 index 000000000..7e60b31a2 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util.go @@ -0,0 +1,133 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. 
+// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "io" + "syscall" + "unsafe" +) + +// State contains the state of a terminal. +type State struct { + termios syscall.Termios +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 { + return nil, err + } + + newState := oldState.termios + // This attempts to replicate the behaviour documented for cfmakeraw in + // the termios(3) manpage. + newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON + newState.Oflag &^= syscall.OPOST + newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN + newState.Cflag &^= syscall.CSIZE | syscall.PARENB + newState.Cflag |= syscall.CS8 + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + var oldState State + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0) + return err +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + var dimensions [4]uint16 + + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 { + return -1, -1, err + } + return int(dimensions[1]), int(dimensions[0]), nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. 
+func ReadPassword(fd int) ([]byte, error) { + var oldState syscall.Termios + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 { + return nil, err + } + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { + return nil, err + } + + defer func() { + syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0) + }() + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(fd, buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + ret = append(ret, buf[:n]...) + if n < len(buf) { + break + } + } + + return ret, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go new file mode 100644 index 000000000..9c1ffd145 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package terminal + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA +const ioctlWriteTermios = syscall.TIOCSETA diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go new file mode 100644 index 000000000..5883b22d7 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +// These constants are declared here, rather than importing +// them from the syscall package as some syscall packages, even +// on linux, for example gccgo, do not declare them. +const ioctlReadTermios = 0x5401 // syscall.TCGETS +const ioctlWriteTermios = 0x5402 // syscall.TCSETS diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go new file mode 100644 index 000000000..799f049f0 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go @@ -0,0 +1,58 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "fmt" + "runtime" +) + +type State struct{} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + return false +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
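The package-level ReadPassword added in this file is the usual entry point for prompting without echo. A short, assumed caller sketch (again illustrative, not vendored code):

    package main

    import (
        "fmt"
        "log"
        "os"

        "golang.org/x/crypto/ssh/terminal"
    )

    func main() {
        fmt.Print("Password: ")

        // ReadPassword turns off echo on the descriptor, reads a single line,
        // and restores the previous terminal flags before returning.
        pw, err := terminal.ReadPassword(int(os.Stdin.Fd()))
        fmt.Println() // the user's Enter key was not echoed, so print the newline here
        if err != nil {
            log.Fatal(err)
        }

        fmt.Printf("read %d bytes (avoid logging the secret itself)\n", len(pw))
    }
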
+func MakeRaw(fd int) (*State, error) { + return nil, fmt.Errorf("terminal: MakeRaw not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + return nil, fmt.Errorf("terminal: GetState not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + return fmt.Errorf("terminal: Restore not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + return 0, 0, fmt.Errorf("terminal: GetSize not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + return nil, fmt.Errorf("terminal: ReadPassword not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go new file mode 100644 index 000000000..90d374ad5 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go @@ -0,0 +1,73 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package terminal + +import ( + "golang.org/x/sys/unix" + "io" + "syscall" +) + +// State contains the state of a terminal. +type State struct { + termios syscall.Termios +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + // see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c + var termio unix.Termio + err := unix.IoctlSetTermio(fd, unix.TCGETA, &termio) + return err == nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + // see also: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c + val, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + oldState := *val + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + err = unix.IoctlSetTermios(fd, unix.TCSETS, &newState) + if err != nil { + return nil, err + } + + defer unix.IoctlSetTermios(fd, unix.TCSETS, &oldState) + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(fd, buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + ret = append(ret, buf[:n]...) + if n < len(buf) { + break + } + } + + return ret, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go new file mode 100644 index 000000000..ae9fa9ec1 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go @@ -0,0 +1,174 @@ +// Copyright 2011 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "io" + "syscall" + "unsafe" +) + +const ( + enableLineInput = 2 + enableEchoInput = 4 + enableProcessedInput = 1 + enableWindowInput = 8 + enableMouseInput = 16 + enableInsertMode = 32 + enableQuickEditMode = 64 + enableExtendedFlags = 128 + enableAutoPosition = 256 + enableProcessedOutput = 1 + enableWrapAtEolOutput = 2 +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") +) + +type ( + short int16 + word uint16 + + coord struct { + x short + y short + } + smallRect struct { + left short + top short + right short + bottom short + } + consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord + } +) + +type State struct { + mode uint32 +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput) + _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0) + if e != 0 { + return nil, error(e) + } + return &State{st}, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + return &State{st}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0) + return err +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + var info consoleScreenBufferInfo + _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0) + if e != 0 { + return 0, 0, error(e) + } + return int(info.size.x), int(info.size.y), nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. 
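As the ReadPassword comment above says (the Windows implementation follows below; the Unix and Solaris variants appear earlier in this diff), the helper reads one line with echo disabled and strips the trailing newline. A minimal usage sketch, assuming the caller hands it the descriptor of os.Stdin; the prompt text, the error handling, and the main function are illustrative, not part of the vendored code:

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/crypto/ssh/terminal"
    )

    func main() {
        fd := int(os.Stdin.Fd())
        if !terminal.IsTerminal(fd) {
            fmt.Fprintln(os.Stderr, "stdin is not a terminal")
            os.Exit(1)
        }
        fmt.Print("Password: ")
        pw, err := terminal.ReadPassword(fd)
        fmt.Println() // the newline typed by the user is not echoed, so print one
        if err != nil {
            fmt.Fprintln(os.Stderr, "read password:", err)
            os.Exit(1)
        }
        fmt.Printf("read %d bytes\n", len(pw))
    }

On every platform the returned slice excludes the line terminator, matching the doc comment above.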
+func ReadPassword(fd int) ([]byte, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + old := st + + st &^= (enableEchoInput) + st |= (enableProcessedInput | enableLineInput | enableProcessedOutput) + _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0) + if e != 0 { + return nil, error(e) + } + + defer func() { + syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0) + }() + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(syscall.Handle(fd), buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + if n > 0 && buf[n-1] == '\r' { + n-- + } + ret = append(ret, buf[:n]...) + if n < len(buf) { + break + } + } + + return ret, nil +} diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/vendor/golang.org/x/net/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/vendor/golang.org/x/net/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index ea1a7cd53..000000000 --- a/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. 
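The package comment above belongs to the golang.org/x/net/context copy that this change drops from vendor (its declarations follow below before deletion). The convention it describes, a Context as the first parameter that is consulted before doing work, looks roughly like this; DoSomething and its body are illustrative only:

    package example

    import (
        "errors"

        "golang.org/x/net/context"
    )

    // DoSomething follows the convention from the package comment above:
    // ctx comes first and is checked before any slow work starts.
    func DoSomething(ctx context.Context, arg string) error {
        select {
        case <-ctx.Done():
            return ctx.Err() // canceled or deadline exceeded
        default:
        }
        if arg == "" {
            return errors.New("empty arg")
        }
        // ... real work goes here, passing ctx to anything that can block ...
        return nil
    }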
-package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. 
- // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go deleted file mode 100644 index f31d88273..000000000 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -// Package ctxhttp provides helper functions for performing context-aware HTTP requests. -package ctxhttp - -import ( - "io" - "net/http" - "net/url" - "strings" - - "golang.org/x/net/context" -) - -// Do sends an HTTP request with the provided http.Client and returns -// an HTTP response. -// -// If the client is nil, http.DefaultClient is used. -// -// The provided ctx must be non-nil. If it is canceled or times out, -// ctx.Err() will be returned. -func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - return client.Do(req.WithContext(ctx)) -} - -// Get issues a GET request via the Do function. -func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Head issues a HEAD request via the Do function. -func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Post issues a POST request via the Do function. -func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return Do(ctx, client, req) -} - -// PostForm issues a POST request via the Do function. 
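PostForm, whose body follows, rounds out the removed ctxhttp helpers (Do, Get, Head, Post above). A sketch of how a caller would have used them; the URL, the five-second timeout, and fetchWithTimeout itself are illustrative:

    package example

    import (
        "io/ioutil"
        "time"

        "golang.org/x/net/context"
        "golang.org/x/net/context/ctxhttp"
    )

    func fetchWithTimeout(url string) ([]byte, error) {
        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()

        resp, err := ctxhttp.Get(ctx, nil, url) // nil client falls back to http.DefaultClient
        if err != nil {
            // per the Do comment above, a canceled or expired ctx surfaces as an error here
            return nil, err
        }
        defer resp.Body.Close()
        return ioutil.ReadAll(resp.Body)
    }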
-func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { - return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go deleted file mode 100644 index 7564b2032..000000000 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package ctxhttp - -import ( - "io" - "net/http" - "net/url" - "strings" - - "golang.org/x/net/context" -) - -func nop() {} - -var ( - testHookContextDoneBeforeHeaders = nop - testHookDoReturned = nop - testHookDidBodyClose = nop -) - -// Do sends an HTTP request with the provided http.Client and returns an HTTP response. -// If the client is nil, http.DefaultClient is used. -// If the context is canceled or times out, ctx.Err() will be returned. -func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - - // TODO(djd): Respect any existing value of req.Cancel. - cancel := make(chan struct{}) - req.Cancel = cancel - - type responseAndError struct { - resp *http.Response - err error - } - result := make(chan responseAndError, 1) - - // Make local copies of test hooks closed over by goroutines below. - // Prevents data races in tests. - testHookDoReturned := testHookDoReturned - testHookDidBodyClose := testHookDidBodyClose - - go func() { - resp, err := client.Do(req) - testHookDoReturned() - result <- responseAndError{resp, err} - }() - - var resp *http.Response - - select { - case <-ctx.Done(): - testHookContextDoneBeforeHeaders() - close(cancel) - // Clean up after the goroutine calling client.Do: - go func() { - if r := <-result; r.resp != nil { - testHookDidBodyClose() - r.resp.Body.Close() - } - }() - return nil, ctx.Err() - case r := <-result: - var err error - resp, err = r.resp, r.err - if err != nil { - return resp, err - } - } - - c := make(chan struct{}) - go func() { - select { - case <-ctx.Done(): - close(cancel) - case <-c: - // The response's Body is closed. - } - }() - resp.Body = ¬ifyingReader{resp.Body, c} - - return resp, nil -} - -// Get issues a GET request via the Do function. -func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Head issues a HEAD request via the Do function. -func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Post issues a POST request via the Do function. -func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return Do(ctx, client, req) -} - -// PostForm issues a POST request via the Do function. 
-func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { - return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} - -// notifyingReader is an io.ReadCloser that closes the notify channel after -// Close is called or a Read fails on the underlying ReadCloser. -type notifyingReader struct { - io.ReadCloser - notify chan<- struct{} -} - -func (r *notifyingReader) Read(p []byte) (int, error) { - n, err := r.ReadCloser.Read(p) - if err != nil && r.notify != nil { - close(r.notify) - r.notify = nil - } - return n, err -} - -func (r *notifyingReader) Close() error { - err := r.ReadCloser.Close() - if r.notify != nil { - close(r.notify) - r.notify = nil - } - return err -} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index f8cda19ad..000000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, CancelFunc(f) -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, CancelFunc(f) -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 5a30acabd..000000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. 
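propagateCancel and the cancelCtx type below are the machinery behind WithCancel in this pre-Go 1.7 fallback. From the caller's side the behavior is the one sketched here; runWorkers and the printed message are illustrative:

    package example

    import (
        "fmt"

        "golang.org/x/net/context"
    )

    func runWorkers() {
        ctx, cancel := context.WithCancel(context.Background())

        done := make(chan struct{})
        go func() {
            defer close(done)
            <-ctx.Done() // closed when cancel is called or a parent context is canceled
            fmt.Println("worker stopping:", ctx.Err())
        }()

        cancel() // cancellation fans out to every context derived from ctx
        <-done
    }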
-func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. 
- return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/idna/idna.go b/vendor/golang.org/x/net/idna/idna.go new file mode 100644 index 000000000..35ff39d81 --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna.go @@ -0,0 +1,68 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package idna implements IDNA2008 (Internationalized Domain Names for +// Applications), defined in RFC 5890, RFC 5891, RFC 5892, RFC 5893 and +// RFC 5894. +package idna + +import ( + "strings" + "unicode/utf8" +) + +// TODO(nigeltao): specify when errors occur. For example, is ToASCII(".") or +// ToASCII("foo\x00") an error? 
See also http://www.unicode.org/faq/idn.html#11 + +// acePrefix is the ASCII Compatible Encoding prefix. +const acePrefix = "xn--" + +// ToASCII converts a domain or domain label to its ASCII form. For example, +// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// ToASCII("golang") is "golang". +func ToASCII(s string) (string, error) { + if ascii(s) { + return s, nil + } + labels := strings.Split(s, ".") + for i, label := range labels { + if !ascii(label) { + a, err := encode(acePrefix, label) + if err != nil { + return "", err + } + labels[i] = a + } + } + return strings.Join(labels, "."), nil +} + +// ToUnicode converts a domain or domain label to its Unicode form. For example, +// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and +// ToUnicode("golang") is "golang". +func ToUnicode(s string) (string, error) { + if !strings.Contains(s, acePrefix) { + return s, nil + } + labels := strings.Split(s, ".") + for i, label := range labels { + if strings.HasPrefix(label, acePrefix) { + u, err := decode(label[len(acePrefix):]) + if err != nil { + return "", err + } + labels[i] = u + } + } + return strings.Join(labels, "."), nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go new file mode 100644 index 000000000..92e733f6a --- /dev/null +++ b/vendor/golang.org/x/net/idna/punycode.go @@ -0,0 +1,200 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// This file implements the Punycode algorithm from RFC 3492. + +import ( + "fmt" + "math" + "strings" + "unicode/utf8" +) + +// These parameter values are specified in section 5. +// +// All computation is done with int32s, so that overflow behavior is identical +// regardless of whether int is 32-bit or 64-bit. +const ( + base int32 = 36 + damp int32 = 700 + initialBias int32 = 72 + initialN int32 = 128 + skew int32 = 38 + tmax int32 = 26 + tmin int32 = 1 +) + +// decode decodes a string as specified in section 6.2. 
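decode (below) and encode are the Punycode primitives from RFC 3492 that back the exported ToASCII and ToUnicode shown earlier in idna.go. A small round-trip sketch using only the exported API; the hostname mirrors the example already given in the doc comments:

    package example

    import (
        "fmt"

        "golang.org/x/net/idna"
    )

    func roundTrip() error {
        a, err := idna.ToASCII("bücher.example.com")
        if err != nil {
            return err
        }
        u, err := idna.ToUnicode(a)
        if err != nil {
            return err
        }
        fmt.Println(a) // xn--bcher-kva.example.com
        fmt.Println(u) // bücher.example.com
        return nil
    }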
+func decode(encoded string) (string, error) { + if encoded == "" { + return "", nil + } + pos := 1 + strings.LastIndex(encoded, "-") + if pos == 1 { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + if pos == len(encoded) { + return encoded[:len(encoded)-1], nil + } + output := make([]rune, 0, len(encoded)) + if pos != 0 { + for _, r := range encoded[:pos-1] { + output = append(output, r) + } + } + i, n, bias := int32(0), initialN, initialBias + for pos < len(encoded) { + oldI, w := i, int32(1) + for k := base; ; k += base { + if pos == len(encoded) { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + digit, ok := decodeDigit(encoded[pos]) + if !ok { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + pos++ + i += digit * w + if i < 0 { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if digit < t { + break + } + w *= base - t + if w >= math.MaxInt32/base { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + } + x := int32(len(output) + 1) + bias = adapt(i-oldI, x, oldI == 0) + n += i / x + i %= x + if n > utf8.MaxRune || len(output) >= 1024 { + return "", fmt.Errorf("idna: invalid label %q", encoded) + } + output = append(output, 0) + copy(output[i+1:], output[i:]) + output[i] = n + i++ + } + return string(output), nil +} + +// encode encodes a string as specified in section 6.3 and prepends prefix to +// the result. +// +// The "while h < length(input)" line in the specification becomes "for +// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. +func encode(prefix, s string) (string, error) { + output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) + copy(output, prefix) + delta, n, bias := int32(0), initialN, initialBias + b, remaining := int32(0), int32(0) + for _, r := range s { + if r < 0x80 { + b++ + output = append(output, byte(r)) + } else { + remaining++ + } + } + h := b + if b > 0 { + output = append(output, '-') + } + for remaining != 0 { + m := int32(0x7fffffff) + for _, r := range s { + if m > r && r >= n { + m = r + } + } + delta += (m - n) * (h + 1) + if delta < 0 { + return "", fmt.Errorf("idna: invalid label %q", s) + } + n = m + for _, r := range s { + if r < n { + delta++ + if delta < 0 { + return "", fmt.Errorf("idna: invalid label %q", s) + } + continue + } + if r > n { + continue + } + q := delta + for k := base; ; k += base { + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if q < t { + break + } + output = append(output, encodeDigit(t+(q-t)%(base-t))) + q = (q - t) / (base - t) + } + output = append(output, encodeDigit(q)) + bias = adapt(delta, h+1, h == b) + delta = 0 + h++ + remaining-- + } + delta++ + n++ + } + return string(output), nil +} + +func decodeDigit(x byte) (digit int32, ok bool) { + switch { + case '0' <= x && x <= '9': + return int32(x - ('0' - 26)), true + case 'A' <= x && x <= 'Z': + return int32(x - 'A'), true + case 'a' <= x && x <= 'z': + return int32(x - 'a'), true + } + return 0, false +} + +func encodeDigit(digit int32) byte { + switch { + case 0 <= digit && digit < 26: + return byte(digit + 'a') + case 26 <= digit && digit < 36: + return byte(digit + ('0' - 26)) + } + panic("idna: internal error in punycode encoding") +} + +// adapt is the bias adaptation function specified in section 6.1. 
+func adapt(delta, numPoints int32, firstTime bool) int32 { + if firstTime { + delta /= damp + } else { + delta /= 2 + } + delta += delta / numPoints + k := int32(0) + for delta > ((base-tmin)*tmax)/2 { + delta /= base - tmin + k += base + } + return k + (base-tmin+1)*delta/(delta+skew) +} diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml deleted file mode 100644 index fa139db22..000000000 --- a/vendor/golang.org/x/oauth2/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - tip - -install: - - export GOPATH="$HOME/gopath" - - mkdir -p "$GOPATH/src/golang.org/x" - - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" - - go get -v -t -d golang.org/x/oauth2/... - -script: - - go test -v golang.org/x/oauth2/... diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md deleted file mode 100644 index 46aa2b12d..000000000 --- a/vendor/golang.org/x/oauth2/CONTRIBUTING.md +++ /dev/null @@ -1,31 +0,0 @@ -# Contributing to Go - -Go is an open source project. - -It is the work of hundreds of contributors. We appreciate your help! - - -## Filing issues - -When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. -The gophers there will answer or ask you to file an issue if you've tripped over a bug. - -## Contributing code - -Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) -before sending patches. - -**We do not accept GitHub pull requests** -(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). - -Unless otherwise noted, the Go source files are distributed under -the BSD-style license found in the LICENSE file. - diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE deleted file mode 100644 index d02f24fd5..000000000 --- a/vendor/golang.org/x/oauth2/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The oauth2 Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md deleted file mode 100644 index 0d5141733..000000000 --- a/vendor/golang.org/x/oauth2/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# OAuth2 for Go - -[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) - -oauth2 package contains a client implementation for OAuth 2.0 spec. - -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -See godoc for further documentation and examples. - -* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) -* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) - - -## App Engine - -In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor -of the [`context.Context`](https://golang.org/x/net/context#Context) type from -the `golang.org/x/net/context` package - -This means its no longer possible to use the "Classic App Engine" -`appengine.Context` type with the `oauth2` package. (You're using -Classic App Engine if you import the package `"appengine"`.) - -To work around this, you may use the new `"google.golang.org/appengine"` -package. This package has almost the same API as the `"appengine"` package, -but it can be fetched with `go get` and used on "Managed VMs" and well as -Classic App Engine. - -See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app) -for information on updating your app. - -If you don't want to update your entire app to use the new App Engine packages, -you may use both sets of packages in parallel, using only the new packages -with the `oauth2` package. - - import ( - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - newappengine "google.golang.org/appengine" - newurlfetch "google.golang.org/appengine/urlfetch" - - "appengine" - ) - - func handler(w http.ResponseWriter, r *http.Request) { - var c appengine.Context = appengine.NewContext(r) - c.Infof("Logging a message with the old package") - - var ctx context.Context = newappengine.NewContext(r) - client := &http.Client{ - Transport: &oauth2.Transport{ - Source: google.AppEngineTokenSource(ctx, "scope"), - Base: &newurlfetch.Transport{Context: ctx}, - }, - } - client.Get("...") - } - diff --git a/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/golang.org/x/oauth2/client_appengine.go deleted file mode 100644 index 8962c49d1..000000000 --- a/vendor/golang.org/x/oauth2/client_appengine.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appengine - -// App Engine hooks. 
- -package oauth2 - -import ( - "net/http" - - "golang.org/x/net/context" - "golang.org/x/oauth2/internal" - "google.golang.org/appengine/urlfetch" -) - -func init() { - internal.RegisterContextClientFunc(contextClientAppEngine) -} - -func contextClientAppEngine(ctx context.Context) (*http.Client, error) { - return urlfetch.Client(ctx), nil -} diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go deleted file mode 100644 index dc993efb5..000000000 --- a/vendor/golang.org/x/oauth2/google/appengine.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "sort" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2" -) - -// Set at init time by appenginevm_hook.go. If true, we are on App Engine Managed VMs. -var appengineVM bool - -// Set at init time by appengine_hook.go. If nil, we're not on App Engine. -var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) - -// AppEngineTokenSource returns a token source that fetches tokens -// issued to the current App Engine application's service account. -// If you are implementing a 3-legged OAuth 2.0 flow on App Engine -// that involves user accounts, see oauth2.Config instead. -// -// The provided context must have come from appengine.NewContext. -func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { - if appengineTokenFunc == nil { - panic("google: AppEngineTokenSource can only be used on App Engine.") - } - scopes := append([]string{}, scope...) - sort.Strings(scopes) - return &appEngineTokenSource{ - ctx: ctx, - scopes: scopes, - key: strings.Join(scopes, " "), - } -} - -// aeTokens helps the fetched tokens to be reused until their expiration. -var ( - aeTokensMu sync.Mutex - aeTokens = make(map[string]*tokenLock) // key is space-separated scopes -) - -type tokenLock struct { - mu sync.Mutex // guards t; held while fetching or updating t - t *oauth2.Token -} - -type appEngineTokenSource struct { - ctx context.Context - scopes []string - key string // to aeTokens map; space-separated scopes -} - -func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) { - if appengineTokenFunc == nil { - panic("google: AppEngineTokenSource can only be used on App Engine.") - } - - aeTokensMu.Lock() - tok, ok := aeTokens[ts.key] - if !ok { - tok = &tokenLock{} - aeTokens[ts.key] = tok - } - aeTokensMu.Unlock() - - tok.mu.Lock() - defer tok.mu.Unlock() - if tok.t.Valid() { - return tok.t, nil - } - access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) - if err != nil { - return nil, err - } - tok.t = &oauth2.Token{ - AccessToken: access, - Expiry: exp, - } - return tok.t, nil -} diff --git a/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/golang.org/x/oauth2/google/appengine_hook.go deleted file mode 100644 index 4f42c8b34..000000000 --- a/vendor/golang.org/x/oauth2/google/appengine_hook.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build appengine - -package google - -import "google.golang.org/appengine" - -func init() { - appengineTokenFunc = appengine.AccessToken -} diff --git a/vendor/golang.org/x/oauth2/google/appenginevm_hook.go b/vendor/golang.org/x/oauth2/google/appenginevm_hook.go deleted file mode 100644 index 633611cc3..000000000 --- a/vendor/golang.org/x/oauth2/google/appenginevm_hook.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appenginevm - -package google - -import "google.golang.org/appengine" - -func init() { - appengineVM = true - appengineTokenFunc = appengine.AccessToken -} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go deleted file mode 100644 index b95236297..000000000 --- a/vendor/golang.org/x/oauth2/google/default.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "runtime" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/jwt" - "google.golang.org/cloud/compute/metadata" -) - -// DefaultClient returns an HTTP Client that uses the -// DefaultTokenSource to obtain authentication credentials. -// -// This client should be used when developing services -// that run on Google App Engine or Google Compute Engine -// and use "Application Default Credentials." -// -// For more details, see: -// https://developers.google.com/accounts/docs/application-default-credentials -// -func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { - ts, err := DefaultTokenSource(ctx, scope...) - if err != nil { - return nil, err - } - return oauth2.NewClient(ctx, ts), nil -} - -// DefaultTokenSource is a token source that uses -// "Application Default Credentials". -// -// It looks for credentials in the following places, -// preferring the first location found: -// -// 1. A JSON file whose path is specified by the -// GOOGLE_APPLICATION_CREDENTIALS environment variable. -// 2. A JSON file in a location known to the gcloud command-line tool. -// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. -// On other systems, $HOME/.config/gcloud/application_default_credentials.json. -// 3. On Google App Engine it uses the appengine.AccessToken function. -// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches -// credentials from the metadata server. -// (In this final case any provided scopes are ignored.) -// -// For more details, see: -// https://developers.google.com/accounts/docs/application-default-credentials -// -func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { - // First, try the environment variable. - const envVar = "GOOGLE_APPLICATION_CREDENTIALS" - if filename := os.Getenv(envVar); filename != "" { - ts, err := tokenSourceFromFile(ctx, filename, scope) - if err != nil { - return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) - } - return ts, nil - } - - // Second, try a well-known file. 
- filename := wellKnownFile() - _, err := os.Stat(filename) - if err == nil { - ts, err2 := tokenSourceFromFile(ctx, filename, scope) - if err2 == nil { - return ts, nil - } - err = err2 - } else if os.IsNotExist(err) { - err = nil // ignore this error - } - if err != nil { - return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) - } - - // Third, if we're on Google App Engine use those credentials. - if appengineTokenFunc != nil && !appengineVM { - return AppEngineTokenSource(ctx, scope...), nil - } - - // Fourth, if we're on Google Compute Engine use the metadata server. - if metadata.OnGCE() { - return ComputeTokenSource(""), nil - } - - // None are found; return helpful error. - const url = "https://developers.google.com/accounts/docs/application-default-credentials" - return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) -} - -func wellKnownFile() string { - const f = "application_default_credentials.json" - if runtime.GOOS == "windows" { - return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) - } - return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) -} - -func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - var d struct { - // Common fields - Type string - ClientID string `json:"client_id"` - - // User Credential fields - ClientSecret string `json:"client_secret"` - RefreshToken string `json:"refresh_token"` - - // Service Account fields - ClientEmail string `json:"client_email"` - PrivateKeyID string `json:"private_key_id"` - PrivateKey string `json:"private_key"` - } - if err := json.Unmarshal(b, &d); err != nil { - return nil, err - } - switch d.Type { - case "authorized_user": - cfg := &oauth2.Config{ - ClientID: d.ClientID, - ClientSecret: d.ClientSecret, - Scopes: append([]string{}, scopes...), // copy - Endpoint: Endpoint, - } - tok := &oauth2.Token{RefreshToken: d.RefreshToken} - return cfg.TokenSource(ctx, tok), nil - case "service_account": - cfg := &jwt.Config{ - Email: d.ClientEmail, - PrivateKey: []byte(d.PrivateKey), - Scopes: append([]string{}, scopes...), // copy - TokenURL: JWTTokenURL, - } - return cfg.TokenSource(ctx), nil - case "": - return nil, errors.New("missing 'type' field in credentials") - default: - return nil, fmt.Errorf("unknown credential type: %q", d.Type) - } -} diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go deleted file mode 100644 index 2b349fd3d..000000000 --- a/vendor/golang.org/x/oauth2/google/google.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package google provides support for making OAuth2 authorized and -// authenticated HTTP requests to Google APIs. -// It supports the Web server flow, client-side credentials, service accounts, -// Google Compute Engine service accounts, and Google App Engine service -// accounts. -// -// For more information, please read -// https://developers.google.com/accounts/docs/OAuth2 -// and -// https://developers.google.com/accounts/docs/application-default-credentials. 
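The Application Default Credentials lookup implemented by DefaultTokenSource above (environment variable, well-known gcloud file, App Engine, then the GCE metadata server) was normally reached through DefaultClient. A usage sketch of the package being removed here; the scope URL and newStorageClient are illustrative:

    package example

    import (
        "net/http"

        "golang.org/x/net/context"
        "golang.org/x/oauth2/google"
    )

    func newStorageClient() (*http.Client, error) {
        // Tries each credential source in the documented order and returns a
        // client whose transport injects the resulting access tokens.
        return google.DefaultClient(context.Background(),
            "https://www.googleapis.com/auth/devstorage.read_only")
    }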
-package google - -import ( - "encoding/json" - "errors" - "fmt" - "strings" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/jwt" - "google.golang.org/cloud/compute/metadata" -) - -// Endpoint is Google's OAuth 2.0 endpoint. -var Endpoint = oauth2.Endpoint{ - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://accounts.google.com/o/oauth2/token", -} - -// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. -const JWTTokenURL = "https://accounts.google.com/o/oauth2/token" - -// ConfigFromJSON uses a Google Developers Console client_credentials.json -// file to construct a config. -// client_credentials.json can be downloaded from -// https://console.developers.google.com, under "Credentials". Download the Web -// application credentials in the JSON format and provide the contents of the -// file as jsonKey. -func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { - type cred struct { - ClientID string `json:"client_id"` - ClientSecret string `json:"client_secret"` - RedirectURIs []string `json:"redirect_uris"` - AuthURI string `json:"auth_uri"` - TokenURI string `json:"token_uri"` - } - var j struct { - Web *cred `json:"web"` - Installed *cred `json:"installed"` - } - if err := json.Unmarshal(jsonKey, &j); err != nil { - return nil, err - } - var c *cred - switch { - case j.Web != nil: - c = j.Web - case j.Installed != nil: - c = j.Installed - default: - return nil, fmt.Errorf("oauth2/google: no credentials found") - } - if len(c.RedirectURIs) < 1 { - return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") - } - return &oauth2.Config{ - ClientID: c.ClientID, - ClientSecret: c.ClientSecret, - RedirectURL: c.RedirectURIs[0], - Scopes: scope, - Endpoint: oauth2.Endpoint{ - AuthURL: c.AuthURI, - TokenURL: c.TokenURI, - }, - }, nil -} - -// JWTConfigFromJSON uses a Google Developers service account JSON key file to read -// the credentials that authorize and authenticate the requests. -// Create a service account on "Credentials" for your project at -// https://console.developers.google.com to download a JSON key file. -func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { - var key struct { - Email string `json:"client_email"` - PrivateKey string `json:"private_key"` - PrivateKeyID string `json:"private_key_id"` - } - if err := json.Unmarshal(jsonKey, &key); err != nil { - return nil, err - } - config := &jwt.Config{ - Email: key.Email, - PrivateKey: []byte(key.PrivateKey), - PrivateKeyID: key.PrivateKeyID, - Scopes: scope, - TokenURL: JWTTokenURL, - } - return config, nil -} - -// ComputeTokenSource returns a token source that fetches access tokens -// from Google Compute Engine (GCE)'s metadata server. It's only valid to use -// this token source if your program is running on a GCE instance. -// If no account is specified, "default" is used. -// Further information about retrieving access tokens from the GCE metadata -// server can be found at https://cloud.google.com/compute/docs/authentication. 
-func ComputeTokenSource(account string) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, computeSource{account: account}) -} - -type computeSource struct { - account string -} - -func (cs computeSource) Token() (*oauth2.Token, error) { - if !metadata.OnGCE() { - return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") - } - acct := cs.account - if acct == "" { - acct = "default" - } - tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token") - if err != nil { - return nil, err - } - var res struct { - AccessToken string `json:"access_token"` - ExpiresInSec int `json:"expires_in"` - TokenType string `json:"token_type"` - } - err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) - if err != nil { - return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) - } - if res.ExpiresInSec == 0 || res.AccessToken == "" { - return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") - } - return &oauth2.Token{ - AccessToken: res.AccessToken, - TokenType: res.TokenType, - Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), - }, nil -} diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go deleted file mode 100644 index b0fdb3a88..000000000 --- a/vendor/golang.org/x/oauth2/google/jwt.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "crypto/rsa" - "fmt" - "time" - - "golang.org/x/oauth2" - "golang.org/x/oauth2/internal" - "golang.org/x/oauth2/jws" -) - -// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON -// key file to read the credentials that authorize and authenticate the -// requests, and returns a TokenSource that does not use any OAuth2 flow but -// instead creates a JWT and sends that as the access token. -// The audience is typically a URL that specifies the scope of the credentials. -// -// Note that this is not a standard OAuth flow, but rather an -// optimization supported by a few Google services. -// Unless you know otherwise, you should use JWTConfigFromJSON instead. 
-func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { - cfg, err := JWTConfigFromJSON(jsonKey) - if err != nil { - return nil, fmt.Errorf("google: could not parse JSON key: %v", err) - } - pk, err := internal.ParseKey(cfg.PrivateKey) - if err != nil { - return nil, fmt.Errorf("google: could not parse key: %v", err) - } - ts := &jwtAccessTokenSource{ - email: cfg.Email, - audience: audience, - pk: pk, - pkID: cfg.PrivateKeyID, - } - tok, err := ts.Token() - if err != nil { - return nil, err - } - return oauth2.ReuseTokenSource(tok, ts), nil -} - -type jwtAccessTokenSource struct { - email, audience string - pk *rsa.PrivateKey - pkID string -} - -func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { - iat := time.Now() - exp := iat.Add(time.Hour) - cs := &jws.ClaimSet{ - Iss: ts.email, - Sub: ts.email, - Aud: ts.audience, - Iat: iat.Unix(), - Exp: exp.Unix(), - } - hdr := &jws.Header{ - Algorithm: "RS256", - Typ: "JWT", - KeyID: string(ts.pkID), - } - msg, err := jws.Encode(hdr, cs, ts.pk) - if err != nil { - return nil, fmt.Errorf("google: could not encode JWT: %v", err) - } - return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil -} diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go deleted file mode 100644 index d29a3bb9b..000000000 --- a/vendor/golang.org/x/oauth2/google/sdk.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package google - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "os" - "os/user" - "path/filepath" - "runtime" - "strings" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/internal" -) - -type sdkCredentials struct { - Data []struct { - Credential struct { - ClientID string `json:"client_id"` - ClientSecret string `json:"client_secret"` - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - TokenExpiry *time.Time `json:"token_expiry"` - } `json:"credential"` - Key struct { - Account string `json:"account"` - Scope string `json:"scope"` - } `json:"key"` - } -} - -// An SDKConfig provides access to tokens from an account already -// authorized via the Google Cloud SDK. -type SDKConfig struct { - conf oauth2.Config - initialToken *oauth2.Token -} - -// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK -// account. If account is empty, the account currently active in -// Google Cloud SDK properties is used. -// Google Cloud SDK credentials must be created by running `gcloud auth` -// before using this function. -// The Google Cloud SDK is available at https://cloud.google.com/sdk/. 
-func NewSDKConfig(account string) (*SDKConfig, error) { - configPath, err := sdkConfigPath() - if err != nil { - return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) - } - credentialsPath := filepath.Join(configPath, "credentials") - f, err := os.Open(credentialsPath) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) - } - defer f.Close() - - var c sdkCredentials - if err := json.NewDecoder(f).Decode(&c); err != nil { - return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) - } - if len(c.Data) == 0 { - return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) - } - if account == "" { - propertiesPath := filepath.Join(configPath, "properties") - f, err := os.Open(propertiesPath) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) - } - defer f.Close() - ini, err := internal.ParseINI(f) - if err != nil { - return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) - } - core, ok := ini["core"] - if !ok { - return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) - } - active, ok := core["account"] - if !ok { - return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) - } - account = active - } - - for _, d := range c.Data { - if account == "" || d.Key.Account == account { - if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { - return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) - } - var expiry time.Time - if d.Credential.TokenExpiry != nil { - expiry = *d.Credential.TokenExpiry - } - return &SDKConfig{ - conf: oauth2.Config{ - ClientID: d.Credential.ClientID, - ClientSecret: d.Credential.ClientSecret, - Scopes: strings.Split(d.Key.Scope, " "), - Endpoint: Endpoint, - RedirectURL: "oob", - }, - initialToken: &oauth2.Token{ - AccessToken: d.Credential.AccessToken, - RefreshToken: d.Credential.RefreshToken, - Expiry: expiry, - }, - }, nil - } - } - return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) -} - -// Client returns an HTTP client using Google Cloud SDK credentials to -// authorize requests. The token will auto-refresh as necessary. The -// underlying http.RoundTripper will be obtained using the provided -// context. The returned client and its Transport should not be -// modified. -func (c *SDKConfig) Client(ctx context.Context) *http.Client { - return &http.Client{ - Transport: &oauth2.Transport{ - Source: c.TokenSource(ctx), - }, - } -} - -// TokenSource returns an oauth2.TokenSource that retrieve tokens from -// Google Cloud SDK credentials using the provided context. -// It will returns the current access token stored in the credentials, -// and refresh it when it expires, but it won't update the credentials -// with the new access token. -func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource { - return c.conf.TokenSource(ctx, c.initialToken) -} - -// Scopes are the OAuth 2.0 scopes the current account is authorized for. -func (c *SDKConfig) Scopes() []string { - return c.conf.Scopes -} - -// sdkConfigPath tries to guess where the gcloud config is located. -// It can be overridden during tests. 
-var sdkConfigPath = func() (string, error) { - if runtime.GOOS == "windows" { - return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil - } - homeDir := guessUnixHomeDir() - if homeDir == "" { - return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty") - } - return filepath.Join(homeDir, ".config", "gcloud"), nil -} - -func guessUnixHomeDir() string { - usr, err := user.Current() - if err == nil { - return usr.HomeDir - } - return os.Getenv("HOME") -} diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go deleted file mode 100644 index fbe1028d6..000000000 --- a/vendor/golang.org/x/oauth2/internal/oauth2.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internal contains support packages for oauth2 package. -package internal - -import ( - "bufio" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "io" - "strings" -) - -// ParseKey converts the binary contents of a private key file -// to an *rsa.PrivateKey. It detects whether the private key is in a -// PEM container or not. If so, it extracts the the private key -// from PEM container before conversion. It only supports PEM -// containers with no passphrase. -func ParseKey(key []byte) (*rsa.PrivateKey, error) { - block, _ := pem.Decode(key) - if block != nil { - key = block.Bytes - } - parsedKey, err := x509.ParsePKCS8PrivateKey(key) - if err != nil { - parsedKey, err = x509.ParsePKCS1PrivateKey(key) - if err != nil { - return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err) - } - } - parsed, ok := parsedKey.(*rsa.PrivateKey) - if !ok { - return nil, errors.New("private key is invalid") - } - return parsed, nil -} - -func ParseINI(ini io.Reader) (map[string]map[string]string, error) { - result := map[string]map[string]string{ - "": map[string]string{}, // root section - } - scanner := bufio.NewScanner(ini) - currentSection := "" - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if strings.HasPrefix(line, ";") { - // comment. - continue - } - if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { - currentSection = strings.TrimSpace(line[1 : len(line)-1]) - result[currentSection] = map[string]string{} - continue - } - parts := strings.SplitN(line, "=", 2) - if len(parts) == 2 && parts[0] != "" { - result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) - } - } - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("error scanning ini: %v", err) - } - return result, nil -} - -func CondVal(v string) []string { - if v == "" { - return nil - } - return []string{v} -} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go deleted file mode 100644 index 18328a0dc..000000000 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package internal contains support packages for oauth2 package. 
-package internal - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "mime" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "golang.org/x/net/context" -) - -// Token represents the crendentials used to authorize -// the requests to access protected resources on the OAuth 2.0 -// provider's backend. -// -// This type is a mirror of oauth2.Token and exists to break -// an otherwise-circular dependency. Other internal packages -// should convert this Token into an oauth2.Token before use. -type Token struct { - // AccessToken is the token that authorizes and authenticates - // the requests. - AccessToken string - - // TokenType is the type of token. - // The Type method returns either this or "Bearer", the default. - TokenType string - - // RefreshToken is a token that's used by the application - // (as opposed to the user) to refresh the access token - // if it expires. - RefreshToken string - - // Expiry is the optional expiration time of the access token. - // - // If zero, TokenSource implementations will reuse the same - // token forever and RefreshToken or equivalent - // mechanisms for that TokenSource will not be used. - Expiry time.Time - - // Raw optionally contains extra metadata from the server - // when updating a token. - Raw interface{} -} - -// tokenJSON is the struct representing the HTTP response from OAuth2 -// providers returning a token in JSON form. -type tokenJSON struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - RefreshToken string `json:"refresh_token"` - ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number - Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in -} - -func (e *tokenJSON) expiry() (t time.Time) { - if v := e.ExpiresIn; v != 0 { - return time.Now().Add(time.Duration(v) * time.Second) - } - if v := e.Expires; v != 0 { - return time.Now().Add(time.Duration(v) * time.Second) - } - return -} - -type expirationTime int32 - -func (e *expirationTime) UnmarshalJSON(b []byte) error { - var n json.Number - err := json.Unmarshal(b, &n) - if err != nil { - return err - } - i, err := n.Int64() - if err != nil { - return err - } - *e = expirationTime(i) - return nil -} - -var brokenAuthHeaderProviders = []string{ - "https://accounts.google.com/", - "https://api.dropbox.com/", - "https://api.dropboxapi.com/", - "https://api.instagram.com/", - "https://api.netatmo.net/", - "https://api.odnoklassniki.ru/", - "https://api.pushbullet.com/", - "https://api.soundcloud.com/", - "https://api.twitch.tv/", - "https://app.box.com/", - "https://connect.stripe.com/", - "https://login.microsoftonline.com/", - "https://login.salesforce.com/", - "https://oauth.sandbox.trainingpeaks.com/", - "https://oauth.trainingpeaks.com/", - "https://oauth.vk.com/", - "https://openapi.baidu.com/", - "https://slack.com/", - "https://test-sandbox.auth.corp.google.com", - "https://test.salesforce.com/", - "https://user.gini.net/", - "https://www.douban.com/", - "https://www.googleapis.com/", - "https://www.linkedin.com/", - "https://www.strava.com/oauth/", - "https://www.wunderlist.com/oauth/", - "https://api.patreon.com/", -} - -func RegisterBrokenAuthHeaderProvider(tokenURL string) { - brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL) -} - -// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL -// implements the OAuth2 spec correctly -// See https://code.google.com/p/goauth2/issues/detail?id=31 for 
background. -// In summary: -// - Reddit only accepts client secret in the Authorization header -// - Dropbox accepts either it in URL param or Auth header, but not both. -// - Google only accepts URL param (not spec compliant?), not Auth header -// - Stripe only accepts client secret in Auth header with Bearer method, not Basic -func providerAuthHeaderWorks(tokenURL string) bool { - for _, s := range brokenAuthHeaderProviders { - if strings.HasPrefix(tokenURL, s) { - // Some sites fail to implement the OAuth2 spec fully. - return false - } - } - - // Assume the provider implements the spec properly - // otherwise. We can add more exceptions as they're - // discovered. We will _not_ be adding configurable hooks - // to this package to let users select server bugs. - return true -} - -func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) { - hc, err := ContextClient(ctx) - if err != nil { - return nil, err - } - v.Set("client_id", clientID) - bustedAuth := !providerAuthHeaderWorks(tokenURL) - if bustedAuth && clientSecret != "" { - v.Set("client_secret", clientSecret) - } - req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode())) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - if !bustedAuth { - req.SetBasicAuth(clientID, clientSecret) - } - r, err := hc.Do(req) - if err != nil { - return nil, err - } - defer r.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - if code := r.StatusCode; code < 200 || code > 299 { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body) - } - - var token *Token - content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) - switch content { - case "application/x-www-form-urlencoded", "text/plain": - vals, err := url.ParseQuery(string(body)) - if err != nil { - return nil, err - } - token = &Token{ - AccessToken: vals.Get("access_token"), - TokenType: vals.Get("token_type"), - RefreshToken: vals.Get("refresh_token"), - Raw: vals, - } - e := vals.Get("expires_in") - if e == "" { - // TODO(jbd): Facebook's OAuth2 implementation is broken and - // returns expires_in field in expires. Remove the fallback to expires, - // when Facebook fixes their implementation. - e = vals.Get("expires") - } - expires, _ := strconv.Atoi(e) - if expires != 0 { - token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) - } - default: - var tj tokenJSON - if err = json.Unmarshal(body, &tj); err != nil { - return nil, err - } - token = &Token{ - AccessToken: tj.AccessToken, - TokenType: tj.TokenType, - RefreshToken: tj.RefreshToken, - Expiry: tj.expiry(), - Raw: make(map[string]interface{}), - } - json.Unmarshal(body, &token.Raw) // no error checks for optional fields - } - // Don't overwrite `RefreshToken` with an empty value - // if this was a token refreshing request. - if token.RefreshToken == "" { - token.RefreshToken = v.Get("refresh_token") - } - return token, nil -} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go deleted file mode 100644 index f1f173e34..000000000 --- a/vendor/golang.org/x/oauth2/internal/transport.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package internal contains support packages for oauth2 package. -package internal - -import ( - "net/http" - - "golang.org/x/net/context" -) - -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. -var HTTPClient ContextKey - -// ContextKey is just an empty struct. It exists so HTTPClient can be -// an immutable public variable with a unique type. It's immutable -// because nobody else can create a ContextKey, being unexported. -type ContextKey struct{} - -// ContextClientFunc is a func which tries to return an *http.Client -// given a Context value. If it returns an error, the search stops -// with that error. If it returns (nil, nil), the search continues -// down the list of registered funcs. -type ContextClientFunc func(context.Context) (*http.Client, error) - -var contextClientFuncs []ContextClientFunc - -func RegisterContextClientFunc(fn ContextClientFunc) { - contextClientFuncs = append(contextClientFuncs, fn) -} - -func ContextClient(ctx context.Context) (*http.Client, error) { - if ctx != nil { - if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { - return hc, nil - } - } - for _, fn := range contextClientFuncs { - c, err := fn(ctx) - if err != nil { - return nil, err - } - if c != nil { - return c, nil - } - } - return http.DefaultClient, nil -} - -func ContextTransport(ctx context.Context) http.RoundTripper { - hc, err := ContextClient(ctx) - // This is a rare error case (somebody using nil on App Engine). - if err != nil { - return ErrorTransport{err} - } - return hc.Transport -} - -// ErrorTransport returns the specified error on RoundTrip. -// This RoundTripper should be used in rare error cases where -// error handling can be postponed to response handling time. -type ErrorTransport struct{ Err error } - -func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) { - return nil, t.Err -} diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go deleted file mode 100644 index 745d6535e..000000000 --- a/vendor/golang.org/x/oauth2/jws/jws.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package jws provides encoding and decoding utilities for -// signed JWS messages. -package jws - -import ( - "bytes" - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "strings" - "time" -) - -// ClaimSet contains information about the JWT signature including the -// permissions being requested (scopes), the target of the token, the issuer, -// the time the token was issued, and the lifetime of the token. -type ClaimSet struct { - Iss string `json:"iss"` // email address of the client_id of the application making the access token request - Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests - Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). - Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch) - Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch) - Typ string `json:"typ,omitempty"` // token type (Optional). - - // Email for which the application is requesting delegated access (Optional). - Sub string `json:"sub,omitempty"` - - // The old name of Sub. 
Client keeps setting Prn to be - // complaint with legacy OAuth 2.0 providers. (Optional) - Prn string `json:"prn,omitempty"` - - // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 - // This array is marshalled using custom code (see (c *ClaimSet) encode()). - PrivateClaims map[string]interface{} `json:"-"` -} - -func (c *ClaimSet) encode() (string, error) { - // Reverting time back for machines whose time is not perfectly in sync. - // If client machine's time is in the future according - // to Google servers, an access token will not be issued. - now := time.Now().Add(-10 * time.Second) - if c.Iat == 0 { - c.Iat = now.Unix() - } - if c.Exp == 0 { - c.Exp = now.Add(time.Hour).Unix() - } - if c.Exp < c.Iat { - return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat) - } - - b, err := json.Marshal(c) - if err != nil { - return "", err - } - - if len(c.PrivateClaims) == 0 { - return base64Encode(b), nil - } - - // Marshal private claim set and then append it to b. - prv, err := json.Marshal(c.PrivateClaims) - if err != nil { - return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) - } - - // Concatenate public and private claim JSON objects. - if !bytes.HasSuffix(b, []byte{'}'}) { - return "", fmt.Errorf("jws: invalid JSON %s", b) - } - if !bytes.HasPrefix(prv, []byte{'{'}) { - return "", fmt.Errorf("jws: invalid JSON %s", prv) - } - b[len(b)-1] = ',' // Replace closing curly brace with a comma. - b = append(b, prv[1:]...) // Append private claims. - return base64Encode(b), nil -} - -// Header represents the header for the signed JWS payloads. -type Header struct { - // The algorithm used for signature. - Algorithm string `json:"alg"` - - // Represents the token type. - Typ string `json:"typ"` - - // The optional hint of which key is being used. - KeyID string `json:"kid,omitempty"` -} - -func (h *Header) encode() (string, error) { - b, err := json.Marshal(h) - if err != nil { - return "", err - } - return base64Encode(b), nil -} - -// Decode decodes a claim set from a JWS payload. -func Decode(payload string) (*ClaimSet, error) { - // decode returned id token to get expiry - s := strings.Split(payload, ".") - if len(s) < 2 { - // TODO(jbd): Provide more context about the error. - return nil, errors.New("jws: invalid token received") - } - decoded, err := base64Decode(s[1]) - if err != nil { - return nil, err - } - c := &ClaimSet{} - err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) - return c, err -} - -// Signer returns a signature for the given data. -type Signer func(data []byte) (sig []byte, err error) - -// EncodeWithSigner encodes a header and claim set with the provided signer. -func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { - head, err := header.encode() - if err != nil { - return "", err - } - cs, err := c.encode() - if err != nil { - return "", err - } - ss := fmt.Sprintf("%s.%s", head, cs) - sig, err := sg([]byte(ss)) - if err != nil { - return "", err - } - return fmt.Sprintf("%s.%s", ss, base64Encode(sig)), nil -} - -// Encode encodes a signed JWS with provided header and claim set. -// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. 
-func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { - sg := func(data []byte) (sig []byte, err error) { - h := sha256.New() - h.Write(data) - return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) - } - return EncodeWithSigner(header, c, sg) -} - -// Verify tests whether the provided JWT token's signature was produced by the private key -// associated with the supplied public key. -func Verify(token string, key *rsa.PublicKey) error { - parts := strings.Split(token, ".") - if len(parts) != 3 { - return errors.New("jws: invalid token received, token must have 3 parts") - } - - signedContent := parts[0] + "." + parts[1] - signatureString, err := base64Decode(parts[2]) - if err != nil { - return err - } - - h := sha256.New() - h.Write([]byte(signedContent)) - return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) -} - -// base64Encode returns and Base64url encoded version of the input string with any -// trailing "=" stripped. -func base64Encode(b []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") -} - -// base64Decode decodes the Base64url encoded string -func base64Decode(s string) ([]byte, error) { - // add back missing padding - switch len(s) % 4 { - case 1: - s += "===" - case 2: - s += "==" - case 3: - s += "=" - } - return base64.URLEncoding.DecodeString(s) -} diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go deleted file mode 100644 index f4b9523e6..000000000 --- a/vendor/golang.org/x/oauth2/jwt/jwt.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly -// known as "two-legged OAuth 2.0". -// -// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 -package jwt - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/internal" - "golang.org/x/oauth2/jws" -) - -var ( - defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" - defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} -) - -// Config is the configuration for using JWT to fetch tokens, -// commonly known as "two-legged OAuth 2.0". -type Config struct { - // Email is the OAuth client identifier used when communicating with - // the configured OAuth provider. - Email string - - // PrivateKey contains the contents of an RSA private key or the - // contents of a PEM file that contains a private key. The provided - // private key is used to sign JWT payloads. - // PEM containers with a passphrase are not supported. - // Use the following command to convert a PKCS 12 file into a PEM. - // - // $ openssl pkcs12 -in key.p12 -out key.pem -nodes - // - PrivateKey []byte - - // PrivateKeyID contains an optional hint indicating which key is being - // used. - PrivateKeyID string - - // Subject is the optional user to impersonate. - Subject string - - // Scopes optionally specifies a list of requested permission scopes. - Scopes []string - - // TokenURL is the endpoint required to complete the 2-legged JWT flow. - TokenURL string - - // Expires optionally specifies how long the token is valid for. 
- Expires time.Duration -} - -// TokenSource returns a JWT TokenSource using the configuration -// in c and the HTTP client from the provided context. -func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { - return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) -} - -// Client returns an HTTP client wrapping the context's -// HTTP transport and adding Authorization headers with tokens -// obtained from c. -// -// The returned client and its Transport should not be modified. -func (c *Config) Client(ctx context.Context) *http.Client { - return oauth2.NewClient(ctx, c.TokenSource(ctx)) -} - -// jwtSource is a source that always does a signed JWT request for a token. -// It should typically be wrapped with a reuseTokenSource. -type jwtSource struct { - ctx context.Context - conf *Config -} - -func (js jwtSource) Token() (*oauth2.Token, error) { - pk, err := internal.ParseKey(js.conf.PrivateKey) - if err != nil { - return nil, err - } - hc := oauth2.NewClient(js.ctx, nil) - claimSet := &jws.ClaimSet{ - Iss: js.conf.Email, - Scope: strings.Join(js.conf.Scopes, " "), - Aud: js.conf.TokenURL, - } - if subject := js.conf.Subject; subject != "" { - claimSet.Sub = subject - // prn is the old name of sub. Keep setting it - // to be compatible with legacy OAuth 2.0 providers. - claimSet.Prn = subject - } - if t := js.conf.Expires; t > 0 { - claimSet.Exp = time.Now().Add(t).Unix() - } - payload, err := jws.Encode(defaultHeader, claimSet, pk) - if err != nil { - return nil, err - } - v := url.Values{} - v.Set("grant_type", defaultGrantType) - v.Set("assertion", payload) - resp, err := hc.PostForm(js.conf.TokenURL, v) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - if c := resp.StatusCode; c < 200 || c > 299 { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body) - } - // tokenRes is the JSON response body. - var tokenRes struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - IDToken string `json:"id_token"` - ExpiresIn int64 `json:"expires_in"` // relative seconds from now - } - if err := json.Unmarshal(body, &tokenRes); err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - token := &oauth2.Token{ - AccessToken: tokenRes.AccessToken, - TokenType: tokenRes.TokenType, - } - raw := make(map[string]interface{}) - json.Unmarshal(body, &raw) // no error checks for optional fields - token = token.WithExtra(raw) - - if secs := tokenRes.ExpiresIn; secs > 0 { - token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) - } - if v := tokenRes.IDToken; v != "" { - // decode returned id token to get expiry - claimSet, err := jws.Decode(v) - if err != nil { - return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) - } - token.Expiry = time.Unix(claimSet.Exp, 0) - } - return token, nil -} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go deleted file mode 100644 index 9b7b977da..000000000 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package oauth2 provides support for making -// OAuth2 authorized and authenticated HTTP requests. 
-// It can additionally grant authorization with Bearer JWT. -package oauth2 - -import ( - "bytes" - "errors" - "net/http" - "net/url" - "strings" - "sync" - - "golang.org/x/net/context" - "golang.org/x/oauth2/internal" -) - -// NoContext is the default context you should supply if not using -// your own context.Context (see https://golang.org/x/net/context). -var NoContext = context.TODO() - -// RegisterBrokenAuthHeaderProvider registers an OAuth2 server -// identified by the tokenURL prefix as an OAuth2 implementation -// which doesn't support the HTTP Basic authentication -// scheme to authenticate with the authorization server. -// Once a server is registered, credentials (client_id and client_secret) -// will be passed as query parameters rather than being present -// in the Authorization header. -// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. -func RegisterBrokenAuthHeaderProvider(tokenURL string) { - internal.RegisterBrokenAuthHeaderProvider(tokenURL) -} - -// Config describes a typical 3-legged OAuth2 flow, with both the -// client application information and the server's endpoint URLs. -type Config struct { - // ClientID is the application's ID. - ClientID string - - // ClientSecret is the application's secret. - ClientSecret string - - // Endpoint contains the resource server's token endpoint - // URLs. These are constants specific to each server and are - // often available via site-specific packages, such as - // google.Endpoint or github.Endpoint. - Endpoint Endpoint - - // RedirectURL is the URL to redirect users going through - // the OAuth flow, after the resource owner's URLs. - RedirectURL string - - // Scope specifies optional requested permissions. - Scopes []string -} - -// A TokenSource is anything that can return a token. -type TokenSource interface { - // Token returns a token or an error. - // Token must be safe for concurrent use by multiple goroutines. - // The returned Token must not be modified. - Token() (*Token, error) -} - -// Endpoint contains the OAuth 2.0 provider's authorization and token -// endpoint URLs. -type Endpoint struct { - AuthURL string - TokenURL string -} - -var ( - // AccessTypeOnline and AccessTypeOffline are options passed - // to the Options.AuthCodeURL method. They modify the - // "access_type" field that gets sent in the URL returned by - // AuthCodeURL. - // - // Online is the default if neither is specified. If your - // application needs to refresh access tokens when the user - // is not present at the browser, then use offline. This will - // result in your application obtaining a refresh token the - // first time your application exchanges an authorization - // code for a user. - AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online") - AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline") - - // ApprovalForce forces the users to view the consent dialog - // and confirm the permissions request at the URL returned - // from AuthCodeURL, even if they've already done so. - ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force") -) - -// An AuthCodeOption is passed to Config.AuthCodeURL. -type AuthCodeOption interface { - setValue(url.Values) -} - -type setParam struct{ k, v string } - -func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } - -// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters -// to a provider's authorization endpoint. 
-func SetAuthURLParam(key, value string) AuthCodeOption { - return setParam{key, value} -} - -// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page -// that asks for permissions for the required scopes explicitly. -// -// State is a token to protect the user from CSRF attacks. You must -// always provide a non-zero string and validate that it matches the -// the state query parameter on your redirect callback. -// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. -// -// Opts may include AccessTypeOnline or AccessTypeOffline, as well -// as ApprovalForce. -func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { - var buf bytes.Buffer - buf.WriteString(c.Endpoint.AuthURL) - v := url.Values{ - "response_type": {"code"}, - "client_id": {c.ClientID}, - "redirect_uri": internal.CondVal(c.RedirectURL), - "scope": internal.CondVal(strings.Join(c.Scopes, " ")), - "state": internal.CondVal(state), - } - for _, opt := range opts { - opt.setValue(v) - } - if strings.Contains(c.Endpoint.AuthURL, "?") { - buf.WriteByte('&') - } else { - buf.WriteByte('?') - } - buf.WriteString(v.Encode()) - return buf.String() -} - -// PasswordCredentialsToken converts a resource owner username and password -// pair into a token. -// -// Per the RFC, this grant type should only be used "when there is a high -// degree of trust between the resource owner and the client (e.g., the client -// is part of the device operating system or a highly privileged application), -// and when other authorization grant types are not available." -// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. -// -// The HTTP client to use is derived from the context. -// If nil, http.DefaultClient is used. -func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { - return retrieveToken(ctx, c, url.Values{ - "grant_type": {"password"}, - "username": {username}, - "password": {password}, - "scope": internal.CondVal(strings.Join(c.Scopes, " ")), - }) -} - -// Exchange converts an authorization code into a token. -// -// It is used after a resource provider redirects the user back -// to the Redirect URI (the URL obtained from AuthCodeURL). -// -// The HTTP client to use is derived from the context. -// If a client is not provided via the context, http.DefaultClient is used. -// -// The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state"). -func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { - return retrieveToken(ctx, c, url.Values{ - "grant_type": {"authorization_code"}, - "code": {code}, - "redirect_uri": internal.CondVal(c.RedirectURL), - "scope": internal.CondVal(strings.Join(c.Scopes, " ")), - }) -} - -// Client returns an HTTP client using the provided token. -// The token will auto-refresh as necessary. The underlying -// HTTP transport will be obtained using the provided context. -// The returned client and its Transport should not be modified. -func (c *Config) Client(ctx context.Context, t *Token) *http.Client { - return NewClient(ctx, c.TokenSource(ctx, t)) -} - -// TokenSource returns a TokenSource that returns t until t expires, -// automatically refreshing it as necessary using the provided context. -// -// Most users will use Config.Client instead. 
-func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { - tkr := &tokenRefresher{ - ctx: ctx, - conf: c, - } - if t != nil { - tkr.refreshToken = t.RefreshToken - } - return &reuseTokenSource{ - t: t, - new: tkr, - } -} - -// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" -// HTTP requests to renew a token using a RefreshToken. -type tokenRefresher struct { - ctx context.Context // used to get HTTP requests - conf *Config - refreshToken string -} - -// WARNING: Token is not safe for concurrent access, as it -// updates the tokenRefresher's refreshToken field. -// Within this package, it is used by reuseTokenSource which -// synchronizes calls to this method with its own mutex. -func (tf *tokenRefresher) Token() (*Token, error) { - if tf.refreshToken == "" { - return nil, errors.New("oauth2: token expired and refresh token is not set") - } - - tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ - "grant_type": {"refresh_token"}, - "refresh_token": {tf.refreshToken}, - }) - - if err != nil { - return nil, err - } - if tf.refreshToken != tk.RefreshToken { - tf.refreshToken = tk.RefreshToken - } - return tk, err -} - -// reuseTokenSource is a TokenSource that holds a single token in memory -// and validates its expiry before each call to retrieve it with -// Token. If it's expired, it will be auto-refreshed using the -// new TokenSource. -type reuseTokenSource struct { - new TokenSource // called when t is expired. - - mu sync.Mutex // guards t - t *Token -} - -// Token returns the current token if it's still valid, else will -// refresh the current token (using r.Context for HTTP client -// information) and return the new one. -func (s *reuseTokenSource) Token() (*Token, error) { - s.mu.Lock() - defer s.mu.Unlock() - if s.t.Valid() { - return s.t, nil - } - t, err := s.new.Token() - if err != nil { - return nil, err - } - s.t = t - return t, nil -} - -// StaticTokenSource returns a TokenSource that always returns the same token. -// Because the provided token t is never refreshed, StaticTokenSource is only -// useful for tokens that never expire. -func StaticTokenSource(t *Token) TokenSource { - return staticTokenSource{t} -} - -// staticTokenSource is a TokenSource that always returns the same Token. -type staticTokenSource struct { - t *Token -} - -func (s staticTokenSource) Token() (*Token, error) { - return s.t, nil -} - -// HTTPClient is the context key to use with golang.org/x/net/context's -// WithValue function to associate an *http.Client value with a context. -var HTTPClient internal.ContextKey - -// NewClient creates an *http.Client from a Context and TokenSource. -// The returned client is not valid beyond the lifetime of the context. -// -// As a special case, if src is nil, a non-OAuth2 client is returned -// using the provided context. This exists to support related OAuth2 -// packages. -func NewClient(ctx context.Context, src TokenSource) *http.Client { - if src == nil { - c, err := internal.ContextClient(ctx) - if err != nil { - return &http.Client{Transport: internal.ErrorTransport{err}} - } - return c - } - return &http.Client{ - Transport: &Transport{ - Base: internal.ContextTransport(ctx), - Source: ReuseTokenSource(nil, src), - }, - } -} - -// ReuseTokenSource returns a TokenSource which repeatedly returns the -// same token as long as it's valid, starting with t. -// When its cached token is invalid, a new token is obtained from src. 
-// -// ReuseTokenSource is typically used to reuse tokens from a cache -// (such as a file on disk) between runs of a program, rather than -// obtaining new tokens unnecessarily. -// -// The initial token t may be nil, in which case the TokenSource is -// wrapped in a caching version if it isn't one already. This also -// means it's always safe to wrap ReuseTokenSource around any other -// TokenSource without adverse effects. -func ReuseTokenSource(t *Token, src TokenSource) TokenSource { - // Don't wrap a reuseTokenSource in itself. That would work, - // but cause an unnecessary number of mutex operations. - // Just build the equivalent one. - if rt, ok := src.(*reuseTokenSource); ok { - if t == nil { - // Just use it directly. - return rt - } - src = rt.new - } - return &reuseTokenSource{ - t: t, - new: src, - } -} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go deleted file mode 100644 index 7a3167f15..000000000 --- a/vendor/golang.org/x/oauth2/token.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package oauth2 - -import ( - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "golang.org/x/net/context" - "golang.org/x/oauth2/internal" -) - -// expiryDelta determines how earlier a token should be considered -// expired than its actual expiration time. It is used to avoid late -// expirations due to client-server time mismatches. -const expiryDelta = 10 * time.Second - -// Token represents the crendentials used to authorize -// the requests to access protected resources on the OAuth 2.0 -// provider's backend. -// -// Most users of this package should not access fields of Token -// directly. They're exported mostly for use by related packages -// implementing derivative OAuth2 flows. -type Token struct { - // AccessToken is the token that authorizes and authenticates - // the requests. - AccessToken string `json:"access_token"` - - // TokenType is the type of token. - // The Type method returns either this or "Bearer", the default. - TokenType string `json:"token_type,omitempty"` - - // RefreshToken is a token that's used by the application - // (as opposed to the user) to refresh the access token - // if it expires. - RefreshToken string `json:"refresh_token,omitempty"` - - // Expiry is the optional expiration time of the access token. - // - // If zero, TokenSource implementations will reuse the same - // token forever and RefreshToken or equivalent - // mechanisms for that TokenSource will not be used. - Expiry time.Time `json:"expiry,omitempty"` - - // raw optionally contains extra metadata from the server - // when updating a token. - raw interface{} -} - -// Type returns t.TokenType if non-empty, else "Bearer". -func (t *Token) Type() string { - if strings.EqualFold(t.TokenType, "bearer") { - return "Bearer" - } - if strings.EqualFold(t.TokenType, "mac") { - return "MAC" - } - if strings.EqualFold(t.TokenType, "basic") { - return "Basic" - } - if t.TokenType != "" { - return t.TokenType - } - return "Bearer" -} - -// SetAuthHeader sets the Authorization header to r using the access -// token in t. -// -// This method is unnecessary when using Transport or an HTTP Client -// returned by this package. 
-func (t *Token) SetAuthHeader(r *http.Request) { - r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) -} - -// WithExtra returns a new Token that's a clone of t, but using the -// provided raw extra map. This is only intended for use by packages -// implementing derivative OAuth2 flows. -func (t *Token) WithExtra(extra interface{}) *Token { - t2 := new(Token) - *t2 = *t - t2.raw = extra - return t2 -} - -// Extra returns an extra field. -// Extra fields are key-value pairs returned by the server as a -// part of the token retrieval response. -func (t *Token) Extra(key string) interface{} { - if raw, ok := t.raw.(map[string]interface{}); ok { - return raw[key] - } - - vals, ok := t.raw.(url.Values) - if !ok { - return nil - } - - v := vals.Get(key) - switch s := strings.TrimSpace(v); strings.Count(s, ".") { - case 0: // Contains no "."; try to parse as int - if i, err := strconv.ParseInt(s, 10, 64); err == nil { - return i - } - case 1: // Contains a single "."; try to parse as float - if f, err := strconv.ParseFloat(s, 64); err == nil { - return f - } - } - - return v -} - -// expired reports whether the token is expired. -// t must be non-nil. -func (t *Token) expired() bool { - if t.Expiry.IsZero() { - return false - } - return t.Expiry.Add(-expiryDelta).Before(time.Now()) -} - -// Valid reports whether t is non-nil, has an AccessToken, and is not expired. -func (t *Token) Valid() bool { - return t != nil && t.AccessToken != "" && !t.expired() -} - -// tokenFromInternal maps an *internal.Token struct into -// a *Token struct. -func tokenFromInternal(t *internal.Token) *Token { - if t == nil { - return nil - } - return &Token{ - AccessToken: t.AccessToken, - TokenType: t.TokenType, - RefreshToken: t.RefreshToken, - Expiry: t.Expiry, - raw: t.Raw, - } -} - -// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. -// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along -// with an error.. -func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { - tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v) - if err != nil { - return nil, err - } - return tokenFromInternal(tk), nil -} diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go deleted file mode 100644 index 92ac7e253..000000000 --- a/vendor/golang.org/x/oauth2/transport.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package oauth2 - -import ( - "errors" - "io" - "net/http" - "sync" -) - -// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, -// wrapping a base RoundTripper and adding an Authorization header -// with a token from the supplied Sources. -// -// Transport is a low-level mechanism. Most code will use the -// higher-level Config.Client method instead. -type Transport struct { - // Source supplies the token to add to outgoing requests' - // Authorization headers. - Source TokenSource - - // Base is the base RoundTripper used to make HTTP requests. - // If nil, http.DefaultTransport is used. - Base http.RoundTripper - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// RoundTrip authorizes and authenticates the request with an -// access token. If no token exists or token is expired, -// tries to refresh/fetch a new token. 
-func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - if t.Source == nil { - return nil, errors.New("oauth2: Transport's Source is nil") - } - token, err := t.Source.Token() - if err != nil { - return nil, err - } - - req2 := cloneRequest(req) // per RoundTripper contract - token.SetAuthHeader(req2) - t.setModReq(req, req2) - res, err := t.base().RoundTrip(req2) - if err != nil { - t.setModReq(req, nil) - return nil, err - } - res.Body = &onEOFReader{ - rc: res.Body, - fn: func() { t.setModReq(req, nil) }, - } - return res, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t *Transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - t.mu.Lock() - modReq := t.modReq[req] - delete(t.modReq, req) - t.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func (t *Transport) base() http.RoundTripper { - if t.Base != nil { - return t.Base - } - return http.DefaultTransport -} - -func (t *Transport) setModReq(orig, mod *http.Request) { - t.mu.Lock() - defer t.mu.Unlock() - if t.modReq == nil { - t.modReq = make(map[*http.Request]*http.Request) - } - if mod == nil { - delete(t.modReq, orig) - } else { - t.modReq[orig] = mod - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - return r2 -} - -type onEOFReader struct { - rc io.ReadCloser - fn func() -} - -func (r *onEOFReader) Read(p []byte) (n int, err error) { - n, err = r.rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -func (r *onEOFReader) Close() error { - err := r.rc.Close() - r.runFunc() - return err -} - -func (r *onEOFReader) runFunc() { - if fn := r.fn; fn != nil { - fn() - r.fn = nil - } -} diff --git a/vendor/golang.org/x/sys/AUTHORS b/vendor/golang.org/x/sys/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/vendor/golang.org/x/sys/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sys/CONTRIBUTORS b/vendor/golang.org/x/sys/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/vendor/golang.org/x/sys/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/text/AUTHORS b/vendor/golang.org/x/text/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/vendor/golang.org/x/text/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/text/CONTRIBUTORS b/vendor/golang.org/x/text/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/vendor/golang.org/x/text/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. 
+# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/text/cases/cases.go b/vendor/golang.org/x/text/cases/cases.go new file mode 100644 index 000000000..4155e63b6 --- /dev/null +++ b/vendor/golang.org/x/text/cases/cases.go @@ -0,0 +1,134 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go gen_trieval.go + +// Package cases provides general and language-specific case mappers. +package cases + +import ( + "golang.org/x/text/language" + "golang.org/x/text/transform" +) + +// References: +// - Unicode Reference Manual Chapter 3.13, 4.2, and 5.18. +// - http://www.unicode.org/reports/tr29/ +// - http://www.unicode.org/Public/6.3.0/ucd/CaseFolding.txt +// - http://www.unicode.org/Public/6.3.0/ucd/SpecialCasing.txt +// - http://www.unicode.org/Public/6.3.0/ucd/DerivedCoreProperties.txt +// - http://www.unicode.org/Public/6.3.0/ucd/auxiliary/WordBreakProperty.txt +// - http://www.unicode.org/Public/6.3.0/ucd/auxiliary/WordBreakTest.txt +// - http://userguide.icu-project.org/transforms/casemappings + +// TODO: +// - Case folding +// - Wide and Narrow? +// - Segmenter option for title casing. +// - ASCII fast paths +// - Encode Soft-Dotted property within trie somehow. + +// A Caser transforms given input to a certain case. It implements +// transform.Transformer. +// +// A Caser may be stateful and should therefore not be shared between +// goroutines. +type Caser struct { + t transform.SpanningTransformer +} + +// Bytes returns a new byte slice with the result of converting b to the case +// form implemented by c. +func (c Caser) Bytes(b []byte) []byte { + b, _, _ = transform.Bytes(c.t, b) + return b +} + +// String returns a string with the result of transforming s to the case form +// implemented by c. +func (c Caser) String(s string) string { + s, _, _ = transform.String(c.t, s) + return s +} + +// Reset resets the Caser to be reused for new input after a previous call to +// Transform. +func (c Caser) Reset() { c.t.Reset() } + +// Transform implements the transform.Transformer interface and transforms the +// given input to the case form implemented by c. +func (c Caser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + return c.t.Transform(dst, src, atEOF) +} + +// Span implements the transform.SpanningTransformer interface. +func (c Caser) Span(src []byte, atEOF bool) (n int, err error) { + return c.t.Span(src, atEOF) +} + +// Upper returns a Caser for language-specific uppercasing. +func Upper(t language.Tag, opts ...Option) Caser { + return Caser{makeUpper(t, getOpts(opts...))} +} + +// Lower returns a Caser for language-specific lowercasing. +func Lower(t language.Tag, opts ...Option) Caser { + return Caser{makeLower(t, getOpts(opts...))} +} + +// Title returns a Caser for language-specific title casing. It uses an +// approximation of the default Unicode Word Break algorithm. +func Title(t language.Tag, opts ...Option) Caser { + return Caser{makeTitle(t, getOpts(opts...))} +} + +// Fold returns a Caser that implements Unicode case folding. The returned Caser +// is stateless and safe to use concurrently by multiple goroutines. +// +// Case folding does not normalize the input and may not preserve a normal form. +// Use the collate or search package for more convenient and linguistically +// sound comparisons. 
Use unicode/precis for string comparisons where security +// aspects are a concern. +func Fold(opts ...Option) Caser { + return Caser{makeFold(getOpts(opts...))} +} + +// An Option is used to modify the behavior of a Caser. +type Option func(o *options) + +var ( + // NoLower disables the lowercasing of non-leading letters for a title + // caser. + NoLower Option = noLower + + // Compact omits mappings in case folding for characters that would grow the + // input. (Unimplemented.) + Compact Option = compact +) + +// TODO: option to preserve a normal form, if applicable? + +type options struct { + noLower bool + simple bool + + // TODO: segmenter, max ignorable, alternative versions, etc. + + noFinalSigma bool // Only used for testing. +} + +func getOpts(o ...Option) (res options) { + for _, f := range o { + f(&res) + } + return +} + +func noLower(o *options) { + o.noLower = true +} + +func compact(o *options) { + o.simple = true +} diff --git a/vendor/golang.org/x/text/cases/context.go b/vendor/golang.org/x/text/cases/context.go new file mode 100644 index 000000000..e9aa9e193 --- /dev/null +++ b/vendor/golang.org/x/text/cases/context.go @@ -0,0 +1,376 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cases + +import "golang.org/x/text/transform" + +// A context is used for iterating over source bytes, fetching case info and +// writing to a destination buffer. +// +// Casing operations may need more than one rune of context to decide how a rune +// should be cased. Casing implementations should call checkpoint on context +// whenever it is known to be safe to return the runes processed so far. +// +// It is recommended for implementations to not allow for more than 30 case +// ignorables as lookahead (analogous to the limit in norm) and to use state if +// unbounded lookahead is needed for cased runes. +type context struct { + dst, src []byte + atEOF bool + + pDst int // pDst points past the last written rune in dst. + pSrc int // pSrc points to the start of the currently scanned rune. + + // checkpoints safe to return in Transform, where nDst <= pDst and nSrc <= pSrc. + nDst, nSrc int + err error + + sz int // size of current rune + info info // case information of currently scanned rune + + // State preserved across calls to Transform. + isMidWord bool // false if next cased letter needs to be title-cased. +} + +func (c *context) Reset() { + c.isMidWord = false +} + +// ret returns the return values for the Transform method. It checks whether +// there were insufficient bytes in src to complete and introduces an error +// accordingly, if necessary. +func (c *context) ret() (nDst, nSrc int, err error) { + if c.err != nil || c.nSrc == len(c.src) { + return c.nDst, c.nSrc, c.err + } + // This point is only reached by mappers if there was no short destination + // buffer. This means that the source buffer was exhausted and that c.sz was + // set to 0 by next. + if c.atEOF && c.pSrc == len(c.src) { + return c.pDst, c.pSrc, nil + } + return c.nDst, c.nSrc, transform.ErrShortSrc +} + +// retSpan returns the return values for the Span method. It checks whether +// there were insufficient bytes in src to complete and introduces an error +// accordingly, if necessary. +func (c *context) retSpan() (n int, err error) { + _, nSrc, err := c.ret() + return nSrc, err +} + +// checkpoint sets the return value buffer points for Transform to the current +// positions. 
+func (c *context) checkpoint() { + if c.err == nil { + c.nDst, c.nSrc = c.pDst, c.pSrc+c.sz + } +} + +// unreadRune causes the last rune read by next to be reread on the next +// invocation of next. Only one unreadRune may be called after a call to next. +func (c *context) unreadRune() { + c.sz = 0 +} + +func (c *context) next() bool { + c.pSrc += c.sz + if c.pSrc == len(c.src) || c.err != nil { + c.info, c.sz = 0, 0 + return false + } + v, sz := trie.lookup(c.src[c.pSrc:]) + c.info, c.sz = info(v), sz + if c.sz == 0 { + if c.atEOF { + // A zero size means we have an incomplete rune. If we are atEOF, + // this means it is an illegal rune, which we will consume one + // byte at a time. + c.sz = 1 + } else { + c.err = transform.ErrShortSrc + return false + } + } + return true +} + +// writeBytes adds bytes to dst. +func (c *context) writeBytes(b []byte) bool { + if len(c.dst)-c.pDst < len(b) { + c.err = transform.ErrShortDst + return false + } + // This loop is faster than using copy. + for _, ch := range b { + c.dst[c.pDst] = ch + c.pDst++ + } + return true +} + +// writeString writes the given string to dst. +func (c *context) writeString(s string) bool { + if len(c.dst)-c.pDst < len(s) { + c.err = transform.ErrShortDst + return false + } + // This loop is faster than using copy. + for i := 0; i < len(s); i++ { + c.dst[c.pDst] = s[i] + c.pDst++ + } + return true +} + +// copy writes the current rune to dst. +func (c *context) copy() bool { + return c.writeBytes(c.src[c.pSrc : c.pSrc+c.sz]) +} + +// copyXOR copies the current rune to dst and modifies it by applying the XOR +// pattern of the case info. It is the responsibility of the caller to ensure +// that this is a rune with a XOR pattern defined. +func (c *context) copyXOR() bool { + if !c.copy() { + return false + } + if c.info&xorIndexBit == 0 { + // Fast path for 6-bit XOR pattern, which covers most cases. + c.dst[c.pDst-1] ^= byte(c.info >> xorShift) + } else { + // Interpret XOR bits as an index. + // TODO: test performance for unrolling this loop. Verify that we have + // at least two bytes and at most three. + idx := c.info >> xorShift + for p := c.pDst - 1; ; p-- { + c.dst[p] ^= xorData[idx] + idx-- + if xorData[idx] == 0 { + break + } + } + } + return true +} + +// hasPrefix returns true if src[pSrc:] starts with the given string. +func (c *context) hasPrefix(s string) bool { + b := c.src[c.pSrc:] + if len(b) < len(s) { + return false + } + for i, c := range b[:len(s)] { + if c != s[i] { + return false + } + } + return true +} + +// caseType returns an info with only the case bits, normalized to either +// cLower, cUpper, cTitle or cUncased. +func (c *context) caseType() info { + cm := c.info & 0x7 + if cm < 4 { + return cm + } + if cm >= cXORCase { + // xor the last bit of the rune with the case type bits. + b := c.src[c.pSrc+c.sz-1] + return info(b&1) ^ cm&0x3 + } + if cm == cIgnorableCased { + return cLower + } + return cUncased +} + +// lower writes the lowercase version of the current rune to dst. 
+func lower(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cLower { + return c.copy() + } + if c.info&exceptionBit == 0 { + return c.copyXOR() + } + e := exceptions[c.info>>exceptionShift:] + offset := 2 + e[0]&lengthMask // size of header + fold string + if nLower := (e[1] >> lengthBits) & lengthMask; nLower != noChange { + return c.writeString(e[offset : offset+nLower]) + } + return c.copy() +} + +func isLower(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cLower { + return true + } + if c.info&exceptionBit == 0 { + c.err = transform.ErrEndOfSpan + return false + } + e := exceptions[c.info>>exceptionShift:] + if nLower := (e[1] >> lengthBits) & lengthMask; nLower != noChange { + c.err = transform.ErrEndOfSpan + return false + } + return true +} + +// upper writes the uppercase version of the current rune to dst. +func upper(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cUpper { + return c.copy() + } + if c.info&exceptionBit == 0 { + return c.copyXOR() + } + e := exceptions[c.info>>exceptionShift:] + offset := 2 + e[0]&lengthMask // size of header + fold string + // Get length of first special case mapping. + n := (e[1] >> lengthBits) & lengthMask + if ct == cTitle { + // The first special case mapping is for lower. Set n to the second. + if n == noChange { + n = 0 + } + n, e = e[1]&lengthMask, e[n:] + } + if n != noChange { + return c.writeString(e[offset : offset+n]) + } + return c.copy() +} + +// isUpper writes the isUppercase version of the current rune to dst. +func isUpper(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cUpper { + return true + } + if c.info&exceptionBit == 0 { + c.err = transform.ErrEndOfSpan + return false + } + e := exceptions[c.info>>exceptionShift:] + // Get length of first special case mapping. + n := (e[1] >> lengthBits) & lengthMask + if ct == cTitle { + n = e[1] & lengthMask + } + if n != noChange { + c.err = transform.ErrEndOfSpan + return false + } + return true +} + +// title writes the title case version of the current rune to dst. +func title(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cTitle { + return c.copy() + } + if c.info&exceptionBit == 0 { + if ct == cLower { + return c.copyXOR() + } + return c.copy() + } + // Get the exception data. + e := exceptions[c.info>>exceptionShift:] + offset := 2 + e[0]&lengthMask // size of header + fold string + + nFirst := (e[1] >> lengthBits) & lengthMask + if nTitle := e[1] & lengthMask; nTitle != noChange { + if nFirst != noChange { + e = e[nFirst:] + } + return c.writeString(e[offset : offset+nTitle]) + } + if ct == cLower && nFirst != noChange { + // Use the uppercase version instead. + return c.writeString(e[offset : offset+nFirst]) + } + // Already in correct case. + return c.copy() +} + +// isTitle reports whether the current rune is in title case. +func isTitle(c *context) bool { + ct := c.caseType() + if c.info&hasMappingMask == 0 || ct == cTitle { + return true + } + if c.info&exceptionBit == 0 { + if ct == cLower { + c.err = transform.ErrEndOfSpan + return false + } + return true + } + // Get the exception data. 
+ e := exceptions[c.info>>exceptionShift:] + if nTitle := e[1] & lengthMask; nTitle != noChange { + c.err = transform.ErrEndOfSpan + return false + } + nFirst := (e[1] >> lengthBits) & lengthMask + if ct == cLower && nFirst != noChange { + c.err = transform.ErrEndOfSpan + return false + } + return true +} + +// foldFull writes the foldFull version of the current rune to dst. +func foldFull(c *context) bool { + if c.info&hasMappingMask == 0 { + return c.copy() + } + ct := c.caseType() + if c.info&exceptionBit == 0 { + if ct != cLower || c.info&inverseFoldBit != 0 { + return c.copyXOR() + } + return c.copy() + } + e := exceptions[c.info>>exceptionShift:] + n := e[0] & lengthMask + if n == 0 { + if ct == cLower { + return c.copy() + } + n = (e[1] >> lengthBits) & lengthMask + } + return c.writeString(e[2 : 2+n]) +} + +// isFoldFull reports whether the current run is mapped to foldFull +func isFoldFull(c *context) bool { + if c.info&hasMappingMask == 0 { + return true + } + ct := c.caseType() + if c.info&exceptionBit == 0 { + if ct != cLower || c.info&inverseFoldBit != 0 { + c.err = transform.ErrEndOfSpan + return false + } + return true + } + e := exceptions[c.info>>exceptionShift:] + n := e[0] & lengthMask + if n == 0 && ct == cLower { + return true + } + c.err = transform.ErrEndOfSpan + return false +} diff --git a/vendor/golang.org/x/text/cases/fold.go b/vendor/golang.org/x/text/cases/fold.go new file mode 100644 index 000000000..85cc434fa --- /dev/null +++ b/vendor/golang.org/x/text/cases/fold.go @@ -0,0 +1,34 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cases + +import "golang.org/x/text/transform" + +type caseFolder struct{ transform.NopResetter } + +// caseFolder implements the Transformer interface for doing case folding. +func (t *caseFolder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + c := context{dst: dst, src: src, atEOF: atEOF} + for c.next() { + foldFull(&c) + c.checkpoint() + } + return c.ret() +} + +func (t *caseFolder) Span(src []byte, atEOF bool) (n int, err error) { + c := context{src: src, atEOF: atEOF} + for c.next() && isFoldFull(&c) { + c.checkpoint() + } + return c.retSpan() +} + +func makeFold(o options) transform.SpanningTransformer { + // TODO: Special case folding, through option Language, Special/Turkic, or + // both. + // TODO: Implement Compact options. + return &caseFolder{} +} diff --git a/vendor/golang.org/x/text/cases/gen.go b/vendor/golang.org/x/text/cases/gen.go new file mode 100644 index 000000000..eb399baa7 --- /dev/null +++ b/vendor/golang.org/x/text/cases/gen.go @@ -0,0 +1,839 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// This program generates the trie for casing operations. The Unicode casing +// algorithm requires the lookup of various properties and mappings for each +// rune. The table generated by this generator combines several of the most +// frequently used of these into a single trie so that they can be accessed +// with a single lookup. 
+package main + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "reflect" + "strconv" + "strings" + "unicode" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/triegen" + "golang.org/x/text/internal/ucd" + "golang.org/x/text/unicode/norm" +) + +func main() { + gen.Init() + genTables() + genTablesTest() + gen.Repackage("gen_trieval.go", "trieval.go", "cases") +} + +// runeInfo contains all information for a rune that we care about for casing +// operations. +type runeInfo struct { + Rune rune + + entry info // trie value for this rune. + + CaseMode info + + // Simple case mappings. + Simple [1 + maxCaseMode][]rune + + // Special casing + HasSpecial bool + Conditional bool + Special [1 + maxCaseMode][]rune + + // Folding + FoldSimple rune + FoldSpecial rune + FoldFull []rune + + // TODO: FC_NFKC, or equivalent data. + + // Properties + SoftDotted bool + CaseIgnorable bool + Cased bool + DecomposeGreek bool + BreakType string + BreakCat breakCategory + + // We care mostly about 0, Above, and IotaSubscript. + CCC byte +} + +type breakCategory int + +const ( + breakBreak breakCategory = iota + breakLetter + breakMid +) + +// mapping returns the case mapping for the given case type. +func (r *runeInfo) mapping(c info) string { + if r.HasSpecial { + return string(r.Special[c]) + } + if len(r.Simple[c]) != 0 { + return string(r.Simple[c]) + } + return string(r.Rune) +} + +func parse(file string, f func(p *ucd.Parser)) { + ucd.Parse(gen.OpenUCDFile(file), f) +} + +func parseUCD() []runeInfo { + chars := make([]runeInfo, unicode.MaxRune) + + get := func(r rune) *runeInfo { + c := &chars[r] + c.Rune = r + return c + } + + parse("UnicodeData.txt", func(p *ucd.Parser) { + ri := get(p.Rune(0)) + ri.CCC = byte(p.Int(ucd.CanonicalCombiningClass)) + ri.Simple[cLower] = p.Runes(ucd.SimpleLowercaseMapping) + ri.Simple[cUpper] = p.Runes(ucd.SimpleUppercaseMapping) + ri.Simple[cTitle] = p.Runes(ucd.SimpleTitlecaseMapping) + if p.String(ucd.GeneralCategory) == "Lt" { + ri.CaseMode = cTitle + } + }) + + // ; + parse("PropList.txt", func(p *ucd.Parser) { + if p.String(1) == "Soft_Dotted" { + chars[p.Rune(0)].SoftDotted = true + } + }) + + // ; + parse("DerivedCoreProperties.txt", func(p *ucd.Parser) { + ri := get(p.Rune(0)) + switch p.String(1) { + case "Case_Ignorable": + ri.CaseIgnorable = true + case "Cased": + ri.Cased = true + case "Lowercase": + ri.CaseMode = cLower + case "Uppercase": + ri.CaseMode = cUpper + } + }) + + // ; ; ; <upper> ; (<condition_list> ;)? + parse("SpecialCasing.txt", func(p *ucd.Parser) { + // We drop all conditional special casing and deal with them manually in + // the language-specific case mappers. Rune 0x03A3 is the only one with + // a conditional formatting that is not language-specific. However, + // dealing with this letter is tricky, especially in a streaming + // context, so we deal with it in the Caser for Greek specifically. + ri := get(p.Rune(0)) + if p.String(4) == "" { + ri.HasSpecial = true + ri.Special[cLower] = p.Runes(1) + ri.Special[cTitle] = p.Runes(2) + ri.Special[cUpper] = p.Runes(3) + } else { + ri.Conditional = true + } + }) + + // TODO: Use text breaking according to UAX #29. + // <code>; <word break type> + parse("auxiliary/WordBreakProperty.txt", func(p *ucd.Parser) { + ri := get(p.Rune(0)) + ri.BreakType = p.String(1) + + // We collapse the word breaking properties onto the categories we need. + switch p.String(1) { // TODO: officially we need to canonicalize. 
+ case "MidLetter", "MidNumLet", "Single_Quote": + ri.BreakCat = breakMid + if !ri.CaseIgnorable { + // finalSigma relies on the fact that all breakMid runes are + // also a Case_Ignorable. Revisit this code when this changes. + log.Fatalf("Rune %U, which has a break category mid, is not a case ignorable", ri) + } + case "ALetter", "Hebrew_Letter", "Numeric", "Extend", "ExtendNumLet", "Format", "ZWJ": + ri.BreakCat = breakLetter + } + }) + + // <code>; <type>; <mapping> + parse("CaseFolding.txt", func(p *ucd.Parser) { + ri := get(p.Rune(0)) + switch p.String(1) { + case "C": + ri.FoldSimple = p.Rune(2) + ri.FoldFull = p.Runes(2) + case "S": + ri.FoldSimple = p.Rune(2) + case "T": + ri.FoldSpecial = p.Rune(2) + case "F": + ri.FoldFull = p.Runes(2) + default: + log.Fatalf("%U: unknown type: %s", p.Rune(0), p.String(1)) + } + }) + + return chars +} + +func genTables() { + chars := parseUCD() + verifyProperties(chars) + + t := triegen.NewTrie("case") + for i := range chars { + c := &chars[i] + makeEntry(c) + t.Insert(rune(i), uint64(c.entry)) + } + + w := gen.NewCodeWriter() + defer w.WriteGoFile("tables.go", "cases") + + gen.WriteUnicodeVersion(w) + + // TODO: write CLDR version after adding a mechanism to detect that the + // tables on which the manually created locale-sensitive casing code is + // based hasn't changed. + + w.WriteVar("xorData", string(xorData)) + w.WriteVar("exceptions", string(exceptionData)) + + sz, err := t.Gen(w, triegen.Compact(&sparseCompacter{})) + if err != nil { + log.Fatal(err) + } + w.Size += sz +} + +func makeEntry(ri *runeInfo) { + if ri.CaseIgnorable { + if ri.Cased { + ri.entry = cIgnorableCased + } else { + ri.entry = cIgnorableUncased + } + } else { + ri.entry = ri.CaseMode + } + + // TODO: handle soft-dotted. + + ccc := cccOther + switch ri.CCC { + case 0: // Not_Reordered + ccc = cccZero + case above: // Above + ccc = cccAbove + } + switch ri.BreakCat { + case breakBreak: + ccc = cccBreak + case breakMid: + ri.entry |= isMidBit + } + + ri.entry |= ccc + + if ri.CaseMode == cUncased { + return + } + + // Need to do something special. + if ri.CaseMode == cTitle || ri.HasSpecial || ri.mapping(cTitle) != ri.mapping(cUpper) { + makeException(ri) + return + } + if f := string(ri.FoldFull); len(f) > 0 && f != ri.mapping(cUpper) && f != ri.mapping(cLower) { + makeException(ri) + return + } + + // Rune is either lowercase or uppercase. + + orig := string(ri.Rune) + mapped := "" + if ri.CaseMode == cUpper { + mapped = ri.mapping(cLower) + } else { + mapped = ri.mapping(cUpper) + } + + if len(orig) != len(mapped) { + makeException(ri) + return + } + + if string(ri.FoldFull) == ri.mapping(cUpper) { + ri.entry |= inverseFoldBit + } + + n := len(orig) + + // Create per-byte XOR mask. + var b []byte + for i := 0; i < n; i++ { + b = append(b, orig[i]^mapped[i]) + } + + // Remove leading 0 bytes, but keep at least one byte. + for ; len(b) > 1 && b[0] == 0; b = b[1:] { + } + + if len(b) == 1 && b[0]&0xc0 == 0 { + ri.entry |= info(b[0]) << xorShift + return + } + + key := string(b) + x, ok := xorCache[key] + if !ok { + xorData = append(xorData, 0) // for detecting start of sequence + xorData = append(xorData, b...) + + x = len(xorData) - 1 + xorCache[key] = x + } + ri.entry |= info(x<<xorShift) | xorIndexBit +} + +var xorCache = map[string]int{} + +// xorData contains byte-wise XOR data for the least significant bytes of a +// UTF-8 encoded rune. An index points to the last byte. The sequence starts +// with a zero terminator. 
+var xorData = []byte{} + +// See the comments in gen_trieval.go re "the exceptions slice". +var exceptionData = []byte{0} + +// makeException encodes case mappings that cannot be expressed in a simple +// XOR diff. +func makeException(ri *runeInfo) { + ccc := ri.entry & cccMask + // Set exception bit and retain case type. + ri.entry &= 0x0007 + ri.entry |= exceptionBit + + if len(exceptionData) >= 1<<numExceptionBits { + log.Fatalf("%U:exceptionData too large %x > %d bits", ri.Rune, len(exceptionData), numExceptionBits) + } + + // Set the offset in the exceptionData array. + ri.entry |= info(len(exceptionData) << exceptionShift) + + orig := string(ri.Rune) + tc := ri.mapping(cTitle) + uc := ri.mapping(cUpper) + lc := ri.mapping(cLower) + ff := string(ri.FoldFull) + + // addString sets the length of a string and adds it to the expansions array. + addString := func(s string, b *byte) { + if len(s) == 0 { + // Zero-length mappings exist, but only for conditional casing, + // which we are representing outside of this table. + log.Fatalf("%U: has zero-length mapping.", ri.Rune) + } + *b <<= 3 + if s != orig { + n := len(s) + if n > 7 { + log.Fatalf("%U: mapping larger than 7 (%d)", ri.Rune, n) + } + *b |= byte(n) + exceptionData = append(exceptionData, s...) + } + } + + // byte 0: + exceptionData = append(exceptionData, byte(ccc)|byte(len(ff))) + + // byte 1: + p := len(exceptionData) + exceptionData = append(exceptionData, 0) + + if len(ff) > 7 { // May be zero-length. + log.Fatalf("%U: fold string larger than 7 (%d)", ri.Rune, len(ff)) + } + exceptionData = append(exceptionData, ff...) + ct := ri.CaseMode + if ct != cLower { + addString(lc, &exceptionData[p]) + } + if ct != cUpper { + addString(uc, &exceptionData[p]) + } + if ct != cTitle { + // If title is the same as upper, we set it to the original string so + // that it will be marked as not present. This implies title case is + // the same as upper case. + if tc == uc { + tc = orig + } + addString(tc, &exceptionData[p]) + } +} + +// sparseCompacter is a trie value block Compacter. There are many cases where +// successive runes alternate between lower- and upper-case. This Compacter +// exploits this by adding a special case type where the case value is obtained +// from or-ing it with the least-significant bit of the rune, creating large +// ranges of equal case values that compress well. +type sparseCompacter struct { + sparseBlocks [][]uint16 + sparseOffsets []uint16 + sparseCount int +} + +// makeSparse returns the number of elements that compact block would contain +// as well as the modified values. +func makeSparse(vals []uint64) ([]uint16, int) { + // Copy the values. + values := make([]uint16, len(vals)) + for i, v := range vals { + values[i] = uint16(v) + } + + alt := func(i int, v uint16) uint16 { + if cm := info(v & fullCasedMask); cm == cUpper || cm == cLower { + // Convert cLower or cUpper to cXORCase value, which has the form 11x. + xor := v + xor &^= 1 + xor |= uint16(i&1) ^ (v & 1) + xor |= 0x4 + return xor + } + return v + } + + var count int + var previous uint16 + for i, v := range values { + if v != 0 { + // Try if the unmodified value is equal to the previous. + if v == previous { + continue + } + + // Try if the xor-ed value is equal to the previous value. + a := alt(i, v) + if a == previous { + values[i] = a + continue + } + + // This is a new value. + count++ + + // Use the xor-ed value if it will be identical to the next value. 
+ if p := i + 1; p < len(values) && alt(p, values[p]) == a { + values[i] = a + v = a + } + } + previous = v + } + return values, count +} + +func (s *sparseCompacter) Size(v []uint64) (int, bool) { + _, n := makeSparse(v) + + // We limit using this method to having 16 entries. + if n > 16 { + return 0, false + } + + return 2 + int(reflect.TypeOf(valueRange{}).Size())*n, true +} + +func (s *sparseCompacter) Store(v []uint64) uint32 { + h := uint32(len(s.sparseOffsets)) + values, sz := makeSparse(v) + s.sparseBlocks = append(s.sparseBlocks, values) + s.sparseOffsets = append(s.sparseOffsets, uint16(s.sparseCount)) + s.sparseCount += sz + return h +} + +func (s *sparseCompacter) Handler() string { + // The sparse global variable and its lookup method is defined in gen_trieval.go. + return "sparse.lookup" +} + +func (s *sparseCompacter) Print(w io.Writer) (retErr error) { + p := func(format string, args ...interface{}) { + _, err := fmt.Fprintf(w, format, args...) + if retErr == nil && err != nil { + retErr = err + } + } + + ls := len(s.sparseBlocks) + if ls == len(s.sparseOffsets) { + s.sparseOffsets = append(s.sparseOffsets, uint16(s.sparseCount)) + } + p("// sparseOffsets: %d entries, %d bytes\n", ls+1, (ls+1)*2) + p("var sparseOffsets = %#v\n\n", s.sparseOffsets) + + ns := s.sparseCount + p("// sparseValues: %d entries, %d bytes\n", ns, ns*4) + p("var sparseValues = [%d]valueRange {", ns) + for i, values := range s.sparseBlocks { + p("\n// Block %#x, offset %#x", i, s.sparseOffsets[i]) + var v uint16 + for i, nv := range values { + if nv != v { + if v != 0 { + p(",hi:%#02x},", 0x80+i-1) + } + if nv != 0 { + p("\n{value:%#04x,lo:%#02x", nv, 0x80+i) + } + } + v = nv + } + if v != 0 { + p(",hi:%#02x},", 0x80+len(values)-1) + } + } + p("\n}\n\n") + return +} + +// verifyProperties that properties of the runes that are relied upon in the +// implementation. Each property is marked with an identifier that is referred +// to in the places where it is used. +func verifyProperties(chars []runeInfo) { + for i, c := range chars { + r := rune(i) + + // Rune properties. + + // A.1: modifier never changes on lowercase. [ltLower] + if c.CCC > 0 && unicode.ToLower(r) != r { + log.Fatalf("%U: non-starter changes when lowercased", r) + } + + // A.2: properties of decompositions starting with I or J. [ltLower] + d := norm.NFD.PropertiesString(string(r)).Decomposition() + if len(d) > 0 { + if d[0] == 'I' || d[0] == 'J' { + // A.2.1: we expect at least an ASCII character and a modifier. + if len(d) < 3 { + log.Fatalf("%U: length of decomposition was %d; want >= 3", r, len(d)) + } + + // All subsequent runes are modifiers and all have the same CCC. + runes := []rune(string(d[1:])) + ccc := chars[runes[0]].CCC + + for _, mr := range runes[1:] { + mc := chars[mr] + + // A.2.2: all modifiers have a CCC of Above or less. + if ccc == 0 || ccc > above { + log.Fatalf("%U: CCC of successive rune (%U) was %d; want (0,230]", r, mr, ccc) + } + + // A.2.3: a sequence of modifiers all have the same CCC. + if mc.CCC != ccc { + log.Fatalf("%U: CCC of follow-up modifier (%U) was %d; want %d", r, mr, mc.CCC, ccc) + } + + // A.2.4: for each trailing r, r in [0x300, 0x311] <=> CCC == Above. + if (ccc == above) != (0x300 <= mr && mr <= 0x311) { + log.Fatalf("%U: modifier %U in [U+0300, U+0311] != ccc(%U) == 230", r, mr, mr) + } + + if i += len(string(mr)); i >= len(d) { + break + } + } + } + } + + // A.3: no U+0307 in decomposition of Soft-Dotted rune. 
[ltUpper] + if unicode.Is(unicode.Soft_Dotted, r) && strings.Contains(string(d), "\u0307") { + log.Fatalf("%U: decomposition of soft-dotted rune may not contain U+0307", r) + } + + // A.4: only rune U+0345 may be of CCC Iota_Subscript. [elUpper] + if c.CCC == iotaSubscript && r != 0x0345 { + log.Fatalf("%U: only rune U+0345 may have CCC Iota_Subscript", r) + } + + // A.5: soft-dotted runes do not have exceptions. + if c.SoftDotted && c.entry&exceptionBit != 0 { + log.Fatalf("%U: soft-dotted has exception", r) + } + + // A.6: Greek decomposition. [elUpper] + if unicode.Is(unicode.Greek, r) { + if b := norm.NFD.PropertiesString(string(r)).Decomposition(); b != nil { + runes := []rune(string(b)) + // A.6.1: If a Greek rune decomposes and the first rune of the + // decomposition is greater than U+00FF, the rune is always + // great and not a modifier. + if f := runes[0]; unicode.IsMark(f) || f > 0xFF && !unicode.Is(unicode.Greek, f) { + log.Fatalf("%U: expeced first rune of Greek decomposition to be letter, found %U", r, f) + } + // A.6.2: Any follow-up rune in a Greek decomposition is a + // modifier of which the first should be gobbled in + // decomposition. + for _, m := range runes[1:] { + switch m { + case 0x0313, 0x0314, 0x0301, 0x0300, 0x0306, 0x0342, 0x0308, 0x0304, 0x345: + default: + log.Fatalf("%U: modifier %U is outside of expeced Greek modifier set", r, m) + } + } + } + } + + // Breaking properties. + + // B.1: all runes with CCC > 0 are of break type Extend. + if c.CCC > 0 && c.BreakType != "Extend" { + log.Fatalf("%U: CCC == %d, but got break type %s; want Extend", r, c.CCC, c.BreakType) + } + + // B.2: all cased runes with c.CCC == 0 are of break type ALetter. + if c.CCC == 0 && c.Cased && c.BreakType != "ALetter" { + log.Fatalf("%U: cased, but got break type %s; want ALetter", r, c.BreakType) + } + + // B.3: letter category. + if c.CCC == 0 && c.BreakCat != breakBreak && !c.CaseIgnorable { + if c.BreakCat != breakLetter { + log.Fatalf("%U: check for letter break type gave %d; want %d", r, c.BreakCat, breakLetter) + } + } + } +} + +func genTablesTest() { + w := &bytes.Buffer{} + + fmt.Fprintln(w, "var (") + printProperties(w, "DerivedCoreProperties.txt", "Case_Ignorable", verifyIgnore) + + // We discard the output as we know we have perfect functions. We run them + // just to verify the properties are correct. + n := printProperties(ioutil.Discard, "DerivedCoreProperties.txt", "Cased", verifyCased) + n += printProperties(ioutil.Discard, "DerivedCoreProperties.txt", "Lowercase", verifyLower) + n += printProperties(ioutil.Discard, "DerivedCoreProperties.txt", "Uppercase", verifyUpper) + if n > 0 { + log.Fatalf("One of the discarded properties does not have a perfect filter.") + } + + // <code>; <lower> ; <title> ; <upper> ; (<condition_list> ;)? + fmt.Fprintln(w, "\tspecial = map[rune]struct{ toLower, toTitle, toUpper string }{") + parse("SpecialCasing.txt", func(p *ucd.Parser) { + // Skip conditional entries. 
+ if p.String(4) != "" { + return + } + r := p.Rune(0) + fmt.Fprintf(w, "\t\t0x%04x: {%q, %q, %q},\n", + r, string(p.Runes(1)), string(p.Runes(2)), string(p.Runes(3))) + }) + fmt.Fprint(w, "\t}\n\n") + + // <code>; <type>; <runes> + table := map[rune]struct{ simple, full, special string }{} + parse("CaseFolding.txt", func(p *ucd.Parser) { + r := p.Rune(0) + t := p.String(1) + v := string(p.Runes(2)) + if t != "T" && v == string(unicode.ToLower(r)) { + return + } + x := table[r] + switch t { + case "C": + x.full = v + x.simple = v + case "S": + x.simple = v + case "F": + x.full = v + case "T": + x.special = v + } + table[r] = x + }) + fmt.Fprintln(w, "\tfoldMap = map[rune]struct{ simple, full, special string }{") + for r := rune(0); r < 0x10FFFF; r++ { + x, ok := table[r] + if !ok { + continue + } + fmt.Fprintf(w, "\t\t0x%04x: {%q, %q, %q},\n", r, x.simple, x.full, x.special) + } + fmt.Fprint(w, "\t}\n\n") + + // Break property + notBreak := map[rune]bool{} + parse("auxiliary/WordBreakProperty.txt", func(p *ucd.Parser) { + switch p.String(1) { + case "Extend", "Format", "MidLetter", "MidNumLet", "Single_Quote", + "ALetter", "Hebrew_Letter", "Numeric", "ExtendNumLet", "ZWJ": + notBreak[p.Rune(0)] = true + } + }) + + fmt.Fprintln(w, "\tbreakProp = []struct{ lo, hi rune }{") + inBreak := false + for r := rune(0); r <= lastRuneForTesting; r++ { + if isBreak := !notBreak[r]; isBreak != inBreak { + if isBreak { + fmt.Fprintf(w, "\t\t{0x%x, ", r) + } else { + fmt.Fprintf(w, "0x%x},\n", r-1) + } + inBreak = isBreak + } + } + if inBreak { + fmt.Fprintf(w, "0x%x},\n", lastRuneForTesting) + } + fmt.Fprint(w, "\t}\n\n") + + // Word break test + // Filter out all samples that do not contain cased characters. + cased := map[rune]bool{} + parse("DerivedCoreProperties.txt", func(p *ucd.Parser) { + if p.String(1) == "Cased" { + cased[p.Rune(0)] = true + } + }) + + fmt.Fprintln(w, "\tbreakTest = []string{") + parse("auxiliary/WordBreakTest.txt", func(p *ucd.Parser) { + c := strings.Split(p.String(0), " ") + + const sep = '|' + numCased := 0 + test := "" + for ; len(c) >= 2; c = c[2:] { + if c[0] == "÷" && test != "" { + test += string(sep) + } + i, err := strconv.ParseUint(c[1], 16, 32) + r := rune(i) + if err != nil { + log.Fatalf("Invalid rune %q.", c[1]) + } + if r == sep { + log.Fatalf("Separator %q not allowed in test data. Pick another one.", sep) + } + if cased[r] { + numCased++ + } + test += string(r) + } + if numCased > 1 { + fmt.Fprintf(w, "\t\t%q,\n", test) + } + }) + fmt.Fprintln(w, "\t}") + + fmt.Fprintln(w, ")") + + gen.WriteGoFile("tables_test.go", "cases", w.Bytes()) +} + +// These functions are just used for verification that their definition have not +// changed in the Unicode Standard. + +func verifyCased(r rune) bool { + return verifyLower(r) || verifyUpper(r) || unicode.IsTitle(r) +} + +func verifyLower(r rune) bool { + return unicode.IsLower(r) || unicode.Is(unicode.Other_Lowercase, r) +} + +func verifyUpper(r rune) bool { + return unicode.IsUpper(r) || unicode.Is(unicode.Other_Uppercase, r) +} + +// verifyIgnore is an approximation of the Case_Ignorable property using the +// core unicode package. It is used to reduce the size of the test data. +func verifyIgnore(r rune) bool { + props := []*unicode.RangeTable{ + unicode.Mn, + unicode.Me, + unicode.Cf, + unicode.Lm, + unicode.Sk, + } + for _, p := range props { + if unicode.Is(p, r) { + return true + } + } + return false +} + +// printProperties prints tables of rune properties from the given UCD file. 
+// A filter func f can be given to exclude certain values. A rune r will have +// the indicated property if it is in the generated table or if f(r). +func printProperties(w io.Writer, file, property string, f func(r rune) bool) int { + verify := map[rune]bool{} + n := 0 + varNameParts := strings.Split(property, "_") + varNameParts[0] = strings.ToLower(varNameParts[0]) + fmt.Fprintf(w, "\t%s = map[rune]bool{\n", strings.Join(varNameParts, "")) + parse(file, func(p *ucd.Parser) { + if p.String(1) == property { + r := p.Rune(0) + verify[r] = true + if !f(r) { + n++ + fmt.Fprintf(w, "\t\t0x%.4x: true,\n", r) + } + } + }) + fmt.Fprint(w, "\t}\n\n") + + // Verify that f is correct, that is, it represents a subset of the property. + for r := rune(0); r <= lastRuneForTesting; r++ { + if !verify[r] && f(r) { + log.Fatalf("Incorrect filter func for property %q.", property) + } + } + return n +} + +// The newCaseTrie, sparseValues and sparseOffsets definitions below are +// placeholders referred to by gen_trieval.go. The real definitions are +// generated by this program and written to tables.go. + +func newCaseTrie(int) int { return 0 } + +var ( + sparseValues [0]valueRange + sparseOffsets [0]uint16 +) diff --git a/vendor/golang.org/x/text/cases/gen_trieval.go b/vendor/golang.org/x/text/cases/gen_trieval.go new file mode 100644 index 000000000..376d22c8f --- /dev/null +++ b/vendor/golang.org/x/text/cases/gen_trieval.go @@ -0,0 +1,219 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// This file contains definitions for interpreting the trie value of the case +// trie generated by "go run gen*.go". It is shared by both the generator +// program and the resultant package. Sharing is achieved by the generator +// copying gen_trieval.go to trieval.go and changing what's above this comment. + +// info holds case information for a single rune. It is the value returned +// by a trie lookup. Most mapping information can be stored in a single 16-bit +// value. If not, for example when a rune is mapped to multiple runes, the value +// stores some basic case data and an index into an array with additional data. +// +// The per-rune values have the following format: +// +// if (exception) { +// 15..5 unsigned exception index +// 4 unused +// } else { +// 15..8 XOR pattern or index to XOR pattern for case mapping +// Only 13..8 are used for XOR patterns. +// 7 inverseFold (fold to upper, not to lower) +// 6 index: interpret the XOR pattern as an index +// or isMid if case mode is cIgnorableUncased. +// 5..4 CCC: zero (normal or break), above or other +// } +// 3 exception: interpret this value as an exception index +// (TODO: is this bit necessary? Probably implied from case mode.) +// 2..0 case mode +// +// For the non-exceptional cases, a rune must be either uncased, lowercase or +// uppercase. If the rune is cased, the XOR pattern maps either a lowercase +// rune to uppercase or an uppercase rune to lowercase (applied to the 10 +// least-significant bits of the rune). +// +// See the definitions below for a more detailed description of the various +// bits. 
+type info uint16 + +const ( + casedMask = 0x0003 + fullCasedMask = 0x0007 + ignorableMask = 0x0006 + ignorableValue = 0x0004 + + inverseFoldBit = 1 << 7 + isMidBit = 1 << 6 + + exceptionBit = 1 << 3 + exceptionShift = 5 + numExceptionBits = 11 + + xorIndexBit = 1 << 6 + xorShift = 8 + + // There is no mapping if all xor bits and the exception bit are zero. + hasMappingMask = 0xff80 | exceptionBit +) + +// The case mode bits encodes the case type of a rune. This includes uncased, +// title, upper and lower case and case ignorable. (For a definition of these +// terms see Chapter 3 of The Unicode Standard Core Specification.) In some rare +// cases, a rune can be both cased and case-ignorable. This is encoded by +// cIgnorableCased. A rune of this type is always lower case. Some runes are +// cased while not having a mapping. +// +// A common pattern for scripts in the Unicode standard is for upper and lower +// case runes to alternate for increasing rune values (e.g. the accented Latin +// ranges starting from U+0100 and U+1E00 among others and some Cyrillic +// characters). We use this property by defining a cXORCase mode, where the case +// mode (always upper or lower case) is derived from the rune value. As the XOR +// pattern for case mappings is often identical for successive runes, using +// cXORCase can result in large series of identical trie values. This, in turn, +// allows us to better compress the trie blocks. +const ( + cUncased info = iota // 000 + cTitle // 001 + cLower // 010 + cUpper // 011 + cIgnorableUncased // 100 + cIgnorableCased // 101 // lower case if mappings exist + cXORCase // 11x // case is cLower | ((rune&1) ^ x) + + maxCaseMode = cUpper +) + +func (c info) isCased() bool { + return c&casedMask != 0 +} + +func (c info) isCaseIgnorable() bool { + return c&ignorableMask == ignorableValue +} + +func (c info) isNotCasedAndNotCaseIgnorable() bool { + return c&fullCasedMask == 0 +} + +func (c info) isCaseIgnorableAndNotCased() bool { + return c&fullCasedMask == cIgnorableUncased +} + +func (c info) isMid() bool { + return c&(fullCasedMask|isMidBit) == isMidBit|cIgnorableUncased +} + +// The case mapping implementation will need to know about various Canonical +// Combining Class (CCC) values. We encode two of these in the trie value: +// cccZero (0) and cccAbove (230). If the value is cccOther, it means that +// CCC(r) > 0, but not 230. A value of cccBreak means that CCC(r) == 0 and that +// the rune also has the break category Break (see below). +const ( + cccBreak info = iota << 4 + cccZero + cccAbove + cccOther + + cccMask = cccBreak | cccZero | cccAbove | cccOther +) + +const ( + starter = 0 + above = 230 + iotaSubscript = 240 +) + +// The exceptions slice holds data that does not fit in a normal info entry. +// The entry is pointed to by the exception index in an entry. It has the +// following format: +// +// Header +// byte 0: +// 7..6 unused +// 5..4 CCC type (same bits as entry) +// 3 unused +// 2..0 length of fold +// +// byte 1: +// 7..6 unused +// 5..3 length of 1st mapping of case type +// 2..0 length of 2nd mapping of case type +// +// case 1st 2nd +// lower -> upper, title +// upper -> lower, title +// title -> lower, upper +// +// Lengths with the value 0x7 indicate no value and implies no change. +// A length of 0 indicates a mapping to zero-length string. +// +// Body bytes: +// case folding bytes +// lowercase mapping bytes +// uppercase mapping bytes +// titlecase mapping bytes +// closure mapping bytes (for NFKC_Casefold). 
(TODO) +// +// Fallbacks: +// missing fold -> lower +// missing title -> upper +// all missing -> original rune +// +// exceptions starts with a dummy byte to enforce that there is no zero index +// value. +const ( + lengthMask = 0x07 + lengthBits = 3 + noChange = 0 +) + +// References to generated trie. + +var trie = newCaseTrie(0) + +var sparse = sparseBlocks{ + values: sparseValues[:], + offsets: sparseOffsets[:], +} + +// Sparse block lookup code. + +// valueRange is an entry in a sparse block. +type valueRange struct { + value uint16 + lo, hi byte +} + +type sparseBlocks struct { + values []valueRange + offsets []uint16 +} + +// lookup returns the value from values block n for byte b using binary search. +func (s *sparseBlocks) lookup(n uint32, b byte) uint16 { + lo := s.offsets[n] + hi := s.offsets[n+1] + for lo < hi { + m := lo + (hi-lo)/2 + r := s.values[m] + if r.lo <= b && b <= r.hi { + return r.value + } + if b < r.lo { + hi = m + } else { + lo = m + 1 + } + } + return 0 +} + +// lastRuneForTesting is the last rune used for testing. Everything after this +// is boring. +const lastRuneForTesting = rune(0x1FFFF) diff --git a/vendor/golang.org/x/text/cases/icu.go b/vendor/golang.org/x/text/cases/icu.go new file mode 100644 index 000000000..46530d1e4 --- /dev/null +++ b/vendor/golang.org/x/text/cases/icu.go @@ -0,0 +1,61 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build icu + +package cases + +// Ideally these functions would be defined in a test file, but go test doesn't +// allow CGO in tests. The build tag should ensure either way that these +// functions will not end up in the package. + +// TODO: Ensure that the correct ICU version is set. + +/* +#cgo LDFLAGS: -licui18n.57 -licuuc.57 +#include <stdlib.h> +#include <unicode/ustring.h> +#include <unicode/utypes.h> +#include <unicode/localpointer.h> +#include <unicode/ucasemap.h> +*/ +import "C" + +import "unsafe" + +func doICU(tag, caser, input string) string { + err := C.UErrorCode(0) + loc := C.CString(tag) + cm := C.ucasemap_open(loc, C.uint32_t(0), &err) + + buf := make([]byte, len(input)*4) + dst := (*C.char)(unsafe.Pointer(&buf[0])) + src := C.CString(input) + + cn := C.int32_t(0) + + switch caser { + case "fold": + cn = C.ucasemap_utf8FoldCase(cm, + dst, C.int32_t(len(buf)), + src, C.int32_t(len(input)), + &err) + case "lower": + cn = C.ucasemap_utf8ToLower(cm, + dst, C.int32_t(len(buf)), + src, C.int32_t(len(input)), + &err) + case "upper": + cn = C.ucasemap_utf8ToUpper(cm, + dst, C.int32_t(len(buf)), + src, C.int32_t(len(input)), + &err) + case "title": + cn = C.ucasemap_utf8ToTitle(cm, + dst, C.int32_t(len(buf)), + src, C.int32_t(len(input)), + &err) + } + return string(buf[:cn]) +} diff --git a/vendor/golang.org/x/text/cases/info.go b/vendor/golang.org/x/text/cases/info.go new file mode 100644 index 000000000..3b51f03d6 --- /dev/null +++ b/vendor/golang.org/x/text/cases/info.go @@ -0,0 +1,82 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cases + +func (c info) cccVal() info { + if c&exceptionBit != 0 { + return info(exceptions[c>>exceptionShift]) & cccMask + } + return c & cccMask +} + +func (c info) cccType() info { + ccc := c.cccVal() + if ccc <= cccZero { + return cccZero + } + return ccc +} + +// TODO: Implement full Unicode breaking algorithm: +// 1) Implement breaking in separate package. +// 2) Use the breaker here. +// 3) Compare table size and performance of using the more generic breaker. +// +// Note that we can extend the current algorithm to be much more accurate. This +// only makes sense, though, if the performance and/or space penalty of using +// the generic breaker is big. Extra data will only be needed for non-cased +// runes, which means there are sufficient bits left in the caseType. +// ICU prohibits breaking in such cases as well. + +// For the purpose of title casing we use an approximation of the Unicode Word +// Breaking algorithm defined in Annex #29: +// http://www.unicode.org/reports/tr29/#Default_Grapheme_Cluster_Table. +// +// For our approximation, we group the Word Break types into the following +// categories, with associated rules: +// +// 1) Letter: +// ALetter, Hebrew_Letter, Numeric, ExtendNumLet, Extend, Format_FE, ZWJ. +// Rule: Never break between consecutive runes of this category. +// +// 2) Mid: +// MidLetter, MidNumLet, Single_Quote. +// (Cf. case-ignorable: MidLetter, MidNumLet, Single_Quote or cat is Mn, +// Me, Cf, Lm or Sk). +// Rule: Don't break between Letter and Mid, but break between two Mids. +// +// 3) Break: +// Any other category: NewLine, MidNum, CR, LF, Double_Quote, Katakana, and +// Other. +// These categories should always result in a break between two cased letters. +// Rule: Always break. +// +// Note 1: the Katakana and MidNum categories can, in esoteric cases, result in +// preventing a break between two cased letters. For now we will ignore this +// (e.g. [ALetter] [ExtendNumLet] [Katakana] [ExtendNumLet] [ALetter] and +// [ALetter] [Numeric] [MidNum] [Numeric] [ALetter].) +// +// Note 2: the rule for Mid is very approximate, but works in most cases. To +// improve, we could store the categories in the trie value and use a FA to +// manage breaks. See TODO comment above. +// +// Note 3: according to the spec, it is possible for the Extend category to +// introduce breaks between other categories grouped in Letter. However, this +// is undesirable for our purposes. ICU prevents breaks in such cases as well. + +// isBreak returns whether this rune should introduce a break. +func (c info) isBreak() bool { + return c.cccVal() == cccBreak +} + +// isLetter returns whether the rune is of break type ALetter, Hebrew_Letter, +// Numeric, ExtendNumLet, or Extend. +func (c info) isLetter() bool { + ccc := c.cccVal() + if ccc == cccZero { + return !c.isCaseIgnorable() + } + return ccc != cccBreak +} diff --git a/vendor/golang.org/x/text/cases/map.go b/vendor/golang.org/x/text/cases/map.go new file mode 100644 index 000000000..68395fcc0 --- /dev/null +++ b/vendor/golang.org/x/text/cases/map.go @@ -0,0 +1,742 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cases + +// This file contains the definitions of case mappings for all supported +// languages. The rules for the language-specific tailorings were taken and +// modified from the CLDR transform definitions in common/transforms. 
+ +import ( + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/text/language" + "golang.org/x/text/transform" + "golang.org/x/text/unicode/norm" +) + +// A mapFunc takes a context set to the current rune and writes the mapped +// version to the same context. It may advance the context to the next rune. It +// returns whether a checkpoint is possible: whether the pDst bytes written to +// dst so far won't need changing as we see more source bytes. +type mapFunc func(*context) bool + +// A spanFunc takes a context set to the current rune and returns whether this +// rune would be altered when written to the output. It may advance the context +// to the next rune. It returns whether a checkpoint is possible. +type spanFunc func(*context) bool + +// maxIgnorable defines the maximum number of ignorables to consider for +// lookahead operations. +const maxIgnorable = 30 + +// supported lists the language tags for which we have tailorings. +const supported = "und af az el lt nl tr" + +func init() { + tags := []language.Tag{} + for _, s := range strings.Split(supported, " ") { + tags = append(tags, language.MustParse(s)) + } + matcher = language.NewMatcher(tags) + Supported = language.NewCoverage(tags) +} + +var ( + matcher language.Matcher + + Supported language.Coverage + + // We keep the following lists separate, instead of having a single per- + // language struct, to give the compiler a chance to remove unused code. + + // Some uppercase mappers are stateless, so we can precompute the + // Transformers and save a bit on runtime allocations. + upperFunc = []struct { + upper mapFunc + span spanFunc + }{ + {nil, nil}, // und + {nil, nil}, // af + {aztrUpper(upper), isUpper}, // az + {elUpper, noSpan}, // el + {ltUpper(upper), noSpan}, // lt + {nil, nil}, // nl + {aztrUpper(upper), isUpper}, // tr + } + + undUpper transform.SpanningTransformer = &undUpperCaser{} + + lowerFunc = []mapFunc{ + lower, // und + lower, // af + aztrLower, // az + lower, // el + ltLower, // lt + lower, // nl + aztrLower, // tr + } + + titleInfos = []struct { + title mapFunc + lower mapFunc + titleSpan spanFunc + rewrite func(*context) + }{ + {title, lower, isTitle, nil}, // und + {title, lower, isTitle, afnlRewrite}, // af + {aztrUpper(title), aztrLower, isTitle, nil}, // az + {title, lower, isTitle, nil}, // el + {ltUpper(title), ltLower, noSpan, nil}, // lt + {nlTitle, lower, nlTitleSpan, afnlRewrite}, // nl + {aztrUpper(title), aztrLower, isTitle, nil}, // tr + } +) + +func makeUpper(t language.Tag, o options) transform.SpanningTransformer { + _, i, _ := matcher.Match(t) + f := upperFunc[i].upper + if f == nil { + return undUpper + } + return &simpleCaser{f: f, span: upperFunc[i].span} +} + +func makeLower(t language.Tag, o options) transform.SpanningTransformer { + _, i, _ := matcher.Match(t) + f := lowerFunc[i] + if o.noFinalSigma { + return &simpleCaser{f: f, span: isLower} + } + return &lowerCaser{ + first: f, + midWord: finalSigma(f), + } +} + +func makeTitle(t language.Tag, o options) transform.SpanningTransformer { + _, i, _ := matcher.Match(t) + x := &titleInfos[i] + lower := x.lower + if o.noLower { + lower = (*context).copy + } else if !o.noFinalSigma { + lower = finalSigma(lower) + } + return &titleCaser{ + title: x.title, + lower: lower, + titleSpan: x.titleSpan, + rewrite: x.rewrite, + } +} + +func noSpan(c *context) bool { + c.err = transform.ErrEndOfSpan + return false +} + +// TODO: consider a similar special case for the fast majority lower case. 
This +// is a bit more involved so will require some more precise benchmarking to +// justify it. + +type undUpperCaser struct{ transform.NopResetter } + +// undUpperCaser implements the Transformer interface for doing an upper case +// mapping for the root locale (und). It eliminates the need for an allocation +// as it prevents escaping by not using function pointers. +func (t *undUpperCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + c := context{dst: dst, src: src, atEOF: atEOF} + for c.next() { + upper(&c) + c.checkpoint() + } + return c.ret() +} + +func (t *undUpperCaser) Span(src []byte, atEOF bool) (n int, err error) { + c := context{src: src, atEOF: atEOF} + for c.next() && isUpper(&c) { + c.checkpoint() + } + return c.retSpan() +} + +type simpleCaser struct { + context + f mapFunc + span spanFunc +} + +// simpleCaser implements the Transformer interface for doing a case operation +// on a rune-by-rune basis. +func (t *simpleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + t.context = context{dst: dst, src: src, atEOF: atEOF} + c := &t.context + for c.next() && t.f(c) { + c.checkpoint() + } + return c.ret() +} + +func (t *simpleCaser) Span(src []byte, atEOF bool) (n int, err error) { + t.context = context{src: src, atEOF: atEOF} + c := &t.context + for c.next() && t.span(c) { + c.checkpoint() + } + return c.retSpan() +} + +// lowerCaser implements the Transformer interface. The default Unicode lower +// casing requires different treatment for the first and subsequent characters +// of a word, most notably to handle the Greek final Sigma. +type lowerCaser struct { + context + + first, midWord mapFunc +} + +func (t *lowerCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + t.context = context{dst: dst, src: src, atEOF: atEOF} + c := &t.context + + for isInterWord := true; c.next(); { + if isInterWord { + if c.info.isCased() { + if !t.first(c) { + break + } + isInterWord = false + } else if !c.copy() { + break + } + } else { + if c.info.isNotCasedAndNotCaseIgnorable() { + if !c.copy() { + break + } + isInterWord = true + } else if !t.midWord(c) { + break + } + } + c.checkpoint() + } + return c.ret() +} + +// Span implements a generic lower-casing. This is possible as isLower works +// for all lowercasing variants. All lowercase variants only vary in how they +// transform a non-lowercase letter. They will never change an already lowercase +// letter. In addition, there is no state. +func (t *lowerCaser) Span(src []byte, atEOF bool) (n int, err error) { + t.context = context{src: src, atEOF: atEOF} + c := &t.context + for c.next() && isLower(c) { + c.checkpoint() + } + return c.retSpan() +} + +// titleCaser implements the Transformer interface. Title casing algorithms +// distinguish between the first letter of a word and subsequent letters of the +// same word. It uses state to avoid requiring a potentially infinite lookahead. +type titleCaser struct { + context + + // rune mappings used by the actual casing algorithms. + title mapFunc + lower mapFunc + titleSpan spanFunc + + rewrite func(*context) +} + +// Transform implements the standard Unicode title case algorithm as defined in +// Chapter 3 of The Unicode Standard: +// toTitlecase(X): Find the word boundaries in X according to Unicode Standard +// Annex #29, "Unicode Text Segmentation." For each word boundary, find the +// first cased character F following the word boundary. 
If F exists, map F to +// Titlecase_Mapping(F); then map all characters C between F and the following +// word boundary to Lowercase_Mapping(C). +func (t *titleCaser) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + t.context = context{dst: dst, src: src, atEOF: atEOF, isMidWord: t.isMidWord} + c := &t.context + + if !c.next() { + return c.ret() + } + + for { + p := c.info + if t.rewrite != nil { + t.rewrite(c) + } + + wasMid := p.isMid() + // Break out of this loop on failure to ensure we do not modify the + // state incorrectly. + if p.isCased() { + if !c.isMidWord { + if !t.title(c) { + break + } + c.isMidWord = true + } else if !t.lower(c) { + break + } + } else if !c.copy() { + break + } else if p.isBreak() { + c.isMidWord = false + } + + // As we save the state of the transformer, it is safe to call + // checkpoint after any successful write. + if !(c.isMidWord && wasMid) { + c.checkpoint() + } + + if !c.next() { + break + } + if wasMid && c.info.isMid() { + c.isMidWord = false + } + } + return c.ret() +} + +func (t *titleCaser) Span(src []byte, atEOF bool) (n int, err error) { + t.context = context{src: src, atEOF: atEOF, isMidWord: t.isMidWord} + c := &t.context + + if !c.next() { + return c.retSpan() + } + + for { + p := c.info + if t.rewrite != nil { + t.rewrite(c) + } + + wasMid := p.isMid() + // Break out of this loop on failure to ensure we do not modify the + // state incorrectly. + if p.isCased() { + if !c.isMidWord { + if !t.titleSpan(c) { + break + } + c.isMidWord = true + } else if !isLower(c) { + break + } + } else if p.isBreak() { + c.isMidWord = false + } + // As we save the state of the transformer, it is safe to call + // checkpoint after any successful write. + if !(c.isMidWord && wasMid) { + c.checkpoint() + } + + if !c.next() { + break + } + if wasMid && c.info.isMid() { + c.isMidWord = false + } + } + return c.retSpan() +} + +// finalSigma adds Greek final Sigma handing to another casing function. It +// determines whether a lowercased sigma should be σ or ς, by looking ahead for +// case-ignorables and a cased letters. +func finalSigma(f mapFunc) mapFunc { + return func(c *context) bool { + // ::NFD(); + // # 03A3; 03C2; 03A3; 03A3; Final_Sigma; # GREEK CAPITAL LETTER SIGMA + // Σ } [:case-ignorable:]* [:cased:] → σ; + // [:cased:] [:case-ignorable:]* { Σ → ς; + // ::Any-Lower; + // ::NFC(); + + if !c.hasPrefix("Σ") { + return f(c) + } + + p := c.pDst + c.writeString("ς") + + // TODO: we should do this here, but right now this will never have an + // effect as this is called when the prefix is Sigma, whereas Dutch and + // Afrikaans only test for an apostrophe. + // + // if t.rewrite != nil { + // t.rewrite(c) + // } + + // We need to do one more iteration after maxIgnorable, as a cased + // letter is not an ignorable and may modify the result. + wasMid := false + for i := 0; i < maxIgnorable+1; i++ { + if !c.next() { + return false + } + if !c.info.isCaseIgnorable() { + // All Midword runes are also case ignorable, so we are + // guaranteed to have a letter or word break here. As we are + // unreading the run, there is no need to unset c.isMidWord; + // the title caser will handle this. + if c.info.isCased() { + // p+1 is guaranteed to be in bounds: if writing ς was + // successful, p+1 will contain the second byte of ς. If not, + // this function will have returned after c.next returned false. 
+ c.dst[p+1]++ // ς → σ + } + c.unreadRune() + return true + } + // A case ignorable may also introduce a word break, so we may need + // to continue searching even after detecting a break. + isMid := c.info.isMid() + if (wasMid && isMid) || c.info.isBreak() { + c.isMidWord = false + } + wasMid = isMid + c.copy() + } + return true + } +} + +// finalSigmaSpan would be the same as isLower. + +// elUpper implements Greek upper casing, which entails removing a predefined +// set of non-blocked modifiers. Note that these accents should not be removed +// for title casing! +// Example: "Οδός" -> "ΟΔΟΣ". +func elUpper(c *context) bool { + // From CLDR: + // [:Greek:] [^[:ccc=Not_Reordered:][:ccc=Above:]]*? { [\u0313\u0314\u0301\u0300\u0306\u0342\u0308\u0304] → ; + // [:Greek:] [^[:ccc=Not_Reordered:][:ccc=Iota_Subscript:]]*? { \u0345 → ; + + r, _ := utf8.DecodeRune(c.src[c.pSrc:]) + oldPDst := c.pDst + if !upper(c) { + return false + } + if !unicode.Is(unicode.Greek, r) { + return true + } + i := 0 + // Take the properties of the uppercased rune that is already written to the + // destination. This saves us the trouble of having to uppercase the + // decomposed rune again. + if b := norm.NFD.Properties(c.dst[oldPDst:]).Decomposition(); b != nil { + // Restore the destination position and process the decomposed rune. + r, sz := utf8.DecodeRune(b) + if r <= 0xFF { // See A.6.1 + return true + } + c.pDst = oldPDst + // Insert the first rune and ignore the modifiers. See A.6.2. + c.writeBytes(b[:sz]) + i = len(b[sz:]) / 2 // Greek modifiers are always of length 2. + } + + for ; i < maxIgnorable && c.next(); i++ { + switch r, _ := utf8.DecodeRune(c.src[c.pSrc:]); r { + // Above and Iota Subscript + case 0x0300, // U+0300 COMBINING GRAVE ACCENT + 0x0301, // U+0301 COMBINING ACUTE ACCENT + 0x0304, // U+0304 COMBINING MACRON + 0x0306, // U+0306 COMBINING BREVE + 0x0308, // U+0308 COMBINING DIAERESIS + 0x0313, // U+0313 COMBINING COMMA ABOVE + 0x0314, // U+0314 COMBINING REVERSED COMMA ABOVE + 0x0342, // U+0342 COMBINING GREEK PERISPOMENI + 0x0345: // U+0345 COMBINING GREEK YPOGEGRAMMENI + // No-op. Gobble the modifier. + + default: + switch v, _ := trie.lookup(c.src[c.pSrc:]); info(v).cccType() { + case cccZero: + c.unreadRune() + return true + + // We don't need to test for IotaSubscript as the only rune that + // qualifies (U+0345) was already excluded in the switch statement + // above. See A.4. + + case cccAbove: + return c.copy() + default: + // Some other modifier. We're still allowed to gobble Greek + // modifiers after this. + c.copy() + } + } + } + return i == maxIgnorable +} + +// TODO: implement elUpperSpan (low-priority: complex and infrequent). + +func ltLower(c *context) bool { + // From CLDR: + // # Introduce an explicit dot above when lowercasing capital I's and J's + // # whenever there are more accents above. 
+ // # (of the accents used in Lithuanian: grave, acute, tilde above, and ogonek) + // # 0049; 0069 0307; 0049; 0049; lt More_Above; # LATIN CAPITAL LETTER I + // # 004A; 006A 0307; 004A; 004A; lt More_Above; # LATIN CAPITAL LETTER J + // # 012E; 012F 0307; 012E; 012E; lt More_Above; # LATIN CAPITAL LETTER I WITH OGONEK + // # 00CC; 0069 0307 0300; 00CC; 00CC; lt; # LATIN CAPITAL LETTER I WITH GRAVE + // # 00CD; 0069 0307 0301; 00CD; 00CD; lt; # LATIN CAPITAL LETTER I WITH ACUTE + // # 0128; 0069 0307 0303; 0128; 0128; lt; # LATIN CAPITAL LETTER I WITH TILDE + // ::NFD(); + // I } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → i \u0307; + // J } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → j \u0307; + // I \u0328 (Į) } [^[:ccc=Not_Reordered:][:ccc=Above:]]* [:ccc=Above:] → i \u0328 \u0307; + // I \u0300 (Ì) → i \u0307 \u0300; + // I \u0301 (Í) → i \u0307 \u0301; + // I \u0303 (Ĩ) → i \u0307 \u0303; + // ::Any-Lower(); + // ::NFC(); + + i := 0 + if r := c.src[c.pSrc]; r < utf8.RuneSelf { + lower(c) + if r != 'I' && r != 'J' { + return true + } + } else { + p := norm.NFD.Properties(c.src[c.pSrc:]) + if d := p.Decomposition(); len(d) >= 3 && (d[0] == 'I' || d[0] == 'J') { + // UTF-8 optimization: the decomposition will only have an above + // modifier if the last rune of the decomposition is in [U+300-U+311]. + // In all other cases, a decomposition starting with I is always + // an I followed by modifiers that are not cased themselves. See A.2. + if d[1] == 0xCC && d[2] <= 0x91 { // A.2.4. + if !c.writeBytes(d[:1]) { + return false + } + c.dst[c.pDst-1] += 'a' - 'A' // lower + + // Assumption: modifier never changes on lowercase. See A.1. + // Assumption: all modifiers added have CCC = Above. See A.2.3. + return c.writeString("\u0307") && c.writeBytes(d[1:]) + } + // In all other cases the additional modifiers will have a CCC + // that is less than 230 (Above). We will insert the U+0307, if + // needed, after these modifiers so that a string in FCD form + // will remain so. See A.2.2. + lower(c) + i = 1 + } else { + return lower(c) + } + } + + for ; i < maxIgnorable && c.next(); i++ { + switch c.info.cccType() { + case cccZero: + c.unreadRune() + return true + case cccAbove: + return c.writeString("\u0307") && c.copy() // See A.1. + default: + c.copy() // See A.1. + } + } + return i == maxIgnorable +} + +// ltLowerSpan would be the same as isLower. + +func ltUpper(f mapFunc) mapFunc { + return func(c *context) bool { + // Unicode: + // 0307; 0307; ; ; lt After_Soft_Dotted; # COMBINING DOT ABOVE + // + // From CLDR: + // # Remove \u0307 following soft-dotteds (i, j, and the like), with possible + // # intervening non-230 marks. + // ::NFD(); + // [:Soft_Dotted:] [^[:ccc=Not_Reordered:][:ccc=Above:]]* { \u0307 → ; + // ::Any-Upper(); + // ::NFC(); + + // TODO: See A.5. A soft-dotted rune never has an exception. This would + // allow us to overload the exception bit and encode this property in + // info. Need to measure performance impact of this. + r, _ := utf8.DecodeRune(c.src[c.pSrc:]) + oldPDst := c.pDst + if !f(c) { + return false + } + if !unicode.Is(unicode.Soft_Dotted, r) { + return true + } + + // We don't need to do an NFD normalization, as a soft-dotted rune never + // contains U+0307. See A.3. + + i := 0 + for ; i < maxIgnorable && c.next(); i++ { + switch c.info.cccType() { + case cccZero: + c.unreadRune() + return true + case cccAbove: + if c.hasPrefix("\u0307") { + // We don't do a full NFC, but rather combine runes for + // some of the common cases. 
(Returning NFC or + // preserving normal form is neither a requirement nor + // a possibility anyway). + if !c.next() { + return false + } + if c.dst[oldPDst] == 'I' && c.pDst == oldPDst+1 && c.src[c.pSrc] == 0xcc { + s := "" + switch c.src[c.pSrc+1] { + case 0x80: // U+0300 COMBINING GRAVE ACCENT + s = "\u00cc" // U+00CC LATIN CAPITAL LETTER I WITH GRAVE + case 0x81: // U+0301 COMBINING ACUTE ACCENT + s = "\u00cd" // U+00CD LATIN CAPITAL LETTER I WITH ACUTE + case 0x83: // U+0303 COMBINING TILDE + s = "\u0128" // U+0128 LATIN CAPITAL LETTER I WITH TILDE + case 0x88: // U+0308 COMBINING DIAERESIS + s = "\u00cf" // U+00CF LATIN CAPITAL LETTER I WITH DIAERESIS + default: + } + if s != "" { + c.pDst = oldPDst + return c.writeString(s) + } + } + } + return c.copy() + default: + c.copy() + } + } + return i == maxIgnorable + } +} + +// TODO: implement ltUpperSpan (low priority: complex and infrequent). + +func aztrUpper(f mapFunc) mapFunc { + return func(c *context) bool { + // i→İ; + if c.src[c.pSrc] == 'i' { + return c.writeString("İ") + } + return f(c) + } +} + +func aztrLower(c *context) (done bool) { + // From CLDR: + // # I and i-dotless; I-dot and i are case pairs in Turkish and Azeri + // # 0130; 0069; 0130; 0130; tr; # LATIN CAPITAL LETTER I WITH DOT ABOVE + // İ→i; + // # When lowercasing, remove dot_above in the sequence I + dot_above, which will turn into i. + // # This matches the behavior of the canonically equivalent I-dot_above + // # 0307; ; 0307; 0307; tr After_I; # COMBINING DOT ABOVE + // # When lowercasing, unless an I is before a dot_above, it turns into a dotless i. + // # 0049; 0131; 0049; 0049; tr Not_Before_Dot; # LATIN CAPITAL LETTER I + // I([^[:ccc=Not_Reordered:][:ccc=Above:]]*)\u0307 → i$1 ; + // I→ı ; + // ::Any-Lower(); + if c.hasPrefix("\u0130") { // İ + return c.writeString("i") + } + if c.src[c.pSrc] != 'I' { + return lower(c) + } + + // We ignore the lower-case I for now, but insert it later when we know + // which form we need. + start := c.pSrc + c.sz + + i := 0 +Loop: + // We check for up to n ignorables before \u0307. As \u0307 is an + // ignorable as well, n is maxIgnorable-1. + for ; i < maxIgnorable && c.next(); i++ { + switch c.info.cccType() { + case cccAbove: + if c.hasPrefix("\u0307") { + return c.writeString("i") && c.writeBytes(c.src[start:c.pSrc]) // ignore U+0307 + } + done = true + break Loop + case cccZero: + c.unreadRune() + done = true + break Loop + default: + // We'll write this rune after we know which starter to use. + } + } + if i == maxIgnorable { + done = true + } + return c.writeString("ı") && c.writeBytes(c.src[start:c.pSrc+c.sz]) && done +} + +// aztrLowerSpan would be the same as isLower. + +func nlTitle(c *context) bool { + // From CLDR: + // # Special titlecasing for Dutch initial "ij". + // ::Any-Title(); + // # Fix up Ij at the beginning of a "word" (per Any-Title, notUAX #29) + // [:^WB=ALetter:] [:WB=Extend:]* [[:WB=MidLetter:][:WB=MidNumLet:]]? { Ij } → IJ ; + if c.src[c.pSrc] != 'I' && c.src[c.pSrc] != 'i' { + return title(c) + } + + if !c.writeString("I") || !c.next() { + return false + } + if c.src[c.pSrc] == 'j' || c.src[c.pSrc] == 'J' { + return c.writeString("J") + } + c.unreadRune() + return true +} + +func nlTitleSpan(c *context) bool { + // From CLDR: + // # Special titlecasing for Dutch initial "ij". + // ::Any-Title(); + // # Fix up Ij at the beginning of a "word" (per Any-Title, notUAX #29) + // [:^WB=ALetter:] [:WB=Extend:]* [[:WB=MidLetter:][:WB=MidNumLet:]]? 
{ Ij } → IJ ; + if c.src[c.pSrc] != 'I' { + return isTitle(c) + } + if !c.next() || c.src[c.pSrc] == 'j' { + return false + } + if c.src[c.pSrc] != 'J' { + c.unreadRune() + } + return true +} + +// Not part of CLDR, but see http://unicode.org/cldr/trac/ticket/7078. +func afnlRewrite(c *context) { + if c.hasPrefix("'") || c.hasPrefix("’") { + c.isMidWord = true + } +} diff --git a/vendor/golang.org/x/text/cases/tables.go b/vendor/golang.org/x/text/cases/tables.go new file mode 100644 index 000000000..32ee8fadb --- /dev/null +++ b/vendor/golang.org/x/text/cases/tables.go @@ -0,0 +1,2211 @@ +// This file was generated by go generate; DO NOT EDIT + +package cases + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "9.0.0" + +var xorData string = "" + // Size: 185 bytes + "\x00\x06\x07\x00\x01?\x00\x0f\x03\x00\x0f\x12\x00\x0f\x1f\x00\x0f\x1d" + + "\x00\x01\x13\x00\x0f\x16\x00\x0f\x0b\x00\x0f3\x00\x0f7\x00\x01#\x00\x0f?" + + "\x00\x0e'\x00\x0f/\x00\x0e>\x00\x0f*\x00\x0c&\x00\x0c*\x00\x0c;\x00\x0c9" + + "\x00\x0c%\x00\x01\x08\x00\x03\x0d\x00\x03\x09\x00\x02\x06\x00\x02\x02" + + "\x00\x02\x0c\x00\x01\x00\x00\x01\x03\x00\x01\x01\x00\x01 \x00\x01\x0c" + + "\x00\x01\x10\x00\x03\x10\x00\x036 \x00\x037 \x00\x0b#\x10\x00\x0b 0\x00" + + "\x0b!\x10\x00\x0b!0\x00\x0b(\x04\x00\x03\x04\x1e\x00\x03\x0a\x00\x02:" + + "\x00\x02>\x00\x02,\x00\x02\x00\x00\x02\x10\x00\x01<\x00\x01&\x00\x01*" + + "\x00\x01.\x00\x010\x003 \x00\x01\x18\x00\x01(\x00\x01\x1e\x00\x01\x22" + +var exceptions string = "" + // Size: 1852 bytes + "\x00\x12\x10μΜ\x12\x12ssSSSs\x13\x18i̇i̇\x10\x08I\x13\x18ʼnʼN\x11\x08sS" + + "\x12\x12dždžDž\x12\x12dždžDŽ\x10\x12DŽDž\x12\x12ljljLj\x12\x12ljljLJ\x10\x12LJLj\x12\x12" + + "njnjNj\x12\x12njnjNJ\x10\x12NJNj\x13\x18ǰJ̌\x12\x12dzdzDz\x12\x12dzdzDZ\x10\x12DZDz" + + "\x13\x18ⱥⱥ\x13\x18ⱦⱦ\x10\x18Ȿ\x10\x18Ɀ\x10\x18Ɐ\x10\x18Ɑ\x10\x18Ɒ\x10" + + "\x18Ɜ\x10\x18Ɡ\x10\x18Ɥ\x10\x18Ɦ\x10\x18Ɪ\x10\x18Ɫ\x10\x18Ɬ\x10\x18Ɱ\x10" + + "\x18Ɽ\x10\x18Ʇ\x10\x18Ʝ\x10\x18Ʞ2\x10ιΙ\x160ΐΪ́\x160ΰΫ́\x12\x10σΣ" + + "\x12\x10βΒ\x12\x10θΘ\x12\x10φΦ\x12\x10πΠ\x12\x10κΚ\x12\x10ρΡ\x12\x10εΕ" + + "\x14$եւԵՒԵւ\x12\x10вВ\x12\x10дД\x12\x10оО\x12\x10сС\x12\x10тТ\x12\x10тТ" + + "\x12\x10ъЪ\x12\x10ѣѢ\x13\x18ꙋꙊ\x13\x18ẖH̱\x13\x18ẗT̈\x13\x18ẘW̊\x13" + + "\x18ẙY̊\x13\x18aʾAʾ\x13\x18ṡṠ\x12\x10ssß\x14 ὐΥ̓\x160ὒΥ̓̀\x160ὔΥ̓́" + + "\x160ὖΥ̓͂\x15+ἀιἈΙᾈ\x15+ἁιἉΙᾉ\x15+ἂιἊΙᾊ\x15+ἃιἋΙᾋ\x15+ἄιἌΙᾌ\x15+ἅιἍΙᾍ" + + "\x15+ἆιἎΙᾎ\x15+ἇιἏΙᾏ\x15\x1dἀιᾀἈΙ\x15\x1dἁιᾁἉΙ\x15\x1dἂιᾂἊΙ\x15\x1dἃιᾃἋΙ" + + "\x15\x1dἄιᾄἌΙ\x15\x1dἅιᾅἍΙ\x15\x1dἆιᾆἎΙ\x15\x1dἇιᾇἏΙ\x15+ἠιἨΙᾘ\x15+ἡιἩΙᾙ" + + "\x15+ἢιἪΙᾚ\x15+ἣιἫΙᾛ\x15+ἤιἬΙᾜ\x15+ἥιἭΙᾝ\x15+ἦιἮΙᾞ\x15+ἧιἯΙᾟ\x15\x1dἠιᾐἨ" + + "Ι\x15\x1dἡιᾑἩΙ\x15\x1dἢιᾒἪΙ\x15\x1dἣιᾓἫΙ\x15\x1dἤιᾔἬΙ\x15\x1dἥιᾕἭΙ\x15" + + "\x1dἦιᾖἮΙ\x15\x1dἧιᾗἯΙ\x15+ὠιὨΙᾨ\x15+ὡιὩΙᾩ\x15+ὢιὪΙᾪ\x15+ὣιὫΙᾫ\x15+ὤιὬΙᾬ" + + "\x15+ὥιὭΙᾭ\x15+ὦιὮΙᾮ\x15+ὧιὯΙᾯ\x15\x1dὠιᾠὨΙ\x15\x1dὡιᾡὩΙ\x15\x1dὢιᾢὪΙ" + + "\x15\x1dὣιᾣὫΙ\x15\x1dὤιᾤὬΙ\x15\x1dὥιᾥὭΙ\x15\x1dὦιᾦὮΙ\x15\x1dὧιᾧὯΙ\x15-ὰι" + + "ᾺΙᾺͅ\x14#αιΑΙᾼ\x14$άιΆΙΆͅ\x14 ᾶΑ͂\x166ᾶιΑ͂Ιᾼ͂\x14\x1cαιᾳΑΙ\x12\x10ι" + + "Ι\x15-ὴιῊΙῊͅ\x14#ηιΗΙῌ\x14$ήιΉΙΉͅ\x14 ῆΗ͂\x166ῆιΗ͂Ιῌ͂\x14\x1cηιῃΗΙ" + + "\x160ῒΪ̀\x160ΐΪ́\x14 ῖΙ͂\x160ῗΪ͂\x160ῢΫ̀\x160ΰΫ́\x14 ῤΡ" + + "̓\x14 ῦΥ͂\x160ῧΫ͂\x15-ὼιῺΙῺͅ\x14#ωιΩΙῼ\x14$ώιΏΙΏͅ\x14 ῶΩ͂\x166ῶιΩ" + + "͂Ιῼ͂\x14\x1cωιῳΩΙ\x12\x10ωω\x11\x08kk\x12\x10åå\x12\x10ɫɫ\x12\x10ɽɽ" + + "\x10\x10Ⱥ\x10\x10Ⱦ\x12\x10ɑɑ\x12\x10ɱɱ\x12\x10ɐɐ\x12\x10ɒɒ\x12\x10ȿȿ\x12" + + "\x10ɀɀ\x12\x10ɥɥ\x12\x10ɦɦ\x12\x10ɜɜ\x12\x10ɡɡ\x12\x10ɬɬ\x12\x10ɪɪ\x12" + + 
"\x10ʞʞ\x12\x10ʇʇ\x12\x10ʝʝ\x12\x12ffFFFf\x12\x12fiFIFi\x12\x12flFLFl\x13" + + "\x1bffiFFIFfi\x13\x1bfflFFLFfl\x12\x12stSTSt\x12\x12stSTSt\x14$մնՄՆՄն" + + "\x14$մեՄԵՄե\x14$միՄԻՄի\x14$վնՎՆՎն\x14$մխՄԽՄխ" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *caseTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return caseValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := caseIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = caseIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = caseIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *caseTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return caseValues[c0] + } + i := caseIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = caseIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = caseIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// caseTrie. Total size: 11742 bytes (11.47 KiB). Checksum: 147a11466b427436. +type caseTrie struct{} + +func newCaseTrie(i int) *caseTrie { + return &caseTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *caseTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 18: + return uint16(caseValues[n<<6+uint32(b)]) + default: + n -= 18 + return uint16(sparse.lookup(n, b)) + } +} + +// caseValues: 20 blocks, 1280 entries, 2560 bytes +// The third block is the zero block. 
+var caseValues = [1280]uint16{ + // Block 0x0, offset 0x0 + 0x27: 0x0054, + 0x2e: 0x0054, + 0x30: 0x0010, 0x31: 0x0010, 0x32: 0x0010, 0x33: 0x0010, 0x34: 0x0010, 0x35: 0x0010, + 0x36: 0x0010, 0x37: 0x0010, 0x38: 0x0010, 0x39: 0x0010, 0x3a: 0x0054, + // Block 0x1, offset 0x40 + 0x41: 0x2013, 0x42: 0x2013, 0x43: 0x2013, 0x44: 0x2013, 0x45: 0x2013, + 0x46: 0x2013, 0x47: 0x2013, 0x48: 0x2013, 0x49: 0x2013, 0x4a: 0x2013, 0x4b: 0x2013, + 0x4c: 0x2013, 0x4d: 0x2013, 0x4e: 0x2013, 0x4f: 0x2013, 0x50: 0x2013, 0x51: 0x2013, + 0x52: 0x2013, 0x53: 0x2013, 0x54: 0x2013, 0x55: 0x2013, 0x56: 0x2013, 0x57: 0x2013, + 0x58: 0x2013, 0x59: 0x2013, 0x5a: 0x2013, + 0x5e: 0x0004, 0x5f: 0x0010, 0x60: 0x0004, 0x61: 0x2012, 0x62: 0x2012, 0x63: 0x2012, + 0x64: 0x2012, 0x65: 0x2012, 0x66: 0x2012, 0x67: 0x2012, 0x68: 0x2012, 0x69: 0x2012, + 0x6a: 0x2012, 0x6b: 0x2012, 0x6c: 0x2012, 0x6d: 0x2012, 0x6e: 0x2012, 0x6f: 0x2012, + 0x70: 0x2012, 0x71: 0x2012, 0x72: 0x2012, 0x73: 0x2012, 0x74: 0x2012, 0x75: 0x2012, + 0x76: 0x2012, 0x77: 0x2012, 0x78: 0x2012, 0x79: 0x2012, 0x7a: 0x2012, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0852, 0xc1: 0x0b53, 0xc2: 0x0113, 0xc3: 0x0112, 0xc4: 0x0113, 0xc5: 0x0112, + 0xc6: 0x0b53, 0xc7: 0x0f13, 0xc8: 0x0f12, 0xc9: 0x0e53, 0xca: 0x1153, 0xcb: 0x0713, + 0xcc: 0x0712, 0xcd: 0x0012, 0xce: 0x1453, 0xcf: 0x1753, 0xd0: 0x1a53, 0xd1: 0x0313, + 0xd2: 0x0312, 0xd3: 0x1d53, 0xd4: 0x2053, 0xd5: 0x2352, 0xd6: 0x2653, 0xd7: 0x2653, + 0xd8: 0x0113, 0xd9: 0x0112, 0xda: 0x2952, 0xdb: 0x0012, 0xdc: 0x1d53, 0xdd: 0x2c53, + 0xde: 0x2f52, 0xdf: 0x3253, 0xe0: 0x0113, 0xe1: 0x0112, 0xe2: 0x0113, 0xe3: 0x0112, + 0xe4: 0x0113, 0xe5: 0x0112, 0xe6: 0x3553, 0xe7: 0x0f13, 0xe8: 0x0f12, 0xe9: 0x3853, + 0xea: 0x0012, 0xeb: 0x0012, 0xec: 0x0113, 0xed: 0x0112, 0xee: 0x3553, 0xef: 0x1f13, + 0xf0: 0x1f12, 0xf1: 0x3b53, 0xf2: 0x3e53, 0xf3: 0x0713, 0xf4: 0x0712, 0xf5: 0x0313, + 0xf6: 0x0312, 0xf7: 0x4153, 0xf8: 0x0113, 0xf9: 0x0112, 0xfa: 0x0012, 0xfb: 0x0010, + 0xfc: 0x0113, 0xfd: 0x0112, 0xfe: 0x0012, 0xff: 0x4452, + // Block 0x4, offset 0x100 + 0x100: 0x0010, 0x101: 0x0010, 0x102: 0x0010, 0x103: 0x0010, 0x104: 0x04cb, 0x105: 0x05c9, + 0x106: 0x06ca, 0x107: 0x078b, 0x108: 0x0889, 0x109: 0x098a, 0x10a: 0x0a4b, 0x10b: 0x0b49, + 0x10c: 0x0c4a, 0x10d: 0x0313, 0x10e: 0x0312, 0x10f: 0x1f13, 0x110: 0x1f12, 0x111: 0x0313, + 0x112: 0x0312, 0x113: 0x0713, 0x114: 0x0712, 0x115: 0x0313, 0x116: 0x0312, 0x117: 0x0f13, + 0x118: 0x0f12, 0x119: 0x0313, 0x11a: 0x0312, 0x11b: 0x0713, 0x11c: 0x0712, 0x11d: 0x1452, + 0x11e: 0x0113, 0x11f: 0x0112, 0x120: 0x0113, 0x121: 0x0112, 0x122: 0x0113, 0x123: 0x0112, + 0x124: 0x0113, 0x125: 0x0112, 0x126: 0x0113, 0x127: 0x0112, 0x128: 0x0113, 0x129: 0x0112, + 0x12a: 0x0113, 0x12b: 0x0112, 0x12c: 0x0113, 0x12d: 0x0112, 0x12e: 0x0113, 0x12f: 0x0112, + 0x130: 0x0d0a, 0x131: 0x0e0b, 0x132: 0x0f09, 0x133: 0x100a, 0x134: 0x0113, 0x135: 0x0112, + 0x136: 0x2353, 0x137: 0x4453, 0x138: 0x0113, 0x139: 0x0112, 0x13a: 0x0113, 0x13b: 0x0112, + 0x13c: 0x0113, 0x13d: 0x0112, 0x13e: 0x0113, 0x13f: 0x0112, + // Block 0x5, offset 0x140 + 0x140: 0x136a, 0x141: 0x0313, 0x142: 0x0312, 0x143: 0x0853, 0x144: 0x4753, 0x145: 0x4a53, + 0x146: 0x0113, 0x147: 0x0112, 0x148: 0x0113, 0x149: 0x0112, 0x14a: 0x0113, 0x14b: 0x0112, + 0x14c: 0x0113, 0x14d: 0x0112, 0x14e: 0x0113, 0x14f: 0x0112, 0x150: 0x140a, 0x151: 0x14aa, + 0x152: 0x154a, 0x153: 0x0b52, 0x154: 0x0b52, 0x155: 0x0012, 0x156: 0x0e52, 0x157: 0x1152, + 0x158: 0x0012, 0x159: 0x1752, 0x15a: 0x0012, 0x15b: 0x1a52, 0x15c: 0x15ea, 0x15d: 0x0012, + 0x15e: 
0x0012, 0x15f: 0x0012, 0x160: 0x1d52, 0x161: 0x168a, 0x162: 0x0012, 0x163: 0x2052, + 0x164: 0x0012, 0x165: 0x172a, 0x166: 0x17ca, 0x167: 0x0012, 0x168: 0x2652, 0x169: 0x2652, + 0x16a: 0x186a, 0x16b: 0x190a, 0x16c: 0x19aa, 0x16d: 0x0012, 0x16e: 0x0012, 0x16f: 0x1d52, + 0x170: 0x0012, 0x171: 0x1a4a, 0x172: 0x2c52, 0x173: 0x0012, 0x174: 0x0012, 0x175: 0x3252, + 0x176: 0x0012, 0x177: 0x0012, 0x178: 0x0012, 0x179: 0x0012, 0x17a: 0x0012, 0x17b: 0x0012, + 0x17c: 0x0012, 0x17d: 0x1aea, 0x17e: 0x0012, 0x17f: 0x0012, + // Block 0x6, offset 0x180 + 0x180: 0x3552, 0x181: 0x0012, 0x182: 0x0012, 0x183: 0x3852, 0x184: 0x0012, 0x185: 0x0012, + 0x186: 0x0012, 0x187: 0x1b8a, 0x188: 0x3552, 0x189: 0x4752, 0x18a: 0x3b52, 0x18b: 0x3e52, + 0x18c: 0x4a52, 0x18d: 0x0012, 0x18e: 0x0012, 0x18f: 0x0012, 0x190: 0x0012, 0x191: 0x0012, + 0x192: 0x4152, 0x193: 0x0012, 0x194: 0x0010, 0x195: 0x0012, 0x196: 0x0012, 0x197: 0x0012, + 0x198: 0x0012, 0x199: 0x0012, 0x19a: 0x0012, 0x19b: 0x0012, 0x19c: 0x0012, 0x19d: 0x1c2a, + 0x19e: 0x1cca, 0x19f: 0x0012, 0x1a0: 0x0012, 0x1a1: 0x0012, 0x1a2: 0x0012, 0x1a3: 0x0012, + 0x1a4: 0x0012, 0x1a5: 0x0012, 0x1a6: 0x0012, 0x1a7: 0x0012, 0x1a8: 0x0012, 0x1a9: 0x0012, + 0x1aa: 0x0012, 0x1ab: 0x0012, 0x1ac: 0x0012, 0x1ad: 0x0012, 0x1ae: 0x0012, 0x1af: 0x0012, + 0x1b0: 0x0015, 0x1b1: 0x0015, 0x1b2: 0x0015, 0x1b3: 0x0015, 0x1b4: 0x0015, 0x1b5: 0x0015, + 0x1b6: 0x0015, 0x1b7: 0x0015, 0x1b8: 0x0015, 0x1b9: 0x0014, 0x1ba: 0x0014, 0x1bb: 0x0014, + 0x1bc: 0x0014, 0x1bd: 0x0014, 0x1be: 0x0014, 0x1bf: 0x0014, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0024, 0x1c1: 0x0024, 0x1c2: 0x0024, 0x1c3: 0x0024, 0x1c4: 0x0024, 0x1c5: 0x1d6d, + 0x1c6: 0x0024, 0x1c7: 0x0034, 0x1c8: 0x0034, 0x1c9: 0x0034, 0x1ca: 0x0024, 0x1cb: 0x0024, + 0x1cc: 0x0024, 0x1cd: 0x0034, 0x1ce: 0x0034, 0x1cf: 0x0014, 0x1d0: 0x0024, 0x1d1: 0x0024, + 0x1d2: 0x0024, 0x1d3: 0x0034, 0x1d4: 0x0034, 0x1d5: 0x0034, 0x1d6: 0x0034, 0x1d7: 0x0024, + 0x1d8: 0x0034, 0x1d9: 0x0034, 0x1da: 0x0034, 0x1db: 0x0024, 0x1dc: 0x0034, 0x1dd: 0x0034, + 0x1de: 0x0034, 0x1df: 0x0034, 0x1e0: 0x0034, 0x1e1: 0x0034, 0x1e2: 0x0034, 0x1e3: 0x0024, + 0x1e4: 0x0024, 0x1e5: 0x0024, 0x1e6: 0x0024, 0x1e7: 0x0024, 0x1e8: 0x0024, 0x1e9: 0x0024, + 0x1ea: 0x0024, 0x1eb: 0x0024, 0x1ec: 0x0024, 0x1ed: 0x0024, 0x1ee: 0x0024, 0x1ef: 0x0024, + 0x1f0: 0x0113, 0x1f1: 0x0112, 0x1f2: 0x0113, 0x1f3: 0x0112, 0x1f4: 0x0014, 0x1f5: 0x0004, + 0x1f6: 0x0113, 0x1f7: 0x0112, 0x1fa: 0x0015, 0x1fb: 0x4d52, + 0x1fc: 0x5052, 0x1fd: 0x5052, 0x1ff: 0x5353, + // Block 0x8, offset 0x200 + 0x204: 0x0004, 0x205: 0x0004, + 0x206: 0x2a13, 0x207: 0x0054, 0x208: 0x2513, 0x209: 0x2713, 0x20a: 0x2513, + 0x20c: 0x5653, 0x20e: 0x5953, 0x20f: 0x5c53, 0x210: 0x1e2a, 0x211: 0x2013, + 0x212: 0x2013, 0x213: 0x2013, 0x214: 0x2013, 0x215: 0x2013, 0x216: 0x2013, 0x217: 0x2013, + 0x218: 0x2013, 0x219: 0x2013, 0x21a: 0x2013, 0x21b: 0x2013, 0x21c: 0x2013, 0x21d: 0x2013, + 0x21e: 0x2013, 0x21f: 0x2013, 0x220: 0x5f53, 0x221: 0x5f53, 0x223: 0x5f53, + 0x224: 0x5f53, 0x225: 0x5f53, 0x226: 0x5f53, 0x227: 0x5f53, 0x228: 0x5f53, 0x229: 0x5f53, + 0x22a: 0x5f53, 0x22b: 0x5f53, 0x22c: 0x2a12, 0x22d: 0x2512, 0x22e: 0x2712, 0x22f: 0x2512, + 0x230: 0x1fea, 0x231: 0x2012, 0x232: 0x2012, 0x233: 0x2012, 0x234: 0x2012, 0x235: 0x2012, + 0x236: 0x2012, 0x237: 0x2012, 0x238: 0x2012, 0x239: 0x2012, 0x23a: 0x2012, 0x23b: 0x2012, + 0x23c: 0x2012, 0x23d: 0x2012, 0x23e: 0x2012, 0x23f: 0x2012, + // Block 0x9, offset 0x240 + 0x240: 0x5f52, 0x241: 0x5f52, 0x242: 0x21aa, 0x243: 0x5f52, 0x244: 0x5f52, 0x245: 0x5f52, + 0x246: 0x5f52, 0x247: 0x5f52, 
0x248: 0x5f52, 0x249: 0x5f52, 0x24a: 0x5f52, 0x24b: 0x5f52, + 0x24c: 0x5652, 0x24d: 0x5952, 0x24e: 0x5c52, 0x24f: 0x1813, 0x250: 0x226a, 0x251: 0x232a, + 0x252: 0x0013, 0x253: 0x0013, 0x254: 0x0013, 0x255: 0x23ea, 0x256: 0x24aa, 0x257: 0x1812, + 0x258: 0x0113, 0x259: 0x0112, 0x25a: 0x0113, 0x25b: 0x0112, 0x25c: 0x0113, 0x25d: 0x0112, + 0x25e: 0x0113, 0x25f: 0x0112, 0x260: 0x0113, 0x261: 0x0112, 0x262: 0x0113, 0x263: 0x0112, + 0x264: 0x0113, 0x265: 0x0112, 0x266: 0x0113, 0x267: 0x0112, 0x268: 0x0113, 0x269: 0x0112, + 0x26a: 0x0113, 0x26b: 0x0112, 0x26c: 0x0113, 0x26d: 0x0112, 0x26e: 0x0113, 0x26f: 0x0112, + 0x270: 0x256a, 0x271: 0x262a, 0x272: 0x0b12, 0x273: 0x5352, 0x274: 0x6253, 0x275: 0x26ea, + 0x277: 0x0f13, 0x278: 0x0f12, 0x279: 0x0b13, 0x27a: 0x0113, 0x27b: 0x0112, + 0x27c: 0x0012, 0x27d: 0x4d53, 0x27e: 0x5053, 0x27f: 0x5053, + // Block 0xa, offset 0x280 + 0x280: 0x0812, 0x281: 0x0812, 0x282: 0x0812, 0x283: 0x0812, 0x284: 0x0812, 0x285: 0x0812, + 0x288: 0x0813, 0x289: 0x0813, 0x28a: 0x0813, 0x28b: 0x0813, + 0x28c: 0x0813, 0x28d: 0x0813, 0x290: 0x372a, 0x291: 0x0812, + 0x292: 0x386a, 0x293: 0x0812, 0x294: 0x3a2a, 0x295: 0x0812, 0x296: 0x3bea, 0x297: 0x0812, + 0x299: 0x0813, 0x29b: 0x0813, 0x29d: 0x0813, + 0x29f: 0x0813, 0x2a0: 0x0812, 0x2a1: 0x0812, 0x2a2: 0x0812, 0x2a3: 0x0812, + 0x2a4: 0x0812, 0x2a5: 0x0812, 0x2a6: 0x0812, 0x2a7: 0x0812, 0x2a8: 0x0813, 0x2a9: 0x0813, + 0x2aa: 0x0813, 0x2ab: 0x0813, 0x2ac: 0x0813, 0x2ad: 0x0813, 0x2ae: 0x0813, 0x2af: 0x0813, + 0x2b0: 0x8b52, 0x2b1: 0x8b52, 0x2b2: 0x8e52, 0x2b3: 0x8e52, 0x2b4: 0x9152, 0x2b5: 0x9152, + 0x2b6: 0x9452, 0x2b7: 0x9452, 0x2b8: 0x9752, 0x2b9: 0x9752, 0x2ba: 0x9a52, 0x2bb: 0x9a52, + 0x2bc: 0x4d52, 0x2bd: 0x4d52, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x3daa, 0x2c1: 0x3f8a, 0x2c2: 0x416a, 0x2c3: 0x434a, 0x2c4: 0x452a, 0x2c5: 0x470a, + 0x2c6: 0x48ea, 0x2c7: 0x4aca, 0x2c8: 0x4ca9, 0x2c9: 0x4e89, 0x2ca: 0x5069, 0x2cb: 0x5249, + 0x2cc: 0x5429, 0x2cd: 0x5609, 0x2ce: 0x57e9, 0x2cf: 0x59c9, 0x2d0: 0x5baa, 0x2d1: 0x5d8a, + 0x2d2: 0x5f6a, 0x2d3: 0x614a, 0x2d4: 0x632a, 0x2d5: 0x650a, 0x2d6: 0x66ea, 0x2d7: 0x68ca, + 0x2d8: 0x6aa9, 0x2d9: 0x6c89, 0x2da: 0x6e69, 0x2db: 0x7049, 0x2dc: 0x7229, 0x2dd: 0x7409, + 0x2de: 0x75e9, 0x2df: 0x77c9, 0x2e0: 0x79aa, 0x2e1: 0x7b8a, 0x2e2: 0x7d6a, 0x2e3: 0x7f4a, + 0x2e4: 0x812a, 0x2e5: 0x830a, 0x2e6: 0x84ea, 0x2e7: 0x86ca, 0x2e8: 0x88a9, 0x2e9: 0x8a89, + 0x2ea: 0x8c69, 0x2eb: 0x8e49, 0x2ec: 0x9029, 0x2ed: 0x9209, 0x2ee: 0x93e9, 0x2ef: 0x95c9, + 0x2f0: 0x0812, 0x2f1: 0x0812, 0x2f2: 0x97aa, 0x2f3: 0x99ca, 0x2f4: 0x9b6a, + 0x2f6: 0x9d2a, 0x2f7: 0x9e6a, 0x2f8: 0x0813, 0x2f9: 0x0813, 0x2fa: 0x8b53, 0x2fb: 0x8b53, + 0x2fc: 0xa0e9, 0x2fd: 0x0004, 0x2fe: 0xa28a, 0x2ff: 0x0004, + // Block 0xc, offset 0x300 + 0x300: 0x0004, 0x301: 0x0004, 0x302: 0xa34a, 0x303: 0xa56a, 0x304: 0xa70a, + 0x306: 0xa8ca, 0x307: 0xaa0a, 0x308: 0x8e53, 0x309: 0x8e53, 0x30a: 0x9153, 0x30b: 0x9153, + 0x30c: 0xac89, 0x30d: 0x0004, 0x30e: 0x0004, 0x30f: 0x0004, 0x310: 0x0812, 0x311: 0x0812, + 0x312: 0xae2a, 0x313: 0xafea, 0x316: 0xb1aa, 0x317: 0xb2ea, + 0x318: 0x0813, 0x319: 0x0813, 0x31a: 0x9453, 0x31b: 0x9453, 0x31d: 0x0004, + 0x31e: 0x0004, 0x31f: 0x0004, 0x320: 0x0812, 0x321: 0x0812, 0x322: 0xb4aa, 0x323: 0xb66a, + 0x324: 0xb82a, 0x325: 0x0912, 0x326: 0xb96a, 0x327: 0xbaaa, 0x328: 0x0813, 0x329: 0x0813, + 0x32a: 0x9a53, 0x32b: 0x9a53, 0x32c: 0x0913, 0x32d: 0x0004, 0x32e: 0x0004, 0x32f: 0x0004, + 0x332: 0xbc6a, 0x333: 0xbe8a, 0x334: 0xc02a, + 0x336: 0xc1ea, 0x337: 0xc32a, 0x338: 0x9753, 0x339: 0x9753, 0x33a: 0x4d53, 0x33b: 0x4d53, + 0x33c: 
0xc5a9, 0x33d: 0x0004, 0x33e: 0x0004, + // Block 0xd, offset 0x340 + 0x342: 0x0013, + 0x347: 0x0013, 0x34a: 0x0012, 0x34b: 0x0013, + 0x34c: 0x0013, 0x34d: 0x0013, 0x34e: 0x0012, 0x34f: 0x0012, 0x350: 0x0013, 0x351: 0x0013, + 0x352: 0x0013, 0x353: 0x0012, 0x355: 0x0013, + 0x359: 0x0013, 0x35a: 0x0013, 0x35b: 0x0013, 0x35c: 0x0013, 0x35d: 0x0013, + 0x364: 0x0013, 0x366: 0xc74b, 0x368: 0x0013, + 0x36a: 0xc80b, 0x36b: 0xc88b, 0x36c: 0x0013, 0x36d: 0x0013, 0x36f: 0x0012, + 0x370: 0x0013, 0x371: 0x0013, 0x372: 0x9d53, 0x373: 0x0013, 0x374: 0x0012, 0x375: 0x0010, + 0x376: 0x0010, 0x377: 0x0010, 0x378: 0x0010, 0x379: 0x0012, + 0x37c: 0x0012, 0x37d: 0x0012, 0x37e: 0x0013, 0x37f: 0x0013, + // Block 0xe, offset 0x380 + 0x380: 0x1a13, 0x381: 0x1a13, 0x382: 0x1e13, 0x383: 0x1e13, 0x384: 0x1a13, 0x385: 0x1a13, + 0x386: 0x2613, 0x387: 0x2613, 0x388: 0x2a13, 0x389: 0x2a13, 0x38a: 0x2e13, 0x38b: 0x2e13, + 0x38c: 0x2a13, 0x38d: 0x2a13, 0x38e: 0x2613, 0x38f: 0x2613, 0x390: 0xa052, 0x391: 0xa052, + 0x392: 0xa352, 0x393: 0xa352, 0x394: 0xa652, 0x395: 0xa652, 0x396: 0xa352, 0x397: 0xa352, + 0x398: 0xa052, 0x399: 0xa052, 0x39a: 0x1a12, 0x39b: 0x1a12, 0x39c: 0x1e12, 0x39d: 0x1e12, + 0x39e: 0x1a12, 0x39f: 0x1a12, 0x3a0: 0x2612, 0x3a1: 0x2612, 0x3a2: 0x2a12, 0x3a3: 0x2a12, + 0x3a4: 0x2e12, 0x3a5: 0x2e12, 0x3a6: 0x2a12, 0x3a7: 0x2a12, 0x3a8: 0x2612, 0x3a9: 0x2612, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x6552, 0x3c1: 0x6552, 0x3c2: 0x6552, 0x3c3: 0x6552, 0x3c4: 0x6552, 0x3c5: 0x6552, + 0x3c6: 0x6552, 0x3c7: 0x6552, 0x3c8: 0x6552, 0x3c9: 0x6552, 0x3ca: 0x6552, 0x3cb: 0x6552, + 0x3cc: 0x6552, 0x3cd: 0x6552, 0x3ce: 0x6552, 0x3cf: 0x6552, 0x3d0: 0xa952, 0x3d1: 0xa952, + 0x3d2: 0xa952, 0x3d3: 0xa952, 0x3d4: 0xa952, 0x3d5: 0xa952, 0x3d6: 0xa952, 0x3d7: 0xa952, + 0x3d8: 0xa952, 0x3d9: 0xa952, 0x3da: 0xa952, 0x3db: 0xa952, 0x3dc: 0xa952, 0x3dd: 0xa952, + 0x3de: 0xa952, 0x3e0: 0x0113, 0x3e1: 0x0112, 0x3e2: 0xc94b, 0x3e3: 0x8853, + 0x3e4: 0xca0b, 0x3e5: 0xcaca, 0x3e6: 0xcb4a, 0x3e7: 0x0f13, 0x3e8: 0x0f12, 0x3e9: 0x0313, + 0x3ea: 0x0312, 0x3eb: 0x0713, 0x3ec: 0x0712, 0x3ed: 0xcbcb, 0x3ee: 0xcc8b, 0x3ef: 0xcd4b, + 0x3f0: 0xce0b, 0x3f1: 0x0012, 0x3f2: 0x0113, 0x3f3: 0x0112, 0x3f4: 0x0012, 0x3f5: 0x0313, + 0x3f6: 0x0312, 0x3f7: 0x0012, 0x3f8: 0x0012, 0x3f9: 0x0012, 0x3fa: 0x0012, 0x3fb: 0x0012, + 0x3fc: 0x0015, 0x3fd: 0x0015, 0x3fe: 0xcecb, 0x3ff: 0xcf8b, + // Block 0x10, offset 0x400 + 0x400: 0x0113, 0x401: 0x0112, 0x402: 0x0113, 0x403: 0x0112, 0x404: 0x0113, 0x405: 0x0112, + 0x406: 0x0113, 0x407: 0x0112, 0x408: 0x0014, 0x409: 0x0004, 0x40a: 0x0004, 0x40b: 0x0713, + 0x40c: 0x0712, 0x40d: 0xd04b, 0x40e: 0x0012, 0x40f: 0x0010, 0x410: 0x0113, 0x411: 0x0112, + 0x412: 0x0113, 0x413: 0x0112, 0x414: 0x0012, 0x415: 0x0012, 0x416: 0x0113, 0x417: 0x0112, + 0x418: 0x0113, 0x419: 0x0112, 0x41a: 0x0113, 0x41b: 0x0112, 0x41c: 0x0113, 0x41d: 0x0112, + 0x41e: 0x0113, 0x41f: 0x0112, 0x420: 0x0113, 0x421: 0x0112, 0x422: 0x0113, 0x423: 0x0112, + 0x424: 0x0113, 0x425: 0x0112, 0x426: 0x0113, 0x427: 0x0112, 0x428: 0x0113, 0x429: 0x0112, + 0x42a: 0xd10b, 0x42b: 0xd1cb, 0x42c: 0xd28b, 0x42d: 0xd34b, 0x42e: 0xd40b, + 0x430: 0xd4cb, 0x431: 0xd58b, 0x432: 0xd64b, 0x433: 0xac53, 0x434: 0x0113, 0x435: 0x0112, + 0x436: 0x0113, 0x437: 0x0112, + // Block 0x11, offset 0x440 + 0x440: 0xd70a, 0x441: 0xd80a, 0x442: 0xd90a, 0x443: 0xda0a, 0x444: 0xdb6a, 0x445: 0xdcca, + 0x446: 0xddca, + 0x453: 0xdeca, 0x454: 0xe08a, 0x455: 0xe24a, 0x456: 0xe40a, 0x457: 0xe5ca, + 0x45d: 0x0010, + 0x45e: 0x0034, 0x45f: 0x0010, 0x460: 0x0010, 0x461: 0x0010, 0x462: 0x0010, 0x463: 
0x0010, + 0x464: 0x0010, 0x465: 0x0010, 0x466: 0x0010, 0x467: 0x0010, 0x468: 0x0010, + 0x46a: 0x0010, 0x46b: 0x0010, 0x46c: 0x0010, 0x46d: 0x0010, 0x46e: 0x0010, 0x46f: 0x0010, + 0x470: 0x0010, 0x471: 0x0010, 0x472: 0x0010, 0x473: 0x0010, 0x474: 0x0010, 0x475: 0x0010, + 0x476: 0x0010, 0x478: 0x0010, 0x479: 0x0010, 0x47a: 0x0010, 0x47b: 0x0010, + 0x47c: 0x0010, 0x47e: 0x0010, + // Block 0x12, offset 0x480 + 0x480: 0x2213, 0x481: 0x2213, 0x482: 0x2613, 0x483: 0x2613, 0x484: 0x2213, 0x485: 0x2213, + 0x486: 0x2e13, 0x487: 0x2e13, 0x488: 0x2213, 0x489: 0x2213, 0x48a: 0x2613, 0x48b: 0x2613, + 0x48c: 0x2213, 0x48d: 0x2213, 0x48e: 0x3e13, 0x48f: 0x3e13, 0x490: 0x2213, 0x491: 0x2213, + 0x492: 0x2613, 0x493: 0x2613, 0x494: 0x2213, 0x495: 0x2213, 0x496: 0x2e13, 0x497: 0x2e13, + 0x498: 0x2213, 0x499: 0x2213, 0x49a: 0x2613, 0x49b: 0x2613, 0x49c: 0x2213, 0x49d: 0x2213, + 0x49e: 0xb553, 0x49f: 0xb553, 0x4a0: 0xb853, 0x4a1: 0xb853, 0x4a2: 0x2212, 0x4a3: 0x2212, + 0x4a4: 0x2612, 0x4a5: 0x2612, 0x4a6: 0x2212, 0x4a7: 0x2212, 0x4a8: 0x2e12, 0x4a9: 0x2e12, + 0x4aa: 0x2212, 0x4ab: 0x2212, 0x4ac: 0x2612, 0x4ad: 0x2612, 0x4ae: 0x2212, 0x4af: 0x2212, + 0x4b0: 0x3e12, 0x4b1: 0x3e12, 0x4b2: 0x2212, 0x4b3: 0x2212, 0x4b4: 0x2612, 0x4b5: 0x2612, + 0x4b6: 0x2212, 0x4b7: 0x2212, 0x4b8: 0x2e12, 0x4b9: 0x2e12, 0x4ba: 0x2212, 0x4bb: 0x2212, + 0x4bc: 0x2612, 0x4bd: 0x2612, 0x4be: 0x2212, 0x4bf: 0x2212, + // Block 0x13, offset 0x4c0 + 0x4c2: 0x0010, + 0x4c7: 0x0010, 0x4c9: 0x0010, 0x4cb: 0x0010, + 0x4cd: 0x0010, 0x4ce: 0x0010, 0x4cf: 0x0010, 0x4d1: 0x0010, + 0x4d2: 0x0010, 0x4d4: 0x0010, 0x4d7: 0x0010, + 0x4d9: 0x0010, 0x4db: 0x0010, 0x4dd: 0x0010, + 0x4df: 0x0010, 0x4e1: 0x0010, 0x4e2: 0x0010, + 0x4e4: 0x0010, 0x4e7: 0x0010, 0x4e8: 0x0010, 0x4e9: 0x0010, + 0x4ea: 0x0010, 0x4ec: 0x0010, 0x4ed: 0x0010, 0x4ee: 0x0010, 0x4ef: 0x0010, + 0x4f0: 0x0010, 0x4f1: 0x0010, 0x4f2: 0x0010, 0x4f4: 0x0010, 0x4f5: 0x0010, + 0x4f6: 0x0010, 0x4f7: 0x0010, 0x4f9: 0x0010, 0x4fa: 0x0010, 0x4fb: 0x0010, + 0x4fc: 0x0010, 0x4fe: 0x0010, +} + +// caseIndex: 25 blocks, 1600 entries, 3200 bytes +// Block 0 is the zero block. 
+var caseIndex = [1600]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x12, 0xc3: 0x13, 0xc4: 0x14, 0xc5: 0x15, 0xc6: 0x01, 0xc7: 0x02, + 0xc8: 0x16, 0xc9: 0x03, 0xca: 0x04, 0xcb: 0x17, 0xcc: 0x18, 0xcd: 0x05, 0xce: 0x06, 0xcf: 0x07, + 0xd0: 0x19, 0xd1: 0x1a, 0xd2: 0x1b, 0xd3: 0x1c, 0xd4: 0x1d, 0xd5: 0x1e, 0xd6: 0x1f, 0xd7: 0x20, + 0xd8: 0x21, 0xd9: 0x22, 0xda: 0x23, 0xdb: 0x24, 0xdc: 0x25, 0xdd: 0x26, 0xde: 0x27, 0xdf: 0x28, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x08, 0xef: 0x09, + 0xf0: 0x14, 0xf3: 0x16, + // Block 0x4, offset 0x100 + 0x120: 0x29, 0x121: 0x2a, 0x122: 0x2b, 0x123: 0x2c, 0x124: 0x2d, 0x125: 0x2e, 0x126: 0x2f, 0x127: 0x30, + 0x128: 0x31, 0x129: 0x32, 0x12a: 0x33, 0x12b: 0x34, 0x12c: 0x35, 0x12d: 0x36, 0x12e: 0x37, 0x12f: 0x38, + 0x130: 0x39, 0x131: 0x3a, 0x132: 0x3b, 0x133: 0x3c, 0x134: 0x3d, 0x135: 0x3e, 0x136: 0x3f, 0x137: 0x40, + 0x138: 0x41, 0x139: 0x42, 0x13a: 0x43, 0x13b: 0x44, 0x13c: 0x45, 0x13d: 0x46, 0x13e: 0x47, 0x13f: 0x48, + // Block 0x5, offset 0x140 + 0x140: 0x49, 0x141: 0x4a, 0x142: 0x4b, 0x143: 0x4c, 0x144: 0x23, 0x145: 0x23, 0x146: 0x23, 0x147: 0x23, + 0x148: 0x23, 0x149: 0x4d, 0x14a: 0x4e, 0x14b: 0x4f, 0x14c: 0x50, 0x14d: 0x51, 0x14e: 0x52, 0x14f: 0x53, + 0x150: 0x54, 0x151: 0x23, 0x152: 0x23, 0x153: 0x23, 0x154: 0x23, 0x155: 0x23, 0x156: 0x23, 0x157: 0x23, + 0x158: 0x23, 0x159: 0x55, 0x15a: 0x56, 0x15b: 0x57, 0x15c: 0x58, 0x15d: 0x59, 0x15e: 0x5a, 0x15f: 0x5b, + 0x160: 0x5c, 0x161: 0x5d, 0x162: 0x5e, 0x163: 0x5f, 0x164: 0x60, 0x165: 0x61, 0x167: 0x62, + 0x168: 0x63, 0x169: 0x64, 0x16a: 0x65, 0x16c: 0x66, 0x16d: 0x67, 0x16e: 0x68, 0x16f: 0x69, + 0x170: 0x6a, 0x171: 0x6b, 0x172: 0x6c, 0x173: 0x6d, 0x174: 0x6e, 0x175: 0x6f, 0x176: 0x70, 0x177: 0x71, + 0x178: 0x72, 0x179: 0x72, 0x17a: 0x73, 0x17b: 0x72, 0x17c: 0x74, 0x17d: 0x08, 0x17e: 0x09, 0x17f: 0x0a, + // Block 0x6, offset 0x180 + 0x180: 0x75, 0x181: 0x76, 0x182: 0x77, 0x183: 0x78, 0x184: 0x0b, 0x185: 0x79, 0x186: 0x7a, + 0x192: 0x7b, 0x193: 0x0c, + 0x1b0: 0x7c, 0x1b1: 0x0d, 0x1b2: 0x72, 0x1b3: 0x7d, 0x1b4: 0x7e, 0x1b5: 0x7f, 0x1b6: 0x80, 0x1b7: 0x81, + 0x1b8: 0x82, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x83, 0x1c2: 0x84, 0x1c3: 0x85, 0x1c4: 0x86, 0x1c5: 0x23, 0x1c6: 0x87, + // Block 0x8, offset 0x200 + 0x200: 0x88, 0x201: 0x23, 0x202: 0x23, 0x203: 0x23, 0x204: 0x23, 0x205: 0x23, 0x206: 0x23, 0x207: 0x23, + 0x208: 0x23, 0x209: 0x23, 0x20a: 0x23, 0x20b: 0x23, 0x20c: 0x23, 0x20d: 0x23, 0x20e: 0x23, 0x20f: 0x23, + 0x210: 0x23, 0x211: 0x23, 0x212: 0x89, 0x213: 0x8a, 0x214: 0x23, 0x215: 0x23, 0x216: 0x23, 0x217: 0x23, + 0x218: 0x8b, 0x219: 0x8c, 0x21a: 0x8d, 0x21b: 0x8e, 0x21c: 0x8f, 0x21d: 0x90, 0x21e: 0x0e, 0x21f: 0x91, + 0x220: 0x92, 0x221: 0x93, 0x222: 0x23, 0x223: 0x94, 0x224: 0x95, 0x225: 0x96, 0x226: 0x97, 0x227: 0x98, + 0x228: 0x99, 0x229: 0x9a, 0x22a: 0x9b, 0x22b: 0x9c, 0x22c: 0x9d, 0x22d: 0x9e, 0x22e: 0x9f, 0x22f: 0xa0, + 0x230: 0x23, 0x231: 0x23, 0x232: 0x23, 0x233: 0x23, 0x234: 0x23, 0x235: 0x23, 0x236: 0x23, 0x237: 0x23, + 0x238: 0x23, 0x239: 0x23, 0x23a: 0x23, 0x23b: 0x23, 0x23c: 0x23, 0x23d: 0x23, 0x23e: 0x23, 0x23f: 0x23, + // Block 0x9, offset 0x240 + 0x240: 0x23, 0x241: 0x23, 0x242: 0x23, 0x243: 0x23, 0x244: 0x23, 0x245: 0x23, 0x246: 0x23, 0x247: 0x23, + 0x248: 0x23, 0x249: 0x23, 0x24a: 0x23, 0x24b: 0x23, 0x24c: 0x23, 0x24d: 0x23, 0x24e: 0x23, 0x24f: 0x23, + 0x250: 0x23, 0x251: 0x23, 0x252: 0x23, 0x253: 0x23, 0x254: 0x23, 0x255: 0x23, 0x256: 0x23, 0x257: 
0x23, + 0x258: 0x23, 0x259: 0x23, 0x25a: 0x23, 0x25b: 0x23, 0x25c: 0x23, 0x25d: 0x23, 0x25e: 0x23, 0x25f: 0x23, + 0x260: 0x23, 0x261: 0x23, 0x262: 0x23, 0x263: 0x23, 0x264: 0x23, 0x265: 0x23, 0x266: 0x23, 0x267: 0x23, + 0x268: 0x23, 0x269: 0x23, 0x26a: 0x23, 0x26b: 0x23, 0x26c: 0x23, 0x26d: 0x23, 0x26e: 0x23, 0x26f: 0x23, + 0x270: 0x23, 0x271: 0x23, 0x272: 0x23, 0x273: 0x23, 0x274: 0x23, 0x275: 0x23, 0x276: 0x23, 0x277: 0x23, + 0x278: 0x23, 0x279: 0x23, 0x27a: 0x23, 0x27b: 0x23, 0x27c: 0x23, 0x27d: 0x23, 0x27e: 0x23, 0x27f: 0x23, + // Block 0xa, offset 0x280 + 0x280: 0x23, 0x281: 0x23, 0x282: 0x23, 0x283: 0x23, 0x284: 0x23, 0x285: 0x23, 0x286: 0x23, 0x287: 0x23, + 0x288: 0x23, 0x289: 0x23, 0x28a: 0x23, 0x28b: 0x23, 0x28c: 0x23, 0x28d: 0x23, 0x28e: 0x23, 0x28f: 0x23, + 0x290: 0x23, 0x291: 0x23, 0x292: 0x23, 0x293: 0x23, 0x294: 0x23, 0x295: 0x23, 0x296: 0x23, 0x297: 0x23, + 0x298: 0x23, 0x299: 0x23, 0x29a: 0x23, 0x29b: 0x23, 0x29c: 0x23, 0x29d: 0x23, 0x29e: 0xa1, 0x29f: 0xa2, + // Block 0xb, offset 0x2c0 + 0x2ec: 0x0f, 0x2ed: 0xa3, 0x2ee: 0xa4, 0x2ef: 0xa5, + 0x2f0: 0x23, 0x2f1: 0x23, 0x2f2: 0x23, 0x2f3: 0x23, 0x2f4: 0xa6, 0x2f5: 0xa7, 0x2f6: 0xa8, 0x2f7: 0xa9, + 0x2f8: 0xaa, 0x2f9: 0xab, 0x2fa: 0x23, 0x2fb: 0xac, 0x2fc: 0xad, 0x2fd: 0xae, 0x2fe: 0xaf, 0x2ff: 0xb0, + // Block 0xc, offset 0x300 + 0x300: 0xb1, 0x301: 0xb2, 0x302: 0x23, 0x303: 0xb3, 0x305: 0xb4, 0x307: 0xb5, + 0x30a: 0xb6, 0x30b: 0xb7, 0x30c: 0xb8, 0x30d: 0xb9, 0x30e: 0xba, 0x30f: 0xbb, + 0x310: 0xbc, 0x311: 0xbd, 0x312: 0xbe, 0x313: 0xbf, 0x314: 0xc0, 0x315: 0xc1, + 0x318: 0x23, 0x319: 0x23, 0x31a: 0x23, 0x31b: 0x23, 0x31c: 0xc2, 0x31d: 0xc3, + 0x320: 0xc4, 0x321: 0xc5, 0x322: 0xc6, 0x323: 0xc7, 0x324: 0xc8, 0x326: 0xc9, + 0x328: 0xca, 0x329: 0xcb, 0x32a: 0xcc, 0x32b: 0xcd, 0x32c: 0x5f, 0x32d: 0xce, 0x32e: 0xcf, + 0x330: 0x23, 0x331: 0xd0, 0x332: 0xd1, 0x333: 0xd2, + // Block 0xd, offset 0x340 + 0x340: 0xd3, 0x341: 0xd4, 0x342: 0xd5, 0x343: 0xd6, 0x344: 0xd7, 0x345: 0xd8, 0x346: 0xd9, 0x347: 0xda, + 0x348: 0xdb, 0x34a: 0xdc, 0x34b: 0xdd, 0x34c: 0xde, 0x34d: 0xdf, + 0x350: 0xe0, 0x351: 0xe1, 0x352: 0xe2, 0x353: 0xe3, 0x356: 0xe4, 0x357: 0xe5, + 0x358: 0xe6, 0x359: 0xe7, 0x35a: 0xe8, 0x35b: 0xe9, 0x35c: 0xea, + 0x362: 0xeb, 0x363: 0xec, + 0x36b: 0xed, + 0x370: 0xee, 0x371: 0xef, 0x372: 0xf0, + // Block 0xe, offset 0x380 + 0x380: 0x23, 0x381: 0x23, 0x382: 0x23, 0x383: 0x23, 0x384: 0x23, 0x385: 0x23, 0x386: 0x23, 0x387: 0x23, + 0x388: 0x23, 0x389: 0x23, 0x38a: 0x23, 0x38b: 0x23, 0x38c: 0x23, 0x38d: 0x23, 0x38e: 0xf1, + 0x390: 0x23, 0x391: 0xf2, 0x392: 0x23, 0x393: 0x23, 0x394: 0x23, 0x395: 0xf3, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x23, 0x3c1: 0x23, 0x3c2: 0x23, 0x3c3: 0x23, 0x3c4: 0x23, 0x3c5: 0x23, 0x3c6: 0x23, 0x3c7: 0x23, + 0x3c8: 0x23, 0x3c9: 0x23, 0x3ca: 0x23, 0x3cb: 0x23, 0x3cc: 0x23, 0x3cd: 0x23, 0x3ce: 0x23, 0x3cf: 0x23, + 0x3d0: 0xf2, + // Block 0x10, offset 0x400 + 0x410: 0x23, 0x411: 0x23, 0x412: 0x23, 0x413: 0x23, 0x414: 0x23, 0x415: 0x23, 0x416: 0x23, 0x417: 0x23, + 0x418: 0x23, 0x419: 0xf4, + // Block 0x11, offset 0x440 + 0x460: 0x23, 0x461: 0x23, 0x462: 0x23, 0x463: 0x23, 0x464: 0x23, 0x465: 0x23, 0x466: 0x23, 0x467: 0x23, + 0x468: 0xed, 0x469: 0xf5, 0x46b: 0xf6, 0x46c: 0xf7, 0x46d: 0xf8, 0x46e: 0xf9, + 0x47c: 0x23, 0x47d: 0xfa, 0x47e: 0xfb, 0x47f: 0xfc, + // Block 0x12, offset 0x480 + 0x4b0: 0x23, 0x4b1: 0xfd, 0x4b2: 0xfe, + // Block 0x13, offset 0x4c0 + 0x4c5: 0xff, 0x4c6: 0x100, + 0x4c9: 0x101, + 0x4d0: 0x102, 0x4d1: 0x103, 0x4d2: 0x104, 0x4d3: 0x105, 0x4d4: 0x106, 0x4d5: 0x107, 0x4d6: 0x108, 0x4d7: 0x109, + 
0x4d8: 0x10a, 0x4d9: 0x10b, 0x4da: 0x10c, 0x4db: 0x10d, 0x4dc: 0x10e, 0x4dd: 0x10f, 0x4de: 0x110, 0x4df: 0x111, + 0x4e8: 0x112, 0x4e9: 0x113, 0x4ea: 0x114, + // Block 0x14, offset 0x500 + 0x500: 0x115, + 0x520: 0x23, 0x521: 0x23, 0x522: 0x23, 0x523: 0x116, 0x524: 0x10, 0x525: 0x117, + 0x538: 0x118, 0x539: 0x11, 0x53a: 0x119, + // Block 0x15, offset 0x540 + 0x544: 0x11a, 0x545: 0x11b, 0x546: 0x11c, + 0x54f: 0x11d, + // Block 0x16, offset 0x580 + 0x590: 0x0a, 0x591: 0x0b, 0x592: 0x0c, 0x593: 0x0d, 0x594: 0x0e, 0x596: 0x0f, + 0x59b: 0x10, 0x59d: 0x11, 0x59e: 0x12, 0x59f: 0x13, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x11e, 0x5c1: 0x11f, 0x5c4: 0x11f, 0x5c5: 0x11f, 0x5c6: 0x11f, 0x5c7: 0x120, + // Block 0x18, offset 0x600 + 0x620: 0x15, +} + +// sparseOffsets: 272 entries, 544 bytes +var sparseOffsets = []uint16{0x0, 0x9, 0xf, 0x18, 0x24, 0x2e, 0x3a, 0x3d, 0x41, 0x44, 0x48, 0x52, 0x54, 0x59, 0x69, 0x70, 0x75, 0x83, 0x84, 0x92, 0xa1, 0xab, 0xae, 0xb4, 0xbc, 0xbe, 0xc0, 0xce, 0xd4, 0xe2, 0xed, 0xf8, 0x103, 0x10f, 0x119, 0x124, 0x12f, 0x13b, 0x147, 0x14f, 0x157, 0x161, 0x16c, 0x178, 0x17e, 0x189, 0x18e, 0x196, 0x199, 0x19e, 0x1a2, 0x1a6, 0x1ad, 0x1b6, 0x1be, 0x1bf, 0x1c8, 0x1cf, 0x1d7, 0x1dd, 0x1e3, 0x1e8, 0x1ec, 0x1ef, 0x1f1, 0x1f4, 0x1f9, 0x1fa, 0x1fc, 0x1fe, 0x200, 0x207, 0x20c, 0x210, 0x219, 0x21c, 0x21f, 0x225, 0x226, 0x231, 0x232, 0x233, 0x238, 0x245, 0x24d, 0x255, 0x25e, 0x267, 0x270, 0x275, 0x278, 0x281, 0x28e, 0x290, 0x297, 0x299, 0x2a4, 0x2a5, 0x2b0, 0x2b8, 0x2c0, 0x2c6, 0x2c7, 0x2d5, 0x2da, 0x2dd, 0x2e2, 0x2e6, 0x2ec, 0x2f1, 0x2f4, 0x2f9, 0x2fe, 0x2ff, 0x305, 0x307, 0x308, 0x30a, 0x30c, 0x30f, 0x310, 0x312, 0x315, 0x31b, 0x31f, 0x321, 0x327, 0x32e, 0x332, 0x33b, 0x33c, 0x344, 0x348, 0x34d, 0x355, 0x35b, 0x361, 0x36b, 0x370, 0x379, 0x37f, 0x386, 0x38a, 0x392, 0x394, 0x396, 0x399, 0x39b, 0x39d, 0x39e, 0x39f, 0x3a1, 0x3a3, 0x3a9, 0x3ae, 0x3b0, 0x3b6, 0x3b9, 0x3bb, 0x3c1, 0x3c6, 0x3c8, 0x3c9, 0x3ca, 0x3cb, 0x3cd, 0x3cf, 0x3d1, 0x3d4, 0x3d6, 0x3d9, 0x3e1, 0x3e4, 0x3e8, 0x3f0, 0x3f2, 0x3f3, 0x3f4, 0x3f6, 0x3fc, 0x3fe, 0x3ff, 0x401, 0x403, 0x405, 0x412, 0x413, 0x414, 0x418, 0x41a, 0x41b, 0x41c, 0x41d, 0x41e, 0x422, 0x426, 0x42c, 0x42e, 0x435, 0x438, 0x43c, 0x442, 0x44b, 0x451, 0x457, 0x461, 0x46b, 0x46d, 0x474, 0x47a, 0x480, 0x486, 0x489, 0x48f, 0x492, 0x49a, 0x49b, 0x4a2, 0x4a3, 0x4a6, 0x4a7, 0x4ad, 0x4b0, 0x4b8, 0x4b9, 0x4ba, 0x4bb, 0x4bc, 0x4be, 0x4c0, 0x4c2, 0x4c6, 0x4c7, 0x4c9, 0x4ca, 0x4cb, 0x4cd, 0x4d2, 0x4d7, 0x4db, 0x4dc, 0x4df, 0x4e3, 0x4ee, 0x4f2, 0x4fa, 0x4ff, 0x503, 0x506, 0x50a, 0x50d, 0x510, 0x515, 0x519, 0x51d, 0x521, 0x525, 0x527, 0x529, 0x52c, 0x531, 0x533, 0x538, 0x541, 0x546, 0x547, 0x54a, 0x54b, 0x54c, 0x54e, 0x54f, 0x550} + +// sparseValues: 1360 entries, 5440 bytes +var sparseValues = [1360]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0004, lo: 0xa8, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xaa}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0004, lo: 0xaf, hi: 0xaf}, + {value: 0x0004, lo: 0xb4, hi: 0xb4}, + {value: 0x002a, lo: 0xb5, hi: 0xb5}, + {value: 0x0054, lo: 0xb7, hi: 0xb7}, + {value: 0x0004, lo: 0xb8, hi: 0xb8}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + // Block 0x1, offset 0x9 + {value: 0x2013, lo: 0x80, hi: 0x96}, + {value: 0x2013, lo: 0x98, hi: 0x9e}, + {value: 0x00ea, lo: 0x9f, hi: 0x9f}, + {value: 0x2012, lo: 0xa0, hi: 0xb6}, + {value: 0x2012, lo: 0xb8, hi: 0xbe}, + {value: 0x0252, lo: 0xbf, hi: 0xbf}, + // Block 0x2, offset 0xf + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x01eb, lo: 0xb0, hi: 0xb0}, + {value: 0x02ea, lo: 0xb1, hi: 0xb1}, 
+ {value: 0x0117, lo: 0xb2, hi: 0xb7}, + {value: 0x0012, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x0553, lo: 0xbf, hi: 0xbf}, + // Block 0x3, offset 0x18 + {value: 0x0552, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x034a, lo: 0x89, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xb7}, + {value: 0x0253, lo: 0xb8, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x0316, lo: 0xbd, hi: 0xbe}, + {value: 0x044a, lo: 0xbf, hi: 0xbf}, + // Block 0x4, offset 0x24 + {value: 0x0117, lo: 0x80, hi: 0x9f}, + {value: 0x2f53, lo: 0xa0, hi: 0xa0}, + {value: 0x0012, lo: 0xa1, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xb3}, + {value: 0x0012, lo: 0xb4, hi: 0xb9}, + {value: 0x10cb, lo: 0xba, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x2953, lo: 0xbd, hi: 0xbd}, + {value: 0x11cb, lo: 0xbe, hi: 0xbe}, + {value: 0x12ca, lo: 0xbf, hi: 0xbf}, + // Block 0x5, offset 0x2e + {value: 0x0015, lo: 0x80, hi: 0x81}, + {value: 0x0004, lo: 0x82, hi: 0x85}, + {value: 0x0014, lo: 0x86, hi: 0x91}, + {value: 0x0004, lo: 0x92, hi: 0x96}, + {value: 0x0054, lo: 0x97, hi: 0x97}, + {value: 0x0004, lo: 0x98, hi: 0x9f}, + {value: 0x0015, lo: 0xa0, hi: 0xa4}, + {value: 0x0004, lo: 0xa5, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xac}, + {value: 0x0004, lo: 0xad, hi: 0xad}, + {value: 0x0014, lo: 0xae, hi: 0xae}, + {value: 0x0004, lo: 0xaf, hi: 0xbf}, + // Block 0x6, offset 0x3a + {value: 0x0024, lo: 0x80, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbf}, + // Block 0x7, offset 0x3d + {value: 0x6553, lo: 0x80, hi: 0x8f}, + {value: 0x2013, lo: 0x90, hi: 0x9f}, + {value: 0x5f53, lo: 0xa0, hi: 0xaf}, + {value: 0x2012, lo: 0xb0, hi: 0xbf}, + // Block 0x8, offset 0x41 + {value: 0x5f52, lo: 0x80, hi: 0x8f}, + {value: 0x6552, lo: 0x90, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x9, offset 0x44 + {value: 0x0117, lo: 0x80, hi: 0x81}, + {value: 0x0024, lo: 0x83, hi: 0x87}, + {value: 0x0014, lo: 0x88, hi: 0x89}, + {value: 0x0117, lo: 0x8a, hi: 0xbf}, + // Block 0xa, offset 0x48 + {value: 0x0f13, lo: 0x80, hi: 0x80}, + {value: 0x0316, lo: 0x81, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0316, lo: 0x85, hi: 0x86}, + {value: 0x0f16, lo: 0x87, hi: 0x88}, + {value: 0x0316, lo: 0x89, hi: 0x8a}, + {value: 0x0716, lo: 0x8b, hi: 0x8c}, + {value: 0x0316, lo: 0x8d, hi: 0x8e}, + {value: 0x0f12, lo: 0x8f, hi: 0x8f}, + {value: 0x0117, lo: 0x90, hi: 0xbf}, + // Block 0xb, offset 0x52 + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x6553, lo: 0xb1, hi: 0xbf}, + // Block 0xc, offset 0x54 + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6853, lo: 0x90, hi: 0x96}, + {value: 0x0014, lo: 0x99, hi: 0x99}, + {value: 0x6552, lo: 0xa1, hi: 0xaf}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0xd, offset 0x59 + {value: 0x6852, lo: 0x80, hi: 0x86}, + {value: 0x27aa, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x91, hi: 0x91}, + {value: 0x0024, lo: 0x92, hi: 0x95}, + {value: 0x0034, lo: 0x96, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x99}, + {value: 0x0034, lo: 0x9a, hi: 0x9b}, + {value: 0x0024, lo: 0x9c, hi: 0xa1}, + {value: 0x0034, lo: 0xa2, hi: 0xa7}, + {value: 0x0024, lo: 0xa8, hi: 0xa9}, + {value: 0x0034, lo: 0xaa, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 
0x0034, lo: 0xad, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xaf}, + {value: 0x0034, lo: 0xb0, hi: 0xbd}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xe, offset 0x69 + {value: 0x0034, lo: 0x81, hi: 0x82}, + {value: 0x0024, lo: 0x84, hi: 0x84}, + {value: 0x0034, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xb3}, + {value: 0x0054, lo: 0xb4, hi: 0xb4}, + // Block 0xf, offset 0x70 + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0024, lo: 0x90, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0014, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x10, offset 0x75 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x8a}, + {value: 0x0034, lo: 0x8b, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9c}, + {value: 0x0024, lo: 0x9d, hi: 0x9e}, + {value: 0x0034, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0034, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x11, offset 0x83 + {value: 0x0010, lo: 0x80, hi: 0xbf}, + // Block 0x12, offset 0x84 + {value: 0x0010, lo: 0x80, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0024, lo: 0x9f, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xaa, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x13, offset 0x92 + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0034, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0034, lo: 0xb1, hi: 0xb1}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbc}, + {value: 0x0024, lo: 0xbd, hi: 0xbd}, + {value: 0x0034, lo: 0xbe, hi: 0xbe}, + {value: 0x0024, lo: 0xbf, hi: 0xbf}, + // Block 0x14, offset 0xa1 + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0024, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x88}, + {value: 0x0024, lo: 0x89, hi: 0x8a}, + {value: 0x0010, lo: 0x8d, hi: 0xbf}, + // Block 0x15, offset 0xab + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x16, offset 0xae + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0024, lo: 0xab, hi: 0xb1}, + {value: 0x0034, lo: 0xb2, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + // Block 0x17, offset 0xb4 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0024, lo: 0x96, hi: 0x99}, + {value: 0x0014, lo: 0x9a, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0xa3}, + {value: 0x0014, lo: 0xa4, hi: 0xa4}, 
+ {value: 0x0024, lo: 0xa5, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xad}, + // Block 0x18, offset 0xbc + {value: 0x0010, lo: 0x80, hi: 0x98}, + {value: 0x0034, lo: 0x99, hi: 0x9b}, + // Block 0x19, offset 0xbe + {value: 0x0010, lo: 0xa0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbd}, + // Block 0x1a, offset 0xc0 + {value: 0x0024, lo: 0x94, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0034, lo: 0xa3, hi: 0xa3}, + {value: 0x0024, lo: 0xa4, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0024, lo: 0xaa, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xb2}, + {value: 0x0024, lo: 0xb3, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbf}, + // Block 0x1b, offset 0xce + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1c, offset 0xd4 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0024, lo: 0x93, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x98, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0x1d, offset 0xe2 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb6, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x1e, offset 0xed + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xb1}, + // Block 0x1f, offset 0xf8 + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x20, offset 0x103 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x91, hi: 0x91}, + {value: 0x0010, lo: 0x99, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb1}, + {value: 
0x0010, lo: 0xb2, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x21, offset 0x10f + {value: 0x0014, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x22, offset 0x119 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x85}, + {value: 0x0014, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x89, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + // Block 0x23, offset 0x124 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x24, offset 0x12f + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9c, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + // Block 0x25, offset 0x13b + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8a}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + {value: 0x0010, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0010, lo: 0xa8, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x26, offset 0x147 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x82}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x27, offset 0x14f + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb9}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbf}, + // Block 0x28, offset 0x157 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x88}, + {value: 0x0014, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0034, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + // Block 0x29, offset 0x161 + {value: 0x0010, 
lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x2a, offset 0x16c + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x95, hi: 0x96}, + {value: 0x0010, lo: 0x9e, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb1, hi: 0xb2}, + // Block 0x2b, offset 0x178 + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8e, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0xba}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x2c, offset 0x17e + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8e, hi: 0x8e}, + {value: 0x0010, lo: 0x94, hi: 0x97}, + {value: 0x0010, lo: 0x9f, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa3}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xba, hi: 0xbf}, + // Block 0x2d, offset 0x189 + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x96}, + {value: 0x0010, lo: 0x9a, hi: 0xb1}, + {value: 0x0010, lo: 0xb3, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x2e, offset 0x18e + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0010, lo: 0x8f, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x94}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9f}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + // Block 0x2f, offset 0x196 + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xba}, + // Block 0x30, offset 0x199 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x87}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x31, offset 0x19e + {value: 0x0014, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb4, hi: 0xb7}, + {value: 0x0034, lo: 0xb8, hi: 0xb9}, + {value: 0x0014, lo: 0xbb, hi: 0xbc}, + // Block 0x32, offset 0x1a2 + {value: 0x0004, lo: 0x86, hi: 0x86}, + {value: 0x0034, lo: 0x88, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x33, offset 0x1a6 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0034, lo: 0x98, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0034, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x34, offset 0x1ad + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xac}, + {value: 0x0034, lo: 0xb1, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xba, hi: 0xbd}, + 
{value: 0x0014, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x1b6 + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0024, lo: 0x82, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0024, lo: 0x86, hi: 0x87}, + {value: 0x0010, lo: 0x88, hi: 0x8c}, + {value: 0x0014, lo: 0x8d, hi: 0x97}, + {value: 0x0014, lo: 0x99, hi: 0xbc}, + // Block 0x36, offset 0x1be + {value: 0x0034, lo: 0x86, hi: 0x86}, + // Block 0x37, offset 0x1bf + {value: 0x0010, lo: 0xab, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + {value: 0x0010, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbe}, + // Block 0x38, offset 0x1c8 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x96, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x99}, + {value: 0x0014, lo: 0x9e, hi: 0xa0}, + {value: 0x0010, lo: 0xa2, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xad}, + {value: 0x0014, lo: 0xb1, hi: 0xb4}, + // Block 0x39, offset 0x1cf + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x6c53, lo: 0xa0, hi: 0xbf}, + // Block 0x3a, offset 0x1d7 + {value: 0x7053, lo: 0x80, hi: 0x85}, + {value: 0x7053, lo: 0x87, hi: 0x87}, + {value: 0x7053, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xba}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x3b, offset 0x1dd + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x9a, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x3c, offset 0x1e3 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x3d, offset 0x1e8 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x82, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3e, offset 0x1ec + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0010, lo: 0x92, hi: 0x95}, + {value: 0x0010, lo: 0x98, hi: 0xbf}, + // Block 0x3f, offset 0x1ef + {value: 0x0010, lo: 0x80, hi: 0x9a}, + {value: 0x0024, lo: 0x9d, hi: 0x9f}, + // Block 0x40, offset 0x1f1 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x7453, lo: 0xa0, hi: 0xaf}, + {value: 0x7853, lo: 0xb0, hi: 0xbf}, + // Block 0x41, offset 0x1f4 + {value: 0x7c53, lo: 0x80, hi: 0x8f}, + {value: 0x8053, lo: 0x90, hi: 0x9f}, + {value: 0x7c53, lo: 0xa0, hi: 0xaf}, + {value: 0x0813, lo: 0xb0, hi: 0xb5}, + {value: 0x0892, lo: 0xb8, hi: 0xbd}, + // Block 0x42, offset 0x1f9 + {value: 0x0010, lo: 0x81, hi: 0xbf}, + // Block 0x43, offset 0x1fa + {value: 0x0010, lo: 0x80, hi: 0xac}, + {value: 0x0010, lo: 0xaf, hi: 0xbf}, + // Block 0x44, offset 0x1fc + {value: 0x0010, lo: 0x81, hi: 0x9a}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x45, offset 0x1fe + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xae, hi: 0xb8}, + // Block 0x46, offset 0x200 + {value: 0x0010, lo: 0x80, hi: 0x8c}, + {value: 
0x0010, lo: 0x8e, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0034, lo: 0x94, hi: 0x94}, + {value: 0x0010, lo: 0xa0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + // Block 0x47, offset 0x207 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0014, lo: 0x92, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xac}, + {value: 0x0010, lo: 0xae, hi: 0xb0}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + // Block 0x48, offset 0x20c + {value: 0x0014, lo: 0xb4, hi: 0xb5}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0x49, offset 0x210 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0014, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0014, lo: 0x89, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x92}, + {value: 0x0014, lo: 0x93, hi: 0x93}, + {value: 0x0004, lo: 0x97, hi: 0x97}, + {value: 0x0024, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0x4a, offset 0x219 + {value: 0x0014, lo: 0x8b, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x4b, offset 0x21c + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb7}, + // Block 0x4c, offset 0x21f + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x4d, offset 0x225 + {value: 0x0010, lo: 0x80, hi: 0xb5}, + // Block 0x4e, offset 0x226 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xb9}, + {value: 0x0024, lo: 0xba, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbb}, + // Block 0x4f, offset 0x231 + {value: 0x0010, lo: 0x86, hi: 0x8f}, + // Block 0x50, offset 0x232 + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x51, offset 0x233 + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0024, lo: 0x97, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x98}, + {value: 0x0010, lo: 0x99, hi: 0x9a}, + {value: 0x0014, lo: 0x9b, hi: 0x9b}, + // Block 0x52, offset 0x238 + {value: 0x0010, lo: 0x95, hi: 0x95}, + {value: 0x0014, lo: 0x96, hi: 0x96}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0014, lo: 0x98, hi: 0x9e}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa2}, + {value: 0x0010, lo: 0xa3, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xac}, + {value: 0x0010, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0024, lo: 0xb5, hi: 0xbc}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x53, offset 0x245 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xa7, hi: 0xa7}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + {value: 0x0034, lo: 0xb5, hi: 0xba}, + {value: 0x0024, lo: 0xbb, hi: 0xbc}, + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0x54, offset 0x24d + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0014, lo: 
0xb6, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x55, offset 0x255 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x83}, + {value: 0x0030, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x8b}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xab, hi: 0xab}, + {value: 0x0034, lo: 0xac, hi: 0xac}, + {value: 0x0024, lo: 0xad, hi: 0xb3}, + // Block 0x56, offset 0x25e + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0030, lo: 0xaa, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xbf}, + // Block 0x57, offset 0x267 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa9}, + {value: 0x0010, lo: 0xaa, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0030, lo: 0xb2, hi: 0xb3}, + // Block 0x58, offset 0x270 + {value: 0x0010, lo: 0x80, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + // Block 0x59, offset 0x275 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8d, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x5a, offset 0x278 + {value: 0x296a, lo: 0x80, hi: 0x80}, + {value: 0x2a2a, lo: 0x81, hi: 0x81}, + {value: 0x2aea, lo: 0x82, hi: 0x82}, + {value: 0x2baa, lo: 0x83, hi: 0x83}, + {value: 0x2c6a, lo: 0x84, hi: 0x84}, + {value: 0x2d2a, lo: 0x85, hi: 0x85}, + {value: 0x2dea, lo: 0x86, hi: 0x86}, + {value: 0x2eaa, lo: 0x87, hi: 0x87}, + {value: 0x2f6a, lo: 0x88, hi: 0x88}, + // Block 0x5b, offset 0x281 + {value: 0x0024, lo: 0x90, hi: 0x92}, + {value: 0x0034, lo: 0x94, hi: 0x99}, + {value: 0x0024, lo: 0x9a, hi: 0x9b}, + {value: 0x0034, lo: 0x9c, hi: 0x9f}, + {value: 0x0024, lo: 0xa0, hi: 0xa0}, + {value: 0x0010, lo: 0xa1, hi: 0xa1}, + {value: 0x0034, lo: 0xa2, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xb3}, + {value: 0x0024, lo: 0xb4, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb6}, + {value: 0x0024, lo: 0xb8, hi: 0xb9}, + // Block 0x5c, offset 0x28e + {value: 0x0012, lo: 0x80, hi: 0xab}, + {value: 0x0015, lo: 0xac, hi: 0xbf}, + // Block 0x5d, offset 0x290 + {value: 0x0015, lo: 0x80, hi: 0xaa}, + {value: 0x0012, lo: 0xab, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb8}, + {value: 0x8452, lo: 0xb9, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbc}, + {value: 0x8852, lo: 0xbd, hi: 0xbd}, + {value: 0x0012, lo: 0xbe, hi: 0xbf}, + // Block 0x5e, offset 0x297 + {value: 0x0012, lo: 0x80, hi: 0x9a}, + {value: 0x0015, lo: 0x9b, hi: 0xbf}, + // Block 0x5f, offset 0x299 + {value: 0x0024, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0024, lo: 0x83, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0024, lo: 0x8b, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x90}, + {value: 0x0024, lo: 0x91, hi: 0xb5}, + {value: 0x0024, lo: 0xbb, hi: 0xbb}, + {value: 0x0034, lo: 0xbc, hi: 0xbd}, + {value: 0x0024, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // 
Block 0x60, offset 0x2a4 + {value: 0x0117, lo: 0x80, hi: 0xbf}, + // Block 0x61, offset 0x2a5 + {value: 0x0117, lo: 0x80, hi: 0x95}, + {value: 0x306a, lo: 0x96, hi: 0x96}, + {value: 0x316a, lo: 0x97, hi: 0x97}, + {value: 0x326a, lo: 0x98, hi: 0x98}, + {value: 0x336a, lo: 0x99, hi: 0x99}, + {value: 0x346a, lo: 0x9a, hi: 0x9a}, + {value: 0x356a, lo: 0x9b, hi: 0x9b}, + {value: 0x0012, lo: 0x9c, hi: 0x9d}, + {value: 0x366b, lo: 0x9e, hi: 0x9e}, + {value: 0x0012, lo: 0x9f, hi: 0x9f}, + {value: 0x0117, lo: 0xa0, hi: 0xbf}, + // Block 0x62, offset 0x2b0 + {value: 0x0812, lo: 0x80, hi: 0x87}, + {value: 0x0813, lo: 0x88, hi: 0x8f}, + {value: 0x0812, lo: 0x90, hi: 0x95}, + {value: 0x0813, lo: 0x98, hi: 0x9d}, + {value: 0x0812, lo: 0xa0, hi: 0xa7}, + {value: 0x0813, lo: 0xa8, hi: 0xaf}, + {value: 0x0812, lo: 0xb0, hi: 0xb7}, + {value: 0x0813, lo: 0xb8, hi: 0xbf}, + // Block 0x63, offset 0x2b8 + {value: 0x0004, lo: 0x8b, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8f}, + {value: 0x0054, lo: 0x98, hi: 0x99}, + {value: 0x0054, lo: 0xa4, hi: 0xa4}, + {value: 0x0054, lo: 0xa7, hi: 0xa7}, + {value: 0x0014, lo: 0xaa, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xaf}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x64, offset 0x2c0 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x94, hi: 0x94}, + {value: 0x0014, lo: 0xa0, hi: 0xa4}, + {value: 0x0014, lo: 0xa6, hi: 0xaf}, + {value: 0x0015, lo: 0xb1, hi: 0xb1}, + {value: 0x0015, lo: 0xbf, hi: 0xbf}, + // Block 0x65, offset 0x2c6 + {value: 0x0015, lo: 0x90, hi: 0x9c}, + // Block 0x66, offset 0x2c7 + {value: 0x0024, lo: 0x90, hi: 0x91}, + {value: 0x0034, lo: 0x92, hi: 0x93}, + {value: 0x0024, lo: 0x94, hi: 0x97}, + {value: 0x0034, lo: 0x98, hi: 0x9a}, + {value: 0x0024, lo: 0x9b, hi: 0x9c}, + {value: 0x0014, lo: 0x9d, hi: 0xa0}, + {value: 0x0024, lo: 0xa1, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa4}, + {value: 0x0034, lo: 0xa5, hi: 0xa6}, + {value: 0x0024, lo: 0xa7, hi: 0xa7}, + {value: 0x0034, lo: 0xa8, hi: 0xa8}, + {value: 0x0024, lo: 0xa9, hi: 0xa9}, + {value: 0x0034, lo: 0xaa, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + // Block 0x67, offset 0x2d5 + {value: 0x0016, lo: 0x85, hi: 0x86}, + {value: 0x0012, lo: 0x87, hi: 0x89}, + {value: 0x9d52, lo: 0x8e, hi: 0x8e}, + {value: 0x1013, lo: 0xa0, hi: 0xaf}, + {value: 0x1012, lo: 0xb0, hi: 0xbf}, + // Block 0x68, offset 0x2da + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0716, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x85, hi: 0x88}, + // Block 0x69, offset 0x2dd + {value: 0xa053, lo: 0xb6, hi: 0xb7}, + {value: 0xa353, lo: 0xb8, hi: 0xb9}, + {value: 0xa653, lo: 0xba, hi: 0xbb}, + {value: 0xa353, lo: 0xbc, hi: 0xbd}, + {value: 0xa053, lo: 0xbe, hi: 0xbf}, + // Block 0x6a, offset 0x2e2 + {value: 0x3013, lo: 0x80, hi: 0x8f}, + {value: 0x6553, lo: 0x90, hi: 0x9f}, + {value: 0xa953, lo: 0xa0, hi: 0xae}, + {value: 0x3012, lo: 0xb0, hi: 0xbf}, + // Block 0x6b, offset 0x2e6 + {value: 0x0117, lo: 0x80, hi: 0xa3}, + {value: 0x0012, lo: 0xa4, hi: 0xa4}, + {value: 0x0716, lo: 0xab, hi: 0xac}, + {value: 0x0316, lo: 0xad, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xb3}, + // Block 0x6c, offset 0x2ec + {value: 0x6c52, lo: 0x80, hi: 0x9f}, + {value: 0x7052, lo: 0xa0, hi: 0xa5}, + {value: 0x7052, lo: 0xa7, hi: 0xa7}, + {value: 0x7052, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x6d, offset 0x2f1 + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0x6e, 
offset 0x2f4 + {value: 0x0010, lo: 0x80, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0010, lo: 0xb0, hi: 0xb6}, + {value: 0x0010, lo: 0xb8, hi: 0xbe}, + // Block 0x6f, offset 0x2f9 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x8e}, + {value: 0x0010, lo: 0x90, hi: 0x96}, + {value: 0x0010, lo: 0x98, hi: 0x9e}, + {value: 0x0024, lo: 0xa0, hi: 0xbf}, + // Block 0x70, offset 0x2fe + {value: 0x0014, lo: 0xaf, hi: 0xaf}, + // Block 0x71, offset 0x2ff + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0xaa, hi: 0xad}, + {value: 0x0030, lo: 0xae, hi: 0xaf}, + {value: 0x0004, lo: 0xb1, hi: 0xb5}, + {value: 0x0014, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + // Block 0x72, offset 0x305 + {value: 0x0034, lo: 0x99, hi: 0x9a}, + {value: 0x0004, lo: 0x9b, hi: 0x9e}, + // Block 0x73, offset 0x307 + {value: 0x0004, lo: 0xbc, hi: 0xbe}, + // Block 0x74, offset 0x308 + {value: 0x0010, lo: 0x85, hi: 0xad}, + {value: 0x0010, lo: 0xb1, hi: 0xbf}, + // Block 0x75, offset 0x30a + {value: 0x0010, lo: 0x80, hi: 0x8e}, + {value: 0x0010, lo: 0xa0, hi: 0xba}, + // Block 0x76, offset 0x30c + {value: 0x0010, lo: 0x80, hi: 0x94}, + {value: 0x0014, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0x96, hi: 0xbf}, + // Block 0x77, offset 0x30f + {value: 0x0010, lo: 0x80, hi: 0x8c}, + // Block 0x78, offset 0x310 + {value: 0x0010, lo: 0x90, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + // Block 0x79, offset 0x312 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0xab}, + // Block 0x7a, offset 0x315 + {value: 0x0117, lo: 0x80, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xae}, + {value: 0x0024, lo: 0xaf, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb2}, + {value: 0x0024, lo: 0xb4, hi: 0xbd}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x7b, offset 0x31b + {value: 0x0117, lo: 0x80, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9d}, + {value: 0x0024, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0x7c, offset 0x31f + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb1}, + // Block 0x7d, offset 0x321 + {value: 0x0004, lo: 0x80, hi: 0x96}, + {value: 0x0014, lo: 0x97, hi: 0x9f}, + {value: 0x0004, lo: 0xa0, hi: 0xa1}, + {value: 0x0117, lo: 0xa2, hi: 0xaf}, + {value: 0x0012, lo: 0xb0, hi: 0xb1}, + {value: 0x0117, lo: 0xb2, hi: 0xbf}, + // Block 0x7e, offset 0x327 + {value: 0x0117, lo: 0x80, hi: 0xaf}, + {value: 0x0015, lo: 0xb0, hi: 0xb0}, + {value: 0x0012, lo: 0xb1, hi: 0xb8}, + {value: 0x0316, lo: 0xb9, hi: 0xba}, + {value: 0x0716, lo: 0xbb, hi: 0xbc}, + {value: 0x8453, lo: 0xbd, hi: 0xbd}, + {value: 0x0117, lo: 0xbe, hi: 0xbf}, + // Block 0x7f, offset 0x32e + {value: 0x0010, lo: 0xb7, hi: 0xb7}, + {value: 0x0015, lo: 0xb8, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbf}, + // Block 0x80, offset 0x332 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0014, lo: 0x82, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8b}, + {value: 0x0010, lo: 0x8c, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa6}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + // Block 0x81, offset 0x33b + {value: 0x0010, lo: 0x80, hi: 0xb3}, + // Block 0x82, offset 0x33c + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0034, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x85, hi: 0x85}, + {value: 
0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0024, lo: 0xa0, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb7}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x83, offset 0x344 + {value: 0x0010, lo: 0x80, hi: 0xa5}, + {value: 0x0014, lo: 0xa6, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xad}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x84, offset 0x348 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0014, lo: 0x87, hi: 0x91}, + {value: 0x0010, lo: 0x92, hi: 0x92}, + {value: 0x0030, lo: 0x93, hi: 0x93}, + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0x85, offset 0x34d + {value: 0x0014, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xb9}, + {value: 0x0010, lo: 0xba, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0x86, offset 0x355 + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0004, lo: 0xa6, hi: 0xa6}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x87, offset 0x35b + {value: 0x0010, lo: 0x80, hi: 0xa8}, + {value: 0x0014, lo: 0xa9, hi: 0xae}, + {value: 0x0010, lo: 0xaf, hi: 0xb0}, + {value: 0x0014, lo: 0xb1, hi: 0xb2}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0x88, offset 0x361 + {value: 0x0010, lo: 0x80, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x8b}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0010, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbd}, + // Block 0x89, offset 0x36b + {value: 0x0024, lo: 0xb0, hi: 0xb0}, + {value: 0x0024, lo: 0xb2, hi: 0xb3}, + {value: 0x0034, lo: 0xb4, hi: 0xb4}, + {value: 0x0024, lo: 0xb7, hi: 0xb8}, + {value: 0x0024, lo: 0xbe, hi: 0xbf}, + // Block 0x8a, offset 0x370 + {value: 0x0024, lo: 0x81, hi: 0x81}, + {value: 0x0004, lo: 0x9d, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xab}, + {value: 0x0014, lo: 0xac, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb2, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + // Block 0x8b, offset 0x379 + {value: 0x0010, lo: 0x81, hi: 0x86}, + {value: 0x0010, lo: 0x89, hi: 0x8e}, + {value: 0x0010, lo: 0x91, hi: 0x96}, + {value: 0x0010, lo: 0xa0, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0x8c, offset 0x37f + {value: 0x0012, lo: 0x80, hi: 0x92}, + {value: 0xac52, lo: 0x93, hi: 0x93}, + {value: 0x0012, lo: 0x94, hi: 0x9a}, + {value: 0x0004, lo: 0x9b, hi: 0x9b}, + {value: 0x0015, lo: 0x9c, hi: 0x9f}, + {value: 0x0012, lo: 0xa0, hi: 0xa5}, + {value: 0x74d2, lo: 0xb0, hi: 0xbf}, + // Block 0x8d, offset 0x386 + {value: 0x78d2, lo: 0x80, hi: 0x8f}, + {value: 0x7cd2, lo: 0x90, hi: 0x9f}, + {value: 0x80d2, lo: 0xa0, hi: 0xaf}, + {value: 0x7cd2, lo: 0xb0, hi: 0xbf}, + // Block 0x8e, offset 0x38a + {value: 0x0010, lo: 0x80, hi: 0xa4}, + {value: 0x0014, lo: 0xa5, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa7}, + {value: 0x0014, lo: 0xa8, hi: 0xa8}, + {value: 0x0010, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0034, lo: 0xad, hi: 0xad}, + 
{value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0x8f, offset 0x392 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0x90, offset 0x394 + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x8b, hi: 0xbb}, + // Block 0x91, offset 0x396 + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x83, hi: 0x84}, + {value: 0x0010, lo: 0x86, hi: 0xbf}, + // Block 0x92, offset 0x399 + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0004, lo: 0xb2, hi: 0xbf}, + // Block 0x93, offset 0x39b + {value: 0x0004, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x93, hi: 0xbf}, + // Block 0x94, offset 0x39d + {value: 0x0010, lo: 0x80, hi: 0xbd}, + // Block 0x95, offset 0x39e + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0x96, offset 0x39f + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0xbf}, + // Block 0x97, offset 0x3a1 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0xb0, hi: 0xbb}, + // Block 0x98, offset 0x3a3 + {value: 0x0014, lo: 0x80, hi: 0x8f}, + {value: 0x0054, lo: 0x93, hi: 0x93}, + {value: 0x0024, lo: 0xa0, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xad}, + {value: 0x0024, lo: 0xae, hi: 0xaf}, + {value: 0x0010, lo: 0xb3, hi: 0xb4}, + // Block 0x99, offset 0x3a9 + {value: 0x0010, lo: 0x8d, hi: 0x8f}, + {value: 0x0054, lo: 0x92, hi: 0x92}, + {value: 0x0054, lo: 0x95, hi: 0x95}, + {value: 0x0010, lo: 0xb0, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0x9a, offset 0x3ae + {value: 0x0010, lo: 0x80, hi: 0xbc}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0x9b, offset 0x3b0 + {value: 0x0054, lo: 0x87, hi: 0x87}, + {value: 0x0054, lo: 0x8e, hi: 0x8e}, + {value: 0x0054, lo: 0x9a, hi: 0x9a}, + {value: 0x5f53, lo: 0xa1, hi: 0xba}, + {value: 0x0004, lo: 0xbe, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0x9c, offset 0x3b6 + {value: 0x0004, lo: 0x80, hi: 0x80}, + {value: 0x5f52, lo: 0x81, hi: 0x9a}, + {value: 0x0004, lo: 0xb0, hi: 0xb0}, + // Block 0x9d, offset 0x3b9 + {value: 0x0014, lo: 0x9e, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xbe}, + // Block 0x9e, offset 0x3bb + {value: 0x0010, lo: 0x82, hi: 0x87}, + {value: 0x0010, lo: 0x8a, hi: 0x8f}, + {value: 0x0010, lo: 0x92, hi: 0x97}, + {value: 0x0010, lo: 0x9a, hi: 0x9c}, + {value: 0x0004, lo: 0xa3, hi: 0xa3}, + {value: 0x0014, lo: 0xb9, hi: 0xbb}, + // Block 0x9f, offset 0x3c1 + {value: 0x0010, lo: 0x80, hi: 0x8b}, + {value: 0x0010, lo: 0x8d, hi: 0xa6}, + {value: 0x0010, lo: 0xa8, hi: 0xba}, + {value: 0x0010, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xa0, offset 0x3c6 + {value: 0x0010, lo: 0x80, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x9d}, + // Block 0xa1, offset 0x3c8 + {value: 0x0010, lo: 0x80, hi: 0xba}, + // Block 0xa2, offset 0x3c9 + {value: 0x0010, lo: 0x80, hi: 0xb4}, + // Block 0xa3, offset 0x3ca + {value: 0x0034, lo: 0xbd, hi: 0xbd}, + // Block 0xa4, offset 0x3cb + {value: 0x0010, lo: 0x80, hi: 0x9c}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa5, offset 0x3cd + {value: 0x0010, lo: 0x80, hi: 0x90}, + {value: 0x0034, lo: 0xa0, hi: 0xa0}, + // Block 0xa6, offset 0x3cf + {value: 0x0010, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xa7, offset 0x3d1 + {value: 0x0010, lo: 0x80, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0xb5}, + {value: 0x0024, lo: 0xb6, hi: 0xba}, + // Block 0xa8, offset 0x3d4 + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xbf}, + // Block 0xa9, offset 0x3d6 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 
0x0010, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x91, hi: 0x95}, + // Block 0xaa, offset 0x3d9 + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x97}, + {value: 0xaf53, lo: 0x98, hi: 0x9f}, + {value: 0xb253, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x3e1 + {value: 0xaf52, lo: 0x80, hi: 0x87}, + {value: 0xb252, lo: 0x88, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0xbf}, + // Block 0xac, offset 0x3e4 + {value: 0x0010, lo: 0x80, hi: 0x9d}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0xb253, lo: 0xb0, hi: 0xb7}, + {value: 0xaf53, lo: 0xb8, hi: 0xbf}, + // Block 0xad, offset 0x3e8 + {value: 0x2813, lo: 0x80, hi: 0x87}, + {value: 0x3813, lo: 0x88, hi: 0x8f}, + {value: 0x2813, lo: 0x90, hi: 0x93}, + {value: 0xb252, lo: 0x98, hi: 0x9f}, + {value: 0xaf52, lo: 0xa0, hi: 0xa7}, + {value: 0x2812, lo: 0xa8, hi: 0xaf}, + {value: 0x3812, lo: 0xb0, hi: 0xb7}, + {value: 0x2812, lo: 0xb8, hi: 0xbb}, + // Block 0xae, offset 0x3f0 + {value: 0x0010, lo: 0x80, hi: 0xa7}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xaf, offset 0x3f2 + {value: 0x0010, lo: 0x80, hi: 0xa3}, + // Block 0xb0, offset 0x3f3 + {value: 0x0010, lo: 0x80, hi: 0xb6}, + // Block 0xb1, offset 0x3f4 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xa7}, + // Block 0xb2, offset 0x3f6 + {value: 0x0010, lo: 0x80, hi: 0x85}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xb5}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0010, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xb3, offset 0x3fc + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb6}, + // Block 0xb4, offset 0x3fe + {value: 0x0010, lo: 0x80, hi: 0x9e}, + // Block 0xb5, offset 0x3ff + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb5}, + // Block 0xb6, offset 0x401 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb9}, + // Block 0xb7, offset 0x403 + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0010, lo: 0xbe, hi: 0xbf}, + // Block 0xb8, offset 0x405 + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 0x83}, + {value: 0x0014, lo: 0x85, hi: 0x86}, + {value: 0x0014, lo: 0x8c, hi: 0x8c}, + {value: 0x0034, lo: 0x8d, hi: 0x8d}, + {value: 0x0014, lo: 0x8e, hi: 0x8e}, + {value: 0x0024, lo: 0x8f, hi: 0x8f}, + {value: 0x0010, lo: 0x90, hi: 0x93}, + {value: 0x0010, lo: 0x95, hi: 0x97}, + {value: 0x0010, lo: 0x99, hi: 0xb3}, + {value: 0x0024, lo: 0xb8, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xb9, offset 0x412 + {value: 0x0010, lo: 0xa0, hi: 0xbc}, + // Block 0xba, offset 0x413 + {value: 0x0010, lo: 0x80, hi: 0x9c}, + // Block 0xbb, offset 0x414 + {value: 0x0010, lo: 0x80, hi: 0x87}, + {value: 0x0010, lo: 0x89, hi: 0xa4}, + {value: 0x0024, lo: 0xa5, hi: 0xa5}, + {value: 0x0034, lo: 0xa6, hi: 0xa6}, + // Block 0xbc, offset 0x418 + {value: 0x0010, lo: 0x80, hi: 0x95}, + {value: 0x0010, lo: 0xa0, hi: 0xb2}, + // Block 0xbd, offset 0x41a + {value: 0x0010, lo: 0x80, hi: 0x91}, + // Block 0xbe, offset 0x41b + {value: 0x0010, lo: 0x80, hi: 0x88}, + // Block 0xbf, offset 0x41c + {value: 0x5653, lo: 0x80, hi: 0xb2}, + // Block 0xc0, offset 0x41d + {value: 0x5652, lo: 0x80, hi: 0xb2}, + // Block 0xc1, offset 0x41e + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0014, lo: 0x81, hi: 
0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xc2, offset 0x422 + {value: 0x0014, lo: 0x80, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0xa6, hi: 0xaf}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xc3, offset 0x426 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb6}, + {value: 0x0010, lo: 0xb7, hi: 0xb8}, + {value: 0x0034, lo: 0xb9, hi: 0xba}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + // Block 0xc4, offset 0x42c + {value: 0x0010, lo: 0x90, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xc5, offset 0x42e + {value: 0x0024, lo: 0x80, hi: 0x82}, + {value: 0x0010, lo: 0x83, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb4}, + {value: 0x0010, lo: 0xb6, hi: 0xbf}, + // Block 0xc6, offset 0x435 + {value: 0x0010, lo: 0x90, hi: 0xb2}, + {value: 0x0034, lo: 0xb3, hi: 0xb3}, + {value: 0x0010, lo: 0xb6, hi: 0xb6}, + // Block 0xc7, offset 0x438 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0xb5}, + {value: 0x0014, lo: 0xb6, hi: 0xbe}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xc8, offset 0x43c + {value: 0x0030, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0014, lo: 0x8b, hi: 0x8c}, + {value: 0x0010, lo: 0x90, hi: 0x9a}, + {value: 0x0010, lo: 0x9c, hi: 0x9c}, + // Block 0xc9, offset 0x442 + {value: 0x0010, lo: 0x80, hi: 0x91}, + {value: 0x0010, lo: 0x93, hi: 0xae}, + {value: 0x0014, lo: 0xaf, hi: 0xb1}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0014, lo: 0xb4, hi: 0xb4}, + {value: 0x0030, lo: 0xb5, hi: 0xb5}, + {value: 0x0034, lo: 0xb6, hi: 0xb6}, + {value: 0x0014, lo: 0xb7, hi: 0xb7}, + {value: 0x0014, lo: 0xbe, hi: 0xbe}, + // Block 0xca, offset 0x44b + {value: 0x0010, lo: 0x80, hi: 0x86}, + {value: 0x0010, lo: 0x88, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0x8d}, + {value: 0x0010, lo: 0x8f, hi: 0x9d}, + {value: 0x0010, lo: 0x9f, hi: 0xa8}, + {value: 0x0010, lo: 0xb0, hi: 0xbf}, + // Block 0xcb, offset 0x451 + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0014, lo: 0x9f, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa2}, + {value: 0x0014, lo: 0xa3, hi: 0xa8}, + {value: 0x0034, lo: 0xa9, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xcc, offset 0x457 + {value: 0x0014, lo: 0x80, hi: 0x81}, + {value: 0x0010, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x8c}, + {value: 0x0010, lo: 0x8f, hi: 0x90}, + {value: 0x0010, lo: 0x93, hi: 0xa8}, + {value: 0x0010, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb5, hi: 0xb9}, + {value: 0x0034, lo: 0xbc, hi: 0xbc}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xcd, offset 0x461 + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x84}, + {value: 0x0010, lo: 0x87, hi: 0x88}, + {value: 0x0010, lo: 0x8b, hi: 0x8c}, + {value: 0x0030, lo: 0x8d, hi: 0x8d}, + {value: 0x0010, lo: 0x90, hi: 0x90}, + {value: 0x0010, lo: 0x97, hi: 0x97}, + {value: 0x0010, lo: 0x9d, hi: 0xa3}, + {value: 0x0024, lo: 0xa6, hi: 0xac}, + {value: 0x0024, lo: 0xb0, hi: 0xb4}, + // Block 0xce, offset 0x46b + {value: 0x0010, lo: 0x80, hi: 0xb7}, + {value: 0x0014, lo: 0xb8, hi: 0xbf}, + // Block 0xcf, offset 0x46d + {value: 0x0010, lo: 0x80, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x82}, + {value: 0x0014, lo: 0x83, hi: 0x84}, + 
{value: 0x0010, lo: 0x85, hi: 0x85}, + {value: 0x0034, lo: 0x86, hi: 0x86}, + {value: 0x0010, lo: 0x87, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd0, offset 0x474 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xb8}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0014, lo: 0xba, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbe}, + {value: 0x0014, lo: 0xbf, hi: 0xbf}, + // Block 0xd1, offset 0x47a + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x81, hi: 0x81}, + {value: 0x0034, lo: 0x82, hi: 0x83}, + {value: 0x0010, lo: 0x84, hi: 0x85}, + {value: 0x0010, lo: 0x87, hi: 0x87}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd2, offset 0x480 + {value: 0x0010, lo: 0x80, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb5}, + {value: 0x0010, lo: 0xb8, hi: 0xbb}, + {value: 0x0014, lo: 0xbc, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd3, offset 0x486 + {value: 0x0034, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x98, hi: 0x9b}, + {value: 0x0014, lo: 0x9c, hi: 0x9d}, + // Block 0xd4, offset 0x489 + {value: 0x0010, lo: 0x80, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0010, lo: 0xbb, hi: 0xbc}, + {value: 0x0014, lo: 0xbd, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xd5, offset 0x48f + {value: 0x0014, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x84, hi: 0x84}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0xd6, offset 0x492 + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0014, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xac, hi: 0xac}, + {value: 0x0014, lo: 0xad, hi: 0xad}, + {value: 0x0010, lo: 0xae, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb5}, + {value: 0x0030, lo: 0xb6, hi: 0xb6}, + {value: 0x0034, lo: 0xb7, hi: 0xb7}, + // Block 0xd7, offset 0x49a + {value: 0x0010, lo: 0x80, hi: 0x89}, + // Block 0xd8, offset 0x49b + {value: 0x0014, lo: 0x9d, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa1}, + {value: 0x0014, lo: 0xa2, hi: 0xa5}, + {value: 0x0010, lo: 0xa6, hi: 0xa6}, + {value: 0x0014, lo: 0xa7, hi: 0xaa}, + {value: 0x0034, lo: 0xab, hi: 0xab}, + {value: 0x0010, lo: 0xb0, hi: 0xb9}, + // Block 0xd9, offset 0x4a2 + {value: 0x5f53, lo: 0xa0, hi: 0xbf}, + // Block 0xda, offset 0x4a3 + {value: 0x5f52, lo: 0x80, hi: 0x9f}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + {value: 0x0010, lo: 0xbf, hi: 0xbf}, + // Block 0xdb, offset 0x4a6 + {value: 0x0010, lo: 0x80, hi: 0xb8}, + // Block 0xdc, offset 0x4a7 + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x8a, hi: 0xaf}, + {value: 0x0014, lo: 0xb0, hi: 0xb6}, + {value: 0x0014, lo: 0xb8, hi: 0xbd}, + {value: 0x0010, lo: 0xbe, hi: 0xbe}, + {value: 0x0034, lo: 0xbf, hi: 0xbf}, + // Block 0xdd, offset 0x4ad + {value: 0x0010, lo: 0x80, hi: 0x80}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xb2, hi: 0xbf}, + // Block 0xde, offset 0x4b0 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + {value: 0x0014, lo: 0x92, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xa9}, + {value: 0x0014, lo: 0xaa, hi: 0xb0}, + {value: 0x0010, lo: 0xb1, hi: 0xb1}, + {value: 0x0014, lo: 0xb2, hi: 0xb3}, + {value: 0x0010, lo: 0xb4, hi: 0xb4}, + {value: 0x0014, lo: 0xb5, hi: 0xb6}, + // Block 0xdf, offset 0x4b8 + {value: 0x0010, lo: 0x80, hi: 0x99}, + // Block 0xe0, offset 0x4b9 + {value: 0x0010, lo: 0x80, hi: 0xae}, + // Block 0xe1, offset 0x4ba + {value: 0x0010, lo: 0x80, hi: 0x83}, + // Block 0xe2, offset 0x4bb + {value: 0x0010, lo: 0x80, hi: 0x86}, + // Block 0xe3, 
offset 0x4bc + {value: 0x0010, lo: 0x80, hi: 0x9e}, + {value: 0x0010, lo: 0xa0, hi: 0xa9}, + // Block 0xe4, offset 0x4be + {value: 0x0010, lo: 0x90, hi: 0xad}, + {value: 0x0034, lo: 0xb0, hi: 0xb4}, + // Block 0xe5, offset 0x4c0 + {value: 0x0010, lo: 0x80, hi: 0xaf}, + {value: 0x0024, lo: 0xb0, hi: 0xb6}, + // Block 0xe6, offset 0x4c2 + {value: 0x0014, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0010, lo: 0xa3, hi: 0xb7}, + {value: 0x0010, lo: 0xbd, hi: 0xbf}, + // Block 0xe7, offset 0x4c6 + {value: 0x0010, lo: 0x80, hi: 0x8f}, + // Block 0xe8, offset 0x4c7 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0010, lo: 0x90, hi: 0xbe}, + // Block 0xe9, offset 0x4c9 + {value: 0x0014, lo: 0x8f, hi: 0x9f}, + // Block 0xea, offset 0x4ca + {value: 0x0014, lo: 0xa0, hi: 0xa0}, + // Block 0xeb, offset 0x4cb + {value: 0x0010, lo: 0x80, hi: 0xaa}, + {value: 0x0010, lo: 0xb0, hi: 0xbc}, + // Block 0xec, offset 0x4cd + {value: 0x0010, lo: 0x80, hi: 0x88}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + {value: 0x0014, lo: 0x9d, hi: 0x9d}, + {value: 0x0034, lo: 0x9e, hi: 0x9e}, + {value: 0x0014, lo: 0xa0, hi: 0xa3}, + // Block 0xed, offset 0x4d2 + {value: 0x0030, lo: 0xa5, hi: 0xa6}, + {value: 0x0034, lo: 0xa7, hi: 0xa9}, + {value: 0x0030, lo: 0xad, hi: 0xb2}, + {value: 0x0014, lo: 0xb3, hi: 0xba}, + {value: 0x0034, lo: 0xbb, hi: 0xbf}, + // Block 0xee, offset 0x4d7 + {value: 0x0034, lo: 0x80, hi: 0x82}, + {value: 0x0024, lo: 0x85, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8b}, + {value: 0x0024, lo: 0xaa, hi: 0xad}, + // Block 0xef, offset 0x4db + {value: 0x0024, lo: 0x82, hi: 0x84}, + // Block 0xf0, offset 0x4dc + {value: 0x0013, lo: 0x80, hi: 0x99}, + {value: 0x0012, lo: 0x9a, hi: 0xb3}, + {value: 0x0013, lo: 0xb4, hi: 0xbf}, + // Block 0xf1, offset 0x4df + {value: 0x0013, lo: 0x80, hi: 0x8d}, + {value: 0x0012, lo: 0x8e, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0xa7}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0xf2, offset 0x4e3 + {value: 0x0013, lo: 0x80, hi: 0x81}, + {value: 0x0012, lo: 0x82, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0x9c}, + {value: 0x0013, lo: 0x9e, hi: 0x9f}, + {value: 0x0013, lo: 0xa2, hi: 0xa2}, + {value: 0x0013, lo: 0xa5, hi: 0xa6}, + {value: 0x0013, lo: 0xa9, hi: 0xac}, + {value: 0x0013, lo: 0xae, hi: 0xb5}, + {value: 0x0012, lo: 0xb6, hi: 0xb9}, + {value: 0x0012, lo: 0xbb, hi: 0xbb}, + {value: 0x0012, lo: 0xbd, hi: 0xbf}, + // Block 0xf3, offset 0x4ee + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0012, lo: 0x85, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0xf4, offset 0x4f2 + {value: 0x0012, lo: 0x80, hi: 0x83}, + {value: 0x0013, lo: 0x84, hi: 0x85}, + {value: 0x0013, lo: 0x87, hi: 0x8a}, + {value: 0x0013, lo: 0x8d, hi: 0x94}, + {value: 0x0013, lo: 0x96, hi: 0x9c}, + {value: 0x0012, lo: 0x9e, hi: 0xb7}, + {value: 0x0013, lo: 0xb8, hi: 0xb9}, + {value: 0x0013, lo: 0xbb, hi: 0xbe}, + // Block 0xf5, offset 0x4fa + {value: 0x0013, lo: 0x80, hi: 0x84}, + {value: 0x0013, lo: 0x86, hi: 0x86}, + {value: 0x0013, lo: 0x8a, hi: 0x90}, + {value: 0x0012, lo: 0x92, hi: 0xab}, + {value: 0x0013, lo: 0xac, hi: 0xbf}, + // Block 0xf6, offset 0x4ff + {value: 0x0013, lo: 0x80, hi: 0x85}, + {value: 0x0012, lo: 0x86, hi: 0x9f}, + {value: 0x0013, lo: 0xa0, hi: 0xb9}, + {value: 0x0012, lo: 0xba, hi: 0xbf}, + // Block 0xf7, offset 0x503 + {value: 0x0012, lo: 0x80, hi: 0x93}, + {value: 0x0013, lo: 0x94, hi: 0xad}, + {value: 0x0012, lo: 0xae, hi: 0xbf}, + // Block 0xf8, offset 0x506 + {value: 0x0012, lo: 
0x80, hi: 0x87}, + {value: 0x0013, lo: 0x88, hi: 0xa1}, + {value: 0x0012, lo: 0xa2, hi: 0xbb}, + {value: 0x0013, lo: 0xbc, hi: 0xbf}, + // Block 0xf9, offset 0x50a + {value: 0x0013, lo: 0x80, hi: 0x95}, + {value: 0x0012, lo: 0x96, hi: 0xaf}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0xfa, offset 0x50d + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0012, lo: 0x8a, hi: 0xa5}, + {value: 0x0013, lo: 0xa8, hi: 0xbf}, + // Block 0xfb, offset 0x510 + {value: 0x0013, lo: 0x80, hi: 0x80}, + {value: 0x0012, lo: 0x82, hi: 0x9a}, + {value: 0x0012, lo: 0x9c, hi: 0xa1}, + {value: 0x0013, lo: 0xa2, hi: 0xba}, + {value: 0x0012, lo: 0xbc, hi: 0xbf}, + // Block 0xfc, offset 0x515 + {value: 0x0012, lo: 0x80, hi: 0x94}, + {value: 0x0012, lo: 0x96, hi: 0x9b}, + {value: 0x0013, lo: 0x9c, hi: 0xb4}, + {value: 0x0012, lo: 0xb6, hi: 0xbf}, + // Block 0xfd, offset 0x519 + {value: 0x0012, lo: 0x80, hi: 0x8e}, + {value: 0x0012, lo: 0x90, hi: 0x95}, + {value: 0x0013, lo: 0x96, hi: 0xae}, + {value: 0x0012, lo: 0xb0, hi: 0xbf}, + // Block 0xfe, offset 0x51d + {value: 0x0012, lo: 0x80, hi: 0x88}, + {value: 0x0012, lo: 0x8a, hi: 0x8f}, + {value: 0x0013, lo: 0x90, hi: 0xa8}, + {value: 0x0012, lo: 0xaa, hi: 0xbf}, + // Block 0xff, offset 0x521 + {value: 0x0012, lo: 0x80, hi: 0x82}, + {value: 0x0012, lo: 0x84, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8b}, + {value: 0x0010, lo: 0x8e, hi: 0xbf}, + // Block 0x100, offset 0x525 + {value: 0x0014, lo: 0x80, hi: 0xb6}, + {value: 0x0014, lo: 0xbb, hi: 0xbf}, + // Block 0x101, offset 0x527 + {value: 0x0014, lo: 0x80, hi: 0xac}, + {value: 0x0014, lo: 0xb5, hi: 0xb5}, + // Block 0x102, offset 0x529 + {value: 0x0014, lo: 0x84, hi: 0x84}, + {value: 0x0014, lo: 0x9b, hi: 0x9f}, + {value: 0x0014, lo: 0xa1, hi: 0xaf}, + // Block 0x103, offset 0x52c + {value: 0x0024, lo: 0x80, hi: 0x86}, + {value: 0x0024, lo: 0x88, hi: 0x98}, + {value: 0x0024, lo: 0x9b, hi: 0xa1}, + {value: 0x0024, lo: 0xa3, hi: 0xa4}, + {value: 0x0024, lo: 0xa6, hi: 0xaa}, + // Block 0x104, offset 0x531 + {value: 0x0010, lo: 0x80, hi: 0x84}, + {value: 0x0034, lo: 0x90, hi: 0x96}, + // Block 0x105, offset 0x533 + {value: 0xb552, lo: 0x80, hi: 0x81}, + {value: 0xb852, lo: 0x82, hi: 0x83}, + {value: 0x0024, lo: 0x84, hi: 0x89}, + {value: 0x0034, lo: 0x8a, hi: 0x8a}, + {value: 0x0010, lo: 0x90, hi: 0x99}, + // Block 0x106, offset 0x538 + {value: 0x0010, lo: 0x80, hi: 0x83}, + {value: 0x0010, lo: 0x85, hi: 0x9f}, + {value: 0x0010, lo: 0xa1, hi: 0xa2}, + {value: 0x0010, lo: 0xa4, hi: 0xa4}, + {value: 0x0010, lo: 0xa7, hi: 0xa7}, + {value: 0x0010, lo: 0xa9, hi: 0xb2}, + {value: 0x0010, lo: 0xb4, hi: 0xb7}, + {value: 0x0010, lo: 0xb9, hi: 0xb9}, + {value: 0x0010, lo: 0xbb, hi: 0xbb}, + // Block 0x107, offset 0x541 + {value: 0x0010, lo: 0x80, hi: 0x89}, + {value: 0x0010, lo: 0x8b, hi: 0x9b}, + {value: 0x0010, lo: 0xa1, hi: 0xa3}, + {value: 0x0010, lo: 0xa5, hi: 0xa9}, + {value: 0x0010, lo: 0xab, hi: 0xbb}, + // Block 0x108, offset 0x546 + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x109, offset 0x547 + {value: 0x0013, lo: 0x80, hi: 0x89}, + {value: 0x0013, lo: 0x90, hi: 0xa9}, + {value: 0x0013, lo: 0xb0, hi: 0xbf}, + // Block 0x10a, offset 0x54a + {value: 0x0013, lo: 0x80, hi: 0x89}, + // Block 0x10b, offset 0x54b + {value: 0x0004, lo: 0xbb, hi: 0xbf}, + // Block 0x10c, offset 0x54c + {value: 0x0014, lo: 0x81, hi: 0x81}, + {value: 0x0014, lo: 0xa0, hi: 0xbf}, + // Block 0x10d, offset 0x54e + {value: 0x0014, lo: 0x80, hi: 0xbf}, + // Block 0x10e, offset 0x54f + {value: 0x0014, lo: 0x80, hi: 0xaf}, +} + +// Total 
table size 13811 bytes (13KiB); checksum: 4CC48DA3 diff --git a/vendor/golang.org/x/text/cases/trieval.go b/vendor/golang.org/x/text/cases/trieval.go new file mode 100644 index 000000000..a2edb1057 --- /dev/null +++ b/vendor/golang.org/x/text/cases/trieval.go @@ -0,0 +1,215 @@ +// This file was generated by go generate; DO NOT EDIT + +package cases + +// This file contains definitions for interpreting the trie value of the case +// trie generated by "go run gen*.go". It is shared by both the generator +// program and the resultant package. Sharing is achieved by the generator +// copying gen_trieval.go to trieval.go and changing what's above this comment. + +// info holds case information for a single rune. It is the value returned +// by a trie lookup. Most mapping information can be stored in a single 16-bit +// value. If not, for example when a rune is mapped to multiple runes, the value +// stores some basic case data and an index into an array with additional data. +// +// The per-rune values have the following format: +// +// if (exception) { +// 15..5 unsigned exception index +// 4 unused +// } else { +// 15..8 XOR pattern or index to XOR pattern for case mapping +// Only 13..8 are used for XOR patterns. +// 7 inverseFold (fold to upper, not to lower) +// 6 index: interpret the XOR pattern as an index +// or isMid if case mode is cIgnorableUncased. +// 5..4 CCC: zero (normal or break), above or other +// } +// 3 exception: interpret this value as an exception index +// (TODO: is this bit necessary? Probably implied from case mode.) +// 2..0 case mode +// +// For the non-exceptional cases, a rune must be either uncased, lowercase or +// uppercase. If the rune is cased, the XOR pattern maps either a lowercase +// rune to uppercase or an uppercase rune to lowercase (applied to the 10 +// least-significant bits of the rune). +// +// See the definitions below for a more detailed description of the various +// bits. +type info uint16 + +const ( + casedMask = 0x0003 + fullCasedMask = 0x0007 + ignorableMask = 0x0006 + ignorableValue = 0x0004 + + inverseFoldBit = 1 << 7 + isMidBit = 1 << 6 + + exceptionBit = 1 << 3 + exceptionShift = 5 + numExceptionBits = 11 + + xorIndexBit = 1 << 6 + xorShift = 8 + + // There is no mapping if all xor bits and the exception bit are zero. + hasMappingMask = 0xff80 | exceptionBit +) + +// The case mode bits encodes the case type of a rune. This includes uncased, +// title, upper and lower case and case ignorable. (For a definition of these +// terms see Chapter 3 of The Unicode Standard Core Specification.) In some rare +// cases, a rune can be both cased and case-ignorable. This is encoded by +// cIgnorableCased. A rune of this type is always lower case. Some runes are +// cased while not having a mapping. +// +// A common pattern for scripts in the Unicode standard is for upper and lower +// case runes to alternate for increasing rune values (e.g. the accented Latin +// ranges starting from U+0100 and U+1E00 among others and some Cyrillic +// characters). We use this property by defining a cXORCase mode, where the case +// mode (always upper or lower case) is derived from the rune value. As the XOR +// pattern for case mappings is often identical for successive runes, using +// cXORCase can result in large series of identical trie values. This, in turn, +// allows us to better compress the trie blocks. 
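A minimal, self-contained sketch of the cXORCase idea described in the comment above: for ranges where upper and lower case alternate (for example Latin Extended-A starting at U+0100), a single XOR pattern maps a cased rune to its counterpart. The pattern value 0x1 below is illustrative only and is not taken from the generated tables.

package main

import "fmt"

func main() {
	// In alternating ranges, flipping the low bit toggles the case.
	const xorPattern = 0x1 // hypothetical pattern; real values live in the trie
	upper := rune(0x0100)  // 'Ā'
	lower := upper ^ xorPattern
	fmt.Printf("%c (U+%04X) <-> %c (U+%04X)\n", upper, upper, lower, lower)
}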
+const ( + cUncased info = iota // 000 + cTitle // 001 + cLower // 010 + cUpper // 011 + cIgnorableUncased // 100 + cIgnorableCased // 101 // lower case if mappings exist + cXORCase // 11x // case is cLower | ((rune&1) ^ x) + + maxCaseMode = cUpper +) + +func (c info) isCased() bool { + return c&casedMask != 0 +} + +func (c info) isCaseIgnorable() bool { + return c&ignorableMask == ignorableValue +} + +func (c info) isNotCasedAndNotCaseIgnorable() bool { + return c&fullCasedMask == 0 +} + +func (c info) isCaseIgnorableAndNotCased() bool { + return c&fullCasedMask == cIgnorableUncased +} + +func (c info) isMid() bool { + return c&(fullCasedMask|isMidBit) == isMidBit|cIgnorableUncased +} + +// The case mapping implementation will need to know about various Canonical +// Combining Class (CCC) values. We encode two of these in the trie value: +// cccZero (0) and cccAbove (230). If the value is cccOther, it means that +// CCC(r) > 0, but not 230. A value of cccBreak means that CCC(r) == 0 and that +// the rune also has the break category Break (see below). +const ( + cccBreak info = iota << 4 + cccZero + cccAbove + cccOther + + cccMask = cccBreak | cccZero | cccAbove | cccOther +) + +const ( + starter = 0 + above = 230 + iotaSubscript = 240 +) + +// The exceptions slice holds data that does not fit in a normal info entry. +// The entry is pointed to by the exception index in an entry. It has the +// following format: +// +// Header +// byte 0: +// 7..6 unused +// 5..4 CCC type (same bits as entry) +// 3 unused +// 2..0 length of fold +// +// byte 1: +// 7..6 unused +// 5..3 length of 1st mapping of case type +// 2..0 length of 2nd mapping of case type +// +// case 1st 2nd +// lower -> upper, title +// upper -> lower, title +// title -> lower, upper +// +// Lengths with the value 0x7 indicate no value and implies no change. +// A length of 0 indicates a mapping to zero-length string. +// +// Body bytes: +// case folding bytes +// lowercase mapping bytes +// uppercase mapping bytes +// titlecase mapping bytes +// closure mapping bytes (for NFKC_Casefold). (TODO) +// +// Fallbacks: +// missing fold -> lower +// missing title -> upper +// all missing -> original rune +// +// exceptions starts with a dummy byte to enforce that there is no zero index +// value. +const ( + lengthMask = 0x07 + lengthBits = 3 + noChange = 0 +) + +// References to generated trie. + +var trie = newCaseTrie(0) + +var sparse = sparseBlocks{ + values: sparseValues[:], + offsets: sparseOffsets[:], +} + +// Sparse block lookup code. + +// valueRange is an entry in a sparse block. +type valueRange struct { + value uint16 + lo, hi byte +} + +type sparseBlocks struct { + values []valueRange + offsets []uint16 +} + +// lookup returns the value from values block n for byte b using binary search. +func (s *sparseBlocks) lookup(n uint32, b byte) uint16 { + lo := s.offsets[n] + hi := s.offsets[n+1] + for lo < hi { + m := lo + (hi-lo)/2 + r := s.values[m] + if r.lo <= b && b <= r.hi { + return r.value + } + if b < r.lo { + hi = m + } else { + lo = m + 1 + } + } + return 0 +} + +// lastRuneForTesting is the last rune used for testing. Everything after this +// is boring. +const lastRuneForTesting = rune(0x1FFFF) diff --git a/vendor/golang.org/x/text/internal/tag/tag.go b/vendor/golang.org/x/text/internal/tag/tag.go new file mode 100644 index 000000000..2cf4ecd29 --- /dev/null +++ b/vendor/golang.org/x/text/internal/tag/tag.go @@ -0,0 +1,100 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tag contains functionality handling tags and related data. +package tag + +import "sort" + +// An Index converts tags to a compact numeric value. +// +// All elements are of size 4. Tags may be up to 4 bytes long. Excess bytes can +// be used to store additional information about the tag. +type Index string + +// Elem returns the element data at the given index. +func (s Index) Elem(x int) string { + return string(s[x*4 : x*4+4]) +} + +// Index reports the index of the given key or -1 if it could not be found. +// Only the first len(key) bytes from the start of the 4-byte entries will be +// considered for the search and the first match in Index will be returned. +func (s Index) Index(key []byte) int { + n := len(key) + // search the index of the first entry with an equal or higher value than + // key in s. + index := sort.Search(len(s)/4, func(i int) bool { + return cmp(s[i*4:i*4+n], key) != -1 + }) + i := index * 4 + if cmp(s[i:i+len(key)], key) != 0 { + return -1 + } + return index +} + +// Next finds the next occurrence of key after index x, which must have been +// obtained from a call to Index using the same key. It returns x+1 or -1. +func (s Index) Next(key []byte, x int) int { + if x++; x*4 < len(s) && cmp(s[x*4:x*4+len(key)], key) == 0 { + return x + } + return -1 +} + +// cmp returns an integer comparing a and b lexicographically. +func cmp(a Index, b []byte) int { + n := len(a) + if len(b) < n { + n = len(b) + } + for i, c := range b[:n] { + switch { + case a[i] > c: + return 1 + case a[i] < c: + return -1 + } + } + switch { + case len(a) < len(b): + return -1 + case len(a) > len(b): + return 1 + } + return 0 +} + +// Compare returns an integer comparing a and b lexicographically. +func Compare(a string, b []byte) int { + return cmp(Index(a), b) +} + +// FixCase reformats b to the same pattern of cases as form. +// If returns false if string b is malformed. +func FixCase(form string, b []byte) bool { + if len(form) != len(b) { + return false + } + for i, c := range b { + if form[i] <= 'Z' { + if c >= 'a' { + c -= 'z' - 'Z' + } + if c < 'A' || 'Z' < c { + return false + } + } else { + if c <= 'Z' { + c += 'z' - 'Z' + } + if c < 'a' || 'z' < c { + return false + } + } + b[i] = c + } + return true +} diff --git a/vendor/golang.org/x/text/language/Makefile b/vendor/golang.org/x/text/language/Makefile new file mode 100644 index 000000000..79f005784 --- /dev/null +++ b/vendor/golang.org/x/text/language/Makefile @@ -0,0 +1,16 @@ +# Copyright 2013 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +CLEANFILES+=maketables + +maketables: maketables.go + go build $^ + +tables: maketables + ./maketables > tables.go + gofmt -w -s tables.go + +# Build (but do not run) maketables during testing, +# just to make sure it still compiles. +testshort: maketables diff --git a/vendor/golang.org/x/text/language/common.go b/vendor/golang.org/x/text/language/common.go new file mode 100644 index 000000000..a255bb0a5 --- /dev/null +++ b/vendor/golang.org/x/text/language/common.go @@ -0,0 +1,16 @@ +// This file was generated by go generate; DO NOT EDIT + +package language + +// This file contains code common to the maketables.go and the package code. + +// langAliasType is the type of an alias in langAliasMap. 
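The Index type in internal/tag above stores fixed-width 4-byte entries in a single string and locates keys with sort.Search. A standalone sketch of that lookup (internal packages cannot be imported from outside x/text, so the search is mirrored inline; the three entries are made up for illustration):

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Mirrors Index.Index: entries are sorted, fixed-width 4-byte strings;
	// sort.Search returns the first entry that compares >= the key.
	const index = "deu eng nld " // hypothetical data: three 4-byte elements
	key := "eng"
	i := sort.Search(len(index)/4, func(i int) bool {
		return strings.Compare(index[i*4:i*4+len(key)], key) >= 0
	})
	if i*4+len(key) <= len(index) && index[i*4:i*4+len(key)] == key {
		fmt.Println("found at element", i) // element 1
	} else {
		fmt.Println("not found")
	}
}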
+type langAliasType int8 + +const ( + langDeprecated langAliasType = iota + langMacro + langLegacy + + langAliasTypeUnknown langAliasType = -1 +) diff --git a/vendor/golang.org/x/text/language/coverage.go b/vendor/golang.org/x/text/language/coverage.go new file mode 100644 index 000000000..101fd23c1 --- /dev/null +++ b/vendor/golang.org/x/text/language/coverage.go @@ -0,0 +1,197 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "fmt" + "sort" +) + +// The Coverage interface is used to define the level of coverage of an +// internationalization service. Note that not all types are supported by all +// services. As lists may be generated on the fly, it is recommended that users +// of a Coverage cache the results. +type Coverage interface { + // Tags returns the list of supported tags. + Tags() []Tag + + // BaseLanguages returns the list of supported base languages. + BaseLanguages() []Base + + // Scripts returns the list of supported scripts. + Scripts() []Script + + // Regions returns the list of supported regions. + Regions() []Region +} + +var ( + // Supported defines a Coverage that lists all supported subtags. Tags + // always returns nil. + Supported Coverage = allSubtags{} +) + +// TODO: +// - Support Variants, numbering systems. +// - CLDR coverage levels. +// - Set of common tags defined in this package. + +type allSubtags struct{} + +// Regions returns the list of supported regions. As all regions are in a +// consecutive range, it simply returns a slice of numbers in increasing order. +// The "undefined" region is not returned. +func (s allSubtags) Regions() []Region { + reg := make([]Region, numRegions) + for i := range reg { + reg[i] = Region{regionID(i + 1)} + } + return reg +} + +// Scripts returns the list of supported scripts. As all scripts are in a +// consecutive range, it simply returns a slice of numbers in increasing order. +// The "undefined" script is not returned. +func (s allSubtags) Scripts() []Script { + scr := make([]Script, numScripts) + for i := range scr { + scr[i] = Script{scriptID(i + 1)} + } + return scr +} + +// BaseLanguages returns the list of all supported base languages. It generates +// the list by traversing the internal structures. +func (s allSubtags) BaseLanguages() []Base { + base := make([]Base, 0, numLanguages) + for i := 0; i < langNoIndexOffset; i++ { + // We included "und" already for the value 0. + if i != nonCanonicalUnd { + base = append(base, Base{langID(i)}) + } + } + i := langNoIndexOffset + for _, v := range langNoIndex { + for k := 0; k < 8; k++ { + if v&1 == 1 { + base = append(base, Base{langID(i)}) + } + v >>= 1 + i++ + } + } + return base +} + +// Tags always returns nil. +func (s allSubtags) Tags() []Tag { + return nil +} + +// coverage is used used by NewCoverage which is used as a convenient way for +// creating Coverage implementations for partially defined data. Very often a +// package will only need to define a subset of slices. coverage provides a +// convenient way to do this. Moreover, packages using NewCoverage, instead of +// their own implementation, will not break if later new slice types are added. 
+type coverage struct { + tags func() []Tag + bases func() []Base + scripts func() []Script + regions func() []Region +} + +func (s *coverage) Tags() []Tag { + if s.tags == nil { + return nil + } + return s.tags() +} + +// bases implements sort.Interface and is used to sort base languages. +type bases []Base + +func (b bases) Len() int { + return len(b) +} + +func (b bases) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func (b bases) Less(i, j int) bool { + return b[i].langID < b[j].langID +} + +// BaseLanguages returns the result from calling s.bases if it is specified or +// otherwise derives the set of supported base languages from tags. +func (s *coverage) BaseLanguages() []Base { + if s.bases == nil { + tags := s.Tags() + if len(tags) == 0 { + return nil + } + a := make([]Base, len(tags)) + for i, t := range tags { + a[i] = Base{langID(t.lang)} + } + sort.Sort(bases(a)) + k := 0 + for i := 1; i < len(a); i++ { + if a[k] != a[i] { + k++ + a[k] = a[i] + } + } + return a[:k+1] + } + return s.bases() +} + +func (s *coverage) Scripts() []Script { + if s.scripts == nil { + return nil + } + return s.scripts() +} + +func (s *coverage) Regions() []Region { + if s.regions == nil { + return nil + } + return s.regions() +} + +// NewCoverage returns a Coverage for the given lists. It is typically used by +// packages providing internationalization services to define their level of +// coverage. A list may be of type []T or func() []T, where T is either Tag, +// Base, Script or Region. The returned Coverage derives the value for Bases +// from Tags if no func or slice for []Base is specified. For other unspecified +// types the returned Coverage will return nil for the respective methods. +func NewCoverage(list ...interface{}) Coverage { + s := &coverage{} + for _, x := range list { + switch v := x.(type) { + case func() []Base: + s.bases = v + case func() []Script: + s.scripts = v + case func() []Region: + s.regions = v + case func() []Tag: + s.tags = v + case []Base: + s.bases = func() []Base { return v } + case []Script: + s.scripts = func() []Script { return v } + case []Region: + s.regions = func() []Region { return v } + case []Tag: + s.tags = func() []Tag { return v } + default: + panic(fmt.Sprintf("language: unsupported set type %T", v)) + } + } + return s +} diff --git a/vendor/golang.org/x/text/language/gen_common.go b/vendor/golang.org/x/text/language/gen_common.go new file mode 100644 index 000000000..83ce18013 --- /dev/null +++ b/vendor/golang.org/x/text/language/gen_common.go @@ -0,0 +1,20 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// This file contains code common to the maketables.go and the package code. + +// langAliasType is the type of an alias in langAliasMap. +type langAliasType int8 + +const ( + langDeprecated langAliasType = iota + langMacro + langLegacy + + langAliasTypeUnknown langAliasType = -1 +) diff --git a/vendor/golang.org/x/text/language/gen_index.go b/vendor/golang.org/x/text/language/gen_index.go new file mode 100644 index 000000000..eef555cd3 --- /dev/null +++ b/vendor/golang.org/x/text/language/gen_index.go @@ -0,0 +1,162 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// This file generates derivative tables based on the language package itself. 
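NewCoverage above accepts plain slices or func() slices for each subtag type and derives BaseLanguages from Tags when no base list is supplied. A brief usage sketch (the tag list is illustrative only):

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// Only a []language.Tag is supplied, so BaseLanguages is derived from
	// the tags and the other lists stay nil.
	c := language.NewCoverage([]language.Tag{
		language.MustParse("en"),
		language.MustParse("nl"),
		language.MustParse("pt-BR"),
	})
	fmt.Println(len(c.Tags()))          // 3
	fmt.Println(len(c.BaseLanguages())) // 3 distinct base languages
	fmt.Println(c.Scripts() == nil)     // true: no script list was given
}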
+ +import ( + "bytes" + "flag" + "fmt" + "io/ioutil" + "log" + "reflect" + "sort" + "strings" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/language" + "golang.org/x/text/unicode/cldr" +) + +var ( + test = flag.Bool("test", false, + "test existing tables; can be used to compare web data with package data.") + + draft = flag.String("draft", + "contributed", + `Minimal draft requirements (approved, contributed, provisional, unconfirmed).`) +) + +func main() { + gen.Init() + + // Read the CLDR zip file. + r := gen.OpenCLDRCoreZip() + defer r.Close() + + d := &cldr.Decoder{} + data, err := d.DecodeZip(r) + if err != nil { + log.Fatalf("DecodeZip: %v", err) + } + + w := gen.NewCodeWriter() + defer func() { + buf := &bytes.Buffer{} + + if _, err = w.WriteGo(buf, "language"); err != nil { + log.Fatalf("Error formatting file index.go: %v", err) + } + + // Since we're generating a table for our own package we need to rewrite + // doing the equivalent of go fmt -r 'language.b -> b'. Using + // bytes.Replace will do. + out := bytes.Replace(buf.Bytes(), []byte("language."), nil, -1) + if err := ioutil.WriteFile("index.go", out, 0600); err != nil { + log.Fatalf("Could not create file index.go: %v", err) + } + }() + + m := map[language.Tag]bool{} + for _, lang := range data.Locales() { + // We include all locales unconditionally to be consistent with en_US. + // We want en_US, even though it has no data associated with it. + + // TODO: put any of the languages for which no data exists at the end + // of the index. This allows all components based on ICU to use that + // as the cutoff point. + // if x := data.RawLDML(lang); false || + // x.LocaleDisplayNames != nil || + // x.Characters != nil || + // x.Delimiters != nil || + // x.Measurement != nil || + // x.Dates != nil || + // x.Numbers != nil || + // x.Units != nil || + // x.ListPatterns != nil || + // x.Collations != nil || + // x.Segmentations != nil || + // x.Rbnf != nil || + // x.Annotations != nil || + // x.Metadata != nil { + + // TODO: support POSIX natively, albeit non-standard. + tag := language.Make(strings.Replace(lang, "_POSIX", "-u-va-posix", 1)) + m[tag] = true + // } + } + // Include locales for plural rules, which uses a different structure. + for _, plurals := range data.Supplemental().Plurals { + for _, rules := range plurals.PluralRules { + for _, lang := range strings.Split(rules.Locales, " ") { + m[language.Make(lang)] = true + } + } + } + + var core, special []language.Tag + + for t := range m { + if x := t.Extensions(); len(x) != 0 && fmt.Sprint(x) != "[u-va-posix]" { + log.Fatalf("Unexpected extension %v in %v", x, t) + } + if len(t.Variants()) == 0 && len(t.Extensions()) == 0 { + core = append(core, t) + } else { + special = append(special, t) + } + } + + w.WriteComment(` + NumCompactTags is the number of common tags. The maximum tag is + NumCompactTags-1.`) + w.WriteConst("NumCompactTags", len(core)+len(special)) + + sort.Sort(byAlpha(special)) + w.WriteVar("specialTags", special) + + // TODO: order by frequency? + sort.Sort(byAlpha(core)) + + // Size computations are just an estimate. + w.Size += int(reflect.TypeOf(map[uint32]uint16{}).Size()) + w.Size += len(core) * 6 // size of uint32 and uint16 + + fmt.Fprintln(w) + fmt.Fprintln(w, "var coreTags = map[uint32]uint16{") + fmt.Fprintln(w, "0x0: 0, // und") + i := len(special) + 1 // Und and special tags already written. 
+ for _, t := range core { + if t == language.Und { + continue + } + fmt.Fprint(w.Hash, t, i) + b, s, r := t.Raw() + fmt.Fprintf(w, "0x%s%s%s: %d, // %s\n", + getIndex(b, 3), // 3 is enough as it is guaranteed to be a compact number + getIndex(s, 2), + getIndex(r, 3), + i, t) + i++ + } + fmt.Fprintln(w, "}") +} + +// getIndex prints the subtag type and extracts its index of size nibble. +// If the index is less than n nibbles, the result is prefixed with 0s. +func getIndex(x interface{}, n int) string { + s := fmt.Sprintf("%#v", x) // s is of form Type{typeID: 0x00} + s = s[strings.Index(s, "0x")+2 : len(s)-1] + return strings.Repeat("0", n-len(s)) + s +} + +type byAlpha []language.Tag + +func (a byAlpha) Len() int { return len(a) } +func (a byAlpha) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byAlpha) Less(i, j int) bool { return a[i].String() < a[j].String() } diff --git a/vendor/golang.org/x/text/language/go1_1.go b/vendor/golang.org/x/text/language/go1_1.go new file mode 100644 index 000000000..380f4c09f --- /dev/null +++ b/vendor/golang.org/x/text/language/go1_1.go @@ -0,0 +1,38 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.2 + +package language + +import "sort" + +func sortStable(s sort.Interface) { + ss := stableSort{ + s: s, + pos: make([]int, s.Len()), + } + for i := range ss.pos { + ss.pos[i] = i + } + sort.Sort(&ss) +} + +type stableSort struct { + s sort.Interface + pos []int +} + +func (s *stableSort) Len() int { + return len(s.pos) +} + +func (s *stableSort) Less(i, j int) bool { + return s.s.Less(i, j) || !s.s.Less(j, i) && s.pos[i] < s.pos[j] +} + +func (s *stableSort) Swap(i, j int) { + s.s.Swap(i, j) + s.pos[i], s.pos[j] = s.pos[j], s.pos[i] +} diff --git a/vendor/golang.org/x/text/language/go1_2.go b/vendor/golang.org/x/text/language/go1_2.go new file mode 100644 index 000000000..38268c57a --- /dev/null +++ b/vendor/golang.org/x/text/language/go1_2.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.2 + +package language + +import "sort" + +var sortStable = sort.Stable diff --git a/vendor/golang.org/x/text/language/index.go b/vendor/golang.org/x/text/language/index.go new file mode 100644 index 000000000..7fa9cc82d --- /dev/null +++ b/vendor/golang.org/x/text/language/index.go @@ -0,0 +1,762 @@ +// This file was generated by go generate; DO NOT EDIT + +package language + +// NumCompactTags is the number of common tags. The maximum tag is +// NumCompactTags-1. 
+const NumCompactTags = 747 + +var specialTags = []Tag{ // 2 elements + 0: {lang: 0x61, region: 0x6d, script: 0x0, pVariant: 0x5, pExt: 0xe, str: "ca-ES-valencia"}, + 1: {lang: 0x9b, region: 0x132, script: 0x0, pVariant: 0x5, pExt: 0x5, str: "en-US-u-va-posix"}, +} // Size: 72 bytes + +var coreTags = map[uint32]uint16{ + 0x0: 0, // und + 0x00a00000: 3, // af + 0x00a000d0: 4, // af-NA + 0x00a0015e: 5, // af-ZA + 0x00b00000: 6, // agq + 0x00b00051: 7, // agq-CM + 0x00d00000: 8, // ak + 0x00d0007e: 9, // ak-GH + 0x01100000: 10, // am + 0x0110006e: 11, // am-ET + 0x01500000: 12, // ar + 0x01500001: 13, // ar-001 + 0x01500022: 14, // ar-AE + 0x01500038: 15, // ar-BH + 0x01500061: 16, // ar-DJ + 0x01500066: 17, // ar-DZ + 0x0150006a: 18, // ar-EG + 0x0150006b: 19, // ar-EH + 0x0150006c: 20, // ar-ER + 0x01500095: 21, // ar-IL + 0x01500099: 22, // ar-IQ + 0x0150009f: 23, // ar-JO + 0x015000a6: 24, // ar-KM + 0x015000aa: 25, // ar-KW + 0x015000ae: 26, // ar-LB + 0x015000b7: 27, // ar-LY + 0x015000b8: 28, // ar-MA + 0x015000c7: 29, // ar-MR + 0x015000df: 30, // ar-OM + 0x015000eb: 31, // ar-PS + 0x015000f1: 32, // ar-QA + 0x01500106: 33, // ar-SA + 0x01500109: 34, // ar-SD + 0x01500113: 35, // ar-SO + 0x01500115: 36, // ar-SS + 0x0150011a: 37, // ar-SY + 0x0150011e: 38, // ar-TD + 0x01500126: 39, // ar-TN + 0x0150015b: 40, // ar-YE + 0x01c00000: 41, // as + 0x01c00097: 42, // as-IN + 0x01d00000: 43, // asa + 0x01d0012d: 44, // asa-TZ + 0x01f00000: 45, // ast + 0x01f0006d: 46, // ast-ES + 0x02400000: 47, // az + 0x0241e000: 48, // az-Cyrl + 0x0241e031: 49, // az-Cyrl-AZ + 0x02452000: 50, // az-Latn + 0x02452031: 51, // az-Latn-AZ + 0x02a00000: 52, // bas + 0x02a00051: 53, // bas-CM + 0x02f00000: 54, // be + 0x02f00046: 55, // be-BY + 0x03100000: 56, // bem + 0x0310015f: 57, // bem-ZM + 0x03300000: 58, // bez + 0x0330012d: 59, // bez-TZ + 0x03800000: 60, // bg + 0x03800037: 61, // bg-BG + 0x03c00000: 62, // bh + 0x04900000: 63, // bm + 0x049000c1: 64, // bm-ML + 0x04b00000: 65, // bn + 0x04b00034: 66, // bn-BD + 0x04b00097: 67, // bn-IN + 0x04c00000: 68, // bo + 0x04c00052: 69, // bo-CN + 0x04c00097: 70, // bo-IN + 0x05000000: 71, // br + 0x05000076: 72, // br-FR + 0x05300000: 73, // brx + 0x05300097: 74, // brx-IN + 0x05400000: 75, // bs + 0x0541e000: 76, // bs-Cyrl + 0x0541e032: 77, // bs-Cyrl-BA + 0x05452000: 78, // bs-Latn + 0x05452032: 79, // bs-Latn-BA + 0x06100000: 80, // ca + 0x06100021: 81, // ca-AD + 0x0610006d: 82, // ca-ES + 0x06100076: 83, // ca-FR + 0x0610009c: 84, // ca-IT + 0x06400000: 85, // ce + 0x06400104: 86, // ce-RU + 0x06600000: 87, // cgg + 0x0660012f: 88, // cgg-UG + 0x06c00000: 89, // chr + 0x06c00132: 90, // chr-US + 0x06f00000: 91, // ckb + 0x06f00099: 92, // ckb-IQ + 0x06f0009a: 93, // ckb-IR + 0x07900000: 94, // cs + 0x0790005d: 95, // cs-CZ + 0x07d00000: 96, // cu + 0x07d00104: 97, // cu-RU + 0x07f00000: 98, // cy + 0x07f00079: 99, // cy-GB + 0x08000000: 100, // da + 0x08000062: 101, // da-DK + 0x08000080: 102, // da-GL + 0x08300000: 103, // dav + 0x083000a2: 104, // dav-KE + 0x08500000: 105, // de + 0x0850002d: 106, // de-AT + 0x08500035: 107, // de-BE + 0x0850004d: 108, // de-CH + 0x0850005f: 109, // de-DE + 0x085000b0: 110, // de-LI + 0x085000b5: 111, // de-LU + 0x08800000: 112, // dje + 0x088000d2: 113, // dje-NE + 0x08b00000: 114, // dsb + 0x08b0005f: 115, // dsb-DE + 0x08f00000: 116, // dua + 0x08f00051: 117, // dua-CM + 0x09000000: 118, // dv + 0x09100000: 119, // dyo + 0x09100112: 120, // dyo-SN + 0x09300000: 121, // dz + 0x09300042: 122, // dz-BT + 0x09400000: 
123, // ebu + 0x094000a2: 124, // ebu-KE + 0x09500000: 125, // ee + 0x0950007e: 126, // ee-GH + 0x09500120: 127, // ee-TG + 0x09a00000: 128, // el + 0x09a0005c: 129, // el-CY + 0x09a00085: 130, // el-GR + 0x09b00000: 131, // en + 0x09b00001: 132, // en-001 + 0x09b0001a: 133, // en-150 + 0x09b00024: 134, // en-AG + 0x09b00025: 135, // en-AI + 0x09b0002c: 136, // en-AS + 0x09b0002d: 137, // en-AT + 0x09b0002e: 138, // en-AU + 0x09b00033: 139, // en-BB + 0x09b00035: 140, // en-BE + 0x09b00039: 141, // en-BI + 0x09b0003c: 142, // en-BM + 0x09b00041: 143, // en-BS + 0x09b00045: 144, // en-BW + 0x09b00047: 145, // en-BZ + 0x09b00048: 146, // en-CA + 0x09b00049: 147, // en-CC + 0x09b0004d: 148, // en-CH + 0x09b0004f: 149, // en-CK + 0x09b00051: 150, // en-CM + 0x09b0005b: 151, // en-CX + 0x09b0005c: 152, // en-CY + 0x09b0005f: 153, // en-DE + 0x09b00060: 154, // en-DG + 0x09b00062: 155, // en-DK + 0x09b00063: 156, // en-DM + 0x09b0006c: 157, // en-ER + 0x09b00070: 158, // en-FI + 0x09b00071: 159, // en-FJ + 0x09b00072: 160, // en-FK + 0x09b00073: 161, // en-FM + 0x09b00079: 162, // en-GB + 0x09b0007a: 163, // en-GD + 0x09b0007d: 164, // en-GG + 0x09b0007e: 165, // en-GH + 0x09b0007f: 166, // en-GI + 0x09b00081: 167, // en-GM + 0x09b00088: 168, // en-GU + 0x09b0008a: 169, // en-GY + 0x09b0008b: 170, // en-HK + 0x09b00094: 171, // en-IE + 0x09b00095: 172, // en-IL + 0x09b00096: 173, // en-IM + 0x09b00097: 174, // en-IN + 0x09b00098: 175, // en-IO + 0x09b0009d: 176, // en-JE + 0x09b0009e: 177, // en-JM + 0x09b000a2: 178, // en-KE + 0x09b000a5: 179, // en-KI + 0x09b000a7: 180, // en-KN + 0x09b000ab: 181, // en-KY + 0x09b000af: 182, // en-LC + 0x09b000b2: 183, // en-LR + 0x09b000b3: 184, // en-LS + 0x09b000bd: 185, // en-MG + 0x09b000be: 186, // en-MH + 0x09b000c4: 187, // en-MO + 0x09b000c5: 188, // en-MP + 0x09b000c8: 189, // en-MS + 0x09b000c9: 190, // en-MT + 0x09b000ca: 191, // en-MU + 0x09b000cc: 192, // en-MW + 0x09b000ce: 193, // en-MY + 0x09b000d0: 194, // en-NA + 0x09b000d3: 195, // en-NF + 0x09b000d4: 196, // en-NG + 0x09b000d7: 197, // en-NL + 0x09b000db: 198, // en-NR + 0x09b000dd: 199, // en-NU + 0x09b000de: 200, // en-NZ + 0x09b000e4: 201, // en-PG + 0x09b000e5: 202, // en-PH + 0x09b000e6: 203, // en-PK + 0x09b000e9: 204, // en-PN + 0x09b000ea: 205, // en-PR + 0x09b000ee: 206, // en-PW + 0x09b00105: 207, // en-RW + 0x09b00107: 208, // en-SB + 0x09b00108: 209, // en-SC + 0x09b00109: 210, // en-SD + 0x09b0010a: 211, // en-SE + 0x09b0010b: 212, // en-SG + 0x09b0010c: 213, // en-SH + 0x09b0010d: 214, // en-SI + 0x09b00110: 215, // en-SL + 0x09b00115: 216, // en-SS + 0x09b00119: 217, // en-SX + 0x09b0011b: 218, // en-SZ + 0x09b0011d: 219, // en-TC + 0x09b00123: 220, // en-TK + 0x09b00127: 221, // en-TO + 0x09b0012a: 222, // en-TT + 0x09b0012b: 223, // en-TV + 0x09b0012d: 224, // en-TZ + 0x09b0012f: 225, // en-UG + 0x09b00131: 226, // en-UM + 0x09b00132: 227, // en-US + 0x09b00136: 228, // en-VC + 0x09b00139: 229, // en-VG + 0x09b0013a: 230, // en-VI + 0x09b0013c: 231, // en-VU + 0x09b0013f: 232, // en-WS + 0x09b0015e: 233, // en-ZA + 0x09b0015f: 234, // en-ZM + 0x09b00161: 235, // en-ZW + 0x09c00000: 236, // eo + 0x09c00001: 237, // eo-001 + 0x09d00000: 238, // es + 0x09d0001e: 239, // es-419 + 0x09d0002b: 240, // es-AR + 0x09d0003e: 241, // es-BO + 0x09d00040: 242, // es-BR + 0x09d00050: 243, // es-CL + 0x09d00053: 244, // es-CO + 0x09d00055: 245, // es-CR + 0x09d00058: 246, // es-CU + 0x09d00064: 247, // es-DO + 0x09d00067: 248, // es-EA + 0x09d00068: 249, // es-EC + 0x09d0006d: 250, // 
es-ES + 0x09d00084: 251, // es-GQ + 0x09d00087: 252, // es-GT + 0x09d0008d: 253, // es-HN + 0x09d00092: 254, // es-IC + 0x09d000cd: 255, // es-MX + 0x09d000d6: 256, // es-NI + 0x09d000e0: 257, // es-PA + 0x09d000e2: 258, // es-PE + 0x09d000e5: 259, // es-PH + 0x09d000ea: 260, // es-PR + 0x09d000ef: 261, // es-PY + 0x09d00118: 262, // es-SV + 0x09d00132: 263, // es-US + 0x09d00133: 264, // es-UY + 0x09d00138: 265, // es-VE + 0x09f00000: 266, // et + 0x09f00069: 267, // et-EE + 0x0a100000: 268, // eu + 0x0a10006d: 269, // eu-ES + 0x0a200000: 270, // ewo + 0x0a200051: 271, // ewo-CM + 0x0a400000: 272, // fa + 0x0a400023: 273, // fa-AF + 0x0a40009a: 274, // fa-IR + 0x0a600000: 275, // ff + 0x0a600051: 276, // ff-CM + 0x0a600082: 277, // ff-GN + 0x0a6000c7: 278, // ff-MR + 0x0a600112: 279, // ff-SN + 0x0a800000: 280, // fi + 0x0a800070: 281, // fi-FI + 0x0aa00000: 282, // fil + 0x0aa000e5: 283, // fil-PH + 0x0ad00000: 284, // fo + 0x0ad00062: 285, // fo-DK + 0x0ad00074: 286, // fo-FO + 0x0af00000: 287, // fr + 0x0af00035: 288, // fr-BE + 0x0af00036: 289, // fr-BF + 0x0af00039: 290, // fr-BI + 0x0af0003a: 291, // fr-BJ + 0x0af0003b: 292, // fr-BL + 0x0af00048: 293, // fr-CA + 0x0af0004a: 294, // fr-CD + 0x0af0004b: 295, // fr-CF + 0x0af0004c: 296, // fr-CG + 0x0af0004d: 297, // fr-CH + 0x0af0004e: 298, // fr-CI + 0x0af00051: 299, // fr-CM + 0x0af00061: 300, // fr-DJ + 0x0af00066: 301, // fr-DZ + 0x0af00076: 302, // fr-FR + 0x0af00078: 303, // fr-GA + 0x0af0007c: 304, // fr-GF + 0x0af00082: 305, // fr-GN + 0x0af00083: 306, // fr-GP + 0x0af00084: 307, // fr-GQ + 0x0af0008f: 308, // fr-HT + 0x0af000a6: 309, // fr-KM + 0x0af000b5: 310, // fr-LU + 0x0af000b8: 311, // fr-MA + 0x0af000b9: 312, // fr-MC + 0x0af000bc: 313, // fr-MF + 0x0af000bd: 314, // fr-MG + 0x0af000c1: 315, // fr-ML + 0x0af000c6: 316, // fr-MQ + 0x0af000c7: 317, // fr-MR + 0x0af000ca: 318, // fr-MU + 0x0af000d1: 319, // fr-NC + 0x0af000d2: 320, // fr-NE + 0x0af000e3: 321, // fr-PF + 0x0af000e8: 322, // fr-PM + 0x0af00100: 323, // fr-RE + 0x0af00105: 324, // fr-RW + 0x0af00108: 325, // fr-SC + 0x0af00112: 326, // fr-SN + 0x0af0011a: 327, // fr-SY + 0x0af0011e: 328, // fr-TD + 0x0af00120: 329, // fr-TG + 0x0af00126: 330, // fr-TN + 0x0af0013c: 331, // fr-VU + 0x0af0013d: 332, // fr-WF + 0x0af0015c: 333, // fr-YT + 0x0b600000: 334, // fur + 0x0b60009c: 335, // fur-IT + 0x0b900000: 336, // fy + 0x0b9000d7: 337, // fy-NL + 0x0ba00000: 338, // ga + 0x0ba00094: 339, // ga-IE + 0x0c200000: 340, // gd + 0x0c200079: 341, // gd-GB + 0x0c800000: 342, // gl + 0x0c80006d: 343, // gl-ES + 0x0d200000: 344, // gsw + 0x0d20004d: 345, // gsw-CH + 0x0d200076: 346, // gsw-FR + 0x0d2000b0: 347, // gsw-LI + 0x0d300000: 348, // gu + 0x0d300097: 349, // gu-IN + 0x0d700000: 350, // guw + 0x0d800000: 351, // guz + 0x0d8000a2: 352, // guz-KE + 0x0d900000: 353, // gv + 0x0d900096: 354, // gv-IM + 0x0dc00000: 355, // ha + 0x0dc0007e: 356, // ha-GH + 0x0dc000d2: 357, // ha-NE + 0x0dc000d4: 358, // ha-NG + 0x0de00000: 359, // haw + 0x0de00132: 360, // haw-US + 0x0e000000: 361, // he + 0x0e000095: 362, // he-IL + 0x0e100000: 363, // hi + 0x0e100097: 364, // hi-IN + 0x0ee00000: 365, // hr + 0x0ee00032: 366, // hr-BA + 0x0ee0008e: 367, // hr-HR + 0x0ef00000: 368, // hsb + 0x0ef0005f: 369, // hsb-DE + 0x0f200000: 370, // hu + 0x0f200090: 371, // hu-HU + 0x0f300000: 372, // hy + 0x0f300027: 373, // hy-AM + 0x0f800000: 374, // id + 0x0f800093: 375, // id-ID + 0x0fa00000: 376, // ig + 0x0fa000d4: 377, // ig-NG + 0x0fb00000: 378, // ii + 0x0fb00052: 379, // ii-CN + 
0x10200000: 380, // is + 0x1020009b: 381, // is-IS + 0x10300000: 382, // it + 0x1030004d: 383, // it-CH + 0x1030009c: 384, // it-IT + 0x10300111: 385, // it-SM + 0x10400000: 386, // iu + 0x10700000: 387, // ja + 0x107000a0: 388, // ja-JP + 0x10900000: 389, // jbo + 0x10a00000: 390, // jgo + 0x10a00051: 391, // jgo-CM + 0x10c00000: 392, // jmc + 0x10c0012d: 393, // jmc-TZ + 0x10f00000: 394, // jv + 0x11100000: 395, // ka + 0x1110007b: 396, // ka-GE + 0x11300000: 397, // kab + 0x11300066: 398, // kab-DZ + 0x11500000: 399, // kaj + 0x11600000: 400, // kam + 0x116000a2: 401, // kam-KE + 0x11900000: 402, // kcg + 0x11b00000: 403, // kde + 0x11b0012d: 404, // kde-TZ + 0x11d00000: 405, // kea + 0x11d00059: 406, // kea-CV + 0x12800000: 407, // khq + 0x128000c1: 408, // khq-ML + 0x12b00000: 409, // ki + 0x12b000a2: 410, // ki-KE + 0x12f00000: 411, // kk + 0x12f000ac: 412, // kk-KZ + 0x13000000: 413, // kkj + 0x13000051: 414, // kkj-CM + 0x13100000: 415, // kl + 0x13100080: 416, // kl-GL + 0x13200000: 417, // kln + 0x132000a2: 418, // kln-KE + 0x13300000: 419, // km + 0x133000a4: 420, // km-KH + 0x13500000: 421, // kn + 0x13500097: 422, // kn-IN + 0x13600000: 423, // ko + 0x136000a8: 424, // ko-KP + 0x136000a9: 425, // ko-KR + 0x13800000: 426, // kok + 0x13800097: 427, // kok-IN + 0x14100000: 428, // ks + 0x14100097: 429, // ks-IN + 0x14200000: 430, // ksb + 0x1420012d: 431, // ksb-TZ + 0x14300000: 432, // ksf + 0x14300051: 433, // ksf-CM + 0x14400000: 434, // ksh + 0x1440005f: 435, // ksh-DE + 0x14500000: 436, // ku + 0x14a00000: 437, // kw + 0x14a00079: 438, // kw-GB + 0x14d00000: 439, // ky + 0x14d000a3: 440, // ky-KG + 0x15100000: 441, // lag + 0x1510012d: 442, // lag-TZ + 0x15400000: 443, // lb + 0x154000b5: 444, // lb-LU + 0x15a00000: 445, // lg + 0x15a0012f: 446, // lg-UG + 0x16100000: 447, // lkt + 0x16100132: 448, // lkt-US + 0x16400000: 449, // ln + 0x16400029: 450, // ln-AO + 0x1640004a: 451, // ln-CD + 0x1640004b: 452, // ln-CF + 0x1640004c: 453, // ln-CG + 0x16500000: 454, // lo + 0x165000ad: 455, // lo-LA + 0x16800000: 456, // lrc + 0x16800099: 457, // lrc-IQ + 0x1680009a: 458, // lrc-IR + 0x16900000: 459, // lt + 0x169000b4: 460, // lt-LT + 0x16b00000: 461, // lu + 0x16b0004a: 462, // lu-CD + 0x16d00000: 463, // luo + 0x16d000a2: 464, // luo-KE + 0x16e00000: 465, // luy + 0x16e000a2: 466, // luy-KE + 0x17000000: 467, // lv + 0x170000b6: 468, // lv-LV + 0x17a00000: 469, // mas + 0x17a000a2: 470, // mas-KE + 0x17a0012d: 471, // mas-TZ + 0x18000000: 472, // mer + 0x180000a2: 473, // mer-KE + 0x18200000: 474, // mfe + 0x182000ca: 475, // mfe-MU + 0x18300000: 476, // mg + 0x183000bd: 477, // mg-MG + 0x18400000: 478, // mgh + 0x184000cf: 479, // mgh-MZ + 0x18500000: 480, // mgo + 0x18500051: 481, // mgo-CM + 0x18c00000: 482, // mk + 0x18c000c0: 483, // mk-MK + 0x18d00000: 484, // ml + 0x18d00097: 485, // ml-IN + 0x18f00000: 486, // mn + 0x18f000c3: 487, // mn-MN + 0x19600000: 488, // mr + 0x19600097: 489, // mr-IN + 0x19a00000: 490, // ms + 0x19a0003d: 491, // ms-BN + 0x19a000ce: 492, // ms-MY + 0x19a0010b: 493, // ms-SG + 0x19b00000: 494, // mt + 0x19b000c9: 495, // mt-MT + 0x19d00000: 496, // mua + 0x19d00051: 497, // mua-CM + 0x1a500000: 498, // my + 0x1a5000c2: 499, // my-MM + 0x1a900000: 500, // mzn + 0x1a90009a: 501, // mzn-IR + 0x1ab00000: 502, // nah + 0x1ae00000: 503, // naq + 0x1ae000d0: 504, // naq-NA + 0x1af00000: 505, // nb + 0x1af000d8: 506, // nb-NO + 0x1af0010e: 507, // nb-SJ + 0x1b100000: 508, // nd + 0x1b100161: 509, // nd-ZW + 0x1b400000: 510, // ne + 0x1b400097: 511, // 
ne-IN + 0x1b4000d9: 512, // ne-NP + 0x1bd00000: 513, // nl + 0x1bd0002f: 514, // nl-AW + 0x1bd00035: 515, // nl-BE + 0x1bd0003f: 516, // nl-BQ + 0x1bd0005a: 517, // nl-CW + 0x1bd000d7: 518, // nl-NL + 0x1bd00114: 519, // nl-SR + 0x1bd00119: 520, // nl-SX + 0x1be00000: 521, // nmg + 0x1be00051: 522, // nmg-CM + 0x1bf00000: 523, // nn + 0x1bf000d8: 524, // nn-NO + 0x1c000000: 525, // nnh + 0x1c000051: 526, // nnh-CM + 0x1c100000: 527, // no + 0x1c500000: 528, // nqo + 0x1c600000: 529, // nr + 0x1c800000: 530, // nso + 0x1c900000: 531, // nus + 0x1c900115: 532, // nus-SS + 0x1cc00000: 533, // ny + 0x1ce00000: 534, // nyn + 0x1ce0012f: 535, // nyn-UG + 0x1d200000: 536, // om + 0x1d20006e: 537, // om-ET + 0x1d2000a2: 538, // om-KE + 0x1d300000: 539, // or + 0x1d300097: 540, // or-IN + 0x1d400000: 541, // os + 0x1d40007b: 542, // os-GE + 0x1d400104: 543, // os-RU + 0x1d700000: 544, // pa + 0x1d705000: 545, // pa-Arab + 0x1d7050e6: 546, // pa-Arab-PK + 0x1d72f000: 547, // pa-Guru + 0x1d72f097: 548, // pa-Guru-IN + 0x1db00000: 549, // pap + 0x1e700000: 550, // pl + 0x1e7000e7: 551, // pl-PL + 0x1ed00000: 552, // prg + 0x1ed00001: 553, // prg-001 + 0x1ee00000: 554, // ps + 0x1ee00023: 555, // ps-AF + 0x1ef00000: 556, // pt + 0x1ef00029: 557, // pt-AO + 0x1ef00040: 558, // pt-BR + 0x1ef0004d: 559, // pt-CH + 0x1ef00059: 560, // pt-CV + 0x1ef00084: 561, // pt-GQ + 0x1ef00089: 562, // pt-GW + 0x1ef000b5: 563, // pt-LU + 0x1ef000c4: 564, // pt-MO + 0x1ef000cf: 565, // pt-MZ + 0x1ef000ec: 566, // pt-PT + 0x1ef00116: 567, // pt-ST + 0x1ef00124: 568, // pt-TL + 0x1f100000: 569, // qu + 0x1f10003e: 570, // qu-BO + 0x1f100068: 571, // qu-EC + 0x1f1000e2: 572, // qu-PE + 0x1fc00000: 573, // rm + 0x1fc0004d: 574, // rm-CH + 0x20100000: 575, // rn + 0x20100039: 576, // rn-BI + 0x20300000: 577, // ro + 0x203000ba: 578, // ro-MD + 0x20300102: 579, // ro-RO + 0x20500000: 580, // rof + 0x2050012d: 581, // rof-TZ + 0x20700000: 582, // ru + 0x20700046: 583, // ru-BY + 0x207000a3: 584, // ru-KG + 0x207000ac: 585, // ru-KZ + 0x207000ba: 586, // ru-MD + 0x20700104: 587, // ru-RU + 0x2070012e: 588, // ru-UA + 0x20a00000: 589, // rw + 0x20a00105: 590, // rw-RW + 0x20b00000: 591, // rwk + 0x20b0012d: 592, // rwk-TZ + 0x20f00000: 593, // sah + 0x20f00104: 594, // sah-RU + 0x21000000: 595, // saq + 0x210000a2: 596, // saq-KE + 0x21400000: 597, // sbp + 0x2140012d: 598, // sbp-TZ + 0x21c00000: 599, // sdh + 0x21d00000: 600, // se + 0x21d00070: 601, // se-FI + 0x21d000d8: 602, // se-NO + 0x21d0010a: 603, // se-SE + 0x21f00000: 604, // seh + 0x21f000cf: 605, // seh-MZ + 0x22100000: 606, // ses + 0x221000c1: 607, // ses-ML + 0x22200000: 608, // sg + 0x2220004b: 609, // sg-CF + 0x22600000: 610, // shi + 0x22652000: 611, // shi-Latn + 0x226520b8: 612, // shi-Latn-MA + 0x226d2000: 613, // shi-Tfng + 0x226d20b8: 614, // shi-Tfng-MA + 0x22800000: 615, // si + 0x228000b1: 616, // si-LK + 0x22a00000: 617, // sk + 0x22a0010f: 618, // sk-SK + 0x22c00000: 619, // sl + 0x22c0010d: 620, // sl-SI + 0x23000000: 621, // sma + 0x23100000: 622, // smi + 0x23200000: 623, // smj + 0x23300000: 624, // smn + 0x23300070: 625, // smn-FI + 0x23500000: 626, // sms + 0x23600000: 627, // sn + 0x23600161: 628, // sn-ZW + 0x23800000: 629, // so + 0x23800061: 630, // so-DJ + 0x2380006e: 631, // so-ET + 0x238000a2: 632, // so-KE + 0x23800113: 633, // so-SO + 0x23a00000: 634, // sq + 0x23a00026: 635, // sq-AL + 0x23a000c0: 636, // sq-MK + 0x23a0014a: 637, // sq-XK + 0x23b00000: 638, // sr + 0x23b1e000: 639, // sr-Cyrl + 0x23b1e032: 640, // sr-Cyrl-BA + 
0x23b1e0bb: 641, // sr-Cyrl-ME + 0x23b1e103: 642, // sr-Cyrl-RS + 0x23b1e14a: 643, // sr-Cyrl-XK + 0x23b52000: 644, // sr-Latn + 0x23b52032: 645, // sr-Latn-BA + 0x23b520bb: 646, // sr-Latn-ME + 0x23b52103: 647, // sr-Latn-RS + 0x23b5214a: 648, // sr-Latn-XK + 0x24000000: 649, // ss + 0x24100000: 650, // ssy + 0x24200000: 651, // st + 0x24700000: 652, // sv + 0x24700030: 653, // sv-AX + 0x24700070: 654, // sv-FI + 0x2470010a: 655, // sv-SE + 0x24800000: 656, // sw + 0x2480004a: 657, // sw-CD + 0x248000a2: 658, // sw-KE + 0x2480012d: 659, // sw-TZ + 0x2480012f: 660, // sw-UG + 0x24f00000: 661, // syr + 0x25100000: 662, // ta + 0x25100097: 663, // ta-IN + 0x251000b1: 664, // ta-LK + 0x251000ce: 665, // ta-MY + 0x2510010b: 666, // ta-SG + 0x25800000: 667, // te + 0x25800097: 668, // te-IN + 0x25a00000: 669, // teo + 0x25a000a2: 670, // teo-KE + 0x25a0012f: 671, // teo-UG + 0x25d00000: 672, // th + 0x25d00121: 673, // th-TH + 0x26100000: 674, // ti + 0x2610006c: 675, // ti-ER + 0x2610006e: 676, // ti-ET + 0x26200000: 677, // tig + 0x26400000: 678, // tk + 0x26400125: 679, // tk-TM + 0x26b00000: 680, // tn + 0x26c00000: 681, // to + 0x26c00127: 682, // to-TO + 0x26f00000: 683, // tr + 0x26f0005c: 684, // tr-CY + 0x26f00129: 685, // tr-TR + 0x27200000: 686, // ts + 0x27e00000: 687, // twq + 0x27e000d2: 688, // twq-NE + 0x28200000: 689, // tzm + 0x282000b8: 690, // tzm-MA + 0x28400000: 691, // ug + 0x28400052: 692, // ug-CN + 0x28600000: 693, // uk + 0x2860012e: 694, // uk-UA + 0x28c00000: 695, // ur + 0x28c00097: 696, // ur-IN + 0x28c000e6: 697, // ur-PK + 0x28d00000: 698, // uz + 0x28d05000: 699, // uz-Arab + 0x28d05023: 700, // uz-Arab-AF + 0x28d1e000: 701, // uz-Cyrl + 0x28d1e134: 702, // uz-Cyrl-UZ + 0x28d52000: 703, // uz-Latn + 0x28d52134: 704, // uz-Latn-UZ + 0x28e00000: 705, // vai + 0x28e52000: 706, // vai-Latn + 0x28e520b2: 707, // vai-Latn-LR + 0x28ed9000: 708, // vai-Vaii + 0x28ed90b2: 709, // vai-Vaii-LR + 0x28f00000: 710, // ve + 0x29200000: 711, // vi + 0x2920013b: 712, // vi-VN + 0x29700000: 713, // vo + 0x29700001: 714, // vo-001 + 0x29a00000: 715, // vun + 0x29a0012d: 716, // vun-TZ + 0x29b00000: 717, // wa + 0x29c00000: 718, // wae + 0x29c0004d: 719, // wae-CH + 0x2a400000: 720, // wo + 0x2a900000: 721, // xh + 0x2b100000: 722, // xog + 0x2b10012f: 723, // xog-UG + 0x2b700000: 724, // yav + 0x2b700051: 725, // yav-CM + 0x2b900000: 726, // yi + 0x2b900001: 727, // yi-001 + 0x2ba00000: 728, // yo + 0x2ba0003a: 729, // yo-BJ + 0x2ba000d4: 730, // yo-NG + 0x2bd00000: 731, // yue + 0x2bd0008b: 732, // yue-HK + 0x2c300000: 733, // zgh + 0x2c3000b8: 734, // zgh-MA + 0x2c400000: 735, // zh + 0x2c434000: 736, // zh-Hans + 0x2c434052: 737, // zh-Hans-CN + 0x2c43408b: 738, // zh-Hans-HK + 0x2c4340c4: 739, // zh-Hans-MO + 0x2c43410b: 740, // zh-Hans-SG + 0x2c435000: 741, // zh-Hant + 0x2c43508b: 742, // zh-Hant-HK + 0x2c4350c4: 743, // zh-Hant-MO + 0x2c43512c: 744, // zh-Hant-TW + 0x2c600000: 745, // zu + 0x2c60015e: 746, // zu-ZA +} + +// Total table size 4550 bytes (4KiB); checksum: B6D49547 diff --git a/vendor/golang.org/x/text/language/language.go b/vendor/golang.org/x/text/language/language.go new file mode 100644 index 000000000..5c6dcbd94 --- /dev/null +++ b/vendor/golang.org/x/text/language/language.go @@ -0,0 +1,975 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:generate go run maketables.go gen_common.go -output tables.go +//go:generate go run gen_index.go + +// Package language implements BCP 47 language tags and related functionality. +// +// The Tag type, which is used to represent languages, is agnostic to the +// meaning of its subtags. Tags are not fully canonicalized to preserve +// information that may be valuable in certain contexts. As a consequence, two +// different tags may represent identical languages. +// +// Initializing language- or locale-specific components usually consists of +// two steps. The first step is to select a display language based on the +// preferred languages of the user and the languages supported by an application. +// The second step is to create the language-specific services based on +// this selection. Each is discussed in more detail below. +// +// Matching preferred against supported languages +// +// An application may support various languages. This list is typically limited +// by the languages for which there exist translations of the user interface. +// Similarly, a user may provide a list of preferred languages which is limited +// by the languages understood by this user. +// An application should use a Matcher to find the best supported language based +// on the user's preferred list. +// Matchers are aware of the intricacies of equivalence between languages. +// The default Matcher implementation takes into account things such as +// deprecated subtags, legacy tags, and mutual intelligibility between scripts +// and languages. +// +// A Matcher for English, Australian English, Danish, and standard Mandarin can +// be defined as follows: +// +// var matcher = language.NewMatcher([]language.Tag{ +// language.English, // The first language is used as fallback. +// language.MustParse("en-AU"), +// language.Danish, +// language.Chinese, +// }) +// +// The following code selects the best match for someone speaking Spanish and +// Norwegian: +// +// preferred := []language.Tag{ language.Spanish, language.Norwegian } +// tag, _, _ := matcher.Match(preferred...) +// +// In this case, the best match is Danish, as Danish is a sufficiently close match to +// Norwegian to not have to fall back to the default. +// See ParseAcceptLanguage on how to handle the Accept-Language HTTP header. +// +// Selecting language-specific services +// +// One should always use the Tag returned by the Matcher to create an instance +// of any of the language-specific services provided by the text repository. +// This prevents the mixing of languages, such as having a different language for +// messages and display names, as well as improper casing or sorting order for +// the selected language. +// Using the returned Tag also allows user-defined settings, such as collation +// order or numbering system to be transparently passed as options. +// +// If you have language-specific data in your application, however, it will in +// most cases suffice to use the index returned by the matcher to identify +// the user language. +// The following loop provides an alternative in case this is not sufficient: +// +// supported := map[language.Tag]data{ +// language.English: enData, +// language.MustParse("en-AU"): enAUData, +// language.Danish: daData, +// language.Chinese: zhData, +// } +// tag, _, _ := matcher.Match(preferred...)
+// for ; tag != language.Und; tag = tag.Parent() { +// if v, ok := supported[tag]; ok { +// return v +// } +// } +// return enData // should not reach here +// +// Repeatedly taking the Parent of the tag returned by Match will eventually +// match one of the tags used to initialize the Matcher. +// +// Canonicalization +// +// By default, only legacy and deprecated tags are converted into their +// canonical equivalent. All other information is preserved. This approach makes +// the confidence scores more accurate and allows matchers to distinguish +// between variants that are otherwise lost. +// +// As a consequence, two tags that should be treated as identical according to +// BCP 47 or CLDR, like "en-Latn" and "en", will be represented differently. The +// Matchers will handle such distinctions, though, and are aware of the +// equivalence relations. The CanonType type can be used to alter the +// canonicalization form. +// +// References +// +// BCP 47 - Tags for Identifying Languages +// http://tools.ietf.org/html/bcp47 +package language + +// TODO: Remove above NOTE after: +// - verifying that tables are dropped correctly (most notably matcher tables). + +import ( + "errors" + "fmt" + "strings" +) + +const ( + // maxCoreSize is the maximum size of a BCP 47 tag without variants and + // extensions. Equals max lang (3) + script (4) + max reg (3) + 2 dashes. + maxCoreSize = 12 + + // max99thPercentileSize is a somewhat arbitrary buffer size that presumably + // is large enough to hold at least 99% of the BCP 47 tags. + max99thPercentileSize = 32 + + // maxSimpleUExtensionSize is the maximum size of a -u extension with one + // key-type pair. Equals len("-u-") + key (2) + dash + max value (8). + maxSimpleUExtensionSize = 14 +) + +// Tag represents a BCP 47 language tag. It is used to specify an instance of a +// specific language or locale. All language tag values are guaranteed to be +// well-formed. +type Tag struct { + lang langID + region regionID + script scriptID + pVariant byte // offset in str, includes preceding '-' + pExt uint16 // offset of first extension, includes preceding '-' + + // str is the string representation of the Tag. It will only be used if the + // tag has variants or extensions. + str string +} + +// Make is a convenience wrapper for Parse that omits the error. +// In case of an error, a sensible default is returned. +func Make(s string) Tag { + return Default.Make(s) +} + +// Make is a convenience wrapper for c.Parse that omits the error. +// In case of an error, a sensible default is returned. +func (c CanonType) Make(s string) Tag { + t, _ := c.Parse(s) + return t +} + +// Raw returns the raw base language, script and region, without making an +// attempt to infer their values. +func (t Tag) Raw() (b Base, s Script, r Region) { + return Base{t.lang}, Script{t.script}, Region{t.region} +} + +// equalTags compares language, script and region subtags only. +func (t Tag) equalTags(a Tag) bool { + return t.lang == a.lang && t.script == a.script && t.region == a.region +} + +// IsRoot returns true if t is equal to language "und". +func (t Tag) IsRoot() bool { + if int(t.pVariant) < len(t.str) { + return false + } + return t.equalTags(und) +} + +// private reports whether the Tag consists solely of a private use tag. +func (t Tag) private() bool { + return t.str != "" && t.pVariant == 0 +} + +// CanonType can be used to enable or disable various types of canonicalization. 
+type CanonType int + +const ( + // Replace deprecated base languages with their preferred replacements. + DeprecatedBase CanonType = 1 << iota + // Replace deprecated scripts with their preferred replacements. + DeprecatedScript + // Replace deprecated regions with their preferred replacements. + DeprecatedRegion + // Remove redundant scripts. + SuppressScript + // Normalize legacy encodings. This includes legacy languages defined in + // CLDR as well as bibliographic codes defined in ISO-639. + Legacy + // Map the dominant language of a macro language group to the macro language + // subtag. For example cmn -> zh. + Macro + // The CLDR flag should be used if full compatibility with CLDR is required. + // There are a few cases where language.Tag may differ from CLDR. To follow all + // of CLDR's suggestions, use All|CLDR. + CLDR + + // Raw can be used to Compose or Parse without Canonicalization. + Raw CanonType = 0 + + // Replace all deprecated tags with their preferred replacements. + Deprecated = DeprecatedBase | DeprecatedScript | DeprecatedRegion + + // All canonicalizations recommended by BCP 47. + BCP47 = Deprecated | SuppressScript + + // All canonicalizations. + All = BCP47 | Legacy | Macro + + // Default is the canonicalization used by Parse, Make and Compose. To + // preserve as much information as possible, canonicalizations that remove + // potentially valuable information are not included. The Matcher is + // designed to recognize similar tags that would be the same if + // they were canonicalized using All. + Default = Deprecated | Legacy + + canonLang = DeprecatedBase | Legacy | Macro + + // TODO: LikelyScript, LikelyRegion: suppress similar to ICU. +) + +// canonicalize returns the canonicalized equivalent of the tag and +// whether there was any change. +func (t Tag) canonicalize(c CanonType) (Tag, bool) { + if c == Raw { + return t, false + } + changed := false + if c&SuppressScript != 0 { + if t.lang < langNoIndexOffset && uint8(t.script) == suppressScript[t.lang] { + t.script = 0 + changed = true + } + } + if c&canonLang != 0 { + for { + if l, aliasType := normLang(t.lang); l != t.lang { + switch aliasType { + case langLegacy: + if c&Legacy != 0 { + if t.lang == _sh && t.script == 0 { + t.script = _Latn + } + t.lang = l + changed = true + } + case langMacro: + if c&Macro != 0 { + // We deviate here from CLDR. The mapping "nb" -> "no" + // qualifies as a typical Macro language mapping. However, + // for legacy reasons, CLDR maps "no", the macro language + // code for Norwegian, to the dominant variant "nb". This + // change is currently under consideration for CLDR as well. + // See http://unicode.org/cldr/trac/ticket/2698 and also + // http://unicode.org/cldr/trac/ticket/1790 for some of the + // practical implications. TODO: this check could be removed + // if CLDR adopts this change. + if c&CLDR == 0 || t.lang != _nb { + changed = true + t.lang = l + } + } + case langDeprecated: + if c&DeprecatedBase != 0 { + if t.lang == _mo && t.region == 0 { + t.region = _MD + } + t.lang = l + changed = true + // Other canonicalization types may still apply. 
+ continue + } + } + } else if c&Legacy != 0 && t.lang == _no && c&CLDR != 0 { + t.lang = _nb + changed = true + } + break + } + } + if c&DeprecatedScript != 0 { + if t.script == _Qaai { + changed = true + t.script = _Zinh + } + } + if c&DeprecatedRegion != 0 { + if r := normRegion(t.region); r != 0 { + changed = true + t.region = r + } + } + return t, changed +} + +// Canonicalize returns the canonicalized equivalent of the tag. +func (c CanonType) Canonicalize(t Tag) (Tag, error) { + t, changed := t.canonicalize(c) + if changed { + t.remakeString() + } + return t, nil +} + +// Confidence indicates the level of certainty for a given return value. +// For example, Serbian may be written in Cyrillic or Latin script. +// The confidence level indicates whether a value was explicitly specified, +// whether it is typically the only possible value, or whether there is +// an ambiguity. +type Confidence int + +const ( + No Confidence = iota // full confidence that there was no match + Low // most likely value picked out of a set of alternatives + High // value is generally assumed to be the correct match + Exact // exact match or explicitly specified value +) + +var confName = []string{"No", "Low", "High", "Exact"} + +func (c Confidence) String() string { + return confName[c] +} + +// remakeString is used to update t.str in case lang, script or region changed. +// It is assumed that pExt and pVariant still point to the start of the +// respective parts. +func (t *Tag) remakeString() { + if t.str == "" { + return + } + extra := t.str[t.pVariant:] + if t.pVariant > 0 { + extra = extra[1:] + } + if t.equalTags(und) && strings.HasPrefix(extra, "x-") { + t.str = extra + t.pVariant = 0 + t.pExt = 0 + return + } + var buf [max99thPercentileSize]byte // avoid extra memory allocation in most cases. + b := buf[:t.genCoreBytes(buf[:])] + if extra != "" { + diff := len(b) - int(t.pVariant) + b = append(b, '-') + b = append(b, extra...) + t.pVariant = uint8(int(t.pVariant) + diff) + t.pExt = uint16(int(t.pExt) + diff) + } else { + t.pVariant = uint8(len(b)) + t.pExt = uint16(len(b)) + } + t.str = string(b) +} + +// genCoreBytes writes a string for the base languages, script and region tags +// to the given buffer and returns the number of bytes written. It will never +// write more than maxCoreSize bytes. +func (t *Tag) genCoreBytes(buf []byte) int { + n := t.lang.stringToBuf(buf[:]) + if t.script != 0 { + n += copy(buf[n:], "-") + n += copy(buf[n:], t.script.String()) + } + if t.region != 0 { + n += copy(buf[n:], "-") + n += copy(buf[n:], t.region.String()) + } + return n +} + +// String returns the canonical string representation of the language tag. +func (t Tag) String() string { + if t.str != "" { + return t.str + } + if t.script == 0 && t.region == 0 { + return t.lang.String() + } + buf := [maxCoreSize]byte{} + return string(buf[:t.genCoreBytes(buf[:])]) +} + +// Base returns the base language of the language tag. If the base language is +// unspecified, an attempt will be made to infer it from the context. +// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change. +func (t Tag) Base() (Base, Confidence) { + if t.lang != 0 { + return Base{t.lang}, Exact + } + c := High + if t.script == 0 && !(Region{t.region}).IsCountry() { + c = Low + } + if tag, err := addTags(t); err == nil && tag.lang != 0 { + return Base{tag.lang}, c + } + return Base{0}, No +} + +// Script infers the script for the language tag. 
If it was not explicitly given, it will infer +// a most likely candidate. +// If more than one script is commonly used for a language, the most likely one +// is returned with a low confidence indication. For example, it returns (Cyrl, Low) +// for Serbian. +// If a script cannot be inferred (Zzzz, No) is returned. We do not use Zyyy (undetermined) +// as one would suspect from the IANA registry for BCP 47. In a Unicode context Zyyy marks +// common characters (like 1, 2, 3, '.', etc.) and is therefore more like multiple scripts. +// See http://www.unicode.org/reports/tr24/#Values for more details. Zzzz is also used for +// unknown value in CLDR. (Zzzz, Exact) is returned if Zzzz was explicitly specified. +// Note that an inferred script is never guaranteed to be the correct one. Latin is +// almost exclusively used for Afrikaans, but Arabic has been used for some texts +// in the past. Also, the script that is commonly used may change over time. +// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change. +func (t Tag) Script() (Script, Confidence) { + if t.script != 0 { + return Script{t.script}, Exact + } + sc, c := scriptID(_Zzzz), No + if t.lang < langNoIndexOffset { + if scr := scriptID(suppressScript[t.lang]); scr != 0 { + // Note: it is not always the case that a language with a suppress + // script value is only written in one script (e.g. kk, ms, pa). + if t.region == 0 { + return Script{scriptID(scr)}, High + } + sc, c = scr, High + } + } + if tag, err := addTags(t); err == nil { + if tag.script != sc { + sc, c = tag.script, Low + } + } else { + t, _ = (Deprecated | Macro).Canonicalize(t) + if tag, err := addTags(t); err == nil && tag.script != sc { + sc, c = tag.script, Low + } + } + return Script{sc}, c +} + +// Region returns the region for the language tag. If it was not explicitly given, it will +// infer a most likely candidate from the context. +// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change. +func (t Tag) Region() (Region, Confidence) { + if t.region != 0 { + return Region{t.region}, Exact + } + if t, err := addTags(t); err == nil { + return Region{t.region}, Low // TODO: differentiate between high and low. + } + t, _ = (Deprecated | Macro).Canonicalize(t) + if tag, err := addTags(t); err == nil { + return Region{tag.region}, Low + } + return Region{_ZZ}, No // TODO: return world instead of undetermined? +} + +// Variant returns the variants specified explicitly for this language tag. +// or nil if no variant was specified. +func (t Tag) Variants() []Variant { + v := []Variant{} + if int(t.pVariant) < int(t.pExt) { + for x, str := "", t.str[t.pVariant:t.pExt]; str != ""; { + x, str = nextToken(str) + v = append(v, Variant{x}) + } + } + return v +} + +// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a +// specific language are substituted with fields from the parent language. +// The parent for a language may change for newer versions of CLDR. +func (t Tag) Parent() Tag { + if t.str != "" { + // Strip the variants and extensions. 
+ t, _ = Raw.Compose(t.Raw()) + if t.region == 0 && t.script != 0 && t.lang != 0 { + base, _ := addTags(Tag{lang: t.lang}) + if base.script == t.script { + return Tag{lang: t.lang} + } + } + return t + } + if t.lang != 0 { + if t.region != 0 { + maxScript := t.script + if maxScript == 0 { + max, _ := addTags(t) + maxScript = max.script + } + + for i := range parents { + if langID(parents[i].lang) == t.lang && scriptID(parents[i].maxScript) == maxScript { + for _, r := range parents[i].fromRegion { + if regionID(r) == t.region { + return Tag{ + lang: t.lang, + script: scriptID(parents[i].script), + region: regionID(parents[i].toRegion), + } + } + } + } + } + + // Strip the script if it is the default one. + base, _ := addTags(Tag{lang: t.lang}) + if base.script != maxScript { + return Tag{lang: t.lang, script: maxScript} + } + return Tag{lang: t.lang} + } else if t.script != 0 { + // The parent for an base-script pair with a non-default script is + // "und" instead of the base language. + base, _ := addTags(Tag{lang: t.lang}) + if base.script != t.script { + return und + } + return Tag{lang: t.lang} + } + } + return und +} + +// returns token t and the rest of the string. +func nextToken(s string) (t, tail string) { + p := strings.Index(s[1:], "-") + if p == -1 { + return s[1:], "" + } + p++ + return s[1:p], s[p:] +} + +// Extension is a single BCP 47 extension. +type Extension struct { + s string +} + +// String returns the string representation of the extension, including the +// type tag. +func (e Extension) String() string { + return e.s +} + +// ParseExtension parses s as an extension and returns it on success. +func ParseExtension(s string) (e Extension, err error) { + scan := makeScannerString(s) + var end int + if n := len(scan.token); n != 1 { + return Extension{}, errSyntax + } + scan.toLower(0, len(scan.b)) + end = parseExtension(&scan) + if end != len(s) { + return Extension{}, errSyntax + } + return Extension{string(scan.b)}, nil +} + +// Type returns the one-byte extension type of e. It returns 0 for the zero +// exception. +func (e Extension) Type() byte { + if e.s == "" { + return 0 + } + return e.s[0] +} + +// Tokens returns the list of tokens of e. +func (e Extension) Tokens() []string { + return strings.Split(e.s, "-") +} + +// Extension returns the extension of type x for tag t. It will return +// false for ok if t does not have the requested extension. The returned +// extension will be invalid in this case. +func (t Tag) Extension(x byte) (ext Extension, ok bool) { + for i := int(t.pExt); i < len(t.str)-1; { + var ext string + i, ext = getExtension(t.str, i) + if ext[0] == x { + return Extension{ext}, true + } + } + return Extension{string(x)}, false +} + +// Extensions returns all extensions of t. +func (t Tag) Extensions() []Extension { + e := []Extension{} + for i := int(t.pExt); i < len(t.str)-1; { + var ext string + i, ext = getExtension(t.str, i) + e = append(e, Extension{ext}) + } + return e +} + +// TypeForKey returns the type associated with the given key, where key and type +// are of the allowed values defined for the Unicode locale extension ('u') in +// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// TypeForKey will traverse the inheritance chain to get the correct value. 
+func (t Tag) TypeForKey(key string) string { + if start, end, _ := t.findTypeForKey(key); end != start { + return t.str[start:end] + } + return "" +} + +var ( + errPrivateUse = errors.New("cannot set a key on a private use tag") + errInvalidArguments = errors.New("invalid key or type") +) + +// SetTypeForKey returns a new Tag with the key set to type, where key and type +// are of the allowed values defined for the Unicode locale extension ('u') in +// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// An empty value removes an existing pair with the same key. +func (t Tag) SetTypeForKey(key, value string) (Tag, error) { + if t.private() { + return t, errPrivateUse + } + if len(key) != 2 { + return t, errInvalidArguments + } + + // Remove the setting if value is "". + if value == "" { + start, end, _ := t.findTypeForKey(key) + if start != end { + // Remove key tag and leading '-'. + start -= 4 + + // Remove a possible empty extension. + if (end == len(t.str) || t.str[end+2] == '-') && t.str[start-2] == '-' { + start -= 2 + } + if start == int(t.pVariant) && end == len(t.str) { + t.str = "" + t.pVariant, t.pExt = 0, 0 + } else { + t.str = fmt.Sprintf("%s%s", t.str[:start], t.str[end:]) + } + } + return t, nil + } + + if len(value) < 3 || len(value) > 8 { + return t, errInvalidArguments + } + + var ( + buf [maxCoreSize + maxSimpleUExtensionSize]byte + uStart int // start of the -u extension. + ) + + // Generate the tag string if needed. + if t.str == "" { + uStart = t.genCoreBytes(buf[:]) + buf[uStart] = '-' + uStart++ + } + + // Create new key-type pair and parse it to verify. + b := buf[uStart:] + copy(b, "u-") + copy(b[2:], key) + b[4] = '-' + b = b[:5+copy(b[5:], value)] + scan := makeScanner(b) + if parseExtensions(&scan); scan.err != nil { + return t, scan.err + } + + // Assemble the replacement string. + if t.str == "" { + t.pVariant, t.pExt = byte(uStart-1), uint16(uStart-1) + t.str = string(buf[:uStart+len(b)]) + } else { + s := t.str + start, end, hasExt := t.findTypeForKey(key) + if start == end { + if hasExt { + b = b[2:] + } + t.str = fmt.Sprintf("%s-%s%s", s[:start], b, s[end:]) + } else { + t.str = fmt.Sprintf("%s%s%s", s[:start], value, s[end:]) + } + } + return t, nil +} + +// findKeyAndType returns the start and end position for the type corresponding +// to key or the point at which to insert the key-value pair if the type +// wasn't found. The hasExt return value reports whether an -u extension was present. +// Note: the extensions are typically very small and are likely to contain +// only one key-type pair. +func (t Tag) findTypeForKey(key string) (start, end int, hasExt bool) { + p := int(t.pExt) + if len(key) != 2 || p == len(t.str) || p == 0 { + return p, p, false + } + s := t.str + + // Find the correct extension. + for p++; s[p] != 'u'; p++ { + if s[p] > 'u' { + p-- + return p, p, false + } + if p = nextExtension(s, p); p == len(s) { + return len(s), len(s), false + } + } + // Proceed to the hyphen following the extension name. + p++ + + // curKey is the key currently being processed. + curKey := "" + + // Iterate over keys until we get the end of a section. + for { + // p points to the hyphen preceding the current token. + if p3 := p + 3; s[p3] == '-' { + // Found a key. + // Check whether we just processed the key that was requested. + if curKey == key { + return start, p, true + } + // Set to the next key and continue scanning type tokens. 
+ curKey = s[p+1 : p3] + if curKey > key { + return p, p, true + } + // Start of the type token sequence. + start = p + 4 + // A type is at least 3 characters long. + p += 7 // 4 + 3 + } else { + // Attribute or type, which is at least 3 characters long. + p += 4 + } + // p points past the third character of a type or attribute. + max := p + 5 // maximum length of token plus hyphen. + if len(s) < max { + max = len(s) + } + for ; p < max && s[p] != '-'; p++ { + } + // Bail if we have exhausted all tokens or if the next token starts + // a new extension. + if p == len(s) || s[p+2] == '-' { + if curKey == key { + return start, p, true + } + return p, p, true + } + } +} + +// CompactIndex returns an index, where 0 <= index < NumCompactTags, for tags +// for which data exists in the text repository. The index will change over time +// and should not be stored in persistent storage. Extensions, except for the +// 'va' type of the 'u' extension, are ignored. It will return 0, false if no +// compact tag exists, where 0 is the index for the root language (Und). +func CompactIndex(t Tag) (index int, ok bool) { + // TODO: perhaps give more frequent tags a lower index. + // TODO: we could make the indexes stable. This will excluded some + // possibilities for optimization, so don't do this quite yet. + b, s, r := t.Raw() + if len(t.str) > 0 { + if strings.HasPrefix(t.str, "x-") { + // We have no entries for user-defined tags. + return 0, false + } + if uint16(t.pVariant) != t.pExt { + // There are no tags with variants and an u-va type. + if t.TypeForKey("va") != "" { + return 0, false + } + t, _ = Raw.Compose(b, s, r, t.Variants()) + } else if _, ok := t.Extension('u'); ok { + // Strip all but the 'va' entry. + variant := t.TypeForKey("va") + t, _ = Raw.Compose(b, s, r) + t, _ = t.SetTypeForKey("va", variant) + } + if len(t.str) > 0 { + // We have some variants. + for i, s := range specialTags { + if s == t { + return i + 1, true + } + } + return 0, false + } + } + // No variants specified: just compare core components. + // The key has the form lllssrrr, where l, s, and r are nibbles for + // respectively the langID, scriptID, and regionID. + key := uint32(b.langID) << (8 + 12) + key |= uint32(s.scriptID) << 12 + key |= uint32(r.regionID) + x, ok := coreTags[key] + return int(x), ok +} + +// Base is an ISO 639 language code, used for encoding the base language +// of a language tag. +type Base struct { + langID +} + +// ParseBase parses a 2- or 3-letter ISO 639 code. +// It returns a ValueError if s is a well-formed but unknown language identifier +// or another error if another error occurred. +func ParseBase(s string) (Base, error) { + if n := len(s); n < 2 || 3 < n { + return Base{}, errSyntax + } + var buf [3]byte + l, err := getLangID(buf[:copy(buf[:], s)]) + return Base{l}, err +} + +// Script is a 4-letter ISO 15924 code for representing scripts. +// It is idiomatically represented in title case. +type Script struct { + scriptID +} + +// ParseScript parses a 4-letter ISO 15924 code. +// It returns a ValueError if s is a well-formed but unknown script identifier +// or another error if another error occurred. +func ParseScript(s string) (Script, error) { + if len(s) != 4 { + return Script{}, errSyntax + } + var buf [4]byte + sc, err := getScriptID(script, buf[:copy(buf[:], s)]) + return Script{sc}, err +} + +// Region is an ISO 3166-1 or UN M.49 code for representing countries and regions. +type Region struct { + regionID +} + +// EncodeM49 returns the Region for the given UN M.49 code. 
+// It returns an error if r is not a valid code. +func EncodeM49(r int) (Region, error) { + rid, err := getRegionM49(r) + return Region{rid}, err +} + +// ParseRegion parses a 2- or 3-letter ISO 3166-1 or a UN M.49 code. +// It returns a ValueError if s is a well-formed but unknown region identifier +// or another error if another error occurred. +func ParseRegion(s string) (Region, error) { + if n := len(s); n < 2 || 3 < n { + return Region{}, errSyntax + } + var buf [3]byte + r, err := getRegionID(buf[:copy(buf[:], s)]) + return Region{r}, err +} + +// IsCountry returns whether this region is a country or autonomous area. This +// includes non-standard definitions from CLDR. +func (r Region) IsCountry() bool { + if r.regionID == 0 || r.IsGroup() || r.IsPrivateUse() && r.regionID != _XK { + return false + } + return true +} + +// IsGroup returns whether this region defines a collection of regions. This +// includes non-standard definitions from CLDR. +func (r Region) IsGroup() bool { + if r.regionID == 0 { + return false + } + return int(regionInclusion[r.regionID]) < len(regionContainment) +} + +// Contains returns whether Region c is contained by Region r. It returns true +// if c == r. +func (r Region) Contains(c Region) bool { + return r.regionID.contains(c.regionID) +} + +func (r regionID) contains(c regionID) bool { + if r == c { + return true + } + g := regionInclusion[r] + if g >= nRegionGroups { + return false + } + m := regionContainment[g] + + d := regionInclusion[c] + b := regionInclusionBits[d] + + // A contained country may belong to multiple disjoint groups. Matching any + // of these indicates containment. If the contained region is a group, it + // must strictly be a subset. + if d >= nRegionGroups { + return b&m != 0 + } + return b&^m == 0 +} + +var errNoTLD = errors.New("language: region is not a valid ccTLD") + +// TLD returns the country code top-level domain (ccTLD). UK is returned for GB. +// In all other cases it returns either the region itself or an error. +// +// This method may return an error for a region for which there exists a +// canonical form with a ccTLD. To get that ccTLD canonicalize r first. The +// region will already be canonicalized it was obtained from a Tag that was +// obtained using any of the default methods. +func (r Region) TLD() (Region, error) { + // See http://en.wikipedia.org/wiki/Country_code_top-level_domain for the + // difference between ISO 3166-1 and IANA ccTLD. + if r.regionID == _GB { + r = Region{_UK} + } + if (r.typ() & ccTLD) == 0 { + return Region{}, errNoTLD + } + return r, nil +} + +// Canonicalize returns the region or a possible replacement if the region is +// deprecated. It will not return a replacement for deprecated regions that +// are split into multiple regions. +func (r Region) Canonicalize() Region { + if cr := normRegion(r.regionID); cr != 0 { + return Region{cr} + } + return r +} + +// Variant represents a registered variant of a language as defined by BCP 47. +type Variant struct { + variant string +} + +// ParseVariant parses and returns a Variant. An error is returned if s is not +// a valid variant. +func ParseVariant(s string) (Variant, error) { + s = strings.ToLower(s) + if _, ok := variantIndex[s]; ok { + return Variant{s}, nil + } + return Variant{}, mkErrInvalid([]byte(s)) +} + +// String returns the string representation of the variant. 
+func (v Variant) String() string { + return v.variant +} diff --git a/vendor/golang.org/x/text/language/lookup.go b/vendor/golang.org/x/text/language/lookup.go new file mode 100644 index 000000000..1d80ac370 --- /dev/null +++ b/vendor/golang.org/x/text/language/lookup.go @@ -0,0 +1,396 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "bytes" + "fmt" + "sort" + "strconv" + + "golang.org/x/text/internal/tag" +) + +// findIndex tries to find the given tag in idx and returns a standardized error +// if it could not be found. +func findIndex(idx tag.Index, key []byte, form string) (index int, err error) { + if !tag.FixCase(form, key) { + return 0, errSyntax + } + i := idx.Index(key) + if i == -1 { + return 0, mkErrInvalid(key) + } + return i, nil +} + +func searchUint(imap []uint16, key uint16) int { + return sort.Search(len(imap), func(i int) bool { + return imap[i] >= key + }) +} + +type langID uint16 + +// getLangID returns the langID of s if s is a canonical subtag +// or langUnknown if s is not a canonical subtag. +func getLangID(s []byte) (langID, error) { + if len(s) == 2 { + return getLangISO2(s) + } + return getLangISO3(s) +} + +// mapLang returns the mapped langID of id according to mapping m. +func normLang(id langID) (langID, langAliasType) { + k := sort.Search(len(langAliasMap), func(i int) bool { + return langAliasMap[i].from >= uint16(id) + }) + if k < len(langAliasMap) && langAliasMap[k].from == uint16(id) { + return langID(langAliasMap[k].to), langAliasTypes[k] + } + return id, langAliasTypeUnknown +} + +// getLangISO2 returns the langID for the given 2-letter ISO language code +// or unknownLang if this does not exist. +func getLangISO2(s []byte) (langID, error) { + if !tag.FixCase("zz", s) { + return 0, errSyntax + } + if i := lang.Index(s); i != -1 && lang.Elem(i)[3] != 0 { + return langID(i), nil + } + return 0, mkErrInvalid(s) +} + +const base = 'z' - 'a' + 1 + +func strToInt(s []byte) uint { + v := uint(0) + for i := 0; i < len(s); i++ { + v *= base + v += uint(s[i] - 'a') + } + return v +} + +// converts the given integer to the original ASCII string passed to strToInt. +// len(s) must match the number of characters obtained. +func intToStr(v uint, s []byte) { + for i := len(s) - 1; i >= 0; i-- { + s[i] = byte(v%base) + 'a' + v /= base + } +} + +// getLangISO3 returns the langID for the given 3-letter ISO language code +// or unknownLang if this does not exist. +func getLangISO3(s []byte) (langID, error) { + if tag.FixCase("und", s) { + // first try to match canonical 3-letter entries + for i := lang.Index(s[:2]); i != -1; i = lang.Next(s[:2], i) { + if e := lang.Elem(i); e[3] == 0 && e[2] == s[2] { + // We treat "und" as special and always translate it to "unspecified". + // Note that ZZ and Zzzz are private use and are not treated as + // unspecified by default. + id := langID(i) + if id == nonCanonicalUnd { + return 0, nil + } + return id, nil + } + } + if i := altLangISO3.Index(s); i != -1 { + return langID(altLangIndex[altLangISO3.Elem(i)[3]]), nil + } + n := strToInt(s) + if langNoIndex[n/8]&(1<<(n%8)) != 0 { + return langID(n) + langNoIndexOffset, nil + } + // Check for non-canonical uses of ISO3. 
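+	// (Illustration: an input such as "eng" should be picked up by the scan below and
+	// resolved to the canonical "en" entry, since the 3-letter form is stored alongside
+	// the 2-letter code in the lang table.)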
+ for i := lang.Index(s[:1]); i != -1; i = lang.Next(s[:1], i) { + if e := lang.Elem(i); e[2] == s[1] && e[3] == s[2] { + return langID(i), nil + } + } + return 0, mkErrInvalid(s) + } + return 0, errSyntax +} + +// stringToBuf writes the string to b and returns the number of bytes +// written. cap(b) must be >= 3. +func (id langID) stringToBuf(b []byte) int { + if id >= langNoIndexOffset { + intToStr(uint(id)-langNoIndexOffset, b[:3]) + return 3 + } else if id == 0 { + return copy(b, "und") + } + l := lang[id<<2:] + if l[3] == 0 { + return copy(b, l[:3]) + } + return copy(b, l[:2]) +} + +// String returns the BCP 47 representation of the langID. +// Use b as variable name, instead of id, to ensure the variable +// used is consistent with that of Base in which this type is embedded. +func (b langID) String() string { + if b == 0 { + return "und" + } else if b >= langNoIndexOffset { + b -= langNoIndexOffset + buf := [3]byte{} + intToStr(uint(b), buf[:]) + return string(buf[:]) + } + l := lang.Elem(int(b)) + if l[3] == 0 { + return l[:3] + } + return l[:2] +} + +// ISO3 returns the ISO 639-3 language code. +func (b langID) ISO3() string { + if b == 0 || b >= langNoIndexOffset { + return b.String() + } + l := lang.Elem(int(b)) + if l[3] == 0 { + return l[:3] + } else if l[2] == 0 { + return altLangISO3.Elem(int(l[3]))[:3] + } + // This allocation will only happen for 3-letter ISO codes + // that are non-canonical BCP 47 language identifiers. + return l[0:1] + l[2:4] +} + +// IsPrivateUse reports whether this language code is reserved for private use. +func (b langID) IsPrivateUse() bool { + return langPrivateStart <= b && b <= langPrivateEnd +} + +type regionID uint16 + +// getRegionID returns the region id for s if s is a valid 2-letter region code +// or unknownRegion. +func getRegionID(s []byte) (regionID, error) { + if len(s) == 3 { + if isAlpha(s[0]) { + return getRegionISO3(s) + } + if i, err := strconv.ParseUint(string(s), 10, 10); err == nil { + return getRegionM49(int(i)) + } + } + return getRegionISO2(s) +} + +// getRegionISO2 returns the regionID for the given 2-letter ISO country code +// or unknownRegion if this does not exist. +func getRegionISO2(s []byte) (regionID, error) { + i, err := findIndex(regionISO, s, "ZZ") + if err != nil { + return 0, err + } + return regionID(i) + isoRegionOffset, nil +} + +// getRegionISO3 returns the regionID for the given 3-letter ISO country code +// or unknownRegion if this does not exist. 
+func getRegionISO3(s []byte) (regionID, error) { + if tag.FixCase("ZZZ", s) { + for i := regionISO.Index(s[:1]); i != -1; i = regionISO.Next(s[:1], i) { + if e := regionISO.Elem(i); e[2] == s[1] && e[3] == s[2] { + return regionID(i) + isoRegionOffset, nil + } + } + for i := 0; i < len(altRegionISO3); i += 3 { + if tag.Compare(altRegionISO3[i:i+3], s) == 0 { + return regionID(altRegionIDs[i/3]), nil + } + } + return 0, mkErrInvalid(s) + } + return 0, errSyntax +} + +func getRegionM49(n int) (regionID, error) { + if 0 < n && n <= 999 { + const ( + searchBits = 7 + regionBits = 9 + regionMask = 1<<regionBits - 1 + ) + idx := n >> searchBits + buf := fromM49[m49Index[idx]:m49Index[idx+1]] + val := uint16(n) << regionBits // we rely on bits shifting out + i := sort.Search(len(buf), func(i int) bool { + return buf[i] >= val + }) + if r := fromM49[int(m49Index[idx])+i]; r&^regionMask == val { + return regionID(r & regionMask), nil + } + } + var e ValueError + fmt.Fprint(bytes.NewBuffer([]byte(e.v[:])), n) + return 0, e +} + +// normRegion returns a region if r is deprecated or 0 otherwise. +// TODO: consider supporting BYS (-> BLR), CSK (-> 200 or CZ), PHI (-> PHL) and AFI (-> DJ). +// TODO: consider mapping split up regions to new most populous one (like CLDR). +func normRegion(r regionID) regionID { + m := regionOldMap + k := sort.Search(len(m), func(i int) bool { + return m[i].from >= uint16(r) + }) + if k < len(m) && m[k].from == uint16(r) { + return regionID(m[k].to) + } + return 0 +} + +const ( + iso3166UserAssigned = 1 << iota + ccTLD + bcp47Region +) + +func (r regionID) typ() byte { + return regionTypes[r] +} + +// String returns the BCP 47 representation for the region. +// It returns "ZZ" for an unspecified region. +func (r regionID) String() string { + if r < isoRegionOffset { + if r == 0 { + return "ZZ" + } + return fmt.Sprintf("%03d", r.M49()) + } + r -= isoRegionOffset + return regionISO.Elem(int(r))[:2] +} + +// ISO3 returns the 3-letter ISO code of r. +// Note that not all regions have a 3-letter ISO code. +// In such cases this method returns "ZZZ". +func (r regionID) ISO3() string { + if r < isoRegionOffset { + return "ZZZ" + } + r -= isoRegionOffset + reg := regionISO.Elem(int(r)) + switch reg[2] { + case 0: + return altRegionISO3[reg[3]:][:3] + case ' ': + return "ZZZ" + } + return reg[0:1] + reg[2:4] +} + +// M49 returns the UN M.49 encoding of r, or 0 if this encoding +// is not defined for r. +func (r regionID) M49() int { + return int(m49[r]) +} + +// IsPrivateUse reports whether r has the ISO 3166 User-assigned status. This +// may include private-use tags that are assigned by CLDR and used in this +// implementation. So IsPrivateUse and IsCountry can be simultaneously true. +func (r regionID) IsPrivateUse() bool { + return r.typ()&iso3166UserAssigned != 0 +} + +type scriptID uint8 + +// getScriptID returns the script id for string s. It assumes that s +// is of the format [A-Z][a-z]{3}. +func getScriptID(idx tag.Index, s []byte) (scriptID, error) { + i, err := findIndex(idx, s, "Zzzz") + return scriptID(i), err +} + +// String returns the script code in title case. +// It returns "Zzzz" for an unspecified script. +func (s scriptID) String() string { + if s == 0 { + return "Zzzz" + } + return script.Elem(int(s)) +} + +// IsPrivateUse reports whether this script code is reserved for private use. 
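+// In ISO 15924 the private-use range runs from Qaaa through Qabx, which is what
+// the comparison below relies on.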
+func (s scriptID) IsPrivateUse() bool { + return _Qaaa <= s && s <= _Qabx +} + +const ( + maxAltTaglen = len("en-US-POSIX") + maxLen = maxAltTaglen +) + +var ( + // grandfatheredMap holds a mapping from legacy and grandfathered tags to + // their base language or index to more elaborate tag. + grandfatheredMap = map[[maxLen]byte]int16{ + [maxLen]byte{'a', 'r', 't', '-', 'l', 'o', 'j', 'b', 'a', 'n'}: _jbo, // art-lojban + [maxLen]byte{'i', '-', 'a', 'm', 'i'}: _ami, // i-ami + [maxLen]byte{'i', '-', 'b', 'n', 'n'}: _bnn, // i-bnn + [maxLen]byte{'i', '-', 'h', 'a', 'k'}: _hak, // i-hak + [maxLen]byte{'i', '-', 'k', 'l', 'i', 'n', 'g', 'o', 'n'}: _tlh, // i-klingon + [maxLen]byte{'i', '-', 'l', 'u', 'x'}: _lb, // i-lux + [maxLen]byte{'i', '-', 'n', 'a', 'v', 'a', 'j', 'o'}: _nv, // i-navajo + [maxLen]byte{'i', '-', 'p', 'w', 'n'}: _pwn, // i-pwn + [maxLen]byte{'i', '-', 't', 'a', 'o'}: _tao, // i-tao + [maxLen]byte{'i', '-', 't', 'a', 'y'}: _tay, // i-tay + [maxLen]byte{'i', '-', 't', 's', 'u'}: _tsu, // i-tsu + [maxLen]byte{'n', 'o', '-', 'b', 'o', 'k'}: _nb, // no-bok + [maxLen]byte{'n', 'o', '-', 'n', 'y', 'n'}: _nn, // no-nyn + [maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'f', 'r'}: _sfb, // sgn-BE-FR + [maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'n', 'l'}: _vgt, // sgn-BE-NL + [maxLen]byte{'s', 'g', 'n', '-', 'c', 'h', '-', 'd', 'e'}: _sgg, // sgn-CH-DE + [maxLen]byte{'z', 'h', '-', 'g', 'u', 'o', 'y', 'u'}: _cmn, // zh-guoyu + [maxLen]byte{'z', 'h', '-', 'h', 'a', 'k', 'k', 'a'}: _hak, // zh-hakka + [maxLen]byte{'z', 'h', '-', 'm', 'i', 'n', '-', 'n', 'a', 'n'}: _nan, // zh-min-nan + [maxLen]byte{'z', 'h', '-', 'x', 'i', 'a', 'n', 'g'}: _hsn, // zh-xiang + + // Grandfathered tags with no modern replacement will be converted as + // follows: + [maxLen]byte{'c', 'e', 'l', '-', 'g', 'a', 'u', 'l', 'i', 's', 'h'}: -1, // cel-gaulish + [maxLen]byte{'e', 'n', '-', 'g', 'b', '-', 'o', 'e', 'd'}: -2, // en-GB-oed + [maxLen]byte{'i', '-', 'd', 'e', 'f', 'a', 'u', 'l', 't'}: -3, // i-default + [maxLen]byte{'i', '-', 'e', 'n', 'o', 'c', 'h', 'i', 'a', 'n'}: -4, // i-enochian + [maxLen]byte{'i', '-', 'm', 'i', 'n', 'g', 'o'}: -5, // i-mingo + [maxLen]byte{'z', 'h', '-', 'm', 'i', 'n'}: -6, // zh-min + + // CLDR-specific tag. + [maxLen]byte{'r', 'o', 'o', 't'}: 0, // root + [maxLen]byte{'e', 'n', '-', 'u', 's', '-', 'p', 'o', 's', 'i', 'x'}: -7, // en_US_POSIX" + } + + altTagIndex = [...]uint8{0, 17, 31, 45, 61, 74, 86, 102} + + altTags = "xtg-x-cel-gaulishen-GB-oxendicten-x-i-defaultund-x-i-enochiansee-x-i-mingonan-x-zh-minen-US-u-va-posix" +) + +func grandfathered(s [maxAltTaglen]byte) (t Tag, ok bool) { + if v, ok := grandfatheredMap[s]; ok { + if v < 0 { + return Make(altTags[altTagIndex[-v-1]:altTagIndex[-v]]), true + } + t.lang = langID(v) + return t, true + } + return t, false +} diff --git a/vendor/golang.org/x/text/language/maketables.go b/vendor/golang.org/x/text/language/maketables.go new file mode 100644 index 000000000..2cc995b37 --- /dev/null +++ b/vendor/golang.org/x/text/language/maketables.go @@ -0,0 +1,1635 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// Language tag table generator. +// Data read from the web. 
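+// It is intended to be run manually (note the "+build ignore" constraint above)
+// to regenerate tables.go.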
+ +package main + +import ( + "bufio" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "math" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/tag" + "golang.org/x/text/unicode/cldr" +) + +var ( + test = flag.Bool("test", + false, + "test existing tables; can be used to compare web data with package data.") + outputFile = flag.String("output", + "tables.go", + "output file for generated tables") +) + +var comment = []string{ + ` +lang holds an alphabetically sorted list of ISO-639 language identifiers. +All entries are 4 bytes. The index of the identifier (divided by 4) is the language tag. +For 2-byte language identifiers, the two successive bytes have the following meaning: + - if the first letter of the 2- and 3-letter ISO codes are the same: + the second and third letter of the 3-letter ISO code. + - otherwise: a 0 and a by 2 bits right-shifted index into altLangISO3. +For 3-byte language identifiers the 4th byte is 0.`, + ` +langNoIndex is a bit vector of all 3-letter language codes that are not used as an index +in lookup tables. The language ids for these language codes are derived directly +from the letters and are not consecutive.`, + ` +altLangISO3 holds an alphabetically sorted list of 3-letter language code alternatives +to 2-letter language codes that cannot be derived using the method described above. +Each 3-letter code is followed by its 1-byte langID.`, + ` +altLangIndex is used to convert indexes in altLangISO3 to langIDs.`, + ` +langAliasMap maps langIDs to their suggested replacements.`, + ` +script is an alphabetically sorted list of ISO 15924 codes. The index +of the script in the string, divided by 4, is the internal scriptID.`, + ` +isoRegionOffset needs to be added to the index of regionISO to obtain the regionID +for 2-letter ISO codes. (The first isoRegionOffset regionIDs are reserved for +the UN.M49 codes used for groups.)`, + ` +regionISO holds a list of alphabetically sorted 2-letter ISO region codes. +Each 2-letter codes is followed by two bytes with the following meaning: + - [A-Z}{2}: the first letter of the 2-letter code plus these two + letters form the 3-letter ISO code. + - 0, n: index into altRegionISO3.`, + ` +regionTypes defines the status of a region for various standards.`, + ` +m49 maps regionIDs to UN.M49 codes. The first isoRegionOffset entries are +codes indicating collections of regions.`, + ` +m49Index gives indexes into fromM49 based on the three most significant bits +of a 10-bit UN.M49 code. To search an UN.M49 code in fromM49, search in + fromM49[m49Index[msb39(code)]:m49Index[msb3(code)+1]] +for an entry where the first 7 bits match the 7 lsb of the UN.M49 code. +The region code is stored in the 9 lsb of the indexed value.`, + ` +fromM49 contains entries to map UN.M49 codes to regions. See m49Index for details.`, + ` +altRegionISO3 holds a list of 3-letter region codes that cannot be +mapped to 2-letter codes using the default algorithm. This is a short list.`, + ` +altRegionIDs holds a list of regionIDs the positions of which match those +of the 3-letter ISO codes in altRegionISO3.`, + ` +variantNumSpecialized is the number of specialized variants in variants.`, + ` +suppressScript is an index from langID to the dominant script for that language, +if it exists. If a script is given, it should be suppressed from the language tag.`, + ` +likelyLang is a lookup table, indexed by langID, for the most likely +scripts and regions given incomplete information. 
If more entries exist for a +given language, region and script are the index and size respectively +of the list in likelyLangList.`, + ` +likelyLangList holds lists info associated with likelyLang.`, + ` +likelyRegion is a lookup table, indexed by regionID, for the most likely +languages and scripts given incomplete information. If more entries exist +for a given regionID, lang and script are the index and size respectively +of the list in likelyRegionList. +TODO: exclude containers and user-definable regions from the list.`, + ` +likelyRegionList holds lists info associated with likelyRegion.`, + ` +likelyScript is a lookup table, indexed by scriptID, for the most likely +languages and regions given a script.`, + ` +matchLang holds pairs of langIDs of base languages that are typically +mutually intelligible. Each pair is associated with a confidence and +whether the intelligibility goes one or both ways.`, + ` +matchScript holds pairs of scriptIDs where readers of one script +can typically also read the other. Each is associated with a confidence.`, + ` +nRegionGroups is the number of region groups.`, + ` +regionInclusion maps region identifiers to sets of regions in regionInclusionBits, +where each set holds all groupings that are directly connected in a region +containment graph.`, + ` +regionInclusionBits is an array of bit vectors where every vector represents +a set of region groupings. These sets are used to compute the distance +between two regions for the purpose of language matching.`, + ` +regionInclusionNext marks, for each entry in regionInclusionBits, the set of +all groups that are reachable from the groups set in the respective entry.`, +} + +// TODO: consider changing some of these structures to tries. This can reduce +// memory, but may increase the need for memory allocations. This could be +// mitigated if we can piggyback on language tags for common cases. + +func failOnError(e error) { + if e != nil { + log.Panic(e) + } +} + +type setType int + +const ( + Indexed setType = 1 + iota // all elements must be of same size + Linear +) + +type stringSet struct { + s []string + sorted, frozen bool + + // We often need to update values after the creation of an index is completed. + // We include a convenience map for keeping track of this. + update map[string]string + typ setType // used for checking. +} + +func (ss *stringSet) clone() stringSet { + c := *ss + c.s = append([]string(nil), c.s...) + return c +} + +func (ss *stringSet) setType(t setType) { + if ss.typ != t && ss.typ != 0 { + log.Panicf("type %d cannot be assigned as it was already %d", t, ss.typ) + } +} + +// parse parses a whitespace-separated string and initializes ss with its +// components. 
+func (ss *stringSet) parse(s string) { + scan := bufio.NewScanner(strings.NewReader(s)) + scan.Split(bufio.ScanWords) + for scan.Scan() { + ss.add(scan.Text()) + } +} + +func (ss *stringSet) assertChangeable() { + if ss.frozen { + log.Panic("attempt to modify a frozen stringSet") + } +} + +func (ss *stringSet) add(s string) { + ss.assertChangeable() + ss.s = append(ss.s, s) + ss.sorted = ss.frozen +} + +func (ss *stringSet) freeze() { + ss.compact() + ss.frozen = true +} + +func (ss *stringSet) compact() { + if ss.sorted { + return + } + a := ss.s + sort.Strings(a) + k := 0 + for i := 1; i < len(a); i++ { + if a[k] != a[i] { + a[k+1] = a[i] + k++ + } + } + ss.s = a[:k+1] + ss.sorted = ss.frozen +} + +type funcSorter struct { + fn func(a, b string) bool + sort.StringSlice +} + +func (s funcSorter) Less(i, j int) bool { + return s.fn(s.StringSlice[i], s.StringSlice[j]) +} + +func (ss *stringSet) sortFunc(f func(a, b string) bool) { + ss.compact() + sort.Sort(funcSorter{f, sort.StringSlice(ss.s)}) +} + +func (ss *stringSet) remove(s string) { + ss.assertChangeable() + if i, ok := ss.find(s); ok { + copy(ss.s[i:], ss.s[i+1:]) + ss.s = ss.s[:len(ss.s)-1] + } +} + +func (ss *stringSet) replace(ol, nu string) { + ss.s[ss.index(ol)] = nu + ss.sorted = ss.frozen +} + +func (ss *stringSet) index(s string) int { + ss.setType(Indexed) + i, ok := ss.find(s) + if !ok { + if i < len(ss.s) { + log.Panicf("find: item %q is not in list. Closest match is %q.", s, ss.s[i]) + } + log.Panicf("find: item %q is not in list", s) + + } + return i +} + +func (ss *stringSet) find(s string) (int, bool) { + ss.compact() + i := sort.SearchStrings(ss.s, s) + return i, i != len(ss.s) && ss.s[i] == s +} + +func (ss *stringSet) slice() []string { + ss.compact() + return ss.s +} + +func (ss *stringSet) updateLater(v, key string) { + if ss.update == nil { + ss.update = map[string]string{} + } + ss.update[v] = key +} + +// join joins the string and ensures that all entries are of the same length. +func (ss *stringSet) join() string { + ss.setType(Indexed) + n := len(ss.s[0]) + for _, s := range ss.s { + if len(s) != n { + log.Panicf("join: not all entries are of the same length: %q", s) + } + } + ss.s = append(ss.s, strings.Repeat("\xff", n)) + return strings.Join(ss.s, "") +} + +// ianaEntry holds information for an entry in the IANA Language Subtag Repository. +// All types use the same entry. +// See http://tools.ietf.org/html/bcp47#section-5.1 for a description of the various +// fields. +type ianaEntry struct { + typ string + description []string + scope string + added string + preferred string + deprecated string + suppressScript string + macro string + prefix []string +} + +type builder struct { + w *gen.CodeWriter + hw io.Writer // MultiWriter for w and w.Hash + data *cldr.CLDR + supp *cldr.SupplementalData + + // indices + locale stringSet // common locales + lang stringSet // canonical language ids (2 or 3 letter ISO codes) with data + langNoIndex stringSet // 3-letter ISO codes with no associated data + script stringSet // 4-letter ISO codes + region stringSet // 2-letter ISO or 3-digit UN M49 codes + variant stringSet // 4-8-alphanumeric variant code. + + // Region codes that are groups with their corresponding group IDs. 
+ groups map[int]index + + // langInfo + registry map[string]*ianaEntry +} + +type index uint + +func newBuilder(w *gen.CodeWriter) *builder { + r := gen.OpenCLDRCoreZip() + defer r.Close() + d := &cldr.Decoder{} + data, err := d.DecodeZip(r) + failOnError(err) + b := builder{ + w: w, + hw: io.MultiWriter(w, w.Hash), + data: data, + supp: data.Supplemental(), + } + b.parseRegistry() + return &b +} + +func (b *builder) parseRegistry() { + r := gen.OpenIANAFile("assignments/language-subtag-registry") + defer r.Close() + b.registry = make(map[string]*ianaEntry) + + scan := bufio.NewScanner(r) + scan.Split(bufio.ScanWords) + var record *ianaEntry + for more := scan.Scan(); more; { + key := scan.Text() + more = scan.Scan() + value := scan.Text() + switch key { + case "Type:": + record = &ianaEntry{typ: value} + case "Subtag:", "Tag:": + if s := strings.SplitN(value, "..", 2); len(s) > 1 { + for a := s[0]; a <= s[1]; a = inc(a) { + b.addToRegistry(a, record) + } + } else { + b.addToRegistry(value, record) + } + case "Suppress-Script:": + record.suppressScript = value + case "Added:": + record.added = value + case "Deprecated:": + record.deprecated = value + case "Macrolanguage:": + record.macro = value + case "Preferred-Value:": + record.preferred = value + case "Prefix:": + record.prefix = append(record.prefix, value) + case "Scope:": + record.scope = value + case "Description:": + buf := []byte(value) + for more = scan.Scan(); more; more = scan.Scan() { + b := scan.Bytes() + if b[0] == '%' || b[len(b)-1] == ':' { + break + } + buf = append(buf, ' ') + buf = append(buf, b...) + } + record.description = append(record.description, string(buf)) + continue + default: + continue + } + more = scan.Scan() + } + if scan.Err() != nil { + log.Panic(scan.Err()) + } +} + +func (b *builder) addToRegistry(key string, entry *ianaEntry) { + if info, ok := b.registry[key]; ok { + if info.typ != "language" || entry.typ != "extlang" { + log.Fatalf("parseRegistry: tag %q already exists", key) + } + } else { + b.registry[key] = entry + } +} + +var commentIndex = make(map[string]string) + +func init() { + for _, s := range comment { + key := strings.TrimSpace(strings.SplitN(s, " ", 2)[0]) + commentIndex[key] = s + } +} + +func (b *builder) comment(name string) { + if s := commentIndex[name]; len(s) > 0 { + b.w.WriteComment(s) + } else { + fmt.Fprintln(b.w) + } +} + +func (b *builder) pf(f string, x ...interface{}) { + fmt.Fprintf(b.hw, f, x...) + fmt.Fprint(b.hw, "\n") +} + +func (b *builder) p(x ...interface{}) { + fmt.Fprintln(b.hw, x...) +} + +func (b *builder) addSize(s int) { + b.w.Size += s + b.pf("// Size: %d bytes", s) +} + +func (b *builder) writeConst(name string, x interface{}) { + b.comment(name) + b.w.WriteConst(name, x) +} + +// writeConsts computes f(v) for all v in values and writes the results +// as constants named _v to a single constant block. +func (b *builder) writeConsts(f func(string) int, values ...string) { + b.pf("const (") + for _, v := range values { + b.pf("\t_%s = %v", v, f(v)) + } + b.pf(")") +} + +// writeType writes the type of the given value, which must be a struct. 
+func (b *builder) writeType(value interface{}) { + b.comment(reflect.TypeOf(value).Name()) + b.w.WriteType(value) +} + +func (b *builder) writeSlice(name string, ss interface{}) { + b.writeSliceAddSize(name, 0, ss) +} + +func (b *builder) writeSliceAddSize(name string, extraSize int, ss interface{}) { + b.comment(name) + b.w.Size += extraSize + v := reflect.ValueOf(ss) + t := v.Type().Elem() + b.pf("// Size: %d bytes, %d elements", v.Len()*int(t.Size())+extraSize, v.Len()) + + fmt.Fprintf(b.w, "var %s = ", name) + b.w.WriteArray(ss) + b.p() +} + +type fromTo struct { + from, to uint16 +} + +func (b *builder) writeSortedMap(name string, ss *stringSet, index func(s string) uint16) { + ss.sortFunc(func(a, b string) bool { + return index(a) < index(b) + }) + m := []fromTo{} + for _, s := range ss.s { + m = append(m, fromTo{index(s), index(ss.update[s])}) + } + b.writeSlice(name, m) +} + +const base = 'z' - 'a' + 1 + +func strToInt(s string) uint { + v := uint(0) + for i := 0; i < len(s); i++ { + v *= base + v += uint(s[i] - 'a') + } + return v +} + +// converts the given integer to the original ASCII string passed to strToInt. +// len(s) must match the number of characters obtained. +func intToStr(v uint, s []byte) { + for i := len(s) - 1; i >= 0; i-- { + s[i] = byte(v%base) + 'a' + v /= base + } +} + +func (b *builder) writeBitVector(name string, ss []string) { + vec := make([]uint8, int(math.Ceil(math.Pow(base, float64(len(ss[0])))/8))) + for _, s := range ss { + v := strToInt(s) + vec[v/8] |= 1 << (v % 8) + } + b.writeSlice(name, vec) +} + +// TODO: convert this type into a list or two-stage trie. +func (b *builder) writeMapFunc(name string, m map[string]string, f func(string) uint16) { + b.comment(name) + v := reflect.ValueOf(m) + sz := v.Len() * (2 + int(v.Type().Key().Size())) + for _, k := range m { + sz += len(k) + } + b.addSize(sz) + keys := []string{} + b.pf(`var %s = map[string]uint16{`, name) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + b.pf("\t%q: %v,", k, f(m[k])) + } + b.p("}") +} + +func (b *builder) writeMap(name string, m interface{}) { + b.comment(name) + v := reflect.ValueOf(m) + sz := v.Len() * (2 + int(v.Type().Key().Size()) + int(v.Type().Elem().Size())) + b.addSize(sz) + f := strings.FieldsFunc(fmt.Sprintf("%#v", m), func(r rune) bool { + return strings.IndexRune("{}, ", r) != -1 + }) + sort.Strings(f[1:]) + b.pf(`var %s = %s{`, name, f[0]) + for _, kv := range f[1:] { + b.pf("\t%s,", kv) + } + b.p("}") +} + +func (b *builder) langIndex(s string) uint16 { + if s == "und" { + return 0 + } + if i, ok := b.lang.find(s); ok { + return uint16(i) + } + return uint16(strToInt(s)) + uint16(len(b.lang.s)) +} + +// inc advances the string to its lexicographical successor. +func inc(s string) string { + const maxTagLength = 4 + var buf [maxTagLength]byte + intToStr(strToInt(strings.ToLower(s))+1, buf[:len(s)]) + for i := 0; i < len(s); i++ { + if s[i] <= 'Z' { + buf[i] -= 'a' - 'A' + } + } + return string(buf[:len(s)]) +} + +func (b *builder) parseIndices() { + meta := b.supp.Metadata + + for k, v := range b.registry { + var ss *stringSet + switch v.typ { + case "language": + if len(k) == 2 || v.suppressScript != "" || v.scope == "special" { + b.lang.add(k) + continue + } else { + ss = &b.langNoIndex + } + case "region": + ss = &b.region + case "script": + ss = &b.script + case "variant": + ss = &b.variant + default: + continue + } + ss.add(k) + } + // Include any language for which there is data. 
+ for _, lang := range b.data.Locales() { + if x := b.data.RawLDML(lang); false || + x.LocaleDisplayNames != nil || + x.Characters != nil || + x.Delimiters != nil || + x.Measurement != nil || + x.Dates != nil || + x.Numbers != nil || + x.Units != nil || + x.ListPatterns != nil || + x.Collations != nil || + x.Segmentations != nil || + x.Rbnf != nil || + x.Annotations != nil || + x.Metadata != nil { + + from := strings.Split(lang, "_") + if lang := from[0]; lang != "root" { + b.lang.add(lang) + } + } + } + // Include locales for plural rules, which uses a different structure. + for _, plurals := range b.data.Supplemental().Plurals { + for _, rules := range plurals.PluralRules { + for _, lang := range strings.Split(rules.Locales, " ") { + if lang = strings.Split(lang, "_")[0]; lang != "root" { + b.lang.add(lang) + } + } + } + } + // Include languages in likely subtags. + for _, m := range b.supp.LikelySubtags.LikelySubtag { + from := strings.Split(m.From, "_") + b.lang.add(from[0]) + } + // Include ISO-639 alpha-3 bibliographic entries. + for _, a := range meta.Alias.LanguageAlias { + if a.Reason == "bibliographic" { + b.langNoIndex.add(a.Type) + } + } + // Include regions in territoryAlias (not all are in the IANA registry!) + for _, reg := range b.supp.Metadata.Alias.TerritoryAlias { + if len(reg.Type) == 2 { + b.region.add(reg.Type) + } + } + + for _, s := range b.lang.s { + if len(s) == 3 { + b.langNoIndex.remove(s) + } + } + b.writeConst("numLanguages", len(b.lang.slice())+len(b.langNoIndex.slice())) + b.writeConst("numScripts", len(b.script.slice())) + b.writeConst("numRegions", len(b.region.slice())) + + // Add dummy codes at the start of each list to represent "unspecified". + b.lang.add("---") + b.script.add("----") + b.region.add("---") + + // common locales + b.locale.parse(meta.DefaultContent.Locales) +} + +func (b *builder) computeRegionGroups() { + b.groups = make(map[int]index) + + // Create group indices. + for i := 1; b.region.s[i][0] < 'A'; i++ { // Base M49 indices on regionID. + b.groups[i] = index(len(b.groups)) + } + for _, g := range b.supp.TerritoryContainment.Group { + group := b.region.index(g.Type) + if _, ok := b.groups[group]; !ok { + b.groups[group] = index(len(b.groups)) + } + } + if len(b.groups) > 32 { + log.Fatalf("only 32 groups supported, found %d", len(b.groups)) + } + b.writeConst("nRegionGroups", len(b.groups)) +} + +var langConsts = []string{ + "af", "am", "ar", "az", "bg", "bn", "ca", "cs", "da", "de", "el", "en", "es", + "et", "fa", "fi", "fil", "fr", "gu", "he", "hi", "hr", "hu", "hy", "id", "is", + "it", "ja", "ka", "kk", "km", "kn", "ko", "ky", "lo", "lt", "lv", "mk", "ml", + "mn", "mo", "mr", "ms", "mul", "my", "nb", "ne", "nl", "no", "pa", "pl", "pt", + "ro", "ru", "sh", "si", "sk", "sl", "sq", "sr", "sv", "sw", "ta", "te", "th", + "tl", "tn", "tr", "uk", "ur", "uz", "vi", "zh", "zu", + + // constants for grandfathered tags (if not already defined) + "jbo", "ami", "bnn", "hak", "tlh", "lb", "nv", "pwn", "tao", "tay", "tsu", + "nn", "sfb", "vgt", "sgg", "cmn", "nan", "hsn", +} + +// writeLanguage generates all tables needed for language canonicalization. +func (b *builder) writeLanguage() { + meta := b.supp.Metadata + + b.writeConst("nonCanonicalUnd", b.lang.index("und")) + b.writeConsts(func(s string) int { return int(b.langIndex(s)) }, langConsts...) 
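+	// qaa..qtz is the range ISO 639 reserves for private use; the two constants
+	// below mark its bounds in the language index.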
+ b.writeConst("langPrivateStart", b.langIndex("qaa")) + b.writeConst("langPrivateEnd", b.langIndex("qtz")) + + // Get language codes that need to be mapped (overlong 3-letter codes, + // deprecated 2-letter codes, legacy and grandfathered tags.) + langAliasMap := stringSet{} + aliasTypeMap := map[string]langAliasType{} + + // altLangISO3 get the alternative ISO3 names that need to be mapped. + altLangISO3 := stringSet{} + // Add dummy start to avoid the use of index 0. + altLangISO3.add("---") + altLangISO3.updateLater("---", "aa") + + lang := b.lang.clone() + for _, a := range meta.Alias.LanguageAlias { + if a.Replacement == "" { + a.Replacement = "und" + } + // TODO: support mapping to tags + repl := strings.SplitN(a.Replacement, "_", 2)[0] + if a.Reason == "overlong" { + if len(a.Replacement) == 2 && len(a.Type) == 3 { + lang.updateLater(a.Replacement, a.Type) + } + } else if len(a.Type) <= 3 { + switch a.Reason { + case "macrolanguage": + aliasTypeMap[a.Type] = langMacro + case "deprecated": + // handled elsewhere + continue + case "bibliographic", "legacy": + if a.Type == "no" { + continue + } + aliasTypeMap[a.Type] = langLegacy + default: + log.Fatalf("new %s alias: %s", a.Reason, a.Type) + } + langAliasMap.add(a.Type) + langAliasMap.updateLater(a.Type, repl) + } + } + // Manually add the mapping of "nb" (Norwegian) to its macro language. + // This can be removed if CLDR adopts this change. + langAliasMap.add("nb") + langAliasMap.updateLater("nb", "no") + aliasTypeMap["nb"] = langMacro + + for k, v := range b.registry { + // Also add deprecated values for 3-letter ISO codes, which CLDR omits. + if v.typ == "language" && v.deprecated != "" && v.preferred != "" { + langAliasMap.add(k) + langAliasMap.updateLater(k, v.preferred) + aliasTypeMap[k] = langDeprecated + } + } + // Fix CLDR mappings. + lang.updateLater("tl", "tgl") + lang.updateLater("sh", "hbs") + lang.updateLater("mo", "mol") + lang.updateLater("no", "nor") + lang.updateLater("tw", "twi") + lang.updateLater("nb", "nob") + lang.updateLater("ak", "aka") + + // Ensure that each 2-letter code is matched with a 3-letter code. + for _, v := range lang.s[1:] { + s, ok := lang.update[v] + if !ok { + if s, ok = lang.update[langAliasMap.update[v]]; !ok { + continue + } + lang.update[v] = s + } + if v[0] != s[0] { + altLangISO3.add(s) + altLangISO3.updateLater(s, v) + } + } + + // Complete canonialized language tags. + lang.freeze() + for i, v := range lang.s { + // We can avoid these manual entries by using the IANI registry directly. + // Seems easier to update the list manually, as changes are rare. + // The panic in this loop will trigger if we miss an entry. + add := "" + if s, ok := lang.update[v]; ok { + if s[0] == v[0] { + add = s[1:] + } else { + add = string([]byte{0, byte(altLangISO3.index(s))}) + } + } else if len(v) == 3 { + add = "\x00" + } else { + log.Panicf("no data for long form of %q", v) + } + lang.s[i] += add + } + b.writeConst("lang", tag.Index(lang.join())) + + b.writeConst("langNoIndexOffset", len(b.lang.s)) + + // space of all valid 3-letter language identifiers. 
+ b.writeBitVector("langNoIndex", b.langNoIndex.slice()) + + altLangIndex := []uint16{} + for i, s := range altLangISO3.slice() { + altLangISO3.s[i] += string([]byte{byte(len(altLangIndex))}) + if i > 0 { + idx := b.lang.index(altLangISO3.update[s]) + altLangIndex = append(altLangIndex, uint16(idx)) + } + } + b.writeConst("altLangISO3", tag.Index(altLangISO3.join())) + b.writeSlice("altLangIndex", altLangIndex) + + b.writeSortedMap("langAliasMap", &langAliasMap, b.langIndex) + types := make([]langAliasType, len(langAliasMap.s)) + for i, s := range langAliasMap.s { + types[i] = aliasTypeMap[s] + } + b.writeSlice("langAliasTypes", types) +} + +var scriptConsts = []string{ + "Latn", "Hani", "Hans", "Hant", "Qaaa", "Qaai", "Qabx", "Zinh", "Zyyy", + "Zzzz", +} + +func (b *builder) writeScript() { + b.writeConsts(b.script.index, scriptConsts...) + b.writeConst("script", tag.Index(b.script.join())) + + supp := make([]uint8, len(b.lang.slice())) + for i, v := range b.lang.slice()[1:] { + if sc := b.registry[v].suppressScript; sc != "" { + supp[i+1] = uint8(b.script.index(sc)) + } + } + b.writeSlice("suppressScript", supp) + + // There is only one deprecated script in CLDR. This value is hard-coded. + // We check here if the code must be updated. + for _, a := range b.supp.Metadata.Alias.ScriptAlias { + if a.Type != "Qaai" { + log.Panicf("unexpected deprecated stript %q", a.Type) + } + } +} + +func parseM49(s string) int16 { + if len(s) == 0 { + return 0 + } + v, err := strconv.ParseUint(s, 10, 10) + failOnError(err) + return int16(v) +} + +var regionConsts = []string{ + "001", "419", "BR", "CA", "ES", "GB", "MD", "PT", "UK", "US", + "ZZ", "XA", "XC", "XK", // Unofficial tag for Kosovo. +} + +func (b *builder) writeRegion() { + b.writeConsts(b.region.index, regionConsts...) + + isoOffset := b.region.index("AA") + m49map := make([]int16, len(b.region.slice())) + fromM49map := make(map[int16]int) + altRegionISO3 := "" + altRegionIDs := []uint16{} + + b.writeConst("isoRegionOffset", isoOffset) + + // 2-letter region lookup and mapping to numeric codes. + regionISO := b.region.clone() + regionISO.s = regionISO.s[isoOffset:] + regionISO.sorted = false + + regionTypes := make([]byte, len(b.region.s)) + + // Is the region valid BCP 47? + for s, e := range b.registry { + if len(s) == 2 && s == strings.ToUpper(s) { + i := b.region.index(s) + for _, d := range e.description { + if strings.Contains(d, "Private use") { + regionTypes[i] = iso3166UserAssgined + } + } + regionTypes[i] |= bcp47Region + } + } + + // Is the region a valid ccTLD? 
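+	// The block below scrapes the IANA root zone database; for instance, a link to
+	// "/domains/root/db/nl.html" marks NL as a delegated ccTLD (illustrative example).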
+ r := gen.OpenIANAFile("domains/root/db") + defer r.Close() + + buf, err := ioutil.ReadAll(r) + failOnError(err) + re := regexp.MustCompile(`"/domains/root/db/([a-z]{2}).html"`) + for _, m := range re.FindAllSubmatch(buf, -1) { + i := b.region.index(strings.ToUpper(string(m[1]))) + regionTypes[i] |= ccTLD + } + + b.writeSlice("regionTypes", regionTypes) + + iso3Set := make(map[string]int) + update := func(iso2, iso3 string) { + i := regionISO.index(iso2) + if j, ok := iso3Set[iso3]; !ok && iso3[0] == iso2[0] { + regionISO.s[i] += iso3[1:] + iso3Set[iso3] = -1 + } else { + if ok && j >= 0 { + regionISO.s[i] += string([]byte{0, byte(j)}) + } else { + iso3Set[iso3] = len(altRegionISO3) + regionISO.s[i] += string([]byte{0, byte(len(altRegionISO3))}) + altRegionISO3 += iso3 + altRegionIDs = append(altRegionIDs, uint16(isoOffset+i)) + } + } + } + for _, tc := range b.supp.CodeMappings.TerritoryCodes { + i := regionISO.index(tc.Type) + isoOffset + if d := m49map[i]; d != 0 { + log.Panicf("%s found as a duplicate UN.M49 code of %03d", tc.Numeric, d) + } + m49 := parseM49(tc.Numeric) + m49map[i] = m49 + if r := fromM49map[m49]; r == 0 { + fromM49map[m49] = i + } else if r != i { + dep := b.registry[regionISO.s[r-isoOffset]].deprecated + if t := b.registry[tc.Type]; t != nil && dep != "" && (t.deprecated == "" || t.deprecated > dep) { + fromM49map[m49] = i + } + } + } + for _, ta := range b.supp.Metadata.Alias.TerritoryAlias { + if len(ta.Type) == 3 && ta.Type[0] <= '9' && len(ta.Replacement) == 2 { + from := parseM49(ta.Type) + if r := fromM49map[from]; r == 0 { + fromM49map[from] = regionISO.index(ta.Replacement) + isoOffset + } + } + } + for _, tc := range b.supp.CodeMappings.TerritoryCodes { + if len(tc.Alpha3) == 3 { + update(tc.Type, tc.Alpha3) + } + } + // This entries are not included in territoryCodes. Mostly 3-letter variants + // of deleted codes and an entry for QU. + for _, m := range []struct{ iso2, iso3 string }{ + {"CT", "CTE"}, + {"DY", "DHY"}, + {"HV", "HVO"}, + {"JT", "JTN"}, + {"MI", "MID"}, + {"NH", "NHB"}, + {"NQ", "ATN"}, + {"PC", "PCI"}, + {"PU", "PUS"}, + {"PZ", "PCZ"}, + {"RH", "RHO"}, + {"VD", "VDR"}, + {"WK", "WAK"}, + // These three-letter codes are used for others as well. + {"FQ", "ATF"}, + } { + update(m.iso2, m.iso3) + } + for i, s := range regionISO.s { + if len(s) != 4 { + regionISO.s[i] = s + " " + } + } + b.writeConst("regionISO", tag.Index(regionISO.join())) + b.writeConst("altRegionISO3", altRegionISO3) + b.writeSlice("altRegionIDs", altRegionIDs) + + // Create list of deprecated regions. + // TODO: consider inserting SF -> FI. Not included by CLDR, but is the only + // Transitionally-reserved mapping not included. + regionOldMap := stringSet{} + // Include regions in territoryAlias (not all are in the IANA registry!) + for _, reg := range b.supp.Metadata.Alias.TerritoryAlias { + if len(reg.Type) == 2 && reg.Reason == "deprecated" && len(reg.Replacement) == 2 { + regionOldMap.add(reg.Type) + regionOldMap.updateLater(reg.Type, reg.Replacement) + i, _ := regionISO.find(reg.Type) + j, _ := regionISO.find(reg.Replacement) + if k := m49map[i+isoOffset]; k == 0 { + m49map[i+isoOffset] = m49map[j+isoOffset] + } + } + } + b.writeSortedMap("regionOldMap", ®ionOldMap, func(s string) uint16 { + return uint16(b.region.index(s)) + }) + // 3-digit region lookup, groupings. 
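+	// Each fromM49 entry packs the low 7 bits of the M.49 code (shifted into the top
+	// of a uint16) together with a 9-bit region index; m49Index selects the bucket to
+	// binary-search.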
+ for i := 1; i < isoOffset; i++ { + m := parseM49(b.region.s[i]) + m49map[i] = m + fromM49map[m] = i + } + b.writeSlice("m49", m49map) + + const ( + searchBits = 7 + regionBits = 9 + ) + if len(m49map) >= 1<<regionBits { + log.Fatalf("Maximum number of regions exceeded: %d > %d", len(m49map), 1<<regionBits) + } + m49Index := [9]int16{} + fromM49 := []uint16{} + m49 := []int{} + for k, _ := range fromM49map { + m49 = append(m49, int(k)) + } + sort.Ints(m49) + for _, k := range m49[1:] { + val := (k & (1<<searchBits - 1)) << regionBits + fromM49 = append(fromM49, uint16(val|fromM49map[int16(k)])) + m49Index[1:][k>>searchBits] = int16(len(fromM49)) + } + b.writeSlice("m49Index", m49Index) + b.writeSlice("fromM49", fromM49) +} + +const ( + // TODO: put these lists in regionTypes as user data? Could be used for + // various optimizations and refinements and could be exposed in the API. + iso3166Except = "AC CP DG EA EU FX IC SU TA UK" + iso3166Trans = "AN BU CS NT TP YU ZR" // SF is not in our set of Regions. + // DY and RH are actually not deleted, but indeterminately reserved. + iso3166DelCLDR = "CT DD DY FQ HV JT MI NH NQ PC PU PZ RH VD WK YD" +) + +const ( + iso3166UserAssgined = 1 << iota + ccTLD + bcp47Region +) + +func find(list []string, s string) int { + for i, t := range list { + if t == s { + return i + } + } + return -1 +} + +// writeVariants generates per-variant information and creates a map from variant +// name to index value. We assign index values such that sorting multiple +// variants by index value will result in the correct order. +// There are two types of variants: specialized and general. Specialized variants +// are only applicable to certain language or language-script pairs. Generalized +// variants apply to any language. Generalized variants always sort after +// specialized variants. We will therefore always assign a higher index value +// to a generalized variant than any other variant. Generalized variants are +// sorted alphabetically among themselves. +// Specialized variants may also sort after other specialized variants. Such +// variants will be ordered after any of the variants they may follow. +// We assume that if a variant x is followed by a variant y, then for any prefix +// p of x, p-x is a prefix of y. This allows us to order tags based on the +// maximum of the length of any of its prefixes. +// TODO: it is possible to define a set of Prefix values on variants such that +// a total order cannot be defined to the point that this algorithm breaks. +// In other words, we cannot guarantee the same order of variants for the +// future using the same algorithm or for non-compliant combinations of +// variants. For this reason, consider using simple alphabetic sorting +// of variants and ignore Prefix restrictions altogether. +func (b *builder) writeVariant() { + generalized := stringSet{} + specialized := stringSet{} + specializedExtend := stringSet{} + // Collate the variants by type and check assumptions. + for _, v := range b.variant.slice() { + e := b.registry[v] + if len(e.prefix) == 0 { + generalized.add(v) + continue + } + c := strings.Split(e.prefix[0], "-") + hasScriptOrRegion := false + if len(c) > 1 { + _, hasScriptOrRegion = b.script.find(c[1]) + if !hasScriptOrRegion { + _, hasScriptOrRegion = b.region.find(c[1]) + + } + } + if len(c) == 1 || len(c) == 2 && hasScriptOrRegion { + // Variant is preceded by a language. + specialized.add(v) + continue + } + // Variant is preceded by another variant. 
+ specializedExtend.add(v) + prefix := c[0] + "-" + if hasScriptOrRegion { + prefix += c[1] + } + for _, p := range e.prefix { + // Verify that the prefix minus the last element is a prefix of the + // predecessor element. + i := strings.LastIndex(p, "-") + pred := b.registry[p[i+1:]] + if find(pred.prefix, p[:i]) < 0 { + log.Fatalf("prefix %q for variant %q not consistent with predecessor spec", p, v) + } + // The sorting used below does not work in the general case. It works + // if we assume that variants that may be followed by others only have + // prefixes of the same length. Verify this. + count := strings.Count(p[:i], "-") + for _, q := range pred.prefix { + if c := strings.Count(q, "-"); c != count { + log.Fatalf("variant %q preceding %q has a prefix %q of size %d; want %d", p[i+1:], v, q, c, count) + } + } + if !strings.HasPrefix(p, prefix) { + log.Fatalf("prefix %q of variant %q should start with %q", p, v, prefix) + } + } + } + + // Sort extended variants. + a := specializedExtend.s + less := func(v, w string) bool { + // Sort by the maximum number of elements. + maxCount := func(s string) (max int) { + for _, p := range b.registry[s].prefix { + if c := strings.Count(p, "-"); c > max { + max = c + } + } + return + } + if cv, cw := maxCount(v), maxCount(w); cv != cw { + return cv < cw + } + // Sort by name as tie breaker. + return v < w + } + sort.Sort(funcSorter{less, sort.StringSlice(a)}) + specializedExtend.frozen = true + + // Create index from variant name to index. + variantIndex := make(map[string]uint8) + add := func(s []string) { + for _, v := range s { + variantIndex[v] = uint8(len(variantIndex)) + } + } + add(specialized.slice()) + add(specializedExtend.s) + numSpecialized := len(variantIndex) + add(generalized.slice()) + if n := len(variantIndex); n > 255 { + log.Fatalf("maximum number of variants exceeded: was %d; want <= 255", n) + } + b.writeMap("variantIndex", variantIndex) + b.writeConst("variantNumSpecialized", numSpecialized) +} + +func (b *builder) writeLanguageInfo() { +} + +// writeLikelyData writes tables that are used both for finding parent relations and for +// language matching. Each entry contains additional bits to indicate the status of the +// data to know when it cannot be used for parent relations. +func (b *builder) writeLikelyData() { + const ( + isList = 1 << iota + scriptInFrom + regionInFrom + ) + type ( // generated types + likelyScriptRegion struct { + region uint16 + script uint8 + flags uint8 + } + likelyLangScript struct { + lang uint16 + script uint8 + flags uint8 + } + likelyLangRegion struct { + lang uint16 + region uint16 + } + // likelyTag is used for getting likely tags for group regions, where + // the likely region might be a region contained in the group. 
+ likelyTag struct { + lang uint16 + region uint16 + script uint8 + } + ) + var ( // generated variables + likelyRegionGroup = make([]likelyTag, len(b.groups)) + likelyLang = make([]likelyScriptRegion, len(b.lang.s)) + likelyRegion = make([]likelyLangScript, len(b.region.s)) + likelyScript = make([]likelyLangRegion, len(b.script.s)) + likelyLangList = []likelyScriptRegion{} + likelyRegionList = []likelyLangScript{} + ) + type fromTo struct { + from, to []string + } + langToOther := map[int][]fromTo{} + regionToOther := map[int][]fromTo{} + for _, m := range b.supp.LikelySubtags.LikelySubtag { + from := strings.Split(m.From, "_") + to := strings.Split(m.To, "_") + if len(to) != 3 { + log.Fatalf("invalid number of subtags in %q: found %d, want 3", m.To, len(to)) + } + if len(from) > 3 { + log.Fatalf("invalid number of subtags: found %d, want 1-3", len(from)) + } + if from[0] != to[0] && from[0] != "und" { + log.Fatalf("unexpected language change in expansion: %s -> %s", from, to) + } + if len(from) == 3 { + if from[2] != to[2] { + log.Fatalf("unexpected region change in expansion: %s -> %s", from, to) + } + if from[0] != "und" { + log.Fatalf("unexpected fully specified from tag: %s -> %s", from, to) + } + } + if len(from) == 1 || from[0] != "und" { + id := 0 + if from[0] != "und" { + id = b.lang.index(from[0]) + } + langToOther[id] = append(langToOther[id], fromTo{from, to}) + } else if len(from) == 2 && len(from[1]) == 4 { + sid := b.script.index(from[1]) + likelyScript[sid].lang = uint16(b.langIndex(to[0])) + likelyScript[sid].region = uint16(b.region.index(to[2])) + } else { + r := b.region.index(from[len(from)-1]) + if id, ok := b.groups[r]; ok { + if from[0] != "und" { + log.Fatalf("region changed unexpectedly: %s -> %s", from, to) + } + likelyRegionGroup[id].lang = uint16(b.langIndex(to[0])) + likelyRegionGroup[id].script = uint8(b.script.index(to[1])) + likelyRegionGroup[id].region = uint16(b.region.index(to[2])) + } else { + regionToOther[r] = append(regionToOther[r], fromTo{from, to}) + } + } + } + b.writeType(likelyLangRegion{}) + b.writeSlice("likelyScript", likelyScript) + + for id := range b.lang.s { + list := langToOther[id] + if len(list) == 1 { + likelyLang[id].region = uint16(b.region.index(list[0].to[2])) + likelyLang[id].script = uint8(b.script.index(list[0].to[1])) + } else if len(list) > 1 { + likelyLang[id].flags = isList + likelyLang[id].region = uint16(len(likelyLangList)) + likelyLang[id].script = uint8(len(list)) + for _, x := range list { + flags := uint8(0) + if len(x.from) > 1 { + if x.from[1] == x.to[2] { + flags = regionInFrom + } else { + flags = scriptInFrom + } + } + likelyLangList = append(likelyLangList, likelyScriptRegion{ + region: uint16(b.region.index(x.to[2])), + script: uint8(b.script.index(x.to[1])), + flags: flags, + }) + } + } + } + // TODO: merge suppressScript data with this table. 
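+	// When flags&isList is set, the region and script fields are reused as an index
+	// into likelyLangList and a list length rather than as real subtag IDs.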
+ b.writeType(likelyScriptRegion{}) + b.writeSlice("likelyLang", likelyLang) + b.writeSlice("likelyLangList", likelyLangList) + + for id := range b.region.s { + list := regionToOther[id] + if len(list) == 1 { + likelyRegion[id].lang = uint16(b.langIndex(list[0].to[0])) + likelyRegion[id].script = uint8(b.script.index(list[0].to[1])) + if len(list[0].from) > 2 { + likelyRegion[id].flags = scriptInFrom + } + } else if len(list) > 1 { + likelyRegion[id].flags = isList + likelyRegion[id].lang = uint16(len(likelyRegionList)) + likelyRegion[id].script = uint8(len(list)) + for i, x := range list { + if len(x.from) == 2 && i != 0 || i > 0 && len(x.from) != 3 { + log.Fatalf("unspecified script must be first in list: %v at %d", x.from, i) + } + x := likelyLangScript{ + lang: uint16(b.langIndex(x.to[0])), + script: uint8(b.script.index(x.to[1])), + } + if len(list[0].from) > 2 { + x.flags = scriptInFrom + } + likelyRegionList = append(likelyRegionList, x) + } + } + } + b.writeType(likelyLangScript{}) + b.writeSlice("likelyRegion", likelyRegion) + b.writeSlice("likelyRegionList", likelyRegionList) + + b.writeType(likelyTag{}) + b.writeSlice("likelyRegionGroup", likelyRegionGroup) +} + +type mutualIntelligibility struct { + want, have uint16 + conf uint8 + oneway bool +} + +type scriptIntelligibility struct { + lang uint16 // langID or 0 if * + want, have uint8 + conf uint8 +} + +type sortByConf []mutualIntelligibility + +func (l sortByConf) Less(a, b int) bool { + return l[a].conf > l[b].conf +} + +func (l sortByConf) Swap(a, b int) { + l[a], l[b] = l[b], l[a] +} + +func (l sortByConf) Len() int { + return len(l) +} + +// toConf converts a percentage value [0, 100] to a confidence class. +func toConf(pct uint8) uint8 { + switch { + case pct == 100: + return 3 // Exact + case pct >= 90: + return 2 // High + case pct > 50: + return 1 // Low + default: + return 0 // No + } +} + +// writeMatchData writes tables with languages and scripts for which there is +// mutual intelligibility. The data is based on CLDR's languageMatching data. +// Note that we use a different algorithm than the one defined by CLDR and that +// we slightly modify the data. For example, we convert scores to confidence levels. +// We also drop all region-related data as we use a different algorithm to +// determine region equivalence. +func (b *builder) writeMatchData() { + b.writeType(mutualIntelligibility{}) + b.writeType(scriptIntelligibility{}) + lm := b.supp.LanguageMatching.LanguageMatches + cldr.MakeSlice(&lm).SelectAnyOf("type", "written") + + matchLang := []mutualIntelligibility{} + matchScript := []scriptIntelligibility{} + // Convert the languageMatch entries in lists keyed by desired language. + for _, m := range lm[0].LanguageMatch { + // Different versions of CLDR use different separators. + desired := strings.Replace(m.Desired, "-", "_", -1) + supported := strings.Replace(m.Supported, "-", "_", -1) + d := strings.Split(desired, "_") + s := strings.Split(supported, "_") + if len(d) != len(s) || len(d) > 2 { + // Skip all entries with regions and work around CLDR bug. + continue + } + pct, _ := strconv.ParseInt(m.Percent, 10, 8) + if len(d) == 2 && d[0] == s[0] && len(d[1]) == 4 { + // language-script pair. 
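+			// e.g. a hypothetical entry pairing a Latin-script and a Cyrillic-script form
+			// of the same base language is recorded here in both directions unless the
+			// CLDR data marks it one-way.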
+ lang := uint16(0) + if d[0] != "*" { + lang = uint16(b.langIndex(d[0])) + } + matchScript = append(matchScript, scriptIntelligibility{ + lang: lang, + want: uint8(b.script.index(d[1])), + have: uint8(b.script.index(s[1])), + conf: toConf(uint8(pct)), + }) + if m.Oneway != "true" { + matchScript = append(matchScript, scriptIntelligibility{ + lang: lang, + want: uint8(b.script.index(s[1])), + have: uint8(b.script.index(d[1])), + conf: toConf(uint8(pct)), + }) + } + } else if len(d) == 1 && d[0] != "*" { + if pct == 100 { + // nb == no is already handled by macro mapping. Check there + // really is only this case. + if d[0] != "no" || s[0] != "nb" { + log.Fatalf("unhandled equivalence %s == %s", s[0], d[0]) + } + continue + } + matchLang = append(matchLang, mutualIntelligibility{ + want: uint16(b.langIndex(d[0])), + have: uint16(b.langIndex(s[0])), + conf: uint8(pct), + oneway: m.Oneway == "true", + }) + } else { + // TODO: Handle other mappings. + a := []string{"*;*", "*_*;*_*", "es_MX;es_419"} + s := strings.Join([]string{desired, supported}, ";") + if i := sort.SearchStrings(a, s); i == len(a) || a[i] != s { + log.Printf("%q not handled", s) + } + } + } + sort.Stable(sortByConf(matchLang)) + // collapse percentage into confidence classes + for i, m := range matchLang { + matchLang[i].conf = toConf(m.conf) + } + b.writeSlice("matchLang", matchLang) + b.writeSlice("matchScript", matchScript) +} + +func (b *builder) writeRegionInclusionData() { + var ( + // mm holds for each group the set of groups with a distance of 1. + mm = make(map[int][]index) + + // containment holds for each group the transitive closure of + // containment of other groups. + containment = make(map[index][]index) + ) + for _, g := range b.supp.TerritoryContainment.Group { + group := b.region.index(g.Type) + groupIdx := b.groups[group] + for _, mem := range strings.Split(g.Contains, " ") { + r := b.region.index(mem) + mm[r] = append(mm[r], groupIdx) + if g, ok := b.groups[r]; ok { + mm[group] = append(mm[group], g) + containment[groupIdx] = append(containment[groupIdx], g) + } + } + } + + regionContainment := make([]uint32, len(b.groups)) + for _, g := range b.groups { + l := containment[g] + + // Compute the transitive closure of containment. + for i := 0; i < len(l); i++ { + l = append(l, containment[l[i]]...) + } + + // Compute the bitmask. + regionContainment[g] = 1 << g + for _, v := range l { + regionContainment[g] |= 1 << v + } + // log.Printf("%d: %X", g, regionContainment[g]) + } + b.writeSlice("regionContainment", regionContainment) + + regionInclusion := make([]uint8, len(b.region.s)) + bvs := make(map[uint32]index) + // Make the first bitvector positions correspond with the groups. + for r, i := range b.groups { + bv := uint32(1 << i) + for _, g := range mm[r] { + bv |= 1 << g + } + bvs[bv] = i + regionInclusion[r] = uint8(bvs[bv]) + } + for r := 1; r < len(b.region.s); r++ { + if _, ok := b.groups[r]; !ok { + bv := uint32(0) + for _, g := range mm[r] { + bv |= 1 << g + } + if bv == 0 { + // Pick the world for unspecified regions. + bv = 1 << b.groups[b.region.index("001")] + } + if _, ok := bvs[bv]; !ok { + bvs[bv] = index(len(bvs)) + } + regionInclusion[r] = uint8(bvs[bv]) + } + } + b.writeSlice("regionInclusion", regionInclusion) + regionInclusionBits := make([]uint32, len(bvs)) + for k, v := range bvs { + regionInclusionBits[v] = uint32(k) + } + // Add bit vectors for increasingly large distances until a fixed point is reached. 
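+	// Each pass ORs in the vectors of all groups already reachable; newly produced
+	// vectors are appended to regionInclusionBits and processed in turn, so the loop
+	// ends once no new set appears.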
+ regionInclusionNext := []uint8{} + for i := 0; i < len(regionInclusionBits); i++ { + bits := regionInclusionBits[i] + next := bits + for i := uint(0); i < uint(len(b.groups)); i++ { + if bits&(1<<i) != 0 { + next |= regionInclusionBits[i] + } + } + if _, ok := bvs[next]; !ok { + bvs[next] = index(len(bvs)) + regionInclusionBits = append(regionInclusionBits, next) + } + regionInclusionNext = append(regionInclusionNext, uint8(bvs[next])) + } + b.writeSlice("regionInclusionBits", regionInclusionBits) + b.writeSlice("regionInclusionNext", regionInclusionNext) +} + +type parentRel struct { + lang uint16 + script uint8 + maxScript uint8 + toRegion uint16 + fromRegion []uint16 +} + +func (b *builder) writeParents() { + b.writeType(parentRel{}) + + parents := []parentRel{} + + // Construct parent overrides. + n := 0 + for _, p := range b.data.Supplemental().ParentLocales.ParentLocale { + // Skipping non-standard scripts to root is implemented using addTags. + if p.Parent == "root" { + continue + } + + sub := strings.Split(p.Parent, "_") + parent := parentRel{lang: b.langIndex(sub[0])} + if len(sub) == 2 { + // TODO: check that all undefined scripts are indeed Latn in these + // cases. + parent.maxScript = uint8(b.script.index("Latn")) + parent.toRegion = uint16(b.region.index(sub[1])) + } else { + parent.script = uint8(b.script.index(sub[1])) + parent.maxScript = parent.script + parent.toRegion = uint16(b.region.index(sub[2])) + } + for _, c := range strings.Split(p.Locales, " ") { + region := b.region.index(c[strings.LastIndex(c, "_")+1:]) + parent.fromRegion = append(parent.fromRegion, uint16(region)) + } + parents = append(parents, parent) + n += len(parent.fromRegion) + } + b.writeSliceAddSize("parents", n*2, parents) +} + +func main() { + gen.Init() + + gen.Repackage("gen_common.go", "common.go", "language") + + w := gen.NewCodeWriter() + defer w.WriteGoFile("tables.go", "language") + + fmt.Fprintln(w, `import "golang.org/x/text/internal/tag"`) + + b := newBuilder(w) + gen.WriteCLDRVersion(w) + + b.parseIndices() + b.writeType(fromTo{}) + b.writeLanguage() + b.writeScript() + b.writeRegion() + b.writeVariant() + // TODO: b.writeLocale() + b.computeRegionGroups() + b.writeLikelyData() + b.writeMatchData() + b.writeRegionInclusionData() + b.writeParents() +} diff --git a/vendor/golang.org/x/text/language/match.go b/vendor/golang.org/x/text/language/match.go new file mode 100644 index 000000000..eec72bcc1 --- /dev/null +++ b/vendor/golang.org/x/text/language/match.go @@ -0,0 +1,840 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import "errors" + +// Matcher is the interface that wraps the Match method. +// +// Match returns the best match for any of the given tags, along with +// a unique index associated with the returned tag and a confidence +// score. +type Matcher interface { + Match(t ...Tag) (tag Tag, index int, c Confidence) +} + +// Comprehends reports the confidence score for a speaker of a given language +// to being able to comprehend the written form of an alternative language. +func Comprehends(speaker, alternative Tag) Confidence { + _, _, c := NewMatcher([]Tag{alternative}).Match(speaker) + return c +} + +// NewMatcher returns a Matcher that matches an ordered list of preferred tags +// against a list of supported tags based on written intelligibility, closeness +// of dialect, equivalence of subtags and various other rules. 
It is initialized +// with the list of supported tags. The first element is used as the default +// value in case no match is found. +// +// Its Match method matches the first of the given Tags to reach a certain +// confidence threshold. The tags passed to Match should therefore be specified +// in order of preference. Extensions are ignored for matching. +// +// The index returned by the Match method corresponds to the index of the +// matched tag in t, but is augmented with the Unicode extension ('u')of the +// corresponding preferred tag. This allows user locale options to be passed +// transparently. +func NewMatcher(t []Tag) Matcher { + return newMatcher(t) +} + +func (m *matcher) Match(want ...Tag) (t Tag, index int, c Confidence) { + match, w, c := m.getBest(want...) + if match == nil { + t = m.default_.tag + } else { + t, index = match.tag, match.index + } + // Copy options from the user-provided tag into the result tag. This is hard + // to do after the fact, so we do it here. + // TODO: consider also adding in variants that are compatible with the + // matched language. + // TODO: Add back region if it is non-ambiguous? Or create another tag to + // preserve the region? + if u, ok := w.Extension('u'); ok { + t, _ = Raw.Compose(t, u) + } + return t, index, c +} + +type scriptRegionFlags uint8 + +const ( + isList = 1 << iota + scriptInFrom + regionInFrom +) + +func (t *Tag) setUndefinedLang(id langID) { + if t.lang == 0 { + t.lang = id + } +} + +func (t *Tag) setUndefinedScript(id scriptID) { + if t.script == 0 { + t.script = id + } +} + +func (t *Tag) setUndefinedRegion(id regionID) { + if t.region == 0 || t.region.contains(id) { + t.region = id + } +} + +// ErrMissingLikelyTagsData indicates no information was available +// to compute likely values of missing tags. +var ErrMissingLikelyTagsData = errors.New("missing likely tags data") + +// addLikelySubtags sets subtags to their most likely value, given the locale. +// In most cases this means setting fields for unknown values, but in some +// cases it may alter a value. It returns a ErrMissingLikelyTagsData error +// if the given locale cannot be expanded. +func (t Tag) addLikelySubtags() (Tag, error) { + id, err := addTags(t) + if err != nil { + return t, err + } else if id.equalTags(t) { + return t, nil + } + id.remakeString() + return id, nil +} + +// specializeRegion attempts to specialize a group region. +func specializeRegion(t *Tag) bool { + if i := regionInclusion[t.region]; i < nRegionGroups { + x := likelyRegionGroup[i] + if langID(x.lang) == t.lang && scriptID(x.script) == t.script { + t.region = regionID(x.region) + } + return true + } + return false +} + +func addTags(t Tag) (Tag, error) { + // We leave private use identifiers alone. + if t.private() { + return t, nil + } + if t.script != 0 && t.region != 0 { + if t.lang != 0 { + // already fully specified + specializeRegion(&t) + return t, nil + } + // Search matches for und-script-region. Note that for these cases + // region will never be a group so there is no need to check for this. + list := likelyRegion[t.region : t.region+1] + if x := list[0]; x.flags&isList != 0 { + list = likelyRegionList[x.lang : x.lang+uint16(x.script)] + } + for _, x := range list { + // Deviating from the spec. See match_test.go for details. + if scriptID(x.script) == t.script { + t.setUndefinedLang(langID(x.lang)) + return t, nil + } + } + } + if t.lang != 0 { + // Search matches for lang-script and lang-region, where lang != und. 
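For orientation, the matcher being vendored here is normally driven through the public golang.org/x/text/language API. A usage sketch (the supported tags and preferences below are arbitrary examples):

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// The first supported tag doubles as the default when nothing matches.
	supported := []language.Tag{
		language.AmericanEnglish, // en-US
		language.German,          // de
		language.Portuguese,      // pt
	}
	m := language.NewMatcher(supported)

	// Preferences in order, e.g. taken from a user profile.
	tag, index, conf := m.Match(language.Make("pt-BR"), language.Make("fr"))
	fmt.Println(tag, index, conf) // expected to pick the Portuguese entry

	// Comprehends wraps a single pairwise check around the same machinery.
	fmt.Println(language.Comprehends(language.Make("nn"), language.Make("nb")))
}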
+ if t.lang < langNoIndexOffset { + x := likelyLang[t.lang] + if x.flags&isList != 0 { + list := likelyLangList[x.region : x.region+uint16(x.script)] + if t.script != 0 { + for _, x := range list { + if scriptID(x.script) == t.script && x.flags&scriptInFrom != 0 { + t.setUndefinedRegion(regionID(x.region)) + return t, nil + } + } + } else if t.region != 0 { + count := 0 + goodScript := true + tt := t + for _, x := range list { + // We visit all entries for which the script was not + // defined, including the ones where the region was not + // defined. This allows for proper disambiguation within + // regions. + if x.flags&scriptInFrom == 0 && t.region.contains(regionID(x.region)) { + tt.region = regionID(x.region) + tt.setUndefinedScript(scriptID(x.script)) + goodScript = goodScript && tt.script == scriptID(x.script) + count++ + } + } + if count == 1 { + return tt, nil + } + // Even if we fail to find a unique Region, we might have + // an unambiguous script. + if goodScript { + t.script = tt.script + } + } + } + } + } else { + // Search matches for und-script. + if t.script != 0 { + x := likelyScript[t.script] + if x.region != 0 { + t.setUndefinedRegion(regionID(x.region)) + t.setUndefinedLang(langID(x.lang)) + return t, nil + } + } + // Search matches for und-region. If und-script-region exists, it would + // have been found earlier. + if t.region != 0 { + if i := regionInclusion[t.region]; i < nRegionGroups { + x := likelyRegionGroup[i] + if x.region != 0 { + t.setUndefinedLang(langID(x.lang)) + t.setUndefinedScript(scriptID(x.script)) + t.region = regionID(x.region) + } + } else { + x := likelyRegion[t.region] + if x.flags&isList != 0 { + x = likelyRegionList[x.lang] + } + if x.script != 0 && x.flags != scriptInFrom { + t.setUndefinedLang(langID(x.lang)) + t.setUndefinedScript(scriptID(x.script)) + return t, nil + } + } + } + } + + // Search matches for lang. + if t.lang < langNoIndexOffset { + x := likelyLang[t.lang] + if x.flags&isList != 0 { + x = likelyLangList[x.region] + } + if x.region != 0 { + t.setUndefinedScript(scriptID(x.script)) + t.setUndefinedRegion(regionID(x.region)) + } + specializeRegion(&t) + if t.lang == 0 { + t.lang = _en // default language + } + return t, nil + } + return t, ErrMissingLikelyTagsData +} + +func (t *Tag) setTagsFrom(id Tag) { + t.lang = id.lang + t.script = id.script + t.region = id.region +} + +// minimize removes the region or script subtags from t such that +// t.addLikelySubtags() == t.minimize().addLikelySubtags(). +func (t Tag) minimize() (Tag, error) { + t, err := minimizeTags(t) + if err != nil { + return t, err + } + t.remakeString() + return t, nil +} + +// minimizeTags mimics the behavior of the ICU 51 C implementation. +func minimizeTags(t Tag) (Tag, error) { + if t.equalTags(und) { + return t, nil + } + max, err := addTags(t) + if err != nil { + return t, err + } + for _, id := range [...]Tag{ + {lang: t.lang}, + {lang: t.lang, region: t.region}, + {lang: t.lang, script: t.script}, + } { + if x, err := addTags(id); err == nil && max.equalTags(x) { + t.setTagsFrom(id) + break + } + } + return t, nil +} + +// Tag Matching +// CLDR defines an algorithm for finding the best match between two sets of language +// tags. The basic algorithm defines how to score a possible match and then find +// the match with the best score +// (see http://www.unicode.org/reports/tr35/#LanguageMatching). +// Using scoring has several disadvantages. 
The scoring obfuscates the importance of +// the various factors considered, making the algorithm harder to understand. Using +// scoring also requires the full score to be computed for each pair of tags. +// +// We will use a different algorithm which aims to have the following properties: +// - clarity on the precedence of the various selection factors, and +// - improved performance by allowing early termination of a comparison. +// +// Matching algorithm (overview) +// Input: +// - supported: a set of supported tags +// - default: the default tag to return in case there is no match +// - desired: list of desired tags, ordered by preference, starting with +// the most-preferred. +// +// Algorithm: +// 1) Set the best match to the lowest confidence level +// 2) For each tag in "desired": +// a) For each tag in "supported": +// 1) compute the match between the two tags. +// 2) if the match is better than the previous best match, replace it +// with the new match. (see next section) +// b) if the current best match is above a certain threshold, return this +// match without proceeding to the next tag in "desired". [See Note 1] +// 3) If the best match so far is below a certain threshold, return "default". +// +// Ranking: +// We use two phases to determine whether one pair of tags are a better match +// than another pair of tags. First, we determine a rough confidence level. If the +// levels are different, the one with the highest confidence wins. +// Second, if the rough confidence levels are identical, we use a set of tie-breaker +// rules. +// +// The confidence level of matching a pair of tags is determined by finding the +// lowest confidence level of any matches of the corresponding subtags (the +// result is deemed as good as its weakest link). +// We define the following levels: +// Exact - An exact match of a subtag, before adding likely subtags. +// MaxExact - An exact match of a subtag, after adding likely subtags. +// [See Note 2]. +// High - High level of mutual intelligibility between different subtag +// variants. +// Low - Low level of mutual intelligibility between different subtag +// variants. +// No - No mutual intelligibility. +// +// The following levels can occur for each type of subtag: +// Base: Exact, MaxExact, High, Low, No +// Script: Exact, MaxExact [see Note 3], Low, No +// Region: Exact, MaxExact, High +// Variant: Exact, High +// Private: Exact, No +// +// Any result with a confidence level of Low or higher is deemed a possible match. +// Once a desired tag matches any of the supported tags with a level of MaxExact +// or higher, the next desired tag is not considered (see Step 2.b). +// Note that CLDR provides languageMatching data that defines close equivalence +// classes for base languages, scripts and regions. +// +// Tie-breaking +// If we get the same confidence level for two matches, we apply a sequence of +// tie-breaking rules. The first that succeeds defines the result. The rules are +// applied in the following order. +// 1) Original language was defined and was identical. +// 2) Original region was defined and was identical. +// 3) Distance between two maximized regions was the smallest. +// 4) Original script was defined and was identical. +// 5) Distance from want tag to have tag using the parent relation [see Note 5.] +// If there is still no winner after these rules are applied, the first match +// found wins. 
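Step 2.b above (early termination) is observable from the outside: once a desired tag reaches a MaxExact-or-better match, later preferences are not consulted even if they would match exactly. A hedged sketch of that behavior:

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	m := language.NewMatcher([]language.Tag{
		language.English, // en
		language.Spanish, // es
	})

	// en-US maximizes to the same tag as en, so the first preference already
	// matches at the MaxExact level and the second preference (an exact match
	// for es) should never be considered.
	tag, index, conf := m.Match(language.Make("en-US"), language.Spanish)
	fmt.Println(tag, index, conf)
}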
+// +// Notes: +// [1] Note that even if we may not have a perfect match, if a match is above a +// certain threshold, it is considered a better match than any other match +// to a tag later in the list of preferred language tags. +// [2] In practice, as matching of Exact is done in a separate phase from +// matching the other levels, we reuse the Exact level to mean MaxExact in +// the second phase. As a consequence, we only need the levels defined by +// the Confidence type. The MaxExact confidence level is mapped to High in +// the public API. +// [3] We do not differentiate between maximized script values that were derived +// from suppressScript versus most likely tag data. We determined that in +// ranking the two, one ranks just after the other. Moreover, the two cannot +// occur concurrently. As a consequence, they are identical for practical +// purposes. +// [4] In case of deprecated, macro-equivalents and legacy mappings, we assign +// the MaxExact level to allow iw vs he to still be a closer match than +// en-AU vs en-US, for example. +// [5] In CLDR a locale inherits fields that are unspecified for this locale +// from its parent. Therefore, if a locale is a parent of another locale, +// it is a strong measure for closeness, especially when no other tie +// breaker rule applies. One could also argue it is inconsistent, for +// example, when pt-AO matches pt (which CLDR equates with pt-BR), even +// though its parent is pt-PT according to the inheritance rules. +// +// Implementation Details: +// There are several performance considerations worth pointing out. Most notably, +// we preprocess as much as possible (within reason) at the time of creation of a +// matcher. This includes: +// - creating a per-language map, which includes data for the raw base language +// and its canonicalized variant (if applicable), +// - expanding entries for the equivalence classes defined in CLDR's +// languageMatch data. +// The per-language map ensures that typically only a very small number of tags +// need to be considered. The pre-expansion of canonicalized subtags and +// equivalence classes reduces the amount of map lookups that need to be done at +// runtime. + +// matcher keeps a set of supported language tags, indexed by language. +type matcher struct { + default_ *haveTag + index map[langID]*matchHeader + passSettings bool +} + +// matchHeader has the lists of tags for exact matches and matches based on +// maximized and canonicalized tags for a given language. +type matchHeader struct { + exact []haveTag + max []haveTag +} + +// haveTag holds a supported Tag and its maximized script and region. The maximized +// or canonicalized language is not stored as it is not needed during matching. +type haveTag struct { + tag Tag + + // index of this tag in the original list of supported tags. + index int + + // conf is the maximum confidence that can result from matching this haveTag. + // When conf < Exact this means it was inserted after applying a CLDR equivalence rule. + conf Confidence + + // Maximized region and script. + maxRegion regionID + maxScript scriptID + + // altScript may be checked as an alternative match to maxScript. If altScript + // matches, the confidence level for this match is Low. Theoretically there + // could be multiple alternative scripts. This does not occur in practice. + altScript scriptID + + // nextMax is the index of the next haveTag with the same maximized tags. 
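Note [4] above has a directly visible effect: deprecated codes are folded into their replacements at the MaxExact level, so the retired Hebrew code iw still matches a service that only declares he. A hedged sketch:

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	m := language.NewMatcher([]language.Tag{language.Make("he")})

	// "iw" is the deprecated code for Hebrew; canonicalization maps it to "he",
	// so the confidence here should be high rather than No.
	tag, _, conf := m.Match(language.Make("iw"))
	fmt.Println(tag, conf)
}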
+ nextMax uint16 +} + +func makeHaveTag(tag Tag, index int) (haveTag, langID) { + max := tag + if tag.lang != 0 { + max, _ = max.canonicalize(All) + max, _ = addTags(max) + max.remakeString() + } + return haveTag{tag, index, Exact, max.region, max.script, altScript(max.lang, max.script), 0}, max.lang +} + +// altScript returns an alternative script that may match the given script with +// a low confidence. At the moment, the langMatch data allows for at most one +// script to map to another and we rely on this to keep the code simple. +func altScript(l langID, s scriptID) scriptID { + for _, alt := range matchScript { + if (alt.lang == 0 || langID(alt.lang) == l) && scriptID(alt.have) == s { + return scriptID(alt.want) + } + } + return 0 +} + +// addIfNew adds a haveTag to the list of tags only if it is a unique tag. +// Tags that have the same maximized values are linked by index. +func (h *matchHeader) addIfNew(n haveTag, exact bool) { + // Don't add new exact matches. + for _, v := range h.exact { + if v.tag.equalsRest(n.tag) { + return + } + } + if exact { + h.exact = append(h.exact, n) + } + // Allow duplicate maximized tags, but create a linked list to allow quickly + // comparing the equivalents and bail out. + for i, v := range h.max { + if v.maxScript == n.maxScript && + v.maxRegion == n.maxRegion && + v.tag.variantOrPrivateTagStr() == n.tag.variantOrPrivateTagStr() { + for h.max[i].nextMax != 0 { + i = int(h.max[i].nextMax) + } + h.max[i].nextMax = uint16(len(h.max)) + break + } + } + h.max = append(h.max, n) +} + +// header returns the matchHeader for the given language. It creates one if +// it doesn't already exist. +func (m *matcher) header(l langID) *matchHeader { + if h := m.index[l]; h != nil { + return h + } + h := &matchHeader{} + m.index[l] = h + return h +} + +// newMatcher builds an index for the given supported tags and returns it as +// a matcher. It also expands the index by considering various equivalence classes +// for a given tag. +func newMatcher(supported []Tag) *matcher { + m := &matcher{ + index: make(map[langID]*matchHeader), + } + if len(supported) == 0 { + m.default_ = &haveTag{} + return m + } + // Add supported languages to the index. Add exact matches first to give + // them precedence. + for i, tag := range supported { + pair, _ := makeHaveTag(tag, i) + m.header(tag.lang).addIfNew(pair, true) + } + m.default_ = &m.header(supported[0].lang).exact[0] + for i, tag := range supported { + pair, max := makeHaveTag(tag, i) + if max != tag.lang { + m.header(max).addIfNew(pair, false) + } + } + + // update is used to add indexes in the map for equivalent languages. + // If force is true, the update will also apply to derived entries. To + // avoid applying a "transitive closure", use false. + update := func(want, have uint16, conf Confidence, force bool) { + if hh := m.index[langID(have)]; hh != nil { + if !force && len(hh.exact) == 0 { + return + } + hw := m.header(langID(want)) + for _, v := range hh.max { + if conf < v.conf { + v.conf = conf + } + v.nextMax = 0 // this value needs to be recomputed + if v.altScript != 0 { + v.altScript = altScript(langID(want), v.maxScript) + } + hw.addIfNew(v, conf == Exact && len(hh.exact) > 0) + } + } + } + + // Add entries for languages with mutual intelligibility as defined by CLDR's + // languageMatch data. 
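The per-language index described above amounts to bucketing the supported tags by base language so that only a handful of candidates are compared per desired tag. A simplified, self-contained sketch of that idea (the types here are invented for illustration and are not the package's own):

package main

import "fmt"

// supportedTag is a stand-in for the package's haveTag.
type supportedTag struct {
	full string // the original tag
	base string // its base language
}

func main() {
	supported := []supportedTag{
		{"en-GB", "en"}, {"en-US", "en"}, {"pt-BR", "pt"},
	}

	// Build the index once, at matcher construction time.
	index := map[string][]supportedTag{}
	for _, t := range supported {
		index[t.base] = append(index[t.base], t)
	}

	// At match time, only tags sharing the desired base language are considered.
	for _, cand := range index["en"] {
		fmt.Println("candidate:", cand.full)
	}
}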
+ for _, ml := range matchLang { + update(ml.want, ml.have, Confidence(ml.conf), false) + if !ml.oneway { + update(ml.have, ml.want, Confidence(ml.conf), false) + } + } + + // Add entries for possible canonicalizations. This is an optimization to + // ensure that only one map lookup needs to be done at runtime per desired tag. + // First we match deprecated equivalents. If they are perfect equivalents + // (their canonicalization simply substitutes a different language code, but + // nothing else), the match confidence is Exact, otherwise it is High. + for i, lm := range langAliasMap { + if lm.from == _sh { + continue + } + + // If deprecated codes match and there is no fiddling with the script or + // or region, we consider it an exact match. + conf := Exact + if langAliasTypes[i] != langMacro { + if !isExactEquivalent(langID(lm.from)) { + conf = High + } + update(lm.to, lm.from, conf, true) + } + update(lm.from, lm.to, conf, true) + } + return m +} + +// getBest gets the best matching tag in m for any of the given tags, taking into +// account the order of preference of the given tags. +func (m *matcher) getBest(want ...Tag) (got *haveTag, orig Tag, c Confidence) { + best := bestMatch{} + for _, w := range want { + var max Tag + // Check for exact match first. + h := m.index[w.lang] + if w.lang != 0 { + // Base language is defined. + if h == nil { + continue + } + for i := range h.exact { + have := &h.exact[i] + if have.tag.equalsRest(w) { + return have, w, Exact + } + } + max, _ = w.canonicalize(Legacy | Deprecated) + max, _ = addTags(max) + } else { + // Base language is not defined. + if h != nil { + for i := range h.exact { + have := &h.exact[i] + if have.tag.equalsRest(w) { + return have, w, Exact + } + } + } + if w.script == 0 && w.region == 0 { + // We skip all tags matching und for approximate matching, including + // private tags. + continue + } + max, _ = addTags(w) + if h = m.index[max.lang]; h == nil { + continue + } + } + // Check for match based on maximized tag. + for i := range h.max { + have := &h.max[i] + best.update(have, w, max.script, max.region) + if best.conf == Exact { + for have.nextMax != 0 { + have = &h.max[have.nextMax] + best.update(have, w, max.script, max.region) + } + return best.have, best.want, High + } + } + } + if best.conf <= No { + if len(want) != 0 { + return nil, want[0], No + } + return nil, Tag{}, No + } + return best.have, best.want, best.conf +} + +// bestMatch accumulates the best match so far. +type bestMatch struct { + have *haveTag + want Tag + conf Confidence + // Cached results from applying tie-breaking rules. + origLang bool + origReg bool + regDist uint8 + origScript bool + parentDist uint8 // 255 if have is not an ancestor of want tag. +} + +// update updates the existing best match if the new pair is considered to be a +// better match. +// To determine if the given pair is a better match, it first computes the rough +// confidence level. If this surpasses the current match, it will replace it and +// update the tie-breaker rule cache. If there is a tie, it proceeds with applying +// a series of tie-breaker rules. If there is no conclusive winner after applying +// the tie-breaker rules, it leaves the current match as the preferred match. +func (m *bestMatch) update(have *haveTag, tag Tag, maxScript scriptID, maxRegion regionID) { + // Bail if the maximum attainable confidence is below that of the current best match. 
+ c := have.conf + if c < m.conf { + return + } + if have.maxScript != maxScript { + // There is usually very little comprehension between different scripts. + // In a few cases there may still be Low comprehension. This possibility is + // pre-computed and stored in have.altScript. + if Low < m.conf || have.altScript != maxScript { + return + } + c = Low + } else if have.maxRegion != maxRegion { + // There is usually a small difference between languages across regions. + // We use the region distance (below) to disambiguate between equal matches. + if High < c { + c = High + } + } + + // We store the results of the computations of the tie-breaker rules along + // with the best match. There is no need to do the checks once we determine + // we have a winner, but we do still need to do the tie-breaker computations. + // We use "beaten" to keep track if we still need to do the checks. + beaten := false // true if the new pair defeats the current one. + if c != m.conf { + if c < m.conf { + return + } + beaten = true + } + + // Tie-breaker rules: + // We prefer if the pre-maximized language was specified and identical. + origLang := have.tag.lang == tag.lang && tag.lang != 0 + if !beaten && m.origLang != origLang { + if m.origLang { + return + } + beaten = true + } + + // We prefer if the pre-maximized region was specified and identical. + origReg := have.tag.region == tag.region && tag.region != 0 + if !beaten && m.origReg != origReg { + if m.origReg { + return + } + beaten = true + } + + // Next we prefer smaller distances between regions, as defined by regionDist. + regDist := regionDist(have.maxRegion, maxRegion, tag.lang) + if !beaten && m.regDist != regDist { + if regDist > m.regDist { + return + } + beaten = true + } + + // Next we prefer if the pre-maximized script was specified and identical. + origScript := have.tag.script == tag.script && tag.script != 0 + if !beaten && m.origScript != origScript { + if m.origScript { + return + } + beaten = true + } + + // Finally we prefer tags which have a closer parent relationship. + parentDist := parentDistance(have.tag.region, tag) + if !beaten && m.parentDist != parentDist { + if parentDist > m.parentDist { + return + } + beaten = true + } + + // Update m to the newly found best match. + if beaten { + m.have = have + m.want = tag + m.conf = c + m.origLang = origLang + m.origReg = origReg + m.origScript = origScript + m.regDist = regDist + m.parentDist = parentDist + } +} + +// parentDistance returns the number of times Parent must be called before the +// regions match. It is assumed that it has already been checked that lang and +// script are identical. If haveRegion does not occur in the ancestor chain of +// tag, it returns 255. +func parentDistance(haveRegion regionID, tag Tag) uint8 { + p := tag.Parent() + d := uint8(1) + for haveRegion != p.region { + if p.region == 0 { + return 255 + } + p = p.Parent() + d++ + } + return d +} + +// regionDist wraps regionDistance with some exceptions to the algorithmic distance. +func regionDist(a, b regionID, lang langID) uint8 { + if lang == _en { + // Two variants of non-US English are close to each other, regardless of distance. + if a != _US && b != _US { + return 2 + } + } + return uint8(regionDistance(a, b)) +} + +// regionDistance computes the distance between two regions based on the +// distance in the graph of region containments as defined in CLDR. 
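The parent-relation tie-breaker above walks the CLDR parent chain; the same chain is reachable through the public API via Tag.Parent. A small hedged sketch printing the chain for one tag:

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// Walk the CLDR parent chain until the undefined tag is reached;
	// for en-GB this is expected to pass through en-001 and en.
	for t := language.Make("en-GB"); t.String() != "und"; t = t.Parent() {
		fmt.Println(t)
	}
}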
It iterates +// over increasingly inclusive sets of groups, represented as bit vectors, until +// the source bit vector has bits in common with the destination vector. +func regionDistance(a, b regionID) int { + if a == b { + return 0 + } + p, q := regionInclusion[a], regionInclusion[b] + if p < nRegionGroups { + p, q = q, p + } + set := regionInclusionBits + if q < nRegionGroups && set[p]&(1<<q) != 0 { + return 1 + } + d := 2 + for goal := set[q]; set[p]&goal == 0; p = regionInclusionNext[p] { + d++ + } + return d +} + +func (t Tag) variants() string { + if t.pVariant == 0 { + return "" + } + return t.str[t.pVariant:t.pExt] +} + +// variantOrPrivateTagStr returns variants or private use tags. +func (t Tag) variantOrPrivateTagStr() string { + if t.pExt > 0 { + return t.str[t.pVariant:t.pExt] + } + return t.str[t.pVariant:] +} + +// equalsRest compares everything except the language. +func (a Tag) equalsRest(b Tag) bool { + // TODO: don't include extensions in this comparison. To do this efficiently, + // though, we should handle private tags separately. + return a.script == b.script && a.region == b.region && a.variantOrPrivateTagStr() == b.variantOrPrivateTagStr() +} + +// isExactEquivalent returns true if canonicalizing the language will not alter +// the script or region of a tag. +func isExactEquivalent(l langID) bool { + for _, o := range notEquivalent { + if o == l { + return false + } + } + return true +} + +var notEquivalent []langID + +func init() { + // Create a list of all languages for which canonicalization may alter the + // script or region. + for _, lm := range langAliasMap { + tag := Tag{lang: langID(lm.from)} + if tag, _ = tag.canonicalize(All); tag.script != 0 || tag.region != 0 { + notEquivalent = append(notEquivalent, langID(lm.from)) + } + } +} diff --git a/vendor/golang.org/x/text/language/parse.go b/vendor/golang.org/x/text/language/parse.go new file mode 100644 index 000000000..cfa28f56e --- /dev/null +++ b/vendor/golang.org/x/text/language/parse.go @@ -0,0 +1,859 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "bytes" + "errors" + "fmt" + "sort" + "strconv" + "strings" + + "golang.org/x/text/internal/tag" +) + +// isAlpha returns true if the byte is not a digit. +// b must be an ASCII letter or digit. +func isAlpha(b byte) bool { + return b > '9' +} + +// isAlphaNum returns true if the string contains only ASCII letters or digits. +func isAlphaNum(s []byte) bool { + for _, c := range s { + if !('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9') { + return false + } + } + return true +} + +// errSyntax is returned by any of the parsing functions when the +// input is not well-formed, according to BCP 47. +// TODO: return the position at which the syntax error occurred? +var errSyntax = errors.New("language: tag is not well-formed") + +// ValueError is returned by any of the parsing functions when the +// input is well-formed but the respective subtag is not recognized +// as a valid value. +type ValueError struct { + v [8]byte +} + +func mkErrInvalid(s []byte) error { + var e ValueError + copy(e.v[:], s) + return e +} + +func (e ValueError) tag() []byte { + n := bytes.IndexByte(e.v[:], 0) + if n == -1 { + n = 8 + } + return e.v[:n] +} + +// Error implements the error interface. 
+func (e ValueError) Error() string { + return fmt.Sprintf("language: subtag %q is well-formed but unknown", e.tag()) +} + +// Subtag returns the subtag for which the error occurred. +func (e ValueError) Subtag() string { + return string(e.tag()) +} + +// scanner is used to scan BCP 47 tokens, which are separated by _ or -. +type scanner struct { + b []byte + bytes [max99thPercentileSize]byte + token []byte + start int // start position of the current token + end int // end position of the current token + next int // next point for scan + err error + done bool +} + +func makeScannerString(s string) scanner { + scan := scanner{} + if len(s) <= len(scan.bytes) { + scan.b = scan.bytes[:copy(scan.bytes[:], s)] + } else { + scan.b = []byte(s) + } + scan.init() + return scan +} + +// makeScanner returns a scanner using b as the input buffer. +// b is not copied and may be modified by the scanner routines. +func makeScanner(b []byte) scanner { + scan := scanner{b: b} + scan.init() + return scan +} + +func (s *scanner) init() { + for i, c := range s.b { + if c == '_' { + s.b[i] = '-' + } + } + s.scan() +} + +// restToLower converts the string between start and end to lower case. +func (s *scanner) toLower(start, end int) { + for i := start; i < end; i++ { + c := s.b[i] + if 'A' <= c && c <= 'Z' { + s.b[i] += 'a' - 'A' + } + } +} + +func (s *scanner) setError(e error) { + if s.err == nil || (e == errSyntax && s.err != errSyntax) { + s.err = e + } +} + +// resizeRange shrinks or grows the array at position oldStart such that +// a new string of size newSize can fit between oldStart and oldEnd. +// Sets the scan point to after the resized range. +func (s *scanner) resizeRange(oldStart, oldEnd, newSize int) { + s.start = oldStart + if end := oldStart + newSize; end != oldEnd { + diff := end - oldEnd + if end < cap(s.b) { + b := make([]byte, len(s.b)+diff) + copy(b, s.b[:oldStart]) + copy(b[end:], s.b[oldEnd:]) + s.b = b + } else { + s.b = append(s.b[end:], s.b[oldEnd:]...) + } + s.next = end + (s.next - s.end) + s.end = end + } +} + +// replace replaces the current token with repl. +func (s *scanner) replace(repl string) { + s.resizeRange(s.start, s.end, len(repl)) + copy(s.b[s.start:], repl) +} + +// gobble removes the current token from the input. +// Caller must call scan after calling gobble. +func (s *scanner) gobble(e error) { + s.setError(e) + if s.start == 0 { + s.b = s.b[:+copy(s.b, s.b[s.next:])] + s.end = 0 + } else { + s.b = s.b[:s.start-1+copy(s.b[s.start-1:], s.b[s.end:])] + s.end = s.start - 1 + } + s.next = s.start +} + +// deleteRange removes the given range from s.b before the current token. +func (s *scanner) deleteRange(start, end int) { + s.setError(errSyntax) + s.b = s.b[:start+copy(s.b[start:], s.b[end:])] + diff := end - start + s.next -= diff + s.start -= diff + s.end -= diff +} + +// scan parses the next token of a BCP 47 string. Tokens that are larger +// than 8 characters or include non-alphanumeric characters result in an error +// and are gobbled and removed from the output. +// It returns the end position of the last token consumed. 
+func (s *scanner) scan() (end int) { + end = s.end + s.token = nil + for s.start = s.next; s.next < len(s.b); { + i := bytes.IndexByte(s.b[s.next:], '-') + if i == -1 { + s.end = len(s.b) + s.next = len(s.b) + i = s.end - s.start + } else { + s.end = s.next + i + s.next = s.end + 1 + } + token := s.b[s.start:s.end] + if i < 1 || i > 8 || !isAlphaNum(token) { + s.gobble(errSyntax) + continue + } + s.token = token + return end + } + if n := len(s.b); n > 0 && s.b[n-1] == '-' { + s.setError(errSyntax) + s.b = s.b[:len(s.b)-1] + } + s.done = true + return end +} + +// acceptMinSize parses multiple tokens of the given size or greater. +// It returns the end position of the last token consumed. +func (s *scanner) acceptMinSize(min int) (end int) { + end = s.end + s.scan() + for ; len(s.token) >= min; s.scan() { + end = s.end + } + return end +} + +// Parse parses the given BCP 47 string and returns a valid Tag. If parsing +// failed it returns an error and any part of the tag that could be parsed. +// If parsing succeeded but an unknown value was found, it returns +// ValueError. The Tag returned in this case is just stripped of the unknown +// value. All other values are preserved. It accepts tags in the BCP 47 format +// and extensions to this standard defined in +// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// The resulting tag is canonicalized using the default canonicalization type. +func Parse(s string) (t Tag, err error) { + return Default.Parse(s) +} + +// Parse parses the given BCP 47 string and returns a valid Tag. If parsing +// failed it returns an error and any part of the tag that could be parsed. +// If parsing succeeded but an unknown value was found, it returns +// ValueError. The Tag returned in this case is just stripped of the unknown +// value. All other values are preserved. It accepts tags in the BCP 47 format +// and extensions to this standard defined in +// http://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// The resulting tag is canonicalized using the the canonicalization type c. +func (c CanonType) Parse(s string) (t Tag, err error) { + // TODO: consider supporting old-style locale key-value pairs. + if s == "" { + return und, errSyntax + } + if len(s) <= maxAltTaglen { + b := [maxAltTaglen]byte{} + for i, c := range s { + // Generating invalid UTF-8 is okay as it won't match. + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } else if c == '_' { + c = '-' + } + b[i] = byte(c) + } + if t, ok := grandfathered(b); ok { + return t, nil + } + } + scan := makeScannerString(s) + t, err = parse(&scan, s) + t, changed := t.canonicalize(c) + if changed { + t.remakeString() + } + return t, err +} + +func parse(scan *scanner, s string) (t Tag, err error) { + t = und + var end int + if n := len(scan.token); n <= 1 { + scan.toLower(0, len(scan.b)) + if n == 0 || scan.token[0] != 'x' { + return t, errSyntax + } + end = parseExtensions(scan) + } else if n >= 4 { + return und, errSyntax + } else { // the usual case + t, end = parseTag(scan) + if n := len(scan.token); n == 1 { + t.pExt = uint16(end) + end = parseExtensions(scan) + } else if end < len(scan.b) { + scan.setError(errSyntax) + scan.b = scan.b[:end] + } + } + if int(t.pVariant) < len(scan.b) { + if end < len(s) { + s = s[:end] + } + if len(s) > 0 && tag.Compare(s, scan.b) == 0 { + t.str = s + } else { + t.str = string(scan.b) + } + } else { + t.pVariant, t.pExt = 0, 0 + } + return t, scan.err +} + +// parseTag parses language, script, region and variants. 
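The Parse entry points above normalize separators and case and then canonicalize according to a CanonType. A brief hedged usage sketch of the public API:

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// Underscores and mixed case are accepted and normalized.
	t, err := language.Parse("sr_latn_rs")
	fmt.Println(t, err) // expected: sr-Latn-RS <nil>

	// Canonicalization is selectable; Deprecated maps retired codes such as iw.
	d, _ := language.Deprecated.Parse("iw")
	fmt.Println(d) // expected: he
}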
+// It returns a Tag and the end position in the input that was parsed. +func parseTag(scan *scanner) (t Tag, end int) { + var e error + // TODO: set an error if an unknown lang, script or region is encountered. + t.lang, e = getLangID(scan.token) + scan.setError(e) + scan.replace(t.lang.String()) + langStart := scan.start + end = scan.scan() + for len(scan.token) == 3 && isAlpha(scan.token[0]) { + // From http://tools.ietf.org/html/bcp47, <lang>-<extlang> tags are equivalent + // to a tag of the form <extlang>. + lang, e := getLangID(scan.token) + if lang != 0 { + t.lang = lang + copy(scan.b[langStart:], lang.String()) + scan.b[langStart+3] = '-' + scan.start = langStart + 4 + } + scan.gobble(e) + end = scan.scan() + } + if len(scan.token) == 4 && isAlpha(scan.token[0]) { + t.script, e = getScriptID(script, scan.token) + if t.script == 0 { + scan.gobble(e) + } + end = scan.scan() + } + if n := len(scan.token); n >= 2 && n <= 3 { + t.region, e = getRegionID(scan.token) + if t.region == 0 { + scan.gobble(e) + } else { + scan.replace(t.region.String()) + } + end = scan.scan() + } + scan.toLower(scan.start, len(scan.b)) + t.pVariant = byte(end) + end = parseVariants(scan, end, t) + t.pExt = uint16(end) + return t, end +} + +var separator = []byte{'-'} + +// parseVariants scans tokens as long as each token is a valid variant string. +// Duplicate variants are removed. +func parseVariants(scan *scanner, end int, t Tag) int { + start := scan.start + varIDBuf := [4]uint8{} + variantBuf := [4][]byte{} + varID := varIDBuf[:0] + variant := variantBuf[:0] + last := -1 + needSort := false + for ; len(scan.token) >= 4; scan.scan() { + // TODO: measure the impact of needing this conversion and redesign + // the data structure if there is an issue. + v, ok := variantIndex[string(scan.token)] + if !ok { + // unknown variant + // TODO: allow user-defined variants? + scan.gobble(mkErrInvalid(scan.token)) + continue + } + varID = append(varID, v) + variant = append(variant, scan.token) + if !needSort { + if last < int(v) { + last = int(v) + } else { + needSort = true + // There is no legal combinations of more than 7 variants + // (and this is by no means a useful sequence). + const maxVariants = 8 + if len(varID) > maxVariants { + break + } + } + } + end = scan.end + } + if needSort { + sort.Sort(variantsSort{varID, variant}) + k, l := 0, -1 + for i, v := range varID { + w := int(v) + if l == w { + // Remove duplicates. + continue + } + varID[k] = varID[i] + variant[k] = variant[i] + k++ + l = w + } + if str := bytes.Join(variant[:k], separator); len(str) == 0 { + end = start - 1 + } else { + scan.resizeRange(start, end, len(str)) + copy(scan.b[scan.start:], str) + end = scan.end + } + } + return end +} + +type variantsSort struct { + i []uint8 + v [][]byte +} + +func (s variantsSort) Len() int { + return len(s.i) +} + +func (s variantsSort) Swap(i, j int) { + s.i[i], s.i[j] = s.i[j], s.i[i] + s.v[i], s.v[j] = s.v[j], s.v[i] +} + +func (s variantsSort) Less(i, j int) bool { + return s.i[i] < s.i[j] +} + +type bytesSort [][]byte + +func (b bytesSort) Len() int { + return len(b) +} + +func (b bytesSort) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func (b bytesSort) Less(i, j int) bool { + return bytes.Compare(b[i], b[j]) == -1 +} + +// parseExtensions parses and normalizes the extensions in the buffer. +// It returns the last position of scan.b that is part of any extension. +// It also trims scan.b to remove excess parts accordingly. 
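parseVariants above sorts variants into a canonical order and removes duplicates, which is visible through Parse. A hedged sketch (the exact canonical order is determined by the generated variantIndex table):

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// Both inputs should normalize to the same tag, with the variants in
	// canonical order and the duplicate removed.
	a, _ := language.Parse("de-1996-1901")
	b, _ := language.Parse("de-1901-1996-1901")
	fmt.Println(a, b)
}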
+func parseExtensions(scan *scanner) int { + start := scan.start + exts := [][]byte{} + private := []byte{} + end := scan.end + for len(scan.token) == 1 { + extStart := scan.start + ext := scan.token[0] + end = parseExtension(scan) + extension := scan.b[extStart:end] + if len(extension) < 3 || (ext != 'x' && len(extension) < 4) { + scan.setError(errSyntax) + end = extStart + continue + } else if start == extStart && (ext == 'x' || scan.start == len(scan.b)) { + scan.b = scan.b[:end] + return end + } else if ext == 'x' { + private = extension + break + } + exts = append(exts, extension) + } + sort.Sort(bytesSort(exts)) + if len(private) > 0 { + exts = append(exts, private) + } + scan.b = scan.b[:start] + if len(exts) > 0 { + scan.b = append(scan.b, bytes.Join(exts, separator)...) + } else if start > 0 { + // Strip trailing '-'. + scan.b = scan.b[:start-1] + } + return end +} + +// parseExtension parses a single extension and returns the position of +// the extension end. +func parseExtension(scan *scanner) int { + start, end := scan.start, scan.end + switch scan.token[0] { + case 'u': + attrStart := end + scan.scan() + for last := []byte{}; len(scan.token) > 2; scan.scan() { + if bytes.Compare(scan.token, last) != -1 { + // Attributes are unsorted. Start over from scratch. + p := attrStart + 1 + scan.next = p + attrs := [][]byte{} + for scan.scan(); len(scan.token) > 2; scan.scan() { + attrs = append(attrs, scan.token) + end = scan.end + } + sort.Sort(bytesSort(attrs)) + copy(scan.b[p:], bytes.Join(attrs, separator)) + break + } + last = scan.token + end = scan.end + } + var last, key []byte + for attrEnd := end; len(scan.token) == 2; last = key { + key = scan.token + keyEnd := scan.end + end = scan.acceptMinSize(3) + // TODO: check key value validity + if keyEnd == end || bytes.Compare(key, last) != 1 { + // We have an invalid key or the keys are not sorted. + // Start scanning keys from scratch and reorder. + p := attrEnd + 1 + scan.next = p + keys := [][]byte{} + for scan.scan(); len(scan.token) == 2; { + keyStart, keyEnd := scan.start, scan.end + end = scan.acceptMinSize(3) + if keyEnd != end { + keys = append(keys, scan.b[keyStart:end]) + } else { + scan.setError(errSyntax) + end = keyStart + } + } + sort.Sort(bytesSort(keys)) + reordered := bytes.Join(keys, separator) + if e := p + len(reordered); e < end { + scan.deleteRange(e, end) + end = e + } + copy(scan.b[p:], bytes.Join(keys, separator)) + break + } + } + case 't': + scan.scan() + if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) { + _, end = parseTag(scan) + scan.toLower(start, end) + } + for len(scan.token) == 2 && !isAlpha(scan.token[1]) { + end = scan.acceptMinSize(3) + } + case 'x': + end = scan.acceptMinSize(1) + default: + end = scan.acceptMinSize(2) + } + return end +} + +// Compose creates a Tag from individual parts, which may be of type Tag, Base, +// Script, Region, Variant, []Variant, Extension, []Extension or error. If a +// Base, Script or Region or slice of type Variant or Extension is passed more +// than once, the latter will overwrite the former. Variants and Extensions are +// accumulated, but if two extensions of the same type are passed, the latter +// will replace the former. A Tag overwrites all former values and typically +// only makes sense as the first argument. The resulting tag is returned after +// canonicalizing using the Default CanonType. If one or more errors are +// encountered, one of the errors is returned. 
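The Compose behavior documented above (a later part of the same kind overwrites an earlier one) is easy to demonstrate with the public API. A hedged sketch:

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	region, _ := language.ParseRegion("GB")

	// The Region passed after the Tag overrides the tag's original region.
	t, err := language.Compose(language.AmericanEnglish, region)
	fmt.Println(t, err) // expected: en-GB <nil>
}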
+func Compose(part ...interface{}) (t Tag, err error) { + return Default.Compose(part...) +} + +// Compose creates a Tag from individual parts, which may be of type Tag, Base, +// Script, Region, Variant, []Variant, Extension, []Extension or error. If a +// Base, Script or Region or slice of type Variant or Extension is passed more +// than once, the latter will overwrite the former. Variants and Extensions are +// accumulated, but if two extensions of the same type are passed, the latter +// will replace the former. A Tag overwrites all former values and typically +// only makes sense as the first argument. The resulting tag is returned after +// canonicalizing using CanonType c. If one or more errors are encountered, +// one of the errors is returned. +func (c CanonType) Compose(part ...interface{}) (t Tag, err error) { + var b builder + if err = b.update(part...); err != nil { + return und, err + } + t, _ = b.tag.canonicalize(c) + + if len(b.ext) > 0 || len(b.variant) > 0 { + sort.Sort(sortVariant(b.variant)) + sort.Strings(b.ext) + if b.private != "" { + b.ext = append(b.ext, b.private) + } + n := maxCoreSize + tokenLen(b.variant...) + tokenLen(b.ext...) + buf := make([]byte, n) + p := t.genCoreBytes(buf) + t.pVariant = byte(p) + p += appendTokens(buf[p:], b.variant...) + t.pExt = uint16(p) + p += appendTokens(buf[p:], b.ext...) + t.str = string(buf[:p]) + } else if b.private != "" { + t.str = b.private + t.remakeString() + } + return +} + +type builder struct { + tag Tag + + private string // the x extension + ext []string + variant []string + + err error +} + +func (b *builder) addExt(e string) { + if e == "" { + } else if e[0] == 'x' { + b.private = e + } else { + b.ext = append(b.ext, e) + } +} + +var errInvalidArgument = errors.New("invalid Extension or Variant") + +func (b *builder) update(part ...interface{}) (err error) { + replace := func(l *[]string, s string, eq func(a, b string) bool) bool { + if s == "" { + b.err = errInvalidArgument + return true + } + for i, v := range *l { + if eq(v, s) { + (*l)[i] = s + return true + } + } + return false + } + for _, x := range part { + switch v := x.(type) { + case Tag: + b.tag.lang = v.lang + b.tag.region = v.region + b.tag.script = v.script + if v.str != "" { + b.variant = nil + for x, s := "", v.str[v.pVariant:v.pExt]; s != ""; { + x, s = nextToken(s) + b.variant = append(b.variant, x) + } + b.ext, b.private = nil, "" + for i, e := int(v.pExt), ""; i < len(v.str); { + i, e = getExtension(v.str, i) + b.addExt(e) + } + } + case Base: + b.tag.lang = v.langID + case Script: + b.tag.script = v.scriptID + case Region: + b.tag.region = v.regionID + case Variant: + if !replace(&b.variant, v.variant, func(a, b string) bool { return a == b }) { + b.variant = append(b.variant, v.variant) + } + case Extension: + if !replace(&b.ext, v.s, func(a, b string) bool { return a[0] == b[0] }) { + b.addExt(v.s) + } + case []Variant: + b.variant = nil + for _, x := range v { + b.update(x) + } + case []Extension: + b.ext, b.private = nil, "" + for _, e := range v { + b.update(e) + } + // TODO: support parsing of raw strings based on morphology or just extensions? 
+ case error: + err = v + } + } + return +} + +func tokenLen(token ...string) (n int) { + for _, t := range token { + n += len(t) + 1 + } + return +} + +func appendTokens(b []byte, token ...string) int { + p := 0 + for _, t := range token { + b[p] = '-' + copy(b[p+1:], t) + p += 1 + len(t) + } + return p +} + +type sortVariant []string + +func (s sortVariant) Len() int { + return len(s) +} + +func (s sortVariant) Swap(i, j int) { + s[j], s[i] = s[i], s[j] +} + +func (s sortVariant) Less(i, j int) bool { + return variantIndex[s[i]] < variantIndex[s[j]] +} + +func findExt(list []string, x byte) int { + for i, e := range list { + if e[0] == x { + return i + } + } + return -1 +} + +// getExtension returns the name, body and end position of the extension. +func getExtension(s string, p int) (end int, ext string) { + if s[p] == '-' { + p++ + } + if s[p] == 'x' { + return len(s), s[p:] + } + end = nextExtension(s, p) + return end, s[p:end] +} + +// nextExtension finds the next extension within the string, searching +// for the -<char>- pattern from position p. +// In the fast majority of cases, language tags will have at most +// one extension and extensions tend to be small. +func nextExtension(s string, p int) int { + for n := len(s) - 3; p < n; { + if s[p] == '-' { + if s[p+2] == '-' { + return p + } + p += 3 + } else { + p++ + } + } + return len(s) +} + +var errInvalidWeight = errors.New("ParseAcceptLanguage: invalid weight") + +// ParseAcceptLanguage parses the contents of a Accept-Language header as +// defined in http://www.ietf.org/rfc/rfc2616.txt and returns a list of Tags and +// a list of corresponding quality weights. It is more permissive than RFC 2616 +// and may return non-nil slices even if the input is not valid. +// The Tags will be sorted by highest weight first and then by first occurrence. +// Tags with a weight of zero will be dropped. An error will be returned if the +// input could not be parsed. +func ParseAcceptLanguage(s string) (tag []Tag, q []float32, err error) { + var entry string + for s != "" { + if entry, s = split(s, ','); entry == "" { + continue + } + + entry, weight := split(entry, ';') + + // Scan the language. + t, err := Parse(entry) + if err != nil { + id, ok := acceptFallback[entry] + if !ok { + return nil, nil, err + } + t = Tag{lang: id} + } + + // Scan the optional weight. + w := 1.0 + if weight != "" { + weight = consume(weight, 'q') + weight = consume(weight, '=') + // consume returns the empty string when a token could not be + // consumed, resulting in an error for ParseFloat. + if w, err = strconv.ParseFloat(weight, 32); err != nil { + return nil, nil, errInvalidWeight + } + // Drop tags with a quality weight of 0. + if w <= 0 { + continue + } + } + + tag = append(tag, t) + q = append(q, float32(w)) + } + sortStable(&tagSort{tag, q}) + return tag, q, nil +} + +// consume removes a leading token c from s and returns the result or the empty +// string if there is no such token. +func consume(s string, c byte) string { + if s == "" || s[0] != c { + return "" + } + return strings.TrimSpace(s[1:]) +} + +func split(s string, c byte) (head, tail string) { + if i := strings.IndexByte(s, c); i >= 0 { + return strings.TrimSpace(s[:i]), strings.TrimSpace(s[i+1:]) + } + return strings.TrimSpace(s), "" +} + +// Add hack mapping to deal with a small number of cases that that occur +// in Accept-Language (with reasonable frequency). 
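ParseAcceptLanguage above is the piece most callers touch directly when handling HTTP requests. A hedged usage sketch with an arbitrary header value:

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

func main() {
	// Tags come back sorted by descending quality weight, then by position;
	// entries with q=0 are dropped.
	tags, weights, err := language.ParseAcceptLanguage("fr-CH, fr;q=0.9, en;q=0.8, de;q=0")
	if err != nil {
		panic(err)
	}
	for i, t := range tags {
		fmt.Printf("%v q=%.1f\n", t, weights[i])
	}
}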
+var acceptFallback = map[string]langID{ + "english": _en, + "deutsch": _de, + "italian": _it, + "french": _fr, + "*": _mul, // defined in the spec to match all languages. +} + +type tagSort struct { + tag []Tag + q []float32 +} + +func (s *tagSort) Len() int { + return len(s.q) +} + +func (s *tagSort) Less(i, j int) bool { + return s.q[i] > s.q[j] +} + +func (s *tagSort) Swap(i, j int) { + s.tag[i], s.tag[j] = s.tag[j], s.tag[i] + s.q[i], s.q[j] = s.q[j], s.q[i] +} diff --git a/vendor/golang.org/x/text/language/tables.go b/vendor/golang.org/x/text/language/tables.go new file mode 100644 index 000000000..5de0f856a --- /dev/null +++ b/vendor/golang.org/x/text/language/tables.go @@ -0,0 +1,2791 @@ +// This file was generated by go generate; DO NOT EDIT + +package language + +import "golang.org/x/text/internal/tag" + +// CLDRVersion is the CLDR version from which the tables in this package are derived. +const CLDRVersion = "29" + +const numLanguages = 8654 + +const numScripts = 230 + +const numRegions = 354 + +type fromTo struct { + from uint16 + to uint16 +} + +const nonCanonicalUnd = 649 +const ( + _af = 10 + _am = 17 + _ar = 21 + _az = 36 + _bg = 56 + _bn = 75 + _ca = 97 + _cs = 121 + _da = 128 + _de = 133 + _el = 154 + _en = 155 + _es = 157 + _et = 159 + _fa = 164 + _fi = 168 + _fil = 170 + _fr = 175 + _gu = 211 + _he = 224 + _hi = 225 + _hr = 238 + _hu = 242 + _hy = 243 + _id = 248 + _is = 258 + _it = 259 + _ja = 263 + _ka = 273 + _kk = 303 + _km = 307 + _kn = 309 + _ko = 310 + _ky = 333 + _lo = 357 + _lt = 361 + _lv = 368 + _mk = 396 + _ml = 397 + _mn = 399 + _mo = 402 + _mr = 406 + _ms = 410 + _mul = 414 + _my = 421 + _nb = 431 + _ne = 436 + _nl = 445 + _no = 449 + _pa = 471 + _pl = 487 + _pt = 495 + _ro = 515 + _ru = 519 + _sh = 549 + _si = 552 + _sk = 554 + _sl = 556 + _sq = 570 + _sr = 571 + _sv = 583 + _sw = 584 + _ta = 593 + _te = 600 + _th = 605 + _tl = 616 + _tn = 619 + _tr = 623 + _uk = 646 + _ur = 652 + _uz = 653 + _vi = 658 + _zh = 708 + _zu = 710 + _jbo = 265 + _ami = 1033 + _bnn = 1740 + _hak = 221 + _tlh = 13850 + _lb = 340 + _nv = 458 + _pwn = 11438 + _tao = 13571 + _tay = 13581 + _tsu = 14045 + _nn = 447 + _sfb = 13012 + _vgt = 15084 + _sgg = 13043 + _cmn = 2390 + _nan = 428 + _hsn = 240 +) + +const langPrivateStart = 0x2d09 + +const langPrivateEnd = 0x2f10 + +// lang holds an alphabetically sorted list of ISO-639 language identifiers. +// All entries are 4 bytes. The index of the identifier (divided by 4) is the language tag. +// For 2-byte language identifiers, the two successive bytes have the following meaning: +// - if the first letter of the 2- and 3-letter ISO codes are the same: +// the second and third letter of the 3-letter ISO code. +// - otherwise: a 0 and a by 2 bits right-shifted index into altLangISO3. +// For 3-byte language identifiers the 4th byte is 0. 
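The packed lang index described above can be unpacked mechanically. Purely as an illustration of the encoding (and ignoring the altLangISO3 indirection used when the 2- and 3-letter codes start with different letters), a sketch:

package main

import "fmt"

// decode unpacks one 4-byte entry of the lang index into its ISO codes.
// The sample entries are taken from the table above: "enng" (en/eng),
// "aaar" (aa/aar) and "fil\x00" (a 3-letter-only identifier).
func decode(entry string) (iso2, iso3 string) {
	if entry[3] == 0 {
		return "", entry[:3]
	}
	if entry[2] == 0 {
		// The last byte is a shifted index into altLangISO3; not resolved here.
		return entry[:2], ""
	}
	return entry[:2], entry[:1] + entry[2:4]
}

func main() {
	fmt.Println(decode("enng"))
	fmt.Println(decode("aaar"))
	fmt.Println(decode("fil\x00"))
}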
+var lang tag.Index = "" + // Size: 2856 bytes + "---\x00aaarabbkabr\x00ace\x00ach\x00ada\x00ady\x00aeveaeb\x00affragq\x00" + + "aho\x00akkaakk\x00aln\x00alt\x00ammhamo\x00anrgaoz\x00arraarc\x00arn\x00" + + "aro\x00arq\x00ary\x00arz\x00assmasa\x00ase\x00ast\x00atj\x00avvaawa\x00a" + + "yymazzebaakbal\x00ban\x00bap\x00bar\x00bas\x00bax\x00bbc\x00bbj\x00bci" + + "\x00beelbej\x00bem\x00bew\x00bez\x00bfd\x00bfq\x00bft\x00bfy\x00bgulbgc" + + "\x00bgn\x00bgx\x00bhihbhb\x00bhi\x00bhk\x00bho\x00biisbik\x00bin\x00bjj" + + "\x00bjn\x00bkm\x00bku\x00blt\x00bmambmq\x00bnenboodbpy\x00bqi\x00bqv\x00" + + "brrebra\x00brh\x00brx\x00bsosbsq\x00bss\x00bto\x00btv\x00bua\x00buc\x00b" + + "ug\x00bum\x00bvb\x00byn\x00byv\x00bze\x00caatcch\x00ccp\x00ceheceb\x00cg" + + "g\x00chhachk\x00chm\x00cho\x00chp\x00chr\x00cja\x00cjm\x00ckb\x00cooscop" + + "\x00cps\x00crrecrj\x00crk\x00crl\x00crm\x00crs\x00csescsb\x00csw\x00ctd" + + "\x00cuhucvhvcyymdaandak\x00dar\x00dav\x00dcc\x00deeuden\x00dgr\x00dje" + + "\x00dnj\x00doi\x00dsb\x00dtm\x00dtp\x00dty\x00dua\x00dvivdyo\x00dyu\x00d" + + "zzoebu\x00eeweefi\x00egl\x00egy\x00eky\x00elllenngeopoes\x00\x05esu\x00e" + + "tstett\x00euusewo\x00ext\x00faasfan\x00ffulffm\x00fiinfia\x00fil\x00fit" + + "\x00fjijfoaofon\x00frrafrc\x00frp\x00frr\x00frs\x00fud\x00fuq\x00fur\x00" + + "fuv\x00fvr\x00fyrygalegaa\x00gag\x00gan\x00gay\x00gbm\x00gbz\x00gcr\x00g" + + "dlagez\x00ggn\x00gil\x00gjk\x00gju\x00gllgglk\x00gnrngom\x00gon\x00gor" + + "\x00gos\x00got\x00grc\x00grt\x00gsw\x00guujgub\x00guc\x00gur\x00guw\x00g" + + "uz\x00gvlvgvr\x00gwi\x00haauhak\x00haw\x00haz\x00heebhiinhif\x00hil\x00h" + + "lu\x00hmd\x00hnd\x00hne\x00hnj\x00hnn\x00hno\x00homohoc\x00hoj\x00hrrvhs" + + "b\x00hsn\x00htathuunhyyehzerianaiba\x00ibb\x00idndieleigboiiiiikpkikt" + + "\x00ilo\x00inndinh\x00iodoisslittaiukuiw\x00\x03izh\x00japnjam\x00jbo" + + "\x00jgo\x00ji\x00\x06jmc\x00jml\x00jut\x00jvavjwavkaatkaa\x00kab\x00kac" + + "\x00kaj\x00kam\x00kao\x00kbd\x00kcg\x00kck\x00kde\x00kdt\x00kea\x00ken" + + "\x00kfo\x00kfr\x00kfy\x00kgonkge\x00kgp\x00kha\x00khb\x00khn\x00khq\x00k" + + "ht\x00khw\x00kiikkiu\x00kjuakjg\x00kkazkkj\x00klalkln\x00kmhmkmb\x00knan" + + "koorkoi\x00kok\x00kos\x00kpe\x00kraukrc\x00kri\x00krj\x00krl\x00kru\x00k" + + "sasksb\x00ksf\x00ksh\x00kuurkum\x00kvomkvr\x00kvx\x00kw\x00\x01kxm\x00kx" + + "p\x00kyirlaatlab\x00lad\x00lag\x00lah\x00laj\x00lbtzlbe\x00lbw\x00lcp" + + "\x00lep\x00lez\x00lgugliimlif\x00lij\x00lis\x00ljp\x00lki\x00lkt\x00lmn" + + "\x00lmo\x00lninloaolol\x00loz\x00lrc\x00ltitltg\x00luublua\x00luo\x00luy" + + "\x00luz\x00lvavlwl\x00lzh\x00lzz\x00mad\x00maf\x00mag\x00mai\x00mak\x00m" + + "an\x00mas\x00maz\x00mdf\x00mdh\x00mdr\x00men\x00mer\x00mfa\x00mfe\x00mgl" + + "gmgh\x00mgo\x00mgp\x00mgy\x00mhahmirimin\x00mis\x00mkkdmlalmls\x00mnonmn" + + "i\x00mnw\x00moolmoe\x00moh\x00mos\x00mrarmrd\x00mrj\x00mro\x00mssamtltmt" + + "r\x00mua\x00mul\x00mus\x00mvy\x00mwk\x00mwr\x00mwv\x00mxc\x00myyamyv\x00" + + "myx\x00myz\x00mzn\x00naaunah\x00nan\x00nap\x00naq\x00nbobnch\x00nddendc" + + "\x00nds\x00neepnew\x00ngdongl\x00nhe\x00nhw\x00nij\x00niu\x00njo\x00nlld" + + "nmg\x00nnnonnh\x00noornod\x00noe\x00non\x00nqo\x00nrblnsk\x00nso\x00nus" + + "\x00nvavnxq\x00nyyanym\x00nyn\x00nzi\x00occiojjiomrmorriosssosa\x00otk" + + "\x00paanpag\x00pal\x00pam\x00pap\x00pau\x00pcd\x00pcm\x00pdc\x00pdt\x00p" + + "eo\x00pfl\x00phn\x00pilipka\x00pko\x00plolpms\x00pnt\x00pon\x00pra\x00pr" + + "d\x00prg\x00psusptorpuu\x00quuequc\x00qug\x00raj\x00rcf\x00rej\x00rgn" + + 
"\x00ria\x00rif\x00rjs\x00rkt\x00rmohrmf\x00rmo\x00rmt\x00rmu\x00rnunrng" + + "\x00roonrob\x00rof\x00rtm\x00ruusrue\x00rug\x00rw\x00\x04rwk\x00ryu\x00s" + + "aansaf\x00sah\x00saq\x00sas\x00sat\x00saz\x00sbp\x00scrdsck\x00scn\x00sc" + + "o\x00scs\x00sdndsdc\x00sdh\x00semesef\x00seh\x00sei\x00ses\x00sgagsga" + + "\x00sgs\x00sh\x00\x02shi\x00shn\x00siinsid\x00sklkskr\x00sllvsli\x00sly" + + "\x00smmosma\x00smi\x00smj\x00smn\x00smp\x00sms\x00snnasnk\x00soomsou\x00" + + "sqqisrrpsrb\x00srn\x00srr\x00srx\x00ssswssy\x00stotstq\x00suunsuk\x00sus" + + "\x00svweswwaswb\x00swc\x00swg\x00swv\x00sxn\x00syl\x00syr\x00szl\x00taam" + + "taj\x00tbw\x00tcy\x00tdd\x00tdg\x00tdh\x00teeltem\x00teo\x00tet\x00tggkt" + + "hhathl\x00thq\x00thr\x00tiirtig\x00tiv\x00tkuktkl\x00tkr\x00tkt\x00tlglt" + + "ly\x00tmh\x00tnsntoontog\x00tpi\x00trurtru\x00trv\x00tssotsd\x00tsf\x00t" + + "sg\x00tsj\x00ttatttj\x00tts\x00ttt\x00tum\x00tvl\x00twwitwq\x00txg\x00ty" + + "ahtyv\x00tzm\x00udm\x00ugiguga\x00ukkruli\x00umb\x00und\x00unr\x00unx" + + "\x00urrduzzbvai\x00veenvec\x00vep\x00viievic\x00vls\x00vmf\x00vmw\x00voo" + + "lvot\x00vro\x00vun\x00walnwae\x00wal\x00war\x00wbp\x00wbq\x00wbr\x00wls" + + "\x00wni\x00woolwtm\x00wuu\x00xav\x00xcr\x00xhhoxlc\x00xld\x00xmf\x00xmn" + + "\x00xmr\x00xna\x00xnr\x00xog\x00xpr\x00xsa\x00xsr\x00yao\x00yap\x00yav" + + "\x00ybb\x00yiidyooryrl\x00yua\x00yue\x00zahazag\x00zbl\x00zdj\x00zea\x00" + + "zgh\x00zhhozmi\x00zuulzxx\x00zza\x00\xff\xff\xff\xff" + +const langNoIndexOffset = 713 + +// langNoIndex is a bit vector of all 3-letter language codes that are not used as an index +// in lookup tables. The language ids for these language codes are derived directly +// from the letters and are not consecutive. +// Size: 2197 bytes, 2197 elements +var langNoIndex = [2197]uint8{ + // Entry 0 - 3F + 0xff, 0xfd, 0xfd, 0xfe, 0xef, 0xf7, 0xbf, 0xd2, + 0xfb, 0xbf, 0xfe, 0xfa, 0xb7, 0x1d, 0x3c, 0x57, + 0x6f, 0x97, 0x73, 0xf8, 0xff, 0xef, 0xff, 0x70, + 0xaf, 0x03, 0xff, 0xff, 0xcf, 0x05, 0x85, 0x62, + 0xe9, 0xbf, 0xfd, 0xff, 0xff, 0xf7, 0xfd, 0x77, + 0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, + 0xc9, 0xff, 0xff, 0xff, 0x4d, 0xb8, 0x0a, 0x6a, + 0x7e, 0xfa, 0xe3, 0xfe, 0x7e, 0xff, 0x77, 0xff, + // Entry 40 - 7F + 0xff, 0xff, 0xff, 0xdf, 0x2b, 0xf4, 0xf1, 0xe0, + 0x5d, 0xe7, 0x9f, 0x14, 0x07, 0x20, 0xdf, 0xed, + 0x9f, 0x3f, 0xc9, 0x21, 0xf8, 0x3f, 0x94, 0xf7, + 0x7e, 0xff, 0xff, 0xff, 0xfe, 0x7f, 0xff, 0xff, + 0xff, 0xff, 0x5f, 0xfc, 0xdb, 0xfd, 0xbf, 0xb5, + 0x7b, 0xdf, 0x7f, 0xf7, 0xeb, 0xfe, 0xff, 0xa7, + 0xbd, 0xff, 0x7f, 0xf7, 0xff, 0xef, 0xef, 0xef, + 0xff, 0xff, 0x9f, 0xff, 0xff, 0xef, 0xff, 0xdf, + // Entry 80 - BF + 0xff, 0xff, 0xf3, 0xff, 0xfb, 0x2f, 0xff, 0xff, + 0xfb, 0xee, 0xff, 0xbd, 0xdb, 0xff, 0xdf, 0xf7, + 0xff, 0xfa, 0xfd, 0xff, 0x7e, 0xaf, 0x7b, 0xfe, + 0x7f, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xdf, 0xff, + 0xff, 0xdf, 0xfb, 0xff, 0xfd, 0xfc, 0xfb, 0xff, + 0xff, 0xff, 0xff, 0xf7, 0x7f, 0xbf, 0xfd, 0xd5, + 0xa5, 0x77, 0x40, 0xff, 0x9c, 0xc1, 0x41, 0x2c, + 0x08, 0x24, 0x41, 0x00, 0x50, 0x40, 0x00, 0x80, + // Entry C0 - FF + 0xfb, 0x4a, 0xf2, 0x9f, 0xb4, 0x42, 0x41, 0x96, + 0x9b, 0x14, 0x88, 0xf6, 0x7b, 0xe7, 0x17, 0x56, + 0x55, 0x7d, 0x0e, 0x1c, 0x37, 0x71, 0xf3, 0xef, + 0x97, 0xff, 0x5d, 0x38, 0x64, 0x08, 0x00, 0x10, + 0xbc, 0x87, 0xaf, 0xdf, 0xff, 0xf7, 0x73, 0x35, + 0x3e, 0x87, 0xc7, 0xdf, 0xff, 0x00, 0x81, 0x00, + 0xb0, 0x05, 0x80, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x40, 0x00, 0x40, 0x92, 0x21, 0xd0, 0xbf, 0x5d, + // Entry 100 - 13F + 0xfd, 0xde, 0xfe, 0x5e, 0x00, 0x00, 0x02, 0x64, + 0x8d, 0x19, 
0xc1, 0xdf, 0x79, 0x22, 0x00, 0x00, + 0x00, 0xdf, 0x6d, 0xdc, 0x26, 0xe5, 0xd9, 0xf3, + 0xfe, 0xff, 0xfd, 0xcb, 0x9f, 0x14, 0x01, 0x0c, + 0x86, 0x00, 0xd1, 0x00, 0xf0, 0xc5, 0x67, 0x5f, + 0x56, 0x89, 0x5e, 0xb7, 0xec, 0xef, 0x03, 0x00, + 0x02, 0x00, 0x00, 0x00, 0xc0, 0x77, 0xda, 0x57, + 0x90, 0x69, 0x01, 0x2c, 0x96, 0x79, 0xe0, 0xff, + // Entry 140 - 17F + 0xff, 0x7f, 0x00, 0x00, 0x00, 0x01, 0x08, 0x56, + 0x01, 0x00, 0x00, 0xb0, 0x14, 0x03, 0x50, 0x16, + 0x0a, 0x00, 0x01, 0x00, 0x00, 0x00, 0x11, 0x09, + 0x00, 0x00, 0x60, 0x10, 0x00, 0x00, 0x00, 0x10, + 0x00, 0x00, 0x44, 0x00, 0x00, 0x10, 0x00, 0x04, + 0x08, 0x00, 0x00, 0x04, 0x00, 0x80, 0x28, 0x04, + 0x00, 0x00, 0x50, 0xd5, 0x2d, 0x00, 0x64, 0x35, + 0x24, 0x53, 0xf5, 0xd4, 0xbd, 0xe2, 0xcd, 0x03, + // Entry 180 - 1BF + 0x00, 0x80, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x04, 0x17, 0x39, 0x01, 0xdd, 0x57, 0x98, + 0x21, 0x98, 0xa5, 0x00, 0x00, 0x01, 0x40, 0x82, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x40, 0x00, 0x44, 0x00, 0x00, 0xb0, 0xfe, + 0xa9, 0x39, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x40, + 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + // Entry 1C0 - 1FF + 0x00, 0x01, 0x28, 0x05, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x20, 0x04, 0xa6, 0x08, 0x04, 0x00, 0x08, + 0x81, 0x50, 0x00, 0x00, 0x08, 0x11, 0x86, 0x40, + 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x06, 0x55, + 0x02, 0x10, 0x08, 0x04, 0x00, 0x00, 0x00, 0x60, + 0x3b, 0x83, 0x11, 0x00, 0x00, 0x00, 0x11, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xbe, 0xdf, 0xff, 0xfe, 0xbf, + // Entry 200 - 23F + 0xdf, 0xc7, 0x83, 0x82, 0xc0, 0xff, 0xdf, 0x27, + 0xcf, 0x5f, 0xe7, 0x01, 0x10, 0x20, 0xb2, 0xc5, + 0xa4, 0x45, 0x25, 0x9b, 0x03, 0xcf, 0xf0, 0xdf, + 0x03, 0xc4, 0x08, 0x10, 0x01, 0x0e, 0x01, 0xe3, + 0x92, 0x54, 0xdb, 0x38, 0xf1, 0x7f, 0xf7, 0x6d, + 0xf9, 0xff, 0x1c, 0x7d, 0x04, 0x08, 0x00, 0x01, + 0x21, 0x12, 0x6c, 0x5f, 0xdd, 0x0f, 0x85, 0x4f, + 0x40, 0x40, 0x00, 0x04, 0xf9, 0xfd, 0xbd, 0xd4, + // Entry 240 - 27F + 0xe8, 0x13, 0xf4, 0x27, 0xa3, 0x0d, 0x00, 0x00, + 0x20, 0x7b, 0x39, 0x02, 0x05, 0x84, 0x00, 0xf0, + 0xbf, 0x7f, 0xda, 0x00, 0x18, 0x04, 0x81, 0x00, + 0x00, 0x00, 0x80, 0x10, 0x94, 0x1c, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x10, 0x40, 0x00, 0x04, + 0x08, 0xb4, 0x7c, 0xa5, 0x0c, 0x40, 0x00, 0x00, + 0x11, 0x04, 0x04, 0x6c, 0x00, 0x20, 0x70, 0xff, + 0xfb, 0x7f, 0x60, 0x00, 0x05, 0x9b, 0xdd, 0x6e, + // Entry 280 - 2BF + 0x03, 0x00, 0x11, 0x00, 0x00, 0x00, 0x40, 0x05, + 0xb5, 0xb6, 0x80, 0x08, 0x04, 0x00, 0x04, 0x51, + 0xe2, 0xff, 0xfd, 0x3f, 0x05, 0x09, 0x08, 0x05, + 0x40, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, + 0x08, 0x00, 0x00, 0x00, 0x00, 0xa1, 0x02, 0x60, + 0xe5, 0x48, 0x14, 0x89, 0x20, 0xc0, 0x47, 0x80, + 0x07, 0x00, 0x00, 0x00, 0xcc, 0x50, 0x40, 0x24, + 0x85, 0x47, 0x84, 0x40, 0x20, 0x10, 0x00, 0x20, + // Entry 2C0 - 2FF + 0x02, 0x50, 0x88, 0x11, 0x00, 0xd1, 0x6c, 0xee, + 0x50, 0x27, 0x1d, 0x11, 0x69, 0x06, 0x59, 0xe9, + 0x33, 0x08, 0x00, 0x20, 0x05, 0x40, 0x10, 0x00, + 0x00, 0x00, 0x50, 0x44, 0x96, 0x49, 0xd6, 0x5d, + 0xa7, 0x81, 0x47, 0x97, 0xfb, 0x00, 0x10, 0x00, + 0x08, 0x00, 0x80, 0x00, 0x40, 0x45, 0x00, 0x01, + 0x02, 0x00, 0x01, 0x40, 0x80, 0x00, 0x04, 0x08, + 0xf8, 0xeb, 0xf6, 0x39, 0xc4, 0x89, 0x16, 0x00, + // Entry 300 - 33F + 0x00, 0x0c, 0x04, 0x01, 0x20, 0x20, 0xdd, 0xa2, + 0x01, 0x00, 0x00, 0x00, 0x12, 0x04, 0x00, 0x00, + 0x04, 0x10, 0xf0, 0x9d, 0x95, 0x13, 0x04, 0x80, + 0x00, 0x01, 0xd0, 0x12, 0x40, 0x00, 0x10, 0xb0, + 0x10, 0x62, 0x4c, 0xd2, 0x02, 0x01, 0x4a, 0x00, 
+ 0x46, 0x04, 0x00, 0x08, 0x02, 0x00, 0x20, 0xc0, + 0x00, 0x80, 0x06, 0x00, 0x08, 0x00, 0x00, 0x00, + 0x00, 0xf0, 0xd8, 0x6f, 0x15, 0x02, 0x08, 0x00, + // Entry 340 - 37F + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x10, 0x01, + 0x00, 0x10, 0x00, 0x00, 0x00, 0xf8, 0x85, 0xe3, + 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0x7f, 0xfb, + 0xff, 0xfc, 0xfe, 0xdf, 0xff, 0xff, 0xff, 0xf6, + 0xfb, 0xfe, 0xf7, 0x1f, 0xff, 0xb3, 0xed, 0xff, + 0xdb, 0xed, 0xff, 0xfe, 0xff, 0xfe, 0xdf, 0xff, + 0xff, 0xff, 0xf7, 0xff, 0xfd, 0xff, 0xff, 0xff, + 0xfd, 0xff, 0xdf, 0xaf, 0x9c, 0xff, 0xfb, 0xff, + // Entry 380 - 3BF + 0xff, 0xff, 0xff, 0xff, 0xef, 0xd2, 0xbb, 0xdf, + 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xef, + 0xfd, 0xff, 0xff, 0xf7, 0xfd, 0xff, 0xff, 0xff, + 0xef, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0x5f, 0xd3, 0x7b, 0xfd, 0xd9, 0xdf, 0xef, + 0xbc, 0x18, 0x05, 0x2c, 0xff, 0x07, 0xf0, 0xff, + 0xf7, 0x5f, 0x00, 0x08, 0x00, 0xc3, 0x3d, 0x1b, + 0x06, 0xe6, 0x72, 0xf0, 0xdd, 0x3c, 0x7f, 0x44, + // Entry 3C0 - 3FF + 0x02, 0x30, 0x9f, 0x7a, 0x16, 0xfd, 0xff, 0x57, + 0xf2, 0xff, 0x39, 0xff, 0xf2, 0x1e, 0x95, 0xf7, + 0xf7, 0xff, 0x45, 0x80, 0x01, 0x02, 0x00, 0x00, + 0x40, 0x54, 0x9f, 0x8a, 0xd9, 0xd9, 0x0e, 0x11, + 0x84, 0x51, 0xc0, 0xf3, 0xfb, 0x47, 0x00, 0x01, + 0x05, 0xd1, 0x50, 0x58, 0x00, 0x00, 0x00, 0x10, + 0x04, 0x02, 0x00, 0x00, 0x0a, 0x00, 0x17, 0xd2, + 0xf9, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + // Entry 400 - 43F + 0xd7, 0x6f, 0xff, 0xff, 0xdf, 0x7d, 0xbb, 0xff, + 0xff, 0xff, 0xf7, 0xf3, 0xef, 0xff, 0xff, 0xf7, + 0xff, 0xdf, 0xdb, 0x7f, 0xff, 0xff, 0x7f, 0xff, + 0xff, 0xff, 0xef, 0xff, 0xbc, 0xff, 0xff, 0xfb, + 0xff, 0xfb, 0xff, 0xde, 0x76, 0xbd, 0xff, 0xf7, + 0xff, 0xff, 0xf7, 0xff, 0xff, 0xdf, 0xf3, 0xfe, + 0xef, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x7f, 0xde, + 0xf7, 0xbb, 0xef, 0xf7, 0xff, 0xfb, 0xbf, 0xdf, + // Entry 440 - 47F + 0xfd, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0x5f, 0x7d, + 0x7f, 0xff, 0xff, 0xf7, 0xe5, 0xfc, 0xff, 0xfd, + 0x7f, 0x7f, 0xff, 0x9e, 0xae, 0xff, 0xee, 0xff, + 0x7f, 0xf7, 0x7b, 0x02, 0x82, 0x04, 0xff, 0xf7, + 0xff, 0xbf, 0xd7, 0xef, 0xfe, 0xdf, 0xf7, 0xfe, + 0xe2, 0x8e, 0xe7, 0xff, 0xf7, 0xff, 0x56, 0xbd, + 0xcd, 0xff, 0xfb, 0xff, 0xff, 0xdf, 0xef, 0xff, + 0xe5, 0xdf, 0x7d, 0x0f, 0xa7, 0x51, 0x04, 0x44, + // Entry 480 - 4BF + 0x13, 0xd0, 0x5d, 0xaf, 0xa6, 0xfd, 0xb9, 0xff, + 0x63, 0x5d, 0x5b, 0xff, 0xff, 0xbf, 0x3f, 0x20, + 0x14, 0x00, 0x57, 0x51, 0x82, 0x65, 0xf5, 0x49, + 0xe2, 0xff, 0xfc, 0xdf, 0x00, 0x05, 0xc5, 0x05, + 0x00, 0x22, 0x00, 0x74, 0x69, 0x10, 0x08, 0x04, + 0x41, 0x00, 0x01, 0x06, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x51, 0x60, 0x05, 0x04, 0x01, 0x00, 0x00, + 0x06, 0x01, 0x20, 0x00, 0x18, 0x01, 0x92, 0xb1, + // Entry 4C0 - 4FF + 0xfd, 0x67, 0x4b, 0x06, 0x95, 0x06, 0x57, 0xed, + 0xfb, 0x4c, 0x9d, 0x7b, 0x83, 0x04, 0x62, 0x40, + 0x00, 0x15, 0x42, 0x00, 0x00, 0x00, 0x54, 0x83, + 0xf9, 0x4f, 0x10, 0x8c, 0xc9, 0x46, 0xde, 0xf7, + 0x13, 0x31, 0x00, 0x20, 0x00, 0x00, 0x00, 0x90, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x10, 0x00, + 0x01, 0x40, 0x00, 0xf0, 0x5b, 0xf4, 0xbe, 0x7d, + 0xba, 0xcf, 0xf7, 0xaf, 0x42, 0x04, 0x84, 0x41, + // Entry 500 - 53F + 0xb0, 0xff, 0x79, 0x7a, 0x04, 0x00, 0x00, 0x49, + 0x2d, 0x14, 0x27, 0x77, 0xed, 0xf1, 0xbf, 0xef, + 0x3f, 0x00, 0x00, 0x02, 0xc6, 0xa0, 0x1e, 0xfc, + 0xbb, 0xff, 0xfd, 0xfb, 0xb7, 0xfd, 0xf5, 0xff, + 0xfd, 0xfc, 0xd5, 0xed, 0x47, 0xf4, 0x7f, 0x10, + 0x01, 0x01, 0x84, 0x6d, 0xff, 0xf7, 0xdd, 0xf9, + 0x5f, 0x05, 0x86, 0xef, 0xf5, 0x77, 0xbd, 0x3c, + 0x00, 0x00, 0x00, 0x43, 0x71, 0x42, 0x00, 0x40, + // Entry 540 - 57F + 0x00, 0x00, 
0x01, 0x43, 0x19, 0x00, 0x08, 0x00, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // Entry 580 - 5BF + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xab, 0xbd, 0xe7, 0x57, 0xee, 0x13, 0x5d, + 0x09, 0xc1, 0x40, 0x21, 0xfa, 0x17, 0x01, 0x80, + 0x00, 0x00, 0x00, 0x00, 0xf0, 0xde, 0xff, 0xbf, + 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, + 0x00, 0x30, 0x95, 0xe3, 0x10, 0x00, 0x00, 0x00, + 0x11, 0x04, 0x16, 0x00, 0x01, 0x02, 0x00, 0x81, + 0xa3, 0x01, 0x50, 0x00, 0x00, 0x83, 0x11, 0x40, + // Entry 5C0 - 5FF + 0x00, 0x00, 0x00, 0xf0, 0xdd, 0x7b, 0x7e, 0x02, + 0xaa, 0x10, 0x5d, 0xd8, 0x52, 0x00, 0x80, 0x20, + 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x02, 0x02, + 0x19, 0x00, 0x10, 0x02, 0x10, 0x61, 0x5a, 0x9d, + 0x31, 0x00, 0x00, 0x00, 0x01, 0x50, 0x02, 0x20, + 0x00, 0x00, 0x01, 0x00, 0x42, 0x00, 0x20, 0x00, + 0x00, 0x1f, 0xdf, 0xf2, 0xfd, 0xff, 0xfd, 0x3f, + 0x9f, 0x18, 0xcf, 0x9c, 0xbf, 0xaf, 0x5f, 0xfe, + // Entry 600 - 63F + 0x7b, 0x4b, 0x40, 0x10, 0xe1, 0xfd, 0xaf, 0xfd, + 0xb7, 0xf7, 0xff, 0xf3, 0xdf, 0xff, 0x6f, 0xf1, + 0x7b, 0xf1, 0x7f, 0xdf, 0x7f, 0xbf, 0xfe, 0xb7, + 0xee, 0x1c, 0xfb, 0xdb, 0xef, 0xdf, 0xff, 0xfd, + 0x7e, 0xbe, 0x57, 0xff, 0x6f, 0x81, 0x76, 0x1f, + 0xd4, 0x77, 0xf5, 0xfd, 0xff, 0xff, 0xeb, 0xfe, + 0xbf, 0x5f, 0x57, 0x1b, 0xeb, 0x5f, 0x50, 0x18, + 0x02, 0xfa, 0xff, 0x9d, 0x15, 0x97, 0x15, 0x0f, + // Entry 640 - 67F + 0x75, 0xc4, 0x7d, 0x81, 0x82, 0xf1, 0xd7, 0x7e, + 0xff, 0xff, 0xff, 0xef, 0xff, 0xfd, 0xdd, 0xde, + 0xfc, 0xfd, 0xf6, 0x5f, 0x7a, 0x1f, 0x40, 0x98, + 0x02, 0xff, 0xe3, 0xff, 0xf3, 0xd6, 0xf2, 0xff, + 0xfb, 0xdf, 0x7d, 0x50, 0x1e, 0x15, 0x7b, 0xb4, + 0xf5, 0xbe, 0xff, 0xff, 0xf3, 0xf7, 0xff, 0xf7, + 0x7f, 0xff, 0xff, 0xbe, 0xdb, 0xf7, 0xd7, 0xf9, + 0xef, 0x2f, 0x80, 0xbf, 0xc5, 0xff, 0xff, 0xf3, + // Entry 680 - 6BF + 0x97, 0x9d, 0xff, 0xff, 0xf7, 0xcf, 0xfd, 0xbf, + 0xde, 0x7f, 0x06, 0x1d, 0x57, 0xff, 0xf8, 0xda, + 0x5d, 0xce, 0x7d, 0x16, 0xb9, 0xea, 0x69, 0xa0, + 0x1a, 0x20, 0x00, 0x30, 0x02, 0x04, 0x24, 0x48, + 0x04, 0x00, 0x00, 0x40, 0xd4, 0x02, 0x04, 0x00, + 0x00, 0x04, 0x00, 0x04, 0x00, 0x20, 0x01, 0x06, + 0x50, 0x00, 0x08, 0x00, 0x00, 0x00, 0x24, 0x00, + 0x04, 0x00, 0x10, 0x8c, 0x58, 0xd5, 0x0d, 0x0f, + // Entry 6C0 - 6FF + 0x14, 0x4d, 0xf1, 0x16, 0x44, 0xd1, 0x42, 0x08, + 0x40, 0x00, 0x00, 0x40, 0x00, 0x08, 0x00, 0x00, + 0x00, 0xdc, 0xff, 0xeb, 0x1f, 0x58, 0x08, 0x41, + 0x04, 0xa0, 0x04, 0x00, 0x30, 0x12, 0x40, 0x22, + 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x80, 0x10, 0x10, 0xaf, + 0x6f, 0x93, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x80, 0x80, 0x25, 0x00, 0x00, + // Entry 700 - 73F + 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, + 0x80, 0x86, 0xc2, 0x02, 0x00, 0x00, 0x00, 0x01, + 0xdf, 0x18, 0x00, 0x00, 0x02, 0xf0, 0xfd, 0x79, + 0x3b, 0x00, 0x25, 0x00, 0x00, 0x00, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, + 0x03, 0x00, 0x09, 0x20, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x81, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 740 - 77F + 0x00, 0x00, 0x00, 0xef, 0xf7, 0xfd, 0xcf, 0x7e, + 0xa0, 0x11, 0x10, 0x00, 0x00, 0x92, 0x01, 0x44, + 0xcd, 0xf9, 0x5e, 0x00, 0x01, 0x00, 0x30, 0x14, + 0x04, 0x55, 0x10, 0x01, 0x04, 0xf6, 0x3f, 0x7a, 
+ 0x05, 0x04, 0x00, 0xb0, 0x80, 0x00, 0x55, 0x55, + 0x97, 0x7c, 0x9f, 0x71, 0xcc, 0x78, 0xd1, 0x43, + 0xf5, 0x57, 0x67, 0x14, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x2c, 0xf7, 0xdb, 0x1f, 0x54, 0x60, + // Entry 780 - 7BF + 0x03, 0x68, 0x01, 0x10, 0x8b, 0x38, 0xaa, 0x01, + 0x00, 0x00, 0x30, 0x00, 0x24, 0x44, 0x00, 0x00, + 0x10, 0x03, 0x11, 0x02, 0x01, 0x00, 0x00, 0xf0, + 0xf5, 0xff, 0xd5, 0xd7, 0xbc, 0x70, 0xd6, 0x78, + 0x78, 0x15, 0x50, 0x00, 0xa4, 0x84, 0xe9, 0x41, + 0x00, 0x00, 0x00, 0x6b, 0x39, 0x52, 0x74, 0x00, + 0xe8, 0x30, 0x90, 0x6a, 0x92, 0x00, 0x00, 0x02, + 0xff, 0xef, 0xff, 0x4f, 0x85, 0x53, 0xf4, 0xed, + // Entry 7C0 - 7FF + 0xdd, 0xbf, 0x72, 0x19, 0xc7, 0x0c, 0xf5, 0x42, + 0x54, 0xdd, 0x77, 0x14, 0x00, 0x80, 0xc0, 0x56, + 0xcc, 0x16, 0x9e, 0xfb, 0x35, 0x7d, 0xef, 0xff, + 0xbd, 0xa4, 0xaf, 0x01, 0x44, 0x18, 0x01, 0x5d, + 0x4e, 0x4a, 0x08, 0x50, 0x28, 0x30, 0xe0, 0x80, + 0x10, 0x20, 0x24, 0x00, 0xff, 0x3f, 0xdf, 0x67, + 0xfe, 0x01, 0x06, 0x88, 0x0a, 0x40, 0x16, 0x01, + 0x01, 0x15, 0x2b, 0x3e, 0x01, 0x00, 0x00, 0x10, + // Entry 800 - 83F + 0x90, 0x69, 0x45, 0x02, 0x02, 0x01, 0xe1, 0xbf, + 0xbf, 0x03, 0x00, 0x00, 0x10, 0xd4, 0xa7, 0xd1, + 0x54, 0x9e, 0x44, 0xdf, 0xfd, 0x8f, 0x66, 0xb3, + 0x55, 0x20, 0xd4, 0xc3, 0xd8, 0x30, 0x3d, 0x80, + 0x00, 0x00, 0x00, 0x4c, 0xd4, 0x11, 0xc5, 0x84, + 0x6e, 0x50, 0x00, 0x22, 0x50, 0x6e, 0xbf, 0xdb, + 0x07, 0x00, 0x20, 0x10, 0x84, 0xb2, 0x45, 0x10, + 0x06, 0x44, 0x00, 0x00, 0x12, 0x02, 0x11, 0x00, + // Entry 840 - 87F + 0xf0, 0xfb, 0xfd, 0x3f, 0x05, 0x00, 0x12, 0x81, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x03, 0x30, 0x02, 0x28, + 0x84, 0x00, 0x33, 0xc0, 0x23, 0x24, 0x00, 0x00, + 0x00, 0xcb, 0xe4, 0x3a, 0x42, 0xc8, 0x14, 0xf1, + 0xef, 0xff, 0x7f, 0x16, 0x01, 0x01, 0x84, 0x50, + 0x07, 0xfc, 0xff, 0xff, 0x0f, 0x01, 0x00, 0x40, + 0x10, 0x38, 0x01, 0x01, 0x1c, 0x12, 0x40, 0xe1, + // Entry 880 - 8BF + 0x76, 0x16, 0x08, 0x03, 0x10, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x24, + 0x0a, 0x00, 0x80, 0x00, 0x00, +} + +// altLangISO3 holds an alphabetically sorted list of 3-letter language code alternatives +// to 2-letter language codes that cannot be derived using the method described above. +// Each 3-letter code is followed by its 1-byte langID. +var altLangISO3 tag.Index = "---\x00cor\x00hbs\x01heb\x02kin\x03spa\x04yid\x05\xff\xff\xff\xff" + +// altLangIndex is used to convert indexes in altLangISO3 to langIDs. +// Size: 12 bytes, 6 elements +var altLangIndex = [6]uint16{ + 0x014a, 0x0225, 0x0105, 0x020a, 0x009d, 0x010b, +} + +// langAliasMap maps langIDs to their suggested replacements. 
+// Size: 640 bytes, 160 elements +var langAliasMap = [160]fromTo{ + 0: {from: 0xc4, to: 0xda}, + 1: {from: 0xff, to: 0xf8}, + 2: {from: 0x105, to: 0xe0}, + 3: {from: 0x10b, to: 0x2b9}, + 4: {from: 0x110, to: 0x10f}, + 5: {from: 0x192, to: 0x203}, + 6: {from: 0x1af, to: 0x1c1}, + 7: {from: 0x225, to: 0x23b}, + 8: {from: 0x268, to: 0xaa}, + 9: {from: 0x274, to: 0x252}, + 10: {from: 0x27d, to: 0xd}, + 11: {from: 0x2d5, to: 0x2db}, + 12: {from: 0x326, to: 0x93}, + 13: {from: 0x3c7, to: 0x1c48}, + 14: {from: 0x3e8, to: 0x23a}, + 15: {from: 0x3f9, to: 0x23a}, + 16: {from: 0x484, to: 0x15}, + 17: {from: 0x48f, to: 0xf3}, + 18: {from: 0x4d5, to: 0x1f38}, + 19: {from: 0x54a, to: 0x23}, + 20: {from: 0x550, to: 0x2732}, + 21: {from: 0x55c, to: 0x24}, + 22: {from: 0x57d, to: 0xa1}, + 23: {from: 0x5a3, to: 0x26}, + 24: {from: 0x5ac, to: 0x42}, + 25: {from: 0x615, to: 0x5a7}, + 26: {from: 0x65a, to: 0xc7a}, + 27: {from: 0x786, to: 0x1a5}, + 28: {from: 0x7cd, to: 0x16e}, + 29: {from: 0x7d4, to: 0x59}, + 30: {from: 0x855, to: 0x30b9}, + 31: {from: 0x8cf, to: 0x2c4}, + 32: {from: 0x90c, to: 0x23f1}, + 33: {from: 0x915, to: 0x95a}, + 34: {from: 0x932, to: 0x24f}, + 35: {from: 0x953, to: 0x3fc0}, + 36: {from: 0x956, to: 0x2c4}, + 37: {from: 0x995, to: 0x2b3e}, + 38: {from: 0x9c5, to: 0x2f18}, + 39: {from: 0xa50, to: 0x73}, + 40: {from: 0xa9f, to: 0x79}, + 41: {from: 0xb5f, to: 0x8a}, + 42: {from: 0xb6e, to: 0x1a2}, + 43: {from: 0xb8f, to: 0xb92}, + 44: {from: 0xb95, to: 0x2c8}, + 45: {from: 0xc76, to: 0x1df1}, + 46: {from: 0xc85, to: 0x2c31}, + 47: {from: 0xcd0, to: 0x1bd}, + 48: {from: 0xe67, to: 0x9f}, + 49: {from: 0xe9b, to: 0x179}, + 50: {from: 0xf37, to: 0xfc}, + 51: {from: 0x1010, to: 0xd}, + 52: {from: 0x11bb, to: 0xaf}, + 53: {from: 0x1207, to: 0xa6}, + 54: {from: 0x12b6, to: 0xb32}, + 55: {from: 0x12ba, to: 0x1d2}, + 56: {from: 0x12c9, to: 0x145c}, + 57: {from: 0x1317, to: 0x111}, + 58: {from: 0x131a, to: 0x85}, + 59: {from: 0x133a, to: 0x3a46}, + 60: {from: 0x1401, to: 0xcc}, + 61: {from: 0x145f, to: 0x9a}, + 62: {from: 0x1497, to: 0x278f}, + 63: {from: 0x14af, to: 0xca}, + 64: {from: 0x14be, to: 0xcd6}, + 65: {from: 0x1511, to: 0x12bb}, + 66: {from: 0x15a0, to: 0x154d}, + 67: {from: 0x15ad, to: 0x168a}, + 68: {from: 0x1621, to: 0x23f}, + 69: {from: 0x1710, to: 0x1a98}, + 70: {from: 0x180b, to: 0x2947}, + 71: {from: 0x1821, to: 0x102}, + 72: {from: 0x18f1, to: 0x104}, + 73: {from: 0x191d, to: 0x12ac}, + 74: {from: 0x1dcf, to: 0x3548}, + 75: {from: 0x1dd4, to: 0x1e74}, + 76: {from: 0x1df1, to: 0x18f}, + 77: {from: 0x1e7a, to: 0x145}, + 78: {from: 0x1e85, to: 0x13b}, + 79: {from: 0x1e89, to: 0x122}, + 80: {from: 0x1e90, to: 0x138}, + 81: {from: 0x1ea6, to: 0x1f82}, + 82: {from: 0x1ecc, to: 0x147}, + 83: {from: 0x1f30, to: 0x8d}, + 84: {from: 0x1f65, to: 0x12f8}, + 85: {from: 0x1f7d, to: 0x4235}, + 86: {from: 0x1f8b, to: 0x371a}, + 87: {from: 0x1fc4, to: 0x8d}, + 88: {from: 0x1fce, to: 0x8d}, + 89: {from: 0x1ff9, to: 0x6c1}, + 90: {from: 0x20ad, to: 0x2fbd}, + 91: {from: 0x2119, to: 0x30fc}, + 92: {from: 0x2209, to: 0x170}, + 93: {from: 0x227b, to: 0x18c}, + 94: {from: 0x2287, to: 0x189}, + 95: {from: 0x2291, to: 0x19a}, + 96: {from: 0x22e7, to: 0x8f2}, + 97: {from: 0x2340, to: 0x69}, + 98: {from: 0x23d5, to: 0x179}, + 99: {from: 0x2460, to: 0x244b}, + 100: {from: 0x2490, to: 0x1f4}, + 101: {from: 0x24be, to: 0x3a46}, + 102: {from: 0x24fc, to: 0x244b}, + 103: {from: 0x2520, to: 0x40ef}, + 104: {from: 0x2686, to: 0x25ce}, + 105: {from: 0x26ab, to: 0x1b4}, + 106: {from: 0x271d, to: 0x2b3e}, + 107: 
{from: 0x28b1, to: 0x1d1}, + 108: {from: 0x2993, to: 0x1d3}, + 109: {from: 0x29d6, to: 0x3a46}, + 110: {from: 0x2a93, to: 0x1ee}, + 111: {from: 0x2aaa, to: 0x32e}, + 112: {from: 0x2ade, to: 0xa4}, + 113: {from: 0x2adf, to: 0xa4}, + 114: {from: 0x2b96, to: 0x183}, + 115: {from: 0x2b9f, to: 0x1763}, + 116: {from: 0x2bb1, to: 0x2b2c}, + 117: {from: 0x2bb8, to: 0x152}, + 118: {from: 0x2beb, to: 0x37}, + 119: {from: 0x2bfc, to: 0x2019}, + 120: {from: 0x2c37, to: 0x2c32}, + 121: {from: 0x2c86, to: 0x2c6e}, + 122: {from: 0x2f2a, to: 0x1f1}, + 123: {from: 0x30fd, to: 0x3125}, + 124: {from: 0x31c1, to: 0x203}, + 125: {from: 0x3285, to: 0x1667}, + 126: {from: 0x337d, to: 0x22a}, + 127: {from: 0x33ef, to: 0x132}, + 128: {from: 0x340d, to: 0x215}, + 129: {from: 0x3494, to: 0x248}, + 130: {from: 0x3557, to: 0x8d}, + 131: {from: 0x35ad, to: 0x3689}, + 132: {from: 0x35c2, to: 0x2a32}, + 133: {from: 0x35c6, to: 0x4c}, + 134: {from: 0x35c9, to: 0x2fbf}, + 135: {from: 0x3603, to: 0x373d}, + 136: {from: 0x3629, to: 0x3d57}, + 137: {from: 0x363c, to: 0x376e}, + 138: {from: 0x364b, to: 0x1d3b}, + 139: {from: 0x364c, to: 0x2c31}, + 140: {from: 0x36f3, to: 0x26a}, + 141: {from: 0x38e5, to: 0xb28}, + 142: {from: 0x390f, to: 0xe91}, + 143: {from: 0x3a30, to: 0x28d}, + 144: {from: 0x3d54, to: 0x7f}, + 145: {from: 0x3f9f, to: 0x828}, + 146: {from: 0x4055, to: 0x30a}, + 147: {from: 0x4090, to: 0x3cf7}, + 148: {from: 0x410f, to: 0x13a}, + 149: {from: 0x4162, to: 0x3462}, + 150: {from: 0x4164, to: 0x86}, + 151: {from: 0x4246, to: 0x30b9}, + 152: {from: 0x427a, to: 0x2b9}, + 153: {from: 0x4361, to: 0x21a0}, + 154: {from: 0x4374, to: 0x2473}, + 155: {from: 0x43a7, to: 0x4645}, + 156: {from: 0x4445, to: 0x4437}, + 157: {from: 0x44d5, to: 0x44dc}, + 158: {from: 0x46ad, to: 0x19a}, + 159: {from: 0x473e, to: 0x2be}, +} + +// Size: 160 bytes, 160 elements +var langAliasTypes = [160]langAliasType{ + // Entry 0 - 3F + 0, 0, 0, 0, 0, 0, 1, 2, 2, 0, 1, 0, 0, 1, 2, 1, + 1, 2, 0, 1, 0, 1, 2, 1, 1, 0, 0, 2, 1, 1, 0, 2, + 0, 0, 1, 0, 1, 0, 0, 1, 2, 1, 1, 1, 1, 0, 0, 2, + 1, 1, 1, 1, 2, 1, 0, 1, 1, 2, 2, 0, 1, 2, 0, 1, + // Entry 40 - 7F + 0, 1, 1, 1, 1, 0, 0, 2, 1, 0, 0, 0, 1, 1, 1, 1, + 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 2, 2, 2, + 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, + 2, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 2, 0, 2, 1, + // Entry 80 - BF + 1, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, + 2, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, +} + +const ( + _Latn = 82 + _Hani = 50 + _Hans = 52 + _Hant = 53 + _Qaaa = 131 + _Qaai = 139 + _Qabx = 180 + _Zinh = 224 + _Zyyy = 229 + _Zzzz = 230 +) + +// script is an alphabetically sorted list of ISO 15924 codes. The index +// of the script in the string, divided by 4, is the internal scriptID. 
+var script tag.Index = "" + // Size: 928 bytes + "----AdlmAfakAghbAhomArabAranArmiArmnAvstBaliBamuBassBatkBengBhksBlisBopo" + + "BrahBraiBugiBuhdCakmCansCariChamCherCirtCoptCprtCyrlCyrsDevaDsrtDuplEgyd" + + "EgyhEgypElbaEthiGeokGeorGlagGothGranGrekGujrGuruHanbHangHaniHanoHansHant" + + "HatrHebrHiraHluwHmngHrktHungIndsItalJamoJavaJpanJurcKaliKanaKharKhmrKhoj" + + "KitlKitsKndaKoreKpelKthiLanaLaooLatfLatgLatnLekeLepcLimbLinaLinbLisuLoma" + + "LyciLydiMahjMandManiMarcMayaMendMercMeroMlymModiMongMoonMrooMteiMultMymr" + + "NarbNbatNewaNkgbNkooNshuOgamOlckOrkhOryaOsgeOsmaPalmPaucPermPhagPhliPhlp" + + "PhlvPhnxPiqdPlrdPrtiQaaaQaabQaacQaadQaaeQaafQaagQaahQaaiQaajQaakQaalQaam" + + "QaanQaaoQaapQaaqQaarQaasQaatQaauQaavQaawQaaxQaayQaazQabaQabbQabcQabdQabe" + + "QabfQabgQabhQabiQabjQabkQablQabmQabnQaboQabpQabqQabrQabsQabtQabuQabvQabw" + + "QabxRjngRoroRunrSamrSaraSarbSaurSgnwShawShrdSiddSindSinhSoraSundSyloSyrc" + + "SyreSyrjSyrnTagbTakrTaleTaluTamlTangTavtTeluTengTfngTglgThaaThaiTibtTirh" + + "UgarVaiiVispWaraWoleXpeoXsuxYiiiZinhZmthZsyeZsymZxxxZyyyZzzz\xff\xff\xff" + + "\xff" + +// suppressScript is an index from langID to the dominant script for that language, +// if it exists. If a script is given, it should be suppressed from the language tag. +// Size: 713 bytes, 713 elements +var suppressScript = [713]uint8{ + // Entry 0 - 3F + 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x27, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 40 - 7F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, + // Entry 80 - BF + 0x52, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, + 0xd4, 0x00, 0x00, 0xd6, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x2d, 0x52, 0x52, 0x52, 0x00, 0x52, + 0x00, 0x52, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, + 0x52, 0x00, 0x00, 0x00, 0x52, 0x52, 0x00, 0x52, + 0x00, 0x00, 0x52, 0x52, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x52, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry C0 - FF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x52, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x52, 0x2e, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x37, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x52, + 0x00, 0x52, 0x52, 0x08, 0x00, 0x00, 0x00, 0x00, + 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, + // Entry 100 - 13F + 0x00, 0x00, 0x52, 0x52, 0x00, 0x37, 0x00, 0x41, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, + 0x00, 0x52, 0x00, 0x46, 0x00, 0x4a, 0x4b, 0x00, + 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 140 - 17F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, + 0x00, 
0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x52, 0x4f, 0x00, 0x00, + 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, + // Entry 180 - 1BF + 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, + 0x52, 0x00, 0x00, 0x00, 0x1e, 0x64, 0x00, 0x00, + 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x20, 0x00, + 0x00, 0x00, 0x52, 0x52, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x6b, 0x00, 0x00, + 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x52, + 0x00, 0x52, 0x00, 0x52, 0x20, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x52, 0x00, 0x52, 0x00, 0x52, + // Entry 1C0 - 1FF + 0x00, 0x52, 0x00, 0x00, 0x00, 0x70, 0x52, 0x00, + 0x52, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x52, 0x75, 0x00, 0x00, 0x00, 0x2f, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x52, + 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, + // Entry 200 - 23F + 0x00, 0x52, 0x00, 0x52, 0x00, 0x00, 0x00, 0x1e, + 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xc1, 0x00, 0x52, 0x00, 0x52, 0x00, 0x00, 0x52, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x52, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 240 - 27F + 0x52, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x52, + 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xcd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xd0, 0x52, 0x00, 0x00, 0x00, 0xd5, 0x00, 0x00, + 0x00, 0x27, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, + 0x52, 0x00, 0x52, 0x52, 0x52, 0x00, 0x52, 0x52, + 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, + // Entry 280 - 2BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x52, + 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x37, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 2C0 - 2FF + 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x00, + 0x00, +} + +const ( + _001 = 1 + _419 = 30 + _BR = 64 + _CA = 72 + _ES = 109 + _GB = 121 + _MD = 186 + _PT = 236 + _UK = 304 + _US = 306 + _ZZ = 354 + _XA = 320 + _XC = 322 + _XK = 330 +) + +// isoRegionOffset needs to be added to the index of regionISO to obtain the regionID +// for 2-letter ISO codes. (The first isoRegionOffset regionIDs are reserved for +// the UN.M49 codes used for groups.) +const isoRegionOffset = 31 + +// regionTypes defines the status of a region for various standards. 
+// Size: 355 bytes, 355 elements +var regionTypes = [355]uint8{ + // Entry 0 - 3F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + // Entry 40 - 7F + 0x06, 0x06, 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x04, 0x06, 0x04, 0x00, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x04, 0x06, + 0x04, 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, 0x04, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, 0x04, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + // Entry 80 - BF + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x00, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x00, + // Entry C0 - FF + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, 0x06, + 0x06, 0x06, 0x00, 0x06, 0x04, 0x06, 0x06, 0x06, + 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, 0x06, + 0x00, 0x06, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + // Entry 100 - 13F + 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x04, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x02, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x00, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, + // Entry 140 - 17F + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x04, 0x06, 0x06, 0x04, 0x06, 0x06, + 0x04, 0x06, 0x05, +} + +// regionISO holds a list of alphabetically sorted 2-letter ISO region codes. +// Each 2-letter codes is followed by two bytes with the following meaning: +// - [A-Z}{2}: the first letter of the 2-letter code plus these two +// letters form the 3-letter ISO code. +// - 0, n: index into altRegionISO3. 
+var regionISO tag.Index = "" + // Size: 1300 bytes + "AAAAACSCADNDAEREAFFGAGTGAIIAALLBAMRMANNTAOGOAQTAARRGASSMATUTAUUSAWBWAXLA" + + "AZZEBAIHBBRBBDGDBEELBFFABGGRBHHRBIDIBJENBLLMBMMUBNRNBOOLBQESBRRABSHSBTTN" + + "BUURBVVTBWWABYLRBZLZCAANCCCKCDODCFAFCGOGCHHECIIVCKOKCLHLCMMRCNHNCOOLCPPT" + + "CRRICS\x00\x00CTTECUUBCVPVCWUWCXXRCYYPCZZEDDDRDEEUDGGADJJIDKNKDMMADOOMDY" + + "HYDZZAEA ECCUEESTEGGYEHSHERRIESSPETTHEU\x00\x03FIINFJJIFKLKFMSMFOROFQ" + + "\x00\x18FRRAFXXXGAABGBBRGDRDGEEOGFUFGGGYGHHAGIIBGLRLGMMBGNINGPLPGQNQGRRC" + + "GS\x00\x06GTTMGUUMGWNBGYUYHKKGHMMDHNNDHRRVHTTIHUUNHVVOIC IDDNIERLILSRIM" + + "MNINNDIOOTIQRQIRRNISSLITTAJEEYJMAMJOORJPPNJTTNKEENKGGZKHHMKIIRKM\x00\x09" + + "KNNAKP\x00\x0cKRORKWWTKY\x00\x0fKZAZLAAOLBBNLCCALIIELKKALRBRLSSOLTTULUUX" + + "LVVALYBYMAARMCCOMDDAMENEMFAFMGDGMHHLMIIDMKKDMLLIMMMRMNNGMOACMPNPMQTQMRRT" + + "MSSRMTLTMUUSMVDVMWWIMXEXMYYSMZOZNAAMNCCLNEERNFFKNGGANHHBNIICNLLDNOORNPPL" + + "NQ\x00\x1eNRRUNTTZNUIUNZZLOMMNPAANPCCIPEERPFYFPGNGPHHLPKAKPLOLPM\x00\x12" + + "PNCNPRRIPSSEPTRTPUUSPWLWPYRYPZCZQAATQMMMQNNNQOOOQPPPQQQQQRRRQSSSQTTTQU" + + "\x00\x03QVVVQWWWQXXXQYYYQZZZREEURHHOROOURS\x00\x15RUUSRWWASAAUSBLBSCYCSD" + + "DNSEWESGGPSHHNSIVNSJJMSKVKSLLESMMRSNENSOOMSRURSSSDSTTPSUUNSVLVSXXMSYYRSZ" + + "WZTAAATCCATDCDTF\x00\x18TGGOTHHATJJKTKKLTLLSTMKMTNUNTOONTPMPTRURTTTOTVUV" + + "TWWNTZZAUAKRUGGAUK UMMIUSSAUYRYUZZBVAATVCCTVDDRVEENVGGBVIIRVNNMVUUTWFLF" + + "WKAKWSSMXAAAXBBBXCCCXDDDXEEEXFFFXGGGXHHHXIIIXJJJXKKKXLLLXMMMXNNNXOOOXPPP" + + "XQQQXRRRXSSSXTTTXUUUXVVVXWWWXXXXXYYYXZZZYDMDYEEMYT\x00\x1bYUUGZAAFZMMBZR" + + "ARZWWEZZZZ\xff\xff\xff\xff" + +// altRegionISO3 holds a list of 3-letter region codes that cannot be +// mapped to 2-letter codes using the default algorithm. This is a short list. +var altRegionISO3 string = "SCGQUUSGSCOMPRKCYMSPMSRBATFMYTATN" + +// altRegionIDs holds a list of regionIDs the positions of which match those +// of the 3-letter ISO codes in altRegionISO3. +// Size: 22 bytes, 11 elements +var altRegionIDs = [11]uint16{ + 0x0056, 0x006f, 0x0086, 0x00a6, 0x00a8, 0x00ab, 0x00e8, 0x0103, + 0x011f, 0x015c, 0x00da, +} + +// Size: 80 bytes, 20 elements +var regionOldMap = [20]fromTo{ + 0: {from: 0x43, to: 0xc2}, + 1: {from: 0x57, to: 0xa5}, + 2: {from: 0x5e, to: 0x5f}, + 3: {from: 0x65, to: 0x3a}, + 4: {from: 0x77, to: 0x76}, + 5: {from: 0x91, to: 0x36}, + 6: {from: 0xa1, to: 0x131}, + 7: {from: 0xbf, to: 0x131}, + 8: {from: 0xd5, to: 0x13c}, + 9: {from: 0xda, to: 0x2a}, + 10: {from: 0xed, to: 0x131}, + 11: {from: 0xf0, to: 0xe0}, + 12: {from: 0xfa, to: 0x6f}, + 13: {from: 0x101, to: 0x161}, + 14: {from: 0x128, to: 0x124}, + 15: {from: 0x130, to: 0x79}, + 16: {from: 0x137, to: 0x13b}, + 17: {from: 0x13e, to: 0x131}, + 18: {from: 0x15a, to: 0x15b}, + 19: {from: 0x160, to: 0x4a}, +} + +// m49 maps regionIDs to UN.M49 codes. The first isoRegionOffset entries are +// codes indicating collections of regions. 
+// Size: 710 bytes, 355 elements +var m49 = [355]int16{ + // Entry 0 - 3F + 0, 1, 2, 3, 5, 9, 11, 13, + 14, 15, 17, 18, 19, 21, 29, 30, + 34, 35, 39, 53, 54, 57, 61, 142, + 143, 145, 150, 151, 154, 155, 419, 958, + 0, 20, 784, 4, 28, 660, 8, 51, + 530, 24, 10, 32, 16, 40, 36, 533, + 248, 31, 70, 52, 50, 56, 854, 100, + 48, 108, 204, 652, 60, 96, 68, 535, + // Entry 40 - 7F + 76, 44, 64, 104, 74, 72, 112, 84, + 124, 166, 180, 140, 178, 756, 384, 184, + 152, 120, 156, 170, 0, 188, 891, 296, + 192, 132, 531, 162, 196, 203, 278, 276, + 0, 262, 208, 212, 214, 204, 12, 0, + 218, 233, 818, 732, 232, 724, 231, 967, + 246, 242, 238, 583, 234, 0, 250, 249, + 266, 826, 308, 268, 254, 831, 288, 292, + // Entry 80 - BF + 304, 270, 324, 312, 226, 300, 239, 320, + 316, 624, 328, 344, 334, 340, 191, 332, + 348, 854, 0, 360, 372, 376, 833, 356, + 86, 368, 364, 352, 380, 832, 388, 400, + 392, 581, 404, 417, 116, 296, 174, 659, + 408, 410, 414, 136, 398, 418, 422, 662, + 438, 144, 430, 426, 440, 442, 428, 434, + 504, 492, 498, 499, 663, 450, 584, 581, + // Entry C0 - FF + 807, 466, 104, 496, 446, 580, 474, 478, + 500, 470, 480, 462, 454, 484, 458, 508, + 516, 540, 562, 574, 566, 548, 558, 528, + 578, 524, 10, 520, 536, 570, 554, 512, + 591, 0, 604, 258, 598, 608, 586, 616, + 666, 612, 630, 275, 620, 581, 585, 600, + 591, 634, 959, 960, 961, 962, 963, 964, + 965, 966, 967, 968, 969, 970, 971, 972, + // Entry 100 - 13F + 638, 716, 642, 688, 643, 646, 682, 90, + 690, 729, 752, 702, 654, 705, 744, 703, + 694, 674, 686, 706, 740, 728, 678, 810, + 222, 534, 760, 748, 0, 796, 148, 260, + 768, 764, 762, 772, 626, 795, 788, 776, + 626, 792, 780, 798, 158, 834, 804, 800, + 826, 581, 840, 858, 860, 336, 670, 704, + 862, 92, 850, 704, 548, 876, 581, 882, + // Entry 140 - 17F + 973, 974, 975, 976, 977, 978, 979, 980, + 981, 982, 983, 984, 985, 986, 987, 988, + 989, 990, 991, 992, 993, 994, 995, 996, + 997, 998, 720, 887, 175, 891, 710, 894, + 180, 716, 999, +} + +// m49Index gives indexes into fromM49 based on the three most significant bits +// of a 10-bit UN.M49 code. To search an UN.M49 code in fromM49, search in +// fromM49[m49Index[msb3(code)]:m49Index[msb3(code)+1]] +// for an entry where the first 7 bits match the 7 lsb of the UN.M49 code. +// The region code is stored in the 9 lsb of the indexed value. +// Size: 18 bytes, 9 elements +var m49Index = [9]int16{ + 0, 59, 107, 142, 180, 219, 258, 290, + 332, +} + +// fromM49 contains entries to map UN.M49 codes to regions. See m49Index for details.
+// Size: 664 bytes, 332 elements +var fromM49 = [332]uint16{ + // Entry 0 - 3F + 0x0201, 0x0402, 0x0603, 0x0823, 0x0a04, 0x1026, 0x1205, 0x142a, + 0x1606, 0x1866, 0x1a07, 0x1c08, 0x1e09, 0x202c, 0x220a, 0x240b, + 0x260c, 0x2821, 0x2a0d, 0x3029, 0x3824, 0x3a0e, 0x3c0f, 0x3e31, + 0x402b, 0x4410, 0x4611, 0x482e, 0x4e12, 0x502d, 0x5841, 0x6038, + 0x6434, 0x6627, 0x6833, 0x6a13, 0x6c14, 0x7035, 0x7215, 0x783c, + 0x7a16, 0x8042, 0x883e, 0x8c32, 0x9045, 0x9444, 0x9840, 0xa847, + 0xac98, 0xb507, 0xb939, 0xc03d, 0xc837, 0xd0c2, 0xd839, 0xe046, + 0xe8a4, 0xf051, 0xf848, 0x0859, 0x10ab, 0x184b, 0x1c17, 0x1e18, + // Entry 40 - 7F + 0x20b1, 0x2219, 0x291e, 0x2c1a, 0x2e1b, 0x3050, 0x341c, 0x361d, + 0x3852, 0x3d2c, 0x445b, 0x4c49, 0x5453, 0x5ca6, 0x5f5c, 0x644c, + 0x684a, 0x704f, 0x7855, 0x7e8e, 0x8058, 0x885c, 0x965d, 0x983a, + 0xa062, 0xa863, 0xac64, 0xb468, 0xbd18, 0xc484, 0xcc6e, 0xce6e, + 0xd06c, 0xd269, 0xd474, 0xdc72, 0xde86, 0xe471, 0xec70, 0xf030, + 0xf277, 0xf476, 0xfc7c, 0x04e3, 0x091f, 0x0c61, 0x1478, 0x187b, + 0x1c81, 0x26eb, 0x285f, 0x2c5e, 0x305f, 0x407e, 0x487f, 0x50a5, + 0x5885, 0x6080, 0x687a, 0x7083, 0x7888, 0x8087, 0x8882, 0x908a, + // Entry 80 - BF + 0x988f, 0x9c8c, 0xa135, 0xa88d, 0xb08b, 0xb890, 0xc09b, 0xc897, + 0xd093, 0xd89a, 0xe099, 0xe894, 0xf095, 0xf89c, 0x004e, 0x089e, + 0x10a0, 0x1cac, 0x209f, 0x28a2, 0x30a8, 0x34a9, 0x3caa, 0x42a3, + 0x44ad, 0x461e, 0x4cae, 0x54b3, 0x58b6, 0x5cb2, 0x64b7, 0x6cb0, + 0x70b4, 0x74b5, 0x7cc4, 0x84bd, 0x8ccc, 0x94ce, 0x9ccb, 0xa4c1, + 0xacc9, 0xb4c6, 0xbcc7, 0xc0ca, 0xc8cd, 0xd8b9, 0xe0c3, 0xe4ba, + 0xe6bb, 0xe8c8, 0xf0b8, 0xf8cf, 0x00df, 0x08d0, 0x10db, 0x18d9, + 0x20d7, 0x2428, 0x265a, 0x2a2f, 0x2d19, 0x2e3f, 0x30dc, 0x38d1, + // Entry C0 - FF + 0x493c, 0x54de, 0x5cd6, 0x64d2, 0x6cd4, 0x74dd, 0x7cd3, 0x84d8, + 0x88c5, 0x8b31, 0x8e73, 0x90be, 0x92ee, 0x94e6, 0x9ee0, 0xace4, + 0xb0ef, 0xb8e2, 0xc0e5, 0xc8e9, 0xd0e7, 0xd8ec, 0xe089, 0xe524, + 0xecea, 0xf4f1, 0xfd00, 0x0502, 0x0704, 0x0d05, 0x183b, 0x1d0c, + 0x26a7, 0x2825, 0x2caf, 0x2ebc, 0x34e8, 0x3d36, 0x4511, 0x4d16, + 0x5506, 0x5d12, 0x6103, 0x6508, 0x6d10, 0x7d0b, 0x7f0f, 0x813b, + 0x830d, 0x8513, 0x8d5e, 0x9961, 0xa15a, 0xa86d, 0xb115, 0xb309, + 0xb86b, 0xc109, 0xc914, 0xd10e, 0xd91b, 0xe10a, 0xe84d, 0xf11a, + // Entry 100 - 13F + 0xf522, 0xf921, 0x0120, 0x0923, 0x1127, 0x192a, 0x2022, 0x2926, + 0x3129, 0x3725, 0x391d, 0x3d2b, 0x412f, 0x492e, 0x4ec0, 0x5517, + 0x646a, 0x7479, 0x7e7d, 0x809d, 0x8296, 0x852d, 0x9132, 0xa53a, + 0xac36, 0xb533, 0xb934, 0xbd38, 0xd93d, 0xe53f, 0xed5b, 0xef5b, + 0xf656, 0xfd5f, 0x7c1f, 0x7ef2, 0x80f3, 0x82f4, 0x84f5, 0x86f6, + 0x88f7, 0x8af8, 0x8cf9, 0x8e6f, 0x90fb, 0x92fc, 0x94fd, 0x96fe, + 0x98ff, 0x9b40, 0x9d41, 0x9f42, 0xa143, 0xa344, 0xa545, 0xa746, + 0xa947, 0xab48, 0xad49, 0xaf4a, 0xb14b, 0xb34c, 0xb54d, 0xb74e, + // Entry 140 - 17F + 0xb94f, 0xbb50, 0xbd51, 0xbf52, 0xc153, 0xc354, 0xc555, 0xc756, + 0xc957, 0xcb58, 0xcd59, 0xcf62, +} + +// Size: 1444 bytes +var variantIndex = map[string]uint8{ + "1606nict": 0x0, + "1694acad": 0x1, + "1901": 0x2, + "1959acad": 0x3, + "1994": 0x45, + "1996": 0x4, + "abl1943": 0x5, + "alalc97": 0x47, + "aluku": 0x6, + "ao1990": 0x7, + "arevela": 0x8, + "arevmda": 0x9, + "baku1926": 0xa, + "balanka": 0xb, + "barla": 0xc, + "basiceng": 0xd, + "bauddha": 0xe, + "biscayan": 0xf, + "biske": 0x40, + "bohoric": 0x10, + "boont": 0x11, + "colb1945": 0x12, + "cornu": 0x13, + "dajnko": 0x14, + "ekavsk": 0x15, + "emodeng": 0x16, + "fonipa": 0x48, + "fonupa": 0x49, + "fonxsamp": 0x4a, + "hepburn": 0x17, + "heploc": 0x46, + "hognorsk": 
0x18, + "ijekavsk": 0x19, + "itihasa": 0x1a, + "jauer": 0x1b, + "jyutping": 0x1c, + "kkcor": 0x1d, + "kociewie": 0x1e, + "kscor": 0x1f, + "laukika": 0x20, + "lipaw": 0x41, + "luna1918": 0x21, + "metelko": 0x22, + "monoton": 0x23, + "ndyuka": 0x24, + "nedis": 0x25, + "newfound": 0x26, + "njiva": 0x42, + "nulik": 0x27, + "osojs": 0x43, + "oxendict": 0x28, + "pamaka": 0x29, + "petr1708": 0x2a, + "pinyin": 0x2b, + "polyton": 0x2c, + "puter": 0x2d, + "rigik": 0x2e, + "rozaj": 0x2f, + "rumgr": 0x30, + "scotland": 0x31, + "scouse": 0x32, + "simple": 0x4b, + "solba": 0x44, + "sotav": 0x33, + "surmiran": 0x34, + "sursilv": 0x35, + "sutsilv": 0x36, + "tarask": 0x37, + "uccor": 0x38, + "ucrcor": 0x39, + "ulster": 0x3a, + "unifon": 0x3b, + "vaidika": 0x3c, + "valencia": 0x3d, + "vallader": 0x3e, + "wadegile": 0x3f, +} + +// variantNumSpecialized is the number of specialized variants in variants. +const variantNumSpecialized = 71 + +// nRegionGroups is the number of region groups. +const nRegionGroups = 32 + +type likelyLangRegion struct { + lang uint16 + region uint16 +} + +// likelyScript is a lookup table, indexed by scriptID, for the most likely +// languages and regions given a script. +// Size: 928 bytes, 232 elements +var likelyScript = [232]likelyLangRegion{ + 1: {lang: 0xa6, region: 0x82}, + 3: {lang: 0x159, region: 0x104}, + 4: {lang: 0xc, region: 0x97}, + 5: {lang: 0x15, region: 0x6a}, + 7: {lang: 0x16, region: 0x9a}, + 8: {lang: 0xf3, region: 0x27}, + 9: {lang: 0x8, region: 0x9a}, + 10: {lang: 0x27, region: 0x93}, + 11: {lang: 0x2b, region: 0x51}, + 12: {lang: 0x55, region: 0xb2}, + 13: {lang: 0x2c, region: 0x93}, + 14: {lang: 0x4b, region: 0x34}, + 15: {lang: 0x20d, region: 0x97}, + 17: {lang: 0x2c4, region: 0x12c}, + 18: {lang: 0x1e5, region: 0x97}, + 19: {lang: 0xaf, region: 0x76}, + 20: {lang: 0x5b, region: 0x93}, + 21: {lang: 0x47, region: 0xe5}, + 22: {lang: 0x63, region: 0x34}, + 23: {lang: 0x73, region: 0x48}, + 24: {lang: 0x2a8, region: 0x129}, + 25: {lang: 0x6e, region: 0x13b}, + 26: {lang: 0x6c, region: 0x132}, + 28: {lang: 0x71, region: 0x6a}, + 29: {lang: 0xd0, region: 0x5c}, + 30: {lang: 0x207, region: 0x104}, + 32: {lang: 0xe1, region: 0x97}, + 34: {lang: 0xaf, region: 0x76}, + 37: {lang: 0x98, region: 0x6a}, + 38: {lang: 0x23a, region: 0x26}, + 39: {lang: 0x11, region: 0x6e}, + 41: {lang: 0x111, region: 0x7b}, + 42: {lang: 0x7d, region: 0x37}, + 43: {lang: 0xcf, region: 0x12e}, + 44: {lang: 0x20d, region: 0x97}, + 45: {lang: 0x9a, region: 0x85}, + 46: {lang: 0xd3, region: 0x97}, + 47: {lang: 0x1d7, region: 0x97}, + 48: {lang: 0x2c4, region: 0x12c}, + 49: {lang: 0x136, region: 0xa9}, + 50: {lang: 0x2c4, region: 0x52}, + 51: {lang: 0xe9, region: 0xe5}, + 52: {lang: 0x2c4, region: 0x52}, + 53: {lang: 0x2c4, region: 0x12c}, + 54: {lang: 0x18b, region: 0x99}, + 55: {lang: 0xe0, region: 0x95}, + 56: {lang: 0x107, region: 0xa0}, + 57: {lang: 0xe4, region: 0x129}, + 58: {lang: 0xe8, region: 0xad}, + 60: {lang: 0xf2, region: 0x90}, + 62: {lang: 0xa0, region: 0x9c}, + 63: {lang: 0x136, region: 0xa9}, + 64: {lang: 0x10f, region: 0x93}, + 65: {lang: 0x107, region: 0xa0}, + 67: {lang: 0x99, region: 0xc2}, + 68: {lang: 0x107, region: 0xa0}, + 69: {lang: 0x1eb, region: 0xe6}, + 70: {lang: 0x133, region: 0xa4}, + 71: {lang: 0x21a, region: 0x97}, + 74: {lang: 0x135, region: 0x97}, + 75: {lang: 0x136, region: 0xa9}, + 77: {lang: 0x40, region: 0x97}, + 78: {lang: 0x1c2, region: 0x121}, + 79: {lang: 0x165, region: 0xad}, + 84: {lang: 0x158, region: 0x97}, + 85: {lang: 0x15c, region: 0x97}, + 
86: {lang: 0x14f, region: 0x85}, + 87: {lang: 0xd0, region: 0x85}, + 88: {lang: 0x15e, region: 0x52}, + 90: {lang: 0x2aa, region: 0x129}, + 91: {lang: 0x2ab, region: 0x129}, + 92: {lang: 0xe1, region: 0x97}, + 93: {lang: 0x1a8, region: 0x9a}, + 94: {lang: 0x2ad, region: 0x52}, + 95: {lang: 0x4c, region: 0x52}, + 97: {lang: 0x17f, region: 0x110}, + 98: {lang: 0x2ae, region: 0x109}, + 99: {lang: 0x2ae, region: 0x109}, + 100: {lang: 0x18d, region: 0x97}, + 101: {lang: 0x196, region: 0x97}, + 102: {lang: 0x18f, region: 0x52}, + 104: {lang: 0x199, region: 0x34}, + 105: {lang: 0x190, region: 0x97}, + 106: {lang: 0x22b, region: 0xe6}, + 107: {lang: 0x1a5, region: 0xc2}, + 108: {lang: 0x2af, region: 0x106}, + 109: {lang: 0x16, region: 0x9f}, + 110: {lang: 0x1b5, region: 0xd9}, + 112: {lang: 0x179, region: 0x82}, + 114: {lang: 0x223, region: 0x94}, + 115: {lang: 0x212, region: 0x97}, + 116: {lang: 0x1d6, region: 0xc3}, + 117: {lang: 0x1d3, region: 0x97}, + 118: {lang: 0x1d5, region: 0x132}, + 119: {lang: 0x238, region: 0x113}, + 120: {lang: 0x16, region: 0x11a}, + 121: {lang: 0x7c, region: 0xc2}, + 122: {lang: 0x147, region: 0x104}, + 123: {lang: 0x172, region: 0x52}, + 124: {lang: 0x1d9, region: 0x9a}, + 125: {lang: 0x1d9, region: 0x52}, + 127: {lang: 0x1e3, region: 0xae}, + 129: {lang: 0xe5, region: 0x52}, + 130: {lang: 0x2b2, region: 0x9a}, + 181: {lang: 0x1f6, region: 0x93}, + 183: {lang: 0x1c4, region: 0x10a}, + 184: {lang: 0x234, region: 0x95}, + 186: {lang: 0x2b3, region: 0x15b}, + 187: {lang: 0x213, region: 0x97}, + 188: {lang: 0x1e, region: 0x132}, + 189: {lang: 0x9b, region: 0x79}, + 190: {lang: 0x20d, region: 0x97}, + 191: {lang: 0x20d, region: 0x97}, + 192: {lang: 0x21a, region: 0x97}, + 193: {lang: 0x228, region: 0xb1}, + 194: {lang: 0x23c, region: 0x97}, + 195: {lang: 0x244, region: 0x93}, + 196: {lang: 0x24e, region: 0x34}, + 197: {lang: 0x24f, region: 0x99}, + 201: {lang: 0x253, region: 0xe5}, + 202: {lang: 0x8a, region: 0x97}, + 203: {lang: 0x255, region: 0x52}, + 204: {lang: 0x126, region: 0x52}, + 205: {lang: 0x251, region: 0x97}, + 206: {lang: 0x27f, region: 0x52}, + 207: {lang: 0x48, region: 0x13b}, + 208: {lang: 0x258, region: 0x97}, + 210: {lang: 0x2c3, region: 0xb8}, + 211: {lang: 0xaa, region: 0xe5}, + 212: {lang: 0x90, region: 0xcb}, + 213: {lang: 0x25d, region: 0x121}, + 214: {lang: 0x4c, region: 0x52}, + 215: {lang: 0x177, region: 0x97}, + 216: {lang: 0x285, region: 0x11a}, + 217: {lang: 0x28e, region: 0xb2}, + 219: {lang: 0xec, region: 0x97}, + 221: {lang: 0x1e1, region: 0x9a}, + 222: {lang: 0xe, region: 0x99}, + 223: {lang: 0xfb, region: 0x52}, +} + +type likelyScriptRegion struct { + region uint16 + script uint8 + flags uint8 +} + +// likelyLang is a lookup table, indexed by langID, for the most likely +// scripts and regions given incomplete information. If more entries exist for a +// given language, region and script are the index and size respectively +// of the list in likelyLangList. 
+// Size: 2852 bytes, 713 elements +var likelyLang = [713]likelyScriptRegion{ + 0: {region: 0x132, script: 0x52, flags: 0x0}, + 1: {region: 0x6e, script: 0x52, flags: 0x0}, + 2: {region: 0x7b, script: 0x1e, flags: 0x0}, + 3: {region: 0x7e, script: 0x52, flags: 0x0}, + 4: {region: 0x93, script: 0x52, flags: 0x0}, + 5: {region: 0x12f, script: 0x52, flags: 0x0}, + 6: {region: 0x7e, script: 0x52, flags: 0x0}, + 7: {region: 0x104, script: 0x1e, flags: 0x0}, + 8: {region: 0x9a, script: 0x9, flags: 0x0}, + 9: {region: 0x126, script: 0x5, flags: 0x0}, + 10: {region: 0x15e, script: 0x52, flags: 0x0}, + 11: {region: 0x51, script: 0x52, flags: 0x0}, + 12: {region: 0x97, script: 0x4, flags: 0x0}, + 13: {region: 0x7e, script: 0x52, flags: 0x0}, + 14: {region: 0x99, script: 0xde, flags: 0x0}, + 15: {region: 0x14a, script: 0x52, flags: 0x0}, + 16: {region: 0x104, script: 0x1e, flags: 0x0}, + 17: {region: 0x6e, script: 0x27, flags: 0x0}, + 18: {region: 0xd4, script: 0x52, flags: 0x0}, + 20: {region: 0x93, script: 0x52, flags: 0x0}, + 21: {region: 0x6a, script: 0x5, flags: 0x0}, + 22: {region: 0x0, script: 0x3, flags: 0x1}, + 23: {region: 0x50, script: 0x52, flags: 0x0}, + 24: {region: 0x3e, script: 0x52, flags: 0x0}, + 25: {region: 0x66, script: 0x5, flags: 0x0}, + 26: {region: 0xb8, script: 0x5, flags: 0x0}, + 27: {region: 0x6a, script: 0x5, flags: 0x0}, + 28: {region: 0x97, script: 0xe, flags: 0x0}, + 29: {region: 0x12d, script: 0x52, flags: 0x0}, + 30: {region: 0x132, script: 0xbc, flags: 0x0}, + 31: {region: 0x6d, script: 0x52, flags: 0x0}, + 32: {region: 0x48, script: 0x52, flags: 0x0}, + 33: {region: 0x104, script: 0x1e, flags: 0x0}, + 34: {region: 0x97, script: 0x20, flags: 0x0}, + 35: {region: 0x3e, script: 0x52, flags: 0x0}, + 36: {region: 0x3, script: 0x5, flags: 0x1}, + 37: {region: 0x104, script: 0x1e, flags: 0x0}, + 38: {region: 0xe6, script: 0x5, flags: 0x0}, + 39: {region: 0x93, script: 0x52, flags: 0x0}, + 40: {region: 0xd9, script: 0x20, flags: 0x0}, + 41: {region: 0x2d, script: 0x52, flags: 0x0}, + 42: {region: 0x51, script: 0x52, flags: 0x0}, + 43: {region: 0x51, script: 0xb, flags: 0x0}, + 44: {region: 0x93, script: 0x52, flags: 0x0}, + 45: {region: 0x51, script: 0x52, flags: 0x0}, + 46: {region: 0x4e, script: 0x52, flags: 0x0}, + 47: {region: 0x46, script: 0x1e, flags: 0x0}, + 48: {region: 0x109, script: 0x5, flags: 0x0}, + 49: {region: 0x15f, script: 0x52, flags: 0x0}, + 50: {region: 0x93, script: 0x52, flags: 0x0}, + 51: {region: 0x12d, script: 0x52, flags: 0x0}, + 52: {region: 0x51, script: 0x52, flags: 0x0}, + 53: {region: 0x97, script: 0xcd, flags: 0x0}, + 54: {region: 0xe6, script: 0x5, flags: 0x0}, + 55: {region: 0x97, script: 0x20, flags: 0x0}, + 56: {region: 0x37, script: 0x1e, flags: 0x0}, + 57: {region: 0x97, script: 0x20, flags: 0x0}, + 58: {region: 0xe6, script: 0x5, flags: 0x0}, + 59: {region: 0x129, script: 0x2d, flags: 0x0}, + 61: {region: 0x97, script: 0x20, flags: 0x0}, + 62: {region: 0x97, script: 0x20, flags: 0x0}, + 63: {region: 0xe5, script: 0x52, flags: 0x0}, + 64: {region: 0x97, script: 0x20, flags: 0x0}, + 65: {region: 0x13c, script: 0x52, flags: 0x0}, + 66: {region: 0xe5, script: 0x52, flags: 0x0}, + 67: {region: 0xd4, script: 0x52, flags: 0x0}, + 68: {region: 0x97, script: 0x20, flags: 0x0}, + 69: {region: 0x93, script: 0x52, flags: 0x0}, + 70: {region: 0x51, script: 0x52, flags: 0x0}, + 71: {region: 0xe5, script: 0x52, flags: 0x0}, + 72: {region: 0x13b, script: 0xcf, flags: 0x0}, + 73: {region: 0xc1, script: 0x52, flags: 0x0}, + 74: {region: 0xc1, script: 
0x52, flags: 0x0}, + 75: {region: 0x34, script: 0xe, flags: 0x0}, + 76: {region: 0x52, script: 0xd6, flags: 0x0}, + 77: {region: 0x97, script: 0xe, flags: 0x0}, + 78: {region: 0x9a, script: 0x5, flags: 0x0}, + 79: {region: 0x4e, script: 0x52, flags: 0x0}, + 80: {region: 0x76, script: 0x52, flags: 0x0}, + 81: {region: 0x97, script: 0x20, flags: 0x0}, + 82: {region: 0xe6, script: 0x5, flags: 0x0}, + 83: {region: 0x97, script: 0x20, flags: 0x0}, + 84: {region: 0x32, script: 0x52, flags: 0x0}, + 85: {region: 0xb2, script: 0xc, flags: 0x0}, + 86: {region: 0x51, script: 0x52, flags: 0x0}, + 87: {region: 0xe5, script: 0x52, flags: 0x0}, + 88: {region: 0xe6, script: 0x20, flags: 0x0}, + 89: {region: 0x104, script: 0x1e, flags: 0x0}, + 90: {region: 0x15c, script: 0x52, flags: 0x0}, + 91: {region: 0x93, script: 0x52, flags: 0x0}, + 92: {region: 0x51, script: 0x52, flags: 0x0}, + 93: {region: 0x84, script: 0x52, flags: 0x0}, + 94: {region: 0x6c, script: 0x27, flags: 0x0}, + 95: {region: 0x51, script: 0x52, flags: 0x0}, + 96: {region: 0xc1, script: 0x52, flags: 0x0}, + 97: {region: 0x6d, script: 0x52, flags: 0x0}, + 98: {region: 0xd4, script: 0x52, flags: 0x0}, + 99: {region: 0x8, script: 0x2, flags: 0x1}, + 100: {region: 0x104, script: 0x1e, flags: 0x0}, + 101: {region: 0xe5, script: 0x52, flags: 0x0}, + 102: {region: 0x12f, script: 0x52, flags: 0x0}, + 103: {region: 0x88, script: 0x52, flags: 0x0}, + 104: {region: 0x73, script: 0x52, flags: 0x0}, + 105: {region: 0x104, script: 0x1e, flags: 0x0}, + 106: {region: 0x132, script: 0x52, flags: 0x0}, + 107: {region: 0x48, script: 0x52, flags: 0x0}, + 108: {region: 0x132, script: 0x1a, flags: 0x0}, + 109: {region: 0xa4, script: 0x5, flags: 0x0}, + 110: {region: 0x13b, script: 0x19, flags: 0x0}, + 111: {region: 0x99, script: 0x5, flags: 0x0}, + 112: {region: 0x76, script: 0x52, flags: 0x0}, + 113: {region: 0x6a, script: 0x1c, flags: 0x0}, + 114: {region: 0xe5, script: 0x52, flags: 0x0}, + 115: {region: 0x48, script: 0x17, flags: 0x0}, + 116: {region: 0x48, script: 0x17, flags: 0x0}, + 117: {region: 0x48, script: 0x17, flags: 0x0}, + 118: {region: 0x48, script: 0x17, flags: 0x0}, + 119: {region: 0x48, script: 0x17, flags: 0x0}, + 120: {region: 0x108, script: 0x52, flags: 0x0}, + 121: {region: 0x5d, script: 0x52, flags: 0x0}, + 122: {region: 0xe7, script: 0x52, flags: 0x0}, + 123: {region: 0x48, script: 0x17, flags: 0x0}, + 124: {region: 0xc2, script: 0x79, flags: 0x0}, + 125: {region: 0xa, script: 0x2, flags: 0x1}, + 126: {region: 0x104, script: 0x1e, flags: 0x0}, + 127: {region: 0x79, script: 0x52, flags: 0x0}, + 128: {region: 0x62, script: 0x52, flags: 0x0}, + 129: {region: 0x132, script: 0x52, flags: 0x0}, + 130: {region: 0x104, script: 0x1e, flags: 0x0}, + 131: {region: 0xa2, script: 0x52, flags: 0x0}, + 132: {region: 0x97, script: 0x5, flags: 0x0}, + 133: {region: 0x5f, script: 0x52, flags: 0x0}, + 134: {region: 0x48, script: 0x52, flags: 0x0}, + 135: {region: 0x48, script: 0x52, flags: 0x0}, + 136: {region: 0xd2, script: 0x52, flags: 0x0}, + 137: {region: 0x4e, script: 0x52, flags: 0x0}, + 138: {region: 0x97, script: 0x5, flags: 0x0}, + 139: {region: 0x5f, script: 0x52, flags: 0x0}, + 140: {region: 0xc1, script: 0x52, flags: 0x0}, + 141: {region: 0xce, script: 0x52, flags: 0x0}, + 142: {region: 0xd9, script: 0x20, flags: 0x0}, + 143: {region: 0x51, script: 0x52, flags: 0x0}, + 144: {region: 0xcb, script: 0xd4, flags: 0x0}, + 145: {region: 0x112, script: 0x52, flags: 0x0}, + 146: {region: 0x36, script: 0x52, flags: 0x0}, + 147: {region: 0x42, script: 
0xd6, flags: 0x0}, + 148: {region: 0xa2, script: 0x52, flags: 0x0}, + 149: {region: 0x7e, script: 0x52, flags: 0x0}, + 150: {region: 0xd4, script: 0x52, flags: 0x0}, + 151: {region: 0x9c, script: 0x52, flags: 0x0}, + 152: {region: 0x6a, script: 0x25, flags: 0x0}, + 153: {region: 0xc2, script: 0x43, flags: 0x0}, + 154: {region: 0x85, script: 0x2d, flags: 0x0}, + 155: {region: 0xc, script: 0x2, flags: 0x1}, + 156: {region: 0x1, script: 0x52, flags: 0x0}, + 157: {region: 0x6d, script: 0x52, flags: 0x0}, + 158: {region: 0x132, script: 0x52, flags: 0x0}, + 159: {region: 0x69, script: 0x52, flags: 0x0}, + 160: {region: 0x9c, script: 0x3e, flags: 0x0}, + 161: {region: 0x6d, script: 0x52, flags: 0x0}, + 162: {region: 0x51, script: 0x52, flags: 0x0}, + 163: {region: 0x6d, script: 0x52, flags: 0x0}, + 164: {region: 0x9a, script: 0x5, flags: 0x0}, + 165: {region: 0x84, script: 0x52, flags: 0x0}, + 166: {region: 0xe, script: 0x2, flags: 0x1}, + 167: {region: 0xc1, script: 0x52, flags: 0x0}, + 168: {region: 0x70, script: 0x52, flags: 0x0}, + 169: {region: 0x109, script: 0x5, flags: 0x0}, + 170: {region: 0xe5, script: 0x52, flags: 0x0}, + 171: {region: 0x10a, script: 0x52, flags: 0x0}, + 172: {region: 0x71, script: 0x52, flags: 0x0}, + 173: {region: 0x74, script: 0x52, flags: 0x0}, + 174: {region: 0x3a, script: 0x52, flags: 0x0}, + 175: {region: 0x76, script: 0x52, flags: 0x0}, + 176: {region: 0x132, script: 0x52, flags: 0x0}, + 177: {region: 0x76, script: 0x52, flags: 0x0}, + 178: {region: 0x5f, script: 0x52, flags: 0x0}, + 179: {region: 0x5f, script: 0x52, flags: 0x0}, + 180: {region: 0x13d, script: 0x52, flags: 0x0}, + 181: {region: 0xd2, script: 0x52, flags: 0x0}, + 182: {region: 0x9c, script: 0x52, flags: 0x0}, + 183: {region: 0xd4, script: 0x52, flags: 0x0}, + 184: {region: 0x109, script: 0x52, flags: 0x0}, + 185: {region: 0xd7, script: 0x52, flags: 0x0}, + 186: {region: 0x94, script: 0x52, flags: 0x0}, + 187: {region: 0x7e, script: 0x52, flags: 0x0}, + 188: {region: 0xba, script: 0x52, flags: 0x0}, + 189: {region: 0x52, script: 0x34, flags: 0x0}, + 190: {region: 0x93, script: 0x52, flags: 0x0}, + 191: {region: 0x97, script: 0x20, flags: 0x0}, + 192: {region: 0x9a, script: 0x5, flags: 0x0}, + 193: {region: 0x7c, script: 0x52, flags: 0x0}, + 194: {region: 0x79, script: 0x52, flags: 0x0}, + 195: {region: 0x6e, script: 0x27, flags: 0x0}, + 196: {region: 0xd9, script: 0x20, flags: 0x0}, + 197: {region: 0xa5, script: 0x52, flags: 0x0}, + 198: {region: 0xe6, script: 0x5, flags: 0x0}, + 199: {region: 0xe6, script: 0x5, flags: 0x0}, + 200: {region: 0x6d, script: 0x52, flags: 0x0}, + 201: {region: 0x9a, script: 0x5, flags: 0x0}, + 202: {region: 0xef, script: 0x52, flags: 0x0}, + 203: {region: 0x97, script: 0x20, flags: 0x0}, + 204: {region: 0x97, script: 0xd0, flags: 0x0}, + 205: {region: 0x93, script: 0x52, flags: 0x0}, + 206: {region: 0xd7, script: 0x52, flags: 0x0}, + 207: {region: 0x12e, script: 0x2b, flags: 0x0}, + 208: {region: 0x10, script: 0x2, flags: 0x1}, + 209: {region: 0x97, script: 0xe, flags: 0x0}, + 210: {region: 0x4d, script: 0x52, flags: 0x0}, + 211: {region: 0x97, script: 0x2e, flags: 0x0}, + 212: {region: 0x40, script: 0x52, flags: 0x0}, + 213: {region: 0x53, script: 0x52, flags: 0x0}, + 214: {region: 0x7e, script: 0x52, flags: 0x0}, + 216: {region: 0xa2, script: 0x52, flags: 0x0}, + 217: {region: 0x96, script: 0x52, flags: 0x0}, + 218: {region: 0xd9, script: 0x20, flags: 0x0}, + 219: {region: 0x48, script: 0x52, flags: 0x0}, + 220: {region: 0x12, script: 0x3, flags: 0x1}, + 221: 
{region: 0x52, script: 0x34, flags: 0x0}, + 222: {region: 0x132, script: 0x52, flags: 0x0}, + 223: {region: 0x23, script: 0x5, flags: 0x0}, + 224: {region: 0x95, script: 0x37, flags: 0x0}, + 225: {region: 0x97, script: 0x20, flags: 0x0}, + 226: {region: 0x71, script: 0x52, flags: 0x0}, + 227: {region: 0xe5, script: 0x52, flags: 0x0}, + 228: {region: 0x129, script: 0x39, flags: 0x0}, + 229: {region: 0x52, script: 0x81, flags: 0x0}, + 230: {region: 0xe6, script: 0x5, flags: 0x0}, + 231: {region: 0x97, script: 0x20, flags: 0x0}, + 232: {region: 0xad, script: 0x3a, flags: 0x0}, + 233: {region: 0xe5, script: 0x52, flags: 0x0}, + 234: {region: 0xe6, script: 0x5, flags: 0x0}, + 235: {region: 0xe4, script: 0x52, flags: 0x0}, + 236: {region: 0x97, script: 0x20, flags: 0x0}, + 237: {region: 0x97, script: 0x20, flags: 0x0}, + 238: {region: 0x8e, script: 0x52, flags: 0x0}, + 239: {region: 0x5f, script: 0x52, flags: 0x0}, + 240: {region: 0x52, script: 0x34, flags: 0x0}, + 241: {region: 0x8f, script: 0x52, flags: 0x0}, + 242: {region: 0x90, script: 0x52, flags: 0x0}, + 243: {region: 0x27, script: 0x8, flags: 0x0}, + 244: {region: 0xd0, script: 0x52, flags: 0x0}, + 245: {region: 0x76, script: 0x52, flags: 0x0}, + 246: {region: 0xce, script: 0x52, flags: 0x0}, + 247: {region: 0xd4, script: 0x52, flags: 0x0}, + 248: {region: 0x93, script: 0x52, flags: 0x0}, + 250: {region: 0xd4, script: 0x52, flags: 0x0}, + 251: {region: 0x52, script: 0xdf, flags: 0x0}, + 252: {region: 0x132, script: 0x52, flags: 0x0}, + 253: {region: 0x48, script: 0x52, flags: 0x0}, + 254: {region: 0xe5, script: 0x52, flags: 0x0}, + 255: {region: 0x93, script: 0x52, flags: 0x0}, + 256: {region: 0x104, script: 0x1e, flags: 0x0}, + 258: {region: 0x9b, script: 0x52, flags: 0x0}, + 259: {region: 0x9c, script: 0x52, flags: 0x0}, + 260: {region: 0x48, script: 0x17, flags: 0x0}, + 261: {region: 0x95, script: 0x37, flags: 0x0}, + 262: {region: 0x104, script: 0x52, flags: 0x0}, + 263: {region: 0xa0, script: 0x41, flags: 0x0}, + 264: {region: 0x9e, script: 0x52, flags: 0x0}, + 266: {region: 0x51, script: 0x52, flags: 0x0}, + 267: {region: 0x12e, script: 0x37, flags: 0x0}, + 268: {region: 0x12d, script: 0x52, flags: 0x0}, + 269: {region: 0xd9, script: 0x20, flags: 0x0}, + 270: {region: 0x62, script: 0x52, flags: 0x0}, + 271: {region: 0x93, script: 0x52, flags: 0x0}, + 272: {region: 0x93, script: 0x52, flags: 0x0}, + 273: {region: 0x7b, script: 0x29, flags: 0x0}, + 274: {region: 0x134, script: 0x1e, flags: 0x0}, + 275: {region: 0x66, script: 0x52, flags: 0x0}, + 276: {region: 0xc2, script: 0x52, flags: 0x0}, + 277: {region: 0xd4, script: 0x52, flags: 0x0}, + 278: {region: 0xa2, script: 0x52, flags: 0x0}, + 279: {region: 0xc1, script: 0x52, flags: 0x0}, + 280: {region: 0x104, script: 0x1e, flags: 0x0}, + 281: {region: 0xd4, script: 0x52, flags: 0x0}, + 282: {region: 0x161, script: 0x52, flags: 0x0}, + 283: {region: 0x12d, script: 0x52, flags: 0x0}, + 284: {region: 0x121, script: 0xd5, flags: 0x0}, + 285: {region: 0x59, script: 0x52, flags: 0x0}, + 286: {region: 0x51, script: 0x52, flags: 0x0}, + 287: {region: 0x4e, script: 0x52, flags: 0x0}, + 288: {region: 0x97, script: 0x20, flags: 0x0}, + 289: {region: 0x97, script: 0x20, flags: 0x0}, + 290: {region: 0x4a, script: 0x52, flags: 0x0}, + 291: {region: 0x93, script: 0x52, flags: 0x0}, + 292: {region: 0x40, script: 0x52, flags: 0x0}, + 293: {region: 0x97, script: 0x52, flags: 0x0}, + 294: {region: 0x52, script: 0xcc, flags: 0x0}, + 295: {region: 0x97, script: 0x20, flags: 0x0}, + 296: {region: 0xc1, 
script: 0x52, flags: 0x0}, + 297: {region: 0x97, script: 0x6b, flags: 0x0}, + 298: {region: 0xe6, script: 0x5, flags: 0x0}, + 299: {region: 0xa2, script: 0x52, flags: 0x0}, + 300: {region: 0x129, script: 0x52, flags: 0x0}, + 301: {region: 0xd0, script: 0x52, flags: 0x0}, + 302: {region: 0xad, script: 0x4f, flags: 0x0}, + 303: {region: 0x15, script: 0x6, flags: 0x1}, + 304: {region: 0x51, script: 0x52, flags: 0x0}, + 305: {region: 0x80, script: 0x52, flags: 0x0}, + 306: {region: 0xa2, script: 0x52, flags: 0x0}, + 307: {region: 0xa4, script: 0x46, flags: 0x0}, + 308: {region: 0x29, script: 0x52, flags: 0x0}, + 309: {region: 0x97, script: 0x4a, flags: 0x0}, + 310: {region: 0xa9, script: 0x4b, flags: 0x0}, + 311: {region: 0x104, script: 0x1e, flags: 0x0}, + 312: {region: 0x97, script: 0x20, flags: 0x0}, + 313: {region: 0x73, script: 0x52, flags: 0x0}, + 314: {region: 0xb2, script: 0x52, flags: 0x0}, + 316: {region: 0x104, script: 0x1e, flags: 0x0}, + 317: {region: 0x110, script: 0x52, flags: 0x0}, + 318: {region: 0xe5, script: 0x52, flags: 0x0}, + 319: {region: 0x104, script: 0x52, flags: 0x0}, + 320: {region: 0x97, script: 0x20, flags: 0x0}, + 321: {region: 0x97, script: 0x5, flags: 0x0}, + 322: {region: 0x12d, script: 0x52, flags: 0x0}, + 323: {region: 0x51, script: 0x52, flags: 0x0}, + 324: {region: 0x5f, script: 0x52, flags: 0x0}, + 325: {region: 0x1b, script: 0x3, flags: 0x1}, + 326: {region: 0x104, script: 0x1e, flags: 0x0}, + 327: {region: 0x104, script: 0x1e, flags: 0x0}, + 328: {region: 0x93, script: 0x52, flags: 0x0}, + 329: {region: 0xe6, script: 0x5, flags: 0x0}, + 330: {region: 0x79, script: 0x52, flags: 0x0}, + 331: {region: 0x121, script: 0xd5, flags: 0x0}, + 332: {region: 0xe6, script: 0x5, flags: 0x0}, + 333: {region: 0x1e, script: 0x5, flags: 0x1}, + 334: {region: 0x135, script: 0x52, flags: 0x0}, + 335: {region: 0x85, script: 0x56, flags: 0x0}, + 336: {region: 0x95, script: 0x37, flags: 0x0}, + 337: {region: 0x12d, script: 0x52, flags: 0x0}, + 338: {region: 0xe6, script: 0x5, flags: 0x0}, + 339: {region: 0x12f, script: 0x52, flags: 0x0}, + 340: {region: 0xb5, script: 0x52, flags: 0x0}, + 341: {region: 0x104, script: 0x1e, flags: 0x0}, + 342: {region: 0x93, script: 0x52, flags: 0x0}, + 343: {region: 0x52, script: 0xd5, flags: 0x0}, + 344: {region: 0x97, script: 0x54, flags: 0x0}, + 345: {region: 0x104, script: 0x1e, flags: 0x0}, + 346: {region: 0x12f, script: 0x52, flags: 0x0}, + 347: {region: 0xd7, script: 0x52, flags: 0x0}, + 348: {region: 0x23, script: 0x2, flags: 0x1}, + 349: {region: 0x9c, script: 0x52, flags: 0x0}, + 350: {region: 0x52, script: 0x58, flags: 0x0}, + 351: {region: 0x93, script: 0x52, flags: 0x0}, + 352: {region: 0x9a, script: 0x5, flags: 0x0}, + 353: {region: 0x132, script: 0x52, flags: 0x0}, + 354: {region: 0x97, script: 0xd0, flags: 0x0}, + 355: {region: 0x9c, script: 0x52, flags: 0x0}, + 356: {region: 0x4a, script: 0x52, flags: 0x0}, + 357: {region: 0xad, script: 0x4f, flags: 0x0}, + 358: {region: 0x4a, script: 0x52, flags: 0x0}, + 359: {region: 0x15f, script: 0x52, flags: 0x0}, + 360: {region: 0x9a, script: 0x5, flags: 0x0}, + 361: {region: 0xb4, script: 0x52, flags: 0x0}, + 362: {region: 0xb6, script: 0x52, flags: 0x0}, + 363: {region: 0x4a, script: 0x52, flags: 0x0}, + 364: {region: 0x4a, script: 0x52, flags: 0x0}, + 365: {region: 0xa2, script: 0x52, flags: 0x0}, + 366: {region: 0xa2, script: 0x52, flags: 0x0}, + 367: {region: 0x9a, script: 0x5, flags: 0x0}, + 368: {region: 0xb6, script: 0x52, flags: 0x0}, + 369: {region: 0x121, script: 0xd5, flags: 
0x0}, + 370: {region: 0x52, script: 0x34, flags: 0x0}, + 371: {region: 0x129, script: 0x52, flags: 0x0}, + 372: {region: 0x93, script: 0x52, flags: 0x0}, + 373: {region: 0x51, script: 0x52, flags: 0x0}, + 374: {region: 0x97, script: 0x20, flags: 0x0}, + 375: {region: 0x97, script: 0x20, flags: 0x0}, + 376: {region: 0x93, script: 0x52, flags: 0x0}, + 377: {region: 0x25, script: 0x3, flags: 0x1}, + 378: {region: 0xa2, script: 0x52, flags: 0x0}, + 379: {region: 0xcd, script: 0x52, flags: 0x0}, + 380: {region: 0x104, script: 0x1e, flags: 0x0}, + 381: {region: 0xe5, script: 0x52, flags: 0x0}, + 382: {region: 0x93, script: 0x52, flags: 0x0}, + 383: {region: 0x110, script: 0x52, flags: 0x0}, + 384: {region: 0xa2, script: 0x52, flags: 0x0}, + 385: {region: 0x121, script: 0x5, flags: 0x0}, + 386: {region: 0xca, script: 0x52, flags: 0x0}, + 387: {region: 0xbd, script: 0x52, flags: 0x0}, + 388: {region: 0xcf, script: 0x52, flags: 0x0}, + 389: {region: 0x51, script: 0x52, flags: 0x0}, + 390: {region: 0xd9, script: 0x20, flags: 0x0}, + 391: {region: 0x12d, script: 0x52, flags: 0x0}, + 392: {region: 0xbe, script: 0x52, flags: 0x0}, + 393: {region: 0xde, script: 0x52, flags: 0x0}, + 394: {region: 0x93, script: 0x52, flags: 0x0}, + 395: {region: 0x99, script: 0x36, flags: 0x0}, + 396: {region: 0xc0, script: 0x1e, flags: 0x0}, + 397: {region: 0x97, script: 0x64, flags: 0x0}, + 398: {region: 0x109, script: 0x52, flags: 0x0}, + 399: {region: 0x28, script: 0x3, flags: 0x1}, + 400: {region: 0x97, script: 0xe, flags: 0x0}, + 401: {region: 0xc2, script: 0x6b, flags: 0x0}, + 403: {region: 0x48, script: 0x52, flags: 0x0}, + 404: {region: 0x48, script: 0x52, flags: 0x0}, + 405: {region: 0x36, script: 0x52, flags: 0x0}, + 406: {region: 0x97, script: 0x20, flags: 0x0}, + 407: {region: 0xd9, script: 0x20, flags: 0x0}, + 408: {region: 0x104, script: 0x1e, flags: 0x0}, + 409: {region: 0x34, script: 0x68, flags: 0x0}, + 410: {region: 0x2b, script: 0x3, flags: 0x1}, + 411: {region: 0xc9, script: 0x52, flags: 0x0}, + 412: {region: 0x97, script: 0x20, flags: 0x0}, + 413: {region: 0x51, script: 0x52, flags: 0x0}, + 415: {region: 0x132, script: 0x52, flags: 0x0}, + 416: {region: 0xe6, script: 0x5, flags: 0x0}, + 417: {region: 0xc1, script: 0x52, flags: 0x0}, + 418: {region: 0x97, script: 0x20, flags: 0x0}, + 419: {region: 0x93, script: 0x52, flags: 0x0}, + 420: {region: 0x161, script: 0x52, flags: 0x0}, + 421: {region: 0xc2, script: 0x6b, flags: 0x0}, + 422: {region: 0x104, script: 0x1e, flags: 0x0}, + 423: {region: 0x12f, script: 0x52, flags: 0x0}, + 424: {region: 0x9a, script: 0x5d, flags: 0x0}, + 425: {region: 0x9a, script: 0x5, flags: 0x0}, + 426: {region: 0xdb, script: 0x52, flags: 0x0}, + 428: {region: 0x52, script: 0x34, flags: 0x0}, + 429: {region: 0x9c, script: 0x52, flags: 0x0}, + 430: {region: 0xd0, script: 0x52, flags: 0x0}, + 431: {region: 0xd8, script: 0x52, flags: 0x0}, + 432: {region: 0xcd, script: 0x52, flags: 0x0}, + 433: {region: 0x161, script: 0x52, flags: 0x0}, + 434: {region: 0xcf, script: 0x52, flags: 0x0}, + 435: {region: 0x5f, script: 0x52, flags: 0x0}, + 436: {region: 0xd9, script: 0x20, flags: 0x0}, + 437: {region: 0xd9, script: 0x20, flags: 0x0}, + 438: {region: 0xd0, script: 0x52, flags: 0x0}, + 439: {region: 0xcf, script: 0x52, flags: 0x0}, + 440: {region: 0xcd, script: 0x52, flags: 0x0}, + 441: {region: 0xcd, script: 0x52, flags: 0x0}, + 442: {region: 0x93, script: 0x52, flags: 0x0}, + 443: {region: 0xdd, script: 0x52, flags: 0x0}, + 444: {region: 0x97, script: 0x52, flags: 0x0}, + 445: {region: 
0xd7, script: 0x52, flags: 0x0}, + 446: {region: 0x51, script: 0x52, flags: 0x0}, + 447: {region: 0xd8, script: 0x52, flags: 0x0}, + 448: {region: 0x51, script: 0x52, flags: 0x0}, + 449: {region: 0xd8, script: 0x52, flags: 0x0}, + 450: {region: 0x121, script: 0x4e, flags: 0x0}, + 451: {region: 0x97, script: 0x20, flags: 0x0}, + 452: {region: 0x10a, script: 0xb7, flags: 0x0}, + 453: {region: 0x82, script: 0x70, flags: 0x0}, + 454: {region: 0x15e, script: 0x52, flags: 0x0}, + 455: {region: 0x48, script: 0x17, flags: 0x0}, + 456: {region: 0x15e, script: 0x52, flags: 0x0}, + 457: {region: 0x115, script: 0x52, flags: 0x0}, + 458: {region: 0x132, script: 0x52, flags: 0x0}, + 459: {region: 0x52, script: 0x52, flags: 0x0}, + 460: {region: 0xcc, script: 0x52, flags: 0x0}, + 461: {region: 0x12d, script: 0x52, flags: 0x0}, + 462: {region: 0x12f, script: 0x52, flags: 0x0}, + 463: {region: 0x7e, script: 0x52, flags: 0x0}, + 464: {region: 0x76, script: 0x52, flags: 0x0}, + 466: {region: 0x6e, script: 0x52, flags: 0x0}, + 467: {region: 0x97, script: 0x75, flags: 0x0}, + 468: {region: 0x7b, script: 0x1e, flags: 0x0}, + 469: {region: 0x132, script: 0x76, flags: 0x0}, + 470: {region: 0xc3, script: 0x74, flags: 0x0}, + 471: {region: 0x2e, script: 0x3, flags: 0x1}, + 472: {region: 0xe5, script: 0x52, flags: 0x0}, + 473: {region: 0x31, script: 0x2, flags: 0x1}, + 474: {region: 0xe5, script: 0x52, flags: 0x0}, + 475: {region: 0x2f, script: 0x52, flags: 0x0}, + 476: {region: 0xee, script: 0x52, flags: 0x0}, + 477: {region: 0x76, script: 0x52, flags: 0x0}, + 478: {region: 0xd4, script: 0x52, flags: 0x0}, + 479: {region: 0x132, script: 0x52, flags: 0x0}, + 480: {region: 0x48, script: 0x52, flags: 0x0}, + 481: {region: 0x9a, script: 0xdd, flags: 0x0}, + 482: {region: 0x5f, script: 0x52, flags: 0x0}, + 483: {region: 0xae, script: 0x7f, flags: 0x0}, + 485: {region: 0x97, script: 0x12, flags: 0x0}, + 486: {region: 0xa2, script: 0x52, flags: 0x0}, + 487: {region: 0xe7, script: 0x52, flags: 0x0}, + 488: {region: 0x9c, script: 0x52, flags: 0x0}, + 489: {region: 0x85, script: 0x2d, flags: 0x0}, + 490: {region: 0x73, script: 0x52, flags: 0x0}, + 491: {region: 0xe6, script: 0x45, flags: 0x0}, + 492: {region: 0x9a, script: 0x5, flags: 0x0}, + 493: {region: 0x1, script: 0x52, flags: 0x0}, + 494: {region: 0x23, script: 0x5, flags: 0x0}, + 495: {region: 0x40, script: 0x52, flags: 0x0}, + 496: {region: 0x78, script: 0x52, flags: 0x0}, + 497: {region: 0xe2, script: 0x52, flags: 0x0}, + 498: {region: 0x87, script: 0x52, flags: 0x0}, + 499: {region: 0x68, script: 0x52, flags: 0x0}, + 500: {region: 0x97, script: 0x20, flags: 0x0}, + 501: {region: 0x100, script: 0x52, flags: 0x0}, + 502: {region: 0x93, script: 0x52, flags: 0x0}, + 503: {region: 0x9c, script: 0x52, flags: 0x0}, + 504: {region: 0x97, script: 0x52, flags: 0x0}, + 505: {region: 0x33, script: 0x2, flags: 0x1}, + 506: {region: 0xd9, script: 0x20, flags: 0x0}, + 507: {region: 0x34, script: 0xe, flags: 0x0}, + 508: {region: 0x4d, script: 0x52, flags: 0x0}, + 509: {region: 0x70, script: 0x52, flags: 0x0}, + 510: {region: 0x4d, script: 0x52, flags: 0x0}, + 511: {region: 0x9a, script: 0x5, flags: 0x0}, + 512: {region: 0x10a, script: 0x52, flags: 0x0}, + 513: {region: 0x39, script: 0x52, flags: 0x0}, + 514: {region: 0xcf, script: 0x52, flags: 0x0}, + 515: {region: 0x102, script: 0x52, flags: 0x0}, + 516: {region: 0x93, script: 0x52, flags: 0x0}, + 517: {region: 0x12d, script: 0x52, flags: 0x0}, + 518: {region: 0x71, script: 0x52, flags: 0x0}, + 519: {region: 0x104, script: 0x1e, 
flags: 0x0}, + 520: {region: 0x12e, script: 0x1e, flags: 0x0}, + 521: {region: 0x107, script: 0x52, flags: 0x0}, + 522: {region: 0x105, script: 0x52, flags: 0x0}, + 523: {region: 0x12d, script: 0x52, flags: 0x0}, + 524: {region: 0xa0, script: 0x44, flags: 0x0}, + 525: {region: 0x97, script: 0x20, flags: 0x0}, + 526: {region: 0x7e, script: 0x52, flags: 0x0}, + 527: {region: 0x104, script: 0x1e, flags: 0x0}, + 528: {region: 0xa2, script: 0x52, flags: 0x0}, + 529: {region: 0x93, script: 0x52, flags: 0x0}, + 530: {region: 0x97, script: 0x52, flags: 0x0}, + 531: {region: 0x97, script: 0xbb, flags: 0x0}, + 532: {region: 0x12d, script: 0x52, flags: 0x0}, + 533: {region: 0x9c, script: 0x52, flags: 0x0}, + 534: {region: 0x97, script: 0x20, flags: 0x0}, + 535: {region: 0x9c, script: 0x52, flags: 0x0}, + 536: {region: 0x79, script: 0x52, flags: 0x0}, + 537: {region: 0x48, script: 0x52, flags: 0x0}, + 538: {region: 0x35, script: 0x4, flags: 0x1}, + 539: {region: 0x9c, script: 0x52, flags: 0x0}, + 540: {region: 0x9a, script: 0x5, flags: 0x0}, + 541: {region: 0xd8, script: 0x52, flags: 0x0}, + 542: {region: 0x4e, script: 0x52, flags: 0x0}, + 543: {region: 0xcf, script: 0x52, flags: 0x0}, + 544: {region: 0xcd, script: 0x52, flags: 0x0}, + 545: {region: 0xc1, script: 0x52, flags: 0x0}, + 546: {region: 0x4b, script: 0x52, flags: 0x0}, + 547: {region: 0x94, script: 0x72, flags: 0x0}, + 548: {region: 0xb4, script: 0x52, flags: 0x0}, + 550: {region: 0xb8, script: 0xd2, flags: 0x0}, + 551: {region: 0xc2, script: 0x6b, flags: 0x0}, + 552: {region: 0xb1, script: 0xc1, flags: 0x0}, + 553: {region: 0x6e, script: 0x52, flags: 0x0}, + 554: {region: 0x10f, script: 0x52, flags: 0x0}, + 555: {region: 0xe6, script: 0x5, flags: 0x0}, + 556: {region: 0x10d, script: 0x52, flags: 0x0}, + 557: {region: 0xe7, script: 0x52, flags: 0x0}, + 558: {region: 0x93, script: 0x52, flags: 0x0}, + 559: {region: 0x13f, script: 0x52, flags: 0x0}, + 560: {region: 0x10a, script: 0x52, flags: 0x0}, + 562: {region: 0x10a, script: 0x52, flags: 0x0}, + 563: {region: 0x70, script: 0x52, flags: 0x0}, + 564: {region: 0x95, script: 0xb8, flags: 0x0}, + 565: {region: 0x70, script: 0x52, flags: 0x0}, + 566: {region: 0x161, script: 0x52, flags: 0x0}, + 567: {region: 0xc1, script: 0x52, flags: 0x0}, + 568: {region: 0x113, script: 0x52, flags: 0x0}, + 569: {region: 0x121, script: 0xd5, flags: 0x0}, + 570: {region: 0x26, script: 0x52, flags: 0x0}, + 571: {region: 0x39, script: 0x5, flags: 0x1}, + 572: {region: 0x97, script: 0xc2, flags: 0x0}, + 573: {region: 0x114, script: 0x52, flags: 0x0}, + 574: {region: 0x112, script: 0x52, flags: 0x0}, + 575: {region: 0x97, script: 0x20, flags: 0x0}, + 576: {region: 0x15e, script: 0x52, flags: 0x0}, + 577: {region: 0x6c, script: 0x52, flags: 0x0}, + 578: {region: 0x15e, script: 0x52, flags: 0x0}, + 579: {region: 0x5f, script: 0x52, flags: 0x0}, + 580: {region: 0x93, script: 0x52, flags: 0x0}, + 581: {region: 0x12d, script: 0x52, flags: 0x0}, + 582: {region: 0x82, script: 0x52, flags: 0x0}, + 583: {region: 0x10a, script: 0x52, flags: 0x0}, + 584: {region: 0x12d, script: 0x52, flags: 0x0}, + 585: {region: 0x15c, script: 0x5, flags: 0x0}, + 586: {region: 0x4a, script: 0x52, flags: 0x0}, + 587: {region: 0x5f, script: 0x52, flags: 0x0}, + 588: {region: 0x97, script: 0x20, flags: 0x0}, + 589: {region: 0x93, script: 0x52, flags: 0x0}, + 590: {region: 0x34, script: 0xe, flags: 0x0}, + 591: {region: 0x99, script: 0xc5, flags: 0x0}, + 592: {region: 0xe7, script: 0x52, flags: 0x0}, + 593: {region: 0x97, script: 0xcd, flags: 
0x0}, + 594: {region: 0xd9, script: 0x20, flags: 0x0}, + 595: {region: 0xe5, script: 0x52, flags: 0x0}, + 596: {region: 0x97, script: 0x4a, flags: 0x0}, + 597: {region: 0x52, script: 0xcb, flags: 0x0}, + 598: {region: 0xd9, script: 0x20, flags: 0x0}, + 599: {region: 0xd9, script: 0x20, flags: 0x0}, + 600: {region: 0x97, script: 0xd0, flags: 0x0}, + 601: {region: 0x110, script: 0x52, flags: 0x0}, + 602: {region: 0x12f, script: 0x52, flags: 0x0}, + 603: {region: 0x124, script: 0x52, flags: 0x0}, + 604: {region: 0x3e, script: 0x3, flags: 0x1}, + 605: {region: 0x121, script: 0xd5, flags: 0x0}, + 606: {region: 0xd9, script: 0x20, flags: 0x0}, + 607: {region: 0xd9, script: 0x20, flags: 0x0}, + 608: {region: 0xd9, script: 0x20, flags: 0x0}, + 609: {region: 0x6e, script: 0x27, flags: 0x0}, + 610: {region: 0x6c, script: 0x27, flags: 0x0}, + 611: {region: 0xd4, script: 0x52, flags: 0x0}, + 612: {region: 0x125, script: 0x52, flags: 0x0}, + 613: {region: 0x123, script: 0x52, flags: 0x0}, + 614: {region: 0x31, script: 0x52, flags: 0x0}, + 615: {region: 0xd9, script: 0x20, flags: 0x0}, + 616: {region: 0xe5, script: 0x52, flags: 0x0}, + 617: {region: 0x31, script: 0x52, flags: 0x0}, + 618: {region: 0xd2, script: 0x52, flags: 0x0}, + 619: {region: 0x15e, script: 0x52, flags: 0x0}, + 620: {region: 0x127, script: 0x52, flags: 0x0}, + 621: {region: 0xcc, script: 0x52, flags: 0x0}, + 622: {region: 0xe4, script: 0x52, flags: 0x0}, + 623: {region: 0x129, script: 0x52, flags: 0x0}, + 624: {region: 0x129, script: 0x52, flags: 0x0}, + 625: {region: 0x12c, script: 0x52, flags: 0x0}, + 626: {region: 0x15e, script: 0x52, flags: 0x0}, + 627: {region: 0x85, script: 0x2d, flags: 0x0}, + 628: {region: 0xd9, script: 0x20, flags: 0x0}, + 629: {region: 0xe5, script: 0x52, flags: 0x0}, + 630: {region: 0x42, script: 0xd6, flags: 0x0}, + 631: {region: 0x104, script: 0x1e, flags: 0x0}, + 632: {region: 0x12f, script: 0x52, flags: 0x0}, + 633: {region: 0x121, script: 0xd5, flags: 0x0}, + 634: {region: 0x31, script: 0x52, flags: 0x0}, + 635: {region: 0xcc, script: 0x52, flags: 0x0}, + 636: {region: 0x12b, script: 0x52, flags: 0x0}, + 638: {region: 0xd2, script: 0x52, flags: 0x0}, + 639: {region: 0x52, script: 0xce, flags: 0x0}, + 640: {region: 0xe3, script: 0x52, flags: 0x0}, + 641: {region: 0x104, script: 0x1e, flags: 0x0}, + 642: {region: 0xb8, script: 0x52, flags: 0x0}, + 643: {region: 0x104, script: 0x1e, flags: 0x0}, + 644: {region: 0x41, script: 0x4, flags: 0x1}, + 645: {region: 0x11a, script: 0xd8, flags: 0x0}, + 646: {region: 0x12e, script: 0x1e, flags: 0x0}, + 647: {region: 0x73, script: 0x52, flags: 0x0}, + 648: {region: 0x29, script: 0x52, flags: 0x0}, + 650: {region: 0x45, script: 0x3, flags: 0x1}, + 651: {region: 0x97, script: 0xe, flags: 0x0}, + 652: {region: 0xe6, script: 0x5, flags: 0x0}, + 653: {region: 0x48, script: 0x4, flags: 0x1}, + 654: {region: 0xb2, script: 0xd9, flags: 0x0}, + 655: {region: 0x15e, script: 0x52, flags: 0x0}, + 656: {region: 0x9c, script: 0x52, flags: 0x0}, + 657: {region: 0x104, script: 0x52, flags: 0x0}, + 658: {region: 0x13b, script: 0x52, flags: 0x0}, + 659: {region: 0x119, script: 0x52, flags: 0x0}, + 660: {region: 0x35, script: 0x52, flags: 0x0}, + 661: {region: 0x5f, script: 0x52, flags: 0x0}, + 662: {region: 0xcf, script: 0x52, flags: 0x0}, + 663: {region: 0x1, script: 0x52, flags: 0x0}, + 664: {region: 0x104, script: 0x52, flags: 0x0}, + 665: {region: 0x69, script: 0x52, flags: 0x0}, + 666: {region: 0x12d, script: 0x52, flags: 0x0}, + 667: {region: 0x35, script: 0x52, flags: 0x0}, + 
668: {region: 0x4d, script: 0x52, flags: 0x0}, + 669: {region: 0x6e, script: 0x27, flags: 0x0}, + 670: {region: 0xe5, script: 0x52, flags: 0x0}, + 671: {region: 0x2e, script: 0x52, flags: 0x0}, + 672: {region: 0x97, script: 0xd0, flags: 0x0}, + 673: {region: 0x97, script: 0x20, flags: 0x0}, + 674: {region: 0x13d, script: 0x52, flags: 0x0}, + 675: {region: 0xa6, script: 0x5, flags: 0x0}, + 676: {region: 0x112, script: 0x52, flags: 0x0}, + 677: {region: 0x97, script: 0x20, flags: 0x0}, + 678: {region: 0x52, script: 0x34, flags: 0x0}, + 679: {region: 0x40, script: 0x52, flags: 0x0}, + 680: {region: 0x129, script: 0x18, flags: 0x0}, + 681: {region: 0x15e, script: 0x52, flags: 0x0}, + 682: {region: 0x129, script: 0x5a, flags: 0x0}, + 683: {region: 0x129, script: 0x5b, flags: 0x0}, + 684: {region: 0x7b, script: 0x29, flags: 0x0}, + 685: {region: 0x52, script: 0x5e, flags: 0x0}, + 686: {region: 0x109, script: 0x62, flags: 0x0}, + 687: {region: 0x106, script: 0x6c, flags: 0x0}, + 688: {region: 0x97, script: 0x20, flags: 0x0}, + 689: {region: 0x12f, script: 0x52, flags: 0x0}, + 690: {region: 0x9a, script: 0x82, flags: 0x0}, + 691: {region: 0x15b, script: 0xba, flags: 0x0}, + 692: {region: 0xd9, script: 0x20, flags: 0x0}, + 693: {region: 0xcf, script: 0x52, flags: 0x0}, + 694: {region: 0x73, script: 0x52, flags: 0x0}, + 695: {region: 0x51, script: 0x52, flags: 0x0}, + 696: {region: 0x51, script: 0x52, flags: 0x0}, + 697: {region: 0x1, script: 0x37, flags: 0x0}, + 698: {region: 0xd4, script: 0x52, flags: 0x0}, + 699: {region: 0x40, script: 0x52, flags: 0x0}, + 700: {region: 0xcd, script: 0x52, flags: 0x0}, + 701: {region: 0x4c, script: 0x3, flags: 0x1}, + 702: {region: 0x52, script: 0x52, flags: 0x0}, + 703: {region: 0x109, script: 0x52, flags: 0x0}, + 705: {region: 0xa6, script: 0x5, flags: 0x0}, + 706: {region: 0xd7, script: 0x52, flags: 0x0}, + 707: {region: 0xb8, script: 0xd2, flags: 0x0}, + 708: {region: 0x4f, script: 0x14, flags: 0x1}, + 709: {region: 0xce, script: 0x52, flags: 0x0}, + 710: {region: 0x15e, script: 0x52, flags: 0x0}, + 712: {region: 0x129, script: 0x52, flags: 0x0}, +} + +// likelyLangList holds lists info associated with likelyLang. 
+// Size: 396 bytes, 99 elements +var likelyLangList = [99]likelyScriptRegion{ + 0: {region: 0x9a, script: 0x7, flags: 0x0}, + 1: {region: 0x9f, script: 0x6d, flags: 0x2}, + 2: {region: 0x11a, script: 0x78, flags: 0x2}, + 3: {region: 0x31, script: 0x52, flags: 0x0}, + 4: {region: 0x99, script: 0x5, flags: 0x4}, + 5: {region: 0x9a, script: 0x5, flags: 0x4}, + 6: {region: 0x104, script: 0x1e, flags: 0x4}, + 7: {region: 0x9a, script: 0x5, flags: 0x2}, + 8: {region: 0x97, script: 0xe, flags: 0x0}, + 9: {region: 0x34, script: 0x16, flags: 0x2}, + 10: {region: 0x104, script: 0x1e, flags: 0x0}, + 11: {region: 0x37, script: 0x2a, flags: 0x2}, + 12: {region: 0x132, script: 0x52, flags: 0x0}, + 13: {region: 0x79, script: 0xbd, flags: 0x2}, + 14: {region: 0x112, script: 0x52, flags: 0x0}, + 15: {region: 0x82, script: 0x1, flags: 0x2}, + 16: {region: 0x5c, script: 0x1d, flags: 0x0}, + 17: {region: 0x85, script: 0x57, flags: 0x2}, + 18: {region: 0xd4, script: 0x52, flags: 0x0}, + 19: {region: 0x51, script: 0x5, flags: 0x4}, + 20: {region: 0x109, script: 0x5, flags: 0x4}, + 21: {region: 0xac, script: 0x1e, flags: 0x0}, + 22: {region: 0x23, script: 0x5, flags: 0x4}, + 23: {region: 0x52, script: 0x5, flags: 0x4}, + 24: {region: 0x9a, script: 0x5, flags: 0x4}, + 25: {region: 0xc3, script: 0x5, flags: 0x4}, + 26: {region: 0x52, script: 0x5, flags: 0x2}, + 27: {region: 0x129, script: 0x52, flags: 0x0}, + 28: {region: 0xae, script: 0x5, flags: 0x4}, + 29: {region: 0x99, script: 0x5, flags: 0x2}, + 30: {region: 0xa3, script: 0x1e, flags: 0x0}, + 31: {region: 0x52, script: 0x5, flags: 0x4}, + 32: {region: 0x129, script: 0x52, flags: 0x4}, + 33: {region: 0x52, script: 0x5, flags: 0x2}, + 34: {region: 0x129, script: 0x52, flags: 0x2}, + 35: {region: 0xd9, script: 0x20, flags: 0x0}, + 36: {region: 0x97, script: 0x55, flags: 0x2}, + 37: {region: 0x81, script: 0x52, flags: 0x0}, + 38: {region: 0x82, script: 0x70, flags: 0x4}, + 39: {region: 0x82, script: 0x70, flags: 0x2}, + 40: {region: 0xc3, script: 0x1e, flags: 0x0}, + 41: {region: 0x52, script: 0x66, flags: 0x4}, + 42: {region: 0x52, script: 0x66, flags: 0x2}, + 43: {region: 0xce, script: 0x52, flags: 0x0}, + 44: {region: 0x49, script: 0x5, flags: 0x4}, + 45: {region: 0x93, script: 0x5, flags: 0x4}, + 46: {region: 0x97, script: 0x2f, flags: 0x0}, + 47: {region: 0xe6, script: 0x5, flags: 0x4}, + 48: {region: 0xe6, script: 0x5, flags: 0x2}, + 49: {region: 0x9a, script: 0x7c, flags: 0x0}, + 50: {region: 0x52, script: 0x7d, flags: 0x2}, + 51: {region: 0xb8, script: 0xd2, flags: 0x0}, + 52: {region: 0xd7, script: 0x52, flags: 0x4}, + 53: {region: 0xe6, script: 0x5, flags: 0x0}, + 54: {region: 0x97, script: 0x20, flags: 0x2}, + 55: {region: 0x97, script: 0x47, flags: 0x2}, + 56: {region: 0x97, script: 0xc0, flags: 0x2}, + 57: {region: 0x103, script: 0x1e, flags: 0x0}, + 58: {region: 0xbb, script: 0x52, flags: 0x4}, + 59: {region: 0x102, script: 0x52, flags: 0x4}, + 60: {region: 0x104, script: 0x52, flags: 0x4}, + 61: {region: 0x129, script: 0x52, flags: 0x4}, + 62: {region: 0x122, script: 0x1e, flags: 0x0}, + 63: {region: 0xe6, script: 0x5, flags: 0x4}, + 64: {region: 0xe6, script: 0x5, flags: 0x2}, + 65: {region: 0x52, script: 0x5, flags: 0x0}, + 66: {region: 0xac, script: 0x1e, flags: 0x4}, + 67: {region: 0xc3, script: 0x1e, flags: 0x4}, + 68: {region: 0xac, script: 0x1e, flags: 0x2}, + 69: {region: 0x97, script: 0xe, flags: 0x0}, + 70: {region: 0xd9, script: 0x20, flags: 0x4}, + 71: {region: 0xd9, script: 0x20, flags: 0x2}, + 72: {region: 0x134, script: 0x52, 
flags: 0x0}, + 73: {region: 0x23, script: 0x5, flags: 0x4}, + 74: {region: 0x52, script: 0x1e, flags: 0x4}, + 75: {region: 0x23, script: 0x5, flags: 0x2}, + 76: {region: 0x8b, script: 0x35, flags: 0x0}, + 77: {region: 0x52, script: 0x34, flags: 0x4}, + 78: {region: 0x52, script: 0x34, flags: 0x2}, + 79: {region: 0x52, script: 0x34, flags: 0x0}, + 80: {region: 0x2e, script: 0x35, flags: 0x4}, + 81: {region: 0x3d, script: 0x35, flags: 0x4}, + 82: {region: 0x79, script: 0x35, flags: 0x4}, + 83: {region: 0x7c, script: 0x35, flags: 0x4}, + 84: {region: 0x8b, script: 0x35, flags: 0x4}, + 85: {region: 0x93, script: 0x35, flags: 0x4}, + 86: {region: 0xc4, script: 0x35, flags: 0x4}, + 87: {region: 0xce, script: 0x35, flags: 0x4}, + 88: {region: 0xe0, script: 0x35, flags: 0x4}, + 89: {region: 0xe3, script: 0x35, flags: 0x4}, + 90: {region: 0xe5, script: 0x35, flags: 0x4}, + 91: {region: 0x114, script: 0x35, flags: 0x4}, + 92: {region: 0x121, script: 0x35, flags: 0x4}, + 93: {region: 0x12c, script: 0x35, flags: 0x4}, + 94: {region: 0x132, script: 0x35, flags: 0x4}, + 95: {region: 0x13b, script: 0x35, flags: 0x4}, + 96: {region: 0x12c, script: 0x11, flags: 0x2}, + 97: {region: 0x12c, script: 0x30, flags: 0x2}, + 98: {region: 0x12c, script: 0x35, flags: 0x2}, +} + +type likelyLangScript struct { + lang uint16 + script uint8 + flags uint8 +} + +// likelyRegion is a lookup table, indexed by regionID, for the most likely +// languages and scripts given incomplete information. If more entries exist +// for a given regionID, lang and script are the index and size respectively +// of the list in likelyRegionList. +// TODO: exclude containers and user-definable regions from the list. +// Size: 1420 bytes, 355 elements +var likelyRegion = [355]likelyLangScript{ + 33: {lang: 0x61, script: 0x52, flags: 0x0}, + 34: {lang: 0x15, script: 0x5, flags: 0x0}, + 35: {lang: 0x0, script: 0x2, flags: 0x1}, + 38: {lang: 0x2, script: 0x2, flags: 0x1}, + 39: {lang: 0x4, script: 0x2, flags: 0x1}, + 41: {lang: 0x1ef, script: 0x52, flags: 0x0}, + 42: {lang: 0x0, script: 0x52, flags: 0x0}, + 43: {lang: 0x9d, script: 0x52, flags: 0x0}, + 44: {lang: 0x22f, script: 0x52, flags: 0x0}, + 45: {lang: 0x85, script: 0x52, flags: 0x0}, + 47: {lang: 0x1bd, script: 0x52, flags: 0x0}, + 48: {lang: 0x247, script: 0x52, flags: 0x0}, + 49: {lang: 0x24, script: 0x52, flags: 0x0}, + 50: {lang: 0x6, script: 0x2, flags: 0x1}, + 52: {lang: 0x4b, script: 0xe, flags: 0x0}, + 53: {lang: 0x1bd, script: 0x52, flags: 0x0}, + 54: {lang: 0xaf, script: 0x52, flags: 0x0}, + 55: {lang: 0x38, script: 0x1e, flags: 0x0}, + 56: {lang: 0x15, script: 0x5, flags: 0x0}, + 57: {lang: 0x201, script: 0x52, flags: 0x0}, + 58: {lang: 0xaf, script: 0x52, flags: 0x0}, + 59: {lang: 0xaf, script: 0x52, flags: 0x0}, + 61: {lang: 0x19a, script: 0x52, flags: 0x0}, + 62: {lang: 0x9d, script: 0x52, flags: 0x0}, + 63: {lang: 0x1db, script: 0x52, flags: 0x0}, + 64: {lang: 0x1ef, script: 0x52, flags: 0x0}, + 66: {lang: 0x8, script: 0x2, flags: 0x1}, + 68: {lang: 0x0, script: 0x52, flags: 0x0}, + 70: {lang: 0x2f, script: 0x1e, flags: 0x0}, + 72: {lang: 0x2b9, script: 0x37, flags: 0x2}, + 73: {lang: 0x19a, script: 0x5, flags: 0x2}, + 74: {lang: 0x248, script: 0x52, flags: 0x0}, + 75: {lang: 0xaf, script: 0x52, flags: 0x0}, + 76: {lang: 0xaf, script: 0x52, flags: 0x0}, + 77: {lang: 0x85, script: 0x52, flags: 0x0}, + 78: {lang: 0xaf, script: 0x52, flags: 0x0}, + 80: {lang: 0x9d, script: 0x52, flags: 0x0}, + 81: {lang: 0xaf, script: 0x52, flags: 0x0}, + 82: {lang: 0xa, script: 0x5, flags: 
0x1}, + 83: {lang: 0x9d, script: 0x52, flags: 0x0}, + 84: {lang: 0x0, script: 0x52, flags: 0x0}, + 85: {lang: 0x9d, script: 0x52, flags: 0x0}, + 88: {lang: 0x9d, script: 0x52, flags: 0x0}, + 89: {lang: 0x1ef, script: 0x52, flags: 0x0}, + 90: {lang: 0x1db, script: 0x52, flags: 0x0}, + 92: {lang: 0xf, script: 0x2, flags: 0x1}, + 93: {lang: 0x79, script: 0x52, flags: 0x0}, + 95: {lang: 0x85, script: 0x52, flags: 0x0}, + 97: {lang: 0x1, script: 0x52, flags: 0x0}, + 98: {lang: 0x80, script: 0x52, flags: 0x0}, + 100: {lang: 0x9d, script: 0x52, flags: 0x0}, + 102: {lang: 0x11, script: 0x2, flags: 0x1}, + 103: {lang: 0x9d, script: 0x52, flags: 0x0}, + 104: {lang: 0x9d, script: 0x52, flags: 0x0}, + 105: {lang: 0x9f, script: 0x52, flags: 0x0}, + 106: {lang: 0x15, script: 0x5, flags: 0x0}, + 107: {lang: 0x15, script: 0x5, flags: 0x0}, + 108: {lang: 0x261, script: 0x27, flags: 0x0}, + 109: {lang: 0x9d, script: 0x52, flags: 0x0}, + 110: {lang: 0x13, script: 0x2, flags: 0x1}, + 112: {lang: 0xa8, script: 0x52, flags: 0x0}, + 113: {lang: 0xe2, script: 0x20, flags: 0x2}, + 116: {lang: 0xad, script: 0x52, flags: 0x0}, + 118: {lang: 0xaf, script: 0x52, flags: 0x0}, + 120: {lang: 0xaf, script: 0x52, flags: 0x0}, + 121: {lang: 0x15, script: 0x2, flags: 0x1}, + 123: {lang: 0x17, script: 0x3, flags: 0x1}, + 124: {lang: 0xaf, script: 0x52, flags: 0x0}, + 126: {lang: 0xd, script: 0x52, flags: 0x0}, + 128: {lang: 0x131, script: 0x52, flags: 0x0}, + 130: {lang: 0xaf, script: 0x52, flags: 0x0}, + 131: {lang: 0xaf, script: 0x52, flags: 0x0}, + 132: {lang: 0x9d, script: 0x52, flags: 0x0}, + 133: {lang: 0x1a, script: 0x2, flags: 0x1}, + 134: {lang: 0x0, script: 0x52, flags: 0x0}, + 135: {lang: 0x9d, script: 0x52, flags: 0x0}, + 137: {lang: 0x1ef, script: 0x52, flags: 0x0}, + 139: {lang: 0x2c4, script: 0x35, flags: 0x0}, + 140: {lang: 0x0, script: 0x52, flags: 0x0}, + 141: {lang: 0x9d, script: 0x52, flags: 0x0}, + 142: {lang: 0xee, script: 0x52, flags: 0x0}, + 143: {lang: 0xf1, script: 0x52, flags: 0x0}, + 144: {lang: 0xf2, script: 0x52, flags: 0x0}, + 146: {lang: 0x9d, script: 0x52, flags: 0x0}, + 147: {lang: 0x1c, script: 0x2, flags: 0x1}, + 149: {lang: 0xe0, script: 0x37, flags: 0x0}, + 151: {lang: 0x1e, script: 0x3, flags: 0x1}, + 153: {lang: 0x15, script: 0x5, flags: 0x0}, + 154: {lang: 0x21, script: 0x2, flags: 0x1}, + 155: {lang: 0x102, script: 0x52, flags: 0x0}, + 156: {lang: 0x103, script: 0x52, flags: 0x0}, + 159: {lang: 0x15, script: 0x5, flags: 0x0}, + 160: {lang: 0x107, script: 0x41, flags: 0x0}, + 162: {lang: 0x248, script: 0x52, flags: 0x0}, + 163: {lang: 0x14d, script: 0x1e, flags: 0x0}, + 164: {lang: 0x23, script: 0x3, flags: 0x1}, + 166: {lang: 0x26, script: 0x2, flags: 0x1}, + 168: {lang: 0x136, script: 0x4b, flags: 0x0}, + 169: {lang: 0x136, script: 0x4b, flags: 0x0}, + 170: {lang: 0x15, script: 0x5, flags: 0x0}, + 172: {lang: 0x207, script: 0x1e, flags: 0x0}, + 173: {lang: 0x28, script: 0x2, flags: 0x1}, + 174: {lang: 0x15, script: 0x5, flags: 0x0}, + 176: {lang: 0x85, script: 0x52, flags: 0x0}, + 177: {lang: 0x228, script: 0xc1, flags: 0x0}, + 179: {lang: 0x242, script: 0x52, flags: 0x0}, + 180: {lang: 0x169, script: 0x52, flags: 0x0}, + 181: {lang: 0xaf, script: 0x52, flags: 0x0}, + 182: {lang: 0x170, script: 0x52, flags: 0x0}, + 183: {lang: 0x15, script: 0x5, flags: 0x0}, + 184: {lang: 0x2a, script: 0x2, flags: 0x1}, + 185: {lang: 0xaf, script: 0x52, flags: 0x0}, + 186: {lang: 0x2c, script: 0x2, flags: 0x1}, + 187: {lang: 0x23b, script: 0x52, flags: 0x0}, + 188: {lang: 0xaf, script: 0x52, flags: 
0x0}, + 189: {lang: 0x183, script: 0x52, flags: 0x0}, + 192: {lang: 0x2e, script: 0x2, flags: 0x1}, + 193: {lang: 0x49, script: 0x52, flags: 0x0}, + 194: {lang: 0x30, script: 0x2, flags: 0x1}, + 195: {lang: 0x32, script: 0x2, flags: 0x1}, + 196: {lang: 0x34, script: 0x2, flags: 0x1}, + 198: {lang: 0xaf, script: 0x52, flags: 0x0}, + 199: {lang: 0x36, script: 0x2, flags: 0x1}, + 201: {lang: 0x19b, script: 0x52, flags: 0x0}, + 202: {lang: 0x38, script: 0x3, flags: 0x1}, + 203: {lang: 0x90, script: 0xd4, flags: 0x0}, + 205: {lang: 0x9d, script: 0x52, flags: 0x0}, + 206: {lang: 0x19a, script: 0x52, flags: 0x0}, + 207: {lang: 0x1ef, script: 0x52, flags: 0x0}, + 208: {lang: 0xa, script: 0x52, flags: 0x0}, + 209: {lang: 0xaf, script: 0x52, flags: 0x0}, + 210: {lang: 0xdc, script: 0x52, flags: 0x0}, + 212: {lang: 0xdc, script: 0x5, flags: 0x2}, + 214: {lang: 0x9d, script: 0x52, flags: 0x0}, + 215: {lang: 0x1bd, script: 0x52, flags: 0x0}, + 216: {lang: 0x1af, script: 0x52, flags: 0x0}, + 217: {lang: 0x1b4, script: 0x20, flags: 0x0}, + 223: {lang: 0x15, script: 0x5, flags: 0x0}, + 224: {lang: 0x9d, script: 0x52, flags: 0x0}, + 226: {lang: 0x9d, script: 0x52, flags: 0x0}, + 227: {lang: 0xaf, script: 0x52, flags: 0x0}, + 228: {lang: 0x26e, script: 0x52, flags: 0x0}, + 229: {lang: 0xaa, script: 0x52, flags: 0x0}, + 230: {lang: 0x3b, script: 0x3, flags: 0x1}, + 231: {lang: 0x3e, script: 0x2, flags: 0x1}, + 232: {lang: 0xaf, script: 0x52, flags: 0x0}, + 234: {lang: 0x9d, script: 0x52, flags: 0x0}, + 235: {lang: 0x15, script: 0x5, flags: 0x0}, + 236: {lang: 0x1ef, script: 0x52, flags: 0x0}, + 238: {lang: 0x1dc, script: 0x52, flags: 0x0}, + 239: {lang: 0xca, script: 0x52, flags: 0x0}, + 241: {lang: 0x15, script: 0x5, flags: 0x0}, + 256: {lang: 0xaf, script: 0x52, flags: 0x0}, + 258: {lang: 0x40, script: 0x2, flags: 0x1}, + 259: {lang: 0x23b, script: 0x1e, flags: 0x0}, + 260: {lang: 0x42, script: 0x2, flags: 0x1}, + 261: {lang: 0x20a, script: 0x52, flags: 0x0}, + 262: {lang: 0x15, script: 0x5, flags: 0x0}, + 264: {lang: 0xaf, script: 0x52, flags: 0x0}, + 265: {lang: 0x15, script: 0x5, flags: 0x0}, + 266: {lang: 0x44, script: 0x2, flags: 0x1}, + 269: {lang: 0x22c, script: 0x52, flags: 0x0}, + 270: {lang: 0x1af, script: 0x52, flags: 0x0}, + 271: {lang: 0x46, script: 0x2, flags: 0x1}, + 273: {lang: 0x103, script: 0x52, flags: 0x0}, + 274: {lang: 0xaf, script: 0x52, flags: 0x0}, + 275: {lang: 0x238, script: 0x52, flags: 0x0}, + 276: {lang: 0x1bd, script: 0x52, flags: 0x0}, + 278: {lang: 0x1ef, script: 0x52, flags: 0x0}, + 280: {lang: 0x9d, script: 0x52, flags: 0x0}, + 282: {lang: 0x48, script: 0x2, flags: 0x1}, + 286: {lang: 0xaf, script: 0x52, flags: 0x0}, + 287: {lang: 0xaf, script: 0x52, flags: 0x0}, + 288: {lang: 0xaf, script: 0x52, flags: 0x0}, + 289: {lang: 0x4a, script: 0x3, flags: 0x1}, + 290: {lang: 0x4d, script: 0x2, flags: 0x1}, + 291: {lang: 0x265, script: 0x52, flags: 0x0}, + 292: {lang: 0x1ef, script: 0x52, flags: 0x0}, + 293: {lang: 0x264, script: 0x52, flags: 0x0}, + 294: {lang: 0x4f, script: 0x2, flags: 0x1}, + 295: {lang: 0x26c, script: 0x52, flags: 0x0}, + 297: {lang: 0x51, script: 0x4, flags: 0x1}, + 299: {lang: 0x27c, script: 0x52, flags: 0x0}, + 300: {lang: 0x55, script: 0x2, flags: 0x1}, + 301: {lang: 0x248, script: 0x52, flags: 0x0}, + 302: {lang: 0x57, script: 0x3, flags: 0x1}, + 303: {lang: 0x248, script: 0x52, flags: 0x0}, + 306: {lang: 0x2b9, script: 0x37, flags: 0x2}, + 307: {lang: 0x9d, script: 0x52, flags: 0x0}, + 308: {lang: 0x28d, script: 0x52, flags: 0x0}, + 309: {lang: 0x103, 
script: 0x52, flags: 0x0}, + 312: {lang: 0x9d, script: 0x52, flags: 0x0}, + 315: {lang: 0x292, script: 0x52, flags: 0x0}, + 316: {lang: 0x41, script: 0x52, flags: 0x0}, + 317: {lang: 0xaf, script: 0x52, flags: 0x0}, + 319: {lang: 0x22f, script: 0x52, flags: 0x0}, + 330: {lang: 0x5a, script: 0x2, flags: 0x1}, + 347: {lang: 0x15, script: 0x5, flags: 0x0}, + 348: {lang: 0x5c, script: 0x2, flags: 0x1}, + 353: {lang: 0x236, script: 0x52, flags: 0x0}, +} + +// likelyRegionList holds lists info associated with likelyRegion. +// Size: 376 bytes, 94 elements +var likelyRegionList = [94]likelyLangScript{ + 0: {lang: 0xa4, script: 0x5, flags: 0x0}, + 1: {lang: 0x264, script: 0x52, flags: 0x0}, + 2: {lang: 0x23a, script: 0x52, flags: 0x0}, + 3: {lang: 0x18c, script: 0x1e, flags: 0x0}, + 4: {lang: 0xf3, script: 0x8, flags: 0x0}, + 5: {lang: 0x145, script: 0x52, flags: 0x0}, + 6: {lang: 0x54, script: 0x52, flags: 0x0}, + 7: {lang: 0x23b, script: 0x1e, flags: 0x0}, + 8: {lang: 0x93, script: 0xd6, flags: 0x0}, + 9: {lang: 0x1b4, script: 0x20, flags: 0x0}, + 10: {lang: 0x2c4, script: 0x34, flags: 0x0}, + 11: {lang: 0x284, script: 0x5, flags: 0x0}, + 12: {lang: 0x2bd, script: 0x35, flags: 0x0}, + 13: {lang: 0x2be, script: 0x52, flags: 0x0}, + 14: {lang: 0x157, script: 0xd5, flags: 0x0}, + 15: {lang: 0x9a, script: 0x2d, flags: 0x0}, + 16: {lang: 0x26f, script: 0x52, flags: 0x0}, + 17: {lang: 0x15, script: 0x5, flags: 0x0}, + 18: {lang: 0xaf, script: 0x52, flags: 0x0}, + 19: {lang: 0x11, script: 0x27, flags: 0x0}, + 20: {lang: 0x9b, script: 0x52, flags: 0x0}, + 21: {lang: 0x141, script: 0x5, flags: 0x2}, + 22: {lang: 0x2b9, script: 0x37, flags: 0x2}, + 23: {lang: 0x111, script: 0x29, flags: 0x0}, + 24: {lang: 0x2, script: 0x1e, flags: 0x0}, + 25: {lang: 0x145, script: 0x52, flags: 0x0}, + 26: {lang: 0x9a, script: 0x2d, flags: 0x0}, + 27: {lang: 0x18c, script: 0x1e, flags: 0x0}, + 28: {lang: 0xf8, script: 0x52, flags: 0x0}, + 29: {lang: 0x19a, script: 0x5, flags: 0x0}, + 30: {lang: 0xe1, script: 0x20, flags: 0x0}, + 31: {lang: 0x28c, script: 0x5, flags: 0x0}, + 32: {lang: 0x129, script: 0x6b, flags: 0x0}, + 33: {lang: 0xa4, script: 0x5, flags: 0x0}, + 34: {lang: 0x264, script: 0x52, flags: 0x0}, + 35: {lang: 0x133, script: 0x46, flags: 0x0}, + 36: {lang: 0x6d, script: 0x5, flags: 0x0}, + 37: {lang: 0x11c, script: 0xd5, flags: 0x0}, + 38: {lang: 0x15, script: 0x5, flags: 0x0}, + 39: {lang: 0xaf, script: 0x52, flags: 0x0}, + 40: {lang: 0x165, script: 0x4f, flags: 0x0}, + 41: {lang: 0x11c, script: 0xd5, flags: 0x0}, + 42: {lang: 0x15, script: 0x5, flags: 0x0}, + 43: {lang: 0xaf, script: 0x52, flags: 0x0}, + 44: {lang: 0x203, script: 0x52, flags: 0x0}, + 45: {lang: 0x286, script: 0x1e, flags: 0x0}, + 46: {lang: 0x18c, script: 0x1e, flags: 0x0}, + 47: {lang: 0x23a, script: 0x52, flags: 0x0}, + 48: {lang: 0x1a5, script: 0x6b, flags: 0x0}, + 49: {lang: 0x114, script: 0x52, flags: 0x0}, + 50: {lang: 0x18f, script: 0x1e, flags: 0x0}, + 51: {lang: 0x12f, script: 0x5, flags: 0x0}, + 52: {lang: 0x2c4, script: 0x35, flags: 0x0}, + 53: {lang: 0x1ef, script: 0x52, flags: 0x0}, + 54: {lang: 0x15, script: 0x5, flags: 0x0}, + 55: {lang: 0xaf, script: 0x52, flags: 0x0}, + 56: {lang: 0x182, script: 0x52, flags: 0x0}, + 57: {lang: 0x28c, script: 0x5, flags: 0x0}, + 58: {lang: 0x40, script: 0x20, flags: 0x0}, + 59: {lang: 0x28c, script: 0x5, flags: 0x0}, + 60: {lang: 0x28c, script: 0x5, flags: 0x0}, + 61: {lang: 0x58, script: 0x20, flags: 0x0}, + 62: {lang: 0x1e7, script: 0x52, flags: 0x0}, + 63: {lang: 0x2f, script: 0x1e, flags: 
0x0}, + 64: {lang: 0x203, script: 0x52, flags: 0x0}, + 65: {lang: 0x38, script: 0x1e, flags: 0x0}, + 66: {lang: 0x207, script: 0x1e, flags: 0x0}, + 67: {lang: 0x13f, script: 0x52, flags: 0x0}, + 68: {lang: 0x247, script: 0x52, flags: 0x0}, + 69: {lang: 0x2b9, script: 0x37, flags: 0x0}, + 70: {lang: 0x22a, script: 0x52, flags: 0x0}, + 71: {lang: 0x286, script: 0x1e, flags: 0x0}, + 72: {lang: 0x15, script: 0x5, flags: 0x0}, + 73: {lang: 0xaf, script: 0x52, flags: 0x0}, + 74: {lang: 0x25d, script: 0xd5, flags: 0x0}, + 75: {lang: 0x181, script: 0x5, flags: 0x0}, + 76: {lang: 0x191, script: 0x6b, flags: 0x0}, + 77: {lang: 0x25c, script: 0x1e, flags: 0x0}, + 78: {lang: 0xa4, script: 0x5, flags: 0x0}, + 79: {lang: 0x15, script: 0x5, flags: 0x0}, + 80: {lang: 0xaf, script: 0x52, flags: 0x0}, + 81: {lang: 0x26f, script: 0x52, flags: 0x0}, + 82: {lang: 0x24, script: 0x5, flags: 0x0}, + 83: {lang: 0x118, script: 0x1e, flags: 0x0}, + 84: {lang: 0x3b, script: 0x2d, flags: 0x0}, + 85: {lang: 0x2c4, script: 0x35, flags: 0x0}, + 86: {lang: 0x271, script: 0x52, flags: 0x0}, + 87: {lang: 0x286, script: 0x1e, flags: 0x0}, + 88: {lang: 0x2b9, script: 0x37, flags: 0x0}, + 89: {lang: 0x1e7, script: 0x52, flags: 0x0}, + 90: {lang: 0x23a, script: 0x52, flags: 0x0}, + 91: {lang: 0x23b, script: 0x1e, flags: 0x0}, + 92: {lang: 0xaf, script: 0x52, flags: 0x0}, + 93: {lang: 0x249, script: 0x5, flags: 0x0}, +} + +type likelyTag struct { + lang uint16 + region uint16 + script uint8 +} + +// Size: 192 bytes, 32 elements +var likelyRegionGroup = [32]likelyTag{ + 1: {lang: 0x9b, region: 0xd4, script: 0x52}, + 2: {lang: 0x9b, region: 0x132, script: 0x52}, + 3: {lang: 0x1ef, region: 0x40, script: 0x52}, + 4: {lang: 0x9b, region: 0x2e, script: 0x52}, + 5: {lang: 0x9b, region: 0xd4, script: 0x52}, + 6: {lang: 0x9d, region: 0xcd, script: 0x52}, + 7: {lang: 0x248, region: 0x12d, script: 0x52}, + 8: {lang: 0x15, region: 0x6a, script: 0x5}, + 9: {lang: 0x248, region: 0x4a, script: 0x52}, + 10: {lang: 0x9b, region: 0x15e, script: 0x52}, + 11: {lang: 0x9b, region: 0x132, script: 0x52}, + 12: {lang: 0x9b, region: 0x132, script: 0x52}, + 13: {lang: 0x9d, region: 0x58, script: 0x52}, + 14: {lang: 0x2c4, region: 0x52, script: 0x34}, + 15: {lang: 0xe1, region: 0x97, script: 0x20}, + 16: {lang: 0xf8, region: 0x93, script: 0x52}, + 17: {lang: 0x103, region: 0x9c, script: 0x52}, + 18: {lang: 0x9b, region: 0x2e, script: 0x52}, + 19: {lang: 0x9b, region: 0xe4, script: 0x52}, + 20: {lang: 0x9b, region: 0x88, script: 0x52}, + 21: {lang: 0x22f, region: 0x13f, script: 0x52}, + 22: {lang: 0x2c4, region: 0x52, script: 0x34}, + 23: {lang: 0x28d, region: 0x134, script: 0x52}, + 24: {lang: 0x15, region: 0x106, script: 0x5}, + 25: {lang: 0x207, region: 0x104, script: 0x1e}, + 26: {lang: 0x207, region: 0x104, script: 0x1e}, + 27: {lang: 0x9b, region: 0x79, script: 0x52}, + 28: {lang: 0x85, region: 0x5f, script: 0x52}, + 29: {lang: 0x9d, region: 0x1e, script: 0x52}, + 30: {lang: 0x9b, region: 0x98, script: 0x52}, + 31: {lang: 0x9b, region: 0x79, script: 0x52}, +} + +type mutualIntelligibility struct { + want uint16 + have uint16 + conf uint8 + oneway bool +} + +type scriptIntelligibility struct { + lang uint16 + want uint8 + have uint8 + conf uint8 +} + +// matchLang holds pairs of langIDs of base languages that are typically +// mutually intelligible. Each pair is associated with a confidence and +// whether the intelligibility goes one or both ways. 
+// Size: 708 bytes, 118 elements +var matchLang = [118]mutualIntelligibility{ + 0: {want: 0x1c1, have: 0x1af, conf: 0x2, oneway: false}, + 1: {want: 0x145, have: 0x6f, conf: 0x2, oneway: false}, + 2: {want: 0xee, have: 0x54, conf: 0x2, oneway: false}, + 3: {want: 0x225, have: 0x54, conf: 0x2, oneway: false}, + 4: {want: 0x23b, have: 0x54, conf: 0x2, oneway: false}, + 5: {want: 0x225, have: 0xee, conf: 0x2, oneway: false}, + 6: {want: 0x23b, have: 0xee, conf: 0x2, oneway: false}, + 7: {want: 0x225, have: 0x23b, conf: 0x2, oneway: false}, + 8: {want: 0x241, have: 0x1, conf: 0x2, oneway: false}, + 9: {want: 0xd2, have: 0x85, conf: 0x2, oneway: true}, + 10: {want: 0x154, have: 0x85, conf: 0x2, oneway: true}, + 11: {want: 0x80, have: 0x1c1, conf: 0x2, oneway: false}, + 12: {want: 0x80, have: 0x1af, conf: 0x2, oneway: false}, + 13: {want: 0x6f, have: 0x145, conf: 0x2, oneway: false}, + 14: {want: 0x2, have: 0x207, conf: 0x2, oneway: true}, + 15: {want: 0x5, have: 0x9b, conf: 0x2, oneway: true}, + 16: {want: 0xa, have: 0x1bd, conf: 0x2, oneway: true}, + 17: {want: 0xd, have: 0x9b, conf: 0x2, oneway: true}, + 18: {want: 0x23, have: 0x9d, conf: 0x2, oneway: true}, + 19: {want: 0x24, have: 0x207, conf: 0x2, oneway: true}, + 20: {want: 0x2f, have: 0x207, conf: 0x2, oneway: true}, + 21: {want: 0x31, have: 0x9b, conf: 0x2, oneway: true}, + 22: {want: 0x3c, have: 0xe1, conf: 0x2, oneway: true}, + 23: {want: 0x4b, have: 0x9b, conf: 0x2, oneway: true}, + 24: {want: 0x50, have: 0xaf, conf: 0x2, oneway: true}, + 25: {want: 0x65, have: 0xaa, conf: 0x2, oneway: true}, + 26: {want: 0x6c, have: 0x9b, conf: 0x2, oneway: true}, + 27: {want: 0x6f, have: 0x15, conf: 0x2, oneway: true}, + 28: {want: 0x70, have: 0xaf, conf: 0x2, oneway: true}, + 29: {want: 0x78, have: 0xaf, conf: 0x2, oneway: true}, + 30: {want: 0x7f, have: 0x9b, conf: 0x2, oneway: true}, + 31: {want: 0x95, have: 0x9b, conf: 0x2, oneway: true}, + 32: {want: 0x9c, have: 0x9b, conf: 0x2, oneway: true}, + 33: {want: 0x9f, have: 0xa8, conf: 0x2, oneway: true}, + 34: {want: 0xa1, have: 0x9d, conf: 0x2, oneway: true}, + 35: {want: 0xad, have: 0x80, conf: 0x2, oneway: true}, + 36: {want: 0xb9, have: 0x1bd, conf: 0x2, oneway: true}, + 37: {want: 0xba, have: 0x9b, conf: 0x2, oneway: true}, + 38: {want: 0xbb, have: 0x9b, conf: 0x2, oneway: true}, + 39: {want: 0xc2, have: 0x9b, conf: 0x2, oneway: true}, + 40: {want: 0xc8, have: 0x9d, conf: 0x2, oneway: true}, + 41: {want: 0xca, have: 0x9d, conf: 0x2, oneway: true}, + 42: {want: 0xd3, have: 0xe1, conf: 0x2, oneway: true}, + 43: {want: 0xdc, have: 0x9b, conf: 0x2, oneway: true}, + 44: {want: 0xde, have: 0x9b, conf: 0x2, oneway: true}, + 45: {want: 0xf1, have: 0xaf, conf: 0x2, oneway: true}, + 46: {want: 0xf3, have: 0x207, conf: 0x2, oneway: true}, + 47: {want: 0xf5, have: 0x9b, conf: 0x2, oneway: true}, + 48: {want: 0xfa, have: 0x9b, conf: 0x2, oneway: true}, + 49: {want: 0x102, have: 0x9b, conf: 0x2, oneway: true}, + 50: {want: 0x10f, have: 0xf8, conf: 0x2, oneway: true}, + 51: {want: 0x111, have: 0x9b, conf: 0x2, oneway: true}, + 52: {want: 0x122, have: 0xaf, conf: 0x2, oneway: true}, + 53: {want: 0x12f, have: 0x207, conf: 0x2, oneway: true}, + 54: {want: 0x133, have: 0x9b, conf: 0x2, oneway: true}, + 55: {want: 0x135, have: 0x9b, conf: 0x2, oneway: true}, + 56: {want: 0x13d, have: 0x9b, conf: 0x2, oneway: true}, + 57: {want: 0x145, have: 0x26f, conf: 0x2, oneway: true}, + 58: {want: 0x14d, have: 0x207, conf: 0x2, oneway: true}, + 59: {want: 0x14e, have: 0x103, conf: 0x2, oneway: true}, + 60: {want: 0x15a, 
have: 0x9b, conf: 0x2, oneway: true}, + 61: {want: 0x164, have: 0xaf, conf: 0x2, oneway: true}, + 62: {want: 0x165, have: 0x9b, conf: 0x2, oneway: true}, + 63: {want: 0x167, have: 0x9b, conf: 0x2, oneway: true}, + 64: {want: 0x16c, have: 0xaf, conf: 0x2, oneway: true}, + 65: {want: 0x182, have: 0x9b, conf: 0x2, oneway: true}, + 66: {want: 0x183, have: 0xaf, conf: 0x2, oneway: true}, + 67: {want: 0x189, have: 0x9b, conf: 0x2, oneway: true}, + 68: {want: 0x18c, have: 0x38, conf: 0x2, oneway: true}, + 69: {want: 0x18d, have: 0x9b, conf: 0x2, oneway: true}, + 70: {want: 0x18f, have: 0x207, conf: 0x2, oneway: true}, + 71: {want: 0x196, have: 0xe1, conf: 0x2, oneway: true}, + 72: {want: 0x19a, have: 0xf8, conf: 0x2, oneway: true}, + 73: {want: 0x19b, have: 0x9b, conf: 0x2, oneway: true}, + 74: {want: 0x1a5, have: 0x9b, conf: 0x2, oneway: true}, + 75: {want: 0x1b4, have: 0x9b, conf: 0x2, oneway: true}, + 76: {want: 0x1bf, have: 0x1af, conf: 0x2, oneway: false}, + 77: {want: 0x1bf, have: 0x1c1, conf: 0x2, oneway: true}, + 78: {want: 0x1c8, have: 0x9b, conf: 0x2, oneway: true}, + 79: {want: 0x1cc, have: 0x9b, conf: 0x2, oneway: true}, + 80: {want: 0x1ce, have: 0x9b, conf: 0x2, oneway: true}, + 81: {want: 0x1d0, have: 0xaf, conf: 0x2, oneway: true}, + 82: {want: 0x1d2, have: 0x9b, conf: 0x2, oneway: true}, + 83: {want: 0x1d3, have: 0x9b, conf: 0x2, oneway: true}, + 84: {want: 0x1d7, have: 0x9b, conf: 0x2, oneway: true}, + 85: {want: 0x1de, have: 0x9b, conf: 0x2, oneway: true}, + 86: {want: 0x1ee, have: 0x9b, conf: 0x2, oneway: true}, + 87: {want: 0x1f1, have: 0x9d, conf: 0x2, oneway: true}, + 88: {want: 0x1fc, have: 0x85, conf: 0x2, oneway: true}, + 89: {want: 0x201, have: 0x9b, conf: 0x2, oneway: true}, + 90: {want: 0x20a, have: 0xaf, conf: 0x2, oneway: true}, + 91: {want: 0x20d, have: 0xe1, conf: 0x2, oneway: true}, + 92: {want: 0x21a, have: 0x9b, conf: 0x2, oneway: true}, + 93: {want: 0x228, have: 0x9b, conf: 0x2, oneway: true}, + 94: {want: 0x236, have: 0x9b, conf: 0x2, oneway: true}, + 95: {want: 0x238, have: 0x9b, conf: 0x2, oneway: true}, + 96: {want: 0x23a, have: 0x9b, conf: 0x2, oneway: true}, + 97: {want: 0x242, have: 0x9b, conf: 0x2, oneway: true}, + 98: {want: 0x244, have: 0xf8, conf: 0x2, oneway: true}, + 99: {want: 0x248, have: 0x9b, conf: 0x2, oneway: true}, + 100: {want: 0x251, have: 0x9b, conf: 0x2, oneway: true}, + 101: {want: 0x258, have: 0x9b, conf: 0x2, oneway: true}, + 102: {want: 0x25c, have: 0x207, conf: 0x2, oneway: true}, + 103: {want: 0x261, have: 0x9b, conf: 0x2, oneway: true}, + 104: {want: 0x264, have: 0x207, conf: 0x2, oneway: true}, + 105: {want: 0x361a, have: 0x9b, conf: 0x2, oneway: true}, + 106: {want: 0x26b, have: 0x9b, conf: 0x2, oneway: true}, + 107: {want: 0x26c, have: 0x9b, conf: 0x2, oneway: true}, + 108: {want: 0x277, have: 0x207, conf: 0x2, oneway: true}, + 109: {want: 0x27b, have: 0x9b, conf: 0x2, oneway: true}, + 110: {want: 0x284, have: 0x2c4, conf: 0x2, oneway: true}, + 111: {want: 0x28c, have: 0x9b, conf: 0x2, oneway: true}, + 112: {want: 0x28d, have: 0x207, conf: 0x2, oneway: true}, + 113: {want: 0x2a4, have: 0xaf, conf: 0x2, oneway: true}, + 114: {want: 0x2a9, have: 0x9b, conf: 0x2, oneway: true}, + 115: {want: 0x2b9, have: 0x9b, conf: 0x2, oneway: true}, + 116: {want: 0x2ba, have: 0x9b, conf: 0x2, oneway: true}, + 117: {want: 0x2c6, have: 0x9b, conf: 0x2, oneway: true}, +} + +// matchScript holds pairs of scriptIDs where readers of one script +// can typically also read the other. Each is associated with a confidence. 
+// Size: 24 bytes, 4 elements +var matchScript = [4]scriptIntelligibility{ + 0: {lang: 0x23b, want: 0x52, have: 0x1e, conf: 0x2}, + 1: {lang: 0x23b, want: 0x1e, have: 0x52, conf: 0x2}, + 2: {lang: 0x0, want: 0x34, have: 0x35, conf: 0x1}, + 3: {lang: 0x0, want: 0x35, have: 0x34, conf: 0x1}, +} + +// Size: 128 bytes, 32 elements +var regionContainment = [32]uint32{ + 0xffffffff, 0x000007a2, 0x00003044, 0x00000008, + 0x403c0010, 0x00000020, 0x00000040, 0x00000080, + 0x00000100, 0x00000200, 0x00000400, 0x2000384c, + 0x00001000, 0x00002000, 0x00004000, 0x00008000, + 0x00010000, 0x00020000, 0x00040000, 0x00080000, + 0x00100000, 0x00200000, 0x01c1c000, 0x00800000, + 0x01000000, 0x1e020000, 0x04000000, 0x08000000, + 0x10000000, 0x20002048, 0x40000000, 0x80000000, +} + +// regionInclusion maps region identifiers to sets of regions in regionInclusionBits, +// where each set holds all groupings that are directly connected in a region +// containment graph. +// Size: 355 bytes, 355 elements +var regionInclusion = [355]uint8{ + // Entry 0 - 3F + 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, + 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x25, 0x22, 0x23, + 0x25, 0x26, 0x21, 0x27, 0x28, 0x29, 0x2a, 0x25, + 0x2b, 0x23, 0x22, 0x25, 0x24, 0x29, 0x2c, 0x2d, + 0x23, 0x2e, 0x2c, 0x25, 0x2f, 0x30, 0x27, 0x25, + // Entry 40 - 7F + 0x27, 0x25, 0x24, 0x30, 0x21, 0x31, 0x32, 0x33, + 0x2f, 0x21, 0x26, 0x26, 0x26, 0x34, 0x2c, 0x28, + 0x27, 0x26, 0x35, 0x27, 0x21, 0x33, 0x22, 0x20, + 0x25, 0x2c, 0x25, 0x21, 0x36, 0x2d, 0x34, 0x29, + 0x21, 0x2e, 0x37, 0x25, 0x25, 0x20, 0x38, 0x38, + 0x27, 0x37, 0x38, 0x38, 0x2e, 0x39, 0x2e, 0x1f, + 0x37, 0x3a, 0x27, 0x3b, 0x2b, 0x20, 0x29, 0x34, + 0x26, 0x37, 0x25, 0x23, 0x27, 0x2b, 0x2c, 0x22, + // Entry 80 - BF + 0x2f, 0x2c, 0x2c, 0x25, 0x26, 0x39, 0x21, 0x33, + 0x3b, 0x2c, 0x27, 0x35, 0x21, 0x33, 0x39, 0x25, + 0x2d, 0x20, 0x38, 0x30, 0x37, 0x23, 0x2b, 0x24, + 0x21, 0x23, 0x24, 0x2b, 0x39, 0x2b, 0x25, 0x23, + 0x35, 0x20, 0x2e, 0x3c, 0x30, 0x3b, 0x2e, 0x25, + 0x35, 0x35, 0x23, 0x25, 0x3c, 0x30, 0x23, 0x25, + 0x34, 0x24, 0x2c, 0x31, 0x37, 0x29, 0x37, 0x38, + 0x38, 0x34, 0x32, 0x22, 0x25, 0x2e, 0x3b, 0x20, + // Entry C0 - FF + 0x22, 0x2c, 0x30, 0x35, 0x35, 0x3b, 0x25, 0x2c, + 0x25, 0x39, 0x2e, 0x24, 0x2e, 0x33, 0x30, 0x2e, + 0x31, 0x3a, 0x2c, 0x2a, 0x2c, 0x20, 0x33, 0x29, + 0x2b, 0x24, 0x20, 0x3b, 0x23, 0x28, 0x2a, 0x23, + 0x33, 0x20, 0x27, 0x28, 0x3a, 0x30, 0x24, 0x2d, + 0x2f, 0x28, 0x25, 0x23, 0x39, 0x20, 0x3b, 0x27, + 0x20, 0x23, 0x20, 0x20, 0x1e, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + // Entry 100 - 13F + 0x2e, 0x20, 0x2d, 0x22, 0x32, 0x2e, 0x23, 0x3a, + 0x2e, 0x38, 0x37, 0x30, 0x2c, 0x39, 0x2b, 0x2d, + 0x2c, 0x22, 0x2c, 0x2e, 0x27, 0x2e, 0x26, 0x32, + 0x33, 0x25, 0x23, 0x31, 0x21, 0x25, 0x26, 0x21, + 0x2c, 0x30, 0x3c, 0x28, 0x30, 0x3c, 0x38, 0x28, + 0x30, 0x23, 0x25, 0x28, 0x35, 0x2e, 0x32, 0x2e, + 0x20, 0x21, 0x2f, 0x27, 0x3c, 0x22, 0x25, 0x20, + 0x27, 0x25, 0x25, 0x30, 0x3a, 0x28, 0x20, 0x28, + // Entry 140 - 17F + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x22, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x23, 0x23, 0x2e, 0x22, 0x31, 0x2e, + 0x26, 0x2e, 0x20, +} + +// regionInclusionBits is an array of bit vectors where every vector represents +// a set of region groupings. 
These sets are used to compute the distance +// between two regions for the purpose of language matching. +// Size: 288 bytes, 72 elements +var regionInclusionBits = [72]uint32{ + // Entry 0 - 1F + 0x82400813, 0x000007a3, 0x00003844, 0x20000808, + 0x403c0011, 0x00000022, 0x20000844, 0x00000082, + 0x00000102, 0x00000202, 0x00000402, 0x2000384d, + 0x00001804, 0x20002804, 0x00404000, 0x00408000, + 0x00410000, 0x02020000, 0x00040010, 0x00080010, + 0x00100010, 0x00200010, 0x01c1c001, 0x00c00000, + 0x01400000, 0x1e020001, 0x06000000, 0x0a000000, + 0x12000000, 0x20002848, 0x40000010, 0x80000001, + // Entry 20 - 3F + 0x00000001, 0x40000000, 0x00020000, 0x01000000, + 0x00008000, 0x00002000, 0x00000200, 0x00000008, + 0x00200000, 0x90000000, 0x00040000, 0x08000000, + 0x00000020, 0x84000000, 0x00000080, 0x00001000, + 0x00010000, 0x00000400, 0x04000000, 0x00000040, + 0x10000000, 0x00004000, 0x81000000, 0x88000000, + 0x00000100, 0x80020000, 0x00080000, 0x00100000, + 0x00800000, 0xffffffff, 0x82400fb3, 0xc27c0813, + // Entry 40 - 5F + 0xa240385f, 0x83c1c813, 0x9e420813, 0x92000001, + 0x86000001, 0x81400001, 0x8a000001, 0x82020001, +} + +// regionInclusionNext marks, for each entry in regionInclusionBits, the set of +// all groups that are reachable from the groups set in the respective entry. +// Size: 72 bytes, 72 elements +var regionInclusionNext = [72]uint8{ + // Entry 0 - 3F + 0x3d, 0x3e, 0x0b, 0x0b, 0x3f, 0x01, 0x0b, 0x01, + 0x01, 0x01, 0x01, 0x40, 0x0b, 0x0b, 0x16, 0x16, + 0x16, 0x19, 0x04, 0x04, 0x04, 0x04, 0x41, 0x16, + 0x16, 0x42, 0x19, 0x19, 0x19, 0x0b, 0x04, 0x00, + 0x00, 0x1e, 0x11, 0x18, 0x0f, 0x0d, 0x09, 0x03, + 0x15, 0x43, 0x12, 0x1b, 0x05, 0x44, 0x07, 0x0c, + 0x10, 0x0a, 0x1a, 0x06, 0x1c, 0x0e, 0x45, 0x46, + 0x08, 0x47, 0x13, 0x14, 0x17, 0x3d, 0x3d, 0x3d, + // Entry 40 - 7F + 0x3d, 0x3d, 0x3d, 0x42, 0x42, 0x41, 0x42, 0x42, +} + +type parentRel struct { + lang uint16 + script uint8 + maxScript uint8 + toRegion uint16 + fromRegion []uint16 +} + +// Size: 412 bytes, 5 elements +var parents = [5]parentRel{ + 0: {lang: 0x9b, script: 0x0, maxScript: 0x52, toRegion: 0x1, fromRegion: []uint16{0x1a, 0x24, 0x25, 0x2e, 0x33, 0x35, 0x3c, 0x41, 0x45, 0x47, 0x48, 0x49, 0x4f, 0x51, 0x5b, 0x5c, 0x60, 0x63, 0x6c, 0x71, 0x72, 0x73, 0x79, 0x7a, 0x7d, 0x7e, 0x7f, 0x81, 0x8a, 0x8b, 0x94, 0x95, 0x96, 0x97, 0x98, 0x9d, 0x9e, 0xa2, 0xa5, 0xa7, 0xab, 0xaf, 0xb2, 0xb3, 0xbd, 0xc4, 0xc8, 0xc9, 0xca, 0xcc, 0xce, 0xd0, 0xd3, 0xd4, 0xdb, 0xdd, 0xde, 0xe4, 0xe5, 0xe6, 0xe9, 0xee, 0x105, 0x107, 0x108, 0x109, 0x10b, 0x10c, 0x110, 0x115, 0x119, 0x11b, 0x11d, 0x123, 0x127, 0x12a, 0x12b, 0x12d, 0x12f, 0x136, 0x139, 0x13c, 0x13f, 0x15e, 0x15f, 0x161}}, + 1: {lang: 0x9b, script: 0x0, maxScript: 0x52, toRegion: 0x1a, fromRegion: []uint16{0x2d, 0x4d, 0x5f, 0x62, 0x70, 0xd7, 0x10a, 0x10d}}, + 2: {lang: 0x9d, script: 0x0, maxScript: 0x52, toRegion: 0x1e, fromRegion: []uint16{0x2b, 0x3e, 0x40, 0x50, 0x53, 0x55, 0x58, 0x64, 0x68, 0x87, 0x8d, 0xcd, 0xd6, 0xe0, 0xe2, 0xea, 0xef, 0x118, 0x132, 0x133, 0x138}}, + 3: {lang: 0x1ef, script: 0x0, maxScript: 0x52, toRegion: 0xec, fromRegion: []uint16{0x29, 0x4d, 0x59, 0x84, 0x89, 0xb5, 0xc4, 0xcf, 0x116, 0x124}}, + 4: {lang: 0x2c4, script: 0x35, maxScript: 0x35, toRegion: 0x8b, fromRegion: []uint16{0xc4}}, +} + +// Total table size 20315 bytes (19KiB); checksum: C16EF251 diff --git a/vendor/golang.org/x/text/language/tags.go b/vendor/golang.org/x/text/language/tags.go new file mode 100644 index 000000000..de30155a2 --- /dev/null +++ b/vendor/golang.org/x/text/language/tags.go @@ -0,0 +1,143 
@@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +// TODO: Various sets of commonly use tags and regions. + +// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed. +// It simplifies safe initialization of Tag values. +func MustParse(s string) Tag { + t, err := Parse(s) + if err != nil { + panic(err) + } + return t +} + +// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed. +// It simplifies safe initialization of Tag values. +func (c CanonType) MustParse(s string) Tag { + t, err := c.Parse(s) + if err != nil { + panic(err) + } + return t +} + +// MustParseBase is like ParseBase, but panics if the given base cannot be parsed. +// It simplifies safe initialization of Base values. +func MustParseBase(s string) Base { + b, err := ParseBase(s) + if err != nil { + panic(err) + } + return b +} + +// MustParseScript is like ParseScript, but panics if the given script cannot be +// parsed. It simplifies safe initialization of Script values. +func MustParseScript(s string) Script { + scr, err := ParseScript(s) + if err != nil { + panic(err) + } + return scr +} + +// MustParseRegion is like ParseRegion, but panics if the given region cannot be +// parsed. It simplifies safe initialization of Region values. +func MustParseRegion(s string) Region { + r, err := ParseRegion(s) + if err != nil { + panic(err) + } + return r +} + +var ( + und = Tag{} + + Und Tag = Tag{} + + Afrikaans Tag = Tag{lang: _af} // af + Amharic Tag = Tag{lang: _am} // am + Arabic Tag = Tag{lang: _ar} // ar + ModernStandardArabic Tag = Tag{lang: _ar, region: _001} // ar-001 + Azerbaijani Tag = Tag{lang: _az} // az + Bulgarian Tag = Tag{lang: _bg} // bg + Bengali Tag = Tag{lang: _bn} // bn + Catalan Tag = Tag{lang: _ca} // ca + Czech Tag = Tag{lang: _cs} // cs + Danish Tag = Tag{lang: _da} // da + German Tag = Tag{lang: _de} // de + Greek Tag = Tag{lang: _el} // el + English Tag = Tag{lang: _en} // en + AmericanEnglish Tag = Tag{lang: _en, region: _US} // en-US + BritishEnglish Tag = Tag{lang: _en, region: _GB} // en-GB + Spanish Tag = Tag{lang: _es} // es + EuropeanSpanish Tag = Tag{lang: _es, region: _ES} // es-ES + LatinAmericanSpanish Tag = Tag{lang: _es, region: _419} // es-419 + Estonian Tag = Tag{lang: _et} // et + Persian Tag = Tag{lang: _fa} // fa + Finnish Tag = Tag{lang: _fi} // fi + Filipino Tag = Tag{lang: _fil} // fil + French Tag = Tag{lang: _fr} // fr + CanadianFrench Tag = Tag{lang: _fr, region: _CA} // fr-CA + Gujarati Tag = Tag{lang: _gu} // gu + Hebrew Tag = Tag{lang: _he} // he + Hindi Tag = Tag{lang: _hi} // hi + Croatian Tag = Tag{lang: _hr} // hr + Hungarian Tag = Tag{lang: _hu} // hu + Armenian Tag = Tag{lang: _hy} // hy + Indonesian Tag = Tag{lang: _id} // id + Icelandic Tag = Tag{lang: _is} // is + Italian Tag = Tag{lang: _it} // it + Japanese Tag = Tag{lang: _ja} // ja + Georgian Tag = Tag{lang: _ka} // ka + Kazakh Tag = Tag{lang: _kk} // kk + Khmer Tag = Tag{lang: _km} // km + Kannada Tag = Tag{lang: _kn} // kn + Korean Tag = Tag{lang: _ko} // ko + Kirghiz Tag = Tag{lang: _ky} // ky + Lao Tag = Tag{lang: _lo} // lo + Lithuanian Tag = Tag{lang: _lt} // lt + Latvian Tag = Tag{lang: _lv} // lv + Macedonian Tag = Tag{lang: _mk} // mk + Malayalam Tag = Tag{lang: _ml} // ml + Mongolian Tag = Tag{lang: _mn} // mn + Marathi Tag = Tag{lang: _mr} // mr + Malay Tag = Tag{lang: _ms} // ms + Burmese Tag = Tag{lang: _my} // my + 
Nepali Tag = Tag{lang: _ne} // ne + Dutch Tag = Tag{lang: _nl} // nl + Norwegian Tag = Tag{lang: _no} // no + Punjabi Tag = Tag{lang: _pa} // pa + Polish Tag = Tag{lang: _pl} // pl + Portuguese Tag = Tag{lang: _pt} // pt + BrazilianPortuguese Tag = Tag{lang: _pt, region: _BR} // pt-BR + EuropeanPortuguese Tag = Tag{lang: _pt, region: _PT} // pt-PT + Romanian Tag = Tag{lang: _ro} // ro + Russian Tag = Tag{lang: _ru} // ru + Sinhala Tag = Tag{lang: _si} // si + Slovak Tag = Tag{lang: _sk} // sk + Slovenian Tag = Tag{lang: _sl} // sl + Albanian Tag = Tag{lang: _sq} // sq + Serbian Tag = Tag{lang: _sr} // sr + SerbianLatin Tag = Tag{lang: _sr, script: _Latn} // sr-Latn + Swedish Tag = Tag{lang: _sv} // sv + Swahili Tag = Tag{lang: _sw} // sw + Tamil Tag = Tag{lang: _ta} // ta + Telugu Tag = Tag{lang: _te} // te + Thai Tag = Tag{lang: _th} // th + Turkish Tag = Tag{lang: _tr} // tr + Ukrainian Tag = Tag{lang: _uk} // uk + Urdu Tag = Tag{lang: _ur} // ur + Uzbek Tag = Tag{lang: _uz} // uz + Vietnamese Tag = Tag{lang: _vi} // vi + Chinese Tag = Tag{lang: _zh} // zh + SimplifiedChinese Tag = Tag{lang: _zh, script: _Hans} // zh-Hans + TraditionalChinese Tag = Tag{lang: _zh, script: _Hant} // zh-Hant + Zulu Tag = Tag{lang: _zu} // zu +) diff --git a/vendor/golang.org/x/text/runes/cond.go b/vendor/golang.org/x/text/runes/cond.go new file mode 100644 index 000000000..df7aa02db --- /dev/null +++ b/vendor/golang.org/x/text/runes/cond.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runes + +import ( + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// Note: below we pass invalid UTF-8 to the tIn and tNotIn transformers as is. +// This is done for various reasons: +// - To retain the semantics of the Nop transformer: if input is passed to a Nop +// one would expect it to be unchanged. +// - It would be very expensive to pass a converted RuneError to a transformer: +// a transformer might need more source bytes after RuneError, meaning that +// the only way to pass it safely is to create a new buffer and manage the +// intermingling of RuneErrors and normal input. +// - Many transformers leave ill-formed UTF-8 as is, so this is not +// inconsistent. Generally ill-formed UTF-8 is only replaced if it is a +// logical consequence of the operation (as for Map) or if it otherwise would +// pose security concerns (as for Remove). +// - An alternative would be to return an error on ill-formed UTF-8, but this +// would be inconsistent with other operations. + +// If returns a transformer that applies tIn to consecutive runes for which +// s.Contains(r) and tNotIn to consecutive runes for which !s.Contains(r). Reset +// is called on tIn and tNotIn at the start of each run. A Nop transformer will +// substitute a nil value passed to tIn or tNotIn. Invalid UTF-8 is translated +// to RuneError to determine which transformer to apply, but is passed as is to +// the respective transformer. 
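As a usage sketch for the language/tags.go hunk above (not part of the vendored files): MustParse is meant for literal inputs, and the predefined Tag constants combine naturally with the package's matcher. language.NewMatcher and Matcher.Match are assumed to come from other files of the same vendored package, and the printed values are indicative only.

package main

import (
	"fmt"

	"golang.org/x/text/language"
)

// MustParse is meant for literals: a typo panics at startup instead of
// silently producing an undefined tag.
var serbianLatin = language.MustParse("sr-Latn")

func main() {
	// The predefined constants avoid parsing altogether; the first entry
	// doubles as the fallback.
	supported := []language.Tag{
		language.AmericanEnglish, // en-US
		language.German,          // de
		serbianLatin,             // sr-Latn
	}
	m := language.NewMatcher(supported)

	// Pick the best supported tag for a user preferring Serbian, then Danish.
	tag, index, confidence := m.Match(language.MustParse("sr"), language.Danish)
	fmt.Println(tag, index, confidence)
}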
+func If(s Set, tIn, tNotIn transform.Transformer) Transformer { + if tIn == nil && tNotIn == nil { + return Transformer{transform.Nop} + } + if tIn == nil { + tIn = transform.Nop + } + if tNotIn == nil { + tNotIn = transform.Nop + } + sIn, ok := tIn.(transform.SpanningTransformer) + if !ok { + sIn = dummySpan{tIn} + } + sNotIn, ok := tNotIn.(transform.SpanningTransformer) + if !ok { + sNotIn = dummySpan{tNotIn} + } + + a := &cond{ + tIn: sIn, + tNotIn: sNotIn, + f: s.Contains, + } + a.Reset() + return Transformer{a} +} + +type dummySpan struct{ transform.Transformer } + +func (d dummySpan) Span(src []byte, atEOF bool) (n int, err error) { + return 0, transform.ErrEndOfSpan +} + +type cond struct { + tIn, tNotIn transform.SpanningTransformer + f func(rune) bool + check func(rune) bool // current check to perform + t transform.SpanningTransformer // current transformer to use +} + +// Reset implements transform.Transformer. +func (t *cond) Reset() { + t.check = t.is + t.t = t.tIn + t.t.Reset() // notIn will be reset on first usage. +} + +func (t *cond) is(r rune) bool { + if t.f(r) { + return true + } + t.check = t.isNot + t.t = t.tNotIn + t.tNotIn.Reset() + return false +} + +func (t *cond) isNot(r rune) bool { + if !t.f(r) { + return true + } + t.check = t.is + t.t = t.tIn + t.tIn.Reset() + return false +} + +// This implementation of Span doesn't help all too much, but it needs to be +// there to satisfy this package's Transformer interface. +// TODO: there are certainly room for improvements, though. For example, if +// t.t == transform.Nop (which will a common occurrence) it will save a bundle +// to special-case that loop. +func (t *cond) Span(src []byte, atEOF bool) (n int, err error) { + p := 0 + for n < len(src) && err == nil { + // Don't process too much at a time as the Spanner that will be + // called on this block may terminate early. + const maxChunk = 4096 + max := len(src) + if v := n + maxChunk; v < max { + max = v + } + atEnd := false + size := 0 + current := t.t + for ; p < max; p += size { + r := rune(src[p]) + if r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[p:]); size == 1 { + if !atEOF && !utf8.FullRune(src[p:]) { + err = transform.ErrShortSrc + break + } + } + if !t.check(r) { + // The next rune will be the start of a new run. + atEnd = true + break + } + } + n2, err2 := current.Span(src[n:p], atEnd || (atEOF && p == len(src))) + n += n2 + if err2 != nil { + return n, err2 + } + // At this point either err != nil or t.check will pass for the rune at p. + p = n + size + } + return n, err +} + +func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + p := 0 + for nSrc < len(src) && err == nil { + // Don't process too much at a time, as the work might be wasted if the + // destination buffer isn't large enough to hold the result or a + // transform returns an error early. + const maxChunk = 4096 + max := len(src) + if n := nSrc + maxChunk; n < len(src) { + max = n + } + atEnd := false + size := 0 + current := t.t + for ; p < max; p += size { + r := rune(src[p]) + if r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[p:]); size == 1 { + if !atEOF && !utf8.FullRune(src[p:]) { + err = transform.ErrShortSrc + break + } + } + if !t.check(r) { + // The next rune will be the start of a new run. 
+ atEnd = true + break + } + } + nDst2, nSrc2, err2 := current.Transform(dst[nDst:], src[nSrc:p], atEnd || (atEOF && p == len(src))) + nDst += nDst2 + nSrc += nSrc2 + if err2 != nil { + return nDst, nSrc, err2 + } + // At this point either err != nil or t.check will pass for the rune at p. + p = nSrc + size + } + return nDst, nSrc, err +} diff --git a/vendor/golang.org/x/text/runes/runes.go b/vendor/golang.org/x/text/runes/runes.go new file mode 100644 index 000000000..6a3195cf3 --- /dev/null +++ b/vendor/golang.org/x/text/runes/runes.go @@ -0,0 +1,355 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package runes provide transforms for UTF-8 encoded text. +package runes + +import ( + "unicode" + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// A Set is a collection of runes. +type Set interface { + // Contains returns true if r is contained in the set. + Contains(r rune) bool +} + +type setFunc func(rune) bool + +func (s setFunc) Contains(r rune) bool { + return s(r) +} + +// Note: using funcs here instead of wrapping types result in cleaner +// documentation and a smaller API. + +// In creates a Set with a Contains method that returns true for all runes in +// the given RangeTable. +func In(rt *unicode.RangeTable) Set { + return setFunc(func(r rune) bool { return unicode.Is(rt, r) }) +} + +// In creates a Set with a Contains method that returns true for all runes not +// in the given RangeTable. +func NotIn(rt *unicode.RangeTable) Set { + return setFunc(func(r rune) bool { return !unicode.Is(rt, r) }) +} + +// Predicate creates a Set with a Contains method that returns f(r). +func Predicate(f func(rune) bool) Set { + return setFunc(f) +} + +// Transformer implements the transform.Transformer interface. +type Transformer struct { + t transform.SpanningTransformer +} + +func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + return t.t.Transform(dst, src, atEOF) +} + +func (t Transformer) Span(b []byte, atEOF bool) (n int, err error) { + return t.t.Span(b, atEOF) +} + +func (t Transformer) Reset() { t.t.Reset() } + +// Bytes returns a new byte slice with the result of converting b using t. It +// calls Reset on t. It returns nil if any error was found. This can only happen +// if an error-producing Transformer is passed to If. +func (t Transformer) Bytes(b []byte) []byte { + b, _, err := transform.Bytes(t, b) + if err != nil { + return nil + } + return b +} + +// String returns a string with the result of converting s using t. It calls +// Reset on t. It returns the empty string if any error was found. This can only +// happen if an error-producing Transformer is passed to If. +func (t Transformer) String(s string) string { + s, _, err := transform.String(t, s) + if err != nil { + return "" + } + return s +} + +// TODO: +// - Copy: copying strings and bytes in whole-rune units. +// - Validation (maybe) +// - Well-formed-ness (maybe) + +const runeErrorString = string(utf8.RuneError) + +// Remove returns a Transformer that removes runes r for which s.Contains(r). +// Illegal input bytes are replaced by RuneError before being passed to f. +func Remove(s Set) Transformer { + if f, ok := s.(setFunc); ok { + // This little trick cuts the running time of BenchmarkRemove for sets + // created by Predicate roughly in half. + // TODO: special-case RangeTables as well. 
+ return Transformer{remove(f)} + } + return Transformer{remove(s.Contains)} +} + +// TODO: remove transform.RemoveFunc. + +type remove func(r rune) bool + +func (remove) Reset() {} + +// Span implements transform.Spanner. +func (t remove) Span(src []byte, atEOF bool) (n int, err error) { + for r, size := rune(0), 0; n < len(src); { + if r = rune(src[n]); r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[n:]); size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[n:]) { + err = transform.ErrShortSrc + } else { + err = transform.ErrEndOfSpan + } + break + } + if t(r) { + err = transform.ErrEndOfSpan + break + } + n += size + } + return +} + +// Transform implements transform.Transformer. +func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for r, size := rune(0), 0; nSrc < len(src); { + if r = rune(src[nSrc]); r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + // We replace illegal bytes with RuneError. Not doing so might + // otherwise turn a sequence of invalid UTF-8 into valid UTF-8. + // The resulting byte sequence may subsequently contain runes + // for which t(r) is true that were passed unnoticed. + if !t(utf8.RuneError) { + if nDst+3 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = runeErrorString[0] + dst[nDst+1] = runeErrorString[1] + dst[nDst+2] = runeErrorString[2] + nDst += 3 + } + nSrc++ + continue + } + if t(r) { + nSrc += size + continue + } + if nDst+size > len(dst) { + err = transform.ErrShortDst + break + } + for i := 0; i < size; i++ { + dst[nDst] = src[nSrc] + nDst++ + nSrc++ + } + } + return +} + +// Map returns a Transformer that maps the runes in the input using the given +// mapping. Illegal bytes in the input are converted to utf8.RuneError before +// being passed to the mapping func. +func Map(mapping func(rune) rune) Transformer { + return Transformer{mapper(mapping)} +} + +type mapper func(rune) rune + +func (mapper) Reset() {} + +// Span implements transform.Spanner. +func (t mapper) Span(src []byte, atEOF bool) (n int, err error) { + for r, size := rune(0), 0; n < len(src); n += size { + if r = rune(src[n]); r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[n:]); size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[n:]) { + err = transform.ErrShortSrc + } else { + err = transform.ErrEndOfSpan + } + break + } + if t(r) != r { + err = transform.ErrEndOfSpan + break + } + } + return n, err +} + +// Transform implements transform.Transformer. +func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + var replacement rune + var b [utf8.UTFMax]byte + + for r, size := rune(0), 0; nSrc < len(src); { + if r = rune(src[nSrc]); r < utf8.RuneSelf { + if replacement = t(r); replacement < utf8.RuneSelf { + if nDst == len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst] = byte(replacement) + nDst++ + nSrc++ + continue + } + size = 1 + } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 { + // Invalid rune. 
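A similar sketch for Remove and for ReplaceIllFormed (the latter is defined just below in this file); again illustrative only, not part of the vendored files.

package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/runes"
)

func main() {
	// Strip every Unicode white-space character.
	noSpace := runes.Remove(runes.In(unicode.White_Space))
	fmt.Println(noSpace.String("a b\tc")) // abc

	// Replace bytes that are not part of well-formed UTF-8 with U+FFFD.
	fix := runes.ReplaceIllFormed()
	fmt.Println(fix.String("a\xffb")) // a, then U+FFFD, then b
}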
+ if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + + if replacement = t(utf8.RuneError); replacement == utf8.RuneError { + if nDst+3 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = runeErrorString[0] + dst[nDst+1] = runeErrorString[1] + dst[nDst+2] = runeErrorString[2] + nDst += 3 + nSrc++ + continue + } + } else if replacement = t(r); replacement == r { + if nDst+size > len(dst) { + err = transform.ErrShortDst + break + } + for i := 0; i < size; i++ { + dst[nDst] = src[nSrc] + nDst++ + nSrc++ + } + continue + } + + n := utf8.EncodeRune(b[:], replacement) + + if nDst+n > len(dst) { + err = transform.ErrShortDst + break + } + for i := 0; i < n; i++ { + dst[nDst] = b[i] + nDst++ + } + nSrc += size + } + return +} + +// ReplaceIllFormed returns a transformer that replaces all input bytes that are +// not part of a well-formed UTF-8 code sequence with utf8.RuneError. +func ReplaceIllFormed() Transformer { + return Transformer{&replaceIllFormed{}} +} + +type replaceIllFormed struct{ transform.NopResetter } + +func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + // ASCII fast path. + if src[n] < utf8.RuneSelf { + n++ + continue + } + + r, size := utf8.DecodeRune(src[n:]) + + // Look for a valid non-ASCII rune. + if r != utf8.RuneError || size != 1 { + n += size + continue + } + + // Look for short source data. + if !atEOF && !utf8.FullRune(src[n:]) { + err = transform.ErrShortSrc + break + } + + // We have an invalid rune. + err = transform.ErrEndOfSpan + break + } + return n, err +} + +func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + // ASCII fast path. + if r := src[nSrc]; r < utf8.RuneSelf { + if nDst == len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst] = r + nDst++ + nSrc++ + continue + } + + // Look for a valid non-ASCII rune. + if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + err = transform.ErrShortDst + break + } + nDst += size + nSrc += size + continue + } + + // Look for short source data. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + + // We have an invalid rune. + if nDst+3 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = runeErrorString[0] + dst[nDst+1] = runeErrorString[1] + dst[nDst+2] = runeErrorString[2] + nDst += 3 + nSrc++ + } + return nDst, nSrc, err +} diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule.go b/vendor/golang.org/x/text/secure/bidirule/bidirule.go new file mode 100644 index 000000000..277257fd7 --- /dev/null +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule.go @@ -0,0 +1,290 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bidirule implements the Bidi Rule defined by RFC 5893. +// +// This package is under development. The API may change without notice and +// without preserving backward compatibility. +package bidirule + +import ( + "errors" + "unicode/utf8" + + "golang.org/x/text/transform" + "golang.org/x/text/unicode/bidi" +) + +// This file contains an implementation of RFC 5893: Right-to-Left Scripts for +// Internationalized Domain Names for Applications (IDNA) +// +// A label is an individual component of a domain name. 
Labels are usually +// shown separated by dots; for example, the domain name "www.example.com" is +// composed of three labels: "www", "example", and "com". +// +// An RTL label is a label that contains at least one character of class R, AL, +// or AN. An LTR label is any label that is not an RTL label. +// +// A "Bidi domain name" is a domain name that contains at least one RTL label. +// +// The following guarantees can be made based on the above: +// +// o In a domain name consisting of only labels that satisfy the rule, +// the requirements of Section 3 are satisfied. Note that even LTR +// labels and pure ASCII labels have to be tested. +// +// o In a domain name consisting of only LDH labels (as defined in the +// Definitions document [RFC5890]) and labels that satisfy the rule, +// the requirements of Section 3 are satisfied as long as a label +// that starts with an ASCII digit does not come after a +// right-to-left label. +// +// No guarantee is given for other combinations. + +// ErrInvalid indicates a label is invalid according to the Bidi Rule. +var ErrInvalid = errors.New("bidirule: failed Bidi Rule") + +type ruleState uint8 + +const ( + ruleInitial ruleState = iota + ruleLTR + ruleLTRFinal + ruleRTL + ruleRTLFinal + ruleInvalid +) + +type ruleTransition struct { + next ruleState + mask uint16 +} + +var transitions = [...][2]ruleTransition{ + // [2.1] The first character must be a character with Bidi property L, R, or + // AL. If it has the R or AL property, it is an RTL label; if it has the L + // property, it is an LTR label. + ruleInitial: { + {ruleLTRFinal, 1 << bidi.L}, + {ruleRTLFinal, 1<<bidi.R | 1<<bidi.AL}, + }, + ruleRTL: { + // [2.3] In an RTL label, the end of the label must be a character with + // Bidi property R, AL, EN, or AN, followed by zero or more characters + // with Bidi property NSM. + {ruleRTLFinal, 1<<bidi.R | 1<<bidi.AL | 1<<bidi.EN | 1<<bidi.AN}, + + // [2.2] In an RTL label, only characters with the Bidi properties R, + // AL, AN, EN, ES, CS, ET, ON, BN, or NSM are allowed. + // We exclude the entries from [2.3] + {ruleRTL, 1<<bidi.ES | 1<<bidi.CS | 1<<bidi.ET | 1<<bidi.ON | 1<<bidi.BN | 1<<bidi.NSM}, + }, + ruleRTLFinal: { + // [2.3] In an RTL label, the end of the label must be a character with + // Bidi property R, AL, EN, or AN, followed by zero or more characters + // with Bidi property NSM. + {ruleRTLFinal, 1<<bidi.R | 1<<bidi.AL | 1<<bidi.EN | 1<<bidi.AN | 1<<bidi.NSM}, + + // [2.2] In an RTL label, only characters with the Bidi properties R, + // AL, AN, EN, ES, CS, ET, ON, BN, or NSM are allowed. + // We exclude the entries from [2.3] and NSM. + {ruleRTL, 1<<bidi.ES | 1<<bidi.CS | 1<<bidi.ET | 1<<bidi.ON | 1<<bidi.BN}, + }, + ruleLTR: { + // [2.6] In an LTR label, the end of the label must be a character with + // Bidi property L or EN, followed by zero or more characters with Bidi + // property NSM. + {ruleLTRFinal, 1<<bidi.L | 1<<bidi.EN}, + + // [2.5] In an LTR label, only characters with the Bidi properties L, + // EN, ES, CS, ET, ON, BN, or NSM are allowed. + // We exclude the entries from [2.6]. + {ruleLTR, 1<<bidi.ES | 1<<bidi.CS | 1<<bidi.ET | 1<<bidi.ON | 1<<bidi.BN | 1<<bidi.NSM}, + }, + ruleLTRFinal: { + // [2.6] In an LTR label, the end of the label must be a character with + // Bidi property L or EN, followed by zero or more characters with Bidi + // property NSM. 
+ {ruleLTRFinal, 1<<bidi.L | 1<<bidi.EN | 1<<bidi.NSM}, + + // [2.5] In an LTR label, only characters with the Bidi properties L, + // EN, ES, CS, ET, ON, BN, or NSM are allowed. + // We exclude the entries from [2.6]. + {ruleLTR, 1<<bidi.ES | 1<<bidi.CS | 1<<bidi.ET | 1<<bidi.ON | 1<<bidi.BN}, + }, + ruleInvalid: { + {ruleInvalid, 0}, + {ruleInvalid, 0}, + }, +} + +// [2.4] In an RTL label, if an EN is present, no AN may be present, and +// vice versa. +const exclusiveRTL = uint16(1<<bidi.EN | 1<<bidi.AN) + +// Direction reports the direction of the given label as defined by RFC 5893 or +// an error if b is not a valid label according to the Bidi Rule. +func Direction(b []byte) (bidi.Direction, error) { + t := Transformer{} + if n, ok := t.advance(b); ok && n == len(b) { + switch t.state { + case ruleLTRFinal, ruleInitial: + return bidi.LeftToRight, nil + case ruleRTLFinal: + return bidi.RightToLeft, nil + } + } + return bidi.Neutral, ErrInvalid +} + +// DirectionString reports the direction of the given label as defined by RFC +// 5893 or an error if s is not a valid label according to the Bidi Rule. +func DirectionString(s string) (bidi.Direction, error) { + t := Transformer{} + if n, ok := t.advanceString(s); ok && n == len(s) { + switch t.state { + case ruleLTRFinal, ruleInitial: + return bidi.LeftToRight, nil + case ruleRTLFinal: + return bidi.RightToLeft, nil + } + } + return bidi.Neutral, ErrInvalid +} + +// New returns a Transformer that verifies that input adheres to the Bidi Rule. +func New() *Transformer { + return &Transformer{} +} + +// Transformer implements transform.Transform. +type Transformer struct { + state ruleState + seen uint16 +} + +// Reset implements transform.Transformer. +func (t *Transformer) Reset() { *t = Transformer{} } + +// Transform implements transform.Transformer. This Transformer has state and +// needs to be reset between uses. +func (t *Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + if len(dst) < len(src) { + src = src[:len(dst)] + atEOF = false + err = transform.ErrShortDst + } + n, err1 := t.Span(src, atEOF) + copy(dst, src[:n]) + if err == nil || err1 != nil && err1 != transform.ErrShortSrc { + err = err1 + } + return n, n, err +} + +// Span returns the first n bytes of src that conform to the Bidi rule. +func (t *Transformer) Span(src []byte, atEOF bool) (n int, err error) { + if t.state == ruleInvalid { + return 0, ErrInvalid + } + n, ok := t.advance(src) + switch { + case !ok: + err = ErrInvalid + case n < len(src): + if !atEOF { + err = transform.ErrShortSrc + break + } + err = ErrInvalid + case t.state != ruleLTRFinal && t.state != ruleRTLFinal && t.state != ruleInitial: + err = ErrInvalid + } + return n, err +} + +// Precomputing the ASCII values decreases running time for the ASCII fast path +// by about 30%. +var asciiTable [128]bidi.Properties + +func init() { + for i := range asciiTable { + p, _ := bidi.LookupRune(rune(i)) + asciiTable[i] = p + } +} + +func (t *Transformer) advance(s []byte) (n int, ok bool) { + var e bidi.Properties + var sz int + for n < len(s) { + if s[n] < utf8.RuneSelf { + e, sz = asciiTable[s[n]], 1 + } else { + e, sz = bidi.Lookup(s[n:]) + if sz <= 1 { + if sz == 1 { + return n, false // invalid UTF-8 + } + return n, true // incomplete UTF-8 encoding + } + } + // TODO: using CompactClass results in noticeable speedup. + // See unicode/bidi/prop.go:Properties.CompactClass. 
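A hedged usage sketch of the exported entry points above (Direction, DirectionString); not part of the vendored files, and the bidi.LeftToRight constant is assumed from the vendored unicode/bidi package.

package main

import (
	"fmt"

	"golang.org/x/text/secure/bidirule"
	"golang.org/x/text/unicode/bidi"
)

func main() {
	// An all-ASCII label satisfies the Bidi Rule and is left-to-right.
	dir, err := bidirule.DirectionString("example")
	fmt.Println(dir == bidi.LeftToRight, err) // true <nil>

	// A right-to-left character inside an LTR label violates rule [2.5],
	// so the label is rejected.
	if _, err := bidirule.DirectionString("abcאבג"); err != nil {
		fmt.Println(err) // bidirule: failed Bidi Rule
	}
}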
+ c := uint16(1 << e.Class()) + t.seen |= c + if t.seen&exclusiveRTL == exclusiveRTL { + t.state = ruleInvalid + return n, false + } + switch tr := transitions[t.state]; { + case tr[0].mask&c != 0: + t.state = tr[0].next + case tr[1].mask&c != 0: + t.state = tr[1].next + default: + t.state = ruleInvalid + return n, false + } + n += sz + } + return n, true +} + +func (t *Transformer) advanceString(s string) (n int, ok bool) { + var e bidi.Properties + var sz int + for n < len(s) { + if s[n] < utf8.RuneSelf { + e, sz = asciiTable[s[n]], 1 + } else { + e, sz = bidi.LookupString(s[n:]) + if sz <= 1 { + if sz == 1 { + return n, false // invalid UTF-8 + } + return n, true // incomplete UTF-8 encoding + } + } + // TODO: using CompactClass results in noticeable speedup. + // See unicode/bidi/prop.go:Properties.CompactClass. + c := uint16(1 << e.Class()) + t.seen |= c + if t.seen&exclusiveRTL == exclusiveRTL { + t.state = ruleInvalid + return n, false + } + switch tr := transitions[t.state]; { + case tr[0].mask&c != 0: + t.state = tr[0].next + case tr[1].mask&c != 0: + t.state = tr[1].next + default: + t.state = ruleInvalid + return n, false + } + n += sz + } + return n, true +} diff --git a/vendor/golang.org/x/text/secure/precis/class.go b/vendor/golang.org/x/text/secure/precis/class.go new file mode 100644 index 000000000..f6b56413b --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/class.go @@ -0,0 +1,36 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package precis + +import ( + "unicode/utf8" +) + +// TODO: Add contextual character rules from Appendix A of RFC5892. + +// A class is a set of characters that match certain derived properties. The +// PRECIS framework defines two classes: The Freeform class and the Identifier +// class. The freeform class should be used for profiles where expressiveness is +// prioritized over safety such as nicknames or passwords. The identifier class +// should be used for profiles where safety is the first priority such as +// addressable network labels and usernames. +type class struct { + validFrom property +} + +// Contains satisfies the runes.Set interface and returns whether the given rune +// is a member of the class. +func (c class) Contains(r rune) bool { + b := make([]byte, 4) + n := utf8.EncodeRune(b, r) + + trieval, _ := dpTrie.lookup(b[:n]) + return c.validFrom <= property(trieval) +} + +var ( + identifier = &class{validFrom: pValid} + freeform = &class{validFrom: idDisOrFreePVal} +) diff --git a/vendor/golang.org/x/text/secure/precis/context.go b/vendor/golang.org/x/text/secure/precis/context.go new file mode 100644 index 000000000..2dcaf29d7 --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/context.go @@ -0,0 +1,139 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package precis + +import "errors" + +// This file contains tables and code related to context rules. + +type catBitmap uint16 + +const ( + // These bits, once set depending on the current value, are never unset. + bJapanese catBitmap = 1 << iota + bArabicIndicDigit + bExtendedArabicIndicDigit + + // These bits are set on each iteration depending on the current value. 
+ bJoinStart + bJoinMid + bJoinEnd + bVirama + bLatinSmallL + bGreek + bHebrew + + // These bits indicated which of the permanent bits need to be set at the + // end of the checks. + bMustHaveJapn + + permanent = bJapanese | bArabicIndicDigit | bExtendedArabicIndicDigit | bMustHaveJapn +) + +const finalShift = 10 + +var errContext = errors.New("precis: contextual rule violated") + +func init() { + // Programmatically set these required bits as, manually setting them seems + // too error prone. + for i, ct := range categoryTransitions { + categoryTransitions[i].keep |= permanent + categoryTransitions[i].accept |= ct.term + } +} + +var categoryTransitions = []struct { + keep catBitmap // mask selecting which bits to keep from the previous state + set catBitmap // mask for which bits to set for this transition + + // These bitmaps are used for rules that require lookahead. + // term&accept == term must be true, which is enforced programmatically. + term catBitmap // bits accepted as termination condition + accept catBitmap // bits that pass, but not sufficient as termination + + // The rule function cannot take a *context as an argument, as it would + // cause the context to escape, adding significant overhead. + rule func(beforeBits catBitmap) (doLookahead bool, err error) +}{ + joiningL: {set: bJoinStart}, + joiningD: {set: bJoinStart | bJoinEnd}, + joiningT: {keep: bJoinStart, set: bJoinMid}, + joiningR: {set: bJoinEnd}, + viramaModifier: {set: bVirama}, + viramaJoinT: {set: bVirama | bJoinMid}, + latinSmallL: {set: bLatinSmallL}, + greek: {set: bGreek}, + greekJoinT: {set: bGreek | bJoinMid}, + hebrew: {set: bHebrew}, + hebrewJoinT: {set: bHebrew | bJoinMid}, + japanese: {set: bJapanese}, + katakanaMiddleDot: {set: bMustHaveJapn}, + + zeroWidthNonJoiner: { + term: bJoinEnd, + accept: bJoinMid, + rule: func(before catBitmap) (doLookAhead bool, err error) { + if before&bVirama != 0 { + return false, nil + } + if before&bJoinStart == 0 { + return false, errContext + } + return true, nil + }, + }, + zeroWidthJoiner: { + rule: func(before catBitmap) (doLookAhead bool, err error) { + if before&bVirama == 0 { + err = errContext + } + return false, err + }, + }, + middleDot: { + term: bLatinSmallL, + rule: func(before catBitmap) (doLookAhead bool, err error) { + if before&bLatinSmallL == 0 { + return false, errContext + } + return true, nil + }, + }, + greekLowerNumeralSign: { + set: bGreek, + term: bGreek, + rule: func(before catBitmap) (doLookAhead bool, err error) { + return true, nil + }, + }, + hebrewPreceding: { + set: bHebrew, + rule: func(before catBitmap) (doLookAhead bool, err error) { + if before&bHebrew == 0 { + err = errContext + } + return false, err + }, + }, + arabicIndicDigit: { + set: bArabicIndicDigit, + rule: func(before catBitmap) (doLookAhead bool, err error) { + if before&bExtendedArabicIndicDigit != 0 { + err = errContext + } + return false, err + }, + }, + extendedArabicIndicDigit: { + set: bExtendedArabicIndicDigit, + rule: func(before catBitmap) (doLookAhead bool, err error) { + if before&bArabicIndicDigit != 0 { + err = errContext + } + return false, err + }, + }, +} diff --git a/vendor/golang.org/x/text/secure/precis/doc.go b/vendor/golang.org/x/text/secure/precis/doc.go new file mode 100644 index 000000000..aa76205e6 --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/doc.go @@ -0,0 +1,14 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package precis contains types and functions for the preparation, +// enforcement, and comparison of internationalized strings ("PRECIS") as +// defined in RFC 7564. It also contains several pre-defined profiles for +// passwords, nicknames, and usernames as defined in RFC 7613 and RFC 7700. +// +// BE ADVISED: This package is under construction and the API may change in +// backwards incompatible ways and without notice. +package precis + +//go:generate go run gen.go gen_trieval.go diff --git a/vendor/golang.org/x/text/secure/precis/gen.go b/vendor/golang.org/x/text/secure/precis/gen.go new file mode 100644 index 000000000..dba9004a6 --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/gen.go @@ -0,0 +1,310 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Unicode table generator. +// Data read from the web. + +// +build ignore + +package main + +import ( + "flag" + "log" + "unicode" + "unicode/utf8" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/triegen" + "golang.org/x/text/internal/ucd" + "golang.org/x/text/unicode/norm" + "golang.org/x/text/unicode/rangetable" +) + +var outputFile = flag.String("output", "tables.go", "output file for generated tables; default tables.go") + +var assigned, disallowedRunes *unicode.RangeTable + +var runeCategory = map[rune]category{} + +var overrides = map[category]category{ + viramaModifier: viramaJoinT, + greek: greekJoinT, + hebrew: hebrewJoinT, +} + +func setCategory(r rune, cat category) { + if c, ok := runeCategory[r]; ok { + if override, ok := overrides[c]; cat == joiningT && ok { + cat = override + } else { + log.Fatalf("%U: multiple categories for rune (%v and %v)", r, c, cat) + } + } + runeCategory[r] = cat +} + +func init() { + if numCategories > 1<<propShift { + log.Fatalf("Number of categories is %d; may at most be %d", numCategories, 1<<propShift) + } +} + +func main() { + gen.Init() + + // Load data + runes := []rune{} + // PrecisIgnorableProperties: https://tools.ietf.org/html/rfc7564#section-9.13 + ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) { + if p.String(1) == "Default_Ignorable_Code_Point" { + runes = append(runes, p.Rune(0)) + } + }) + ucd.Parse(gen.OpenUCDFile("PropList.txt"), func(p *ucd.Parser) { + switch p.String(1) { + case "Noncharacter_Code_Point": + runes = append(runes, p.Rune(0)) + } + }) + // OldHangulJamo: https://tools.ietf.org/html/rfc5892#section-2.9 + ucd.Parse(gen.OpenUCDFile("HangulSyllableType.txt"), func(p *ucd.Parser) { + switch p.String(1) { + case "L", "V", "T": + runes = append(runes, p.Rune(0)) + } + }) + + disallowedRunes = rangetable.New(runes...) + assigned = rangetable.Assigned(unicode.Version) + + // Load category data. + runeCategory['l'] = latinSmallL + ucd.Parse(gen.OpenUCDFile("UnicodeData.txt"), func(p *ucd.Parser) { + const cccVirama = 9 + if p.Int(ucd.CanonicalCombiningClass) == cccVirama { + setCategory(p.Rune(0), viramaModifier) + } + }) + ucd.Parse(gen.OpenUCDFile("Scripts.txt"), func(p *ucd.Parser) { + switch p.String(1) { + case "Greek": + setCategory(p.Rune(0), greek) + case "Hebrew": + setCategory(p.Rune(0), hebrew) + case "Hiragana", "Katakana", "Han": + setCategory(p.Rune(0), japanese) + } + }) + + // Set the rule categories associated with exceptions. This overrides any + // previously set categories. The original categories are manually + // reintroduced in the categoryTransitions table. 
+ for r, e := range exceptions { + if e.cat != 0 { + runeCategory[r] = e.cat + } + } + cat := map[string]category{ + "L": joiningL, + "D": joiningD, + "T": joiningT, + + "R": joiningR, + } + ucd.Parse(gen.OpenUCDFile("extracted/DerivedJoiningType.txt"), func(p *ucd.Parser) { + switch v := p.String(1); v { + case "L", "D", "T", "R": + setCategory(p.Rune(0), cat[v]) + } + }) + + writeTables() + gen.Repackage("gen_trieval.go", "trieval.go", "precis") +} + +type exception struct { + prop property + cat category +} + +func init() { + // Programmatically add the Arabic and Indic digits to the exceptions map. + // See comment in the exceptions map below why these are marked disallowed. + for i := rune(0); i <= 9; i++ { + exceptions[0x0660+i] = exception{ + prop: disallowed, + cat: arabicIndicDigit, + } + exceptions[0x06F0+i] = exception{ + prop: disallowed, + cat: extendedArabicIndicDigit, + } + } +} + +// The Exceptions class as defined in RFC 5892 +// https://tools.ietf.org/html/rfc5892#section-2.6 +var exceptions = map[rune]exception{ + 0x00DF: {prop: pValid}, + 0x03C2: {prop: pValid}, + 0x06FD: {prop: pValid}, + 0x06FE: {prop: pValid}, + 0x0F0B: {prop: pValid}, + 0x3007: {prop: pValid}, + + // ContextO|J rules are marked as disallowed, taking a "guilty until proven + // innocent" approach. The main reason for this is that the check for + // whether a context rule should be applied can be moved to the logic for + // handing disallowed runes, taken it off the common path. The exception to + // this rule is for katakanaMiddleDot, as the rule logic is handled without + // using a rule function. + + // ContextJ (Join control) + 0x200C: {prop: disallowed, cat: zeroWidthNonJoiner}, + 0x200D: {prop: disallowed, cat: zeroWidthJoiner}, + + // ContextO + 0x00B7: {prop: disallowed, cat: middleDot}, + 0x0375: {prop: disallowed, cat: greekLowerNumeralSign}, + 0x05F3: {prop: disallowed, cat: hebrewPreceding}, // punctuation Geresh + 0x05F4: {prop: disallowed, cat: hebrewPreceding}, // punctuation Gershayim + 0x30FB: {prop: pValid, cat: katakanaMiddleDot}, + + // These are officially ContextO, but the implementation does not require + // special treatment of these, so we simply mark them as valid. + 0x0660: {prop: pValid}, + 0x0661: {prop: pValid}, + 0x0662: {prop: pValid}, + 0x0663: {prop: pValid}, + 0x0664: {prop: pValid}, + 0x0665: {prop: pValid}, + 0x0666: {prop: pValid}, + 0x0667: {prop: pValid}, + 0x0668: {prop: pValid}, + 0x0669: {prop: pValid}, + 0x06F0: {prop: pValid}, + 0x06F1: {prop: pValid}, + 0x06F2: {prop: pValid}, + 0x06F3: {prop: pValid}, + 0x06F4: {prop: pValid}, + 0x06F5: {prop: pValid}, + 0x06F6: {prop: pValid}, + 0x06F7: {prop: pValid}, + 0x06F8: {prop: pValid}, + 0x06F9: {prop: pValid}, + + 0x0640: {prop: disallowed}, + 0x07FA: {prop: disallowed}, + 0x302E: {prop: disallowed}, + 0x302F: {prop: disallowed}, + 0x3031: {prop: disallowed}, + 0x3032: {prop: disallowed}, + 0x3033: {prop: disallowed}, + 0x3034: {prop: disallowed}, + 0x3035: {prop: disallowed}, + 0x303B: {prop: disallowed}, +} + +// LetterDigits: https://tools.ietf.org/html/rfc5892#section-2.1 +// r in {Ll, Lu, Lo, Nd, Lm, Mn, Mc}. 
+func isLetterDigits(r rune) bool { + return unicode.In(r, + unicode.Ll, unicode.Lu, unicode.Lm, unicode.Lo, // Letters + unicode.Mn, unicode.Mc, // Modifiers + unicode.Nd, // Digits + ) +} + +func isIdDisAndFreePVal(r rune) bool { + return unicode.In(r, + // OtherLetterDigits: https://tools.ietf.org/html/rfc7564#section-9.18 + // r in in {Lt, Nl, No, Me} + unicode.Lt, unicode.Nl, unicode.No, // Other letters / numbers + unicode.Me, // Modifiers + + // Spaces: https://tools.ietf.org/html/rfc7564#section-9.14 + // r in in {Zs} + unicode.Zs, + + // Symbols: https://tools.ietf.org/html/rfc7564#section-9.15 + // r in {Sm, Sc, Sk, So} + unicode.Sm, unicode.Sc, unicode.Sk, unicode.So, + + // Punctuation: https://tools.ietf.org/html/rfc7564#section-9.16 + // r in {Pc, Pd, Ps, Pe, Pi, Pf, Po} + unicode.Pc, unicode.Pd, unicode.Ps, unicode.Pe, + unicode.Pi, unicode.Pf, unicode.Po, + ) +} + +// HasCompat: https://tools.ietf.org/html/rfc7564#section-9.17 +func hasCompat(r rune) bool { + return !norm.NFKC.IsNormalString(string(r)) +} + +// From https://tools.ietf.org/html/rfc5892: +// +// If .cp. .in. Exceptions Then Exceptions(cp); +// Else If .cp. .in. BackwardCompatible Then BackwardCompatible(cp); +// Else If .cp. .in. Unassigned Then UNASSIGNED; +// Else If .cp. .in. ASCII7 Then PVALID; +// Else If .cp. .in. JoinControl Then CONTEXTJ; +// Else If .cp. .in. OldHangulJamo Then DISALLOWED; +// Else If .cp. .in. PrecisIgnorableProperties Then DISALLOWED; +// Else If .cp. .in. Controls Then DISALLOWED; +// Else If .cp. .in. HasCompat Then ID_DIS or FREE_PVAL; +// Else If .cp. .in. LetterDigits Then PVALID; +// Else If .cp. .in. OtherLetterDigits Then ID_DIS or FREE_PVAL; +// Else If .cp. .in. Spaces Then ID_DIS or FREE_PVAL; +// Else If .cp. .in. Symbols Then ID_DIS or FREE_PVAL; +// Else If .cp. .in. Punctuation Then ID_DIS or FREE_PVAL; +// Else DISALLOWED; + +func writeTables() { + propTrie := triegen.NewTrie("derivedProperties") + w := gen.NewCodeWriter() + defer w.WriteGoFile(*outputFile, "precis") + gen.WriteUnicodeVersion(w) + + // Iterate over all the runes... + for i := rune(0); i < unicode.MaxRune; i++ { + r := rune(i) + + if !utf8.ValidRune(r) { + continue + } + + e, ok := exceptions[i] + p := e.prop + switch { + case ok: + case !unicode.In(r, assigned): + p = unassigned + case r >= 0x0021 && r <= 0x007e: // Is ASCII 7 + p = pValid + case unicode.In(r, disallowedRunes, unicode.Cc): + p = disallowed + case hasCompat(r): + p = idDisOrFreePVal + case isLetterDigits(r): + p = pValid + case isIdDisAndFreePVal(r): + p = idDisOrFreePVal + default: + p = disallowed + } + cat := runeCategory[r] + // Don't set category for runes that are disallowed. + if p == disallowed { + cat = exceptions[r].cat + } + propTrie.Insert(r, uint64(p)|uint64(cat)) + } + sz, err := propTrie.Gen(w) + if err != nil { + log.Fatal(err) + } + w.Size += sz +} diff --git a/vendor/golang.org/x/text/secure/precis/gen_trieval.go b/vendor/golang.org/x/text/secure/precis/gen_trieval.go new file mode 100644 index 000000000..308510c9a --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/gen_trieval.go @@ -0,0 +1,68 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +package main + +// entry is the entry of a trie table +// 7..6 property (unassigned, disallowed, maybe, valid) +// 5..0 category +type entry uint8 + +const ( + propShift = 6 + propMask = 0xc0 + catMask = 0x3f +) + +func (e entry) property() property { return property(e & propMask) } +func (e entry) category() category { return category(e & catMask) } + +type property uint8 + +// The order of these constants matter. A Profile may consider runes to be +// allowed either from pValid or idDisOrFreePVal. +const ( + unassigned property = iota << propShift + disallowed + idDisOrFreePVal // disallowed for Identifier, pValid for FreeForm + pValid +) + +// compute permutations of all properties and specialCategories. +type category uint8 + +const ( + other category = iota + + // Special rune types + joiningL + joiningD + joiningT + joiningR + viramaModifier + viramaJoinT // Virama + JoiningT + latinSmallL // U+006c + greek + greekJoinT // Greek + JoiningT + hebrew + hebrewJoinT // Hebrew + JoiningT + japanese // hirigana, katakana, han + + // Special rune types associated with contextual rules defined in + // https://tools.ietf.org/html/rfc5892#appendix-A. + // ContextO + zeroWidthNonJoiner // rule 1 + zeroWidthJoiner // rule 2 + // ContextJ + middleDot // rule 3 + greekLowerNumeralSign // rule 4 + hebrewPreceding // rule 5 and 6 + katakanaMiddleDot // rule 7 + arabicIndicDigit // rule 8 + extendedArabicIndicDigit // rule 9 + + numCategories +) diff --git a/vendor/golang.org/x/text/secure/precis/nickname.go b/vendor/golang.org/x/text/secure/precis/nickname.go new file mode 100644 index 000000000..cd54b9e69 --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/nickname.go @@ -0,0 +1,70 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package precis + +import ( + "unicode" + "unicode/utf8" + + "golang.org/x/text/transform" +) + +type nickAdditionalMapping struct { + // TODO: This transformer needs to be stateless somehow… + notStart bool + prevSpace bool +} + +func (t *nickAdditionalMapping) Reset() { + t.prevSpace = false + t.notStart = false +} + +func (t *nickAdditionalMapping) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + // RFC 7700 §2.1. Rules + // + // 2. Additional Mapping Rule: The additional mapping rule consists of + // the following sub-rules. + // + // 1. Any instances of non-ASCII space MUST be mapped to ASCII + // space (U+0020); a non-ASCII space is any Unicode code point + // having a general category of "Zs", naturally with the + // exception of U+0020. + // + // 2. Any instances of the ASCII space character at the beginning + // or end of a nickname MUST be removed (e.g., "stpeter " is + // mapped to "stpeter"). + // + // 3. Interior sequences of more than one ASCII space character + // MUST be mapped to a single ASCII space character (e.g., + // "St Peter" is mapped to "St Peter"). 
+ + for nSrc < len(src) { + r, size := utf8.DecodeRune(src[nSrc:]) + if size == 0 { // Incomplete UTF-8 encoding + if !atEOF { + return nDst, nSrc, transform.ErrShortSrc + } + size = 1 + } + if unicode.Is(unicode.Zs, r) { + t.prevSpace = true + } else { + if t.prevSpace && t.notStart { + dst[nDst] = ' ' + nDst += 1 + } + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + nDst += size + return nDst, nSrc, transform.ErrShortDst + } + nDst += size + t.prevSpace = false + t.notStart = true + } + nSrc += size + } + return nDst, nSrc, nil +} diff --git a/vendor/golang.org/x/text/secure/precis/options.go b/vendor/golang.org/x/text/secure/precis/options.go new file mode 100644 index 000000000..ec63783ef --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/options.go @@ -0,0 +1,106 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package precis + +import ( + "golang.org/x/text/cases" + "golang.org/x/text/runes" + "golang.org/x/text/transform" + "golang.org/x/text/unicode/norm" + "golang.org/x/text/width" +) + +// An Option is used to define the behavior and rules of a Profile. +type Option func(*options) + +type options struct { + // Preparation options + foldWidth bool + + // Enforcement options + cases transform.Transformer + disallow runes.Set + norm norm.Form + additional []func() transform.Transformer + width *width.Transformer + disallowEmpty bool + bidiRule bool + + // Comparison options + ignorecase bool +} + +func getOpts(o ...Option) (res options) { + for _, f := range o { + f(&res) + } + return +} + +var ( + // The IgnoreCase option causes the profile to perform a case insensitive + // comparison during the PRECIS comparison step. + IgnoreCase Option = ignoreCase + + // The FoldWidth option causes the profile to map non-canonical wide and + // narrow variants to their decomposition mapping. This is useful for + // profiles that are based on the identifier class which would otherwise + // disallow such characters. + FoldWidth Option = foldWidth + + // The DisallowEmpty option causes the enforcement step to return an error if + // the resulting string would be empty. + DisallowEmpty Option = disallowEmpty + + // The BidiRule option causes the Bidi Rule defined in RFC 5893 to be + // applied. + BidiRule Option = bidiRule +) + +var ( + ignoreCase = func(o *options) { + o.ignorecase = true + } + foldWidth = func(o *options) { + o.foldWidth = true + } + disallowEmpty = func(o *options) { + o.disallowEmpty = true + } + bidiRule = func(o *options) { + o.bidiRule = true + } +) + +// The AdditionalMapping option defines the additional mapping rule for the +// Profile by applying Transformer's in sequence. +func AdditionalMapping(t ...func() transform.Transformer) Option { + return func(o *options) { + o.additional = t + } +} + +// The Norm option defines a Profile's normalization rule. Defaults to NFC. +func Norm(f norm.Form) Option { + return func(o *options) { + o.norm = f + } +} + +// The FoldCase option defines a Profile's case mapping rule. Options can be +// provided to determine the type of case folding used. +func FoldCase(opts ...cases.Option) Option { + return func(o *options) { + o.cases = cases.Fold(opts...) + } +} + +// The Disallow option further restricts a Profile's allowed characters beyond +// what is disallowed by the underlying string class. 
+func Disallow(set runes.Set) Option { + return func(o *options) { + o.disallow = set + } +} diff --git a/vendor/golang.org/x/text/secure/precis/profile.go b/vendor/golang.org/x/text/secure/precis/profile.go new file mode 100644 index 000000000..fd5c422fb --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/profile.go @@ -0,0 +1,330 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package precis + +import ( + "errors" + "unicode/utf8" + + "golang.org/x/text/runes" + "golang.org/x/text/secure/bidirule" + "golang.org/x/text/transform" + "golang.org/x/text/width" +) + +var ( + errDisallowedRune = errors.New("precis: disallowed rune encountered") +) + +var dpTrie = newDerivedPropertiesTrie(0) + +// A Profile represents a set of rules for normalizing and validating strings in +// the PRECIS framework. +type Profile struct { + options + class *class +} + +// NewIdentifier creates a new PRECIS profile based on the Identifier string +// class. Profiles created from this class are suitable for use where safety is +// prioritized over expressiveness like network identifiers, user accounts, chat +// rooms, and file names. +func NewIdentifier(opts ...Option) *Profile { + return &Profile{ + options: getOpts(opts...), + class: identifier, + } +} + +// NewFreeform creates a new PRECIS profile based on the Freeform string class. +// Profiles created from this class are suitable for use where expressiveness is +// prioritized over safety like passwords, and display-elements such as +// nicknames in a chat room. +func NewFreeform(opts ...Option) *Profile { + return &Profile{ + options: getOpts(opts...), + class: freeform, + } +} + +// NewTransformer creates a new transform.Transformer that performs the PRECIS +// preparation and enforcement steps on the given UTF-8 encoded bytes. +func (p *Profile) NewTransformer() *Transformer { + var ts []transform.Transformer + + // These transforms are applied in the order defined in + // https://tools.ietf.org/html/rfc7564#section-7 + + if p.options.foldWidth { + ts = append(ts, width.Fold) + } + + for _, f := range p.options.additional { + ts = append(ts, f()) + } + + if p.options.cases != nil { + ts = append(ts, p.options.cases) + } + + ts = append(ts, p.options.norm) + + if p.options.bidiRule { + ts = append(ts, bidirule.New()) + } + + ts = append(ts, &checker{p: p, allowed: p.Allowed()}) + + // TODO: Add the disallow empty rule with a dummy transformer? + + return &Transformer{transform.Chain(ts...)} +} + +var errEmptyString = errors.New("precis: transformation resulted in empty string") + +type buffers struct { + src []byte + buf [2][]byte + next int +} + +func (b *buffers) init(n int) { + b.buf[0] = make([]byte, 0, n) + b.buf[1] = make([]byte, 0, n) +} + +func (b *buffers) apply(t transform.Transformer) (err error) { + // TODO: use Span, once available. + x := b.next & 1 + b.src, _, err = transform.Append(t, b.buf[x][:0], b.src) + b.buf[x] = b.src + b.next++ + return err +} + +func (b *buffers) enforce(p *Profile, src []byte) (str []byte, err error) { + b.src = src + + // These transforms are applied in the order defined in + // https://tools.ietf.org/html/rfc7564#section-7 + + // TODO: allow different width transforms options. + if p.options.foldWidth { + // TODO: use Span, once available. 
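To illustrate how the options above combine with NewIdentifier, here is a small sketch (not part of the vendored files). The expected outputs in the comments follow from the enforcement order described in profile.go and have not been verified against a build.

package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/runes"
	"golang.org/x/text/secure/precis"
	"golang.org/x/text/unicode/norm"
)

func main() {
	// An identifier-class profile that width-folds, case-folds, normalizes to
	// NFC, applies the Bidi Rule, and additionally forbids decimal digits.
	p := precis.NewIdentifier(
		precis.FoldWidth,
		precis.FoldCase(),
		precis.Norm(norm.NFC),
		precis.BidiRule,
		precis.Disallow(runes.In(unicode.Nd)),
	)

	s, err := p.String("Ｈｅｎｒｙ") // fullwidth Latin letters
	fmt.Println(s, err)            // expected: henry <nil>

	// Digits are PVALID for the Identifier class, but the Disallow option
	// rejects them on top of the class rules.
	_, err = p.String("agent007")
	fmt.Println(err) // expected: precis: disallowed rune encountered
}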
+ if err = b.apply(width.Fold); err != nil { + return nil, err + } + } + for _, f := range p.options.additional { + if err = b.apply(f()); err != nil { + return nil, err + } + } + if p.options.cases != nil { + if err = b.apply(p.options.cases); err != nil { + return nil, err + } + } + if n := p.norm.QuickSpan(b.src); n < len(b.src) { + x := b.next & 1 + n = copy(b.buf[x], b.src[:n]) + b.src, _, err = transform.Append(p.norm, b.buf[x][:n], b.src[n:]) + b.buf[x] = b.src + b.next++ + if err != nil { + return nil, err + } + } + if p.options.bidiRule { + if err := b.apply(bidirule.New()); err != nil { + return nil, err + } + } + c := checker{p: p} + if _, err := c.span(b.src, true); err != nil { + return nil, err + } + if p.disallow != nil { + for i := 0; i < len(b.src); { + r, size := utf8.DecodeRune(b.src[i:]) + if p.disallow.Contains(r) { + return nil, errDisallowedRune + } + i += size + } + } + + // TODO: Add the disallow empty rule with a dummy transformer? + + if p.options.disallowEmpty && len(b.src) == 0 { + return nil, errEmptyString + } + return b.src, nil +} + +// Append appends the result of applying p to src writing the result to dst. +// It returns an error if the input string is invalid. +func (p *Profile) Append(dst, src []byte) ([]byte, error) { + var buf buffers + buf.init(8 + len(src) + len(src)>>2) + b, err := buf.enforce(p, src) + if err != nil { + return nil, err + } + return append(dst, b...), nil +} + +// Bytes returns a new byte slice with the result of applying the profile to b. +func (p *Profile) Bytes(b []byte) ([]byte, error) { + var buf buffers + buf.init(8 + len(b) + len(b)>>2) + b, err := buf.enforce(p, b) + if err != nil { + return nil, err + } + if buf.next == 0 { + c := make([]byte, len(b)) + copy(c, b) + return c, nil + } + return b, nil +} + +// String returns a string with the result of applying the profile to s. +func (p *Profile) String(s string) (string, error) { + var buf buffers + buf.init(8 + len(s) + len(s)>>2) + b, err := buf.enforce(p, []byte(s)) + if err != nil { + return "", err + } + return string(b), nil +} + +// Compare enforces both strings, and then compares them for bit-string identity +// (byte-for-byte equality). If either string cannot be enforced, the comparison +// is false. +func (p *Profile) Compare(a, b string) bool { + a, err := p.String(a) + if err != nil { + return false + } + b, err = p.String(b) + if err != nil { + return false + } + + // TODO: This is out of order. Need to extract the transformation logic and + // put this in where the normal case folding would go (but only for + // comparison). + if p.options.ignorecase { + a = width.Fold.String(a) + b = width.Fold.String(a) + } + + return a == b +} + +// Allowed returns a runes.Set containing every rune that is a member of the +// underlying profile's string class and not disallowed by any profile specific +// rules. 
+func (p *Profile) Allowed() runes.Set { + if p.options.disallow != nil { + return runes.Predicate(func(r rune) bool { + return p.class.Contains(r) && !p.options.disallow.Contains(r) + }) + } + return p.class +} + +type checker struct { + p *Profile + allowed runes.Set + + beforeBits catBitmap + termBits catBitmap + acceptBits catBitmap +} + +func (c *checker) Reset() { + c.beforeBits = 0 + c.termBits = 0 + c.acceptBits = 0 +} + +func (c *checker) span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + e, sz := dpTrie.lookup(src[n:]) + d := categoryTransitions[category(e&catMask)] + if sz == 0 { + if !atEOF { + return n, transform.ErrShortSrc + } + return n, errDisallowedRune + } + if property(e) < c.p.class.validFrom { + if d.rule == nil { + return n, errDisallowedRune + } + doLookAhead, err := d.rule(c.beforeBits) + if err != nil { + return n, err + } + if doLookAhead { + c.beforeBits &= d.keep + c.beforeBits |= d.set + // We may still have a lookahead rule which we will require to + // complete (by checking termBits == 0) before setting the new + // bits. + if c.termBits != 0 && (!c.checkLookahead() || c.termBits == 0) { + return n, err + } + c.termBits = d.term + c.acceptBits = d.accept + n += sz + continue + } + } + c.beforeBits &= d.keep + c.beforeBits |= d.set + if c.termBits != 0 && !c.checkLookahead() { + return n, errContext + } + n += sz + } + if m := c.beforeBits >> finalShift; c.beforeBits&m != m || c.termBits != 0 { + err = errContext + } + return n, err +} + +func (c *checker) checkLookahead() bool { + switch { + case c.beforeBits&c.termBits != 0: + c.termBits = 0 + c.acceptBits = 0 + case c.beforeBits&c.acceptBits != 0: + default: + return false + } + return true +} + +// TODO: we may get rid of this transform if transform.Chain understands +// something like a Spanner interface. +func (c checker) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + short := false + if len(dst) < len(src) { + src = src[:len(dst)] + atEOF = false + short = true + } + nSrc, err = c.span(src, atEOF) + nDst = copy(dst, src[:nSrc]) + if short && (err == transform.ErrShortSrc || err == nil) { + err = transform.ErrShortDst + } + return nDst, nSrc, err +} diff --git a/vendor/golang.org/x/text/secure/precis/profiles.go b/vendor/golang.org/x/text/secure/precis/profiles.go new file mode 100644 index 000000000..ad50ae857 --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/profiles.go @@ -0,0 +1,56 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package precis + +import ( + "unicode" + + "golang.org/x/text/runes" + "golang.org/x/text/transform" + "golang.org/x/text/unicode/norm" +) + +var ( + Nickname *Profile = nickname // Implements the Nickname profile specified in RFC 7700. + UsernameCaseMapped *Profile = usernameCaseMap // Implements the UsernameCaseMapped profile specified in RFC 7613. + UsernameCasePreserved *Profile = usernameNoCaseMap // Implements the UsernameCasePreserved profile specified in RFC 7613. + OpaqueString *Profile = opaquestring // Implements the OpaqueString profile defined in RFC 7613 for passwords and other secure labels. +) + +// TODO: mvl: "Ultimately, I would manually define the structs for the internal +// profiles. This avoid pulling in unneeded tables when they are not used." 
+var ( + nickname = NewFreeform( + AdditionalMapping(func() transform.Transformer { + return &nickAdditionalMapping{} + }), + IgnoreCase, + Norm(norm.NFKC), + DisallowEmpty, + ) + usernameCaseMap = NewIdentifier( + FoldWidth, + FoldCase(), + Norm(norm.NFC), + BidiRule, + ) + usernameNoCaseMap = NewIdentifier( + FoldWidth, + Norm(norm.NFC), + BidiRule, + ) + opaquestring = NewFreeform( + AdditionalMapping(func() transform.Transformer { + return runes.Map(func(r rune) rune { + if unicode.Is(unicode.Zs, r) { + return ' ' + } + return r + }) + }), + Norm(norm.NFC), + DisallowEmpty, + ) +) diff --git a/vendor/golang.org/x/text/secure/precis/tables.go b/vendor/golang.org/x/text/secure/precis/tables.go new file mode 100644 index 000000000..a9b500deb --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/tables.go @@ -0,0 +1,3788 @@ +// This file was generated by go generate; DO NOT EDIT + +package precis + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "9.0.0" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *derivedPropertiesTrie) lookup(s []byte) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return derivedPropertiesValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := derivedPropertiesIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := derivedPropertiesIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = derivedPropertiesIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := derivedPropertiesIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = derivedPropertiesIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = derivedPropertiesIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *derivedPropertiesTrie) lookupUnsafe(s []byte) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return derivedPropertiesValues[c0] + } + i := derivedPropertiesIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = derivedPropertiesIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = derivedPropertiesIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *derivedPropertiesTrie) lookupString(s string) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return derivedPropertiesValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := derivedPropertiesIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := derivedPropertiesIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = derivedPropertiesIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := derivedPropertiesIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = derivedPropertiesIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = derivedPropertiesIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *derivedPropertiesTrie) lookupStringUnsafe(s string) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return derivedPropertiesValues[c0] + } + i := derivedPropertiesIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = derivedPropertiesIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = derivedPropertiesIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// derivedPropertiesTrie. Total size: 25344 bytes (24.75 KiB). Checksum: c5b977d76d42d8a. +type derivedPropertiesTrie struct{} + +func newDerivedPropertiesTrie(i int) *derivedPropertiesTrie { + return &derivedPropertiesTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. 
+func (t *derivedPropertiesTrie) lookupValue(n uint32, b byte) uint8 { + switch { + default: + return uint8(derivedPropertiesValues[n<<6+uint32(b)]) + } +} + +// derivedPropertiesValues: 324 blocks, 20736 entries, 20736 bytes +// The third block is the zero block. +var derivedPropertiesValues = [20736]uint8{ + // Block 0x0, offset 0x0 + 0x00: 0x0040, 0x01: 0x0040, 0x02: 0x0040, 0x03: 0x0040, 0x04: 0x0040, 0x05: 0x0040, + 0x06: 0x0040, 0x07: 0x0040, 0x08: 0x0040, 0x09: 0x0040, 0x0a: 0x0040, 0x0b: 0x0040, + 0x0c: 0x0040, 0x0d: 0x0040, 0x0e: 0x0040, 0x0f: 0x0040, 0x10: 0x0040, 0x11: 0x0040, + 0x12: 0x0040, 0x13: 0x0040, 0x14: 0x0040, 0x15: 0x0040, 0x16: 0x0040, 0x17: 0x0040, + 0x18: 0x0040, 0x19: 0x0040, 0x1a: 0x0040, 0x1b: 0x0040, 0x1c: 0x0040, 0x1d: 0x0040, + 0x1e: 0x0040, 0x1f: 0x0040, 0x20: 0x0080, 0x21: 0x00c0, 0x22: 0x00c0, 0x23: 0x00c0, + 0x24: 0x00c0, 0x25: 0x00c0, 0x26: 0x00c0, 0x27: 0x00c0, 0x28: 0x00c0, 0x29: 0x00c0, + 0x2a: 0x00c0, 0x2b: 0x00c0, 0x2c: 0x00c0, 0x2d: 0x00c0, 0x2e: 0x00c0, 0x2f: 0x00c0, + 0x30: 0x00c0, 0x31: 0x00c0, 0x32: 0x00c0, 0x33: 0x00c0, 0x34: 0x00c0, 0x35: 0x00c0, + 0x36: 0x00c0, 0x37: 0x00c0, 0x38: 0x00c0, 0x39: 0x00c0, 0x3a: 0x00c0, 0x3b: 0x00c0, + 0x3c: 0x00c0, 0x3d: 0x00c0, 0x3e: 0x00c0, 0x3f: 0x00c0, + // Block 0x1, offset 0x40 + 0x40: 0x00c0, 0x41: 0x00c0, 0x42: 0x00c0, 0x43: 0x00c0, 0x44: 0x00c0, 0x45: 0x00c0, + 0x46: 0x00c0, 0x47: 0x00c0, 0x48: 0x00c0, 0x49: 0x00c0, 0x4a: 0x00c0, 0x4b: 0x00c0, + 0x4c: 0x00c0, 0x4d: 0x00c0, 0x4e: 0x00c0, 0x4f: 0x00c0, 0x50: 0x00c0, 0x51: 0x00c0, + 0x52: 0x00c0, 0x53: 0x00c0, 0x54: 0x00c0, 0x55: 0x00c0, 0x56: 0x00c0, 0x57: 0x00c0, + 0x58: 0x00c0, 0x59: 0x00c0, 0x5a: 0x00c0, 0x5b: 0x00c0, 0x5c: 0x00c0, 0x5d: 0x00c0, + 0x5e: 0x00c0, 0x5f: 0x00c0, 0x60: 0x00c0, 0x61: 0x00c0, 0x62: 0x00c0, 0x63: 0x00c0, + 0x64: 0x00c0, 0x65: 0x00c0, 0x66: 0x00c0, 0x67: 0x00c0, 0x68: 0x00c0, 0x69: 0x00c0, + 0x6a: 0x00c0, 0x6b: 0x00c0, 0x6c: 0x00c7, 0x6d: 0x00c0, 0x6e: 0x00c0, 0x6f: 0x00c0, + 0x70: 0x00c0, 0x71: 0x00c0, 0x72: 0x00c0, 0x73: 0x00c0, 0x74: 0x00c0, 0x75: 0x00c0, + 0x76: 0x00c0, 0x77: 0x00c0, 0x78: 0x00c0, 0x79: 0x00c0, 0x7a: 0x00c0, 0x7b: 0x00c0, + 0x7c: 0x00c0, 0x7d: 0x00c0, 0x7e: 0x00c0, 0x7f: 0x0040, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x0080, 0xe1: 0x0080, 0xe2: 0x0080, 0xe3: 0x0080, + 0xe4: 0x0080, 0xe5: 0x0080, 0xe6: 0x0080, 0xe7: 0x0080, 0xe8: 0x0080, 0xe9: 0x0080, + 0xea: 0x0080, 0xeb: 0x0080, 0xec: 0x0080, 0xed: 0x0040, 0xee: 0x0080, 0xef: 0x0080, + 0xf0: 0x0080, 0xf1: 0x0080, 0xf2: 0x0080, 0xf3: 0x0080, 0xf4: 0x0080, 0xf5: 0x0080, + 0xf6: 0x0080, 0xf7: 0x004f, 0xf8: 0x0080, 0xf9: 0x0080, 0xfa: 0x0080, 0xfb: 0x0080, + 0xfc: 0x0080, 0xfd: 0x0080, 0xfe: 0x0080, 0xff: 0x0080, + // Block 0x4, offset 0x100 + 0x100: 0x00c0, 0x101: 0x00c0, 0x102: 0x00c0, 0x103: 0x00c0, 0x104: 0x00c0, 0x105: 0x00c0, + 0x106: 0x00c0, 0x107: 0x00c0, 0x108: 0x00c0, 0x109: 0x00c0, 0x10a: 0x00c0, 0x10b: 0x00c0, + 0x10c: 0x00c0, 0x10d: 0x00c0, 0x10e: 0x00c0, 0x10f: 0x00c0, 0x110: 0x00c0, 0x111: 0x00c0, + 0x112: 0x00c0, 0x113: 0x00c0, 0x114: 0x00c0, 0x115: 0x00c0, 0x116: 
0x00c0, 0x117: 0x0080, + 0x118: 0x00c0, 0x119: 0x00c0, 0x11a: 0x00c0, 0x11b: 0x00c0, 0x11c: 0x00c0, 0x11d: 0x00c0, + 0x11e: 0x00c0, 0x11f: 0x00c0, 0x120: 0x00c0, 0x121: 0x00c0, 0x122: 0x00c0, 0x123: 0x00c0, + 0x124: 0x00c0, 0x125: 0x00c0, 0x126: 0x00c0, 0x127: 0x00c0, 0x128: 0x00c0, 0x129: 0x00c0, + 0x12a: 0x00c0, 0x12b: 0x00c0, 0x12c: 0x00c0, 0x12d: 0x00c0, 0x12e: 0x00c0, 0x12f: 0x00c0, + 0x130: 0x00c0, 0x131: 0x00c0, 0x132: 0x00c0, 0x133: 0x00c0, 0x134: 0x00c0, 0x135: 0x00c0, + 0x136: 0x00c0, 0x137: 0x0080, 0x138: 0x00c0, 0x139: 0x00c0, 0x13a: 0x00c0, 0x13b: 0x00c0, + 0x13c: 0x00c0, 0x13d: 0x00c0, 0x13e: 0x00c0, 0x13f: 0x00c0, + // Block 0x5, offset 0x140 + 0x140: 0x00c0, 0x141: 0x00c0, 0x142: 0x00c0, 0x143: 0x00c0, 0x144: 0x00c0, 0x145: 0x00c0, + 0x146: 0x00c0, 0x147: 0x00c0, 0x148: 0x00c0, 0x149: 0x00c0, 0x14a: 0x00c0, 0x14b: 0x00c0, + 0x14c: 0x00c0, 0x14d: 0x00c0, 0x14e: 0x00c0, 0x14f: 0x00c0, 0x150: 0x00c0, 0x151: 0x00c0, + 0x152: 0x00c0, 0x153: 0x00c0, 0x154: 0x00c0, 0x155: 0x00c0, 0x156: 0x00c0, 0x157: 0x00c0, + 0x158: 0x00c0, 0x159: 0x00c0, 0x15a: 0x00c0, 0x15b: 0x00c0, 0x15c: 0x00c0, 0x15d: 0x00c0, + 0x15e: 0x00c0, 0x15f: 0x00c0, 0x160: 0x00c0, 0x161: 0x00c0, 0x162: 0x00c0, 0x163: 0x00c0, + 0x164: 0x00c0, 0x165: 0x00c0, 0x166: 0x00c0, 0x167: 0x00c0, 0x168: 0x00c0, 0x169: 0x00c0, + 0x16a: 0x00c0, 0x16b: 0x00c0, 0x16c: 0x00c0, 0x16d: 0x00c0, 0x16e: 0x00c0, 0x16f: 0x00c0, + 0x170: 0x00c0, 0x171: 0x00c0, 0x172: 0x0080, 0x173: 0x0080, 0x174: 0x00c0, 0x175: 0x00c0, + 0x176: 0x00c0, 0x177: 0x00c0, 0x178: 0x00c0, 0x179: 0x00c0, 0x17a: 0x00c0, 0x17b: 0x00c0, + 0x17c: 0x00c0, 0x17d: 0x00c0, 0x17e: 0x00c0, 0x17f: 0x0080, + // Block 0x6, offset 0x180 + 0x180: 0x0080, 0x181: 0x00c0, 0x182: 0x00c0, 0x183: 0x00c0, 0x184: 0x00c0, 0x185: 0x00c0, + 0x186: 0x00c0, 0x187: 0x00c0, 0x188: 0x00c0, 0x189: 0x0080, 0x18a: 0x00c0, 0x18b: 0x00c0, + 0x18c: 0x00c0, 0x18d: 0x00c0, 0x18e: 0x00c0, 0x18f: 0x00c0, 0x190: 0x00c0, 0x191: 0x00c0, + 0x192: 0x00c0, 0x193: 0x00c0, 0x194: 0x00c0, 0x195: 0x00c0, 0x196: 0x00c0, 0x197: 0x00c0, + 0x198: 0x00c0, 0x199: 0x00c0, 0x19a: 0x00c0, 0x19b: 0x00c0, 0x19c: 0x00c0, 0x19d: 0x00c0, + 0x19e: 0x00c0, 0x19f: 0x00c0, 0x1a0: 0x00c0, 0x1a1: 0x00c0, 0x1a2: 0x00c0, 0x1a3: 0x00c0, + 0x1a4: 0x00c0, 0x1a5: 0x00c0, 0x1a6: 0x00c0, 0x1a7: 0x00c0, 0x1a8: 0x00c0, 0x1a9: 0x00c0, + 0x1aa: 0x00c0, 0x1ab: 0x00c0, 0x1ac: 0x00c0, 0x1ad: 0x00c0, 0x1ae: 0x00c0, 0x1af: 0x00c0, + 0x1b0: 0x00c0, 0x1b1: 0x00c0, 0x1b2: 0x00c0, 0x1b3: 0x00c0, 0x1b4: 0x00c0, 0x1b5: 0x00c0, + 0x1b6: 0x00c0, 0x1b7: 0x00c0, 0x1b8: 0x00c0, 0x1b9: 0x00c0, 0x1ba: 0x00c0, 0x1bb: 0x00c0, + 0x1bc: 0x00c0, 0x1bd: 0x00c0, 0x1be: 0x00c0, 0x1bf: 0x0080, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x00c0, 0x1c1: 0x00c0, 0x1c2: 0x00c0, 0x1c3: 0x00c0, 0x1c4: 0x00c0, 0x1c5: 0x00c0, + 0x1c6: 0x00c0, 0x1c7: 0x00c0, 0x1c8: 0x00c0, 0x1c9: 0x00c0, 0x1ca: 0x00c0, 0x1cb: 0x00c0, + 0x1cc: 0x00c0, 0x1cd: 0x00c0, 0x1ce: 0x00c0, 0x1cf: 0x00c0, 0x1d0: 0x00c0, 0x1d1: 0x00c0, + 0x1d2: 0x00c0, 0x1d3: 0x00c0, 0x1d4: 0x00c0, 0x1d5: 0x00c0, 0x1d6: 0x00c0, 0x1d7: 0x00c0, + 0x1d8: 0x00c0, 0x1d9: 0x00c0, 0x1da: 0x00c0, 0x1db: 0x00c0, 0x1dc: 0x00c0, 0x1dd: 0x00c0, + 0x1de: 0x00c0, 0x1df: 0x00c0, 0x1e0: 0x00c0, 0x1e1: 0x00c0, 0x1e2: 0x00c0, 0x1e3: 0x00c0, + 0x1e4: 0x00c0, 0x1e5: 0x00c0, 0x1e6: 0x00c0, 0x1e7: 0x00c0, 0x1e8: 0x00c0, 0x1e9: 0x00c0, + 0x1ea: 0x00c0, 0x1eb: 0x00c0, 0x1ec: 0x00c0, 0x1ed: 0x00c0, 0x1ee: 0x00c0, 0x1ef: 0x00c0, + 0x1f0: 0x00c0, 0x1f1: 0x00c0, 0x1f2: 0x00c0, 0x1f3: 0x00c0, 0x1f4: 0x00c0, 0x1f5: 0x00c0, + 0x1f6: 0x00c0, 0x1f7: 0x00c0, 0x1f8: 
0x00c0, 0x1f9: 0x00c0, 0x1fa: 0x00c0, 0x1fb: 0x00c0, + 0x1fc: 0x00c0, 0x1fd: 0x00c0, 0x1fe: 0x00c0, 0x1ff: 0x00c0, + // Block 0x8, offset 0x200 + 0x200: 0x00c0, 0x201: 0x00c0, 0x202: 0x00c0, 0x203: 0x00c0, 0x204: 0x0080, 0x205: 0x0080, + 0x206: 0x0080, 0x207: 0x0080, 0x208: 0x0080, 0x209: 0x0080, 0x20a: 0x0080, 0x20b: 0x0080, + 0x20c: 0x0080, 0x20d: 0x00c0, 0x20e: 0x00c0, 0x20f: 0x00c0, 0x210: 0x00c0, 0x211: 0x00c0, + 0x212: 0x00c0, 0x213: 0x00c0, 0x214: 0x00c0, 0x215: 0x00c0, 0x216: 0x00c0, 0x217: 0x00c0, + 0x218: 0x00c0, 0x219: 0x00c0, 0x21a: 0x00c0, 0x21b: 0x00c0, 0x21c: 0x00c0, 0x21d: 0x00c0, + 0x21e: 0x00c0, 0x21f: 0x00c0, 0x220: 0x00c0, 0x221: 0x00c0, 0x222: 0x00c0, 0x223: 0x00c0, + 0x224: 0x00c0, 0x225: 0x00c0, 0x226: 0x00c0, 0x227: 0x00c0, 0x228: 0x00c0, 0x229: 0x00c0, + 0x22a: 0x00c0, 0x22b: 0x00c0, 0x22c: 0x00c0, 0x22d: 0x00c0, 0x22e: 0x00c0, 0x22f: 0x00c0, + 0x230: 0x00c0, 0x231: 0x0080, 0x232: 0x0080, 0x233: 0x0080, 0x234: 0x00c0, 0x235: 0x00c0, + 0x236: 0x00c0, 0x237: 0x00c0, 0x238: 0x00c0, 0x239: 0x00c0, 0x23a: 0x00c0, 0x23b: 0x00c0, + 0x23c: 0x00c0, 0x23d: 0x00c0, 0x23e: 0x00c0, 0x23f: 0x00c0, + // Block 0x9, offset 0x240 + 0x240: 0x00c0, 0x241: 0x00c0, 0x242: 0x00c0, 0x243: 0x00c0, 0x244: 0x00c0, 0x245: 0x00c0, + 0x246: 0x00c0, 0x247: 0x00c0, 0x248: 0x00c0, 0x249: 0x00c0, 0x24a: 0x00c0, 0x24b: 0x00c0, + 0x24c: 0x00c0, 0x24d: 0x00c0, 0x24e: 0x00c0, 0x24f: 0x00c0, 0x250: 0x00c0, 0x251: 0x00c0, + 0x252: 0x00c0, 0x253: 0x00c0, 0x254: 0x00c0, 0x255: 0x00c0, 0x256: 0x00c0, 0x257: 0x00c0, + 0x258: 0x00c0, 0x259: 0x00c0, 0x25a: 0x00c0, 0x25b: 0x00c0, 0x25c: 0x00c0, 0x25d: 0x00c0, + 0x25e: 0x00c0, 0x25f: 0x00c0, 0x260: 0x00c0, 0x261: 0x00c0, 0x262: 0x00c0, 0x263: 0x00c0, + 0x264: 0x00c0, 0x265: 0x00c0, 0x266: 0x00c0, 0x267: 0x00c0, 0x268: 0x00c0, 0x269: 0x00c0, + 0x26a: 0x00c0, 0x26b: 0x00c0, 0x26c: 0x00c0, 0x26d: 0x00c0, 0x26e: 0x00c0, 0x26f: 0x00c0, + 0x270: 0x0080, 0x271: 0x0080, 0x272: 0x0080, 0x273: 0x0080, 0x274: 0x0080, 0x275: 0x0080, + 0x276: 0x0080, 0x277: 0x0080, 0x278: 0x0080, 0x279: 0x00c0, 0x27a: 0x00c0, 0x27b: 0x00c0, + 0x27c: 0x00c0, 0x27d: 0x00c0, 0x27e: 0x00c0, 0x27f: 0x00c0, + // Block 0xa, offset 0x280 + 0x280: 0x00c0, 0x281: 0x00c0, 0x282: 0x0080, 0x283: 0x0080, 0x284: 0x0080, 0x285: 0x0080, + 0x286: 0x00c0, 0x287: 0x00c0, 0x288: 0x00c0, 0x289: 0x00c0, 0x28a: 0x00c0, 0x28b: 0x00c0, + 0x28c: 0x00c0, 0x28d: 0x00c0, 0x28e: 0x00c0, 0x28f: 0x00c0, 0x290: 0x00c0, 0x291: 0x00c0, + 0x292: 0x0080, 0x293: 0x0080, 0x294: 0x0080, 0x295: 0x0080, 0x296: 0x0080, 0x297: 0x0080, + 0x298: 0x0080, 0x299: 0x0080, 0x29a: 0x0080, 0x29b: 0x0080, 0x29c: 0x0080, 0x29d: 0x0080, + 0x29e: 0x0080, 0x29f: 0x0080, 0x2a0: 0x0080, 0x2a1: 0x0080, 0x2a2: 0x0080, 0x2a3: 0x0080, + 0x2a4: 0x0080, 0x2a5: 0x0080, 0x2a6: 0x0080, 0x2a7: 0x0080, 0x2a8: 0x0080, 0x2a9: 0x0080, + 0x2aa: 0x0080, 0x2ab: 0x0080, 0x2ac: 0x00c0, 0x2ad: 0x0080, 0x2ae: 0x00c0, 0x2af: 0x0080, + 0x2b0: 0x0080, 0x2b1: 0x0080, 0x2b2: 0x0080, 0x2b3: 0x0080, 0x2b4: 0x0080, 0x2b5: 0x0080, + 0x2b6: 0x0080, 0x2b7: 0x0080, 0x2b8: 0x0080, 0x2b9: 0x0080, 0x2ba: 0x0080, 0x2bb: 0x0080, + 0x2bc: 0x0080, 0x2bd: 0x0080, 0x2be: 0x0080, 0x2bf: 0x0080, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x00c3, 0x2c1: 0x00c3, 0x2c2: 0x00c3, 0x2c3: 0x00c3, 0x2c4: 0x00c3, 0x2c5: 0x00c3, + 0x2c6: 0x00c3, 0x2c7: 0x00c3, 0x2c8: 0x00c3, 0x2c9: 0x00c3, 0x2ca: 0x00c3, 0x2cb: 0x00c3, + 0x2cc: 0x00c3, 0x2cd: 0x00c3, 0x2ce: 0x00c3, 0x2cf: 0x00c3, 0x2d0: 0x00c3, 0x2d1: 0x00c3, + 0x2d2: 0x00c3, 0x2d3: 0x00c3, 0x2d4: 0x00c3, 0x2d5: 0x00c3, 0x2d6: 0x00c3, 0x2d7: 0x00c3, + 0x2d8: 
0x00c3, 0x2d9: 0x00c3, 0x2da: 0x00c3, 0x2db: 0x00c3, 0x2dc: 0x00c3, 0x2dd: 0x00c3, + 0x2de: 0x00c3, 0x2df: 0x00c3, 0x2e0: 0x00c3, 0x2e1: 0x00c3, 0x2e2: 0x00c3, 0x2e3: 0x00c3, + 0x2e4: 0x00c3, 0x2e5: 0x00c3, 0x2e6: 0x00c3, 0x2e7: 0x00c3, 0x2e8: 0x00c3, 0x2e9: 0x00c3, + 0x2ea: 0x00c3, 0x2eb: 0x00c3, 0x2ec: 0x00c3, 0x2ed: 0x00c3, 0x2ee: 0x00c3, 0x2ef: 0x00c3, + 0x2f0: 0x00c3, 0x2f1: 0x00c3, 0x2f2: 0x00c3, 0x2f3: 0x00c3, 0x2f4: 0x00c3, 0x2f5: 0x00c3, + 0x2f6: 0x00c3, 0x2f7: 0x00c3, 0x2f8: 0x00c3, 0x2f9: 0x00c3, 0x2fa: 0x00c3, 0x2fb: 0x00c3, + 0x2fc: 0x00c3, 0x2fd: 0x00c3, 0x2fe: 0x00c3, 0x2ff: 0x00c3, + // Block 0xc, offset 0x300 + 0x300: 0x0083, 0x301: 0x0083, 0x302: 0x00c3, 0x303: 0x0083, 0x304: 0x0083, 0x305: 0x00c3, + 0x306: 0x00c3, 0x307: 0x00c3, 0x308: 0x00c3, 0x309: 0x00c3, 0x30a: 0x00c3, 0x30b: 0x00c3, + 0x30c: 0x00c3, 0x30d: 0x00c3, 0x30e: 0x00c3, 0x30f: 0x0040, 0x310: 0x00c3, 0x311: 0x00c3, + 0x312: 0x00c3, 0x313: 0x00c3, 0x314: 0x00c3, 0x315: 0x00c3, 0x316: 0x00c3, 0x317: 0x00c3, + 0x318: 0x00c3, 0x319: 0x00c3, 0x31a: 0x00c3, 0x31b: 0x00c3, 0x31c: 0x00c3, 0x31d: 0x00c3, + 0x31e: 0x00c3, 0x31f: 0x00c3, 0x320: 0x00c3, 0x321: 0x00c3, 0x322: 0x00c3, 0x323: 0x00c3, + 0x324: 0x00c3, 0x325: 0x00c3, 0x326: 0x00c3, 0x327: 0x00c3, 0x328: 0x00c3, 0x329: 0x00c3, + 0x32a: 0x00c3, 0x32b: 0x00c3, 0x32c: 0x00c3, 0x32d: 0x00c3, 0x32e: 0x00c3, 0x32f: 0x00c3, + 0x330: 0x00c8, 0x331: 0x00c8, 0x332: 0x00c8, 0x333: 0x00c8, 0x334: 0x0080, 0x335: 0x0050, + 0x336: 0x00c8, 0x337: 0x00c8, 0x33a: 0x0088, 0x33b: 0x00c8, + 0x33c: 0x00c8, 0x33d: 0x00c8, 0x33e: 0x0080, 0x33f: 0x00c8, + // Block 0xd, offset 0x340 + 0x344: 0x0088, 0x345: 0x0080, + 0x346: 0x00c8, 0x347: 0x0080, 0x348: 0x00c8, 0x349: 0x00c8, 0x34a: 0x00c8, + 0x34c: 0x00c8, 0x34e: 0x00c8, 0x34f: 0x00c8, 0x350: 0x00c8, 0x351: 0x00c8, + 0x352: 0x00c8, 0x353: 0x00c8, 0x354: 0x00c8, 0x355: 0x00c8, 0x356: 0x00c8, 0x357: 0x00c8, + 0x358: 0x00c8, 0x359: 0x00c8, 0x35a: 0x00c8, 0x35b: 0x00c8, 0x35c: 0x00c8, 0x35d: 0x00c8, + 0x35e: 0x00c8, 0x35f: 0x00c8, 0x360: 0x00c8, 0x361: 0x00c8, 0x363: 0x00c8, + 0x364: 0x00c8, 0x365: 0x00c8, 0x366: 0x00c8, 0x367: 0x00c8, 0x368: 0x00c8, 0x369: 0x00c8, + 0x36a: 0x00c8, 0x36b: 0x00c8, 0x36c: 0x00c8, 0x36d: 0x00c8, 0x36e: 0x00c8, 0x36f: 0x00c8, + 0x370: 0x00c8, 0x371: 0x00c8, 0x372: 0x00c8, 0x373: 0x00c8, 0x374: 0x00c8, 0x375: 0x00c8, + 0x376: 0x00c8, 0x377: 0x00c8, 0x378: 0x00c8, 0x379: 0x00c8, 0x37a: 0x00c8, 0x37b: 0x00c8, + 0x37c: 0x00c8, 0x37d: 0x00c8, 0x37e: 0x00c8, 0x37f: 0x00c8, + // Block 0xe, offset 0x380 + 0x380: 0x00c8, 0x381: 0x00c8, 0x382: 0x00c8, 0x383: 0x00c8, 0x384: 0x00c8, 0x385: 0x00c8, + 0x386: 0x00c8, 0x387: 0x00c8, 0x388: 0x00c8, 0x389: 0x00c8, 0x38a: 0x00c8, 0x38b: 0x00c8, + 0x38c: 0x00c8, 0x38d: 0x00c8, 0x38e: 0x00c8, 0x38f: 0x00c8, 0x390: 0x0088, 0x391: 0x0088, + 0x392: 0x0088, 0x393: 0x0088, 0x394: 0x0088, 0x395: 0x0088, 0x396: 0x0088, 0x397: 0x00c8, + 0x398: 0x00c8, 0x399: 0x00c8, 0x39a: 0x00c8, 0x39b: 0x00c8, 0x39c: 0x00c8, 0x39d: 0x00c8, + 0x39e: 0x00c8, 0x39f: 0x00c8, 0x3a0: 0x00c8, 0x3a1: 0x00c8, 0x3a2: 0x00c0, 0x3a3: 0x00c0, + 0x3a4: 0x00c0, 0x3a5: 0x00c0, 0x3a6: 0x00c0, 0x3a7: 0x00c0, 0x3a8: 0x00c0, 0x3a9: 0x00c0, + 0x3aa: 0x00c0, 0x3ab: 0x00c0, 0x3ac: 0x00c0, 0x3ad: 0x00c0, 0x3ae: 0x00c0, 0x3af: 0x00c0, + 0x3b0: 0x0088, 0x3b1: 0x0088, 0x3b2: 0x0088, 0x3b3: 0x00c8, 0x3b4: 0x0088, 0x3b5: 0x0088, + 0x3b6: 0x0088, 0x3b7: 0x00c8, 0x3b8: 0x00c8, 0x3b9: 0x0088, 0x3ba: 0x00c8, 0x3bb: 0x00c8, + 0x3bc: 0x00c8, 0x3bd: 0x00c8, 0x3be: 0x00c8, 0x3bf: 0x00c8, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x00c0, 
0x3c1: 0x00c0, 0x3c2: 0x0080, 0x3c3: 0x00c3, 0x3c4: 0x00c3, 0x3c5: 0x00c3, + 0x3c6: 0x00c3, 0x3c7: 0x00c3, 0x3c8: 0x0083, 0x3c9: 0x0083, 0x3ca: 0x00c0, 0x3cb: 0x00c0, + 0x3cc: 0x00c0, 0x3cd: 0x00c0, 0x3ce: 0x00c0, 0x3cf: 0x00c0, 0x3d0: 0x00c0, 0x3d1: 0x00c0, + 0x3d2: 0x00c0, 0x3d3: 0x00c0, 0x3d4: 0x00c0, 0x3d5: 0x00c0, 0x3d6: 0x00c0, 0x3d7: 0x00c0, + 0x3d8: 0x00c0, 0x3d9: 0x00c0, 0x3da: 0x00c0, 0x3db: 0x00c0, 0x3dc: 0x00c0, 0x3dd: 0x00c0, + 0x3de: 0x00c0, 0x3df: 0x00c0, 0x3e0: 0x00c0, 0x3e1: 0x00c0, 0x3e2: 0x00c0, 0x3e3: 0x00c0, + 0x3e4: 0x00c0, 0x3e5: 0x00c0, 0x3e6: 0x00c0, 0x3e7: 0x00c0, 0x3e8: 0x00c0, 0x3e9: 0x00c0, + 0x3ea: 0x00c0, 0x3eb: 0x00c0, 0x3ec: 0x00c0, 0x3ed: 0x00c0, 0x3ee: 0x00c0, 0x3ef: 0x00c0, + 0x3f0: 0x00c0, 0x3f1: 0x00c0, 0x3f2: 0x00c0, 0x3f3: 0x00c0, 0x3f4: 0x00c0, 0x3f5: 0x00c0, + 0x3f6: 0x00c0, 0x3f7: 0x00c0, 0x3f8: 0x00c0, 0x3f9: 0x00c0, 0x3fa: 0x00c0, 0x3fb: 0x00c0, + 0x3fc: 0x00c0, 0x3fd: 0x00c0, 0x3fe: 0x00c0, 0x3ff: 0x00c0, + // Block 0x10, offset 0x400 + 0x400: 0x00c0, 0x401: 0x00c0, 0x402: 0x00c0, 0x403: 0x00c0, 0x404: 0x00c0, 0x405: 0x00c0, + 0x406: 0x00c0, 0x407: 0x00c0, 0x408: 0x00c0, 0x409: 0x00c0, 0x40a: 0x00c0, 0x40b: 0x00c0, + 0x40c: 0x00c0, 0x40d: 0x00c0, 0x40e: 0x00c0, 0x40f: 0x00c0, 0x410: 0x00c0, 0x411: 0x00c0, + 0x412: 0x00c0, 0x413: 0x00c0, 0x414: 0x00c0, 0x415: 0x00c0, 0x416: 0x00c0, 0x417: 0x00c0, + 0x418: 0x00c0, 0x419: 0x00c0, 0x41a: 0x00c0, 0x41b: 0x00c0, 0x41c: 0x00c0, 0x41d: 0x00c0, + 0x41e: 0x00c0, 0x41f: 0x00c0, 0x420: 0x00c0, 0x421: 0x00c0, 0x422: 0x00c0, 0x423: 0x00c0, + 0x424: 0x00c0, 0x425: 0x00c0, 0x426: 0x00c0, 0x427: 0x00c0, 0x428: 0x00c0, 0x429: 0x00c0, + 0x42a: 0x00c0, 0x42b: 0x00c0, 0x42c: 0x00c0, 0x42d: 0x00c0, 0x42e: 0x00c0, 0x42f: 0x00c0, + 0x431: 0x00c0, 0x432: 0x00c0, 0x433: 0x00c0, 0x434: 0x00c0, 0x435: 0x00c0, + 0x436: 0x00c0, 0x437: 0x00c0, 0x438: 0x00c0, 0x439: 0x00c0, 0x43a: 0x00c0, 0x43b: 0x00c0, + 0x43c: 0x00c0, 0x43d: 0x00c0, 0x43e: 0x00c0, 0x43f: 0x00c0, + // Block 0x11, offset 0x440 + 0x440: 0x00c0, 0x441: 0x00c0, 0x442: 0x00c0, 0x443: 0x00c0, 0x444: 0x00c0, 0x445: 0x00c0, + 0x446: 0x00c0, 0x447: 0x00c0, 0x448: 0x00c0, 0x449: 0x00c0, 0x44a: 0x00c0, 0x44b: 0x00c0, + 0x44c: 0x00c0, 0x44d: 0x00c0, 0x44e: 0x00c0, 0x44f: 0x00c0, 0x450: 0x00c0, 0x451: 0x00c0, + 0x452: 0x00c0, 0x453: 0x00c0, 0x454: 0x00c0, 0x455: 0x00c0, 0x456: 0x00c0, + 0x459: 0x00c0, 0x45a: 0x0080, 0x45b: 0x0080, 0x45c: 0x0080, 0x45d: 0x0080, + 0x45e: 0x0080, 0x45f: 0x0080, 0x461: 0x00c0, 0x462: 0x00c0, 0x463: 0x00c0, + 0x464: 0x00c0, 0x465: 0x00c0, 0x466: 0x00c0, 0x467: 0x00c0, 0x468: 0x00c0, 0x469: 0x00c0, + 0x46a: 0x00c0, 0x46b: 0x00c0, 0x46c: 0x00c0, 0x46d: 0x00c0, 0x46e: 0x00c0, 0x46f: 0x00c0, + 0x470: 0x00c0, 0x471: 0x00c0, 0x472: 0x00c0, 0x473: 0x00c0, 0x474: 0x00c0, 0x475: 0x00c0, + 0x476: 0x00c0, 0x477: 0x00c0, 0x478: 0x00c0, 0x479: 0x00c0, 0x47a: 0x00c0, 0x47b: 0x00c0, + 0x47c: 0x00c0, 0x47d: 0x00c0, 0x47e: 0x00c0, 0x47f: 0x00c0, + // Block 0x12, offset 0x480 + 0x480: 0x00c0, 0x481: 0x00c0, 0x482: 0x00c0, 0x483: 0x00c0, 0x484: 0x00c0, 0x485: 0x00c0, + 0x486: 0x00c0, 0x487: 0x0080, 0x489: 0x0080, 0x48a: 0x0080, + 0x48d: 0x0080, 0x48e: 0x0080, 0x48f: 0x0080, 0x491: 0x00cb, + 0x492: 0x00cb, 0x493: 0x00cb, 0x494: 0x00cb, 0x495: 0x00cb, 0x496: 0x00cb, 0x497: 0x00cb, + 0x498: 0x00cb, 0x499: 0x00cb, 0x49a: 0x00cb, 0x49b: 0x00cb, 0x49c: 0x00cb, 0x49d: 0x00cb, + 0x49e: 0x00cb, 0x49f: 0x00cb, 0x4a0: 0x00cb, 0x4a1: 0x00cb, 0x4a2: 0x00cb, 0x4a3: 0x00cb, + 0x4a4: 0x00cb, 0x4a5: 0x00cb, 0x4a6: 0x00cb, 0x4a7: 0x00cb, 0x4a8: 0x00cb, 0x4a9: 0x00cb, + 0x4aa: 
0x00cb, 0x4ab: 0x00cb, 0x4ac: 0x00cb, 0x4ad: 0x00cb, 0x4ae: 0x00cb, 0x4af: 0x00cb, + 0x4b0: 0x00cb, 0x4b1: 0x00cb, 0x4b2: 0x00cb, 0x4b3: 0x00cb, 0x4b4: 0x00cb, 0x4b5: 0x00cb, + 0x4b6: 0x00cb, 0x4b7: 0x00cb, 0x4b8: 0x00cb, 0x4b9: 0x00cb, 0x4ba: 0x00cb, 0x4bb: 0x00cb, + 0x4bc: 0x00cb, 0x4bd: 0x00cb, 0x4be: 0x008a, 0x4bf: 0x00cb, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x008a, 0x4c1: 0x00cb, 0x4c2: 0x00cb, 0x4c3: 0x008a, 0x4c4: 0x00cb, 0x4c5: 0x00cb, + 0x4c6: 0x008a, 0x4c7: 0x00cb, + 0x4d0: 0x00ca, 0x4d1: 0x00ca, + 0x4d2: 0x00ca, 0x4d3: 0x00ca, 0x4d4: 0x00ca, 0x4d5: 0x00ca, 0x4d6: 0x00ca, 0x4d7: 0x00ca, + 0x4d8: 0x00ca, 0x4d9: 0x00ca, 0x4da: 0x00ca, 0x4db: 0x00ca, 0x4dc: 0x00ca, 0x4dd: 0x00ca, + 0x4de: 0x00ca, 0x4df: 0x00ca, 0x4e0: 0x00ca, 0x4e1: 0x00ca, 0x4e2: 0x00ca, 0x4e3: 0x00ca, + 0x4e4: 0x00ca, 0x4e5: 0x00ca, 0x4e6: 0x00ca, 0x4e7: 0x00ca, 0x4e8: 0x00ca, 0x4e9: 0x00ca, + 0x4ea: 0x00ca, + 0x4f0: 0x00ca, 0x4f1: 0x00ca, 0x4f2: 0x00ca, 0x4f3: 0x0051, 0x4f4: 0x0051, + // Block 0x14, offset 0x500 + 0x500: 0x0040, 0x501: 0x0040, 0x502: 0x0040, 0x503: 0x0040, 0x504: 0x0040, 0x505: 0x0040, + 0x506: 0x0080, 0x507: 0x0080, 0x508: 0x0080, 0x509: 0x0080, 0x50a: 0x0080, 0x50b: 0x0080, + 0x50c: 0x0080, 0x50d: 0x0080, 0x50e: 0x0080, 0x50f: 0x0080, 0x510: 0x00c3, 0x511: 0x00c3, + 0x512: 0x00c3, 0x513: 0x00c3, 0x514: 0x00c3, 0x515: 0x00c3, 0x516: 0x00c3, 0x517: 0x00c3, + 0x518: 0x00c3, 0x519: 0x00c3, 0x51a: 0x00c3, 0x51b: 0x0080, 0x51c: 0x0040, + 0x51e: 0x0080, 0x51f: 0x0080, 0x520: 0x00c2, 0x521: 0x00c0, 0x522: 0x00c4, 0x523: 0x00c4, + 0x524: 0x00c4, 0x525: 0x00c4, 0x526: 0x00c2, 0x527: 0x00c4, 0x528: 0x00c2, 0x529: 0x00c4, + 0x52a: 0x00c2, 0x52b: 0x00c2, 0x52c: 0x00c2, 0x52d: 0x00c2, 0x52e: 0x00c2, 0x52f: 0x00c4, + 0x530: 0x00c4, 0x531: 0x00c4, 0x532: 0x00c4, 0x533: 0x00c2, 0x534: 0x00c2, 0x535: 0x00c2, + 0x536: 0x00c2, 0x537: 0x00c2, 0x538: 0x00c2, 0x539: 0x00c2, 0x53a: 0x00c2, 0x53b: 0x00c2, + 0x53c: 0x00c2, 0x53d: 0x00c2, 0x53e: 0x00c2, 0x53f: 0x00c2, + // Block 0x15, offset 0x540 + 0x540: 0x0040, 0x541: 0x00c2, 0x542: 0x00c2, 0x543: 0x00c2, 0x544: 0x00c2, 0x545: 0x00c2, + 0x546: 0x00c2, 0x547: 0x00c2, 0x548: 0x00c4, 0x549: 0x00c2, 0x54a: 0x00c2, 0x54b: 0x00c3, + 0x54c: 0x00c3, 0x54d: 0x00c3, 0x54e: 0x00c3, 0x54f: 0x00c3, 0x550: 0x00c3, 0x551: 0x00c3, + 0x552: 0x00c3, 0x553: 0x00c3, 0x554: 0x00c3, 0x555: 0x00c3, 0x556: 0x00c3, 0x557: 0x00c3, + 0x558: 0x00c3, 0x559: 0x00c3, 0x55a: 0x00c3, 0x55b: 0x00c3, 0x55c: 0x00c3, 0x55d: 0x00c3, + 0x55e: 0x00c3, 0x55f: 0x00c3, 0x560: 0x0053, 0x561: 0x0053, 0x562: 0x0053, 0x563: 0x0053, + 0x564: 0x0053, 0x565: 0x0053, 0x566: 0x0053, 0x567: 0x0053, 0x568: 0x0053, 0x569: 0x0053, + 0x56a: 0x0080, 0x56b: 0x0080, 0x56c: 0x0080, 0x56d: 0x0080, 0x56e: 0x00c2, 0x56f: 0x00c2, + 0x570: 0x00c3, 0x571: 0x00c4, 0x572: 0x00c4, 0x573: 0x00c4, 0x574: 0x00c0, 0x575: 0x0084, + 0x576: 0x0084, 0x577: 0x0084, 0x578: 0x0082, 0x579: 0x00c2, 0x57a: 0x00c2, 0x57b: 0x00c2, + 0x57c: 0x00c2, 0x57d: 0x00c2, 0x57e: 0x00c2, 0x57f: 0x00c2, + // Block 0x16, offset 0x580 + 0x580: 0x00c2, 0x581: 0x00c2, 0x582: 0x00c2, 0x583: 0x00c2, 0x584: 0x00c2, 0x585: 0x00c2, + 0x586: 0x00c2, 0x587: 0x00c2, 0x588: 0x00c4, 0x589: 0x00c4, 0x58a: 0x00c4, 0x58b: 0x00c4, + 0x58c: 0x00c4, 0x58d: 0x00c4, 0x58e: 0x00c4, 0x58f: 0x00c4, 0x590: 0x00c4, 0x591: 0x00c4, + 0x592: 0x00c4, 0x593: 0x00c4, 0x594: 0x00c4, 0x595: 0x00c4, 0x596: 0x00c4, 0x597: 0x00c4, + 0x598: 0x00c4, 0x599: 0x00c4, 0x59a: 0x00c2, 0x59b: 0x00c2, 0x59c: 0x00c2, 0x59d: 0x00c2, + 0x59e: 0x00c2, 0x59f: 0x00c2, 0x5a0: 0x00c2, 0x5a1: 0x00c2, 0x5a2: 0x00c2, 
0x5a3: 0x00c2, + 0x5a4: 0x00c2, 0x5a5: 0x00c2, 0x5a6: 0x00c2, 0x5a7: 0x00c2, 0x5a8: 0x00c2, 0x5a9: 0x00c2, + 0x5aa: 0x00c2, 0x5ab: 0x00c2, 0x5ac: 0x00c2, 0x5ad: 0x00c2, 0x5ae: 0x00c2, 0x5af: 0x00c2, + 0x5b0: 0x00c2, 0x5b1: 0x00c2, 0x5b2: 0x00c2, 0x5b3: 0x00c2, 0x5b4: 0x00c2, 0x5b5: 0x00c2, + 0x5b6: 0x00c2, 0x5b7: 0x00c2, 0x5b8: 0x00c2, 0x5b9: 0x00c2, 0x5ba: 0x00c2, 0x5bb: 0x00c2, + 0x5bc: 0x00c2, 0x5bd: 0x00c2, 0x5be: 0x00c2, 0x5bf: 0x00c2, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x00c4, 0x5c1: 0x00c2, 0x5c2: 0x00c2, 0x5c3: 0x00c4, 0x5c4: 0x00c4, 0x5c5: 0x00c4, + 0x5c6: 0x00c4, 0x5c7: 0x00c4, 0x5c8: 0x00c4, 0x5c9: 0x00c4, 0x5ca: 0x00c4, 0x5cb: 0x00c4, + 0x5cc: 0x00c2, 0x5cd: 0x00c4, 0x5ce: 0x00c2, 0x5cf: 0x00c4, 0x5d0: 0x00c2, 0x5d1: 0x00c2, + 0x5d2: 0x00c4, 0x5d3: 0x00c4, 0x5d4: 0x0080, 0x5d5: 0x00c4, 0x5d6: 0x00c3, 0x5d7: 0x00c3, + 0x5d8: 0x00c3, 0x5d9: 0x00c3, 0x5da: 0x00c3, 0x5db: 0x00c3, 0x5dc: 0x00c3, 0x5dd: 0x0040, + 0x5de: 0x0080, 0x5df: 0x00c3, 0x5e0: 0x00c3, 0x5e1: 0x00c3, 0x5e2: 0x00c3, 0x5e3: 0x00c3, + 0x5e4: 0x00c3, 0x5e5: 0x00c0, 0x5e6: 0x00c0, 0x5e7: 0x00c3, 0x5e8: 0x00c3, 0x5e9: 0x0080, + 0x5ea: 0x00c3, 0x5eb: 0x00c3, 0x5ec: 0x00c3, 0x5ed: 0x00c3, 0x5ee: 0x00c4, 0x5ef: 0x00c4, + 0x5f0: 0x0054, 0x5f1: 0x0054, 0x5f2: 0x0054, 0x5f3: 0x0054, 0x5f4: 0x0054, 0x5f5: 0x0054, + 0x5f6: 0x0054, 0x5f7: 0x0054, 0x5f8: 0x0054, 0x5f9: 0x0054, 0x5fa: 0x00c2, 0x5fb: 0x00c2, + 0x5fc: 0x00c2, 0x5fd: 0x00c0, 0x5fe: 0x00c0, 0x5ff: 0x00c2, + // Block 0x18, offset 0x600 + 0x600: 0x0080, 0x601: 0x0080, 0x602: 0x0080, 0x603: 0x0080, 0x604: 0x0080, 0x605: 0x0080, + 0x606: 0x0080, 0x607: 0x0080, 0x608: 0x0080, 0x609: 0x0080, 0x60a: 0x0080, 0x60b: 0x0080, + 0x60c: 0x0080, 0x60d: 0x0080, 0x60f: 0x0040, 0x610: 0x00c4, 0x611: 0x00c3, + 0x612: 0x00c2, 0x613: 0x00c2, 0x614: 0x00c2, 0x615: 0x00c4, 0x616: 0x00c4, 0x617: 0x00c4, + 0x618: 0x00c4, 0x619: 0x00c4, 0x61a: 0x00c2, 0x61b: 0x00c2, 0x61c: 0x00c2, 0x61d: 0x00c2, + 0x61e: 0x00c4, 0x61f: 0x00c2, 0x620: 0x00c2, 0x621: 0x00c2, 0x622: 0x00c2, 0x623: 0x00c2, + 0x624: 0x00c2, 0x625: 0x00c2, 0x626: 0x00c2, 0x627: 0x00c2, 0x628: 0x00c4, 0x629: 0x00c2, + 0x62a: 0x00c4, 0x62b: 0x00c2, 0x62c: 0x00c4, 0x62d: 0x00c2, 0x62e: 0x00c2, 0x62f: 0x00c4, + 0x630: 0x00c3, 0x631: 0x00c3, 0x632: 0x00c3, 0x633: 0x00c3, 0x634: 0x00c3, 0x635: 0x00c3, + 0x636: 0x00c3, 0x637: 0x00c3, 0x638: 0x00c3, 0x639: 0x00c3, 0x63a: 0x00c3, 0x63b: 0x00c3, + 0x63c: 0x00c3, 0x63d: 0x00c3, 0x63e: 0x00c3, 0x63f: 0x00c3, + // Block 0x19, offset 0x640 + 0x640: 0x00c3, 0x641: 0x00c3, 0x642: 0x00c3, 0x643: 0x00c3, 0x644: 0x00c3, 0x645: 0x00c3, + 0x646: 0x00c3, 0x647: 0x00c3, 0x648: 0x00c3, 0x649: 0x00c3, 0x64a: 0x00c3, + 0x64d: 0x00c4, 0x64e: 0x00c2, 0x64f: 0x00c2, 0x650: 0x00c2, 0x651: 0x00c2, + 0x652: 0x00c2, 0x653: 0x00c2, 0x654: 0x00c2, 0x655: 0x00c2, 0x656: 0x00c2, 0x657: 0x00c2, + 0x658: 0x00c2, 0x659: 0x00c4, 0x65a: 0x00c4, 0x65b: 0x00c4, 0x65c: 0x00c2, 0x65d: 0x00c2, + 0x65e: 0x00c2, 0x65f: 0x00c2, 0x660: 0x00c2, 0x661: 0x00c2, 0x662: 0x00c2, 0x663: 0x00c2, + 0x664: 0x00c2, 0x665: 0x00c2, 0x666: 0x00c2, 0x667: 0x00c2, 0x668: 0x00c2, 0x669: 0x00c2, + 0x66a: 0x00c2, 0x66b: 0x00c4, 0x66c: 0x00c4, 0x66d: 0x00c2, 0x66e: 0x00c2, 0x66f: 0x00c2, + 0x670: 0x00c2, 0x671: 0x00c4, 0x672: 0x00c2, 0x673: 0x00c4, 0x674: 0x00c4, 0x675: 0x00c2, + 0x676: 0x00c2, 0x677: 0x00c2, 0x678: 0x00c4, 0x679: 0x00c4, 0x67a: 0x00c2, 0x67b: 0x00c2, + 0x67c: 0x00c2, 0x67d: 0x00c2, 0x67e: 0x00c2, 0x67f: 0x00c2, + // Block 0x1a, offset 0x680 + 0x680: 0x00c0, 0x681: 0x00c0, 0x682: 0x00c0, 0x683: 0x00c0, 0x684: 0x00c0, 0x685: 
0x00c0, + 0x686: 0x00c0, 0x687: 0x00c0, 0x688: 0x00c0, 0x689: 0x00c0, 0x68a: 0x00c0, 0x68b: 0x00c0, + 0x68c: 0x00c0, 0x68d: 0x00c0, 0x68e: 0x00c0, 0x68f: 0x00c0, 0x690: 0x00c0, 0x691: 0x00c0, + 0x692: 0x00c0, 0x693: 0x00c0, 0x694: 0x00c0, 0x695: 0x00c0, 0x696: 0x00c0, 0x697: 0x00c0, + 0x698: 0x00c0, 0x699: 0x00c0, 0x69a: 0x00c0, 0x69b: 0x00c0, 0x69c: 0x00c0, 0x69d: 0x00c0, + 0x69e: 0x00c0, 0x69f: 0x00c0, 0x6a0: 0x00c0, 0x6a1: 0x00c0, 0x6a2: 0x00c0, 0x6a3: 0x00c0, + 0x6a4: 0x00c0, 0x6a5: 0x00c0, 0x6a6: 0x00c3, 0x6a7: 0x00c3, 0x6a8: 0x00c3, 0x6a9: 0x00c3, + 0x6aa: 0x00c3, 0x6ab: 0x00c3, 0x6ac: 0x00c3, 0x6ad: 0x00c3, 0x6ae: 0x00c3, 0x6af: 0x00c3, + 0x6b0: 0x00c3, 0x6b1: 0x00c0, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x00c0, 0x6c1: 0x00c0, 0x6c2: 0x00c0, 0x6c3: 0x00c0, 0x6c4: 0x00c0, 0x6c5: 0x00c0, + 0x6c6: 0x00c0, 0x6c7: 0x00c0, 0x6c8: 0x00c0, 0x6c9: 0x00c0, 0x6ca: 0x00c2, 0x6cb: 0x00c2, + 0x6cc: 0x00c2, 0x6cd: 0x00c2, 0x6ce: 0x00c2, 0x6cf: 0x00c2, 0x6d0: 0x00c2, 0x6d1: 0x00c2, + 0x6d2: 0x00c2, 0x6d3: 0x00c2, 0x6d4: 0x00c2, 0x6d5: 0x00c2, 0x6d6: 0x00c2, 0x6d7: 0x00c2, + 0x6d8: 0x00c2, 0x6d9: 0x00c2, 0x6da: 0x00c2, 0x6db: 0x00c2, 0x6dc: 0x00c2, 0x6dd: 0x00c2, + 0x6de: 0x00c2, 0x6df: 0x00c2, 0x6e0: 0x00c2, 0x6e1: 0x00c2, 0x6e2: 0x00c2, 0x6e3: 0x00c2, + 0x6e4: 0x00c2, 0x6e5: 0x00c2, 0x6e6: 0x00c2, 0x6e7: 0x00c2, 0x6e8: 0x00c2, 0x6e9: 0x00c2, + 0x6ea: 0x00c2, 0x6eb: 0x00c3, 0x6ec: 0x00c3, 0x6ed: 0x00c3, 0x6ee: 0x00c3, 0x6ef: 0x00c3, + 0x6f0: 0x00c3, 0x6f1: 0x00c3, 0x6f2: 0x00c3, 0x6f3: 0x00c3, 0x6f4: 0x00c0, 0x6f5: 0x00c0, + 0x6f6: 0x0080, 0x6f7: 0x0080, 0x6f8: 0x0080, 0x6f9: 0x0080, 0x6fa: 0x0040, + // Block 0x1c, offset 0x700 + 0x700: 0x00c0, 0x701: 0x00c0, 0x702: 0x00c0, 0x703: 0x00c0, 0x704: 0x00c0, 0x705: 0x00c0, + 0x706: 0x00c0, 0x707: 0x00c0, 0x708: 0x00c0, 0x709: 0x00c0, 0x70a: 0x00c0, 0x70b: 0x00c0, + 0x70c: 0x00c0, 0x70d: 0x00c0, 0x70e: 0x00c0, 0x70f: 0x00c0, 0x710: 0x00c0, 0x711: 0x00c0, + 0x712: 0x00c0, 0x713: 0x00c0, 0x714: 0x00c0, 0x715: 0x00c0, 0x716: 0x00c3, 0x717: 0x00c3, + 0x718: 0x00c3, 0x719: 0x00c3, 0x71a: 0x00c0, 0x71b: 0x00c3, 0x71c: 0x00c3, 0x71d: 0x00c3, + 0x71e: 0x00c3, 0x71f: 0x00c3, 0x720: 0x00c3, 0x721: 0x00c3, 0x722: 0x00c3, 0x723: 0x00c3, + 0x724: 0x00c0, 0x725: 0x00c3, 0x726: 0x00c3, 0x727: 0x00c3, 0x728: 0x00c0, 0x729: 0x00c3, + 0x72a: 0x00c3, 0x72b: 0x00c3, 0x72c: 0x00c3, 0x72d: 0x00c3, + 0x730: 0x0080, 0x731: 0x0080, 0x732: 0x0080, 0x733: 0x0080, 0x734: 0x0080, 0x735: 0x0080, + 0x736: 0x0080, 0x737: 0x0080, 0x738: 0x0080, 0x739: 0x0080, 0x73a: 0x0080, 0x73b: 0x0080, + 0x73c: 0x0080, 0x73d: 0x0080, 0x73e: 0x0080, + // Block 0x1d, offset 0x740 + 0x740: 0x00c4, 0x741: 0x00c2, 0x742: 0x00c2, 0x743: 0x00c2, 0x744: 0x00c2, 0x745: 0x00c2, + 0x746: 0x00c4, 0x747: 0x00c4, 0x748: 0x00c2, 0x749: 0x00c4, 0x74a: 0x00c2, 0x74b: 0x00c2, + 0x74c: 0x00c2, 0x74d: 0x00c2, 0x74e: 0x00c2, 0x74f: 0x00c2, 0x750: 0x00c2, 0x751: 0x00c2, + 0x752: 0x00c2, 0x753: 0x00c2, 0x754: 0x00c4, 0x755: 0x00c2, 0x756: 0x00c0, 0x757: 0x00c0, + 0x758: 0x00c0, 0x759: 0x00c3, 0x75a: 0x00c3, 0x75b: 0x00c3, + 0x75e: 0x0080, + // Block 0x1e, offset 0x780 + 0x7a0: 0x00c2, 0x7a1: 0x00c2, 0x7a2: 0x00c2, 0x7a3: 0x00c2, + 0x7a4: 0x00c2, 0x7a5: 0x00c2, 0x7a6: 0x00c2, 0x7a7: 0x00c2, 0x7a8: 0x00c2, 0x7a9: 0x00c2, + 0x7aa: 0x00c4, 0x7ab: 0x00c4, 0x7ac: 0x00c4, 0x7ad: 0x00c0, 0x7ae: 0x00c4, 0x7af: 0x00c2, + 0x7b0: 0x00c2, 0x7b1: 0x00c4, 0x7b2: 0x00c4, 0x7b3: 0x00c2, 0x7b4: 0x00c2, + 0x7b6: 0x00c2, 0x7b7: 0x00c2, 0x7b8: 0x00c2, 0x7b9: 0x00c4, 0x7ba: 0x00c2, 0x7bb: 0x00c2, + 0x7bc: 0x00c2, 0x7bd: 0x00c2, + // Block 
0x1f, offset 0x7c0 + 0x7d4: 0x00c3, 0x7d5: 0x00c3, 0x7d6: 0x00c3, 0x7d7: 0x00c3, + 0x7d8: 0x00c3, 0x7d9: 0x00c3, 0x7da: 0x00c3, 0x7db: 0x00c3, 0x7dc: 0x00c3, 0x7dd: 0x00c3, + 0x7de: 0x00c3, 0x7df: 0x00c3, 0x7e0: 0x00c3, 0x7e1: 0x00c3, 0x7e2: 0x0040, 0x7e3: 0x00c3, + 0x7e4: 0x00c3, 0x7e5: 0x00c3, 0x7e6: 0x00c3, 0x7e7: 0x00c3, 0x7e8: 0x00c3, 0x7e9: 0x00c3, + 0x7ea: 0x00c3, 0x7eb: 0x00c3, 0x7ec: 0x00c3, 0x7ed: 0x00c3, 0x7ee: 0x00c3, 0x7ef: 0x00c3, + 0x7f0: 0x00c3, 0x7f1: 0x00c3, 0x7f2: 0x00c3, 0x7f3: 0x00c3, 0x7f4: 0x00c3, 0x7f5: 0x00c3, + 0x7f6: 0x00c3, 0x7f7: 0x00c3, 0x7f8: 0x00c3, 0x7f9: 0x00c3, 0x7fa: 0x00c3, 0x7fb: 0x00c3, + 0x7fc: 0x00c3, 0x7fd: 0x00c3, 0x7fe: 0x00c3, 0x7ff: 0x00c3, + // Block 0x20, offset 0x800 + 0x800: 0x00c3, 0x801: 0x00c3, 0x802: 0x00c3, 0x803: 0x00c0, 0x804: 0x00c0, 0x805: 0x00c0, + 0x806: 0x00c0, 0x807: 0x00c0, 0x808: 0x00c0, 0x809: 0x00c0, 0x80a: 0x00c0, 0x80b: 0x00c0, + 0x80c: 0x00c0, 0x80d: 0x00c0, 0x80e: 0x00c0, 0x80f: 0x00c0, 0x810: 0x00c0, 0x811: 0x00c0, + 0x812: 0x00c0, 0x813: 0x00c0, 0x814: 0x00c0, 0x815: 0x00c0, 0x816: 0x00c0, 0x817: 0x00c0, + 0x818: 0x00c0, 0x819: 0x00c0, 0x81a: 0x00c0, 0x81b: 0x00c0, 0x81c: 0x00c0, 0x81d: 0x00c0, + 0x81e: 0x00c0, 0x81f: 0x00c0, 0x820: 0x00c0, 0x821: 0x00c0, 0x822: 0x00c0, 0x823: 0x00c0, + 0x824: 0x00c0, 0x825: 0x00c0, 0x826: 0x00c0, 0x827: 0x00c0, 0x828: 0x00c0, 0x829: 0x00c0, + 0x82a: 0x00c0, 0x82b: 0x00c0, 0x82c: 0x00c0, 0x82d: 0x00c0, 0x82e: 0x00c0, 0x82f: 0x00c0, + 0x830: 0x00c0, 0x831: 0x00c0, 0x832: 0x00c0, 0x833: 0x00c0, 0x834: 0x00c0, 0x835: 0x00c0, + 0x836: 0x00c0, 0x837: 0x00c0, 0x838: 0x00c0, 0x839: 0x00c0, 0x83a: 0x00c3, 0x83b: 0x00c0, + 0x83c: 0x00c3, 0x83d: 0x00c0, 0x83e: 0x00c0, 0x83f: 0x00c0, + // Block 0x21, offset 0x840 + 0x840: 0x00c0, 0x841: 0x00c3, 0x842: 0x00c3, 0x843: 0x00c3, 0x844: 0x00c3, 0x845: 0x00c3, + 0x846: 0x00c3, 0x847: 0x00c3, 0x848: 0x00c3, 0x849: 0x00c0, 0x84a: 0x00c0, 0x84b: 0x00c0, + 0x84c: 0x00c0, 0x84d: 0x00c6, 0x84e: 0x00c0, 0x84f: 0x00c0, 0x850: 0x00c0, 0x851: 0x00c3, + 0x852: 0x00c3, 0x853: 0x00c3, 0x854: 0x00c3, 0x855: 0x00c3, 0x856: 0x00c3, 0x857: 0x00c3, + 0x858: 0x0080, 0x859: 0x0080, 0x85a: 0x0080, 0x85b: 0x0080, 0x85c: 0x0080, 0x85d: 0x0080, + 0x85e: 0x0080, 0x85f: 0x0080, 0x860: 0x00c0, 0x861: 0x00c0, 0x862: 0x00c3, 0x863: 0x00c3, + 0x864: 0x0080, 0x865: 0x0080, 0x866: 0x00c0, 0x867: 0x00c0, 0x868: 0x00c0, 0x869: 0x00c0, + 0x86a: 0x00c0, 0x86b: 0x00c0, 0x86c: 0x00c0, 0x86d: 0x00c0, 0x86e: 0x00c0, 0x86f: 0x00c0, + 0x870: 0x0080, 0x871: 0x00c0, 0x872: 0x00c0, 0x873: 0x00c0, 0x874: 0x00c0, 0x875: 0x00c0, + 0x876: 0x00c0, 0x877: 0x00c0, 0x878: 0x00c0, 0x879: 0x00c0, 0x87a: 0x00c0, 0x87b: 0x00c0, + 0x87c: 0x00c0, 0x87d: 0x00c0, 0x87e: 0x00c0, 0x87f: 0x00c0, + // Block 0x22, offset 0x880 + 0x880: 0x00c0, 0x881: 0x00c3, 0x882: 0x00c0, 0x883: 0x00c0, 0x885: 0x00c0, + 0x886: 0x00c0, 0x887: 0x00c0, 0x888: 0x00c0, 0x889: 0x00c0, 0x88a: 0x00c0, 0x88b: 0x00c0, + 0x88c: 0x00c0, 0x88f: 0x00c0, 0x890: 0x00c0, + 0x893: 0x00c0, 0x894: 0x00c0, 0x895: 0x00c0, 0x896: 0x00c0, 0x897: 0x00c0, + 0x898: 0x00c0, 0x899: 0x00c0, 0x89a: 0x00c0, 0x89b: 0x00c0, 0x89c: 0x00c0, 0x89d: 0x00c0, + 0x89e: 0x00c0, 0x89f: 0x00c0, 0x8a0: 0x00c0, 0x8a1: 0x00c0, 0x8a2: 0x00c0, 0x8a3: 0x00c0, + 0x8a4: 0x00c0, 0x8a5: 0x00c0, 0x8a6: 0x00c0, 0x8a7: 0x00c0, 0x8a8: 0x00c0, + 0x8aa: 0x00c0, 0x8ab: 0x00c0, 0x8ac: 0x00c0, 0x8ad: 0x00c0, 0x8ae: 0x00c0, 0x8af: 0x00c0, + 0x8b0: 0x00c0, 0x8b2: 0x00c0, + 0x8b6: 0x00c0, 0x8b7: 0x00c0, 0x8b8: 0x00c0, 0x8b9: 0x00c0, + 0x8bc: 0x00c3, 0x8bd: 0x00c0, 0x8be: 0x00c0, 0x8bf: 0x00c0, + 
// Block 0x23, offset 0x8c0 + 0x8c0: 0x00c0, 0x8c1: 0x00c3, 0x8c2: 0x00c3, 0x8c3: 0x00c3, 0x8c4: 0x00c3, + 0x8c7: 0x00c0, 0x8c8: 0x00c0, 0x8cb: 0x00c0, + 0x8cc: 0x00c0, 0x8cd: 0x00c6, 0x8ce: 0x00c0, + 0x8d7: 0x00c0, + 0x8dc: 0x0080, 0x8dd: 0x0080, + 0x8df: 0x0080, 0x8e0: 0x00c0, 0x8e1: 0x00c0, 0x8e2: 0x00c3, 0x8e3: 0x00c3, + 0x8e6: 0x00c0, 0x8e7: 0x00c0, 0x8e8: 0x00c0, 0x8e9: 0x00c0, + 0x8ea: 0x00c0, 0x8eb: 0x00c0, 0x8ec: 0x00c0, 0x8ed: 0x00c0, 0x8ee: 0x00c0, 0x8ef: 0x00c0, + 0x8f0: 0x00c0, 0x8f1: 0x00c0, 0x8f2: 0x0080, 0x8f3: 0x0080, 0x8f4: 0x0080, 0x8f5: 0x0080, + 0x8f6: 0x0080, 0x8f7: 0x0080, 0x8f8: 0x0080, 0x8f9: 0x0080, 0x8fa: 0x0080, 0x8fb: 0x0080, + // Block 0x24, offset 0x900 + 0x901: 0x00c3, 0x902: 0x00c3, 0x903: 0x00c0, 0x905: 0x00c0, + 0x906: 0x00c0, 0x907: 0x00c0, 0x908: 0x00c0, 0x909: 0x00c0, 0x90a: 0x00c0, + 0x90f: 0x00c0, 0x910: 0x00c0, + 0x913: 0x00c0, 0x914: 0x00c0, 0x915: 0x00c0, 0x916: 0x00c0, 0x917: 0x00c0, + 0x918: 0x00c0, 0x919: 0x00c0, 0x91a: 0x00c0, 0x91b: 0x00c0, 0x91c: 0x00c0, 0x91d: 0x00c0, + 0x91e: 0x00c0, 0x91f: 0x00c0, 0x920: 0x00c0, 0x921: 0x00c0, 0x922: 0x00c0, 0x923: 0x00c0, + 0x924: 0x00c0, 0x925: 0x00c0, 0x926: 0x00c0, 0x927: 0x00c0, 0x928: 0x00c0, + 0x92a: 0x00c0, 0x92b: 0x00c0, 0x92c: 0x00c0, 0x92d: 0x00c0, 0x92e: 0x00c0, 0x92f: 0x00c0, + 0x930: 0x00c0, 0x932: 0x00c0, 0x933: 0x0080, 0x935: 0x00c0, + 0x936: 0x0080, 0x938: 0x00c0, 0x939: 0x00c0, + 0x93c: 0x00c3, 0x93e: 0x00c0, 0x93f: 0x00c0, + // Block 0x25, offset 0x940 + 0x940: 0x00c0, 0x941: 0x00c3, 0x942: 0x00c3, + 0x947: 0x00c3, 0x948: 0x00c3, 0x94b: 0x00c3, + 0x94c: 0x00c3, 0x94d: 0x00c6, 0x951: 0x00c3, + 0x959: 0x0080, 0x95a: 0x0080, 0x95b: 0x0080, 0x95c: 0x00c0, + 0x95e: 0x0080, + 0x966: 0x00c0, 0x967: 0x00c0, 0x968: 0x00c0, 0x969: 0x00c0, + 0x96a: 0x00c0, 0x96b: 0x00c0, 0x96c: 0x00c0, 0x96d: 0x00c0, 0x96e: 0x00c0, 0x96f: 0x00c0, + 0x970: 0x00c3, 0x971: 0x00c3, 0x972: 0x00c0, 0x973: 0x00c0, 0x974: 0x00c0, 0x975: 0x00c3, + // Block 0x26, offset 0x980 + 0x981: 0x00c3, 0x982: 0x00c3, 0x983: 0x00c0, 0x985: 0x00c0, + 0x986: 0x00c0, 0x987: 0x00c0, 0x988: 0x00c0, 0x989: 0x00c0, 0x98a: 0x00c0, 0x98b: 0x00c0, + 0x98c: 0x00c0, 0x98d: 0x00c0, 0x98f: 0x00c0, 0x990: 0x00c0, 0x991: 0x00c0, + 0x993: 0x00c0, 0x994: 0x00c0, 0x995: 0x00c0, 0x996: 0x00c0, 0x997: 0x00c0, + 0x998: 0x00c0, 0x999: 0x00c0, 0x99a: 0x00c0, 0x99b: 0x00c0, 0x99c: 0x00c0, 0x99d: 0x00c0, + 0x99e: 0x00c0, 0x99f: 0x00c0, 0x9a0: 0x00c0, 0x9a1: 0x00c0, 0x9a2: 0x00c0, 0x9a3: 0x00c0, + 0x9a4: 0x00c0, 0x9a5: 0x00c0, 0x9a6: 0x00c0, 0x9a7: 0x00c0, 0x9a8: 0x00c0, + 0x9aa: 0x00c0, 0x9ab: 0x00c0, 0x9ac: 0x00c0, 0x9ad: 0x00c0, 0x9ae: 0x00c0, 0x9af: 0x00c0, + 0x9b0: 0x00c0, 0x9b2: 0x00c0, 0x9b3: 0x00c0, 0x9b5: 0x00c0, + 0x9b6: 0x00c0, 0x9b7: 0x00c0, 0x9b8: 0x00c0, 0x9b9: 0x00c0, + 0x9bc: 0x00c3, 0x9bd: 0x00c0, 0x9be: 0x00c0, 0x9bf: 0x00c0, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x00c0, 0x9c1: 0x00c3, 0x9c2: 0x00c3, 0x9c3: 0x00c3, 0x9c4: 0x00c3, 0x9c5: 0x00c3, + 0x9c7: 0x00c3, 0x9c8: 0x00c3, 0x9c9: 0x00c0, 0x9cb: 0x00c0, + 0x9cc: 0x00c0, 0x9cd: 0x00c6, 0x9d0: 0x00c0, + 0x9e0: 0x00c0, 0x9e1: 0x00c0, 0x9e2: 0x00c3, 0x9e3: 0x00c3, + 0x9e6: 0x00c0, 0x9e7: 0x00c0, 0x9e8: 0x00c0, 0x9e9: 0x00c0, + 0x9ea: 0x00c0, 0x9eb: 0x00c0, 0x9ec: 0x00c0, 0x9ed: 0x00c0, 0x9ee: 0x00c0, 0x9ef: 0x00c0, + 0x9f0: 0x0080, 0x9f1: 0x0080, + 0x9f9: 0x00c0, + // Block 0x28, offset 0xa00 + 0xa01: 0x00c3, 0xa02: 0x00c0, 0xa03: 0x00c0, 0xa05: 0x00c0, + 0xa06: 0x00c0, 0xa07: 0x00c0, 0xa08: 0x00c0, 0xa09: 0x00c0, 0xa0a: 0x00c0, 0xa0b: 0x00c0, + 0xa0c: 0x00c0, 0xa0f: 0x00c0, 0xa10: 0x00c0, + 
0xa13: 0x00c0, 0xa14: 0x00c0, 0xa15: 0x00c0, 0xa16: 0x00c0, 0xa17: 0x00c0, + 0xa18: 0x00c0, 0xa19: 0x00c0, 0xa1a: 0x00c0, 0xa1b: 0x00c0, 0xa1c: 0x00c0, 0xa1d: 0x00c0, + 0xa1e: 0x00c0, 0xa1f: 0x00c0, 0xa20: 0x00c0, 0xa21: 0x00c0, 0xa22: 0x00c0, 0xa23: 0x00c0, + 0xa24: 0x00c0, 0xa25: 0x00c0, 0xa26: 0x00c0, 0xa27: 0x00c0, 0xa28: 0x00c0, + 0xa2a: 0x00c0, 0xa2b: 0x00c0, 0xa2c: 0x00c0, 0xa2d: 0x00c0, 0xa2e: 0x00c0, 0xa2f: 0x00c0, + 0xa30: 0x00c0, 0xa32: 0x00c0, 0xa33: 0x00c0, 0xa35: 0x00c0, + 0xa36: 0x00c0, 0xa37: 0x00c0, 0xa38: 0x00c0, 0xa39: 0x00c0, + 0xa3c: 0x00c3, 0xa3d: 0x00c0, 0xa3e: 0x00c0, 0xa3f: 0x00c3, + // Block 0x29, offset 0xa40 + 0xa40: 0x00c0, 0xa41: 0x00c3, 0xa42: 0x00c3, 0xa43: 0x00c3, 0xa44: 0x00c3, + 0xa47: 0x00c0, 0xa48: 0x00c0, 0xa4b: 0x00c0, + 0xa4c: 0x00c0, 0xa4d: 0x00c6, + 0xa56: 0x00c3, 0xa57: 0x00c0, + 0xa5c: 0x0080, 0xa5d: 0x0080, + 0xa5f: 0x00c0, 0xa60: 0x00c0, 0xa61: 0x00c0, 0xa62: 0x00c3, 0xa63: 0x00c3, + 0xa66: 0x00c0, 0xa67: 0x00c0, 0xa68: 0x00c0, 0xa69: 0x00c0, + 0xa6a: 0x00c0, 0xa6b: 0x00c0, 0xa6c: 0x00c0, 0xa6d: 0x00c0, 0xa6e: 0x00c0, 0xa6f: 0x00c0, + 0xa70: 0x0080, 0xa71: 0x00c0, 0xa72: 0x0080, 0xa73: 0x0080, 0xa74: 0x0080, 0xa75: 0x0080, + 0xa76: 0x0080, 0xa77: 0x0080, + // Block 0x2a, offset 0xa80 + 0xa82: 0x00c3, 0xa83: 0x00c0, 0xa85: 0x00c0, + 0xa86: 0x00c0, 0xa87: 0x00c0, 0xa88: 0x00c0, 0xa89: 0x00c0, 0xa8a: 0x00c0, + 0xa8e: 0x00c0, 0xa8f: 0x00c0, 0xa90: 0x00c0, + 0xa92: 0x00c0, 0xa93: 0x00c0, 0xa94: 0x00c0, 0xa95: 0x00c0, + 0xa99: 0x00c0, 0xa9a: 0x00c0, 0xa9c: 0x00c0, + 0xa9e: 0x00c0, 0xa9f: 0x00c0, 0xaa3: 0x00c0, + 0xaa4: 0x00c0, 0xaa8: 0x00c0, 0xaa9: 0x00c0, + 0xaaa: 0x00c0, 0xaae: 0x00c0, 0xaaf: 0x00c0, + 0xab0: 0x00c0, 0xab1: 0x00c0, 0xab2: 0x00c0, 0xab3: 0x00c0, 0xab4: 0x00c0, 0xab5: 0x00c0, + 0xab6: 0x00c0, 0xab7: 0x00c0, 0xab8: 0x00c0, 0xab9: 0x00c0, + 0xabe: 0x00c0, 0xabf: 0x00c0, + // Block 0x2b, offset 0xac0 + 0xac0: 0x00c3, 0xac1: 0x00c0, 0xac2: 0x00c0, + 0xac6: 0x00c0, 0xac7: 0x00c0, 0xac8: 0x00c0, 0xaca: 0x00c0, 0xacb: 0x00c0, + 0xacc: 0x00c0, 0xacd: 0x00c6, 0xad0: 0x00c0, + 0xad7: 0x00c0, + 0xae6: 0x00c0, 0xae7: 0x00c0, 0xae8: 0x00c0, 0xae9: 0x00c0, + 0xaea: 0x00c0, 0xaeb: 0x00c0, 0xaec: 0x00c0, 0xaed: 0x00c0, 0xaee: 0x00c0, 0xaef: 0x00c0, + 0xaf0: 0x0080, 0xaf1: 0x0080, 0xaf2: 0x0080, 0xaf3: 0x0080, 0xaf4: 0x0080, 0xaf5: 0x0080, + 0xaf6: 0x0080, 0xaf7: 0x0080, 0xaf8: 0x0080, 0xaf9: 0x0080, 0xafa: 0x0080, + // Block 0x2c, offset 0xb00 + 0xb00: 0x00c3, 0xb01: 0x00c0, 0xb02: 0x00c0, 0xb03: 0x00c0, 0xb05: 0x00c0, + 0xb06: 0x00c0, 0xb07: 0x00c0, 0xb08: 0x00c0, 0xb09: 0x00c0, 0xb0a: 0x00c0, 0xb0b: 0x00c0, + 0xb0c: 0x00c0, 0xb0e: 0x00c0, 0xb0f: 0x00c0, 0xb10: 0x00c0, + 0xb12: 0x00c0, 0xb13: 0x00c0, 0xb14: 0x00c0, 0xb15: 0x00c0, 0xb16: 0x00c0, 0xb17: 0x00c0, + 0xb18: 0x00c0, 0xb19: 0x00c0, 0xb1a: 0x00c0, 0xb1b: 0x00c0, 0xb1c: 0x00c0, 0xb1d: 0x00c0, + 0xb1e: 0x00c0, 0xb1f: 0x00c0, 0xb20: 0x00c0, 0xb21: 0x00c0, 0xb22: 0x00c0, 0xb23: 0x00c0, + 0xb24: 0x00c0, 0xb25: 0x00c0, 0xb26: 0x00c0, 0xb27: 0x00c0, 0xb28: 0x00c0, + 0xb2a: 0x00c0, 0xb2b: 0x00c0, 0xb2c: 0x00c0, 0xb2d: 0x00c0, 0xb2e: 0x00c0, 0xb2f: 0x00c0, + 0xb30: 0x00c0, 0xb31: 0x00c0, 0xb32: 0x00c0, 0xb33: 0x00c0, 0xb34: 0x00c0, 0xb35: 0x00c0, + 0xb36: 0x00c0, 0xb37: 0x00c0, 0xb38: 0x00c0, 0xb39: 0x00c0, + 0xb3d: 0x00c0, 0xb3e: 0x00c3, 0xb3f: 0x00c3, + // Block 0x2d, offset 0xb40 + 0xb40: 0x00c3, 0xb41: 0x00c0, 0xb42: 0x00c0, 0xb43: 0x00c0, 0xb44: 0x00c0, + 0xb46: 0x00c3, 0xb47: 0x00c3, 0xb48: 0x00c3, 0xb4a: 0x00c3, 0xb4b: 0x00c3, + 0xb4c: 0x00c3, 0xb4d: 0x00c6, + 0xb55: 0x00c3, 0xb56: 0x00c3, + 
0xb58: 0x00c0, 0xb59: 0x00c0, 0xb5a: 0x00c0, + 0xb60: 0x00c0, 0xb61: 0x00c0, 0xb62: 0x00c3, 0xb63: 0x00c3, + 0xb66: 0x00c0, 0xb67: 0x00c0, 0xb68: 0x00c0, 0xb69: 0x00c0, + 0xb6a: 0x00c0, 0xb6b: 0x00c0, 0xb6c: 0x00c0, 0xb6d: 0x00c0, 0xb6e: 0x00c0, 0xb6f: 0x00c0, + 0xb78: 0x0080, 0xb79: 0x0080, 0xb7a: 0x0080, 0xb7b: 0x0080, + 0xb7c: 0x0080, 0xb7d: 0x0080, 0xb7e: 0x0080, 0xb7f: 0x0080, + // Block 0x2e, offset 0xb80 + 0xb80: 0x00c0, 0xb81: 0x00c3, 0xb82: 0x00c0, 0xb83: 0x00c0, 0xb85: 0x00c0, + 0xb86: 0x00c0, 0xb87: 0x00c0, 0xb88: 0x00c0, 0xb89: 0x00c0, 0xb8a: 0x00c0, 0xb8b: 0x00c0, + 0xb8c: 0x00c0, 0xb8e: 0x00c0, 0xb8f: 0x00c0, 0xb90: 0x00c0, + 0xb92: 0x00c0, 0xb93: 0x00c0, 0xb94: 0x00c0, 0xb95: 0x00c0, 0xb96: 0x00c0, 0xb97: 0x00c0, + 0xb98: 0x00c0, 0xb99: 0x00c0, 0xb9a: 0x00c0, 0xb9b: 0x00c0, 0xb9c: 0x00c0, 0xb9d: 0x00c0, + 0xb9e: 0x00c0, 0xb9f: 0x00c0, 0xba0: 0x00c0, 0xba1: 0x00c0, 0xba2: 0x00c0, 0xba3: 0x00c0, + 0xba4: 0x00c0, 0xba5: 0x00c0, 0xba6: 0x00c0, 0xba7: 0x00c0, 0xba8: 0x00c0, + 0xbaa: 0x00c0, 0xbab: 0x00c0, 0xbac: 0x00c0, 0xbad: 0x00c0, 0xbae: 0x00c0, 0xbaf: 0x00c0, + 0xbb0: 0x00c0, 0xbb1: 0x00c0, 0xbb2: 0x00c0, 0xbb3: 0x00c0, 0xbb5: 0x00c0, + 0xbb6: 0x00c0, 0xbb7: 0x00c0, 0xbb8: 0x00c0, 0xbb9: 0x00c0, + 0xbbc: 0x00c3, 0xbbd: 0x00c0, 0xbbe: 0x00c0, 0xbbf: 0x00c3, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x00c0, 0xbc1: 0x00c0, 0xbc2: 0x00c0, 0xbc3: 0x00c0, 0xbc4: 0x00c0, + 0xbc6: 0x00c3, 0xbc7: 0x00c0, 0xbc8: 0x00c0, 0xbca: 0x00c0, 0xbcb: 0x00c0, + 0xbcc: 0x00c3, 0xbcd: 0x00c6, + 0xbd5: 0x00c0, 0xbd6: 0x00c0, + 0xbde: 0x00c0, 0xbe0: 0x00c0, 0xbe1: 0x00c0, 0xbe2: 0x00c3, 0xbe3: 0x00c3, + 0xbe6: 0x00c0, 0xbe7: 0x00c0, 0xbe8: 0x00c0, 0xbe9: 0x00c0, + 0xbea: 0x00c0, 0xbeb: 0x00c0, 0xbec: 0x00c0, 0xbed: 0x00c0, 0xbee: 0x00c0, 0xbef: 0x00c0, + 0xbf1: 0x00c0, 0xbf2: 0x00c0, + // Block 0x30, offset 0xc00 + 0xc01: 0x00c3, 0xc02: 0x00c0, 0xc03: 0x00c0, 0xc05: 0x00c0, + 0xc06: 0x00c0, 0xc07: 0x00c0, 0xc08: 0x00c0, 0xc09: 0x00c0, 0xc0a: 0x00c0, 0xc0b: 0x00c0, + 0xc0c: 0x00c0, 0xc0e: 0x00c0, 0xc0f: 0x00c0, 0xc10: 0x00c0, + 0xc12: 0x00c0, 0xc13: 0x00c0, 0xc14: 0x00c0, 0xc15: 0x00c0, 0xc16: 0x00c0, 0xc17: 0x00c0, + 0xc18: 0x00c0, 0xc19: 0x00c0, 0xc1a: 0x00c0, 0xc1b: 0x00c0, 0xc1c: 0x00c0, 0xc1d: 0x00c0, + 0xc1e: 0x00c0, 0xc1f: 0x00c0, 0xc20: 0x00c0, 0xc21: 0x00c0, 0xc22: 0x00c0, 0xc23: 0x00c0, + 0xc24: 0x00c0, 0xc25: 0x00c0, 0xc26: 0x00c0, 0xc27: 0x00c0, 0xc28: 0x00c0, 0xc29: 0x00c0, + 0xc2a: 0x00c0, 0xc2b: 0x00c0, 0xc2c: 0x00c0, 0xc2d: 0x00c0, 0xc2e: 0x00c0, 0xc2f: 0x00c0, + 0xc30: 0x00c0, 0xc31: 0x00c0, 0xc32: 0x00c0, 0xc33: 0x00c0, 0xc34: 0x00c0, 0xc35: 0x00c0, + 0xc36: 0x00c0, 0xc37: 0x00c0, 0xc38: 0x00c0, 0xc39: 0x00c0, 0xc3a: 0x00c0, + 0xc3d: 0x00c0, 0xc3e: 0x00c0, 0xc3f: 0x00c0, + // Block 0x31, offset 0xc40 + 0xc40: 0x00c0, 0xc41: 0x00c3, 0xc42: 0x00c3, 0xc43: 0x00c3, 0xc44: 0x00c3, + 0xc46: 0x00c0, 0xc47: 0x00c0, 0xc48: 0x00c0, 0xc4a: 0x00c0, 0xc4b: 0x00c0, + 0xc4c: 0x00c0, 0xc4d: 0x00c6, 0xc4e: 0x00c0, 0xc4f: 0x0080, + 0xc54: 0x00c0, 0xc55: 0x00c0, 0xc56: 0x00c0, 0xc57: 0x00c0, + 0xc58: 0x0080, 0xc59: 0x0080, 0xc5a: 0x0080, 0xc5b: 0x0080, 0xc5c: 0x0080, 0xc5d: 0x0080, + 0xc5e: 0x0080, 0xc5f: 0x00c0, 0xc60: 0x00c0, 0xc61: 0x00c0, 0xc62: 0x00c3, 0xc63: 0x00c3, + 0xc66: 0x00c0, 0xc67: 0x00c0, 0xc68: 0x00c0, 0xc69: 0x00c0, + 0xc6a: 0x00c0, 0xc6b: 0x00c0, 0xc6c: 0x00c0, 0xc6d: 0x00c0, 0xc6e: 0x00c0, 0xc6f: 0x00c0, + 0xc70: 0x0080, 0xc71: 0x0080, 0xc72: 0x0080, 0xc73: 0x0080, 0xc74: 0x0080, 0xc75: 0x0080, + 0xc76: 0x0080, 0xc77: 0x0080, 0xc78: 0x0080, 0xc79: 0x0080, 0xc7a: 0x00c0, 0xc7b: 0x00c0, 
+ 0xc7c: 0x00c0, 0xc7d: 0x00c0, 0xc7e: 0x00c0, 0xc7f: 0x00c0, + // Block 0x32, offset 0xc80 + 0xc82: 0x00c0, 0xc83: 0x00c0, 0xc85: 0x00c0, + 0xc86: 0x00c0, 0xc87: 0x00c0, 0xc88: 0x00c0, 0xc89: 0x00c0, 0xc8a: 0x00c0, 0xc8b: 0x00c0, + 0xc8c: 0x00c0, 0xc8d: 0x00c0, 0xc8e: 0x00c0, 0xc8f: 0x00c0, 0xc90: 0x00c0, 0xc91: 0x00c0, + 0xc92: 0x00c0, 0xc93: 0x00c0, 0xc94: 0x00c0, 0xc95: 0x00c0, 0xc96: 0x00c0, + 0xc9a: 0x00c0, 0xc9b: 0x00c0, 0xc9c: 0x00c0, 0xc9d: 0x00c0, + 0xc9e: 0x00c0, 0xc9f: 0x00c0, 0xca0: 0x00c0, 0xca1: 0x00c0, 0xca2: 0x00c0, 0xca3: 0x00c0, + 0xca4: 0x00c0, 0xca5: 0x00c0, 0xca6: 0x00c0, 0xca7: 0x00c0, 0xca8: 0x00c0, 0xca9: 0x00c0, + 0xcaa: 0x00c0, 0xcab: 0x00c0, 0xcac: 0x00c0, 0xcad: 0x00c0, 0xcae: 0x00c0, 0xcaf: 0x00c0, + 0xcb0: 0x00c0, 0xcb1: 0x00c0, 0xcb3: 0x00c0, 0xcb4: 0x00c0, 0xcb5: 0x00c0, + 0xcb6: 0x00c0, 0xcb7: 0x00c0, 0xcb8: 0x00c0, 0xcb9: 0x00c0, 0xcba: 0x00c0, 0xcbb: 0x00c0, + 0xcbd: 0x00c0, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x00c0, 0xcc1: 0x00c0, 0xcc2: 0x00c0, 0xcc3: 0x00c0, 0xcc4: 0x00c0, 0xcc5: 0x00c0, + 0xcc6: 0x00c0, 0xcca: 0x00c6, + 0xccf: 0x00c0, 0xcd0: 0x00c0, 0xcd1: 0x00c0, + 0xcd2: 0x00c3, 0xcd3: 0x00c3, 0xcd4: 0x00c3, 0xcd6: 0x00c3, + 0xcd8: 0x00c0, 0xcd9: 0x00c0, 0xcda: 0x00c0, 0xcdb: 0x00c0, 0xcdc: 0x00c0, 0xcdd: 0x00c0, + 0xcde: 0x00c0, 0xcdf: 0x00c0, + 0xce6: 0x00c0, 0xce7: 0x00c0, 0xce8: 0x00c0, 0xce9: 0x00c0, + 0xcea: 0x00c0, 0xceb: 0x00c0, 0xcec: 0x00c0, 0xced: 0x00c0, 0xcee: 0x00c0, 0xcef: 0x00c0, + 0xcf2: 0x00c0, 0xcf3: 0x00c0, 0xcf4: 0x0080, + // Block 0x34, offset 0xd00 + 0xd01: 0x00c0, 0xd02: 0x00c0, 0xd03: 0x00c0, 0xd04: 0x00c0, 0xd05: 0x00c0, + 0xd06: 0x00c0, 0xd07: 0x00c0, 0xd08: 0x00c0, 0xd09: 0x00c0, 0xd0a: 0x00c0, 0xd0b: 0x00c0, + 0xd0c: 0x00c0, 0xd0d: 0x00c0, 0xd0e: 0x00c0, 0xd0f: 0x00c0, 0xd10: 0x00c0, 0xd11: 0x00c0, + 0xd12: 0x00c0, 0xd13: 0x00c0, 0xd14: 0x00c0, 0xd15: 0x00c0, 0xd16: 0x00c0, 0xd17: 0x00c0, + 0xd18: 0x00c0, 0xd19: 0x00c0, 0xd1a: 0x00c0, 0xd1b: 0x00c0, 0xd1c: 0x00c0, 0xd1d: 0x00c0, + 0xd1e: 0x00c0, 0xd1f: 0x00c0, 0xd20: 0x00c0, 0xd21: 0x00c0, 0xd22: 0x00c0, 0xd23: 0x00c0, + 0xd24: 0x00c0, 0xd25: 0x00c0, 0xd26: 0x00c0, 0xd27: 0x00c0, 0xd28: 0x00c0, 0xd29: 0x00c0, + 0xd2a: 0x00c0, 0xd2b: 0x00c0, 0xd2c: 0x00c0, 0xd2d: 0x00c0, 0xd2e: 0x00c0, 0xd2f: 0x00c0, + 0xd30: 0x00c0, 0xd31: 0x00c3, 0xd32: 0x00c0, 0xd33: 0x0080, 0xd34: 0x00c3, 0xd35: 0x00c3, + 0xd36: 0x00c3, 0xd37: 0x00c3, 0xd38: 0x00c3, 0xd39: 0x00c3, 0xd3a: 0x00c6, + 0xd3f: 0x0080, + // Block 0x35, offset 0xd40 + 0xd40: 0x00c0, 0xd41: 0x00c0, 0xd42: 0x00c0, 0xd43: 0x00c0, 0xd44: 0x00c0, 0xd45: 0x00c0, + 0xd46: 0x00c0, 0xd47: 0x00c3, 0xd48: 0x00c3, 0xd49: 0x00c3, 0xd4a: 0x00c3, 0xd4b: 0x00c3, + 0xd4c: 0x00c3, 0xd4d: 0x00c3, 0xd4e: 0x00c3, 0xd4f: 0x0080, 0xd50: 0x00c0, 0xd51: 0x00c0, + 0xd52: 0x00c0, 0xd53: 0x00c0, 0xd54: 0x00c0, 0xd55: 0x00c0, 0xd56: 0x00c0, 0xd57: 0x00c0, + 0xd58: 0x00c0, 0xd59: 0x00c0, 0xd5a: 0x0080, 0xd5b: 0x0080, + // Block 0x36, offset 0xd80 + 0xd81: 0x00c0, 0xd82: 0x00c0, 0xd84: 0x00c0, + 0xd87: 0x00c0, 0xd88: 0x00c0, 0xd8a: 0x00c0, + 0xd8d: 0x00c0, + 0xd94: 0x00c0, 0xd95: 0x00c0, 0xd96: 0x00c0, 0xd97: 0x00c0, + 0xd99: 0x00c0, 0xd9a: 0x00c0, 0xd9b: 0x00c0, 0xd9c: 0x00c0, 0xd9d: 0x00c0, + 0xd9e: 0x00c0, 0xd9f: 0x00c0, 0xda1: 0x00c0, 0xda2: 0x00c0, 0xda3: 0x00c0, + 0xda5: 0x00c0, 0xda7: 0x00c0, + 0xdaa: 0x00c0, 0xdab: 0x00c0, 0xdad: 0x00c0, 0xdae: 0x00c0, 0xdaf: 0x00c0, + 0xdb0: 0x00c0, 0xdb1: 0x00c3, 0xdb2: 0x00c0, 0xdb3: 0x0080, 0xdb4: 0x00c3, 0xdb5: 0x00c3, + 0xdb6: 0x00c3, 0xdb7: 0x00c3, 0xdb8: 0x00c3, 0xdb9: 0x00c3, 0xdbb: 0x00c3, + 0xdbc: 
0x00c3, 0xdbd: 0x00c0, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x00c0, 0xdc1: 0x00c0, 0xdc2: 0x00c0, 0xdc3: 0x00c0, 0xdc4: 0x00c0, + 0xdc6: 0x00c0, 0xdc8: 0x00c3, 0xdc9: 0x00c3, 0xdca: 0x00c3, 0xdcb: 0x00c3, + 0xdcc: 0x00c3, 0xdcd: 0x00c3, 0xdd0: 0x00c0, 0xdd1: 0x00c0, + 0xdd2: 0x00c0, 0xdd3: 0x00c0, 0xdd4: 0x00c0, 0xdd5: 0x00c0, 0xdd6: 0x00c0, 0xdd7: 0x00c0, + 0xdd8: 0x00c0, 0xdd9: 0x00c0, 0xddc: 0x0080, 0xddd: 0x0080, + 0xdde: 0x00c0, 0xddf: 0x00c0, + // Block 0x38, offset 0xe00 + 0xe00: 0x00c0, 0xe01: 0x0080, 0xe02: 0x0080, 0xe03: 0x0080, 0xe04: 0x0080, 0xe05: 0x0080, + 0xe06: 0x0080, 0xe07: 0x0080, 0xe08: 0x0080, 0xe09: 0x0080, 0xe0a: 0x0080, 0xe0b: 0x00c0, + 0xe0c: 0x0080, 0xe0d: 0x0080, 0xe0e: 0x0080, 0xe0f: 0x0080, 0xe10: 0x0080, 0xe11: 0x0080, + 0xe12: 0x0080, 0xe13: 0x0080, 0xe14: 0x0080, 0xe15: 0x0080, 0xe16: 0x0080, 0xe17: 0x0080, + 0xe18: 0x00c3, 0xe19: 0x00c3, 0xe1a: 0x0080, 0xe1b: 0x0080, 0xe1c: 0x0080, 0xe1d: 0x0080, + 0xe1e: 0x0080, 0xe1f: 0x0080, 0xe20: 0x00c0, 0xe21: 0x00c0, 0xe22: 0x00c0, 0xe23: 0x00c0, + 0xe24: 0x00c0, 0xe25: 0x00c0, 0xe26: 0x00c0, 0xe27: 0x00c0, 0xe28: 0x00c0, 0xe29: 0x00c0, + 0xe2a: 0x0080, 0xe2b: 0x0080, 0xe2c: 0x0080, 0xe2d: 0x0080, 0xe2e: 0x0080, 0xe2f: 0x0080, + 0xe30: 0x0080, 0xe31: 0x0080, 0xe32: 0x0080, 0xe33: 0x0080, 0xe34: 0x0080, 0xe35: 0x00c3, + 0xe36: 0x0080, 0xe37: 0x00c3, 0xe38: 0x0080, 0xe39: 0x00c3, 0xe3a: 0x0080, 0xe3b: 0x0080, + 0xe3c: 0x0080, 0xe3d: 0x0080, 0xe3e: 0x00c0, 0xe3f: 0x00c0, + // Block 0x39, offset 0xe40 + 0xe40: 0x00c0, 0xe41: 0x00c0, 0xe42: 0x00c0, 0xe43: 0x0080, 0xe44: 0x00c0, 0xe45: 0x00c0, + 0xe46: 0x00c0, 0xe47: 0x00c0, 0xe49: 0x00c0, 0xe4a: 0x00c0, 0xe4b: 0x00c0, + 0xe4c: 0x00c0, 0xe4d: 0x0080, 0xe4e: 0x00c0, 0xe4f: 0x00c0, 0xe50: 0x00c0, 0xe51: 0x00c0, + 0xe52: 0x0080, 0xe53: 0x00c0, 0xe54: 0x00c0, 0xe55: 0x00c0, 0xe56: 0x00c0, 0xe57: 0x0080, + 0xe58: 0x00c0, 0xe59: 0x00c0, 0xe5a: 0x00c0, 0xe5b: 0x00c0, 0xe5c: 0x0080, 0xe5d: 0x00c0, + 0xe5e: 0x00c0, 0xe5f: 0x00c0, 0xe60: 0x00c0, 0xe61: 0x00c0, 0xe62: 0x00c0, 0xe63: 0x00c0, + 0xe64: 0x00c0, 0xe65: 0x00c0, 0xe66: 0x00c0, 0xe67: 0x00c0, 0xe68: 0x00c0, 0xe69: 0x0080, + 0xe6a: 0x00c0, 0xe6b: 0x00c0, 0xe6c: 0x00c0, + 0xe71: 0x00c3, 0xe72: 0x00c3, 0xe73: 0x0083, 0xe74: 0x00c3, 0xe75: 0x0083, + 0xe76: 0x0083, 0xe77: 0x0083, 0xe78: 0x0083, 0xe79: 0x0083, 0xe7a: 0x00c3, 0xe7b: 0x00c3, + 0xe7c: 0x00c3, 0xe7d: 0x00c3, 0xe7e: 0x00c3, 0xe7f: 0x00c0, + // Block 0x3a, offset 0xe80 + 0xe80: 0x00c3, 0xe81: 0x0083, 0xe82: 0x00c3, 0xe83: 0x00c3, 0xe84: 0x00c6, 0xe85: 0x0080, + 0xe86: 0x00c3, 0xe87: 0x00c3, 0xe88: 0x00c0, 0xe89: 0x00c0, 0xe8a: 0x00c0, 0xe8b: 0x00c0, + 0xe8c: 0x00c0, 0xe8d: 0x00c3, 0xe8e: 0x00c3, 0xe8f: 0x00c3, 0xe90: 0x00c3, 0xe91: 0x00c3, + 0xe92: 0x00c3, 0xe93: 0x0083, 0xe94: 0x00c3, 0xe95: 0x00c3, 0xe96: 0x00c3, 0xe97: 0x00c3, + 0xe99: 0x00c3, 0xe9a: 0x00c3, 0xe9b: 0x00c3, 0xe9c: 0x00c3, 0xe9d: 0x0083, + 0xe9e: 0x00c3, 0xe9f: 0x00c3, 0xea0: 0x00c3, 0xea1: 0x00c3, 0xea2: 0x0083, 0xea3: 0x00c3, + 0xea4: 0x00c3, 0xea5: 0x00c3, 0xea6: 0x00c3, 0xea7: 0x0083, 0xea8: 0x00c3, 0xea9: 0x00c3, + 0xeaa: 0x00c3, 0xeab: 0x00c3, 0xeac: 0x0083, 0xead: 0x00c3, 0xeae: 0x00c3, 0xeaf: 0x00c3, + 0xeb0: 0x00c3, 0xeb1: 0x00c3, 0xeb2: 0x00c3, 0xeb3: 0x00c3, 0xeb4: 0x00c3, 0xeb5: 0x00c3, + 0xeb6: 0x00c3, 0xeb7: 0x00c3, 0xeb8: 0x00c3, 0xeb9: 0x0083, 0xeba: 0x00c3, 0xebb: 0x00c3, + 0xebc: 0x00c3, 0xebe: 0x0080, 0xebf: 0x0080, + // Block 0x3b, offset 0xec0 + 0xec0: 0x0080, 0xec1: 0x0080, 0xec2: 0x0080, 0xec3: 0x0080, 0xec4: 0x0080, 0xec5: 0x0080, + 0xec6: 0x00c3, 0xec7: 0x0080, 0xec8: 0x0080, 
0xec9: 0x0080, 0xeca: 0x0080, 0xecb: 0x0080, + 0xecc: 0x0080, 0xece: 0x0080, 0xecf: 0x0080, 0xed0: 0x0080, 0xed1: 0x0080, + 0xed2: 0x0080, 0xed3: 0x0080, 0xed4: 0x0080, 0xed5: 0x0080, 0xed6: 0x0080, 0xed7: 0x0080, + 0xed8: 0x0080, 0xed9: 0x0080, 0xeda: 0x0080, + // Block 0x3c, offset 0xf00 + 0xf00: 0x00c0, 0xf01: 0x00c0, 0xf02: 0x00c0, 0xf03: 0x00c0, 0xf04: 0x00c0, 0xf05: 0x00c0, + 0xf06: 0x00c0, 0xf07: 0x00c0, 0xf08: 0x00c0, 0xf09: 0x00c0, 0xf0a: 0x00c0, 0xf0b: 0x00c0, + 0xf0c: 0x00c0, 0xf0d: 0x00c0, 0xf0e: 0x00c0, 0xf0f: 0x00c0, 0xf10: 0x00c0, 0xf11: 0x00c0, + 0xf12: 0x00c0, 0xf13: 0x00c0, 0xf14: 0x00c0, 0xf15: 0x00c0, 0xf16: 0x00c0, 0xf17: 0x00c0, + 0xf18: 0x00c0, 0xf19: 0x00c0, 0xf1a: 0x00c0, 0xf1b: 0x00c0, 0xf1c: 0x00c0, 0xf1d: 0x00c0, + 0xf1e: 0x00c0, 0xf1f: 0x00c0, 0xf20: 0x00c0, 0xf21: 0x00c0, 0xf22: 0x00c0, 0xf23: 0x00c0, + 0xf24: 0x00c0, 0xf25: 0x00c0, 0xf26: 0x00c0, 0xf27: 0x00c0, 0xf28: 0x00c0, 0xf29: 0x00c0, + 0xf2a: 0x00c0, 0xf2b: 0x00c0, 0xf2c: 0x00c0, 0xf2d: 0x00c3, 0xf2e: 0x00c3, 0xf2f: 0x00c3, + 0xf30: 0x00c3, 0xf31: 0x00c0, 0xf32: 0x00c3, 0xf33: 0x00c3, 0xf34: 0x00c3, 0xf35: 0x00c3, + 0xf36: 0x00c3, 0xf37: 0x00c3, 0xf38: 0x00c0, 0xf39: 0x00c6, 0xf3a: 0x00c6, 0xf3b: 0x00c0, + 0xf3c: 0x00c0, 0xf3d: 0x00c3, 0xf3e: 0x00c3, 0xf3f: 0x00c0, + // Block 0x3d, offset 0xf40 + 0xf40: 0x00c0, 0xf41: 0x00c0, 0xf42: 0x00c0, 0xf43: 0x00c0, 0xf44: 0x00c0, 0xf45: 0x00c0, + 0xf46: 0x00c0, 0xf47: 0x00c0, 0xf48: 0x00c0, 0xf49: 0x00c0, 0xf4a: 0x0080, 0xf4b: 0x0080, + 0xf4c: 0x0080, 0xf4d: 0x0080, 0xf4e: 0x0080, 0xf4f: 0x0080, 0xf50: 0x00c0, 0xf51: 0x00c0, + 0xf52: 0x00c0, 0xf53: 0x00c0, 0xf54: 0x00c0, 0xf55: 0x00c0, 0xf56: 0x00c0, 0xf57: 0x00c0, + 0xf58: 0x00c3, 0xf59: 0x00c3, 0xf5a: 0x00c0, 0xf5b: 0x00c0, 0xf5c: 0x00c0, 0xf5d: 0x00c0, + 0xf5e: 0x00c3, 0xf5f: 0x00c3, 0xf60: 0x00c3, 0xf61: 0x00c0, 0xf62: 0x00c0, 0xf63: 0x00c0, + 0xf64: 0x00c0, 0xf65: 0x00c0, 0xf66: 0x00c0, 0xf67: 0x00c0, 0xf68: 0x00c0, 0xf69: 0x00c0, + 0xf6a: 0x00c0, 0xf6b: 0x00c0, 0xf6c: 0x00c0, 0xf6d: 0x00c0, 0xf6e: 0x00c0, 0xf6f: 0x00c0, + 0xf70: 0x00c0, 0xf71: 0x00c3, 0xf72: 0x00c3, 0xf73: 0x00c3, 0xf74: 0x00c3, 0xf75: 0x00c0, + 0xf76: 0x00c0, 0xf77: 0x00c0, 0xf78: 0x00c0, 0xf79: 0x00c0, 0xf7a: 0x00c0, 0xf7b: 0x00c0, + 0xf7c: 0x00c0, 0xf7d: 0x00c0, 0xf7e: 0x00c0, 0xf7f: 0x00c0, + // Block 0x3e, offset 0xf80 + 0xf80: 0x00c0, 0xf81: 0x00c0, 0xf82: 0x00c3, 0xf83: 0x00c0, 0xf84: 0x00c0, 0xf85: 0x00c3, + 0xf86: 0x00c3, 0xf87: 0x00c0, 0xf88: 0x00c0, 0xf89: 0x00c0, 0xf8a: 0x00c0, 0xf8b: 0x00c0, + 0xf8c: 0x00c0, 0xf8d: 0x00c3, 0xf8e: 0x00c0, 0xf8f: 0x00c0, 0xf90: 0x00c0, 0xf91: 0x00c0, + 0xf92: 0x00c0, 0xf93: 0x00c0, 0xf94: 0x00c0, 0xf95: 0x00c0, 0xf96: 0x00c0, 0xf97: 0x00c0, + 0xf98: 0x00c0, 0xf99: 0x00c0, 0xf9a: 0x00c0, 0xf9b: 0x00c0, 0xf9c: 0x00c0, 0xf9d: 0x00c3, + 0xf9e: 0x0080, 0xf9f: 0x0080, 0xfa0: 0x00c0, 0xfa1: 0x00c0, 0xfa2: 0x00c0, 0xfa3: 0x00c0, + 0xfa4: 0x00c0, 0xfa5: 0x00c0, 0xfa6: 0x00c0, 0xfa7: 0x00c0, 0xfa8: 0x00c0, 0xfa9: 0x00c0, + 0xfaa: 0x00c0, 0xfab: 0x00c0, 0xfac: 0x00c0, 0xfad: 0x00c0, 0xfae: 0x00c0, 0xfaf: 0x00c0, + 0xfb0: 0x00c0, 0xfb1: 0x00c0, 0xfb2: 0x00c0, 0xfb3: 0x00c0, 0xfb4: 0x00c0, 0xfb5: 0x00c0, + 0xfb6: 0x00c0, 0xfb7: 0x00c0, 0xfb8: 0x00c0, 0xfb9: 0x00c0, 0xfba: 0x00c0, 0xfbb: 0x00c0, + 0xfbc: 0x00c0, 0xfbd: 0x00c0, 0xfbe: 0x00c0, 0xfbf: 0x00c0, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x00c0, 0xfc1: 0x00c0, 0xfc2: 0x00c0, 0xfc3: 0x00c0, 0xfc4: 0x00c0, 0xfc5: 0x00c0, + 0xfc7: 0x00c0, + 0xfcd: 0x00c0, 0xfd0: 0x00c0, 0xfd1: 0x00c0, + 0xfd2: 0x00c0, 0xfd3: 0x00c0, 0xfd4: 0x00c0, 0xfd5: 0x00c0, 0xfd6: 
0x00c0, 0xfd7: 0x00c0, + 0xfd8: 0x00c0, 0xfd9: 0x00c0, 0xfda: 0x00c0, 0xfdb: 0x00c0, 0xfdc: 0x00c0, 0xfdd: 0x00c0, + 0xfde: 0x00c0, 0xfdf: 0x00c0, 0xfe0: 0x00c0, 0xfe1: 0x00c0, 0xfe2: 0x00c0, 0xfe3: 0x00c0, + 0xfe4: 0x00c0, 0xfe5: 0x00c0, 0xfe6: 0x00c0, 0xfe7: 0x00c0, 0xfe8: 0x00c0, 0xfe9: 0x00c0, + 0xfea: 0x00c0, 0xfeb: 0x00c0, 0xfec: 0x00c0, 0xfed: 0x00c0, 0xfee: 0x00c0, 0xfef: 0x00c0, + 0xff0: 0x00c0, 0xff1: 0x00c0, 0xff2: 0x00c0, 0xff3: 0x00c0, 0xff4: 0x00c0, 0xff5: 0x00c0, + 0xff6: 0x00c0, 0xff7: 0x00c0, 0xff8: 0x00c0, 0xff9: 0x00c0, 0xffa: 0x00c0, 0xffb: 0x0080, + 0xffc: 0x0080, 0xffd: 0x00c0, 0xffe: 0x00c0, 0xfff: 0x00c0, + // Block 0x40, offset 0x1000 + 0x1000: 0x0040, 0x1001: 0x0040, 0x1002: 0x0040, 0x1003: 0x0040, 0x1004: 0x0040, 0x1005: 0x0040, + 0x1006: 0x0040, 0x1007: 0x0040, 0x1008: 0x0040, 0x1009: 0x0040, 0x100a: 0x0040, 0x100b: 0x0040, + 0x100c: 0x0040, 0x100d: 0x0040, 0x100e: 0x0040, 0x100f: 0x0040, 0x1010: 0x0040, 0x1011: 0x0040, + 0x1012: 0x0040, 0x1013: 0x0040, 0x1014: 0x0040, 0x1015: 0x0040, 0x1016: 0x0040, 0x1017: 0x0040, + 0x1018: 0x0040, 0x1019: 0x0040, 0x101a: 0x0040, 0x101b: 0x0040, 0x101c: 0x0040, 0x101d: 0x0040, + 0x101e: 0x0040, 0x101f: 0x0040, 0x1020: 0x0040, 0x1021: 0x0040, 0x1022: 0x0040, 0x1023: 0x0040, + 0x1024: 0x0040, 0x1025: 0x0040, 0x1026: 0x0040, 0x1027: 0x0040, 0x1028: 0x0040, 0x1029: 0x0040, + 0x102a: 0x0040, 0x102b: 0x0040, 0x102c: 0x0040, 0x102d: 0x0040, 0x102e: 0x0040, 0x102f: 0x0040, + 0x1030: 0x0040, 0x1031: 0x0040, 0x1032: 0x0040, 0x1033: 0x0040, 0x1034: 0x0040, 0x1035: 0x0040, + 0x1036: 0x0040, 0x1037: 0x0040, 0x1038: 0x0040, 0x1039: 0x0040, 0x103a: 0x0040, 0x103b: 0x0040, + 0x103c: 0x0040, 0x103d: 0x0040, 0x103e: 0x0040, 0x103f: 0x0040, + // Block 0x41, offset 0x1040 + 0x1040: 0x00c0, 0x1041: 0x00c0, 0x1042: 0x00c0, 0x1043: 0x00c0, 0x1044: 0x00c0, 0x1045: 0x00c0, + 0x1046: 0x00c0, 0x1047: 0x00c0, 0x1048: 0x00c0, 0x104a: 0x00c0, 0x104b: 0x00c0, + 0x104c: 0x00c0, 0x104d: 0x00c0, 0x1050: 0x00c0, 0x1051: 0x00c0, + 0x1052: 0x00c0, 0x1053: 0x00c0, 0x1054: 0x00c0, 0x1055: 0x00c0, 0x1056: 0x00c0, + 0x1058: 0x00c0, 0x105a: 0x00c0, 0x105b: 0x00c0, 0x105c: 0x00c0, 0x105d: 0x00c0, + 0x1060: 0x00c0, 0x1061: 0x00c0, 0x1062: 0x00c0, 0x1063: 0x00c0, + 0x1064: 0x00c0, 0x1065: 0x00c0, 0x1066: 0x00c0, 0x1067: 0x00c0, 0x1068: 0x00c0, 0x1069: 0x00c0, + 0x106a: 0x00c0, 0x106b: 0x00c0, 0x106c: 0x00c0, 0x106d: 0x00c0, 0x106e: 0x00c0, 0x106f: 0x00c0, + 0x1070: 0x00c0, 0x1071: 0x00c0, 0x1072: 0x00c0, 0x1073: 0x00c0, 0x1074: 0x00c0, 0x1075: 0x00c0, + 0x1076: 0x00c0, 0x1077: 0x00c0, 0x1078: 0x00c0, 0x1079: 0x00c0, 0x107a: 0x00c0, 0x107b: 0x00c0, + 0x107c: 0x00c0, 0x107d: 0x00c0, 0x107e: 0x00c0, 0x107f: 0x00c0, + // Block 0x42, offset 0x1080 + 0x1080: 0x00c0, 0x1081: 0x00c0, 0x1082: 0x00c0, 0x1083: 0x00c0, 0x1084: 0x00c0, 0x1085: 0x00c0, + 0x1086: 0x00c0, 0x1087: 0x00c0, 0x1088: 0x00c0, 0x108a: 0x00c0, 0x108b: 0x00c0, + 0x108c: 0x00c0, 0x108d: 0x00c0, 0x1090: 0x00c0, 0x1091: 0x00c0, + 0x1092: 0x00c0, 0x1093: 0x00c0, 0x1094: 0x00c0, 0x1095: 0x00c0, 0x1096: 0x00c0, 0x1097: 0x00c0, + 0x1098: 0x00c0, 0x1099: 0x00c0, 0x109a: 0x00c0, 0x109b: 0x00c0, 0x109c: 0x00c0, 0x109d: 0x00c0, + 0x109e: 0x00c0, 0x109f: 0x00c0, 0x10a0: 0x00c0, 0x10a1: 0x00c0, 0x10a2: 0x00c0, 0x10a3: 0x00c0, + 0x10a4: 0x00c0, 0x10a5: 0x00c0, 0x10a6: 0x00c0, 0x10a7: 0x00c0, 0x10a8: 0x00c0, 0x10a9: 0x00c0, + 0x10aa: 0x00c0, 0x10ab: 0x00c0, 0x10ac: 0x00c0, 0x10ad: 0x00c0, 0x10ae: 0x00c0, 0x10af: 0x00c0, + 0x10b0: 0x00c0, 0x10b2: 0x00c0, 0x10b3: 0x00c0, 0x10b4: 0x00c0, 0x10b5: 0x00c0, + 0x10b8: 0x00c0, 0x10b9: 
0x00c0, 0x10ba: 0x00c0, 0x10bb: 0x00c0, + 0x10bc: 0x00c0, 0x10bd: 0x00c0, 0x10be: 0x00c0, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x00c0, 0x10c2: 0x00c0, 0x10c3: 0x00c0, 0x10c4: 0x00c0, 0x10c5: 0x00c0, + 0x10c8: 0x00c0, 0x10c9: 0x00c0, 0x10ca: 0x00c0, 0x10cb: 0x00c0, + 0x10cc: 0x00c0, 0x10cd: 0x00c0, 0x10ce: 0x00c0, 0x10cf: 0x00c0, 0x10d0: 0x00c0, 0x10d1: 0x00c0, + 0x10d2: 0x00c0, 0x10d3: 0x00c0, 0x10d4: 0x00c0, 0x10d5: 0x00c0, 0x10d6: 0x00c0, + 0x10d8: 0x00c0, 0x10d9: 0x00c0, 0x10da: 0x00c0, 0x10db: 0x00c0, 0x10dc: 0x00c0, 0x10dd: 0x00c0, + 0x10de: 0x00c0, 0x10df: 0x00c0, 0x10e0: 0x00c0, 0x10e1: 0x00c0, 0x10e2: 0x00c0, 0x10e3: 0x00c0, + 0x10e4: 0x00c0, 0x10e5: 0x00c0, 0x10e6: 0x00c0, 0x10e7: 0x00c0, 0x10e8: 0x00c0, 0x10e9: 0x00c0, + 0x10ea: 0x00c0, 0x10eb: 0x00c0, 0x10ec: 0x00c0, 0x10ed: 0x00c0, 0x10ee: 0x00c0, 0x10ef: 0x00c0, + 0x10f0: 0x00c0, 0x10f1: 0x00c0, 0x10f2: 0x00c0, 0x10f3: 0x00c0, 0x10f4: 0x00c0, 0x10f5: 0x00c0, + 0x10f6: 0x00c0, 0x10f7: 0x00c0, 0x10f8: 0x00c0, 0x10f9: 0x00c0, 0x10fa: 0x00c0, 0x10fb: 0x00c0, + 0x10fc: 0x00c0, 0x10fd: 0x00c0, 0x10fe: 0x00c0, 0x10ff: 0x00c0, + // Block 0x44, offset 0x1100 + 0x1100: 0x00c0, 0x1101: 0x00c0, 0x1102: 0x00c0, 0x1103: 0x00c0, 0x1104: 0x00c0, 0x1105: 0x00c0, + 0x1106: 0x00c0, 0x1107: 0x00c0, 0x1108: 0x00c0, 0x1109: 0x00c0, 0x110a: 0x00c0, 0x110b: 0x00c0, + 0x110c: 0x00c0, 0x110d: 0x00c0, 0x110e: 0x00c0, 0x110f: 0x00c0, 0x1110: 0x00c0, + 0x1112: 0x00c0, 0x1113: 0x00c0, 0x1114: 0x00c0, 0x1115: 0x00c0, + 0x1118: 0x00c0, 0x1119: 0x00c0, 0x111a: 0x00c0, 0x111b: 0x00c0, 0x111c: 0x00c0, 0x111d: 0x00c0, + 0x111e: 0x00c0, 0x111f: 0x00c0, 0x1120: 0x00c0, 0x1121: 0x00c0, 0x1122: 0x00c0, 0x1123: 0x00c0, + 0x1124: 0x00c0, 0x1125: 0x00c0, 0x1126: 0x00c0, 0x1127: 0x00c0, 0x1128: 0x00c0, 0x1129: 0x00c0, + 0x112a: 0x00c0, 0x112b: 0x00c0, 0x112c: 0x00c0, 0x112d: 0x00c0, 0x112e: 0x00c0, 0x112f: 0x00c0, + 0x1130: 0x00c0, 0x1131: 0x00c0, 0x1132: 0x00c0, 0x1133: 0x00c0, 0x1134: 0x00c0, 0x1135: 0x00c0, + 0x1136: 0x00c0, 0x1137: 0x00c0, 0x1138: 0x00c0, 0x1139: 0x00c0, 0x113a: 0x00c0, 0x113b: 0x00c0, + 0x113c: 0x00c0, 0x113d: 0x00c0, 0x113e: 0x00c0, 0x113f: 0x00c0, + // Block 0x45, offset 0x1140 + 0x1140: 0x00c0, 0x1141: 0x00c0, 0x1142: 0x00c0, 0x1143: 0x00c0, 0x1144: 0x00c0, 0x1145: 0x00c0, + 0x1146: 0x00c0, 0x1147: 0x00c0, 0x1148: 0x00c0, 0x1149: 0x00c0, 0x114a: 0x00c0, 0x114b: 0x00c0, + 0x114c: 0x00c0, 0x114d: 0x00c0, 0x114e: 0x00c0, 0x114f: 0x00c0, 0x1150: 0x00c0, 0x1151: 0x00c0, + 0x1152: 0x00c0, 0x1153: 0x00c0, 0x1154: 0x00c0, 0x1155: 0x00c0, 0x1156: 0x00c0, 0x1157: 0x00c0, + 0x1158: 0x00c0, 0x1159: 0x00c0, 0x115a: 0x00c0, 0x115d: 0x00c3, + 0x115e: 0x00c3, 0x115f: 0x00c3, 0x1160: 0x0080, 0x1161: 0x0080, 0x1162: 0x0080, 0x1163: 0x0080, + 0x1164: 0x0080, 0x1165: 0x0080, 0x1166: 0x0080, 0x1167: 0x0080, 0x1168: 0x0080, 0x1169: 0x0080, + 0x116a: 0x0080, 0x116b: 0x0080, 0x116c: 0x0080, 0x116d: 0x0080, 0x116e: 0x0080, 0x116f: 0x0080, + 0x1170: 0x0080, 0x1171: 0x0080, 0x1172: 0x0080, 0x1173: 0x0080, 0x1174: 0x0080, 0x1175: 0x0080, + 0x1176: 0x0080, 0x1177: 0x0080, 0x1178: 0x0080, 0x1179: 0x0080, 0x117a: 0x0080, 0x117b: 0x0080, + 0x117c: 0x0080, + // Block 0x46, offset 0x1180 + 0x1180: 0x00c0, 0x1181: 0x00c0, 0x1182: 0x00c0, 0x1183: 0x00c0, 0x1184: 0x00c0, 0x1185: 0x00c0, + 0x1186: 0x00c0, 0x1187: 0x00c0, 0x1188: 0x00c0, 0x1189: 0x00c0, 0x118a: 0x00c0, 0x118b: 0x00c0, + 0x118c: 0x00c0, 0x118d: 0x00c0, 0x118e: 0x00c0, 0x118f: 0x00c0, 0x1190: 0x0080, 0x1191: 0x0080, + 0x1192: 0x0080, 0x1193: 0x0080, 0x1194: 0x0080, 0x1195: 0x0080, 0x1196: 0x0080, 0x1197: 0x0080, + 
0x1198: 0x0080, 0x1199: 0x0080, + 0x11a0: 0x00c0, 0x11a1: 0x00c0, 0x11a2: 0x00c0, 0x11a3: 0x00c0, + 0x11a4: 0x00c0, 0x11a5: 0x00c0, 0x11a6: 0x00c0, 0x11a7: 0x00c0, 0x11a8: 0x00c0, 0x11a9: 0x00c0, + 0x11aa: 0x00c0, 0x11ab: 0x00c0, 0x11ac: 0x00c0, 0x11ad: 0x00c0, 0x11ae: 0x00c0, 0x11af: 0x00c0, + 0x11b0: 0x00c0, 0x11b1: 0x00c0, 0x11b2: 0x00c0, 0x11b3: 0x00c0, 0x11b4: 0x00c0, 0x11b5: 0x00c0, + 0x11b6: 0x00c0, 0x11b7: 0x00c0, 0x11b8: 0x00c0, 0x11b9: 0x00c0, 0x11ba: 0x00c0, 0x11bb: 0x00c0, + 0x11bc: 0x00c0, 0x11bd: 0x00c0, 0x11be: 0x00c0, 0x11bf: 0x00c0, + // Block 0x47, offset 0x11c0 + 0x11c0: 0x00c0, 0x11c1: 0x00c0, 0x11c2: 0x00c0, 0x11c3: 0x00c0, 0x11c4: 0x00c0, 0x11c5: 0x00c0, + 0x11c6: 0x00c0, 0x11c7: 0x00c0, 0x11c8: 0x00c0, 0x11c9: 0x00c0, 0x11ca: 0x00c0, 0x11cb: 0x00c0, + 0x11cc: 0x00c0, 0x11cd: 0x00c0, 0x11ce: 0x00c0, 0x11cf: 0x00c0, 0x11d0: 0x00c0, 0x11d1: 0x00c0, + 0x11d2: 0x00c0, 0x11d3: 0x00c0, 0x11d4: 0x00c0, 0x11d5: 0x00c0, 0x11d6: 0x00c0, 0x11d7: 0x00c0, + 0x11d8: 0x00c0, 0x11d9: 0x00c0, 0x11da: 0x00c0, 0x11db: 0x00c0, 0x11dc: 0x00c0, 0x11dd: 0x00c0, + 0x11de: 0x00c0, 0x11df: 0x00c0, 0x11e0: 0x00c0, 0x11e1: 0x00c0, 0x11e2: 0x00c0, 0x11e3: 0x00c0, + 0x11e4: 0x00c0, 0x11e5: 0x00c0, 0x11e6: 0x00c0, 0x11e7: 0x00c0, 0x11e8: 0x00c0, 0x11e9: 0x00c0, + 0x11ea: 0x00c0, 0x11eb: 0x00c0, 0x11ec: 0x00c0, 0x11ed: 0x00c0, 0x11ee: 0x00c0, 0x11ef: 0x00c0, + 0x11f0: 0x00c0, 0x11f1: 0x00c0, 0x11f2: 0x00c0, 0x11f3: 0x00c0, 0x11f4: 0x00c0, 0x11f5: 0x00c0, + 0x11f8: 0x00c0, 0x11f9: 0x00c0, 0x11fa: 0x00c0, 0x11fb: 0x00c0, + 0x11fc: 0x00c0, 0x11fd: 0x00c0, + // Block 0x48, offset 0x1200 + 0x1200: 0x0080, 0x1201: 0x00c0, 0x1202: 0x00c0, 0x1203: 0x00c0, 0x1204: 0x00c0, 0x1205: 0x00c0, + 0x1206: 0x00c0, 0x1207: 0x00c0, 0x1208: 0x00c0, 0x1209: 0x00c0, 0x120a: 0x00c0, 0x120b: 0x00c0, + 0x120c: 0x00c0, 0x120d: 0x00c0, 0x120e: 0x00c0, 0x120f: 0x00c0, 0x1210: 0x00c0, 0x1211: 0x00c0, + 0x1212: 0x00c0, 0x1213: 0x00c0, 0x1214: 0x00c0, 0x1215: 0x00c0, 0x1216: 0x00c0, 0x1217: 0x00c0, + 0x1218: 0x00c0, 0x1219: 0x00c0, 0x121a: 0x00c0, 0x121b: 0x00c0, 0x121c: 0x00c0, 0x121d: 0x00c0, + 0x121e: 0x00c0, 0x121f: 0x00c0, 0x1220: 0x00c0, 0x1221: 0x00c0, 0x1222: 0x00c0, 0x1223: 0x00c0, + 0x1224: 0x00c0, 0x1225: 0x00c0, 0x1226: 0x00c0, 0x1227: 0x00c0, 0x1228: 0x00c0, 0x1229: 0x00c0, + 0x122a: 0x00c0, 0x122b: 0x00c0, 0x122c: 0x00c0, 0x122d: 0x00c0, 0x122e: 0x00c0, 0x122f: 0x00c0, + 0x1230: 0x00c0, 0x1231: 0x00c0, 0x1232: 0x00c0, 0x1233: 0x00c0, 0x1234: 0x00c0, 0x1235: 0x00c0, + 0x1236: 0x00c0, 0x1237: 0x00c0, 0x1238: 0x00c0, 0x1239: 0x00c0, 0x123a: 0x00c0, 0x123b: 0x00c0, + 0x123c: 0x00c0, 0x123d: 0x00c0, 0x123e: 0x00c0, 0x123f: 0x00c0, + // Block 0x49, offset 0x1240 + 0x1240: 0x00c0, 0x1241: 0x00c0, 0x1242: 0x00c0, 0x1243: 0x00c0, 0x1244: 0x00c0, 0x1245: 0x00c0, + 0x1246: 0x00c0, 0x1247: 0x00c0, 0x1248: 0x00c0, 0x1249: 0x00c0, 0x124a: 0x00c0, 0x124b: 0x00c0, + 0x124c: 0x00c0, 0x124d: 0x00c0, 0x124e: 0x00c0, 0x124f: 0x00c0, 0x1250: 0x00c0, 0x1251: 0x00c0, + 0x1252: 0x00c0, 0x1253: 0x00c0, 0x1254: 0x00c0, 0x1255: 0x00c0, 0x1256: 0x00c0, 0x1257: 0x00c0, + 0x1258: 0x00c0, 0x1259: 0x00c0, 0x125a: 0x00c0, 0x125b: 0x00c0, 0x125c: 0x00c0, 0x125d: 0x00c0, + 0x125e: 0x00c0, 0x125f: 0x00c0, 0x1260: 0x00c0, 0x1261: 0x00c0, 0x1262: 0x00c0, 0x1263: 0x00c0, + 0x1264: 0x00c0, 0x1265: 0x00c0, 0x1266: 0x00c0, 0x1267: 0x00c0, 0x1268: 0x00c0, 0x1269: 0x00c0, + 0x126a: 0x00c0, 0x126b: 0x00c0, 0x126c: 0x00c0, 0x126d: 0x0080, 0x126e: 0x0080, 0x126f: 0x00c0, + 0x1270: 0x00c0, 0x1271: 0x00c0, 0x1272: 0x00c0, 0x1273: 0x00c0, 0x1274: 0x00c0, 0x1275: 
0x00c0, + 0x1276: 0x00c0, 0x1277: 0x00c0, 0x1278: 0x00c0, 0x1279: 0x00c0, 0x127a: 0x00c0, 0x127b: 0x00c0, + 0x127c: 0x00c0, 0x127d: 0x00c0, 0x127e: 0x00c0, 0x127f: 0x00c0, + // Block 0x4a, offset 0x1280 + 0x1280: 0x0080, 0x1281: 0x00c0, 0x1282: 0x00c0, 0x1283: 0x00c0, 0x1284: 0x00c0, 0x1285: 0x00c0, + 0x1286: 0x00c0, 0x1287: 0x00c0, 0x1288: 0x00c0, 0x1289: 0x00c0, 0x128a: 0x00c0, 0x128b: 0x00c0, + 0x128c: 0x00c0, 0x128d: 0x00c0, 0x128e: 0x00c0, 0x128f: 0x00c0, 0x1290: 0x00c0, 0x1291: 0x00c0, + 0x1292: 0x00c0, 0x1293: 0x00c0, 0x1294: 0x00c0, 0x1295: 0x00c0, 0x1296: 0x00c0, 0x1297: 0x00c0, + 0x1298: 0x00c0, 0x1299: 0x00c0, 0x129a: 0x00c0, 0x129b: 0x0080, 0x129c: 0x0080, + 0x12a0: 0x00c0, 0x12a1: 0x00c0, 0x12a2: 0x00c0, 0x12a3: 0x00c0, + 0x12a4: 0x00c0, 0x12a5: 0x00c0, 0x12a6: 0x00c0, 0x12a7: 0x00c0, 0x12a8: 0x00c0, 0x12a9: 0x00c0, + 0x12aa: 0x00c0, 0x12ab: 0x00c0, 0x12ac: 0x00c0, 0x12ad: 0x00c0, 0x12ae: 0x00c0, 0x12af: 0x00c0, + 0x12b0: 0x00c0, 0x12b1: 0x00c0, 0x12b2: 0x00c0, 0x12b3: 0x00c0, 0x12b4: 0x00c0, 0x12b5: 0x00c0, + 0x12b6: 0x00c0, 0x12b7: 0x00c0, 0x12b8: 0x00c0, 0x12b9: 0x00c0, 0x12ba: 0x00c0, 0x12bb: 0x00c0, + 0x12bc: 0x00c0, 0x12bd: 0x00c0, 0x12be: 0x00c0, 0x12bf: 0x00c0, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x00c0, 0x12c1: 0x00c0, 0x12c2: 0x00c0, 0x12c3: 0x00c0, 0x12c4: 0x00c0, 0x12c5: 0x00c0, + 0x12c6: 0x00c0, 0x12c7: 0x00c0, 0x12c8: 0x00c0, 0x12c9: 0x00c0, 0x12ca: 0x00c0, 0x12cb: 0x00c0, + 0x12cc: 0x00c0, 0x12cd: 0x00c0, 0x12ce: 0x00c0, 0x12cf: 0x00c0, 0x12d0: 0x00c0, 0x12d1: 0x00c0, + 0x12d2: 0x00c0, 0x12d3: 0x00c0, 0x12d4: 0x00c0, 0x12d5: 0x00c0, 0x12d6: 0x00c0, 0x12d7: 0x00c0, + 0x12d8: 0x00c0, 0x12d9: 0x00c0, 0x12da: 0x00c0, 0x12db: 0x00c0, 0x12dc: 0x00c0, 0x12dd: 0x00c0, + 0x12de: 0x00c0, 0x12df: 0x00c0, 0x12e0: 0x00c0, 0x12e1: 0x00c0, 0x12e2: 0x00c0, 0x12e3: 0x00c0, + 0x12e4: 0x00c0, 0x12e5: 0x00c0, 0x12e6: 0x00c0, 0x12e7: 0x00c0, 0x12e8: 0x00c0, 0x12e9: 0x00c0, + 0x12ea: 0x00c0, 0x12eb: 0x0080, 0x12ec: 0x0080, 0x12ed: 0x0080, 0x12ee: 0x0080, 0x12ef: 0x0080, + 0x12f0: 0x0080, 0x12f1: 0x00c0, 0x12f2: 0x00c0, 0x12f3: 0x00c0, 0x12f4: 0x00c0, 0x12f5: 0x00c0, + 0x12f6: 0x00c0, 0x12f7: 0x00c0, 0x12f8: 0x00c0, + // Block 0x4c, offset 0x1300 + 0x1300: 0x00c0, 0x1301: 0x00c0, 0x1302: 0x00c0, 0x1303: 0x00c0, 0x1304: 0x00c0, 0x1305: 0x00c0, + 0x1306: 0x00c0, 0x1307: 0x00c0, 0x1308: 0x00c0, 0x1309: 0x00c0, 0x130a: 0x00c0, 0x130b: 0x00c0, + 0x130c: 0x00c0, 0x130e: 0x00c0, 0x130f: 0x00c0, 0x1310: 0x00c0, 0x1311: 0x00c0, + 0x1312: 0x00c3, 0x1313: 0x00c3, 0x1314: 0x00c6, + 0x1320: 0x00c0, 0x1321: 0x00c0, 0x1322: 0x00c0, 0x1323: 0x00c0, + 0x1324: 0x00c0, 0x1325: 0x00c0, 0x1326: 0x00c0, 0x1327: 0x00c0, 0x1328: 0x00c0, 0x1329: 0x00c0, + 0x132a: 0x00c0, 0x132b: 0x00c0, 0x132c: 0x00c0, 0x132d: 0x00c0, 0x132e: 0x00c0, 0x132f: 0x00c0, + 0x1330: 0x00c0, 0x1331: 0x00c0, 0x1332: 0x00c3, 0x1333: 0x00c3, 0x1334: 0x00c6, 0x1335: 0x0080, + 0x1336: 0x0080, + // Block 0x4d, offset 0x1340 + 0x1340: 0x00c0, 0x1341: 0x00c0, 0x1342: 0x00c0, 0x1343: 0x00c0, 0x1344: 0x00c0, 0x1345: 0x00c0, + 0x1346: 0x00c0, 0x1347: 0x00c0, 0x1348: 0x00c0, 0x1349: 0x00c0, 0x134a: 0x00c0, 0x134b: 0x00c0, + 0x134c: 0x00c0, 0x134d: 0x00c0, 0x134e: 0x00c0, 0x134f: 0x00c0, 0x1350: 0x00c0, 0x1351: 0x00c0, + 0x1352: 0x00c3, 0x1353: 0x00c3, + 0x1360: 0x00c0, 0x1361: 0x00c0, 0x1362: 0x00c0, 0x1363: 0x00c0, + 0x1364: 0x00c0, 0x1365: 0x00c0, 0x1366: 0x00c0, 0x1367: 0x00c0, 0x1368: 0x00c0, 0x1369: 0x00c0, + 0x136a: 0x00c0, 0x136b: 0x00c0, 0x136c: 0x00c0, 0x136e: 0x00c0, 0x136f: 0x00c0, + 0x1370: 0x00c0, 0x1372: 0x00c3, 0x1373: 
0x00c3, + // Block 0x4e, offset 0x1380 + 0x1380: 0x00c0, 0x1381: 0x00c0, 0x1382: 0x00c0, 0x1383: 0x00c0, 0x1384: 0x00c0, 0x1385: 0x00c0, + 0x1386: 0x00c0, 0x1387: 0x00c0, 0x1388: 0x00c0, 0x1389: 0x00c0, 0x138a: 0x00c0, 0x138b: 0x00c0, + 0x138c: 0x00c0, 0x138d: 0x00c0, 0x138e: 0x00c0, 0x138f: 0x00c0, 0x1390: 0x00c0, 0x1391: 0x00c0, + 0x1392: 0x00c0, 0x1393: 0x00c0, 0x1394: 0x00c0, 0x1395: 0x00c0, 0x1396: 0x00c0, 0x1397: 0x00c0, + 0x1398: 0x00c0, 0x1399: 0x00c0, 0x139a: 0x00c0, 0x139b: 0x00c0, 0x139c: 0x00c0, 0x139d: 0x00c0, + 0x139e: 0x00c0, 0x139f: 0x00c0, 0x13a0: 0x00c0, 0x13a1: 0x00c0, 0x13a2: 0x00c0, 0x13a3: 0x00c0, + 0x13a4: 0x00c0, 0x13a5: 0x00c0, 0x13a6: 0x00c0, 0x13a7: 0x00c0, 0x13a8: 0x00c0, 0x13a9: 0x00c0, + 0x13aa: 0x00c0, 0x13ab: 0x00c0, 0x13ac: 0x00c0, 0x13ad: 0x00c0, 0x13ae: 0x00c0, 0x13af: 0x00c0, + 0x13b0: 0x00c0, 0x13b1: 0x00c0, 0x13b2: 0x00c0, 0x13b3: 0x00c0, 0x13b4: 0x0040, 0x13b5: 0x0040, + 0x13b6: 0x00c0, 0x13b7: 0x00c3, 0x13b8: 0x00c3, 0x13b9: 0x00c3, 0x13ba: 0x00c3, 0x13bb: 0x00c3, + 0x13bc: 0x00c3, 0x13bd: 0x00c3, 0x13be: 0x00c0, 0x13bf: 0x00c0, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x00c0, 0x13c1: 0x00c0, 0x13c2: 0x00c0, 0x13c3: 0x00c0, 0x13c4: 0x00c0, 0x13c5: 0x00c0, + 0x13c6: 0x00c3, 0x13c7: 0x00c0, 0x13c8: 0x00c0, 0x13c9: 0x00c3, 0x13ca: 0x00c3, 0x13cb: 0x00c3, + 0x13cc: 0x00c3, 0x13cd: 0x00c3, 0x13ce: 0x00c3, 0x13cf: 0x00c3, 0x13d0: 0x00c3, 0x13d1: 0x00c3, + 0x13d2: 0x00c6, 0x13d3: 0x00c3, 0x13d4: 0x0080, 0x13d5: 0x0080, 0x13d6: 0x0080, 0x13d7: 0x00c0, + 0x13d8: 0x0080, 0x13d9: 0x0080, 0x13da: 0x0080, 0x13db: 0x0080, 0x13dc: 0x00c0, 0x13dd: 0x00c3, + 0x13e0: 0x00c0, 0x13e1: 0x00c0, 0x13e2: 0x00c0, 0x13e3: 0x00c0, + 0x13e4: 0x00c0, 0x13e5: 0x00c0, 0x13e6: 0x00c0, 0x13e7: 0x00c0, 0x13e8: 0x00c0, 0x13e9: 0x00c0, + 0x13f0: 0x0080, 0x13f1: 0x0080, 0x13f2: 0x0080, 0x13f3: 0x0080, 0x13f4: 0x0080, 0x13f5: 0x0080, + 0x13f6: 0x0080, 0x13f7: 0x0080, 0x13f8: 0x0080, 0x13f9: 0x0080, + // Block 0x50, offset 0x1400 + 0x1400: 0x0080, 0x1401: 0x0080, 0x1402: 0x0080, 0x1403: 0x0080, 0x1404: 0x0080, 0x1405: 0x0080, + 0x1406: 0x0080, 0x1407: 0x0082, 0x1408: 0x0080, 0x1409: 0x0080, 0x140a: 0x0080, 0x140b: 0x0040, + 0x140c: 0x0040, 0x140d: 0x0040, 0x140e: 0x0040, 0x1410: 0x00c0, 0x1411: 0x00c0, + 0x1412: 0x00c0, 0x1413: 0x00c0, 0x1414: 0x00c0, 0x1415: 0x00c0, 0x1416: 0x00c0, 0x1417: 0x00c0, + 0x1418: 0x00c0, 0x1419: 0x00c0, + 0x1420: 0x00c2, 0x1421: 0x00c2, 0x1422: 0x00c2, 0x1423: 0x00c2, + 0x1424: 0x00c2, 0x1425: 0x00c2, 0x1426: 0x00c2, 0x1427: 0x00c2, 0x1428: 0x00c2, 0x1429: 0x00c2, + 0x142a: 0x00c2, 0x142b: 0x00c2, 0x142c: 0x00c2, 0x142d: 0x00c2, 0x142e: 0x00c2, 0x142f: 0x00c2, + 0x1430: 0x00c2, 0x1431: 0x00c2, 0x1432: 0x00c2, 0x1433: 0x00c2, 0x1434: 0x00c2, 0x1435: 0x00c2, + 0x1436: 0x00c2, 0x1437: 0x00c2, 0x1438: 0x00c2, 0x1439: 0x00c2, 0x143a: 0x00c2, 0x143b: 0x00c2, + 0x143c: 0x00c2, 0x143d: 0x00c2, 0x143e: 0x00c2, 0x143f: 0x00c2, + // Block 0x51, offset 0x1440 + 0x1440: 0x00c2, 0x1441: 0x00c2, 0x1442: 0x00c2, 0x1443: 0x00c2, 0x1444: 0x00c2, 0x1445: 0x00c2, + 0x1446: 0x00c2, 0x1447: 0x00c2, 0x1448: 0x00c2, 0x1449: 0x00c2, 0x144a: 0x00c2, 0x144b: 0x00c2, + 0x144c: 0x00c2, 0x144d: 0x00c2, 0x144e: 0x00c2, 0x144f: 0x00c2, 0x1450: 0x00c2, 0x1451: 0x00c2, + 0x1452: 0x00c2, 0x1453: 0x00c2, 0x1454: 0x00c2, 0x1455: 0x00c2, 0x1456: 0x00c2, 0x1457: 0x00c2, + 0x1458: 0x00c2, 0x1459: 0x00c2, 0x145a: 0x00c2, 0x145b: 0x00c2, 0x145c: 0x00c2, 0x145d: 0x00c2, + 0x145e: 0x00c2, 0x145f: 0x00c2, 0x1460: 0x00c2, 0x1461: 0x00c2, 0x1462: 0x00c2, 0x1463: 0x00c2, + 0x1464: 0x00c2, 0x1465: 0x00c2, 
0x1466: 0x00c2, 0x1467: 0x00c2, 0x1468: 0x00c2, 0x1469: 0x00c2, + 0x146a: 0x00c2, 0x146b: 0x00c2, 0x146c: 0x00c2, 0x146d: 0x00c2, 0x146e: 0x00c2, 0x146f: 0x00c2, + 0x1470: 0x00c2, 0x1471: 0x00c2, 0x1472: 0x00c2, 0x1473: 0x00c2, 0x1474: 0x00c2, 0x1475: 0x00c2, + 0x1476: 0x00c2, 0x1477: 0x00c2, + // Block 0x52, offset 0x1480 + 0x1480: 0x00c0, 0x1481: 0x00c0, 0x1482: 0x00c0, 0x1483: 0x00c0, 0x1484: 0x00c0, 0x1485: 0x00c3, + 0x1486: 0x00c3, 0x1487: 0x00c2, 0x1488: 0x00c2, 0x1489: 0x00c2, 0x148a: 0x00c2, 0x148b: 0x00c2, + 0x148c: 0x00c2, 0x148d: 0x00c2, 0x148e: 0x00c2, 0x148f: 0x00c2, 0x1490: 0x00c2, 0x1491: 0x00c2, + 0x1492: 0x00c2, 0x1493: 0x00c2, 0x1494: 0x00c2, 0x1495: 0x00c2, 0x1496: 0x00c2, 0x1497: 0x00c2, + 0x1498: 0x00c2, 0x1499: 0x00c2, 0x149a: 0x00c2, 0x149b: 0x00c2, 0x149c: 0x00c2, 0x149d: 0x00c2, + 0x149e: 0x00c2, 0x149f: 0x00c2, 0x14a0: 0x00c2, 0x14a1: 0x00c2, 0x14a2: 0x00c2, 0x14a3: 0x00c2, + 0x14a4: 0x00c2, 0x14a5: 0x00c2, 0x14a6: 0x00c2, 0x14a7: 0x00c2, 0x14a8: 0x00c2, 0x14a9: 0x00c3, + 0x14aa: 0x00c2, + 0x14b0: 0x00c0, 0x14b1: 0x00c0, 0x14b2: 0x00c0, 0x14b3: 0x00c0, 0x14b4: 0x00c0, 0x14b5: 0x00c0, + 0x14b6: 0x00c0, 0x14b7: 0x00c0, 0x14b8: 0x00c0, 0x14b9: 0x00c0, 0x14ba: 0x00c0, 0x14bb: 0x00c0, + 0x14bc: 0x00c0, 0x14bd: 0x00c0, 0x14be: 0x00c0, 0x14bf: 0x00c0, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x00c0, 0x14c1: 0x00c0, 0x14c2: 0x00c0, 0x14c3: 0x00c0, 0x14c4: 0x00c0, 0x14c5: 0x00c0, + 0x14c6: 0x00c0, 0x14c7: 0x00c0, 0x14c8: 0x00c0, 0x14c9: 0x00c0, 0x14ca: 0x00c0, 0x14cb: 0x00c0, + 0x14cc: 0x00c0, 0x14cd: 0x00c0, 0x14ce: 0x00c0, 0x14cf: 0x00c0, 0x14d0: 0x00c0, 0x14d1: 0x00c0, + 0x14d2: 0x00c0, 0x14d3: 0x00c0, 0x14d4: 0x00c0, 0x14d5: 0x00c0, 0x14d6: 0x00c0, 0x14d7: 0x00c0, + 0x14d8: 0x00c0, 0x14d9: 0x00c0, 0x14da: 0x00c0, 0x14db: 0x00c0, 0x14dc: 0x00c0, 0x14dd: 0x00c0, + 0x14de: 0x00c0, 0x14df: 0x00c0, 0x14e0: 0x00c0, 0x14e1: 0x00c0, 0x14e2: 0x00c0, 0x14e3: 0x00c0, + 0x14e4: 0x00c0, 0x14e5: 0x00c0, 0x14e6: 0x00c0, 0x14e7: 0x00c0, 0x14e8: 0x00c0, 0x14e9: 0x00c0, + 0x14ea: 0x00c0, 0x14eb: 0x00c0, 0x14ec: 0x00c0, 0x14ed: 0x00c0, 0x14ee: 0x00c0, 0x14ef: 0x00c0, + 0x14f0: 0x00c0, 0x14f1: 0x00c0, 0x14f2: 0x00c0, 0x14f3: 0x00c0, 0x14f4: 0x00c0, 0x14f5: 0x00c0, + // Block 0x54, offset 0x1500 + 0x1500: 0x00c0, 0x1501: 0x00c0, 0x1502: 0x00c0, 0x1503: 0x00c0, 0x1504: 0x00c0, 0x1505: 0x00c0, + 0x1506: 0x00c0, 0x1507: 0x00c0, 0x1508: 0x00c0, 0x1509: 0x00c0, 0x150a: 0x00c0, 0x150b: 0x00c0, + 0x150c: 0x00c0, 0x150d: 0x00c0, 0x150e: 0x00c0, 0x150f: 0x00c0, 0x1510: 0x00c0, 0x1511: 0x00c0, + 0x1512: 0x00c0, 0x1513: 0x00c0, 0x1514: 0x00c0, 0x1515: 0x00c0, 0x1516: 0x00c0, 0x1517: 0x00c0, + 0x1518: 0x00c0, 0x1519: 0x00c0, 0x151a: 0x00c0, 0x151b: 0x00c0, 0x151c: 0x00c0, 0x151d: 0x00c0, + 0x151e: 0x00c0, 0x1520: 0x00c3, 0x1521: 0x00c3, 0x1522: 0x00c3, 0x1523: 0x00c0, + 0x1524: 0x00c0, 0x1525: 0x00c0, 0x1526: 0x00c0, 0x1527: 0x00c3, 0x1528: 0x00c3, 0x1529: 0x00c0, + 0x152a: 0x00c0, 0x152b: 0x00c0, + 0x1530: 0x00c0, 0x1531: 0x00c0, 0x1532: 0x00c3, 0x1533: 0x00c0, 0x1534: 0x00c0, 0x1535: 0x00c0, + 0x1536: 0x00c0, 0x1537: 0x00c0, 0x1538: 0x00c0, 0x1539: 0x00c3, 0x153a: 0x00c3, 0x153b: 0x00c3, + // Block 0x55, offset 0x1540 + 0x1540: 0x0080, 0x1544: 0x0080, 0x1545: 0x0080, + 0x1546: 0x00c0, 0x1547: 0x00c0, 0x1548: 0x00c0, 0x1549: 0x00c0, 0x154a: 0x00c0, 0x154b: 0x00c0, + 0x154c: 0x00c0, 0x154d: 0x00c0, 0x154e: 0x00c0, 0x154f: 0x00c0, 0x1550: 0x00c0, 0x1551: 0x00c0, + 0x1552: 0x00c0, 0x1553: 0x00c0, 0x1554: 0x00c0, 0x1555: 0x00c0, 0x1556: 0x00c0, 0x1557: 0x00c0, + 0x1558: 0x00c0, 0x1559: 0x00c0, 0x155a: 
0x00c0, 0x155b: 0x00c0, 0x155c: 0x00c0, 0x155d: 0x00c0, + 0x155e: 0x00c0, 0x155f: 0x00c0, 0x1560: 0x00c0, 0x1561: 0x00c0, 0x1562: 0x00c0, 0x1563: 0x00c0, + 0x1564: 0x00c0, 0x1565: 0x00c0, 0x1566: 0x00c0, 0x1567: 0x00c0, 0x1568: 0x00c0, 0x1569: 0x00c0, + 0x156a: 0x00c0, 0x156b: 0x00c0, 0x156c: 0x00c0, 0x156d: 0x00c0, + 0x1570: 0x00c0, 0x1571: 0x00c0, 0x1572: 0x00c0, 0x1573: 0x00c0, 0x1574: 0x00c0, + // Block 0x56, offset 0x1580 + 0x1580: 0x00c0, 0x1581: 0x00c0, 0x1582: 0x00c0, 0x1583: 0x00c0, 0x1584: 0x00c0, 0x1585: 0x00c0, + 0x1586: 0x00c0, 0x1587: 0x00c0, 0x1588: 0x00c0, 0x1589: 0x00c0, 0x158a: 0x00c0, 0x158b: 0x00c0, + 0x158c: 0x00c0, 0x158d: 0x00c0, 0x158e: 0x00c0, 0x158f: 0x00c0, 0x1590: 0x00c0, 0x1591: 0x00c0, + 0x1592: 0x00c0, 0x1593: 0x00c0, 0x1594: 0x00c0, 0x1595: 0x00c0, 0x1596: 0x00c0, 0x1597: 0x00c0, + 0x1598: 0x00c0, 0x1599: 0x00c0, 0x159a: 0x00c0, 0x159b: 0x00c0, 0x159c: 0x00c0, 0x159d: 0x00c0, + 0x159e: 0x00c0, 0x159f: 0x00c0, 0x15a0: 0x00c0, 0x15a1: 0x00c0, 0x15a2: 0x00c0, 0x15a3: 0x00c0, + 0x15a4: 0x00c0, 0x15a5: 0x00c0, 0x15a6: 0x00c0, 0x15a7: 0x00c0, 0x15a8: 0x00c0, 0x15a9: 0x00c0, + 0x15aa: 0x00c0, 0x15ab: 0x00c0, + 0x15b0: 0x00c0, 0x15b1: 0x00c0, 0x15b2: 0x00c0, 0x15b3: 0x00c0, 0x15b4: 0x00c0, 0x15b5: 0x00c0, + 0x15b6: 0x00c0, 0x15b7: 0x00c0, 0x15b8: 0x00c0, 0x15b9: 0x00c0, 0x15ba: 0x00c0, 0x15bb: 0x00c0, + 0x15bc: 0x00c0, 0x15bd: 0x00c0, 0x15be: 0x00c0, 0x15bf: 0x00c0, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x00c0, 0x15c1: 0x00c0, 0x15c2: 0x00c0, 0x15c3: 0x00c0, 0x15c4: 0x00c0, 0x15c5: 0x00c0, + 0x15c6: 0x00c0, 0x15c7: 0x00c0, 0x15c8: 0x00c0, 0x15c9: 0x00c0, + 0x15d0: 0x00c0, 0x15d1: 0x00c0, + 0x15d2: 0x00c0, 0x15d3: 0x00c0, 0x15d4: 0x00c0, 0x15d5: 0x00c0, 0x15d6: 0x00c0, 0x15d7: 0x00c0, + 0x15d8: 0x00c0, 0x15d9: 0x00c0, 0x15da: 0x0080, + 0x15de: 0x0080, 0x15df: 0x0080, 0x15e0: 0x0080, 0x15e1: 0x0080, 0x15e2: 0x0080, 0x15e3: 0x0080, + 0x15e4: 0x0080, 0x15e5: 0x0080, 0x15e6: 0x0080, 0x15e7: 0x0080, 0x15e8: 0x0080, 0x15e9: 0x0080, + 0x15ea: 0x0080, 0x15eb: 0x0080, 0x15ec: 0x0080, 0x15ed: 0x0080, 0x15ee: 0x0080, 0x15ef: 0x0080, + 0x15f0: 0x0080, 0x15f1: 0x0080, 0x15f2: 0x0080, 0x15f3: 0x0080, 0x15f4: 0x0080, 0x15f5: 0x0080, + 0x15f6: 0x0080, 0x15f7: 0x0080, 0x15f8: 0x0080, 0x15f9: 0x0080, 0x15fa: 0x0080, 0x15fb: 0x0080, + 0x15fc: 0x0080, 0x15fd: 0x0080, 0x15fe: 0x0080, 0x15ff: 0x0080, + // Block 0x58, offset 0x1600 + 0x1600: 0x00c0, 0x1601: 0x00c0, 0x1602: 0x00c0, 0x1603: 0x00c0, 0x1604: 0x00c0, 0x1605: 0x00c0, + 0x1606: 0x00c0, 0x1607: 0x00c0, 0x1608: 0x00c0, 0x1609: 0x00c0, 0x160a: 0x00c0, 0x160b: 0x00c0, + 0x160c: 0x00c0, 0x160d: 0x00c0, 0x160e: 0x00c0, 0x160f: 0x00c0, 0x1610: 0x00c0, 0x1611: 0x00c0, + 0x1612: 0x00c0, 0x1613: 0x00c0, 0x1614: 0x00c0, 0x1615: 0x00c0, 0x1616: 0x00c0, 0x1617: 0x00c3, + 0x1618: 0x00c3, 0x1619: 0x00c0, 0x161a: 0x00c0, 0x161b: 0x00c3, + 0x161e: 0x0080, 0x161f: 0x0080, 0x1620: 0x00c0, 0x1621: 0x00c0, 0x1622: 0x00c0, 0x1623: 0x00c0, + 0x1624: 0x00c0, 0x1625: 0x00c0, 0x1626: 0x00c0, 0x1627: 0x00c0, 0x1628: 0x00c0, 0x1629: 0x00c0, + 0x162a: 0x00c0, 0x162b: 0x00c0, 0x162c: 0x00c0, 0x162d: 0x00c0, 0x162e: 0x00c0, 0x162f: 0x00c0, + 0x1630: 0x00c0, 0x1631: 0x00c0, 0x1632: 0x00c0, 0x1633: 0x00c0, 0x1634: 0x00c0, 0x1635: 0x00c0, + 0x1636: 0x00c0, 0x1637: 0x00c0, 0x1638: 0x00c0, 0x1639: 0x00c0, 0x163a: 0x00c0, 0x163b: 0x00c0, + 0x163c: 0x00c0, 0x163d: 0x00c0, 0x163e: 0x00c0, 0x163f: 0x00c0, + // Block 0x59, offset 0x1640 + 0x1640: 0x00c0, 0x1641: 0x00c0, 0x1642: 0x00c0, 0x1643: 0x00c0, 0x1644: 0x00c0, 0x1645: 0x00c0, + 0x1646: 0x00c0, 0x1647: 0x00c0, 
0x1648: 0x00c0, 0x1649: 0x00c0, 0x164a: 0x00c0, 0x164b: 0x00c0, + 0x164c: 0x00c0, 0x164d: 0x00c0, 0x164e: 0x00c0, 0x164f: 0x00c0, 0x1650: 0x00c0, 0x1651: 0x00c0, + 0x1652: 0x00c0, 0x1653: 0x00c0, 0x1654: 0x00c0, 0x1655: 0x00c0, 0x1656: 0x00c3, 0x1657: 0x00c0, + 0x1658: 0x00c3, 0x1659: 0x00c3, 0x165a: 0x00c3, 0x165b: 0x00c3, 0x165c: 0x00c3, 0x165d: 0x00c3, + 0x165e: 0x00c3, 0x1660: 0x00c6, 0x1661: 0x00c0, 0x1662: 0x00c3, 0x1663: 0x00c0, + 0x1664: 0x00c0, 0x1665: 0x00c3, 0x1666: 0x00c3, 0x1667: 0x00c3, 0x1668: 0x00c3, 0x1669: 0x00c3, + 0x166a: 0x00c3, 0x166b: 0x00c3, 0x166c: 0x00c3, 0x166d: 0x00c0, 0x166e: 0x00c0, 0x166f: 0x00c0, + 0x1670: 0x00c0, 0x1671: 0x00c0, 0x1672: 0x00c0, 0x1673: 0x00c3, 0x1674: 0x00c3, 0x1675: 0x00c3, + 0x1676: 0x00c3, 0x1677: 0x00c3, 0x1678: 0x00c3, 0x1679: 0x00c3, 0x167a: 0x00c3, 0x167b: 0x00c3, + 0x167c: 0x00c3, 0x167f: 0x00c3, + // Block 0x5a, offset 0x1680 + 0x1680: 0x00c0, 0x1681: 0x00c0, 0x1682: 0x00c0, 0x1683: 0x00c0, 0x1684: 0x00c0, 0x1685: 0x00c0, + 0x1686: 0x00c0, 0x1687: 0x00c0, 0x1688: 0x00c0, 0x1689: 0x00c0, + 0x1690: 0x00c0, 0x1691: 0x00c0, + 0x1692: 0x00c0, 0x1693: 0x00c0, 0x1694: 0x00c0, 0x1695: 0x00c0, 0x1696: 0x00c0, 0x1697: 0x00c0, + 0x1698: 0x00c0, 0x1699: 0x00c0, + 0x16a0: 0x0080, 0x16a1: 0x0080, 0x16a2: 0x0080, 0x16a3: 0x0080, + 0x16a4: 0x0080, 0x16a5: 0x0080, 0x16a6: 0x0080, 0x16a7: 0x00c0, 0x16a8: 0x0080, 0x16a9: 0x0080, + 0x16aa: 0x0080, 0x16ab: 0x0080, 0x16ac: 0x0080, 0x16ad: 0x0080, + 0x16b0: 0x00c3, 0x16b1: 0x00c3, 0x16b2: 0x00c3, 0x16b3: 0x00c3, 0x16b4: 0x00c3, 0x16b5: 0x00c3, + 0x16b6: 0x00c3, 0x16b7: 0x00c3, 0x16b8: 0x00c3, 0x16b9: 0x00c3, 0x16ba: 0x00c3, 0x16bb: 0x00c3, + 0x16bc: 0x00c3, 0x16bd: 0x00c3, 0x16be: 0x0083, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x00c3, 0x16c1: 0x00c3, 0x16c2: 0x00c3, 0x16c3: 0x00c3, 0x16c4: 0x00c0, 0x16c5: 0x00c0, + 0x16c6: 0x00c0, 0x16c7: 0x00c0, 0x16c8: 0x00c0, 0x16c9: 0x00c0, 0x16ca: 0x00c0, 0x16cb: 0x00c0, + 0x16cc: 0x00c0, 0x16cd: 0x00c0, 0x16ce: 0x00c0, 0x16cf: 0x00c0, 0x16d0: 0x00c0, 0x16d1: 0x00c0, + 0x16d2: 0x00c0, 0x16d3: 0x00c0, 0x16d4: 0x00c0, 0x16d5: 0x00c0, 0x16d6: 0x00c0, 0x16d7: 0x00c0, + 0x16d8: 0x00c0, 0x16d9: 0x00c0, 0x16da: 0x00c0, 0x16db: 0x00c0, 0x16dc: 0x00c0, 0x16dd: 0x00c0, + 0x16de: 0x00c0, 0x16df: 0x00c0, 0x16e0: 0x00c0, 0x16e1: 0x00c0, 0x16e2: 0x00c0, 0x16e3: 0x00c0, + 0x16e4: 0x00c0, 0x16e5: 0x00c0, 0x16e6: 0x00c0, 0x16e7: 0x00c0, 0x16e8: 0x00c0, 0x16e9: 0x00c0, + 0x16ea: 0x00c0, 0x16eb: 0x00c0, 0x16ec: 0x00c0, 0x16ed: 0x00c0, 0x16ee: 0x00c0, 0x16ef: 0x00c0, + 0x16f0: 0x00c0, 0x16f1: 0x00c0, 0x16f2: 0x00c0, 0x16f3: 0x00c0, 0x16f4: 0x00c3, 0x16f5: 0x00c0, + 0x16f6: 0x00c3, 0x16f7: 0x00c3, 0x16f8: 0x00c3, 0x16f9: 0x00c3, 0x16fa: 0x00c3, 0x16fb: 0x00c0, + 0x16fc: 0x00c3, 0x16fd: 0x00c0, 0x16fe: 0x00c0, 0x16ff: 0x00c0, + // Block 0x5c, offset 0x1700 + 0x1700: 0x00c0, 0x1701: 0x00c0, 0x1702: 0x00c3, 0x1703: 0x00c0, 0x1704: 0x00c5, 0x1705: 0x00c0, + 0x1706: 0x00c0, 0x1707: 0x00c0, 0x1708: 0x00c0, 0x1709: 0x00c0, 0x170a: 0x00c0, 0x170b: 0x00c0, + 0x1710: 0x00c0, 0x1711: 0x00c0, + 0x1712: 0x00c0, 0x1713: 0x00c0, 0x1714: 0x00c0, 0x1715: 0x00c0, 0x1716: 0x00c0, 0x1717: 0x00c0, + 0x1718: 0x00c0, 0x1719: 0x00c0, 0x171a: 0x0080, 0x171b: 0x0080, 0x171c: 0x0080, 0x171d: 0x0080, + 0x171e: 0x0080, 0x171f: 0x0080, 0x1720: 0x0080, 0x1721: 0x0080, 0x1722: 0x0080, 0x1723: 0x0080, + 0x1724: 0x0080, 0x1725: 0x0080, 0x1726: 0x0080, 0x1727: 0x0080, 0x1728: 0x0080, 0x1729: 0x0080, + 0x172a: 0x0080, 0x172b: 0x00c3, 0x172c: 0x00c3, 0x172d: 0x00c3, 0x172e: 0x00c3, 0x172f: 0x00c3, + 0x1730: 0x00c3, 
0x1731: 0x00c3, 0x1732: 0x00c3, 0x1733: 0x00c3, 0x1734: 0x0080, 0x1735: 0x0080, + 0x1736: 0x0080, 0x1737: 0x0080, 0x1738: 0x0080, 0x1739: 0x0080, 0x173a: 0x0080, 0x173b: 0x0080, + 0x173c: 0x0080, + // Block 0x5d, offset 0x1740 + 0x1740: 0x00c3, 0x1741: 0x00c3, 0x1742: 0x00c0, 0x1743: 0x00c0, 0x1744: 0x00c0, 0x1745: 0x00c0, + 0x1746: 0x00c0, 0x1747: 0x00c0, 0x1748: 0x00c0, 0x1749: 0x00c0, 0x174a: 0x00c0, 0x174b: 0x00c0, + 0x174c: 0x00c0, 0x174d: 0x00c0, 0x174e: 0x00c0, 0x174f: 0x00c0, 0x1750: 0x00c0, 0x1751: 0x00c0, + 0x1752: 0x00c0, 0x1753: 0x00c0, 0x1754: 0x00c0, 0x1755: 0x00c0, 0x1756: 0x00c0, 0x1757: 0x00c0, + 0x1758: 0x00c0, 0x1759: 0x00c0, 0x175a: 0x00c0, 0x175b: 0x00c0, 0x175c: 0x00c0, 0x175d: 0x00c0, + 0x175e: 0x00c0, 0x175f: 0x00c0, 0x1760: 0x00c0, 0x1761: 0x00c0, 0x1762: 0x00c3, 0x1763: 0x00c3, + 0x1764: 0x00c3, 0x1765: 0x00c3, 0x1766: 0x00c0, 0x1767: 0x00c0, 0x1768: 0x00c3, 0x1769: 0x00c3, + 0x176a: 0x00c5, 0x176b: 0x00c6, 0x176c: 0x00c3, 0x176d: 0x00c3, 0x176e: 0x00c0, 0x176f: 0x00c0, + 0x1770: 0x00c0, 0x1771: 0x00c0, 0x1772: 0x00c0, 0x1773: 0x00c0, 0x1774: 0x00c0, 0x1775: 0x00c0, + 0x1776: 0x00c0, 0x1777: 0x00c0, 0x1778: 0x00c0, 0x1779: 0x00c0, 0x177a: 0x00c0, 0x177b: 0x00c0, + 0x177c: 0x00c0, 0x177d: 0x00c0, 0x177e: 0x00c0, 0x177f: 0x00c0, + // Block 0x5e, offset 0x1780 + 0x1780: 0x00c0, 0x1781: 0x00c0, 0x1782: 0x00c0, 0x1783: 0x00c0, 0x1784: 0x00c0, 0x1785: 0x00c0, + 0x1786: 0x00c0, 0x1787: 0x00c0, 0x1788: 0x00c0, 0x1789: 0x00c0, 0x178a: 0x00c0, 0x178b: 0x00c0, + 0x178c: 0x00c0, 0x178d: 0x00c0, 0x178e: 0x00c0, 0x178f: 0x00c0, 0x1790: 0x00c0, 0x1791: 0x00c0, + 0x1792: 0x00c0, 0x1793: 0x00c0, 0x1794: 0x00c0, 0x1795: 0x00c0, 0x1796: 0x00c0, 0x1797: 0x00c0, + 0x1798: 0x00c0, 0x1799: 0x00c0, 0x179a: 0x00c0, 0x179b: 0x00c0, 0x179c: 0x00c0, 0x179d: 0x00c0, + 0x179e: 0x00c0, 0x179f: 0x00c0, 0x17a0: 0x00c0, 0x17a1: 0x00c0, 0x17a2: 0x00c0, 0x17a3: 0x00c0, + 0x17a4: 0x00c0, 0x17a5: 0x00c0, 0x17a6: 0x00c3, 0x17a7: 0x00c0, 0x17a8: 0x00c3, 0x17a9: 0x00c3, + 0x17aa: 0x00c0, 0x17ab: 0x00c0, 0x17ac: 0x00c0, 0x17ad: 0x00c3, 0x17ae: 0x00c0, 0x17af: 0x00c3, + 0x17b0: 0x00c3, 0x17b1: 0x00c3, 0x17b2: 0x00c5, 0x17b3: 0x00c5, + 0x17bc: 0x0080, 0x17bd: 0x0080, 0x17be: 0x0080, 0x17bf: 0x0080, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x00c0, 0x17c1: 0x00c0, 0x17c2: 0x00c0, 0x17c3: 0x00c0, 0x17c4: 0x00c0, 0x17c5: 0x00c0, + 0x17c6: 0x00c0, 0x17c7: 0x00c0, 0x17c8: 0x00c0, 0x17c9: 0x00c0, 0x17ca: 0x00c0, 0x17cb: 0x00c0, + 0x17cc: 0x00c0, 0x17cd: 0x00c0, 0x17ce: 0x00c0, 0x17cf: 0x00c0, 0x17d0: 0x00c0, 0x17d1: 0x00c0, + 0x17d2: 0x00c0, 0x17d3: 0x00c0, 0x17d4: 0x00c0, 0x17d5: 0x00c0, 0x17d6: 0x00c0, 0x17d7: 0x00c0, + 0x17d8: 0x00c0, 0x17d9: 0x00c0, 0x17da: 0x00c0, 0x17db: 0x00c0, 0x17dc: 0x00c0, 0x17dd: 0x00c0, + 0x17de: 0x00c0, 0x17df: 0x00c0, 0x17e0: 0x00c0, 0x17e1: 0x00c0, 0x17e2: 0x00c0, 0x17e3: 0x00c0, + 0x17e4: 0x00c0, 0x17e5: 0x00c0, 0x17e6: 0x00c0, 0x17e7: 0x00c0, 0x17e8: 0x00c0, 0x17e9: 0x00c0, + 0x17ea: 0x00c0, 0x17eb: 0x00c0, 0x17ec: 0x00c3, 0x17ed: 0x00c3, 0x17ee: 0x00c3, 0x17ef: 0x00c3, + 0x17f0: 0x00c3, 0x17f1: 0x00c3, 0x17f2: 0x00c3, 0x17f3: 0x00c3, 0x17f4: 0x00c0, 0x17f5: 0x00c0, + 0x17f6: 0x00c3, 0x17f7: 0x00c3, 0x17fb: 0x0080, + 0x17fc: 0x0080, 0x17fd: 0x0080, 0x17fe: 0x0080, 0x17ff: 0x0080, + // Block 0x60, offset 0x1800 + 0x1800: 0x00c0, 0x1801: 0x00c0, 0x1802: 0x00c0, 0x1803: 0x00c0, 0x1804: 0x00c0, 0x1805: 0x00c0, + 0x1806: 0x00c0, 0x1807: 0x00c0, 0x1808: 0x00c0, 0x1809: 0x00c0, + 0x180d: 0x00c0, 0x180e: 0x00c0, 0x180f: 0x00c0, 0x1810: 0x00c0, 0x1811: 0x00c0, + 0x1812: 0x00c0, 0x1813: 
0x00c0, 0x1814: 0x00c0, 0x1815: 0x00c0, 0x1816: 0x00c0, 0x1817: 0x00c0, + 0x1818: 0x00c0, 0x1819: 0x00c0, 0x181a: 0x00c0, 0x181b: 0x00c0, 0x181c: 0x00c0, 0x181d: 0x00c0, + 0x181e: 0x00c0, 0x181f: 0x00c0, 0x1820: 0x00c0, 0x1821: 0x00c0, 0x1822: 0x00c0, 0x1823: 0x00c0, + 0x1824: 0x00c0, 0x1825: 0x00c0, 0x1826: 0x00c0, 0x1827: 0x00c0, 0x1828: 0x00c0, 0x1829: 0x00c0, + 0x182a: 0x00c0, 0x182b: 0x00c0, 0x182c: 0x00c0, 0x182d: 0x00c0, 0x182e: 0x00c0, 0x182f: 0x00c0, + 0x1830: 0x00c0, 0x1831: 0x00c0, 0x1832: 0x00c0, 0x1833: 0x00c0, 0x1834: 0x00c0, 0x1835: 0x00c0, + 0x1836: 0x00c0, 0x1837: 0x00c0, 0x1838: 0x00c0, 0x1839: 0x00c0, 0x183a: 0x00c0, 0x183b: 0x00c0, + 0x183c: 0x00c0, 0x183d: 0x00c0, 0x183e: 0x0080, 0x183f: 0x0080, + // Block 0x61, offset 0x1840 + 0x1840: 0x00c0, 0x1841: 0x00c0, 0x1842: 0x00c0, 0x1843: 0x00c0, 0x1844: 0x00c0, 0x1845: 0x00c0, + 0x1846: 0x00c0, 0x1847: 0x00c0, 0x1848: 0x00c0, + // Block 0x62, offset 0x1880 + 0x1880: 0x0080, 0x1881: 0x0080, 0x1882: 0x0080, 0x1883: 0x0080, 0x1884: 0x0080, 0x1885: 0x0080, + 0x1886: 0x0080, 0x1887: 0x0080, + 0x1890: 0x00c3, 0x1891: 0x00c3, + 0x1892: 0x00c3, 0x1893: 0x0080, 0x1894: 0x00c3, 0x1895: 0x00c3, 0x1896: 0x00c3, 0x1897: 0x00c3, + 0x1898: 0x00c3, 0x1899: 0x00c3, 0x189a: 0x00c3, 0x189b: 0x00c3, 0x189c: 0x00c3, 0x189d: 0x00c3, + 0x189e: 0x00c3, 0x189f: 0x00c3, 0x18a0: 0x00c3, 0x18a1: 0x00c0, 0x18a2: 0x00c3, 0x18a3: 0x00c3, + 0x18a4: 0x00c3, 0x18a5: 0x00c3, 0x18a6: 0x00c3, 0x18a7: 0x00c3, 0x18a8: 0x00c3, 0x18a9: 0x00c0, + 0x18aa: 0x00c0, 0x18ab: 0x00c0, 0x18ac: 0x00c0, 0x18ad: 0x00c3, 0x18ae: 0x00c0, 0x18af: 0x00c0, + 0x18b0: 0x00c0, 0x18b1: 0x00c0, 0x18b2: 0x00c0, 0x18b3: 0x00c0, 0x18b4: 0x00c3, 0x18b5: 0x00c0, + 0x18b6: 0x00c0, 0x18b8: 0x00c3, 0x18b9: 0x00c3, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x00c0, 0x18c1: 0x00c0, 0x18c2: 0x00c0, 0x18c3: 0x00c0, 0x18c4: 0x00c0, 0x18c5: 0x00c0, + 0x18c6: 0x00c0, 0x18c7: 0x00c0, 0x18c8: 0x00c0, 0x18c9: 0x00c0, 0x18ca: 0x00c0, 0x18cb: 0x00c0, + 0x18cc: 0x00c0, 0x18cd: 0x00c0, 0x18ce: 0x00c0, 0x18cf: 0x00c0, 0x18d0: 0x00c0, 0x18d1: 0x00c0, + 0x18d2: 0x00c0, 0x18d3: 0x00c0, 0x18d4: 0x00c0, 0x18d5: 0x00c0, 0x18d6: 0x00c0, 0x18d7: 0x00c0, + 0x18d8: 0x00c0, 0x18d9: 0x00c0, 0x18da: 0x00c0, 0x18db: 0x00c0, 0x18dc: 0x00c0, 0x18dd: 0x00c0, + 0x18de: 0x00c0, 0x18df: 0x00c0, 0x18e0: 0x00c0, 0x18e1: 0x00c0, 0x18e2: 0x00c0, 0x18e3: 0x00c0, + 0x18e4: 0x00c0, 0x18e5: 0x00c0, 0x18e6: 0x00c8, 0x18e7: 0x00c8, 0x18e8: 0x00c8, 0x18e9: 0x00c8, + 0x18ea: 0x00c8, 0x18eb: 0x00c0, 0x18ec: 0x0080, 0x18ed: 0x0080, 0x18ee: 0x0080, 0x18ef: 0x00c0, + 0x18f0: 0x0080, 0x18f1: 0x0080, 0x18f2: 0x0080, 0x18f3: 0x0080, 0x18f4: 0x0080, 0x18f5: 0x0080, + 0x18f6: 0x0080, 0x18f7: 0x0080, 0x18f8: 0x0080, 0x18f9: 0x0080, 0x18fa: 0x0080, 0x18fb: 0x00c0, + 0x18fc: 0x0080, 0x18fd: 0x0080, 0x18fe: 0x0080, 0x18ff: 0x0080, + // Block 0x64, offset 0x1900 + 0x1900: 0x0080, 0x1901: 0x0080, 0x1902: 0x0080, 0x1903: 0x0080, 0x1904: 0x0080, 0x1905: 0x0080, + 0x1906: 0x0080, 0x1907: 0x0080, 0x1908: 0x0080, 0x1909: 0x0080, 0x190a: 0x0080, 0x190b: 0x0080, + 0x190c: 0x0080, 0x190d: 0x0080, 0x190e: 0x00c0, 0x190f: 0x0080, 0x1910: 0x0080, 0x1911: 0x0080, + 0x1912: 0x0080, 0x1913: 0x0080, 0x1914: 0x0080, 0x1915: 0x0080, 0x1916: 0x0080, 0x1917: 0x0080, + 0x1918: 0x0080, 0x1919: 0x0080, 0x191a: 0x0080, 0x191b: 0x0080, 0x191c: 0x0080, 0x191d: 0x0088, + 0x191e: 0x0088, 0x191f: 0x0088, 0x1920: 0x0088, 0x1921: 0x0088, 0x1922: 0x0080, 0x1923: 0x0080, + 0x1924: 0x0080, 0x1925: 0x0080, 0x1926: 0x0088, 0x1927: 0x0088, 0x1928: 0x0088, 0x1929: 0x0088, + 0x192a: 0x0088, 
0x192b: 0x00c0, 0x192c: 0x00c0, 0x192d: 0x00c0, 0x192e: 0x00c0, 0x192f: 0x00c0, + 0x1930: 0x00c0, 0x1931: 0x00c0, 0x1932: 0x00c0, 0x1933: 0x00c0, 0x1934: 0x00c0, 0x1935: 0x00c0, + 0x1936: 0x00c0, 0x1937: 0x00c0, 0x1938: 0x0080, 0x1939: 0x00c0, 0x193a: 0x00c0, 0x193b: 0x00c0, + 0x193c: 0x00c0, 0x193d: 0x00c0, 0x193e: 0x00c0, 0x193f: 0x00c0, + // Block 0x65, offset 0x1940 + 0x1940: 0x00c0, 0x1941: 0x00c0, 0x1942: 0x00c0, 0x1943: 0x00c0, 0x1944: 0x00c0, 0x1945: 0x00c0, + 0x1946: 0x00c0, 0x1947: 0x00c0, 0x1948: 0x00c0, 0x1949: 0x00c0, 0x194a: 0x00c0, 0x194b: 0x00c0, + 0x194c: 0x00c0, 0x194d: 0x00c0, 0x194e: 0x00c0, 0x194f: 0x00c0, 0x1950: 0x00c0, 0x1951: 0x00c0, + 0x1952: 0x00c0, 0x1953: 0x00c0, 0x1954: 0x00c0, 0x1955: 0x00c0, 0x1956: 0x00c0, 0x1957: 0x00c0, + 0x1958: 0x00c0, 0x1959: 0x00c0, 0x195a: 0x00c0, 0x195b: 0x0080, 0x195c: 0x0080, 0x195d: 0x0080, + 0x195e: 0x0080, 0x195f: 0x0080, 0x1960: 0x0080, 0x1961: 0x0080, 0x1962: 0x0080, 0x1963: 0x0080, + 0x1964: 0x0080, 0x1965: 0x0080, 0x1966: 0x0080, 0x1967: 0x0080, 0x1968: 0x0080, 0x1969: 0x0080, + 0x196a: 0x0080, 0x196b: 0x0080, 0x196c: 0x0080, 0x196d: 0x0080, 0x196e: 0x0080, 0x196f: 0x0080, + 0x1970: 0x0080, 0x1971: 0x0080, 0x1972: 0x0080, 0x1973: 0x0080, 0x1974: 0x0080, 0x1975: 0x0080, + 0x1976: 0x0080, 0x1977: 0x0080, 0x1978: 0x0080, 0x1979: 0x0080, 0x197a: 0x0080, 0x197b: 0x0080, + 0x197c: 0x0080, 0x197d: 0x0080, 0x197e: 0x0080, 0x197f: 0x0088, + // Block 0x66, offset 0x1980 + 0x1980: 0x00c3, 0x1981: 0x00c3, 0x1982: 0x00c3, 0x1983: 0x00c3, 0x1984: 0x00c3, 0x1985: 0x00c3, + 0x1986: 0x00c3, 0x1987: 0x00c3, 0x1988: 0x00c3, 0x1989: 0x00c3, 0x198a: 0x00c3, 0x198b: 0x00c3, + 0x198c: 0x00c3, 0x198d: 0x00c3, 0x198e: 0x00c3, 0x198f: 0x00c3, 0x1990: 0x00c3, 0x1991: 0x00c3, + 0x1992: 0x00c3, 0x1993: 0x00c3, 0x1994: 0x00c3, 0x1995: 0x00c3, 0x1996: 0x00c3, 0x1997: 0x00c3, + 0x1998: 0x00c3, 0x1999: 0x00c3, 0x199a: 0x00c3, 0x199b: 0x00c3, 0x199c: 0x00c3, 0x199d: 0x00c3, + 0x199e: 0x00c3, 0x199f: 0x00c3, 0x19a0: 0x00c3, 0x19a1: 0x00c3, 0x19a2: 0x00c3, 0x19a3: 0x00c3, + 0x19a4: 0x00c3, 0x19a5: 0x00c3, 0x19a6: 0x00c3, 0x19a7: 0x00c3, 0x19a8: 0x00c3, 0x19a9: 0x00c3, + 0x19aa: 0x00c3, 0x19ab: 0x00c3, 0x19ac: 0x00c3, 0x19ad: 0x00c3, 0x19ae: 0x00c3, 0x19af: 0x00c3, + 0x19b0: 0x00c3, 0x19b1: 0x00c3, 0x19b2: 0x00c3, 0x19b3: 0x00c3, 0x19b4: 0x00c3, 0x19b5: 0x00c3, + 0x19bb: 0x00c3, + 0x19bc: 0x00c3, 0x19bd: 0x00c3, 0x19be: 0x00c3, 0x19bf: 0x00c3, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x00c0, 0x19c1: 0x00c0, 0x19c2: 0x00c0, 0x19c3: 0x00c0, 0x19c4: 0x00c0, 0x19c5: 0x00c0, + 0x19c6: 0x00c0, 0x19c7: 0x00c0, 0x19c8: 0x00c0, 0x19c9: 0x00c0, 0x19ca: 0x00c0, 0x19cb: 0x00c0, + 0x19cc: 0x00c0, 0x19cd: 0x00c0, 0x19ce: 0x00c0, 0x19cf: 0x00c0, 0x19d0: 0x00c0, 0x19d1: 0x00c0, + 0x19d2: 0x00c0, 0x19d3: 0x00c0, 0x19d4: 0x00c0, 0x19d5: 0x00c0, 0x19d6: 0x00c0, 0x19d7: 0x00c0, + 0x19d8: 0x00c0, 0x19d9: 0x00c0, 0x19da: 0x0080, 0x19db: 0x0080, 0x19dc: 0x00c0, 0x19dd: 0x00c0, + 0x19de: 0x00c0, 0x19df: 0x00c0, 0x19e0: 0x00c0, 0x19e1: 0x00c0, 0x19e2: 0x00c0, 0x19e3: 0x00c0, + 0x19e4: 0x00c0, 0x19e5: 0x00c0, 0x19e6: 0x00c0, 0x19e7: 0x00c0, 0x19e8: 0x00c0, 0x19e9: 0x00c0, + 0x19ea: 0x00c0, 0x19eb: 0x00c0, 0x19ec: 0x00c0, 0x19ed: 0x00c0, 0x19ee: 0x00c0, 0x19ef: 0x00c0, + 0x19f0: 0x00c0, 0x19f1: 0x00c0, 0x19f2: 0x00c0, 0x19f3: 0x00c0, 0x19f4: 0x00c0, 0x19f5: 0x00c0, + 0x19f6: 0x00c0, 0x19f7: 0x00c0, 0x19f8: 0x00c0, 0x19f9: 0x00c0, 0x19fa: 0x00c0, 0x19fb: 0x00c0, + 0x19fc: 0x00c0, 0x19fd: 0x00c0, 0x19fe: 0x00c0, 0x19ff: 0x00c0, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x00c8, 0x1a01: 
0x00c8, 0x1a02: 0x00c8, 0x1a03: 0x00c8, 0x1a04: 0x00c8, 0x1a05: 0x00c8, + 0x1a06: 0x00c8, 0x1a07: 0x00c8, 0x1a08: 0x00c8, 0x1a09: 0x00c8, 0x1a0a: 0x00c8, 0x1a0b: 0x00c8, + 0x1a0c: 0x00c8, 0x1a0d: 0x00c8, 0x1a0e: 0x00c8, 0x1a0f: 0x00c8, 0x1a10: 0x00c8, 0x1a11: 0x00c8, + 0x1a12: 0x00c8, 0x1a13: 0x00c8, 0x1a14: 0x00c8, 0x1a15: 0x00c8, + 0x1a18: 0x00c8, 0x1a19: 0x00c8, 0x1a1a: 0x00c8, 0x1a1b: 0x00c8, 0x1a1c: 0x00c8, 0x1a1d: 0x00c8, + 0x1a20: 0x00c8, 0x1a21: 0x00c8, 0x1a22: 0x00c8, 0x1a23: 0x00c8, + 0x1a24: 0x00c8, 0x1a25: 0x00c8, 0x1a26: 0x00c8, 0x1a27: 0x00c8, 0x1a28: 0x00c8, 0x1a29: 0x00c8, + 0x1a2a: 0x00c8, 0x1a2b: 0x00c8, 0x1a2c: 0x00c8, 0x1a2d: 0x00c8, 0x1a2e: 0x00c8, 0x1a2f: 0x00c8, + 0x1a30: 0x00c8, 0x1a31: 0x00c8, 0x1a32: 0x00c8, 0x1a33: 0x00c8, 0x1a34: 0x00c8, 0x1a35: 0x00c8, + 0x1a36: 0x00c8, 0x1a37: 0x00c8, 0x1a38: 0x00c8, 0x1a39: 0x00c8, 0x1a3a: 0x00c8, 0x1a3b: 0x00c8, + 0x1a3c: 0x00c8, 0x1a3d: 0x00c8, 0x1a3e: 0x00c8, 0x1a3f: 0x00c8, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x00c8, 0x1a41: 0x00c8, 0x1a42: 0x00c8, 0x1a43: 0x00c8, 0x1a44: 0x00c8, 0x1a45: 0x00c8, + 0x1a48: 0x00c8, 0x1a49: 0x00c8, 0x1a4a: 0x00c8, 0x1a4b: 0x00c8, + 0x1a4c: 0x00c8, 0x1a4d: 0x00c8, 0x1a50: 0x00c8, 0x1a51: 0x00c8, + 0x1a52: 0x00c8, 0x1a53: 0x00c8, 0x1a54: 0x00c8, 0x1a55: 0x00c8, 0x1a56: 0x00c8, 0x1a57: 0x00c8, + 0x1a59: 0x00c8, 0x1a5b: 0x00c8, 0x1a5d: 0x00c8, + 0x1a5f: 0x00c8, 0x1a60: 0x00c8, 0x1a61: 0x00c8, 0x1a62: 0x00c8, 0x1a63: 0x00c8, + 0x1a64: 0x00c8, 0x1a65: 0x00c8, 0x1a66: 0x00c8, 0x1a67: 0x00c8, 0x1a68: 0x00c8, 0x1a69: 0x00c8, + 0x1a6a: 0x00c8, 0x1a6b: 0x00c8, 0x1a6c: 0x00c8, 0x1a6d: 0x00c8, 0x1a6e: 0x00c8, 0x1a6f: 0x00c8, + 0x1a70: 0x00c8, 0x1a71: 0x0088, 0x1a72: 0x00c8, 0x1a73: 0x0088, 0x1a74: 0x00c8, 0x1a75: 0x0088, + 0x1a76: 0x00c8, 0x1a77: 0x0088, 0x1a78: 0x00c8, 0x1a79: 0x0088, 0x1a7a: 0x00c8, 0x1a7b: 0x0088, + 0x1a7c: 0x00c8, 0x1a7d: 0x0088, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x00c8, 0x1a81: 0x00c8, 0x1a82: 0x00c8, 0x1a83: 0x00c8, 0x1a84: 0x00c8, 0x1a85: 0x00c8, + 0x1a86: 0x00c8, 0x1a87: 0x00c8, 0x1a88: 0x0088, 0x1a89: 0x0088, 0x1a8a: 0x0088, 0x1a8b: 0x0088, + 0x1a8c: 0x0088, 0x1a8d: 0x0088, 0x1a8e: 0x0088, 0x1a8f: 0x0088, 0x1a90: 0x00c8, 0x1a91: 0x00c8, + 0x1a92: 0x00c8, 0x1a93: 0x00c8, 0x1a94: 0x00c8, 0x1a95: 0x00c8, 0x1a96: 0x00c8, 0x1a97: 0x00c8, + 0x1a98: 0x0088, 0x1a99: 0x0088, 0x1a9a: 0x0088, 0x1a9b: 0x0088, 0x1a9c: 0x0088, 0x1a9d: 0x0088, + 0x1a9e: 0x0088, 0x1a9f: 0x0088, 0x1aa0: 0x00c8, 0x1aa1: 0x00c8, 0x1aa2: 0x00c8, 0x1aa3: 0x00c8, + 0x1aa4: 0x00c8, 0x1aa5: 0x00c8, 0x1aa6: 0x00c8, 0x1aa7: 0x00c8, 0x1aa8: 0x0088, 0x1aa9: 0x0088, + 0x1aaa: 0x0088, 0x1aab: 0x0088, 0x1aac: 0x0088, 0x1aad: 0x0088, 0x1aae: 0x0088, 0x1aaf: 0x0088, + 0x1ab0: 0x00c8, 0x1ab1: 0x00c8, 0x1ab2: 0x00c8, 0x1ab3: 0x00c8, 0x1ab4: 0x00c8, + 0x1ab6: 0x00c8, 0x1ab7: 0x00c8, 0x1ab8: 0x00c8, 0x1ab9: 0x00c8, 0x1aba: 0x00c8, 0x1abb: 0x0088, + 0x1abc: 0x0088, 0x1abd: 0x0088, 0x1abe: 0x0088, 0x1abf: 0x0088, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x0088, 0x1ac1: 0x0088, 0x1ac2: 0x00c8, 0x1ac3: 0x00c8, 0x1ac4: 0x00c8, + 0x1ac6: 0x00c8, 0x1ac7: 0x00c8, 0x1ac8: 0x00c8, 0x1ac9: 0x0088, 0x1aca: 0x00c8, 0x1acb: 0x0088, + 0x1acc: 0x0088, 0x1acd: 0x0088, 0x1ace: 0x0088, 0x1acf: 0x0088, 0x1ad0: 0x00c8, 0x1ad1: 0x00c8, + 0x1ad2: 0x00c8, 0x1ad3: 0x0088, 0x1ad6: 0x00c8, 0x1ad7: 0x00c8, + 0x1ad8: 0x00c8, 0x1ad9: 0x00c8, 0x1ada: 0x00c8, 0x1adb: 0x0088, 0x1add: 0x0088, + 0x1ade: 0x0088, 0x1adf: 0x0088, 0x1ae0: 0x00c8, 0x1ae1: 0x00c8, 0x1ae2: 0x00c8, 0x1ae3: 0x0088, + 0x1ae4: 0x00c8, 0x1ae5: 0x00c8, 0x1ae6: 0x00c8, 0x1ae7: 0x00c8, 
0x1ae8: 0x00c8, 0x1ae9: 0x00c8, + 0x1aea: 0x00c8, 0x1aeb: 0x0088, 0x1aec: 0x00c8, 0x1aed: 0x0088, 0x1aee: 0x0088, 0x1aef: 0x0088, + 0x1af2: 0x00c8, 0x1af3: 0x00c8, 0x1af4: 0x00c8, + 0x1af6: 0x00c8, 0x1af7: 0x00c8, 0x1af8: 0x00c8, 0x1af9: 0x0088, 0x1afa: 0x00c8, 0x1afb: 0x0088, + 0x1afc: 0x0088, 0x1afd: 0x0088, 0x1afe: 0x0088, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x0080, 0x1b01: 0x0080, 0x1b02: 0x0080, 0x1b03: 0x0080, 0x1b04: 0x0080, 0x1b05: 0x0080, + 0x1b06: 0x0080, 0x1b07: 0x0080, 0x1b08: 0x0080, 0x1b09: 0x0080, 0x1b0a: 0x0080, 0x1b0b: 0x0040, + 0x1b0c: 0x004d, 0x1b0d: 0x004e, 0x1b0e: 0x0040, 0x1b0f: 0x0040, 0x1b10: 0x0080, 0x1b11: 0x0080, + 0x1b12: 0x0080, 0x1b13: 0x0080, 0x1b14: 0x0080, 0x1b15: 0x0080, 0x1b16: 0x0080, 0x1b17: 0x0080, + 0x1b18: 0x0080, 0x1b19: 0x0080, 0x1b1a: 0x0080, 0x1b1b: 0x0080, 0x1b1c: 0x0080, 0x1b1d: 0x0080, + 0x1b1e: 0x0080, 0x1b1f: 0x0080, 0x1b20: 0x0080, 0x1b21: 0x0080, 0x1b22: 0x0080, 0x1b23: 0x0080, + 0x1b24: 0x0080, 0x1b25: 0x0080, 0x1b26: 0x0080, 0x1b27: 0x0080, 0x1b28: 0x0040, 0x1b29: 0x0040, + 0x1b2a: 0x0040, 0x1b2b: 0x0040, 0x1b2c: 0x0040, 0x1b2d: 0x0040, 0x1b2e: 0x0040, 0x1b2f: 0x0080, + 0x1b30: 0x0080, 0x1b31: 0x0080, 0x1b32: 0x0080, 0x1b33: 0x0080, 0x1b34: 0x0080, 0x1b35: 0x0080, + 0x1b36: 0x0080, 0x1b37: 0x0080, 0x1b38: 0x0080, 0x1b39: 0x0080, 0x1b3a: 0x0080, 0x1b3b: 0x0080, + 0x1b3c: 0x0080, 0x1b3d: 0x0080, 0x1b3e: 0x0080, 0x1b3f: 0x0080, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x0080, 0x1b41: 0x0080, 0x1b42: 0x0080, 0x1b43: 0x0080, 0x1b44: 0x0080, 0x1b45: 0x0080, + 0x1b46: 0x0080, 0x1b47: 0x0080, 0x1b48: 0x0080, 0x1b49: 0x0080, 0x1b4a: 0x0080, 0x1b4b: 0x0080, + 0x1b4c: 0x0080, 0x1b4d: 0x0080, 0x1b4e: 0x0080, 0x1b4f: 0x0080, 0x1b50: 0x0080, 0x1b51: 0x0080, + 0x1b52: 0x0080, 0x1b53: 0x0080, 0x1b54: 0x0080, 0x1b55: 0x0080, 0x1b56: 0x0080, 0x1b57: 0x0080, + 0x1b58: 0x0080, 0x1b59: 0x0080, 0x1b5a: 0x0080, 0x1b5b: 0x0080, 0x1b5c: 0x0080, 0x1b5d: 0x0080, + 0x1b5e: 0x0080, 0x1b5f: 0x0080, 0x1b60: 0x0040, 0x1b61: 0x0040, 0x1b62: 0x0040, 0x1b63: 0x0040, + 0x1b64: 0x0040, 0x1b66: 0x0040, 0x1b67: 0x0040, 0x1b68: 0x0040, 0x1b69: 0x0040, + 0x1b6a: 0x0040, 0x1b6b: 0x0040, 0x1b6c: 0x0040, 0x1b6d: 0x0040, 0x1b6e: 0x0040, 0x1b6f: 0x0040, + 0x1b70: 0x0080, 0x1b71: 0x0080, 0x1b74: 0x0080, 0x1b75: 0x0080, + 0x1b76: 0x0080, 0x1b77: 0x0080, 0x1b78: 0x0080, 0x1b79: 0x0080, 0x1b7a: 0x0080, 0x1b7b: 0x0080, + 0x1b7c: 0x0080, 0x1b7d: 0x0080, 0x1b7e: 0x0080, 0x1b7f: 0x0080, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x0080, 0x1b81: 0x0080, 0x1b82: 0x0080, 0x1b83: 0x0080, 0x1b84: 0x0080, 0x1b85: 0x0080, + 0x1b86: 0x0080, 0x1b87: 0x0080, 0x1b88: 0x0080, 0x1b89: 0x0080, 0x1b8a: 0x0080, 0x1b8b: 0x0080, + 0x1b8c: 0x0080, 0x1b8d: 0x0080, 0x1b8e: 0x0080, 0x1b90: 0x0080, 0x1b91: 0x0080, + 0x1b92: 0x0080, 0x1b93: 0x0080, 0x1b94: 0x0080, 0x1b95: 0x0080, 0x1b96: 0x0080, 0x1b97: 0x0080, + 0x1b98: 0x0080, 0x1b99: 0x0080, 0x1b9a: 0x0080, 0x1b9b: 0x0080, 0x1b9c: 0x0080, + 0x1ba0: 0x0080, 0x1ba1: 0x0080, 0x1ba2: 0x0080, 0x1ba3: 0x0080, + 0x1ba4: 0x0080, 0x1ba5: 0x0080, 0x1ba6: 0x0080, 0x1ba7: 0x0080, 0x1ba8: 0x0080, 0x1ba9: 0x0080, + 0x1baa: 0x0080, 0x1bab: 0x0080, 0x1bac: 0x0080, 0x1bad: 0x0080, 0x1bae: 0x0080, 0x1baf: 0x0080, + 0x1bb0: 0x0080, 0x1bb1: 0x0080, 0x1bb2: 0x0080, 0x1bb3: 0x0080, 0x1bb4: 0x0080, 0x1bb5: 0x0080, + 0x1bb6: 0x0080, 0x1bb7: 0x0080, 0x1bb8: 0x0080, 0x1bb9: 0x0080, 0x1bba: 0x0080, 0x1bbb: 0x0080, + 0x1bbc: 0x0080, 0x1bbd: 0x0080, 0x1bbe: 0x0080, + // Block 0x6f, offset 0x1bc0 + 0x1bd0: 0x00c3, 0x1bd1: 0x00c3, + 0x1bd2: 0x00c3, 0x1bd3: 0x00c3, 0x1bd4: 0x00c3, 0x1bd5: 
0x00c3, 0x1bd6: 0x00c3, 0x1bd7: 0x00c3, + 0x1bd8: 0x00c3, 0x1bd9: 0x00c3, 0x1bda: 0x00c3, 0x1bdb: 0x00c3, 0x1bdc: 0x00c3, 0x1bdd: 0x0083, + 0x1bde: 0x0083, 0x1bdf: 0x0083, 0x1be0: 0x0083, 0x1be1: 0x00c3, 0x1be2: 0x0083, 0x1be3: 0x0083, + 0x1be4: 0x0083, 0x1be5: 0x00c3, 0x1be6: 0x00c3, 0x1be7: 0x00c3, 0x1be8: 0x00c3, 0x1be9: 0x00c3, + 0x1bea: 0x00c3, 0x1beb: 0x00c3, 0x1bec: 0x00c3, 0x1bed: 0x00c3, 0x1bee: 0x00c3, 0x1bef: 0x00c3, + 0x1bf0: 0x00c3, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x0080, 0x1c01: 0x0080, 0x1c02: 0x0080, 0x1c03: 0x0080, 0x1c04: 0x0080, 0x1c05: 0x0080, + 0x1c06: 0x0080, 0x1c07: 0x0080, 0x1c08: 0x0080, 0x1c09: 0x0080, 0x1c0a: 0x0080, 0x1c0b: 0x0080, + 0x1c0c: 0x0080, 0x1c0d: 0x0080, 0x1c0e: 0x0080, 0x1c0f: 0x0080, 0x1c10: 0x0080, 0x1c11: 0x0080, + 0x1c12: 0x0080, 0x1c13: 0x0080, 0x1c14: 0x0080, 0x1c15: 0x0080, 0x1c16: 0x0080, 0x1c17: 0x0080, + 0x1c18: 0x0080, 0x1c19: 0x0080, 0x1c1a: 0x0080, 0x1c1b: 0x0080, 0x1c1c: 0x0080, 0x1c1d: 0x0080, + 0x1c1e: 0x0080, 0x1c1f: 0x0080, 0x1c20: 0x0080, 0x1c21: 0x0080, 0x1c22: 0x0080, 0x1c23: 0x0080, + 0x1c24: 0x0080, 0x1c25: 0x0080, 0x1c26: 0x0088, 0x1c27: 0x0080, 0x1c28: 0x0080, 0x1c29: 0x0080, + 0x1c2a: 0x0080, 0x1c2b: 0x0080, 0x1c2c: 0x0080, 0x1c2d: 0x0080, 0x1c2e: 0x0080, 0x1c2f: 0x0080, + 0x1c30: 0x0080, 0x1c31: 0x0080, 0x1c32: 0x00c0, 0x1c33: 0x0080, 0x1c34: 0x0080, 0x1c35: 0x0080, + 0x1c36: 0x0080, 0x1c37: 0x0080, 0x1c38: 0x0080, 0x1c39: 0x0080, 0x1c3a: 0x0080, 0x1c3b: 0x0080, + 0x1c3c: 0x0080, 0x1c3d: 0x0080, 0x1c3e: 0x0080, 0x1c3f: 0x0080, + // Block 0x71, offset 0x1c40 + 0x1c40: 0x0080, 0x1c41: 0x0080, 0x1c42: 0x0080, 0x1c43: 0x0080, 0x1c44: 0x0080, 0x1c45: 0x0080, + 0x1c46: 0x0080, 0x1c47: 0x0080, 0x1c48: 0x0080, 0x1c49: 0x0080, 0x1c4a: 0x0080, 0x1c4b: 0x0080, + 0x1c4c: 0x0080, 0x1c4d: 0x0080, 0x1c4e: 0x00c0, 0x1c4f: 0x0080, 0x1c50: 0x0080, 0x1c51: 0x0080, + 0x1c52: 0x0080, 0x1c53: 0x0080, 0x1c54: 0x0080, 0x1c55: 0x0080, 0x1c56: 0x0080, 0x1c57: 0x0080, + 0x1c58: 0x0080, 0x1c59: 0x0080, 0x1c5a: 0x0080, 0x1c5b: 0x0080, 0x1c5c: 0x0080, 0x1c5d: 0x0080, + 0x1c5e: 0x0080, 0x1c5f: 0x0080, 0x1c60: 0x0080, 0x1c61: 0x0080, 0x1c62: 0x0080, 0x1c63: 0x0080, + 0x1c64: 0x0080, 0x1c65: 0x0080, 0x1c66: 0x0080, 0x1c67: 0x0080, 0x1c68: 0x0080, 0x1c69: 0x0080, + 0x1c6a: 0x0080, 0x1c6b: 0x0080, 0x1c6c: 0x0080, 0x1c6d: 0x0080, 0x1c6e: 0x0080, 0x1c6f: 0x0080, + 0x1c70: 0x0080, 0x1c71: 0x0080, 0x1c72: 0x0080, 0x1c73: 0x0080, 0x1c74: 0x0080, 0x1c75: 0x0080, + 0x1c76: 0x0080, 0x1c77: 0x0080, 0x1c78: 0x0080, 0x1c79: 0x0080, 0x1c7a: 0x0080, 0x1c7b: 0x0080, + 0x1c7c: 0x0080, 0x1c7d: 0x0080, 0x1c7e: 0x0080, 0x1c7f: 0x0080, + // Block 0x72, offset 0x1c80 + 0x1c80: 0x0080, 0x1c81: 0x0080, 0x1c82: 0x0080, 0x1c83: 0x00c0, 0x1c84: 0x00c0, 0x1c85: 0x0080, + 0x1c86: 0x0080, 0x1c87: 0x0080, 0x1c88: 0x0080, 0x1c89: 0x0080, 0x1c8a: 0x0080, 0x1c8b: 0x0080, + 0x1c90: 0x0080, 0x1c91: 0x0080, + 0x1c92: 0x0080, 0x1c93: 0x0080, 0x1c94: 0x0080, 0x1c95: 0x0080, 0x1c96: 0x0080, 0x1c97: 0x0080, + 0x1c98: 0x0080, 0x1c99: 0x0080, 0x1c9a: 0x0080, 0x1c9b: 0x0080, 0x1c9c: 0x0080, 0x1c9d: 0x0080, + 0x1c9e: 0x0080, 0x1c9f: 0x0080, 0x1ca0: 0x0080, 0x1ca1: 0x0080, 0x1ca2: 0x0080, 0x1ca3: 0x0080, + 0x1ca4: 0x0080, 0x1ca5: 0x0080, 0x1ca6: 0x0080, 0x1ca7: 0x0080, 0x1ca8: 0x0080, 0x1ca9: 0x0080, + 0x1caa: 0x0080, 0x1cab: 0x0080, 0x1cac: 0x0080, 0x1cad: 0x0080, 0x1cae: 0x0080, 0x1caf: 0x0080, + 0x1cb0: 0x0080, 0x1cb1: 0x0080, 0x1cb2: 0x0080, 0x1cb3: 0x0080, 0x1cb4: 0x0080, 0x1cb5: 0x0080, + 0x1cb6: 0x0080, 0x1cb7: 0x0080, 0x1cb8: 0x0080, 0x1cb9: 0x0080, 0x1cba: 0x0080, 0x1cbb: 0x0080, + 
0x1cbc: 0x0080, 0x1cbd: 0x0080, 0x1cbe: 0x0080, 0x1cbf: 0x0080, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x0080, 0x1cc1: 0x0080, 0x1cc2: 0x0080, 0x1cc3: 0x0080, 0x1cc4: 0x0080, 0x1cc5: 0x0080, + 0x1cc6: 0x0080, 0x1cc7: 0x0080, 0x1cc8: 0x0080, 0x1cc9: 0x0080, 0x1cca: 0x0080, 0x1ccb: 0x0080, + 0x1ccc: 0x0080, 0x1ccd: 0x0080, 0x1cce: 0x0080, 0x1ccf: 0x0080, 0x1cd0: 0x0080, 0x1cd1: 0x0080, + 0x1cd2: 0x0080, 0x1cd3: 0x0080, 0x1cd4: 0x0080, 0x1cd5: 0x0080, 0x1cd6: 0x0080, 0x1cd7: 0x0080, + 0x1cd8: 0x0080, 0x1cd9: 0x0080, 0x1cda: 0x0080, 0x1cdb: 0x0080, 0x1cdc: 0x0080, 0x1cdd: 0x0080, + 0x1cde: 0x0080, 0x1cdf: 0x0080, 0x1ce0: 0x0080, 0x1ce1: 0x0080, 0x1ce2: 0x0080, 0x1ce3: 0x0080, + 0x1ce4: 0x0080, 0x1ce5: 0x0080, 0x1ce6: 0x0080, 0x1ce7: 0x0080, 0x1ce8: 0x0080, 0x1ce9: 0x0080, + 0x1cea: 0x0080, 0x1ceb: 0x0080, 0x1cec: 0x0080, 0x1ced: 0x0080, 0x1cee: 0x0080, 0x1cef: 0x0080, + 0x1cf0: 0x0080, 0x1cf1: 0x0080, 0x1cf2: 0x0080, 0x1cf3: 0x0080, 0x1cf4: 0x0080, 0x1cf5: 0x0080, + 0x1cf6: 0x0080, 0x1cf7: 0x0080, 0x1cf8: 0x0080, 0x1cf9: 0x0080, 0x1cfa: 0x0080, 0x1cfb: 0x0080, + 0x1cfc: 0x0080, 0x1cfd: 0x0080, 0x1cfe: 0x0080, 0x1cff: 0x0080, + // Block 0x74, offset 0x1d00 + 0x1d00: 0x0080, 0x1d01: 0x0080, 0x1d02: 0x0080, 0x1d03: 0x0080, 0x1d04: 0x0080, 0x1d05: 0x0080, + 0x1d06: 0x0080, 0x1d07: 0x0080, 0x1d08: 0x0080, 0x1d09: 0x0080, 0x1d0a: 0x0080, 0x1d0b: 0x0080, + 0x1d0c: 0x0080, 0x1d0d: 0x0080, 0x1d0e: 0x0080, 0x1d0f: 0x0080, 0x1d10: 0x0080, 0x1d11: 0x0080, + 0x1d12: 0x0080, 0x1d13: 0x0080, 0x1d14: 0x0080, 0x1d15: 0x0080, 0x1d16: 0x0080, 0x1d17: 0x0080, + 0x1d18: 0x0080, 0x1d19: 0x0080, 0x1d1a: 0x0080, 0x1d1b: 0x0080, 0x1d1c: 0x0080, 0x1d1d: 0x0080, + 0x1d1e: 0x0080, 0x1d1f: 0x0080, 0x1d20: 0x0080, 0x1d21: 0x0080, 0x1d22: 0x0080, 0x1d23: 0x0080, + 0x1d24: 0x0080, 0x1d25: 0x0080, 0x1d26: 0x0080, 0x1d27: 0x0080, 0x1d28: 0x0080, 0x1d29: 0x0080, + 0x1d2a: 0x0080, 0x1d2b: 0x0080, 0x1d2c: 0x0080, 0x1d2d: 0x0080, 0x1d2e: 0x0080, 0x1d2f: 0x0080, + 0x1d30: 0x0080, 0x1d31: 0x0080, 0x1d32: 0x0080, 0x1d33: 0x0080, 0x1d34: 0x0080, 0x1d35: 0x0080, + 0x1d36: 0x0080, 0x1d37: 0x0080, 0x1d38: 0x0080, 0x1d39: 0x0080, 0x1d3a: 0x0080, 0x1d3b: 0x0080, + 0x1d3c: 0x0080, 0x1d3d: 0x0080, 0x1d3e: 0x0080, + // Block 0x75, offset 0x1d40 + 0x1d40: 0x0080, 0x1d41: 0x0080, 0x1d42: 0x0080, 0x1d43: 0x0080, 0x1d44: 0x0080, 0x1d45: 0x0080, + 0x1d46: 0x0080, 0x1d47: 0x0080, 0x1d48: 0x0080, 0x1d49: 0x0080, 0x1d4a: 0x0080, 0x1d4b: 0x0080, + 0x1d4c: 0x0080, 0x1d4d: 0x0080, 0x1d4e: 0x0080, 0x1d4f: 0x0080, 0x1d50: 0x0080, 0x1d51: 0x0080, + 0x1d52: 0x0080, 0x1d53: 0x0080, 0x1d54: 0x0080, 0x1d55: 0x0080, 0x1d56: 0x0080, 0x1d57: 0x0080, + 0x1d58: 0x0080, 0x1d59: 0x0080, 0x1d5a: 0x0080, 0x1d5b: 0x0080, 0x1d5c: 0x0080, 0x1d5d: 0x0080, + 0x1d5e: 0x0080, 0x1d5f: 0x0080, 0x1d60: 0x0080, 0x1d61: 0x0080, 0x1d62: 0x0080, 0x1d63: 0x0080, + 0x1d64: 0x0080, 0x1d65: 0x0080, 0x1d66: 0x0080, + // Block 0x76, offset 0x1d80 + 0x1d80: 0x0080, 0x1d81: 0x0080, 0x1d82: 0x0080, 0x1d83: 0x0080, 0x1d84: 0x0080, 0x1d85: 0x0080, + 0x1d86: 0x0080, 0x1d87: 0x0080, 0x1d88: 0x0080, 0x1d89: 0x0080, 0x1d8a: 0x0080, + 0x1da0: 0x0080, 0x1da1: 0x0080, 0x1da2: 0x0080, 0x1da3: 0x0080, + 0x1da4: 0x0080, 0x1da5: 0x0080, 0x1da6: 0x0080, 0x1da7: 0x0080, 0x1da8: 0x0080, 0x1da9: 0x0080, + 0x1daa: 0x0080, 0x1dab: 0x0080, 0x1dac: 0x0080, 0x1dad: 0x0080, 0x1dae: 0x0080, 0x1daf: 0x0080, + 0x1db0: 0x0080, 0x1db1: 0x0080, 0x1db2: 0x0080, 0x1db3: 0x0080, 0x1db4: 0x0080, 0x1db5: 0x0080, + 0x1db6: 0x0080, 0x1db7: 0x0080, 0x1db8: 0x0080, 0x1db9: 0x0080, 0x1dba: 0x0080, 0x1dbb: 0x0080, + 0x1dbc: 
0x0080, 0x1dbd: 0x0080, 0x1dbe: 0x0080, 0x1dbf: 0x0080, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0x0080, 0x1dc1: 0x0080, 0x1dc2: 0x0080, 0x1dc3: 0x0080, 0x1dc4: 0x0080, 0x1dc5: 0x0080, + 0x1dc6: 0x0080, 0x1dc7: 0x0080, 0x1dc8: 0x0080, 0x1dc9: 0x0080, 0x1dca: 0x0080, 0x1dcb: 0x0080, + 0x1dcc: 0x0080, 0x1dcd: 0x0080, 0x1dce: 0x0080, 0x1dcf: 0x0080, 0x1dd0: 0x0080, 0x1dd1: 0x0080, + 0x1dd2: 0x0080, 0x1dd3: 0x0080, 0x1dd4: 0x0080, 0x1dd5: 0x0080, 0x1dd6: 0x0080, 0x1dd7: 0x0080, + 0x1dd8: 0x0080, 0x1dd9: 0x0080, 0x1dda: 0x0080, 0x1ddb: 0x0080, 0x1ddc: 0x0080, 0x1ddd: 0x0080, + 0x1dde: 0x0080, 0x1ddf: 0x0080, 0x1de0: 0x0080, 0x1de1: 0x0080, 0x1de2: 0x0080, 0x1de3: 0x0080, + 0x1de4: 0x0080, 0x1de5: 0x0080, 0x1de6: 0x0080, 0x1de7: 0x0080, 0x1de8: 0x0080, 0x1de9: 0x0080, + 0x1dea: 0x0080, 0x1deb: 0x0080, 0x1dec: 0x0080, 0x1ded: 0x0080, 0x1dee: 0x0080, 0x1def: 0x0080, + 0x1df0: 0x0080, 0x1df1: 0x0080, 0x1df2: 0x0080, 0x1df3: 0x0080, + 0x1df6: 0x0080, 0x1df7: 0x0080, 0x1df8: 0x0080, 0x1df9: 0x0080, 0x1dfa: 0x0080, 0x1dfb: 0x0080, + 0x1dfc: 0x0080, 0x1dfd: 0x0080, 0x1dfe: 0x0080, 0x1dff: 0x0080, + // Block 0x78, offset 0x1e00 + 0x1e00: 0x0080, 0x1e01: 0x0080, 0x1e02: 0x0080, 0x1e03: 0x0080, 0x1e04: 0x0080, 0x1e05: 0x0080, + 0x1e06: 0x0080, 0x1e07: 0x0080, 0x1e08: 0x0080, 0x1e09: 0x0080, 0x1e0a: 0x0080, 0x1e0b: 0x0080, + 0x1e0c: 0x0080, 0x1e0d: 0x0080, 0x1e0e: 0x0080, 0x1e0f: 0x0080, 0x1e10: 0x0080, 0x1e11: 0x0080, + 0x1e12: 0x0080, 0x1e13: 0x0080, 0x1e14: 0x0080, 0x1e15: 0x0080, + 0x1e18: 0x0080, 0x1e19: 0x0080, 0x1e1a: 0x0080, 0x1e1b: 0x0080, 0x1e1c: 0x0080, 0x1e1d: 0x0080, + 0x1e1e: 0x0080, 0x1e1f: 0x0080, 0x1e20: 0x0080, 0x1e21: 0x0080, 0x1e22: 0x0080, 0x1e23: 0x0080, + 0x1e24: 0x0080, 0x1e25: 0x0080, 0x1e26: 0x0080, 0x1e27: 0x0080, 0x1e28: 0x0080, 0x1e29: 0x0080, + 0x1e2a: 0x0080, 0x1e2b: 0x0080, 0x1e2c: 0x0080, 0x1e2d: 0x0080, 0x1e2e: 0x0080, 0x1e2f: 0x0080, + 0x1e30: 0x0080, 0x1e31: 0x0080, 0x1e32: 0x0080, 0x1e33: 0x0080, 0x1e34: 0x0080, 0x1e35: 0x0080, + 0x1e36: 0x0080, 0x1e37: 0x0080, 0x1e38: 0x0080, 0x1e39: 0x0080, + 0x1e3d: 0x0080, 0x1e3e: 0x0080, 0x1e3f: 0x0080, + // Block 0x79, offset 0x1e40 + 0x1e40: 0x0080, 0x1e41: 0x0080, 0x1e42: 0x0080, 0x1e43: 0x0080, 0x1e44: 0x0080, 0x1e45: 0x0080, + 0x1e46: 0x0080, 0x1e47: 0x0080, 0x1e48: 0x0080, 0x1e4a: 0x0080, 0x1e4b: 0x0080, + 0x1e4c: 0x0080, 0x1e4d: 0x0080, 0x1e4e: 0x0080, 0x1e4f: 0x0080, 0x1e50: 0x0080, 0x1e51: 0x0080, + 0x1e6c: 0x0080, 0x1e6d: 0x0080, 0x1e6e: 0x0080, 0x1e6f: 0x0080, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0x00c0, 0x1e81: 0x00c0, 0x1e82: 0x00c0, 0x1e83: 0x00c0, 0x1e84: 0x00c0, 0x1e85: 0x00c0, + 0x1e86: 0x00c0, 0x1e87: 0x00c0, 0x1e88: 0x00c0, 0x1e89: 0x00c0, 0x1e8a: 0x00c0, 0x1e8b: 0x00c0, + 0x1e8c: 0x00c0, 0x1e8d: 0x00c0, 0x1e8e: 0x00c0, 0x1e8f: 0x00c0, 0x1e90: 0x00c0, 0x1e91: 0x00c0, + 0x1e92: 0x00c0, 0x1e93: 0x00c0, 0x1e94: 0x00c0, 0x1e95: 0x00c0, 0x1e96: 0x00c0, 0x1e97: 0x00c0, + 0x1e98: 0x00c0, 0x1e99: 0x00c0, 0x1e9a: 0x00c0, 0x1e9b: 0x00c0, 0x1e9c: 0x00c0, 0x1e9d: 0x00c0, + 0x1e9e: 0x00c0, 0x1e9f: 0x00c0, 0x1ea0: 0x00c0, 0x1ea1: 0x00c0, 0x1ea2: 0x00c0, 0x1ea3: 0x00c0, + 0x1ea4: 0x00c0, 0x1ea5: 0x00c0, 0x1ea6: 0x00c0, 0x1ea7: 0x00c0, 0x1ea8: 0x00c0, 0x1ea9: 0x00c0, + 0x1eaa: 0x00c0, 0x1eab: 0x00c0, 0x1eac: 0x00c0, 0x1ead: 0x00c0, 0x1eae: 0x00c0, + 0x1eb0: 0x00c0, 0x1eb1: 0x00c0, 0x1eb2: 0x00c0, 0x1eb3: 0x00c0, 0x1eb4: 0x00c0, 0x1eb5: 0x00c0, + 0x1eb6: 0x00c0, 0x1eb7: 0x00c0, 0x1eb8: 0x00c0, 0x1eb9: 0x00c0, 0x1eba: 0x00c0, 0x1ebb: 0x00c0, + 0x1ebc: 0x00c0, 0x1ebd: 0x00c0, 0x1ebe: 0x00c0, 0x1ebf: 0x00c0, + // Block 0x7b, 
offset 0x1ec0 + 0x1ec0: 0x00c0, 0x1ec1: 0x00c0, 0x1ec2: 0x00c0, 0x1ec3: 0x00c0, 0x1ec4: 0x00c0, 0x1ec5: 0x00c0, + 0x1ec6: 0x00c0, 0x1ec7: 0x00c0, 0x1ec8: 0x00c0, 0x1ec9: 0x00c0, 0x1eca: 0x00c0, 0x1ecb: 0x00c0, + 0x1ecc: 0x00c0, 0x1ecd: 0x00c0, 0x1ece: 0x00c0, 0x1ecf: 0x00c0, 0x1ed0: 0x00c0, 0x1ed1: 0x00c0, + 0x1ed2: 0x00c0, 0x1ed3: 0x00c0, 0x1ed4: 0x00c0, 0x1ed5: 0x00c0, 0x1ed6: 0x00c0, 0x1ed7: 0x00c0, + 0x1ed8: 0x00c0, 0x1ed9: 0x00c0, 0x1eda: 0x00c0, 0x1edb: 0x00c0, 0x1edc: 0x00c0, 0x1edd: 0x00c0, + 0x1ede: 0x00c0, 0x1ee0: 0x00c0, 0x1ee1: 0x00c0, 0x1ee2: 0x00c0, 0x1ee3: 0x00c0, + 0x1ee4: 0x00c0, 0x1ee5: 0x00c0, 0x1ee6: 0x00c0, 0x1ee7: 0x00c0, 0x1ee8: 0x00c0, 0x1ee9: 0x00c0, + 0x1eea: 0x00c0, 0x1eeb: 0x00c0, 0x1eec: 0x00c0, 0x1eed: 0x00c0, 0x1eee: 0x00c0, 0x1eef: 0x00c0, + 0x1ef0: 0x00c0, 0x1ef1: 0x00c0, 0x1ef2: 0x00c0, 0x1ef3: 0x00c0, 0x1ef4: 0x00c0, 0x1ef5: 0x00c0, + 0x1ef6: 0x00c0, 0x1ef7: 0x00c0, 0x1ef8: 0x00c0, 0x1ef9: 0x00c0, 0x1efa: 0x00c0, 0x1efb: 0x00c0, + 0x1efc: 0x0080, 0x1efd: 0x0080, 0x1efe: 0x00c0, 0x1eff: 0x00c0, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0x00c0, 0x1f01: 0x00c0, 0x1f02: 0x00c0, 0x1f03: 0x00c0, 0x1f04: 0x00c0, 0x1f05: 0x00c0, + 0x1f06: 0x00c0, 0x1f07: 0x00c0, 0x1f08: 0x00c0, 0x1f09: 0x00c0, 0x1f0a: 0x00c0, 0x1f0b: 0x00c0, + 0x1f0c: 0x00c0, 0x1f0d: 0x00c0, 0x1f0e: 0x00c0, 0x1f0f: 0x00c0, 0x1f10: 0x00c0, 0x1f11: 0x00c0, + 0x1f12: 0x00c0, 0x1f13: 0x00c0, 0x1f14: 0x00c0, 0x1f15: 0x00c0, 0x1f16: 0x00c0, 0x1f17: 0x00c0, + 0x1f18: 0x00c0, 0x1f19: 0x00c0, 0x1f1a: 0x00c0, 0x1f1b: 0x00c0, 0x1f1c: 0x00c0, 0x1f1d: 0x00c0, + 0x1f1e: 0x00c0, 0x1f1f: 0x00c0, 0x1f20: 0x00c0, 0x1f21: 0x00c0, 0x1f22: 0x00c0, 0x1f23: 0x00c0, + 0x1f24: 0x00c0, 0x1f25: 0x0080, 0x1f26: 0x0080, 0x1f27: 0x0080, 0x1f28: 0x0080, 0x1f29: 0x0080, + 0x1f2a: 0x0080, 0x1f2b: 0x00c0, 0x1f2c: 0x00c0, 0x1f2d: 0x00c0, 0x1f2e: 0x00c0, 0x1f2f: 0x00c3, + 0x1f30: 0x00c3, 0x1f31: 0x00c3, 0x1f32: 0x00c0, 0x1f33: 0x00c0, + 0x1f39: 0x0080, 0x1f3a: 0x0080, 0x1f3b: 0x0080, + 0x1f3c: 0x0080, 0x1f3d: 0x0080, 0x1f3e: 0x0080, 0x1f3f: 0x0080, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0x00c0, 0x1f41: 0x00c0, 0x1f42: 0x00c0, 0x1f43: 0x00c0, 0x1f44: 0x00c0, 0x1f45: 0x00c0, + 0x1f46: 0x00c0, 0x1f47: 0x00c0, 0x1f48: 0x00c0, 0x1f49: 0x00c0, 0x1f4a: 0x00c0, 0x1f4b: 0x00c0, + 0x1f4c: 0x00c0, 0x1f4d: 0x00c0, 0x1f4e: 0x00c0, 0x1f4f: 0x00c0, 0x1f50: 0x00c0, 0x1f51: 0x00c0, + 0x1f52: 0x00c0, 0x1f53: 0x00c0, 0x1f54: 0x00c0, 0x1f55: 0x00c0, 0x1f56: 0x00c0, 0x1f57: 0x00c0, + 0x1f58: 0x00c0, 0x1f59: 0x00c0, 0x1f5a: 0x00c0, 0x1f5b: 0x00c0, 0x1f5c: 0x00c0, 0x1f5d: 0x00c0, + 0x1f5e: 0x00c0, 0x1f5f: 0x00c0, 0x1f60: 0x00c0, 0x1f61: 0x00c0, 0x1f62: 0x00c0, 0x1f63: 0x00c0, + 0x1f64: 0x00c0, 0x1f65: 0x00c0, 0x1f67: 0x00c0, + 0x1f6d: 0x00c0, + 0x1f70: 0x00c0, 0x1f71: 0x00c0, 0x1f72: 0x00c0, 0x1f73: 0x00c0, 0x1f74: 0x00c0, 0x1f75: 0x00c0, + 0x1f76: 0x00c0, 0x1f77: 0x00c0, 0x1f78: 0x00c0, 0x1f79: 0x00c0, 0x1f7a: 0x00c0, 0x1f7b: 0x00c0, + 0x1f7c: 0x00c0, 0x1f7d: 0x00c0, 0x1f7e: 0x00c0, 0x1f7f: 0x00c0, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0x00c0, 0x1f81: 0x00c0, 0x1f82: 0x00c0, 0x1f83: 0x00c0, 0x1f84: 0x00c0, 0x1f85: 0x00c0, + 0x1f86: 0x00c0, 0x1f87: 0x00c0, 0x1f88: 0x00c0, 0x1f89: 0x00c0, 0x1f8a: 0x00c0, 0x1f8b: 0x00c0, + 0x1f8c: 0x00c0, 0x1f8d: 0x00c0, 0x1f8e: 0x00c0, 0x1f8f: 0x00c0, 0x1f90: 0x00c0, 0x1f91: 0x00c0, + 0x1f92: 0x00c0, 0x1f93: 0x00c0, 0x1f94: 0x00c0, 0x1f95: 0x00c0, 0x1f96: 0x00c0, 0x1f97: 0x00c0, + 0x1f98: 0x00c0, 0x1f99: 0x00c0, 0x1f9a: 0x00c0, 0x1f9b: 0x00c0, 0x1f9c: 0x00c0, 0x1f9d: 0x00c0, + 0x1f9e: 0x00c0, 0x1f9f: 0x00c0, 0x1fa0: 
0x00c0, 0x1fa1: 0x00c0, 0x1fa2: 0x00c0, 0x1fa3: 0x00c0, + 0x1fa4: 0x00c0, 0x1fa5: 0x00c0, 0x1fa6: 0x00c0, 0x1fa7: 0x00c0, + 0x1faf: 0x0080, + 0x1fb0: 0x0080, + 0x1fbf: 0x00c6, + // Block 0x7f, offset 0x1fc0 + 0x1fc0: 0x00c0, 0x1fc1: 0x00c0, 0x1fc2: 0x00c0, 0x1fc3: 0x00c0, 0x1fc4: 0x00c0, 0x1fc5: 0x00c0, + 0x1fc6: 0x00c0, 0x1fc7: 0x00c0, 0x1fc8: 0x00c0, 0x1fc9: 0x00c0, 0x1fca: 0x00c0, 0x1fcb: 0x00c0, + 0x1fcc: 0x00c0, 0x1fcd: 0x00c0, 0x1fce: 0x00c0, 0x1fcf: 0x00c0, 0x1fd0: 0x00c0, 0x1fd1: 0x00c0, + 0x1fd2: 0x00c0, 0x1fd3: 0x00c0, 0x1fd4: 0x00c0, 0x1fd5: 0x00c0, 0x1fd6: 0x00c0, + 0x1fe0: 0x00c0, 0x1fe1: 0x00c0, 0x1fe2: 0x00c0, 0x1fe3: 0x00c0, + 0x1fe4: 0x00c0, 0x1fe5: 0x00c0, 0x1fe6: 0x00c0, 0x1fe8: 0x00c0, 0x1fe9: 0x00c0, + 0x1fea: 0x00c0, 0x1feb: 0x00c0, 0x1fec: 0x00c0, 0x1fed: 0x00c0, 0x1fee: 0x00c0, + 0x1ff0: 0x00c0, 0x1ff1: 0x00c0, 0x1ff2: 0x00c0, 0x1ff3: 0x00c0, 0x1ff4: 0x00c0, 0x1ff5: 0x00c0, + 0x1ff6: 0x00c0, 0x1ff8: 0x00c0, 0x1ff9: 0x00c0, 0x1ffa: 0x00c0, 0x1ffb: 0x00c0, + 0x1ffc: 0x00c0, 0x1ffd: 0x00c0, 0x1ffe: 0x00c0, + // Block 0x80, offset 0x2000 + 0x2000: 0x00c0, 0x2001: 0x00c0, 0x2002: 0x00c0, 0x2003: 0x00c0, 0x2004: 0x00c0, 0x2005: 0x00c0, + 0x2006: 0x00c0, 0x2008: 0x00c0, 0x2009: 0x00c0, 0x200a: 0x00c0, 0x200b: 0x00c0, + 0x200c: 0x00c0, 0x200d: 0x00c0, 0x200e: 0x00c0, 0x2010: 0x00c0, 0x2011: 0x00c0, + 0x2012: 0x00c0, 0x2013: 0x00c0, 0x2014: 0x00c0, 0x2015: 0x00c0, 0x2016: 0x00c0, + 0x2018: 0x00c0, 0x2019: 0x00c0, 0x201a: 0x00c0, 0x201b: 0x00c0, 0x201c: 0x00c0, 0x201d: 0x00c0, + 0x201e: 0x00c0, 0x2020: 0x00c3, 0x2021: 0x00c3, 0x2022: 0x00c3, 0x2023: 0x00c3, + 0x2024: 0x00c3, 0x2025: 0x00c3, 0x2026: 0x00c3, 0x2027: 0x00c3, 0x2028: 0x00c3, 0x2029: 0x00c3, + 0x202a: 0x00c3, 0x202b: 0x00c3, 0x202c: 0x00c3, 0x202d: 0x00c3, 0x202e: 0x00c3, 0x202f: 0x00c3, + 0x2030: 0x00c3, 0x2031: 0x00c3, 0x2032: 0x00c3, 0x2033: 0x00c3, 0x2034: 0x00c3, 0x2035: 0x00c3, + 0x2036: 0x00c3, 0x2037: 0x00c3, 0x2038: 0x00c3, 0x2039: 0x00c3, 0x203a: 0x00c3, 0x203b: 0x00c3, + 0x203c: 0x00c3, 0x203d: 0x00c3, 0x203e: 0x00c3, 0x203f: 0x00c3, + // Block 0x81, offset 0x2040 + 0x2040: 0x0080, 0x2041: 0x0080, 0x2042: 0x0080, 0x2043: 0x0080, 0x2044: 0x0080, 0x2045: 0x0080, + 0x2046: 0x0080, 0x2047: 0x0080, 0x2048: 0x0080, 0x2049: 0x0080, 0x204a: 0x0080, 0x204b: 0x0080, + 0x204c: 0x0080, 0x204d: 0x0080, 0x204e: 0x0080, 0x204f: 0x0080, 0x2050: 0x0080, 0x2051: 0x0080, + 0x2052: 0x0080, 0x2053: 0x0080, 0x2054: 0x0080, 0x2055: 0x0080, 0x2056: 0x0080, 0x2057: 0x0080, + 0x2058: 0x0080, 0x2059: 0x0080, 0x205a: 0x0080, 0x205b: 0x0080, 0x205c: 0x0080, 0x205d: 0x0080, + 0x205e: 0x0080, 0x205f: 0x0080, 0x2060: 0x0080, 0x2061: 0x0080, 0x2062: 0x0080, 0x2063: 0x0080, + 0x2064: 0x0080, 0x2065: 0x0080, 0x2066: 0x0080, 0x2067: 0x0080, 0x2068: 0x0080, 0x2069: 0x0080, + 0x206a: 0x0080, 0x206b: 0x0080, 0x206c: 0x0080, 0x206d: 0x0080, 0x206e: 0x0080, 0x206f: 0x00c0, + 0x2070: 0x0080, 0x2071: 0x0080, 0x2072: 0x0080, 0x2073: 0x0080, 0x2074: 0x0080, 0x2075: 0x0080, + 0x2076: 0x0080, 0x2077: 0x0080, 0x2078: 0x0080, 0x2079: 0x0080, 0x207a: 0x0080, 0x207b: 0x0080, + 0x207c: 0x0080, 0x207d: 0x0080, 0x207e: 0x0080, 0x207f: 0x0080, + // Block 0x82, offset 0x2080 + 0x2080: 0x0080, 0x2081: 0x0080, 0x2082: 0x0080, 0x2083: 0x0080, 0x2084: 0x0080, + // Block 0x83, offset 0x20c0 + 0x20c0: 0x008c, 0x20c1: 0x008c, 0x20c2: 0x008c, 0x20c3: 0x008c, 0x20c4: 0x008c, 0x20c5: 0x008c, + 0x20c6: 0x008c, 0x20c7: 0x008c, 0x20c8: 0x008c, 0x20c9: 0x008c, 0x20ca: 0x008c, 0x20cb: 0x008c, + 0x20cc: 0x008c, 0x20cd: 0x008c, 0x20ce: 0x008c, 0x20cf: 0x008c, 0x20d0: 0x008c, 
0x20d1: 0x008c, + 0x20d2: 0x008c, 0x20d3: 0x008c, 0x20d4: 0x008c, 0x20d5: 0x008c, 0x20d6: 0x008c, 0x20d7: 0x008c, + 0x20d8: 0x008c, 0x20d9: 0x008c, 0x20db: 0x008c, 0x20dc: 0x008c, 0x20dd: 0x008c, + 0x20de: 0x008c, 0x20df: 0x008c, 0x20e0: 0x008c, 0x20e1: 0x008c, 0x20e2: 0x008c, 0x20e3: 0x008c, + 0x20e4: 0x008c, 0x20e5: 0x008c, 0x20e6: 0x008c, 0x20e7: 0x008c, 0x20e8: 0x008c, 0x20e9: 0x008c, + 0x20ea: 0x008c, 0x20eb: 0x008c, 0x20ec: 0x008c, 0x20ed: 0x008c, 0x20ee: 0x008c, 0x20ef: 0x008c, + 0x20f0: 0x008c, 0x20f1: 0x008c, 0x20f2: 0x008c, 0x20f3: 0x008c, 0x20f4: 0x008c, 0x20f5: 0x008c, + 0x20f6: 0x008c, 0x20f7: 0x008c, 0x20f8: 0x008c, 0x20f9: 0x008c, 0x20fa: 0x008c, 0x20fb: 0x008c, + 0x20fc: 0x008c, 0x20fd: 0x008c, 0x20fe: 0x008c, 0x20ff: 0x008c, + // Block 0x84, offset 0x2100 + 0x2100: 0x008c, 0x2101: 0x008c, 0x2102: 0x008c, 0x2103: 0x008c, 0x2104: 0x008c, 0x2105: 0x008c, + 0x2106: 0x008c, 0x2107: 0x008c, 0x2108: 0x008c, 0x2109: 0x008c, 0x210a: 0x008c, 0x210b: 0x008c, + 0x210c: 0x008c, 0x210d: 0x008c, 0x210e: 0x008c, 0x210f: 0x008c, 0x2110: 0x008c, 0x2111: 0x008c, + 0x2112: 0x008c, 0x2113: 0x008c, 0x2114: 0x008c, 0x2115: 0x008c, 0x2116: 0x008c, 0x2117: 0x008c, + 0x2118: 0x008c, 0x2119: 0x008c, 0x211a: 0x008c, 0x211b: 0x008c, 0x211c: 0x008c, 0x211d: 0x008c, + 0x211e: 0x008c, 0x211f: 0x008c, 0x2120: 0x008c, 0x2121: 0x008c, 0x2122: 0x008c, 0x2123: 0x008c, + 0x2124: 0x008c, 0x2125: 0x008c, 0x2126: 0x008c, 0x2127: 0x008c, 0x2128: 0x008c, 0x2129: 0x008c, + 0x212a: 0x008c, 0x212b: 0x008c, 0x212c: 0x008c, 0x212d: 0x008c, 0x212e: 0x008c, 0x212f: 0x008c, + 0x2130: 0x008c, 0x2131: 0x008c, 0x2132: 0x008c, 0x2133: 0x008c, + // Block 0x85, offset 0x2140 + 0x2140: 0x008c, 0x2141: 0x008c, 0x2142: 0x008c, 0x2143: 0x008c, 0x2144: 0x008c, 0x2145: 0x008c, + 0x2146: 0x008c, 0x2147: 0x008c, 0x2148: 0x008c, 0x2149: 0x008c, 0x214a: 0x008c, 0x214b: 0x008c, + 0x214c: 0x008c, 0x214d: 0x008c, 0x214e: 0x008c, 0x214f: 0x008c, 0x2150: 0x008c, 0x2151: 0x008c, + 0x2152: 0x008c, 0x2153: 0x008c, 0x2154: 0x008c, 0x2155: 0x008c, 0x2156: 0x008c, 0x2157: 0x008c, + 0x2158: 0x008c, 0x2159: 0x008c, 0x215a: 0x008c, 0x215b: 0x008c, 0x215c: 0x008c, 0x215d: 0x008c, + 0x215e: 0x008c, 0x215f: 0x008c, 0x2160: 0x008c, 0x2161: 0x008c, 0x2162: 0x008c, 0x2163: 0x008c, + 0x2164: 0x008c, 0x2165: 0x008c, 0x2166: 0x008c, 0x2167: 0x008c, 0x2168: 0x008c, 0x2169: 0x008c, + 0x216a: 0x008c, 0x216b: 0x008c, 0x216c: 0x008c, 0x216d: 0x008c, 0x216e: 0x008c, 0x216f: 0x008c, + 0x2170: 0x008c, 0x2171: 0x008c, 0x2172: 0x008c, 0x2173: 0x008c, 0x2174: 0x008c, 0x2175: 0x008c, + 0x2176: 0x008c, 0x2177: 0x008c, 0x2178: 0x008c, 0x2179: 0x008c, 0x217a: 0x008c, 0x217b: 0x008c, + 0x217c: 0x008c, 0x217d: 0x008c, 0x217e: 0x008c, 0x217f: 0x008c, + // Block 0x86, offset 0x2180 + 0x2180: 0x008c, 0x2181: 0x008c, 0x2182: 0x008c, 0x2183: 0x008c, 0x2184: 0x008c, 0x2185: 0x008c, + 0x2186: 0x008c, 0x2187: 0x008c, 0x2188: 0x008c, 0x2189: 0x008c, 0x218a: 0x008c, 0x218b: 0x008c, + 0x218c: 0x008c, 0x218d: 0x008c, 0x218e: 0x008c, 0x218f: 0x008c, 0x2190: 0x008c, 0x2191: 0x008c, + 0x2192: 0x008c, 0x2193: 0x008c, 0x2194: 0x008c, 0x2195: 0x008c, + 0x21b0: 0x0080, 0x21b1: 0x0080, 0x21b2: 0x0080, 0x21b3: 0x0080, 0x21b4: 0x0080, 0x21b5: 0x0080, + 0x21b6: 0x0080, 0x21b7: 0x0080, 0x21b8: 0x0080, 0x21b9: 0x0080, 0x21ba: 0x0080, 0x21bb: 0x0080, + // Block 0x87, offset 0x21c0 + 0x21c0: 0x0080, 0x21c1: 0x0080, 0x21c2: 0x0080, 0x21c3: 0x0080, 0x21c4: 0x0080, 0x21c5: 0x00cc, + 0x21c6: 0x00c0, 0x21c7: 0x00cc, 0x21c8: 0x0080, 0x21c9: 0x0080, 0x21ca: 0x0080, 0x21cb: 0x0080, + 0x21cc: 0x0080, 0x21cd: 
0x0080, 0x21ce: 0x0080, 0x21cf: 0x0080, 0x21d0: 0x0080, 0x21d1: 0x0080, + 0x21d2: 0x0080, 0x21d3: 0x0080, 0x21d4: 0x0080, 0x21d5: 0x0080, 0x21d6: 0x0080, 0x21d7: 0x0080, + 0x21d8: 0x0080, 0x21d9: 0x0080, 0x21da: 0x0080, 0x21db: 0x0080, 0x21dc: 0x0080, 0x21dd: 0x0080, + 0x21de: 0x0080, 0x21df: 0x0080, 0x21e0: 0x0080, 0x21e1: 0x008c, 0x21e2: 0x008c, 0x21e3: 0x008c, + 0x21e4: 0x008c, 0x21e5: 0x008c, 0x21e6: 0x008c, 0x21e7: 0x008c, 0x21e8: 0x008c, 0x21e9: 0x008c, + 0x21ea: 0x00c3, 0x21eb: 0x00c3, 0x21ec: 0x00c3, 0x21ed: 0x00c3, 0x21ee: 0x0040, 0x21ef: 0x0040, + 0x21f0: 0x0080, 0x21f1: 0x0040, 0x21f2: 0x0040, 0x21f3: 0x0040, 0x21f4: 0x0040, 0x21f5: 0x0040, + 0x21f6: 0x0080, 0x21f7: 0x0080, 0x21f8: 0x008c, 0x21f9: 0x008c, 0x21fa: 0x008c, 0x21fb: 0x0040, + 0x21fc: 0x00c0, 0x21fd: 0x0080, 0x21fe: 0x0080, 0x21ff: 0x0080, + // Block 0x88, offset 0x2200 + 0x2201: 0x00cc, 0x2202: 0x00cc, 0x2203: 0x00cc, 0x2204: 0x00cc, 0x2205: 0x00cc, + 0x2206: 0x00cc, 0x2207: 0x00cc, 0x2208: 0x00cc, 0x2209: 0x00cc, 0x220a: 0x00cc, 0x220b: 0x00cc, + 0x220c: 0x00cc, 0x220d: 0x00cc, 0x220e: 0x00cc, 0x220f: 0x00cc, 0x2210: 0x00cc, 0x2211: 0x00cc, + 0x2212: 0x00cc, 0x2213: 0x00cc, 0x2214: 0x00cc, 0x2215: 0x00cc, 0x2216: 0x00cc, 0x2217: 0x00cc, + 0x2218: 0x00cc, 0x2219: 0x00cc, 0x221a: 0x00cc, 0x221b: 0x00cc, 0x221c: 0x00cc, 0x221d: 0x00cc, + 0x221e: 0x00cc, 0x221f: 0x00cc, 0x2220: 0x00cc, 0x2221: 0x00cc, 0x2222: 0x00cc, 0x2223: 0x00cc, + 0x2224: 0x00cc, 0x2225: 0x00cc, 0x2226: 0x00cc, 0x2227: 0x00cc, 0x2228: 0x00cc, 0x2229: 0x00cc, + 0x222a: 0x00cc, 0x222b: 0x00cc, 0x222c: 0x00cc, 0x222d: 0x00cc, 0x222e: 0x00cc, 0x222f: 0x00cc, + 0x2230: 0x00cc, 0x2231: 0x00cc, 0x2232: 0x00cc, 0x2233: 0x00cc, 0x2234: 0x00cc, 0x2235: 0x00cc, + 0x2236: 0x00cc, 0x2237: 0x00cc, 0x2238: 0x00cc, 0x2239: 0x00cc, 0x223a: 0x00cc, 0x223b: 0x00cc, + 0x223c: 0x00cc, 0x223d: 0x00cc, 0x223e: 0x00cc, 0x223f: 0x00cc, + // Block 0x89, offset 0x2240 + 0x2240: 0x00cc, 0x2241: 0x00cc, 0x2242: 0x00cc, 0x2243: 0x00cc, 0x2244: 0x00cc, 0x2245: 0x00cc, + 0x2246: 0x00cc, 0x2247: 0x00cc, 0x2248: 0x00cc, 0x2249: 0x00cc, 0x224a: 0x00cc, 0x224b: 0x00cc, + 0x224c: 0x00cc, 0x224d: 0x00cc, 0x224e: 0x00cc, 0x224f: 0x00cc, 0x2250: 0x00cc, 0x2251: 0x00cc, + 0x2252: 0x00cc, 0x2253: 0x00cc, 0x2254: 0x00cc, 0x2255: 0x00cc, 0x2256: 0x00cc, + 0x2259: 0x00c3, 0x225a: 0x00c3, 0x225b: 0x0080, 0x225c: 0x0080, 0x225d: 0x00cc, + 0x225e: 0x00cc, 0x225f: 0x008c, 0x2260: 0x0080, 0x2261: 0x00cc, 0x2262: 0x00cc, 0x2263: 0x00cc, + 0x2264: 0x00cc, 0x2265: 0x00cc, 0x2266: 0x00cc, 0x2267: 0x00cc, 0x2268: 0x00cc, 0x2269: 0x00cc, + 0x226a: 0x00cc, 0x226b: 0x00cc, 0x226c: 0x00cc, 0x226d: 0x00cc, 0x226e: 0x00cc, 0x226f: 0x00cc, + 0x2270: 0x00cc, 0x2271: 0x00cc, 0x2272: 0x00cc, 0x2273: 0x00cc, 0x2274: 0x00cc, 0x2275: 0x00cc, + 0x2276: 0x00cc, 0x2277: 0x00cc, 0x2278: 0x00cc, 0x2279: 0x00cc, 0x227a: 0x00cc, 0x227b: 0x00cc, + 0x227c: 0x00cc, 0x227d: 0x00cc, 0x227e: 0x00cc, 0x227f: 0x00cc, + // Block 0x8a, offset 0x2280 + 0x2280: 0x00cc, 0x2281: 0x00cc, 0x2282: 0x00cc, 0x2283: 0x00cc, 0x2284: 0x00cc, 0x2285: 0x00cc, + 0x2286: 0x00cc, 0x2287: 0x00cc, 0x2288: 0x00cc, 0x2289: 0x00cc, 0x228a: 0x00cc, 0x228b: 0x00cc, + 0x228c: 0x00cc, 0x228d: 0x00cc, 0x228e: 0x00cc, 0x228f: 0x00cc, 0x2290: 0x00cc, 0x2291: 0x00cc, + 0x2292: 0x00cc, 0x2293: 0x00cc, 0x2294: 0x00cc, 0x2295: 0x00cc, 0x2296: 0x00cc, 0x2297: 0x00cc, + 0x2298: 0x00cc, 0x2299: 0x00cc, 0x229a: 0x00cc, 0x229b: 0x00cc, 0x229c: 0x00cc, 0x229d: 0x00cc, + 0x229e: 0x00cc, 0x229f: 0x00cc, 0x22a0: 0x00cc, 0x22a1: 0x00cc, 0x22a2: 0x00cc, 0x22a3: 0x00cc, + 
0x22a4: 0x00cc, 0x22a5: 0x00cc, 0x22a6: 0x00cc, 0x22a7: 0x00cc, 0x22a8: 0x00cc, 0x22a9: 0x00cc, + 0x22aa: 0x00cc, 0x22ab: 0x00cc, 0x22ac: 0x00cc, 0x22ad: 0x00cc, 0x22ae: 0x00cc, 0x22af: 0x00cc, + 0x22b0: 0x00cc, 0x22b1: 0x00cc, 0x22b2: 0x00cc, 0x22b3: 0x00cc, 0x22b4: 0x00cc, 0x22b5: 0x00cc, + 0x22b6: 0x00cc, 0x22b7: 0x00cc, 0x22b8: 0x00cc, 0x22b9: 0x00cc, 0x22ba: 0x00cc, 0x22bb: 0x00d2, + 0x22bc: 0x00c0, 0x22bd: 0x00cc, 0x22be: 0x00cc, 0x22bf: 0x008c, + // Block 0x8b, offset 0x22c0 + 0x22c5: 0x00c0, + 0x22c6: 0x00c0, 0x22c7: 0x00c0, 0x22c8: 0x00c0, 0x22c9: 0x00c0, 0x22ca: 0x00c0, 0x22cb: 0x00c0, + 0x22cc: 0x00c0, 0x22cd: 0x00c0, 0x22ce: 0x00c0, 0x22cf: 0x00c0, 0x22d0: 0x00c0, 0x22d1: 0x00c0, + 0x22d2: 0x00c0, 0x22d3: 0x00c0, 0x22d4: 0x00c0, 0x22d5: 0x00c0, 0x22d6: 0x00c0, 0x22d7: 0x00c0, + 0x22d8: 0x00c0, 0x22d9: 0x00c0, 0x22da: 0x00c0, 0x22db: 0x00c0, 0x22dc: 0x00c0, 0x22dd: 0x00c0, + 0x22de: 0x00c0, 0x22df: 0x00c0, 0x22e0: 0x00c0, 0x22e1: 0x00c0, 0x22e2: 0x00c0, 0x22e3: 0x00c0, + 0x22e4: 0x00c0, 0x22e5: 0x00c0, 0x22e6: 0x00c0, 0x22e7: 0x00c0, 0x22e8: 0x00c0, 0x22e9: 0x00c0, + 0x22ea: 0x00c0, 0x22eb: 0x00c0, 0x22ec: 0x00c0, 0x22ed: 0x00c0, + 0x22f1: 0x0080, 0x22f2: 0x0080, 0x22f3: 0x0080, 0x22f4: 0x0080, 0x22f5: 0x0080, + 0x22f6: 0x0080, 0x22f7: 0x0080, 0x22f8: 0x0080, 0x22f9: 0x0080, 0x22fa: 0x0080, 0x22fb: 0x0080, + 0x22fc: 0x0080, 0x22fd: 0x0080, 0x22fe: 0x0080, 0x22ff: 0x0080, + // Block 0x8c, offset 0x2300 + 0x2300: 0x0080, 0x2301: 0x0080, 0x2302: 0x0080, 0x2303: 0x0080, 0x2304: 0x0080, 0x2305: 0x0080, + 0x2306: 0x0080, 0x2307: 0x0080, 0x2308: 0x0080, 0x2309: 0x0080, 0x230a: 0x0080, 0x230b: 0x0080, + 0x230c: 0x0080, 0x230d: 0x0080, 0x230e: 0x0080, 0x230f: 0x0080, 0x2310: 0x0080, 0x2311: 0x0080, + 0x2312: 0x0080, 0x2313: 0x0080, 0x2314: 0x0080, 0x2315: 0x0080, 0x2316: 0x0080, 0x2317: 0x0080, + 0x2318: 0x0080, 0x2319: 0x0080, 0x231a: 0x0080, 0x231b: 0x0080, 0x231c: 0x0080, 0x231d: 0x0080, + 0x231e: 0x0080, 0x231f: 0x0080, 0x2320: 0x0080, 0x2321: 0x0080, 0x2322: 0x0080, 0x2323: 0x0080, + 0x2324: 0x0040, 0x2325: 0x0080, 0x2326: 0x0080, 0x2327: 0x0080, 0x2328: 0x0080, 0x2329: 0x0080, + 0x232a: 0x0080, 0x232b: 0x0080, 0x232c: 0x0080, 0x232d: 0x0080, 0x232e: 0x0080, 0x232f: 0x0080, + 0x2330: 0x0080, 0x2331: 0x0080, 0x2332: 0x0080, 0x2333: 0x0080, 0x2334: 0x0080, 0x2335: 0x0080, + 0x2336: 0x0080, 0x2337: 0x0080, 0x2338: 0x0080, 0x2339: 0x0080, 0x233a: 0x0080, 0x233b: 0x0080, + 0x233c: 0x0080, 0x233d: 0x0080, 0x233e: 0x0080, 0x233f: 0x0080, + // Block 0x8d, offset 0x2340 + 0x2340: 0x0080, 0x2341: 0x0080, 0x2342: 0x0080, 0x2343: 0x0080, 0x2344: 0x0080, 0x2345: 0x0080, + 0x2346: 0x0080, 0x2347: 0x0080, 0x2348: 0x0080, 0x2349: 0x0080, 0x234a: 0x0080, 0x234b: 0x0080, + 0x234c: 0x0080, 0x234d: 0x0080, 0x234e: 0x0080, 0x2350: 0x0080, 0x2351: 0x0080, + 0x2352: 0x0080, 0x2353: 0x0080, 0x2354: 0x0080, 0x2355: 0x0080, 0x2356: 0x0080, 0x2357: 0x0080, + 0x2358: 0x0080, 0x2359: 0x0080, 0x235a: 0x0080, 0x235b: 0x0080, 0x235c: 0x0080, 0x235d: 0x0080, + 0x235e: 0x0080, 0x235f: 0x0080, 0x2360: 0x00c0, 0x2361: 0x00c0, 0x2362: 0x00c0, 0x2363: 0x00c0, + 0x2364: 0x00c0, 0x2365: 0x00c0, 0x2366: 0x00c0, 0x2367: 0x00c0, 0x2368: 0x00c0, 0x2369: 0x00c0, + 0x236a: 0x00c0, 0x236b: 0x00c0, 0x236c: 0x00c0, 0x236d: 0x00c0, 0x236e: 0x00c0, 0x236f: 0x00c0, + 0x2370: 0x00c0, 0x2371: 0x00c0, 0x2372: 0x00c0, 0x2373: 0x00c0, 0x2374: 0x00c0, 0x2375: 0x00c0, + 0x2376: 0x00c0, 0x2377: 0x00c0, 0x2378: 0x00c0, 0x2379: 0x00c0, 0x237a: 0x00c0, + // Block 0x8e, offset 0x2380 + 0x2380: 0x0080, 0x2381: 0x0080, 0x2382: 0x0080, 0x2383: 
0x0080, 0x2384: 0x0080, 0x2385: 0x0080, + 0x2386: 0x0080, 0x2387: 0x0080, 0x2388: 0x0080, 0x2389: 0x0080, 0x238a: 0x0080, 0x238b: 0x0080, + 0x238c: 0x0080, 0x238d: 0x0080, 0x238e: 0x0080, 0x238f: 0x0080, 0x2390: 0x0080, 0x2391: 0x0080, + 0x2392: 0x0080, 0x2393: 0x0080, 0x2394: 0x0080, 0x2395: 0x0080, 0x2396: 0x0080, 0x2397: 0x0080, + 0x2398: 0x0080, 0x2399: 0x0080, 0x239a: 0x0080, 0x239b: 0x0080, 0x239c: 0x0080, 0x239d: 0x0080, + 0x239e: 0x0080, 0x239f: 0x0080, 0x23a0: 0x0080, 0x23a1: 0x0080, 0x23a2: 0x0080, 0x23a3: 0x0080, + 0x23b0: 0x00cc, 0x23b1: 0x00cc, 0x23b2: 0x00cc, 0x23b3: 0x00cc, 0x23b4: 0x00cc, 0x23b5: 0x00cc, + 0x23b6: 0x00cc, 0x23b7: 0x00cc, 0x23b8: 0x00cc, 0x23b9: 0x00cc, 0x23ba: 0x00cc, 0x23bb: 0x00cc, + 0x23bc: 0x00cc, 0x23bd: 0x00cc, 0x23be: 0x00cc, 0x23bf: 0x00cc, + // Block 0x8f, offset 0x23c0 + 0x23c0: 0x0080, 0x23c1: 0x0080, 0x23c2: 0x0080, 0x23c3: 0x0080, 0x23c4: 0x0080, 0x23c5: 0x0080, + 0x23c6: 0x0080, 0x23c7: 0x0080, 0x23c8: 0x0080, 0x23c9: 0x0080, 0x23ca: 0x0080, 0x23cb: 0x0080, + 0x23cc: 0x0080, 0x23cd: 0x0080, 0x23ce: 0x0080, 0x23cf: 0x0080, 0x23d0: 0x0080, 0x23d1: 0x0080, + 0x23d2: 0x0080, 0x23d3: 0x0080, 0x23d4: 0x0080, 0x23d5: 0x0080, 0x23d6: 0x0080, 0x23d7: 0x0080, + 0x23d8: 0x0080, 0x23d9: 0x0080, 0x23da: 0x0080, 0x23db: 0x0080, 0x23dc: 0x0080, 0x23dd: 0x0080, + 0x23de: 0x0080, 0x23e0: 0x0080, 0x23e1: 0x0080, 0x23e2: 0x0080, 0x23e3: 0x0080, + 0x23e4: 0x0080, 0x23e5: 0x0080, 0x23e6: 0x0080, 0x23e7: 0x0080, 0x23e8: 0x0080, 0x23e9: 0x0080, + 0x23ea: 0x0080, 0x23eb: 0x0080, 0x23ec: 0x0080, 0x23ed: 0x0080, 0x23ee: 0x0080, 0x23ef: 0x0080, + 0x23f0: 0x0080, 0x23f1: 0x0080, 0x23f2: 0x0080, 0x23f3: 0x0080, 0x23f4: 0x0080, 0x23f5: 0x0080, + 0x23f6: 0x0080, 0x23f7: 0x0080, 0x23f8: 0x0080, 0x23f9: 0x0080, 0x23fa: 0x0080, 0x23fb: 0x0080, + 0x23fc: 0x0080, 0x23fd: 0x0080, 0x23fe: 0x0080, 0x23ff: 0x0080, + // Block 0x90, offset 0x2400 + 0x2400: 0x0080, 0x2401: 0x0080, 0x2402: 0x0080, 0x2403: 0x0080, 0x2404: 0x0080, 0x2405: 0x0080, + 0x2406: 0x0080, 0x2407: 0x0080, 0x2408: 0x0080, 0x2409: 0x0080, 0x240a: 0x0080, 0x240b: 0x0080, + 0x240c: 0x0080, 0x240d: 0x0080, 0x240e: 0x0080, 0x240f: 0x0080, 0x2410: 0x008c, 0x2411: 0x008c, + 0x2412: 0x008c, 0x2413: 0x008c, 0x2414: 0x008c, 0x2415: 0x008c, 0x2416: 0x008c, 0x2417: 0x008c, + 0x2418: 0x008c, 0x2419: 0x008c, 0x241a: 0x008c, 0x241b: 0x008c, 0x241c: 0x008c, 0x241d: 0x008c, + 0x241e: 0x008c, 0x241f: 0x008c, 0x2420: 0x008c, 0x2421: 0x008c, 0x2422: 0x008c, 0x2423: 0x008c, + 0x2424: 0x008c, 0x2425: 0x008c, 0x2426: 0x008c, 0x2427: 0x008c, 0x2428: 0x008c, 0x2429: 0x008c, + 0x242a: 0x008c, 0x242b: 0x008c, 0x242c: 0x008c, 0x242d: 0x008c, 0x242e: 0x008c, 0x242f: 0x008c, + 0x2430: 0x008c, 0x2431: 0x008c, 0x2432: 0x008c, 0x2433: 0x008c, 0x2434: 0x008c, 0x2435: 0x008c, + 0x2436: 0x008c, 0x2437: 0x008c, 0x2438: 0x008c, 0x2439: 0x008c, 0x243a: 0x008c, 0x243b: 0x008c, + 0x243c: 0x008c, 0x243d: 0x008c, 0x243e: 0x008c, + // Block 0x91, offset 0x2440 + 0x2440: 0x008c, 0x2441: 0x008c, 0x2442: 0x008c, 0x2443: 0x008c, 0x2444: 0x008c, 0x2445: 0x008c, + 0x2446: 0x008c, 0x2447: 0x008c, 0x2448: 0x008c, 0x2449: 0x008c, 0x244a: 0x008c, 0x244b: 0x008c, + 0x244c: 0x008c, 0x244d: 0x008c, 0x244e: 0x008c, 0x244f: 0x008c, 0x2450: 0x008c, 0x2451: 0x008c, + 0x2452: 0x008c, 0x2453: 0x008c, 0x2454: 0x008c, 0x2455: 0x008c, 0x2456: 0x008c, 0x2457: 0x008c, + 0x2458: 0x0080, 0x2459: 0x0080, 0x245a: 0x0080, 0x245b: 0x0080, 0x245c: 0x0080, 0x245d: 0x0080, + 0x245e: 0x0080, 0x245f: 0x0080, 0x2460: 0x0080, 0x2461: 0x0080, 0x2462: 0x0080, 0x2463: 0x0080, + 0x2464: 0x0080, 
0x2465: 0x0080, 0x2466: 0x0080, 0x2467: 0x0080, 0x2468: 0x0080, 0x2469: 0x0080, + 0x246a: 0x0080, 0x246b: 0x0080, 0x246c: 0x0080, 0x246d: 0x0080, 0x246e: 0x0080, 0x246f: 0x0080, + 0x2470: 0x0080, 0x2471: 0x0080, 0x2472: 0x0080, 0x2473: 0x0080, 0x2474: 0x0080, 0x2475: 0x0080, + 0x2476: 0x0080, 0x2477: 0x0080, 0x2478: 0x0080, 0x2479: 0x0080, 0x247a: 0x0080, 0x247b: 0x0080, + 0x247c: 0x0080, 0x247d: 0x0080, 0x247e: 0x0080, 0x247f: 0x0080, + // Block 0x92, offset 0x2480 + 0x2480: 0x00cc, 0x2481: 0x00cc, 0x2482: 0x00cc, 0x2483: 0x00cc, 0x2484: 0x00cc, 0x2485: 0x00cc, + 0x2486: 0x00cc, 0x2487: 0x00cc, 0x2488: 0x00cc, 0x2489: 0x00cc, 0x248a: 0x00cc, 0x248b: 0x00cc, + 0x248c: 0x00cc, 0x248d: 0x00cc, 0x248e: 0x00cc, 0x248f: 0x00cc, 0x2490: 0x00cc, 0x2491: 0x00cc, + 0x2492: 0x00cc, 0x2493: 0x00cc, 0x2494: 0x00cc, 0x2495: 0x00cc, 0x2496: 0x00cc, 0x2497: 0x00cc, + 0x2498: 0x00cc, 0x2499: 0x00cc, 0x249a: 0x00cc, 0x249b: 0x00cc, 0x249c: 0x00cc, 0x249d: 0x00cc, + 0x249e: 0x00cc, 0x249f: 0x00cc, 0x24a0: 0x00cc, 0x24a1: 0x00cc, 0x24a2: 0x00cc, 0x24a3: 0x00cc, + 0x24a4: 0x00cc, 0x24a5: 0x00cc, 0x24a6: 0x00cc, 0x24a7: 0x00cc, 0x24a8: 0x00cc, 0x24a9: 0x00cc, + 0x24aa: 0x00cc, 0x24ab: 0x00cc, 0x24ac: 0x00cc, 0x24ad: 0x00cc, 0x24ae: 0x00cc, 0x24af: 0x00cc, + 0x24b0: 0x00cc, 0x24b1: 0x00cc, 0x24b2: 0x00cc, 0x24b3: 0x00cc, 0x24b4: 0x00cc, 0x24b5: 0x00cc, + 0x24b6: 0x00cc, 0x24b7: 0x00cc, 0x24b8: 0x00cc, 0x24b9: 0x00cc, 0x24ba: 0x00cc, 0x24bb: 0x00cc, + 0x24bc: 0x00cc, 0x24bd: 0x00cc, 0x24be: 0x00cc, 0x24bf: 0x00cc, + // Block 0x93, offset 0x24c0 + 0x24c0: 0x00cc, 0x24c1: 0x00cc, 0x24c2: 0x00cc, 0x24c3: 0x00cc, 0x24c4: 0x00cc, 0x24c5: 0x00cc, + 0x24c6: 0x00cc, 0x24c7: 0x00cc, 0x24c8: 0x00cc, 0x24c9: 0x00cc, 0x24ca: 0x00cc, 0x24cb: 0x00cc, + 0x24cc: 0x00cc, 0x24cd: 0x00cc, 0x24ce: 0x00cc, 0x24cf: 0x00cc, 0x24d0: 0x00cc, 0x24d1: 0x00cc, + 0x24d2: 0x00cc, 0x24d3: 0x00cc, 0x24d4: 0x00cc, 0x24d5: 0x00cc, 0x24d6: 0x00cc, 0x24d7: 0x00cc, + 0x24d8: 0x00cc, 0x24d9: 0x00cc, 0x24da: 0x00cc, 0x24db: 0x00cc, 0x24dc: 0x00cc, 0x24dd: 0x00cc, + 0x24de: 0x00cc, 0x24df: 0x00cc, 0x24e0: 0x00cc, 0x24e1: 0x00cc, 0x24e2: 0x00cc, 0x24e3: 0x00cc, + 0x24e4: 0x00cc, 0x24e5: 0x00cc, 0x24e6: 0x00cc, 0x24e7: 0x00cc, 0x24e8: 0x00cc, 0x24e9: 0x00cc, + 0x24ea: 0x00cc, 0x24eb: 0x00cc, 0x24ec: 0x00cc, 0x24ed: 0x00cc, 0x24ee: 0x00cc, 0x24ef: 0x00cc, + 0x24f0: 0x00cc, 0x24f1: 0x00cc, 0x24f2: 0x00cc, 0x24f3: 0x00cc, 0x24f4: 0x00cc, 0x24f5: 0x00cc, + // Block 0x94, offset 0x2500 + 0x2500: 0x00cc, 0x2501: 0x00cc, 0x2502: 0x00cc, 0x2503: 0x00cc, 0x2504: 0x00cc, 0x2505: 0x00cc, + 0x2506: 0x00cc, 0x2507: 0x00cc, 0x2508: 0x00cc, 0x2509: 0x00cc, 0x250a: 0x00cc, 0x250b: 0x00cc, + 0x250c: 0x00cc, 0x250d: 0x00cc, 0x250e: 0x00cc, 0x250f: 0x00cc, 0x2510: 0x00cc, 0x2511: 0x00cc, + 0x2512: 0x00cc, 0x2513: 0x00cc, 0x2514: 0x00cc, 0x2515: 0x00cc, + // Block 0x95, offset 0x2540 + 0x2540: 0x00c0, 0x2541: 0x00c0, 0x2542: 0x00c0, 0x2543: 0x00c0, 0x2544: 0x00c0, 0x2545: 0x00c0, + 0x2546: 0x00c0, 0x2547: 0x00c0, 0x2548: 0x00c0, 0x2549: 0x00c0, 0x254a: 0x00c0, 0x254b: 0x00c0, + 0x254c: 0x00c0, 0x2550: 0x0080, 0x2551: 0x0080, + 0x2552: 0x0080, 0x2553: 0x0080, 0x2554: 0x0080, 0x2555: 0x0080, 0x2556: 0x0080, 0x2557: 0x0080, + 0x2558: 0x0080, 0x2559: 0x0080, 0x255a: 0x0080, 0x255b: 0x0080, 0x255c: 0x0080, 0x255d: 0x0080, + 0x255e: 0x0080, 0x255f: 0x0080, 0x2560: 0x0080, 0x2561: 0x0080, 0x2562: 0x0080, 0x2563: 0x0080, + 0x2564: 0x0080, 0x2565: 0x0080, 0x2566: 0x0080, 0x2567: 0x0080, 0x2568: 0x0080, 0x2569: 0x0080, + 0x256a: 0x0080, 0x256b: 0x0080, 0x256c: 0x0080, 0x256d: 
0x0080, 0x256e: 0x0080, 0x256f: 0x0080, + 0x2570: 0x0080, 0x2571: 0x0080, 0x2572: 0x0080, 0x2573: 0x0080, 0x2574: 0x0080, 0x2575: 0x0080, + 0x2576: 0x0080, 0x2577: 0x0080, 0x2578: 0x0080, 0x2579: 0x0080, 0x257a: 0x0080, 0x257b: 0x0080, + 0x257c: 0x0080, 0x257d: 0x0080, 0x257e: 0x0080, 0x257f: 0x0080, + // Block 0x96, offset 0x2580 + 0x2580: 0x0080, 0x2581: 0x0080, 0x2582: 0x0080, 0x2583: 0x0080, 0x2584: 0x0080, 0x2585: 0x0080, + 0x2586: 0x0080, + 0x2590: 0x00c0, 0x2591: 0x00c0, + 0x2592: 0x00c0, 0x2593: 0x00c0, 0x2594: 0x00c0, 0x2595: 0x00c0, 0x2596: 0x00c0, 0x2597: 0x00c0, + 0x2598: 0x00c0, 0x2599: 0x00c0, 0x259a: 0x00c0, 0x259b: 0x00c0, 0x259c: 0x00c0, 0x259d: 0x00c0, + 0x259e: 0x00c0, 0x259f: 0x00c0, 0x25a0: 0x00c0, 0x25a1: 0x00c0, 0x25a2: 0x00c0, 0x25a3: 0x00c0, + 0x25a4: 0x00c0, 0x25a5: 0x00c0, 0x25a6: 0x00c0, 0x25a7: 0x00c0, 0x25a8: 0x00c0, 0x25a9: 0x00c0, + 0x25aa: 0x00c0, 0x25ab: 0x00c0, 0x25ac: 0x00c0, 0x25ad: 0x00c0, 0x25ae: 0x00c0, 0x25af: 0x00c0, + 0x25b0: 0x00c0, 0x25b1: 0x00c0, 0x25b2: 0x00c0, 0x25b3: 0x00c0, 0x25b4: 0x00c0, 0x25b5: 0x00c0, + 0x25b6: 0x00c0, 0x25b7: 0x00c0, 0x25b8: 0x00c0, 0x25b9: 0x00c0, 0x25ba: 0x00c0, 0x25bb: 0x00c0, + 0x25bc: 0x00c0, 0x25bd: 0x00c0, 0x25be: 0x0080, 0x25bf: 0x0080, + // Block 0x97, offset 0x25c0 + 0x25c0: 0x00c0, 0x25c1: 0x00c0, 0x25c2: 0x00c0, 0x25c3: 0x00c0, 0x25c4: 0x00c0, 0x25c5: 0x00c0, + 0x25c6: 0x00c0, 0x25c7: 0x00c0, 0x25c8: 0x00c0, 0x25c9: 0x00c0, 0x25ca: 0x00c0, 0x25cb: 0x00c0, + 0x25cc: 0x00c0, 0x25cd: 0x0080, 0x25ce: 0x0080, 0x25cf: 0x0080, 0x25d0: 0x00c0, 0x25d1: 0x00c0, + 0x25d2: 0x00c0, 0x25d3: 0x00c0, 0x25d4: 0x00c0, 0x25d5: 0x00c0, 0x25d6: 0x00c0, 0x25d7: 0x00c0, + 0x25d8: 0x00c0, 0x25d9: 0x00c0, 0x25da: 0x00c0, 0x25db: 0x00c0, 0x25dc: 0x00c0, 0x25dd: 0x00c0, + 0x25de: 0x00c0, 0x25df: 0x00c0, 0x25e0: 0x00c0, 0x25e1: 0x00c0, 0x25e2: 0x00c0, 0x25e3: 0x00c0, + 0x25e4: 0x00c0, 0x25e5: 0x00c0, 0x25e6: 0x00c0, 0x25e7: 0x00c0, 0x25e8: 0x00c0, 0x25e9: 0x00c0, + 0x25ea: 0x00c0, 0x25eb: 0x00c0, + // Block 0x98, offset 0x2600 + 0x2600: 0x00c0, 0x2601: 0x00c0, 0x2602: 0x00c0, 0x2603: 0x00c0, 0x2604: 0x00c0, 0x2605: 0x00c0, + 0x2606: 0x00c0, 0x2607: 0x00c0, 0x2608: 0x00c0, 0x2609: 0x00c0, 0x260a: 0x00c0, 0x260b: 0x00c0, + 0x260c: 0x00c0, 0x260d: 0x00c0, 0x260e: 0x00c0, 0x260f: 0x00c0, 0x2610: 0x00c0, 0x2611: 0x00c0, + 0x2612: 0x00c0, 0x2613: 0x00c0, 0x2614: 0x00c0, 0x2615: 0x00c0, 0x2616: 0x00c0, 0x2617: 0x00c0, + 0x2618: 0x00c0, 0x2619: 0x00c0, 0x261a: 0x00c0, 0x261b: 0x00c0, 0x261c: 0x00c0, 0x261d: 0x00c0, + 0x261e: 0x00c0, 0x261f: 0x00c0, 0x2620: 0x00c0, 0x2621: 0x00c0, 0x2622: 0x00c0, 0x2623: 0x00c0, + 0x2624: 0x00c0, 0x2625: 0x00c0, 0x2626: 0x00c0, 0x2627: 0x00c0, 0x2628: 0x00c0, 0x2629: 0x00c0, + 0x262a: 0x00c0, 0x262b: 0x00c0, 0x262c: 0x00c0, 0x262d: 0x00c0, 0x262e: 0x00c0, 0x262f: 0x00c3, + 0x2630: 0x0083, 0x2631: 0x0083, 0x2632: 0x0083, 0x2633: 0x0080, 0x2634: 0x00c3, 0x2635: 0x00c3, + 0x2636: 0x00c3, 0x2637: 0x00c3, 0x2638: 0x00c3, 0x2639: 0x00c3, 0x263a: 0x00c3, 0x263b: 0x00c3, + 0x263c: 0x00c3, 0x263d: 0x00c3, 0x263e: 0x0080, 0x263f: 0x00c0, + // Block 0x99, offset 0x2640 + 0x2640: 0x00c0, 0x2641: 0x00c0, 0x2642: 0x00c0, 0x2643: 0x00c0, 0x2644: 0x00c0, 0x2645: 0x00c0, + 0x2646: 0x00c0, 0x2647: 0x00c0, 0x2648: 0x00c0, 0x2649: 0x00c0, 0x264a: 0x00c0, 0x264b: 0x00c0, + 0x264c: 0x00c0, 0x264d: 0x00c0, 0x264e: 0x00c0, 0x264f: 0x00c0, 0x2650: 0x00c0, 0x2651: 0x00c0, + 0x2652: 0x00c0, 0x2653: 0x00c0, 0x2654: 0x00c0, 0x2655: 0x00c0, 0x2656: 0x00c0, 0x2657: 0x00c0, + 0x2658: 0x00c0, 0x2659: 0x00c0, 0x265a: 0x00c0, 0x265b: 0x00c0, 
0x265c: 0x0080, 0x265d: 0x0080, + 0x265e: 0x00c3, 0x265f: 0x00c3, 0x2660: 0x00c0, 0x2661: 0x00c0, 0x2662: 0x00c0, 0x2663: 0x00c0, + 0x2664: 0x00c0, 0x2665: 0x00c0, 0x2666: 0x00c0, 0x2667: 0x00c0, 0x2668: 0x00c0, 0x2669: 0x00c0, + 0x266a: 0x00c0, 0x266b: 0x00c0, 0x266c: 0x00c0, 0x266d: 0x00c0, 0x266e: 0x00c0, 0x266f: 0x00c0, + 0x2670: 0x00c0, 0x2671: 0x00c0, 0x2672: 0x00c0, 0x2673: 0x00c0, 0x2674: 0x00c0, 0x2675: 0x00c0, + 0x2676: 0x00c0, 0x2677: 0x00c0, 0x2678: 0x00c0, 0x2679: 0x00c0, 0x267a: 0x00c0, 0x267b: 0x00c0, + 0x267c: 0x00c0, 0x267d: 0x00c0, 0x267e: 0x00c0, 0x267f: 0x00c0, + // Block 0x9a, offset 0x2680 + 0x2680: 0x00c0, 0x2681: 0x00c0, 0x2682: 0x00c0, 0x2683: 0x00c0, 0x2684: 0x00c0, 0x2685: 0x00c0, + 0x2686: 0x00c0, 0x2687: 0x00c0, 0x2688: 0x00c0, 0x2689: 0x00c0, 0x268a: 0x00c0, 0x268b: 0x00c0, + 0x268c: 0x00c0, 0x268d: 0x00c0, 0x268e: 0x00c0, 0x268f: 0x00c0, 0x2690: 0x00c0, 0x2691: 0x00c0, + 0x2692: 0x00c0, 0x2693: 0x00c0, 0x2694: 0x00c0, 0x2695: 0x00c0, 0x2696: 0x00c0, 0x2697: 0x00c0, + 0x2698: 0x00c0, 0x2699: 0x00c0, 0x269a: 0x00c0, 0x269b: 0x00c0, 0x269c: 0x00c0, 0x269d: 0x00c0, + 0x269e: 0x00c0, 0x269f: 0x00c0, 0x26a0: 0x00c0, 0x26a1: 0x00c0, 0x26a2: 0x00c0, 0x26a3: 0x00c0, + 0x26a4: 0x00c0, 0x26a5: 0x00c0, 0x26a6: 0x0080, 0x26a7: 0x0080, 0x26a8: 0x0080, 0x26a9: 0x0080, + 0x26aa: 0x0080, 0x26ab: 0x0080, 0x26ac: 0x0080, 0x26ad: 0x0080, 0x26ae: 0x0080, 0x26af: 0x0080, + 0x26b0: 0x00c3, 0x26b1: 0x00c3, 0x26b2: 0x0080, 0x26b3: 0x0080, 0x26b4: 0x0080, 0x26b5: 0x0080, + 0x26b6: 0x0080, 0x26b7: 0x0080, + // Block 0x9b, offset 0x26c0 + 0x26c0: 0x0080, 0x26c1: 0x0080, 0x26c2: 0x0080, 0x26c3: 0x0080, 0x26c4: 0x0080, 0x26c5: 0x0080, + 0x26c6: 0x0080, 0x26c7: 0x0080, 0x26c8: 0x0080, 0x26c9: 0x0080, 0x26ca: 0x0080, 0x26cb: 0x0080, + 0x26cc: 0x0080, 0x26cd: 0x0080, 0x26ce: 0x0080, 0x26cf: 0x0080, 0x26d0: 0x0080, 0x26d1: 0x0080, + 0x26d2: 0x0080, 0x26d3: 0x0080, 0x26d4: 0x0080, 0x26d5: 0x0080, 0x26d6: 0x0080, 0x26d7: 0x00c0, + 0x26d8: 0x00c0, 0x26d9: 0x00c0, 0x26da: 0x00c0, 0x26db: 0x00c0, 0x26dc: 0x00c0, 0x26dd: 0x00c0, + 0x26de: 0x00c0, 0x26df: 0x00c0, 0x26e0: 0x0080, 0x26e1: 0x0080, 0x26e2: 0x00c0, 0x26e3: 0x00c0, + 0x26e4: 0x00c0, 0x26e5: 0x00c0, 0x26e6: 0x00c0, 0x26e7: 0x00c0, 0x26e8: 0x00c0, 0x26e9: 0x00c0, + 0x26ea: 0x00c0, 0x26eb: 0x00c0, 0x26ec: 0x00c0, 0x26ed: 0x00c0, 0x26ee: 0x00c0, 0x26ef: 0x00c0, + 0x26f0: 0x00c0, 0x26f1: 0x00c0, 0x26f2: 0x00c0, 0x26f3: 0x00c0, 0x26f4: 0x00c0, 0x26f5: 0x00c0, + 0x26f6: 0x00c0, 0x26f7: 0x00c0, 0x26f8: 0x00c0, 0x26f9: 0x00c0, 0x26fa: 0x00c0, 0x26fb: 0x00c0, + 0x26fc: 0x00c0, 0x26fd: 0x00c0, 0x26fe: 0x00c0, 0x26ff: 0x00c0, + // Block 0x9c, offset 0x2700 + 0x2700: 0x00c0, 0x2701: 0x00c0, 0x2702: 0x00c0, 0x2703: 0x00c0, 0x2704: 0x00c0, 0x2705: 0x00c0, + 0x2706: 0x00c0, 0x2707: 0x00c0, 0x2708: 0x00c0, 0x2709: 0x00c0, 0x270a: 0x00c0, 0x270b: 0x00c0, + 0x270c: 0x00c0, 0x270d: 0x00c0, 0x270e: 0x00c0, 0x270f: 0x00c0, 0x2710: 0x00c0, 0x2711: 0x00c0, + 0x2712: 0x00c0, 0x2713: 0x00c0, 0x2714: 0x00c0, 0x2715: 0x00c0, 0x2716: 0x00c0, 0x2717: 0x00c0, + 0x2718: 0x00c0, 0x2719: 0x00c0, 0x271a: 0x00c0, 0x271b: 0x00c0, 0x271c: 0x00c0, 0x271d: 0x00c0, + 0x271e: 0x00c0, 0x271f: 0x00c0, 0x2720: 0x00c0, 0x2721: 0x00c0, 0x2722: 0x00c0, 0x2723: 0x00c0, + 0x2724: 0x00c0, 0x2725: 0x00c0, 0x2726: 0x00c0, 0x2727: 0x00c0, 0x2728: 0x00c0, 0x2729: 0x00c0, + 0x272a: 0x00c0, 0x272b: 0x00c0, 0x272c: 0x00c0, 0x272d: 0x00c0, 0x272e: 0x00c0, 0x272f: 0x00c0, + 0x2730: 0x0080, 0x2731: 0x00c0, 0x2732: 0x00c0, 0x2733: 0x00c0, 0x2734: 0x00c0, 0x2735: 0x00c0, + 0x2736: 0x00c0, 0x2737: 
0x00c0, 0x2738: 0x00c0, 0x2739: 0x00c0, 0x273a: 0x00c0, 0x273b: 0x00c0, + 0x273c: 0x00c0, 0x273d: 0x00c0, 0x273e: 0x00c0, 0x273f: 0x00c0, + // Block 0x9d, offset 0x2740 + 0x2740: 0x00c0, 0x2741: 0x00c0, 0x2742: 0x00c0, 0x2743: 0x00c0, 0x2744: 0x00c0, 0x2745: 0x00c0, + 0x2746: 0x00c0, 0x2747: 0x00c0, 0x2748: 0x00c0, 0x2749: 0x0080, 0x274a: 0x0080, 0x274b: 0x00c0, + 0x274c: 0x00c0, 0x274d: 0x00c0, 0x274e: 0x00c0, 0x274f: 0x00c0, 0x2750: 0x00c0, 0x2751: 0x00c0, + 0x2752: 0x00c0, 0x2753: 0x00c0, 0x2754: 0x00c0, 0x2755: 0x00c0, 0x2756: 0x00c0, 0x2757: 0x00c0, + 0x2758: 0x00c0, 0x2759: 0x00c0, 0x275a: 0x00c0, 0x275b: 0x00c0, 0x275c: 0x00c0, 0x275d: 0x00c0, + 0x275e: 0x00c0, 0x275f: 0x00c0, 0x2760: 0x00c0, 0x2761: 0x00c0, 0x2762: 0x00c0, 0x2763: 0x00c0, + 0x2764: 0x00c0, 0x2765: 0x00c0, 0x2766: 0x00c0, 0x2767: 0x00c0, 0x2768: 0x00c0, 0x2769: 0x00c0, + 0x276a: 0x00c0, 0x276b: 0x00c0, 0x276c: 0x00c0, 0x276d: 0x00c0, 0x276e: 0x00c0, + 0x2770: 0x00c0, 0x2771: 0x00c0, 0x2772: 0x00c0, 0x2773: 0x00c0, 0x2774: 0x00c0, 0x2775: 0x00c0, + 0x2776: 0x00c0, 0x2777: 0x00c0, + // Block 0x9e, offset 0x2780 + 0x27b7: 0x00c0, 0x27b8: 0x0080, 0x27b9: 0x0080, 0x27ba: 0x00c0, 0x27bb: 0x00c0, + 0x27bc: 0x00c0, 0x27bd: 0x00c0, 0x27be: 0x00c0, 0x27bf: 0x00c0, + // Block 0x9f, offset 0x27c0 + 0x27c0: 0x00c0, 0x27c1: 0x00c0, 0x27c2: 0x00c3, 0x27c3: 0x00c0, 0x27c4: 0x00c0, 0x27c5: 0x00c0, + 0x27c6: 0x00c6, 0x27c7: 0x00c0, 0x27c8: 0x00c0, 0x27c9: 0x00c0, 0x27ca: 0x00c0, 0x27cb: 0x00c3, + 0x27cc: 0x00c0, 0x27cd: 0x00c0, 0x27ce: 0x00c0, 0x27cf: 0x00c0, 0x27d0: 0x00c0, 0x27d1: 0x00c0, + 0x27d2: 0x00c0, 0x27d3: 0x00c0, 0x27d4: 0x00c0, 0x27d5: 0x00c0, 0x27d6: 0x00c0, 0x27d7: 0x00c0, + 0x27d8: 0x00c0, 0x27d9: 0x00c0, 0x27da: 0x00c0, 0x27db: 0x00c0, 0x27dc: 0x00c0, 0x27dd: 0x00c0, + 0x27de: 0x00c0, 0x27df: 0x00c0, 0x27e0: 0x00c0, 0x27e1: 0x00c0, 0x27e2: 0x00c0, 0x27e3: 0x00c0, + 0x27e4: 0x00c0, 0x27e5: 0x00c3, 0x27e6: 0x00c3, 0x27e7: 0x00c0, 0x27e8: 0x0080, 0x27e9: 0x0080, + 0x27ea: 0x0080, 0x27eb: 0x0080, + 0x27f0: 0x0080, 0x27f1: 0x0080, 0x27f2: 0x0080, 0x27f3: 0x0080, 0x27f4: 0x0080, 0x27f5: 0x0080, + 0x27f6: 0x0080, 0x27f7: 0x0080, 0x27f8: 0x0080, 0x27f9: 0x0080, + // Block 0xa0, offset 0x2800 + 0x2800: 0x00c2, 0x2801: 0x00c2, 0x2802: 0x00c2, 0x2803: 0x00c2, 0x2804: 0x00c2, 0x2805: 0x00c2, + 0x2806: 0x00c2, 0x2807: 0x00c2, 0x2808: 0x00c2, 0x2809: 0x00c2, 0x280a: 0x00c2, 0x280b: 0x00c2, + 0x280c: 0x00c2, 0x280d: 0x00c2, 0x280e: 0x00c2, 0x280f: 0x00c2, 0x2810: 0x00c2, 0x2811: 0x00c2, + 0x2812: 0x00c2, 0x2813: 0x00c2, 0x2814: 0x00c2, 0x2815: 0x00c2, 0x2816: 0x00c2, 0x2817: 0x00c2, + 0x2818: 0x00c2, 0x2819: 0x00c2, 0x281a: 0x00c2, 0x281b: 0x00c2, 0x281c: 0x00c2, 0x281d: 0x00c2, + 0x281e: 0x00c2, 0x281f: 0x00c2, 0x2820: 0x00c2, 0x2821: 0x00c2, 0x2822: 0x00c2, 0x2823: 0x00c2, + 0x2824: 0x00c2, 0x2825: 0x00c2, 0x2826: 0x00c2, 0x2827: 0x00c2, 0x2828: 0x00c2, 0x2829: 0x00c2, + 0x282a: 0x00c2, 0x282b: 0x00c2, 0x282c: 0x00c2, 0x282d: 0x00c2, 0x282e: 0x00c2, 0x282f: 0x00c2, + 0x2830: 0x00c2, 0x2831: 0x00c2, 0x2832: 0x00c1, 0x2833: 0x00c0, 0x2834: 0x0080, 0x2835: 0x0080, + 0x2836: 0x0080, 0x2837: 0x0080, + // Block 0xa1, offset 0x2840 + 0x2840: 0x00c0, 0x2841: 0x00c0, 0x2842: 0x00c0, 0x2843: 0x00c0, 0x2844: 0x00c6, 0x2845: 0x00c3, + 0x284e: 0x0080, 0x284f: 0x0080, 0x2850: 0x00c0, 0x2851: 0x00c0, + 0x2852: 0x00c0, 0x2853: 0x00c0, 0x2854: 0x00c0, 0x2855: 0x00c0, 0x2856: 0x00c0, 0x2857: 0x00c0, + 0x2858: 0x00c0, 0x2859: 0x00c0, + 0x2860: 0x00c3, 0x2861: 0x00c3, 0x2862: 0x00c3, 0x2863: 0x00c3, + 0x2864: 0x00c3, 0x2865: 0x00c3, 0x2866: 0x00c3, 
0x2867: 0x00c3, 0x2868: 0x00c3, 0x2869: 0x00c3, + 0x286a: 0x00c3, 0x286b: 0x00c3, 0x286c: 0x00c3, 0x286d: 0x00c3, 0x286e: 0x00c3, 0x286f: 0x00c3, + 0x2870: 0x00c3, 0x2871: 0x00c3, 0x2872: 0x00c0, 0x2873: 0x00c0, 0x2874: 0x00c0, 0x2875: 0x00c0, + 0x2876: 0x00c0, 0x2877: 0x00c0, 0x2878: 0x0080, 0x2879: 0x0080, 0x287a: 0x0080, 0x287b: 0x00c0, + 0x287c: 0x0080, 0x287d: 0x00c0, + // Block 0xa2, offset 0x2880 + 0x2880: 0x00c0, 0x2881: 0x00c0, 0x2882: 0x00c0, 0x2883: 0x00c0, 0x2884: 0x00c0, 0x2885: 0x00c0, + 0x2886: 0x00c0, 0x2887: 0x00c0, 0x2888: 0x00c0, 0x2889: 0x00c0, 0x288a: 0x00c0, 0x288b: 0x00c0, + 0x288c: 0x00c0, 0x288d: 0x00c0, 0x288e: 0x00c0, 0x288f: 0x00c0, 0x2890: 0x00c0, 0x2891: 0x00c0, + 0x2892: 0x00c0, 0x2893: 0x00c0, 0x2894: 0x00c0, 0x2895: 0x00c0, 0x2896: 0x00c0, 0x2897: 0x00c0, + 0x2898: 0x00c0, 0x2899: 0x00c0, 0x289a: 0x00c0, 0x289b: 0x00c0, 0x289c: 0x00c0, 0x289d: 0x00c0, + 0x289e: 0x00c0, 0x289f: 0x00c0, 0x28a0: 0x00c0, 0x28a1: 0x00c0, 0x28a2: 0x00c0, 0x28a3: 0x00c0, + 0x28a4: 0x00c0, 0x28a5: 0x00c0, 0x28a6: 0x00c3, 0x28a7: 0x00c3, 0x28a8: 0x00c3, 0x28a9: 0x00c3, + 0x28aa: 0x00c3, 0x28ab: 0x00c3, 0x28ac: 0x00c3, 0x28ad: 0x00c3, 0x28ae: 0x0080, 0x28af: 0x0080, + 0x28b0: 0x00c0, 0x28b1: 0x00c0, 0x28b2: 0x00c0, 0x28b3: 0x00c0, 0x28b4: 0x00c0, 0x28b5: 0x00c0, + 0x28b6: 0x00c0, 0x28b7: 0x00c0, 0x28b8: 0x00c0, 0x28b9: 0x00c0, 0x28ba: 0x00c0, 0x28bb: 0x00c0, + 0x28bc: 0x00c0, 0x28bd: 0x00c0, 0x28be: 0x00c0, 0x28bf: 0x00c0, + // Block 0xa3, offset 0x28c0 + 0x28c0: 0x00c0, 0x28c1: 0x00c0, 0x28c2: 0x00c0, 0x28c3: 0x00c0, 0x28c4: 0x00c0, 0x28c5: 0x00c0, + 0x28c6: 0x00c0, 0x28c7: 0x00c3, 0x28c8: 0x00c3, 0x28c9: 0x00c3, 0x28ca: 0x00c3, 0x28cb: 0x00c3, + 0x28cc: 0x00c3, 0x28cd: 0x00c3, 0x28ce: 0x00c3, 0x28cf: 0x00c3, 0x28d0: 0x00c3, 0x28d1: 0x00c3, + 0x28d2: 0x00c0, 0x28d3: 0x00c5, + 0x28df: 0x0080, 0x28e0: 0x0040, 0x28e1: 0x0040, 0x28e2: 0x0040, 0x28e3: 0x0040, + 0x28e4: 0x0040, 0x28e5: 0x0040, 0x28e6: 0x0040, 0x28e7: 0x0040, 0x28e8: 0x0040, 0x28e9: 0x0040, + 0x28ea: 0x0040, 0x28eb: 0x0040, 0x28ec: 0x0040, 0x28ed: 0x0040, 0x28ee: 0x0040, 0x28ef: 0x0040, + 0x28f0: 0x0040, 0x28f1: 0x0040, 0x28f2: 0x0040, 0x28f3: 0x0040, 0x28f4: 0x0040, 0x28f5: 0x0040, + 0x28f6: 0x0040, 0x28f7: 0x0040, 0x28f8: 0x0040, 0x28f9: 0x0040, 0x28fa: 0x0040, 0x28fb: 0x0040, + 0x28fc: 0x0040, + // Block 0xa4, offset 0x2900 + 0x2900: 0x00c3, 0x2901: 0x00c3, 0x2902: 0x00c3, 0x2903: 0x00c0, 0x2904: 0x00c0, 0x2905: 0x00c0, + 0x2906: 0x00c0, 0x2907: 0x00c0, 0x2908: 0x00c0, 0x2909: 0x00c0, 0x290a: 0x00c0, 0x290b: 0x00c0, + 0x290c: 0x00c0, 0x290d: 0x00c0, 0x290e: 0x00c0, 0x290f: 0x00c0, 0x2910: 0x00c0, 0x2911: 0x00c0, + 0x2912: 0x00c0, 0x2913: 0x00c0, 0x2914: 0x00c0, 0x2915: 0x00c0, 0x2916: 0x00c0, 0x2917: 0x00c0, + 0x2918: 0x00c0, 0x2919: 0x00c0, 0x291a: 0x00c0, 0x291b: 0x00c0, 0x291c: 0x00c0, 0x291d: 0x00c0, + 0x291e: 0x00c0, 0x291f: 0x00c0, 0x2920: 0x00c0, 0x2921: 0x00c0, 0x2922: 0x00c0, 0x2923: 0x00c0, + 0x2924: 0x00c0, 0x2925: 0x00c0, 0x2926: 0x00c0, 0x2927: 0x00c0, 0x2928: 0x00c0, 0x2929: 0x00c0, + 0x292a: 0x00c0, 0x292b: 0x00c0, 0x292c: 0x00c0, 0x292d: 0x00c0, 0x292e: 0x00c0, 0x292f: 0x00c0, + 0x2930: 0x00c0, 0x2931: 0x00c0, 0x2932: 0x00c0, 0x2933: 0x00c3, 0x2934: 0x00c0, 0x2935: 0x00c0, + 0x2936: 0x00c3, 0x2937: 0x00c3, 0x2938: 0x00c3, 0x2939: 0x00c3, 0x293a: 0x00c0, 0x293b: 0x00c0, + 0x293c: 0x00c3, 0x293d: 0x00c0, 0x293e: 0x00c0, 0x293f: 0x00c0, + // Block 0xa5, offset 0x2940 + 0x2940: 0x00c5, 0x2941: 0x0080, 0x2942: 0x0080, 0x2943: 0x0080, 0x2944: 0x0080, 0x2945: 0x0080, + 0x2946: 0x0080, 0x2947: 0x0080, 0x2948: 
0x0080, 0x2949: 0x0080, 0x294a: 0x0080, 0x294b: 0x0080, + 0x294c: 0x0080, 0x294d: 0x0080, 0x294f: 0x00c0, 0x2950: 0x00c0, 0x2951: 0x00c0, + 0x2952: 0x00c0, 0x2953: 0x00c0, 0x2954: 0x00c0, 0x2955: 0x00c0, 0x2956: 0x00c0, 0x2957: 0x00c0, + 0x2958: 0x00c0, 0x2959: 0x00c0, + 0x295e: 0x0080, 0x295f: 0x0080, 0x2960: 0x00c0, 0x2961: 0x00c0, 0x2962: 0x00c0, 0x2963: 0x00c0, + 0x2964: 0x00c0, 0x2965: 0x00c3, 0x2966: 0x00c0, 0x2967: 0x00c0, 0x2968: 0x00c0, 0x2969: 0x00c0, + 0x296a: 0x00c0, 0x296b: 0x00c0, 0x296c: 0x00c0, 0x296d: 0x00c0, 0x296e: 0x00c0, 0x296f: 0x00c0, + 0x2970: 0x00c0, 0x2971: 0x00c0, 0x2972: 0x00c0, 0x2973: 0x00c0, 0x2974: 0x00c0, 0x2975: 0x00c0, + 0x2976: 0x00c0, 0x2977: 0x00c0, 0x2978: 0x00c0, 0x2979: 0x00c0, 0x297a: 0x00c0, 0x297b: 0x00c0, + 0x297c: 0x00c0, 0x297d: 0x00c0, 0x297e: 0x00c0, + // Block 0xa6, offset 0x2980 + 0x2980: 0x00c0, 0x2981: 0x00c0, 0x2982: 0x00c0, 0x2983: 0x00c0, 0x2984: 0x00c0, 0x2985: 0x00c0, + 0x2986: 0x00c0, 0x2987: 0x00c0, 0x2988: 0x00c0, 0x2989: 0x00c0, 0x298a: 0x00c0, 0x298b: 0x00c0, + 0x298c: 0x00c0, 0x298d: 0x00c0, 0x298e: 0x00c0, 0x298f: 0x00c0, 0x2990: 0x00c0, 0x2991: 0x00c0, + 0x2992: 0x00c0, 0x2993: 0x00c0, 0x2994: 0x00c0, 0x2995: 0x00c0, 0x2996: 0x00c0, 0x2997: 0x00c0, + 0x2998: 0x00c0, 0x2999: 0x00c0, 0x299a: 0x00c0, 0x299b: 0x00c0, 0x299c: 0x00c0, 0x299d: 0x00c0, + 0x299e: 0x00c0, 0x299f: 0x00c0, 0x29a0: 0x00c0, 0x29a1: 0x00c0, 0x29a2: 0x00c0, 0x29a3: 0x00c0, + 0x29a4: 0x00c0, 0x29a5: 0x00c0, 0x29a6: 0x00c0, 0x29a7: 0x00c0, 0x29a8: 0x00c0, 0x29a9: 0x00c3, + 0x29aa: 0x00c3, 0x29ab: 0x00c3, 0x29ac: 0x00c3, 0x29ad: 0x00c3, 0x29ae: 0x00c3, 0x29af: 0x00c0, + 0x29b0: 0x00c0, 0x29b1: 0x00c3, 0x29b2: 0x00c3, 0x29b3: 0x00c0, 0x29b4: 0x00c0, 0x29b5: 0x00c3, + 0x29b6: 0x00c3, + // Block 0xa7, offset 0x29c0 + 0x29c0: 0x00c0, 0x29c1: 0x00c0, 0x29c2: 0x00c0, 0x29c3: 0x00c3, 0x29c4: 0x00c0, 0x29c5: 0x00c0, + 0x29c6: 0x00c0, 0x29c7: 0x00c0, 0x29c8: 0x00c0, 0x29c9: 0x00c0, 0x29ca: 0x00c0, 0x29cb: 0x00c0, + 0x29cc: 0x00c3, 0x29cd: 0x00c0, 0x29d0: 0x00c0, 0x29d1: 0x00c0, + 0x29d2: 0x00c0, 0x29d3: 0x00c0, 0x29d4: 0x00c0, 0x29d5: 0x00c0, 0x29d6: 0x00c0, 0x29d7: 0x00c0, + 0x29d8: 0x00c0, 0x29d9: 0x00c0, 0x29dc: 0x0080, 0x29dd: 0x0080, + 0x29de: 0x0080, 0x29df: 0x0080, 0x29e0: 0x00c0, 0x29e1: 0x00c0, 0x29e2: 0x00c0, 0x29e3: 0x00c0, + 0x29e4: 0x00c0, 0x29e5: 0x00c0, 0x29e6: 0x00c0, 0x29e7: 0x00c0, 0x29e8: 0x00c0, 0x29e9: 0x00c0, + 0x29ea: 0x00c0, 0x29eb: 0x00c0, 0x29ec: 0x00c0, 0x29ed: 0x00c0, 0x29ee: 0x00c0, 0x29ef: 0x00c0, + 0x29f0: 0x00c0, 0x29f1: 0x00c0, 0x29f2: 0x00c0, 0x29f3: 0x00c0, 0x29f4: 0x00c0, 0x29f5: 0x00c0, + 0x29f6: 0x00c0, 0x29f7: 0x0080, 0x29f8: 0x0080, 0x29f9: 0x0080, 0x29fa: 0x00c0, 0x29fb: 0x00c0, + 0x29fc: 0x00c3, 0x29fd: 0x00c0, 0x29fe: 0x00c0, 0x29ff: 0x00c0, + // Block 0xa8, offset 0x2a00 + 0x2a00: 0x00c0, 0x2a01: 0x00c0, 0x2a02: 0x00c0, 0x2a03: 0x00c0, 0x2a04: 0x00c0, 0x2a05: 0x00c0, + 0x2a06: 0x00c0, 0x2a07: 0x00c0, 0x2a08: 0x00c0, 0x2a09: 0x00c0, 0x2a0a: 0x00c0, 0x2a0b: 0x00c0, + 0x2a0c: 0x00c0, 0x2a0d: 0x00c0, 0x2a0e: 0x00c0, 0x2a0f: 0x00c0, 0x2a10: 0x00c0, 0x2a11: 0x00c0, + 0x2a12: 0x00c0, 0x2a13: 0x00c0, 0x2a14: 0x00c0, 0x2a15: 0x00c0, 0x2a16: 0x00c0, 0x2a17: 0x00c0, + 0x2a18: 0x00c0, 0x2a19: 0x00c0, 0x2a1a: 0x00c0, 0x2a1b: 0x00c0, 0x2a1c: 0x00c0, 0x2a1d: 0x00c0, + 0x2a1e: 0x00c0, 0x2a1f: 0x00c0, 0x2a20: 0x00c0, 0x2a21: 0x00c0, 0x2a22: 0x00c0, 0x2a23: 0x00c0, + 0x2a24: 0x00c0, 0x2a25: 0x00c0, 0x2a26: 0x00c0, 0x2a27: 0x00c0, 0x2a28: 0x00c0, 0x2a29: 0x00c0, + 0x2a2a: 0x00c0, 0x2a2b: 0x00c0, 0x2a2c: 0x00c0, 0x2a2d: 0x00c0, 0x2a2e: 0x00c0, 
0x2a2f: 0x00c0, + 0x2a30: 0x00c3, 0x2a31: 0x00c0, 0x2a32: 0x00c3, 0x2a33: 0x00c3, 0x2a34: 0x00c3, 0x2a35: 0x00c0, + 0x2a36: 0x00c0, 0x2a37: 0x00c3, 0x2a38: 0x00c3, 0x2a39: 0x00c0, 0x2a3a: 0x00c0, 0x2a3b: 0x00c0, + 0x2a3c: 0x00c0, 0x2a3d: 0x00c0, 0x2a3e: 0x00c3, 0x2a3f: 0x00c3, + // Block 0xa9, offset 0x2a40 + 0x2a40: 0x00c0, 0x2a41: 0x00c3, 0x2a42: 0x00c0, + 0x2a5b: 0x00c0, 0x2a5c: 0x00c0, 0x2a5d: 0x00c0, + 0x2a5e: 0x0080, 0x2a5f: 0x0080, 0x2a60: 0x00c0, 0x2a61: 0x00c0, 0x2a62: 0x00c0, 0x2a63: 0x00c0, + 0x2a64: 0x00c0, 0x2a65: 0x00c0, 0x2a66: 0x00c0, 0x2a67: 0x00c0, 0x2a68: 0x00c0, 0x2a69: 0x00c0, + 0x2a6a: 0x00c0, 0x2a6b: 0x00c0, 0x2a6c: 0x00c3, 0x2a6d: 0x00c3, 0x2a6e: 0x00c0, 0x2a6f: 0x00c0, + 0x2a70: 0x0080, 0x2a71: 0x0080, 0x2a72: 0x00c0, 0x2a73: 0x00c0, 0x2a74: 0x00c0, 0x2a75: 0x00c0, + 0x2a76: 0x00c6, + // Block 0xaa, offset 0x2a80 + 0x2a81: 0x00c0, 0x2a82: 0x00c0, 0x2a83: 0x00c0, 0x2a84: 0x00c0, 0x2a85: 0x00c0, + 0x2a86: 0x00c0, 0x2a89: 0x00c0, 0x2a8a: 0x00c0, 0x2a8b: 0x00c0, + 0x2a8c: 0x00c0, 0x2a8d: 0x00c0, 0x2a8e: 0x00c0, 0x2a91: 0x00c0, + 0x2a92: 0x00c0, 0x2a93: 0x00c0, 0x2a94: 0x00c0, 0x2a95: 0x00c0, 0x2a96: 0x00c0, + 0x2aa0: 0x00c0, 0x2aa1: 0x00c0, 0x2aa2: 0x00c0, 0x2aa3: 0x00c0, + 0x2aa4: 0x00c0, 0x2aa5: 0x00c0, 0x2aa6: 0x00c0, 0x2aa8: 0x00c0, 0x2aa9: 0x00c0, + 0x2aaa: 0x00c0, 0x2aab: 0x00c0, 0x2aac: 0x00c0, 0x2aad: 0x00c0, 0x2aae: 0x00c0, + 0x2ab0: 0x00c0, 0x2ab1: 0x00c0, 0x2ab2: 0x00c0, 0x2ab3: 0x00c0, 0x2ab4: 0x00c0, 0x2ab5: 0x00c0, + 0x2ab6: 0x00c0, 0x2ab7: 0x00c0, 0x2ab8: 0x00c0, 0x2ab9: 0x00c0, 0x2aba: 0x00c0, 0x2abb: 0x00c0, + 0x2abc: 0x00c0, 0x2abd: 0x00c0, 0x2abe: 0x00c0, 0x2abf: 0x00c0, + // Block 0xab, offset 0x2ac0 + 0x2ac0: 0x00c0, 0x2ac1: 0x00c0, 0x2ac2: 0x00c0, 0x2ac3: 0x00c0, 0x2ac4: 0x00c0, 0x2ac5: 0x00c0, + 0x2ac6: 0x00c0, 0x2ac7: 0x00c0, 0x2ac8: 0x00c0, 0x2ac9: 0x00c0, 0x2aca: 0x00c0, 0x2acb: 0x00c0, + 0x2acc: 0x00c0, 0x2acd: 0x00c0, 0x2ace: 0x00c0, 0x2acf: 0x00c0, 0x2ad0: 0x00c0, 0x2ad1: 0x00c0, + 0x2ad2: 0x00c0, 0x2ad3: 0x00c0, 0x2ad4: 0x00c0, 0x2ad5: 0x00c0, 0x2ad6: 0x00c0, 0x2ad7: 0x00c0, + 0x2ad8: 0x00c0, 0x2ad9: 0x00c0, 0x2ada: 0x00c0, 0x2adb: 0x0080, 0x2adc: 0x0080, 0x2add: 0x0080, + 0x2ade: 0x0080, 0x2adf: 0x0080, 0x2ae0: 0x00c0, 0x2ae1: 0x00c0, 0x2ae2: 0x00c0, 0x2ae3: 0x00c0, + 0x2ae4: 0x00c0, 0x2ae5: 0x00c8, + 0x2af0: 0x00c0, 0x2af1: 0x00c0, 0x2af2: 0x00c0, 0x2af3: 0x00c0, 0x2af4: 0x00c0, 0x2af5: 0x00c0, + 0x2af6: 0x00c0, 0x2af7: 0x00c0, 0x2af8: 0x00c0, 0x2af9: 0x00c0, 0x2afa: 0x00c0, 0x2afb: 0x00c0, + 0x2afc: 0x00c0, 0x2afd: 0x00c0, 0x2afe: 0x00c0, 0x2aff: 0x00c0, + // Block 0xac, offset 0x2b00 + 0x2b00: 0x00c0, 0x2b01: 0x00c0, 0x2b02: 0x00c0, 0x2b03: 0x00c0, 0x2b04: 0x00c0, 0x2b05: 0x00c0, + 0x2b06: 0x00c0, 0x2b07: 0x00c0, 0x2b08: 0x00c0, 0x2b09: 0x00c0, 0x2b0a: 0x00c0, 0x2b0b: 0x00c0, + 0x2b0c: 0x00c0, 0x2b0d: 0x00c0, 0x2b0e: 0x00c0, 0x2b0f: 0x00c0, 0x2b10: 0x00c0, 0x2b11: 0x00c0, + 0x2b12: 0x00c0, 0x2b13: 0x00c0, 0x2b14: 0x00c0, 0x2b15: 0x00c0, 0x2b16: 0x00c0, 0x2b17: 0x00c0, + 0x2b18: 0x00c0, 0x2b19: 0x00c0, 0x2b1a: 0x00c0, 0x2b1b: 0x00c0, 0x2b1c: 0x00c0, 0x2b1d: 0x00c0, + 0x2b1e: 0x00c0, 0x2b1f: 0x00c0, 0x2b20: 0x00c0, 0x2b21: 0x00c0, 0x2b22: 0x00c0, 0x2b23: 0x00c0, + 0x2b24: 0x00c0, 0x2b25: 0x00c3, 0x2b26: 0x00c0, 0x2b27: 0x00c0, 0x2b28: 0x00c3, 0x2b29: 0x00c0, + 0x2b2a: 0x00c0, 0x2b2b: 0x0080, 0x2b2c: 0x00c0, 0x2b2d: 0x00c6, + 0x2b30: 0x00c0, 0x2b31: 0x00c0, 0x2b32: 0x00c0, 0x2b33: 0x00c0, 0x2b34: 0x00c0, 0x2b35: 0x00c0, + 0x2b36: 0x00c0, 0x2b37: 0x00c0, 0x2b38: 0x00c0, 0x2b39: 0x00c0, + // Block 0xad, offset 0x2b40 + 0x2b40: 0x00c0, 
0x2b41: 0x00c0, 0x2b42: 0x00c0, 0x2b43: 0x00c0, 0x2b44: 0x00c0, 0x2b45: 0x00c0, + 0x2b46: 0x00c0, 0x2b47: 0x00c0, 0x2b48: 0x00c0, 0x2b49: 0x00c0, 0x2b4a: 0x00c0, 0x2b4b: 0x00c0, + 0x2b4c: 0x00c0, 0x2b4d: 0x00c0, 0x2b4e: 0x00c0, 0x2b4f: 0x00c0, 0x2b50: 0x00c0, 0x2b51: 0x00c0, + 0x2b52: 0x00c0, 0x2b53: 0x00c0, 0x2b54: 0x00c0, 0x2b55: 0x00c0, 0x2b56: 0x00c0, 0x2b57: 0x00c0, + 0x2b58: 0x00c0, 0x2b59: 0x00c0, 0x2b5a: 0x00c0, 0x2b5b: 0x00c0, 0x2b5c: 0x00c0, 0x2b5d: 0x00c0, + 0x2b5e: 0x00c0, 0x2b5f: 0x00c0, 0x2b60: 0x00c0, 0x2b61: 0x00c0, 0x2b62: 0x00c0, 0x2b63: 0x00c0, + 0x2b70: 0x0040, 0x2b71: 0x0040, 0x2b72: 0x0040, 0x2b73: 0x0040, 0x2b74: 0x0040, 0x2b75: 0x0040, + 0x2b76: 0x0040, 0x2b77: 0x0040, 0x2b78: 0x0040, 0x2b79: 0x0040, 0x2b7a: 0x0040, 0x2b7b: 0x0040, + 0x2b7c: 0x0040, 0x2b7d: 0x0040, 0x2b7e: 0x0040, 0x2b7f: 0x0040, + // Block 0xae, offset 0x2b80 + 0x2b80: 0x0040, 0x2b81: 0x0040, 0x2b82: 0x0040, 0x2b83: 0x0040, 0x2b84: 0x0040, 0x2b85: 0x0040, + 0x2b86: 0x0040, 0x2b8b: 0x0040, + 0x2b8c: 0x0040, 0x2b8d: 0x0040, 0x2b8e: 0x0040, 0x2b8f: 0x0040, 0x2b90: 0x0040, 0x2b91: 0x0040, + 0x2b92: 0x0040, 0x2b93: 0x0040, 0x2b94: 0x0040, 0x2b95: 0x0040, 0x2b96: 0x0040, 0x2b97: 0x0040, + 0x2b98: 0x0040, 0x2b99: 0x0040, 0x2b9a: 0x0040, 0x2b9b: 0x0040, 0x2b9c: 0x0040, 0x2b9d: 0x0040, + 0x2b9e: 0x0040, 0x2b9f: 0x0040, 0x2ba0: 0x0040, 0x2ba1: 0x0040, 0x2ba2: 0x0040, 0x2ba3: 0x0040, + 0x2ba4: 0x0040, 0x2ba5: 0x0040, 0x2ba6: 0x0040, 0x2ba7: 0x0040, 0x2ba8: 0x0040, 0x2ba9: 0x0040, + 0x2baa: 0x0040, 0x2bab: 0x0040, 0x2bac: 0x0040, 0x2bad: 0x0040, 0x2bae: 0x0040, 0x2baf: 0x0040, + 0x2bb0: 0x0040, 0x2bb1: 0x0040, 0x2bb2: 0x0040, 0x2bb3: 0x0040, 0x2bb4: 0x0040, 0x2bb5: 0x0040, + 0x2bb6: 0x0040, 0x2bb7: 0x0040, 0x2bb8: 0x0040, 0x2bb9: 0x0040, 0x2bba: 0x0040, 0x2bbb: 0x0040, + // Block 0xaf, offset 0x2bc0 + 0x2bc0: 0x008c, 0x2bc1: 0x008c, 0x2bc2: 0x008c, 0x2bc3: 0x008c, 0x2bc4: 0x008c, 0x2bc5: 0x008c, + 0x2bc6: 0x008c, 0x2bc7: 0x008c, 0x2bc8: 0x008c, 0x2bc9: 0x008c, 0x2bca: 0x008c, 0x2bcb: 0x008c, + 0x2bcc: 0x008c, 0x2bcd: 0x008c, 0x2bce: 0x00cc, 0x2bcf: 0x00cc, 0x2bd0: 0x008c, 0x2bd1: 0x00cc, + 0x2bd2: 0x008c, 0x2bd3: 0x00cc, 0x2bd4: 0x00cc, 0x2bd5: 0x008c, 0x2bd6: 0x008c, 0x2bd7: 0x008c, + 0x2bd8: 0x008c, 0x2bd9: 0x008c, 0x2bda: 0x008c, 0x2bdb: 0x008c, 0x2bdc: 0x008c, 0x2bdd: 0x008c, + 0x2bde: 0x008c, 0x2bdf: 0x00cc, 0x2be0: 0x008c, 0x2be1: 0x00cc, 0x2be2: 0x008c, 0x2be3: 0x00cc, + 0x2be4: 0x00cc, 0x2be5: 0x008c, 0x2be6: 0x008c, 0x2be7: 0x00cc, 0x2be8: 0x00cc, 0x2be9: 0x00cc, + 0x2bea: 0x008c, 0x2beb: 0x008c, 0x2bec: 0x008c, 0x2bed: 0x008c, 0x2bee: 0x008c, 0x2bef: 0x008c, + 0x2bf0: 0x008c, 0x2bf1: 0x008c, 0x2bf2: 0x008c, 0x2bf3: 0x008c, 0x2bf4: 0x008c, 0x2bf5: 0x008c, + 0x2bf6: 0x008c, 0x2bf7: 0x008c, 0x2bf8: 0x008c, 0x2bf9: 0x008c, 0x2bfa: 0x008c, 0x2bfb: 0x008c, + 0x2bfc: 0x008c, 0x2bfd: 0x008c, 0x2bfe: 0x008c, 0x2bff: 0x008c, + // Block 0xb0, offset 0x2c00 + 0x2c00: 0x008c, 0x2c01: 0x008c, 0x2c02: 0x008c, 0x2c03: 0x008c, 0x2c04: 0x008c, 0x2c05: 0x008c, + 0x2c06: 0x008c, 0x2c07: 0x008c, 0x2c08: 0x008c, 0x2c09: 0x008c, 0x2c0a: 0x008c, 0x2c0b: 0x008c, + 0x2c0c: 0x008c, 0x2c0d: 0x008c, 0x2c0e: 0x008c, 0x2c0f: 0x008c, 0x2c10: 0x008c, 0x2c11: 0x008c, + 0x2c12: 0x008c, 0x2c13: 0x008c, 0x2c14: 0x008c, 0x2c15: 0x008c, 0x2c16: 0x008c, 0x2c17: 0x008c, + 0x2c18: 0x008c, 0x2c19: 0x008c, 0x2c1a: 0x008c, 0x2c1b: 0x008c, 0x2c1c: 0x008c, 0x2c1d: 0x008c, + 0x2c1e: 0x008c, 0x2c1f: 0x008c, 0x2c20: 0x008c, 0x2c21: 0x008c, 0x2c22: 0x008c, 0x2c23: 0x008c, + 0x2c24: 0x008c, 0x2c25: 0x008c, 0x2c26: 0x008c, 0x2c27: 0x008c, 0x2c28: 
0x008c, 0x2c29: 0x008c, + 0x2c2a: 0x008c, 0x2c2b: 0x008c, 0x2c2c: 0x008c, 0x2c2d: 0x008c, + 0x2c30: 0x008c, 0x2c31: 0x008c, 0x2c32: 0x008c, 0x2c33: 0x008c, 0x2c34: 0x008c, 0x2c35: 0x008c, + 0x2c36: 0x008c, 0x2c37: 0x008c, 0x2c38: 0x008c, 0x2c39: 0x008c, 0x2c3a: 0x008c, 0x2c3b: 0x008c, + 0x2c3c: 0x008c, 0x2c3d: 0x008c, 0x2c3e: 0x008c, 0x2c3f: 0x008c, + // Block 0xb1, offset 0x2c40 + 0x2c40: 0x008c, 0x2c41: 0x008c, 0x2c42: 0x008c, 0x2c43: 0x008c, 0x2c44: 0x008c, 0x2c45: 0x008c, + 0x2c46: 0x008c, 0x2c47: 0x008c, 0x2c48: 0x008c, 0x2c49: 0x008c, 0x2c4a: 0x008c, 0x2c4b: 0x008c, + 0x2c4c: 0x008c, 0x2c4d: 0x008c, 0x2c4e: 0x008c, 0x2c4f: 0x008c, 0x2c50: 0x008c, 0x2c51: 0x008c, + 0x2c52: 0x008c, 0x2c53: 0x008c, 0x2c54: 0x008c, 0x2c55: 0x008c, 0x2c56: 0x008c, 0x2c57: 0x008c, + 0x2c58: 0x008c, 0x2c59: 0x008c, + // Block 0xb2, offset 0x2c80 + 0x2c80: 0x0080, 0x2c81: 0x0080, 0x2c82: 0x0080, 0x2c83: 0x0080, 0x2c84: 0x0080, 0x2c85: 0x0080, + 0x2c86: 0x0080, + 0x2c93: 0x0080, 0x2c94: 0x0080, 0x2c95: 0x0080, 0x2c96: 0x0080, 0x2c97: 0x0080, + 0x2c9d: 0x008a, + 0x2c9e: 0x00cb, 0x2c9f: 0x008a, 0x2ca0: 0x008a, 0x2ca1: 0x008a, 0x2ca2: 0x008a, 0x2ca3: 0x008a, + 0x2ca4: 0x008a, 0x2ca5: 0x008a, 0x2ca6: 0x008a, 0x2ca7: 0x008a, 0x2ca8: 0x008a, 0x2ca9: 0x008a, + 0x2caa: 0x008a, 0x2cab: 0x008a, 0x2cac: 0x008a, 0x2cad: 0x008a, 0x2cae: 0x008a, 0x2caf: 0x008a, + 0x2cb0: 0x008a, 0x2cb1: 0x008a, 0x2cb2: 0x008a, 0x2cb3: 0x008a, 0x2cb4: 0x008a, 0x2cb5: 0x008a, + 0x2cb6: 0x008a, 0x2cb8: 0x008a, 0x2cb9: 0x008a, 0x2cba: 0x008a, 0x2cbb: 0x008a, + 0x2cbc: 0x008a, 0x2cbe: 0x008a, + // Block 0xb3, offset 0x2cc0 + 0x2cc0: 0x008a, 0x2cc1: 0x008a, 0x2cc3: 0x008a, 0x2cc4: 0x008a, + 0x2cc6: 0x008a, 0x2cc7: 0x008a, 0x2cc8: 0x008a, 0x2cc9: 0x008a, 0x2cca: 0x008a, 0x2ccb: 0x008a, + 0x2ccc: 0x008a, 0x2ccd: 0x008a, 0x2cce: 0x008a, 0x2ccf: 0x008a, 0x2cd0: 0x0080, 0x2cd1: 0x0080, + 0x2cd2: 0x0080, 0x2cd3: 0x0080, 0x2cd4: 0x0080, 0x2cd5: 0x0080, 0x2cd6: 0x0080, 0x2cd7: 0x0080, + 0x2cd8: 0x0080, 0x2cd9: 0x0080, 0x2cda: 0x0080, 0x2cdb: 0x0080, 0x2cdc: 0x0080, 0x2cdd: 0x0080, + 0x2cde: 0x0080, 0x2cdf: 0x0080, 0x2ce0: 0x0080, 0x2ce1: 0x0080, 0x2ce2: 0x0080, 0x2ce3: 0x0080, + 0x2ce4: 0x0080, 0x2ce5: 0x0080, 0x2ce6: 0x0080, 0x2ce7: 0x0080, 0x2ce8: 0x0080, 0x2ce9: 0x0080, + 0x2cea: 0x0080, 0x2ceb: 0x0080, 0x2cec: 0x0080, 0x2ced: 0x0080, 0x2cee: 0x0080, 0x2cef: 0x0080, + 0x2cf0: 0x0080, 0x2cf1: 0x0080, 0x2cf2: 0x0080, 0x2cf3: 0x0080, 0x2cf4: 0x0080, 0x2cf5: 0x0080, + 0x2cf6: 0x0080, 0x2cf7: 0x0080, 0x2cf8: 0x0080, 0x2cf9: 0x0080, 0x2cfa: 0x0080, 0x2cfb: 0x0080, + 0x2cfc: 0x0080, 0x2cfd: 0x0080, 0x2cfe: 0x0080, 0x2cff: 0x0080, + // Block 0xb4, offset 0x2d00 + 0x2d00: 0x0080, 0x2d01: 0x0080, + 0x2d13: 0x0080, 0x2d14: 0x0080, 0x2d15: 0x0080, 0x2d16: 0x0080, 0x2d17: 0x0080, + 0x2d18: 0x0080, 0x2d19: 0x0080, 0x2d1a: 0x0080, 0x2d1b: 0x0080, 0x2d1c: 0x0080, 0x2d1d: 0x0080, + 0x2d1e: 0x0080, 0x2d1f: 0x0080, 0x2d20: 0x0080, 0x2d21: 0x0080, 0x2d22: 0x0080, 0x2d23: 0x0080, + 0x2d24: 0x0080, 0x2d25: 0x0080, 0x2d26: 0x0080, 0x2d27: 0x0080, 0x2d28: 0x0080, 0x2d29: 0x0080, + 0x2d2a: 0x0080, 0x2d2b: 0x0080, 0x2d2c: 0x0080, 0x2d2d: 0x0080, 0x2d2e: 0x0080, 0x2d2f: 0x0080, + 0x2d30: 0x0080, 0x2d31: 0x0080, 0x2d32: 0x0080, 0x2d33: 0x0080, 0x2d34: 0x0080, 0x2d35: 0x0080, + 0x2d36: 0x0080, 0x2d37: 0x0080, 0x2d38: 0x0080, 0x2d39: 0x0080, 0x2d3a: 0x0080, 0x2d3b: 0x0080, + 0x2d3c: 0x0080, 0x2d3d: 0x0080, 0x2d3e: 0x0080, 0x2d3f: 0x0080, + // Block 0xb5, offset 0x2d40 + 0x2d50: 0x0080, 0x2d51: 0x0080, + 0x2d52: 0x0080, 0x2d53: 0x0080, 0x2d54: 0x0080, 0x2d55: 0x0080, 0x2d56: 
0x0080, 0x2d57: 0x0080, + 0x2d58: 0x0080, 0x2d59: 0x0080, 0x2d5a: 0x0080, 0x2d5b: 0x0080, 0x2d5c: 0x0080, 0x2d5d: 0x0080, + 0x2d5e: 0x0080, 0x2d5f: 0x0080, 0x2d60: 0x0080, 0x2d61: 0x0080, 0x2d62: 0x0080, 0x2d63: 0x0080, + 0x2d64: 0x0080, 0x2d65: 0x0080, 0x2d66: 0x0080, 0x2d67: 0x0080, 0x2d68: 0x0080, 0x2d69: 0x0080, + 0x2d6a: 0x0080, 0x2d6b: 0x0080, 0x2d6c: 0x0080, 0x2d6d: 0x0080, 0x2d6e: 0x0080, 0x2d6f: 0x0080, + 0x2d70: 0x0080, 0x2d71: 0x0080, 0x2d72: 0x0080, 0x2d73: 0x0080, 0x2d74: 0x0080, 0x2d75: 0x0080, + 0x2d76: 0x0080, 0x2d77: 0x0080, 0x2d78: 0x0080, 0x2d79: 0x0080, 0x2d7a: 0x0080, 0x2d7b: 0x0080, + 0x2d7c: 0x0080, 0x2d7d: 0x0080, 0x2d7e: 0x0080, 0x2d7f: 0x0080, + // Block 0xb6, offset 0x2d80 + 0x2d80: 0x0080, 0x2d81: 0x0080, 0x2d82: 0x0080, 0x2d83: 0x0080, 0x2d84: 0x0080, 0x2d85: 0x0080, + 0x2d86: 0x0080, 0x2d87: 0x0080, 0x2d88: 0x0080, 0x2d89: 0x0080, 0x2d8a: 0x0080, 0x2d8b: 0x0080, + 0x2d8c: 0x0080, 0x2d8d: 0x0080, 0x2d8e: 0x0080, 0x2d8f: 0x0080, + 0x2d92: 0x0080, 0x2d93: 0x0080, 0x2d94: 0x0080, 0x2d95: 0x0080, 0x2d96: 0x0080, 0x2d97: 0x0080, + 0x2d98: 0x0080, 0x2d99: 0x0080, 0x2d9a: 0x0080, 0x2d9b: 0x0080, 0x2d9c: 0x0080, 0x2d9d: 0x0080, + 0x2d9e: 0x0080, 0x2d9f: 0x0080, 0x2da0: 0x0080, 0x2da1: 0x0080, 0x2da2: 0x0080, 0x2da3: 0x0080, + 0x2da4: 0x0080, 0x2da5: 0x0080, 0x2da6: 0x0080, 0x2da7: 0x0080, 0x2da8: 0x0080, 0x2da9: 0x0080, + 0x2daa: 0x0080, 0x2dab: 0x0080, 0x2dac: 0x0080, 0x2dad: 0x0080, 0x2dae: 0x0080, 0x2daf: 0x0080, + 0x2db0: 0x0080, 0x2db1: 0x0080, 0x2db2: 0x0080, 0x2db3: 0x0080, 0x2db4: 0x0080, 0x2db5: 0x0080, + 0x2db6: 0x0080, 0x2db7: 0x0080, 0x2db8: 0x0080, 0x2db9: 0x0080, 0x2dba: 0x0080, 0x2dbb: 0x0080, + 0x2dbc: 0x0080, 0x2dbd: 0x0080, 0x2dbe: 0x0080, 0x2dbf: 0x0080, + // Block 0xb7, offset 0x2dc0 + 0x2dc0: 0x0080, 0x2dc1: 0x0080, 0x2dc2: 0x0080, 0x2dc3: 0x0080, 0x2dc4: 0x0080, 0x2dc5: 0x0080, + 0x2dc6: 0x0080, 0x2dc7: 0x0080, + 0x2df0: 0x0080, 0x2df1: 0x0080, 0x2df2: 0x0080, 0x2df3: 0x0080, 0x2df4: 0x0080, 0x2df5: 0x0080, + 0x2df6: 0x0080, 0x2df7: 0x0080, 0x2df8: 0x0080, 0x2df9: 0x0080, 0x2dfa: 0x0080, 0x2dfb: 0x0080, + 0x2dfc: 0x0080, 0x2dfd: 0x0080, + // Block 0xb8, offset 0x2e00 + 0x2e00: 0x0040, 0x2e01: 0x0040, 0x2e02: 0x0040, 0x2e03: 0x0040, 0x2e04: 0x0040, 0x2e05: 0x0040, + 0x2e06: 0x0040, 0x2e07: 0x0040, 0x2e08: 0x0040, 0x2e09: 0x0040, 0x2e0a: 0x0040, 0x2e0b: 0x0040, + 0x2e0c: 0x0040, 0x2e0d: 0x0040, 0x2e0e: 0x0040, 0x2e0f: 0x0040, 0x2e10: 0x0080, 0x2e11: 0x0080, + 0x2e12: 0x0080, 0x2e13: 0x0080, 0x2e14: 0x0080, 0x2e15: 0x0080, 0x2e16: 0x0080, 0x2e17: 0x0080, + 0x2e18: 0x0080, 0x2e19: 0x0080, + 0x2e20: 0x00c3, 0x2e21: 0x00c3, 0x2e22: 0x00c3, 0x2e23: 0x00c3, + 0x2e24: 0x00c3, 0x2e25: 0x00c3, 0x2e26: 0x00c3, 0x2e27: 0x00c3, 0x2e28: 0x00c3, 0x2e29: 0x00c3, + 0x2e2a: 0x00c3, 0x2e2b: 0x00c3, 0x2e2c: 0x00c3, 0x2e2d: 0x00c3, 0x2e2e: 0x00c3, 0x2e2f: 0x00c3, + 0x2e30: 0x0080, 0x2e31: 0x0080, 0x2e32: 0x0080, 0x2e33: 0x0080, 0x2e34: 0x0080, 0x2e35: 0x0080, + 0x2e36: 0x0080, 0x2e37: 0x0080, 0x2e38: 0x0080, 0x2e39: 0x0080, 0x2e3a: 0x0080, 0x2e3b: 0x0080, + 0x2e3c: 0x0080, 0x2e3d: 0x0080, 0x2e3e: 0x0080, 0x2e3f: 0x0080, + // Block 0xb9, offset 0x2e40 + 0x2e40: 0x0080, 0x2e41: 0x0080, 0x2e42: 0x0080, 0x2e43: 0x0080, 0x2e44: 0x0080, 0x2e45: 0x0080, + 0x2e46: 0x0080, 0x2e47: 0x0080, 0x2e48: 0x0080, 0x2e49: 0x0080, 0x2e4a: 0x0080, 0x2e4b: 0x0080, + 0x2e4c: 0x0080, 0x2e4d: 0x0080, 0x2e4e: 0x0080, 0x2e4f: 0x0080, 0x2e50: 0x0080, 0x2e51: 0x0080, + 0x2e52: 0x0080, 0x2e54: 0x0080, 0x2e55: 0x0080, 0x2e56: 0x0080, 0x2e57: 0x0080, + 0x2e58: 0x0080, 0x2e59: 0x0080, 0x2e5a: 0x0080, 
0x2e5b: 0x0080, 0x2e5c: 0x0080, 0x2e5d: 0x0080, + 0x2e5e: 0x0080, 0x2e5f: 0x0080, 0x2e60: 0x0080, 0x2e61: 0x0080, 0x2e62: 0x0080, 0x2e63: 0x0080, + 0x2e64: 0x0080, 0x2e65: 0x0080, 0x2e66: 0x0080, 0x2e68: 0x0080, 0x2e69: 0x0080, + 0x2e6a: 0x0080, 0x2e6b: 0x0080, + 0x2e70: 0x0080, 0x2e71: 0x0080, 0x2e72: 0x0080, 0x2e73: 0x00c0, 0x2e74: 0x0080, + 0x2e76: 0x0080, 0x2e77: 0x0080, 0x2e78: 0x0080, 0x2e79: 0x0080, 0x2e7a: 0x0080, 0x2e7b: 0x0080, + 0x2e7c: 0x0080, 0x2e7d: 0x0080, 0x2e7e: 0x0080, 0x2e7f: 0x0080, + // Block 0xba, offset 0x2e80 + 0x2e80: 0x0080, 0x2e81: 0x0080, 0x2e82: 0x0080, 0x2e83: 0x0080, 0x2e84: 0x0080, 0x2e85: 0x0080, + 0x2e86: 0x0080, 0x2e87: 0x0080, 0x2e88: 0x0080, 0x2e89: 0x0080, 0x2e8a: 0x0080, 0x2e8b: 0x0080, + 0x2e8c: 0x0080, 0x2e8d: 0x0080, 0x2e8e: 0x0080, 0x2e8f: 0x0080, 0x2e90: 0x0080, 0x2e91: 0x0080, + 0x2e92: 0x0080, 0x2e93: 0x0080, 0x2e94: 0x0080, 0x2e95: 0x0080, 0x2e96: 0x0080, 0x2e97: 0x0080, + 0x2e98: 0x0080, 0x2e99: 0x0080, 0x2e9a: 0x0080, 0x2e9b: 0x0080, 0x2e9c: 0x0080, 0x2e9d: 0x0080, + 0x2e9e: 0x0080, 0x2e9f: 0x0080, 0x2ea0: 0x0080, 0x2ea1: 0x0080, 0x2ea2: 0x0080, 0x2ea3: 0x0080, + 0x2ea4: 0x0080, 0x2ea5: 0x0080, 0x2ea6: 0x0080, 0x2ea7: 0x0080, 0x2ea8: 0x0080, 0x2ea9: 0x0080, + 0x2eaa: 0x0080, 0x2eab: 0x0080, 0x2eac: 0x0080, 0x2ead: 0x0080, 0x2eae: 0x0080, 0x2eaf: 0x0080, + 0x2eb0: 0x0080, 0x2eb1: 0x0080, 0x2eb2: 0x0080, 0x2eb3: 0x0080, 0x2eb4: 0x0080, 0x2eb5: 0x0080, + 0x2eb6: 0x0080, 0x2eb7: 0x0080, 0x2eb8: 0x0080, 0x2eb9: 0x0080, 0x2eba: 0x0080, 0x2ebb: 0x0080, + 0x2ebc: 0x0080, 0x2ebf: 0x0040, + // Block 0xbb, offset 0x2ec0 + 0x2ec1: 0x0080, 0x2ec2: 0x0080, 0x2ec3: 0x0080, 0x2ec4: 0x0080, 0x2ec5: 0x0080, + 0x2ec6: 0x0080, 0x2ec7: 0x0080, 0x2ec8: 0x0080, 0x2ec9: 0x0080, 0x2eca: 0x0080, 0x2ecb: 0x0080, + 0x2ecc: 0x0080, 0x2ecd: 0x0080, 0x2ece: 0x0080, 0x2ecf: 0x0080, 0x2ed0: 0x0080, 0x2ed1: 0x0080, + 0x2ed2: 0x0080, 0x2ed3: 0x0080, 0x2ed4: 0x0080, 0x2ed5: 0x0080, 0x2ed6: 0x0080, 0x2ed7: 0x0080, + 0x2ed8: 0x0080, 0x2ed9: 0x0080, 0x2eda: 0x0080, 0x2edb: 0x0080, 0x2edc: 0x0080, 0x2edd: 0x0080, + 0x2ede: 0x0080, 0x2edf: 0x0080, 0x2ee0: 0x0080, 0x2ee1: 0x0080, 0x2ee2: 0x0080, 0x2ee3: 0x0080, + 0x2ee4: 0x0080, 0x2ee5: 0x0080, 0x2ee6: 0x0080, 0x2ee7: 0x0080, 0x2ee8: 0x0080, 0x2ee9: 0x0080, + 0x2eea: 0x0080, 0x2eeb: 0x0080, 0x2eec: 0x0080, 0x2eed: 0x0080, 0x2eee: 0x0080, 0x2eef: 0x0080, + 0x2ef0: 0x0080, 0x2ef1: 0x0080, 0x2ef2: 0x0080, 0x2ef3: 0x0080, 0x2ef4: 0x0080, 0x2ef5: 0x0080, + 0x2ef6: 0x0080, 0x2ef7: 0x0080, 0x2ef8: 0x0080, 0x2ef9: 0x0080, 0x2efa: 0x0080, 0x2efb: 0x0080, + 0x2efc: 0x0080, 0x2efd: 0x0080, 0x2efe: 0x0080, 0x2eff: 0x0080, + // Block 0xbc, offset 0x2f00 + 0x2f00: 0x0080, 0x2f01: 0x0080, 0x2f02: 0x0080, 0x2f03: 0x0080, 0x2f04: 0x0080, 0x2f05: 0x0080, + 0x2f06: 0x0080, 0x2f07: 0x0080, 0x2f08: 0x0080, 0x2f09: 0x0080, 0x2f0a: 0x0080, 0x2f0b: 0x0080, + 0x2f0c: 0x0080, 0x2f0d: 0x0080, 0x2f0e: 0x0080, 0x2f0f: 0x0080, 0x2f10: 0x0080, 0x2f11: 0x0080, + 0x2f12: 0x0080, 0x2f13: 0x0080, 0x2f14: 0x0080, 0x2f15: 0x0080, 0x2f16: 0x0080, 0x2f17: 0x0080, + 0x2f18: 0x0080, 0x2f19: 0x0080, 0x2f1a: 0x0080, 0x2f1b: 0x0080, 0x2f1c: 0x0080, 0x2f1d: 0x0080, + 0x2f1e: 0x0080, 0x2f1f: 0x0080, 0x2f20: 0x0080, 0x2f21: 0x0080, 0x2f22: 0x0080, 0x2f23: 0x0080, + 0x2f24: 0x0080, 0x2f25: 0x0080, 0x2f26: 0x008c, 0x2f27: 0x008c, 0x2f28: 0x008c, 0x2f29: 0x008c, + 0x2f2a: 0x008c, 0x2f2b: 0x008c, 0x2f2c: 0x008c, 0x2f2d: 0x008c, 0x2f2e: 0x008c, 0x2f2f: 0x008c, + 0x2f30: 0x0080, 0x2f31: 0x008c, 0x2f32: 0x008c, 0x2f33: 0x008c, 0x2f34: 0x008c, 0x2f35: 0x008c, + 0x2f36: 0x008c, 0x2f37: 
0x008c, 0x2f38: 0x008c, 0x2f39: 0x008c, 0x2f3a: 0x008c, 0x2f3b: 0x008c, + 0x2f3c: 0x008c, 0x2f3d: 0x008c, 0x2f3e: 0x008c, 0x2f3f: 0x008c, + // Block 0xbd, offset 0x2f40 + 0x2f40: 0x008c, 0x2f41: 0x008c, 0x2f42: 0x008c, 0x2f43: 0x008c, 0x2f44: 0x008c, 0x2f45: 0x008c, + 0x2f46: 0x008c, 0x2f47: 0x008c, 0x2f48: 0x008c, 0x2f49: 0x008c, 0x2f4a: 0x008c, 0x2f4b: 0x008c, + 0x2f4c: 0x008c, 0x2f4d: 0x008c, 0x2f4e: 0x008c, 0x2f4f: 0x008c, 0x2f50: 0x008c, 0x2f51: 0x008c, + 0x2f52: 0x008c, 0x2f53: 0x008c, 0x2f54: 0x008c, 0x2f55: 0x008c, 0x2f56: 0x008c, 0x2f57: 0x008c, + 0x2f58: 0x008c, 0x2f59: 0x008c, 0x2f5a: 0x008c, 0x2f5b: 0x008c, 0x2f5c: 0x008c, 0x2f5d: 0x008c, + 0x2f5e: 0x0080, 0x2f5f: 0x0080, 0x2f60: 0x0040, 0x2f61: 0x0080, 0x2f62: 0x0080, 0x2f63: 0x0080, + 0x2f64: 0x0080, 0x2f65: 0x0080, 0x2f66: 0x0080, 0x2f67: 0x0080, 0x2f68: 0x0080, 0x2f69: 0x0080, + 0x2f6a: 0x0080, 0x2f6b: 0x0080, 0x2f6c: 0x0080, 0x2f6d: 0x0080, 0x2f6e: 0x0080, 0x2f6f: 0x0080, + 0x2f70: 0x0080, 0x2f71: 0x0080, 0x2f72: 0x0080, 0x2f73: 0x0080, 0x2f74: 0x0080, 0x2f75: 0x0080, + 0x2f76: 0x0080, 0x2f77: 0x0080, 0x2f78: 0x0080, 0x2f79: 0x0080, 0x2f7a: 0x0080, 0x2f7b: 0x0080, + 0x2f7c: 0x0080, 0x2f7d: 0x0080, 0x2f7e: 0x0080, + // Block 0xbe, offset 0x2f80 + 0x2f82: 0x0080, 0x2f83: 0x0080, 0x2f84: 0x0080, 0x2f85: 0x0080, + 0x2f86: 0x0080, 0x2f87: 0x0080, 0x2f8a: 0x0080, 0x2f8b: 0x0080, + 0x2f8c: 0x0080, 0x2f8d: 0x0080, 0x2f8e: 0x0080, 0x2f8f: 0x0080, + 0x2f92: 0x0080, 0x2f93: 0x0080, 0x2f94: 0x0080, 0x2f95: 0x0080, 0x2f96: 0x0080, 0x2f97: 0x0080, + 0x2f9a: 0x0080, 0x2f9b: 0x0080, 0x2f9c: 0x0080, + 0x2fa0: 0x0080, 0x2fa1: 0x0080, 0x2fa2: 0x0080, 0x2fa3: 0x0080, + 0x2fa4: 0x0080, 0x2fa5: 0x0080, 0x2fa6: 0x0080, 0x2fa8: 0x0080, 0x2fa9: 0x0080, + 0x2faa: 0x0080, 0x2fab: 0x0080, 0x2fac: 0x0080, 0x2fad: 0x0080, 0x2fae: 0x0080, + 0x2fb9: 0x0040, 0x2fba: 0x0040, 0x2fbb: 0x0040, + 0x2fbc: 0x0080, 0x2fbd: 0x0080, + // Block 0xbf, offset 0x2fc0 + 0x2fc0: 0x00c0, 0x2fc1: 0x00c0, 0x2fc2: 0x00c0, 0x2fc3: 0x00c0, 0x2fc4: 0x00c0, 0x2fc5: 0x00c0, + 0x2fc6: 0x00c0, 0x2fc7: 0x00c0, 0x2fc8: 0x00c0, 0x2fc9: 0x00c0, 0x2fca: 0x00c0, 0x2fcb: 0x00c0, + 0x2fcd: 0x00c0, 0x2fce: 0x00c0, 0x2fcf: 0x00c0, 0x2fd0: 0x00c0, 0x2fd1: 0x00c0, + 0x2fd2: 0x00c0, 0x2fd3: 0x00c0, 0x2fd4: 0x00c0, 0x2fd5: 0x00c0, 0x2fd6: 0x00c0, 0x2fd7: 0x00c0, + 0x2fd8: 0x00c0, 0x2fd9: 0x00c0, 0x2fda: 0x00c0, 0x2fdb: 0x00c0, 0x2fdc: 0x00c0, 0x2fdd: 0x00c0, + 0x2fde: 0x00c0, 0x2fdf: 0x00c0, 0x2fe0: 0x00c0, 0x2fe1: 0x00c0, 0x2fe2: 0x00c0, 0x2fe3: 0x00c0, + 0x2fe4: 0x00c0, 0x2fe5: 0x00c0, 0x2fe6: 0x00c0, 0x2fe8: 0x00c0, 0x2fe9: 0x00c0, + 0x2fea: 0x00c0, 0x2feb: 0x00c0, 0x2fec: 0x00c0, 0x2fed: 0x00c0, 0x2fee: 0x00c0, 0x2fef: 0x00c0, + 0x2ff0: 0x00c0, 0x2ff1: 0x00c0, 0x2ff2: 0x00c0, 0x2ff3: 0x00c0, 0x2ff4: 0x00c0, 0x2ff5: 0x00c0, + 0x2ff6: 0x00c0, 0x2ff7: 0x00c0, 0x2ff8: 0x00c0, 0x2ff9: 0x00c0, 0x2ffa: 0x00c0, + 0x2ffc: 0x00c0, 0x2ffd: 0x00c0, 0x2fff: 0x00c0, + // Block 0xc0, offset 0x3000 + 0x3000: 0x00c0, 0x3001: 0x00c0, 0x3002: 0x00c0, 0x3003: 0x00c0, 0x3004: 0x00c0, 0x3005: 0x00c0, + 0x3006: 0x00c0, 0x3007: 0x00c0, 0x3008: 0x00c0, 0x3009: 0x00c0, 0x300a: 0x00c0, 0x300b: 0x00c0, + 0x300c: 0x00c0, 0x300d: 0x00c0, 0x3010: 0x00c0, 0x3011: 0x00c0, + 0x3012: 0x00c0, 0x3013: 0x00c0, 0x3014: 0x00c0, 0x3015: 0x00c0, 0x3016: 0x00c0, 0x3017: 0x00c0, + 0x3018: 0x00c0, 0x3019: 0x00c0, 0x301a: 0x00c0, 0x301b: 0x00c0, 0x301c: 0x00c0, 0x301d: 0x00c0, + // Block 0xc1, offset 0x3040 + 0x3040: 0x00c0, 0x3041: 0x00c0, 0x3042: 0x00c0, 0x3043: 0x00c0, 0x3044: 0x00c0, 0x3045: 0x00c0, + 0x3046: 0x00c0, 0x3047: 0x00c0, 
0x3048: 0x00c0, 0x3049: 0x00c0, 0x304a: 0x00c0, 0x304b: 0x00c0, + 0x304c: 0x00c0, 0x304d: 0x00c0, 0x304e: 0x00c0, 0x304f: 0x00c0, 0x3050: 0x00c0, 0x3051: 0x00c0, + 0x3052: 0x00c0, 0x3053: 0x00c0, 0x3054: 0x00c0, 0x3055: 0x00c0, 0x3056: 0x00c0, 0x3057: 0x00c0, + 0x3058: 0x00c0, 0x3059: 0x00c0, 0x305a: 0x00c0, 0x305b: 0x00c0, 0x305c: 0x00c0, 0x305d: 0x00c0, + 0x305e: 0x00c0, 0x305f: 0x00c0, 0x3060: 0x00c0, 0x3061: 0x00c0, 0x3062: 0x00c0, 0x3063: 0x00c0, + 0x3064: 0x00c0, 0x3065: 0x00c0, 0x3066: 0x00c0, 0x3067: 0x00c0, 0x3068: 0x00c0, 0x3069: 0x00c0, + 0x306a: 0x00c0, 0x306b: 0x00c0, 0x306c: 0x00c0, 0x306d: 0x00c0, 0x306e: 0x00c0, 0x306f: 0x00c0, + 0x3070: 0x00c0, 0x3071: 0x00c0, 0x3072: 0x00c0, 0x3073: 0x00c0, 0x3074: 0x00c0, 0x3075: 0x00c0, + 0x3076: 0x00c0, 0x3077: 0x00c0, 0x3078: 0x00c0, 0x3079: 0x00c0, 0x307a: 0x00c0, + // Block 0xc2, offset 0x3080 + 0x3080: 0x0080, 0x3081: 0x0080, 0x3082: 0x0080, + 0x3087: 0x0080, 0x3088: 0x0080, 0x3089: 0x0080, 0x308a: 0x0080, 0x308b: 0x0080, + 0x308c: 0x0080, 0x308d: 0x0080, 0x308e: 0x0080, 0x308f: 0x0080, 0x3090: 0x0080, 0x3091: 0x0080, + 0x3092: 0x0080, 0x3093: 0x0080, 0x3094: 0x0080, 0x3095: 0x0080, 0x3096: 0x0080, 0x3097: 0x0080, + 0x3098: 0x0080, 0x3099: 0x0080, 0x309a: 0x0080, 0x309b: 0x0080, 0x309c: 0x0080, 0x309d: 0x0080, + 0x309e: 0x0080, 0x309f: 0x0080, 0x30a0: 0x0080, 0x30a1: 0x0080, 0x30a2: 0x0080, 0x30a3: 0x0080, + 0x30a4: 0x0080, 0x30a5: 0x0080, 0x30a6: 0x0080, 0x30a7: 0x0080, 0x30a8: 0x0080, 0x30a9: 0x0080, + 0x30aa: 0x0080, 0x30ab: 0x0080, 0x30ac: 0x0080, 0x30ad: 0x0080, 0x30ae: 0x0080, 0x30af: 0x0080, + 0x30b0: 0x0080, 0x30b1: 0x0080, 0x30b2: 0x0080, 0x30b3: 0x0080, + 0x30b7: 0x0080, 0x30b8: 0x0080, 0x30b9: 0x0080, 0x30ba: 0x0080, 0x30bb: 0x0080, + 0x30bc: 0x0080, 0x30bd: 0x0080, 0x30be: 0x0080, 0x30bf: 0x0080, + // Block 0xc3, offset 0x30c0 + 0x30c0: 0x0088, 0x30c1: 0x0088, 0x30c2: 0x0088, 0x30c3: 0x0088, 0x30c4: 0x0088, 0x30c5: 0x0088, + 0x30c6: 0x0088, 0x30c7: 0x0088, 0x30c8: 0x0088, 0x30c9: 0x0088, 0x30ca: 0x0088, 0x30cb: 0x0088, + 0x30cc: 0x0088, 0x30cd: 0x0088, 0x30ce: 0x0088, 0x30cf: 0x0088, 0x30d0: 0x0088, 0x30d1: 0x0088, + 0x30d2: 0x0088, 0x30d3: 0x0088, 0x30d4: 0x0088, 0x30d5: 0x0088, 0x30d6: 0x0088, 0x30d7: 0x0088, + 0x30d8: 0x0088, 0x30d9: 0x0088, 0x30da: 0x0088, 0x30db: 0x0088, 0x30dc: 0x0088, 0x30dd: 0x0088, + 0x30de: 0x0088, 0x30df: 0x0088, 0x30e0: 0x0088, 0x30e1: 0x0088, 0x30e2: 0x0088, 0x30e3: 0x0088, + 0x30e4: 0x0088, 0x30e5: 0x0088, 0x30e6: 0x0088, 0x30e7: 0x0088, 0x30e8: 0x0088, 0x30e9: 0x0088, + 0x30ea: 0x0088, 0x30eb: 0x0088, 0x30ec: 0x0088, 0x30ed: 0x0088, 0x30ee: 0x0088, 0x30ef: 0x0088, + 0x30f0: 0x0088, 0x30f1: 0x0088, 0x30f2: 0x0088, 0x30f3: 0x0088, 0x30f4: 0x0088, 0x30f5: 0x0088, + 0x30f6: 0x0088, 0x30f7: 0x0088, 0x30f8: 0x0088, 0x30f9: 0x0088, 0x30fa: 0x0088, 0x30fb: 0x0088, + 0x30fc: 0x0088, 0x30fd: 0x0088, 0x30fe: 0x0088, 0x30ff: 0x0088, + // Block 0xc4, offset 0x3100 + 0x3100: 0x0088, 0x3101: 0x0088, 0x3102: 0x0088, 0x3103: 0x0088, 0x3104: 0x0088, 0x3105: 0x0088, + 0x3106: 0x0088, 0x3107: 0x0088, 0x3108: 0x0088, 0x3109: 0x0088, 0x310a: 0x0088, 0x310b: 0x0088, + 0x310c: 0x0088, 0x310d: 0x0088, 0x310e: 0x0088, 0x3110: 0x0080, 0x3111: 0x0080, + 0x3112: 0x0080, 0x3113: 0x0080, 0x3114: 0x0080, 0x3115: 0x0080, 0x3116: 0x0080, 0x3117: 0x0080, + 0x3118: 0x0080, 0x3119: 0x0080, 0x311a: 0x0080, 0x311b: 0x0080, + 0x3120: 0x0088, + // Block 0xc5, offset 0x3140 + 0x3150: 0x0080, 0x3151: 0x0080, + 0x3152: 0x0080, 0x3153: 0x0080, 0x3154: 0x0080, 0x3155: 0x0080, 0x3156: 0x0080, 0x3157: 0x0080, + 0x3158: 0x0080, 0x3159: 
0x0080, 0x315a: 0x0080, 0x315b: 0x0080, 0x315c: 0x0080, 0x315d: 0x0080, + 0x315e: 0x0080, 0x315f: 0x0080, 0x3160: 0x0080, 0x3161: 0x0080, 0x3162: 0x0080, 0x3163: 0x0080, + 0x3164: 0x0080, 0x3165: 0x0080, 0x3166: 0x0080, 0x3167: 0x0080, 0x3168: 0x0080, 0x3169: 0x0080, + 0x316a: 0x0080, 0x316b: 0x0080, 0x316c: 0x0080, 0x316d: 0x0080, 0x316e: 0x0080, 0x316f: 0x0080, + 0x3170: 0x0080, 0x3171: 0x0080, 0x3172: 0x0080, 0x3173: 0x0080, 0x3174: 0x0080, 0x3175: 0x0080, + 0x3176: 0x0080, 0x3177: 0x0080, 0x3178: 0x0080, 0x3179: 0x0080, 0x317a: 0x0080, 0x317b: 0x0080, + 0x317c: 0x0080, 0x317d: 0x00c3, + // Block 0xc6, offset 0x3180 + 0x3180: 0x00c0, 0x3181: 0x00c0, 0x3182: 0x00c0, 0x3183: 0x00c0, 0x3184: 0x00c0, 0x3185: 0x00c0, + 0x3186: 0x00c0, 0x3187: 0x00c0, 0x3188: 0x00c0, 0x3189: 0x00c0, 0x318a: 0x00c0, 0x318b: 0x00c0, + 0x318c: 0x00c0, 0x318d: 0x00c0, 0x318e: 0x00c0, 0x318f: 0x00c0, 0x3190: 0x00c0, 0x3191: 0x00c0, + 0x3192: 0x00c0, 0x3193: 0x00c0, 0x3194: 0x00c0, 0x3195: 0x00c0, 0x3196: 0x00c0, 0x3197: 0x00c0, + 0x3198: 0x00c0, 0x3199: 0x00c0, 0x319a: 0x00c0, 0x319b: 0x00c0, 0x319c: 0x00c0, + 0x31a0: 0x00c0, 0x31a1: 0x00c0, 0x31a2: 0x00c0, 0x31a3: 0x00c0, + 0x31a4: 0x00c0, 0x31a5: 0x00c0, 0x31a6: 0x00c0, 0x31a7: 0x00c0, 0x31a8: 0x00c0, 0x31a9: 0x00c0, + 0x31aa: 0x00c0, 0x31ab: 0x00c0, 0x31ac: 0x00c0, 0x31ad: 0x00c0, 0x31ae: 0x00c0, 0x31af: 0x00c0, + 0x31b0: 0x00c0, 0x31b1: 0x00c0, 0x31b2: 0x00c0, 0x31b3: 0x00c0, 0x31b4: 0x00c0, 0x31b5: 0x00c0, + 0x31b6: 0x00c0, 0x31b7: 0x00c0, 0x31b8: 0x00c0, 0x31b9: 0x00c0, 0x31ba: 0x00c0, 0x31bb: 0x00c0, + 0x31bc: 0x00c0, 0x31bd: 0x00c0, 0x31be: 0x00c0, 0x31bf: 0x00c0, + // Block 0xc7, offset 0x31c0 + 0x31c0: 0x00c0, 0x31c1: 0x00c0, 0x31c2: 0x00c0, 0x31c3: 0x00c0, 0x31c4: 0x00c0, 0x31c5: 0x00c0, + 0x31c6: 0x00c0, 0x31c7: 0x00c0, 0x31c8: 0x00c0, 0x31c9: 0x00c0, 0x31ca: 0x00c0, 0x31cb: 0x00c0, + 0x31cc: 0x00c0, 0x31cd: 0x00c0, 0x31ce: 0x00c0, 0x31cf: 0x00c0, 0x31d0: 0x00c0, + 0x31e0: 0x00c3, 0x31e1: 0x0080, 0x31e2: 0x0080, 0x31e3: 0x0080, + 0x31e4: 0x0080, 0x31e5: 0x0080, 0x31e6: 0x0080, 0x31e7: 0x0080, 0x31e8: 0x0080, 0x31e9: 0x0080, + 0x31ea: 0x0080, 0x31eb: 0x0080, 0x31ec: 0x0080, 0x31ed: 0x0080, 0x31ee: 0x0080, 0x31ef: 0x0080, + 0x31f0: 0x0080, 0x31f1: 0x0080, 0x31f2: 0x0080, 0x31f3: 0x0080, 0x31f4: 0x0080, 0x31f5: 0x0080, + 0x31f6: 0x0080, 0x31f7: 0x0080, 0x31f8: 0x0080, 0x31f9: 0x0080, 0x31fa: 0x0080, 0x31fb: 0x0080, + // Block 0xc8, offset 0x3200 + 0x3200: 0x00c0, 0x3201: 0x00c0, 0x3202: 0x00c0, 0x3203: 0x00c0, 0x3204: 0x00c0, 0x3205: 0x00c0, + 0x3206: 0x00c0, 0x3207: 0x00c0, 0x3208: 0x00c0, 0x3209: 0x00c0, 0x320a: 0x00c0, 0x320b: 0x00c0, + 0x320c: 0x00c0, 0x320d: 0x00c0, 0x320e: 0x00c0, 0x320f: 0x00c0, 0x3210: 0x00c0, 0x3211: 0x00c0, + 0x3212: 0x00c0, 0x3213: 0x00c0, 0x3214: 0x00c0, 0x3215: 0x00c0, 0x3216: 0x00c0, 0x3217: 0x00c0, + 0x3218: 0x00c0, 0x3219: 0x00c0, 0x321a: 0x00c0, 0x321b: 0x00c0, 0x321c: 0x00c0, 0x321d: 0x00c0, + 0x321e: 0x00c0, 0x321f: 0x00c0, 0x3220: 0x0080, 0x3221: 0x0080, 0x3222: 0x0080, 0x3223: 0x0080, + 0x3230: 0x00c0, 0x3231: 0x00c0, 0x3232: 0x00c0, 0x3233: 0x00c0, 0x3234: 0x00c0, 0x3235: 0x00c0, + 0x3236: 0x00c0, 0x3237: 0x00c0, 0x3238: 0x00c0, 0x3239: 0x00c0, 0x323a: 0x00c0, 0x323b: 0x00c0, + 0x323c: 0x00c0, 0x323d: 0x00c0, 0x323e: 0x00c0, 0x323f: 0x00c0, + // Block 0xc9, offset 0x3240 + 0x3240: 0x00c0, 0x3241: 0x0080, 0x3242: 0x00c0, 0x3243: 0x00c0, 0x3244: 0x00c0, 0x3245: 0x00c0, + 0x3246: 0x00c0, 0x3247: 0x00c0, 0x3248: 0x00c0, 0x3249: 0x00c0, 0x324a: 0x0080, + 0x3250: 0x00c0, 0x3251: 0x00c0, + 0x3252: 0x00c0, 0x3253: 0x00c0, 
0x3254: 0x00c0, 0x3255: 0x00c0, 0x3256: 0x00c0, 0x3257: 0x00c0, + 0x3258: 0x00c0, 0x3259: 0x00c0, 0x325a: 0x00c0, 0x325b: 0x00c0, 0x325c: 0x00c0, 0x325d: 0x00c0, + 0x325e: 0x00c0, 0x325f: 0x00c0, 0x3260: 0x00c0, 0x3261: 0x00c0, 0x3262: 0x00c0, 0x3263: 0x00c0, + 0x3264: 0x00c0, 0x3265: 0x00c0, 0x3266: 0x00c0, 0x3267: 0x00c0, 0x3268: 0x00c0, 0x3269: 0x00c0, + 0x326a: 0x00c0, 0x326b: 0x00c0, 0x326c: 0x00c0, 0x326d: 0x00c0, 0x326e: 0x00c0, 0x326f: 0x00c0, + 0x3270: 0x00c0, 0x3271: 0x00c0, 0x3272: 0x00c0, 0x3273: 0x00c0, 0x3274: 0x00c0, 0x3275: 0x00c0, + 0x3276: 0x00c3, 0x3277: 0x00c3, 0x3278: 0x00c3, 0x3279: 0x00c3, 0x327a: 0x00c3, + // Block 0xca, offset 0x3280 + 0x3280: 0x00c0, 0x3281: 0x00c0, 0x3282: 0x00c0, 0x3283: 0x00c0, 0x3284: 0x00c0, 0x3285: 0x00c0, + 0x3286: 0x00c0, 0x3287: 0x00c0, 0x3288: 0x00c0, 0x3289: 0x00c0, 0x328a: 0x00c0, 0x328b: 0x00c0, + 0x328c: 0x00c0, 0x328d: 0x00c0, 0x328e: 0x00c0, 0x328f: 0x00c0, 0x3290: 0x00c0, 0x3291: 0x00c0, + 0x3292: 0x00c0, 0x3293: 0x00c0, 0x3294: 0x00c0, 0x3295: 0x00c0, 0x3296: 0x00c0, 0x3297: 0x00c0, + 0x3298: 0x00c0, 0x3299: 0x00c0, 0x329a: 0x00c0, 0x329b: 0x00c0, 0x329c: 0x00c0, 0x329d: 0x00c0, + 0x329f: 0x0080, 0x32a0: 0x00c0, 0x32a1: 0x00c0, 0x32a2: 0x00c0, 0x32a3: 0x00c0, + 0x32a4: 0x00c0, 0x32a5: 0x00c0, 0x32a6: 0x00c0, 0x32a7: 0x00c0, 0x32a8: 0x00c0, 0x32a9: 0x00c0, + 0x32aa: 0x00c0, 0x32ab: 0x00c0, 0x32ac: 0x00c0, 0x32ad: 0x00c0, 0x32ae: 0x00c0, 0x32af: 0x00c0, + 0x32b0: 0x00c0, 0x32b1: 0x00c0, 0x32b2: 0x00c0, 0x32b3: 0x00c0, 0x32b4: 0x00c0, 0x32b5: 0x00c0, + 0x32b6: 0x00c0, 0x32b7: 0x00c0, 0x32b8: 0x00c0, 0x32b9: 0x00c0, 0x32ba: 0x00c0, 0x32bb: 0x00c0, + 0x32bc: 0x00c0, 0x32bd: 0x00c0, 0x32be: 0x00c0, 0x32bf: 0x00c0, + // Block 0xcb, offset 0x32c0 + 0x32c0: 0x00c0, 0x32c1: 0x00c0, 0x32c2: 0x00c0, 0x32c3: 0x00c0, + 0x32c8: 0x00c0, 0x32c9: 0x00c0, 0x32ca: 0x00c0, 0x32cb: 0x00c0, + 0x32cc: 0x00c0, 0x32cd: 0x00c0, 0x32ce: 0x00c0, 0x32cf: 0x00c0, 0x32d0: 0x0080, 0x32d1: 0x0080, + 0x32d2: 0x0080, 0x32d3: 0x0080, 0x32d4: 0x0080, 0x32d5: 0x0080, + // Block 0xcc, offset 0x3300 + 0x3300: 0x00c0, 0x3301: 0x00c0, 0x3302: 0x00c0, 0x3303: 0x00c0, 0x3304: 0x00c0, 0x3305: 0x00c0, + 0x3306: 0x00c0, 0x3307: 0x00c0, 0x3308: 0x00c0, 0x3309: 0x00c0, 0x330a: 0x00c0, 0x330b: 0x00c0, + 0x330c: 0x00c0, 0x330d: 0x00c0, 0x330e: 0x00c0, 0x330f: 0x00c0, 0x3310: 0x00c0, 0x3311: 0x00c0, + 0x3312: 0x00c0, 0x3313: 0x00c0, 0x3314: 0x00c0, 0x3315: 0x00c0, 0x3316: 0x00c0, 0x3317: 0x00c0, + 0x3318: 0x00c0, 0x3319: 0x00c0, 0x331a: 0x00c0, 0x331b: 0x00c0, 0x331c: 0x00c0, 0x331d: 0x00c0, + 0x3320: 0x00c0, 0x3321: 0x00c0, 0x3322: 0x00c0, 0x3323: 0x00c0, + 0x3324: 0x00c0, 0x3325: 0x00c0, 0x3326: 0x00c0, 0x3327: 0x00c0, 0x3328: 0x00c0, 0x3329: 0x00c0, + 0x3330: 0x00c0, 0x3331: 0x00c0, 0x3332: 0x00c0, 0x3333: 0x00c0, 0x3334: 0x00c0, 0x3335: 0x00c0, + 0x3336: 0x00c0, 0x3337: 0x00c0, 0x3338: 0x00c0, 0x3339: 0x00c0, 0x333a: 0x00c0, 0x333b: 0x00c0, + 0x333c: 0x00c0, 0x333d: 0x00c0, 0x333e: 0x00c0, 0x333f: 0x00c0, + // Block 0xcd, offset 0x3340 + 0x3340: 0x00c0, 0x3341: 0x00c0, 0x3342: 0x00c0, 0x3343: 0x00c0, 0x3344: 0x00c0, 0x3345: 0x00c0, + 0x3346: 0x00c0, 0x3347: 0x00c0, 0x3348: 0x00c0, 0x3349: 0x00c0, 0x334a: 0x00c0, 0x334b: 0x00c0, + 0x334c: 0x00c0, 0x334d: 0x00c0, 0x334e: 0x00c0, 0x334f: 0x00c0, 0x3350: 0x00c0, 0x3351: 0x00c0, + 0x3352: 0x00c0, 0x3353: 0x00c0, + 0x3358: 0x00c0, 0x3359: 0x00c0, 0x335a: 0x00c0, 0x335b: 0x00c0, 0x335c: 0x00c0, 0x335d: 0x00c0, + 0x335e: 0x00c0, 0x335f: 0x00c0, 0x3360: 0x00c0, 0x3361: 0x00c0, 0x3362: 0x00c0, 0x3363: 0x00c0, + 0x3364: 0x00c0, 0x3365: 
0x00c0, 0x3366: 0x00c0, 0x3367: 0x00c0, 0x3368: 0x00c0, 0x3369: 0x00c0, + 0x336a: 0x00c0, 0x336b: 0x00c0, 0x336c: 0x00c0, 0x336d: 0x00c0, 0x336e: 0x00c0, 0x336f: 0x00c0, + 0x3370: 0x00c0, 0x3371: 0x00c0, 0x3372: 0x00c0, 0x3373: 0x00c0, 0x3374: 0x00c0, 0x3375: 0x00c0, + 0x3376: 0x00c0, 0x3377: 0x00c0, 0x3378: 0x00c0, 0x3379: 0x00c0, 0x337a: 0x00c0, 0x337b: 0x00c0, + // Block 0xce, offset 0x3380 + 0x3380: 0x00c0, 0x3381: 0x00c0, 0x3382: 0x00c0, 0x3383: 0x00c0, 0x3384: 0x00c0, 0x3385: 0x00c0, + 0x3386: 0x00c0, 0x3387: 0x00c0, 0x3388: 0x00c0, 0x3389: 0x00c0, 0x338a: 0x00c0, 0x338b: 0x00c0, + 0x338c: 0x00c0, 0x338d: 0x00c0, 0x338e: 0x00c0, 0x338f: 0x00c0, 0x3390: 0x00c0, 0x3391: 0x00c0, + 0x3392: 0x00c0, 0x3393: 0x00c0, 0x3394: 0x00c0, 0x3395: 0x00c0, 0x3396: 0x00c0, 0x3397: 0x00c0, + 0x3398: 0x00c0, 0x3399: 0x00c0, 0x339a: 0x00c0, 0x339b: 0x00c0, 0x339c: 0x00c0, 0x339d: 0x00c0, + 0x339e: 0x00c0, 0x339f: 0x00c0, 0x33a0: 0x00c0, 0x33a1: 0x00c0, 0x33a2: 0x00c0, 0x33a3: 0x00c0, + 0x33a4: 0x00c0, 0x33a5: 0x00c0, 0x33a6: 0x00c0, 0x33a7: 0x00c0, + 0x33b0: 0x00c0, 0x33b1: 0x00c0, 0x33b2: 0x00c0, 0x33b3: 0x00c0, 0x33b4: 0x00c0, 0x33b5: 0x00c0, + 0x33b6: 0x00c0, 0x33b7: 0x00c0, 0x33b8: 0x00c0, 0x33b9: 0x00c0, 0x33ba: 0x00c0, 0x33bb: 0x00c0, + 0x33bc: 0x00c0, 0x33bd: 0x00c0, 0x33be: 0x00c0, 0x33bf: 0x00c0, + // Block 0xcf, offset 0x33c0 + 0x33c0: 0x00c0, 0x33c1: 0x00c0, 0x33c2: 0x00c0, 0x33c3: 0x00c0, 0x33c4: 0x00c0, 0x33c5: 0x00c0, + 0x33c6: 0x00c0, 0x33c7: 0x00c0, 0x33c8: 0x00c0, 0x33c9: 0x00c0, 0x33ca: 0x00c0, 0x33cb: 0x00c0, + 0x33cc: 0x00c0, 0x33cd: 0x00c0, 0x33ce: 0x00c0, 0x33cf: 0x00c0, 0x33d0: 0x00c0, 0x33d1: 0x00c0, + 0x33d2: 0x00c0, 0x33d3: 0x00c0, 0x33d4: 0x00c0, 0x33d5: 0x00c0, 0x33d6: 0x00c0, 0x33d7: 0x00c0, + 0x33d8: 0x00c0, 0x33d9: 0x00c0, 0x33da: 0x00c0, 0x33db: 0x00c0, 0x33dc: 0x00c0, 0x33dd: 0x00c0, + 0x33de: 0x00c0, 0x33df: 0x00c0, 0x33e0: 0x00c0, 0x33e1: 0x00c0, 0x33e2: 0x00c0, 0x33e3: 0x00c0, + 0x33ef: 0x0080, + // Block 0xd0, offset 0x3400 + 0x3400: 0x00c0, 0x3401: 0x00c0, 0x3402: 0x00c0, 0x3403: 0x00c0, 0x3404: 0x00c0, 0x3405: 0x00c0, + 0x3406: 0x00c0, 0x3407: 0x00c0, 0x3408: 0x00c0, 0x3409: 0x00c0, 0x340a: 0x00c0, 0x340b: 0x00c0, + 0x340c: 0x00c0, 0x340d: 0x00c0, 0x340e: 0x00c0, 0x340f: 0x00c0, 0x3410: 0x00c0, 0x3411: 0x00c0, + 0x3412: 0x00c0, 0x3413: 0x00c0, 0x3414: 0x00c0, 0x3415: 0x00c0, 0x3416: 0x00c0, 0x3417: 0x00c0, + 0x3418: 0x00c0, 0x3419: 0x00c0, 0x341a: 0x00c0, 0x341b: 0x00c0, 0x341c: 0x00c0, 0x341d: 0x00c0, + 0x341e: 0x00c0, 0x341f: 0x00c0, 0x3420: 0x00c0, 0x3421: 0x00c0, 0x3422: 0x00c0, 0x3423: 0x00c0, + 0x3424: 0x00c0, 0x3425: 0x00c0, 0x3426: 0x00c0, 0x3427: 0x00c0, 0x3428: 0x00c0, 0x3429: 0x00c0, + 0x342a: 0x00c0, 0x342b: 0x00c0, 0x342c: 0x00c0, 0x342d: 0x00c0, 0x342e: 0x00c0, 0x342f: 0x00c0, + 0x3430: 0x00c0, 0x3431: 0x00c0, 0x3432: 0x00c0, 0x3433: 0x00c0, 0x3434: 0x00c0, 0x3435: 0x00c0, + 0x3436: 0x00c0, + // Block 0xd1, offset 0x3440 + 0x3440: 0x00c0, 0x3441: 0x00c0, 0x3442: 0x00c0, 0x3443: 0x00c0, 0x3444: 0x00c0, 0x3445: 0x00c0, + 0x3446: 0x00c0, 0x3447: 0x00c0, 0x3448: 0x00c0, 0x3449: 0x00c0, 0x344a: 0x00c0, 0x344b: 0x00c0, + 0x344c: 0x00c0, 0x344d: 0x00c0, 0x344e: 0x00c0, 0x344f: 0x00c0, 0x3450: 0x00c0, 0x3451: 0x00c0, + 0x3452: 0x00c0, 0x3453: 0x00c0, 0x3454: 0x00c0, 0x3455: 0x00c0, + 0x3460: 0x00c0, 0x3461: 0x00c0, 0x3462: 0x00c0, 0x3463: 0x00c0, + 0x3464: 0x00c0, 0x3465: 0x00c0, 0x3466: 0x00c0, 0x3467: 0x00c0, + // Block 0xd2, offset 0x3480 + 0x3480: 0x00c0, 0x3481: 0x00c0, 0x3482: 0x00c0, 0x3483: 0x00c0, 0x3484: 0x00c0, 0x3485: 0x00c0, + 0x3488: 0x00c0, 
0x348a: 0x00c0, 0x348b: 0x00c0, + 0x348c: 0x00c0, 0x348d: 0x00c0, 0x348e: 0x00c0, 0x348f: 0x00c0, 0x3490: 0x00c0, 0x3491: 0x00c0, + 0x3492: 0x00c0, 0x3493: 0x00c0, 0x3494: 0x00c0, 0x3495: 0x00c0, 0x3496: 0x00c0, 0x3497: 0x00c0, + 0x3498: 0x00c0, 0x3499: 0x00c0, 0x349a: 0x00c0, 0x349b: 0x00c0, 0x349c: 0x00c0, 0x349d: 0x00c0, + 0x349e: 0x00c0, 0x349f: 0x00c0, 0x34a0: 0x00c0, 0x34a1: 0x00c0, 0x34a2: 0x00c0, 0x34a3: 0x00c0, + 0x34a4: 0x00c0, 0x34a5: 0x00c0, 0x34a6: 0x00c0, 0x34a7: 0x00c0, 0x34a8: 0x00c0, 0x34a9: 0x00c0, + 0x34aa: 0x00c0, 0x34ab: 0x00c0, 0x34ac: 0x00c0, 0x34ad: 0x00c0, 0x34ae: 0x00c0, 0x34af: 0x00c0, + 0x34b0: 0x00c0, 0x34b1: 0x00c0, 0x34b2: 0x00c0, 0x34b3: 0x00c0, 0x34b4: 0x00c0, 0x34b5: 0x00c0, + 0x34b7: 0x00c0, 0x34b8: 0x00c0, + 0x34bc: 0x00c0, 0x34bf: 0x00c0, + // Block 0xd3, offset 0x34c0 + 0x34c0: 0x00c0, 0x34c1: 0x00c0, 0x34c2: 0x00c0, 0x34c3: 0x00c0, 0x34c4: 0x00c0, 0x34c5: 0x00c0, + 0x34c6: 0x00c0, 0x34c7: 0x00c0, 0x34c8: 0x00c0, 0x34c9: 0x00c0, 0x34ca: 0x00c0, 0x34cb: 0x00c0, + 0x34cc: 0x00c0, 0x34cd: 0x00c0, 0x34ce: 0x00c0, 0x34cf: 0x00c0, 0x34d0: 0x00c0, 0x34d1: 0x00c0, + 0x34d2: 0x00c0, 0x34d3: 0x00c0, 0x34d4: 0x00c0, 0x34d5: 0x00c0, 0x34d7: 0x0080, + 0x34d8: 0x0080, 0x34d9: 0x0080, 0x34da: 0x0080, 0x34db: 0x0080, 0x34dc: 0x0080, 0x34dd: 0x0080, + 0x34de: 0x0080, 0x34df: 0x0080, 0x34e0: 0x00c0, 0x34e1: 0x00c0, 0x34e2: 0x00c0, 0x34e3: 0x00c0, + 0x34e4: 0x00c0, 0x34e5: 0x00c0, 0x34e6: 0x00c0, 0x34e7: 0x00c0, 0x34e8: 0x00c0, 0x34e9: 0x00c0, + 0x34ea: 0x00c0, 0x34eb: 0x00c0, 0x34ec: 0x00c0, 0x34ed: 0x00c0, 0x34ee: 0x00c0, 0x34ef: 0x00c0, + 0x34f0: 0x00c0, 0x34f1: 0x00c0, 0x34f2: 0x00c0, 0x34f3: 0x00c0, 0x34f4: 0x00c0, 0x34f5: 0x00c0, + 0x34f6: 0x00c0, 0x34f7: 0x0080, 0x34f8: 0x0080, 0x34f9: 0x0080, 0x34fa: 0x0080, 0x34fb: 0x0080, + 0x34fc: 0x0080, 0x34fd: 0x0080, 0x34fe: 0x0080, 0x34ff: 0x0080, + // Block 0xd4, offset 0x3500 + 0x3500: 0x00c0, 0x3501: 0x00c0, 0x3502: 0x00c0, 0x3503: 0x00c0, 0x3504: 0x00c0, 0x3505: 0x00c0, + 0x3506: 0x00c0, 0x3507: 0x00c0, 0x3508: 0x00c0, 0x3509: 0x00c0, 0x350a: 0x00c0, 0x350b: 0x00c0, + 0x350c: 0x00c0, 0x350d: 0x00c0, 0x350e: 0x00c0, 0x350f: 0x00c0, 0x3510: 0x00c0, 0x3511: 0x00c0, + 0x3512: 0x00c0, 0x3513: 0x00c0, 0x3514: 0x00c0, 0x3515: 0x00c0, 0x3516: 0x00c0, 0x3517: 0x00c0, + 0x3518: 0x00c0, 0x3519: 0x00c0, 0x351a: 0x00c0, 0x351b: 0x00c0, 0x351c: 0x00c0, 0x351d: 0x00c0, + 0x351e: 0x00c0, + 0x3527: 0x0080, 0x3528: 0x0080, 0x3529: 0x0080, + 0x352a: 0x0080, 0x352b: 0x0080, 0x352c: 0x0080, 0x352d: 0x0080, 0x352e: 0x0080, 0x352f: 0x0080, + // Block 0xd5, offset 0x3540 + 0x3560: 0x00c0, 0x3561: 0x00c0, 0x3562: 0x00c0, 0x3563: 0x00c0, + 0x3564: 0x00c0, 0x3565: 0x00c0, 0x3566: 0x00c0, 0x3567: 0x00c0, 0x3568: 0x00c0, 0x3569: 0x00c0, + 0x356a: 0x00c0, 0x356b: 0x00c0, 0x356c: 0x00c0, 0x356d: 0x00c0, 0x356e: 0x00c0, 0x356f: 0x00c0, + 0x3570: 0x00c0, 0x3571: 0x00c0, 0x3572: 0x00c0, 0x3574: 0x00c0, 0x3575: 0x00c0, + 0x357b: 0x0080, + 0x357c: 0x0080, 0x357d: 0x0080, 0x357e: 0x0080, 0x357f: 0x0080, + // Block 0xd6, offset 0x3580 + 0x3580: 0x00c0, 0x3581: 0x00c0, 0x3582: 0x00c0, 0x3583: 0x00c0, 0x3584: 0x00c0, 0x3585: 0x00c0, + 0x3586: 0x00c0, 0x3587: 0x00c0, 0x3588: 0x00c0, 0x3589: 0x00c0, 0x358a: 0x00c0, 0x358b: 0x00c0, + 0x358c: 0x00c0, 0x358d: 0x00c0, 0x358e: 0x00c0, 0x358f: 0x00c0, 0x3590: 0x00c0, 0x3591: 0x00c0, + 0x3592: 0x00c0, 0x3593: 0x00c0, 0x3594: 0x00c0, 0x3595: 0x00c0, 0x3596: 0x0080, 0x3597: 0x0080, + 0x3598: 0x0080, 0x3599: 0x0080, 0x359a: 0x0080, 0x359b: 0x0080, + 0x359f: 0x0080, 0x35a0: 0x00c0, 0x35a1: 0x00c0, 0x35a2: 0x00c0, 
0x35a3: 0x00c0, + 0x35a4: 0x00c0, 0x35a5: 0x00c0, 0x35a6: 0x00c0, 0x35a7: 0x00c0, 0x35a8: 0x00c0, 0x35a9: 0x00c0, + 0x35aa: 0x00c0, 0x35ab: 0x00c0, 0x35ac: 0x00c0, 0x35ad: 0x00c0, 0x35ae: 0x00c0, 0x35af: 0x00c0, + 0x35b0: 0x00c0, 0x35b1: 0x00c0, 0x35b2: 0x00c0, 0x35b3: 0x00c0, 0x35b4: 0x00c0, 0x35b5: 0x00c0, + 0x35b6: 0x00c0, 0x35b7: 0x00c0, 0x35b8: 0x00c0, 0x35b9: 0x00c0, + 0x35bf: 0x0080, + // Block 0xd7, offset 0x35c0 + 0x35c0: 0x00c0, 0x35c1: 0x00c0, 0x35c2: 0x00c0, 0x35c3: 0x00c0, 0x35c4: 0x00c0, 0x35c5: 0x00c0, + 0x35c6: 0x00c0, 0x35c7: 0x00c0, 0x35c8: 0x00c0, 0x35c9: 0x00c0, 0x35ca: 0x00c0, 0x35cb: 0x00c0, + 0x35cc: 0x00c0, 0x35cd: 0x00c0, 0x35ce: 0x00c0, 0x35cf: 0x00c0, 0x35d0: 0x00c0, 0x35d1: 0x00c0, + 0x35d2: 0x00c0, 0x35d3: 0x00c0, 0x35d4: 0x00c0, 0x35d5: 0x00c0, 0x35d6: 0x00c0, 0x35d7: 0x00c0, + 0x35d8: 0x00c0, 0x35d9: 0x00c0, 0x35da: 0x00c0, 0x35db: 0x00c0, 0x35dc: 0x00c0, 0x35dd: 0x00c0, + 0x35de: 0x00c0, 0x35df: 0x00c0, 0x35e0: 0x00c0, 0x35e1: 0x00c0, 0x35e2: 0x00c0, 0x35e3: 0x00c0, + 0x35e4: 0x00c0, 0x35e5: 0x00c0, 0x35e6: 0x00c0, 0x35e7: 0x00c0, 0x35e8: 0x00c0, 0x35e9: 0x00c0, + 0x35ea: 0x00c0, 0x35eb: 0x00c0, 0x35ec: 0x00c0, 0x35ed: 0x00c0, 0x35ee: 0x00c0, 0x35ef: 0x00c0, + 0x35f0: 0x00c0, 0x35f1: 0x00c0, 0x35f2: 0x00c0, 0x35f3: 0x00c0, 0x35f4: 0x00c0, 0x35f5: 0x00c0, + 0x35f6: 0x00c0, 0x35f7: 0x00c0, + 0x35fc: 0x0080, 0x35fd: 0x0080, 0x35fe: 0x00c0, 0x35ff: 0x00c0, + // Block 0xd8, offset 0x3600 + 0x3600: 0x00c0, 0x3601: 0x00c3, 0x3602: 0x00c3, 0x3603: 0x00c3, 0x3605: 0x00c3, + 0x3606: 0x00c3, + 0x360c: 0x00c3, 0x360d: 0x00c3, 0x360e: 0x00c3, 0x360f: 0x00c3, 0x3610: 0x00c0, 0x3611: 0x00c0, + 0x3612: 0x00c0, 0x3613: 0x00c0, 0x3615: 0x00c0, 0x3616: 0x00c0, 0x3617: 0x00c0, + 0x3619: 0x00c0, 0x361a: 0x00c0, 0x361b: 0x00c0, 0x361c: 0x00c0, 0x361d: 0x00c0, + 0x361e: 0x00c0, 0x361f: 0x00c0, 0x3620: 0x00c0, 0x3621: 0x00c0, 0x3622: 0x00c0, 0x3623: 0x00c0, + 0x3624: 0x00c0, 0x3625: 0x00c0, 0x3626: 0x00c0, 0x3627: 0x00c0, 0x3628: 0x00c0, 0x3629: 0x00c0, + 0x362a: 0x00c0, 0x362b: 0x00c0, 0x362c: 0x00c0, 0x362d: 0x00c0, 0x362e: 0x00c0, 0x362f: 0x00c0, + 0x3630: 0x00c0, 0x3631: 0x00c0, 0x3632: 0x00c0, 0x3633: 0x00c0, + 0x3638: 0x00c3, 0x3639: 0x00c3, 0x363a: 0x00c3, + 0x363f: 0x00c6, + // Block 0xd9, offset 0x3640 + 0x3640: 0x0080, 0x3641: 0x0080, 0x3642: 0x0080, 0x3643: 0x0080, 0x3644: 0x0080, 0x3645: 0x0080, + 0x3646: 0x0080, 0x3647: 0x0080, + 0x3650: 0x0080, 0x3651: 0x0080, + 0x3652: 0x0080, 0x3653: 0x0080, 0x3654: 0x0080, 0x3655: 0x0080, 0x3656: 0x0080, 0x3657: 0x0080, + 0x3658: 0x0080, + 0x3660: 0x00c0, 0x3661: 0x00c0, 0x3662: 0x00c0, 0x3663: 0x00c0, + 0x3664: 0x00c0, 0x3665: 0x00c0, 0x3666: 0x00c0, 0x3667: 0x00c0, 0x3668: 0x00c0, 0x3669: 0x00c0, + 0x366a: 0x00c0, 0x366b: 0x00c0, 0x366c: 0x00c0, 0x366d: 0x00c0, 0x366e: 0x00c0, 0x366f: 0x00c0, + 0x3670: 0x00c0, 0x3671: 0x00c0, 0x3672: 0x00c0, 0x3673: 0x00c0, 0x3674: 0x00c0, 0x3675: 0x00c0, + 0x3676: 0x00c0, 0x3677: 0x00c0, 0x3678: 0x00c0, 0x3679: 0x00c0, 0x367a: 0x00c0, 0x367b: 0x00c0, + 0x367c: 0x00c0, 0x367d: 0x0080, 0x367e: 0x0080, 0x367f: 0x0080, + // Block 0xda, offset 0x3680 + 0x3680: 0x00c0, 0x3681: 0x00c0, 0x3682: 0x00c0, 0x3683: 0x00c0, 0x3684: 0x00c0, 0x3685: 0x00c0, + 0x3686: 0x00c0, 0x3687: 0x00c0, 0x3688: 0x00c0, 0x3689: 0x00c0, 0x368a: 0x00c0, 0x368b: 0x00c0, + 0x368c: 0x00c0, 0x368d: 0x00c0, 0x368e: 0x00c0, 0x368f: 0x00c0, 0x3690: 0x00c0, 0x3691: 0x00c0, + 0x3692: 0x00c0, 0x3693: 0x00c0, 0x3694: 0x00c0, 0x3695: 0x00c0, 0x3696: 0x00c0, 0x3697: 0x00c0, + 0x3698: 0x00c0, 0x3699: 0x00c0, 0x369a: 0x00c0, 0x369b: 0x00c0, 
0x369c: 0x00c0, 0x369d: 0x0080, + 0x369e: 0x0080, 0x369f: 0x0080, + // Block 0xdb, offset 0x36c0 + 0x36c0: 0x00c2, 0x36c1: 0x00c2, 0x36c2: 0x00c2, 0x36c3: 0x00c2, 0x36c4: 0x00c2, 0x36c5: 0x00c4, + 0x36c6: 0x00c0, 0x36c7: 0x00c4, 0x36c8: 0x0080, 0x36c9: 0x00c4, 0x36ca: 0x00c4, 0x36cb: 0x00c0, + 0x36cc: 0x00c0, 0x36cd: 0x00c1, 0x36ce: 0x00c4, 0x36cf: 0x00c4, 0x36d0: 0x00c4, 0x36d1: 0x00c4, + 0x36d2: 0x00c4, 0x36d3: 0x00c2, 0x36d4: 0x00c2, 0x36d5: 0x00c2, 0x36d6: 0x00c2, 0x36d7: 0x00c1, + 0x36d8: 0x00c2, 0x36d9: 0x00c2, 0x36da: 0x00c2, 0x36db: 0x00c2, 0x36dc: 0x00c2, 0x36dd: 0x00c4, + 0x36de: 0x00c2, 0x36df: 0x00c2, 0x36e0: 0x00c2, 0x36e1: 0x00c4, 0x36e2: 0x00c0, 0x36e3: 0x00c0, + 0x36e4: 0x00c4, 0x36e5: 0x00c3, 0x36e6: 0x00c3, + 0x36eb: 0x0082, 0x36ec: 0x0082, 0x36ed: 0x0082, 0x36ee: 0x0082, 0x36ef: 0x0084, + 0x36f0: 0x0080, 0x36f1: 0x0080, 0x36f2: 0x0080, 0x36f3: 0x0080, 0x36f4: 0x0080, 0x36f5: 0x0080, + 0x36f6: 0x0080, + // Block 0xdc, offset 0x3700 + 0x3700: 0x00c0, 0x3701: 0x00c0, 0x3702: 0x00c0, 0x3703: 0x00c0, 0x3704: 0x00c0, 0x3705: 0x00c0, + 0x3706: 0x00c0, 0x3707: 0x00c0, 0x3708: 0x00c0, 0x3709: 0x00c0, 0x370a: 0x00c0, 0x370b: 0x00c0, + 0x370c: 0x00c0, 0x370d: 0x00c0, 0x370e: 0x00c0, 0x370f: 0x00c0, 0x3710: 0x00c0, 0x3711: 0x00c0, + 0x3712: 0x00c0, 0x3713: 0x00c0, 0x3714: 0x00c0, 0x3715: 0x00c0, 0x3716: 0x00c0, 0x3717: 0x00c0, + 0x3718: 0x00c0, 0x3719: 0x00c0, 0x371a: 0x00c0, 0x371b: 0x00c0, 0x371c: 0x00c0, 0x371d: 0x00c0, + 0x371e: 0x00c0, 0x371f: 0x00c0, 0x3720: 0x00c0, 0x3721: 0x00c0, 0x3722: 0x00c0, 0x3723: 0x00c0, + 0x3724: 0x00c0, 0x3725: 0x00c0, 0x3726: 0x00c0, 0x3727: 0x00c0, 0x3728: 0x00c0, 0x3729: 0x00c0, + 0x372a: 0x00c0, 0x372b: 0x00c0, 0x372c: 0x00c0, 0x372d: 0x00c0, 0x372e: 0x00c0, 0x372f: 0x00c0, + 0x3730: 0x00c0, 0x3731: 0x00c0, 0x3732: 0x00c0, 0x3733: 0x00c0, 0x3734: 0x00c0, 0x3735: 0x00c0, + 0x3739: 0x0080, 0x373a: 0x0080, 0x373b: 0x0080, + 0x373c: 0x0080, 0x373d: 0x0080, 0x373e: 0x0080, 0x373f: 0x0080, + // Block 0xdd, offset 0x3740 + 0x3740: 0x00c0, 0x3741: 0x00c0, 0x3742: 0x00c0, 0x3743: 0x00c0, 0x3744: 0x00c0, 0x3745: 0x00c0, + 0x3746: 0x00c0, 0x3747: 0x00c0, 0x3748: 0x00c0, 0x3749: 0x00c0, 0x374a: 0x00c0, 0x374b: 0x00c0, + 0x374c: 0x00c0, 0x374d: 0x00c0, 0x374e: 0x00c0, 0x374f: 0x00c0, 0x3750: 0x00c0, 0x3751: 0x00c0, + 0x3752: 0x00c0, 0x3753: 0x00c0, 0x3754: 0x00c0, 0x3755: 0x00c0, + 0x3758: 0x0080, 0x3759: 0x0080, 0x375a: 0x0080, 0x375b: 0x0080, 0x375c: 0x0080, 0x375d: 0x0080, + 0x375e: 0x0080, 0x375f: 0x0080, 0x3760: 0x00c0, 0x3761: 0x00c0, 0x3762: 0x00c0, 0x3763: 0x00c0, + 0x3764: 0x00c0, 0x3765: 0x00c0, 0x3766: 0x00c0, 0x3767: 0x00c0, 0x3768: 0x00c0, 0x3769: 0x00c0, + 0x376a: 0x00c0, 0x376b: 0x00c0, 0x376c: 0x00c0, 0x376d: 0x00c0, 0x376e: 0x00c0, 0x376f: 0x00c0, + 0x3770: 0x00c0, 0x3771: 0x00c0, 0x3772: 0x00c0, + 0x3778: 0x0080, 0x3779: 0x0080, 0x377a: 0x0080, 0x377b: 0x0080, + 0x377c: 0x0080, 0x377d: 0x0080, 0x377e: 0x0080, 0x377f: 0x0080, + // Block 0xde, offset 0x3780 + 0x3780: 0x00c2, 0x3781: 0x00c4, 0x3782: 0x00c2, 0x3783: 0x00c4, 0x3784: 0x00c4, 0x3785: 0x00c4, + 0x3786: 0x00c2, 0x3787: 0x00c2, 0x3788: 0x00c2, 0x3789: 0x00c4, 0x378a: 0x00c2, 0x378b: 0x00c2, + 0x378c: 0x00c4, 0x378d: 0x00c2, 0x378e: 0x00c4, 0x378f: 0x00c4, 0x3790: 0x00c2, 0x3791: 0x00c4, + 0x3799: 0x0080, 0x379a: 0x0080, 0x379b: 0x0080, 0x379c: 0x0080, + 0x37a9: 0x0084, + 0x37aa: 0x0084, 0x37ab: 0x0084, 0x37ac: 0x0084, 0x37ad: 0x0082, 0x37ae: 0x0082, 0x37af: 0x0080, + // Block 0xdf, offset 0x37c0 + 0x37c0: 0x00c0, 0x37c1: 0x00c0, 0x37c2: 0x00c0, 0x37c3: 0x00c0, 0x37c4: 0x00c0, 0x37c5: 
0x00c0, + 0x37c6: 0x00c0, 0x37c7: 0x00c0, 0x37c8: 0x00c0, 0x37c9: 0x00c0, 0x37ca: 0x00c0, 0x37cb: 0x00c0, + 0x37cc: 0x00c0, 0x37cd: 0x00c0, 0x37ce: 0x00c0, 0x37cf: 0x00c0, 0x37d0: 0x00c0, 0x37d1: 0x00c0, + 0x37d2: 0x00c0, 0x37d3: 0x00c0, 0x37d4: 0x00c0, 0x37d5: 0x00c0, 0x37d6: 0x00c0, 0x37d7: 0x00c0, + 0x37d8: 0x00c0, 0x37d9: 0x00c0, 0x37da: 0x00c0, 0x37db: 0x00c0, 0x37dc: 0x00c0, 0x37dd: 0x00c0, + 0x37de: 0x00c0, 0x37df: 0x00c0, 0x37e0: 0x00c0, 0x37e1: 0x00c0, 0x37e2: 0x00c0, 0x37e3: 0x00c0, + 0x37e4: 0x00c0, 0x37e5: 0x00c0, 0x37e6: 0x00c0, 0x37e7: 0x00c0, 0x37e8: 0x00c0, 0x37e9: 0x00c0, + 0x37ea: 0x00c0, 0x37eb: 0x00c0, 0x37ec: 0x00c0, 0x37ed: 0x00c0, 0x37ee: 0x00c0, 0x37ef: 0x00c0, + 0x37f0: 0x00c0, 0x37f1: 0x00c0, 0x37f2: 0x00c0, + // Block 0xe0, offset 0x3800 + 0x3800: 0x00c0, 0x3801: 0x00c0, 0x3802: 0x00c0, 0x3803: 0x00c0, 0x3804: 0x00c0, 0x3805: 0x00c0, + 0x3806: 0x00c0, 0x3807: 0x00c0, 0x3808: 0x00c0, 0x3809: 0x00c0, 0x380a: 0x00c0, 0x380b: 0x00c0, + 0x380c: 0x00c0, 0x380d: 0x00c0, 0x380e: 0x00c0, 0x380f: 0x00c0, 0x3810: 0x00c0, 0x3811: 0x00c0, + 0x3812: 0x00c0, 0x3813: 0x00c0, 0x3814: 0x00c0, 0x3815: 0x00c0, 0x3816: 0x00c0, 0x3817: 0x00c0, + 0x3818: 0x00c0, 0x3819: 0x00c0, 0x381a: 0x00c0, 0x381b: 0x00c0, 0x381c: 0x00c0, 0x381d: 0x00c0, + 0x381e: 0x00c0, 0x381f: 0x00c0, 0x3820: 0x00c0, 0x3821: 0x00c0, 0x3822: 0x00c0, 0x3823: 0x00c0, + 0x3824: 0x00c0, 0x3825: 0x00c0, 0x3826: 0x00c0, 0x3827: 0x00c0, 0x3828: 0x00c0, 0x3829: 0x00c0, + 0x382a: 0x00c0, 0x382b: 0x00c0, 0x382c: 0x00c0, 0x382d: 0x00c0, 0x382e: 0x00c0, 0x382f: 0x00c0, + 0x3830: 0x00c0, 0x3831: 0x00c0, 0x3832: 0x00c0, + 0x383a: 0x0080, 0x383b: 0x0080, + 0x383c: 0x0080, 0x383d: 0x0080, 0x383e: 0x0080, 0x383f: 0x0080, + // Block 0xe1, offset 0x3840 + 0x3860: 0x0080, 0x3861: 0x0080, 0x3862: 0x0080, 0x3863: 0x0080, + 0x3864: 0x0080, 0x3865: 0x0080, 0x3866: 0x0080, 0x3867: 0x0080, 0x3868: 0x0080, 0x3869: 0x0080, + 0x386a: 0x0080, 0x386b: 0x0080, 0x386c: 0x0080, 0x386d: 0x0080, 0x386e: 0x0080, 0x386f: 0x0080, + 0x3870: 0x0080, 0x3871: 0x0080, 0x3872: 0x0080, 0x3873: 0x0080, 0x3874: 0x0080, 0x3875: 0x0080, + 0x3876: 0x0080, 0x3877: 0x0080, 0x3878: 0x0080, 0x3879: 0x0080, 0x387a: 0x0080, 0x387b: 0x0080, + 0x387c: 0x0080, 0x387d: 0x0080, 0x387e: 0x0080, + // Block 0xe2, offset 0x3880 + 0x3880: 0x00c0, 0x3881: 0x00c3, 0x3882: 0x00c0, 0x3883: 0x00c0, 0x3884: 0x00c0, 0x3885: 0x00c0, + 0x3886: 0x00c0, 0x3887: 0x00c0, 0x3888: 0x00c0, 0x3889: 0x00c0, 0x388a: 0x00c0, 0x388b: 0x00c0, + 0x388c: 0x00c0, 0x388d: 0x00c0, 0x388e: 0x00c0, 0x388f: 0x00c0, 0x3890: 0x00c0, 0x3891: 0x00c0, + 0x3892: 0x00c0, 0x3893: 0x00c0, 0x3894: 0x00c0, 0x3895: 0x00c0, 0x3896: 0x00c0, 0x3897: 0x00c0, + 0x3898: 0x00c0, 0x3899: 0x00c0, 0x389a: 0x00c0, 0x389b: 0x00c0, 0x389c: 0x00c0, 0x389d: 0x00c0, + 0x389e: 0x00c0, 0x389f: 0x00c0, 0x38a0: 0x00c0, 0x38a1: 0x00c0, 0x38a2: 0x00c0, 0x38a3: 0x00c0, + 0x38a4: 0x00c0, 0x38a5: 0x00c0, 0x38a6: 0x00c0, 0x38a7: 0x00c0, 0x38a8: 0x00c0, 0x38a9: 0x00c0, + 0x38aa: 0x00c0, 0x38ab: 0x00c0, 0x38ac: 0x00c0, 0x38ad: 0x00c0, 0x38ae: 0x00c0, 0x38af: 0x00c0, + 0x38b0: 0x00c0, 0x38b1: 0x00c0, 0x38b2: 0x00c0, 0x38b3: 0x00c0, 0x38b4: 0x00c0, 0x38b5: 0x00c0, + 0x38b6: 0x00c0, 0x38b7: 0x00c0, 0x38b8: 0x00c3, 0x38b9: 0x00c3, 0x38ba: 0x00c3, 0x38bb: 0x00c3, + 0x38bc: 0x00c3, 0x38bd: 0x00c3, 0x38be: 0x00c3, 0x38bf: 0x00c3, + // Block 0xe3, offset 0x38c0 + 0x38c0: 0x00c3, 0x38c1: 0x00c3, 0x38c2: 0x00c3, 0x38c3: 0x00c3, 0x38c4: 0x00c3, 0x38c5: 0x00c3, + 0x38c6: 0x00c6, 0x38c7: 0x0080, 0x38c8: 0x0080, 0x38c9: 0x0080, 0x38ca: 0x0080, 0x38cb: 0x0080, + 
0x38cc: 0x0080, 0x38cd: 0x0080, + 0x38d2: 0x0080, 0x38d3: 0x0080, 0x38d4: 0x0080, 0x38d5: 0x0080, 0x38d6: 0x0080, 0x38d7: 0x0080, + 0x38d8: 0x0080, 0x38d9: 0x0080, 0x38da: 0x0080, 0x38db: 0x0080, 0x38dc: 0x0080, 0x38dd: 0x0080, + 0x38de: 0x0080, 0x38df: 0x0080, 0x38e0: 0x0080, 0x38e1: 0x0080, 0x38e2: 0x0080, 0x38e3: 0x0080, + 0x38e4: 0x0080, 0x38e5: 0x0080, 0x38e6: 0x00c0, 0x38e7: 0x00c0, 0x38e8: 0x00c0, 0x38e9: 0x00c0, + 0x38ea: 0x00c0, 0x38eb: 0x00c0, 0x38ec: 0x00c0, 0x38ed: 0x00c0, 0x38ee: 0x00c0, 0x38ef: 0x00c0, + 0x38ff: 0x00c6, + // Block 0xe4, offset 0x3900 + 0x3900: 0x00c3, 0x3901: 0x00c3, 0x3902: 0x00c0, 0x3903: 0x00c0, 0x3904: 0x00c0, 0x3905: 0x00c0, + 0x3906: 0x00c0, 0x3907: 0x00c0, 0x3908: 0x00c0, 0x3909: 0x00c0, 0x390a: 0x00c0, 0x390b: 0x00c0, + 0x390c: 0x00c0, 0x390d: 0x00c0, 0x390e: 0x00c0, 0x390f: 0x00c0, 0x3910: 0x00c0, 0x3911: 0x00c0, + 0x3912: 0x00c0, 0x3913: 0x00c0, 0x3914: 0x00c0, 0x3915: 0x00c0, 0x3916: 0x00c0, 0x3917: 0x00c0, + 0x3918: 0x00c0, 0x3919: 0x00c0, 0x391a: 0x00c0, 0x391b: 0x00c0, 0x391c: 0x00c0, 0x391d: 0x00c0, + 0x391e: 0x00c0, 0x391f: 0x00c0, 0x3920: 0x00c0, 0x3921: 0x00c0, 0x3922: 0x00c0, 0x3923: 0x00c0, + 0x3924: 0x00c0, 0x3925: 0x00c0, 0x3926: 0x00c0, 0x3927: 0x00c0, 0x3928: 0x00c0, 0x3929: 0x00c0, + 0x392a: 0x00c0, 0x392b: 0x00c0, 0x392c: 0x00c0, 0x392d: 0x00c0, 0x392e: 0x00c0, 0x392f: 0x00c0, + 0x3930: 0x00c0, 0x3931: 0x00c0, 0x3932: 0x00c0, 0x3933: 0x00c3, 0x3934: 0x00c3, 0x3935: 0x00c3, + 0x3936: 0x00c3, 0x3937: 0x00c0, 0x3938: 0x00c0, 0x3939: 0x00c6, 0x393a: 0x00c3, 0x393b: 0x0080, + 0x393c: 0x0080, 0x393d: 0x0040, 0x393e: 0x0080, 0x393f: 0x0080, + // Block 0xe5, offset 0x3940 + 0x3940: 0x0080, 0x3941: 0x0080, + 0x3950: 0x00c0, 0x3951: 0x00c0, + 0x3952: 0x00c0, 0x3953: 0x00c0, 0x3954: 0x00c0, 0x3955: 0x00c0, 0x3956: 0x00c0, 0x3957: 0x00c0, + 0x3958: 0x00c0, 0x3959: 0x00c0, 0x395a: 0x00c0, 0x395b: 0x00c0, 0x395c: 0x00c0, 0x395d: 0x00c0, + 0x395e: 0x00c0, 0x395f: 0x00c0, 0x3960: 0x00c0, 0x3961: 0x00c0, 0x3962: 0x00c0, 0x3963: 0x00c0, + 0x3964: 0x00c0, 0x3965: 0x00c0, 0x3966: 0x00c0, 0x3967: 0x00c0, 0x3968: 0x00c0, + 0x3970: 0x00c0, 0x3971: 0x00c0, 0x3972: 0x00c0, 0x3973: 0x00c0, 0x3974: 0x00c0, 0x3975: 0x00c0, + 0x3976: 0x00c0, 0x3977: 0x00c0, 0x3978: 0x00c0, 0x3979: 0x00c0, + // Block 0xe6, offset 0x3980 + 0x3980: 0x00c3, 0x3981: 0x00c3, 0x3982: 0x00c3, 0x3983: 0x00c0, 0x3984: 0x00c0, 0x3985: 0x00c0, + 0x3986: 0x00c0, 0x3987: 0x00c0, 0x3988: 0x00c0, 0x3989: 0x00c0, 0x398a: 0x00c0, 0x398b: 0x00c0, + 0x398c: 0x00c0, 0x398d: 0x00c0, 0x398e: 0x00c0, 0x398f: 0x00c0, 0x3990: 0x00c0, 0x3991: 0x00c0, + 0x3992: 0x00c0, 0x3993: 0x00c0, 0x3994: 0x00c0, 0x3995: 0x00c0, 0x3996: 0x00c0, 0x3997: 0x00c0, + 0x3998: 0x00c0, 0x3999: 0x00c0, 0x399a: 0x00c0, 0x399b: 0x00c0, 0x399c: 0x00c0, 0x399d: 0x00c0, + 0x399e: 0x00c0, 0x399f: 0x00c0, 0x39a0: 0x00c0, 0x39a1: 0x00c0, 0x39a2: 0x00c0, 0x39a3: 0x00c0, + 0x39a4: 0x00c0, 0x39a5: 0x00c0, 0x39a6: 0x00c0, 0x39a7: 0x00c3, 0x39a8: 0x00c3, 0x39a9: 0x00c3, + 0x39aa: 0x00c3, 0x39ab: 0x00c3, 0x39ac: 0x00c0, 0x39ad: 0x00c3, 0x39ae: 0x00c3, 0x39af: 0x00c3, + 0x39b0: 0x00c3, 0x39b1: 0x00c3, 0x39b2: 0x00c3, 0x39b3: 0x00c6, 0x39b4: 0x00c6, + 0x39b6: 0x00c0, 0x39b7: 0x00c0, 0x39b8: 0x00c0, 0x39b9: 0x00c0, 0x39ba: 0x00c0, 0x39bb: 0x00c0, + 0x39bc: 0x00c0, 0x39bd: 0x00c0, 0x39be: 0x00c0, 0x39bf: 0x00c0, + // Block 0xe7, offset 0x39c0 + 0x39c0: 0x0080, 0x39c1: 0x0080, 0x39c2: 0x0080, 0x39c3: 0x0080, + 0x39d0: 0x00c0, 0x39d1: 0x00c0, + 0x39d2: 0x00c0, 0x39d3: 0x00c0, 0x39d4: 0x00c0, 0x39d5: 0x00c0, 0x39d6: 0x00c0, 0x39d7: 0x00c0, + 
0x39d8: 0x00c0, 0x39d9: 0x00c0, 0x39da: 0x00c0, 0x39db: 0x00c0, 0x39dc: 0x00c0, 0x39dd: 0x00c0, + 0x39de: 0x00c0, 0x39df: 0x00c0, 0x39e0: 0x00c0, 0x39e1: 0x00c0, 0x39e2: 0x00c0, 0x39e3: 0x00c0, + 0x39e4: 0x00c0, 0x39e5: 0x00c0, 0x39e6: 0x00c0, 0x39e7: 0x00c0, 0x39e8: 0x00c0, 0x39e9: 0x00c0, + 0x39ea: 0x00c0, 0x39eb: 0x00c0, 0x39ec: 0x00c0, 0x39ed: 0x00c0, 0x39ee: 0x00c0, 0x39ef: 0x00c0, + 0x39f0: 0x00c0, 0x39f1: 0x00c0, 0x39f2: 0x00c0, 0x39f3: 0x00c3, 0x39f4: 0x0080, 0x39f5: 0x0080, + 0x39f6: 0x00c0, + // Block 0xe8, offset 0x3a00 + 0x3a00: 0x00c3, 0x3a01: 0x00c3, 0x3a02: 0x00c0, 0x3a03: 0x00c0, 0x3a04: 0x00c0, 0x3a05: 0x00c0, + 0x3a06: 0x00c0, 0x3a07: 0x00c0, 0x3a08: 0x00c0, 0x3a09: 0x00c0, 0x3a0a: 0x00c0, 0x3a0b: 0x00c0, + 0x3a0c: 0x00c0, 0x3a0d: 0x00c0, 0x3a0e: 0x00c0, 0x3a0f: 0x00c0, 0x3a10: 0x00c0, 0x3a11: 0x00c0, + 0x3a12: 0x00c0, 0x3a13: 0x00c0, 0x3a14: 0x00c0, 0x3a15: 0x00c0, 0x3a16: 0x00c0, 0x3a17: 0x00c0, + 0x3a18: 0x00c0, 0x3a19: 0x00c0, 0x3a1a: 0x00c0, 0x3a1b: 0x00c0, 0x3a1c: 0x00c0, 0x3a1d: 0x00c0, + 0x3a1e: 0x00c0, 0x3a1f: 0x00c0, 0x3a20: 0x00c0, 0x3a21: 0x00c0, 0x3a22: 0x00c0, 0x3a23: 0x00c0, + 0x3a24: 0x00c0, 0x3a25: 0x00c0, 0x3a26: 0x00c0, 0x3a27: 0x00c0, 0x3a28: 0x00c0, 0x3a29: 0x00c0, + 0x3a2a: 0x00c0, 0x3a2b: 0x00c0, 0x3a2c: 0x00c0, 0x3a2d: 0x00c0, 0x3a2e: 0x00c0, 0x3a2f: 0x00c0, + 0x3a30: 0x00c0, 0x3a31: 0x00c0, 0x3a32: 0x00c0, 0x3a33: 0x00c0, 0x3a34: 0x00c0, 0x3a35: 0x00c0, + 0x3a36: 0x00c3, 0x3a37: 0x00c3, 0x3a38: 0x00c3, 0x3a39: 0x00c3, 0x3a3a: 0x00c3, 0x3a3b: 0x00c3, + 0x3a3c: 0x00c3, 0x3a3d: 0x00c3, 0x3a3e: 0x00c3, 0x3a3f: 0x00c0, + // Block 0xe9, offset 0x3a40 + 0x3a40: 0x00c5, 0x3a41: 0x00c0, 0x3a42: 0x00c0, 0x3a43: 0x00c0, 0x3a44: 0x00c0, 0x3a45: 0x0080, + 0x3a46: 0x0080, 0x3a47: 0x0080, 0x3a48: 0x0080, 0x3a49: 0x0080, 0x3a4a: 0x00c3, 0x3a4b: 0x00c3, + 0x3a4c: 0x00c3, 0x3a4d: 0x0080, 0x3a50: 0x00c0, 0x3a51: 0x00c0, + 0x3a52: 0x00c0, 0x3a53: 0x00c0, 0x3a54: 0x00c0, 0x3a55: 0x00c0, 0x3a56: 0x00c0, 0x3a57: 0x00c0, + 0x3a58: 0x00c0, 0x3a59: 0x00c0, 0x3a5a: 0x00c0, 0x3a5b: 0x0080, 0x3a5c: 0x00c0, 0x3a5d: 0x0080, + 0x3a5e: 0x0080, 0x3a5f: 0x0080, 0x3a61: 0x0080, 0x3a62: 0x0080, 0x3a63: 0x0080, + 0x3a64: 0x0080, 0x3a65: 0x0080, 0x3a66: 0x0080, 0x3a67: 0x0080, 0x3a68: 0x0080, 0x3a69: 0x0080, + 0x3a6a: 0x0080, 0x3a6b: 0x0080, 0x3a6c: 0x0080, 0x3a6d: 0x0080, 0x3a6e: 0x0080, 0x3a6f: 0x0080, + 0x3a70: 0x0080, 0x3a71: 0x0080, 0x3a72: 0x0080, 0x3a73: 0x0080, 0x3a74: 0x0080, + // Block 0xea, offset 0x3a80 + 0x3a80: 0x00c0, 0x3a81: 0x00c0, 0x3a82: 0x00c0, 0x3a83: 0x00c0, 0x3a84: 0x00c0, 0x3a85: 0x00c0, + 0x3a86: 0x00c0, 0x3a87: 0x00c0, 0x3a88: 0x00c0, 0x3a89: 0x00c0, 0x3a8a: 0x00c0, 0x3a8b: 0x00c0, + 0x3a8c: 0x00c0, 0x3a8d: 0x00c0, 0x3a8e: 0x00c0, 0x3a8f: 0x00c0, 0x3a90: 0x00c0, 0x3a91: 0x00c0, + 0x3a93: 0x00c0, 0x3a94: 0x00c0, 0x3a95: 0x00c0, 0x3a96: 0x00c0, 0x3a97: 0x00c0, + 0x3a98: 0x00c0, 0x3a99: 0x00c0, 0x3a9a: 0x00c0, 0x3a9b: 0x00c0, 0x3a9c: 0x00c0, 0x3a9d: 0x00c0, + 0x3a9e: 0x00c0, 0x3a9f: 0x00c0, 0x3aa0: 0x00c0, 0x3aa1: 0x00c0, 0x3aa2: 0x00c0, 0x3aa3: 0x00c0, + 0x3aa4: 0x00c0, 0x3aa5: 0x00c0, 0x3aa6: 0x00c0, 0x3aa7: 0x00c0, 0x3aa8: 0x00c0, 0x3aa9: 0x00c0, + 0x3aaa: 0x00c0, 0x3aab: 0x00c0, 0x3aac: 0x00c0, 0x3aad: 0x00c0, 0x3aae: 0x00c0, 0x3aaf: 0x00c3, + 0x3ab0: 0x00c3, 0x3ab1: 0x00c3, 0x3ab2: 0x00c0, 0x3ab3: 0x00c0, 0x3ab4: 0x00c3, 0x3ab5: 0x00c5, + 0x3ab6: 0x00c3, 0x3ab7: 0x00c3, 0x3ab8: 0x0080, 0x3ab9: 0x0080, 0x3aba: 0x0080, 0x3abb: 0x0080, + 0x3abc: 0x0080, 0x3abd: 0x0080, 0x3abe: 0x00c3, + // Block 0xeb, offset 0x3ac0 + 0x3ac0: 0x00c0, 0x3ac1: 0x00c0, 0x3ac2: 
0x00c0, 0x3ac3: 0x00c0, 0x3ac4: 0x00c0, 0x3ac5: 0x00c0, + 0x3ac6: 0x00c0, 0x3ac8: 0x00c0, 0x3aca: 0x00c0, 0x3acb: 0x00c0, + 0x3acc: 0x00c0, 0x3acd: 0x00c0, 0x3acf: 0x00c0, 0x3ad0: 0x00c0, 0x3ad1: 0x00c0, + 0x3ad2: 0x00c0, 0x3ad3: 0x00c0, 0x3ad4: 0x00c0, 0x3ad5: 0x00c0, 0x3ad6: 0x00c0, 0x3ad7: 0x00c0, + 0x3ad8: 0x00c0, 0x3ad9: 0x00c0, 0x3ada: 0x00c0, 0x3adb: 0x00c0, 0x3adc: 0x00c0, 0x3add: 0x00c0, + 0x3adf: 0x00c0, 0x3ae0: 0x00c0, 0x3ae1: 0x00c0, 0x3ae2: 0x00c0, 0x3ae3: 0x00c0, + 0x3ae4: 0x00c0, 0x3ae5: 0x00c0, 0x3ae6: 0x00c0, 0x3ae7: 0x00c0, 0x3ae8: 0x00c0, 0x3ae9: 0x0080, + 0x3af0: 0x00c0, 0x3af1: 0x00c0, 0x3af2: 0x00c0, 0x3af3: 0x00c0, 0x3af4: 0x00c0, 0x3af5: 0x00c0, + 0x3af6: 0x00c0, 0x3af7: 0x00c0, 0x3af8: 0x00c0, 0x3af9: 0x00c0, 0x3afa: 0x00c0, 0x3afb: 0x00c0, + 0x3afc: 0x00c0, 0x3afd: 0x00c0, 0x3afe: 0x00c0, 0x3aff: 0x00c0, + // Block 0xec, offset 0x3b00 + 0x3b00: 0x00c0, 0x3b01: 0x00c0, 0x3b02: 0x00c0, 0x3b03: 0x00c0, 0x3b04: 0x00c0, 0x3b05: 0x00c0, + 0x3b06: 0x00c0, 0x3b07: 0x00c0, 0x3b08: 0x00c0, 0x3b09: 0x00c0, 0x3b0a: 0x00c0, 0x3b0b: 0x00c0, + 0x3b0c: 0x00c0, 0x3b0d: 0x00c0, 0x3b0e: 0x00c0, 0x3b0f: 0x00c0, 0x3b10: 0x00c0, 0x3b11: 0x00c0, + 0x3b12: 0x00c0, 0x3b13: 0x00c0, 0x3b14: 0x00c0, 0x3b15: 0x00c0, 0x3b16: 0x00c0, 0x3b17: 0x00c0, + 0x3b18: 0x00c0, 0x3b19: 0x00c0, 0x3b1a: 0x00c0, 0x3b1b: 0x00c0, 0x3b1c: 0x00c0, 0x3b1d: 0x00c0, + 0x3b1e: 0x00c0, 0x3b1f: 0x00c3, 0x3b20: 0x00c0, 0x3b21: 0x00c0, 0x3b22: 0x00c0, 0x3b23: 0x00c3, + 0x3b24: 0x00c3, 0x3b25: 0x00c3, 0x3b26: 0x00c3, 0x3b27: 0x00c3, 0x3b28: 0x00c3, 0x3b29: 0x00c3, + 0x3b2a: 0x00c6, + 0x3b30: 0x00c0, 0x3b31: 0x00c0, 0x3b32: 0x00c0, 0x3b33: 0x00c0, 0x3b34: 0x00c0, 0x3b35: 0x00c0, + 0x3b36: 0x00c0, 0x3b37: 0x00c0, 0x3b38: 0x00c0, 0x3b39: 0x00c0, + // Block 0xed, offset 0x3b40 + 0x3b40: 0x00c3, 0x3b41: 0x00c3, 0x3b42: 0x00c0, 0x3b43: 0x00c0, 0x3b45: 0x00c0, + 0x3b46: 0x00c0, 0x3b47: 0x00c0, 0x3b48: 0x00c0, 0x3b49: 0x00c0, 0x3b4a: 0x00c0, 0x3b4b: 0x00c0, + 0x3b4c: 0x00c0, 0x3b4f: 0x00c0, 0x3b50: 0x00c0, + 0x3b53: 0x00c0, 0x3b54: 0x00c0, 0x3b55: 0x00c0, 0x3b56: 0x00c0, 0x3b57: 0x00c0, + 0x3b58: 0x00c0, 0x3b59: 0x00c0, 0x3b5a: 0x00c0, 0x3b5b: 0x00c0, 0x3b5c: 0x00c0, 0x3b5d: 0x00c0, + 0x3b5e: 0x00c0, 0x3b5f: 0x00c0, 0x3b60: 0x00c0, 0x3b61: 0x00c0, 0x3b62: 0x00c0, 0x3b63: 0x00c0, + 0x3b64: 0x00c0, 0x3b65: 0x00c0, 0x3b66: 0x00c0, 0x3b67: 0x00c0, 0x3b68: 0x00c0, + 0x3b6a: 0x00c0, 0x3b6b: 0x00c0, 0x3b6c: 0x00c0, 0x3b6d: 0x00c0, 0x3b6e: 0x00c0, 0x3b6f: 0x00c0, + 0x3b70: 0x00c0, 0x3b72: 0x00c0, 0x3b73: 0x00c0, 0x3b75: 0x00c0, + 0x3b76: 0x00c0, 0x3b77: 0x00c0, 0x3b78: 0x00c0, 0x3b79: 0x00c0, + 0x3b7c: 0x00c3, 0x3b7d: 0x00c0, 0x3b7e: 0x00c0, 0x3b7f: 0x00c0, + // Block 0xee, offset 0x3b80 + 0x3b80: 0x00c3, 0x3b81: 0x00c0, 0x3b82: 0x00c0, 0x3b83: 0x00c0, 0x3b84: 0x00c0, + 0x3b87: 0x00c0, 0x3b88: 0x00c0, 0x3b8b: 0x00c0, + 0x3b8c: 0x00c0, 0x3b8d: 0x00c5, 0x3b90: 0x00c0, + 0x3b97: 0x00c0, + 0x3b9d: 0x00c0, + 0x3b9e: 0x00c0, 0x3b9f: 0x00c0, 0x3ba0: 0x00c0, 0x3ba1: 0x00c0, 0x3ba2: 0x00c0, 0x3ba3: 0x00c0, + 0x3ba6: 0x00c3, 0x3ba7: 0x00c3, 0x3ba8: 0x00c3, 0x3ba9: 0x00c3, + 0x3baa: 0x00c3, 0x3bab: 0x00c3, 0x3bac: 0x00c3, + 0x3bb0: 0x00c3, 0x3bb1: 0x00c3, 0x3bb2: 0x00c3, 0x3bb3: 0x00c3, 0x3bb4: 0x00c3, + // Block 0xef, offset 0x3bc0 + 0x3bc0: 0x00c0, 0x3bc1: 0x00c0, 0x3bc2: 0x00c0, 0x3bc3: 0x00c0, 0x3bc4: 0x00c0, 0x3bc5: 0x00c0, + 0x3bc6: 0x00c0, 0x3bc7: 0x00c0, 0x3bc8: 0x00c0, 0x3bc9: 0x00c0, 0x3bca: 0x00c0, 0x3bcb: 0x00c0, + 0x3bcc: 0x00c0, 0x3bcd: 0x00c0, 0x3bce: 0x00c0, 0x3bcf: 0x00c0, 0x3bd0: 0x00c0, 0x3bd1: 0x00c0, + 0x3bd2: 0x00c0, 0x3bd3: 
0x00c0, 0x3bd4: 0x00c0, 0x3bd5: 0x00c0, 0x3bd6: 0x00c0, 0x3bd7: 0x00c0, + 0x3bd8: 0x00c0, 0x3bd9: 0x00c0, 0x3bda: 0x00c0, 0x3bdb: 0x00c0, 0x3bdc: 0x00c0, 0x3bdd: 0x00c0, + 0x3bde: 0x00c0, 0x3bdf: 0x00c0, 0x3be0: 0x00c0, 0x3be1: 0x00c0, 0x3be2: 0x00c0, 0x3be3: 0x00c0, + 0x3be4: 0x00c0, 0x3be5: 0x00c0, 0x3be6: 0x00c0, 0x3be7: 0x00c0, 0x3be8: 0x00c0, 0x3be9: 0x00c0, + 0x3bea: 0x00c0, 0x3beb: 0x00c0, 0x3bec: 0x00c0, 0x3bed: 0x00c0, 0x3bee: 0x00c0, 0x3bef: 0x00c0, + 0x3bf0: 0x00c0, 0x3bf1: 0x00c0, 0x3bf2: 0x00c0, 0x3bf3: 0x00c0, 0x3bf4: 0x00c0, 0x3bf5: 0x00c0, + 0x3bf6: 0x00c0, 0x3bf7: 0x00c0, 0x3bf8: 0x00c3, 0x3bf9: 0x00c3, 0x3bfa: 0x00c3, 0x3bfb: 0x00c3, + 0x3bfc: 0x00c3, 0x3bfd: 0x00c3, 0x3bfe: 0x00c3, 0x3bff: 0x00c3, + // Block 0xf0, offset 0x3c00 + 0x3c00: 0x00c0, 0x3c01: 0x00c0, 0x3c02: 0x00c6, 0x3c03: 0x00c3, 0x3c04: 0x00c3, 0x3c05: 0x00c0, + 0x3c06: 0x00c3, 0x3c07: 0x00c0, 0x3c08: 0x00c0, 0x3c09: 0x00c0, 0x3c0a: 0x00c0, 0x3c0b: 0x0080, + 0x3c0c: 0x0080, 0x3c0d: 0x0080, 0x3c0e: 0x0080, 0x3c0f: 0x0080, 0x3c10: 0x00c0, 0x3c11: 0x00c0, + 0x3c12: 0x00c0, 0x3c13: 0x00c0, 0x3c14: 0x00c0, 0x3c15: 0x00c0, 0x3c16: 0x00c0, 0x3c17: 0x00c0, + 0x3c18: 0x00c0, 0x3c19: 0x00c0, 0x3c1b: 0x0080, 0x3c1d: 0x0080, + // Block 0xf1, offset 0x3c40 + 0x3c40: 0x00c0, 0x3c41: 0x00c0, 0x3c42: 0x00c0, 0x3c43: 0x00c0, 0x3c44: 0x00c0, 0x3c45: 0x00c0, + 0x3c46: 0x00c0, 0x3c47: 0x00c0, 0x3c48: 0x00c0, 0x3c49: 0x00c0, 0x3c4a: 0x00c0, 0x3c4b: 0x00c0, + 0x3c4c: 0x00c0, 0x3c4d: 0x00c0, 0x3c4e: 0x00c0, 0x3c4f: 0x00c0, 0x3c50: 0x00c0, 0x3c51: 0x00c0, + 0x3c52: 0x00c0, 0x3c53: 0x00c0, 0x3c54: 0x00c0, 0x3c55: 0x00c0, 0x3c56: 0x00c0, 0x3c57: 0x00c0, + 0x3c58: 0x00c0, 0x3c59: 0x00c0, 0x3c5a: 0x00c0, 0x3c5b: 0x00c0, 0x3c5c: 0x00c0, 0x3c5d: 0x00c0, + 0x3c5e: 0x00c0, 0x3c5f: 0x00c0, 0x3c60: 0x00c0, 0x3c61: 0x00c0, 0x3c62: 0x00c0, 0x3c63: 0x00c0, + 0x3c64: 0x00c0, 0x3c65: 0x00c0, 0x3c66: 0x00c0, 0x3c67: 0x00c0, 0x3c68: 0x00c0, 0x3c69: 0x00c0, + 0x3c6a: 0x00c0, 0x3c6b: 0x00c0, 0x3c6c: 0x00c0, 0x3c6d: 0x00c0, 0x3c6e: 0x00c0, 0x3c6f: 0x00c0, + 0x3c70: 0x00c0, 0x3c71: 0x00c0, 0x3c72: 0x00c0, 0x3c73: 0x00c3, 0x3c74: 0x00c3, 0x3c75: 0x00c3, + 0x3c76: 0x00c3, 0x3c77: 0x00c3, 0x3c78: 0x00c3, 0x3c79: 0x00c0, 0x3c7a: 0x00c3, 0x3c7b: 0x00c0, + 0x3c7c: 0x00c0, 0x3c7d: 0x00c0, 0x3c7e: 0x00c0, 0x3c7f: 0x00c3, + // Block 0xf2, offset 0x3c80 + 0x3c80: 0x00c3, 0x3c81: 0x00c0, 0x3c82: 0x00c6, 0x3c83: 0x00c3, 0x3c84: 0x00c0, 0x3c85: 0x00c0, + 0x3c86: 0x0080, 0x3c87: 0x00c0, + 0x3c90: 0x00c0, 0x3c91: 0x00c0, + 0x3c92: 0x00c0, 0x3c93: 0x00c0, 0x3c94: 0x00c0, 0x3c95: 0x00c0, 0x3c96: 0x00c0, 0x3c97: 0x00c0, + 0x3c98: 0x00c0, 0x3c99: 0x00c0, + // Block 0xf3, offset 0x3cc0 + 0x3cc0: 0x00c0, 0x3cc1: 0x00c0, 0x3cc2: 0x00c0, 0x3cc3: 0x00c0, 0x3cc4: 0x00c0, 0x3cc5: 0x00c0, + 0x3cc6: 0x00c0, 0x3cc7: 0x00c0, 0x3cc8: 0x00c0, 0x3cc9: 0x00c0, 0x3cca: 0x00c0, 0x3ccb: 0x00c0, + 0x3ccc: 0x00c0, 0x3ccd: 0x00c0, 0x3cce: 0x00c0, 0x3ccf: 0x00c0, 0x3cd0: 0x00c0, 0x3cd1: 0x00c0, + 0x3cd2: 0x00c0, 0x3cd3: 0x00c0, 0x3cd4: 0x00c0, 0x3cd5: 0x00c0, 0x3cd6: 0x00c0, 0x3cd7: 0x00c0, + 0x3cd8: 0x00c0, 0x3cd9: 0x00c0, 0x3cda: 0x00c0, 0x3cdb: 0x00c0, 0x3cdc: 0x00c0, 0x3cdd: 0x00c0, + 0x3cde: 0x00c0, 0x3cdf: 0x00c0, 0x3ce0: 0x00c0, 0x3ce1: 0x00c0, 0x3ce2: 0x00c0, 0x3ce3: 0x00c0, + 0x3ce4: 0x00c0, 0x3ce5: 0x00c0, 0x3ce6: 0x00c0, 0x3ce7: 0x00c0, 0x3ce8: 0x00c0, 0x3ce9: 0x00c0, + 0x3cea: 0x00c0, 0x3ceb: 0x00c0, 0x3cec: 0x00c0, 0x3ced: 0x00c0, 0x3cee: 0x00c0, 0x3cef: 0x00c0, + 0x3cf0: 0x00c0, 0x3cf1: 0x00c0, 0x3cf2: 0x00c3, 0x3cf3: 0x00c3, 0x3cf4: 0x00c3, 0x3cf5: 0x00c3, + 0x3cf8: 0x00c0, 
0x3cf9: 0x00c0, 0x3cfa: 0x00c0, 0x3cfb: 0x00c0, + 0x3cfc: 0x00c3, 0x3cfd: 0x00c3, 0x3cfe: 0x00c0, 0x3cff: 0x00c6, + // Block 0xf4, offset 0x3d00 + 0x3d00: 0x00c3, 0x3d01: 0x0080, 0x3d02: 0x0080, 0x3d03: 0x0080, 0x3d04: 0x0080, 0x3d05: 0x0080, + 0x3d06: 0x0080, 0x3d07: 0x0080, 0x3d08: 0x0080, 0x3d09: 0x0080, 0x3d0a: 0x0080, 0x3d0b: 0x0080, + 0x3d0c: 0x0080, 0x3d0d: 0x0080, 0x3d0e: 0x0080, 0x3d0f: 0x0080, 0x3d10: 0x0080, 0x3d11: 0x0080, + 0x3d12: 0x0080, 0x3d13: 0x0080, 0x3d14: 0x0080, 0x3d15: 0x0080, 0x3d16: 0x0080, 0x3d17: 0x0080, + 0x3d18: 0x00c0, 0x3d19: 0x00c0, 0x3d1a: 0x00c0, 0x3d1b: 0x00c0, 0x3d1c: 0x00c3, 0x3d1d: 0x00c3, + // Block 0xf5, offset 0x3d40 + 0x3d40: 0x00c0, 0x3d41: 0x00c0, 0x3d42: 0x00c0, 0x3d43: 0x00c0, 0x3d44: 0x00c0, 0x3d45: 0x00c0, + 0x3d46: 0x00c0, 0x3d47: 0x00c0, 0x3d48: 0x00c0, 0x3d49: 0x00c0, 0x3d4a: 0x00c0, 0x3d4b: 0x00c0, + 0x3d4c: 0x00c0, 0x3d4d: 0x00c0, 0x3d4e: 0x00c0, 0x3d4f: 0x00c0, 0x3d50: 0x00c0, 0x3d51: 0x00c0, + 0x3d52: 0x00c0, 0x3d53: 0x00c0, 0x3d54: 0x00c0, 0x3d55: 0x00c0, 0x3d56: 0x00c0, 0x3d57: 0x00c0, + 0x3d58: 0x00c0, 0x3d59: 0x00c0, 0x3d5a: 0x00c0, 0x3d5b: 0x00c0, 0x3d5c: 0x00c0, 0x3d5d: 0x00c0, + 0x3d5e: 0x00c0, 0x3d5f: 0x00c0, 0x3d60: 0x00c0, 0x3d61: 0x00c0, 0x3d62: 0x00c0, 0x3d63: 0x00c0, + 0x3d64: 0x00c0, 0x3d65: 0x00c0, 0x3d66: 0x00c0, 0x3d67: 0x00c0, 0x3d68: 0x00c0, 0x3d69: 0x00c0, + 0x3d6a: 0x00c0, 0x3d6b: 0x00c0, 0x3d6c: 0x00c0, 0x3d6d: 0x00c0, 0x3d6e: 0x00c0, 0x3d6f: 0x00c0, + 0x3d70: 0x00c0, 0x3d71: 0x00c0, 0x3d72: 0x00c0, 0x3d73: 0x00c3, 0x3d74: 0x00c3, 0x3d75: 0x00c3, + 0x3d76: 0x00c3, 0x3d77: 0x00c3, 0x3d78: 0x00c3, 0x3d79: 0x00c3, 0x3d7a: 0x00c3, 0x3d7b: 0x00c0, + 0x3d7c: 0x00c0, 0x3d7d: 0x00c3, 0x3d7e: 0x00c0, 0x3d7f: 0x00c6, + // Block 0xf6, offset 0x3d80 + 0x3d80: 0x00c3, 0x3d81: 0x0080, 0x3d82: 0x0080, 0x3d83: 0x0080, 0x3d84: 0x00c0, + 0x3d90: 0x00c0, 0x3d91: 0x00c0, + 0x3d92: 0x00c0, 0x3d93: 0x00c0, 0x3d94: 0x00c0, 0x3d95: 0x00c0, 0x3d96: 0x00c0, 0x3d97: 0x00c0, + 0x3d98: 0x00c0, 0x3d99: 0x00c0, + 0x3da0: 0x0080, 0x3da1: 0x0080, 0x3da2: 0x0080, 0x3da3: 0x0080, + 0x3da4: 0x0080, 0x3da5: 0x0080, 0x3da6: 0x0080, 0x3da7: 0x0080, 0x3da8: 0x0080, 0x3da9: 0x0080, + 0x3daa: 0x0080, 0x3dab: 0x0080, 0x3dac: 0x0080, + // Block 0xf7, offset 0x3dc0 + 0x3dc0: 0x00c0, 0x3dc1: 0x00c0, 0x3dc2: 0x00c0, 0x3dc3: 0x00c0, 0x3dc4: 0x00c0, 0x3dc5: 0x00c0, + 0x3dc6: 0x00c0, 0x3dc7: 0x00c0, 0x3dc8: 0x00c0, 0x3dc9: 0x00c0, 0x3dca: 0x00c0, 0x3dcb: 0x00c0, + 0x3dcc: 0x00c0, 0x3dcd: 0x00c0, 0x3dce: 0x00c0, 0x3dcf: 0x00c0, 0x3dd0: 0x00c0, 0x3dd1: 0x00c0, + 0x3dd2: 0x00c0, 0x3dd3: 0x00c0, 0x3dd4: 0x00c0, 0x3dd5: 0x00c0, 0x3dd6: 0x00c0, 0x3dd7: 0x00c0, + 0x3dd8: 0x00c0, 0x3dd9: 0x00c0, 0x3dda: 0x00c0, 0x3ddb: 0x00c0, 0x3ddc: 0x00c0, 0x3ddd: 0x00c0, + 0x3dde: 0x00c0, 0x3ddf: 0x00c0, 0x3de0: 0x00c0, 0x3de1: 0x00c0, 0x3de2: 0x00c0, 0x3de3: 0x00c0, + 0x3de4: 0x00c0, 0x3de5: 0x00c0, 0x3de6: 0x00c0, 0x3de7: 0x00c0, 0x3de8: 0x00c0, 0x3de9: 0x00c0, + 0x3dea: 0x00c0, 0x3deb: 0x00c3, 0x3dec: 0x00c0, 0x3ded: 0x00c3, 0x3dee: 0x00c0, 0x3def: 0x00c0, + 0x3df0: 0x00c3, 0x3df1: 0x00c3, 0x3df2: 0x00c3, 0x3df3: 0x00c3, 0x3df4: 0x00c3, 0x3df5: 0x00c3, + 0x3df6: 0x00c5, 0x3df7: 0x00c3, + // Block 0xf8, offset 0x3e00 + 0x3e00: 0x00c0, 0x3e01: 0x00c0, 0x3e02: 0x00c0, 0x3e03: 0x00c0, 0x3e04: 0x00c0, 0x3e05: 0x00c0, + 0x3e06: 0x00c0, 0x3e07: 0x00c0, 0x3e08: 0x00c0, 0x3e09: 0x00c0, + // Block 0xf9, offset 0x3e40 + 0x3e40: 0x00c0, 0x3e41: 0x00c0, 0x3e42: 0x00c0, 0x3e43: 0x00c0, 0x3e44: 0x00c0, 0x3e45: 0x00c0, + 0x3e46: 0x00c0, 0x3e47: 0x00c0, 0x3e48: 0x00c0, 0x3e49: 0x00c0, 0x3e4a: 
0x00c0, 0x3e4b: 0x00c0, + 0x3e4c: 0x00c0, 0x3e4d: 0x00c0, 0x3e4e: 0x00c0, 0x3e4f: 0x00c0, 0x3e50: 0x00c0, 0x3e51: 0x00c0, + 0x3e52: 0x00c0, 0x3e53: 0x00c0, 0x3e54: 0x00c0, 0x3e55: 0x00c0, 0x3e56: 0x00c0, 0x3e57: 0x00c0, + 0x3e58: 0x00c0, 0x3e59: 0x00c0, 0x3e5d: 0x00c3, + 0x3e5e: 0x00c3, 0x3e5f: 0x00c3, 0x3e60: 0x00c0, 0x3e61: 0x00c0, 0x3e62: 0x00c3, 0x3e63: 0x00c3, + 0x3e64: 0x00c3, 0x3e65: 0x00c3, 0x3e66: 0x00c0, 0x3e67: 0x00c3, 0x3e68: 0x00c3, 0x3e69: 0x00c3, + 0x3e6a: 0x00c3, 0x3e6b: 0x00c6, + 0x3e70: 0x00c0, 0x3e71: 0x00c0, 0x3e72: 0x00c0, 0x3e73: 0x00c0, 0x3e74: 0x00c0, 0x3e75: 0x00c0, + 0x3e76: 0x00c0, 0x3e77: 0x00c0, 0x3e78: 0x00c0, 0x3e79: 0x00c0, 0x3e7a: 0x0080, 0x3e7b: 0x0080, + 0x3e7c: 0x0080, 0x3e7d: 0x0080, 0x3e7e: 0x0080, 0x3e7f: 0x0080, + // Block 0xfa, offset 0x3e80 + 0x3ea0: 0x00c0, 0x3ea1: 0x00c0, 0x3ea2: 0x00c0, 0x3ea3: 0x00c0, + 0x3ea4: 0x00c0, 0x3ea5: 0x00c0, 0x3ea6: 0x00c0, 0x3ea7: 0x00c0, 0x3ea8: 0x00c0, 0x3ea9: 0x00c0, + 0x3eaa: 0x00c0, 0x3eab: 0x00c0, 0x3eac: 0x00c0, 0x3ead: 0x00c0, 0x3eae: 0x00c0, 0x3eaf: 0x00c0, + 0x3eb0: 0x00c0, 0x3eb1: 0x00c0, 0x3eb2: 0x00c0, 0x3eb3: 0x00c0, 0x3eb4: 0x00c0, 0x3eb5: 0x00c0, + 0x3eb6: 0x00c0, 0x3eb7: 0x00c0, 0x3eb8: 0x00c0, 0x3eb9: 0x00c0, 0x3eba: 0x00c0, 0x3ebb: 0x00c0, + 0x3ebc: 0x00c0, 0x3ebd: 0x00c0, 0x3ebe: 0x00c0, 0x3ebf: 0x00c0, + // Block 0xfb, offset 0x3ec0 + 0x3ec0: 0x00c0, 0x3ec1: 0x00c0, 0x3ec2: 0x00c0, 0x3ec3: 0x00c0, 0x3ec4: 0x00c0, 0x3ec5: 0x00c0, + 0x3ec6: 0x00c0, 0x3ec7: 0x00c0, 0x3ec8: 0x00c0, 0x3ec9: 0x00c0, 0x3eca: 0x00c0, 0x3ecb: 0x00c0, + 0x3ecc: 0x00c0, 0x3ecd: 0x00c0, 0x3ece: 0x00c0, 0x3ecf: 0x00c0, 0x3ed0: 0x00c0, 0x3ed1: 0x00c0, + 0x3ed2: 0x00c0, 0x3ed3: 0x00c0, 0x3ed4: 0x00c0, 0x3ed5: 0x00c0, 0x3ed6: 0x00c0, 0x3ed7: 0x00c0, + 0x3ed8: 0x00c0, 0x3ed9: 0x00c0, 0x3eda: 0x00c0, 0x3edb: 0x00c0, 0x3edc: 0x00c0, 0x3edd: 0x00c0, + 0x3ede: 0x00c0, 0x3edf: 0x00c0, 0x3ee0: 0x00c0, 0x3ee1: 0x00c0, 0x3ee2: 0x00c0, 0x3ee3: 0x00c0, + 0x3ee4: 0x00c0, 0x3ee5: 0x00c0, 0x3ee6: 0x00c0, 0x3ee7: 0x00c0, 0x3ee8: 0x00c0, 0x3ee9: 0x00c0, + 0x3eea: 0x0080, 0x3eeb: 0x0080, 0x3eec: 0x0080, 0x3eed: 0x0080, 0x3eee: 0x0080, 0x3eef: 0x0080, + 0x3ef0: 0x0080, 0x3ef1: 0x0080, 0x3ef2: 0x0080, + 0x3eff: 0x00c0, + // Block 0xfc, offset 0x3f00 + 0x3f00: 0x00c0, 0x3f01: 0x00c0, 0x3f02: 0x00c0, 0x3f03: 0x00c0, 0x3f04: 0x00c0, 0x3f05: 0x00c0, + 0x3f06: 0x00c0, 0x3f07: 0x00c0, 0x3f08: 0x00c0, 0x3f09: 0x00c0, 0x3f0a: 0x00c0, 0x3f0b: 0x00c0, + 0x3f0c: 0x00c0, 0x3f0d: 0x00c0, 0x3f0e: 0x00c0, 0x3f0f: 0x00c0, 0x3f10: 0x00c0, 0x3f11: 0x00c0, + 0x3f12: 0x00c0, 0x3f13: 0x00c0, 0x3f14: 0x00c0, 0x3f15: 0x00c0, 0x3f16: 0x00c0, 0x3f17: 0x00c0, + 0x3f18: 0x00c0, 0x3f19: 0x00c0, 0x3f1a: 0x00c0, 0x3f1b: 0x00c0, 0x3f1c: 0x00c0, 0x3f1d: 0x00c0, + 0x3f1e: 0x00c0, 0x3f1f: 0x00c0, 0x3f20: 0x00c0, 0x3f21: 0x00c0, 0x3f22: 0x00c0, 0x3f23: 0x00c0, + 0x3f24: 0x00c0, 0x3f25: 0x00c0, 0x3f26: 0x00c0, 0x3f27: 0x00c0, 0x3f28: 0x00c0, 0x3f29: 0x00c0, + 0x3f2a: 0x00c0, 0x3f2b: 0x00c0, 0x3f2c: 0x00c0, 0x3f2d: 0x00c0, 0x3f2e: 0x00c0, 0x3f2f: 0x00c0, + 0x3f30: 0x00c0, 0x3f31: 0x00c0, 0x3f32: 0x00c0, 0x3f33: 0x00c0, 0x3f34: 0x00c0, 0x3f35: 0x00c0, + 0x3f36: 0x00c0, 0x3f37: 0x00c0, 0x3f38: 0x00c0, + // Block 0xfd, offset 0x3f40 + 0x3f40: 0x00c0, 0x3f41: 0x00c0, 0x3f42: 0x00c0, 0x3f43: 0x00c0, 0x3f44: 0x00c0, 0x3f45: 0x00c0, + 0x3f46: 0x00c0, 0x3f47: 0x00c0, 0x3f48: 0x00c0, 0x3f4a: 0x00c0, 0x3f4b: 0x00c0, + 0x3f4c: 0x00c0, 0x3f4d: 0x00c0, 0x3f4e: 0x00c0, 0x3f4f: 0x00c0, 0x3f50: 0x00c0, 0x3f51: 0x00c0, + 0x3f52: 0x00c0, 0x3f53: 0x00c0, 0x3f54: 0x00c0, 0x3f55: 0x00c0, 0x3f56: 0x00c0, 
0x3f57: 0x00c0, + 0x3f58: 0x00c0, 0x3f59: 0x00c0, 0x3f5a: 0x00c0, 0x3f5b: 0x00c0, 0x3f5c: 0x00c0, 0x3f5d: 0x00c0, + 0x3f5e: 0x00c0, 0x3f5f: 0x00c0, 0x3f60: 0x00c0, 0x3f61: 0x00c0, 0x3f62: 0x00c0, 0x3f63: 0x00c0, + 0x3f64: 0x00c0, 0x3f65: 0x00c0, 0x3f66: 0x00c0, 0x3f67: 0x00c0, 0x3f68: 0x00c0, 0x3f69: 0x00c0, + 0x3f6a: 0x00c0, 0x3f6b: 0x00c0, 0x3f6c: 0x00c0, 0x3f6d: 0x00c0, 0x3f6e: 0x00c0, 0x3f6f: 0x00c0, + 0x3f70: 0x00c3, 0x3f71: 0x00c3, 0x3f72: 0x00c3, 0x3f73: 0x00c3, 0x3f74: 0x00c3, 0x3f75: 0x00c3, + 0x3f76: 0x00c3, 0x3f78: 0x00c3, 0x3f79: 0x00c3, 0x3f7a: 0x00c3, 0x3f7b: 0x00c3, + 0x3f7c: 0x00c3, 0x3f7d: 0x00c3, 0x3f7e: 0x00c0, 0x3f7f: 0x00c6, + // Block 0xfe, offset 0x3f80 + 0x3f80: 0x00c0, 0x3f81: 0x0080, 0x3f82: 0x0080, 0x3f83: 0x0080, 0x3f84: 0x0080, 0x3f85: 0x0080, + 0x3f90: 0x00c0, 0x3f91: 0x00c0, + 0x3f92: 0x00c0, 0x3f93: 0x00c0, 0x3f94: 0x00c0, 0x3f95: 0x00c0, 0x3f96: 0x00c0, 0x3f97: 0x00c0, + 0x3f98: 0x00c0, 0x3f99: 0x00c0, 0x3f9a: 0x0080, 0x3f9b: 0x0080, 0x3f9c: 0x0080, 0x3f9d: 0x0080, + 0x3f9e: 0x0080, 0x3f9f: 0x0080, 0x3fa0: 0x0080, 0x3fa1: 0x0080, 0x3fa2: 0x0080, 0x3fa3: 0x0080, + 0x3fa4: 0x0080, 0x3fa5: 0x0080, 0x3fa6: 0x0080, 0x3fa7: 0x0080, 0x3fa8: 0x0080, 0x3fa9: 0x0080, + 0x3faa: 0x0080, 0x3fab: 0x0080, 0x3fac: 0x0080, + 0x3fb0: 0x0080, 0x3fb1: 0x0080, 0x3fb2: 0x00c0, 0x3fb3: 0x00c0, 0x3fb4: 0x00c0, 0x3fb5: 0x00c0, + 0x3fb6: 0x00c0, 0x3fb7: 0x00c0, 0x3fb8: 0x00c0, 0x3fb9: 0x00c0, 0x3fba: 0x00c0, 0x3fbb: 0x00c0, + 0x3fbc: 0x00c0, 0x3fbd: 0x00c0, 0x3fbe: 0x00c0, 0x3fbf: 0x00c0, + // Block 0xff, offset 0x3fc0 + 0x3fc0: 0x00c0, 0x3fc1: 0x00c0, 0x3fc2: 0x00c0, 0x3fc3: 0x00c0, 0x3fc4: 0x00c0, 0x3fc5: 0x00c0, + 0x3fc6: 0x00c0, 0x3fc7: 0x00c0, 0x3fc8: 0x00c0, 0x3fc9: 0x00c0, 0x3fca: 0x00c0, 0x3fcb: 0x00c0, + 0x3fcc: 0x00c0, 0x3fcd: 0x00c0, 0x3fce: 0x00c0, 0x3fcf: 0x00c0, + 0x3fd2: 0x00c3, 0x3fd3: 0x00c3, 0x3fd4: 0x00c3, 0x3fd5: 0x00c3, 0x3fd6: 0x00c3, 0x3fd7: 0x00c3, + 0x3fd8: 0x00c3, 0x3fd9: 0x00c3, 0x3fda: 0x00c3, 0x3fdb: 0x00c3, 0x3fdc: 0x00c3, 0x3fdd: 0x00c3, + 0x3fde: 0x00c3, 0x3fdf: 0x00c3, 0x3fe0: 0x00c3, 0x3fe1: 0x00c3, 0x3fe2: 0x00c3, 0x3fe3: 0x00c3, + 0x3fe4: 0x00c3, 0x3fe5: 0x00c3, 0x3fe6: 0x00c3, 0x3fe7: 0x00c3, 0x3fe9: 0x00c0, + 0x3fea: 0x00c3, 0x3feb: 0x00c3, 0x3fec: 0x00c3, 0x3fed: 0x00c3, 0x3fee: 0x00c3, 0x3fef: 0x00c3, + 0x3ff0: 0x00c3, 0x3ff1: 0x00c0, 0x3ff2: 0x00c3, 0x3ff3: 0x00c3, 0x3ff4: 0x00c0, 0x3ff5: 0x00c3, + 0x3ff6: 0x00c3, + // Block 0x100, offset 0x4000 + 0x4000: 0x00c0, 0x4001: 0x00c0, 0x4002: 0x00c0, 0x4003: 0x00c0, 0x4004: 0x00c0, 0x4005: 0x00c0, + 0x4006: 0x00c0, 0x4007: 0x00c0, 0x4008: 0x00c0, 0x4009: 0x00c0, 0x400a: 0x00c0, 0x400b: 0x00c0, + 0x400c: 0x00c0, 0x400d: 0x00c0, 0x400e: 0x00c0, 0x400f: 0x00c0, 0x4010: 0x00c0, 0x4011: 0x00c0, + 0x4012: 0x00c0, 0x4013: 0x00c0, 0x4014: 0x00c0, 0x4015: 0x00c0, 0x4016: 0x00c0, 0x4017: 0x00c0, + 0x4018: 0x00c0, 0x4019: 0x00c0, + // Block 0x101, offset 0x4040 + 0x4040: 0x0080, 0x4041: 0x0080, 0x4042: 0x0080, 0x4043: 0x0080, 0x4044: 0x0080, 0x4045: 0x0080, + 0x4046: 0x0080, 0x4047: 0x0080, 0x4048: 0x0080, 0x4049: 0x0080, 0x404a: 0x0080, 0x404b: 0x0080, + 0x404c: 0x0080, 0x404d: 0x0080, 0x404e: 0x0080, 0x404f: 0x0080, 0x4050: 0x0080, 0x4051: 0x0080, + 0x4052: 0x0080, 0x4053: 0x0080, 0x4054: 0x0080, 0x4055: 0x0080, 0x4056: 0x0080, 0x4057: 0x0080, + 0x4058: 0x0080, 0x4059: 0x0080, 0x405a: 0x0080, 0x405b: 0x0080, 0x405c: 0x0080, 0x405d: 0x0080, + 0x405e: 0x0080, 0x405f: 0x0080, 0x4060: 0x0080, 0x4061: 0x0080, 0x4062: 0x0080, 0x4063: 0x0080, + 0x4064: 0x0080, 0x4065: 0x0080, 0x4066: 0x0080, 0x4067: 0x0080, 
0x4068: 0x0080, 0x4069: 0x0080, + 0x406a: 0x0080, 0x406b: 0x0080, 0x406c: 0x0080, 0x406d: 0x0080, 0x406e: 0x0080, + 0x4070: 0x0080, 0x4071: 0x0080, 0x4072: 0x0080, 0x4073: 0x0080, 0x4074: 0x0080, + // Block 0x102, offset 0x4080 + 0x4080: 0x00c0, 0x4081: 0x00c0, 0x4082: 0x00c0, 0x4083: 0x00c0, + // Block 0x103, offset 0x40c0 + 0x40c0: 0x00c0, 0x40c1: 0x00c0, 0x40c2: 0x00c0, 0x40c3: 0x00c0, 0x40c4: 0x00c0, 0x40c5: 0x00c0, + 0x40c6: 0x00c0, 0x40c7: 0x00c0, 0x40c8: 0x00c0, 0x40c9: 0x00c0, 0x40ca: 0x00c0, 0x40cb: 0x00c0, + 0x40cc: 0x00c0, 0x40cd: 0x00c0, 0x40ce: 0x00c0, 0x40cf: 0x00c0, 0x40d0: 0x00c0, 0x40d1: 0x00c0, + 0x40d2: 0x00c0, 0x40d3: 0x00c0, 0x40d4: 0x00c0, 0x40d5: 0x00c0, 0x40d6: 0x00c0, 0x40d7: 0x00c0, + 0x40d8: 0x00c0, 0x40d9: 0x00c0, 0x40da: 0x00c0, 0x40db: 0x00c0, 0x40dc: 0x00c0, 0x40dd: 0x00c0, + 0x40de: 0x00c0, 0x40df: 0x00c0, 0x40e0: 0x00c0, 0x40e1: 0x00c0, 0x40e2: 0x00c0, 0x40e3: 0x00c0, + 0x40e4: 0x00c0, 0x40e5: 0x00c0, 0x40e6: 0x00c0, 0x40e7: 0x00c0, 0x40e8: 0x00c0, 0x40e9: 0x00c0, + 0x40ea: 0x00c0, 0x40eb: 0x00c0, 0x40ec: 0x00c0, 0x40ed: 0x00c0, 0x40ee: 0x00c0, + // Block 0x104, offset 0x4100 + 0x4100: 0x00c0, 0x4101: 0x00c0, 0x4102: 0x00c0, 0x4103: 0x00c0, 0x4104: 0x00c0, 0x4105: 0x00c0, + 0x4106: 0x00c0, + // Block 0x105, offset 0x4140 + 0x4140: 0x00c0, 0x4141: 0x00c0, 0x4142: 0x00c0, 0x4143: 0x00c0, 0x4144: 0x00c0, 0x4145: 0x00c0, + 0x4146: 0x00c0, 0x4147: 0x00c0, 0x4148: 0x00c0, 0x4149: 0x00c0, 0x414a: 0x00c0, 0x414b: 0x00c0, + 0x414c: 0x00c0, 0x414d: 0x00c0, 0x414e: 0x00c0, 0x414f: 0x00c0, 0x4150: 0x00c0, 0x4151: 0x00c0, + 0x4152: 0x00c0, 0x4153: 0x00c0, 0x4154: 0x00c0, 0x4155: 0x00c0, 0x4156: 0x00c0, 0x4157: 0x00c0, + 0x4158: 0x00c0, 0x4159: 0x00c0, 0x415a: 0x00c0, 0x415b: 0x00c0, 0x415c: 0x00c0, 0x415d: 0x00c0, + 0x415e: 0x00c0, 0x4160: 0x00c0, 0x4161: 0x00c0, 0x4162: 0x00c0, 0x4163: 0x00c0, + 0x4164: 0x00c0, 0x4165: 0x00c0, 0x4166: 0x00c0, 0x4167: 0x00c0, 0x4168: 0x00c0, 0x4169: 0x00c0, + 0x416e: 0x0080, 0x416f: 0x0080, + // Block 0x106, offset 0x4180 + 0x4190: 0x00c0, 0x4191: 0x00c0, + 0x4192: 0x00c0, 0x4193: 0x00c0, 0x4194: 0x00c0, 0x4195: 0x00c0, 0x4196: 0x00c0, 0x4197: 0x00c0, + 0x4198: 0x00c0, 0x4199: 0x00c0, 0x419a: 0x00c0, 0x419b: 0x00c0, 0x419c: 0x00c0, 0x419d: 0x00c0, + 0x419e: 0x00c0, 0x419f: 0x00c0, 0x41a0: 0x00c0, 0x41a1: 0x00c0, 0x41a2: 0x00c0, 0x41a3: 0x00c0, + 0x41a4: 0x00c0, 0x41a5: 0x00c0, 0x41a6: 0x00c0, 0x41a7: 0x00c0, 0x41a8: 0x00c0, 0x41a9: 0x00c0, + 0x41aa: 0x00c0, 0x41ab: 0x00c0, 0x41ac: 0x00c0, 0x41ad: 0x00c0, + 0x41b0: 0x00c3, 0x41b1: 0x00c3, 0x41b2: 0x00c3, 0x41b3: 0x00c3, 0x41b4: 0x00c3, 0x41b5: 0x0080, + // Block 0x107, offset 0x41c0 + 0x41c0: 0x00c0, 0x41c1: 0x00c0, 0x41c2: 0x00c0, 0x41c3: 0x00c0, 0x41c4: 0x00c0, 0x41c5: 0x00c0, + 0x41c6: 0x00c0, 0x41c7: 0x00c0, 0x41c8: 0x00c0, 0x41c9: 0x00c0, 0x41ca: 0x00c0, 0x41cb: 0x00c0, + 0x41cc: 0x00c0, 0x41cd: 0x00c0, 0x41ce: 0x00c0, 0x41cf: 0x00c0, 0x41d0: 0x00c0, 0x41d1: 0x00c0, + 0x41d2: 0x00c0, 0x41d3: 0x00c0, 0x41d4: 0x00c0, 0x41d5: 0x00c0, 0x41d6: 0x00c0, 0x41d7: 0x00c0, + 0x41d8: 0x00c0, 0x41d9: 0x00c0, 0x41da: 0x00c0, 0x41db: 0x00c0, 0x41dc: 0x00c0, 0x41dd: 0x00c0, + 0x41de: 0x00c0, 0x41df: 0x00c0, 0x41e0: 0x00c0, 0x41e1: 0x00c0, 0x41e2: 0x00c0, 0x41e3: 0x00c0, + 0x41e4: 0x00c0, 0x41e5: 0x00c0, 0x41e6: 0x00c0, 0x41e7: 0x00c0, 0x41e8: 0x00c0, 0x41e9: 0x00c0, + 0x41ea: 0x00c0, 0x41eb: 0x00c0, 0x41ec: 0x00c0, 0x41ed: 0x00c0, 0x41ee: 0x00c0, 0x41ef: 0x00c0, + 0x41f0: 0x00c3, 0x41f1: 0x00c3, 0x41f2: 0x00c3, 0x41f3: 0x00c3, 0x41f4: 0x00c3, 0x41f5: 0x00c3, + 0x41f6: 0x00c3, 0x41f7: 0x0080, 
0x41f8: 0x0080, 0x41f9: 0x0080, 0x41fa: 0x0080, 0x41fb: 0x0080, + 0x41fc: 0x0080, 0x41fd: 0x0080, 0x41fe: 0x0080, 0x41ff: 0x0080, + // Block 0x108, offset 0x4200 + 0x4200: 0x00c0, 0x4201: 0x00c0, 0x4202: 0x00c0, 0x4203: 0x00c0, 0x4204: 0x0080, 0x4205: 0x0080, + 0x4210: 0x00c0, 0x4211: 0x00c0, + 0x4212: 0x00c0, 0x4213: 0x00c0, 0x4214: 0x00c0, 0x4215: 0x00c0, 0x4216: 0x00c0, 0x4217: 0x00c0, + 0x4218: 0x00c0, 0x4219: 0x00c0, 0x421b: 0x0080, 0x421c: 0x0080, 0x421d: 0x0080, + 0x421e: 0x0080, 0x421f: 0x0080, 0x4220: 0x0080, 0x4221: 0x0080, 0x4223: 0x00c0, + 0x4224: 0x00c0, 0x4225: 0x00c0, 0x4226: 0x00c0, 0x4227: 0x00c0, 0x4228: 0x00c0, 0x4229: 0x00c0, + 0x422a: 0x00c0, 0x422b: 0x00c0, 0x422c: 0x00c0, 0x422d: 0x00c0, 0x422e: 0x00c0, 0x422f: 0x00c0, + 0x4230: 0x00c0, 0x4231: 0x00c0, 0x4232: 0x00c0, 0x4233: 0x00c0, 0x4234: 0x00c0, 0x4235: 0x00c0, + 0x4236: 0x00c0, 0x4237: 0x00c0, + 0x423d: 0x00c0, 0x423e: 0x00c0, 0x423f: 0x00c0, + // Block 0x109, offset 0x4240 + 0x4240: 0x00c0, 0x4241: 0x00c0, 0x4242: 0x00c0, 0x4243: 0x00c0, 0x4244: 0x00c0, 0x4245: 0x00c0, + 0x4246: 0x00c0, 0x4247: 0x00c0, 0x4248: 0x00c0, 0x4249: 0x00c0, 0x424a: 0x00c0, 0x424b: 0x00c0, + 0x424c: 0x00c0, 0x424d: 0x00c0, 0x424e: 0x00c0, 0x424f: 0x00c0, + // Block 0x10a, offset 0x4280 + 0x4280: 0x00c0, 0x4281: 0x00c0, 0x4282: 0x00c0, 0x4283: 0x00c0, 0x4284: 0x00c0, + 0x4290: 0x00c0, 0x4291: 0x00c0, + 0x4292: 0x00c0, 0x4293: 0x00c0, 0x4294: 0x00c0, 0x4295: 0x00c0, 0x4296: 0x00c0, 0x4297: 0x00c0, + 0x4298: 0x00c0, 0x4299: 0x00c0, 0x429a: 0x00c0, 0x429b: 0x00c0, 0x429c: 0x00c0, 0x429d: 0x00c0, + 0x429e: 0x00c0, 0x429f: 0x00c0, 0x42a0: 0x00c0, 0x42a1: 0x00c0, 0x42a2: 0x00c0, 0x42a3: 0x00c0, + 0x42a4: 0x00c0, 0x42a5: 0x00c0, 0x42a6: 0x00c0, 0x42a7: 0x00c0, 0x42a8: 0x00c0, 0x42a9: 0x00c0, + 0x42aa: 0x00c0, 0x42ab: 0x00c0, 0x42ac: 0x00c0, 0x42ad: 0x00c0, 0x42ae: 0x00c0, 0x42af: 0x00c0, + 0x42b0: 0x00c0, 0x42b1: 0x00c0, 0x42b2: 0x00c0, 0x42b3: 0x00c0, 0x42b4: 0x00c0, 0x42b5: 0x00c0, + 0x42b6: 0x00c0, 0x42b7: 0x00c0, 0x42b8: 0x00c0, 0x42b9: 0x00c0, 0x42ba: 0x00c0, 0x42bb: 0x00c0, + 0x42bc: 0x00c0, 0x42bd: 0x00c0, 0x42be: 0x00c0, + // Block 0x10b, offset 0x42c0 + 0x42cf: 0x00c3, 0x42d0: 0x00c3, 0x42d1: 0x00c3, + 0x42d2: 0x00c3, 0x42d3: 0x00c0, 0x42d4: 0x00c0, 0x42d5: 0x00c0, 0x42d6: 0x00c0, 0x42d7: 0x00c0, + 0x42d8: 0x00c0, 0x42d9: 0x00c0, 0x42da: 0x00c0, 0x42db: 0x00c0, 0x42dc: 0x00c0, 0x42dd: 0x00c0, + 0x42de: 0x00c0, 0x42df: 0x00c0, + // Block 0x10c, offset 0x4300 + 0x4320: 0x00c0, + // Block 0x10d, offset 0x4340 + 0x4340: 0x00c0, 0x4341: 0x00c0, 0x4342: 0x00c0, 0x4343: 0x00c0, 0x4344: 0x00c0, 0x4345: 0x00c0, + 0x4346: 0x00c0, 0x4347: 0x00c0, 0x4348: 0x00c0, 0x4349: 0x00c0, 0x434a: 0x00c0, 0x434b: 0x00c0, + 0x434c: 0x00c0, 0x434d: 0x00c0, 0x434e: 0x00c0, 0x434f: 0x00c0, 0x4350: 0x00c0, 0x4351: 0x00c0, + 0x4352: 0x00c0, 0x4353: 0x00c0, 0x4354: 0x00c0, 0x4355: 0x00c0, 0x4356: 0x00c0, 0x4357: 0x00c0, + 0x4358: 0x00c0, 0x4359: 0x00c0, 0x435a: 0x00c0, 0x435b: 0x00c0, 0x435c: 0x00c0, 0x435d: 0x00c0, + 0x435e: 0x00c0, 0x435f: 0x00c0, 0x4360: 0x00c0, 0x4361: 0x00c0, 0x4362: 0x00c0, 0x4363: 0x00c0, + 0x4364: 0x00c0, 0x4365: 0x00c0, 0x4366: 0x00c0, 0x4367: 0x00c0, 0x4368: 0x00c0, 0x4369: 0x00c0, + 0x436a: 0x00c0, 0x436b: 0x00c0, 0x436c: 0x00c0, + // Block 0x10e, offset 0x4380 + 0x4380: 0x00cc, 0x4381: 0x00cc, + // Block 0x10f, offset 0x43c0 + 0x43c0: 0x00c0, 0x43c1: 0x00c0, 0x43c2: 0x00c0, 0x43c3: 0x00c0, 0x43c4: 0x00c0, 0x43c5: 0x00c0, + 0x43c6: 0x00c0, 0x43c7: 0x00c0, 0x43c8: 0x00c0, 0x43c9: 0x00c0, 0x43ca: 0x00c0, 0x43cb: 0x00c0, + 0x43cc: 0x00c0, 
0x43cd: 0x00c0, 0x43ce: 0x00c0, 0x43cf: 0x00c0, 0x43d0: 0x00c0, 0x43d1: 0x00c0, + 0x43d2: 0x00c0, 0x43d3: 0x00c0, 0x43d4: 0x00c0, 0x43d5: 0x00c0, 0x43d6: 0x00c0, 0x43d7: 0x00c0, + 0x43d8: 0x00c0, 0x43d9: 0x00c0, 0x43da: 0x00c0, 0x43db: 0x00c0, 0x43dc: 0x00c0, 0x43dd: 0x00c0, + 0x43de: 0x00c0, 0x43df: 0x00c0, 0x43e0: 0x00c0, 0x43e1: 0x00c0, 0x43e2: 0x00c0, 0x43e3: 0x00c0, + 0x43e4: 0x00c0, 0x43e5: 0x00c0, 0x43e6: 0x00c0, 0x43e7: 0x00c0, 0x43e8: 0x00c0, 0x43e9: 0x00c0, + 0x43ea: 0x00c0, + 0x43f0: 0x00c0, 0x43f1: 0x00c0, 0x43f2: 0x00c0, 0x43f3: 0x00c0, 0x43f4: 0x00c0, 0x43f5: 0x00c0, + 0x43f6: 0x00c0, 0x43f7: 0x00c0, 0x43f8: 0x00c0, 0x43f9: 0x00c0, 0x43fa: 0x00c0, 0x43fb: 0x00c0, + 0x43fc: 0x00c0, + // Block 0x110, offset 0x4400 + 0x4400: 0x00c0, 0x4401: 0x00c0, 0x4402: 0x00c0, 0x4403: 0x00c0, 0x4404: 0x00c0, 0x4405: 0x00c0, + 0x4406: 0x00c0, 0x4407: 0x00c0, 0x4408: 0x00c0, + 0x4410: 0x00c0, 0x4411: 0x00c0, + 0x4412: 0x00c0, 0x4413: 0x00c0, 0x4414: 0x00c0, 0x4415: 0x00c0, 0x4416: 0x00c0, 0x4417: 0x00c0, + 0x4418: 0x00c0, 0x4419: 0x00c0, 0x441c: 0x0080, 0x441d: 0x00c3, + 0x441e: 0x00c3, 0x441f: 0x0080, 0x4420: 0x0040, 0x4421: 0x0040, 0x4422: 0x0040, 0x4423: 0x0040, + // Block 0x111, offset 0x4440 + 0x4440: 0x0080, 0x4441: 0x0080, 0x4442: 0x0080, 0x4443: 0x0080, 0x4444: 0x0080, 0x4445: 0x0080, + 0x4446: 0x0080, 0x4447: 0x0080, 0x4448: 0x0080, 0x4449: 0x0080, 0x444a: 0x0080, 0x444b: 0x0080, + 0x444c: 0x0080, 0x444d: 0x0080, 0x444e: 0x0080, 0x444f: 0x0080, 0x4450: 0x0080, 0x4451: 0x0080, + 0x4452: 0x0080, 0x4453: 0x0080, 0x4454: 0x0080, 0x4455: 0x0080, 0x4456: 0x0080, 0x4457: 0x0080, + 0x4458: 0x0080, 0x4459: 0x0080, 0x445a: 0x0080, 0x445b: 0x0080, 0x445c: 0x0080, 0x445d: 0x0080, + 0x445e: 0x0080, 0x445f: 0x0080, 0x4460: 0x0080, 0x4461: 0x0080, 0x4462: 0x0080, 0x4463: 0x0080, + 0x4464: 0x0080, 0x4465: 0x0080, 0x4466: 0x0080, 0x4467: 0x0080, 0x4468: 0x0080, 0x4469: 0x0080, + 0x446a: 0x0080, 0x446b: 0x0080, 0x446c: 0x0080, 0x446d: 0x0080, 0x446e: 0x0080, 0x446f: 0x0080, + 0x4470: 0x0080, 0x4471: 0x0080, 0x4472: 0x0080, 0x4473: 0x0080, 0x4474: 0x0080, 0x4475: 0x0080, + // Block 0x112, offset 0x4480 + 0x4480: 0x0080, 0x4481: 0x0080, 0x4482: 0x0080, 0x4483: 0x0080, 0x4484: 0x0080, 0x4485: 0x0080, + 0x4486: 0x0080, 0x4487: 0x0080, 0x4488: 0x0080, 0x4489: 0x0080, 0x448a: 0x0080, 0x448b: 0x0080, + 0x448c: 0x0080, 0x448d: 0x0080, 0x448e: 0x0080, 0x448f: 0x0080, 0x4490: 0x0080, 0x4491: 0x0080, + 0x4492: 0x0080, 0x4493: 0x0080, 0x4494: 0x0080, 0x4495: 0x0080, 0x4496: 0x0080, 0x4497: 0x0080, + 0x4498: 0x0080, 0x4499: 0x0080, 0x449a: 0x0080, 0x449b: 0x0080, 0x449c: 0x0080, 0x449d: 0x0080, + 0x449e: 0x0080, 0x449f: 0x0080, 0x44a0: 0x0080, 0x44a1: 0x0080, 0x44a2: 0x0080, 0x44a3: 0x0080, + 0x44a4: 0x0080, 0x44a5: 0x0080, 0x44a6: 0x0080, 0x44a9: 0x0080, + 0x44aa: 0x0080, 0x44ab: 0x0080, 0x44ac: 0x0080, 0x44ad: 0x0080, 0x44ae: 0x0080, 0x44af: 0x0080, + 0x44b0: 0x0080, 0x44b1: 0x0080, 0x44b2: 0x0080, 0x44b3: 0x0080, 0x44b4: 0x0080, 0x44b5: 0x0080, + 0x44b6: 0x0080, 0x44b7: 0x0080, 0x44b8: 0x0080, 0x44b9: 0x0080, 0x44ba: 0x0080, 0x44bb: 0x0080, + 0x44bc: 0x0080, 0x44bd: 0x0080, 0x44be: 0x0080, 0x44bf: 0x0080, + // Block 0x113, offset 0x44c0 + 0x44c0: 0x0080, 0x44c1: 0x0080, 0x44c2: 0x0080, 0x44c3: 0x0080, 0x44c4: 0x0080, 0x44c5: 0x0080, + 0x44c6: 0x0080, 0x44c7: 0x0080, 0x44c8: 0x0080, 0x44c9: 0x0080, 0x44ca: 0x0080, 0x44cb: 0x0080, + 0x44cc: 0x0080, 0x44cd: 0x0080, 0x44ce: 0x0080, 0x44cf: 0x0080, 0x44d0: 0x0080, 0x44d1: 0x0080, + 0x44d2: 0x0080, 0x44d3: 0x0080, 0x44d4: 0x0080, 0x44d5: 0x0080, 0x44d6: 0x0080, 
0x44d7: 0x0080, + 0x44d8: 0x0080, 0x44d9: 0x0080, 0x44da: 0x0080, 0x44db: 0x0080, 0x44dc: 0x0080, 0x44dd: 0x0080, + 0x44de: 0x0080, 0x44df: 0x0080, 0x44e0: 0x0080, 0x44e1: 0x0080, 0x44e2: 0x0080, 0x44e3: 0x0080, + 0x44e4: 0x0080, 0x44e5: 0x00c0, 0x44e6: 0x00c0, 0x44e7: 0x00c3, 0x44e8: 0x00c3, 0x44e9: 0x00c3, + 0x44ea: 0x0080, 0x44eb: 0x0080, 0x44ec: 0x0080, 0x44ed: 0x00c0, 0x44ee: 0x00c0, 0x44ef: 0x00c0, + 0x44f0: 0x00c0, 0x44f1: 0x00c0, 0x44f2: 0x00c0, 0x44f3: 0x0040, 0x44f4: 0x0040, 0x44f5: 0x0040, + 0x44f6: 0x0040, 0x44f7: 0x0040, 0x44f8: 0x0040, 0x44f9: 0x0040, 0x44fa: 0x0040, 0x44fb: 0x00c3, + 0x44fc: 0x00c3, 0x44fd: 0x00c3, 0x44fe: 0x00c3, 0x44ff: 0x00c3, + // Block 0x114, offset 0x4500 + 0x4500: 0x00c3, 0x4501: 0x00c3, 0x4502: 0x00c3, 0x4503: 0x0080, 0x4504: 0x0080, 0x4505: 0x00c3, + 0x4506: 0x00c3, 0x4507: 0x00c3, 0x4508: 0x00c3, 0x4509: 0x00c3, 0x450a: 0x00c3, 0x450b: 0x00c3, + 0x450c: 0x0080, 0x450d: 0x0080, 0x450e: 0x0080, 0x450f: 0x0080, 0x4510: 0x0080, 0x4511: 0x0080, + 0x4512: 0x0080, 0x4513: 0x0080, 0x4514: 0x0080, 0x4515: 0x0080, 0x4516: 0x0080, 0x4517: 0x0080, + 0x4518: 0x0080, 0x4519: 0x0080, 0x451a: 0x0080, 0x451b: 0x0080, 0x451c: 0x0080, 0x451d: 0x0080, + 0x451e: 0x0080, 0x451f: 0x0080, 0x4520: 0x0080, 0x4521: 0x0080, 0x4522: 0x0080, 0x4523: 0x0080, + 0x4524: 0x0080, 0x4525: 0x0080, 0x4526: 0x0080, 0x4527: 0x0080, 0x4528: 0x0080, 0x4529: 0x0080, + 0x452a: 0x00c3, 0x452b: 0x00c3, 0x452c: 0x00c3, 0x452d: 0x00c3, 0x452e: 0x0080, 0x452f: 0x0080, + 0x4530: 0x0080, 0x4531: 0x0080, 0x4532: 0x0080, 0x4533: 0x0080, 0x4534: 0x0080, 0x4535: 0x0080, + 0x4536: 0x0080, 0x4537: 0x0080, 0x4538: 0x0080, 0x4539: 0x0080, 0x453a: 0x0080, 0x453b: 0x0080, + 0x453c: 0x0080, 0x453d: 0x0080, 0x453e: 0x0080, 0x453f: 0x0080, + // Block 0x115, offset 0x4540 + 0x4540: 0x0080, 0x4541: 0x0080, 0x4542: 0x0080, 0x4543: 0x0080, 0x4544: 0x0080, 0x4545: 0x0080, + 0x4546: 0x0080, 0x4547: 0x0080, 0x4548: 0x0080, 0x4549: 0x0080, 0x454a: 0x0080, 0x454b: 0x0080, + 0x454c: 0x0080, 0x454d: 0x0080, 0x454e: 0x0080, 0x454f: 0x0080, 0x4550: 0x0080, 0x4551: 0x0080, + 0x4552: 0x0080, 0x4553: 0x0080, 0x4554: 0x0080, 0x4555: 0x0080, 0x4556: 0x0080, 0x4557: 0x0080, + 0x4558: 0x0080, 0x4559: 0x0080, 0x455a: 0x0080, 0x455b: 0x0080, 0x455c: 0x0080, 0x455d: 0x0080, + 0x455e: 0x0080, 0x455f: 0x0080, 0x4560: 0x0080, 0x4561: 0x0080, 0x4562: 0x0080, 0x4563: 0x0080, + 0x4564: 0x0080, 0x4565: 0x0080, 0x4566: 0x0080, 0x4567: 0x0080, 0x4568: 0x0080, + // Block 0x116, offset 0x4580 + 0x4580: 0x0088, 0x4581: 0x0088, 0x4582: 0x00c9, 0x4583: 0x00c9, 0x4584: 0x00c9, 0x4585: 0x0088, + // Block 0x117, offset 0x45c0 + 0x45c0: 0x0080, 0x45c1: 0x0080, 0x45c2: 0x0080, 0x45c3: 0x0080, 0x45c4: 0x0080, 0x45c5: 0x0080, + 0x45c6: 0x0080, 0x45c7: 0x0080, 0x45c8: 0x0080, 0x45c9: 0x0080, 0x45ca: 0x0080, 0x45cb: 0x0080, + 0x45cc: 0x0080, 0x45cd: 0x0080, 0x45ce: 0x0080, 0x45cf: 0x0080, 0x45d0: 0x0080, 0x45d1: 0x0080, + 0x45d2: 0x0080, 0x45d3: 0x0080, 0x45d4: 0x0080, 0x45d5: 0x0080, 0x45d6: 0x0080, + 0x45e0: 0x0080, 0x45e1: 0x0080, 0x45e2: 0x0080, 0x45e3: 0x0080, + 0x45e4: 0x0080, 0x45e5: 0x0080, 0x45e6: 0x0080, 0x45e7: 0x0080, 0x45e8: 0x0080, 0x45e9: 0x0080, + 0x45ea: 0x0080, 0x45eb: 0x0080, 0x45ec: 0x0080, 0x45ed: 0x0080, 0x45ee: 0x0080, 0x45ef: 0x0080, + 0x45f0: 0x0080, 0x45f1: 0x0080, + // Block 0x118, offset 0x4600 + 0x4600: 0x0080, 0x4601: 0x0080, 0x4602: 0x0080, 0x4603: 0x0080, 0x4604: 0x0080, 0x4605: 0x0080, + 0x4606: 0x0080, 0x4607: 0x0080, 0x4608: 0x0080, 0x4609: 0x0080, 0x460a: 0x0080, 0x460b: 0x0080, + 0x460c: 0x0080, 0x460d: 0x0080, 0x460e: 
0x0080, 0x460f: 0x0080, 0x4610: 0x0080, 0x4611: 0x0080, + 0x4612: 0x0080, 0x4613: 0x0080, 0x4614: 0x0080, 0x4616: 0x0080, 0x4617: 0x0080, + 0x4618: 0x0080, 0x4619: 0x0080, 0x461a: 0x0080, 0x461b: 0x0080, 0x461c: 0x0080, 0x461d: 0x0080, + 0x461e: 0x0080, 0x461f: 0x0080, 0x4620: 0x0080, 0x4621: 0x0080, 0x4622: 0x0080, 0x4623: 0x0080, + 0x4624: 0x0080, 0x4625: 0x0080, 0x4626: 0x0080, 0x4627: 0x0080, 0x4628: 0x0080, 0x4629: 0x0080, + 0x462a: 0x0080, 0x462b: 0x0080, 0x462c: 0x0080, 0x462d: 0x0080, 0x462e: 0x0080, 0x462f: 0x0080, + 0x4630: 0x0080, 0x4631: 0x0080, 0x4632: 0x0080, 0x4633: 0x0080, 0x4634: 0x0080, 0x4635: 0x0080, + 0x4636: 0x0080, 0x4637: 0x0080, 0x4638: 0x0080, 0x4639: 0x0080, 0x463a: 0x0080, 0x463b: 0x0080, + 0x463c: 0x0080, 0x463d: 0x0080, 0x463e: 0x0080, 0x463f: 0x0080, + // Block 0x119, offset 0x4640 + 0x4640: 0x0080, 0x4641: 0x0080, 0x4642: 0x0080, 0x4643: 0x0080, 0x4644: 0x0080, 0x4645: 0x0080, + 0x4646: 0x0080, 0x4647: 0x0080, 0x4648: 0x0080, 0x4649: 0x0080, 0x464a: 0x0080, 0x464b: 0x0080, + 0x464c: 0x0080, 0x464d: 0x0080, 0x464e: 0x0080, 0x464f: 0x0080, 0x4650: 0x0080, 0x4651: 0x0080, + 0x4652: 0x0080, 0x4653: 0x0080, 0x4654: 0x0080, 0x4655: 0x0080, 0x4656: 0x0080, 0x4657: 0x0080, + 0x4658: 0x0080, 0x4659: 0x0080, 0x465a: 0x0080, 0x465b: 0x0080, 0x465c: 0x0080, + 0x465e: 0x0080, 0x465f: 0x0080, 0x4662: 0x0080, + 0x4665: 0x0080, 0x4666: 0x0080, 0x4669: 0x0080, + 0x466a: 0x0080, 0x466b: 0x0080, 0x466c: 0x0080, 0x466e: 0x0080, 0x466f: 0x0080, + 0x4670: 0x0080, 0x4671: 0x0080, 0x4672: 0x0080, 0x4673: 0x0080, 0x4674: 0x0080, 0x4675: 0x0080, + 0x4676: 0x0080, 0x4677: 0x0080, 0x4678: 0x0080, 0x4679: 0x0080, 0x467b: 0x0080, + 0x467d: 0x0080, 0x467e: 0x0080, 0x467f: 0x0080, + // Block 0x11a, offset 0x4680 + 0x4680: 0x0080, 0x4681: 0x0080, 0x4682: 0x0080, 0x4683: 0x0080, 0x4685: 0x0080, + 0x4686: 0x0080, 0x4687: 0x0080, 0x4688: 0x0080, 0x4689: 0x0080, 0x468a: 0x0080, 0x468b: 0x0080, + 0x468c: 0x0080, 0x468d: 0x0080, 0x468e: 0x0080, 0x468f: 0x0080, 0x4690: 0x0080, 0x4691: 0x0080, + 0x4692: 0x0080, 0x4693: 0x0080, 0x4694: 0x0080, 0x4695: 0x0080, 0x4696: 0x0080, 0x4697: 0x0080, + 0x4698: 0x0080, 0x4699: 0x0080, 0x469a: 0x0080, 0x469b: 0x0080, 0x469c: 0x0080, 0x469d: 0x0080, + 0x469e: 0x0080, 0x469f: 0x0080, 0x46a0: 0x0080, 0x46a1: 0x0080, 0x46a2: 0x0080, 0x46a3: 0x0080, + 0x46a4: 0x0080, 0x46a5: 0x0080, 0x46a6: 0x0080, 0x46a7: 0x0080, 0x46a8: 0x0080, 0x46a9: 0x0080, + 0x46aa: 0x0080, 0x46ab: 0x0080, 0x46ac: 0x0080, 0x46ad: 0x0080, 0x46ae: 0x0080, 0x46af: 0x0080, + 0x46b0: 0x0080, 0x46b1: 0x0080, 0x46b2: 0x0080, 0x46b3: 0x0080, 0x46b4: 0x0080, 0x46b5: 0x0080, + 0x46b6: 0x0080, 0x46b7: 0x0080, 0x46b8: 0x0080, 0x46b9: 0x0080, 0x46ba: 0x0080, 0x46bb: 0x0080, + 0x46bc: 0x0080, 0x46bd: 0x0080, 0x46be: 0x0080, 0x46bf: 0x0080, + // Block 0x11b, offset 0x46c0 + 0x46c0: 0x0080, 0x46c1: 0x0080, 0x46c2: 0x0080, 0x46c3: 0x0080, 0x46c4: 0x0080, 0x46c5: 0x0080, + 0x46c7: 0x0080, 0x46c8: 0x0080, 0x46c9: 0x0080, 0x46ca: 0x0080, + 0x46cd: 0x0080, 0x46ce: 0x0080, 0x46cf: 0x0080, 0x46d0: 0x0080, 0x46d1: 0x0080, + 0x46d2: 0x0080, 0x46d3: 0x0080, 0x46d4: 0x0080, 0x46d6: 0x0080, 0x46d7: 0x0080, + 0x46d8: 0x0080, 0x46d9: 0x0080, 0x46da: 0x0080, 0x46db: 0x0080, 0x46dc: 0x0080, + 0x46de: 0x0080, 0x46df: 0x0080, 0x46e0: 0x0080, 0x46e1: 0x0080, 0x46e2: 0x0080, 0x46e3: 0x0080, + 0x46e4: 0x0080, 0x46e5: 0x0080, 0x46e6: 0x0080, 0x46e7: 0x0080, 0x46e8: 0x0080, 0x46e9: 0x0080, + 0x46ea: 0x0080, 0x46eb: 0x0080, 0x46ec: 0x0080, 0x46ed: 0x0080, 0x46ee: 0x0080, 0x46ef: 0x0080, + 0x46f0: 0x0080, 0x46f1: 0x0080, 0x46f2: 
0x0080, 0x46f3: 0x0080, 0x46f4: 0x0080, 0x46f5: 0x0080, + 0x46f6: 0x0080, 0x46f7: 0x0080, 0x46f8: 0x0080, 0x46f9: 0x0080, 0x46fb: 0x0080, + 0x46fc: 0x0080, 0x46fd: 0x0080, 0x46fe: 0x0080, + // Block 0x11c, offset 0x4700 + 0x4700: 0x0080, 0x4701: 0x0080, 0x4702: 0x0080, 0x4703: 0x0080, 0x4704: 0x0080, + 0x4706: 0x0080, 0x470a: 0x0080, 0x470b: 0x0080, + 0x470c: 0x0080, 0x470d: 0x0080, 0x470e: 0x0080, 0x470f: 0x0080, 0x4710: 0x0080, + 0x4712: 0x0080, 0x4713: 0x0080, 0x4714: 0x0080, 0x4715: 0x0080, 0x4716: 0x0080, 0x4717: 0x0080, + 0x4718: 0x0080, 0x4719: 0x0080, 0x471a: 0x0080, 0x471b: 0x0080, 0x471c: 0x0080, 0x471d: 0x0080, + 0x471e: 0x0080, 0x471f: 0x0080, 0x4720: 0x0080, 0x4721: 0x0080, 0x4722: 0x0080, 0x4723: 0x0080, + 0x4724: 0x0080, 0x4725: 0x0080, 0x4726: 0x0080, 0x4727: 0x0080, 0x4728: 0x0080, 0x4729: 0x0080, + 0x472a: 0x0080, 0x472b: 0x0080, 0x472c: 0x0080, 0x472d: 0x0080, 0x472e: 0x0080, 0x472f: 0x0080, + 0x4730: 0x0080, 0x4731: 0x0080, 0x4732: 0x0080, 0x4733: 0x0080, 0x4734: 0x0080, 0x4735: 0x0080, + 0x4736: 0x0080, 0x4737: 0x0080, 0x4738: 0x0080, 0x4739: 0x0080, 0x473a: 0x0080, 0x473b: 0x0080, + 0x473c: 0x0080, 0x473d: 0x0080, 0x473e: 0x0080, 0x473f: 0x0080, + // Block 0x11d, offset 0x4740 + 0x4740: 0x0080, 0x4741: 0x0080, 0x4742: 0x0080, 0x4743: 0x0080, 0x4744: 0x0080, 0x4745: 0x0080, + 0x4746: 0x0080, 0x4747: 0x0080, 0x4748: 0x0080, 0x4749: 0x0080, 0x474a: 0x0080, 0x474b: 0x0080, + 0x474c: 0x0080, 0x474d: 0x0080, 0x474e: 0x0080, 0x474f: 0x0080, 0x4750: 0x0080, 0x4751: 0x0080, + 0x4752: 0x0080, 0x4753: 0x0080, 0x4754: 0x0080, 0x4755: 0x0080, 0x4756: 0x0080, 0x4757: 0x0080, + 0x4758: 0x0080, 0x4759: 0x0080, 0x475a: 0x0080, 0x475b: 0x0080, 0x475c: 0x0080, 0x475d: 0x0080, + 0x475e: 0x0080, 0x475f: 0x0080, 0x4760: 0x0080, 0x4761: 0x0080, 0x4762: 0x0080, 0x4763: 0x0080, + 0x4764: 0x0080, 0x4765: 0x0080, 0x4768: 0x0080, 0x4769: 0x0080, + 0x476a: 0x0080, 0x476b: 0x0080, 0x476c: 0x0080, 0x476d: 0x0080, 0x476e: 0x0080, 0x476f: 0x0080, + 0x4770: 0x0080, 0x4771: 0x0080, 0x4772: 0x0080, 0x4773: 0x0080, 0x4774: 0x0080, 0x4775: 0x0080, + 0x4776: 0x0080, 0x4777: 0x0080, 0x4778: 0x0080, 0x4779: 0x0080, 0x477a: 0x0080, 0x477b: 0x0080, + 0x477c: 0x0080, 0x477d: 0x0080, 0x477e: 0x0080, 0x477f: 0x0080, + // Block 0x11e, offset 0x4780 + 0x4780: 0x0080, 0x4781: 0x0080, 0x4782: 0x0080, 0x4783: 0x0080, 0x4784: 0x0080, 0x4785: 0x0080, + 0x4786: 0x0080, 0x4787: 0x0080, 0x4788: 0x0080, 0x4789: 0x0080, 0x478a: 0x0080, 0x478b: 0x0080, + 0x478e: 0x0080, 0x478f: 0x0080, 0x4790: 0x0080, 0x4791: 0x0080, + 0x4792: 0x0080, 0x4793: 0x0080, 0x4794: 0x0080, 0x4795: 0x0080, 0x4796: 0x0080, 0x4797: 0x0080, + 0x4798: 0x0080, 0x4799: 0x0080, 0x479a: 0x0080, 0x479b: 0x0080, 0x479c: 0x0080, 0x479d: 0x0080, + 0x479e: 0x0080, 0x479f: 0x0080, 0x47a0: 0x0080, 0x47a1: 0x0080, 0x47a2: 0x0080, 0x47a3: 0x0080, + 0x47a4: 0x0080, 0x47a5: 0x0080, 0x47a6: 0x0080, 0x47a7: 0x0080, 0x47a8: 0x0080, 0x47a9: 0x0080, + 0x47aa: 0x0080, 0x47ab: 0x0080, 0x47ac: 0x0080, 0x47ad: 0x0080, 0x47ae: 0x0080, 0x47af: 0x0080, + 0x47b0: 0x0080, 0x47b1: 0x0080, 0x47b2: 0x0080, 0x47b3: 0x0080, 0x47b4: 0x0080, 0x47b5: 0x0080, + 0x47b6: 0x0080, 0x47b7: 0x0080, 0x47b8: 0x0080, 0x47b9: 0x0080, 0x47ba: 0x0080, 0x47bb: 0x0080, + 0x47bc: 0x0080, 0x47bd: 0x0080, 0x47be: 0x0080, 0x47bf: 0x0080, + // Block 0x11f, offset 0x47c0 + 0x47c0: 0x00c3, 0x47c1: 0x00c3, 0x47c2: 0x00c3, 0x47c3: 0x00c3, 0x47c4: 0x00c3, 0x47c5: 0x00c3, + 0x47c6: 0x00c3, 0x47c7: 0x00c3, 0x47c8: 0x00c3, 0x47c9: 0x00c3, 0x47ca: 0x00c3, 0x47cb: 0x00c3, + 0x47cc: 0x00c3, 0x47cd: 0x00c3, 0x47ce: 
0x00c3, 0x47cf: 0x00c3, 0x47d0: 0x00c3, 0x47d1: 0x00c3, + 0x47d2: 0x00c3, 0x47d3: 0x00c3, 0x47d4: 0x00c3, 0x47d5: 0x00c3, 0x47d6: 0x00c3, 0x47d7: 0x00c3, + 0x47d8: 0x00c3, 0x47d9: 0x00c3, 0x47da: 0x00c3, 0x47db: 0x00c3, 0x47dc: 0x00c3, 0x47dd: 0x00c3, + 0x47de: 0x00c3, 0x47df: 0x00c3, 0x47e0: 0x00c3, 0x47e1: 0x00c3, 0x47e2: 0x00c3, 0x47e3: 0x00c3, + 0x47e4: 0x00c3, 0x47e5: 0x00c3, 0x47e6: 0x00c3, 0x47e7: 0x00c3, 0x47e8: 0x00c3, 0x47e9: 0x00c3, + 0x47ea: 0x00c3, 0x47eb: 0x00c3, 0x47ec: 0x00c3, 0x47ed: 0x00c3, 0x47ee: 0x00c3, 0x47ef: 0x00c3, + 0x47f0: 0x00c3, 0x47f1: 0x00c3, 0x47f2: 0x00c3, 0x47f3: 0x00c3, 0x47f4: 0x00c3, 0x47f5: 0x00c3, + 0x47f6: 0x00c3, 0x47f7: 0x0080, 0x47f8: 0x0080, 0x47f9: 0x0080, 0x47fa: 0x0080, 0x47fb: 0x00c3, + 0x47fc: 0x00c3, 0x47fd: 0x00c3, 0x47fe: 0x00c3, 0x47ff: 0x00c3, + // Block 0x120, offset 0x4800 + 0x4800: 0x00c3, 0x4801: 0x00c3, 0x4802: 0x00c3, 0x4803: 0x00c3, 0x4804: 0x00c3, 0x4805: 0x00c3, + 0x4806: 0x00c3, 0x4807: 0x00c3, 0x4808: 0x00c3, 0x4809: 0x00c3, 0x480a: 0x00c3, 0x480b: 0x00c3, + 0x480c: 0x00c3, 0x480d: 0x00c3, 0x480e: 0x00c3, 0x480f: 0x00c3, 0x4810: 0x00c3, 0x4811: 0x00c3, + 0x4812: 0x00c3, 0x4813: 0x00c3, 0x4814: 0x00c3, 0x4815: 0x00c3, 0x4816: 0x00c3, 0x4817: 0x00c3, + 0x4818: 0x00c3, 0x4819: 0x00c3, 0x481a: 0x00c3, 0x481b: 0x00c3, 0x481c: 0x00c3, 0x481d: 0x00c3, + 0x481e: 0x00c3, 0x481f: 0x00c3, 0x4820: 0x00c3, 0x4821: 0x00c3, 0x4822: 0x00c3, 0x4823: 0x00c3, + 0x4824: 0x00c3, 0x4825: 0x00c3, 0x4826: 0x00c3, 0x4827: 0x00c3, 0x4828: 0x00c3, 0x4829: 0x00c3, + 0x482a: 0x00c3, 0x482b: 0x00c3, 0x482c: 0x00c3, 0x482d: 0x0080, 0x482e: 0x0080, 0x482f: 0x0080, + 0x4830: 0x0080, 0x4831: 0x0080, 0x4832: 0x0080, 0x4833: 0x0080, 0x4834: 0x0080, 0x4835: 0x00c3, + 0x4836: 0x0080, 0x4837: 0x0080, 0x4838: 0x0080, 0x4839: 0x0080, 0x483a: 0x0080, 0x483b: 0x0080, + 0x483c: 0x0080, 0x483d: 0x0080, 0x483e: 0x0080, 0x483f: 0x0080, + // Block 0x121, offset 0x4840 + 0x4840: 0x0080, 0x4841: 0x0080, 0x4842: 0x0080, 0x4843: 0x0080, 0x4844: 0x00c3, 0x4845: 0x0080, + 0x4846: 0x0080, 0x4847: 0x0080, 0x4848: 0x0080, 0x4849: 0x0080, 0x484a: 0x0080, 0x484b: 0x0080, + 0x485b: 0x00c3, 0x485c: 0x00c3, 0x485d: 0x00c3, + 0x485e: 0x00c3, 0x485f: 0x00c3, 0x4861: 0x00c3, 0x4862: 0x00c3, 0x4863: 0x00c3, + 0x4864: 0x00c3, 0x4865: 0x00c3, 0x4866: 0x00c3, 0x4867: 0x00c3, 0x4868: 0x00c3, 0x4869: 0x00c3, + 0x486a: 0x00c3, 0x486b: 0x00c3, 0x486c: 0x00c3, 0x486d: 0x00c3, 0x486e: 0x00c3, 0x486f: 0x00c3, + // Block 0x122, offset 0x4880 + 0x4880: 0x00c3, 0x4881: 0x00c3, 0x4882: 0x00c3, 0x4883: 0x00c3, 0x4884: 0x00c3, 0x4885: 0x00c3, + 0x4886: 0x00c3, 0x4888: 0x00c3, 0x4889: 0x00c3, 0x488a: 0x00c3, 0x488b: 0x00c3, + 0x488c: 0x00c3, 0x488d: 0x00c3, 0x488e: 0x00c3, 0x488f: 0x00c3, 0x4890: 0x00c3, 0x4891: 0x00c3, + 0x4892: 0x00c3, 0x4893: 0x00c3, 0x4894: 0x00c3, 0x4895: 0x00c3, 0x4896: 0x00c3, 0x4897: 0x00c3, + 0x4898: 0x00c3, 0x489b: 0x00c3, 0x489c: 0x00c3, 0x489d: 0x00c3, + 0x489e: 0x00c3, 0x489f: 0x00c3, 0x48a0: 0x00c3, 0x48a1: 0x00c3, 0x48a3: 0x00c3, + 0x48a4: 0x00c3, 0x48a6: 0x00c3, 0x48a7: 0x00c3, 0x48a8: 0x00c3, 0x48a9: 0x00c3, + 0x48aa: 0x00c3, + // Block 0x123, offset 0x48c0 + 0x48c0: 0x00c0, 0x48c1: 0x00c0, 0x48c2: 0x00c0, 0x48c3: 0x00c0, 0x48c4: 0x00c0, + 0x48c7: 0x0080, 0x48c8: 0x0080, 0x48c9: 0x0080, 0x48ca: 0x0080, 0x48cb: 0x0080, + 0x48cc: 0x0080, 0x48cd: 0x0080, 0x48ce: 0x0080, 0x48cf: 0x0080, 0x48d0: 0x00c3, 0x48d1: 0x00c3, + 0x48d2: 0x00c3, 0x48d3: 0x00c3, 0x48d4: 0x00c3, 0x48d5: 0x00c3, 0x48d6: 0x00c3, + // Block 0x124, offset 0x4900 + 0x4900: 0x00c2, 0x4901: 0x00c2, 0x4902: 
0x00c2, 0x4903: 0x00c2, 0x4904: 0x00c2, 0x4905: 0x00c2, + 0x4906: 0x00c2, 0x4907: 0x00c2, 0x4908: 0x00c2, 0x4909: 0x00c2, 0x490a: 0x00c2, 0x490b: 0x00c2, + 0x490c: 0x00c2, 0x490d: 0x00c2, 0x490e: 0x00c2, 0x490f: 0x00c2, 0x4910: 0x00c2, 0x4911: 0x00c2, + 0x4912: 0x00c2, 0x4913: 0x00c2, 0x4914: 0x00c2, 0x4915: 0x00c2, 0x4916: 0x00c2, 0x4917: 0x00c2, + 0x4918: 0x00c2, 0x4919: 0x00c2, 0x491a: 0x00c2, 0x491b: 0x00c2, 0x491c: 0x00c2, 0x491d: 0x00c2, + 0x491e: 0x00c2, 0x491f: 0x00c2, 0x4920: 0x00c2, 0x4921: 0x00c2, 0x4922: 0x00c2, 0x4923: 0x00c2, + 0x4924: 0x00c2, 0x4925: 0x00c2, 0x4926: 0x00c2, 0x4927: 0x00c2, 0x4928: 0x00c2, 0x4929: 0x00c2, + 0x492a: 0x00c2, 0x492b: 0x00c2, 0x492c: 0x00c2, 0x492d: 0x00c2, 0x492e: 0x00c2, 0x492f: 0x00c2, + 0x4930: 0x00c2, 0x4931: 0x00c2, 0x4932: 0x00c2, 0x4933: 0x00c2, 0x4934: 0x00c2, 0x4935: 0x00c2, + 0x4936: 0x00c2, 0x4937: 0x00c2, 0x4938: 0x00c2, 0x4939: 0x00c2, 0x493a: 0x00c2, 0x493b: 0x00c2, + 0x493c: 0x00c2, 0x493d: 0x00c2, 0x493e: 0x00c2, 0x493f: 0x00c2, + // Block 0x125, offset 0x4940 + 0x4940: 0x00c2, 0x4941: 0x00c2, 0x4942: 0x00c2, 0x4943: 0x00c2, 0x4944: 0x00c3, 0x4945: 0x00c3, + 0x4946: 0x00c3, 0x4947: 0x00c3, 0x4948: 0x00c3, 0x4949: 0x00c3, 0x494a: 0x00c3, + 0x4950: 0x00c0, 0x4951: 0x00c0, + 0x4952: 0x00c0, 0x4953: 0x00c0, 0x4954: 0x00c0, 0x4955: 0x00c0, 0x4956: 0x00c0, 0x4957: 0x00c0, + 0x4958: 0x00c0, 0x4959: 0x00c0, + 0x495e: 0x0080, 0x495f: 0x0080, + // Block 0x126, offset 0x4980 + 0x4980: 0x0080, 0x4981: 0x0080, 0x4982: 0x0080, 0x4983: 0x0080, 0x4985: 0x0080, + 0x4986: 0x0080, 0x4987: 0x0080, 0x4988: 0x0080, 0x4989: 0x0080, 0x498a: 0x0080, 0x498b: 0x0080, + 0x498c: 0x0080, 0x498d: 0x0080, 0x498e: 0x0080, 0x498f: 0x0080, 0x4990: 0x0080, 0x4991: 0x0080, + 0x4992: 0x0080, 0x4993: 0x0080, 0x4994: 0x0080, 0x4995: 0x0080, 0x4996: 0x0080, 0x4997: 0x0080, + 0x4998: 0x0080, 0x4999: 0x0080, 0x499a: 0x0080, 0x499b: 0x0080, 0x499c: 0x0080, 0x499d: 0x0080, + 0x499e: 0x0080, 0x499f: 0x0080, 0x49a1: 0x0080, 0x49a2: 0x0080, + 0x49a4: 0x0080, 0x49a7: 0x0080, 0x49a9: 0x0080, + 0x49aa: 0x0080, 0x49ab: 0x0080, 0x49ac: 0x0080, 0x49ad: 0x0080, 0x49ae: 0x0080, 0x49af: 0x0080, + 0x49b0: 0x0080, 0x49b1: 0x0080, 0x49b2: 0x0080, 0x49b4: 0x0080, 0x49b5: 0x0080, + 0x49b6: 0x0080, 0x49b7: 0x0080, 0x49b9: 0x0080, 0x49bb: 0x0080, + // Block 0x127, offset 0x49c0 + 0x49c2: 0x0080, + 0x49c7: 0x0080, 0x49c9: 0x0080, 0x49cb: 0x0080, + 0x49cd: 0x0080, 0x49ce: 0x0080, 0x49cf: 0x0080, 0x49d1: 0x0080, + 0x49d2: 0x0080, 0x49d4: 0x0080, 0x49d7: 0x0080, + 0x49d9: 0x0080, 0x49db: 0x0080, 0x49dd: 0x0080, + 0x49df: 0x0080, 0x49e1: 0x0080, 0x49e2: 0x0080, + 0x49e4: 0x0080, 0x49e7: 0x0080, 0x49e8: 0x0080, 0x49e9: 0x0080, + 0x49ea: 0x0080, 0x49ec: 0x0080, 0x49ed: 0x0080, 0x49ee: 0x0080, 0x49ef: 0x0080, + 0x49f0: 0x0080, 0x49f1: 0x0080, 0x49f2: 0x0080, 0x49f4: 0x0080, 0x49f5: 0x0080, + 0x49f6: 0x0080, 0x49f7: 0x0080, 0x49f9: 0x0080, 0x49fa: 0x0080, 0x49fb: 0x0080, + 0x49fc: 0x0080, 0x49fe: 0x0080, + // Block 0x128, offset 0x4a00 + 0x4a00: 0x0080, 0x4a01: 0x0080, 0x4a02: 0x0080, 0x4a03: 0x0080, 0x4a04: 0x0080, 0x4a05: 0x0080, + 0x4a06: 0x0080, 0x4a07: 0x0080, 0x4a08: 0x0080, 0x4a09: 0x0080, 0x4a0b: 0x0080, + 0x4a0c: 0x0080, 0x4a0d: 0x0080, 0x4a0e: 0x0080, 0x4a0f: 0x0080, 0x4a10: 0x0080, 0x4a11: 0x0080, + 0x4a12: 0x0080, 0x4a13: 0x0080, 0x4a14: 0x0080, 0x4a15: 0x0080, 0x4a16: 0x0080, 0x4a17: 0x0080, + 0x4a18: 0x0080, 0x4a19: 0x0080, 0x4a1a: 0x0080, 0x4a1b: 0x0080, + 0x4a21: 0x0080, 0x4a22: 0x0080, 0x4a23: 0x0080, + 0x4a25: 0x0080, 0x4a26: 0x0080, 0x4a27: 0x0080, 0x4a28: 0x0080, 0x4a29: 0x0080, + 
0x4a2b: 0x0080, 0x4a2c: 0x0080, 0x4a2d: 0x0080, 0x4a2e: 0x0080, 0x4a2f: 0x0080, + 0x4a30: 0x0080, 0x4a31: 0x0080, 0x4a32: 0x0080, 0x4a33: 0x0080, 0x4a34: 0x0080, 0x4a35: 0x0080, + 0x4a36: 0x0080, 0x4a37: 0x0080, 0x4a38: 0x0080, 0x4a39: 0x0080, 0x4a3a: 0x0080, 0x4a3b: 0x0080, + // Block 0x129, offset 0x4a40 + 0x4a70: 0x0080, 0x4a71: 0x0080, + // Block 0x12a, offset 0x4a80 + 0x4a80: 0x0080, 0x4a81: 0x0080, 0x4a82: 0x0080, 0x4a83: 0x0080, 0x4a84: 0x0080, 0x4a85: 0x0080, + 0x4a86: 0x0080, 0x4a87: 0x0080, 0x4a88: 0x0080, 0x4a89: 0x0080, 0x4a8a: 0x0080, 0x4a8b: 0x0080, + 0x4a8c: 0x0080, 0x4a8d: 0x0080, 0x4a8e: 0x0080, 0x4a8f: 0x0080, 0x4a90: 0x0080, 0x4a91: 0x0080, + 0x4a92: 0x0080, 0x4a93: 0x0080, 0x4a94: 0x0080, 0x4a95: 0x0080, 0x4a96: 0x0080, 0x4a97: 0x0080, + 0x4a98: 0x0080, 0x4a99: 0x0080, 0x4a9a: 0x0080, 0x4a9b: 0x0080, 0x4a9c: 0x0080, 0x4a9d: 0x0080, + 0x4a9e: 0x0080, 0x4a9f: 0x0080, 0x4aa0: 0x0080, 0x4aa1: 0x0080, 0x4aa2: 0x0080, 0x4aa3: 0x0080, + 0x4aa4: 0x0080, 0x4aa5: 0x0080, 0x4aa6: 0x0080, 0x4aa7: 0x0080, 0x4aa8: 0x0080, 0x4aa9: 0x0080, + 0x4aaa: 0x0080, 0x4aab: 0x0080, + 0x4ab0: 0x0080, 0x4ab1: 0x0080, 0x4ab2: 0x0080, 0x4ab3: 0x0080, 0x4ab4: 0x0080, 0x4ab5: 0x0080, + 0x4ab6: 0x0080, 0x4ab7: 0x0080, 0x4ab8: 0x0080, 0x4ab9: 0x0080, 0x4aba: 0x0080, 0x4abb: 0x0080, + 0x4abc: 0x0080, 0x4abd: 0x0080, 0x4abe: 0x0080, 0x4abf: 0x0080, + // Block 0x12b, offset 0x4ac0 + 0x4ac0: 0x0080, 0x4ac1: 0x0080, 0x4ac2: 0x0080, 0x4ac3: 0x0080, 0x4ac4: 0x0080, 0x4ac5: 0x0080, + 0x4ac6: 0x0080, 0x4ac7: 0x0080, 0x4ac8: 0x0080, 0x4ac9: 0x0080, 0x4aca: 0x0080, 0x4acb: 0x0080, + 0x4acc: 0x0080, 0x4acd: 0x0080, 0x4ace: 0x0080, 0x4acf: 0x0080, 0x4ad0: 0x0080, 0x4ad1: 0x0080, + 0x4ad2: 0x0080, 0x4ad3: 0x0080, + 0x4ae0: 0x0080, 0x4ae1: 0x0080, 0x4ae2: 0x0080, 0x4ae3: 0x0080, + 0x4ae4: 0x0080, 0x4ae5: 0x0080, 0x4ae6: 0x0080, 0x4ae7: 0x0080, 0x4ae8: 0x0080, 0x4ae9: 0x0080, + 0x4aea: 0x0080, 0x4aeb: 0x0080, 0x4aec: 0x0080, 0x4aed: 0x0080, 0x4aee: 0x0080, + 0x4af1: 0x0080, 0x4af2: 0x0080, 0x4af3: 0x0080, 0x4af4: 0x0080, 0x4af5: 0x0080, + 0x4af6: 0x0080, 0x4af7: 0x0080, 0x4af8: 0x0080, 0x4af9: 0x0080, 0x4afa: 0x0080, 0x4afb: 0x0080, + 0x4afc: 0x0080, 0x4afd: 0x0080, 0x4afe: 0x0080, 0x4aff: 0x0080, + // Block 0x12c, offset 0x4b00 + 0x4b01: 0x0080, 0x4b02: 0x0080, 0x4b03: 0x0080, 0x4b04: 0x0080, 0x4b05: 0x0080, + 0x4b06: 0x0080, 0x4b07: 0x0080, 0x4b08: 0x0080, 0x4b09: 0x0080, 0x4b0a: 0x0080, 0x4b0b: 0x0080, + 0x4b0c: 0x0080, 0x4b0d: 0x0080, 0x4b0e: 0x0080, 0x4b0f: 0x0080, 0x4b11: 0x0080, + 0x4b12: 0x0080, 0x4b13: 0x0080, 0x4b14: 0x0080, 0x4b15: 0x0080, 0x4b16: 0x0080, 0x4b17: 0x0080, + 0x4b18: 0x0080, 0x4b19: 0x0080, 0x4b1a: 0x0080, 0x4b1b: 0x0080, 0x4b1c: 0x0080, 0x4b1d: 0x0080, + 0x4b1e: 0x0080, 0x4b1f: 0x0080, 0x4b20: 0x0080, 0x4b21: 0x0080, 0x4b22: 0x0080, 0x4b23: 0x0080, + 0x4b24: 0x0080, 0x4b25: 0x0080, 0x4b26: 0x0080, 0x4b27: 0x0080, 0x4b28: 0x0080, 0x4b29: 0x0080, + 0x4b2a: 0x0080, 0x4b2b: 0x0080, 0x4b2c: 0x0080, 0x4b2d: 0x0080, 0x4b2e: 0x0080, 0x4b2f: 0x0080, + 0x4b30: 0x0080, 0x4b31: 0x0080, 0x4b32: 0x0080, 0x4b33: 0x0080, 0x4b34: 0x0080, 0x4b35: 0x0080, + // Block 0x12d, offset 0x4b40 + 0x4b40: 0x0080, 0x4b41: 0x0080, 0x4b42: 0x0080, 0x4b43: 0x0080, 0x4b44: 0x0080, 0x4b45: 0x0080, + 0x4b46: 0x0080, 0x4b47: 0x0080, 0x4b48: 0x0080, 0x4b49: 0x0080, 0x4b4a: 0x0080, 0x4b4b: 0x0080, + 0x4b4c: 0x0080, 0x4b50: 0x0080, 0x4b51: 0x0080, + 0x4b52: 0x0080, 0x4b53: 0x0080, 0x4b54: 0x0080, 0x4b55: 0x0080, 0x4b56: 0x0080, 0x4b57: 0x0080, + 0x4b58: 0x0080, 0x4b59: 0x0080, 0x4b5a: 0x0080, 0x4b5b: 0x0080, 0x4b5c: 0x0080, 
0x4b5d: 0x0080, + 0x4b5e: 0x0080, 0x4b5f: 0x0080, 0x4b60: 0x0080, 0x4b61: 0x0080, 0x4b62: 0x0080, 0x4b63: 0x0080, + 0x4b64: 0x0080, 0x4b65: 0x0080, 0x4b66: 0x0080, 0x4b67: 0x0080, 0x4b68: 0x0080, 0x4b69: 0x0080, + 0x4b6a: 0x0080, 0x4b6b: 0x0080, 0x4b6c: 0x0080, 0x4b6d: 0x0080, 0x4b6e: 0x0080, + 0x4b70: 0x0080, 0x4b71: 0x0080, 0x4b72: 0x0080, 0x4b73: 0x0080, 0x4b74: 0x0080, 0x4b75: 0x0080, + 0x4b76: 0x0080, 0x4b77: 0x0080, 0x4b78: 0x0080, 0x4b79: 0x0080, 0x4b7a: 0x0080, 0x4b7b: 0x0080, + 0x4b7c: 0x0080, 0x4b7d: 0x0080, 0x4b7e: 0x0080, 0x4b7f: 0x0080, + // Block 0x12e, offset 0x4b80 + 0x4b80: 0x0080, 0x4b81: 0x0080, 0x4b82: 0x0080, 0x4b83: 0x0080, 0x4b84: 0x0080, 0x4b85: 0x0080, + 0x4b86: 0x0080, 0x4b87: 0x0080, 0x4b88: 0x0080, 0x4b89: 0x0080, 0x4b8a: 0x0080, 0x4b8b: 0x0080, + 0x4b8c: 0x0080, 0x4b8d: 0x0080, 0x4b8e: 0x0080, 0x4b8f: 0x0080, 0x4b90: 0x0080, 0x4b91: 0x0080, + 0x4b92: 0x0080, 0x4b93: 0x0080, 0x4b94: 0x0080, 0x4b95: 0x0080, 0x4b96: 0x0080, 0x4b97: 0x0080, + 0x4b98: 0x0080, 0x4b99: 0x0080, 0x4b9a: 0x0080, 0x4b9b: 0x0080, 0x4b9c: 0x0080, 0x4b9d: 0x0080, + 0x4b9e: 0x0080, 0x4b9f: 0x0080, 0x4ba0: 0x0080, 0x4ba1: 0x0080, 0x4ba2: 0x0080, 0x4ba3: 0x0080, + 0x4ba4: 0x0080, 0x4ba5: 0x0080, 0x4ba6: 0x0080, 0x4ba7: 0x0080, 0x4ba8: 0x0080, 0x4ba9: 0x0080, + 0x4baa: 0x0080, 0x4bab: 0x0080, 0x4bac: 0x0080, + // Block 0x12f, offset 0x4bc0 + 0x4be6: 0x0080, 0x4be7: 0x0080, 0x4be8: 0x0080, 0x4be9: 0x0080, + 0x4bea: 0x0080, 0x4beb: 0x0080, 0x4bec: 0x0080, 0x4bed: 0x0080, 0x4bee: 0x0080, 0x4bef: 0x0080, + 0x4bf0: 0x0080, 0x4bf1: 0x0080, 0x4bf2: 0x0080, 0x4bf3: 0x0080, 0x4bf4: 0x0080, 0x4bf5: 0x0080, + 0x4bf6: 0x0080, 0x4bf7: 0x0080, 0x4bf8: 0x0080, 0x4bf9: 0x0080, 0x4bfa: 0x0080, 0x4bfb: 0x0080, + 0x4bfc: 0x0080, 0x4bfd: 0x0080, 0x4bfe: 0x0080, 0x4bff: 0x0080, + // Block 0x130, offset 0x4c00 + 0x4c00: 0x008c, 0x4c01: 0x0080, 0x4c02: 0x0080, + 0x4c10: 0x0080, 0x4c11: 0x0080, + 0x4c12: 0x0080, 0x4c13: 0x0080, 0x4c14: 0x0080, 0x4c15: 0x0080, 0x4c16: 0x0080, 0x4c17: 0x0080, + 0x4c18: 0x0080, 0x4c19: 0x0080, 0x4c1a: 0x0080, 0x4c1b: 0x0080, 0x4c1c: 0x0080, 0x4c1d: 0x0080, + 0x4c1e: 0x0080, 0x4c1f: 0x0080, 0x4c20: 0x0080, 0x4c21: 0x0080, 0x4c22: 0x0080, 0x4c23: 0x0080, + 0x4c24: 0x0080, 0x4c25: 0x0080, 0x4c26: 0x0080, 0x4c27: 0x0080, 0x4c28: 0x0080, 0x4c29: 0x0080, + 0x4c2a: 0x0080, 0x4c2b: 0x0080, 0x4c2c: 0x0080, 0x4c2d: 0x0080, 0x4c2e: 0x0080, 0x4c2f: 0x0080, + 0x4c30: 0x0080, 0x4c31: 0x0080, 0x4c32: 0x0080, 0x4c33: 0x0080, 0x4c34: 0x0080, 0x4c35: 0x0080, + 0x4c36: 0x0080, 0x4c37: 0x0080, 0x4c38: 0x0080, 0x4c39: 0x0080, 0x4c3a: 0x0080, 0x4c3b: 0x0080, + // Block 0x131, offset 0x4c40 + 0x4c40: 0x0080, 0x4c41: 0x0080, 0x4c42: 0x0080, 0x4c43: 0x0080, 0x4c44: 0x0080, 0x4c45: 0x0080, + 0x4c46: 0x0080, 0x4c47: 0x0080, 0x4c48: 0x0080, + 0x4c50: 0x0080, 0x4c51: 0x0080, + // Block 0x132, offset 0x4c80 + 0x4c80: 0x0080, 0x4c81: 0x0080, 0x4c82: 0x0080, 0x4c83: 0x0080, 0x4c84: 0x0080, 0x4c85: 0x0080, + 0x4c86: 0x0080, 0x4c87: 0x0080, 0x4c88: 0x0080, 0x4c89: 0x0080, 0x4c8a: 0x0080, 0x4c8b: 0x0080, + 0x4c8c: 0x0080, 0x4c8d: 0x0080, 0x4c8e: 0x0080, 0x4c8f: 0x0080, 0x4c90: 0x0080, 0x4c91: 0x0080, + 0x4c92: 0x0080, + 0x4ca0: 0x0080, 0x4ca1: 0x0080, 0x4ca2: 0x0080, 0x4ca3: 0x0080, + 0x4ca4: 0x0080, 0x4ca5: 0x0080, 0x4ca6: 0x0080, 0x4ca7: 0x0080, 0x4ca8: 0x0080, 0x4ca9: 0x0080, + 0x4caa: 0x0080, 0x4cab: 0x0080, 0x4cac: 0x0080, + 0x4cb0: 0x0080, 0x4cb1: 0x0080, 0x4cb2: 0x0080, 0x4cb3: 0x0080, 0x4cb4: 0x0080, 0x4cb5: 0x0080, + 0x4cb6: 0x0080, + // Block 0x133, offset 0x4cc0 + 0x4cc0: 0x0080, 0x4cc1: 0x0080, 0x4cc2: 0x0080, 
0x4cc3: 0x0080, 0x4cc4: 0x0080, 0x4cc5: 0x0080, + 0x4cc6: 0x0080, 0x4cc7: 0x0080, 0x4cc8: 0x0080, 0x4cc9: 0x0080, 0x4cca: 0x0080, 0x4ccb: 0x0080, + 0x4ccc: 0x0080, 0x4ccd: 0x0080, 0x4cce: 0x0080, 0x4ccf: 0x0080, 0x4cd0: 0x0080, 0x4cd1: 0x0080, + 0x4cd2: 0x0080, 0x4cd3: 0x0080, 0x4cd4: 0x0080, 0x4cd5: 0x0080, 0x4cd6: 0x0080, 0x4cd7: 0x0080, + 0x4cd8: 0x0080, 0x4cd9: 0x0080, 0x4cda: 0x0080, 0x4cdb: 0x0080, 0x4cdc: 0x0080, 0x4cdd: 0x0080, + 0x4cde: 0x0080, 0x4cdf: 0x0080, 0x4ce0: 0x0080, 0x4ce1: 0x0080, 0x4ce2: 0x0080, 0x4ce3: 0x0080, + 0x4ce4: 0x0080, 0x4ce5: 0x0080, 0x4ce6: 0x0080, 0x4ce7: 0x0080, 0x4ce8: 0x0080, 0x4ce9: 0x0080, + 0x4cea: 0x0080, 0x4ceb: 0x0080, 0x4cec: 0x0080, 0x4ced: 0x0080, 0x4cee: 0x0080, 0x4cef: 0x0080, + 0x4cf0: 0x0080, 0x4cf1: 0x0080, 0x4cf2: 0x0080, 0x4cf3: 0x0080, + // Block 0x134, offset 0x4d00 + 0x4d00: 0x0080, 0x4d01: 0x0080, 0x4d02: 0x0080, 0x4d03: 0x0080, 0x4d04: 0x0080, 0x4d05: 0x0080, + 0x4d06: 0x0080, 0x4d07: 0x0080, 0x4d08: 0x0080, 0x4d09: 0x0080, 0x4d0a: 0x0080, 0x4d0b: 0x0080, + 0x4d0c: 0x0080, 0x4d0d: 0x0080, 0x4d0e: 0x0080, 0x4d0f: 0x0080, 0x4d10: 0x0080, 0x4d11: 0x0080, + 0x4d12: 0x0080, 0x4d13: 0x0080, 0x4d14: 0x0080, + // Block 0x135, offset 0x4d40 + 0x4d40: 0x0080, 0x4d41: 0x0080, 0x4d42: 0x0080, 0x4d43: 0x0080, 0x4d44: 0x0080, 0x4d45: 0x0080, + 0x4d46: 0x0080, 0x4d47: 0x0080, 0x4d48: 0x0080, 0x4d49: 0x0080, 0x4d4a: 0x0080, 0x4d4b: 0x0080, + 0x4d50: 0x0080, 0x4d51: 0x0080, + 0x4d52: 0x0080, 0x4d53: 0x0080, 0x4d54: 0x0080, 0x4d55: 0x0080, 0x4d56: 0x0080, 0x4d57: 0x0080, + 0x4d58: 0x0080, 0x4d59: 0x0080, 0x4d5a: 0x0080, 0x4d5b: 0x0080, 0x4d5c: 0x0080, 0x4d5d: 0x0080, + 0x4d5e: 0x0080, 0x4d5f: 0x0080, 0x4d60: 0x0080, 0x4d61: 0x0080, 0x4d62: 0x0080, 0x4d63: 0x0080, + 0x4d64: 0x0080, 0x4d65: 0x0080, 0x4d66: 0x0080, 0x4d67: 0x0080, 0x4d68: 0x0080, 0x4d69: 0x0080, + 0x4d6a: 0x0080, 0x4d6b: 0x0080, 0x4d6c: 0x0080, 0x4d6d: 0x0080, 0x4d6e: 0x0080, 0x4d6f: 0x0080, + 0x4d70: 0x0080, 0x4d71: 0x0080, 0x4d72: 0x0080, 0x4d73: 0x0080, 0x4d74: 0x0080, 0x4d75: 0x0080, + 0x4d76: 0x0080, 0x4d77: 0x0080, 0x4d78: 0x0080, 0x4d79: 0x0080, 0x4d7a: 0x0080, 0x4d7b: 0x0080, + 0x4d7c: 0x0080, 0x4d7d: 0x0080, 0x4d7e: 0x0080, 0x4d7f: 0x0080, + // Block 0x136, offset 0x4d80 + 0x4d80: 0x0080, 0x4d81: 0x0080, 0x4d82: 0x0080, 0x4d83: 0x0080, 0x4d84: 0x0080, 0x4d85: 0x0080, + 0x4d86: 0x0080, 0x4d87: 0x0080, + 0x4d90: 0x0080, 0x4d91: 0x0080, + 0x4d92: 0x0080, 0x4d93: 0x0080, 0x4d94: 0x0080, 0x4d95: 0x0080, 0x4d96: 0x0080, 0x4d97: 0x0080, + 0x4d98: 0x0080, 0x4d99: 0x0080, + 0x4da0: 0x0080, 0x4da1: 0x0080, 0x4da2: 0x0080, 0x4da3: 0x0080, + 0x4da4: 0x0080, 0x4da5: 0x0080, 0x4da6: 0x0080, 0x4da7: 0x0080, 0x4da8: 0x0080, 0x4da9: 0x0080, + 0x4daa: 0x0080, 0x4dab: 0x0080, 0x4dac: 0x0080, 0x4dad: 0x0080, 0x4dae: 0x0080, 0x4daf: 0x0080, + 0x4db0: 0x0080, 0x4db1: 0x0080, 0x4db2: 0x0080, 0x4db3: 0x0080, 0x4db4: 0x0080, 0x4db5: 0x0080, + 0x4db6: 0x0080, 0x4db7: 0x0080, 0x4db8: 0x0080, 0x4db9: 0x0080, 0x4dba: 0x0080, 0x4dbb: 0x0080, + 0x4dbc: 0x0080, 0x4dbd: 0x0080, 0x4dbe: 0x0080, 0x4dbf: 0x0080, + // Block 0x137, offset 0x4dc0 + 0x4dc0: 0x0080, 0x4dc1: 0x0080, 0x4dc2: 0x0080, 0x4dc3: 0x0080, 0x4dc4: 0x0080, 0x4dc5: 0x0080, + 0x4dc6: 0x0080, 0x4dc7: 0x0080, + 0x4dd0: 0x0080, 0x4dd1: 0x0080, + 0x4dd2: 0x0080, 0x4dd3: 0x0080, 0x4dd4: 0x0080, 0x4dd5: 0x0080, 0x4dd6: 0x0080, 0x4dd7: 0x0080, + 0x4dd8: 0x0080, 0x4dd9: 0x0080, 0x4dda: 0x0080, 0x4ddb: 0x0080, 0x4ddc: 0x0080, 0x4ddd: 0x0080, + 0x4dde: 0x0080, 0x4ddf: 0x0080, 0x4de0: 0x0080, 0x4de1: 0x0080, 0x4de2: 0x0080, 0x4de3: 0x0080, + 0x4de4: 0x0080, 
0x4de5: 0x0080, 0x4de6: 0x0080, 0x4de7: 0x0080, 0x4de8: 0x0080, 0x4de9: 0x0080, + 0x4dea: 0x0080, 0x4deb: 0x0080, 0x4dec: 0x0080, 0x4ded: 0x0080, + // Block 0x138, offset 0x4e00 + 0x4e10: 0x0080, 0x4e11: 0x0080, + 0x4e12: 0x0080, 0x4e13: 0x0080, 0x4e14: 0x0080, 0x4e15: 0x0080, 0x4e16: 0x0080, 0x4e17: 0x0080, + 0x4e18: 0x0080, 0x4e19: 0x0080, 0x4e1a: 0x0080, 0x4e1b: 0x0080, 0x4e1c: 0x0080, 0x4e1d: 0x0080, + 0x4e1e: 0x0080, 0x4e20: 0x0080, 0x4e21: 0x0080, 0x4e22: 0x0080, 0x4e23: 0x0080, + 0x4e24: 0x0080, 0x4e25: 0x0080, 0x4e26: 0x0080, 0x4e27: 0x0080, + 0x4e30: 0x0080, 0x4e33: 0x0080, 0x4e34: 0x0080, 0x4e35: 0x0080, + 0x4e36: 0x0080, 0x4e37: 0x0080, 0x4e38: 0x0080, 0x4e39: 0x0080, 0x4e3a: 0x0080, 0x4e3b: 0x0080, + 0x4e3c: 0x0080, 0x4e3d: 0x0080, 0x4e3e: 0x0080, + // Block 0x139, offset 0x4e40 + 0x4e40: 0x0080, 0x4e41: 0x0080, 0x4e42: 0x0080, 0x4e43: 0x0080, 0x4e44: 0x0080, 0x4e45: 0x0080, + 0x4e46: 0x0080, 0x4e47: 0x0080, 0x4e48: 0x0080, 0x4e49: 0x0080, 0x4e4a: 0x0080, 0x4e4b: 0x0080, + 0x4e50: 0x0080, 0x4e51: 0x0080, + 0x4e52: 0x0080, 0x4e53: 0x0080, 0x4e54: 0x0080, 0x4e55: 0x0080, 0x4e56: 0x0080, 0x4e57: 0x0080, + 0x4e58: 0x0080, 0x4e59: 0x0080, 0x4e5a: 0x0080, 0x4e5b: 0x0080, 0x4e5c: 0x0080, 0x4e5d: 0x0080, + 0x4e5e: 0x0080, + // Block 0x13a, offset 0x4e80 + 0x4e80: 0x0080, 0x4e81: 0x0080, 0x4e82: 0x0080, 0x4e83: 0x0080, 0x4e84: 0x0080, 0x4e85: 0x0080, + 0x4e86: 0x0080, 0x4e87: 0x0080, 0x4e88: 0x0080, 0x4e89: 0x0080, 0x4e8a: 0x0080, 0x4e8b: 0x0080, + 0x4e8c: 0x0080, 0x4e8d: 0x0080, 0x4e8e: 0x0080, 0x4e8f: 0x0080, 0x4e90: 0x0080, 0x4e91: 0x0080, + // Block 0x13b, offset 0x4ec0 + 0x4ec0: 0x0080, + // Block 0x13c, offset 0x4f00 + 0x4f00: 0x00cc, 0x4f01: 0x00cc, 0x4f02: 0x00cc, 0x4f03: 0x00cc, 0x4f04: 0x00cc, 0x4f05: 0x00cc, + 0x4f06: 0x00cc, 0x4f07: 0x00cc, 0x4f08: 0x00cc, 0x4f09: 0x00cc, 0x4f0a: 0x00cc, 0x4f0b: 0x00cc, + 0x4f0c: 0x00cc, 0x4f0d: 0x00cc, 0x4f0e: 0x00cc, 0x4f0f: 0x00cc, 0x4f10: 0x00cc, 0x4f11: 0x00cc, + 0x4f12: 0x00cc, 0x4f13: 0x00cc, 0x4f14: 0x00cc, 0x4f15: 0x00cc, 0x4f16: 0x00cc, + // Block 0x13d, offset 0x4f40 + 0x4f40: 0x00cc, 0x4f41: 0x00cc, 0x4f42: 0x00cc, 0x4f43: 0x00cc, 0x4f44: 0x00cc, 0x4f45: 0x00cc, + 0x4f46: 0x00cc, 0x4f47: 0x00cc, 0x4f48: 0x00cc, 0x4f49: 0x00cc, 0x4f4a: 0x00cc, 0x4f4b: 0x00cc, + 0x4f4c: 0x00cc, 0x4f4d: 0x00cc, 0x4f4e: 0x00cc, 0x4f4f: 0x00cc, 0x4f50: 0x00cc, 0x4f51: 0x00cc, + 0x4f52: 0x00cc, 0x4f53: 0x00cc, 0x4f54: 0x00cc, 0x4f55: 0x00cc, 0x4f56: 0x00cc, 0x4f57: 0x00cc, + 0x4f58: 0x00cc, 0x4f59: 0x00cc, 0x4f5a: 0x00cc, 0x4f5b: 0x00cc, 0x4f5c: 0x00cc, 0x4f5d: 0x00cc, + 0x4f5e: 0x00cc, 0x4f5f: 0x00cc, 0x4f60: 0x00cc, 0x4f61: 0x00cc, 0x4f62: 0x00cc, 0x4f63: 0x00cc, + 0x4f64: 0x00cc, 0x4f65: 0x00cc, 0x4f66: 0x00cc, 0x4f67: 0x00cc, 0x4f68: 0x00cc, 0x4f69: 0x00cc, + 0x4f6a: 0x00cc, 0x4f6b: 0x00cc, 0x4f6c: 0x00cc, 0x4f6d: 0x00cc, 0x4f6e: 0x00cc, 0x4f6f: 0x00cc, + 0x4f70: 0x00cc, 0x4f71: 0x00cc, 0x4f72: 0x00cc, 0x4f73: 0x00cc, 0x4f74: 0x00cc, + // Block 0x13e, offset 0x4f80 + 0x4f80: 0x00cc, 0x4f81: 0x00cc, 0x4f82: 0x00cc, 0x4f83: 0x00cc, 0x4f84: 0x00cc, 0x4f85: 0x00cc, + 0x4f86: 0x00cc, 0x4f87: 0x00cc, 0x4f88: 0x00cc, 0x4f89: 0x00cc, 0x4f8a: 0x00cc, 0x4f8b: 0x00cc, + 0x4f8c: 0x00cc, 0x4f8d: 0x00cc, 0x4f8e: 0x00cc, 0x4f8f: 0x00cc, 0x4f90: 0x00cc, 0x4f91: 0x00cc, + 0x4f92: 0x00cc, 0x4f93: 0x00cc, 0x4f94: 0x00cc, 0x4f95: 0x00cc, 0x4f96: 0x00cc, 0x4f97: 0x00cc, + 0x4f98: 0x00cc, 0x4f99: 0x00cc, 0x4f9a: 0x00cc, 0x4f9b: 0x00cc, 0x4f9c: 0x00cc, 0x4f9d: 0x00cc, + 0x4fa0: 0x00cc, 0x4fa1: 0x00cc, 0x4fa2: 0x00cc, 0x4fa3: 0x00cc, + 0x4fa4: 0x00cc, 0x4fa5: 0x00cc, 
0x4fa6: 0x00cc, 0x4fa7: 0x00cc, 0x4fa8: 0x00cc, 0x4fa9: 0x00cc, + 0x4faa: 0x00cc, 0x4fab: 0x00cc, 0x4fac: 0x00cc, 0x4fad: 0x00cc, 0x4fae: 0x00cc, 0x4faf: 0x00cc, + 0x4fb0: 0x00cc, 0x4fb1: 0x00cc, 0x4fb2: 0x00cc, 0x4fb3: 0x00cc, 0x4fb4: 0x00cc, 0x4fb5: 0x00cc, + 0x4fb6: 0x00cc, 0x4fb7: 0x00cc, 0x4fb8: 0x00cc, 0x4fb9: 0x00cc, 0x4fba: 0x00cc, 0x4fbb: 0x00cc, + 0x4fbc: 0x00cc, 0x4fbd: 0x00cc, 0x4fbe: 0x00cc, 0x4fbf: 0x00cc, + // Block 0x13f, offset 0x4fc0 + 0x4fc0: 0x00cc, 0x4fc1: 0x00cc, 0x4fc2: 0x00cc, 0x4fc3: 0x00cc, 0x4fc4: 0x00cc, 0x4fc5: 0x00cc, + 0x4fc6: 0x00cc, 0x4fc7: 0x00cc, 0x4fc8: 0x00cc, 0x4fc9: 0x00cc, 0x4fca: 0x00cc, 0x4fcb: 0x00cc, + 0x4fcc: 0x00cc, 0x4fcd: 0x00cc, 0x4fce: 0x00cc, 0x4fcf: 0x00cc, 0x4fd0: 0x00cc, 0x4fd1: 0x00cc, + 0x4fd2: 0x00cc, 0x4fd3: 0x00cc, 0x4fd4: 0x00cc, 0x4fd5: 0x00cc, 0x4fd6: 0x00cc, 0x4fd7: 0x00cc, + 0x4fd8: 0x00cc, 0x4fd9: 0x00cc, 0x4fda: 0x00cc, 0x4fdb: 0x00cc, 0x4fdc: 0x00cc, 0x4fdd: 0x00cc, + 0x4fde: 0x00cc, 0x4fdf: 0x00cc, 0x4fe0: 0x00cc, 0x4fe1: 0x00cc, + // Block 0x140, offset 0x5000 + 0x5000: 0x008c, 0x5001: 0x008c, 0x5002: 0x008c, 0x5003: 0x008c, 0x5004: 0x008c, 0x5005: 0x008c, + 0x5006: 0x008c, 0x5007: 0x008c, 0x5008: 0x008c, 0x5009: 0x008c, 0x500a: 0x008c, 0x500b: 0x008c, + 0x500c: 0x008c, 0x500d: 0x008c, 0x500e: 0x008c, 0x500f: 0x008c, 0x5010: 0x008c, 0x5011: 0x008c, + 0x5012: 0x008c, 0x5013: 0x008c, 0x5014: 0x008c, 0x5015: 0x008c, 0x5016: 0x008c, 0x5017: 0x008c, + 0x5018: 0x008c, 0x5019: 0x008c, 0x501a: 0x008c, 0x501b: 0x008c, 0x501c: 0x008c, 0x501d: 0x008c, + // Block 0x141, offset 0x5040 + 0x5041: 0x0040, + 0x5060: 0x0040, 0x5061: 0x0040, 0x5062: 0x0040, 0x5063: 0x0040, + 0x5064: 0x0040, 0x5065: 0x0040, 0x5066: 0x0040, 0x5067: 0x0040, 0x5068: 0x0040, 0x5069: 0x0040, + 0x506a: 0x0040, 0x506b: 0x0040, 0x506c: 0x0040, 0x506d: 0x0040, 0x506e: 0x0040, 0x506f: 0x0040, + 0x5070: 0x0040, 0x5071: 0x0040, 0x5072: 0x0040, 0x5073: 0x0040, 0x5074: 0x0040, 0x5075: 0x0040, + 0x5076: 0x0040, 0x5077: 0x0040, 0x5078: 0x0040, 0x5079: 0x0040, 0x507a: 0x0040, 0x507b: 0x0040, + 0x507c: 0x0040, 0x507d: 0x0040, 0x507e: 0x0040, 0x507f: 0x0040, + // Block 0x142, offset 0x5080 + 0x5080: 0x0040, 0x5081: 0x0040, 0x5082: 0x0040, 0x5083: 0x0040, 0x5084: 0x0040, 0x5085: 0x0040, + 0x5086: 0x0040, 0x5087: 0x0040, 0x5088: 0x0040, 0x5089: 0x0040, 0x508a: 0x0040, 0x508b: 0x0040, + 0x508c: 0x0040, 0x508d: 0x0040, 0x508e: 0x0040, 0x508f: 0x0040, 0x5090: 0x0040, 0x5091: 0x0040, + 0x5092: 0x0040, 0x5093: 0x0040, 0x5094: 0x0040, 0x5095: 0x0040, 0x5096: 0x0040, 0x5097: 0x0040, + 0x5098: 0x0040, 0x5099: 0x0040, 0x509a: 0x0040, 0x509b: 0x0040, 0x509c: 0x0040, 0x509d: 0x0040, + 0x509e: 0x0040, 0x509f: 0x0040, 0x50a0: 0x0040, 0x50a1: 0x0040, 0x50a2: 0x0040, 0x50a3: 0x0040, + 0x50a4: 0x0040, 0x50a5: 0x0040, 0x50a6: 0x0040, 0x50a7: 0x0040, 0x50a8: 0x0040, 0x50a9: 0x0040, + 0x50aa: 0x0040, 0x50ab: 0x0040, 0x50ac: 0x0040, 0x50ad: 0x0040, 0x50ae: 0x0040, 0x50af: 0x0040, + // Block 0x143, offset 0x50c0 + 0x50c0: 0x0040, 0x50c1: 0x0040, 0x50c2: 0x0040, 0x50c3: 0x0040, 0x50c4: 0x0040, 0x50c5: 0x0040, + 0x50c6: 0x0040, 0x50c7: 0x0040, 0x50c8: 0x0040, 0x50c9: 0x0040, 0x50ca: 0x0040, 0x50cb: 0x0040, + 0x50cc: 0x0040, 0x50cd: 0x0040, 0x50ce: 0x0040, 0x50cf: 0x0040, 0x50d0: 0x0040, 0x50d1: 0x0040, + 0x50d2: 0x0040, 0x50d3: 0x0040, 0x50d4: 0x0040, 0x50d5: 0x0040, 0x50d6: 0x0040, 0x50d7: 0x0040, + 0x50d8: 0x0040, 0x50d9: 0x0040, 0x50da: 0x0040, 0x50db: 0x0040, 0x50dc: 0x0040, 0x50dd: 0x0040, + 0x50de: 0x0040, 0x50df: 0x0040, 0x50e0: 0x0040, 0x50e1: 0x0040, 0x50e2: 0x0040, 0x50e3: 0x0040, + 0x50e4: 
0x0040, 0x50e5: 0x0040, 0x50e6: 0x0040, 0x50e7: 0x0040, 0x50e8: 0x0040, 0x50e9: 0x0040, + 0x50ea: 0x0040, 0x50eb: 0x0040, 0x50ec: 0x0040, 0x50ed: 0x0040, 0x50ee: 0x0040, 0x50ef: 0x0040, + 0x50f0: 0x0040, 0x50f1: 0x0040, 0x50f2: 0x0040, 0x50f3: 0x0040, 0x50f4: 0x0040, 0x50f5: 0x0040, + 0x50f6: 0x0040, 0x50f7: 0x0040, 0x50f8: 0x0040, 0x50f9: 0x0040, 0x50fa: 0x0040, 0x50fb: 0x0040, + 0x50fc: 0x0040, 0x50fd: 0x0040, +} + +// derivedPropertiesIndex: 36 blocks, 2304 entries, 4608 bytes +// Block 0 is the zero block. +var derivedPropertiesIndex = [2304]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc6: 0x05, 0xc7: 0x06, + 0xc8: 0x05, 0xc9: 0x05, 0xca: 0x07, 0xcb: 0x08, 0xcc: 0x09, 0xcd: 0x0a, 0xce: 0x0b, 0xcf: 0x0c, + 0xd0: 0x05, 0xd1: 0x05, 0xd2: 0x0d, 0xd3: 0x05, 0xd4: 0x0e, 0xd5: 0x0f, 0xd6: 0x10, 0xd7: 0x11, + 0xd8: 0x12, 0xd9: 0x13, 0xda: 0x14, 0xdb: 0x15, 0xdc: 0x16, 0xdd: 0x17, 0xde: 0x18, 0xdf: 0x19, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x0a, 0xec: 0x0a, 0xed: 0x0b, 0xee: 0x0c, 0xef: 0x0d, + 0xf0: 0x1d, 0xf3: 0x20, 0xf4: 0x21, + // Block 0x4, offset 0x100 + 0x120: 0x1a, 0x121: 0x1b, 0x122: 0x1c, 0x123: 0x1d, 0x124: 0x1e, 0x125: 0x1f, 0x126: 0x20, 0x127: 0x21, + 0x128: 0x22, 0x129: 0x23, 0x12a: 0x24, 0x12b: 0x25, 0x12c: 0x26, 0x12d: 0x27, 0x12e: 0x28, 0x12f: 0x29, + 0x130: 0x2a, 0x131: 0x2b, 0x132: 0x2c, 0x133: 0x2d, 0x134: 0x2e, 0x135: 0x2f, 0x136: 0x30, 0x137: 0x31, + 0x138: 0x32, 0x139: 0x33, 0x13a: 0x34, 0x13b: 0x35, 0x13c: 0x36, 0x13d: 0x37, 0x13e: 0x38, 0x13f: 0x39, + // Block 0x5, offset 0x140 + 0x140: 0x3a, 0x141: 0x3b, 0x142: 0x3c, 0x143: 0x3d, 0x144: 0x3e, 0x145: 0x3e, 0x146: 0x3e, 0x147: 0x3e, + 0x148: 0x05, 0x149: 0x3f, 0x14a: 0x40, 0x14b: 0x41, 0x14c: 0x42, 0x14d: 0x43, 0x14e: 0x44, 0x14f: 0x45, + 0x150: 0x46, 0x151: 0x05, 0x152: 0x05, 0x153: 0x05, 0x154: 0x05, 0x155: 0x05, 0x156: 0x05, 0x157: 0x05, + 0x158: 0x05, 0x159: 0x47, 0x15a: 0x48, 0x15b: 0x49, 0x15c: 0x4a, 0x15d: 0x4b, 0x15e: 0x4c, 0x15f: 0x4d, + 0x160: 0x4e, 0x161: 0x4f, 0x162: 0x50, 0x163: 0x51, 0x164: 0x52, 0x165: 0x53, 0x166: 0x54, 0x167: 0x55, + 0x168: 0x56, 0x169: 0x57, 0x16a: 0x58, 0x16c: 0x59, 0x16d: 0x5a, 0x16e: 0x5b, 0x16f: 0x5c, + 0x170: 0x5d, 0x171: 0x5e, 0x172: 0x5f, 0x173: 0x60, 0x174: 0x61, 0x175: 0x62, 0x176: 0x63, 0x177: 0x64, + 0x178: 0x05, 0x179: 0x05, 0x17a: 0x65, 0x17b: 0x05, 0x17c: 0x66, 0x17d: 0x67, 0x17e: 0x68, 0x17f: 0x69, + // Block 0x6, offset 0x180 + 0x180: 0x6a, 0x181: 0x6b, 0x182: 0x6c, 0x183: 0x6d, 0x184: 0x6e, 0x185: 0x6f, 0x186: 0x70, 0x187: 0x71, + 0x188: 0x71, 0x189: 0x71, 0x18a: 0x71, 0x18b: 0x71, 0x18c: 0x71, 0x18d: 0x71, 0x18e: 0x71, 0x18f: 0x72, + 0x190: 0x73, 0x191: 0x74, 0x192: 0x71, 0x193: 0x71, 0x194: 0x71, 0x195: 0x71, 0x196: 0x71, 0x197: 0x71, + 0x198: 0x71, 0x199: 0x71, 0x19a: 0x71, 0x19b: 0x71, 0x19c: 0x71, 0x19d: 0x71, 0x19e: 0x71, 0x19f: 0x71, + 0x1a0: 0x71, 0x1a1: 0x71, 0x1a2: 0x71, 0x1a3: 0x71, 0x1a4: 0x71, 0x1a5: 0x71, 0x1a6: 0x71, 0x1a7: 0x71, + 0x1a8: 0x71, 0x1a9: 0x71, 0x1aa: 0x71, 0x1ab: 0x71, 0x1ac: 0x71, 0x1ad: 0x75, 0x1ae: 0x76, 0x1af: 0x77, + 0x1b0: 0x78, 0x1b1: 0x79, 0x1b2: 0x05, 0x1b3: 0x7a, 0x1b4: 0x7b, 0x1b5: 0x7c, 0x1b6: 0x7d, 0x1b7: 0x7e, + 0x1b8: 0x7f, 0x1b9: 0x80, 0x1ba: 0x81, 0x1bb: 0x82, 0x1bc: 0x83, 0x1bd: 0x83, 0x1be: 0x83, 0x1bf: 0x84, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x85, 0x1c1: 0x86, 0x1c2: 0x87, 0x1c3: 0x88, 
0x1c4: 0x89, 0x1c5: 0x8a, 0x1c6: 0x8b, 0x1c7: 0x8c, + 0x1c8: 0x8d, 0x1c9: 0x71, 0x1ca: 0x71, 0x1cb: 0x8e, 0x1cc: 0x83, 0x1cd: 0x8f, 0x1ce: 0x71, 0x1cf: 0x71, + 0x1d0: 0x90, 0x1d1: 0x90, 0x1d2: 0x90, 0x1d3: 0x90, 0x1d4: 0x90, 0x1d5: 0x90, 0x1d6: 0x90, 0x1d7: 0x90, + 0x1d8: 0x90, 0x1d9: 0x90, 0x1da: 0x90, 0x1db: 0x90, 0x1dc: 0x90, 0x1dd: 0x90, 0x1de: 0x90, 0x1df: 0x90, + 0x1e0: 0x90, 0x1e1: 0x90, 0x1e2: 0x90, 0x1e3: 0x90, 0x1e4: 0x90, 0x1e5: 0x90, 0x1e6: 0x90, 0x1e7: 0x90, + 0x1e8: 0x90, 0x1e9: 0x90, 0x1ea: 0x90, 0x1eb: 0x90, 0x1ec: 0x90, 0x1ed: 0x90, 0x1ee: 0x90, 0x1ef: 0x90, + 0x1f0: 0x90, 0x1f1: 0x90, 0x1f2: 0x90, 0x1f3: 0x90, 0x1f4: 0x90, 0x1f5: 0x90, 0x1f6: 0x90, 0x1f7: 0x90, + 0x1f8: 0x90, 0x1f9: 0x90, 0x1fa: 0x90, 0x1fb: 0x90, 0x1fc: 0x90, 0x1fd: 0x90, 0x1fe: 0x90, 0x1ff: 0x90, + // Block 0x8, offset 0x200 + 0x200: 0x90, 0x201: 0x90, 0x202: 0x90, 0x203: 0x90, 0x204: 0x90, 0x205: 0x90, 0x206: 0x90, 0x207: 0x90, + 0x208: 0x90, 0x209: 0x90, 0x20a: 0x90, 0x20b: 0x90, 0x20c: 0x90, 0x20d: 0x90, 0x20e: 0x90, 0x20f: 0x90, + 0x210: 0x90, 0x211: 0x90, 0x212: 0x90, 0x213: 0x90, 0x214: 0x90, 0x215: 0x90, 0x216: 0x90, 0x217: 0x90, + 0x218: 0x90, 0x219: 0x90, 0x21a: 0x90, 0x21b: 0x90, 0x21c: 0x90, 0x21d: 0x90, 0x21e: 0x90, 0x21f: 0x90, + 0x220: 0x90, 0x221: 0x90, 0x222: 0x90, 0x223: 0x90, 0x224: 0x90, 0x225: 0x90, 0x226: 0x90, 0x227: 0x90, + 0x228: 0x90, 0x229: 0x90, 0x22a: 0x90, 0x22b: 0x90, 0x22c: 0x90, 0x22d: 0x90, 0x22e: 0x90, 0x22f: 0x90, + 0x230: 0x90, 0x231: 0x90, 0x232: 0x90, 0x233: 0x90, 0x234: 0x90, 0x235: 0x90, 0x236: 0x91, 0x237: 0x71, + 0x238: 0x90, 0x239: 0x90, 0x23a: 0x90, 0x23b: 0x90, 0x23c: 0x90, 0x23d: 0x90, 0x23e: 0x90, 0x23f: 0x90, + // Block 0x9, offset 0x240 + 0x240: 0x90, 0x241: 0x90, 0x242: 0x90, 0x243: 0x90, 0x244: 0x90, 0x245: 0x90, 0x246: 0x90, 0x247: 0x90, + 0x248: 0x90, 0x249: 0x90, 0x24a: 0x90, 0x24b: 0x90, 0x24c: 0x90, 0x24d: 0x90, 0x24e: 0x90, 0x24f: 0x90, + 0x250: 0x90, 0x251: 0x90, 0x252: 0x90, 0x253: 0x90, 0x254: 0x90, 0x255: 0x90, 0x256: 0x90, 0x257: 0x90, + 0x258: 0x90, 0x259: 0x90, 0x25a: 0x90, 0x25b: 0x90, 0x25c: 0x90, 0x25d: 0x90, 0x25e: 0x90, 0x25f: 0x90, + 0x260: 0x90, 0x261: 0x90, 0x262: 0x90, 0x263: 0x90, 0x264: 0x90, 0x265: 0x90, 0x266: 0x90, 0x267: 0x90, + 0x268: 0x90, 0x269: 0x90, 0x26a: 0x90, 0x26b: 0x90, 0x26c: 0x90, 0x26d: 0x90, 0x26e: 0x90, 0x26f: 0x90, + 0x270: 0x90, 0x271: 0x90, 0x272: 0x90, 0x273: 0x90, 0x274: 0x90, 0x275: 0x90, 0x276: 0x90, 0x277: 0x90, + 0x278: 0x90, 0x279: 0x90, 0x27a: 0x90, 0x27b: 0x90, 0x27c: 0x90, 0x27d: 0x90, 0x27e: 0x90, 0x27f: 0x90, + // Block 0xa, offset 0x280 + 0x280: 0x90, 0x281: 0x90, 0x282: 0x90, 0x283: 0x90, 0x284: 0x90, 0x285: 0x90, 0x286: 0x90, 0x287: 0x90, + 0x288: 0x90, 0x289: 0x90, 0x28a: 0x90, 0x28b: 0x90, 0x28c: 0x90, 0x28d: 0x90, 0x28e: 0x90, 0x28f: 0x90, + 0x290: 0x90, 0x291: 0x90, 0x292: 0x90, 0x293: 0x90, 0x294: 0x90, 0x295: 0x90, 0x296: 0x90, 0x297: 0x90, + 0x298: 0x90, 0x299: 0x90, 0x29a: 0x90, 0x29b: 0x90, 0x29c: 0x90, 0x29d: 0x90, 0x29e: 0x90, 0x29f: 0x90, + 0x2a0: 0x90, 0x2a1: 0x90, 0x2a2: 0x90, 0x2a3: 0x90, 0x2a4: 0x90, 0x2a5: 0x90, 0x2a6: 0x90, 0x2a7: 0x90, + 0x2a8: 0x90, 0x2a9: 0x90, 0x2aa: 0x90, 0x2ab: 0x90, 0x2ac: 0x90, 0x2ad: 0x90, 0x2ae: 0x90, 0x2af: 0x90, + 0x2b0: 0x90, 0x2b1: 0x90, 0x2b2: 0x90, 0x2b3: 0x90, 0x2b4: 0x90, 0x2b5: 0x90, 0x2b6: 0x90, 0x2b7: 0x90, + 0x2b8: 0x90, 0x2b9: 0x90, 0x2ba: 0x90, 0x2bb: 0x90, 0x2bc: 0x90, 0x2bd: 0x90, 0x2be: 0x90, 0x2bf: 0x92, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x05, 0x2c1: 0x05, 0x2c2: 0x05, 0x2c3: 0x05, 0x2c4: 0x05, 0x2c5: 0x05, 0x2c6: 0x05, 0x2c7: 
0x05, + 0x2c8: 0x05, 0x2c9: 0x05, 0x2ca: 0x05, 0x2cb: 0x05, 0x2cc: 0x05, 0x2cd: 0x05, 0x2ce: 0x05, 0x2cf: 0x05, + 0x2d0: 0x05, 0x2d1: 0x05, 0x2d2: 0x93, 0x2d3: 0x94, 0x2d4: 0x05, 0x2d5: 0x05, 0x2d6: 0x05, 0x2d7: 0x05, + 0x2d8: 0x95, 0x2d9: 0x96, 0x2da: 0x97, 0x2db: 0x98, 0x2dc: 0x99, 0x2dd: 0x9a, 0x2de: 0x9b, 0x2df: 0x9c, + 0x2e0: 0x9d, 0x2e1: 0x9e, 0x2e2: 0x05, 0x2e3: 0x9f, 0x2e4: 0xa0, 0x2e5: 0xa1, 0x2e6: 0xa2, 0x2e7: 0xa3, + 0x2e8: 0xa4, 0x2e9: 0xa5, 0x2ea: 0xa6, 0x2eb: 0xa7, 0x2ec: 0xa8, 0x2ed: 0xa9, 0x2ee: 0x05, 0x2ef: 0xaa, + 0x2f0: 0x05, 0x2f1: 0x05, 0x2f2: 0x05, 0x2f3: 0x05, 0x2f4: 0x05, 0x2f5: 0x05, 0x2f6: 0x05, 0x2f7: 0x05, + 0x2f8: 0x05, 0x2f9: 0x05, 0x2fa: 0x05, 0x2fb: 0x05, 0x2fc: 0x05, 0x2fd: 0x05, 0x2fe: 0x05, 0x2ff: 0x05, + // Block 0xc, offset 0x300 + 0x300: 0x05, 0x301: 0x05, 0x302: 0x05, 0x303: 0x05, 0x304: 0x05, 0x305: 0x05, 0x306: 0x05, 0x307: 0x05, + 0x308: 0x05, 0x309: 0x05, 0x30a: 0x05, 0x30b: 0x05, 0x30c: 0x05, 0x30d: 0x05, 0x30e: 0x05, 0x30f: 0x05, + 0x310: 0x05, 0x311: 0x05, 0x312: 0x05, 0x313: 0x05, 0x314: 0x05, 0x315: 0x05, 0x316: 0x05, 0x317: 0x05, + 0x318: 0x05, 0x319: 0x05, 0x31a: 0x05, 0x31b: 0x05, 0x31c: 0x05, 0x31d: 0x05, 0x31e: 0x05, 0x31f: 0x05, + 0x320: 0x05, 0x321: 0x05, 0x322: 0x05, 0x323: 0x05, 0x324: 0x05, 0x325: 0x05, 0x326: 0x05, 0x327: 0x05, + 0x328: 0x05, 0x329: 0x05, 0x32a: 0x05, 0x32b: 0x05, 0x32c: 0x05, 0x32d: 0x05, 0x32e: 0x05, 0x32f: 0x05, + 0x330: 0x05, 0x331: 0x05, 0x332: 0x05, 0x333: 0x05, 0x334: 0x05, 0x335: 0x05, 0x336: 0x05, 0x337: 0x05, + 0x338: 0x05, 0x339: 0x05, 0x33a: 0x05, 0x33b: 0x05, 0x33c: 0x05, 0x33d: 0x05, 0x33e: 0x05, 0x33f: 0x05, + // Block 0xd, offset 0x340 + 0x340: 0x05, 0x341: 0x05, 0x342: 0x05, 0x343: 0x05, 0x344: 0x05, 0x345: 0x05, 0x346: 0x05, 0x347: 0x05, + 0x348: 0x05, 0x349: 0x05, 0x34a: 0x05, 0x34b: 0x05, 0x34c: 0x05, 0x34d: 0x05, 0x34e: 0x05, 0x34f: 0x05, + 0x350: 0x05, 0x351: 0x05, 0x352: 0x05, 0x353: 0x05, 0x354: 0x05, 0x355: 0x05, 0x356: 0x05, 0x357: 0x05, + 0x358: 0x05, 0x359: 0x05, 0x35a: 0x05, 0x35b: 0x05, 0x35c: 0x05, 0x35d: 0x05, 0x35e: 0xab, 0x35f: 0xac, + // Block 0xe, offset 0x380 + 0x380: 0x3e, 0x381: 0x3e, 0x382: 0x3e, 0x383: 0x3e, 0x384: 0x3e, 0x385: 0x3e, 0x386: 0x3e, 0x387: 0x3e, + 0x388: 0x3e, 0x389: 0x3e, 0x38a: 0x3e, 0x38b: 0x3e, 0x38c: 0x3e, 0x38d: 0x3e, 0x38e: 0x3e, 0x38f: 0x3e, + 0x390: 0x3e, 0x391: 0x3e, 0x392: 0x3e, 0x393: 0x3e, 0x394: 0x3e, 0x395: 0x3e, 0x396: 0x3e, 0x397: 0x3e, + 0x398: 0x3e, 0x399: 0x3e, 0x39a: 0x3e, 0x39b: 0x3e, 0x39c: 0x3e, 0x39d: 0x3e, 0x39e: 0x3e, 0x39f: 0x3e, + 0x3a0: 0x3e, 0x3a1: 0x3e, 0x3a2: 0x3e, 0x3a3: 0x3e, 0x3a4: 0x3e, 0x3a5: 0x3e, 0x3a6: 0x3e, 0x3a7: 0x3e, + 0x3a8: 0x3e, 0x3a9: 0x3e, 0x3aa: 0x3e, 0x3ab: 0x3e, 0x3ac: 0x3e, 0x3ad: 0x3e, 0x3ae: 0x3e, 0x3af: 0x3e, + 0x3b0: 0x3e, 0x3b1: 0x3e, 0x3b2: 0x3e, 0x3b3: 0x3e, 0x3b4: 0x3e, 0x3b5: 0x3e, 0x3b6: 0x3e, 0x3b7: 0x3e, + 0x3b8: 0x3e, 0x3b9: 0x3e, 0x3ba: 0x3e, 0x3bb: 0x3e, 0x3bc: 0x3e, 0x3bd: 0x3e, 0x3be: 0x3e, 0x3bf: 0x3e, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x3e, 0x3c1: 0x3e, 0x3c2: 0x3e, 0x3c3: 0x3e, 0x3c4: 0x3e, 0x3c5: 0x3e, 0x3c6: 0x3e, 0x3c7: 0x3e, + 0x3c8: 0x3e, 0x3c9: 0x3e, 0x3ca: 0x3e, 0x3cb: 0x3e, 0x3cc: 0x3e, 0x3cd: 0x3e, 0x3ce: 0x3e, 0x3cf: 0x3e, + 0x3d0: 0x3e, 0x3d1: 0x3e, 0x3d2: 0x3e, 0x3d3: 0x3e, 0x3d4: 0x3e, 0x3d5: 0x3e, 0x3d6: 0x3e, 0x3d7: 0x3e, + 0x3d8: 0x3e, 0x3d9: 0x3e, 0x3da: 0x3e, 0x3db: 0x3e, 0x3dc: 0x3e, 0x3dd: 0x3e, 0x3de: 0x3e, 0x3df: 0x3e, + 0x3e0: 0x3e, 0x3e1: 0x3e, 0x3e2: 0x3e, 0x3e3: 0x3e, 0x3e4: 0x83, 0x3e5: 0x83, 0x3e6: 0x83, 0x3e7: 0x83, + 0x3e8: 0xad, 0x3e9: 0xae, 0x3ea: 0x83, 
0x3eb: 0xaf, 0x3ec: 0xb0, 0x3ed: 0xb1, 0x3ee: 0x71, 0x3ef: 0xb2, + 0x3f0: 0x71, 0x3f1: 0x71, 0x3f2: 0x71, 0x3f3: 0x71, 0x3f4: 0x71, 0x3f5: 0xb3, 0x3f6: 0xb4, 0x3f7: 0xb5, + 0x3f8: 0xb6, 0x3f9: 0xb7, 0x3fa: 0x71, 0x3fb: 0xb8, 0x3fc: 0xb9, 0x3fd: 0xba, 0x3fe: 0xbb, 0x3ff: 0xbc, + // Block 0x10, offset 0x400 + 0x400: 0xbd, 0x401: 0xbe, 0x402: 0x05, 0x403: 0xbf, 0x404: 0xc0, 0x405: 0xc1, 0x406: 0xc2, 0x407: 0xc3, + 0x40a: 0xc4, 0x40b: 0xc5, 0x40c: 0xc6, 0x40d: 0xc7, 0x40e: 0xc8, 0x40f: 0xc9, + 0x410: 0x05, 0x411: 0x05, 0x412: 0xca, 0x413: 0xcb, 0x414: 0xcc, 0x415: 0xcd, + 0x418: 0x05, 0x419: 0x05, 0x41a: 0x05, 0x41b: 0x05, 0x41c: 0xce, 0x41d: 0xcf, + 0x420: 0xd0, 0x421: 0xd1, 0x422: 0xd2, 0x423: 0xd3, 0x424: 0xd4, 0x426: 0xd5, 0x427: 0xb4, + 0x428: 0xd6, 0x429: 0xd7, 0x42a: 0xd8, 0x42b: 0xd9, 0x42c: 0xda, 0x42d: 0xdb, 0x42e: 0xdc, + 0x430: 0x05, 0x431: 0x5f, 0x432: 0xdd, 0x433: 0xde, + 0x439: 0xdf, + // Block 0x11, offset 0x440 + 0x440: 0xe0, 0x441: 0xe1, 0x442: 0xe2, 0x443: 0xe3, 0x444: 0xe4, 0x445: 0xe5, 0x446: 0xe6, 0x447: 0xe7, + 0x448: 0xe8, 0x44a: 0xe9, 0x44b: 0xea, 0x44c: 0xeb, 0x44d: 0xec, + 0x450: 0xed, 0x451: 0xee, 0x452: 0xef, 0x453: 0xf0, 0x456: 0xf1, 0x457: 0xf2, + 0x458: 0xf3, 0x459: 0xf4, 0x45a: 0xf5, 0x45b: 0xf6, 0x45c: 0xf7, + 0x462: 0xf8, 0x463: 0xf9, + 0x46b: 0xfa, + 0x470: 0xfb, 0x471: 0xfc, 0x472: 0xfd, + // Block 0x12, offset 0x480 + 0x480: 0x05, 0x481: 0x05, 0x482: 0x05, 0x483: 0x05, 0x484: 0x05, 0x485: 0x05, 0x486: 0x05, 0x487: 0x05, + 0x488: 0x05, 0x489: 0x05, 0x48a: 0x05, 0x48b: 0x05, 0x48c: 0x05, 0x48d: 0x05, 0x48e: 0xfe, + 0x490: 0x71, 0x491: 0xff, 0x492: 0x05, 0x493: 0x05, 0x494: 0x05, 0x495: 0x100, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x05, 0x4c1: 0x05, 0x4c2: 0x05, 0x4c3: 0x05, 0x4c4: 0x05, 0x4c5: 0x05, 0x4c6: 0x05, 0x4c7: 0x05, + 0x4c8: 0x05, 0x4c9: 0x05, 0x4ca: 0x05, 0x4cb: 0x05, 0x4cc: 0x05, 0x4cd: 0x05, 0x4ce: 0x05, 0x4cf: 0x05, + 0x4d0: 0x101, + // Block 0x14, offset 0x500 + 0x510: 0x05, 0x511: 0x05, 0x512: 0x05, 0x513: 0x05, 0x514: 0x05, 0x515: 0x05, 0x516: 0x05, 0x517: 0x05, + 0x518: 0x05, 0x519: 0x102, + // Block 0x15, offset 0x540 + 0x560: 0x05, 0x561: 0x05, 0x562: 0x05, 0x563: 0x05, 0x564: 0x05, 0x565: 0x05, 0x566: 0x05, 0x567: 0x05, + 0x568: 0xfa, 0x569: 0x103, 0x56b: 0x104, 0x56c: 0x105, 0x56d: 0x106, 0x56e: 0x107, + 0x57c: 0x05, 0x57d: 0x108, 0x57e: 0x109, 0x57f: 0x10a, + // Block 0x16, offset 0x580 + 0x580: 0x05, 0x581: 0x05, 0x582: 0x05, 0x583: 0x05, 0x584: 0x05, 0x585: 0x05, 0x586: 0x05, 0x587: 0x05, + 0x588: 0x05, 0x589: 0x05, 0x58a: 0x05, 0x58b: 0x05, 0x58c: 0x05, 0x58d: 0x05, 0x58e: 0x05, 0x58f: 0x05, + 0x590: 0x05, 0x591: 0x05, 0x592: 0x05, 0x593: 0x05, 0x594: 0x05, 0x595: 0x05, 0x596: 0x05, 0x597: 0x05, + 0x598: 0x05, 0x599: 0x05, 0x59a: 0x05, 0x59b: 0x05, 0x59c: 0x05, 0x59d: 0x05, 0x59e: 0x05, 0x59f: 0x10b, + 0x5a0: 0x05, 0x5a1: 0x05, 0x5a2: 0x05, 0x5a3: 0x05, 0x5a4: 0x05, 0x5a5: 0x05, 0x5a6: 0x05, 0x5a7: 0x05, + 0x5a8: 0x05, 0x5a9: 0x05, 0x5aa: 0x05, 0x5ab: 0xdd, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x10c, + 0x5f0: 0x05, 0x5f1: 0x10d, 0x5f2: 0x10e, + // Block 0x18, offset 0x600 + 0x600: 0x71, 0x601: 0x71, 0x602: 0x71, 0x603: 0x10f, 0x604: 0x110, 0x605: 0x111, 0x606: 0x112, 0x607: 0x113, + 0x608: 0xc1, 0x609: 0x114, 0x60c: 0x71, 0x60d: 0x115, + 0x610: 0x71, 0x611: 0x116, 0x612: 0x117, 0x613: 0x118, 0x614: 0x119, 0x615: 0x11a, 0x616: 0x71, 0x617: 0x71, + 0x618: 0x71, 0x619: 0x71, 0x61a: 0x11b, 0x61b: 0x71, 0x61c: 0x71, 0x61d: 0x71, 0x61e: 0x71, 0x61f: 0x11c, + 0x620: 0x71, 0x621: 0x71, 0x622: 0x71, 0x623: 0x71, 0x624: 0x71, 0x625: 0x71, 
0x626: 0x71, 0x627: 0x71, + 0x628: 0x11d, 0x629: 0x11e, 0x62a: 0x11f, + // Block 0x19, offset 0x640 + 0x640: 0x120, + 0x660: 0x05, 0x661: 0x05, 0x662: 0x05, 0x663: 0x121, 0x664: 0x122, 0x665: 0x123, + 0x678: 0x124, 0x679: 0x125, 0x67a: 0x126, 0x67b: 0x127, + // Block 0x1a, offset 0x680 + 0x680: 0x128, 0x681: 0x71, 0x682: 0x129, 0x683: 0x12a, 0x684: 0x12b, 0x685: 0x128, 0x686: 0x12c, 0x687: 0x12d, + 0x688: 0x12e, 0x689: 0x12f, 0x68c: 0x71, 0x68d: 0x71, 0x68e: 0x71, 0x68f: 0x71, + 0x690: 0x71, 0x691: 0x71, 0x692: 0x71, 0x693: 0x71, 0x694: 0x71, 0x695: 0x71, 0x696: 0x71, 0x697: 0x71, + 0x698: 0x71, 0x699: 0x71, 0x69a: 0x71, 0x69b: 0x130, 0x69c: 0x71, 0x69d: 0x131, 0x69e: 0x71, 0x69f: 0x132, + 0x6a0: 0x133, 0x6a1: 0x134, 0x6a2: 0x135, 0x6a4: 0x136, 0x6a5: 0x137, 0x6a6: 0x138, 0x6a7: 0x139, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x90, 0x6c1: 0x90, 0x6c2: 0x90, 0x6c3: 0x90, 0x6c4: 0x90, 0x6c5: 0x90, 0x6c6: 0x90, 0x6c7: 0x90, + 0x6c8: 0x90, 0x6c9: 0x90, 0x6ca: 0x90, 0x6cb: 0x90, 0x6cc: 0x90, 0x6cd: 0x90, 0x6ce: 0x90, 0x6cf: 0x90, + 0x6d0: 0x90, 0x6d1: 0x90, 0x6d2: 0x90, 0x6d3: 0x90, 0x6d4: 0x90, 0x6d5: 0x90, 0x6d6: 0x90, 0x6d7: 0x90, + 0x6d8: 0x90, 0x6d9: 0x90, 0x6da: 0x90, 0x6db: 0x13a, 0x6dc: 0x90, 0x6dd: 0x90, 0x6de: 0x90, 0x6df: 0x90, + 0x6e0: 0x90, 0x6e1: 0x90, 0x6e2: 0x90, 0x6e3: 0x90, 0x6e4: 0x90, 0x6e5: 0x90, 0x6e6: 0x90, 0x6e7: 0x90, + 0x6e8: 0x90, 0x6e9: 0x90, 0x6ea: 0x90, 0x6eb: 0x90, 0x6ec: 0x90, 0x6ed: 0x90, 0x6ee: 0x90, 0x6ef: 0x90, + 0x6f0: 0x90, 0x6f1: 0x90, 0x6f2: 0x90, 0x6f3: 0x90, 0x6f4: 0x90, 0x6f5: 0x90, 0x6f6: 0x90, 0x6f7: 0x90, + 0x6f8: 0x90, 0x6f9: 0x90, 0x6fa: 0x90, 0x6fb: 0x90, 0x6fc: 0x90, 0x6fd: 0x90, 0x6fe: 0x90, 0x6ff: 0x90, + // Block 0x1c, offset 0x700 + 0x700: 0x90, 0x701: 0x90, 0x702: 0x90, 0x703: 0x90, 0x704: 0x90, 0x705: 0x90, 0x706: 0x90, 0x707: 0x90, + 0x708: 0x90, 0x709: 0x90, 0x70a: 0x90, 0x70b: 0x90, 0x70c: 0x90, 0x70d: 0x90, 0x70e: 0x90, 0x70f: 0x90, + 0x710: 0x90, 0x711: 0x90, 0x712: 0x90, 0x713: 0x90, 0x714: 0x90, 0x715: 0x90, 0x716: 0x90, 0x717: 0x90, + 0x718: 0x90, 0x719: 0x90, 0x71a: 0x90, 0x71b: 0x90, 0x71c: 0x13b, 0x71d: 0x90, 0x71e: 0x90, 0x71f: 0x90, + 0x720: 0x13c, 0x721: 0x90, 0x722: 0x90, 0x723: 0x90, 0x724: 0x90, 0x725: 0x90, 0x726: 0x90, 0x727: 0x90, + 0x728: 0x90, 0x729: 0x90, 0x72a: 0x90, 0x72b: 0x90, 0x72c: 0x90, 0x72d: 0x90, 0x72e: 0x90, 0x72f: 0x90, + 0x730: 0x90, 0x731: 0x90, 0x732: 0x90, 0x733: 0x90, 0x734: 0x90, 0x735: 0x90, 0x736: 0x90, 0x737: 0x90, + 0x738: 0x90, 0x739: 0x90, 0x73a: 0x90, 0x73b: 0x90, 0x73c: 0x90, 0x73d: 0x90, 0x73e: 0x90, 0x73f: 0x90, + // Block 0x1d, offset 0x740 + 0x740: 0x90, 0x741: 0x90, 0x742: 0x90, 0x743: 0x90, 0x744: 0x90, 0x745: 0x90, 0x746: 0x90, 0x747: 0x90, + 0x748: 0x90, 0x749: 0x90, 0x74a: 0x90, 0x74b: 0x90, 0x74c: 0x90, 0x74d: 0x90, 0x74e: 0x90, 0x74f: 0x90, + 0x750: 0x90, 0x751: 0x90, 0x752: 0x90, 0x753: 0x90, 0x754: 0x90, 0x755: 0x90, 0x756: 0x90, 0x757: 0x90, + 0x758: 0x90, 0x759: 0x90, 0x75a: 0x90, 0x75b: 0x90, 0x75c: 0x90, 0x75d: 0x90, 0x75e: 0x90, 0x75f: 0x90, + 0x760: 0x90, 0x761: 0x90, 0x762: 0x90, 0x763: 0x90, 0x764: 0x90, 0x765: 0x90, 0x766: 0x90, 0x767: 0x90, + 0x768: 0x90, 0x769: 0x90, 0x76a: 0x90, 0x76b: 0x90, 0x76c: 0x90, 0x76d: 0x90, 0x76e: 0x90, 0x76f: 0x90, + 0x770: 0x90, 0x771: 0x90, 0x772: 0x90, 0x773: 0x90, 0x774: 0x90, 0x775: 0x90, 0x776: 0x90, 0x777: 0x90, + 0x778: 0x90, 0x779: 0x90, 0x77a: 0x13d, + // Block 0x1e, offset 0x780 + 0x7a0: 0x83, 0x7a1: 0x83, 0x7a2: 0x83, 0x7a3: 0x83, 0x7a4: 0x83, 0x7a5: 0x83, 0x7a6: 0x83, 0x7a7: 0x83, + 0x7a8: 0x13e, + // Block 0x1f, offset 0x7c0 + 
0x7d0: 0x0e, 0x7d1: 0x0f, 0x7d2: 0x10, 0x7d3: 0x11, 0x7d4: 0x12, 0x7d6: 0x13, 0x7d7: 0x0a,
+	0x7d8: 0x14, 0x7db: 0x15, 0x7dd: 0x16, 0x7de: 0x17, 0x7df: 0x18,
+	0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07,
+	0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x19, 0x7eb: 0x1a, 0x7ec: 0x1b, 0x7ef: 0x1c,
+	// Block 0x20, offset 0x800
+	0x800: 0x13f, 0x801: 0x3e, 0x804: 0x3e, 0x805: 0x3e, 0x806: 0x3e, 0x807: 0x140,
+	// Block 0x21, offset 0x840
+	0x840: 0x3e, 0x841: 0x3e, 0x842: 0x3e, 0x843: 0x3e, 0x844: 0x3e, 0x845: 0x3e, 0x846: 0x3e, 0x847: 0x3e,
+	0x848: 0x3e, 0x849: 0x3e, 0x84a: 0x3e, 0x84b: 0x3e, 0x84c: 0x3e, 0x84d: 0x3e, 0x84e: 0x3e, 0x84f: 0x3e,
+	0x850: 0x3e, 0x851: 0x3e, 0x852: 0x3e, 0x853: 0x3e, 0x854: 0x3e, 0x855: 0x3e, 0x856: 0x3e, 0x857: 0x3e,
+	0x858: 0x3e, 0x859: 0x3e, 0x85a: 0x3e, 0x85b: 0x3e, 0x85c: 0x3e, 0x85d: 0x3e, 0x85e: 0x3e, 0x85f: 0x3e,
+	0x860: 0x3e, 0x861: 0x3e, 0x862: 0x3e, 0x863: 0x3e, 0x864: 0x3e, 0x865: 0x3e, 0x866: 0x3e, 0x867: 0x3e,
+	0x868: 0x3e, 0x869: 0x3e, 0x86a: 0x3e, 0x86b: 0x3e, 0x86c: 0x3e, 0x86d: 0x3e, 0x86e: 0x3e, 0x86f: 0x3e,
+	0x870: 0x3e, 0x871: 0x3e, 0x872: 0x3e, 0x873: 0x3e, 0x874: 0x3e, 0x875: 0x3e, 0x876: 0x3e, 0x877: 0x3e,
+	0x878: 0x3e, 0x879: 0x3e, 0x87a: 0x3e, 0x87b: 0x3e, 0x87c: 0x3e, 0x87d: 0x3e, 0x87e: 0x3e, 0x87f: 0x141,
+	// Block 0x22, offset 0x880
+	0x8a0: 0x1e,
+	0x8b0: 0x0c, 0x8b1: 0x0c, 0x8b2: 0x0c, 0x8b3: 0x0c, 0x8b4: 0x0c, 0x8b5: 0x0c, 0x8b6: 0x0c, 0x8b7: 0x0c,
+	0x8b8: 0x0c, 0x8b9: 0x0c, 0x8ba: 0x0c, 0x8bb: 0x0c, 0x8bc: 0x0c, 0x8bd: 0x0c, 0x8be: 0x0c, 0x8bf: 0x1f,
+	// Block 0x23, offset 0x8c0
+	0x8c0: 0x0c, 0x8c1: 0x0c, 0x8c2: 0x0c, 0x8c3: 0x0c, 0x8c4: 0x0c, 0x8c5: 0x0c, 0x8c6: 0x0c, 0x8c7: 0x0c,
+	0x8c8: 0x0c, 0x8c9: 0x0c, 0x8ca: 0x0c, 0x8cb: 0x0c, 0x8cc: 0x0c, 0x8cd: 0x0c, 0x8ce: 0x0c, 0x8cf: 0x1f,
+}
+
+// Total table size 25344 bytes (24KiB); checksum: 811C9DC5
diff --git a/vendor/golang.org/x/text/secure/precis/transformer.go b/vendor/golang.org/x/text/secure/precis/transformer.go
new file mode 100644
index 000000000..97ce5e757
--- /dev/null
+++ b/vendor/golang.org/x/text/secure/precis/transformer.go
@@ -0,0 +1,32 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package precis
+
+import "golang.org/x/text/transform"
+
+// Transformer implements the transform.Transformer interface.
+type Transformer struct {
+	t transform.Transformer
+}
+
+// Reset implements the transform.Transformer interface.
+func (t Transformer) Reset() { t.t.Reset() }
+
+// Transform implements the transform.Transformer interface.
+func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	return t.t.Transform(dst, src, atEOF)
+}
+
+// Bytes returns a new byte slice with the result of applying t to b.
+func (t Transformer) Bytes(b []byte) []byte {
+	b, _, _ = transform.Bytes(t, b)
+	return b
+}
+
+// String returns a string with the result of applying t to s.
+func (t Transformer) String(s string) string { + s, _, _ = transform.String(t, s) + return s +} diff --git a/vendor/golang.org/x/text/secure/precis/trieval.go b/vendor/golang.org/x/text/secure/precis/trieval.go new file mode 100644 index 000000000..fed7d7595 --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/trieval.go @@ -0,0 +1,64 @@ +// This file was generated by go generate; DO NOT EDIT + +package precis + +// entry is the entry of a trie table +// 7..6 property (unassigned, disallowed, maybe, valid) +// 5..0 category +type entry uint8 + +const ( + propShift = 6 + propMask = 0xc0 + catMask = 0x3f +) + +func (e entry) property() property { return property(e & propMask) } +func (e entry) category() category { return category(e & catMask) } + +type property uint8 + +// The order of these constants matter. A Profile may consider runes to be +// allowed either from pValid or idDisOrFreePVal. +const ( + unassigned property = iota << propShift + disallowed + idDisOrFreePVal // disallowed for Identifier, pValid for FreeForm + pValid +) + +// compute permutations of all properties and specialCategories. +type category uint8 + +const ( + other category = iota + + // Special rune types + joiningL + joiningD + joiningT + joiningR + viramaModifier + viramaJoinT // Virama + JoiningT + latinSmallL // U+006c + greek + greekJoinT // Greek + JoiningT + hebrew + hebrewJoinT // Hebrew + JoiningT + japanese // hirigana, katakana, han + + // Special rune types associated with contextual rules defined in + // https://tools.ietf.org/html/rfc5892#appendix-A. + // ContextO + zeroWidthNonJoiner // rule 1 + zeroWidthJoiner // rule 2 + // ContextJ + middleDot // rule 3 + greekLowerNumeralSign // rule 4 + hebrewPreceding // rule 5 and 6 + katakanaMiddleDot // rule 7 + arabicIndicDigit // rule 8 + extendedArabicIndicDigit // rule 9 + + numCategories +) diff --git a/vendor/golang.org/x/text/unicode/bidi/bidi.go b/vendor/golang.org/x/text/unicode/bidi/bidi.go new file mode 100644 index 000000000..1254119b5 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/bidi.go @@ -0,0 +1,198 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go gen_trieval.go gen_ranges.go + +// Package bidi contains functionality for bidirectional text support. +// +// See http://www.unicode.org/reports/tr9. +// +// NOTE: UNDER CONSTRUCTION. This API may change in backwards incompatible ways +// and without notice. +package bidi + +// TODO: +// The following functionality would not be hard to implement, but hinges on +// the definition of a Segmenter interface. For now this is up to the user. +// - Iterate over paragraphs +// - Segmenter to iterate over runs directly from a given text. +// Also: +// - Transformer for reordering? +// - Transformer (validator, really) for Bidi Rule. + +// This API tries to avoid dealing with embedding levels for now. Under the hood +// these will be computed, but the question is to which extent the user should +// know they exist. We should at some point allow the user to specify an +// embedding hierarchy, though. + +// A Direction indicates the overall flow of text. +type Direction int + +const ( + // LeftToRight indicates the text contains no right-to-left characters and + // that either there are some left-to-right characters or the option + // DefaultDirection(LeftToRight) was passed. 
+ LeftToRight Direction = iota + + // RightToLeft indicates the text contains no left-to-right characters and + // that either there are some right-to-left characters or the option + // DefaultDirection(RightToLeft) was passed. + RightToLeft + + // Mixed indicates text contains both left-to-right and right-to-left + // characters. + Mixed + + // Neutral means that text contains no left-to-right and right-to-left + // characters and that no default direction has been set. + Neutral +) + +type options struct{} + +// An Option is an option for Bidi processing. +type Option func(*options) + +// ICU allows the user to define embedding levels. This may be used, for example, +// to use hierarchical structure of markup languages to define embeddings. +// The following option may be a way to expose this functionality in this API. +// // LevelFunc sets a function that associates nesting levels with the given text. +// // The levels function will be called with monotonically increasing values for p. +// func LevelFunc(levels func(p int) int) Option { +// panic("unimplemented") +// } + +// DefaultDirection sets the default direction for a Paragraph. The direction is +// overridden if the text contains directional characters. +func DefaultDirection(d Direction) Option { + panic("unimplemented") +} + +// A Paragraph holds a single Paragraph for Bidi processing. +type Paragraph struct { + // buffers +} + +// SetBytes configures p for the given paragraph text. It replaces text +// previously set by SetBytes or SetString. If b contains a paragraph separator +// it will only process the first paragraph and report the number of bytes +// consumed from b including this separator. Error may be non-nil if options are +// given. +func (p *Paragraph) SetBytes(b []byte, opts ...Option) (n int, err error) { + panic("unimplemented") +} + +// SetString configures p for the given paragraph text. It replaces text +// previously set by SetBytes or SetString. If b contains a paragraph separator +// it will only process the first paragraph and report the number of bytes +// consumed from b including this separator. Error may be non-nil if options are +// given. +func (p *Paragraph) SetString(s string, opts ...Option) (n int, err error) { + panic("unimplemented") +} + +// IsLeftToRight reports whether the principle direction of rendering for this +// paragraphs is left-to-right. If this returns false, the principle direction +// of rendering is right-to-left. +func (p *Paragraph) IsLeftToRight() bool { + panic("unimplemented") +} + +// Direction returns the direction of the text of this paragraph. +// +// The direction may be LeftToRight, RightToLeft, Mixed, or Neutral. +func (p *Paragraph) Direction() Direction { + panic("unimplemented") +} + +// RunAt reports the Run at the given position of the input text. +// +// This method can be used for computing line breaks on paragraphs. +func (p *Paragraph) RunAt(pos int) Run { + panic("unimplemented") +} + +// Order computes the visual ordering of all the runs in a Paragraph. +func (p *Paragraph) Order() (Ordering, error) { + panic("unimplemented") +} + +// Line computes the visual ordering of runs for a single line starting and +// ending at the given positions in the original text. +func (p *Paragraph) Line(start, end int) (Ordering, error) { + panic("unimplemented") +} + +// An Ordering holds the computed visual order of runs of a Paragraph. Calling +// SetBytes or SetString on the originating Paragraph invalidates an Ordering. 
+// The methods of an Ordering should only be called by one goroutine at a time. +type Ordering struct{} + +// Direction reports the directionality of the runs. +// +// The direction may be LeftToRight, RightToLeft, Mixed, or Neutral. +func (o *Ordering) Direction() Direction { + panic("unimplemented") +} + +// NumRuns returns the number of runs. +func (o *Ordering) NumRuns() int { + panic("unimplemented") +} + +// Run returns the ith run within the ordering. +func (o *Ordering) Run(i int) Run { + panic("unimplemented") +} + +// TODO: perhaps with options. +// // Reorder creates a reader that reads the runes in visual order per character. +// // Modifiers remain after the runes they modify. +// func (l *Runs) Reorder() io.Reader { +// panic("unimplemented") +// } + +// A Run is a continuous sequence of characters of a single direction. +type Run struct { +} + +// String returns the text of the run in its original order. +func (r *Run) String() string { + panic("unimplemented") +} + +// Bytes returns the text of the run in its original order. +func (r *Run) Bytes() []byte { + panic("unimplemented") +} + +// TODO: methods for +// - Display order +// - headers and footers +// - bracket replacement. + +// Direction reports the direction of the run. +func (r *Run) Direction() Direction { + panic("unimplemented") +} + +// Position of the Run within the text passed to SetBytes or SetString of the +// originating Paragraph value. +func (r *Run) Pos() (start, end int) { + panic("unimplemented") +} + +// AppendReverse reverses the order of characters of in, appends them to out, +// and returns the result. Modifiers will still follow the runes they modify. +// Brackets are replaced with their counterparts. +func AppendReverse(out, in []byte) []byte { + panic("unimplemented") +} + +// ReverseString reverses the order of characters in s and returns a new string. +// Modifiers will still follow the runes they modify. Brackets are replaced with +// their counterparts. +func ReverseString(s string) string { + panic("unimplemented") +} diff --git a/vendor/golang.org/x/text/unicode/bidi/bracket.go b/vendor/golang.org/x/text/unicode/bidi/bracket.go new file mode 100644 index 000000000..601e25920 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/bracket.go @@ -0,0 +1,335 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bidi + +import ( + "container/list" + "fmt" + "sort" +) + +// This file contains a port of the reference implementation of the +// Bidi Parentheses Algorithm: +// http://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/BidiPBAReference.java +// +// The implementation in this file covers definitions BD14-BD16 and rule N0 +// of UAX#9. +// +// Some preprocessing is done for each rune before data is passed to this +// algorithm: +// - opening and closing brackets are identified +// - a bracket pair type, like '(' and ')' is assigned a unique identifier that +// is identical for the opening and closing bracket. It is left to do these +// mappings. +// - The BPA algorithm requires that bracket characters that are canonical +// equivalents of each other be able to be substituted for each other. +// It is the responsibility of the caller to do this canonicalization. +// +// In implementing BD16, this implementation departs slightly from the "logical" +// algorithm defined in UAX#9. 
In particular, the stack referenced there +// supports operations that go beyond a "basic" stack. An equivalent +// implementation based on a linked list is used here. + +// Bidi_Paired_Bracket_Type +// BD14. An opening paired bracket is a character whose +// Bidi_Paired_Bracket_Type property value is Open. +// +// BD15. A closing paired bracket is a character whose +// Bidi_Paired_Bracket_Type property value is Close. +type bracketType byte + +const ( + bpNone bracketType = iota + bpOpen + bpClose +) + +// bracketPair holds a pair of index values for opening and closing bracket +// location of a bracket pair. +type bracketPair struct { + opener int + closer int +} + +func (b *bracketPair) String() string { + return fmt.Sprintf("(%v, %v)", b.opener, b.closer) +} + +// bracketPairs is a slice of bracketPairs with a sort.Interface implementation. +type bracketPairs []bracketPair + +func (b bracketPairs) Len() int { return len(b) } +func (b bracketPairs) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b bracketPairs) Less(i, j int) bool { return b[i].opener < b[j].opener } + +// resolvePairedBrackets runs the paired bracket part of the UBA algorithm. +// +// For each rune, it takes the indexes into the original string, the class the +// bracket type (in pairTypes) and the bracket identifier (pairValues). It also +// takes the direction type for the start-of-sentence and the embedding level. +// +// The identifiers for bracket types are the rune of the canonicalized opening +// bracket for brackets (open or close) or 0 for runes that are not brackets. +func resolvePairedBrackets(s *isolatingRunSequence) { + p := bracketPairer{ + sos: s.sos, + openers: list.New(), + codesIsolatedRun: s.types, + indexes: s.indexes, + } + dirEmbed := L + if s.level&1 != 0 { + dirEmbed = R + } + p.locateBrackets(s.p.pairTypes, s.p.pairValues) + p.resolveBrackets(dirEmbed, s.p.initialTypes) +} + +type bracketPairer struct { + sos Class // direction corresponding to start of sequence + + // The following is a restatement of BD 16 using non-algorithmic language. + // + // A bracket pair is a pair of characters consisting of an opening + // paired bracket and a closing paired bracket such that the + // Bidi_Paired_Bracket property value of the former equals the latter, + // subject to the following constraints. + // - both characters of a pair occur in the same isolating run sequence + // - the closing character of a pair follows the opening character + // - any bracket character can belong at most to one pair, the earliest possible one + // - any bracket character not part of a pair is treated like an ordinary character + // - pairs may nest properly, but their spans may not overlap otherwise + + // Bracket characters with canonical decompositions are supposed to be + // treated as if they had been normalized, to allow normalized and non- + // normalized text to give the same result. In this implementation that step + // is pushed out to the caller. The caller has to ensure that the pairValue + // slices contain the rune of the opening bracket after normalization for + // any opening or closing bracket. + + openers *list.List // list of positions for opening brackets + + // bracket pair positions sorted by location of opening bracket + pairPositions bracketPairs + + codesIsolatedRun []Class // directional bidi codes for an isolated run + indexes []int // array of index values into the original string + +} + +// matchOpener reports whether characters at given positions form a matching +// bracket pair. 
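Because pairValues carries the canonical opening rune for both members of a pair, matchOpener reduces to a single equality test. An illustrative setup for the text "a(b)c" (the values are invented for the example; callers are also responsible for substituting canonically equivalent brackets, such as U+2329 and U+3008, before this point):

```go
// Index:  0       1       2       3        4
// Text:   a       (       b       )        c
pairTypes := []bracketType{bpNone, bpOpen, bpNone, bpClose, bpNone}
pairValues := []rune{0, '(', 0, '(', 0} // both brackets map to the opening rune
// matchOpener compares pairValues at the two positions: '(' == '(' -> a pair.
_, _ = pairTypes, pairValues
```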
+func (p *bracketPairer) matchOpener(pairValues []rune, opener, closer int) bool { + return pairValues[p.indexes[opener]] == pairValues[p.indexes[closer]] +} + +const maxPairingDepth = 63 + +// locateBrackets locates matching bracket pairs according to BD16. +// +// This implementation uses a linked list instead of a stack, because, while +// elements are added at the front (like a push) they are not generally removed +// in atomic 'pop' operations, reducing the benefit of the stack archetype. +func (p *bracketPairer) locateBrackets(pairTypes []bracketType, pairValues []rune) { + // traverse the run + // do that explicitly (not in a for-each) so we can record position + for i, index := range p.indexes { + + // look at the bracket type for each character + if pairTypes[index] == bpNone || p.codesIsolatedRun[i] != ON { + // continue scanning + continue + } + switch pairTypes[index] { + case bpOpen: + // check if maximum pairing depth reached + if p.openers.Len() == maxPairingDepth { + p.openers.Init() + return + } + // remember opener location, most recent first + p.openers.PushFront(i) + + case bpClose: + // see if there is a match + count := 0 + for elem := p.openers.Front(); elem != nil; elem = elem.Next() { + count++ + opener := elem.Value.(int) + if p.matchOpener(pairValues, opener, i) { + // if the opener matches, add nested pair to the ordered list + p.pairPositions = append(p.pairPositions, bracketPair{opener, i}) + // remove up to and including matched opener + for ; count > 0; count-- { + p.openers.Remove(p.openers.Front()) + } + break + } + } + sort.Sort(p.pairPositions) + // if we get here, the closing bracket matched no openers + // and gets ignored + } + } +} + +// Bracket pairs within an isolating run sequence are processed as units so +// that both the opening and the closing paired bracket in a pair resolve to +// the same direction. +// +// N0. Process bracket pairs in an isolating run sequence sequentially in +// the logical order of the text positions of the opening paired brackets +// using the logic given below. Within this scope, bidirectional types EN +// and AN are treated as R. +// +// Identify the bracket pairs in the current isolating run sequence +// according to BD16. For each bracket-pair element in the list of pairs of +// text positions: +// +// a Inspect the bidirectional types of the characters enclosed within the +// bracket pair. +// +// b If any strong type (either L or R) matching the embedding direction is +// found, set the type for both brackets in the pair to match the embedding +// direction. +// +// o [ e ] o -> o e e e o +// +// o [ o e ] -> o e o e e +// +// o [ NI e ] -> o e NI e e +// +// c Otherwise, if a strong type (opposite the embedding direction) is +// found, test for adjacent strong types as follows: 1 First, check +// backwards before the opening paired bracket until the first strong type +// (L, R, or sos) is found. If that first preceding strong type is opposite +// the embedding direction, then set the type for both brackets in the pair +// to that type. 2 Otherwise, set the type for both brackets in the pair to +// the embedding direction. +// +// o [ o ] e -> o o o o e +// +// o [ o NI ] o -> o o o NI o o +// +// e [ o ] o -> e e o e o +// +// e [ o ] e -> e e o e e +// +// e ( o [ o ] NI ) e -> e e o o o o NI e e +// +// d Otherwise, do not set the type for the current bracket pair. 
Note that +// if the enclosed text contains no strong types the paired brackets will +// both resolve to the same level when resolved individually using rules N1 +// and N2. +// +// e ( NI ) o -> e ( NI ) o + +// getStrongTypeN0 maps character's directional code to strong type as required +// by rule N0. +// +// TODO: have separate type for "strong" directionality. +func (p *bracketPairer) getStrongTypeN0(index int) Class { + switch p.codesIsolatedRun[index] { + // in the scope of N0, number types are treated as R + case EN, AN, AL, R: + return R + case L: + return L + default: + return ON + } +} + +// classifyPairContent reports the strong types contained inside a Bracket Pair, +// assuming the given embedding direction. +// +// It returns ON if no strong type is found. If a single strong type is found, +// it returns this this type. Otherwise it returns the embedding direction. +// +// TODO: use separate type for "strong" directionality. +func (p *bracketPairer) classifyPairContent(loc bracketPair, dirEmbed Class) Class { + dirOpposite := ON + for i := loc.opener + 1; i < loc.closer; i++ { + dir := p.getStrongTypeN0(i) + if dir == ON { + continue + } + if dir == dirEmbed { + return dir // type matching embedding direction found + } + dirOpposite = dir + } + // return ON if no strong type found, or class opposite to dirEmbed + return dirOpposite +} + +// classBeforePair determines which strong types are present before a Bracket +// Pair. Return R or L if strong type found, otherwise ON. +func (p *bracketPairer) classBeforePair(loc bracketPair) Class { + for i := loc.opener - 1; i >= 0; i-- { + if dir := p.getStrongTypeN0(i); dir != ON { + return dir + } + } + // no strong types found, return sos + return p.sos +} + +// assignBracketType implements rule N0 for a single bracket pair. +func (p *bracketPairer) assignBracketType(loc bracketPair, dirEmbed Class, initialTypes []Class) { + // rule "N0, a", inspect contents of pair + dirPair := p.classifyPairContent(loc, dirEmbed) + + // dirPair is now L, R, or N (no strong type found) + + // the following logical tests are performed out of order compared to + // the statement of the rules but yield the same results + if dirPair == ON { + return // case "d" - nothing to do + } + + if dirPair != dirEmbed { + // case "c": strong type found, opposite - check before (c.1) + dirPair = p.classBeforePair(loc) + if dirPair == dirEmbed || dirPair == ON { + // no strong opposite type found before - use embedding (c.2) + dirPair = dirEmbed + } + } + // else: case "b", strong type found matching embedding, + // no explicit action needed, as dirPair is already set to embedding + // direction + + // set the bracket types to the type found + p.setBracketsToType(loc, dirPair, initialTypes) +} + +func (p *bracketPairer) setBracketsToType(loc bracketPair, dirPair Class, initialTypes []Class) { + p.codesIsolatedRun[loc.opener] = dirPair + p.codesIsolatedRun[loc.closer] = dirPair + + for i := loc.opener + 1; i < loc.closer; i++ { + index := p.indexes[i] + if initialTypes[index] != NSM { + break + } + p.codesIsolatedRun[i] = dirPair + } + + for i := loc.closer + 1; i < len(p.indexes); i++ { + index := p.indexes[i] + if initialTypes[index] != NSM { + break + } + p.codesIsolatedRun[i] = dirPair + } +} + +// resolveBrackets implements rule N0 for a list of pairs. 
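As a concrete check of rule N0, the sketch below (written as it might appear in a test inside this package, fmt import assumed) runs the paragraph driver from core.go, further down in this diff, over an LTR sentence that parenthesizes an RTL word. A strong L precedes the pair, so case c.2 applies and both brackets resolve to the embedding direction, leaving them at level 0:

```go
// "a (XX) b", where XX stands for two right-to-left letters.
types := []Class{L, WS, ON, R, R, ON, WS, L}
pairTypes := []bracketType{bpNone, bpNone, bpOpen, bpNone, bpNone, bpClose, bpNone, bpNone}
pairValues := []rune{0, 0, '(', 0, 0, '(', 0, 0}

p := newParagraph(types, pairTypes, pairValues, implicitLevel)
fmt.Println(p.getLevels([]int{8})) // [0 0 0 1 1 0 0 0]: only the RTL letters are raised
```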
+func (p *bracketPairer) resolveBrackets(dirEmbed Class, initialTypes []Class) { + for _, loc := range p.pairPositions { + p.assignBracketType(loc, dirEmbed, initialTypes) + } +} diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go new file mode 100644 index 000000000..ec52f1448 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/core.go @@ -0,0 +1,1058 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bidi + +import "log" + +// This implementation is a port based on the reference implementation found at: +// http://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/ +// +// described in Unicode Bidirectional Algorithm (UAX #9). +// +// Input: +// There are two levels of input to the algorithm, since clients may prefer to +// supply some information from out-of-band sources rather than relying on the +// default behavior. +// +// - Bidi class array +// - Bidi class array, with externally supplied base line direction +// +// Output: +// Output is separated into several stages: +// +// - levels array over entire paragraph +// - reordering array over entire paragraph +// - levels array over line +// - reordering array over line +// +// Note that for conformance to the Unicode Bidirectional Algorithm, +// implementations are only required to generate correct reordering and +// character directionality (odd or even levels) over a line. Generating +// identical level arrays over a line is not required. Bidi explicit format +// codes (LRE, RLE, LRO, RLO, PDF) and BN can be assigned arbitrary levels and +// positions as long as the rest of the input is properly reordered. +// +// As the algorithm is defined to operate on a single paragraph at a time, this +// implementation is written to handle single paragraphs. Thus rule P1 is +// presumed by this implementation-- the data provided to the implementation is +// assumed to be a single paragraph, and either contains no 'B' codes, or a +// single 'B' code at the end of the input. 'B' is allowed as input to +// illustrate how the algorithm assigns it a level. +// +// Also note that rules L3 and L4 depend on the rendering engine that uses the +// result of the bidi algorithm. This implementation assumes that the rendering +// engine expects combining marks in visual order (e.g. to the left of their +// base character in RTL runs) and that it adjusts the glyphs used to render +// mirrored characters that are in RTL runs so that they render appropriately. + +// level is the embedding level of a character. Even embedding levels indicate +// left-to-right order and odd levels indicate right-to-left order. The special +// level of -1 is reserved for undefined order. +type level int8 + +const implicitLevel level = -1 + +// in returns if x is equal to any of the values in set. +func (c Class) in(set ...Class) bool { + for _, s := range set { + if c == s { + return true + } + } + return false +} + +// A paragraph contains the state of a paragraph. 
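A hedged sketch of how this type is driven end to end; newParagraph, getLevels and getReordering are all defined later in this file, and the classes below stand for a three-letter LTR word followed by two RTL letters:

```go
types := []Class{L, L, L, R, R}
p := newParagraph(types, make([]bracketType, 5), make([]rune, 5), implicitLevel)

fmt.Println(p.getLevels([]int{5}))     // [0 0 0 1 1]: rule I1 raises the RTL letters
fmt.Println(p.getReordering([]int{5})) // [0 1 2 4 3]: rule L2 reverses the RTL run
```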
+type paragraph struct { + initialTypes []Class + + // Arrays of properties needed for paired bracket evaluation in N0 + pairTypes []bracketType // paired Bracket types for paragraph + pairValues []rune // rune for opening bracket or pbOpen and pbClose; 0 for pbNone + + embeddingLevel level // default: = implicitLevel; + + // at the paragraph levels + resultTypes []Class + resultLevels []level + + // Index of matching PDI for isolate initiator characters. For other + // characters, the value of matchingPDI will be set to -1. For isolate + // initiators with no matching PDI, matchingPDI will be set to the length of + // the input string. + matchingPDI []int + + // Index of matching isolate initiator for PDI characters. For other + // characters, and for PDIs with no matching isolate initiator, the value of + // matchingIsolateInitiator will be set to -1. + matchingIsolateInitiator []int +} + +// newParagraph initializes a paragraph. The user needs to supply a few arrays +// corresponding to the preprocessed text input. The types correspond to the +// Unicode BiDi classes for each rune. pairTypes indicates the bracket type for +// each rune. pairValues provides a unique bracket class identifier for each +// rune (suggested is the rune of the open bracket for opening and matching +// close brackets, after normalization). The embedding levels are optional, but +// may be supplied to encode embedding levels of styled text. +// +// TODO: return an error. +func newParagraph(types []Class, pairTypes []bracketType, pairValues []rune, levels level) *paragraph { + validateTypes(types) + validatePbTypes(pairTypes) + validatePbValues(pairValues, pairTypes) + validateParagraphEmbeddingLevel(levels) + + p := ¶graph{ + initialTypes: append([]Class(nil), types...), + embeddingLevel: levels, + + pairTypes: pairTypes, + pairValues: pairValues, + + resultTypes: append([]Class(nil), types...), + } + p.run() + return p +} + +func (p *paragraph) Len() int { return len(p.initialTypes) } + +// The algorithm. Does not include line-based processing (Rules L1, L2). +// These are applied later in the line-based phase of the algorithm. +func (p *paragraph) run() { + p.determineMatchingIsolates() + + // 1) determining the paragraph level + // Rule P1 is the requirement for entering this algorithm. + // Rules P2, P3. + // If no externally supplied paragraph embedding level, use default. + if p.embeddingLevel == implicitLevel { + p.embeddingLevel = p.determineParagraphEmbeddingLevel(0, p.Len()) + } + + // Initialize result levels to paragraph embedding level. + p.resultLevels = make([]level, p.Len()) + setLevels(p.resultLevels, p.embeddingLevel) + + // 2) Explicit levels and directions + // Rules X1-X8. + p.determineExplicitEmbeddingLevels() + + // Rule X9. + // We do not remove the embeddings, the overrides, the PDFs, and the BNs + // from the string explicitly. But they are not copied into isolating run + // sequences when they are created, so they are removed for all + // practical purposes. + + // Rule X10. + // Run remainder of algorithm one isolating run sequence at a time + for _, seq := range p.determineIsolatingRunSequences() { + // 3) resolving weak types + // Rules W1-W7. + seq.resolveWeakTypes() + + // 4a) resolving paired brackets + // Rule N0 + resolvePairedBrackets(seq) + + // 4b) resolving neutral types + // Rules N1-N3. + seq.resolveNeutralTypes() + + // 5) resolving implicit embedding levels + // Rules I1, I2. 
+ seq.resolveImplicitLevels() + + // Apply the computed levels and types + seq.applyLevelsAndTypes() + } + + // Assign appropriate levels to 'hide' LREs, RLEs, LROs, RLOs, PDFs, and + // BNs. This is for convenience, so the resulting level array will have + // a value for every character. + p.assignLevelsToCharactersRemovedByX9() +} + +// determineMatchingIsolates determines the matching PDI for each isolate +// initiator and vice versa. +// +// Definition BD9. +// +// At the end of this function: +// +// - The member variable matchingPDI is set to point to the index of the +// matching PDI character for each isolate initiator character. If there is +// no matching PDI, it is set to the length of the input text. For other +// characters, it is set to -1. +// - The member variable matchingIsolateInitiator is set to point to the +// index of the matching isolate initiator character for each PDI character. +// If there is no matching isolate initiator, or the character is not a PDI, +// it is set to -1. +func (p *paragraph) determineMatchingIsolates() { + p.matchingPDI = make([]int, p.Len()) + p.matchingIsolateInitiator = make([]int, p.Len()) + + for i := range p.matchingIsolateInitiator { + p.matchingIsolateInitiator[i] = -1 + } + + for i := range p.matchingPDI { + p.matchingPDI[i] = -1 + + if t := p.resultTypes[i]; t.in(LRI, RLI, FSI) { + depthCounter := 1 + for j := i + 1; j < p.Len(); j++ { + if u := p.resultTypes[j]; u.in(LRI, RLI, FSI) { + depthCounter++ + } else if u == PDI { + if depthCounter--; depthCounter == 0 { + p.matchingPDI[i] = j + p.matchingIsolateInitiator[j] = i + break + } + } + } + if p.matchingPDI[i] == -1 { + p.matchingPDI[i] = p.Len() + } + } + } +} + +// determineParagraphEmbeddingLevel reports the resolved paragraph direction of +// the substring limited by the given range [start, end). +// +// Determines the paragraph level based on rules P2, P3. This is also used +// in rule X5c to find if an FSI should resolve to LRI or RLI. +func (p *paragraph) determineParagraphEmbeddingLevel(start, end int) level { + var strongType Class = unknownClass + + // Rule P2. + for i := start; i < end; i++ { + if t := p.resultTypes[i]; t.in(L, AL, R) { + strongType = t + break + } else if t.in(FSI, LRI, RLI) { + i = p.matchingPDI[i] // skip over to the matching PDI + if i > end { + log.Panic("assert (i <= end)") + } + } + } + // Rule P3. + switch strongType { + case unknownClass: // none found + // default embedding level when no strong types found is 0. 
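The BD9 matching tables built by determineMatchingIsolates above are easiest to see on a tiny input; a sketch (not part of the vendored file) that isolates one LRI...PDI pair:

```go
types := []Class{LRI, L, PDI, L}
p := newParagraph(types, make([]bracketType, 4), make([]rune, 4), implicitLevel)
fmt.Println(p.matchingPDI)              // [2 -1 -1 -1]: the initiator at 0 closes at 2
fmt.Println(p.matchingIsolateInitiator) // [-1 -1 0 -1]: the PDI at 2 points back to 0
```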
+ return 0 + case L: + return 0 + default: // AL, R + return 1 + } +} + +const maxDepth = 125 + +// This stack will store the embedding levels and override and isolated +// statuses +type directionalStatusStack struct { + stackCounter int + embeddingLevelStack [maxDepth + 1]level + overrideStatusStack [maxDepth + 1]Class + isolateStatusStack [maxDepth + 1]bool +} + +func (s *directionalStatusStack) empty() { s.stackCounter = 0 } +func (s *directionalStatusStack) pop() { s.stackCounter-- } +func (s *directionalStatusStack) depth() int { return s.stackCounter } + +func (s *directionalStatusStack) push(level level, overrideStatus Class, isolateStatus bool) { + s.embeddingLevelStack[s.stackCounter] = level + s.overrideStatusStack[s.stackCounter] = overrideStatus + s.isolateStatusStack[s.stackCounter] = isolateStatus + s.stackCounter++ +} + +func (s *directionalStatusStack) lastEmbeddingLevel() level { + return s.embeddingLevelStack[s.stackCounter-1] +} + +func (s *directionalStatusStack) lastDirectionalOverrideStatus() Class { + return s.overrideStatusStack[s.stackCounter-1] +} + +func (s *directionalStatusStack) lastDirectionalIsolateStatus() bool { + return s.isolateStatusStack[s.stackCounter-1] +} + +// Determine explicit levels using rules X1 - X8 +func (p *paragraph) determineExplicitEmbeddingLevels() { + var stack directionalStatusStack + var overflowIsolateCount, overflowEmbeddingCount, validIsolateCount int + + // Rule X1. + stack.push(p.embeddingLevel, ON, false) + + for i, t := range p.resultTypes { + // Rules X2, X3, X4, X5, X5a, X5b, X5c + switch t { + case RLE, LRE, RLO, LRO, RLI, LRI, FSI: + isIsolate := t.in(RLI, LRI, FSI) + isRTL := t.in(RLE, RLO, RLI) + + // override if this is an FSI that resolves to RLI + if t == FSI { + isRTL = (p.determineParagraphEmbeddingLevel(i+1, p.matchingPDI[i]) == 1) + } + if isIsolate { + p.resultLevels[i] = stack.lastEmbeddingLevel() + if stack.lastDirectionalOverrideStatus() != ON { + p.resultTypes[i] = stack.lastDirectionalOverrideStatus() + } + } + + var newLevel level + if isRTL { + // least greater odd + newLevel = (stack.lastEmbeddingLevel() + 1) | 1 + } else { + // least greater even + newLevel = (stack.lastEmbeddingLevel() + 2) &^ 1 + } + + if newLevel <= maxDepth && overflowIsolateCount == 0 && overflowEmbeddingCount == 0 { + if isIsolate { + validIsolateCount++ + } + // Push new embedding level, override status, and isolated + // status. + // No check for valid stack counter, since the level check + // suffices. + switch t { + case LRO: + stack.push(newLevel, L, isIsolate) + case RLO: + stack.push(newLevel, R, isIsolate) + default: + stack.push(newLevel, ON, isIsolate) + } + // Not really part of the spec + if !isIsolate { + p.resultLevels[i] = newLevel + } + } else { + // This is an invalid explicit formatting character, + // so apply the "Otherwise" part of rules X2-X5b. 
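The newLevel computation a few lines above implements "least greater odd" and "least greater even" from rules X2-X5 with two bit operations; a small aside to make the arithmetic concrete:

```go
last := level(2)
fmt.Println((last + 1) | 1)  // 3: least odd level greater than 2 (RLE/RLO/RLI)
fmt.Println((last + 2) &^ 1) // 4: least even level greater than 2 (LRE/LRO/LRI)

last = 3
fmt.Println((last + 1) | 1)  // 5
fmt.Println((last + 2) &^ 1) // 4
```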
+ if isIsolate { + overflowIsolateCount++ + } else { // !isIsolate + if overflowIsolateCount == 0 { + overflowEmbeddingCount++ + } + } + } + + // Rule X6a + case PDI: + if overflowIsolateCount > 0 { + overflowIsolateCount-- + } else if validIsolateCount == 0 { + // do nothing + } else { + overflowEmbeddingCount = 0 + for !stack.lastDirectionalIsolateStatus() { + stack.pop() + } + stack.pop() + validIsolateCount-- + } + p.resultLevels[i] = stack.lastEmbeddingLevel() + + // Rule X7 + case PDF: + // Not really part of the spec + p.resultLevels[i] = stack.lastEmbeddingLevel() + + if overflowIsolateCount > 0 { + // do nothing + } else if overflowEmbeddingCount > 0 { + overflowEmbeddingCount-- + } else if !stack.lastDirectionalIsolateStatus() && stack.depth() >= 2 { + stack.pop() + } + + case B: // paragraph separator. + // Rule X8. + + // These values are reset for clarity, in this implementation B + // can only occur as the last code in the array. + stack.empty() + overflowIsolateCount = 0 + overflowEmbeddingCount = 0 + validIsolateCount = 0 + p.resultLevels[i] = p.embeddingLevel + + default: + p.resultLevels[i] = stack.lastEmbeddingLevel() + if stack.lastDirectionalOverrideStatus() != ON { + p.resultTypes[i] = stack.lastDirectionalOverrideStatus() + } + } + } +} + +type isolatingRunSequence struct { + p *paragraph + + indexes []int // indexes to the original string + + types []Class // type of each character using the index + resolvedLevels []level // resolved levels after application of rules + level level + sos, eos Class +} + +func (i *isolatingRunSequence) Len() int { return len(i.indexes) } + +func maxLevel(a, b level) level { + if a > b { + return a + } + return b +} + +// Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types, +// either L or R, for each isolating run sequence. +func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { + length := len(indexes) + types := make([]Class, length) + for i, x := range indexes { + types[i] = p.resultTypes[x] + } + + // assign level, sos and eos + prevChar := indexes[0] - 1 + for prevChar >= 0 && isRemovedByX9(p.initialTypes[prevChar]) { + prevChar-- + } + prevLevel := p.embeddingLevel + if prevChar >= 0 { + prevLevel = p.resultLevels[prevChar] + } + + var succLevel level + lastType := types[length-1] + if lastType.in(LRI, RLI, FSI) { + succLevel = p.embeddingLevel + } else { + // the first character after the end of run sequence + limit := indexes[length-1] + 1 + for ; limit < p.Len() && isRemovedByX9(p.initialTypes[limit]); limit++ { + + } + succLevel = p.embeddingLevel + if limit < p.Len() { + succLevel = p.resultLevels[limit] + } + } + level := p.resultLevels[indexes[0]] + return &isolatingRunSequence{ + p: p, + indexes: indexes, + types: types, + level: level, + sos: typeForLevel(maxLevel(prevLevel, level)), + eos: typeForLevel(maxLevel(succLevel, level)), + } +} + +// Resolving weak types Rules W1-W7. +// +// Note that some weak types (EN, AN) remain after this processing is +// complete. +func (s *isolatingRunSequence) resolveWeakTypes() { + + // on entry, only these types remain + s.assertOnly(L, R, AL, EN, ES, ET, AN, CS, B, S, WS, ON, NSM, LRI, RLI, FSI, PDI) + + // Rule W1. + // Changes all NSMs. + preceedingCharacterType := s.sos + for i, t := range s.types { + if t == NSM { + s.types[i] = preceedingCharacterType + } else { + if t.in(LRI, RLI, FSI, PDI) { + preceedingCharacterType = ON + } + preceedingCharacterType = t + } + } + + // Rule W2. 
+ // EN does not change at the start of the run, because sos != AL. + for i, t := range s.types { + if t == EN { + for j := i - 1; j >= 0; j-- { + if t := s.types[j]; t.in(L, R, AL) { + if t == AL { + s.types[i] = AN + } + break + } + } + } + } + + // Rule W3. + for i, t := range s.types { + if t == AL { + s.types[i] = R + } + } + + // Rule W4. + // Since there must be values on both sides for this rule to have an + // effect, the scan skips the first and last value. + // + // Although the scan proceeds left to right, and changes the type + // values in a way that would appear to affect the computations + // later in the scan, there is actually no problem. A change in the + // current value can only affect the value to its immediate right, + // and only affect it if it is ES or CS. But the current value can + // only change if the value to its right is not ES or CS. Thus + // either the current value will not change, or its change will have + // no effect on the remainder of the analysis. + + for i := 1; i < s.Len()-1; i++ { + t := s.types[i] + if t == ES || t == CS { + prevSepType := s.types[i-1] + succSepType := s.types[i+1] + if prevSepType == EN && succSepType == EN { + s.types[i] = EN + } else if s.types[i] == CS && prevSepType == AN && succSepType == AN { + s.types[i] = AN + } + } + } + + // Rule W5. + for i, t := range s.types { + if t == ET { + // locate end of sequence + runStart := i + runEnd := s.findRunLimit(runStart, ET) + + // check values at ends of sequence + t := s.sos + if runStart > 0 { + t = s.types[runStart-1] + } + if t != EN { + t = s.eos + if runEnd < len(s.types) { + t = s.types[runEnd] + } + } + if t == EN { + setTypes(s.types[runStart:runEnd], EN) + } + // continue at end of sequence + i = runEnd + } + } + + // Rule W6. + for i, t := range s.types { + if t.in(ES, ET, CS) { + s.types[i] = ON + } + } + + // Rule W7. + for i, t := range s.types { + if t == EN { + // set default if we reach start of run + prevStrongType := s.sos + for j := i - 1; j >= 0; j-- { + t = s.types[j] + if t == L || t == R { // AL's have been changed to R + prevStrongType = t + break + } + } + if prevStrongType == L { + s.types[i] = L + } + } + } +} + +// 6) resolving neutral types Rules N1-N2. +func (s *isolatingRunSequence) resolveNeutralTypes() { + + // on entry, only these types can be in resultTypes + s.assertOnly(L, R, EN, AN, B, S, WS, ON, RLI, LRI, FSI, PDI) + + for i, t := range s.types { + switch t { + case WS, ON, B, S, RLI, LRI, FSI, PDI: + // find bounds of run of neutrals + runStart := i + runEnd := s.findRunLimit(runStart, B, S, WS, ON, RLI, LRI, FSI, PDI) + + // determine effective types at ends of run + var leadType, trailType Class + + // Note that the character found can only be L, R, AN, or + // EN. + if runStart == 0 { + leadType = s.sos + } else { + leadType = s.types[runStart-1] + if leadType.in(AN, EN) { + leadType = R + } + } + if runEnd == len(s.types) { + trailType = s.eos + } else { + trailType = s.types[runEnd] + if trailType.in(AN, EN) { + trailType = R + } + } + + var resolvedType Class + if leadType == trailType { + // Rule N1. + resolvedType = leadType + } else { + // Rule N2. + // Notice the embedding level of the run is used, not + // the paragraph embedding level. 
+ resolvedType = typeForLevel(s.level) + } + + setTypes(s.types[runStart:runEnd], resolvedType) + + // skip over run of (former) neutrals + i = runEnd + } + } +} + +func setLevels(levels []level, newLevel level) { + for i := range levels { + levels[i] = newLevel + } +} + +func setTypes(types []Class, newType Class) { + for i := range types { + types[i] = newType + } +} + +// 7) resolving implicit embedding levels Rules I1, I2. +func (s *isolatingRunSequence) resolveImplicitLevels() { + + // on entry, only these types can be in resultTypes + s.assertOnly(L, R, EN, AN) + + s.resolvedLevels = make([]level, len(s.types)) + setLevels(s.resolvedLevels, s.level) + + if (s.level & 1) == 0 { // even level + for i, t := range s.types { + // Rule I1. + if t == L { + // no change + } else if t == R { + s.resolvedLevels[i] += 1 + } else { // t == AN || t == EN + s.resolvedLevels[i] += 2 + } + } + } else { // odd level + for i, t := range s.types { + // Rule I2. + if t == R { + // no change + } else { // t == L || t == AN || t == EN + s.resolvedLevels[i] += 1 + } + } + } +} + +// Applies the levels and types resolved in rules W1-I2 to the +// resultLevels array. +func (s *isolatingRunSequence) applyLevelsAndTypes() { + for i, x := range s.indexes { + s.p.resultTypes[x] = s.types[i] + s.p.resultLevels[x] = s.resolvedLevels[i] + } +} + +// Return the limit of the run consisting only of the types in validSet +// starting at index. This checks the value at index, and will return +// index if that value is not in validSet. +func (s *isolatingRunSequence) findRunLimit(index int, validSet ...Class) int { +loop: + for ; index < len(s.types); index++ { + t := s.types[index] + for _, valid := range validSet { + if t == valid { + continue loop + } + } + return index // didn't find a match in validSet + } + return len(s.types) +} + +// Algorithm validation. Assert that all values in types are in the +// provided set. +func (s *isolatingRunSequence) assertOnly(codes ...Class) { +loop: + for i, t := range s.types { + for _, c := range codes { + if t == c { + continue loop + } + } + log.Panicf("invalid bidi code %s present in assertOnly at position %d", t, s.indexes[i]) + } +} + +// determineLevelRuns returns an array of level runs. Each level run is +// described as an array of indexes into the input string. +// +// Determines the level runs. Rule X9 will be applied in determining the +// runs, in the way that makes sure the characters that are supposed to be +// removed are not included in the runs. +func (p *paragraph) determineLevelRuns() [][]int { + run := []int{} + allRuns := [][]int{} + currentLevel := implicitLevel + + for i := range p.initialTypes { + if !isRemovedByX9(p.initialTypes[i]) { + if p.resultLevels[i] != currentLevel { + // we just encountered a new run; wrap up last run + if currentLevel >= 0 { // only wrap it up if there was a run + allRuns = append(allRuns, run) + run = nil + } + // Start new run + currentLevel = p.resultLevels[i] + } + run = append(run, i) + } + } + // Wrap up the final run, if any + if len(run) > 0 { + allRuns = append(allRuns, run) + } + return allRuns +} + +// Definition BD13. Determine isolating run sequences. 
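A quick illustration of the run decomposition described above, reusing the package-internal driver: an RTL word embedded in LTR text yields three level runs, one per level change.

```go
types := []Class{L, R, R, L}
p := newParagraph(types, make([]bracketType, 4), make([]rune, 4), implicitLevel)
// Resolved levels are [0 1 1 0], so the runs split at every level change.
fmt.Println(p.determineLevelRuns()) // [[0] [1 2] [3]]
```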
+func (p *paragraph) determineIsolatingRunSequences() []*isolatingRunSequence { + levelRuns := p.determineLevelRuns() + + // Compute the run that each character belongs to + runForCharacter := make([]int, p.Len()) + for i, run := range levelRuns { + for _, index := range run { + runForCharacter[index] = i + } + } + + sequences := []*isolatingRunSequence{} + + var currentRunSequence []int + + for _, run := range levelRuns { + first := run[0] + if p.initialTypes[first] != PDI || p.matchingIsolateInitiator[first] == -1 { + currentRunSequence = nil + // int run = i; + for { + // Copy this level run into currentRunSequence + currentRunSequence = append(currentRunSequence, run...) + + last := currentRunSequence[len(currentRunSequence)-1] + lastT := p.initialTypes[last] + if lastT.in(LRI, RLI, FSI) && p.matchingPDI[last] != p.Len() { + run = levelRuns[runForCharacter[p.matchingPDI[last]]] + } else { + break + } + } + sequences = append(sequences, p.isolatingRunSequence(currentRunSequence)) + } + } + return sequences +} + +// Assign level information to characters removed by rule X9. This is for +// ease of relating the level information to the original input data. Note +// that the levels assigned to these codes are arbitrary, they're chosen so +// as to avoid breaking level runs. +func (p *paragraph) assignLevelsToCharactersRemovedByX9() { + for i, t := range p.initialTypes { + if t.in(LRE, RLE, LRO, RLO, PDF, BN) { + p.resultTypes[i] = t + p.resultLevels[i] = -1 + } + } + // now propagate forward the levels information (could have + // propagated backward, the main thing is not to introduce a level + // break where one doesn't already exist). + + if p.resultLevels[0] == -1 { + p.resultLevels[0] = p.embeddingLevel + } + for i := 1; i < len(p.initialTypes); i++ { + if p.resultLevels[i] == -1 { + p.resultLevels[i] = p.resultLevels[i-1] + } + } + // Embedding information is for informational purposes only so need not be + // adjusted. +} + +// +// Output +// + +// getLevels computes levels array breaking lines at offsets in linebreaks. +// Rule L1. +// +// The linebreaks array must include at least one value. The values must be +// in strictly increasing order (no duplicates) between 1 and the length of +// the text, inclusive. The last value must be the length of the text. +func (p *paragraph) getLevels(linebreaks []int) []level { + // Note that since the previous processing has removed all + // P, S, and WS values from resultTypes, the values referred to + // in these rules are the initial types, before any processing + // has been applied (including processing of overrides). + // + // This example implementation has reinserted explicit format codes + // and BN, in order that the levels array correspond to the + // initial text. Their final placement is not normative. + // These codes are treated like WS in this implementation, + // so they don't interrupt sequences of WS. + + validateLineBreaks(linebreaks, p.Len()) + + result := append([]level(nil), p.resultLevels...) + + // don't worry about linebreaks since if there is a break within + // a series of WS values preceding S, the linebreak itself + // causes the reset. + for i, t := range p.initialTypes { + if t.in(B, S) { + // Rule L1, clauses one and two. + result[i] = p.embeddingLevel + + // Rule L1, clause three. + for j := i - 1; j >= 0; j-- { + if isWhitespace(p.initialTypes[j]) { // including format codes + result[j] = p.embeddingLevel + } else { + break + } + } + } + } + + // Rule L1, clause four. 
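An aside on rule L1 as implemented in getLevels above: whitespace at a segment or line end falls back to the paragraph level, so a trailing space after an RTL letter does not keep level 1.

```go
types := []Class{L, R, WS}
p := newParagraph(types, make([]bracketType, 3), make([]rune, 3), implicitLevel)
fmt.Println(p.getLevels([]int{3})) // [0 1 0]: the trailing space resets to level 0
```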
+ start := 0 + for _, limit := range linebreaks { + for j := limit - 1; j >= start; j-- { + if isWhitespace(p.initialTypes[j]) { // including format codes + result[j] = p.embeddingLevel + } else { + break + } + } + start = limit + } + + return result +} + +// getReordering returns the reordering of lines from a visual index to a +// logical index for line breaks at the given offsets. +// +// Lines are concatenated from left to right. So for example, the fifth +// character from the left on the third line is +// +// getReordering(linebreaks)[linebreaks[1] + 4] +// +// (linebreaks[1] is the position after the last character of the second +// line, which is also the index of the first character on the third line, +// and adding four gets the fifth character from the left). +// +// The linebreaks array must include at least one value. The values must be +// in strictly increasing order (no duplicates) between 1 and the length of +// the text, inclusive. The last value must be the length of the text. +func (p *paragraph) getReordering(linebreaks []int) []int { + validateLineBreaks(linebreaks, p.Len()) + + return computeMultilineReordering(p.getLevels(linebreaks), linebreaks) +} + +// Return multiline reordering array for a given level array. Reordering +// does not occur across a line break. +func computeMultilineReordering(levels []level, linebreaks []int) []int { + result := make([]int, len(levels)) + + start := 0 + for _, limit := range linebreaks { + tempLevels := make([]level, limit-start) + copy(tempLevels, levels[start:]) + + for j, order := range computeReordering(tempLevels) { + result[start+j] = order + start + } + start = limit + } + return result +} + +// Return reordering array for a given level array. This reorders a single +// line. The reordering is a visual to logical map. For example, the +// leftmost char is string.charAt(order[0]). Rule L2. +func computeReordering(levels []level) []int { + result := make([]int, len(levels)) + // initialize order + for i := range result { + result[i] = i + } + + // locate highest level found on line. + // Note the rules say text, but no reordering across line bounds is + // performed, so this is sufficient. + highestLevel := level(0) + lowestOddLevel := level(maxDepth + 2) + for _, level := range levels { + if level > highestLevel { + highestLevel = level + } + if level&1 != 0 && level < lowestOddLevel { + lowestOddLevel = level + } + } + + for level := highestLevel; level >= lowestOddLevel; level-- { + for i := 0; i < len(levels); i++ { + if levels[i] >= level { + // find range of text at or above this level + start := i + limit := i + 1 + for limit < len(levels) && levels[limit] >= level { + limit++ + } + + for j, k := start, limit-1; j < k; j, k = j+1, k-1 { + result[j], result[k] = result[k], result[j] + } + // skip to end of level run + i = limit + } + } + } + + return result +} + +// isWhitespace reports whether the type is considered a whitespace type for the +// line break rules. +func isWhitespace(c Class) bool { + switch c { + case LRE, RLE, LRO, RLO, PDF, LRI, RLI, FSI, PDI, BN, WS: + return true + } + return false +} + +// isRemovedByX9 reports whether the type is one of the types removed in X9. +func isRemovedByX9(c Class) bool { + switch c { + case LRE, RLE, LRO, RLO, PDF, BN: + return true + } + return false +} + +// typeForLevel reports the strong type (L or R) corresponding to the level. 
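computeReordering above is the heart of rule L2: starting from the highest level and working down to the lowest odd level, each maximal run at or above the current level is reversed. For a single embedded RTL run:

```go
fmt.Println(computeReordering([]level{0, 1, 1, 0})) // [0 2 1 3]: the middle run is reversed
```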
+func typeForLevel(level level) Class { + if (level & 0x1) == 0 { + return L + } + return R +} + +// TODO: change validation to not panic + +func validateTypes(types []Class) { + if len(types) == 0 { + log.Panic("types is null") + } + for i, t := range types[:len(types)-1] { + if t == B { + log.Panicf("B type before end of paragraph at index: %d", i) + } + } +} + +func validateParagraphEmbeddingLevel(embeddingLevel level) { + if embeddingLevel != implicitLevel && + embeddingLevel != 0 && + embeddingLevel != 1 { + log.Panicf("illegal paragraph embedding level: %d", embeddingLevel) + } +} + +func validateLineBreaks(linebreaks []int, textLength int) { + prev := 0 + for i, next := range linebreaks { + if next <= prev { + log.Panicf("bad linebreak: %d at index: %d", next, i) + } + prev = next + } + if prev != textLength { + log.Panicf("last linebreak was %d, want %d", prev, textLength) + } +} + +func validatePbTypes(pairTypes []bracketType) { + if len(pairTypes) == 0 { + log.Panic("pairTypes is null") + } + for i, pt := range pairTypes { + switch pt { + case bpNone, bpOpen, bpClose: + default: + log.Panicf("illegal pairType value at %d: %v", i, pairTypes[i]) + } + } +} + +func validatePbValues(pairValues []rune, pairTypes []bracketType) { + if pairValues == nil { + log.Panic("pairValues is null") + } + if len(pairTypes) != len(pairValues) { + log.Panic("pairTypes is different length from pairValues") + } +} diff --git a/vendor/golang.org/x/text/unicode/bidi/gen.go b/vendor/golang.org/x/text/unicode/bidi/gen.go new file mode 100644 index 000000000..040f3013d --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/gen.go @@ -0,0 +1,133 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +import ( + "flag" + "log" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/triegen" + "golang.org/x/text/internal/ucd" +) + +var outputFile = flag.String("out", "tables.go", "output file") + +func main() { + gen.Init() + gen.Repackage("gen_trieval.go", "trieval.go", "bidi") + gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi") + + genTables() +} + +// bidiClass names and codes taken from class "bc" in +// http://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt +var bidiClass = map[string]Class{ + "AL": AL, // ArabicLetter + "AN": AN, // ArabicNumber + "B": B, // ParagraphSeparator + "BN": BN, // BoundaryNeutral + "CS": CS, // CommonSeparator + "EN": EN, // EuropeanNumber + "ES": ES, // EuropeanSeparator + "ET": ET, // EuropeanTerminator + "L": L, // LeftToRight + "NSM": NSM, // NonspacingMark + "ON": ON, // OtherNeutral + "R": R, // RightToLeft + "S": S, // SegmentSeparator + "WS": WS, // WhiteSpace + + "FSI": Control, + "PDF": Control, + "PDI": Control, + "LRE": Control, + "LRI": Control, + "LRO": Control, + "RLE": Control, + "RLI": Control, + "RLO": Control, +} + +func genTables() { + if numClass > 0x0F { + log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass) + } + w := gen.NewCodeWriter() + defer w.WriteGoFile(*outputFile, "bidi") + + gen.WriteUnicodeVersion(w) + + t := triegen.NewTrie("bidi") + + // Build data about bracket mapping. These bits need to be or-ed with + // any other bits. + orMask := map[rune]uint64{} + + xorMap := map[rune]int{} + xorMasks := []rune{0} // First value is no-op. 
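The bracket data assembled here rests on a small trick: paired brackets differ by only a few bits, so the generator stores an index into a short xorMasks table rather than the counterpart rune itself. The generated table (see tables.go further down in this diff) begins {0, 1, 6, 7, ...}, which already covers the common ASCII pairs:

```go
fmt.Printf("%c %c %c\n", '('^1, '['^6, '{'^6) // prints: ) ] }
```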
+ + ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) { + r1 := p.Rune(0) + r2 := p.Rune(1) + xor := r1 ^ r2 + if _, ok := xorMap[xor]; !ok { + xorMap[xor] = len(xorMasks) + xorMasks = append(xorMasks, xor) + } + entry := uint64(xorMap[xor]) << xorMaskShift + switch p.String(2) { + case "o": + entry |= openMask + case "c", "n": + default: + log.Fatalf("Unknown bracket class %q.", p.String(2)) + } + orMask[r1] = entry + }) + + w.WriteComment(` + xorMasks contains masks to be xor-ed with brackets to get the reverse + version.`) + w.WriteVar("xorMasks", xorMasks) + + done := map[rune]bool{} + + insert := func(r rune, c Class) { + if !done[r] { + t.Insert(r, orMask[r]|uint64(c)) + done[r] = true + } + } + + // Insert the derived BiDi properties. + ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) { + r := p.Rune(0) + class, ok := bidiClass[p.String(1)] + if !ok { + log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1)) + } + insert(r, class) + }) + visitDefaults(insert) + + // TODO: use sparse blocks. This would reduce table size considerably + // from the looks of it. + + sz, err := t.Gen(w) + if err != nil { + log.Fatal(err) + } + w.Size += sz +} + +// dummy values to make methods in gen_common compile. The real versions +// will be generated by this file to tables.go. +var ( + xorMasks []rune +) diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go new file mode 100644 index 000000000..51bd68fa7 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go @@ -0,0 +1,57 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +import ( + "unicode" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/ucd" + "golang.org/x/text/unicode/rangetable" +) + +// These tables are hand-extracted from: +// http://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt +func visitDefaults(fn func(r rune, c Class)) { + // first write default values for ranges listed above. + visitRunes(fn, AL, []rune{ + 0x0600, 0x07BF, // Arabic + 0x08A0, 0x08FF, // Arabic Extended-A + 0xFB50, 0xFDCF, // Arabic Presentation Forms + 0xFDF0, 0xFDFF, + 0xFE70, 0xFEFF, + 0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols + }) + visitRunes(fn, R, []rune{ + 0x0590, 0x05FF, // Hebrew + 0x07C0, 0x089F, // Nko et al. + 0xFB1D, 0xFB4F, + 0x00010800, 0x00010FFF, // Cypriot Syllabary et. al. + 0x0001E800, 0x0001EDFF, + 0x0001EF00, 0x0001EFFF, + }) + visitRunes(fn, ET, []rune{ // European Terminator + 0x20A0, 0x20Cf, // Currency symbols + }) + rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) { + fn(r, BN) // Boundary Neutral + }) + ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) { + if p.String(1) == "Default_Ignorable_Code_Point" { + fn(p.Rune(0), BN) // Boundary Neutral + } + }) +} + +func visitRunes(fn func(r rune, c Class), c Class, runes []rune) { + for i := 0; i < len(runes); i += 2 { + lo, hi := runes[i], runes[i+1] + for j := lo; j <= hi; j++ { + fn(j, c) + } + } +} diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go new file mode 100644 index 000000000..9cb994289 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go @@ -0,0 +1,64 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// Class is the Unicode BiDi class. Each rune has a single class. +type Class uint + +const ( + L Class = iota // LeftToRight + R // RightToLeft + EN // EuropeanNumber + ES // EuropeanSeparator + ET // EuropeanTerminator + AN // ArabicNumber + CS // CommonSeparator + B // ParagraphSeparator + S // SegmentSeparator + WS // WhiteSpace + ON // OtherNeutral + BN // BoundaryNeutral + NSM // NonspacingMark + AL // ArabicLetter + Control // Control LRO - PDI + + numClass + + LRO // LeftToRightOverride + RLO // RightToLeftOverride + LRE // LeftToRightEmbedding + RLE // RightToLeftEmbedding + PDF // PopDirectionalFormat + LRI // LeftToRightIsolate + RLI // RightToLeftIsolate + FSI // FirstStrongIsolate + PDI // PopDirectionalIsolate + + unknownClass = ^Class(0) +) + +var controlToClass = map[rune]Class{ + 0x202D: LRO, // LeftToRightOverride, + 0x202E: RLO, // RightToLeftOverride, + 0x202A: LRE, // LeftToRightEmbedding, + 0x202B: RLE, // RightToLeftEmbedding, + 0x202C: PDF, // PopDirectionalFormat, + 0x2066: LRI, // LeftToRightIsolate, + 0x2067: RLI, // RightToLeftIsolate, + 0x2068: FSI, // FirstStrongIsolate, + 0x2069: PDI, // PopDirectionalIsolate, +} + +// A trie entry has the following bits: +// 7..5 XOR mask for brackets +// 4 1: Bracket open, 0: Bracket close +// 3..0 Class type + +const ( + openMask = 0x10 + xorMaskShift = 5 +) diff --git a/vendor/golang.org/x/text/unicode/bidi/prop.go b/vendor/golang.org/x/text/unicode/bidi/prop.go new file mode 100644 index 000000000..7c9484e1f --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/prop.go @@ -0,0 +1,206 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bidi + +import "unicode/utf8" + +// Properties provides access to BiDi properties of runes. +type Properties struct { + entry uint8 + last uint8 +} + +var trie = newBidiTrie(0) + +// TODO: using this for bidirule reduces the running time by about 5%. Consider +// if this is worth exposing or if we can find a way to speed up the Class +// method. +// +// // CompactClass is like Class, but maps all of the BiDi control classes +// // (LRO, RLO, LRE, RLE, PDF, LRI, RLI, FSI, PDI) to the class Control. +// func (p Properties) CompactClass() Class { +// return Class(p.entry & 0x0F) +// } + +// Class returns the Bidi class for p. +func (p Properties) Class() Class { + c := Class(p.entry & 0x0F) + if c == Control { + c = controlByteToClass[p.last&0xF] + } + return c +} + +// IsBracket reports whether the rune is a bracket. +func (p Properties) IsBracket() bool { return p.entry&0xF0 != 0 } + +// IsOpeningBracket reports whether the rune is an opening bracket. +// IsBracket must return true. +func (p Properties) IsOpeningBracket() bool { return p.entry&openMask != 0 } + +// TODO: find a better API and expose. 
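Putting the bit layout above together: once a rune's packed entry is looked up, its class, bracket flags, and mirrored counterpart all fall out of a few masks. A sketch using LookupRune from later in this file (an illustrative aside, not part of the vendored source):

```go
p, _ := LookupRune('(')
fmt.Println(p.Class() == ON)      // true: '(' is Other Neutral
fmt.Println(p.IsOpeningBracket()) // true: bit 4 (openMask) is set
fmt.Printf("%c\n", p.reverseBracket('(')) // ): xorMasks[entry>>xorMaskShift] ^ '('
```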
+func (p Properties) reverseBracket(r rune) rune { + return xorMasks[p.entry>>xorMaskShift] ^ r +} + +var controlByteToClass = [16]Class{ + 0xD: LRO, // U+202D LeftToRightOverride, + 0xE: RLO, // U+202E RightToLeftOverride, + 0xA: LRE, // U+202A LeftToRightEmbedding, + 0xB: RLE, // U+202B RightToLeftEmbedding, + 0xC: PDF, // U+202C PopDirectionalFormat, + 0x6: LRI, // U+2066 LeftToRightIsolate, + 0x7: RLI, // U+2067 RightToLeftIsolate, + 0x8: FSI, // U+2068 FirstStrongIsolate, + 0x9: PDI, // U+2069 PopDirectionalIsolate, +} + +// LookupRune returns properties for r. +func LookupRune(r rune) (p Properties, size int) { + var buf [4]byte + n := utf8.EncodeRune(buf[:], r) + return Lookup(buf[:n]) +} + +// TODO: these lookup methods are based on the generated trie code. The returned +// sizes have slightly different semantics from the generated code, in that it +// always returns size==1 for an illegal UTF-8 byte (instead of the length +// of the maximum invalid subsequence). Most Transformers, like unicode/norm, +// leave invalid UTF-8 untouched, in which case it has performance benefits to +// do so (without changing the semantics). Bidi requires the semantics used here +// for the bidirule implementation to be compatible with the Go semantics. +// They ultimately should perhaps be adopted by all trie implementations, for +// convenience sake. +// This unrolled code also boosts performance of the secure/bidirule package by +// about 30%. +// So, to remove this code: +// - add option to trie generator to define return type. +// - always return 1 byte size for ill-formed UTF-8 runes. + +// Lookup returns properties for the first rune in s and the width in bytes of +// its encoding. The size will be 0 if s does not hold enough bytes to complete +// the encoding. +func Lookup(s []byte) (p Properties, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return Properties{entry: bidiValues[c0]}, 1 + case c0 < 0xC2: + return Properties{}, 1 + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c1)}, 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c2), last: c2}, 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return Properties{}, 1 + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c3)}, 4 + } + // Illegal rune + return Properties{}, 1 +} + +// LookupString returns properties for the first rune in s and the width in +// bytes of its encoding. The size will be 0 if s does not hold enough bytes to +// complete the encoding. 
+func LookupString(s string) (p Properties, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return Properties{entry: bidiValues[c0]}, 1 + case c0 < 0xC2: + return Properties{}, 1 + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c1)}, 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c2), last: c2}, 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return Properties{}, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return Properties{}, 1 + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return Properties{}, 1 + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return Properties{}, 1 + } + return Properties{entry: trie.lookupValue(uint32(i), c3)}, 4 + } + // Illegal rune + return Properties{}, 1 +} diff --git a/vendor/golang.org/x/text/unicode/bidi/tables.go b/vendor/golang.org/x/text/unicode/bidi/tables.go new file mode 100644 index 000000000..2d4dde07c --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/tables.go @@ -0,0 +1,1779 @@ +// This file was generated by go generate; DO NOT EDIT + +package bidi + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "9.0.0" + +// xorMasks contains masks to be xor-ed with brackets to get the reverse +// version. +var xorMasks = []int32{ // 8 elements + 0, 1, 6, 7, 3, 15, 29, 63, +} // Size: 56 bytes + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookup(s []byte) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupUnsafe(s []byte) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookupString(s string) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupStringUnsafe(s string) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// bidiTrie. Total size: 15744 bytes (15.38 KiB). Checksum: b4c3b70954803b86. +type bidiTrie struct{} + +func newBidiTrie(i int) *bidiTrie { + return &bidiTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *bidiTrie) lookupValue(n uint32, b byte) uint8 { + switch { + default: + return uint8(bidiValues[n<<6+uint32(b)]) + } +} + +// bidiValues: 222 blocks, 14208 entries, 14208 bytes +// The third block is the zero block. 
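The lookup helpers above and the exported Lookup/LookupRune/LookupString functions in prop.go share the same multi-stage trie walk: the lead byte selects a bidiIndex block, each continuation byte refines the index via n<<6+b, and the final value indexes bidiValues. A hypothetical caller-side sketch, not part of the vendored files, assuming the package is imported from this vendor tree as golang.org/x/text/unicode/bidi:

// Hypothetical usage sketch (not part of the vendored files): walk a byte
// slice with bidi.Lookup, which returns the Properties for the first rune
// and the width of its encoding (size 0 means the input ends mid-rune;
// size 1 is also returned for ill-formed UTF-8, as documented in prop.go).
package main

import (
	"fmt"

	"golang.org/x/text/unicode/bidi"
)

func main() {
	s := []byte("a(\u05d0)") // 'a', '(', Hebrew Alef, ')'
	for i := 0; i < len(s); {
		p, sz := bidi.Lookup(s[i:])
		if sz == 0 {
			break // incomplete rune at the end of the input
		}
		fmt.Printf("offset %d: class=%d bracket=%v open=%v\n",
			i, p.Class(), p.IsBracket(), p.IsOpeningBracket())
		i += sz
	}
}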
+var bidiValues = [14208]uint8{ + // Block 0x0, offset 0x0 + 0x00: 0x000b, 0x01: 0x000b, 0x02: 0x000b, 0x03: 0x000b, 0x04: 0x000b, 0x05: 0x000b, + 0x06: 0x000b, 0x07: 0x000b, 0x08: 0x000b, 0x09: 0x0008, 0x0a: 0x0007, 0x0b: 0x0008, + 0x0c: 0x0009, 0x0d: 0x0007, 0x0e: 0x000b, 0x0f: 0x000b, 0x10: 0x000b, 0x11: 0x000b, + 0x12: 0x000b, 0x13: 0x000b, 0x14: 0x000b, 0x15: 0x000b, 0x16: 0x000b, 0x17: 0x000b, + 0x18: 0x000b, 0x19: 0x000b, 0x1a: 0x000b, 0x1b: 0x000b, 0x1c: 0x0007, 0x1d: 0x0007, + 0x1e: 0x0007, 0x1f: 0x0008, 0x20: 0x0009, 0x21: 0x000a, 0x22: 0x000a, 0x23: 0x0004, + 0x24: 0x0004, 0x25: 0x0004, 0x26: 0x000a, 0x27: 0x000a, 0x28: 0x003a, 0x29: 0x002a, + 0x2a: 0x000a, 0x2b: 0x0003, 0x2c: 0x0006, 0x2d: 0x0003, 0x2e: 0x0006, 0x2f: 0x0006, + 0x30: 0x0002, 0x31: 0x0002, 0x32: 0x0002, 0x33: 0x0002, 0x34: 0x0002, 0x35: 0x0002, + 0x36: 0x0002, 0x37: 0x0002, 0x38: 0x0002, 0x39: 0x0002, 0x3a: 0x0006, 0x3b: 0x000a, + 0x3c: 0x000a, 0x3d: 0x000a, 0x3e: 0x000a, 0x3f: 0x000a, + // Block 0x1, offset 0x40 + 0x40: 0x000a, + 0x5b: 0x005a, 0x5c: 0x000a, 0x5d: 0x004a, + 0x5e: 0x000a, 0x5f: 0x000a, 0x60: 0x000a, + 0x7b: 0x005a, + 0x7c: 0x000a, 0x7d: 0x004a, 0x7e: 0x000a, 0x7f: 0x000b, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x000b, 0xc1: 0x000b, 0xc2: 0x000b, 0xc3: 0x000b, 0xc4: 0x000b, 0xc5: 0x0007, + 0xc6: 0x000b, 0xc7: 0x000b, 0xc8: 0x000b, 0xc9: 0x000b, 0xca: 0x000b, 0xcb: 0x000b, + 0xcc: 0x000b, 0xcd: 0x000b, 0xce: 0x000b, 0xcf: 0x000b, 0xd0: 0x000b, 0xd1: 0x000b, + 0xd2: 0x000b, 0xd3: 0x000b, 0xd4: 0x000b, 0xd5: 0x000b, 0xd6: 0x000b, 0xd7: 0x000b, + 0xd8: 0x000b, 0xd9: 0x000b, 0xda: 0x000b, 0xdb: 0x000b, 0xdc: 0x000b, 0xdd: 0x000b, + 0xde: 0x000b, 0xdf: 0x000b, 0xe0: 0x0006, 0xe1: 0x000a, 0xe2: 0x0004, 0xe3: 0x0004, + 0xe4: 0x0004, 0xe5: 0x0004, 0xe6: 0x000a, 0xe7: 0x000a, 0xe8: 0x000a, 0xe9: 0x000a, + 0xeb: 0x000a, 0xec: 0x000a, 0xed: 0x000b, 0xee: 0x000a, 0xef: 0x000a, + 0xf0: 0x0004, 0xf1: 0x0004, 0xf2: 0x0002, 0xf3: 0x0002, 0xf4: 0x000a, + 0xf6: 0x000a, 0xf7: 0x000a, 0xf8: 0x000a, 0xf9: 0x0002, 0xfb: 0x000a, + 0xfc: 0x000a, 0xfd: 0x000a, 0xfe: 0x000a, 0xff: 0x000a, + // Block 0x4, offset 0x100 + 0x117: 0x000a, + 0x137: 0x000a, + // Block 0x5, offset 0x140 + 0x179: 0x000a, 0x17a: 0x000a, + // Block 0x6, offset 0x180 + 0x182: 0x000a, 0x183: 0x000a, 0x184: 0x000a, 0x185: 0x000a, + 0x186: 0x000a, 0x187: 0x000a, 0x188: 0x000a, 0x189: 0x000a, 0x18a: 0x000a, 0x18b: 0x000a, + 0x18c: 0x000a, 0x18d: 0x000a, 0x18e: 0x000a, 0x18f: 0x000a, + 0x192: 0x000a, 0x193: 0x000a, 0x194: 0x000a, 0x195: 0x000a, 0x196: 0x000a, 0x197: 0x000a, + 0x198: 0x000a, 0x199: 0x000a, 0x19a: 0x000a, 0x19b: 0x000a, 0x19c: 0x000a, 0x19d: 0x000a, + 0x19e: 0x000a, 0x19f: 0x000a, + 0x1a5: 0x000a, 0x1a6: 0x000a, 0x1a7: 0x000a, 0x1a8: 0x000a, 0x1a9: 0x000a, + 0x1aa: 0x000a, 0x1ab: 0x000a, 0x1ac: 0x000a, 0x1ad: 0x000a, 0x1af: 0x000a, + 0x1b0: 0x000a, 0x1b1: 0x000a, 0x1b2: 0x000a, 0x1b3: 0x000a, 0x1b4: 0x000a, 0x1b5: 0x000a, + 0x1b6: 0x000a, 0x1b7: 0x000a, 0x1b8: 0x000a, 0x1b9: 0x000a, 0x1ba: 0x000a, 0x1bb: 0x000a, + 0x1bc: 0x000a, 0x1bd: 0x000a, 0x1be: 0x000a, 0x1bf: 0x000a, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x000c, 0x1c1: 0x000c, 0x1c2: 0x000c, 0x1c3: 0x000c, 0x1c4: 0x000c, 0x1c5: 0x000c, + 0x1c6: 0x000c, 0x1c7: 0x000c, 0x1c8: 0x000c, 0x1c9: 0x000c, 0x1ca: 0x000c, 0x1cb: 0x000c, + 0x1cc: 0x000c, 0x1cd: 0x000c, 0x1ce: 0x000c, 0x1cf: 0x000c, 0x1d0: 0x000c, 0x1d1: 0x000c, + 0x1d2: 0x000c, 0x1d3: 0x000c, 0x1d4: 0x000c, 0x1d5: 0x000c, 0x1d6: 0x000c, 0x1d7: 0x000c, + 0x1d8: 0x000c, 0x1d9: 0x000c, 0x1da: 0x000c, 0x1db: 
0x000c, 0x1dc: 0x000c, 0x1dd: 0x000c, + 0x1de: 0x000c, 0x1df: 0x000c, 0x1e0: 0x000c, 0x1e1: 0x000c, 0x1e2: 0x000c, 0x1e3: 0x000c, + 0x1e4: 0x000c, 0x1e5: 0x000c, 0x1e6: 0x000c, 0x1e7: 0x000c, 0x1e8: 0x000c, 0x1e9: 0x000c, + 0x1ea: 0x000c, 0x1eb: 0x000c, 0x1ec: 0x000c, 0x1ed: 0x000c, 0x1ee: 0x000c, 0x1ef: 0x000c, + 0x1f0: 0x000c, 0x1f1: 0x000c, 0x1f2: 0x000c, 0x1f3: 0x000c, 0x1f4: 0x000c, 0x1f5: 0x000c, + 0x1f6: 0x000c, 0x1f7: 0x000c, 0x1f8: 0x000c, 0x1f9: 0x000c, 0x1fa: 0x000c, 0x1fb: 0x000c, + 0x1fc: 0x000c, 0x1fd: 0x000c, 0x1fe: 0x000c, 0x1ff: 0x000c, + // Block 0x8, offset 0x200 + 0x200: 0x000c, 0x201: 0x000c, 0x202: 0x000c, 0x203: 0x000c, 0x204: 0x000c, 0x205: 0x000c, + 0x206: 0x000c, 0x207: 0x000c, 0x208: 0x000c, 0x209: 0x000c, 0x20a: 0x000c, 0x20b: 0x000c, + 0x20c: 0x000c, 0x20d: 0x000c, 0x20e: 0x000c, 0x20f: 0x000c, 0x210: 0x000c, 0x211: 0x000c, + 0x212: 0x000c, 0x213: 0x000c, 0x214: 0x000c, 0x215: 0x000c, 0x216: 0x000c, 0x217: 0x000c, + 0x218: 0x000c, 0x219: 0x000c, 0x21a: 0x000c, 0x21b: 0x000c, 0x21c: 0x000c, 0x21d: 0x000c, + 0x21e: 0x000c, 0x21f: 0x000c, 0x220: 0x000c, 0x221: 0x000c, 0x222: 0x000c, 0x223: 0x000c, + 0x224: 0x000c, 0x225: 0x000c, 0x226: 0x000c, 0x227: 0x000c, 0x228: 0x000c, 0x229: 0x000c, + 0x22a: 0x000c, 0x22b: 0x000c, 0x22c: 0x000c, 0x22d: 0x000c, 0x22e: 0x000c, 0x22f: 0x000c, + 0x234: 0x000a, 0x235: 0x000a, + 0x23e: 0x000a, + // Block 0x9, offset 0x240 + 0x244: 0x000a, 0x245: 0x000a, + 0x247: 0x000a, + // Block 0xa, offset 0x280 + 0x2b6: 0x000a, + // Block 0xb, offset 0x2c0 + 0x2c3: 0x000c, 0x2c4: 0x000c, 0x2c5: 0x000c, + 0x2c6: 0x000c, 0x2c7: 0x000c, 0x2c8: 0x000c, 0x2c9: 0x000c, + // Block 0xc, offset 0x300 + 0x30a: 0x000a, + 0x30d: 0x000a, 0x30e: 0x000a, 0x30f: 0x0004, 0x310: 0x0001, 0x311: 0x000c, + 0x312: 0x000c, 0x313: 0x000c, 0x314: 0x000c, 0x315: 0x000c, 0x316: 0x000c, 0x317: 0x000c, + 0x318: 0x000c, 0x319: 0x000c, 0x31a: 0x000c, 0x31b: 0x000c, 0x31c: 0x000c, 0x31d: 0x000c, + 0x31e: 0x000c, 0x31f: 0x000c, 0x320: 0x000c, 0x321: 0x000c, 0x322: 0x000c, 0x323: 0x000c, + 0x324: 0x000c, 0x325: 0x000c, 0x326: 0x000c, 0x327: 0x000c, 0x328: 0x000c, 0x329: 0x000c, + 0x32a: 0x000c, 0x32b: 0x000c, 0x32c: 0x000c, 0x32d: 0x000c, 0x32e: 0x000c, 0x32f: 0x000c, + 0x330: 0x000c, 0x331: 0x000c, 0x332: 0x000c, 0x333: 0x000c, 0x334: 0x000c, 0x335: 0x000c, + 0x336: 0x000c, 0x337: 0x000c, 0x338: 0x000c, 0x339: 0x000c, 0x33a: 0x000c, 0x33b: 0x000c, + 0x33c: 0x000c, 0x33d: 0x000c, 0x33e: 0x0001, 0x33f: 0x000c, + // Block 0xd, offset 0x340 + 0x340: 0x0001, 0x341: 0x000c, 0x342: 0x000c, 0x343: 0x0001, 0x344: 0x000c, 0x345: 0x000c, + 0x346: 0x0001, 0x347: 0x000c, 0x348: 0x0001, 0x349: 0x0001, 0x34a: 0x0001, 0x34b: 0x0001, + 0x34c: 0x0001, 0x34d: 0x0001, 0x34e: 0x0001, 0x34f: 0x0001, 0x350: 0x0001, 0x351: 0x0001, + 0x352: 0x0001, 0x353: 0x0001, 0x354: 0x0001, 0x355: 0x0001, 0x356: 0x0001, 0x357: 0x0001, + 0x358: 0x0001, 0x359: 0x0001, 0x35a: 0x0001, 0x35b: 0x0001, 0x35c: 0x0001, 0x35d: 0x0001, + 0x35e: 0x0001, 0x35f: 0x0001, 0x360: 0x0001, 0x361: 0x0001, 0x362: 0x0001, 0x363: 0x0001, + 0x364: 0x0001, 0x365: 0x0001, 0x366: 0x0001, 0x367: 0x0001, 0x368: 0x0001, 0x369: 0x0001, + 0x36a: 0x0001, 0x36b: 0x0001, 0x36c: 0x0001, 0x36d: 0x0001, 0x36e: 0x0001, 0x36f: 0x0001, + 0x370: 0x0001, 0x371: 0x0001, 0x372: 0x0001, 0x373: 0x0001, 0x374: 0x0001, 0x375: 0x0001, + 0x376: 0x0001, 0x377: 0x0001, 0x378: 0x0001, 0x379: 0x0001, 0x37a: 0x0001, 0x37b: 0x0001, + 0x37c: 0x0001, 0x37d: 0x0001, 0x37e: 0x0001, 0x37f: 0x0001, + // Block 0xe, offset 0x380 + 0x380: 0x0005, 0x381: 0x0005, 0x382: 0x0005, 
0x383: 0x0005, 0x384: 0x0005, 0x385: 0x0005, + 0x386: 0x000a, 0x387: 0x000a, 0x388: 0x000d, 0x389: 0x0004, 0x38a: 0x0004, 0x38b: 0x000d, + 0x38c: 0x0006, 0x38d: 0x000d, 0x38e: 0x000a, 0x38f: 0x000a, 0x390: 0x000c, 0x391: 0x000c, + 0x392: 0x000c, 0x393: 0x000c, 0x394: 0x000c, 0x395: 0x000c, 0x396: 0x000c, 0x397: 0x000c, + 0x398: 0x000c, 0x399: 0x000c, 0x39a: 0x000c, 0x39b: 0x000d, 0x39c: 0x000d, 0x39d: 0x000d, + 0x39e: 0x000d, 0x39f: 0x000d, 0x3a0: 0x000d, 0x3a1: 0x000d, 0x3a2: 0x000d, 0x3a3: 0x000d, + 0x3a4: 0x000d, 0x3a5: 0x000d, 0x3a6: 0x000d, 0x3a7: 0x000d, 0x3a8: 0x000d, 0x3a9: 0x000d, + 0x3aa: 0x000d, 0x3ab: 0x000d, 0x3ac: 0x000d, 0x3ad: 0x000d, 0x3ae: 0x000d, 0x3af: 0x000d, + 0x3b0: 0x000d, 0x3b1: 0x000d, 0x3b2: 0x000d, 0x3b3: 0x000d, 0x3b4: 0x000d, 0x3b5: 0x000d, + 0x3b6: 0x000d, 0x3b7: 0x000d, 0x3b8: 0x000d, 0x3b9: 0x000d, 0x3ba: 0x000d, 0x3bb: 0x000d, + 0x3bc: 0x000d, 0x3bd: 0x000d, 0x3be: 0x000d, 0x3bf: 0x000d, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x000d, 0x3c1: 0x000d, 0x3c2: 0x000d, 0x3c3: 0x000d, 0x3c4: 0x000d, 0x3c5: 0x000d, + 0x3c6: 0x000d, 0x3c7: 0x000d, 0x3c8: 0x000d, 0x3c9: 0x000d, 0x3ca: 0x000d, 0x3cb: 0x000c, + 0x3cc: 0x000c, 0x3cd: 0x000c, 0x3ce: 0x000c, 0x3cf: 0x000c, 0x3d0: 0x000c, 0x3d1: 0x000c, + 0x3d2: 0x000c, 0x3d3: 0x000c, 0x3d4: 0x000c, 0x3d5: 0x000c, 0x3d6: 0x000c, 0x3d7: 0x000c, + 0x3d8: 0x000c, 0x3d9: 0x000c, 0x3da: 0x000c, 0x3db: 0x000c, 0x3dc: 0x000c, 0x3dd: 0x000c, + 0x3de: 0x000c, 0x3df: 0x000c, 0x3e0: 0x0005, 0x3e1: 0x0005, 0x3e2: 0x0005, 0x3e3: 0x0005, + 0x3e4: 0x0005, 0x3e5: 0x0005, 0x3e6: 0x0005, 0x3e7: 0x0005, 0x3e8: 0x0005, 0x3e9: 0x0005, + 0x3ea: 0x0004, 0x3eb: 0x0005, 0x3ec: 0x0005, 0x3ed: 0x000d, 0x3ee: 0x000d, 0x3ef: 0x000d, + 0x3f0: 0x000c, 0x3f1: 0x000d, 0x3f2: 0x000d, 0x3f3: 0x000d, 0x3f4: 0x000d, 0x3f5: 0x000d, + 0x3f6: 0x000d, 0x3f7: 0x000d, 0x3f8: 0x000d, 0x3f9: 0x000d, 0x3fa: 0x000d, 0x3fb: 0x000d, + 0x3fc: 0x000d, 0x3fd: 0x000d, 0x3fe: 0x000d, 0x3ff: 0x000d, + // Block 0x10, offset 0x400 + 0x400: 0x000d, 0x401: 0x000d, 0x402: 0x000d, 0x403: 0x000d, 0x404: 0x000d, 0x405: 0x000d, + 0x406: 0x000d, 0x407: 0x000d, 0x408: 0x000d, 0x409: 0x000d, 0x40a: 0x000d, 0x40b: 0x000d, + 0x40c: 0x000d, 0x40d: 0x000d, 0x40e: 0x000d, 0x40f: 0x000d, 0x410: 0x000d, 0x411: 0x000d, + 0x412: 0x000d, 0x413: 0x000d, 0x414: 0x000d, 0x415: 0x000d, 0x416: 0x000d, 0x417: 0x000d, + 0x418: 0x000d, 0x419: 0x000d, 0x41a: 0x000d, 0x41b: 0x000d, 0x41c: 0x000d, 0x41d: 0x000d, + 0x41e: 0x000d, 0x41f: 0x000d, 0x420: 0x000d, 0x421: 0x000d, 0x422: 0x000d, 0x423: 0x000d, + 0x424: 0x000d, 0x425: 0x000d, 0x426: 0x000d, 0x427: 0x000d, 0x428: 0x000d, 0x429: 0x000d, + 0x42a: 0x000d, 0x42b: 0x000d, 0x42c: 0x000d, 0x42d: 0x000d, 0x42e: 0x000d, 0x42f: 0x000d, + 0x430: 0x000d, 0x431: 0x000d, 0x432: 0x000d, 0x433: 0x000d, 0x434: 0x000d, 0x435: 0x000d, + 0x436: 0x000d, 0x437: 0x000d, 0x438: 0x000d, 0x439: 0x000d, 0x43a: 0x000d, 0x43b: 0x000d, + 0x43c: 0x000d, 0x43d: 0x000d, 0x43e: 0x000d, 0x43f: 0x000d, + // Block 0x11, offset 0x440 + 0x440: 0x000d, 0x441: 0x000d, 0x442: 0x000d, 0x443: 0x000d, 0x444: 0x000d, 0x445: 0x000d, + 0x446: 0x000d, 0x447: 0x000d, 0x448: 0x000d, 0x449: 0x000d, 0x44a: 0x000d, 0x44b: 0x000d, + 0x44c: 0x000d, 0x44d: 0x000d, 0x44e: 0x000d, 0x44f: 0x000d, 0x450: 0x000d, 0x451: 0x000d, + 0x452: 0x000d, 0x453: 0x000d, 0x454: 0x000d, 0x455: 0x000d, 0x456: 0x000c, 0x457: 0x000c, + 0x458: 0x000c, 0x459: 0x000c, 0x45a: 0x000c, 0x45b: 0x000c, 0x45c: 0x000c, 0x45d: 0x0005, + 0x45e: 0x000a, 0x45f: 0x000c, 0x460: 0x000c, 0x461: 0x000c, 0x462: 0x000c, 0x463: 0x000c, + 0x464: 
0x000c, 0x465: 0x000d, 0x466: 0x000d, 0x467: 0x000c, 0x468: 0x000c, 0x469: 0x000a, + 0x46a: 0x000c, 0x46b: 0x000c, 0x46c: 0x000c, 0x46d: 0x000c, 0x46e: 0x000d, 0x46f: 0x000d, + 0x470: 0x0002, 0x471: 0x0002, 0x472: 0x0002, 0x473: 0x0002, 0x474: 0x0002, 0x475: 0x0002, + 0x476: 0x0002, 0x477: 0x0002, 0x478: 0x0002, 0x479: 0x0002, 0x47a: 0x000d, 0x47b: 0x000d, + 0x47c: 0x000d, 0x47d: 0x000d, 0x47e: 0x000d, 0x47f: 0x000d, + // Block 0x12, offset 0x480 + 0x480: 0x000d, 0x481: 0x000d, 0x482: 0x000d, 0x483: 0x000d, 0x484: 0x000d, 0x485: 0x000d, + 0x486: 0x000d, 0x487: 0x000d, 0x488: 0x000d, 0x489: 0x000d, 0x48a: 0x000d, 0x48b: 0x000d, + 0x48c: 0x000d, 0x48d: 0x000d, 0x48e: 0x000d, 0x48f: 0x000d, 0x490: 0x000d, 0x491: 0x000c, + 0x492: 0x000d, 0x493: 0x000d, 0x494: 0x000d, 0x495: 0x000d, 0x496: 0x000d, 0x497: 0x000d, + 0x498: 0x000d, 0x499: 0x000d, 0x49a: 0x000d, 0x49b: 0x000d, 0x49c: 0x000d, 0x49d: 0x000d, + 0x49e: 0x000d, 0x49f: 0x000d, 0x4a0: 0x000d, 0x4a1: 0x000d, 0x4a2: 0x000d, 0x4a3: 0x000d, + 0x4a4: 0x000d, 0x4a5: 0x000d, 0x4a6: 0x000d, 0x4a7: 0x000d, 0x4a8: 0x000d, 0x4a9: 0x000d, + 0x4aa: 0x000d, 0x4ab: 0x000d, 0x4ac: 0x000d, 0x4ad: 0x000d, 0x4ae: 0x000d, 0x4af: 0x000d, + 0x4b0: 0x000c, 0x4b1: 0x000c, 0x4b2: 0x000c, 0x4b3: 0x000c, 0x4b4: 0x000c, 0x4b5: 0x000c, + 0x4b6: 0x000c, 0x4b7: 0x000c, 0x4b8: 0x000c, 0x4b9: 0x000c, 0x4ba: 0x000c, 0x4bb: 0x000c, + 0x4bc: 0x000c, 0x4bd: 0x000c, 0x4be: 0x000c, 0x4bf: 0x000c, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x000c, 0x4c1: 0x000c, 0x4c2: 0x000c, 0x4c3: 0x000c, 0x4c4: 0x000c, 0x4c5: 0x000c, + 0x4c6: 0x000c, 0x4c7: 0x000c, 0x4c8: 0x000c, 0x4c9: 0x000c, 0x4ca: 0x000c, 0x4cb: 0x000d, + 0x4cc: 0x000d, 0x4cd: 0x000d, 0x4ce: 0x000d, 0x4cf: 0x000d, 0x4d0: 0x000d, 0x4d1: 0x000d, + 0x4d2: 0x000d, 0x4d3: 0x000d, 0x4d4: 0x000d, 0x4d5: 0x000d, 0x4d6: 0x000d, 0x4d7: 0x000d, + 0x4d8: 0x000d, 0x4d9: 0x000d, 0x4da: 0x000d, 0x4db: 0x000d, 0x4dc: 0x000d, 0x4dd: 0x000d, + 0x4de: 0x000d, 0x4df: 0x000d, 0x4e0: 0x000d, 0x4e1: 0x000d, 0x4e2: 0x000d, 0x4e3: 0x000d, + 0x4e4: 0x000d, 0x4e5: 0x000d, 0x4e6: 0x000d, 0x4e7: 0x000d, 0x4e8: 0x000d, 0x4e9: 0x000d, + 0x4ea: 0x000d, 0x4eb: 0x000d, 0x4ec: 0x000d, 0x4ed: 0x000d, 0x4ee: 0x000d, 0x4ef: 0x000d, + 0x4f0: 0x000d, 0x4f1: 0x000d, 0x4f2: 0x000d, 0x4f3: 0x000d, 0x4f4: 0x000d, 0x4f5: 0x000d, + 0x4f6: 0x000d, 0x4f7: 0x000d, 0x4f8: 0x000d, 0x4f9: 0x000d, 0x4fa: 0x000d, 0x4fb: 0x000d, + 0x4fc: 0x000d, 0x4fd: 0x000d, 0x4fe: 0x000d, 0x4ff: 0x000d, + // Block 0x14, offset 0x500 + 0x500: 0x000d, 0x501: 0x000d, 0x502: 0x000d, 0x503: 0x000d, 0x504: 0x000d, 0x505: 0x000d, + 0x506: 0x000d, 0x507: 0x000d, 0x508: 0x000d, 0x509: 0x000d, 0x50a: 0x000d, 0x50b: 0x000d, + 0x50c: 0x000d, 0x50d: 0x000d, 0x50e: 0x000d, 0x50f: 0x000d, 0x510: 0x000d, 0x511: 0x000d, + 0x512: 0x000d, 0x513: 0x000d, 0x514: 0x000d, 0x515: 0x000d, 0x516: 0x000d, 0x517: 0x000d, + 0x518: 0x000d, 0x519: 0x000d, 0x51a: 0x000d, 0x51b: 0x000d, 0x51c: 0x000d, 0x51d: 0x000d, + 0x51e: 0x000d, 0x51f: 0x000d, 0x520: 0x000d, 0x521: 0x000d, 0x522: 0x000d, 0x523: 0x000d, + 0x524: 0x000d, 0x525: 0x000d, 0x526: 0x000c, 0x527: 0x000c, 0x528: 0x000c, 0x529: 0x000c, + 0x52a: 0x000c, 0x52b: 0x000c, 0x52c: 0x000c, 0x52d: 0x000c, 0x52e: 0x000c, 0x52f: 0x000c, + 0x530: 0x000c, 0x531: 0x000d, 0x532: 0x000d, 0x533: 0x000d, 0x534: 0x000d, 0x535: 0x000d, + 0x536: 0x000d, 0x537: 0x000d, 0x538: 0x000d, 0x539: 0x000d, 0x53a: 0x000d, 0x53b: 0x000d, + 0x53c: 0x000d, 0x53d: 0x000d, 0x53e: 0x000d, 0x53f: 0x000d, + // Block 0x15, offset 0x540 + 0x540: 0x0001, 0x541: 0x0001, 0x542: 0x0001, 0x543: 0x0001, 
0x544: 0x0001, 0x545: 0x0001, + 0x546: 0x0001, 0x547: 0x0001, 0x548: 0x0001, 0x549: 0x0001, 0x54a: 0x0001, 0x54b: 0x0001, + 0x54c: 0x0001, 0x54d: 0x0001, 0x54e: 0x0001, 0x54f: 0x0001, 0x550: 0x0001, 0x551: 0x0001, + 0x552: 0x0001, 0x553: 0x0001, 0x554: 0x0001, 0x555: 0x0001, 0x556: 0x0001, 0x557: 0x0001, + 0x558: 0x0001, 0x559: 0x0001, 0x55a: 0x0001, 0x55b: 0x0001, 0x55c: 0x0001, 0x55d: 0x0001, + 0x55e: 0x0001, 0x55f: 0x0001, 0x560: 0x0001, 0x561: 0x0001, 0x562: 0x0001, 0x563: 0x0001, + 0x564: 0x0001, 0x565: 0x0001, 0x566: 0x0001, 0x567: 0x0001, 0x568: 0x0001, 0x569: 0x0001, + 0x56a: 0x0001, 0x56b: 0x000c, 0x56c: 0x000c, 0x56d: 0x000c, 0x56e: 0x000c, 0x56f: 0x000c, + 0x570: 0x000c, 0x571: 0x000c, 0x572: 0x000c, 0x573: 0x000c, 0x574: 0x0001, 0x575: 0x0001, + 0x576: 0x000a, 0x577: 0x000a, 0x578: 0x000a, 0x579: 0x000a, 0x57a: 0x0001, 0x57b: 0x0001, + 0x57c: 0x0001, 0x57d: 0x0001, 0x57e: 0x0001, 0x57f: 0x0001, + // Block 0x16, offset 0x580 + 0x580: 0x0001, 0x581: 0x0001, 0x582: 0x0001, 0x583: 0x0001, 0x584: 0x0001, 0x585: 0x0001, + 0x586: 0x0001, 0x587: 0x0001, 0x588: 0x0001, 0x589: 0x0001, 0x58a: 0x0001, 0x58b: 0x0001, + 0x58c: 0x0001, 0x58d: 0x0001, 0x58e: 0x0001, 0x58f: 0x0001, 0x590: 0x0001, 0x591: 0x0001, + 0x592: 0x0001, 0x593: 0x0001, 0x594: 0x0001, 0x595: 0x0001, 0x596: 0x000c, 0x597: 0x000c, + 0x598: 0x000c, 0x599: 0x000c, 0x59a: 0x0001, 0x59b: 0x000c, 0x59c: 0x000c, 0x59d: 0x000c, + 0x59e: 0x000c, 0x59f: 0x000c, 0x5a0: 0x000c, 0x5a1: 0x000c, 0x5a2: 0x000c, 0x5a3: 0x000c, + 0x5a4: 0x0001, 0x5a5: 0x000c, 0x5a6: 0x000c, 0x5a7: 0x000c, 0x5a8: 0x0001, 0x5a9: 0x000c, + 0x5aa: 0x000c, 0x5ab: 0x000c, 0x5ac: 0x000c, 0x5ad: 0x000c, 0x5ae: 0x0001, 0x5af: 0x0001, + 0x5b0: 0x0001, 0x5b1: 0x0001, 0x5b2: 0x0001, 0x5b3: 0x0001, 0x5b4: 0x0001, 0x5b5: 0x0001, + 0x5b6: 0x0001, 0x5b7: 0x0001, 0x5b8: 0x0001, 0x5b9: 0x0001, 0x5ba: 0x0001, 0x5bb: 0x0001, + 0x5bc: 0x0001, 0x5bd: 0x0001, 0x5be: 0x0001, 0x5bf: 0x0001, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0001, 0x5c1: 0x0001, 0x5c2: 0x0001, 0x5c3: 0x0001, 0x5c4: 0x0001, 0x5c5: 0x0001, + 0x5c6: 0x0001, 0x5c7: 0x0001, 0x5c8: 0x0001, 0x5c9: 0x0001, 0x5ca: 0x0001, 0x5cb: 0x0001, + 0x5cc: 0x0001, 0x5cd: 0x0001, 0x5ce: 0x0001, 0x5cf: 0x0001, 0x5d0: 0x0001, 0x5d1: 0x0001, + 0x5d2: 0x0001, 0x5d3: 0x0001, 0x5d4: 0x0001, 0x5d5: 0x0001, 0x5d6: 0x0001, 0x5d7: 0x0001, + 0x5d8: 0x0001, 0x5d9: 0x000c, 0x5da: 0x000c, 0x5db: 0x000c, 0x5dc: 0x0001, 0x5dd: 0x0001, + 0x5de: 0x0001, 0x5df: 0x0001, 0x5e0: 0x0001, 0x5e1: 0x0001, 0x5e2: 0x0001, 0x5e3: 0x0001, + 0x5e4: 0x0001, 0x5e5: 0x0001, 0x5e6: 0x0001, 0x5e7: 0x0001, 0x5e8: 0x0001, 0x5e9: 0x0001, + 0x5ea: 0x0001, 0x5eb: 0x0001, 0x5ec: 0x0001, 0x5ed: 0x0001, 0x5ee: 0x0001, 0x5ef: 0x0001, + 0x5f0: 0x0001, 0x5f1: 0x0001, 0x5f2: 0x0001, 0x5f3: 0x0001, 0x5f4: 0x0001, 0x5f5: 0x0001, + 0x5f6: 0x0001, 0x5f7: 0x0001, 0x5f8: 0x0001, 0x5f9: 0x0001, 0x5fa: 0x0001, 0x5fb: 0x0001, + 0x5fc: 0x0001, 0x5fd: 0x0001, 0x5fe: 0x0001, 0x5ff: 0x0001, + // Block 0x18, offset 0x600 + 0x600: 0x0001, 0x601: 0x0001, 0x602: 0x0001, 0x603: 0x0001, 0x604: 0x0001, 0x605: 0x0001, + 0x606: 0x0001, 0x607: 0x0001, 0x608: 0x0001, 0x609: 0x0001, 0x60a: 0x0001, 0x60b: 0x0001, + 0x60c: 0x0001, 0x60d: 0x0001, 0x60e: 0x0001, 0x60f: 0x0001, 0x610: 0x0001, 0x611: 0x0001, + 0x612: 0x0001, 0x613: 0x0001, 0x614: 0x0001, 0x615: 0x0001, 0x616: 0x0001, 0x617: 0x0001, + 0x618: 0x0001, 0x619: 0x0001, 0x61a: 0x0001, 0x61b: 0x0001, 0x61c: 0x0001, 0x61d: 0x0001, + 0x61e: 0x0001, 0x61f: 0x0001, 0x620: 0x000d, 0x621: 0x000d, 0x622: 0x000d, 0x623: 0x000d, + 0x624: 0x000d, 0x625: 
0x000d, 0x626: 0x000d, 0x627: 0x000d, 0x628: 0x000d, 0x629: 0x000d, + 0x62a: 0x000d, 0x62b: 0x000d, 0x62c: 0x000d, 0x62d: 0x000d, 0x62e: 0x000d, 0x62f: 0x000d, + 0x630: 0x000d, 0x631: 0x000d, 0x632: 0x000d, 0x633: 0x000d, 0x634: 0x000d, 0x635: 0x000d, + 0x636: 0x000d, 0x637: 0x000d, 0x638: 0x000d, 0x639: 0x000d, 0x63a: 0x000d, 0x63b: 0x000d, + 0x63c: 0x000d, 0x63d: 0x000d, 0x63e: 0x000d, 0x63f: 0x000d, + // Block 0x19, offset 0x640 + 0x640: 0x000d, 0x641: 0x000d, 0x642: 0x000d, 0x643: 0x000d, 0x644: 0x000d, 0x645: 0x000d, + 0x646: 0x000d, 0x647: 0x000d, 0x648: 0x000d, 0x649: 0x000d, 0x64a: 0x000d, 0x64b: 0x000d, + 0x64c: 0x000d, 0x64d: 0x000d, 0x64e: 0x000d, 0x64f: 0x000d, 0x650: 0x000d, 0x651: 0x000d, + 0x652: 0x000d, 0x653: 0x000d, 0x654: 0x000c, 0x655: 0x000c, 0x656: 0x000c, 0x657: 0x000c, + 0x658: 0x000c, 0x659: 0x000c, 0x65a: 0x000c, 0x65b: 0x000c, 0x65c: 0x000c, 0x65d: 0x000c, + 0x65e: 0x000c, 0x65f: 0x000c, 0x660: 0x000c, 0x661: 0x000c, 0x662: 0x0005, 0x663: 0x000c, + 0x664: 0x000c, 0x665: 0x000c, 0x666: 0x000c, 0x667: 0x000c, 0x668: 0x000c, 0x669: 0x000c, + 0x66a: 0x000c, 0x66b: 0x000c, 0x66c: 0x000c, 0x66d: 0x000c, 0x66e: 0x000c, 0x66f: 0x000c, + 0x670: 0x000c, 0x671: 0x000c, 0x672: 0x000c, 0x673: 0x000c, 0x674: 0x000c, 0x675: 0x000c, + 0x676: 0x000c, 0x677: 0x000c, 0x678: 0x000c, 0x679: 0x000c, 0x67a: 0x000c, 0x67b: 0x000c, + 0x67c: 0x000c, 0x67d: 0x000c, 0x67e: 0x000c, 0x67f: 0x000c, + // Block 0x1a, offset 0x680 + 0x680: 0x000c, 0x681: 0x000c, 0x682: 0x000c, + 0x6ba: 0x000c, + 0x6bc: 0x000c, + // Block 0x1b, offset 0x6c0 + 0x6c1: 0x000c, 0x6c2: 0x000c, 0x6c3: 0x000c, 0x6c4: 0x000c, 0x6c5: 0x000c, + 0x6c6: 0x000c, 0x6c7: 0x000c, 0x6c8: 0x000c, + 0x6cd: 0x000c, 0x6d1: 0x000c, + 0x6d2: 0x000c, 0x6d3: 0x000c, 0x6d4: 0x000c, 0x6d5: 0x000c, 0x6d6: 0x000c, 0x6d7: 0x000c, + 0x6e2: 0x000c, 0x6e3: 0x000c, + // Block 0x1c, offset 0x700 + 0x701: 0x000c, + 0x73c: 0x000c, + // Block 0x1d, offset 0x740 + 0x741: 0x000c, 0x742: 0x000c, 0x743: 0x000c, 0x744: 0x000c, + 0x74d: 0x000c, + 0x762: 0x000c, 0x763: 0x000c, + 0x772: 0x0004, 0x773: 0x0004, + 0x77b: 0x0004, + // Block 0x1e, offset 0x780 + 0x781: 0x000c, 0x782: 0x000c, + 0x7bc: 0x000c, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x000c, 0x7c2: 0x000c, + 0x7c7: 0x000c, 0x7c8: 0x000c, 0x7cb: 0x000c, + 0x7cc: 0x000c, 0x7cd: 0x000c, 0x7d1: 0x000c, + 0x7f0: 0x000c, 0x7f1: 0x000c, 0x7f5: 0x000c, + // Block 0x20, offset 0x800 + 0x801: 0x000c, 0x802: 0x000c, 0x803: 0x000c, 0x804: 0x000c, 0x805: 0x000c, + 0x807: 0x000c, 0x808: 0x000c, + 0x80d: 0x000c, + 0x822: 0x000c, 0x823: 0x000c, + 0x831: 0x0004, + // Block 0x21, offset 0x840 + 0x841: 0x000c, + 0x87c: 0x000c, 0x87f: 0x000c, + // Block 0x22, offset 0x880 + 0x881: 0x000c, 0x882: 0x000c, 0x883: 0x000c, 0x884: 0x000c, + 0x88d: 0x000c, + 0x896: 0x000c, + 0x8a2: 0x000c, 0x8a3: 0x000c, + // Block 0x23, offset 0x8c0 + 0x8c2: 0x000c, + // Block 0x24, offset 0x900 + 0x900: 0x000c, + 0x90d: 0x000c, + 0x933: 0x000a, 0x934: 0x000a, 0x935: 0x000a, + 0x936: 0x000a, 0x937: 0x000a, 0x938: 0x000a, 0x939: 0x0004, 0x93a: 0x000a, + // Block 0x25, offset 0x940 + 0x940: 0x000c, + 0x97e: 0x000c, 0x97f: 0x000c, + // Block 0x26, offset 0x980 + 0x980: 0x000c, + 0x986: 0x000c, 0x987: 0x000c, 0x988: 0x000c, 0x98a: 0x000c, 0x98b: 0x000c, + 0x98c: 0x000c, 0x98d: 0x000c, + 0x995: 0x000c, 0x996: 0x000c, + 0x9a2: 0x000c, 0x9a3: 0x000c, + 0x9b8: 0x000a, 0x9b9: 0x000a, 0x9ba: 0x000a, 0x9bb: 0x000a, + 0x9bc: 0x000a, 0x9bd: 0x000a, 0x9be: 0x000a, + // Block 0x27, offset 0x9c0 + 0x9cc: 0x000c, 0x9cd: 0x000c, + 0x9e2: 0x000c, 0x9e3: 0x000c, 
+ // Block 0x28, offset 0xa00 + 0xa01: 0x000c, + // Block 0x29, offset 0xa40 + 0xa41: 0x000c, 0xa42: 0x000c, 0xa43: 0x000c, 0xa44: 0x000c, + 0xa4d: 0x000c, + 0xa62: 0x000c, 0xa63: 0x000c, + // Block 0x2a, offset 0xa80 + 0xa8a: 0x000c, + 0xa92: 0x000c, 0xa93: 0x000c, 0xa94: 0x000c, 0xa96: 0x000c, + // Block 0x2b, offset 0xac0 + 0xaf1: 0x000c, 0xaf4: 0x000c, 0xaf5: 0x000c, + 0xaf6: 0x000c, 0xaf7: 0x000c, 0xaf8: 0x000c, 0xaf9: 0x000c, 0xafa: 0x000c, + 0xaff: 0x0004, + // Block 0x2c, offset 0xb00 + 0xb07: 0x000c, 0xb08: 0x000c, 0xb09: 0x000c, 0xb0a: 0x000c, 0xb0b: 0x000c, + 0xb0c: 0x000c, 0xb0d: 0x000c, 0xb0e: 0x000c, + // Block 0x2d, offset 0xb40 + 0xb71: 0x000c, 0xb74: 0x000c, 0xb75: 0x000c, + 0xb76: 0x000c, 0xb77: 0x000c, 0xb78: 0x000c, 0xb79: 0x000c, 0xb7b: 0x000c, + 0xb7c: 0x000c, + // Block 0x2e, offset 0xb80 + 0xb88: 0x000c, 0xb89: 0x000c, 0xb8a: 0x000c, 0xb8b: 0x000c, + 0xb8c: 0x000c, 0xb8d: 0x000c, + // Block 0x2f, offset 0xbc0 + 0xbd8: 0x000c, 0xbd9: 0x000c, + 0xbf5: 0x000c, + 0xbf7: 0x000c, 0xbf9: 0x000c, 0xbfa: 0x003a, 0xbfb: 0x002a, + 0xbfc: 0x003a, 0xbfd: 0x002a, + // Block 0x30, offset 0xc00 + 0xc31: 0x000c, 0xc32: 0x000c, 0xc33: 0x000c, 0xc34: 0x000c, 0xc35: 0x000c, + 0xc36: 0x000c, 0xc37: 0x000c, 0xc38: 0x000c, 0xc39: 0x000c, 0xc3a: 0x000c, 0xc3b: 0x000c, + 0xc3c: 0x000c, 0xc3d: 0x000c, 0xc3e: 0x000c, + // Block 0x31, offset 0xc40 + 0xc40: 0x000c, 0xc41: 0x000c, 0xc42: 0x000c, 0xc43: 0x000c, 0xc44: 0x000c, + 0xc46: 0x000c, 0xc47: 0x000c, + 0xc4d: 0x000c, 0xc4e: 0x000c, 0xc4f: 0x000c, 0xc50: 0x000c, 0xc51: 0x000c, + 0xc52: 0x000c, 0xc53: 0x000c, 0xc54: 0x000c, 0xc55: 0x000c, 0xc56: 0x000c, 0xc57: 0x000c, + 0xc59: 0x000c, 0xc5a: 0x000c, 0xc5b: 0x000c, 0xc5c: 0x000c, 0xc5d: 0x000c, + 0xc5e: 0x000c, 0xc5f: 0x000c, 0xc60: 0x000c, 0xc61: 0x000c, 0xc62: 0x000c, 0xc63: 0x000c, + 0xc64: 0x000c, 0xc65: 0x000c, 0xc66: 0x000c, 0xc67: 0x000c, 0xc68: 0x000c, 0xc69: 0x000c, + 0xc6a: 0x000c, 0xc6b: 0x000c, 0xc6c: 0x000c, 0xc6d: 0x000c, 0xc6e: 0x000c, 0xc6f: 0x000c, + 0xc70: 0x000c, 0xc71: 0x000c, 0xc72: 0x000c, 0xc73: 0x000c, 0xc74: 0x000c, 0xc75: 0x000c, + 0xc76: 0x000c, 0xc77: 0x000c, 0xc78: 0x000c, 0xc79: 0x000c, 0xc7a: 0x000c, 0xc7b: 0x000c, + 0xc7c: 0x000c, + // Block 0x32, offset 0xc80 + 0xc86: 0x000c, + // Block 0x33, offset 0xcc0 + 0xced: 0x000c, 0xcee: 0x000c, 0xcef: 0x000c, + 0xcf0: 0x000c, 0xcf2: 0x000c, 0xcf3: 0x000c, 0xcf4: 0x000c, 0xcf5: 0x000c, + 0xcf6: 0x000c, 0xcf7: 0x000c, 0xcf9: 0x000c, 0xcfa: 0x000c, + 0xcfd: 0x000c, 0xcfe: 0x000c, + // Block 0x34, offset 0xd00 + 0xd18: 0x000c, 0xd19: 0x000c, + 0xd1e: 0x000c, 0xd1f: 0x000c, 0xd20: 0x000c, + 0xd31: 0x000c, 0xd32: 0x000c, 0xd33: 0x000c, 0xd34: 0x000c, + // Block 0x35, offset 0xd40 + 0xd42: 0x000c, 0xd45: 0x000c, + 0xd46: 0x000c, + 0xd4d: 0x000c, + 0xd5d: 0x000c, + // Block 0x36, offset 0xd80 + 0xd9d: 0x000c, + 0xd9e: 0x000c, 0xd9f: 0x000c, + // Block 0x37, offset 0xdc0 + 0xdd0: 0x000a, 0xdd1: 0x000a, + 0xdd2: 0x000a, 0xdd3: 0x000a, 0xdd4: 0x000a, 0xdd5: 0x000a, 0xdd6: 0x000a, 0xdd7: 0x000a, + 0xdd8: 0x000a, 0xdd9: 0x000a, + // Block 0x38, offset 0xe00 + 0xe00: 0x000a, + // Block 0x39, offset 0xe40 + 0xe40: 0x0009, + 0xe5b: 0x007a, 0xe5c: 0x006a, + // Block 0x3a, offset 0xe80 + 0xe92: 0x000c, 0xe93: 0x000c, 0xe94: 0x000c, + 0xeb2: 0x000c, 0xeb3: 0x000c, 0xeb4: 0x000c, + // Block 0x3b, offset 0xec0 + 0xed2: 0x000c, 0xed3: 0x000c, + 0xef2: 0x000c, 0xef3: 0x000c, + // Block 0x3c, offset 0xf00 + 0xf34: 0x000c, 0xf35: 0x000c, + 0xf37: 0x000c, 0xf38: 0x000c, 0xf39: 0x000c, 0xf3a: 0x000c, 0xf3b: 0x000c, + 0xf3c: 0x000c, 0xf3d: 
0x000c, + // Block 0x3d, offset 0xf40 + 0xf46: 0x000c, 0xf49: 0x000c, 0xf4a: 0x000c, 0xf4b: 0x000c, + 0xf4c: 0x000c, 0xf4d: 0x000c, 0xf4e: 0x000c, 0xf4f: 0x000c, 0xf50: 0x000c, 0xf51: 0x000c, + 0xf52: 0x000c, 0xf53: 0x000c, + 0xf5b: 0x0004, 0xf5d: 0x000c, + 0xf70: 0x000a, 0xf71: 0x000a, 0xf72: 0x000a, 0xf73: 0x000a, 0xf74: 0x000a, 0xf75: 0x000a, + 0xf76: 0x000a, 0xf77: 0x000a, 0xf78: 0x000a, 0xf79: 0x000a, + // Block 0x3e, offset 0xf80 + 0xf80: 0x000a, 0xf81: 0x000a, 0xf82: 0x000a, 0xf83: 0x000a, 0xf84: 0x000a, 0xf85: 0x000a, + 0xf86: 0x000a, 0xf87: 0x000a, 0xf88: 0x000a, 0xf89: 0x000a, 0xf8a: 0x000a, 0xf8b: 0x000c, + 0xf8c: 0x000c, 0xf8d: 0x000c, 0xf8e: 0x000b, + // Block 0x3f, offset 0xfc0 + 0xfc5: 0x000c, + 0xfc6: 0x000c, + 0xfe9: 0x000c, + // Block 0x40, offset 0x1000 + 0x1020: 0x000c, 0x1021: 0x000c, 0x1022: 0x000c, + 0x1027: 0x000c, 0x1028: 0x000c, + 0x1032: 0x000c, + 0x1039: 0x000c, 0x103a: 0x000c, 0x103b: 0x000c, + // Block 0x41, offset 0x1040 + 0x1040: 0x000a, 0x1044: 0x000a, 0x1045: 0x000a, + // Block 0x42, offset 0x1080 + 0x109e: 0x000a, 0x109f: 0x000a, 0x10a0: 0x000a, 0x10a1: 0x000a, 0x10a2: 0x000a, 0x10a3: 0x000a, + 0x10a4: 0x000a, 0x10a5: 0x000a, 0x10a6: 0x000a, 0x10a7: 0x000a, 0x10a8: 0x000a, 0x10a9: 0x000a, + 0x10aa: 0x000a, 0x10ab: 0x000a, 0x10ac: 0x000a, 0x10ad: 0x000a, 0x10ae: 0x000a, 0x10af: 0x000a, + 0x10b0: 0x000a, 0x10b1: 0x000a, 0x10b2: 0x000a, 0x10b3: 0x000a, 0x10b4: 0x000a, 0x10b5: 0x000a, + 0x10b6: 0x000a, 0x10b7: 0x000a, 0x10b8: 0x000a, 0x10b9: 0x000a, 0x10ba: 0x000a, 0x10bb: 0x000a, + 0x10bc: 0x000a, 0x10bd: 0x000a, 0x10be: 0x000a, 0x10bf: 0x000a, + // Block 0x43, offset 0x10c0 + 0x10d7: 0x000c, + 0x10d8: 0x000c, 0x10db: 0x000c, + // Block 0x44, offset 0x1100 + 0x1116: 0x000c, + 0x1118: 0x000c, 0x1119: 0x000c, 0x111a: 0x000c, 0x111b: 0x000c, 0x111c: 0x000c, 0x111d: 0x000c, + 0x111e: 0x000c, 0x1120: 0x000c, 0x1122: 0x000c, + 0x1125: 0x000c, 0x1126: 0x000c, 0x1127: 0x000c, 0x1128: 0x000c, 0x1129: 0x000c, + 0x112a: 0x000c, 0x112b: 0x000c, 0x112c: 0x000c, + 0x1133: 0x000c, 0x1134: 0x000c, 0x1135: 0x000c, + 0x1136: 0x000c, 0x1137: 0x000c, 0x1138: 0x000c, 0x1139: 0x000c, 0x113a: 0x000c, 0x113b: 0x000c, + 0x113c: 0x000c, 0x113f: 0x000c, + // Block 0x45, offset 0x1140 + 0x1170: 0x000c, 0x1171: 0x000c, 0x1172: 0x000c, 0x1173: 0x000c, 0x1174: 0x000c, 0x1175: 0x000c, + 0x1176: 0x000c, 0x1177: 0x000c, 0x1178: 0x000c, 0x1179: 0x000c, 0x117a: 0x000c, 0x117b: 0x000c, + 0x117c: 0x000c, 0x117d: 0x000c, 0x117e: 0x000c, + // Block 0x46, offset 0x1180 + 0x1180: 0x000c, 0x1181: 0x000c, 0x1182: 0x000c, 0x1183: 0x000c, + 0x11b4: 0x000c, + 0x11b6: 0x000c, 0x11b7: 0x000c, 0x11b8: 0x000c, 0x11b9: 0x000c, 0x11ba: 0x000c, + 0x11bc: 0x000c, + // Block 0x47, offset 0x11c0 + 0x11c2: 0x000c, + 0x11eb: 0x000c, 0x11ec: 0x000c, 0x11ed: 0x000c, 0x11ee: 0x000c, 0x11ef: 0x000c, + 0x11f0: 0x000c, 0x11f1: 0x000c, 0x11f2: 0x000c, 0x11f3: 0x000c, + // Block 0x48, offset 0x1200 + 0x1200: 0x000c, 0x1201: 0x000c, + 0x1222: 0x000c, 0x1223: 0x000c, + 0x1224: 0x000c, 0x1225: 0x000c, 0x1228: 0x000c, 0x1229: 0x000c, + 0x122b: 0x000c, 0x122c: 0x000c, 0x122d: 0x000c, + // Block 0x49, offset 0x1240 + 0x1266: 0x000c, 0x1268: 0x000c, 0x1269: 0x000c, + 0x126d: 0x000c, 0x126f: 0x000c, + 0x1270: 0x000c, 0x1271: 0x000c, + // Block 0x4a, offset 0x1280 + 0x12ac: 0x000c, 0x12ad: 0x000c, 0x12ae: 0x000c, 0x12af: 0x000c, + 0x12b0: 0x000c, 0x12b1: 0x000c, 0x12b2: 0x000c, 0x12b3: 0x000c, + 0x12b6: 0x000c, 0x12b7: 0x000c, + // Block 0x4b, offset 0x12c0 + 0x12d0: 0x000c, 0x12d1: 0x000c, + 0x12d2: 0x000c, 0x12d4: 0x000c, 0x12d5: 
0x000c, 0x12d6: 0x000c, 0x12d7: 0x000c, + 0x12d8: 0x000c, 0x12d9: 0x000c, 0x12da: 0x000c, 0x12db: 0x000c, 0x12dc: 0x000c, 0x12dd: 0x000c, + 0x12de: 0x000c, 0x12df: 0x000c, 0x12e0: 0x000c, 0x12e2: 0x000c, 0x12e3: 0x000c, + 0x12e4: 0x000c, 0x12e5: 0x000c, 0x12e6: 0x000c, 0x12e7: 0x000c, 0x12e8: 0x000c, + 0x12ed: 0x000c, + 0x12f4: 0x000c, + 0x12f8: 0x000c, 0x12f9: 0x000c, + // Block 0x4c, offset 0x1300 + 0x1300: 0x000c, 0x1301: 0x000c, 0x1302: 0x000c, 0x1303: 0x000c, 0x1304: 0x000c, 0x1305: 0x000c, + 0x1306: 0x000c, 0x1307: 0x000c, 0x1308: 0x000c, 0x1309: 0x000c, 0x130a: 0x000c, 0x130b: 0x000c, + 0x130c: 0x000c, 0x130d: 0x000c, 0x130e: 0x000c, 0x130f: 0x000c, 0x1310: 0x000c, 0x1311: 0x000c, + 0x1312: 0x000c, 0x1313: 0x000c, 0x1314: 0x000c, 0x1315: 0x000c, 0x1316: 0x000c, 0x1317: 0x000c, + 0x1318: 0x000c, 0x1319: 0x000c, 0x131a: 0x000c, 0x131b: 0x000c, 0x131c: 0x000c, 0x131d: 0x000c, + 0x131e: 0x000c, 0x131f: 0x000c, 0x1320: 0x000c, 0x1321: 0x000c, 0x1322: 0x000c, 0x1323: 0x000c, + 0x1324: 0x000c, 0x1325: 0x000c, 0x1326: 0x000c, 0x1327: 0x000c, 0x1328: 0x000c, 0x1329: 0x000c, + 0x132a: 0x000c, 0x132b: 0x000c, 0x132c: 0x000c, 0x132d: 0x000c, 0x132e: 0x000c, 0x132f: 0x000c, + 0x1330: 0x000c, 0x1331: 0x000c, 0x1332: 0x000c, 0x1333: 0x000c, 0x1334: 0x000c, 0x1335: 0x000c, + 0x133b: 0x000c, + 0x133c: 0x000c, 0x133d: 0x000c, 0x133e: 0x000c, 0x133f: 0x000c, + // Block 0x4d, offset 0x1340 + 0x137d: 0x000a, 0x137f: 0x000a, + // Block 0x4e, offset 0x1380 + 0x1380: 0x000a, 0x1381: 0x000a, + 0x138d: 0x000a, 0x138e: 0x000a, 0x138f: 0x000a, + 0x139d: 0x000a, + 0x139e: 0x000a, 0x139f: 0x000a, + 0x13ad: 0x000a, 0x13ae: 0x000a, 0x13af: 0x000a, + 0x13bd: 0x000a, 0x13be: 0x000a, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x0009, 0x13c1: 0x0009, 0x13c2: 0x0009, 0x13c3: 0x0009, 0x13c4: 0x0009, 0x13c5: 0x0009, + 0x13c6: 0x0009, 0x13c7: 0x0009, 0x13c8: 0x0009, 0x13c9: 0x0009, 0x13ca: 0x0009, 0x13cb: 0x000b, + 0x13cc: 0x000b, 0x13cd: 0x000b, 0x13cf: 0x0001, 0x13d0: 0x000a, 0x13d1: 0x000a, + 0x13d2: 0x000a, 0x13d3: 0x000a, 0x13d4: 0x000a, 0x13d5: 0x000a, 0x13d6: 0x000a, 0x13d7: 0x000a, + 0x13d8: 0x000a, 0x13d9: 0x000a, 0x13da: 0x000a, 0x13db: 0x000a, 0x13dc: 0x000a, 0x13dd: 0x000a, + 0x13de: 0x000a, 0x13df: 0x000a, 0x13e0: 0x000a, 0x13e1: 0x000a, 0x13e2: 0x000a, 0x13e3: 0x000a, + 0x13e4: 0x000a, 0x13e5: 0x000a, 0x13e6: 0x000a, 0x13e7: 0x000a, 0x13e8: 0x0009, 0x13e9: 0x0007, + 0x13ea: 0x000e, 0x13eb: 0x000e, 0x13ec: 0x000e, 0x13ed: 0x000e, 0x13ee: 0x000e, 0x13ef: 0x0006, + 0x13f0: 0x0004, 0x13f1: 0x0004, 0x13f2: 0x0004, 0x13f3: 0x0004, 0x13f4: 0x0004, 0x13f5: 0x000a, + 0x13f6: 0x000a, 0x13f7: 0x000a, 0x13f8: 0x000a, 0x13f9: 0x000a, 0x13fa: 0x000a, 0x13fb: 0x000a, + 0x13fc: 0x000a, 0x13fd: 0x000a, 0x13fe: 0x000a, 0x13ff: 0x000a, + // Block 0x50, offset 0x1400 + 0x1400: 0x000a, 0x1401: 0x000a, 0x1402: 0x000a, 0x1403: 0x000a, 0x1404: 0x0006, 0x1405: 0x009a, + 0x1406: 0x008a, 0x1407: 0x000a, 0x1408: 0x000a, 0x1409: 0x000a, 0x140a: 0x000a, 0x140b: 0x000a, + 0x140c: 0x000a, 0x140d: 0x000a, 0x140e: 0x000a, 0x140f: 0x000a, 0x1410: 0x000a, 0x1411: 0x000a, + 0x1412: 0x000a, 0x1413: 0x000a, 0x1414: 0x000a, 0x1415: 0x000a, 0x1416: 0x000a, 0x1417: 0x000a, + 0x1418: 0x000a, 0x1419: 0x000a, 0x141a: 0x000a, 0x141b: 0x000a, 0x141c: 0x000a, 0x141d: 0x000a, + 0x141e: 0x000a, 0x141f: 0x0009, 0x1420: 0x000b, 0x1421: 0x000b, 0x1422: 0x000b, 0x1423: 0x000b, + 0x1424: 0x000b, 0x1425: 0x000b, 0x1426: 0x000e, 0x1427: 0x000e, 0x1428: 0x000e, 0x1429: 0x000e, + 0x142a: 0x000b, 0x142b: 0x000b, 0x142c: 0x000b, 0x142d: 0x000b, 0x142e: 0x000b, 0x142f: 
0x000b, + 0x1430: 0x0002, 0x1434: 0x0002, 0x1435: 0x0002, + 0x1436: 0x0002, 0x1437: 0x0002, 0x1438: 0x0002, 0x1439: 0x0002, 0x143a: 0x0003, 0x143b: 0x0003, + 0x143c: 0x000a, 0x143d: 0x009a, 0x143e: 0x008a, + // Block 0x51, offset 0x1440 + 0x1440: 0x0002, 0x1441: 0x0002, 0x1442: 0x0002, 0x1443: 0x0002, 0x1444: 0x0002, 0x1445: 0x0002, + 0x1446: 0x0002, 0x1447: 0x0002, 0x1448: 0x0002, 0x1449: 0x0002, 0x144a: 0x0003, 0x144b: 0x0003, + 0x144c: 0x000a, 0x144d: 0x009a, 0x144e: 0x008a, + 0x1460: 0x0004, 0x1461: 0x0004, 0x1462: 0x0004, 0x1463: 0x0004, + 0x1464: 0x0004, 0x1465: 0x0004, 0x1466: 0x0004, 0x1467: 0x0004, 0x1468: 0x0004, 0x1469: 0x0004, + 0x146a: 0x0004, 0x146b: 0x0004, 0x146c: 0x0004, 0x146d: 0x0004, 0x146e: 0x0004, 0x146f: 0x0004, + 0x1470: 0x0004, 0x1471: 0x0004, 0x1472: 0x0004, 0x1473: 0x0004, 0x1474: 0x0004, 0x1475: 0x0004, + 0x1476: 0x0004, 0x1477: 0x0004, 0x1478: 0x0004, 0x1479: 0x0004, 0x147a: 0x0004, 0x147b: 0x0004, + 0x147c: 0x0004, 0x147d: 0x0004, 0x147e: 0x0004, 0x147f: 0x0004, + // Block 0x52, offset 0x1480 + 0x1480: 0x0004, 0x1481: 0x0004, 0x1482: 0x0004, 0x1483: 0x0004, 0x1484: 0x0004, 0x1485: 0x0004, + 0x1486: 0x0004, 0x1487: 0x0004, 0x1488: 0x0004, 0x1489: 0x0004, 0x148a: 0x0004, 0x148b: 0x0004, + 0x148c: 0x0004, 0x148d: 0x0004, 0x148e: 0x0004, 0x148f: 0x0004, 0x1490: 0x000c, 0x1491: 0x000c, + 0x1492: 0x000c, 0x1493: 0x000c, 0x1494: 0x000c, 0x1495: 0x000c, 0x1496: 0x000c, 0x1497: 0x000c, + 0x1498: 0x000c, 0x1499: 0x000c, 0x149a: 0x000c, 0x149b: 0x000c, 0x149c: 0x000c, 0x149d: 0x000c, + 0x149e: 0x000c, 0x149f: 0x000c, 0x14a0: 0x000c, 0x14a1: 0x000c, 0x14a2: 0x000c, 0x14a3: 0x000c, + 0x14a4: 0x000c, 0x14a5: 0x000c, 0x14a6: 0x000c, 0x14a7: 0x000c, 0x14a8: 0x000c, 0x14a9: 0x000c, + 0x14aa: 0x000c, 0x14ab: 0x000c, 0x14ac: 0x000c, 0x14ad: 0x000c, 0x14ae: 0x000c, 0x14af: 0x000c, + 0x14b0: 0x000c, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x000a, 0x14c1: 0x000a, 0x14c3: 0x000a, 0x14c4: 0x000a, 0x14c5: 0x000a, + 0x14c6: 0x000a, 0x14c8: 0x000a, 0x14c9: 0x000a, + 0x14d4: 0x000a, 0x14d6: 0x000a, 0x14d7: 0x000a, + 0x14d8: 0x000a, + 0x14de: 0x000a, 0x14df: 0x000a, 0x14e0: 0x000a, 0x14e1: 0x000a, 0x14e2: 0x000a, 0x14e3: 0x000a, + 0x14e5: 0x000a, 0x14e7: 0x000a, 0x14e9: 0x000a, + 0x14ee: 0x0004, + 0x14fa: 0x000a, 0x14fb: 0x000a, + // Block 0x54, offset 0x1500 + 0x1500: 0x000a, 0x1501: 0x000a, 0x1502: 0x000a, 0x1503: 0x000a, 0x1504: 0x000a, + 0x150a: 0x000a, 0x150b: 0x000a, + 0x150c: 0x000a, 0x150d: 0x000a, 0x1510: 0x000a, 0x1511: 0x000a, + 0x1512: 0x000a, 0x1513: 0x000a, 0x1514: 0x000a, 0x1515: 0x000a, 0x1516: 0x000a, 0x1517: 0x000a, + 0x1518: 0x000a, 0x1519: 0x000a, 0x151a: 0x000a, 0x151b: 0x000a, 0x151c: 0x000a, 0x151d: 0x000a, + 0x151e: 0x000a, 0x151f: 0x000a, + // Block 0x55, offset 0x1540 + 0x1549: 0x000a, 0x154a: 0x000a, 0x154b: 0x000a, + 0x1550: 0x000a, 0x1551: 0x000a, + 0x1552: 0x000a, 0x1553: 0x000a, 0x1554: 0x000a, 0x1555: 0x000a, 0x1556: 0x000a, 0x1557: 0x000a, + 0x1558: 0x000a, 0x1559: 0x000a, 0x155a: 0x000a, 0x155b: 0x000a, 0x155c: 0x000a, 0x155d: 0x000a, + 0x155e: 0x000a, 0x155f: 0x000a, 0x1560: 0x000a, 0x1561: 0x000a, 0x1562: 0x000a, 0x1563: 0x000a, + 0x1564: 0x000a, 0x1565: 0x000a, 0x1566: 0x000a, 0x1567: 0x000a, 0x1568: 0x000a, 0x1569: 0x000a, + 0x156a: 0x000a, 0x156b: 0x000a, 0x156c: 0x000a, 0x156d: 0x000a, 0x156e: 0x000a, 0x156f: 0x000a, + 0x1570: 0x000a, 0x1571: 0x000a, 0x1572: 0x000a, 0x1573: 0x000a, 0x1574: 0x000a, 0x1575: 0x000a, + 0x1576: 0x000a, 0x1577: 0x000a, 0x1578: 0x000a, 0x1579: 0x000a, 0x157a: 0x000a, 0x157b: 0x000a, + 0x157c: 0x000a, 0x157d: 0x000a, 
0x157e: 0x000a, 0x157f: 0x000a, + // Block 0x56, offset 0x1580 + 0x1580: 0x000a, 0x1581: 0x000a, 0x1582: 0x000a, 0x1583: 0x000a, 0x1584: 0x000a, 0x1585: 0x000a, + 0x1586: 0x000a, 0x1587: 0x000a, 0x1588: 0x000a, 0x1589: 0x000a, 0x158a: 0x000a, 0x158b: 0x000a, + 0x158c: 0x000a, 0x158d: 0x000a, 0x158e: 0x000a, 0x158f: 0x000a, 0x1590: 0x000a, 0x1591: 0x000a, + 0x1592: 0x000a, 0x1593: 0x000a, 0x1594: 0x000a, 0x1595: 0x000a, 0x1596: 0x000a, 0x1597: 0x000a, + 0x1598: 0x000a, 0x1599: 0x000a, 0x159a: 0x000a, 0x159b: 0x000a, 0x159c: 0x000a, 0x159d: 0x000a, + 0x159e: 0x000a, 0x159f: 0x000a, 0x15a0: 0x000a, 0x15a1: 0x000a, 0x15a2: 0x000a, 0x15a3: 0x000a, + 0x15a4: 0x000a, 0x15a5: 0x000a, 0x15a6: 0x000a, 0x15a7: 0x000a, 0x15a8: 0x000a, 0x15a9: 0x000a, + 0x15aa: 0x000a, 0x15ab: 0x000a, 0x15ac: 0x000a, 0x15ad: 0x000a, 0x15ae: 0x000a, 0x15af: 0x000a, + 0x15b0: 0x000a, 0x15b1: 0x000a, 0x15b2: 0x000a, 0x15b3: 0x000a, 0x15b4: 0x000a, 0x15b5: 0x000a, + 0x15b6: 0x000a, 0x15b7: 0x000a, 0x15b8: 0x000a, 0x15b9: 0x000a, 0x15ba: 0x000a, 0x15bb: 0x000a, + 0x15bc: 0x000a, 0x15bd: 0x000a, 0x15be: 0x000a, 0x15bf: 0x000a, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x000a, 0x15c1: 0x000a, 0x15c2: 0x000a, 0x15c3: 0x000a, 0x15c4: 0x000a, 0x15c5: 0x000a, + 0x15c6: 0x000a, 0x15c7: 0x000a, 0x15c8: 0x000a, 0x15c9: 0x000a, 0x15ca: 0x000a, 0x15cb: 0x000a, + 0x15cc: 0x000a, 0x15cd: 0x000a, 0x15ce: 0x000a, 0x15cf: 0x000a, 0x15d0: 0x000a, 0x15d1: 0x000a, + 0x15d2: 0x0003, 0x15d3: 0x0004, 0x15d4: 0x000a, 0x15d5: 0x000a, 0x15d6: 0x000a, 0x15d7: 0x000a, + 0x15d8: 0x000a, 0x15d9: 0x000a, 0x15da: 0x000a, 0x15db: 0x000a, 0x15dc: 0x000a, 0x15dd: 0x000a, + 0x15de: 0x000a, 0x15df: 0x000a, 0x15e0: 0x000a, 0x15e1: 0x000a, 0x15e2: 0x000a, 0x15e3: 0x000a, + 0x15e4: 0x000a, 0x15e5: 0x000a, 0x15e6: 0x000a, 0x15e7: 0x000a, 0x15e8: 0x000a, 0x15e9: 0x000a, + 0x15ea: 0x000a, 0x15eb: 0x000a, 0x15ec: 0x000a, 0x15ed: 0x000a, 0x15ee: 0x000a, 0x15ef: 0x000a, + 0x15f0: 0x000a, 0x15f1: 0x000a, 0x15f2: 0x000a, 0x15f3: 0x000a, 0x15f4: 0x000a, 0x15f5: 0x000a, + 0x15f6: 0x000a, 0x15f7: 0x000a, 0x15f8: 0x000a, 0x15f9: 0x000a, 0x15fa: 0x000a, 0x15fb: 0x000a, + 0x15fc: 0x000a, 0x15fd: 0x000a, 0x15fe: 0x000a, 0x15ff: 0x000a, + // Block 0x58, offset 0x1600 + 0x1600: 0x000a, 0x1601: 0x000a, 0x1602: 0x000a, 0x1603: 0x000a, 0x1604: 0x000a, 0x1605: 0x000a, + 0x1606: 0x000a, 0x1607: 0x000a, 0x1608: 0x003a, 0x1609: 0x002a, 0x160a: 0x003a, 0x160b: 0x002a, + 0x160c: 0x000a, 0x160d: 0x000a, 0x160e: 0x000a, 0x160f: 0x000a, 0x1610: 0x000a, 0x1611: 0x000a, + 0x1612: 0x000a, 0x1613: 0x000a, 0x1614: 0x000a, 0x1615: 0x000a, 0x1616: 0x000a, 0x1617: 0x000a, + 0x1618: 0x000a, 0x1619: 0x000a, 0x161a: 0x000a, 0x161b: 0x000a, 0x161c: 0x000a, 0x161d: 0x000a, + 0x161e: 0x000a, 0x161f: 0x000a, 0x1620: 0x000a, 0x1621: 0x000a, 0x1622: 0x000a, 0x1623: 0x000a, + 0x1624: 0x000a, 0x1625: 0x000a, 0x1626: 0x000a, 0x1627: 0x000a, 0x1628: 0x000a, 0x1629: 0x009a, + 0x162a: 0x008a, 0x162b: 0x000a, 0x162c: 0x000a, 0x162d: 0x000a, 0x162e: 0x000a, 0x162f: 0x000a, + 0x1630: 0x000a, 0x1631: 0x000a, 0x1632: 0x000a, 0x1633: 0x000a, 0x1634: 0x000a, 0x1635: 0x000a, + // Block 0x59, offset 0x1640 + 0x167b: 0x000a, + 0x167c: 0x000a, 0x167d: 0x000a, 0x167e: 0x000a, 0x167f: 0x000a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x000a, 0x1681: 0x000a, 0x1682: 0x000a, 0x1683: 0x000a, 0x1684: 0x000a, 0x1685: 0x000a, + 0x1686: 0x000a, 0x1687: 0x000a, 0x1688: 0x000a, 0x1689: 0x000a, 0x168a: 0x000a, 0x168b: 0x000a, + 0x168c: 0x000a, 0x168d: 0x000a, 0x168e: 0x000a, 0x168f: 0x000a, 0x1690: 0x000a, 0x1691: 0x000a, + 0x1692: 
0x000a, 0x1693: 0x000a, 0x1694: 0x000a, 0x1696: 0x000a, 0x1697: 0x000a, + 0x1698: 0x000a, 0x1699: 0x000a, 0x169a: 0x000a, 0x169b: 0x000a, 0x169c: 0x000a, 0x169d: 0x000a, + 0x169e: 0x000a, 0x169f: 0x000a, 0x16a0: 0x000a, 0x16a1: 0x000a, 0x16a2: 0x000a, 0x16a3: 0x000a, + 0x16a4: 0x000a, 0x16a5: 0x000a, 0x16a6: 0x000a, 0x16a7: 0x000a, 0x16a8: 0x000a, 0x16a9: 0x000a, + 0x16aa: 0x000a, 0x16ab: 0x000a, 0x16ac: 0x000a, 0x16ad: 0x000a, 0x16ae: 0x000a, 0x16af: 0x000a, + 0x16b0: 0x000a, 0x16b1: 0x000a, 0x16b2: 0x000a, 0x16b3: 0x000a, 0x16b4: 0x000a, 0x16b5: 0x000a, + 0x16b6: 0x000a, 0x16b7: 0x000a, 0x16b8: 0x000a, 0x16b9: 0x000a, 0x16ba: 0x000a, 0x16bb: 0x000a, + 0x16bc: 0x000a, 0x16bd: 0x000a, 0x16be: 0x000a, 0x16bf: 0x000a, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x000a, 0x16c1: 0x000a, 0x16c2: 0x000a, 0x16c3: 0x000a, 0x16c4: 0x000a, 0x16c5: 0x000a, + 0x16c6: 0x000a, 0x16c7: 0x000a, 0x16c8: 0x000a, 0x16c9: 0x000a, 0x16ca: 0x000a, 0x16cb: 0x000a, + 0x16cc: 0x000a, 0x16cd: 0x000a, 0x16ce: 0x000a, 0x16cf: 0x000a, 0x16d0: 0x000a, 0x16d1: 0x000a, + 0x16d2: 0x000a, 0x16d3: 0x000a, 0x16d4: 0x000a, 0x16d5: 0x000a, 0x16d6: 0x000a, 0x16d7: 0x000a, + 0x16d8: 0x000a, 0x16d9: 0x000a, 0x16da: 0x000a, 0x16db: 0x000a, 0x16dc: 0x000a, 0x16dd: 0x000a, + 0x16de: 0x000a, 0x16df: 0x000a, 0x16e0: 0x000a, 0x16e1: 0x000a, 0x16e2: 0x000a, 0x16e3: 0x000a, + 0x16e4: 0x000a, 0x16e5: 0x000a, 0x16e6: 0x000a, 0x16e7: 0x000a, 0x16e8: 0x000a, 0x16e9: 0x000a, + 0x16ea: 0x000a, 0x16eb: 0x000a, 0x16ec: 0x000a, 0x16ed: 0x000a, 0x16ee: 0x000a, 0x16ef: 0x000a, + 0x16f0: 0x000a, 0x16f1: 0x000a, 0x16f2: 0x000a, 0x16f3: 0x000a, 0x16f4: 0x000a, 0x16f5: 0x000a, + 0x16f6: 0x000a, 0x16f7: 0x000a, 0x16f8: 0x000a, 0x16f9: 0x000a, 0x16fa: 0x000a, 0x16fb: 0x000a, + 0x16fc: 0x000a, 0x16fd: 0x000a, 0x16fe: 0x000a, + // Block 0x5c, offset 0x1700 + 0x1700: 0x000a, 0x1701: 0x000a, 0x1702: 0x000a, 0x1703: 0x000a, 0x1704: 0x000a, 0x1705: 0x000a, + 0x1706: 0x000a, 0x1707: 0x000a, 0x1708: 0x000a, 0x1709: 0x000a, 0x170a: 0x000a, 0x170b: 0x000a, + 0x170c: 0x000a, 0x170d: 0x000a, 0x170e: 0x000a, 0x170f: 0x000a, 0x1710: 0x000a, 0x1711: 0x000a, + 0x1712: 0x000a, 0x1713: 0x000a, 0x1714: 0x000a, 0x1715: 0x000a, 0x1716: 0x000a, 0x1717: 0x000a, + 0x1718: 0x000a, 0x1719: 0x000a, 0x171a: 0x000a, 0x171b: 0x000a, 0x171c: 0x000a, 0x171d: 0x000a, + 0x171e: 0x000a, 0x171f: 0x000a, 0x1720: 0x000a, 0x1721: 0x000a, 0x1722: 0x000a, 0x1723: 0x000a, + 0x1724: 0x000a, 0x1725: 0x000a, 0x1726: 0x000a, + // Block 0x5d, offset 0x1740 + 0x1740: 0x000a, 0x1741: 0x000a, 0x1742: 0x000a, 0x1743: 0x000a, 0x1744: 0x000a, 0x1745: 0x000a, + 0x1746: 0x000a, 0x1747: 0x000a, 0x1748: 0x000a, 0x1749: 0x000a, 0x174a: 0x000a, + 0x1760: 0x000a, 0x1761: 0x000a, 0x1762: 0x000a, 0x1763: 0x000a, + 0x1764: 0x000a, 0x1765: 0x000a, 0x1766: 0x000a, 0x1767: 0x000a, 0x1768: 0x000a, 0x1769: 0x000a, + 0x176a: 0x000a, 0x176b: 0x000a, 0x176c: 0x000a, 0x176d: 0x000a, 0x176e: 0x000a, 0x176f: 0x000a, + 0x1770: 0x000a, 0x1771: 0x000a, 0x1772: 0x000a, 0x1773: 0x000a, 0x1774: 0x000a, 0x1775: 0x000a, + 0x1776: 0x000a, 0x1777: 0x000a, 0x1778: 0x000a, 0x1779: 0x000a, 0x177a: 0x000a, 0x177b: 0x000a, + 0x177c: 0x000a, 0x177d: 0x000a, 0x177e: 0x000a, 0x177f: 0x000a, + // Block 0x5e, offset 0x1780 + 0x1780: 0x000a, 0x1781: 0x000a, 0x1782: 0x000a, 0x1783: 0x000a, 0x1784: 0x000a, 0x1785: 0x000a, + 0x1786: 0x000a, 0x1787: 0x000a, 0x1788: 0x0002, 0x1789: 0x0002, 0x178a: 0x0002, 0x178b: 0x0002, + 0x178c: 0x0002, 0x178d: 0x0002, 0x178e: 0x0002, 0x178f: 0x0002, 0x1790: 0x0002, 0x1791: 0x0002, + 0x1792: 0x0002, 0x1793: 0x0002, 
0x1794: 0x0002, 0x1795: 0x0002, 0x1796: 0x0002, 0x1797: 0x0002, + 0x1798: 0x0002, 0x1799: 0x0002, 0x179a: 0x0002, 0x179b: 0x0002, + // Block 0x5f, offset 0x17c0 + 0x17ea: 0x000a, 0x17eb: 0x000a, 0x17ec: 0x000a, 0x17ed: 0x000a, 0x17ee: 0x000a, 0x17ef: 0x000a, + 0x17f0: 0x000a, 0x17f1: 0x000a, 0x17f2: 0x000a, 0x17f3: 0x000a, 0x17f4: 0x000a, 0x17f5: 0x000a, + 0x17f6: 0x000a, 0x17f7: 0x000a, 0x17f8: 0x000a, 0x17f9: 0x000a, 0x17fa: 0x000a, 0x17fb: 0x000a, + 0x17fc: 0x000a, 0x17fd: 0x000a, 0x17fe: 0x000a, 0x17ff: 0x000a, + // Block 0x60, offset 0x1800 + 0x1800: 0x000a, 0x1801: 0x000a, 0x1802: 0x000a, 0x1803: 0x000a, 0x1804: 0x000a, 0x1805: 0x000a, + 0x1806: 0x000a, 0x1807: 0x000a, 0x1808: 0x000a, 0x1809: 0x000a, 0x180a: 0x000a, 0x180b: 0x000a, + 0x180c: 0x000a, 0x180d: 0x000a, 0x180e: 0x000a, 0x180f: 0x000a, 0x1810: 0x000a, 0x1811: 0x000a, + 0x1812: 0x000a, 0x1813: 0x000a, 0x1814: 0x000a, 0x1815: 0x000a, 0x1816: 0x000a, 0x1817: 0x000a, + 0x1818: 0x000a, 0x1819: 0x000a, 0x181a: 0x000a, 0x181b: 0x000a, 0x181c: 0x000a, 0x181d: 0x000a, + 0x181e: 0x000a, 0x181f: 0x000a, 0x1820: 0x000a, 0x1821: 0x000a, 0x1822: 0x000a, 0x1823: 0x000a, + 0x1824: 0x000a, 0x1825: 0x000a, 0x1826: 0x000a, 0x1827: 0x000a, 0x1828: 0x000a, 0x1829: 0x000a, + 0x182a: 0x000a, 0x182b: 0x000a, 0x182d: 0x000a, 0x182e: 0x000a, 0x182f: 0x000a, + 0x1830: 0x000a, 0x1831: 0x000a, 0x1832: 0x000a, 0x1833: 0x000a, 0x1834: 0x000a, 0x1835: 0x000a, + 0x1836: 0x000a, 0x1837: 0x000a, 0x1838: 0x000a, 0x1839: 0x000a, 0x183a: 0x000a, 0x183b: 0x000a, + 0x183c: 0x000a, 0x183d: 0x000a, 0x183e: 0x000a, 0x183f: 0x000a, + // Block 0x61, offset 0x1840 + 0x1840: 0x000a, 0x1841: 0x000a, 0x1842: 0x000a, 0x1843: 0x000a, 0x1844: 0x000a, 0x1845: 0x000a, + 0x1846: 0x000a, 0x1847: 0x000a, 0x1848: 0x000a, 0x1849: 0x000a, 0x184a: 0x000a, 0x184b: 0x000a, + 0x184c: 0x000a, 0x184d: 0x000a, 0x184e: 0x000a, 0x184f: 0x000a, 0x1850: 0x000a, 0x1851: 0x000a, + 0x1852: 0x000a, 0x1853: 0x000a, 0x1854: 0x000a, 0x1855: 0x000a, 0x1856: 0x000a, 0x1857: 0x000a, + 0x1858: 0x000a, 0x1859: 0x000a, 0x185a: 0x000a, 0x185b: 0x000a, 0x185c: 0x000a, 0x185d: 0x000a, + 0x185e: 0x000a, 0x185f: 0x000a, 0x1860: 0x000a, 0x1861: 0x000a, 0x1862: 0x000a, 0x1863: 0x000a, + 0x1864: 0x000a, 0x1865: 0x000a, 0x1866: 0x000a, 0x1867: 0x000a, 0x1868: 0x003a, 0x1869: 0x002a, + 0x186a: 0x003a, 0x186b: 0x002a, 0x186c: 0x003a, 0x186d: 0x002a, 0x186e: 0x003a, 0x186f: 0x002a, + 0x1870: 0x003a, 0x1871: 0x002a, 0x1872: 0x003a, 0x1873: 0x002a, 0x1874: 0x003a, 0x1875: 0x002a, + 0x1876: 0x000a, 0x1877: 0x000a, 0x1878: 0x000a, 0x1879: 0x000a, 0x187a: 0x000a, 0x187b: 0x000a, + 0x187c: 0x000a, 0x187d: 0x000a, 0x187e: 0x000a, 0x187f: 0x000a, + // Block 0x62, offset 0x1880 + 0x1880: 0x000a, 0x1881: 0x000a, 0x1882: 0x000a, 0x1883: 0x000a, 0x1884: 0x000a, 0x1885: 0x009a, + 0x1886: 0x008a, 0x1887: 0x000a, 0x1888: 0x000a, 0x1889: 0x000a, 0x188a: 0x000a, 0x188b: 0x000a, + 0x188c: 0x000a, 0x188d: 0x000a, 0x188e: 0x000a, 0x188f: 0x000a, 0x1890: 0x000a, 0x1891: 0x000a, + 0x1892: 0x000a, 0x1893: 0x000a, 0x1894: 0x000a, 0x1895: 0x000a, 0x1896: 0x000a, 0x1897: 0x000a, + 0x1898: 0x000a, 0x1899: 0x000a, 0x189a: 0x000a, 0x189b: 0x000a, 0x189c: 0x000a, 0x189d: 0x000a, + 0x189e: 0x000a, 0x189f: 0x000a, 0x18a0: 0x000a, 0x18a1: 0x000a, 0x18a2: 0x000a, 0x18a3: 0x000a, + 0x18a4: 0x000a, 0x18a5: 0x000a, 0x18a6: 0x003a, 0x18a7: 0x002a, 0x18a8: 0x003a, 0x18a9: 0x002a, + 0x18aa: 0x003a, 0x18ab: 0x002a, 0x18ac: 0x003a, 0x18ad: 0x002a, 0x18ae: 0x003a, 0x18af: 0x002a, + 0x18b0: 0x000a, 0x18b1: 0x000a, 0x18b2: 0x000a, 0x18b3: 0x000a, 0x18b4: 
0x000a, 0x18b5: 0x000a, + 0x18b6: 0x000a, 0x18b7: 0x000a, 0x18b8: 0x000a, 0x18b9: 0x000a, 0x18ba: 0x000a, 0x18bb: 0x000a, + 0x18bc: 0x000a, 0x18bd: 0x000a, 0x18be: 0x000a, 0x18bf: 0x000a, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x000a, 0x18c1: 0x000a, 0x18c2: 0x000a, 0x18c3: 0x007a, 0x18c4: 0x006a, 0x18c5: 0x009a, + 0x18c6: 0x008a, 0x18c7: 0x00ba, 0x18c8: 0x00aa, 0x18c9: 0x009a, 0x18ca: 0x008a, 0x18cb: 0x007a, + 0x18cc: 0x006a, 0x18cd: 0x00da, 0x18ce: 0x002a, 0x18cf: 0x003a, 0x18d0: 0x00ca, 0x18d1: 0x009a, + 0x18d2: 0x008a, 0x18d3: 0x007a, 0x18d4: 0x006a, 0x18d5: 0x009a, 0x18d6: 0x008a, 0x18d7: 0x00ba, + 0x18d8: 0x00aa, 0x18d9: 0x000a, 0x18da: 0x000a, 0x18db: 0x000a, 0x18dc: 0x000a, 0x18dd: 0x000a, + 0x18de: 0x000a, 0x18df: 0x000a, 0x18e0: 0x000a, 0x18e1: 0x000a, 0x18e2: 0x000a, 0x18e3: 0x000a, + 0x18e4: 0x000a, 0x18e5: 0x000a, 0x18e6: 0x000a, 0x18e7: 0x000a, 0x18e8: 0x000a, 0x18e9: 0x000a, + 0x18ea: 0x000a, 0x18eb: 0x000a, 0x18ec: 0x000a, 0x18ed: 0x000a, 0x18ee: 0x000a, 0x18ef: 0x000a, + 0x18f0: 0x000a, 0x18f1: 0x000a, 0x18f2: 0x000a, 0x18f3: 0x000a, 0x18f4: 0x000a, 0x18f5: 0x000a, + 0x18f6: 0x000a, 0x18f7: 0x000a, 0x18f8: 0x000a, 0x18f9: 0x000a, 0x18fa: 0x000a, 0x18fb: 0x000a, + 0x18fc: 0x000a, 0x18fd: 0x000a, 0x18fe: 0x000a, 0x18ff: 0x000a, + // Block 0x64, offset 0x1900 + 0x1900: 0x000a, 0x1901: 0x000a, 0x1902: 0x000a, 0x1903: 0x000a, 0x1904: 0x000a, 0x1905: 0x000a, + 0x1906: 0x000a, 0x1907: 0x000a, 0x1908: 0x000a, 0x1909: 0x000a, 0x190a: 0x000a, 0x190b: 0x000a, + 0x190c: 0x000a, 0x190d: 0x000a, 0x190e: 0x000a, 0x190f: 0x000a, 0x1910: 0x000a, 0x1911: 0x000a, + 0x1912: 0x000a, 0x1913: 0x000a, 0x1914: 0x000a, 0x1915: 0x000a, 0x1916: 0x000a, 0x1917: 0x000a, + 0x1918: 0x003a, 0x1919: 0x002a, 0x191a: 0x003a, 0x191b: 0x002a, 0x191c: 0x000a, 0x191d: 0x000a, + 0x191e: 0x000a, 0x191f: 0x000a, 0x1920: 0x000a, 0x1921: 0x000a, 0x1922: 0x000a, 0x1923: 0x000a, + 0x1924: 0x000a, 0x1925: 0x000a, 0x1926: 0x000a, 0x1927: 0x000a, 0x1928: 0x000a, 0x1929: 0x000a, + 0x192a: 0x000a, 0x192b: 0x000a, 0x192c: 0x000a, 0x192d: 0x000a, 0x192e: 0x000a, 0x192f: 0x000a, + 0x1930: 0x000a, 0x1931: 0x000a, 0x1932: 0x000a, 0x1933: 0x000a, 0x1934: 0x000a, 0x1935: 0x000a, + 0x1936: 0x000a, 0x1937: 0x000a, 0x1938: 0x000a, 0x1939: 0x000a, 0x193a: 0x000a, 0x193b: 0x000a, + 0x193c: 0x003a, 0x193d: 0x002a, 0x193e: 0x000a, 0x193f: 0x000a, + // Block 0x65, offset 0x1940 + 0x1940: 0x000a, 0x1941: 0x000a, 0x1942: 0x000a, 0x1943: 0x000a, 0x1944: 0x000a, 0x1945: 0x000a, + 0x1946: 0x000a, 0x1947: 0x000a, 0x1948: 0x000a, 0x1949: 0x000a, 0x194a: 0x000a, 0x194b: 0x000a, + 0x194c: 0x000a, 0x194d: 0x000a, 0x194e: 0x000a, 0x194f: 0x000a, 0x1950: 0x000a, 0x1951: 0x000a, + 0x1952: 0x000a, 0x1953: 0x000a, 0x1954: 0x000a, 0x1955: 0x000a, 0x1956: 0x000a, 0x1957: 0x000a, + 0x1958: 0x000a, 0x1959: 0x000a, 0x195a: 0x000a, 0x195b: 0x000a, 0x195c: 0x000a, 0x195d: 0x000a, + 0x195e: 0x000a, 0x195f: 0x000a, 0x1960: 0x000a, 0x1961: 0x000a, 0x1962: 0x000a, 0x1963: 0x000a, + 0x1964: 0x000a, 0x1965: 0x000a, 0x1966: 0x000a, 0x1967: 0x000a, 0x1968: 0x000a, 0x1969: 0x000a, + 0x196a: 0x000a, 0x196b: 0x000a, 0x196c: 0x000a, 0x196d: 0x000a, 0x196e: 0x000a, 0x196f: 0x000a, + 0x1970: 0x000a, 0x1971: 0x000a, 0x1972: 0x000a, 0x1973: 0x000a, + 0x1976: 0x000a, 0x1977: 0x000a, 0x1978: 0x000a, 0x1979: 0x000a, 0x197a: 0x000a, 0x197b: 0x000a, + 0x197c: 0x000a, 0x197d: 0x000a, 0x197e: 0x000a, 0x197f: 0x000a, + // Block 0x66, offset 0x1980 + 0x1980: 0x000a, 0x1981: 0x000a, 0x1982: 0x000a, 0x1983: 0x000a, 0x1984: 0x000a, 0x1985: 0x000a, + 0x1986: 0x000a, 0x1987: 0x000a, 
0x1988: 0x000a, 0x1989: 0x000a, 0x198a: 0x000a, 0x198b: 0x000a, + 0x198c: 0x000a, 0x198d: 0x000a, 0x198e: 0x000a, 0x198f: 0x000a, 0x1990: 0x000a, 0x1991: 0x000a, + 0x1992: 0x000a, 0x1993: 0x000a, 0x1994: 0x000a, 0x1995: 0x000a, + 0x1998: 0x000a, 0x1999: 0x000a, 0x199a: 0x000a, 0x199b: 0x000a, 0x199c: 0x000a, 0x199d: 0x000a, + 0x199e: 0x000a, 0x199f: 0x000a, 0x19a0: 0x000a, 0x19a1: 0x000a, 0x19a2: 0x000a, 0x19a3: 0x000a, + 0x19a4: 0x000a, 0x19a5: 0x000a, 0x19a6: 0x000a, 0x19a7: 0x000a, 0x19a8: 0x000a, 0x19a9: 0x000a, + 0x19aa: 0x000a, 0x19ab: 0x000a, 0x19ac: 0x000a, 0x19ad: 0x000a, 0x19ae: 0x000a, 0x19af: 0x000a, + 0x19b0: 0x000a, 0x19b1: 0x000a, 0x19b2: 0x000a, 0x19b3: 0x000a, 0x19b4: 0x000a, 0x19b5: 0x000a, + 0x19b6: 0x000a, 0x19b7: 0x000a, 0x19b8: 0x000a, 0x19b9: 0x000a, + 0x19bd: 0x000a, 0x19be: 0x000a, 0x19bf: 0x000a, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x000a, 0x19c1: 0x000a, 0x19c2: 0x000a, 0x19c3: 0x000a, 0x19c4: 0x000a, 0x19c5: 0x000a, + 0x19c6: 0x000a, 0x19c7: 0x000a, 0x19c8: 0x000a, 0x19ca: 0x000a, 0x19cb: 0x000a, + 0x19cc: 0x000a, 0x19cd: 0x000a, 0x19ce: 0x000a, 0x19cf: 0x000a, 0x19d0: 0x000a, 0x19d1: 0x000a, + 0x19ec: 0x000a, 0x19ed: 0x000a, 0x19ee: 0x000a, 0x19ef: 0x000a, + // Block 0x68, offset 0x1a00 + 0x1a25: 0x000a, 0x1a26: 0x000a, 0x1a27: 0x000a, 0x1a28: 0x000a, 0x1a29: 0x000a, + 0x1a2a: 0x000a, 0x1a2f: 0x000c, + 0x1a30: 0x000c, 0x1a31: 0x000c, + 0x1a39: 0x000a, 0x1a3a: 0x000a, 0x1a3b: 0x000a, + 0x1a3c: 0x000a, 0x1a3d: 0x000a, 0x1a3e: 0x000a, 0x1a3f: 0x000a, + // Block 0x69, offset 0x1a40 + 0x1a7f: 0x000c, + // Block 0x6a, offset 0x1a80 + 0x1aa0: 0x000c, 0x1aa1: 0x000c, 0x1aa2: 0x000c, 0x1aa3: 0x000c, + 0x1aa4: 0x000c, 0x1aa5: 0x000c, 0x1aa6: 0x000c, 0x1aa7: 0x000c, 0x1aa8: 0x000c, 0x1aa9: 0x000c, + 0x1aaa: 0x000c, 0x1aab: 0x000c, 0x1aac: 0x000c, 0x1aad: 0x000c, 0x1aae: 0x000c, 0x1aaf: 0x000c, + 0x1ab0: 0x000c, 0x1ab1: 0x000c, 0x1ab2: 0x000c, 0x1ab3: 0x000c, 0x1ab4: 0x000c, 0x1ab5: 0x000c, + 0x1ab6: 0x000c, 0x1ab7: 0x000c, 0x1ab8: 0x000c, 0x1ab9: 0x000c, 0x1aba: 0x000c, 0x1abb: 0x000c, + 0x1abc: 0x000c, 0x1abd: 0x000c, 0x1abe: 0x000c, 0x1abf: 0x000c, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x000a, 0x1ac1: 0x000a, 0x1ac2: 0x000a, 0x1ac3: 0x000a, 0x1ac4: 0x000a, 0x1ac5: 0x000a, + 0x1ac6: 0x000a, 0x1ac7: 0x000a, 0x1ac8: 0x000a, 0x1ac9: 0x000a, 0x1aca: 0x000a, 0x1acb: 0x000a, + 0x1acc: 0x000a, 0x1acd: 0x000a, 0x1ace: 0x000a, 0x1acf: 0x000a, 0x1ad0: 0x000a, 0x1ad1: 0x000a, + 0x1ad2: 0x000a, 0x1ad3: 0x000a, 0x1ad4: 0x000a, 0x1ad5: 0x000a, 0x1ad6: 0x000a, 0x1ad7: 0x000a, + 0x1ad8: 0x000a, 0x1ad9: 0x000a, 0x1ada: 0x000a, 0x1adb: 0x000a, 0x1adc: 0x000a, 0x1add: 0x000a, + 0x1ade: 0x000a, 0x1adf: 0x000a, 0x1ae0: 0x000a, 0x1ae1: 0x000a, 0x1ae2: 0x003a, 0x1ae3: 0x002a, + 0x1ae4: 0x003a, 0x1ae5: 0x002a, 0x1ae6: 0x003a, 0x1ae7: 0x002a, 0x1ae8: 0x003a, 0x1ae9: 0x002a, + 0x1aea: 0x000a, 0x1aeb: 0x000a, 0x1aec: 0x000a, 0x1aed: 0x000a, 0x1aee: 0x000a, 0x1aef: 0x000a, + 0x1af0: 0x000a, 0x1af1: 0x000a, 0x1af2: 0x000a, 0x1af3: 0x000a, 0x1af4: 0x000a, 0x1af5: 0x000a, + 0x1af6: 0x000a, 0x1af7: 0x000a, 0x1af8: 0x000a, 0x1af9: 0x000a, 0x1afa: 0x000a, 0x1afb: 0x000a, + 0x1afc: 0x000a, 0x1afd: 0x000a, 0x1afe: 0x000a, 0x1aff: 0x000a, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x000a, 0x1b01: 0x000a, 0x1b02: 0x000a, 0x1b03: 0x000a, 0x1b04: 0x000a, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x000a, 0x1b41: 0x000a, 0x1b42: 0x000a, 0x1b43: 0x000a, 0x1b44: 0x000a, 0x1b45: 0x000a, + 0x1b46: 0x000a, 0x1b47: 0x000a, 0x1b48: 0x000a, 0x1b49: 0x000a, 0x1b4a: 0x000a, 0x1b4b: 0x000a, + 0x1b4c: 0x000a, 0x1b4d: 
0x000a, 0x1b4e: 0x000a, 0x1b4f: 0x000a, 0x1b50: 0x000a, 0x1b51: 0x000a, + 0x1b52: 0x000a, 0x1b53: 0x000a, 0x1b54: 0x000a, 0x1b55: 0x000a, 0x1b56: 0x000a, 0x1b57: 0x000a, + 0x1b58: 0x000a, 0x1b59: 0x000a, 0x1b5b: 0x000a, 0x1b5c: 0x000a, 0x1b5d: 0x000a, + 0x1b5e: 0x000a, 0x1b5f: 0x000a, 0x1b60: 0x000a, 0x1b61: 0x000a, 0x1b62: 0x000a, 0x1b63: 0x000a, + 0x1b64: 0x000a, 0x1b65: 0x000a, 0x1b66: 0x000a, 0x1b67: 0x000a, 0x1b68: 0x000a, 0x1b69: 0x000a, + 0x1b6a: 0x000a, 0x1b6b: 0x000a, 0x1b6c: 0x000a, 0x1b6d: 0x000a, 0x1b6e: 0x000a, 0x1b6f: 0x000a, + 0x1b70: 0x000a, 0x1b71: 0x000a, 0x1b72: 0x000a, 0x1b73: 0x000a, 0x1b74: 0x000a, 0x1b75: 0x000a, + 0x1b76: 0x000a, 0x1b77: 0x000a, 0x1b78: 0x000a, 0x1b79: 0x000a, 0x1b7a: 0x000a, 0x1b7b: 0x000a, + 0x1b7c: 0x000a, 0x1b7d: 0x000a, 0x1b7e: 0x000a, 0x1b7f: 0x000a, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x000a, 0x1b81: 0x000a, 0x1b82: 0x000a, 0x1b83: 0x000a, 0x1b84: 0x000a, 0x1b85: 0x000a, + 0x1b86: 0x000a, 0x1b87: 0x000a, 0x1b88: 0x000a, 0x1b89: 0x000a, 0x1b8a: 0x000a, 0x1b8b: 0x000a, + 0x1b8c: 0x000a, 0x1b8d: 0x000a, 0x1b8e: 0x000a, 0x1b8f: 0x000a, 0x1b90: 0x000a, 0x1b91: 0x000a, + 0x1b92: 0x000a, 0x1b93: 0x000a, 0x1b94: 0x000a, 0x1b95: 0x000a, 0x1b96: 0x000a, 0x1b97: 0x000a, + 0x1b98: 0x000a, 0x1b99: 0x000a, 0x1b9a: 0x000a, 0x1b9b: 0x000a, 0x1b9c: 0x000a, 0x1b9d: 0x000a, + 0x1b9e: 0x000a, 0x1b9f: 0x000a, 0x1ba0: 0x000a, 0x1ba1: 0x000a, 0x1ba2: 0x000a, 0x1ba3: 0x000a, + 0x1ba4: 0x000a, 0x1ba5: 0x000a, 0x1ba6: 0x000a, 0x1ba7: 0x000a, 0x1ba8: 0x000a, 0x1ba9: 0x000a, + 0x1baa: 0x000a, 0x1bab: 0x000a, 0x1bac: 0x000a, 0x1bad: 0x000a, 0x1bae: 0x000a, 0x1baf: 0x000a, + 0x1bb0: 0x000a, 0x1bb1: 0x000a, 0x1bb2: 0x000a, 0x1bb3: 0x000a, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x000a, 0x1bc1: 0x000a, 0x1bc2: 0x000a, 0x1bc3: 0x000a, 0x1bc4: 0x000a, 0x1bc5: 0x000a, + 0x1bc6: 0x000a, 0x1bc7: 0x000a, 0x1bc8: 0x000a, 0x1bc9: 0x000a, 0x1bca: 0x000a, 0x1bcb: 0x000a, + 0x1bcc: 0x000a, 0x1bcd: 0x000a, 0x1bce: 0x000a, 0x1bcf: 0x000a, 0x1bd0: 0x000a, 0x1bd1: 0x000a, + 0x1bd2: 0x000a, 0x1bd3: 0x000a, 0x1bd4: 0x000a, 0x1bd5: 0x000a, + 0x1bf0: 0x000a, 0x1bf1: 0x000a, 0x1bf2: 0x000a, 0x1bf3: 0x000a, 0x1bf4: 0x000a, 0x1bf5: 0x000a, + 0x1bf6: 0x000a, 0x1bf7: 0x000a, 0x1bf8: 0x000a, 0x1bf9: 0x000a, 0x1bfa: 0x000a, 0x1bfb: 0x000a, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x0009, 0x1c01: 0x000a, 0x1c02: 0x000a, 0x1c03: 0x000a, 0x1c04: 0x000a, + 0x1c08: 0x003a, 0x1c09: 0x002a, 0x1c0a: 0x003a, 0x1c0b: 0x002a, + 0x1c0c: 0x003a, 0x1c0d: 0x002a, 0x1c0e: 0x003a, 0x1c0f: 0x002a, 0x1c10: 0x003a, 0x1c11: 0x002a, + 0x1c12: 0x000a, 0x1c13: 0x000a, 0x1c14: 0x003a, 0x1c15: 0x002a, 0x1c16: 0x003a, 0x1c17: 0x002a, + 0x1c18: 0x003a, 0x1c19: 0x002a, 0x1c1a: 0x003a, 0x1c1b: 0x002a, 0x1c1c: 0x000a, 0x1c1d: 0x000a, + 0x1c1e: 0x000a, 0x1c1f: 0x000a, 0x1c20: 0x000a, + 0x1c2a: 0x000c, 0x1c2b: 0x000c, 0x1c2c: 0x000c, 0x1c2d: 0x000c, + 0x1c30: 0x000a, + 0x1c36: 0x000a, 0x1c37: 0x000a, + 0x1c3d: 0x000a, 0x1c3e: 0x000a, 0x1c3f: 0x000a, + // Block 0x71, offset 0x1c40 + 0x1c59: 0x000c, 0x1c5a: 0x000c, 0x1c5b: 0x000a, 0x1c5c: 0x000a, + 0x1c60: 0x000a, + // Block 0x72, offset 0x1c80 + 0x1cbb: 0x000a, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x000a, 0x1cc1: 0x000a, 0x1cc2: 0x000a, 0x1cc3: 0x000a, 0x1cc4: 0x000a, 0x1cc5: 0x000a, + 0x1cc6: 0x000a, 0x1cc7: 0x000a, 0x1cc8: 0x000a, 0x1cc9: 0x000a, 0x1cca: 0x000a, 0x1ccb: 0x000a, + 0x1ccc: 0x000a, 0x1ccd: 0x000a, 0x1cce: 0x000a, 0x1ccf: 0x000a, 0x1cd0: 0x000a, 0x1cd1: 0x000a, + 0x1cd2: 0x000a, 0x1cd3: 0x000a, 0x1cd4: 0x000a, 0x1cd5: 0x000a, 0x1cd6: 0x000a, 0x1cd7: 0x000a, 
+ 0x1cd8: 0x000a, 0x1cd9: 0x000a, 0x1cda: 0x000a, 0x1cdb: 0x000a, 0x1cdc: 0x000a, 0x1cdd: 0x000a, + 0x1cde: 0x000a, 0x1cdf: 0x000a, 0x1ce0: 0x000a, 0x1ce1: 0x000a, 0x1ce2: 0x000a, 0x1ce3: 0x000a, + // Block 0x74, offset 0x1d00 + 0x1d1d: 0x000a, + 0x1d1e: 0x000a, + // Block 0x75, offset 0x1d40 + 0x1d50: 0x000a, 0x1d51: 0x000a, + 0x1d52: 0x000a, 0x1d53: 0x000a, 0x1d54: 0x000a, 0x1d55: 0x000a, 0x1d56: 0x000a, 0x1d57: 0x000a, + 0x1d58: 0x000a, 0x1d59: 0x000a, 0x1d5a: 0x000a, 0x1d5b: 0x000a, 0x1d5c: 0x000a, 0x1d5d: 0x000a, + 0x1d5e: 0x000a, 0x1d5f: 0x000a, + 0x1d7c: 0x000a, 0x1d7d: 0x000a, 0x1d7e: 0x000a, + // Block 0x76, offset 0x1d80 + 0x1db1: 0x000a, 0x1db2: 0x000a, 0x1db3: 0x000a, 0x1db4: 0x000a, 0x1db5: 0x000a, + 0x1db6: 0x000a, 0x1db7: 0x000a, 0x1db8: 0x000a, 0x1db9: 0x000a, 0x1dba: 0x000a, 0x1dbb: 0x000a, + 0x1dbc: 0x000a, 0x1dbd: 0x000a, 0x1dbe: 0x000a, 0x1dbf: 0x000a, + // Block 0x77, offset 0x1dc0 + 0x1dcc: 0x000a, 0x1dcd: 0x000a, 0x1dce: 0x000a, 0x1dcf: 0x000a, + // Block 0x78, offset 0x1e00 + 0x1e37: 0x000a, 0x1e38: 0x000a, 0x1e39: 0x000a, 0x1e3a: 0x000a, + // Block 0x79, offset 0x1e40 + 0x1e5e: 0x000a, 0x1e5f: 0x000a, + 0x1e7f: 0x000a, + // Block 0x7a, offset 0x1e80 + 0x1e90: 0x000a, 0x1e91: 0x000a, + 0x1e92: 0x000a, 0x1e93: 0x000a, 0x1e94: 0x000a, 0x1e95: 0x000a, 0x1e96: 0x000a, 0x1e97: 0x000a, + 0x1e98: 0x000a, 0x1e99: 0x000a, 0x1e9a: 0x000a, 0x1e9b: 0x000a, 0x1e9c: 0x000a, 0x1e9d: 0x000a, + 0x1e9e: 0x000a, 0x1e9f: 0x000a, 0x1ea0: 0x000a, 0x1ea1: 0x000a, 0x1ea2: 0x000a, 0x1ea3: 0x000a, + 0x1ea4: 0x000a, 0x1ea5: 0x000a, 0x1ea6: 0x000a, 0x1ea7: 0x000a, 0x1ea8: 0x000a, 0x1ea9: 0x000a, + 0x1eaa: 0x000a, 0x1eab: 0x000a, 0x1eac: 0x000a, 0x1ead: 0x000a, 0x1eae: 0x000a, 0x1eaf: 0x000a, + 0x1eb0: 0x000a, 0x1eb1: 0x000a, 0x1eb2: 0x000a, 0x1eb3: 0x000a, 0x1eb4: 0x000a, 0x1eb5: 0x000a, + 0x1eb6: 0x000a, 0x1eb7: 0x000a, 0x1eb8: 0x000a, 0x1eb9: 0x000a, 0x1eba: 0x000a, 0x1ebb: 0x000a, + 0x1ebc: 0x000a, 0x1ebd: 0x000a, 0x1ebe: 0x000a, 0x1ebf: 0x000a, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0x000a, 0x1ec1: 0x000a, 0x1ec2: 0x000a, 0x1ec3: 0x000a, 0x1ec4: 0x000a, 0x1ec5: 0x000a, + 0x1ec6: 0x000a, + // Block 0x7c, offset 0x1f00 + 0x1f0d: 0x000a, 0x1f0e: 0x000a, 0x1f0f: 0x000a, + // Block 0x7d, offset 0x1f40 + 0x1f6f: 0x000c, + 0x1f70: 0x000c, 0x1f71: 0x000c, 0x1f72: 0x000c, 0x1f73: 0x000a, 0x1f74: 0x000c, 0x1f75: 0x000c, + 0x1f76: 0x000c, 0x1f77: 0x000c, 0x1f78: 0x000c, 0x1f79: 0x000c, 0x1f7a: 0x000c, 0x1f7b: 0x000c, + 0x1f7c: 0x000c, 0x1f7d: 0x000c, 0x1f7e: 0x000a, 0x1f7f: 0x000a, + // Block 0x7e, offset 0x1f80 + 0x1f9e: 0x000c, 0x1f9f: 0x000c, + // Block 0x7f, offset 0x1fc0 + 0x1ff0: 0x000c, 0x1ff1: 0x000c, + // Block 0x80, offset 0x2000 + 0x2000: 0x000a, 0x2001: 0x000a, 0x2002: 0x000a, 0x2003: 0x000a, 0x2004: 0x000a, 0x2005: 0x000a, + 0x2006: 0x000a, 0x2007: 0x000a, 0x2008: 0x000a, 0x2009: 0x000a, 0x200a: 0x000a, 0x200b: 0x000a, + 0x200c: 0x000a, 0x200d: 0x000a, 0x200e: 0x000a, 0x200f: 0x000a, 0x2010: 0x000a, 0x2011: 0x000a, + 0x2012: 0x000a, 0x2013: 0x000a, 0x2014: 0x000a, 0x2015: 0x000a, 0x2016: 0x000a, 0x2017: 0x000a, + 0x2018: 0x000a, 0x2019: 0x000a, 0x201a: 0x000a, 0x201b: 0x000a, 0x201c: 0x000a, 0x201d: 0x000a, + 0x201e: 0x000a, 0x201f: 0x000a, 0x2020: 0x000a, 0x2021: 0x000a, + // Block 0x81, offset 0x2040 + 0x2048: 0x000a, + // Block 0x82, offset 0x2080 + 0x2082: 0x000c, + 0x2086: 0x000c, 0x208b: 0x000c, + 0x20a5: 0x000c, 0x20a6: 0x000c, 0x20a8: 0x000a, 0x20a9: 0x000a, + 0x20aa: 0x000a, 0x20ab: 0x000a, + 0x20b8: 0x0004, 0x20b9: 0x0004, + // Block 0x83, offset 0x20c0 + 0x20f4: 0x000a, 
0x20f5: 0x000a, + 0x20f6: 0x000a, 0x20f7: 0x000a, + // Block 0x84, offset 0x2100 + 0x2104: 0x000c, 0x2105: 0x000c, + 0x2120: 0x000c, 0x2121: 0x000c, 0x2122: 0x000c, 0x2123: 0x000c, + 0x2124: 0x000c, 0x2125: 0x000c, 0x2126: 0x000c, 0x2127: 0x000c, 0x2128: 0x000c, 0x2129: 0x000c, + 0x212a: 0x000c, 0x212b: 0x000c, 0x212c: 0x000c, 0x212d: 0x000c, 0x212e: 0x000c, 0x212f: 0x000c, + 0x2130: 0x000c, 0x2131: 0x000c, + // Block 0x85, offset 0x2140 + 0x2166: 0x000c, 0x2167: 0x000c, 0x2168: 0x000c, 0x2169: 0x000c, + 0x216a: 0x000c, 0x216b: 0x000c, 0x216c: 0x000c, 0x216d: 0x000c, + // Block 0x86, offset 0x2180 + 0x2187: 0x000c, 0x2188: 0x000c, 0x2189: 0x000c, 0x218a: 0x000c, 0x218b: 0x000c, + 0x218c: 0x000c, 0x218d: 0x000c, 0x218e: 0x000c, 0x218f: 0x000c, 0x2190: 0x000c, 0x2191: 0x000c, + // Block 0x87, offset 0x21c0 + 0x21c0: 0x000c, 0x21c1: 0x000c, 0x21c2: 0x000c, + 0x21f3: 0x000c, + 0x21f6: 0x000c, 0x21f7: 0x000c, 0x21f8: 0x000c, 0x21f9: 0x000c, + 0x21fc: 0x000c, + // Block 0x88, offset 0x2200 + 0x2225: 0x000c, + // Block 0x89, offset 0x2240 + 0x2269: 0x000c, + 0x226a: 0x000c, 0x226b: 0x000c, 0x226c: 0x000c, 0x226d: 0x000c, 0x226e: 0x000c, + 0x2271: 0x000c, 0x2272: 0x000c, 0x2275: 0x000c, + 0x2276: 0x000c, + // Block 0x8a, offset 0x2280 + 0x2283: 0x000c, + 0x228c: 0x000c, + 0x22bc: 0x000c, + // Block 0x8b, offset 0x22c0 + 0x22f0: 0x000c, 0x22f2: 0x000c, 0x22f3: 0x000c, 0x22f4: 0x000c, + 0x22f7: 0x000c, 0x22f8: 0x000c, + 0x22fe: 0x000c, 0x22ff: 0x000c, + // Block 0x8c, offset 0x2300 + 0x2301: 0x000c, + 0x232c: 0x000c, 0x232d: 0x000c, + 0x2336: 0x000c, + // Block 0x8d, offset 0x2340 + 0x2365: 0x000c, 0x2368: 0x000c, + 0x236d: 0x000c, + // Block 0x8e, offset 0x2380 + 0x239d: 0x0001, + 0x239e: 0x000c, 0x239f: 0x0001, 0x23a0: 0x0001, 0x23a1: 0x0001, 0x23a2: 0x0001, 0x23a3: 0x0001, + 0x23a4: 0x0001, 0x23a5: 0x0001, 0x23a6: 0x0001, 0x23a7: 0x0001, 0x23a8: 0x0001, 0x23a9: 0x0003, + 0x23aa: 0x0001, 0x23ab: 0x0001, 0x23ac: 0x0001, 0x23ad: 0x0001, 0x23ae: 0x0001, 0x23af: 0x0001, + 0x23b0: 0x0001, 0x23b1: 0x0001, 0x23b2: 0x0001, 0x23b3: 0x0001, 0x23b4: 0x0001, 0x23b5: 0x0001, + 0x23b6: 0x0001, 0x23b7: 0x0001, 0x23b8: 0x0001, 0x23b9: 0x0001, 0x23ba: 0x0001, 0x23bb: 0x0001, + 0x23bc: 0x0001, 0x23bd: 0x0001, 0x23be: 0x0001, 0x23bf: 0x0001, + // Block 0x8f, offset 0x23c0 + 0x23c0: 0x0001, 0x23c1: 0x0001, 0x23c2: 0x0001, 0x23c3: 0x0001, 0x23c4: 0x0001, 0x23c5: 0x0001, + 0x23c6: 0x0001, 0x23c7: 0x0001, 0x23c8: 0x0001, 0x23c9: 0x0001, 0x23ca: 0x0001, 0x23cb: 0x0001, + 0x23cc: 0x0001, 0x23cd: 0x0001, 0x23ce: 0x0001, 0x23cf: 0x0001, 0x23d0: 0x000d, 0x23d1: 0x000d, + 0x23d2: 0x000d, 0x23d3: 0x000d, 0x23d4: 0x000d, 0x23d5: 0x000d, 0x23d6: 0x000d, 0x23d7: 0x000d, + 0x23d8: 0x000d, 0x23d9: 0x000d, 0x23da: 0x000d, 0x23db: 0x000d, 0x23dc: 0x000d, 0x23dd: 0x000d, + 0x23de: 0x000d, 0x23df: 0x000d, 0x23e0: 0x000d, 0x23e1: 0x000d, 0x23e2: 0x000d, 0x23e3: 0x000d, + 0x23e4: 0x000d, 0x23e5: 0x000d, 0x23e6: 0x000d, 0x23e7: 0x000d, 0x23e8: 0x000d, 0x23e9: 0x000d, + 0x23ea: 0x000d, 0x23eb: 0x000d, 0x23ec: 0x000d, 0x23ed: 0x000d, 0x23ee: 0x000d, 0x23ef: 0x000d, + 0x23f0: 0x000d, 0x23f1: 0x000d, 0x23f2: 0x000d, 0x23f3: 0x000d, 0x23f4: 0x000d, 0x23f5: 0x000d, + 0x23f6: 0x000d, 0x23f7: 0x000d, 0x23f8: 0x000d, 0x23f9: 0x000d, 0x23fa: 0x000d, 0x23fb: 0x000d, + 0x23fc: 0x000d, 0x23fd: 0x000d, 0x23fe: 0x000d, 0x23ff: 0x000d, + // Block 0x90, offset 0x2400 + 0x2400: 0x000d, 0x2401: 0x000d, 0x2402: 0x000d, 0x2403: 0x000d, 0x2404: 0x000d, 0x2405: 0x000d, + 0x2406: 0x000d, 0x2407: 0x000d, 0x2408: 0x000d, 0x2409: 0x000d, 0x240a: 0x000d, 0x240b: 
0x000d, + 0x240c: 0x000d, 0x240d: 0x000d, 0x240e: 0x000d, 0x240f: 0x000d, 0x2410: 0x000d, 0x2411: 0x000d, + 0x2412: 0x000d, 0x2413: 0x000d, 0x2414: 0x000d, 0x2415: 0x000d, 0x2416: 0x000d, 0x2417: 0x000d, + 0x2418: 0x000d, 0x2419: 0x000d, 0x241a: 0x000d, 0x241b: 0x000d, 0x241c: 0x000d, 0x241d: 0x000d, + 0x241e: 0x000d, 0x241f: 0x000d, 0x2420: 0x000d, 0x2421: 0x000d, 0x2422: 0x000d, 0x2423: 0x000d, + 0x2424: 0x000d, 0x2425: 0x000d, 0x2426: 0x000d, 0x2427: 0x000d, 0x2428: 0x000d, 0x2429: 0x000d, + 0x242a: 0x000d, 0x242b: 0x000d, 0x242c: 0x000d, 0x242d: 0x000d, 0x242e: 0x000d, 0x242f: 0x000d, + 0x2430: 0x000d, 0x2431: 0x000d, 0x2432: 0x000d, 0x2433: 0x000d, 0x2434: 0x000d, 0x2435: 0x000d, + 0x2436: 0x000d, 0x2437: 0x000d, 0x2438: 0x000d, 0x2439: 0x000d, 0x243a: 0x000d, 0x243b: 0x000d, + 0x243c: 0x000d, 0x243d: 0x000d, 0x243e: 0x000a, 0x243f: 0x000a, + // Block 0x91, offset 0x2440 + 0x2440: 0x000d, 0x2441: 0x000d, 0x2442: 0x000d, 0x2443: 0x000d, 0x2444: 0x000d, 0x2445: 0x000d, + 0x2446: 0x000d, 0x2447: 0x000d, 0x2448: 0x000d, 0x2449: 0x000d, 0x244a: 0x000d, 0x244b: 0x000d, + 0x244c: 0x000d, 0x244d: 0x000d, 0x244e: 0x000d, 0x244f: 0x000d, 0x2450: 0x000b, 0x2451: 0x000b, + 0x2452: 0x000b, 0x2453: 0x000b, 0x2454: 0x000b, 0x2455: 0x000b, 0x2456: 0x000b, 0x2457: 0x000b, + 0x2458: 0x000b, 0x2459: 0x000b, 0x245a: 0x000b, 0x245b: 0x000b, 0x245c: 0x000b, 0x245d: 0x000b, + 0x245e: 0x000b, 0x245f: 0x000b, 0x2460: 0x000b, 0x2461: 0x000b, 0x2462: 0x000b, 0x2463: 0x000b, + 0x2464: 0x000b, 0x2465: 0x000b, 0x2466: 0x000b, 0x2467: 0x000b, 0x2468: 0x000b, 0x2469: 0x000b, + 0x246a: 0x000b, 0x246b: 0x000b, 0x246c: 0x000b, 0x246d: 0x000b, 0x246e: 0x000b, 0x246f: 0x000b, + 0x2470: 0x000d, 0x2471: 0x000d, 0x2472: 0x000d, 0x2473: 0x000d, 0x2474: 0x000d, 0x2475: 0x000d, + 0x2476: 0x000d, 0x2477: 0x000d, 0x2478: 0x000d, 0x2479: 0x000d, 0x247a: 0x000d, 0x247b: 0x000d, + 0x247c: 0x000d, 0x247d: 0x000a, 0x247e: 0x000d, 0x247f: 0x000d, + // Block 0x92, offset 0x2480 + 0x2480: 0x000c, 0x2481: 0x000c, 0x2482: 0x000c, 0x2483: 0x000c, 0x2484: 0x000c, 0x2485: 0x000c, + 0x2486: 0x000c, 0x2487: 0x000c, 0x2488: 0x000c, 0x2489: 0x000c, 0x248a: 0x000c, 0x248b: 0x000c, + 0x248c: 0x000c, 0x248d: 0x000c, 0x248e: 0x000c, 0x248f: 0x000c, 0x2490: 0x000a, 0x2491: 0x000a, + 0x2492: 0x000a, 0x2493: 0x000a, 0x2494: 0x000a, 0x2495: 0x000a, 0x2496: 0x000a, 0x2497: 0x000a, + 0x2498: 0x000a, 0x2499: 0x000a, + 0x24a0: 0x000c, 0x24a1: 0x000c, 0x24a2: 0x000c, 0x24a3: 0x000c, + 0x24a4: 0x000c, 0x24a5: 0x000c, 0x24a6: 0x000c, 0x24a7: 0x000c, 0x24a8: 0x000c, 0x24a9: 0x000c, + 0x24aa: 0x000c, 0x24ab: 0x000c, 0x24ac: 0x000c, 0x24ad: 0x000c, 0x24ae: 0x000c, 0x24af: 0x000c, + 0x24b0: 0x000a, 0x24b1: 0x000a, 0x24b2: 0x000a, 0x24b3: 0x000a, 0x24b4: 0x000a, 0x24b5: 0x000a, + 0x24b6: 0x000a, 0x24b7: 0x000a, 0x24b8: 0x000a, 0x24b9: 0x000a, 0x24ba: 0x000a, 0x24bb: 0x000a, + 0x24bc: 0x000a, 0x24bd: 0x000a, 0x24be: 0x000a, 0x24bf: 0x000a, + // Block 0x93, offset 0x24c0 + 0x24c0: 0x000a, 0x24c1: 0x000a, 0x24c2: 0x000a, 0x24c3: 0x000a, 0x24c4: 0x000a, 0x24c5: 0x000a, + 0x24c6: 0x000a, 0x24c7: 0x000a, 0x24c8: 0x000a, 0x24c9: 0x000a, 0x24ca: 0x000a, 0x24cb: 0x000a, + 0x24cc: 0x000a, 0x24cd: 0x000a, 0x24ce: 0x000a, 0x24cf: 0x000a, 0x24d0: 0x0006, 0x24d1: 0x000a, + 0x24d2: 0x0006, 0x24d4: 0x000a, 0x24d5: 0x0006, 0x24d6: 0x000a, 0x24d7: 0x000a, + 0x24d8: 0x000a, 0x24d9: 0x009a, 0x24da: 0x008a, 0x24db: 0x007a, 0x24dc: 0x006a, 0x24dd: 0x009a, + 0x24de: 0x008a, 0x24df: 0x0004, 0x24e0: 0x000a, 0x24e1: 0x000a, 0x24e2: 0x0003, 0x24e3: 0x0003, + 0x24e4: 0x000a, 0x24e5: 0x000a, 
0x24e6: 0x000a, 0x24e8: 0x000a, 0x24e9: 0x0004, + 0x24ea: 0x0004, 0x24eb: 0x000a, + 0x24f0: 0x000d, 0x24f1: 0x000d, 0x24f2: 0x000d, 0x24f3: 0x000d, 0x24f4: 0x000d, 0x24f5: 0x000d, + 0x24f6: 0x000d, 0x24f7: 0x000d, 0x24f8: 0x000d, 0x24f9: 0x000d, 0x24fa: 0x000d, 0x24fb: 0x000d, + 0x24fc: 0x000d, 0x24fd: 0x000d, 0x24fe: 0x000d, 0x24ff: 0x000d, + // Block 0x94, offset 0x2500 + 0x2500: 0x000d, 0x2501: 0x000d, 0x2502: 0x000d, 0x2503: 0x000d, 0x2504: 0x000d, 0x2505: 0x000d, + 0x2506: 0x000d, 0x2507: 0x000d, 0x2508: 0x000d, 0x2509: 0x000d, 0x250a: 0x000d, 0x250b: 0x000d, + 0x250c: 0x000d, 0x250d: 0x000d, 0x250e: 0x000d, 0x250f: 0x000d, 0x2510: 0x000d, 0x2511: 0x000d, + 0x2512: 0x000d, 0x2513: 0x000d, 0x2514: 0x000d, 0x2515: 0x000d, 0x2516: 0x000d, 0x2517: 0x000d, + 0x2518: 0x000d, 0x2519: 0x000d, 0x251a: 0x000d, 0x251b: 0x000d, 0x251c: 0x000d, 0x251d: 0x000d, + 0x251e: 0x000d, 0x251f: 0x000d, 0x2520: 0x000d, 0x2521: 0x000d, 0x2522: 0x000d, 0x2523: 0x000d, + 0x2524: 0x000d, 0x2525: 0x000d, 0x2526: 0x000d, 0x2527: 0x000d, 0x2528: 0x000d, 0x2529: 0x000d, + 0x252a: 0x000d, 0x252b: 0x000d, 0x252c: 0x000d, 0x252d: 0x000d, 0x252e: 0x000d, 0x252f: 0x000d, + 0x2530: 0x000d, 0x2531: 0x000d, 0x2532: 0x000d, 0x2533: 0x000d, 0x2534: 0x000d, 0x2535: 0x000d, + 0x2536: 0x000d, 0x2537: 0x000d, 0x2538: 0x000d, 0x2539: 0x000d, 0x253a: 0x000d, 0x253b: 0x000d, + 0x253c: 0x000d, 0x253d: 0x000d, 0x253e: 0x000d, 0x253f: 0x000b, + // Block 0x95, offset 0x2540 + 0x2541: 0x000a, 0x2542: 0x000a, 0x2543: 0x0004, 0x2544: 0x0004, 0x2545: 0x0004, + 0x2546: 0x000a, 0x2547: 0x000a, 0x2548: 0x003a, 0x2549: 0x002a, 0x254a: 0x000a, 0x254b: 0x0003, + 0x254c: 0x0006, 0x254d: 0x0003, 0x254e: 0x0006, 0x254f: 0x0006, 0x2550: 0x0002, 0x2551: 0x0002, + 0x2552: 0x0002, 0x2553: 0x0002, 0x2554: 0x0002, 0x2555: 0x0002, 0x2556: 0x0002, 0x2557: 0x0002, + 0x2558: 0x0002, 0x2559: 0x0002, 0x255a: 0x0006, 0x255b: 0x000a, 0x255c: 0x000a, 0x255d: 0x000a, + 0x255e: 0x000a, 0x255f: 0x000a, 0x2560: 0x000a, + 0x257b: 0x005a, + 0x257c: 0x000a, 0x257d: 0x004a, 0x257e: 0x000a, 0x257f: 0x000a, + // Block 0x96, offset 0x2580 + 0x2580: 0x000a, + 0x259b: 0x005a, 0x259c: 0x000a, 0x259d: 0x004a, + 0x259e: 0x000a, 0x259f: 0x00fa, 0x25a0: 0x00ea, 0x25a1: 0x000a, 0x25a2: 0x003a, 0x25a3: 0x002a, + 0x25a4: 0x000a, 0x25a5: 0x000a, + // Block 0x97, offset 0x25c0 + 0x25e0: 0x0004, 0x25e1: 0x0004, 0x25e2: 0x000a, 0x25e3: 0x000a, + 0x25e4: 0x000a, 0x25e5: 0x0004, 0x25e6: 0x0004, 0x25e8: 0x000a, 0x25e9: 0x000a, + 0x25ea: 0x000a, 0x25eb: 0x000a, 0x25ec: 0x000a, 0x25ed: 0x000a, 0x25ee: 0x000a, + 0x25f0: 0x000b, 0x25f1: 0x000b, 0x25f2: 0x000b, 0x25f3: 0x000b, 0x25f4: 0x000b, 0x25f5: 0x000b, + 0x25f6: 0x000b, 0x25f7: 0x000b, 0x25f8: 0x000b, 0x25f9: 0x000a, 0x25fa: 0x000a, 0x25fb: 0x000a, + 0x25fc: 0x000a, 0x25fd: 0x000a, 0x25fe: 0x000b, 0x25ff: 0x000b, + // Block 0x98, offset 0x2600 + 0x2601: 0x000a, + // Block 0x99, offset 0x2640 + 0x2640: 0x000a, 0x2641: 0x000a, 0x2642: 0x000a, 0x2643: 0x000a, 0x2644: 0x000a, 0x2645: 0x000a, + 0x2646: 0x000a, 0x2647: 0x000a, 0x2648: 0x000a, 0x2649: 0x000a, 0x264a: 0x000a, 0x264b: 0x000a, + 0x264c: 0x000a, 0x2650: 0x000a, 0x2651: 0x000a, + 0x2652: 0x000a, 0x2653: 0x000a, 0x2654: 0x000a, 0x2655: 0x000a, 0x2656: 0x000a, 0x2657: 0x000a, + 0x2658: 0x000a, 0x2659: 0x000a, 0x265a: 0x000a, 0x265b: 0x000a, + 0x2660: 0x000a, + // Block 0x9a, offset 0x2680 + 0x26bd: 0x000c, + // Block 0x9b, offset 0x26c0 + 0x26e0: 0x000c, 0x26e1: 0x0002, 0x26e2: 0x0002, 0x26e3: 0x0002, + 0x26e4: 0x0002, 0x26e5: 0x0002, 0x26e6: 0x0002, 0x26e7: 0x0002, 0x26e8: 0x0002, 
0x26e9: 0x0002, + 0x26ea: 0x0002, 0x26eb: 0x0002, 0x26ec: 0x0002, 0x26ed: 0x0002, 0x26ee: 0x0002, 0x26ef: 0x0002, + 0x26f0: 0x0002, 0x26f1: 0x0002, 0x26f2: 0x0002, 0x26f3: 0x0002, 0x26f4: 0x0002, 0x26f5: 0x0002, + 0x26f6: 0x0002, 0x26f7: 0x0002, 0x26f8: 0x0002, 0x26f9: 0x0002, 0x26fa: 0x0002, 0x26fb: 0x0002, + // Block 0x9c, offset 0x2700 + 0x2736: 0x000c, 0x2737: 0x000c, 0x2738: 0x000c, 0x2739: 0x000c, 0x273a: 0x000c, + // Block 0x9d, offset 0x2740 + 0x2740: 0x0001, 0x2741: 0x0001, 0x2742: 0x0001, 0x2743: 0x0001, 0x2744: 0x0001, 0x2745: 0x0001, + 0x2746: 0x0001, 0x2747: 0x0001, 0x2748: 0x0001, 0x2749: 0x0001, 0x274a: 0x0001, 0x274b: 0x0001, + 0x274c: 0x0001, 0x274d: 0x0001, 0x274e: 0x0001, 0x274f: 0x0001, 0x2750: 0x0001, 0x2751: 0x0001, + 0x2752: 0x0001, 0x2753: 0x0001, 0x2754: 0x0001, 0x2755: 0x0001, 0x2756: 0x0001, 0x2757: 0x0001, + 0x2758: 0x0001, 0x2759: 0x0001, 0x275a: 0x0001, 0x275b: 0x0001, 0x275c: 0x0001, 0x275d: 0x0001, + 0x275e: 0x0001, 0x275f: 0x0001, 0x2760: 0x0001, 0x2761: 0x0001, 0x2762: 0x0001, 0x2763: 0x0001, + 0x2764: 0x0001, 0x2765: 0x0001, 0x2766: 0x0001, 0x2767: 0x0001, 0x2768: 0x0001, 0x2769: 0x0001, + 0x276a: 0x0001, 0x276b: 0x0001, 0x276c: 0x0001, 0x276d: 0x0001, 0x276e: 0x0001, 0x276f: 0x0001, + 0x2770: 0x0001, 0x2771: 0x0001, 0x2772: 0x0001, 0x2773: 0x0001, 0x2774: 0x0001, 0x2775: 0x0001, + 0x2776: 0x0001, 0x2777: 0x0001, 0x2778: 0x0001, 0x2779: 0x0001, 0x277a: 0x0001, 0x277b: 0x0001, + 0x277c: 0x0001, 0x277d: 0x0001, 0x277e: 0x0001, 0x277f: 0x0001, + // Block 0x9e, offset 0x2780 + 0x2780: 0x0001, 0x2781: 0x0001, 0x2782: 0x0001, 0x2783: 0x0001, 0x2784: 0x0001, 0x2785: 0x0001, + 0x2786: 0x0001, 0x2787: 0x0001, 0x2788: 0x0001, 0x2789: 0x0001, 0x278a: 0x0001, 0x278b: 0x0001, + 0x278c: 0x0001, 0x278d: 0x0001, 0x278e: 0x0001, 0x278f: 0x0001, 0x2790: 0x0001, 0x2791: 0x0001, + 0x2792: 0x0001, 0x2793: 0x0001, 0x2794: 0x0001, 0x2795: 0x0001, 0x2796: 0x0001, 0x2797: 0x0001, + 0x2798: 0x0001, 0x2799: 0x0001, 0x279a: 0x0001, 0x279b: 0x0001, 0x279c: 0x0001, 0x279d: 0x0001, + 0x279e: 0x0001, 0x279f: 0x000a, 0x27a0: 0x0001, 0x27a1: 0x0001, 0x27a2: 0x0001, 0x27a3: 0x0001, + 0x27a4: 0x0001, 0x27a5: 0x0001, 0x27a6: 0x0001, 0x27a7: 0x0001, 0x27a8: 0x0001, 0x27a9: 0x0001, + 0x27aa: 0x0001, 0x27ab: 0x0001, 0x27ac: 0x0001, 0x27ad: 0x0001, 0x27ae: 0x0001, 0x27af: 0x0001, + 0x27b0: 0x0001, 0x27b1: 0x0001, 0x27b2: 0x0001, 0x27b3: 0x0001, 0x27b4: 0x0001, 0x27b5: 0x0001, + 0x27b6: 0x0001, 0x27b7: 0x0001, 0x27b8: 0x0001, 0x27b9: 0x0001, 0x27ba: 0x0001, 0x27bb: 0x0001, + 0x27bc: 0x0001, 0x27bd: 0x0001, 0x27be: 0x0001, 0x27bf: 0x0001, + // Block 0x9f, offset 0x27c0 + 0x27c0: 0x0001, 0x27c1: 0x000c, 0x27c2: 0x000c, 0x27c3: 0x000c, 0x27c4: 0x0001, 0x27c5: 0x000c, + 0x27c6: 0x000c, 0x27c7: 0x0001, 0x27c8: 0x0001, 0x27c9: 0x0001, 0x27ca: 0x0001, 0x27cb: 0x0001, + 0x27cc: 0x000c, 0x27cd: 0x000c, 0x27ce: 0x000c, 0x27cf: 0x000c, 0x27d0: 0x0001, 0x27d1: 0x0001, + 0x27d2: 0x0001, 0x27d3: 0x0001, 0x27d4: 0x0001, 0x27d5: 0x0001, 0x27d6: 0x0001, 0x27d7: 0x0001, + 0x27d8: 0x0001, 0x27d9: 0x0001, 0x27da: 0x0001, 0x27db: 0x0001, 0x27dc: 0x0001, 0x27dd: 0x0001, + 0x27de: 0x0001, 0x27df: 0x0001, 0x27e0: 0x0001, 0x27e1: 0x0001, 0x27e2: 0x0001, 0x27e3: 0x0001, + 0x27e4: 0x0001, 0x27e5: 0x0001, 0x27e6: 0x0001, 0x27e7: 0x0001, 0x27e8: 0x0001, 0x27e9: 0x0001, + 0x27ea: 0x0001, 0x27eb: 0x0001, 0x27ec: 0x0001, 0x27ed: 0x0001, 0x27ee: 0x0001, 0x27ef: 0x0001, + 0x27f0: 0x0001, 0x27f1: 0x0001, 0x27f2: 0x0001, 0x27f3: 0x0001, 0x27f4: 0x0001, 0x27f5: 0x0001, + 0x27f6: 0x0001, 0x27f7: 0x0001, 0x27f8: 0x000c, 0x27f9: 
0x000c, 0x27fa: 0x000c, 0x27fb: 0x0001, + 0x27fc: 0x0001, 0x27fd: 0x0001, 0x27fe: 0x0001, 0x27ff: 0x000c, + // Block 0xa0, offset 0x2800 + 0x2800: 0x0001, 0x2801: 0x0001, 0x2802: 0x0001, 0x2803: 0x0001, 0x2804: 0x0001, 0x2805: 0x0001, + 0x2806: 0x0001, 0x2807: 0x0001, 0x2808: 0x0001, 0x2809: 0x0001, 0x280a: 0x0001, 0x280b: 0x0001, + 0x280c: 0x0001, 0x280d: 0x0001, 0x280e: 0x0001, 0x280f: 0x0001, 0x2810: 0x0001, 0x2811: 0x0001, + 0x2812: 0x0001, 0x2813: 0x0001, 0x2814: 0x0001, 0x2815: 0x0001, 0x2816: 0x0001, 0x2817: 0x0001, + 0x2818: 0x0001, 0x2819: 0x0001, 0x281a: 0x0001, 0x281b: 0x0001, 0x281c: 0x0001, 0x281d: 0x0001, + 0x281e: 0x0001, 0x281f: 0x0001, 0x2820: 0x0001, 0x2821: 0x0001, 0x2822: 0x0001, 0x2823: 0x0001, + 0x2824: 0x0001, 0x2825: 0x000c, 0x2826: 0x000c, 0x2827: 0x0001, 0x2828: 0x0001, 0x2829: 0x0001, + 0x282a: 0x0001, 0x282b: 0x0001, 0x282c: 0x0001, 0x282d: 0x0001, 0x282e: 0x0001, 0x282f: 0x0001, + 0x2830: 0x0001, 0x2831: 0x0001, 0x2832: 0x0001, 0x2833: 0x0001, 0x2834: 0x0001, 0x2835: 0x0001, + 0x2836: 0x0001, 0x2837: 0x0001, 0x2838: 0x0001, 0x2839: 0x0001, 0x283a: 0x0001, 0x283b: 0x0001, + 0x283c: 0x0001, 0x283d: 0x0001, 0x283e: 0x0001, 0x283f: 0x0001, + // Block 0xa1, offset 0x2840 + 0x2840: 0x0001, 0x2841: 0x0001, 0x2842: 0x0001, 0x2843: 0x0001, 0x2844: 0x0001, 0x2845: 0x0001, + 0x2846: 0x0001, 0x2847: 0x0001, 0x2848: 0x0001, 0x2849: 0x0001, 0x284a: 0x0001, 0x284b: 0x0001, + 0x284c: 0x0001, 0x284d: 0x0001, 0x284e: 0x0001, 0x284f: 0x0001, 0x2850: 0x0001, 0x2851: 0x0001, + 0x2852: 0x0001, 0x2853: 0x0001, 0x2854: 0x0001, 0x2855: 0x0001, 0x2856: 0x0001, 0x2857: 0x0001, + 0x2858: 0x0001, 0x2859: 0x0001, 0x285a: 0x0001, 0x285b: 0x0001, 0x285c: 0x0001, 0x285d: 0x0001, + 0x285e: 0x0001, 0x285f: 0x0001, 0x2860: 0x0001, 0x2861: 0x0001, 0x2862: 0x0001, 0x2863: 0x0001, + 0x2864: 0x0001, 0x2865: 0x0001, 0x2866: 0x0001, 0x2867: 0x0001, 0x2868: 0x0001, 0x2869: 0x0001, + 0x286a: 0x0001, 0x286b: 0x0001, 0x286c: 0x0001, 0x286d: 0x0001, 0x286e: 0x0001, 0x286f: 0x0001, + 0x2870: 0x0001, 0x2871: 0x0001, 0x2872: 0x0001, 0x2873: 0x0001, 0x2874: 0x0001, 0x2875: 0x0001, + 0x2876: 0x0001, 0x2877: 0x0001, 0x2878: 0x0001, 0x2879: 0x000a, 0x287a: 0x000a, 0x287b: 0x000a, + 0x287c: 0x000a, 0x287d: 0x000a, 0x287e: 0x000a, 0x287f: 0x000a, + // Block 0xa2, offset 0x2880 + 0x2880: 0x0001, 0x2881: 0x0001, 0x2882: 0x0001, 0x2883: 0x0001, 0x2884: 0x0001, 0x2885: 0x0001, + 0x2886: 0x0001, 0x2887: 0x0001, 0x2888: 0x0001, 0x2889: 0x0001, 0x288a: 0x0001, 0x288b: 0x0001, + 0x288c: 0x0001, 0x288d: 0x0001, 0x288e: 0x0001, 0x288f: 0x0001, 0x2890: 0x0001, 0x2891: 0x0001, + 0x2892: 0x0001, 0x2893: 0x0001, 0x2894: 0x0001, 0x2895: 0x0001, 0x2896: 0x0001, 0x2897: 0x0001, + 0x2898: 0x0001, 0x2899: 0x0001, 0x289a: 0x0001, 0x289b: 0x0001, 0x289c: 0x0001, 0x289d: 0x0001, + 0x289e: 0x0001, 0x289f: 0x0001, 0x28a0: 0x0005, 0x28a1: 0x0005, 0x28a2: 0x0005, 0x28a3: 0x0005, + 0x28a4: 0x0005, 0x28a5: 0x0005, 0x28a6: 0x0005, 0x28a7: 0x0005, 0x28a8: 0x0005, 0x28a9: 0x0005, + 0x28aa: 0x0005, 0x28ab: 0x0005, 0x28ac: 0x0005, 0x28ad: 0x0005, 0x28ae: 0x0005, 0x28af: 0x0005, + 0x28b0: 0x0005, 0x28b1: 0x0005, 0x28b2: 0x0005, 0x28b3: 0x0005, 0x28b4: 0x0005, 0x28b5: 0x0005, + 0x28b6: 0x0005, 0x28b7: 0x0005, 0x28b8: 0x0005, 0x28b9: 0x0005, 0x28ba: 0x0005, 0x28bb: 0x0005, + 0x28bc: 0x0005, 0x28bd: 0x0005, 0x28be: 0x0005, 0x28bf: 0x0001, + // Block 0xa3, offset 0x28c0 + 0x28c1: 0x000c, + 0x28f8: 0x000c, 0x28f9: 0x000c, 0x28fa: 0x000c, 0x28fb: 0x000c, + 0x28fc: 0x000c, 0x28fd: 0x000c, 0x28fe: 0x000c, 0x28ff: 0x000c, + // Block 0xa4, offset 0x2900 + 
0x2900: 0x000c, 0x2901: 0x000c, 0x2902: 0x000c, 0x2903: 0x000c, 0x2904: 0x000c, 0x2905: 0x000c, + 0x2906: 0x000c, + 0x2912: 0x000a, 0x2913: 0x000a, 0x2914: 0x000a, 0x2915: 0x000a, 0x2916: 0x000a, 0x2917: 0x000a, + 0x2918: 0x000a, 0x2919: 0x000a, 0x291a: 0x000a, 0x291b: 0x000a, 0x291c: 0x000a, 0x291d: 0x000a, + 0x291e: 0x000a, 0x291f: 0x000a, 0x2920: 0x000a, 0x2921: 0x000a, 0x2922: 0x000a, 0x2923: 0x000a, + 0x2924: 0x000a, 0x2925: 0x000a, + 0x293f: 0x000c, + // Block 0xa5, offset 0x2940 + 0x2940: 0x000c, 0x2941: 0x000c, + 0x2973: 0x000c, 0x2974: 0x000c, 0x2975: 0x000c, + 0x2976: 0x000c, 0x2979: 0x000c, 0x297a: 0x000c, + // Block 0xa6, offset 0x2980 + 0x2980: 0x000c, 0x2981: 0x000c, 0x2982: 0x000c, + 0x29a7: 0x000c, 0x29a8: 0x000c, 0x29a9: 0x000c, + 0x29aa: 0x000c, 0x29ab: 0x000c, 0x29ad: 0x000c, 0x29ae: 0x000c, 0x29af: 0x000c, + 0x29b0: 0x000c, 0x29b1: 0x000c, 0x29b2: 0x000c, 0x29b3: 0x000c, 0x29b4: 0x000c, + // Block 0xa7, offset 0x29c0 + 0x29f3: 0x000c, + // Block 0xa8, offset 0x2a00 + 0x2a00: 0x000c, 0x2a01: 0x000c, + 0x2a36: 0x000c, 0x2a37: 0x000c, 0x2a38: 0x000c, 0x2a39: 0x000c, 0x2a3a: 0x000c, 0x2a3b: 0x000c, + 0x2a3c: 0x000c, 0x2a3d: 0x000c, 0x2a3e: 0x000c, + // Block 0xa9, offset 0x2a40 + 0x2a4a: 0x000c, 0x2a4b: 0x000c, + 0x2a4c: 0x000c, + // Block 0xaa, offset 0x2a80 + 0x2aaf: 0x000c, + 0x2ab0: 0x000c, 0x2ab1: 0x000c, 0x2ab4: 0x000c, + 0x2ab6: 0x000c, 0x2ab7: 0x000c, + 0x2abe: 0x000c, + // Block 0xab, offset 0x2ac0 + 0x2adf: 0x000c, 0x2ae3: 0x000c, + 0x2ae4: 0x000c, 0x2ae5: 0x000c, 0x2ae6: 0x000c, 0x2ae7: 0x000c, 0x2ae8: 0x000c, 0x2ae9: 0x000c, + 0x2aea: 0x000c, + // Block 0xac, offset 0x2b00 + 0x2b00: 0x000c, 0x2b01: 0x000c, + 0x2b3c: 0x000c, + // Block 0xad, offset 0x2b40 + 0x2b40: 0x000c, + 0x2b66: 0x000c, 0x2b67: 0x000c, 0x2b68: 0x000c, 0x2b69: 0x000c, + 0x2b6a: 0x000c, 0x2b6b: 0x000c, 0x2b6c: 0x000c, + 0x2b70: 0x000c, 0x2b71: 0x000c, 0x2b72: 0x000c, 0x2b73: 0x000c, 0x2b74: 0x000c, + // Block 0xae, offset 0x2b80 + 0x2bb8: 0x000c, 0x2bb9: 0x000c, 0x2bba: 0x000c, 0x2bbb: 0x000c, + 0x2bbc: 0x000c, 0x2bbd: 0x000c, 0x2bbe: 0x000c, 0x2bbf: 0x000c, + // Block 0xaf, offset 0x2bc0 + 0x2bc2: 0x000c, 0x2bc3: 0x000c, 0x2bc4: 0x000c, + 0x2bc6: 0x000c, + // Block 0xb0, offset 0x2c00 + 0x2c33: 0x000c, 0x2c34: 0x000c, 0x2c35: 0x000c, + 0x2c36: 0x000c, 0x2c37: 0x000c, 0x2c38: 0x000c, 0x2c3a: 0x000c, + 0x2c3f: 0x000c, + // Block 0xb1, offset 0x2c40 + 0x2c40: 0x000c, 0x2c42: 0x000c, 0x2c43: 0x000c, + // Block 0xb2, offset 0x2c80 + 0x2cb2: 0x000c, 0x2cb3: 0x000c, 0x2cb4: 0x000c, 0x2cb5: 0x000c, + 0x2cbc: 0x000c, 0x2cbd: 0x000c, 0x2cbf: 0x000c, + // Block 0xb3, offset 0x2cc0 + 0x2cc0: 0x000c, + 0x2cdc: 0x000c, 0x2cdd: 0x000c, + // Block 0xb4, offset 0x2d00 + 0x2d33: 0x000c, 0x2d34: 0x000c, 0x2d35: 0x000c, + 0x2d36: 0x000c, 0x2d37: 0x000c, 0x2d38: 0x000c, 0x2d39: 0x000c, 0x2d3a: 0x000c, + 0x2d3d: 0x000c, 0x2d3f: 0x000c, + // Block 0xb5, offset 0x2d40 + 0x2d40: 0x000c, + 0x2d60: 0x000a, 0x2d61: 0x000a, 0x2d62: 0x000a, 0x2d63: 0x000a, + 0x2d64: 0x000a, 0x2d65: 0x000a, 0x2d66: 0x000a, 0x2d67: 0x000a, 0x2d68: 0x000a, 0x2d69: 0x000a, + 0x2d6a: 0x000a, 0x2d6b: 0x000a, 0x2d6c: 0x000a, + // Block 0xb6, offset 0x2d80 + 0x2dab: 0x000c, 0x2dad: 0x000c, + 0x2db0: 0x000c, 0x2db1: 0x000c, 0x2db2: 0x000c, 0x2db3: 0x000c, 0x2db4: 0x000c, 0x2db5: 0x000c, + 0x2db7: 0x000c, + // Block 0xb7, offset 0x2dc0 + 0x2ddd: 0x000c, + 0x2dde: 0x000c, 0x2ddf: 0x000c, 0x2de2: 0x000c, 0x2de3: 0x000c, + 0x2de4: 0x000c, 0x2de5: 0x000c, 0x2de7: 0x000c, 0x2de8: 0x000c, 0x2de9: 0x000c, + 0x2dea: 0x000c, 0x2deb: 0x000c, + // Block 0xb8, 
offset 0x2e00 + 0x2e30: 0x000c, 0x2e31: 0x000c, 0x2e32: 0x000c, 0x2e33: 0x000c, 0x2e34: 0x000c, 0x2e35: 0x000c, + 0x2e36: 0x000c, 0x2e38: 0x000c, 0x2e39: 0x000c, 0x2e3a: 0x000c, 0x2e3b: 0x000c, + 0x2e3c: 0x000c, 0x2e3d: 0x000c, + // Block 0xb9, offset 0x2e40 + 0x2e52: 0x000c, 0x2e53: 0x000c, 0x2e54: 0x000c, 0x2e55: 0x000c, 0x2e56: 0x000c, 0x2e57: 0x000c, + 0x2e58: 0x000c, 0x2e59: 0x000c, 0x2e5a: 0x000c, 0x2e5b: 0x000c, 0x2e5c: 0x000c, 0x2e5d: 0x000c, + 0x2e5e: 0x000c, 0x2e5f: 0x000c, 0x2e60: 0x000c, 0x2e61: 0x000c, 0x2e62: 0x000c, 0x2e63: 0x000c, + 0x2e64: 0x000c, 0x2e65: 0x000c, 0x2e66: 0x000c, 0x2e67: 0x000c, + 0x2e6a: 0x000c, 0x2e6b: 0x000c, 0x2e6c: 0x000c, 0x2e6d: 0x000c, 0x2e6e: 0x000c, 0x2e6f: 0x000c, + 0x2e70: 0x000c, 0x2e72: 0x000c, 0x2e73: 0x000c, 0x2e75: 0x000c, + 0x2e76: 0x000c, + // Block 0xba, offset 0x2e80 + 0x2eb0: 0x000c, 0x2eb1: 0x000c, 0x2eb2: 0x000c, 0x2eb3: 0x000c, 0x2eb4: 0x000c, + // Block 0xbb, offset 0x2ec0 + 0x2ef0: 0x000c, 0x2ef1: 0x000c, 0x2ef2: 0x000c, 0x2ef3: 0x000c, 0x2ef4: 0x000c, 0x2ef5: 0x000c, + 0x2ef6: 0x000c, + // Block 0xbc, offset 0x2f00 + 0x2f0f: 0x000c, 0x2f10: 0x000c, 0x2f11: 0x000c, + 0x2f12: 0x000c, + // Block 0xbd, offset 0x2f40 + 0x2f5d: 0x000c, + 0x2f5e: 0x000c, 0x2f60: 0x000b, 0x2f61: 0x000b, 0x2f62: 0x000b, 0x2f63: 0x000b, + // Block 0xbe, offset 0x2f80 + 0x2fa7: 0x000c, 0x2fa8: 0x000c, 0x2fa9: 0x000c, + 0x2fb3: 0x000b, 0x2fb4: 0x000b, 0x2fb5: 0x000b, + 0x2fb6: 0x000b, 0x2fb7: 0x000b, 0x2fb8: 0x000b, 0x2fb9: 0x000b, 0x2fba: 0x000b, 0x2fbb: 0x000c, + 0x2fbc: 0x000c, 0x2fbd: 0x000c, 0x2fbe: 0x000c, 0x2fbf: 0x000c, + // Block 0xbf, offset 0x2fc0 + 0x2fc0: 0x000c, 0x2fc1: 0x000c, 0x2fc2: 0x000c, 0x2fc5: 0x000c, + 0x2fc6: 0x000c, 0x2fc7: 0x000c, 0x2fc8: 0x000c, 0x2fc9: 0x000c, 0x2fca: 0x000c, 0x2fcb: 0x000c, + 0x2fea: 0x000c, 0x2feb: 0x000c, 0x2fec: 0x000c, 0x2fed: 0x000c, + // Block 0xc0, offset 0x3000 + 0x3000: 0x000a, 0x3001: 0x000a, 0x3002: 0x000c, 0x3003: 0x000c, 0x3004: 0x000c, 0x3005: 0x000a, + // Block 0xc1, offset 0x3040 + 0x3040: 0x000a, 0x3041: 0x000a, 0x3042: 0x000a, 0x3043: 0x000a, 0x3044: 0x000a, 0x3045: 0x000a, + 0x3046: 0x000a, 0x3047: 0x000a, 0x3048: 0x000a, 0x3049: 0x000a, 0x304a: 0x000a, 0x304b: 0x000a, + 0x304c: 0x000a, 0x304d: 0x000a, 0x304e: 0x000a, 0x304f: 0x000a, 0x3050: 0x000a, 0x3051: 0x000a, + 0x3052: 0x000a, 0x3053: 0x000a, 0x3054: 0x000a, 0x3055: 0x000a, 0x3056: 0x000a, + // Block 0xc2, offset 0x3080 + 0x309b: 0x000a, + // Block 0xc3, offset 0x30c0 + 0x30d5: 0x000a, + // Block 0xc4, offset 0x3100 + 0x310f: 0x000a, + // Block 0xc5, offset 0x3140 + 0x3149: 0x000a, + // Block 0xc6, offset 0x3180 + 0x3183: 0x000a, + 0x318e: 0x0002, 0x318f: 0x0002, 0x3190: 0x0002, 0x3191: 0x0002, + 0x3192: 0x0002, 0x3193: 0x0002, 0x3194: 0x0002, 0x3195: 0x0002, 0x3196: 0x0002, 0x3197: 0x0002, + 0x3198: 0x0002, 0x3199: 0x0002, 0x319a: 0x0002, 0x319b: 0x0002, 0x319c: 0x0002, 0x319d: 0x0002, + 0x319e: 0x0002, 0x319f: 0x0002, 0x31a0: 0x0002, 0x31a1: 0x0002, 0x31a2: 0x0002, 0x31a3: 0x0002, + 0x31a4: 0x0002, 0x31a5: 0x0002, 0x31a6: 0x0002, 0x31a7: 0x0002, 0x31a8: 0x0002, 0x31a9: 0x0002, + 0x31aa: 0x0002, 0x31ab: 0x0002, 0x31ac: 0x0002, 0x31ad: 0x0002, 0x31ae: 0x0002, 0x31af: 0x0002, + 0x31b0: 0x0002, 0x31b1: 0x0002, 0x31b2: 0x0002, 0x31b3: 0x0002, 0x31b4: 0x0002, 0x31b5: 0x0002, + 0x31b6: 0x0002, 0x31b7: 0x0002, 0x31b8: 0x0002, 0x31b9: 0x0002, 0x31ba: 0x0002, 0x31bb: 0x0002, + 0x31bc: 0x0002, 0x31bd: 0x0002, 0x31be: 0x0002, 0x31bf: 0x0002, + // Block 0xc7, offset 0x31c0 + 0x31c0: 0x000c, 0x31c1: 0x000c, 0x31c2: 0x000c, 0x31c3: 0x000c, 0x31c4: 
0x000c, 0x31c5: 0x000c, + 0x31c6: 0x000c, 0x31c7: 0x000c, 0x31c8: 0x000c, 0x31c9: 0x000c, 0x31ca: 0x000c, 0x31cb: 0x000c, + 0x31cc: 0x000c, 0x31cd: 0x000c, 0x31ce: 0x000c, 0x31cf: 0x000c, 0x31d0: 0x000c, 0x31d1: 0x000c, + 0x31d2: 0x000c, 0x31d3: 0x000c, 0x31d4: 0x000c, 0x31d5: 0x000c, 0x31d6: 0x000c, 0x31d7: 0x000c, + 0x31d8: 0x000c, 0x31d9: 0x000c, 0x31da: 0x000c, 0x31db: 0x000c, 0x31dc: 0x000c, 0x31dd: 0x000c, + 0x31de: 0x000c, 0x31df: 0x000c, 0x31e0: 0x000c, 0x31e1: 0x000c, 0x31e2: 0x000c, 0x31e3: 0x000c, + 0x31e4: 0x000c, 0x31e5: 0x000c, 0x31e6: 0x000c, 0x31e7: 0x000c, 0x31e8: 0x000c, 0x31e9: 0x000c, + 0x31ea: 0x000c, 0x31eb: 0x000c, 0x31ec: 0x000c, 0x31ed: 0x000c, 0x31ee: 0x000c, 0x31ef: 0x000c, + 0x31f0: 0x000c, 0x31f1: 0x000c, 0x31f2: 0x000c, 0x31f3: 0x000c, 0x31f4: 0x000c, 0x31f5: 0x000c, + 0x31f6: 0x000c, 0x31fb: 0x000c, + 0x31fc: 0x000c, 0x31fd: 0x000c, 0x31fe: 0x000c, 0x31ff: 0x000c, + // Block 0xc8, offset 0x3200 + 0x3200: 0x000c, 0x3201: 0x000c, 0x3202: 0x000c, 0x3203: 0x000c, 0x3204: 0x000c, 0x3205: 0x000c, + 0x3206: 0x000c, 0x3207: 0x000c, 0x3208: 0x000c, 0x3209: 0x000c, 0x320a: 0x000c, 0x320b: 0x000c, + 0x320c: 0x000c, 0x320d: 0x000c, 0x320e: 0x000c, 0x320f: 0x000c, 0x3210: 0x000c, 0x3211: 0x000c, + 0x3212: 0x000c, 0x3213: 0x000c, 0x3214: 0x000c, 0x3215: 0x000c, 0x3216: 0x000c, 0x3217: 0x000c, + 0x3218: 0x000c, 0x3219: 0x000c, 0x321a: 0x000c, 0x321b: 0x000c, 0x321c: 0x000c, 0x321d: 0x000c, + 0x321e: 0x000c, 0x321f: 0x000c, 0x3220: 0x000c, 0x3221: 0x000c, 0x3222: 0x000c, 0x3223: 0x000c, + 0x3224: 0x000c, 0x3225: 0x000c, 0x3226: 0x000c, 0x3227: 0x000c, 0x3228: 0x000c, 0x3229: 0x000c, + 0x322a: 0x000c, 0x322b: 0x000c, 0x322c: 0x000c, + 0x3235: 0x000c, + // Block 0xc9, offset 0x3240 + 0x3244: 0x000c, + 0x325b: 0x000c, 0x325c: 0x000c, 0x325d: 0x000c, + 0x325e: 0x000c, 0x325f: 0x000c, 0x3261: 0x000c, 0x3262: 0x000c, 0x3263: 0x000c, + 0x3264: 0x000c, 0x3265: 0x000c, 0x3266: 0x000c, 0x3267: 0x000c, 0x3268: 0x000c, 0x3269: 0x000c, + 0x326a: 0x000c, 0x326b: 0x000c, 0x326c: 0x000c, 0x326d: 0x000c, 0x326e: 0x000c, 0x326f: 0x000c, + // Block 0xca, offset 0x3280 + 0x3280: 0x000c, 0x3281: 0x000c, 0x3282: 0x000c, 0x3283: 0x000c, 0x3284: 0x000c, 0x3285: 0x000c, + 0x3286: 0x000c, 0x3288: 0x000c, 0x3289: 0x000c, 0x328a: 0x000c, 0x328b: 0x000c, + 0x328c: 0x000c, 0x328d: 0x000c, 0x328e: 0x000c, 0x328f: 0x000c, 0x3290: 0x000c, 0x3291: 0x000c, + 0x3292: 0x000c, 0x3293: 0x000c, 0x3294: 0x000c, 0x3295: 0x000c, 0x3296: 0x000c, 0x3297: 0x000c, + 0x3298: 0x000c, 0x329b: 0x000c, 0x329c: 0x000c, 0x329d: 0x000c, + 0x329e: 0x000c, 0x329f: 0x000c, 0x32a0: 0x000c, 0x32a1: 0x000c, 0x32a3: 0x000c, + 0x32a4: 0x000c, 0x32a6: 0x000c, 0x32a7: 0x000c, 0x32a8: 0x000c, 0x32a9: 0x000c, + 0x32aa: 0x000c, + // Block 0xcb, offset 0x32c0 + 0x32c0: 0x0001, 0x32c1: 0x0001, 0x32c2: 0x0001, 0x32c3: 0x0001, 0x32c4: 0x0001, 0x32c5: 0x0001, + 0x32c6: 0x0001, 0x32c7: 0x0001, 0x32c8: 0x0001, 0x32c9: 0x0001, 0x32ca: 0x0001, 0x32cb: 0x0001, + 0x32cc: 0x0001, 0x32cd: 0x0001, 0x32ce: 0x0001, 0x32cf: 0x0001, 0x32d0: 0x000c, 0x32d1: 0x000c, + 0x32d2: 0x000c, 0x32d3: 0x000c, 0x32d4: 0x000c, 0x32d5: 0x000c, 0x32d6: 0x000c, 0x32d7: 0x0001, + 0x32d8: 0x0001, 0x32d9: 0x0001, 0x32da: 0x0001, 0x32db: 0x0001, 0x32dc: 0x0001, 0x32dd: 0x0001, + 0x32de: 0x0001, 0x32df: 0x0001, 0x32e0: 0x0001, 0x32e1: 0x0001, 0x32e2: 0x0001, 0x32e3: 0x0001, + 0x32e4: 0x0001, 0x32e5: 0x0001, 0x32e6: 0x0001, 0x32e7: 0x0001, 0x32e8: 0x0001, 0x32e9: 0x0001, + 0x32ea: 0x0001, 0x32eb: 0x0001, 0x32ec: 0x0001, 0x32ed: 0x0001, 0x32ee: 0x0001, 0x32ef: 0x0001, + 0x32f0: 
0x0001, 0x32f1: 0x0001, 0x32f2: 0x0001, 0x32f3: 0x0001, 0x32f4: 0x0001, 0x32f5: 0x0001, + 0x32f6: 0x0001, 0x32f7: 0x0001, 0x32f8: 0x0001, 0x32f9: 0x0001, 0x32fa: 0x0001, 0x32fb: 0x0001, + 0x32fc: 0x0001, 0x32fd: 0x0001, 0x32fe: 0x0001, 0x32ff: 0x0001, + // Block 0xcc, offset 0x3300 + 0x3300: 0x0001, 0x3301: 0x0001, 0x3302: 0x0001, 0x3303: 0x0001, 0x3304: 0x000c, 0x3305: 0x000c, + 0x3306: 0x000c, 0x3307: 0x000c, 0x3308: 0x000c, 0x3309: 0x000c, 0x330a: 0x000c, 0x330b: 0x0001, + 0x330c: 0x0001, 0x330d: 0x0001, 0x330e: 0x0001, 0x330f: 0x0001, 0x3310: 0x0001, 0x3311: 0x0001, + 0x3312: 0x0001, 0x3313: 0x0001, 0x3314: 0x0001, 0x3315: 0x0001, 0x3316: 0x0001, 0x3317: 0x0001, + 0x3318: 0x0001, 0x3319: 0x0001, 0x331a: 0x0001, 0x331b: 0x0001, 0x331c: 0x0001, 0x331d: 0x0001, + 0x331e: 0x0001, 0x331f: 0x0001, 0x3320: 0x0001, 0x3321: 0x0001, 0x3322: 0x0001, 0x3323: 0x0001, + 0x3324: 0x0001, 0x3325: 0x0001, 0x3326: 0x0001, 0x3327: 0x0001, 0x3328: 0x0001, 0x3329: 0x0001, + 0x332a: 0x0001, 0x332b: 0x0001, 0x332c: 0x0001, 0x332d: 0x0001, 0x332e: 0x0001, 0x332f: 0x0001, + 0x3330: 0x0001, 0x3331: 0x0001, 0x3332: 0x0001, 0x3333: 0x0001, 0x3334: 0x0001, 0x3335: 0x0001, + 0x3336: 0x0001, 0x3337: 0x0001, 0x3338: 0x0001, 0x3339: 0x0001, 0x333a: 0x0001, 0x333b: 0x0001, + 0x333c: 0x0001, 0x333d: 0x0001, 0x333e: 0x0001, 0x333f: 0x0001, + // Block 0xcd, offset 0x3340 + 0x3340: 0x000d, 0x3341: 0x000d, 0x3342: 0x000d, 0x3343: 0x000d, 0x3344: 0x000d, 0x3345: 0x000d, + 0x3346: 0x000d, 0x3347: 0x000d, 0x3348: 0x000d, 0x3349: 0x000d, 0x334a: 0x000d, 0x334b: 0x000d, + 0x334c: 0x000d, 0x334d: 0x000d, 0x334e: 0x000d, 0x334f: 0x000d, 0x3350: 0x000d, 0x3351: 0x000d, + 0x3352: 0x000d, 0x3353: 0x000d, 0x3354: 0x000d, 0x3355: 0x000d, 0x3356: 0x000d, 0x3357: 0x000d, + 0x3358: 0x000d, 0x3359: 0x000d, 0x335a: 0x000d, 0x335b: 0x000d, 0x335c: 0x000d, 0x335d: 0x000d, + 0x335e: 0x000d, 0x335f: 0x000d, 0x3360: 0x000d, 0x3361: 0x000d, 0x3362: 0x000d, 0x3363: 0x000d, + 0x3364: 0x000d, 0x3365: 0x000d, 0x3366: 0x000d, 0x3367: 0x000d, 0x3368: 0x000d, 0x3369: 0x000d, + 0x336a: 0x000d, 0x336b: 0x000d, 0x336c: 0x000d, 0x336d: 0x000d, 0x336e: 0x000d, 0x336f: 0x000d, + 0x3370: 0x000a, 0x3371: 0x000a, 0x3372: 0x000d, 0x3373: 0x000d, 0x3374: 0x000d, 0x3375: 0x000d, + 0x3376: 0x000d, 0x3377: 0x000d, 0x3378: 0x000d, 0x3379: 0x000d, 0x337a: 0x000d, 0x337b: 0x000d, + 0x337c: 0x000d, 0x337d: 0x000d, 0x337e: 0x000d, 0x337f: 0x000d, + // Block 0xce, offset 0x3380 + 0x3380: 0x000a, 0x3381: 0x000a, 0x3382: 0x000a, 0x3383: 0x000a, 0x3384: 0x000a, 0x3385: 0x000a, + 0x3386: 0x000a, 0x3387: 0x000a, 0x3388: 0x000a, 0x3389: 0x000a, 0x338a: 0x000a, 0x338b: 0x000a, + 0x338c: 0x000a, 0x338d: 0x000a, 0x338e: 0x000a, 0x338f: 0x000a, 0x3390: 0x000a, 0x3391: 0x000a, + 0x3392: 0x000a, 0x3393: 0x000a, 0x3394: 0x000a, 0x3395: 0x000a, 0x3396: 0x000a, 0x3397: 0x000a, + 0x3398: 0x000a, 0x3399: 0x000a, 0x339a: 0x000a, 0x339b: 0x000a, 0x339c: 0x000a, 0x339d: 0x000a, + 0x339e: 0x000a, 0x339f: 0x000a, 0x33a0: 0x000a, 0x33a1: 0x000a, 0x33a2: 0x000a, 0x33a3: 0x000a, + 0x33a4: 0x000a, 0x33a5: 0x000a, 0x33a6: 0x000a, 0x33a7: 0x000a, 0x33a8: 0x000a, 0x33a9: 0x000a, + 0x33aa: 0x000a, 0x33ab: 0x000a, + 0x33b0: 0x000a, 0x33b1: 0x000a, 0x33b2: 0x000a, 0x33b3: 0x000a, 0x33b4: 0x000a, 0x33b5: 0x000a, + 0x33b6: 0x000a, 0x33b7: 0x000a, 0x33b8: 0x000a, 0x33b9: 0x000a, 0x33ba: 0x000a, 0x33bb: 0x000a, + 0x33bc: 0x000a, 0x33bd: 0x000a, 0x33be: 0x000a, 0x33bf: 0x000a, + // Block 0xcf, offset 0x33c0 + 0x33c0: 0x000a, 0x33c1: 0x000a, 0x33c2: 0x000a, 0x33c3: 0x000a, 0x33c4: 0x000a, 0x33c5: 0x000a, + 
0x33c6: 0x000a, 0x33c7: 0x000a, 0x33c8: 0x000a, 0x33c9: 0x000a, 0x33ca: 0x000a, 0x33cb: 0x000a, + 0x33cc: 0x000a, 0x33cd: 0x000a, 0x33ce: 0x000a, 0x33cf: 0x000a, 0x33d0: 0x000a, 0x33d1: 0x000a, + 0x33d2: 0x000a, 0x33d3: 0x000a, + 0x33e0: 0x000a, 0x33e1: 0x000a, 0x33e2: 0x000a, 0x33e3: 0x000a, + 0x33e4: 0x000a, 0x33e5: 0x000a, 0x33e6: 0x000a, 0x33e7: 0x000a, 0x33e8: 0x000a, 0x33e9: 0x000a, + 0x33ea: 0x000a, 0x33eb: 0x000a, 0x33ec: 0x000a, 0x33ed: 0x000a, 0x33ee: 0x000a, + 0x33f1: 0x000a, 0x33f2: 0x000a, 0x33f3: 0x000a, 0x33f4: 0x000a, 0x33f5: 0x000a, + 0x33f6: 0x000a, 0x33f7: 0x000a, 0x33f8: 0x000a, 0x33f9: 0x000a, 0x33fa: 0x000a, 0x33fb: 0x000a, + 0x33fc: 0x000a, 0x33fd: 0x000a, 0x33fe: 0x000a, 0x33ff: 0x000a, + // Block 0xd0, offset 0x3400 + 0x3401: 0x000a, 0x3402: 0x000a, 0x3403: 0x000a, 0x3404: 0x000a, 0x3405: 0x000a, + 0x3406: 0x000a, 0x3407: 0x000a, 0x3408: 0x000a, 0x3409: 0x000a, 0x340a: 0x000a, 0x340b: 0x000a, + 0x340c: 0x000a, 0x340d: 0x000a, 0x340e: 0x000a, 0x340f: 0x000a, 0x3411: 0x000a, + 0x3412: 0x000a, 0x3413: 0x000a, 0x3414: 0x000a, 0x3415: 0x000a, 0x3416: 0x000a, 0x3417: 0x000a, + 0x3418: 0x000a, 0x3419: 0x000a, 0x341a: 0x000a, 0x341b: 0x000a, 0x341c: 0x000a, 0x341d: 0x000a, + 0x341e: 0x000a, 0x341f: 0x000a, 0x3420: 0x000a, 0x3421: 0x000a, 0x3422: 0x000a, 0x3423: 0x000a, + 0x3424: 0x000a, 0x3425: 0x000a, 0x3426: 0x000a, 0x3427: 0x000a, 0x3428: 0x000a, 0x3429: 0x000a, + 0x342a: 0x000a, 0x342b: 0x000a, 0x342c: 0x000a, 0x342d: 0x000a, 0x342e: 0x000a, 0x342f: 0x000a, + 0x3430: 0x000a, 0x3431: 0x000a, 0x3432: 0x000a, 0x3433: 0x000a, 0x3434: 0x000a, 0x3435: 0x000a, + // Block 0xd1, offset 0x3440 + 0x3440: 0x0002, 0x3441: 0x0002, 0x3442: 0x0002, 0x3443: 0x0002, 0x3444: 0x0002, 0x3445: 0x0002, + 0x3446: 0x0002, 0x3447: 0x0002, 0x3448: 0x0002, 0x3449: 0x0002, 0x344a: 0x0002, 0x344b: 0x000a, + 0x344c: 0x000a, + // Block 0xd2, offset 0x3480 + 0x34aa: 0x000a, 0x34ab: 0x000a, + // Block 0xd3, offset 0x34c0 + 0x34c0: 0x000a, 0x34c1: 0x000a, 0x34c2: 0x000a, 0x34c3: 0x000a, 0x34c4: 0x000a, 0x34c5: 0x000a, + 0x34c6: 0x000a, 0x34c7: 0x000a, 0x34c8: 0x000a, 0x34c9: 0x000a, 0x34ca: 0x000a, 0x34cb: 0x000a, + 0x34cc: 0x000a, 0x34cd: 0x000a, 0x34ce: 0x000a, 0x34cf: 0x000a, 0x34d0: 0x000a, 0x34d1: 0x000a, + 0x34d2: 0x000a, + 0x34e0: 0x000a, 0x34e1: 0x000a, 0x34e2: 0x000a, 0x34e3: 0x000a, + 0x34e4: 0x000a, 0x34e5: 0x000a, 0x34e6: 0x000a, 0x34e7: 0x000a, 0x34e8: 0x000a, 0x34e9: 0x000a, + 0x34ea: 0x000a, 0x34eb: 0x000a, 0x34ec: 0x000a, + 0x34f0: 0x000a, 0x34f1: 0x000a, 0x34f2: 0x000a, 0x34f3: 0x000a, 0x34f4: 0x000a, 0x34f5: 0x000a, + 0x34f6: 0x000a, + // Block 0xd4, offset 0x3500 + 0x3500: 0x000a, 0x3501: 0x000a, 0x3502: 0x000a, 0x3503: 0x000a, 0x3504: 0x000a, 0x3505: 0x000a, + 0x3506: 0x000a, 0x3507: 0x000a, 0x3508: 0x000a, 0x3509: 0x000a, 0x350a: 0x000a, 0x350b: 0x000a, + 0x350c: 0x000a, 0x350d: 0x000a, 0x350e: 0x000a, 0x350f: 0x000a, 0x3510: 0x000a, 0x3511: 0x000a, + 0x3512: 0x000a, 0x3513: 0x000a, 0x3514: 0x000a, + // Block 0xd5, offset 0x3540 + 0x3540: 0x000a, 0x3541: 0x000a, 0x3542: 0x000a, 0x3543: 0x000a, 0x3544: 0x000a, 0x3545: 0x000a, + 0x3546: 0x000a, 0x3547: 0x000a, 0x3548: 0x000a, 0x3549: 0x000a, 0x354a: 0x000a, 0x354b: 0x000a, + 0x3550: 0x000a, 0x3551: 0x000a, + 0x3552: 0x000a, 0x3553: 0x000a, 0x3554: 0x000a, 0x3555: 0x000a, 0x3556: 0x000a, 0x3557: 0x000a, + 0x3558: 0x000a, 0x3559: 0x000a, 0x355a: 0x000a, 0x355b: 0x000a, 0x355c: 0x000a, 0x355d: 0x000a, + 0x355e: 0x000a, 0x355f: 0x000a, 0x3560: 0x000a, 0x3561: 0x000a, 0x3562: 0x000a, 0x3563: 0x000a, + 0x3564: 0x000a, 0x3565: 0x000a, 
0x3566: 0x000a, 0x3567: 0x000a, 0x3568: 0x000a, 0x3569: 0x000a, + 0x356a: 0x000a, 0x356b: 0x000a, 0x356c: 0x000a, 0x356d: 0x000a, 0x356e: 0x000a, 0x356f: 0x000a, + 0x3570: 0x000a, 0x3571: 0x000a, 0x3572: 0x000a, 0x3573: 0x000a, 0x3574: 0x000a, 0x3575: 0x000a, + 0x3576: 0x000a, 0x3577: 0x000a, 0x3578: 0x000a, 0x3579: 0x000a, 0x357a: 0x000a, 0x357b: 0x000a, + 0x357c: 0x000a, 0x357d: 0x000a, 0x357e: 0x000a, 0x357f: 0x000a, + // Block 0xd6, offset 0x3580 + 0x3580: 0x000a, 0x3581: 0x000a, 0x3582: 0x000a, 0x3583: 0x000a, 0x3584: 0x000a, 0x3585: 0x000a, + 0x3586: 0x000a, 0x3587: 0x000a, + 0x3590: 0x000a, 0x3591: 0x000a, + 0x3592: 0x000a, 0x3593: 0x000a, 0x3594: 0x000a, 0x3595: 0x000a, 0x3596: 0x000a, 0x3597: 0x000a, + 0x3598: 0x000a, 0x3599: 0x000a, + 0x35a0: 0x000a, 0x35a1: 0x000a, 0x35a2: 0x000a, 0x35a3: 0x000a, + 0x35a4: 0x000a, 0x35a5: 0x000a, 0x35a6: 0x000a, 0x35a7: 0x000a, 0x35a8: 0x000a, 0x35a9: 0x000a, + 0x35aa: 0x000a, 0x35ab: 0x000a, 0x35ac: 0x000a, 0x35ad: 0x000a, 0x35ae: 0x000a, 0x35af: 0x000a, + 0x35b0: 0x000a, 0x35b1: 0x000a, 0x35b2: 0x000a, 0x35b3: 0x000a, 0x35b4: 0x000a, 0x35b5: 0x000a, + 0x35b6: 0x000a, 0x35b7: 0x000a, 0x35b8: 0x000a, 0x35b9: 0x000a, 0x35ba: 0x000a, 0x35bb: 0x000a, + 0x35bc: 0x000a, 0x35bd: 0x000a, 0x35be: 0x000a, 0x35bf: 0x000a, + // Block 0xd7, offset 0x35c0 + 0x35c0: 0x000a, 0x35c1: 0x000a, 0x35c2: 0x000a, 0x35c3: 0x000a, 0x35c4: 0x000a, 0x35c5: 0x000a, + 0x35c6: 0x000a, 0x35c7: 0x000a, + 0x35d0: 0x000a, 0x35d1: 0x000a, + 0x35d2: 0x000a, 0x35d3: 0x000a, 0x35d4: 0x000a, 0x35d5: 0x000a, 0x35d6: 0x000a, 0x35d7: 0x000a, + 0x35d8: 0x000a, 0x35d9: 0x000a, 0x35da: 0x000a, 0x35db: 0x000a, 0x35dc: 0x000a, 0x35dd: 0x000a, + 0x35de: 0x000a, 0x35df: 0x000a, 0x35e0: 0x000a, 0x35e1: 0x000a, 0x35e2: 0x000a, 0x35e3: 0x000a, + 0x35e4: 0x000a, 0x35e5: 0x000a, 0x35e6: 0x000a, 0x35e7: 0x000a, 0x35e8: 0x000a, 0x35e9: 0x000a, + 0x35ea: 0x000a, 0x35eb: 0x000a, 0x35ec: 0x000a, 0x35ed: 0x000a, + // Block 0xd8, offset 0x3600 + 0x3610: 0x000a, 0x3611: 0x000a, + 0x3612: 0x000a, 0x3613: 0x000a, 0x3614: 0x000a, 0x3615: 0x000a, 0x3616: 0x000a, 0x3617: 0x000a, + 0x3618: 0x000a, 0x3619: 0x000a, 0x361a: 0x000a, 0x361b: 0x000a, 0x361c: 0x000a, 0x361d: 0x000a, + 0x361e: 0x000a, 0x3620: 0x000a, 0x3621: 0x000a, 0x3622: 0x000a, 0x3623: 0x000a, + 0x3624: 0x000a, 0x3625: 0x000a, 0x3626: 0x000a, 0x3627: 0x000a, + 0x3630: 0x000a, 0x3633: 0x000a, 0x3634: 0x000a, 0x3635: 0x000a, + 0x3636: 0x000a, 0x3637: 0x000a, 0x3638: 0x000a, 0x3639: 0x000a, 0x363a: 0x000a, 0x363b: 0x000a, + 0x363c: 0x000a, 0x363d: 0x000a, 0x363e: 0x000a, + // Block 0xd9, offset 0x3640 + 0x3640: 0x000a, 0x3641: 0x000a, 0x3642: 0x000a, 0x3643: 0x000a, 0x3644: 0x000a, 0x3645: 0x000a, + 0x3646: 0x000a, 0x3647: 0x000a, 0x3648: 0x000a, 0x3649: 0x000a, 0x364a: 0x000a, 0x364b: 0x000a, + 0x3650: 0x000a, 0x3651: 0x000a, + 0x3652: 0x000a, 0x3653: 0x000a, 0x3654: 0x000a, 0x3655: 0x000a, 0x3656: 0x000a, 0x3657: 0x000a, + 0x3658: 0x000a, 0x3659: 0x000a, 0x365a: 0x000a, 0x365b: 0x000a, 0x365c: 0x000a, 0x365d: 0x000a, + 0x365e: 0x000a, + // Block 0xda, offset 0x3680 + 0x3680: 0x000a, 0x3681: 0x000a, 0x3682: 0x000a, 0x3683: 0x000a, 0x3684: 0x000a, 0x3685: 0x000a, + 0x3686: 0x000a, 0x3687: 0x000a, 0x3688: 0x000a, 0x3689: 0x000a, 0x368a: 0x000a, 0x368b: 0x000a, + 0x368c: 0x000a, 0x368d: 0x000a, 0x368e: 0x000a, 0x368f: 0x000a, 0x3690: 0x000a, 0x3691: 0x000a, + // Block 0xdb, offset 0x36c0 + 0x36fe: 0x000b, 0x36ff: 0x000b, + // Block 0xdc, offset 0x3700 + 0x3700: 0x000b, 0x3701: 0x000b, 0x3702: 0x000b, 0x3703: 0x000b, 0x3704: 0x000b, 0x3705: 0x000b, + 
0x3706: 0x000b, 0x3707: 0x000b, 0x3708: 0x000b, 0x3709: 0x000b, 0x370a: 0x000b, 0x370b: 0x000b, + 0x370c: 0x000b, 0x370d: 0x000b, 0x370e: 0x000b, 0x370f: 0x000b, 0x3710: 0x000b, 0x3711: 0x000b, + 0x3712: 0x000b, 0x3713: 0x000b, 0x3714: 0x000b, 0x3715: 0x000b, 0x3716: 0x000b, 0x3717: 0x000b, + 0x3718: 0x000b, 0x3719: 0x000b, 0x371a: 0x000b, 0x371b: 0x000b, 0x371c: 0x000b, 0x371d: 0x000b, + 0x371e: 0x000b, 0x371f: 0x000b, 0x3720: 0x000b, 0x3721: 0x000b, 0x3722: 0x000b, 0x3723: 0x000b, + 0x3724: 0x000b, 0x3725: 0x000b, 0x3726: 0x000b, 0x3727: 0x000b, 0x3728: 0x000b, 0x3729: 0x000b, + 0x372a: 0x000b, 0x372b: 0x000b, 0x372c: 0x000b, 0x372d: 0x000b, 0x372e: 0x000b, 0x372f: 0x000b, + 0x3730: 0x000b, 0x3731: 0x000b, 0x3732: 0x000b, 0x3733: 0x000b, 0x3734: 0x000b, 0x3735: 0x000b, + 0x3736: 0x000b, 0x3737: 0x000b, 0x3738: 0x000b, 0x3739: 0x000b, 0x373a: 0x000b, 0x373b: 0x000b, + 0x373c: 0x000b, 0x373d: 0x000b, 0x373e: 0x000b, 0x373f: 0x000b, + // Block 0xdd, offset 0x3740 + 0x3740: 0x000c, 0x3741: 0x000c, 0x3742: 0x000c, 0x3743: 0x000c, 0x3744: 0x000c, 0x3745: 0x000c, + 0x3746: 0x000c, 0x3747: 0x000c, 0x3748: 0x000c, 0x3749: 0x000c, 0x374a: 0x000c, 0x374b: 0x000c, + 0x374c: 0x000c, 0x374d: 0x000c, 0x374e: 0x000c, 0x374f: 0x000c, 0x3750: 0x000c, 0x3751: 0x000c, + 0x3752: 0x000c, 0x3753: 0x000c, 0x3754: 0x000c, 0x3755: 0x000c, 0x3756: 0x000c, 0x3757: 0x000c, + 0x3758: 0x000c, 0x3759: 0x000c, 0x375a: 0x000c, 0x375b: 0x000c, 0x375c: 0x000c, 0x375d: 0x000c, + 0x375e: 0x000c, 0x375f: 0x000c, 0x3760: 0x000c, 0x3761: 0x000c, 0x3762: 0x000c, 0x3763: 0x000c, + 0x3764: 0x000c, 0x3765: 0x000c, 0x3766: 0x000c, 0x3767: 0x000c, 0x3768: 0x000c, 0x3769: 0x000c, + 0x376a: 0x000c, 0x376b: 0x000c, 0x376c: 0x000c, 0x376d: 0x000c, 0x376e: 0x000c, 0x376f: 0x000c, + 0x3770: 0x000b, 0x3771: 0x000b, 0x3772: 0x000b, 0x3773: 0x000b, 0x3774: 0x000b, 0x3775: 0x000b, + 0x3776: 0x000b, 0x3777: 0x000b, 0x3778: 0x000b, 0x3779: 0x000b, 0x377a: 0x000b, 0x377b: 0x000b, + 0x377c: 0x000b, 0x377d: 0x000b, 0x377e: 0x000b, 0x377f: 0x000b, +} + +// bidiIndex: 24 blocks, 1536 entries, 1536 bytes +// Block 0 is the zero block. 
+var bidiIndex = [1536]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, + 0xca: 0x03, 0xcb: 0x04, 0xcc: 0x05, 0xcd: 0x06, 0xce: 0x07, 0xcf: 0x08, + 0xd2: 0x09, 0xd6: 0x0a, 0xd7: 0x0b, + 0xd8: 0x0c, 0xd9: 0x0d, 0xda: 0x0e, 0xdb: 0x0f, 0xdc: 0x10, 0xdd: 0x11, 0xde: 0x12, 0xdf: 0x13, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, + 0xea: 0x07, 0xef: 0x08, + 0xf0: 0x11, 0xf1: 0x12, 0xf2: 0x12, 0xf3: 0x14, 0xf4: 0x15, + // Block 0x4, offset 0x100 + 0x120: 0x14, 0x121: 0x15, 0x122: 0x16, 0x123: 0x17, 0x124: 0x18, 0x125: 0x19, 0x126: 0x1a, 0x127: 0x1b, + 0x128: 0x1c, 0x129: 0x1d, 0x12a: 0x1c, 0x12b: 0x1e, 0x12c: 0x1f, 0x12d: 0x20, 0x12e: 0x21, 0x12f: 0x22, + 0x130: 0x23, 0x131: 0x24, 0x132: 0x1a, 0x133: 0x25, 0x134: 0x26, 0x135: 0x27, 0x137: 0x28, + 0x138: 0x29, 0x139: 0x2a, 0x13a: 0x2b, 0x13b: 0x2c, 0x13c: 0x2d, 0x13d: 0x2e, 0x13e: 0x2f, 0x13f: 0x30, + // Block 0x5, offset 0x140 + 0x140: 0x31, 0x141: 0x32, 0x142: 0x33, + 0x14d: 0x34, 0x14e: 0x35, + 0x150: 0x36, + 0x15a: 0x37, 0x15c: 0x38, 0x15d: 0x39, 0x15e: 0x3a, 0x15f: 0x3b, + 0x160: 0x3c, 0x162: 0x3d, 0x164: 0x3e, 0x165: 0x3f, 0x167: 0x40, + 0x168: 0x41, 0x169: 0x42, 0x16a: 0x43, 0x16c: 0x44, 0x16d: 0x45, 0x16e: 0x46, 0x16f: 0x47, + 0x170: 0x48, 0x173: 0x49, 0x177: 0x4a, + 0x17e: 0x4b, 0x17f: 0x4c, + // Block 0x6, offset 0x180 + 0x180: 0x4d, 0x181: 0x4e, 0x182: 0x4f, 0x183: 0x50, 0x184: 0x51, 0x185: 0x52, 0x186: 0x53, 0x187: 0x54, + 0x188: 0x55, 0x189: 0x54, 0x18a: 0x54, 0x18b: 0x54, 0x18c: 0x56, 0x18d: 0x57, 0x18e: 0x58, 0x18f: 0x59, + 0x190: 0x5a, 0x191: 0x5b, 0x192: 0x5c, 0x193: 0x5d, 0x194: 0x54, 0x195: 0x54, 0x196: 0x54, 0x197: 0x54, + 0x198: 0x54, 0x199: 0x54, 0x19a: 0x5e, 0x19b: 0x54, 0x19c: 0x54, 0x19d: 0x5f, 0x19e: 0x54, 0x19f: 0x60, + 0x1a4: 0x54, 0x1a5: 0x54, 0x1a6: 0x61, 0x1a7: 0x62, + 0x1a8: 0x54, 0x1a9: 0x54, 0x1aa: 0x54, 0x1ab: 0x54, 0x1ac: 0x54, 0x1ad: 0x63, 0x1ae: 0x64, 0x1af: 0x65, + 0x1b3: 0x66, 0x1b5: 0x67, 0x1b7: 0x68, + 0x1b8: 0x69, 0x1b9: 0x6a, 0x1ba: 0x6b, 0x1bb: 0x6c, 0x1bc: 0x54, 0x1bd: 0x54, 0x1be: 0x54, 0x1bf: 0x6d, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x6e, 0x1c2: 0x6f, 0x1c3: 0x70, 0x1c7: 0x71, + 0x1c8: 0x72, 0x1c9: 0x73, 0x1ca: 0x74, 0x1cb: 0x75, 0x1cd: 0x76, 0x1cf: 0x77, + // Block 0x8, offset 0x200 + 0x237: 0x54, + // Block 0x9, offset 0x240 + 0x252: 0x78, 0x253: 0x79, + 0x258: 0x7a, 0x259: 0x7b, 0x25a: 0x7c, 0x25b: 0x7d, 0x25c: 0x7e, 0x25e: 0x7f, + 0x260: 0x80, 0x261: 0x81, 0x263: 0x82, 0x264: 0x83, 0x265: 0x84, 0x266: 0x85, 0x267: 0x86, + 0x268: 0x87, 0x269: 0x88, 0x26a: 0x89, 0x26b: 0x8a, 0x26f: 0x8b, + // Block 0xa, offset 0x280 + 0x2ac: 0x8c, 0x2ad: 0x8d, 0x2ae: 0x0e, 0x2af: 0x0e, + 0x2b0: 0x0e, 0x2b1: 0x0e, 0x2b2: 0x0e, 0x2b3: 0x0e, 0x2b4: 0x8e, 0x2b5: 0x0e, 0x2b6: 0x0e, 0x2b7: 0x8f, + 0x2b8: 0x90, 0x2b9: 0x91, 0x2ba: 0x0e, 0x2bb: 0x92, 0x2bc: 0x93, 0x2bd: 0x94, 0x2bf: 0x95, + // Block 0xb, offset 0x2c0 + 0x2c4: 0x96, 0x2c5: 0x54, 0x2c6: 0x97, 0x2c7: 0x98, + 0x2cb: 0x99, 0x2cd: 0x9a, + 0x2e0: 0x9b, 0x2e1: 0x9b, 0x2e2: 0x9b, 0x2e3: 0x9b, 0x2e4: 0x9c, 0x2e5: 0x9b, 0x2e6: 0x9b, 0x2e7: 0x9b, + 0x2e8: 0x9d, 0x2e9: 0x9b, 0x2ea: 0x9b, 0x2eb: 0x9e, 0x2ec: 0x9f, 0x2ed: 0x9b, 0x2ee: 0x9b, 0x2ef: 0x9b, + 0x2f0: 0x9b, 0x2f1: 0x9b, 0x2f2: 0x9b, 0x2f3: 0x9b, 0x2f4: 0x9b, 0x2f5: 0x9b, 0x2f6: 0x9b, 0x2f7: 0x9b, + 0x2f8: 0x9b, 0x2f9: 0xa0, 0x2fa: 0x9b, 0x2fb: 0x9b, 0x2fc: 0x9b, 0x2fd: 0x9b, 0x2fe: 0x9b, 0x2ff: 0x9b, + // Block 0xc, offset 0x300 + 0x300: 0xa1, 0x301: 0xa2, 0x302: 0xa3, 0x304: 0xa4, 0x305: 0xa5, 
0x306: 0xa6, 0x307: 0xa7, + 0x308: 0xa8, 0x30b: 0xa9, 0x30c: 0xaa, 0x30d: 0xab, + 0x310: 0xac, 0x311: 0xad, 0x312: 0xae, 0x313: 0xaf, 0x316: 0xb0, 0x317: 0xb1, + 0x318: 0xb2, 0x319: 0xb3, 0x31a: 0xb4, 0x31c: 0xb5, + 0x330: 0xb6, 0x332: 0xb7, + // Block 0xd, offset 0x340 + 0x36b: 0xb8, 0x36c: 0xb9, + 0x37e: 0xba, + // Block 0xe, offset 0x380 + 0x3b2: 0xbb, + // Block 0xf, offset 0x3c0 + 0x3c5: 0xbc, 0x3c6: 0xbd, + 0x3c8: 0x54, 0x3c9: 0xbe, 0x3cc: 0x54, 0x3cd: 0xbf, + 0x3db: 0xc0, 0x3dc: 0xc1, 0x3dd: 0xc2, 0x3de: 0xc3, 0x3df: 0xc4, + 0x3e8: 0xc5, 0x3e9: 0xc6, 0x3ea: 0xc7, + // Block 0x10, offset 0x400 + 0x400: 0xc8, + 0x420: 0x9b, 0x421: 0x9b, 0x422: 0x9b, 0x423: 0xc9, 0x424: 0x9b, 0x425: 0xca, 0x426: 0x9b, 0x427: 0x9b, + 0x428: 0x9b, 0x429: 0x9b, 0x42a: 0x9b, 0x42b: 0x9b, 0x42c: 0x9b, 0x42d: 0x9b, 0x42e: 0x9b, 0x42f: 0x9b, + 0x430: 0x9b, 0x431: 0x9b, 0x432: 0x9b, 0x433: 0x9b, 0x434: 0x9b, 0x435: 0x9b, 0x436: 0x9b, 0x437: 0x9b, + 0x438: 0x0e, 0x439: 0x0e, 0x43a: 0x0e, 0x43b: 0xcb, 0x43c: 0x9b, 0x43d: 0x9b, 0x43e: 0x9b, 0x43f: 0x9b, + // Block 0x11, offset 0x440 + 0x440: 0xcc, 0x441: 0x54, 0x442: 0xcd, 0x443: 0xce, 0x444: 0xcf, 0x445: 0xd0, + 0x44c: 0x54, 0x44d: 0x54, 0x44e: 0x54, 0x44f: 0x54, + 0x450: 0x54, 0x451: 0x54, 0x452: 0x54, 0x453: 0x54, 0x454: 0x54, 0x455: 0x54, 0x456: 0x54, 0x457: 0x54, + 0x458: 0x54, 0x459: 0x54, 0x45a: 0x54, 0x45b: 0xd1, 0x45c: 0x54, 0x45d: 0x6c, 0x45e: 0x54, 0x45f: 0xd2, + 0x460: 0xd3, 0x461: 0xd4, 0x462: 0xd5, 0x464: 0xd6, 0x465: 0xd7, 0x466: 0xd8, 0x467: 0x36, + 0x47f: 0xd9, + // Block 0x12, offset 0x480 + 0x4bf: 0xd9, + // Block 0x13, offset 0x4c0 + 0x4d0: 0x09, 0x4d1: 0x0a, 0x4d6: 0x0b, + 0x4db: 0x0c, 0x4dd: 0x0d, 0x4de: 0x0e, 0x4df: 0x0f, + 0x4ef: 0x10, + 0x4ff: 0x10, + // Block 0x14, offset 0x500 + 0x50f: 0x10, + 0x51f: 0x10, + 0x52f: 0x10, + 0x53f: 0x10, + // Block 0x15, offset 0x540 + 0x540: 0xda, 0x541: 0xda, 0x542: 0xda, 0x543: 0xda, 0x544: 0x05, 0x545: 0x05, 0x546: 0x05, 0x547: 0xdb, + 0x548: 0xda, 0x549: 0xda, 0x54a: 0xda, 0x54b: 0xda, 0x54c: 0xda, 0x54d: 0xda, 0x54e: 0xda, 0x54f: 0xda, + 0x550: 0xda, 0x551: 0xda, 0x552: 0xda, 0x553: 0xda, 0x554: 0xda, 0x555: 0xda, 0x556: 0xda, 0x557: 0xda, + 0x558: 0xda, 0x559: 0xda, 0x55a: 0xda, 0x55b: 0xda, 0x55c: 0xda, 0x55d: 0xda, 0x55e: 0xda, 0x55f: 0xda, + 0x560: 0xda, 0x561: 0xda, 0x562: 0xda, 0x563: 0xda, 0x564: 0xda, 0x565: 0xda, 0x566: 0xda, 0x567: 0xda, + 0x568: 0xda, 0x569: 0xda, 0x56a: 0xda, 0x56b: 0xda, 0x56c: 0xda, 0x56d: 0xda, 0x56e: 0xda, 0x56f: 0xda, + 0x570: 0xda, 0x571: 0xda, 0x572: 0xda, 0x573: 0xda, 0x574: 0xda, 0x575: 0xda, 0x576: 0xda, 0x577: 0xda, + 0x578: 0xda, 0x579: 0xda, 0x57a: 0xda, 0x57b: 0xda, 0x57c: 0xda, 0x57d: 0xda, 0x57e: 0xda, 0x57f: 0xda, + // Block 0x16, offset 0x580 + 0x58f: 0x10, + 0x59f: 0x10, + 0x5a0: 0x13, + 0x5af: 0x10, + 0x5bf: 0x10, + // Block 0x17, offset 0x5c0 + 0x5cf: 0x10, +} + +// Total table size 15800 bytes (15KiB); checksum: F50EF68C diff --git a/vendor/golang.org/x/text/unicode/bidi/trieval.go b/vendor/golang.org/x/text/unicode/bidi/trieval.go new file mode 100644 index 000000000..bebd855ef --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/trieval.go @@ -0,0 +1,60 @@ +// This file was generated by go generate; DO NOT EDIT + +package bidi + +// Class is the Unicode BiDi class. Each rune has a single class. 
+type Class uint + +const ( + L Class = iota // LeftToRight + R // RightToLeft + EN // EuropeanNumber + ES // EuropeanSeparator + ET // EuropeanTerminator + AN // ArabicNumber + CS // CommonSeparator + B // ParagraphSeparator + S // SegmentSeparator + WS // WhiteSpace + ON // OtherNeutral + BN // BoundaryNeutral + NSM // NonspacingMark + AL // ArabicLetter + Control // Control LRO - PDI + + numClass + + LRO // LeftToRightOverride + RLO // RightToLeftOverride + LRE // LeftToRightEmbedding + RLE // RightToLeftEmbedding + PDF // PopDirectionalFormat + LRI // LeftToRightIsolate + RLI // RightToLeftIsolate + FSI // FirstStrongIsolate + PDI // PopDirectionalIsolate + + unknownClass = ^Class(0) +) + +var controlToClass = map[rune]Class{ + 0x202D: LRO, // LeftToRightOverride, + 0x202E: RLO, // RightToLeftOverride, + 0x202A: LRE, // LeftToRightEmbedding, + 0x202B: RLE, // RightToLeftEmbedding, + 0x202C: PDF, // PopDirectionalFormat, + 0x2066: LRI, // LeftToRightIsolate, + 0x2067: RLI, // RightToLeftIsolate, + 0x2068: FSI, // FirstStrongIsolate, + 0x2069: PDI, // PopDirectionalIsolate, +} + +// A trie entry has the following bits: +// 7..5 XOR mask for brackets +// 4 1: Bracket open, 0: Bracket close +// 3..0 Class type + +const ( + openMask = 0x10 + xorMaskShift = 5 +) diff --git a/vendor/golang.org/x/text/width/gen.go b/vendor/golang.org/x/text/width/gen.go new file mode 100644 index 000000000..03d9f99ad --- /dev/null +++ b/vendor/golang.org/x/text/width/gen.go @@ -0,0 +1,115 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// This program generates the trie for width operations. The generated table +// includes width category information as well as the normalization mappings. +package main + +import ( + "bytes" + "fmt" + "io" + "log" + "math" + "unicode/utf8" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/triegen" +) + +// See gen_common.go for flags. + +func main() { + gen.Init() + genTables() + genTests() + gen.Repackage("gen_trieval.go", "trieval.go", "width") + gen.Repackage("gen_common.go", "common_test.go", "width") +} + +func genTables() { + t := triegen.NewTrie("width") + // fold and inverse mappings. See mapComment for a description of the format + // of each entry. Add dummy value to make an index of 0 mean no mapping. + inverse := [][4]byte{{}} + mapping := map[[4]byte]int{[4]byte{}: 0} + + getWidthData(func(r rune, tag elem, alt rune) { + idx := 0 + if alt != 0 { + var buf [4]byte + buf[0] = byte(utf8.EncodeRune(buf[1:], alt)) + s := string(r) + buf[buf[0]] ^= s[len(s)-1] + var ok bool + if idx, ok = mapping[buf]; !ok { + idx = len(mapping) + if idx > math.MaxUint8 { + log.Fatalf("Index %d does not fit in a byte.", idx) + } + mapping[buf] = idx + inverse = append(inverse, buf) + } + } + t.Insert(r, uint64(tag|elem(idx))) + }) + + w := &bytes.Buffer{} + gen.WriteUnicodeVersion(w) + + sz, err := t.Gen(w) + if err != nil { + log.Fatal(err) + } + + sz += writeMappings(w, inverse) + + fmt.Fprintf(w, "// Total table size %d bytes (%dKiB)\n", sz, sz/1024) + + gen.WriteGoFile(*outputFile, "width", w.Bytes()) +} + +const inverseDataComment = ` +// inverseData contains 4-byte entries of the following format: +// <length> <modified UTF-8-encoded rune> <0 padding> +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. 
Mappings often have the following +// pattern: +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// { 0x01, 0xE0, 0x00, 0x00 } +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// E0 ^ A1 = 41. +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// E0 ^ A2 = 42. +// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8.` + +func writeMappings(w io.Writer, data [][4]byte) int { + fmt.Fprintln(w, inverseDataComment) + fmt.Fprintf(w, "var inverseData = [%d][4]byte{\n", len(data)) + for _, x := range data { + fmt.Fprintf(w, "{ 0x%02x, 0x%02x, 0x%02x, 0x%02x },\n", x[0], x[1], x[2], x[3]) + } + fmt.Fprintln(w, "}") + return len(data) * 4 +} + +func genTests() { + w := &bytes.Buffer{} + fmt.Fprintf(w, "\nvar mapRunes = map[rune]struct{r rune; e elem}{\n") + getWidthData(func(r rune, tag elem, alt rune) { + if alt != 0 { + fmt.Fprintf(w, "\t0x%X: {0x%X, 0x%X},\n", r, alt, tag) + } + }) + fmt.Fprintln(w, "}") + gen.WriteGoFile("runes_test.go", "width", w.Bytes()) +} diff --git a/vendor/golang.org/x/text/width/gen_common.go b/vendor/golang.org/x/text/width/gen_common.go new file mode 100644 index 000000000..601e75268 --- /dev/null +++ b/vendor/golang.org/x/text/width/gen_common.go @@ -0,0 +1,96 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// This code is shared between the main code generator and the test code. + +import ( + "flag" + "log" + "strconv" + "strings" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/ucd" +) + +var ( + outputFile = flag.String("out", "tables.go", "output file") +) + +var typeMap = map[string]elem{ + "A": tagAmbiguous, + "N": tagNeutral, + "Na": tagNarrow, + "W": tagWide, + "F": tagFullwidth, + "H": tagHalfwidth, +} + +// getWidthData calls f for every entry for which it is defined. +// +// f may be called multiple times for the same rune. The last call to f is the +// correct value. f is not called for all runes. The default tag type is +// Neutral. +func getWidthData(f func(r rune, tag elem, alt rune)) { + // Set the default values for Unified Ideographs. In line with Annex 11, + // we encode full ranges instead of the defined runes in Unified_Ideograph. + for _, b := range []struct{ lo, hi rune }{ + {0x4E00, 0x9FFF}, // the CJK Unified Ideographs block, + {0x3400, 0x4DBF}, // the CJK Unified Ideographs Externsion A block, + {0xF900, 0xFAFF}, // the CJK Compatibility Ideographs block, + {0x20000, 0x2FFFF}, // the Supplementary Ideographic Plane, + {0x30000, 0x3FFFF}, // the Tertiary Ideographic Plane, + } { + for r := b.lo; r <= b.hi; r++ { + f(r, tagWide, 0) + } + } + + inverse := map[rune]rune{} + maps := map[string]bool{ + "<wide>": true, + "<narrow>": true, + } + + // We cannot reuse package norm's decomposition, as we need an unexpanded + // decomposition. We make use of the opportunity to verify that the + // decomposition type is as expected. 
+ ucd.Parse(gen.OpenUCDFile("UnicodeData.txt"), func(p *ucd.Parser) { + r := p.Rune(0) + s := strings.SplitN(p.String(ucd.DecompMapping), " ", 2) + if !maps[s[0]] { + return + } + x, err := strconv.ParseUint(s[1], 16, 32) + if err != nil { + log.Fatalf("Error parsing rune %q", s[1]) + } + if inverse[r] != 0 || inverse[rune(x)] != 0 { + log.Fatalf("Circular dependency in mapping between %U and %U", r, x) + } + inverse[r] = rune(x) + inverse[rune(x)] = r + }) + + // <rune range>;<type> + ucd.Parse(gen.OpenUCDFile("EastAsianWidth.txt"), func(p *ucd.Parser) { + tag, ok := typeMap[p.String(1)] + if !ok { + log.Fatalf("Unknown width type %q", p.String(1)) + } + r := p.Rune(0) + alt, ok := inverse[r] + if tag == tagFullwidth || tag == tagHalfwidth && r != wonSign { + tag |= tagNeedsFold + if !ok { + log.Fatalf("Narrow or wide rune %U has no decomposition", r) + } + } + f(r, tag, alt) + }) +} diff --git a/vendor/golang.org/x/text/width/gen_trieval.go b/vendor/golang.org/x/text/width/gen_trieval.go new file mode 100644 index 000000000..c17334aa6 --- /dev/null +++ b/vendor/golang.org/x/text/width/gen_trieval.go @@ -0,0 +1,34 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// elem is an entry of the width trie. The high byte is used to encode the type +// of the rune. The low byte is used to store the index to a mapping entry in +// the inverseData array. +type elem uint16 + +const ( + tagNeutral elem = iota << typeShift + tagAmbiguous + tagWide + tagNarrow + tagFullwidth + tagHalfwidth +) + +const ( + numTypeBits = 3 + typeShift = 16 - numTypeBits + + // tagNeedsFold is true for all fullwidth and halfwidth runes except for + // the Won sign U+20A9. + tagNeedsFold = 0x1000 + + // The Korean Won sign is halfwidth, but SHOULD NOT be mapped to a wide + // variant. + wonSign rune = 0x20A9 +) diff --git a/vendor/golang.org/x/text/width/kind_string.go b/vendor/golang.org/x/text/width/kind_string.go new file mode 100644 index 000000000..ab4fee542 --- /dev/null +++ b/vendor/golang.org/x/text/width/kind_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=Kind"; DO NOT EDIT + +package width + +import "fmt" + +const _Kind_name = "NeutralEastAsianAmbiguousEastAsianWideEastAsianNarrowEastAsianFullwidthEastAsianHalfwidth" + +var _Kind_index = [...]uint8{0, 7, 25, 38, 53, 71, 89} + +func (i Kind) String() string { + if i < 0 || i >= Kind(len(_Kind_index)-1) { + return fmt.Sprintf("Kind(%d)", i) + } + return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] +} diff --git a/vendor/golang.org/x/text/width/tables.go b/vendor/golang.org/x/text/width/tables.go new file mode 100644 index 000000000..242da0fdb --- /dev/null +++ b/vendor/golang.org/x/text/width/tables.go @@ -0,0 +1,1284 @@ +// This file was generated by go generate; DO NOT EDIT + +package width + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "9.0.0" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. 
+ case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// widthTrie. Total size: 14080 bytes (13.75 KiB). Checksum: 3b8aeb3dc03667a3. +type widthTrie struct{} + +func newWidthTrie(i int) *widthTrie { + return &widthTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { + switch { + default: + return uint16(widthValues[n<<6+uint32(b)]) + } +} + +// widthValues: 99 blocks, 6336 entries, 12672 bytes +// The third block is the zero block. +var widthValues = [6336]uint16{ + // Block 0x0, offset 0x0 + 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, + 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, + 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, + 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, + 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, + 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, + // Block 0x1, offset 0x40 + 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, + 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, + 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, + 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, + 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, + 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, + 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, + 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, + 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, + 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, + 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, + 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, + 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, + 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, + 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, + 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, + // Block 0x4, offset 0x100 + 0x106: 0x2000, + 0x110: 0x2000, + 0x117: 0x2000, + 0x118: 0x2000, + 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, + 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, + 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, + 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, + 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, + 0x13c: 0x2000, 0x13e: 0x2000, + // Block 0x5, offset 0x140 + 0x141: 0x2000, + 0x151: 0x2000, + 
0x153: 0x2000, + 0x15b: 0x2000, + 0x166: 0x2000, 0x167: 0x2000, + 0x16b: 0x2000, + 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, + 0x178: 0x2000, + 0x17f: 0x2000, + // Block 0x6, offset 0x180 + 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, + 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, + 0x18d: 0x2000, + 0x192: 0x2000, 0x193: 0x2000, + 0x1a6: 0x2000, 0x1a7: 0x2000, + 0x1ab: 0x2000, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x2000, 0x1d0: 0x2000, + 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, + 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, + // Block 0x8, offset 0x200 + 0x211: 0x2000, + 0x221: 0x2000, + // Block 0x9, offset 0x240 + 0x244: 0x2000, + 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, + 0x24d: 0x2000, 0x250: 0x2000, + 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, + 0x25f: 0x2000, + // Block 0xa, offset 0x280 + 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, + 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, + 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, + 0x292: 0x2000, 0x293: 0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, + 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, + 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, + 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, + 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, + 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, + 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, + 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, + 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, + 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, + 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, + 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, + 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, + 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, + 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, + // Block 0xc, offset 0x300 + 0x311: 0x2000, + 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, + 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, + 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, + 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, + 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, + 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, + 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, + // Block 0xd, offset 0x340 + 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, + 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, + // Block 0xe, offset 0x380 + 0x381: 0x2000, + 0x390: 0x2000, 0x391: 0x2000, + 0x392: 0x2000, 0x393: 0x2000, 
0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, + 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, + 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, + 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, + 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, + 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, + 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, + 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, + 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, + 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, + // Block 0x10, offset 0x400 + 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, + 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, + 0x40c: 0x4000, 0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, + 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, + 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, + 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, + 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, + 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, + 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, + 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, + 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, + // Block 0x11, offset 0x440 + 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, + 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, + 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, + 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, + 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, + 0x45e: 0x4000, 0x45f: 0x4000, + // Block 0x12, offset 0x480 + 0x490: 0x2000, + 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, + 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, + 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, + 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, + 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, + 0x4bb: 0x2000, + 0x4be: 0x2000, + // Block 0x13, offset 0x4c0 + 0x4f4: 0x2000, + 0x4ff: 0x2000, + // Block 0x14, offset 0x500 + 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, + 0x529: 0xa009, + 0x52c: 0x2000, + // Block 0x15, offset 0x540 + 0x543: 0x2000, 0x545: 0x2000, + 0x549: 0x2000, + 0x553: 0x2000, 0x556: 0x2000, + 0x561: 0x2000, 0x562: 0x2000, + 0x566: 0x2000, + 0x56b: 0x2000, + // Block 0x16, offset 0x580 + 0x593: 0x2000, 0x594: 0x2000, + 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, + 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, + 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, + 0x5aa: 0x2000, 0x5ab: 0x2000, + 
0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, + 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, + // Block 0x17, offset 0x5c0 + 0x5c9: 0x2000, + 0x5d0: 0x200a, 0x5d1: 0x200b, + 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, + 0x5d8: 0x2000, 0x5d9: 0x2000, + 0x5f8: 0x2000, 0x5f9: 0x2000, + // Block 0x18, offset 0x600 + 0x612: 0x2000, 0x614: 0x2000, + 0x627: 0x2000, + // Block 0x19, offset 0x640 + 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, + 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, + 0x64f: 0x2000, 0x651: 0x2000, + 0x655: 0x2000, + 0x65a: 0x2000, 0x65d: 0x2000, + 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, + 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, + 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, + 0x674: 0x2000, 0x675: 0x2000, + 0x676: 0x2000, 0x677: 0x2000, + 0x67c: 0x2000, 0x67d: 0x2000, + // Block 0x1a, offset 0x680 + 0x688: 0x2000, + 0x68c: 0x2000, + 0x692: 0x2000, + 0x6a0: 0x2000, 0x6a1: 0x2000, + 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, + 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, + // Block 0x1b, offset 0x6c0 + 0x6c2: 0x2000, 0x6c3: 0x2000, + 0x6c6: 0x2000, 0x6c7: 0x2000, + 0x6d5: 0x2000, + 0x6d9: 0x2000, + 0x6e5: 0x2000, + 0x6ff: 0x2000, + // Block 0x1c, offset 0x700 + 0x712: 0x2000, + 0x71a: 0x4000, 0x71b: 0x4000, + 0x729: 0x4000, + 0x72a: 0x4000, + // Block 0x1d, offset 0x740 + 0x769: 0x4000, + 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, + 0x770: 0x4000, 0x773: 0x4000, + // Block 0x1e, offset 0x780 + 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, + 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, + 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, + 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, + 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, + 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, + 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, + 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, + 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, + 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, + 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, + 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, + 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, + 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, + // Block 0x20, offset 0x800 + 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, + 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, + 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, + 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, + 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 
0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, + 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, + 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, + 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, + 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, + 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, + 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, + // Block 0x21, offset 0x840 + 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, + 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, + 0x850: 0x2000, 0x851: 0x2000, + 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, + 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, + 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, + 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, + 0x86a: 0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, + 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, + // Block 0x22, offset 0x880 + 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, + 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, + 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, + 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, + 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, + 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, + 0x8b2: 0x2000, 0x8b3: 0x2000, + 0x8b6: 0x2000, 0x8b7: 0x2000, + 0x8bc: 0x2000, 0x8bd: 0x2000, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x2000, 0x8c1: 0x2000, + 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, + 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, + 0x8e2: 0x2000, 0x8e3: 0x2000, + 0x8e4: 0x2000, 0x8e5: 0x2000, + 0x8ef: 0x2000, + 0x8fd: 0x4000, 0x8fe: 0x4000, + // Block 0x24, offset 0x900 + 0x905: 0x2000, + 0x906: 0x2000, 0x909: 0x2000, + 0x90e: 0x2000, 0x90f: 0x2000, + 0x914: 0x4000, 0x915: 0x4000, + 0x91c: 0x2000, + 0x91e: 0x2000, + // Block 0x25, offset 0x940 + 0x940: 0x2000, 0x942: 0x2000, + 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, + 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, + 0x952: 0x4000, 0x953: 0x4000, + 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, + 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, + 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, + 0x97f: 0x4000, + // Block 0x26, offset 0x980 + 0x993: 0x4000, + 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, + 0x9aa: 0x4000, 0x9ab: 0x4000, + 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, + // Block 0x27, offset 0x9c0 + 0x9c4: 0x4000, 0x9c5: 0x4000, + 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, + 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, + 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, + 0x9e8: 0x2000, 0x9e9: 0x2000, + 
0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, + 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, + 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, + 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, + // Block 0x28, offset 0xa00 + 0xa05: 0x4000, + 0xa0a: 0x4000, 0xa0b: 0x4000, + 0xa28: 0x4000, + 0xa3d: 0x2000, + // Block 0x29, offset 0xa40 + 0xa4c: 0x4000, 0xa4e: 0x4000, + 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, + 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, + 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, + // Block 0x2a, offset 0xa80 + 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, + 0xab0: 0x4000, + 0xabf: 0x4000, + // Block 0x2b, offset 0xac0 + 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, + 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, + // Block 0x2c, offset 0xb00 + 0xb05: 0x6010, + 0xb06: 0x6011, + // Block 0x2d, offset 0xb40 + 0xb5b: 0x4000, 0xb5c: 0x4000, + // Block 0x2e, offset 0xb80 + 0xb90: 0x4000, + 0xb95: 0x4000, 0xb96: 0x2000, 0xb97: 0x2000, + 0xb98: 0x2000, 0xb99: 0x2000, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, + 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, + 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, + 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, + 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, + 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, + 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, + 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, + 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, + 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, + 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, + // Block 0x30, offset 0xc00 + 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, + 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, + 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, + 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, + 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, + 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, + 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, + 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, + 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, + // Block 0x31, offset 0xc40 + 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, + 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, + 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, + 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, + 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, + 0xc76: 0x4000, 0xc77: 
0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, + // Block 0x32, offset 0xc80 + 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, + 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, + 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, + 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, + 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, + 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, + 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, + 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, + 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, + 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, + 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, + // Block 0x33, offset 0xcc0 + 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, + 0xcc6: 0x4000, 0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, + 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, + 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, + 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, + 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, + 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, + 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, + 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, + 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, + 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, + // Block 0x34, offset 0xd00 + 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, + 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, + 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, + 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, + 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, + 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, + 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, + 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, + 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, + 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, + 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, + // Block 0x35, offset 0xd40 + 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, + 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, + 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, + 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, + 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, + 0xd5e: 0x4033, 
0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, + 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, + 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, + 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, + // Block 0x36, offset 0xd80 + 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, + 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, + 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, + 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, + 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, + 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, + 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, + 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, + 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, + 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, + 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, + 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, + 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, + 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, + // Block 0x38, offset 0xe00 + 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, + 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, + 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, + 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, + 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, + 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, + 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, + 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, + 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, + 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, + // Block 0x39, offset 0xe40 + 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, + 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, + 0xe4c: 
0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, + 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, + 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, + 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, + 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, + 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, + 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, + // Block 0x3a, offset 0xe80 + 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, + 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, + 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, + 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, + 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, + 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000, + 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, + 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, + 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, + 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, + 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, + // Block 0x3b, offset 0xec0 + 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, + 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, + 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, + 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, + 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, + 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, + 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, + 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, + 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, + 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, + 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, + // Block 0x3c, offset 0xf00 + 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, + 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, + 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, + 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, + 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, + 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, + 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, + 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, + 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, + 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 
0xf3b: 0x4000, + 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, + // Block 0x3d, offset 0xf40 + 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, + 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, + 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, + 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, + 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, + 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, + 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, + 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, + 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, + 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, + 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, + // Block 0x3e, offset 0xf80 + 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, + 0xf86: 0x4000, + // Block 0x3f, offset 0xfc0 + 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, + 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, + 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, + 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, + 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, + 0xffc: 0x4000, + // Block 0x40, offset 0x1000 + 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, + 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, + 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, + 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, + 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, + 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, + // Block 0x41, offset 0x1040 + 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, + 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, + 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, + 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, + 0x1058: 0x4000, 0x1059: 0x4000, + 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, + 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, + 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, + // Block 0x42, offset 0x1080 + 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, + 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, + 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, + 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, + 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, + 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 
0x10a2: 0x4000, 0x10a3: 0x4000, + 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, + 0x10aa: 0x4000, 0x10ab: 0x4000, + // Block 0x43, offset 0x10c0 + 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, + 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, + 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, + 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, + 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, + 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, + 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, + 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, + 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, + 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, + 0x10fc: 0x9049, 0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049, + // Block 0x44, offset 0x1100 + 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, + 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, + 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, + 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, + 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, + 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, + 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, + 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, + 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, + 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, + 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, + // Block 0x45, offset 0x1140 + 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, + 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, + 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, + 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, + 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, + 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, + 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, + 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, + 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, + 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, + 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, + // Block 0x46, offset 0x1180 + 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, + 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, + 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 
0xb080, 0x118f: 0xb07f, + 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, + 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, + 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, + 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, + 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, + 0x11bd: 0x2000, + // Block 0x47, offset 0x11c0 + 0x11e0: 0x4000, + // Block 0x48, offset 0x1200 + 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, + 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, + 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, + 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, + 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, + 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, + 0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000, + 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, + // Block 0x49, offset 0x1240 + 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, + 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, + 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, + 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, + 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, + 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, + 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, + 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, + 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, + // Block 0x4a, offset 0x1280 + 0x1280: 0x4000, 0x1281: 0x4000, + // Block 0x4b, offset 0x12c0 + 0x12c4: 0x4000, + // Block 0x4c, offset 0x1300 + 0x130f: 0x4000, + // Block 0x4d, offset 0x1340 + 0x1340: 0x2000, 0x1341: 0x2000, 0x1342: 0x2000, 0x1343: 0x2000, 0x1344: 0x2000, 0x1345: 0x2000, + 0x1346: 0x2000, 0x1347: 0x2000, 0x1348: 0x2000, 0x1349: 0x2000, 0x134a: 0x2000, + 0x1350: 0x2000, 0x1351: 0x2000, + 0x1352: 0x2000, 0x1353: 0x2000, 0x1354: 0x2000, 0x1355: 0x2000, 0x1356: 0x2000, 0x1357: 0x2000, + 0x1358: 0x2000, 0x1359: 0x2000, 0x135a: 0x2000, 0x135b: 0x2000, 0x135c: 0x2000, 0x135d: 0x2000, + 0x135e: 0x2000, 0x135f: 0x2000, 0x1360: 0x2000, 0x1361: 0x2000, 0x1362: 0x2000, 0x1363: 0x2000, + 0x1364: 0x2000, 0x1365: 0x2000, 0x1366: 0x2000, 0x1367: 0x2000, 0x1368: 0x2000, 0x1369: 0x2000, + 0x136a: 0x2000, 0x136b: 0x2000, 0x136c: 0x2000, 0x136d: 0x2000, + 0x1370: 0x2000, 0x1371: 0x2000, 0x1372: 0x2000, 0x1373: 0x2000, 0x1374: 0x2000, 0x1375: 0x2000, + 0x1376: 0x2000, 0x1377: 0x2000, 0x1378: 0x2000, 0x1379: 0x2000, 0x137a: 0x2000, 0x137b: 0x2000, + 0x137c: 0x2000, 0x137d: 0x2000, 0x137e: 0x2000, 0x137f: 0x2000, + // Block 0x4e, offset 0x1380 + 0x1380: 0x2000, 0x1381: 0x2000, 0x1382: 0x2000, 0x1383: 0x2000, 0x1384: 0x2000, 0x1385: 0x2000, + 0x1386: 0x2000, 0x1387: 0x2000, 0x1388: 0x2000, 0x1389: 0x2000, 0x138a: 0x2000, 0x138b: 0x2000, + 0x138c: 0x2000, 0x138d: 0x2000, 0x138e: 0x2000, 0x138f: 0x2000, 0x1390: 0x2000, 0x1391: 0x2000, 
+ 0x1392: 0x2000, 0x1393: 0x2000, 0x1394: 0x2000, 0x1395: 0x2000, 0x1396: 0x2000, 0x1397: 0x2000, + 0x1398: 0x2000, 0x1399: 0x2000, 0x139a: 0x2000, 0x139b: 0x2000, 0x139c: 0x2000, 0x139d: 0x2000, + 0x139e: 0x2000, 0x139f: 0x2000, 0x13a0: 0x2000, 0x13a1: 0x2000, 0x13a2: 0x2000, 0x13a3: 0x2000, + 0x13a4: 0x2000, 0x13a5: 0x2000, 0x13a6: 0x2000, 0x13a7: 0x2000, 0x13a8: 0x2000, 0x13a9: 0x2000, + 0x13b0: 0x2000, 0x13b1: 0x2000, 0x13b2: 0x2000, 0x13b3: 0x2000, 0x13b4: 0x2000, 0x13b5: 0x2000, + 0x13b6: 0x2000, 0x13b7: 0x2000, 0x13b8: 0x2000, 0x13b9: 0x2000, 0x13ba: 0x2000, 0x13bb: 0x2000, + 0x13bc: 0x2000, 0x13bd: 0x2000, 0x13be: 0x2000, 0x13bf: 0x2000, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, + 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, 0x13cb: 0x2000, + 0x13cc: 0x2000, 0x13cd: 0x2000, 0x13ce: 0x4000, 0x13cf: 0x2000, 0x13d0: 0x2000, 0x13d1: 0x4000, + 0x13d2: 0x4000, 0x13d3: 0x4000, 0x13d4: 0x4000, 0x13d5: 0x4000, 0x13d6: 0x4000, 0x13d7: 0x4000, + 0x13d8: 0x4000, 0x13d9: 0x4000, 0x13da: 0x4000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, + 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, + 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, + 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, + // Block 0x50, offset 0x1400 + 0x1400: 0x4000, 0x1401: 0x4000, 0x1402: 0x4000, + 0x1410: 0x4000, 0x1411: 0x4000, + 0x1412: 0x4000, 0x1413: 0x4000, 0x1414: 0x4000, 0x1415: 0x4000, 0x1416: 0x4000, 0x1417: 0x4000, + 0x1418: 0x4000, 0x1419: 0x4000, 0x141a: 0x4000, 0x141b: 0x4000, 0x141c: 0x4000, 0x141d: 0x4000, + 0x141e: 0x4000, 0x141f: 0x4000, 0x1420: 0x4000, 0x1421: 0x4000, 0x1422: 0x4000, 0x1423: 0x4000, + 0x1424: 0x4000, 0x1425: 0x4000, 0x1426: 0x4000, 0x1427: 0x4000, 0x1428: 0x4000, 0x1429: 0x4000, + 0x142a: 0x4000, 0x142b: 0x4000, 0x142c: 0x4000, 0x142d: 0x4000, 0x142e: 0x4000, 0x142f: 0x4000, + 0x1430: 0x4000, 0x1431: 0x4000, 0x1432: 0x4000, 0x1433: 0x4000, 0x1434: 0x4000, 0x1435: 0x4000, + 0x1436: 0x4000, 0x1437: 0x4000, 0x1438: 0x4000, 0x1439: 0x4000, 0x143a: 0x4000, 0x143b: 0x4000, + // Block 0x51, offset 0x1440 + 0x1440: 0x4000, 0x1441: 0x4000, 0x1442: 0x4000, 0x1443: 0x4000, 0x1444: 0x4000, 0x1445: 0x4000, + 0x1446: 0x4000, 0x1447: 0x4000, 0x1448: 0x4000, + 0x1450: 0x4000, 0x1451: 0x4000, + // Block 0x52, offset 0x1480 + 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, 0x1483: 0x4000, 0x1484: 0x4000, 0x1485: 0x4000, + 0x1486: 0x4000, 0x1487: 0x4000, 0x1488: 0x4000, 0x1489: 0x4000, 0x148a: 0x4000, 0x148b: 0x4000, + 0x148c: 0x4000, 0x148d: 0x4000, 0x148e: 0x4000, 0x148f: 0x4000, 0x1490: 0x4000, 0x1491: 0x4000, + 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, + 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, + 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, + 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, + 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, + 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, + 0x14bc: 0x4000, 0x14bd: 0x4000, 0x14be: 0x4000, 0x14bf: 0x4000, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, + 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, 0x14c9: 0x4000, 0x14ca: 0x4000, 0x14cb: 0x4000, + 0x14cc: 0x4000, 0x14cd: 
0x4000, 0x14ce: 0x4000, 0x14cf: 0x4000, 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14d2: 0x4000, 0x14d3: 0x4000, 0x14d4: 0x4000, 0x14d5: 0x4000, 0x14d6: 0x4000, 0x14d7: 0x4000, + 0x14d8: 0x4000, 0x14d9: 0x4000, 0x14da: 0x4000, 0x14db: 0x4000, 0x14dc: 0x4000, 0x14dd: 0x4000, + 0x14de: 0x4000, 0x14df: 0x4000, 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, 0x14e6: 0x4000, 0x14e7: 0x4000, 0x14e8: 0x4000, 0x14e9: 0x4000, + 0x14ea: 0x4000, 0x14eb: 0x4000, 0x14ec: 0x4000, 0x14ed: 0x4000, 0x14ee: 0x4000, 0x14ef: 0x4000, + 0x14f0: 0x4000, 0x14f1: 0x4000, 0x14f2: 0x4000, 0x14f3: 0x4000, 0x14f4: 0x4000, 0x14f5: 0x4000, + 0x14f6: 0x4000, 0x14f7: 0x4000, 0x14f8: 0x4000, 0x14f9: 0x4000, 0x14fa: 0x4000, 0x14fb: 0x4000, + 0x14fc: 0x4000, 0x14fe: 0x4000, 0x14ff: 0x4000, + // Block 0x54, offset 0x1500 + 0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, + 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, + 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, + 0x1512: 0x4000, 0x1513: 0x4000, + 0x1520: 0x4000, 0x1521: 0x4000, 0x1522: 0x4000, 0x1523: 0x4000, + 0x1524: 0x4000, 0x1525: 0x4000, 0x1526: 0x4000, 0x1527: 0x4000, 0x1528: 0x4000, 0x1529: 0x4000, + 0x152a: 0x4000, 0x152b: 0x4000, 0x152c: 0x4000, 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, + 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, + 0x1536: 0x4000, 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, + 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, + // Block 0x55, offset 0x1540 + 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, + 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, + 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, + 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, + 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, + 0x1570: 0x4000, 0x1574: 0x4000, + 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157d: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + // Block 0x56, offset 0x1580 + 0x1580: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, + 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, 0x1594: 0x4000, 0x1595: 0x4000, 0x1596: 0x4000, 0x1597: 0x4000, + 0x1598: 0x4000, 0x1599: 0x4000, 0x159a: 0x4000, 0x159b: 0x4000, 0x159c: 0x4000, 0x159d: 0x4000, + 0x159e: 0x4000, 0x159f: 0x4000, 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, + 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, + 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, + 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, + 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 
0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, 0x15cb: 0x4000, + 0x15cc: 0x4000, 0x15cd: 0x4000, 0x15ce: 0x4000, 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, 0x15d4: 0x4000, 0x15d5: 0x4000, 0x15d6: 0x4000, 0x15d7: 0x4000, + 0x15d8: 0x4000, 0x15d9: 0x4000, 0x15da: 0x4000, 0x15db: 0x4000, 0x15dc: 0x4000, 0x15dd: 0x4000, + 0x15de: 0x4000, 0x15df: 0x4000, 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, + 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, + 0x15f0: 0x4000, 0x15f1: 0x4000, 0x15f2: 0x4000, 0x15f3: 0x4000, 0x15f4: 0x4000, 0x15f5: 0x4000, + 0x15f6: 0x4000, 0x15f7: 0x4000, 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15ff: 0x4000, + // Block 0x58, offset 0x1600 + 0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, + 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, + 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, + 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, + 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, + 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, + 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, + 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, + 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, + // Block 0x59, offset 0x1640 + 0x164b: 0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, + 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, + 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, + 0x167a: 0x4000, + // Block 0x5a, offset 0x1680 + 0x1695: 0x4000, 0x1696: 0x4000, + 0x16a4: 0x4000, + // Block 0x5b, offset 0x16c0 + 0x16fb: 0x4000, + 0x16fc: 0x4000, 0x16fd: 0x4000, 0x16fe: 0x4000, 0x16ff: 0x4000, + // Block 0x5c, offset 0x1700 + 0x1700: 0x4000, 0x1701: 0x4000, 0x1702: 0x4000, 0x1703: 0x4000, 0x1704: 0x4000, 0x1705: 0x4000, + 0x1706: 0x4000, 0x1707: 0x4000, 0x1708: 0x4000, 0x1709: 0x4000, 0x170a: 0x4000, 0x170b: 0x4000, + 0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x170f: 0x4000, + // Block 0x5d, offset 0x1740 + 0x1740: 0x4000, 0x1741: 0x4000, 0x1742: 0x4000, 0x1743: 0x4000, 0x1744: 0x4000, 0x1745: 0x4000, + 0x174c: 0x4000, 0x1750: 0x4000, 0x1751: 0x4000, + 0x1752: 0x4000, + 0x176b: 0x4000, 0x176c: 0x4000, + 0x1774: 0x4000, 0x1775: 0x4000, + 0x1776: 0x4000, + // Block 0x5e, offset 0x1780 + 0x1790: 0x4000, 0x1791: 0x4000, + 0x1792: 0x4000, 0x1793: 0x4000, 0x1794: 0x4000, 0x1795: 0x4000, 0x1796: 0x4000, 0x1797: 0x4000, + 0x1798: 0x4000, 0x1799: 0x4000, 0x179a: 0x4000, 0x179b: 0x4000, 0x179c: 0x4000, 0x179d: 
0x4000, + 0x179e: 0x4000, 0x17a0: 0x4000, 0x17a1: 0x4000, 0x17a2: 0x4000, 0x17a3: 0x4000, + 0x17a4: 0x4000, 0x17a5: 0x4000, 0x17a6: 0x4000, 0x17a7: 0x4000, + 0x17b0: 0x4000, 0x17b3: 0x4000, 0x17b4: 0x4000, 0x17b5: 0x4000, + 0x17b6: 0x4000, 0x17b7: 0x4000, 0x17b8: 0x4000, 0x17b9: 0x4000, 0x17ba: 0x4000, 0x17bb: 0x4000, + 0x17bc: 0x4000, 0x17bd: 0x4000, 0x17be: 0x4000, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, + 0x17c6: 0x4000, 0x17c7: 0x4000, 0x17c8: 0x4000, 0x17c9: 0x4000, 0x17ca: 0x4000, 0x17cb: 0x4000, + 0x17d0: 0x4000, 0x17d1: 0x4000, + 0x17d2: 0x4000, 0x17d3: 0x4000, 0x17d4: 0x4000, 0x17d5: 0x4000, 0x17d6: 0x4000, 0x17d7: 0x4000, + 0x17d8: 0x4000, 0x17d9: 0x4000, 0x17da: 0x4000, 0x17db: 0x4000, 0x17dc: 0x4000, 0x17dd: 0x4000, + 0x17de: 0x4000, + // Block 0x60, offset 0x1800 + 0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000, + 0x1806: 0x4000, 0x1807: 0x4000, 0x1808: 0x4000, 0x1809: 0x4000, 0x180a: 0x4000, 0x180b: 0x4000, + 0x180c: 0x4000, 0x180d: 0x4000, 0x180e: 0x4000, 0x180f: 0x4000, 0x1810: 0x4000, 0x1811: 0x4000, + // Block 0x61, offset 0x1840 + 0x1840: 0x4000, + // Block 0x62, offset 0x1880 + 0x1880: 0x2000, 0x1881: 0x2000, 0x1882: 0x2000, 0x1883: 0x2000, 0x1884: 0x2000, 0x1885: 0x2000, + 0x1886: 0x2000, 0x1887: 0x2000, 0x1888: 0x2000, 0x1889: 0x2000, 0x188a: 0x2000, 0x188b: 0x2000, + 0x188c: 0x2000, 0x188d: 0x2000, 0x188e: 0x2000, 0x188f: 0x2000, 0x1890: 0x2000, 0x1891: 0x2000, + 0x1892: 0x2000, 0x1893: 0x2000, 0x1894: 0x2000, 0x1895: 0x2000, 0x1896: 0x2000, 0x1897: 0x2000, + 0x1898: 0x2000, 0x1899: 0x2000, 0x189a: 0x2000, 0x189b: 0x2000, 0x189c: 0x2000, 0x189d: 0x2000, + 0x189e: 0x2000, 0x189f: 0x2000, 0x18a0: 0x2000, 0x18a1: 0x2000, 0x18a2: 0x2000, 0x18a3: 0x2000, + 0x18a4: 0x2000, 0x18a5: 0x2000, 0x18a6: 0x2000, 0x18a7: 0x2000, 0x18a8: 0x2000, 0x18a9: 0x2000, + 0x18aa: 0x2000, 0x18ab: 0x2000, 0x18ac: 0x2000, 0x18ad: 0x2000, 0x18ae: 0x2000, 0x18af: 0x2000, + 0x18b0: 0x2000, 0x18b1: 0x2000, 0x18b2: 0x2000, 0x18b3: 0x2000, 0x18b4: 0x2000, 0x18b5: 0x2000, + 0x18b6: 0x2000, 0x18b7: 0x2000, 0x18b8: 0x2000, 0x18b9: 0x2000, 0x18ba: 0x2000, 0x18bb: 0x2000, + 0x18bc: 0x2000, 0x18bd: 0x2000, +} + +// widthIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. 
+var widthIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, + 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, + // Block 0x4, offset 0x100 + 0x104: 0x0e, 0x105: 0x0f, + // Block 0x5, offset 0x140 + 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, + 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, + 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, + 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, + 0x166: 0x2a, + 0x16c: 0x2b, 0x16d: 0x2c, + 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, + // Block 0x6, offset 0x180 + 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, + 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, + 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, + 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, + 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, + 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, + 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, + 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, + 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, + 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, + 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, + 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, + 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, + 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, + 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, + // Block 0x8, offset 0x200 + 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, + 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, + 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, + 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, + 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, + 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, + 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 
0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, + 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, + // Block 0x9, offset 0x240 + 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, + 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, + 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, + 0x265: 0x3d, + 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, + 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, + // Block 0xa, offset 0x280 + 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, + 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, + 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, + 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, + 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, + 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, + 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, + 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, + 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, + 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, + 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, + // Block 0xc, offset 0x300 + 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, + 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, + 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, + 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, + 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, + 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, + 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, + // Block 0xd, offset 0x340 + 0x37f: 0x45, + // Block 0xe, offset 0x380 + 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, + 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, + 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, + 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, + 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, + 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x48, + // Block 0x10, offset 0x400 + 0x400: 0x49, 0x403: 0x4a, 0x404: 0x4b, 0x405: 0x4c, 0x406: 0x4d, + 0x408: 0x4e, 0x409: 0x4f, 0x40c: 0x50, 0x40d: 0x51, 0x40e: 0x52, 
0x40f: 0x53, + 0x410: 0x3a, 0x411: 0x54, 0x412: 0x0e, 0x413: 0x55, 0x414: 0x56, 0x415: 0x57, 0x416: 0x58, 0x417: 0x59, + 0x418: 0x0e, 0x419: 0x5a, 0x41a: 0x0e, 0x41b: 0x5b, + 0x424: 0x5c, 0x425: 0x5d, 0x426: 0x5e, 0x427: 0x5f, + // Block 0x11, offset 0x440 + 0x456: 0x0b, 0x457: 0x06, + 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, + 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, + 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, + 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, + 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, + // Block 0x12, offset 0x480 + 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, + 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, + 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, + 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, + 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, + 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, + 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, + 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x60, + // Block 0x14, offset 0x500 + 0x520: 0x10, + 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, + 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, + // Block 0x15, offset 0x540 + 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, + 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, +} + +// inverseData contains 4-byte entries of the following format: +// <length> <modified UTF-8-encoded rune> <0 padding> +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// { 0x01, 0xE0, 0x00, 0x00 } +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// E0 ^ A1 = 41. +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// E0 ^ A2 = 42. +// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8. 
+var inverseData = [150][4]byte{ + {0x00, 0x00, 0x00, 0x00}, + {0x03, 0xe3, 0x80, 0xa0}, + {0x03, 0xef, 0xbc, 0xa0}, + {0x03, 0xef, 0xbc, 0xe0}, + {0x03, 0xef, 0xbd, 0xe0}, + {0x03, 0xef, 0xbf, 0x02}, + {0x03, 0xef, 0xbf, 0x00}, + {0x03, 0xef, 0xbf, 0x0e}, + {0x03, 0xef, 0xbf, 0x0c}, + {0x03, 0xef, 0xbf, 0x0f}, + {0x03, 0xef, 0xbf, 0x39}, + {0x03, 0xef, 0xbf, 0x3b}, + {0x03, 0xef, 0xbf, 0x3f}, + {0x03, 0xef, 0xbf, 0x2a}, + {0x03, 0xef, 0xbf, 0x0d}, + {0x03, 0xef, 0xbf, 0x25}, + {0x03, 0xef, 0xbd, 0x1a}, + {0x03, 0xef, 0xbd, 0x26}, + {0x01, 0xa0, 0x00, 0x00}, + {0x03, 0xef, 0xbd, 0x25}, + {0x03, 0xef, 0xbd, 0x23}, + {0x03, 0xef, 0xbd, 0x2e}, + {0x03, 0xef, 0xbe, 0x07}, + {0x03, 0xef, 0xbe, 0x05}, + {0x03, 0xef, 0xbd, 0x06}, + {0x03, 0xef, 0xbd, 0x13}, + {0x03, 0xef, 0xbd, 0x0b}, + {0x03, 0xef, 0xbd, 0x16}, + {0x03, 0xef, 0xbd, 0x0c}, + {0x03, 0xef, 0xbd, 0x15}, + {0x03, 0xef, 0xbd, 0x0d}, + {0x03, 0xef, 0xbd, 0x1c}, + {0x03, 0xef, 0xbd, 0x02}, + {0x03, 0xef, 0xbd, 0x1f}, + {0x03, 0xef, 0xbd, 0x1d}, + {0x03, 0xef, 0xbd, 0x17}, + {0x03, 0xef, 0xbd, 0x08}, + {0x03, 0xef, 0xbd, 0x09}, + {0x03, 0xef, 0xbd, 0x0e}, + {0x03, 0xef, 0xbd, 0x04}, + {0x03, 0xef, 0xbd, 0x05}, + {0x03, 0xef, 0xbe, 0x3f}, + {0x03, 0xef, 0xbe, 0x00}, + {0x03, 0xef, 0xbd, 0x2c}, + {0x03, 0xef, 0xbe, 0x06}, + {0x03, 0xef, 0xbe, 0x0c}, + {0x03, 0xef, 0xbe, 0x0f}, + {0x03, 0xef, 0xbe, 0x0d}, + {0x03, 0xef, 0xbe, 0x0b}, + {0x03, 0xef, 0xbe, 0x19}, + {0x03, 0xef, 0xbe, 0x15}, + {0x03, 0xef, 0xbe, 0x11}, + {0x03, 0xef, 0xbe, 0x31}, + {0x03, 0xef, 0xbe, 0x33}, + {0x03, 0xef, 0xbd, 0x0f}, + {0x03, 0xef, 0xbe, 0x30}, + {0x03, 0xef, 0xbe, 0x3e}, + {0x03, 0xef, 0xbe, 0x32}, + {0x03, 0xef, 0xbe, 0x36}, + {0x03, 0xef, 0xbd, 0x14}, + {0x03, 0xef, 0xbe, 0x2e}, + {0x03, 0xef, 0xbd, 0x1e}, + {0x03, 0xef, 0xbe, 0x10}, + {0x03, 0xef, 0xbf, 0x13}, + {0x03, 0xef, 0xbf, 0x15}, + {0x03, 0xef, 0xbf, 0x17}, + {0x03, 0xef, 0xbf, 0x1f}, + {0x03, 0xef, 0xbf, 0x1d}, + {0x03, 0xef, 0xbf, 0x1b}, + {0x03, 0xef, 0xbf, 0x09}, + {0x03, 0xef, 0xbf, 0x0b}, + {0x03, 0xef, 0xbf, 0x37}, + {0x03, 0xef, 0xbe, 0x04}, + {0x01, 0xe0, 0x00, 0x00}, + {0x03, 0xe2, 0xa6, 0x1a}, + {0x03, 0xe2, 0xa6, 0x26}, + {0x03, 0xe3, 0x80, 0x23}, + {0x03, 0xe3, 0x80, 0x2e}, + {0x03, 0xe3, 0x80, 0x25}, + {0x03, 0xe3, 0x83, 0x1e}, + {0x03, 0xe3, 0x83, 0x14}, + {0x03, 0xe3, 0x82, 0x06}, + {0x03, 0xe3, 0x82, 0x0b}, + {0x03, 0xe3, 0x82, 0x0c}, + {0x03, 0xe3, 0x82, 0x0d}, + {0x03, 0xe3, 0x82, 0x02}, + {0x03, 0xe3, 0x83, 0x0f}, + {0x03, 0xe3, 0x83, 0x08}, + {0x03, 0xe3, 0x83, 0x09}, + {0x03, 0xe3, 0x83, 0x2c}, + {0x03, 0xe3, 0x83, 0x0c}, + {0x03, 0xe3, 0x82, 0x13}, + {0x03, 0xe3, 0x82, 0x16}, + {0x03, 0xe3, 0x82, 0x15}, + {0x03, 0xe3, 0x82, 0x1c}, + {0x03, 0xe3, 0x82, 0x1f}, + {0x03, 0xe3, 0x82, 0x1d}, + {0x03, 0xe3, 0x82, 0x1a}, + {0x03, 0xe3, 0x82, 0x17}, + {0x03, 0xe3, 0x82, 0x08}, + {0x03, 0xe3, 0x82, 0x09}, + {0x03, 0xe3, 0x82, 0x0e}, + {0x03, 0xe3, 0x82, 0x04}, + {0x03, 0xe3, 0x82, 0x05}, + {0x03, 0xe3, 0x82, 0x3f}, + {0x03, 0xe3, 0x83, 0x00}, + {0x03, 0xe3, 0x83, 0x06}, + {0x03, 0xe3, 0x83, 0x05}, + {0x03, 0xe3, 0x83, 0x0d}, + {0x03, 0xe3, 0x83, 0x0b}, + {0x03, 0xe3, 0x83, 0x07}, + {0x03, 0xe3, 0x83, 0x19}, + {0x03, 0xe3, 0x83, 0x15}, + {0x03, 0xe3, 0x83, 0x11}, + {0x03, 0xe3, 0x83, 0x31}, + {0x03, 0xe3, 0x83, 0x33}, + {0x03, 0xe3, 0x83, 0x30}, + {0x03, 0xe3, 0x83, 0x3e}, + {0x03, 0xe3, 0x83, 0x32}, + {0x03, 0xe3, 0x83, 0x36}, + {0x03, 0xe3, 0x83, 0x2e}, + {0x03, 0xe3, 0x82, 0x07}, + {0x03, 0xe3, 0x85, 0x04}, + {0x03, 0xe3, 0x84, 0x10}, + {0x03, 0xe3, 0x85, 0x30}, + {0x03, 0xe3, 0x85, 
0x0d}, + {0x03, 0xe3, 0x85, 0x13}, + {0x03, 0xe3, 0x85, 0x15}, + {0x03, 0xe3, 0x85, 0x17}, + {0x03, 0xe3, 0x85, 0x1f}, + {0x03, 0xe3, 0x85, 0x1d}, + {0x03, 0xe3, 0x85, 0x1b}, + {0x03, 0xe3, 0x85, 0x09}, + {0x03, 0xe3, 0x85, 0x0f}, + {0x03, 0xe3, 0x85, 0x0b}, + {0x03, 0xe3, 0x85, 0x37}, + {0x03, 0xe3, 0x85, 0x3b}, + {0x03, 0xe3, 0x85, 0x39}, + {0x03, 0xe3, 0x85, 0x3f}, + {0x02, 0xc2, 0x02, 0x00}, + {0x02, 0xc2, 0x0e, 0x00}, + {0x02, 0xc2, 0x0c, 0x00}, + {0x02, 0xc2, 0x00, 0x00}, + {0x03, 0xe2, 0x82, 0x0f}, + {0x03, 0xe2, 0x94, 0x2a}, + {0x03, 0xe2, 0x86, 0x39}, + {0x03, 0xe2, 0x86, 0x3b}, + {0x03, 0xe2, 0x86, 0x3f}, + {0x03, 0xe2, 0x96, 0x0d}, + {0x03, 0xe2, 0x97, 0x25}, +} + +// Total table size 14680 bytes (14KiB) diff --git a/vendor/golang.org/x/text/width/transform.go b/vendor/golang.org/x/text/width/transform.go new file mode 100644 index 000000000..0049f700a --- /dev/null +++ b/vendor/golang.org/x/text/width/transform.go @@ -0,0 +1,239 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package width + +import ( + "unicode/utf8" + + "golang.org/x/text/transform" +) + +type foldTransform struct { + transform.NopResetter +} + +func (foldTransform) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + if src[n] < utf8.RuneSelf { + // ASCII fast path. + for n++; n < len(src) && src[n] < utf8.RuneSelf; n++ { + } + continue + } + v, size := trie.lookup(src[n:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + err = transform.ErrShortSrc + } else { + n = len(src) + } + break + } + if elem(v)&tagNeedsFold != 0 { + err = transform.ErrEndOfSpan + break + } + n += size + } + return n, err +} + +func (foldTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + if src[nSrc] < utf8.RuneSelf { + // ASCII fast path. + start, end := nSrc, len(src) + if d := len(dst) - nDst; d < end-start { + end = nSrc + d + } + for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ { + } + n := copy(dst[nDst:], src[start:nSrc]) + if nDst += n; nDst == len(dst) { + nSrc = start + n + if nSrc == len(src) { + return nDst, nSrc, nil + } + if src[nSrc] < utf8.RuneSelf { + return nDst, nSrc, transform.ErrShortDst + } + } + continue + } + v, size := trie.lookup(src[nSrc:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + return nDst, nSrc, transform.ErrShortSrc + } + size = 1 // gobble 1 byte + } + if elem(v)&tagNeedsFold == 0 { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + return nDst, nSrc, transform.ErrShortDst + } + nDst += size + } else { + data := inverseData[byte(v)] + if len(dst)-nDst < int(data[0]) { + return nDst, nSrc, transform.ErrShortDst + } + i := 1 + for end := int(data[0]); i < end; i++ { + dst[nDst] = data[i] + nDst++ + } + dst[nDst] = data[i] ^ src[nSrc+size-1] + nDst++ + } + nSrc += size + } + return nDst, nSrc, nil +} + +type narrowTransform struct { + transform.NopResetter +} + +func (narrowTransform) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + if src[n] < utf8.RuneSelf { + // ASCII fast path. 
+ for n++; n < len(src) && src[n] < utf8.RuneSelf; n++ { + } + continue + } + v, size := trie.lookup(src[n:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + err = transform.ErrShortSrc + } else { + n = len(src) + } + break + } + if k := elem(v).kind(); byte(v) == 0 || k != EastAsianFullwidth && k != EastAsianWide && k != EastAsianAmbiguous { + } else { + err = transform.ErrEndOfSpan + break + } + n += size + } + return n, err +} + +func (narrowTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + if src[nSrc] < utf8.RuneSelf { + // ASCII fast path. + start, end := nSrc, len(src) + if d := len(dst) - nDst; d < end-start { + end = nSrc + d + } + for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ { + } + n := copy(dst[nDst:], src[start:nSrc]) + if nDst += n; nDst == len(dst) { + nSrc = start + n + if nSrc == len(src) { + return nDst, nSrc, nil + } + if src[nSrc] < utf8.RuneSelf { + return nDst, nSrc, transform.ErrShortDst + } + } + continue + } + v, size := trie.lookup(src[nSrc:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + return nDst, nSrc, transform.ErrShortSrc + } + size = 1 // gobble 1 byte + } + if k := elem(v).kind(); byte(v) == 0 || k != EastAsianFullwidth && k != EastAsianWide && k != EastAsianAmbiguous { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + return nDst, nSrc, transform.ErrShortDst + } + nDst += size + } else { + data := inverseData[byte(v)] + if len(dst)-nDst < int(data[0]) { + return nDst, nSrc, transform.ErrShortDst + } + i := 1 + for end := int(data[0]); i < end; i++ { + dst[nDst] = data[i] + nDst++ + } + dst[nDst] = data[i] ^ src[nSrc+size-1] + nDst++ + } + nSrc += size + } + return nDst, nSrc, nil +} + +type wideTransform struct { + transform.NopResetter +} + +func (wideTransform) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + // TODO: Consider ASCII fast path. Special-casing ASCII handling can + // reduce the ns/op of BenchmarkWideASCII by about 30%. This is probably + // not enough to warrant the extra code and complexity. + v, size := trie.lookup(src[n:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + err = transform.ErrShortSrc + } else { + n = len(src) + } + break + } + if k := elem(v).kind(); byte(v) == 0 || k != EastAsianHalfwidth && k != EastAsianNarrow { + } else { + err = transform.ErrEndOfSpan + break + } + n += size + } + return n, err +} + +func (wideTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + // TODO: Consider ASCII fast path. Special-casing ASCII handling can + // reduce the ns/op of BenchmarkWideASCII by about 30%. This is probably + // not enough to warrant the extra code and complexity. 
+ v, size := trie.lookup(src[nSrc:]) + if size == 0 { // incomplete UTF-8 encoding + if !atEOF { + return nDst, nSrc, transform.ErrShortSrc + } + size = 1 // gobble 1 byte + } + if k := elem(v).kind(); byte(v) == 0 || k != EastAsianHalfwidth && k != EastAsianNarrow { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + return nDst, nSrc, transform.ErrShortDst + } + nDst += size + } else { + data := inverseData[byte(v)] + if len(dst)-nDst < int(data[0]) { + return nDst, nSrc, transform.ErrShortDst + } + i := 1 + for end := int(data[0]); i < end; i++ { + dst[nDst] = data[i] + nDst++ + } + dst[nDst] = data[i] ^ src[nSrc+size-1] + nDst++ + } + nSrc += size + } + return nDst, nSrc, nil +} diff --git a/vendor/golang.org/x/text/width/trieval.go b/vendor/golang.org/x/text/width/trieval.go new file mode 100644 index 000000000..0ecffb4c6 --- /dev/null +++ b/vendor/golang.org/x/text/width/trieval.go @@ -0,0 +1,30 @@ +// This file was generated by go generate; DO NOT EDIT + +package width + +// elem is an entry of the width trie. The high byte is used to encode the type +// of the rune. The low byte is used to store the index to a mapping entry in +// the inverseData array. +type elem uint16 + +const ( + tagNeutral elem = iota << typeShift + tagAmbiguous + tagWide + tagNarrow + tagFullwidth + tagHalfwidth +) + +const ( + numTypeBits = 3 + typeShift = 16 - numTypeBits + + // tagNeedsFold is true for all fullwidth and halfwidth runes except for + // the Won sign U+20A9. + tagNeedsFold = 0x1000 + + // The Korean Won sign is halfwidth, but SHOULD NOT be mapped to a wide + // variant. + wonSign rune = 0x20A9 +) diff --git a/vendor/golang.org/x/text/width/width.go b/vendor/golang.org/x/text/width/width.go new file mode 100644 index 000000000..0c9aec474 --- /dev/null +++ b/vendor/golang.org/x/text/width/width.go @@ -0,0 +1,206 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate stringer -type=Kind +//go:generate go run gen.go gen_common.go gen_trieval.go + +// Package width provides functionality for handling different widths in text. +// +// Wide characters behave like ideographs; they tend to allow line breaks after +// each character and remain upright in vertical text layout. Narrow characters +// are kept together in words or runs that are rotated sideways in vertical text +// layout. +// +// For more information, see http://unicode.org/reports/tr11/. +package width + +import ( + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// TODO +// 1) Reduce table size by compressing blocks. +// 2) API proposition for computing display length +// (approximation, fixed pitch only). +// 3) Implement display length. + +// Kind indicates the type of width property as defined in http://unicode.org/reports/tr11/. +type Kind int + +const ( + // Neutral characters do not occur in legacy East Asian character sets. + Neutral Kind = iota + + // EastAsianAmbiguous characters that can be sometimes wide and sometimes + // narrow and require additional information not contained in the character + // code to further resolve their width. + EastAsianAmbiguous + + // EastAsianWide characters are wide in its usual form. They occur only in + // the context of East Asian typography. These runes may have explicit + // halfwidth counterparts. + EastAsianWide + + // EastAsianNarrow characters are narrow in its usual form. They often have + // fullwidth counterparts. 
+ EastAsianNarrow + + // Note: there exist Narrow runes that do not have fullwidth or wide + // counterparts, despite what the definition says (e.g. U+27E6). + + // EastAsianFullwidth characters have a compatibility decompositions of type + // wide that map to a narrow counterpart. + EastAsianFullwidth + + // EastAsianHalfwidth characters have a compatibility decomposition of type + // narrow that map to a wide or ambiguous counterpart, plus U+20A9 ₩ WON + // SIGN. + EastAsianHalfwidth + + // Note: there exist runes that have a halfwidth counterparts but that are + // classified as Ambiguous, rather than wide (e.g. U+2190). +) + +// TODO: the generated tries need to return size 1 for invalid runes for the +// width to be computed correctly (each byte should render width 1) + +var trie = newWidthTrie(0) + +// Lookup reports the Properties of the first rune in b and the number of bytes +// of its UTF-8 encoding. +func Lookup(b []byte) (p Properties, size int) { + v, sz := trie.lookup(b) + return Properties{elem(v), b[sz-1]}, sz +} + +// LookupString reports the Properties of the first rune in s and the number of +// bytes of its UTF-8 encoding. +func LookupString(s string) (p Properties, size int) { + v, sz := trie.lookupString(s) + return Properties{elem(v), s[sz-1]}, sz +} + +// LookupRune reports the Properties of rune r. +func LookupRune(r rune) Properties { + var buf [4]byte + n := utf8.EncodeRune(buf[:], r) + v, _ := trie.lookup(buf[:n]) + last := byte(r) + if r >= utf8.RuneSelf { + last = 0x80 + byte(r&0x3f) + } + return Properties{elem(v), last} +} + +// Properties provides access to width properties of a rune. +type Properties struct { + elem elem + last byte +} + +func (e elem) kind() Kind { + return Kind(e >> typeShift) +} + +// Kind returns the Kind of a rune as defined in Unicode TR #11. +// See http://unicode.org/reports/tr11/ for more details. +func (p Properties) Kind() Kind { + return p.elem.kind() +} + +// Folded returns the folded variant of a rune or 0 if the rune is canonical. +func (p Properties) Folded() rune { + if p.elem&tagNeedsFold != 0 { + buf := inverseData[byte(p.elem)] + buf[buf[0]] ^= p.last + r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) + return r + } + return 0 +} + +// Narrow returns the narrow variant of a rune or 0 if the rune is already +// narrow or doesn't have a narrow variant. +func (p Properties) Narrow() rune { + if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianFullwidth || k == EastAsianWide || k == EastAsianAmbiguous) { + buf := inverseData[byte(p.elem)] + buf[buf[0]] ^= p.last + r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) + return r + } + return 0 +} + +// Wide returns the wide variant of a rune or 0 if the rune is already +// wide or doesn't have a wide variant. +func (p Properties) Wide() rune { + if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianHalfwidth || k == EastAsianNarrow) { + buf := inverseData[byte(p.elem)] + buf[buf[0]] ^= p.last + r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]]) + return r + } + return 0 +} + +// TODO for Properties: +// - Add Fullwidth/Halfwidth or Inverted methods for computing variants +// mapping. +// - Add width information (including information on non-spacing runes). + +// Transformer implements the transform.Transformer interface. +type Transformer struct { + t transform.SpanningTransformer +} + +// Reset implements the transform.Transformer interface. +func (t Transformer) Reset() { t.t.Reset() } + +// Transform implements the transform.Transformer interface. 
+func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + return t.t.Transform(dst, src, atEOF) +} + +// Span implements the transform.SpanningTransformer interface. +func (t Transformer) Span(src []byte, atEOF bool) (n int, err error) { + return t.t.Span(src, atEOF) +} + +// Bytes returns a new byte slice with the result of applying t to b. +func (t Transformer) Bytes(b []byte) []byte { + b, _, _ = transform.Bytes(t, b) + return b +} + +// String returns a string with the result of applying t to s. +func (t Transformer) String(s string) string { + s, _, _ = transform.String(t, s) + return s +} + +var ( + // Fold is a transform that maps all runes to their canonical width. + // + // Note that the NFKC and NFKD transforms in golang.org/x/text/unicode/norm + // provide a more generic folding mechanism. + Fold Transformer = Transformer{foldTransform{}} + + // Widen is a transform that maps runes to their wide variant, if + // available. + Widen Transformer = Transformer{wideTransform{}} + + // Narrow is a transform that maps runes to their narrow variant, if + // available. + Narrow Transformer = Transformer{narrowTransform{}} +) + +// TODO: Consider the following options: +// - Treat Ambiguous runes that have a halfwidth counterpart as wide, or some +// generalized variant of this. +// - Consider a wide Won character to be the default width (or some generalized +// variant of this). +// - Filter the set of characters that gets converted (the preferred approach is +// to allow applying filters to transforms). diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml deleted file mode 100644 index 771520977..000000000 --- a/vendor/google.golang.org/appengine/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go -sudo: false - -go: - - 1.4 - -install: - - go get -v -t -d google.golang.org/appengine/... - - mkdir sdk - - curl -o sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.24.zip" - - unzip sdk.zip -d sdk - - export APPENGINE_DEV_APPSERVER=$(pwd)/sdk/go_appengine/dev_appserver.py - -script: - - go version - - go test -v google.golang.org/appengine/... - - go test -v -race google.golang.org/appengine/... - - sdk/go_appengine/goapp test -v google.golang.org/appengine/... diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md deleted file mode 100644 index 1efd95583..000000000 --- a/vendor/google.golang.org/appengine/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# Go App Engine packages - -[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine) - -This repository supports the Go runtime on App Engine, -including both the standard App Engine and the -"App Engine flexible environment" (formerly known as "Managed VMs"). -It provides APIs for interacting with App Engine services. -Its canonical import path is `google.golang.org/appengine`. - -See https://cloud.google.com/appengine/docs/go/ -for more information. - -File issue reports and feature requests on the [Google App Engine issue -tracker](https://code.google.com/p/googleappengine/issues/entry?template=Go%20defect). - -## Directory structure -The top level directory of this repository is the `appengine` package. It -contains the -basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API -packages are in subdirectories (e.g. `datastore`). 
- -There is an `internal` subdirectory that contains service protocol buffers, -plus packages required for connectivity to make API calls. App Engine apps -should not directly import any package under `internal`. - -## Updating a Go App Engine app - -This section describes how to update an older Go App Engine app to use -these packages. A provided tool, `aefix`, can help automate steps 2 and 3 -(run `go get google.golang.org/appengine/cmd/aefix` to install it), but -read the details below since `aefix` can't perform all the changes. - -### 1. Update YAML files (App Engine flexible environment / Managed VMs only) - -The `app.yaml` file (and YAML files for modules) should have these new lines added: -``` -vm: true -``` -See https://cloud.google.com/appengine/docs/go/modules/#Go_Instance_scaling_and_class for details. - -### 2. Update import paths - -The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`. -You will need to update your code to use import paths starting with that; for instance, -code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`. - -### 3. Update code using deprecated, removed or modified APIs - -Most App Engine services are available with exactly the same API. -A few APIs were cleaned up, and some are not available yet. -This list summarises the differences: - -* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`. -* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`. -* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead. -* `appengine.Datacenter` now takes a `context.Context` argument. -* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels. -* `delay.Call` now returns an error. -* `search.FieldLoadSaver` now handles document metadata. -* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the - `context.Context` instead. -* `aetest` no longer declares its own Context type, and uses the standard one instead. -* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been - deprecated and unused for a long time. -* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature. - Use `appengine.ModuleHostname`and `appengine.ModuleName` instead. -* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated. - Use [Google Cloud Storage](https://godoc.org/google.golang.org/cloud/storage) instead. -* `appengine/socket` is not required on App Engine flexible environment / Managed VMs. - Use the standard `net` package instead. diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go deleted file mode 100644 index be0b5f2bc..000000000 --- a/vendor/google.golang.org/appengine/appengine.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package appengine provides basic functionality for Google App Engine. 
-// -// For more information on how to write Go apps for Google App Engine, see: -// https://cloud.google.com/appengine/docs/go/ -package appengine - -import ( - "net/http" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// IsDevAppServer reports whether the App Engine app is running in the -// development App Server. -func IsDevAppServer() bool { - return internal.IsDevAppServer() -} - -// NewContext returns a context for an in-flight HTTP request. -// This function is cheap. -func NewContext(req *http.Request) context.Context { - return WithContext(context.Background(), req) -} - -// WithContext returns a copy of the parent context -// and associates it with an in-flight HTTP request. -// This function is cheap. -func WithContext(parent context.Context, req *http.Request) context.Context { - return internal.WithContext(parent, req) -} - -// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call. - -// BlobKey is a key for a blobstore blob. -// -// Conceptually, this type belongs in the blobstore package, but it lives in -// the appengine package to avoid a circular dependency: blobstore depends on -// datastore, and datastore needs to refer to the BlobKey type. -type BlobKey string - -// GeoPoint represents a location as latitude/longitude in degrees. -type GeoPoint struct { - Lat, Lng float64 -} - -// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude. -func (g GeoPoint) Valid() bool { - return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180 -} - -// APICallFunc defines a function type for handling an API call. -// See WithCallOverride. -type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error - -// WithAPICallFunc returns a copy of the parent context -// that will cause API calls to invoke f instead of their normal operation. -// -// This is intended for advanced users only. -func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context { - return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f)) -} - -// APICall performs an API call. -// -// This is not intended for general use; it is exported for use in conjunction -// with WithAPICallFunc. -func APICall(ctx context.Context, service, method string, in, out proto.Message) error { - return internal.Call(ctx, service, method, in, out) -} diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go deleted file mode 100644 index e4399ed7d..000000000 --- a/vendor/google.golang.org/appengine/appengine_vm.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package appengine - -import ( - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// The comment below must not be changed. -// It is used by go-app-builder to recognise that this package has -// the Main function to use in the synthetic main. -// The gophers party all night; the rabbits provide the beats. - -// Main is the principal entry point for an app running in App Engine "flexible environment". -// It installs a trivial health checker if one isn't already registered, -// and starts listening on port 8080 (overridden by the $PORT environment -// variable). 
-// -// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests -// for details on how to do your own health checking. -// -// Main never returns. -// -// Main is designed so that the app's main package looks like this: -// -// package main -// -// import ( -// "google.golang.org/appengine" -// -// _ "myapp/package0" -// _ "myapp/package1" -// ) -// -// func main() { -// appengine.Main() -// } -// -// The "myapp/packageX" packages are expected to register HTTP handlers -// in their init functions. -func Main() { - internal.Main() -} - -// BackgroundContext returns a context not associated with a request. -// This should only be used when not servicing a request. -// This only works in App Engine "flexible environment". -func BackgroundContext() context.Context { - return internal.BackgroundContext() -} diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go deleted file mode 100644 index 16d0772e2..000000000 --- a/vendor/google.golang.org/appengine/errors.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// This file provides error functions for common API failure modes. - -package appengine - -import ( - "fmt" - - "google.golang.org/appengine/internal" -) - -// IsOverQuota reports whether err represents an API call failure -// due to insufficient available quota. -func IsOverQuota(err error) bool { - callErr, ok := err.(*internal.CallError) - return ok && callErr.Code == 4 -} - -// MultiError is returned by batch operations when there are errors with -// particular elements. Errors will be in a one-to-one correspondence with -// the input elements; successful elements will have a nil entry. -type MultiError []error - -func (m MultiError) Error() string { - s, n := "", 0 - for _, e := range m { - if e != nil { - if n == 0 { - s = e.Error() - } - n++ - } - } - switch n { - case 0: - return "(0 errors)" - case 1: - return s - case 2: - return s + " (and 1 other error)" - } - return fmt.Sprintf("%s (and %d other errors)", s, n-1) -} diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go deleted file mode 100644 index b8dcf8f36..000000000 --- a/vendor/google.golang.org/appengine/identity.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import ( - "time" - - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/app_identity" - modpb "google.golang.org/appengine/internal/modules" -) - -// AppID returns the application ID for the current application. -// The string will be a plain application ID (e.g. "appid"), with a -// domain prefix for custom domain deployments (e.g. "example.com:appid"). -func AppID(c context.Context) string { return internal.AppID(c) } - -// DefaultVersionHostname returns the standard hostname of the default version -// of the current application (e.g. "my-app.appspot.com"). This is suitable for -// use in constructing URLs. -func DefaultVersionHostname(c context.Context) string { - return internal.DefaultVersionHostname(c) -} - -// ModuleName returns the module name of the current instance. 
-func ModuleName(c context.Context) string { - return internal.ModuleName(c) -} - -// ModuleHostname returns a hostname of a module instance. -// If module is the empty string, it refers to the module of the current instance. -// If version is empty, it refers to the version of the current instance if valid, -// or the default version of the module of the current instance. -// If instance is empty, ModuleHostname returns the load-balancing hostname. -func ModuleHostname(c context.Context, module, version, instance string) (string, error) { - req := &modpb.GetHostnameRequest{} - if module != "" { - req.Module = &module - } - if version != "" { - req.Version = &version - } - if instance != "" { - req.Instance = &instance - } - res := &modpb.GetHostnameResponse{} - if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil { - return "", err - } - return *res.Hostname, nil -} - -// VersionID returns the version ID for the current application. -// It will be of the form "X.Y", where X is specified in app.yaml, -// and Y is a number generated when each version of the app is uploaded. -// It does not include a module name. -func VersionID(c context.Context) string { return internal.VersionID(c) } - -// InstanceID returns a mostly-unique identifier for this instance. -func InstanceID() string { return internal.InstanceID() } - -// Datacenter returns an identifier for the datacenter that the instance is running in. -func Datacenter(c context.Context) string { return internal.Datacenter(c) } - -// ServerSoftware returns the App Engine release version. -// In production, it looks like "Google App Engine/X.Y.Z". -// In the development appserver, it looks like "Development/X.Y". -func ServerSoftware() string { return internal.ServerSoftware() } - -// RequestID returns a string that uniquely identifies the request. -func RequestID(c context.Context) string { return internal.RequestID(c) } - -// AccessToken generates an OAuth2 access token for the specified scopes on -// behalf of service account of this application. This token will expire after -// the returned time. -func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) { - req := &pb.GetAccessTokenRequest{Scope: scopes} - res := &pb.GetAccessTokenResponse{} - - err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res) - if err != nil { - return "", time.Time{}, err - } - return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil -} - -// Certificate represents a public certificate for the app. -type Certificate struct { - KeyName string - Data []byte // PEM-encoded X.509 certificate -} - -// PublicCertificates retrieves the public certificates for the app. -// They can be used to verify a signature returned by SignBytes. -func PublicCertificates(c context.Context) ([]Certificate, error) { - req := &pb.GetPublicCertificateForAppRequest{} - res := &pb.GetPublicCertificateForAppResponse{} - if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil { - return nil, err - } - var cs []Certificate - for _, pc := range res.PublicCertificateList { - cs = append(cs, Certificate{ - KeyName: pc.GetKeyName(), - Data: []byte(pc.GetX509CertificatePem()), - }) - } - return cs, nil -} - -// ServiceAccount returns a string representing the service account name, in -// the form of an email address (typically app_id@appspot.gserviceaccount.com). 
-func ServiceAccount(c context.Context) (string, error) { - req := &pb.GetServiceAccountNameRequest{} - res := &pb.GetServiceAccountNameResponse{} - - err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res) - if err != nil { - return "", err - } - return res.GetServiceAccountName(), err -} - -// SignBytes signs bytes using a private key unique to your application. -func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) { - req := &pb.SignForAppRequest{BytesToSign: bytes} - res := &pb.SignForAppResponse{} - - if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil { - return "", nil, err - } - return res.GetKeyName(), res.GetSignatureBytes(), nil -} - -func init() { - internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name) - internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name) -} diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go deleted file mode 100644 index ec5aa59b3..000000000 --- a/vendor/google.golang.org/appengine/internal/api.go +++ /dev/null @@ -1,646 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package internal - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" - - basepb "google.golang.org/appengine/internal/base" - logpb "google.golang.org/appengine/internal/log" - remotepb "google.golang.org/appengine/internal/remote_api" -) - -const ( - apiPath = "/rpc_http" -) - -var ( - // Incoming headers. - ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket") - dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo") - traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context") - curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") - userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") - remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") - - // Outgoing headers. 
- apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") - apiEndpointHeaderValue = []string{"app-engine-apis"} - apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method") - apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"} - apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline") - apiContentType = http.CanonicalHeaderKey("Content-Type") - apiContentTypeValue = []string{"application/octet-stream"} - logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count") - - apiHTTPClient = &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: limitDial, - }, - } -) - -func apiURL() *url.URL { - host, port := "appengine.googleapis.internal", "10001" - if h := os.Getenv("API_HOST"); h != "" { - host = h - } - if p := os.Getenv("API_PORT"); p != "" { - port = p - } - return &url.URL{ - Scheme: "http", - Host: host + ":" + port, - Path: apiPath, - } -} - -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - stopFlushing := make(chan int) - - ctxs.Lock() - ctxs.m[r] = c - ctxs.Unlock() - defer func() { - ctxs.Lock() - delete(ctxs.m, r) - ctxs.Unlock() - }() - - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. - r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. - r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } - - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. - go c.logFlusher(stopFlushing) - - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more - - stopFlushing <- 1 // any logging beyond this point will be dropped - - // Flush any pending logs asynchronously. - c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - go c.flushLog(false) - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) - - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } -} - -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() - - http.DefaultServeMux.ServeHTTP(c, r) -} - -func renderPanic(x interface{}) string { - buf := make([]byte, 16<<10) // 16 KB should be plenty - buf = buf[:runtime.Stack(buf, false)] - - // Remove the first few stack frames: - // this func - // the recover closure in the caller - // That will root the stack trace at the site of the panic. 
- const ( - skipStart = "internal.renderPanic" - skipFrames = 2 - ) - start := bytes.Index(buf, []byte(skipStart)) - p := start - for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { - p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 - if p < 0 { - break - } - } - if p >= 0 { - // buf[start:p+1] is the block to remove. - // Copy buf[p+1:] over buf[start:] and shrink buf. - copy(buf[start:], buf[p+1:]) - buf = buf[:len(buf)-(p+1-start)] - } - - // Add panic heading. - head := fmt.Sprintf("panic: %v\n\n", x) - if len(head) > len(buf) { - // Extremely unlikely to happen. - return head - } - copy(buf[len(head):], buf) - copy(buf, head) - - return string(buf) -} - -var ctxs = struct { - sync.Mutex - m map[*http.Request]*context - bg *context // background context, lazily initialized - // dec is used by tests to decorate the netcontext.Context returned - // for a given request. This allows tests to add overrides (such as - // WithAppIDOverride) to the context. The map is nil outside tests. - dec map[*http.Request]func(netcontext.Context) netcontext.Context -}{ - m: make(map[*http.Request]*context), -} - -// context represents the context of an in-flight HTTP request. -// It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { - req *http.Request - - outCode int - outHeader http.Header - outBody []byte - - pendingLogs struct { - sync.Mutex - lines []*logpb.UserAppLogLine - flushes int - } - - apiURL *url.URL -} - -var contextKey = "holds a *context" - -func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) - return c -} - -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) - if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { - ctx = withNamespace(ctx, ns) - } - return ctx -} - -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) -} - -func IncomingHeaders(ctx netcontext.Context) http.Header { - if c := fromContext(ctx); c != nil { - return c.req.Header - } - return nil -} - -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { - ctxs.Lock() - c := ctxs.m[req] - d := ctxs.dec[req] - ctxs.Unlock() - - if d != nil { - parent = d(parent) - } - - if c == nil { - // Someone passed in an http.Request that is not in-flight. - // We panic here rather than panicking at a later point - // so that stack traces will be more sensible. - log.Panic("appengine: NewContext passed an unknown http.Request") - } - return withContext(parent, c) -} - -func BackgroundContext() netcontext.Context { - ctxs.Lock() - defer ctxs.Unlock() - - if ctxs.bg != nil { - return toContext(ctxs.bg) - } - - // Compute background security ticket. - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - ticket := fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - - ctxs.bg = &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go ctxs.bg.logFlusher(make(chan int)) - - return toContext(ctxs.bg) -} - -// RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. 
It returns a closure to delete -// the registration. -// It should only be used by aetest package. -func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) func() { - c := &context{ - req: req, - apiURL: apiURL, - } - ctxs.Lock() - defer ctxs.Unlock() - if _, ok := ctxs.m[req]; ok { - log.Panic("req already associated with context") - } - if _, ok := ctxs.dec[req]; ok { - log.Panic("req already associated with context") - } - if ctxs.dec == nil { - ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context) - } - ctxs.m[req] = c - ctxs.dec[req] = decorate - - return func() { - ctxs.Lock() - delete(ctxs.m, req) - delete(ctxs.dec, req) - ctxs.Unlock() - } -} - -var errTimeout = &CallError{ - Detail: "Deadline exceeded", - Code: int32(remotepb.RpcError_CANCELLED), - Timeout: true, -} - -func (c *context) Header() http.Header { return c.outHeader } - -// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status -// codes do not permit a response body (nor response entity headers such as -// Content-Length, Content-Type, etc). -func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -func (c *context) Write(b []byte) (int, error) { - if c.outCode == 0 { - c.WriteHeader(http.StatusOK) - } - if len(b) > 0 && !bodyAllowedForStatus(c.outCode) { - return 0, http.ErrBodyNotAllowed - } - c.outBody = append(c.outBody, b...) - return len(b), nil -} - -func (c *context) WriteHeader(code int) { - if c.outCode != 0 { - logf(c, 3, "WriteHeader called multiple times on request.") // error level - return - } - c.outCode = code -} - -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { - hreq := &http.Request{ - Method: "POST", - URL: c.apiURL, - Header: http.Header{ - apiEndpointHeader: apiEndpointHeaderValue, - apiMethodHeader: apiMethodHeaderValue, - apiContentType: apiContentTypeValue, - apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)}, - }, - Body: ioutil.NopCloser(bytes.NewReader(body)), - ContentLength: int64(len(body)), - Host: c.apiURL.Host, - } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) - } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) - } - - tr := apiHTTPClient.Transport.(*http.Transport) - - var timedOut int32 // atomic; set to 1 if timed out - t := time.AfterFunc(timeout, func() { - atomic.StoreInt32(&timedOut, 1) - tr.CancelRequest(hreq) - }) - defer t.Stop() - defer func() { - // Check if timeout was exceeded. 
- if atomic.LoadInt32(&timedOut) != 0 { - err = errTimeout - } - }() - - hresp, err := apiHTTPClient.Do(hreq) - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge HTTP failed: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - defer hresp.Body.Close() - hrespBody, err := ioutil.ReadAll(hresp.Body) - if hresp.StatusCode != 200 { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge response bad: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return hrespBody, nil -} - -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { - if ns := NamespaceFromContext(ctx); ns != "" { - if fn, ok := NamespaceMods[service]; ok { - fn(in, ns) - } - } - - if f, ctx, ok := callOverrideFromContext(ctx); ok { - return f(ctx, service, method, in, out) - } - - // Handle already-done contexts quickly. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errors.New("not an App Engine context") - } - - // Apply transaction modifications if we're in a transaction. - if t := transactionFromContext(ctx); t != nil { - if t.finished { - return errors.New("transaction context has expired") - } - applyTransaction(in, &t.transaction) - } - - // Default RPC timeout is 60s. - timeout := 60 * time.Second - if deadline, ok := ctx.Deadline(); ok { - timeout = deadline.Sub(time.Now()) - } - - data, err := proto.Marshal(in) - if err != nil { - return err - } - - ticket := c.req.Header.Get(ticketHeader) - req := &remotepb.Request{ - ServiceName: &service, - Method: &method, - Request: data, - RequestId: &ticket, - } - hreqBody, err := proto.Marshal(req) - if err != nil { - return err - } - - hrespBody, err := c.post(hreqBody, timeout) - if err != nil { - return err - } - - res := &remotepb.Response{} - if err := proto.Unmarshal(hrespBody, res); err != nil { - return err - } - if res.RpcError != nil { - ce := &CallError{ - Detail: res.RpcError.GetDetail(), - Code: *res.RpcError.Code, - } - switch remotepb.RpcError_ErrorCode(ce.Code) { - case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED: - ce.Timeout = true - } - return ce - } - if res.ApplicationError != nil { - return &APIError{ - Service: *req.ServiceName, - Detail: res.ApplicationError.GetDetail(), - Code: *res.ApplicationError.Code, - } - } - if res.Exception != nil || res.JavaException != nil { - // This shouldn't happen, but let's be defensive. - return &CallError{ - Detail: "service bridge returned exception", - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return proto.Unmarshal(res.Response, out) -} - -func (c *context) Request() *http.Request { - return c.req -} - -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { - // Truncate long log lines. - // TODO(dsymonds): Check if this is still necessary. 
- const lim = 8 << 10 - if len(*ll.Message) > lim { - suffix := fmt.Sprintf("...(length %d)", len(*ll.Message)) - ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix) - } - - c.pendingLogs.Lock() - c.pendingLogs.lines = append(c.pendingLogs.lines, ll) - c.pendingLogs.Unlock() -} - -var logLevelName = map[int64]string{ - 0: "DEBUG", - 1: "INFO", - 2: "WARNING", - 3: "ERROR", - 4: "CRITICAL", -} - -func logf(c *context, level int64, format string, args ...interface{}) { - s := fmt.Sprintf(format, args...) - s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - log.Print(logLevelName[level] + ": " + s) -} - -// flushLog attempts to flush any pending logs to the appserver. -// It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { - c.pendingLogs.Lock() - // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. - n, rem := 0, 30<<20 - for ; n < len(c.pendingLogs.lines); n++ { - ll := c.pendingLogs.lines[n] - // Each log line will require about 3 bytes of overhead. - nb := proto.Size(ll) + 3 - if nb > rem { - break - } - rem -= nb - } - lines := c.pendingLogs.lines[:n] - c.pendingLogs.lines = c.pendingLogs.lines[n:] - c.pendingLogs.Unlock() - - if len(lines) == 0 && !force { - // Nothing to flush. - return false - } - - rescueLogs := false - defer func() { - if rescueLogs { - c.pendingLogs.Lock() - c.pendingLogs.lines = append(lines, c.pendingLogs.lines...) - c.pendingLogs.Unlock() - } - }() - - buf, err := proto.Marshal(&logpb.UserAppLogGroup{ - LogLine: lines, - }) - if err != nil { - log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err) - rescueLogs = true - return false - } - - req := &logpb.FlushRequest{ - Logs: buf, - } - res := &basepb.VoidProto{} - c.pendingLogs.Lock() - c.pendingLogs.flushes++ - c.pendingLogs.Unlock() - if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil { - log.Printf("internal.flushLog: Flush RPC: %v", err) - rescueLogs = true - return false - } - return true -} - -const ( - // Log flushing parameters. - flushInterval = 1 * time.Second - forceFlushInterval = 60 * time.Second -) - -func (c *context) logFlusher(stop <-chan int) { - lastFlush := time.Now() - tick := time.NewTicker(flushInterval) - for { - select { - case <-stop: - // Request finished. - tick.Stop() - return - case <-tick.C: - force := time.Now().Sub(lastFlush) > forceFlushInterval - if c.flushLog(force) { - lastFlush = time.Now() - } - } - } -} - -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) -} diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go deleted file mode 100644 index 597f66e6e..000000000 --- a/vendor/google.golang.org/appengine/internal/api_classic.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -// +build appengine - -package internal - -import ( - "errors" - "fmt" - "net/http" - "time" - - "appengine" - "appengine_internal" - basepb "appengine_internal/base" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" -) - -var contextKey = "holds an appengine.Context" - -func fromContext(ctx netcontext.Context) appengine.Context { - c, _ := ctx.Value(&contextKey).(appengine.Context) - return c -} - -// This is only for classic App Engine adapters. -func ClassicContextFromContext(ctx netcontext.Context) appengine.Context { - return fromContext(ctx) -} - -func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) - - s := &basepb.StringProto{} - c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) - if ns := s.GetValue(); ns != "" { - ctx = NamespacedContext(ctx, ns) - } - - return ctx -} - -func IncomingHeaders(ctx netcontext.Context) http.Header { - if c := fromContext(ctx); c != nil { - if req, ok := c.Request().(*http.Request); ok { - return req.Header - } - } - return nil -} - -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { - c := appengine.NewContext(req) - return withContext(parent, c) -} - -type testingContext struct { - appengine.Context - - req *http.Request -} - -func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" } -func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error { - if service == "__go__" && method == "GetNamespace" { - return nil - } - return fmt.Errorf("testingContext: unsupported Call") -} -func (t *testingContext) Request() interface{} { return t.req } - -func ContextForTesting(req *http.Request) netcontext.Context { - return withContext(netcontext.Background(), &testingContext{req: req}) -} - -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { - if ns := NamespaceFromContext(ctx); ns != "" { - if fn, ok := NamespaceMods[service]; ok { - fn(in, ns) - } - } - - if f, ctx, ok := callOverrideFromContext(ctx); ok { - return f(ctx, service, method, in, out) - } - - // Handle already-done contexts quickly. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errors.New("not an App Engine context") - } - - // Apply transaction modifications if we're in a transaction. 
- if t := transactionFromContext(ctx); t != nil { - if t.finished { - return errors.New("transaction context has expired") - } - applyTransaction(in, &t.transaction) - } - - var opts *appengine_internal.CallOptions - if d, ok := ctx.Deadline(); ok { - opts = &appengine_internal.CallOptions{ - Timeout: d.Sub(time.Now()), - } - } - - err := c.Call(service, method, in, out, opts) - switch v := err.(type) { - case *appengine_internal.APIError: - return &APIError{ - Service: v.Service, - Detail: v.Detail, - Code: v.Code, - } - case *appengine_internal.CallError: - return &CallError{ - Detail: v.Detail, - Code: v.Code, - Timeout: v.Timeout, - } - } - return err -} - -func handleHTTP(w http.ResponseWriter, r *http.Request) { - panic("handleHTTP called; this should be impossible") -} - -func logf(c appengine.Context, level int64, format string, args ...interface{}) { - var fn func(format string, args ...interface{}) - switch level { - case 0: - fn = c.Debugf - case 1: - fn = c.Infof - case 2: - fn = c.Warningf - case 3: - fn = c.Errorf - case 4: - fn = c.Criticalf - default: - // This shouldn't happen. - fn = c.Criticalf - } - fn(format, args...) -} diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go deleted file mode 100644 index 2db33a774..000000000 --- a/vendor/google.golang.org/appengine/internal/api_common.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -import ( - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" -) - -type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error - -var callOverrideKey = "holds []CallOverrideFunc" - -func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { - // We avoid appending to any existing call override - // so we don't risk overwriting a popped stack below. - var cofs []CallOverrideFunc - if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok { - cofs = append(cofs, uf...) - } - cofs = append(cofs, f) - return netcontext.WithValue(ctx, &callOverrideKey, cofs) -} - -func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { - cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) - if len(cofs) == 0 { - return nil, nil, false - } - // We found a list of overrides; grab the last, and reconstitute a - // context that will hide it. - f := cofs[len(cofs)-1] - ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) - return f, ctx, true -} - -type logOverrideFunc func(level int64, format string, args ...interface{}) - -var logOverrideKey = "holds a logOverrideFunc" - -func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { - return netcontext.WithValue(ctx, &logOverrideKey, f) -} - -var appIDOverrideKey = "holds a string, being the full app ID" - -func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { - return netcontext.WithValue(ctx, &appIDOverrideKey, appID) -} - -var namespaceKey = "holds the namespace string" - -func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { - return netcontext.WithValue(ctx, &namespaceKey, ns) -} - -func NamespaceFromContext(ctx netcontext.Context) string { - // If there's no namespace, return the empty string. 
- ns, _ := ctx.Value(&namespaceKey).(string) - return ns -} - -// FullyQualifiedAppID returns the fully-qualified application ID. -// This may contain a partition prefix (e.g. "s~" for High Replication apps), -// or a domain prefix (e.g. "example.com:"). -func FullyQualifiedAppID(ctx netcontext.Context) string { - if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { - return id - } - return fullyQualifiedAppID(ctx) -} - -func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { - if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { - f(level, format, args...) - return - } - logf(fromContext(ctx), level, format, args...) -} - -// NamespacedContext wraps a Context to support namespaces. -func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { - return withNamespace(ctx, namespace) -} diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go deleted file mode 100644 index 11df8c07b..000000000 --- a/vendor/google.golang.org/appengine/internal/app_id.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -import ( - "strings" -) - -func parseFullAppID(appid string) (partition, domain, displayID string) { - if i := strings.Index(appid, "~"); i != -1 { - partition, appid = appid[:i], appid[i+1:] - } - if i := strings.Index(appid, ":"); i != -1 { - domain, appid = appid[:i], appid[i+1:] - } - return partition, domain, appid -} - -// appID returns "appid" or "domain.com:appid". -func appID(fullAppID string) string { - _, dom, dis := parseFullAppID(fullAppID) - if dom != "" { - return dom + ":" + dis - } - return dis -} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go deleted file mode 100644 index 87d9701b8..000000000 --- a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go +++ /dev/null @@ -1,296 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto -// DO NOT EDIT! - -/* -Package app_identity is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/app_identity/app_identity_service.proto - -It has these top-level messages: - AppIdentityServiceError - SignForAppRequest - SignForAppResponse - GetPublicCertificateForAppRequest - PublicCertificate - GetPublicCertificateForAppResponse - GetServiceAccountNameRequest - GetServiceAccountNameResponse - GetAccessTokenRequest - GetAccessTokenResponse - GetDefaultGcsBucketNameRequest - GetDefaultGcsBucketNameResponse -*/ -package app_identity - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type AppIdentityServiceError_ErrorCode int32 - -const ( - AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0 - AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9 - AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000 - AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001 - AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002 - AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003 - AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005 - AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006 -) - -var AppIdentityServiceError_ErrorCode_name = map[int32]string{ - 0: "SUCCESS", - 9: "UNKNOWN_SCOPE", - 1000: "BLOB_TOO_LARGE", - 1001: "DEADLINE_EXCEEDED", - 1002: "NOT_A_VALID_APP", - 1003: "UNKNOWN_ERROR", - 1005: "NOT_ALLOWED", - 1006: "NOT_IMPLEMENTED", -} -var AppIdentityServiceError_ErrorCode_value = map[string]int32{ - "SUCCESS": 0, - "UNKNOWN_SCOPE": 9, - "BLOB_TOO_LARGE": 1000, - "DEADLINE_EXCEEDED": 1001, - "NOT_A_VALID_APP": 1002, - "UNKNOWN_ERROR": 1003, - "NOT_ALLOWED": 1005, - "NOT_IMPLEMENTED": 1006, -} - -func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode { - p := new(AppIdentityServiceError_ErrorCode) - *p = x - return p -} -func (x AppIdentityServiceError_ErrorCode) String() string { - return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x)) -} -func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode") - if err != nil { - return err - } - *x = AppIdentityServiceError_ErrorCode(value) - return nil -} - -type AppIdentityServiceError struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} } -func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) } -func (*AppIdentityServiceError) ProtoMessage() {} - -type SignForAppRequest struct { - BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign" json:"bytes_to_sign,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} } -func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) } -func (*SignForAppRequest) ProtoMessage() {} - -func (m *SignForAppRequest) GetBytesToSign() []byte { - if m != nil { - return m.BytesToSign - } - return nil -} - -type SignForAppResponse struct { - KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` - SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes" json:"signature_bytes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} } -func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) } -func (*SignForAppResponse) ProtoMessage() {} - -func (m *SignForAppResponse) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -func (m *SignForAppResponse) GetSignatureBytes() []byte { - if m != nil { - return m.SignatureBytes - } - return nil -} - -type GetPublicCertificateForAppRequest struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetPublicCertificateForAppRequest) Reset() { *m = 
GetPublicCertificateForAppRequest{} } -func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) } -func (*GetPublicCertificateForAppRequest) ProtoMessage() {} - -type PublicCertificate struct { - KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"` - X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem" json:"x509_certificate_pem,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PublicCertificate) Reset() { *m = PublicCertificate{} } -func (m *PublicCertificate) String() string { return proto.CompactTextString(m) } -func (*PublicCertificate) ProtoMessage() {} - -func (m *PublicCertificate) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -func (m *PublicCertificate) GetX509CertificatePem() string { - if m != nil && m.X509CertificatePem != nil { - return *m.X509CertificatePem - } - return "" -} - -type GetPublicCertificateForAppResponse struct { - PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list" json:"public_certificate_list,omitempty"` - MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second" json:"max_client_cache_time_in_second,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} } -func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) } -func (*GetPublicCertificateForAppResponse) ProtoMessage() {} - -func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate { - if m != nil { - return m.PublicCertificateList - } - return nil -} - -func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 { - if m != nil && m.MaxClientCacheTimeInSecond != nil { - return *m.MaxClientCacheTimeInSecond - } - return 0 -} - -type GetServiceAccountNameRequest struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} } -func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetServiceAccountNameRequest) ProtoMessage() {} - -type GetServiceAccountNameResponse struct { - ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name" json:"service_account_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} } -func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) } -func (*GetServiceAccountNameResponse) ProtoMessage() {} - -func (m *GetServiceAccountNameResponse) GetServiceAccountName() string { - if m != nil && m.ServiceAccountName != nil { - return *m.ServiceAccountName - } - return "" -} - -type GetAccessTokenRequest struct { - Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"` - ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id" json:"service_account_id,omitempty"` - ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name" json:"service_account_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} } -func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) } -func (*GetAccessTokenRequest) ProtoMessage() {} - -func (m *GetAccessTokenRequest) 
GetScope() []string { - if m != nil { - return m.Scope - } - return nil -} - -func (m *GetAccessTokenRequest) GetServiceAccountId() int64 { - if m != nil && m.ServiceAccountId != nil { - return *m.ServiceAccountId - } - return 0 -} - -func (m *GetAccessTokenRequest) GetServiceAccountName() string { - if m != nil && m.ServiceAccountName != nil { - return *m.ServiceAccountName - } - return "" -} - -type GetAccessTokenResponse struct { - AccessToken *string `protobuf:"bytes,1,opt,name=access_token" json:"access_token,omitempty"` - ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} } -func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) } -func (*GetAccessTokenResponse) ProtoMessage() {} - -func (m *GetAccessTokenResponse) GetAccessToken() string { - if m != nil && m.AccessToken != nil { - return *m.AccessToken - } - return "" -} - -func (m *GetAccessTokenResponse) GetExpirationTime() int64 { - if m != nil && m.ExpirationTime != nil { - return *m.ExpirationTime - } - return 0 -} - -type GetDefaultGcsBucketNameRequest struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} } -func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {} - -type GetDefaultGcsBucketNameResponse struct { - DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name" json:"default_gcs_bucket_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} } -func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) } -func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {} - -func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string { - if m != nil && m.DefaultGcsBucketName != nil { - return *m.DefaultGcsBucketName - } - return "" -} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto deleted file mode 100644 index 19610ca5b..000000000 --- a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto2"; -option go_package = "app_identity"; - -package appengine; - -message AppIdentityServiceError { - enum ErrorCode { - SUCCESS = 0; - UNKNOWN_SCOPE = 9; - BLOB_TOO_LARGE = 1000; - DEADLINE_EXCEEDED = 1001; - NOT_A_VALID_APP = 1002; - UNKNOWN_ERROR = 1003; - NOT_ALLOWED = 1005; - NOT_IMPLEMENTED = 1006; - } -} - -message SignForAppRequest { - optional bytes bytes_to_sign = 1; -} - -message SignForAppResponse { - optional string key_name = 1; - optional bytes signature_bytes = 2; -} - -message GetPublicCertificateForAppRequest { -} - -message PublicCertificate { - optional string key_name = 1; - optional string x509_certificate_pem = 2; -} - -message GetPublicCertificateForAppResponse { - repeated PublicCertificate public_certificate_list = 1; - optional int64 max_client_cache_time_in_second = 2; -} - -message GetServiceAccountNameRequest { -} - -message GetServiceAccountNameResponse { - optional string service_account_name = 1; -} - -message GetAccessTokenRequest { - 
repeated string scope = 1; - optional int64 service_account_id = 2; - optional string service_account_name = 3; -} - -message GetAccessTokenResponse { - optional string access_token = 1; - optional int64 expiration_time = 2; -} - -message GetDefaultGcsBucketNameRequest { -} - -message GetDefaultGcsBucketNameResponse { - optional string default_gcs_bucket_name = 1; -} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go deleted file mode 100644 index 36a195650..000000000 --- a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go +++ /dev/null @@ -1,133 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/base/api_base.proto -// DO NOT EDIT! - -/* -Package base is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/base/api_base.proto - -It has these top-level messages: - StringProto - Integer32Proto - Integer64Proto - BoolProto - DoubleProto - BytesProto - VoidProto -*/ -package base - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type StringProto struct { - Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StringProto) Reset() { *m = StringProto{} } -func (m *StringProto) String() string { return proto.CompactTextString(m) } -func (*StringProto) ProtoMessage() {} - -func (m *StringProto) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type Integer32Proto struct { - Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Integer32Proto) Reset() { *m = Integer32Proto{} } -func (m *Integer32Proto) String() string { return proto.CompactTextString(m) } -func (*Integer32Proto) ProtoMessage() {} - -func (m *Integer32Proto) GetValue() int32 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Integer64Proto struct { - Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Integer64Proto) Reset() { *m = Integer64Proto{} } -func (m *Integer64Proto) String() string { return proto.CompactTextString(m) } -func (*Integer64Proto) ProtoMessage() {} - -func (m *Integer64Proto) GetValue() int64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type BoolProto struct { - Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BoolProto) Reset() { *m = BoolProto{} } -func (m *BoolProto) String() string { return proto.CompactTextString(m) } -func (*BoolProto) ProtoMessage() {} - -func (m *BoolProto) GetValue() bool { - if m != nil && m.Value != nil { - return *m.Value - } - return false -} - -type DoubleProto struct { - Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DoubleProto) Reset() { *m = DoubleProto{} } -func (m *DoubleProto) String() string { return proto.CompactTextString(m) } -func (*DoubleProto) ProtoMessage() {} - -func (m *DoubleProto) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type BytesProto struct { - Value []byte 
`protobuf:"bytes,1,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BytesProto) Reset() { *m = BytesProto{} } -func (m *BytesProto) String() string { return proto.CompactTextString(m) } -func (*BytesProto) ProtoMessage() {} - -func (m *BytesProto) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type VoidProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *VoidProto) Reset() { *m = VoidProto{} } -func (m *VoidProto) String() string { return proto.CompactTextString(m) } -func (*VoidProto) ProtoMessage() {} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto deleted file mode 100644 index 56cd7a3ca..000000000 --- a/vendor/google.golang.org/appengine/internal/base/api_base.proto +++ /dev/null @@ -1,33 +0,0 @@ -// Built-in base types for API calls. Primarily useful as return types. - -syntax = "proto2"; -option go_package = "base"; - -package appengine.base; - -message StringProto { - required string value = 1; -} - -message Integer32Proto { - required int32 value = 1; -} - -message Integer64Proto { - required int64 value = 1; -} - -message BoolProto { - required bool value = 1; -} - -message DoubleProto { - required double value = 1; -} - -message BytesProto { - required bytes value = 1 [ctype=CORD]; -} - -message VoidProto { -} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go deleted file mode 100644 index 8613cb731..000000000 --- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go +++ /dev/null @@ -1,2778 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto -// DO NOT EDIT! - -/* -Package datastore is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/datastore/datastore_v3.proto - -It has these top-level messages: - Action - PropertyValue - Property - Path - Reference - User - EntityProto - CompositeProperty - Index - CompositeIndex - IndexPostfix - IndexPosition - Snapshot - InternalHeader - Transaction - Query - CompiledQuery - CompiledCursor - Cursor - Error - Cost - GetRequest - GetResponse - PutRequest - PutResponse - TouchRequest - TouchResponse - DeleteRequest - DeleteResponse - NextRequest - QueryResult - AllocateIdsRequest - AllocateIdsResponse - CompositeIndices - AddActionsRequest - AddActionsResponse - BeginTransactionRequest - CommitResponse -*/ -package datastore - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type Property_Meaning int32 - -const ( - Property_NO_MEANING Property_Meaning = 0 - Property_BLOB Property_Meaning = 14 - Property_TEXT Property_Meaning = 15 - Property_BYTESTRING Property_Meaning = 16 - Property_ATOM_CATEGORY Property_Meaning = 1 - Property_ATOM_LINK Property_Meaning = 2 - Property_ATOM_TITLE Property_Meaning = 3 - Property_ATOM_CONTENT Property_Meaning = 4 - Property_ATOM_SUMMARY Property_Meaning = 5 - Property_ATOM_AUTHOR Property_Meaning = 6 - Property_GD_WHEN Property_Meaning = 7 - Property_GD_EMAIL Property_Meaning = 8 - Property_GEORSS_POINT Property_Meaning = 9 - Property_GD_IM Property_Meaning = 10 - Property_GD_PHONENUMBER Property_Meaning = 11 - Property_GD_POSTALADDRESS Property_Meaning = 12 - Property_GD_RATING Property_Meaning = 13 - Property_BLOBKEY Property_Meaning = 17 - Property_ENTITY_PROTO Property_Meaning = 19 - Property_INDEX_VALUE Property_Meaning = 18 -) - -var Property_Meaning_name = map[int32]string{ - 0: "NO_MEANING", - 14: "BLOB", - 15: "TEXT", - 16: "BYTESTRING", - 1: "ATOM_CATEGORY", - 2: "ATOM_LINK", - 3: "ATOM_TITLE", - 4: "ATOM_CONTENT", - 5: "ATOM_SUMMARY", - 6: "ATOM_AUTHOR", - 7: "GD_WHEN", - 8: "GD_EMAIL", - 9: "GEORSS_POINT", - 10: "GD_IM", - 11: "GD_PHONENUMBER", - 12: "GD_POSTALADDRESS", - 13: "GD_RATING", - 17: "BLOBKEY", - 19: "ENTITY_PROTO", - 18: "INDEX_VALUE", -} -var Property_Meaning_value = map[string]int32{ - "NO_MEANING": 0, - "BLOB": 14, - "TEXT": 15, - "BYTESTRING": 16, - "ATOM_CATEGORY": 1, - "ATOM_LINK": 2, - "ATOM_TITLE": 3, - "ATOM_CONTENT": 4, - "ATOM_SUMMARY": 5, - "ATOM_AUTHOR": 6, - "GD_WHEN": 7, - "GD_EMAIL": 8, - "GEORSS_POINT": 9, - "GD_IM": 10, - "GD_PHONENUMBER": 11, - "GD_POSTALADDRESS": 12, - "GD_RATING": 13, - "BLOBKEY": 17, - "ENTITY_PROTO": 19, - "INDEX_VALUE": 18, -} - -func (x Property_Meaning) Enum() *Property_Meaning { - p := new(Property_Meaning) - *p = x - return p -} -func (x Property_Meaning) String() string { - return proto.EnumName(Property_Meaning_name, int32(x)) -} -func (x *Property_Meaning) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning") - if err != nil { - return err - } - *x = Property_Meaning(value) - return nil -} - -type Property_FtsTokenizationOption int32 - -const ( - Property_HTML Property_FtsTokenizationOption = 1 - Property_ATOM Property_FtsTokenizationOption = 2 -) - -var Property_FtsTokenizationOption_name = map[int32]string{ - 1: "HTML", - 2: "ATOM", -} -var Property_FtsTokenizationOption_value = map[string]int32{ - "HTML": 1, - "ATOM": 2, -} - -func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption { - p := new(Property_FtsTokenizationOption) - *p = x - return p -} -func (x Property_FtsTokenizationOption) String() string { - return proto.EnumName(Property_FtsTokenizationOption_name, int32(x)) -} -func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption") - if err != nil { - return err - } - *x = Property_FtsTokenizationOption(value) - return nil -} - -type EntityProto_Kind int32 - -const ( - EntityProto_GD_CONTACT EntityProto_Kind = 1 - EntityProto_GD_EVENT EntityProto_Kind = 2 - EntityProto_GD_MESSAGE EntityProto_Kind = 3 -) - -var EntityProto_Kind_name = map[int32]string{ - 1: "GD_CONTACT", - 2: "GD_EVENT", - 3: "GD_MESSAGE", -} -var EntityProto_Kind_value = map[string]int32{ - 
"GD_CONTACT": 1, - "GD_EVENT": 2, - "GD_MESSAGE": 3, -} - -func (x EntityProto_Kind) Enum() *EntityProto_Kind { - p := new(EntityProto_Kind) - *p = x - return p -} -func (x EntityProto_Kind) String() string { - return proto.EnumName(EntityProto_Kind_name, int32(x)) -} -func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind") - if err != nil { - return err - } - *x = EntityProto_Kind(value) - return nil -} - -type Index_Property_Direction int32 - -const ( - Index_Property_ASCENDING Index_Property_Direction = 1 - Index_Property_DESCENDING Index_Property_Direction = 2 -) - -var Index_Property_Direction_name = map[int32]string{ - 1: "ASCENDING", - 2: "DESCENDING", -} -var Index_Property_Direction_value = map[string]int32{ - "ASCENDING": 1, - "DESCENDING": 2, -} - -func (x Index_Property_Direction) Enum() *Index_Property_Direction { - p := new(Index_Property_Direction) - *p = x - return p -} -func (x Index_Property_Direction) String() string { - return proto.EnumName(Index_Property_Direction_name, int32(x)) -} -func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction") - if err != nil { - return err - } - *x = Index_Property_Direction(value) - return nil -} - -type CompositeIndex_State int32 - -const ( - CompositeIndex_WRITE_ONLY CompositeIndex_State = 1 - CompositeIndex_READ_WRITE CompositeIndex_State = 2 - CompositeIndex_DELETED CompositeIndex_State = 3 - CompositeIndex_ERROR CompositeIndex_State = 4 -) - -var CompositeIndex_State_name = map[int32]string{ - 1: "WRITE_ONLY", - 2: "READ_WRITE", - 3: "DELETED", - 4: "ERROR", -} -var CompositeIndex_State_value = map[string]int32{ - "WRITE_ONLY": 1, - "READ_WRITE": 2, - "DELETED": 3, - "ERROR": 4, -} - -func (x CompositeIndex_State) Enum() *CompositeIndex_State { - p := new(CompositeIndex_State) - *p = x - return p -} -func (x CompositeIndex_State) String() string { - return proto.EnumName(CompositeIndex_State_name, int32(x)) -} -func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State") - if err != nil { - return err - } - *x = CompositeIndex_State(value) - return nil -} - -type Snapshot_Status int32 - -const ( - Snapshot_INACTIVE Snapshot_Status = 0 - Snapshot_ACTIVE Snapshot_Status = 1 -) - -var Snapshot_Status_name = map[int32]string{ - 0: "INACTIVE", - 1: "ACTIVE", -} -var Snapshot_Status_value = map[string]int32{ - "INACTIVE": 0, - "ACTIVE": 1, -} - -func (x Snapshot_Status) Enum() *Snapshot_Status { - p := new(Snapshot_Status) - *p = x - return p -} -func (x Snapshot_Status) String() string { - return proto.EnumName(Snapshot_Status_name, int32(x)) -} -func (x *Snapshot_Status) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status") - if err != nil { - return err - } - *x = Snapshot_Status(value) - return nil -} - -type Query_Hint int32 - -const ( - Query_ORDER_FIRST Query_Hint = 1 - Query_ANCESTOR_FIRST Query_Hint = 2 - Query_FILTER_FIRST Query_Hint = 3 -) - -var Query_Hint_name = map[int32]string{ - 1: "ORDER_FIRST", - 2: "ANCESTOR_FIRST", - 3: "FILTER_FIRST", -} -var Query_Hint_value = map[string]int32{ - "ORDER_FIRST": 1, - "ANCESTOR_FIRST": 2, - "FILTER_FIRST": 3, -} - -func (x Query_Hint) Enum() *Query_Hint { - p := new(Query_Hint) - *p = x - return p -} 
-func (x Query_Hint) String() string { - return proto.EnumName(Query_Hint_name, int32(x)) -} -func (x *Query_Hint) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint") - if err != nil { - return err - } - *x = Query_Hint(value) - return nil -} - -type Query_Filter_Operator int32 - -const ( - Query_Filter_LESS_THAN Query_Filter_Operator = 1 - Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2 - Query_Filter_GREATER_THAN Query_Filter_Operator = 3 - Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4 - Query_Filter_EQUAL Query_Filter_Operator = 5 - Query_Filter_IN Query_Filter_Operator = 6 - Query_Filter_EXISTS Query_Filter_Operator = 7 -) - -var Query_Filter_Operator_name = map[int32]string{ - 1: "LESS_THAN", - 2: "LESS_THAN_OR_EQUAL", - 3: "GREATER_THAN", - 4: "GREATER_THAN_OR_EQUAL", - 5: "EQUAL", - 6: "IN", - 7: "EXISTS", -} -var Query_Filter_Operator_value = map[string]int32{ - "LESS_THAN": 1, - "LESS_THAN_OR_EQUAL": 2, - "GREATER_THAN": 3, - "GREATER_THAN_OR_EQUAL": 4, - "EQUAL": 5, - "IN": 6, - "EXISTS": 7, -} - -func (x Query_Filter_Operator) Enum() *Query_Filter_Operator { - p := new(Query_Filter_Operator) - *p = x - return p -} -func (x Query_Filter_Operator) String() string { - return proto.EnumName(Query_Filter_Operator_name, int32(x)) -} -func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator") - if err != nil { - return err - } - *x = Query_Filter_Operator(value) - return nil -} - -type Query_Order_Direction int32 - -const ( - Query_Order_ASCENDING Query_Order_Direction = 1 - Query_Order_DESCENDING Query_Order_Direction = 2 -) - -var Query_Order_Direction_name = map[int32]string{ - 1: "ASCENDING", - 2: "DESCENDING", -} -var Query_Order_Direction_value = map[string]int32{ - "ASCENDING": 1, - "DESCENDING": 2, -} - -func (x Query_Order_Direction) Enum() *Query_Order_Direction { - p := new(Query_Order_Direction) - *p = x - return p -} -func (x Query_Order_Direction) String() string { - return proto.EnumName(Query_Order_Direction_name, int32(x)) -} -func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction") - if err != nil { - return err - } - *x = Query_Order_Direction(value) - return nil -} - -type Error_ErrorCode int32 - -const ( - Error_BAD_REQUEST Error_ErrorCode = 1 - Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2 - Error_INTERNAL_ERROR Error_ErrorCode = 3 - Error_NEED_INDEX Error_ErrorCode = 4 - Error_TIMEOUT Error_ErrorCode = 5 - Error_PERMISSION_DENIED Error_ErrorCode = 6 - Error_BIGTABLE_ERROR Error_ErrorCode = 7 - Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8 - Error_CAPABILITY_DISABLED Error_ErrorCode = 9 - Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10 - Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11 -) - -var Error_ErrorCode_name = map[int32]string{ - 1: "BAD_REQUEST", - 2: "CONCURRENT_TRANSACTION", - 3: "INTERNAL_ERROR", - 4: "NEED_INDEX", - 5: "TIMEOUT", - 6: "PERMISSION_DENIED", - 7: "BIGTABLE_ERROR", - 8: "COMMITTED_BUT_STILL_APPLYING", - 9: "CAPABILITY_DISABLED", - 10: "TRY_ALTERNATE_BACKEND", - 11: "SAFE_TIME_TOO_OLD", -} -var Error_ErrorCode_value = map[string]int32{ - "BAD_REQUEST": 1, - "CONCURRENT_TRANSACTION": 2, - "INTERNAL_ERROR": 3, - "NEED_INDEX": 4, - "TIMEOUT": 5, - "PERMISSION_DENIED": 6, - "BIGTABLE_ERROR": 7, - "COMMITTED_BUT_STILL_APPLYING": 8, - 
"CAPABILITY_DISABLED": 9, - "TRY_ALTERNATE_BACKEND": 10, - "SAFE_TIME_TOO_OLD": 11, -} - -func (x Error_ErrorCode) Enum() *Error_ErrorCode { - p := new(Error_ErrorCode) - *p = x - return p -} -func (x Error_ErrorCode) String() string { - return proto.EnumName(Error_ErrorCode_name, int32(x)) -} -func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode") - if err != nil { - return err - } - *x = Error_ErrorCode(value) - return nil -} - -type PutRequest_AutoIdPolicy int32 - -const ( - PutRequest_CURRENT PutRequest_AutoIdPolicy = 0 - PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1 -) - -var PutRequest_AutoIdPolicy_name = map[int32]string{ - 0: "CURRENT", - 1: "SEQUENTIAL", -} -var PutRequest_AutoIdPolicy_value = map[string]int32{ - "CURRENT": 0, - "SEQUENTIAL": 1, -} - -func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy { - p := new(PutRequest_AutoIdPolicy) - *p = x - return p -} -func (x PutRequest_AutoIdPolicy) String() string { - return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x)) -} -func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy") - if err != nil { - return err - } - *x = PutRequest_AutoIdPolicy(value) - return nil -} - -type Action struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *Action) Reset() { *m = Action{} } -func (m *Action) String() string { return proto.CompactTextString(m) } -func (*Action) ProtoMessage() {} - -type PropertyValue struct { - Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"` - BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"` - StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"` - Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue" json:"pointvalue,omitempty"` - Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue" json:"uservalue,omitempty"` - Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue" json:"referencevalue,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue) Reset() { *m = PropertyValue{} } -func (m *PropertyValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue) ProtoMessage() {} - -func (m *PropertyValue) GetInt64Value() int64 { - if m != nil && m.Int64Value != nil { - return *m.Int64Value - } - return 0 -} - -func (m *PropertyValue) GetBooleanValue() bool { - if m != nil && m.BooleanValue != nil { - return *m.BooleanValue - } - return false -} - -func (m *PropertyValue) GetStringValue() string { - if m != nil && m.StringValue != nil { - return *m.StringValue - } - return "" -} - -func (m *PropertyValue) GetDoubleValue() float64 { - if m != nil && m.DoubleValue != nil { - return *m.DoubleValue - } - return 0 -} - -func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue { - if m != nil { - return m.Pointvalue - } - return nil -} - -func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue { - if m != nil { - return m.Uservalue - } - return nil -} - -func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue { - if m != nil { - return m.Referencevalue - } - return nil -} - -type PropertyValue_PointValue struct { - X *float64 
`protobuf:"fixed64,6,req,name=x" json:"x,omitempty"` - Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} } -func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_PointValue) ProtoMessage() {} - -func (m *PropertyValue_PointValue) GetX() float64 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -func (m *PropertyValue_PointValue) GetY() float64 { - if m != nil && m.Y != nil { - return *m.Y - } - return 0 -} - -type PropertyValue_UserValue struct { - Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"` - AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain" json:"auth_domain,omitempty"` - Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"` - FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity" json:"federated_identity,omitempty"` - FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider" json:"federated_provider,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} } -func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_UserValue) ProtoMessage() {} - -func (m *PropertyValue_UserValue) GetEmail() string { - if m != nil && m.Email != nil { - return *m.Email - } - return "" -} - -func (m *PropertyValue_UserValue) GetAuthDomain() string { - if m != nil && m.AuthDomain != nil { - return *m.AuthDomain - } - return "" -} - -func (m *PropertyValue_UserValue) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *PropertyValue_UserValue) GetFederatedIdentity() string { - if m != nil && m.FederatedIdentity != nil { - return *m.FederatedIdentity - } - return "" -} - -func (m *PropertyValue_UserValue) GetFederatedProvider() string { - if m != nil && m.FederatedProvider != nil { - return *m.FederatedProvider - } - return "" -} - -type PropertyValue_ReferenceValue struct { - App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` - Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement" json:"pathelement,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} } -func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_ReferenceValue) ProtoMessage() {} - -func (m *PropertyValue_ReferenceValue) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *PropertyValue_ReferenceValue) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement { - if m != nil { - return m.Pathelement - } - return nil -} - -type PropertyValue_ReferenceValue_PathElement struct { - Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"` - Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"` - Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PropertyValue_ReferenceValue_PathElement) 
Reset() { - *m = PropertyValue_ReferenceValue_PathElement{} -} -func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {} - -func (m *PropertyValue_ReferenceValue_PathElement) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *PropertyValue_ReferenceValue_PathElement) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type Property struct { - Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"` - MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri" json:"meaning_uri,omitempty"` - Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` - Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"` - Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"` - Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"` - FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"` - Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Property) Reset() { *m = Property{} } -func (m *Property) String() string { return proto.CompactTextString(m) } -func (*Property) ProtoMessage() {} - -const Default_Property_Meaning Property_Meaning = Property_NO_MEANING -const Default_Property_Searchable bool = false -const Default_Property_Locale string = "en" - -func (m *Property) GetMeaning() Property_Meaning { - if m != nil && m.Meaning != nil { - return *m.Meaning - } - return Default_Property_Meaning -} - -func (m *Property) GetMeaningUri() string { - if m != nil && m.MeaningUri != nil { - return *m.MeaningUri - } - return "" -} - -func (m *Property) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *Property) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -func (m *Property) GetMultiple() bool { - if m != nil && m.Multiple != nil { - return *m.Multiple - } - return false -} - -func (m *Property) GetSearchable() bool { - if m != nil && m.Searchable != nil { - return *m.Searchable - } - return Default_Property_Searchable -} - -func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption { - if m != nil && m.FtsTokenizationOption != nil { - return *m.FtsTokenizationOption - } - return Property_HTML -} - -func (m *Property) GetLocale() string { - if m != nil && m.Locale != nil { - return *m.Locale - } - return Default_Property_Locale -} - -type Path struct { - Element []*Path_Element `protobuf:"group,1,rep,name=Element" json:"element,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Path) Reset() { *m = Path{} } -func (m *Path) String() string { return proto.CompactTextString(m) } -func (*Path) ProtoMessage() {} - -func (m *Path) GetElement() []*Path_Element { - if m != nil { - return m.Element - } - return nil -} - -type Path_Element struct { - Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"` - Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"` - 
Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Path_Element) Reset() { *m = Path_Element{} } -func (m *Path_Element) String() string { return proto.CompactTextString(m) } -func (*Path_Element) ProtoMessage() {} - -func (m *Path_Element) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *Path_Element) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *Path_Element) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type Reference struct { - App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"` - Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Reference) Reset() { *m = Reference{} } -func (m *Reference) String() string { return proto.CompactTextString(m) } -func (*Reference) ProtoMessage() {} - -func (m *Reference) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Reference) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *Reference) GetPath() *Path { - if m != nil { - return m.Path - } - return nil -} - -type User struct { - Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"` - AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain" json:"auth_domain,omitempty"` - Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"` - FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity" json:"federated_identity,omitempty"` - FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider" json:"federated_provider,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *User) Reset() { *m = User{} } -func (m *User) String() string { return proto.CompactTextString(m) } -func (*User) ProtoMessage() {} - -func (m *User) GetEmail() string { - if m != nil && m.Email != nil { - return *m.Email - } - return "" -} - -func (m *User) GetAuthDomain() string { - if m != nil && m.AuthDomain != nil { - return *m.AuthDomain - } - return "" -} - -func (m *User) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *User) GetFederatedIdentity() string { - if m != nil && m.FederatedIdentity != nil { - return *m.FederatedIdentity - } - return "" -} - -func (m *User) GetFederatedProvider() string { - if m != nil && m.FederatedProvider != nil { - return *m.FederatedProvider - } - return "" -} - -type EntityProto struct { - Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"` - EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group" json:"entity_group,omitempty"` - Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"` - Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"` - KindUri *string `protobuf:"bytes,5,opt,name=kind_uri" json:"kind_uri,omitempty"` - Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` - RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property" json:"raw_property,omitempty"` - Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *EntityProto) Reset() { *m = 
EntityProto{} } -func (m *EntityProto) String() string { return proto.CompactTextString(m) } -func (*EntityProto) ProtoMessage() {} - -func (m *EntityProto) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *EntityProto) GetEntityGroup() *Path { - if m != nil { - return m.EntityGroup - } - return nil -} - -func (m *EntityProto) GetOwner() *User { - if m != nil { - return m.Owner - } - return nil -} - -func (m *EntityProto) GetKind() EntityProto_Kind { - if m != nil && m.Kind != nil { - return *m.Kind - } - return EntityProto_GD_CONTACT -} - -func (m *EntityProto) GetKindUri() string { - if m != nil && m.KindUri != nil { - return *m.KindUri - } - return "" -} - -func (m *EntityProto) GetProperty() []*Property { - if m != nil { - return m.Property - } - return nil -} - -func (m *EntityProto) GetRawProperty() []*Property { - if m != nil { - return m.RawProperty - } - return nil -} - -func (m *EntityProto) GetRank() int32 { - if m != nil && m.Rank != nil { - return *m.Rank - } - return 0 -} - -type CompositeProperty struct { - IndexId *int64 `protobuf:"varint,1,req,name=index_id" json:"index_id,omitempty"` - Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompositeProperty) Reset() { *m = CompositeProperty{} } -func (m *CompositeProperty) String() string { return proto.CompactTextString(m) } -func (*CompositeProperty) ProtoMessage() {} - -func (m *CompositeProperty) GetIndexId() int64 { - if m != nil && m.IndexId != nil { - return *m.IndexId - } - return 0 -} - -func (m *CompositeProperty) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -type Index struct { - EntityType *string `protobuf:"bytes,1,req,name=entity_type" json:"entity_type,omitempty"` - Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"` - Property []*Index_Property `protobuf:"group,2,rep,name=Property" json:"property,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Index) Reset() { *m = Index{} } -func (m *Index) String() string { return proto.CompactTextString(m) } -func (*Index) ProtoMessage() {} - -func (m *Index) GetEntityType() string { - if m != nil && m.EntityType != nil { - return *m.EntityType - } - return "" -} - -func (m *Index) GetAncestor() bool { - if m != nil && m.Ancestor != nil { - return *m.Ancestor - } - return false -} - -func (m *Index) GetProperty() []*Index_Property { - if m != nil { - return m.Property - } - return nil -} - -type Index_Property struct { - Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` - Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Index_Property) Reset() { *m = Index_Property{} } -func (m *Index_Property) String() string { return proto.CompactTextString(m) } -func (*Index_Property) ProtoMessage() {} - -const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING - -func (m *Index_Property) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *Index_Property) GetDirection() Index_Property_Direction { - if m != nil && m.Direction != nil { - return *m.Direction - } - return Default_Index_Property_Direction -} - -type CompositeIndex struct { - AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` - Id *int64 `protobuf:"varint,2,req,name=id" 
json:"id,omitempty"` - Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"` - State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"` - OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,def=0" json:"only_use_if_required,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompositeIndex) Reset() { *m = CompositeIndex{} } -func (m *CompositeIndex) String() string { return proto.CompactTextString(m) } -func (*CompositeIndex) ProtoMessage() {} - -const Default_CompositeIndex_OnlyUseIfRequired bool = false - -func (m *CompositeIndex) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *CompositeIndex) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *CompositeIndex) GetDefinition() *Index { - if m != nil { - return m.Definition - } - return nil -} - -func (m *CompositeIndex) GetState() CompositeIndex_State { - if m != nil && m.State != nil { - return *m.State - } - return CompositeIndex_WRITE_ONLY -} - -func (m *CompositeIndex) GetOnlyUseIfRequired() bool { - if m != nil && m.OnlyUseIfRequired != nil { - return *m.OnlyUseIfRequired - } - return Default_CompositeIndex_OnlyUseIfRequired -} - -type IndexPostfix struct { - IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value" json:"index_value,omitempty"` - Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"` - Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *IndexPostfix) Reset() { *m = IndexPostfix{} } -func (m *IndexPostfix) String() string { return proto.CompactTextString(m) } -func (*IndexPostfix) ProtoMessage() {} - -const Default_IndexPostfix_Before bool = true - -func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue { - if m != nil { - return m.IndexValue - } - return nil -} - -func (m *IndexPostfix) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *IndexPostfix) GetBefore() bool { - if m != nil && m.Before != nil { - return *m.Before - } - return Default_IndexPostfix_Before -} - -type IndexPostfix_IndexValue struct { - PropertyName *string `protobuf:"bytes,1,req,name=property_name" json:"property_name,omitempty"` - Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} } -func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) } -func (*IndexPostfix_IndexValue) ProtoMessage() {} - -func (m *IndexPostfix_IndexValue) GetPropertyName() string { - if m != nil && m.PropertyName != nil { - return *m.PropertyName - } - return "" -} - -func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -type IndexPosition struct { - Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` - Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *IndexPosition) Reset() { *m = IndexPosition{} } -func (m *IndexPosition) String() string { return proto.CompactTextString(m) } -func (*IndexPosition) ProtoMessage() {} - -const Default_IndexPosition_Before bool = true - -func (m *IndexPosition) GetKey() string { - if m != nil && m.Key != nil { - return 
*m.Key - } - return "" -} - -func (m *IndexPosition) GetBefore() bool { - if m != nil && m.Before != nil { - return *m.Before - } - return Default_IndexPosition_Before -} - -type Snapshot struct { - Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} - -func (m *Snapshot) GetTs() int64 { - if m != nil && m.Ts != nil { - return *m.Ts - } - return 0 -} - -type InternalHeader struct { - Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *InternalHeader) Reset() { *m = InternalHeader{} } -func (m *InternalHeader) String() string { return proto.CompactTextString(m) } -func (*InternalHeader) ProtoMessage() {} - -func (m *InternalHeader) GetQos() string { - if m != nil && m.Qos != nil { - return *m.Qos - } - return "" -} - -type Transaction struct { - Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` - Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"` - App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"` - MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Transaction) Reset() { *m = Transaction{} } -func (m *Transaction) String() string { return proto.CompactTextString(m) } -func (*Transaction) ProtoMessage() {} - -const Default_Transaction_MarkChanges bool = false - -func (m *Transaction) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *Transaction) GetHandle() uint64 { - if m != nil && m.Handle != nil { - return *m.Handle - } - return 0 -} - -func (m *Transaction) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Transaction) GetMarkChanges() bool { - if m != nil && m.MarkChanges != nil { - return *m.MarkChanges - } - return Default_Transaction_MarkChanges -} - -type Query struct { - Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"` - App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,29,opt,name=name_space" json:"name_space,omitempty"` - Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` - Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"` - Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter" json:"filter,omitempty"` - SearchQuery *string `protobuf:"bytes,8,opt,name=search_query" json:"search_query,omitempty"` - Order []*Query_Order `protobuf:"group,9,rep,name=Order" json:"order,omitempty"` - Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"` - Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"` - Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"` - Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"` - CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` - EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor" json:"end_compiled_cursor,omitempty"` - CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index" json:"composite_index,omitempty"` - RequirePerfectPlan *bool 
`protobuf:"varint,20,opt,name=require_perfect_plan,def=0" json:"require_perfect_plan,omitempty"` - KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,def=0" json:"keys_only,omitempty"` - Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"` - Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"` - FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms" json:"failover_ms,omitempty"` - Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"` - PropertyName []string `protobuf:"bytes,33,rep,name=property_name" json:"property_name,omitempty"` - GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name" json:"group_by_property_name,omitempty"` - Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"` - MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds" json:"min_safe_time_seconds,omitempty"` - SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name" json:"safe_replica_name,omitempty"` - PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,def=0" json:"persist_offset,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Query) Reset() { *m = Query{} } -func (m *Query) String() string { return proto.CompactTextString(m) } -func (*Query) ProtoMessage() {} - -const Default_Query_Offset int32 = 0 -const Default_Query_RequirePerfectPlan bool = false -const Default_Query_KeysOnly bool = false -const Default_Query_Compile bool = false -const Default_Query_PersistOffset bool = false - -func (m *Query) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *Query) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Query) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *Query) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *Query) GetAncestor() *Reference { - if m != nil { - return m.Ancestor - } - return nil -} - -func (m *Query) GetFilter() []*Query_Filter { - if m != nil { - return m.Filter - } - return nil -} - -func (m *Query) GetSearchQuery() string { - if m != nil && m.SearchQuery != nil { - return *m.SearchQuery - } - return "" -} - -func (m *Query) GetOrder() []*Query_Order { - if m != nil { - return m.Order - } - return nil -} - -func (m *Query) GetHint() Query_Hint { - if m != nil && m.Hint != nil { - return *m.Hint - } - return Query_ORDER_FIRST -} - -func (m *Query) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *Query) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_Query_Offset -} - -func (m *Query) GetLimit() int32 { - if m != nil && m.Limit != nil { - return *m.Limit - } - return 0 -} - -func (m *Query) GetCompiledCursor() *CompiledCursor { - if m != nil { - return m.CompiledCursor - } - return nil -} - -func (m *Query) GetEndCompiledCursor() *CompiledCursor { - if m != nil { - return m.EndCompiledCursor - } - return nil -} - -func (m *Query) GetCompositeIndex() []*CompositeIndex { - if m != nil { - return m.CompositeIndex - } - return nil -} - -func (m *Query) GetRequirePerfectPlan() bool { - if m != nil && m.RequirePerfectPlan != nil { - return *m.RequirePerfectPlan - } - return Default_Query_RequirePerfectPlan -} - -func (m *Query) GetKeysOnly() bool { - if m != nil && 
m.KeysOnly != nil { - return *m.KeysOnly - } - return Default_Query_KeysOnly -} - -func (m *Query) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *Query) GetCompile() bool { - if m != nil && m.Compile != nil { - return *m.Compile - } - return Default_Query_Compile -} - -func (m *Query) GetFailoverMs() int64 { - if m != nil && m.FailoverMs != nil { - return *m.FailoverMs - } - return 0 -} - -func (m *Query) GetStrong() bool { - if m != nil && m.Strong != nil { - return *m.Strong - } - return false -} - -func (m *Query) GetPropertyName() []string { - if m != nil { - return m.PropertyName - } - return nil -} - -func (m *Query) GetGroupByPropertyName() []string { - if m != nil { - return m.GroupByPropertyName - } - return nil -} - -func (m *Query) GetDistinct() bool { - if m != nil && m.Distinct != nil { - return *m.Distinct - } - return false -} - -func (m *Query) GetMinSafeTimeSeconds() int64 { - if m != nil && m.MinSafeTimeSeconds != nil { - return *m.MinSafeTimeSeconds - } - return 0 -} - -func (m *Query) GetSafeReplicaName() []string { - if m != nil { - return m.SafeReplicaName - } - return nil -} - -func (m *Query) GetPersistOffset() bool { - if m != nil && m.PersistOffset != nil { - return *m.PersistOffset - } - return Default_Query_PersistOffset -} - -type Query_Filter struct { - Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"` - Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Query_Filter) Reset() { *m = Query_Filter{} } -func (m *Query_Filter) String() string { return proto.CompactTextString(m) } -func (*Query_Filter) ProtoMessage() {} - -func (m *Query_Filter) GetOp() Query_Filter_Operator { - if m != nil && m.Op != nil { - return *m.Op - } - return Query_Filter_LESS_THAN -} - -func (m *Query_Filter) GetProperty() []*Property { - if m != nil { - return m.Property - } - return nil -} - -type Query_Order struct { - Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"` - Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Query_Order) Reset() { *m = Query_Order{} } -func (m *Query_Order) String() string { return proto.CompactTextString(m) } -func (*Query_Order) ProtoMessage() {} - -const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING - -func (m *Query_Order) GetProperty() string { - if m != nil && m.Property != nil { - return *m.Property - } - return "" -} - -func (m *Query_Order) GetDirection() Query_Order_Direction { - if m != nil && m.Direction != nil { - return *m.Direction - } - return Default_Query_Order_Direction -} - -type CompiledQuery struct { - Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan" json:"primaryscan,omitempty"` - Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan" json:"mergejoinscan,omitempty"` - IndexDef *Index `protobuf:"bytes,21,opt,name=index_def" json:"index_def,omitempty"` - Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` - Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` - KeysOnly *bool `protobuf:"varint,12,req,name=keys_only" json:"keys_only,omitempty"` - PropertyName []string `protobuf:"bytes,24,rep,name=property_name" 
json:"property_name,omitempty"` - DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size" json:"distinct_infix_size,omitempty"` - Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter" json:"entityfilter,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledQuery) Reset() { *m = CompiledQuery{} } -func (m *CompiledQuery) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery) ProtoMessage() {} - -const Default_CompiledQuery_Offset int32 = 0 - -func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan { - if m != nil { - return m.Primaryscan - } - return nil -} - -func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan { - if m != nil { - return m.Mergejoinscan - } - return nil -} - -func (m *CompiledQuery) GetIndexDef() *Index { - if m != nil { - return m.IndexDef - } - return nil -} - -func (m *CompiledQuery) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_CompiledQuery_Offset -} - -func (m *CompiledQuery) GetLimit() int32 { - if m != nil && m.Limit != nil { - return *m.Limit - } - return 0 -} - -func (m *CompiledQuery) GetKeysOnly() bool { - if m != nil && m.KeysOnly != nil { - return *m.KeysOnly - } - return false -} - -func (m *CompiledQuery) GetPropertyName() []string { - if m != nil { - return m.PropertyName - } - return nil -} - -func (m *CompiledQuery) GetDistinctInfixSize() int32 { - if m != nil && m.DistinctInfixSize != nil { - return *m.DistinctInfixSize - } - return 0 -} - -func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter { - if m != nil { - return m.Entityfilter - } - return nil -} - -type CompiledQuery_PrimaryScan struct { - IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"` - StartKey *string `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"` - StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive" json:"start_inclusive,omitempty"` - EndKey *string `protobuf:"bytes,5,opt,name=end_key" json:"end_key,omitempty"` - EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive" json:"end_inclusive,omitempty"` - StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value" json:"start_postfix_value,omitempty"` - EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value" json:"end_postfix_value,omitempty"` - EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us" json:"end_unapplied_log_timestamp_us,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} } -func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery_PrimaryScan) ProtoMessage() {} - -func (m *CompiledQuery_PrimaryScan) GetIndexName() string { - if m != nil && m.IndexName != nil { - return *m.IndexName - } - return "" -} - -func (m *CompiledQuery_PrimaryScan) GetStartKey() string { - if m != nil && m.StartKey != nil { - return *m.StartKey - } - return "" -} - -func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool { - if m != nil && m.StartInclusive != nil { - return *m.StartInclusive - } - return false -} - -func (m *CompiledQuery_PrimaryScan) GetEndKey() string { - if m != nil && m.EndKey != nil { - return *m.EndKey - } - return "" -} - -func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool { - if m != nil && m.EndInclusive != nil { - return *m.EndInclusive - } - 
return false -} - -func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string { - if m != nil { - return m.StartPostfixValue - } - return nil -} - -func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string { - if m != nil { - return m.EndPostfixValue - } - return nil -} - -func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 { - if m != nil && m.EndUnappliedLogTimestampUs != nil { - return *m.EndUnappliedLogTimestampUs - } - return 0 -} - -type CompiledQuery_MergeJoinScan struct { - IndexName *string `protobuf:"bytes,8,req,name=index_name" json:"index_name,omitempty"` - PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value" json:"prefix_value,omitempty"` - ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,def=0" json:"value_prefix,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} } -func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery_MergeJoinScan) ProtoMessage() {} - -const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false - -func (m *CompiledQuery_MergeJoinScan) GetIndexName() string { - if m != nil && m.IndexName != nil { - return *m.IndexName - } - return "" -} - -func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string { - if m != nil { - return m.PrefixValue - } - return nil -} - -func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool { - if m != nil && m.ValuePrefix != nil { - return *m.ValuePrefix - } - return Default_CompiledQuery_MergeJoinScan_ValuePrefix -} - -type CompiledQuery_EntityFilter struct { - Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"` - Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"` - Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} } -func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery_EntityFilter) ProtoMessage() {} - -const Default_CompiledQuery_EntityFilter_Distinct bool = false - -func (m *CompiledQuery_EntityFilter) GetDistinct() bool { - if m != nil && m.Distinct != nil { - return *m.Distinct - } - return Default_CompiledQuery_EntityFilter_Distinct -} - -func (m *CompiledQuery_EntityFilter) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference { - if m != nil { - return m.Ancestor - } - return nil -} - -type CompiledCursor struct { - Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position" json:"position,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledCursor) Reset() { *m = CompiledCursor{} } -func (m *CompiledCursor) String() string { return proto.CompactTextString(m) } -func (*CompiledCursor) ProtoMessage() {} - -func (m *CompiledCursor) GetPosition() *CompiledCursor_Position { - if m != nil { - return m.Position - } - return nil -} - -type CompiledCursor_Position struct { - StartKey *string `protobuf:"bytes,27,opt,name=start_key" json:"start_key,omitempty"` - Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue" json:"indexvalue,omitempty"` - Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"` - StartInclusive *bool 
`protobuf:"varint,28,opt,name=start_inclusive,def=1" json:"start_inclusive,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} } -func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) } -func (*CompiledCursor_Position) ProtoMessage() {} - -const Default_CompiledCursor_Position_StartInclusive bool = true - -func (m *CompiledCursor_Position) GetStartKey() string { - if m != nil && m.StartKey != nil { - return *m.StartKey - } - return "" -} - -func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue { - if m != nil { - return m.Indexvalue - } - return nil -} - -func (m *CompiledCursor_Position) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *CompiledCursor_Position) GetStartInclusive() bool { - if m != nil && m.StartInclusive != nil { - return *m.StartInclusive - } - return Default_CompiledCursor_Position_StartInclusive -} - -type CompiledCursor_Position_IndexValue struct { - Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"` - Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} } -func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) } -func (*CompiledCursor_Position_IndexValue) ProtoMessage() {} - -func (m *CompiledCursor_Position_IndexValue) GetProperty() string { - if m != nil && m.Property != nil { - return *m.Property - } - return "" -} - -func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -type Cursor struct { - Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"` - App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Cursor) Reset() { *m = Cursor{} } -func (m *Cursor) String() string { return proto.CompactTextString(m) } -func (*Cursor) ProtoMessage() {} - -func (m *Cursor) GetCursor() uint64 { - if m != nil && m.Cursor != nil { - return *m.Cursor - } - return 0 -} - -func (m *Cursor) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -type Error struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *Error) Reset() { *m = Error{} } -func (m *Error) String() string { return proto.CompactTextString(m) } -func (*Error) ProtoMessage() {} - -type Cost struct { - IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes" json:"index_writes,omitempty"` - IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes" json:"index_write_bytes,omitempty"` - EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes" json:"entity_writes,omitempty"` - EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes" json:"entity_write_bytes,omitempty"` - Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost" json:"commitcost,omitempty"` - ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta" json:"approximate_storage_delta,omitempty"` - IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates" json:"id_sequence_updates,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Cost) Reset() { *m = Cost{} } -func (m *Cost) String() string { return proto.CompactTextString(m) } -func (*Cost) 
ProtoMessage() {} - -func (m *Cost) GetIndexWrites() int32 { - if m != nil && m.IndexWrites != nil { - return *m.IndexWrites - } - return 0 -} - -func (m *Cost) GetIndexWriteBytes() int32 { - if m != nil && m.IndexWriteBytes != nil { - return *m.IndexWriteBytes - } - return 0 -} - -func (m *Cost) GetEntityWrites() int32 { - if m != nil && m.EntityWrites != nil { - return *m.EntityWrites - } - return 0 -} - -func (m *Cost) GetEntityWriteBytes() int32 { - if m != nil && m.EntityWriteBytes != nil { - return *m.EntityWriteBytes - } - return 0 -} - -func (m *Cost) GetCommitcost() *Cost_CommitCost { - if m != nil { - return m.Commitcost - } - return nil -} - -func (m *Cost) GetApproximateStorageDelta() int32 { - if m != nil && m.ApproximateStorageDelta != nil { - return *m.ApproximateStorageDelta - } - return 0 -} - -func (m *Cost) GetIdSequenceUpdates() int32 { - if m != nil && m.IdSequenceUpdates != nil { - return *m.IdSequenceUpdates - } - return 0 -} - -type Cost_CommitCost struct { - RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts" json:"requested_entity_puts,omitempty"` - RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes" json:"requested_entity_deletes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} } -func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) } -func (*Cost_CommitCost) ProtoMessage() {} - -func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 { - if m != nil && m.RequestedEntityPuts != nil { - return *m.RequestedEntityPuts - } - return 0 -} - -func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 { - if m != nil && m.RequestedEntityDeletes != nil { - return *m.RequestedEntityDeletes - } - return 0 -} - -type GetRequest struct { - Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"` - Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` - FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms" json:"failover_ms,omitempty"` - Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"` - AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,def=0" json:"allow_deferred,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetRequest) Reset() { *m = GetRequest{} } -func (m *GetRequest) String() string { return proto.CompactTextString(m) } -func (*GetRequest) ProtoMessage() {} - -const Default_GetRequest_AllowDeferred bool = false - -func (m *GetRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *GetRequest) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *GetRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *GetRequest) GetFailoverMs() int64 { - if m != nil && m.FailoverMs != nil { - return *m.FailoverMs - } - return 0 -} - -func (m *GetRequest) GetStrong() bool { - if m != nil && m.Strong != nil { - return *m.Strong - } - return false -} - -func (m *GetRequest) GetAllowDeferred() bool { - if m != nil && m.AllowDeferred != nil { - return *m.AllowDeferred - } - return Default_GetRequest_AllowDeferred -} - -type GetResponse struct { - Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity" json:"entity,omitempty"` - Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" 
json:"deferred,omitempty"` - InOrder *bool `protobuf:"varint,6,opt,name=in_order,def=1" json:"in_order,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetResponse) Reset() { *m = GetResponse{} } -func (m *GetResponse) String() string { return proto.CompactTextString(m) } -func (*GetResponse) ProtoMessage() {} - -const Default_GetResponse_InOrder bool = true - -func (m *GetResponse) GetEntity() []*GetResponse_Entity { - if m != nil { - return m.Entity - } - return nil -} - -func (m *GetResponse) GetDeferred() []*Reference { - if m != nil { - return m.Deferred - } - return nil -} - -func (m *GetResponse) GetInOrder() bool { - if m != nil && m.InOrder != nil { - return *m.InOrder - } - return Default_GetResponse_InOrder -} - -type GetResponse_Entity struct { - Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"` - Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"` - Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} } -func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) } -func (*GetResponse_Entity) ProtoMessage() {} - -func (m *GetResponse_Entity) GetEntity() *EntityProto { - if m != nil { - return m.Entity - } - return nil -} - -func (m *GetResponse_Entity) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *GetResponse_Entity) GetVersion() int64 { - if m != nil && m.Version != nil { - return *m.Version - } - return 0 -} - -type PutRequest struct { - Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"` - Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"` - Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` - CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index" json:"composite_index,omitempty"` - Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` - Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` - MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` - Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` - AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PutRequest) Reset() { *m = PutRequest{} } -func (m *PutRequest) String() string { return proto.CompactTextString(m) } -func (*PutRequest) ProtoMessage() {} - -const Default_PutRequest_Trusted bool = false -const Default_PutRequest_Force bool = false -const Default_PutRequest_MarkChanges bool = false -const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT - -func (m *PutRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *PutRequest) GetEntity() []*EntityProto { - if m != nil { - return m.Entity - } - return nil -} - -func (m *PutRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *PutRequest) GetCompositeIndex() []*CompositeIndex { - if m != nil { - return m.CompositeIndex - } - return nil -} - -func (m *PutRequest) GetTrusted() bool { - if m != nil && m.Trusted != nil { - return *m.Trusted - } - return 
Default_PutRequest_Trusted -} - -func (m *PutRequest) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return Default_PutRequest_Force -} - -func (m *PutRequest) GetMarkChanges() bool { - if m != nil && m.MarkChanges != nil { - return *m.MarkChanges - } - return Default_PutRequest_MarkChanges -} - -func (m *PutRequest) GetSnapshot() []*Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy { - if m != nil && m.AutoIdPolicy != nil { - return *m.AutoIdPolicy - } - return Default_PutRequest_AutoIdPolicy -} - -type PutResponse struct { - Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"` - Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PutResponse) Reset() { *m = PutResponse{} } -func (m *PutResponse) String() string { return proto.CompactTextString(m) } -func (*PutResponse) ProtoMessage() {} - -func (m *PutResponse) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *PutResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *PutResponse) GetVersion() []int64 { - if m != nil { - return m.Version - } - return nil -} - -type TouchRequest struct { - Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` - Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index" json:"composite_index,omitempty"` - Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"` - Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *TouchRequest) Reset() { *m = TouchRequest{} } -func (m *TouchRequest) String() string { return proto.CompactTextString(m) } -func (*TouchRequest) ProtoMessage() {} - -const Default_TouchRequest_Force bool = false - -func (m *TouchRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *TouchRequest) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex { - if m != nil { - return m.CompositeIndex - } - return nil -} - -func (m *TouchRequest) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return Default_TouchRequest_Force -} - -func (m *TouchRequest) GetSnapshot() []*Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -type TouchResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *TouchResponse) Reset() { *m = TouchResponse{} } -func (m *TouchResponse) String() string { return proto.CompactTextString(m) } -func (*TouchResponse) ProtoMessage() {} - -func (m *TouchResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -type DeleteRequest struct { - Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` - Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"` - Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"` - Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` - Force *bool `protobuf:"varint,7,opt,name=force,def=0" 
json:"force,omitempty"` - MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"` - Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } -func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteRequest) ProtoMessage() {} - -const Default_DeleteRequest_Trusted bool = false -const Default_DeleteRequest_Force bool = false -const Default_DeleteRequest_MarkChanges bool = false - -func (m *DeleteRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *DeleteRequest) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *DeleteRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *DeleteRequest) GetTrusted() bool { - if m != nil && m.Trusted != nil { - return *m.Trusted - } - return Default_DeleteRequest_Trusted -} - -func (m *DeleteRequest) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return Default_DeleteRequest_Force -} - -func (m *DeleteRequest) GetMarkChanges() bool { - if m != nil && m.MarkChanges != nil { - return *m.MarkChanges - } - return Default_DeleteRequest_MarkChanges -} - -func (m *DeleteRequest) GetSnapshot() []*Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -type DeleteResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } -func (m *DeleteResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteResponse) ProtoMessage() {} - -func (m *DeleteResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *DeleteResponse) GetVersion() []int64 { - if m != nil { - return m.Version - } - return nil -} - -type NextRequest struct { - Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"` - Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"` - Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"` - Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"` - Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NextRequest) Reset() { *m = NextRequest{} } -func (m *NextRequest) String() string { return proto.CompactTextString(m) } -func (*NextRequest) ProtoMessage() {} - -const Default_NextRequest_Offset int32 = 0 -const Default_NextRequest_Compile bool = false - -func (m *NextRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *NextRequest) GetCursor() *Cursor { - if m != nil { - return m.Cursor - } - return nil -} - -func (m *NextRequest) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *NextRequest) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_NextRequest_Offset -} - -func (m *NextRequest) GetCompile() bool { - if m != nil && m.Compile != nil { - return *m.Compile - } - return Default_NextRequest_Compile -} - -type QueryResult struct { - Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" 
json:"cursor,omitempty"` - Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"` - SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results" json:"skipped_results,omitempty"` - MoreResults *bool `protobuf:"varint,3,req,name=more_results" json:"more_results,omitempty"` - KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only" json:"keys_only,omitempty"` - IndexOnly *bool `protobuf:"varint,9,opt,name=index_only" json:"index_only,omitempty"` - SmallOps *bool `protobuf:"varint,10,opt,name=small_ops" json:"small_ops,omitempty"` - CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query" json:"compiled_query,omitempty"` - CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"` - Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"` - Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *QueryResult) Reset() { *m = QueryResult{} } -func (m *QueryResult) String() string { return proto.CompactTextString(m) } -func (*QueryResult) ProtoMessage() {} - -func (m *QueryResult) GetCursor() *Cursor { - if m != nil { - return m.Cursor - } - return nil -} - -func (m *QueryResult) GetResult() []*EntityProto { - if m != nil { - return m.Result - } - return nil -} - -func (m *QueryResult) GetSkippedResults() int32 { - if m != nil && m.SkippedResults != nil { - return *m.SkippedResults - } - return 0 -} - -func (m *QueryResult) GetMoreResults() bool { - if m != nil && m.MoreResults != nil { - return *m.MoreResults - } - return false -} - -func (m *QueryResult) GetKeysOnly() bool { - if m != nil && m.KeysOnly != nil { - return *m.KeysOnly - } - return false -} - -func (m *QueryResult) GetIndexOnly() bool { - if m != nil && m.IndexOnly != nil { - return *m.IndexOnly - } - return false -} - -func (m *QueryResult) GetSmallOps() bool { - if m != nil && m.SmallOps != nil { - return *m.SmallOps - } - return false -} - -func (m *QueryResult) GetCompiledQuery() *CompiledQuery { - if m != nil { - return m.CompiledQuery - } - return nil -} - -func (m *QueryResult) GetCompiledCursor() *CompiledCursor { - if m != nil { - return m.CompiledCursor - } - return nil -} - -func (m *QueryResult) GetIndex() []*CompositeIndex { - if m != nil { - return m.Index - } - return nil -} - -func (m *QueryResult) GetVersion() []int64 { - if m != nil { - return m.Version - } - return nil -} - -type AllocateIdsRequest struct { - Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` - ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key" json:"model_key,omitempty"` - Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` - Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"` - Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } -func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } -func (*AllocateIdsRequest) ProtoMessage() {} - -func (m *AllocateIdsRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AllocateIdsRequest) GetModelKey() *Reference { - if m != nil { - return m.ModelKey - } - return nil -} - -func (m *AllocateIdsRequest) GetSize() int64 { - if m != nil && m.Size != nil { - return *m.Size - } - return 0 -} - -func (m *AllocateIdsRequest) 
GetMax() int64 { - if m != nil && m.Max != nil { - return *m.Max - } - return 0 -} - -func (m *AllocateIdsRequest) GetReserve() []*Reference { - if m != nil { - return m.Reserve - } - return nil -} - -type AllocateIdsResponse struct { - Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"` - End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"` - Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } -func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } -func (*AllocateIdsResponse) ProtoMessage() {} - -func (m *AllocateIdsResponse) GetStart() int64 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *AllocateIdsResponse) GetEnd() int64 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func (m *AllocateIdsResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -type CompositeIndices struct { - Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompositeIndices) Reset() { *m = CompositeIndices{} } -func (m *CompositeIndices) String() string { return proto.CompactTextString(m) } -func (*CompositeIndices) ProtoMessage() {} - -func (m *CompositeIndices) GetIndex() []*CompositeIndex { - if m != nil { - return m.Index - } - return nil -} - -type AddActionsRequest struct { - Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` - Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` - Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} } -func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) } -func (*AddActionsRequest) ProtoMessage() {} - -func (m *AddActionsRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AddActionsRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *AddActionsRequest) GetAction() []*Action { - if m != nil { - return m.Action - } - return nil -} - -type AddActionsResponse struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} } -func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) } -func (*AddActionsResponse) ProtoMessage() {} - -type BeginTransactionRequest struct { - Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` - App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` - AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,def=0" json:"allow_multiple_eg,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } -func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } -func (*BeginTransactionRequest) ProtoMessage() {} - -const Default_BeginTransactionRequest_AllowMultipleEg bool = false - -func (m *BeginTransactionRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *BeginTransactionRequest) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m 
*BeginTransactionRequest) GetAllowMultipleEg() bool { - if m != nil && m.AllowMultipleEg != nil { - return *m.AllowMultipleEg - } - return Default_BeginTransactionRequest_AllowMultipleEg -} - -type CommitResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CommitResponse) Reset() { *m = CommitResponse{} } -func (m *CommitResponse) String() string { return proto.CompactTextString(m) } -func (*CommitResponse) ProtoMessage() {} - -func (m *CommitResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *CommitResponse) GetVersion() []*CommitResponse_Version { - if m != nil { - return m.Version - } - return nil -} - -type CommitResponse_Version struct { - RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key" json:"root_entity_key,omitempty"` - Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} } -func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) } -func (*CommitResponse_Version) ProtoMessage() {} - -func (m *CommitResponse_Version) GetRootEntityKey() *Reference { - if m != nil { - return m.RootEntityKey - } - return nil -} - -func (m *CommitResponse_Version) GetVersion() int64 { - if m != nil && m.Version != nil { - return *m.Version - } - return 0 -} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto deleted file mode 100755 index e76f126ff..000000000 --- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto +++ /dev/null @@ -1,541 +0,0 @@ -syntax = "proto2"; -option go_package = "datastore"; - -package appengine; - -message Action{} - -message PropertyValue { - optional int64 int64Value = 1; - optional bool booleanValue = 2; - optional string stringValue = 3; - optional double doubleValue = 4; - - optional group PointValue = 5 { - required double x = 6; - required double y = 7; - } - - optional group UserValue = 8 { - required string email = 9; - required string auth_domain = 10; - optional string nickname = 11; - optional string federated_identity = 21; - optional string federated_provider = 22; - } - - optional group ReferenceValue = 12 { - required string app = 13; - optional string name_space = 20; - repeated group PathElement = 14 { - required string type = 15; - optional int64 id = 16; - optional string name = 17; - } - } -} - -message Property { - enum Meaning { - NO_MEANING = 0; - BLOB = 14; - TEXT = 15; - BYTESTRING = 16; - - ATOM_CATEGORY = 1; - ATOM_LINK = 2; - ATOM_TITLE = 3; - ATOM_CONTENT = 4; - ATOM_SUMMARY = 5; - ATOM_AUTHOR = 6; - - GD_WHEN = 7; - GD_EMAIL = 8; - GEORSS_POINT = 9; - GD_IM = 10; - - GD_PHONENUMBER = 11; - GD_POSTALADDRESS = 12; - - GD_RATING = 13; - - BLOBKEY = 17; - ENTITY_PROTO = 19; - - INDEX_VALUE = 18; - }; - - optional Meaning meaning = 1 [default = NO_MEANING]; - optional string meaning_uri = 2; - - required string name = 3; - - required PropertyValue value = 5; - - required bool multiple = 4; - - optional bool searchable = 6 [default=false]; - - enum FtsTokenizationOption { - HTML = 1; - ATOM = 2; - } - - optional FtsTokenizationOption fts_tokenization_option = 8; - - optional string locale = 9 
[default = "en"]; -} - -message Path { - repeated group Element = 1 { - required string type = 2; - optional int64 id = 3; - optional string name = 4; - } -} - -message Reference { - required string app = 13; - optional string name_space = 20; - required Path path = 14; -} - -message User { - required string email = 1; - required string auth_domain = 2; - optional string nickname = 3; - optional string federated_identity = 6; - optional string federated_provider = 7; -} - -message EntityProto { - required Reference key = 13; - required Path entity_group = 16; - optional User owner = 17; - - enum Kind { - GD_CONTACT = 1; - GD_EVENT = 2; - GD_MESSAGE = 3; - } - optional Kind kind = 4; - optional string kind_uri = 5; - - repeated Property property = 14; - repeated Property raw_property = 15; - - optional int32 rank = 18; -} - -message CompositeProperty { - required int64 index_id = 1; - repeated string value = 2; -} - -message Index { - required string entity_type = 1; - required bool ancestor = 5; - repeated group Property = 2 { - required string name = 3; - enum Direction { - ASCENDING = 1; - DESCENDING = 2; - } - optional Direction direction = 4 [default = ASCENDING]; - } -} - -message CompositeIndex { - required string app_id = 1; - required int64 id = 2; - required Index definition = 3; - - enum State { - WRITE_ONLY = 1; - READ_WRITE = 2; - DELETED = 3; - ERROR = 4; - } - required State state = 4; - - optional bool only_use_if_required = 6 [default = false]; -} - -message IndexPostfix { - message IndexValue { - required string property_name = 1; - required PropertyValue value = 2; - } - - repeated IndexValue index_value = 1; - - optional Reference key = 2; - - optional bool before = 3 [default=true]; -} - -message IndexPosition { - optional string key = 1; - - optional bool before = 2 [default=true]; -} - -message Snapshot { - enum Status { - INACTIVE = 0; - ACTIVE = 1; - } - - required int64 ts = 1; -} - -message InternalHeader { - optional string qos = 1; -} - -message Transaction { - optional InternalHeader header = 4; - required fixed64 handle = 1; - required string app = 2; - optional bool mark_changes = 3 [default = false]; -} - -message Query { - optional InternalHeader header = 39; - - required string app = 1; - optional string name_space = 29; - - optional string kind = 3; - optional Reference ancestor = 17; - - repeated group Filter = 4 { - enum Operator { - LESS_THAN = 1; - LESS_THAN_OR_EQUAL = 2; - GREATER_THAN = 3; - GREATER_THAN_OR_EQUAL = 4; - EQUAL = 5; - IN = 6; - EXISTS = 7; - } - - required Operator op = 6; - repeated Property property = 14; - } - - optional string search_query = 8; - - repeated group Order = 9 { - enum Direction { - ASCENDING = 1; - DESCENDING = 2; - } - - required string property = 10; - optional Direction direction = 11 [default = ASCENDING]; - } - - enum Hint { - ORDER_FIRST = 1; - ANCESTOR_FIRST = 2; - FILTER_FIRST = 3; - } - optional Hint hint = 18; - - optional int32 count = 23; - - optional int32 offset = 12 [default = 0]; - - optional int32 limit = 16; - - optional CompiledCursor compiled_cursor = 30; - optional CompiledCursor end_compiled_cursor = 31; - - repeated CompositeIndex composite_index = 19; - - optional bool require_perfect_plan = 20 [default = false]; - - optional bool keys_only = 21 [default = false]; - - optional Transaction transaction = 22; - - optional bool compile = 25 [default = false]; - - optional int64 failover_ms = 26; - - optional bool strong = 32; - - repeated string property_name = 33; - - repeated string 
group_by_property_name = 34; - - optional bool distinct = 24; - - optional int64 min_safe_time_seconds = 35; - - repeated string safe_replica_name = 36; - - optional bool persist_offset = 37 [default=false]; -} - -message CompiledQuery { - required group PrimaryScan = 1 { - optional string index_name = 2; - - optional string start_key = 3; - optional bool start_inclusive = 4; - optional string end_key = 5; - optional bool end_inclusive = 6; - - repeated string start_postfix_value = 22; - repeated string end_postfix_value = 23; - - optional int64 end_unapplied_log_timestamp_us = 19; - } - - repeated group MergeJoinScan = 7 { - required string index_name = 8; - - repeated string prefix_value = 9; - - optional bool value_prefix = 20 [default=false]; - } - - optional Index index_def = 21; - - optional int32 offset = 10 [default = 0]; - - optional int32 limit = 11; - - required bool keys_only = 12; - - repeated string property_name = 24; - - optional int32 distinct_infix_size = 25; - - optional group EntityFilter = 13 { - optional bool distinct = 14 [default=false]; - - optional string kind = 17; - optional Reference ancestor = 18; - } -} - -message CompiledCursor { - optional group Position = 2 { - optional string start_key = 27; - - repeated group IndexValue = 29 { - optional string property = 30; - required PropertyValue value = 31; - } - - optional Reference key = 32; - - optional bool start_inclusive = 28 [default=true]; - } -} - -message Cursor { - required fixed64 cursor = 1; - - optional string app = 2; -} - -message Error { - enum ErrorCode { - BAD_REQUEST = 1; - CONCURRENT_TRANSACTION = 2; - INTERNAL_ERROR = 3; - NEED_INDEX = 4; - TIMEOUT = 5; - PERMISSION_DENIED = 6; - BIGTABLE_ERROR = 7; - COMMITTED_BUT_STILL_APPLYING = 8; - CAPABILITY_DISABLED = 9; - TRY_ALTERNATE_BACKEND = 10; - SAFE_TIME_TOO_OLD = 11; - } -} - -message Cost { - optional int32 index_writes = 1; - optional int32 index_write_bytes = 2; - optional int32 entity_writes = 3; - optional int32 entity_write_bytes = 4; - optional group CommitCost = 5 { - optional int32 requested_entity_puts = 6; - optional int32 requested_entity_deletes = 7; - }; - optional int32 approximate_storage_delta = 8; - optional int32 id_sequence_updates = 9; -} - -message GetRequest { - optional InternalHeader header = 6; - - repeated Reference key = 1; - optional Transaction transaction = 2; - - optional int64 failover_ms = 3; - - optional bool strong = 4; - - optional bool allow_deferred = 5 [default=false]; -} - -message GetResponse { - repeated group Entity = 1 { - optional EntityProto entity = 2; - optional Reference key = 4; - - optional int64 version = 3; - } - - repeated Reference deferred = 5; - - optional bool in_order = 6 [default=true]; -} - -message PutRequest { - optional InternalHeader header = 11; - - repeated EntityProto entity = 1; - optional Transaction transaction = 2; - repeated CompositeIndex composite_index = 3; - - optional bool trusted = 4 [default = false]; - - optional bool force = 7 [default = false]; - - optional bool mark_changes = 8 [default = false]; - repeated Snapshot snapshot = 9; - - enum AutoIdPolicy { - CURRENT = 0; - SEQUENTIAL = 1; - } - optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT]; -} - -message PutResponse { - repeated Reference key = 1; - optional Cost cost = 2; - repeated int64 version = 3; -} - -message TouchRequest { - optional InternalHeader header = 10; - - repeated Reference key = 1; - repeated CompositeIndex composite_index = 2; - optional bool force = 3 [default = false]; - 
repeated Snapshot snapshot = 9; -} - -message TouchResponse { - optional Cost cost = 1; -} - -message DeleteRequest { - optional InternalHeader header = 10; - - repeated Reference key = 6; - optional Transaction transaction = 5; - - optional bool trusted = 4 [default = false]; - - optional bool force = 7 [default = false]; - - optional bool mark_changes = 8 [default = false]; - repeated Snapshot snapshot = 9; -} - -message DeleteResponse { - optional Cost cost = 1; - repeated int64 version = 3; -} - -message NextRequest { - optional InternalHeader header = 5; - - required Cursor cursor = 1; - optional int32 count = 2; - - optional int32 offset = 4 [default = 0]; - - optional bool compile = 3 [default = false]; -} - -message QueryResult { - optional Cursor cursor = 1; - - repeated EntityProto result = 2; - - optional int32 skipped_results = 7; - - required bool more_results = 3; - - optional bool keys_only = 4; - - optional bool index_only = 9; - - optional bool small_ops = 10; - - optional CompiledQuery compiled_query = 5; - - optional CompiledCursor compiled_cursor = 6; - - repeated CompositeIndex index = 8; - - repeated int64 version = 11; -} - -message AllocateIdsRequest { - optional InternalHeader header = 4; - - optional Reference model_key = 1; - - optional int64 size = 2; - - optional int64 max = 3; - - repeated Reference reserve = 5; -} - -message AllocateIdsResponse { - required int64 start = 1; - required int64 end = 2; - optional Cost cost = 3; -} - -message CompositeIndices { - repeated CompositeIndex index = 1; -} - -message AddActionsRequest { - optional InternalHeader header = 3; - - required Transaction transaction = 1; - repeated Action action = 2; -} - -message AddActionsResponse { -} - -message BeginTransactionRequest { - optional InternalHeader header = 3; - - required string app = 1; - optional bool allow_multiple_eg = 2 [default = false]; -} - -message CommitResponse { - optional Cost cost = 1; - - repeated group Version = 3 { - required Reference root_entity_key = 4; - required int64 version = 5; - } -} diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go deleted file mode 100644 index d538701ab..000000000 --- a/vendor/google.golang.org/appengine/internal/identity.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -import netcontext "golang.org/x/net/context" - -// These functions are implementations of the wrapper functions -// in ../appengine/identity.go. See that file for commentary. - -func AppID(c netcontext.Context) string { - return appID(FullyQualifiedAppID(c)) -} diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go deleted file mode 100644 index e6b9227c5..000000000 --- a/vendor/google.golang.org/appengine/internal/identity_classic.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -// +build appengine - -package internal - -import ( - "appengine" - - netcontext "golang.org/x/net/context" -) - -func DefaultVersionHostname(ctx netcontext.Context) string { - return appengine.DefaultVersionHostname(fromContext(ctx)) -} - -func RequestID(ctx netcontext.Context) string { return appengine.RequestID(fromContext(ctx)) } -func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func ModuleName(ctx netcontext.Context) string { return appengine.ModuleName(fromContext(ctx)) } -func VersionID(ctx netcontext.Context) string { return appengine.VersionID(fromContext(ctx)) } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } - -func fullyQualifiedAppID(ctx netcontext.Context) string { return fromContext(ctx).FullyQualifiedAppID() } diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go deleted file mode 100644 index ebe68b785..000000000 --- a/vendor/google.golang.org/appengine/internal/identity_vm.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package internal - -import ( - "net/http" - "os" - - netcontext "golang.org/x/net/context" -) - -// These functions are implementations of the wrapper functions -// in ../appengine/identity.go. See that file for commentary. - -const ( - hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname" - hRequestLogId = "X-AppEngine-Request-Log-Id" - hDatacenter = "X-AppEngine-Datacenter" -) - -func ctxHeaders(ctx netcontext.Context) http.Header { - return fromContext(ctx).Request().Header -} - -func DefaultVersionHostname(ctx netcontext.Context) string { - return ctxHeaders(ctx).Get(hDefaultVersionHostname) -} - -func RequestID(ctx netcontext.Context) string { - return ctxHeaders(ctx).Get(hRequestLogId) -} - -func Datacenter(ctx netcontext.Context) string { - return ctxHeaders(ctx).Get(hDatacenter) -} - -func ServerSoftware() string { - // TODO(dsymonds): Remove fallback when we've verified this. - if s := os.Getenv("SERVER_SOFTWARE"); s != "" { - return s - } - return "Google App Engine/1.x.x" -} - -// TODO(dsymonds): Remove the metadata fetches. - -func ModuleName(_ netcontext.Context) string { - if s := os.Getenv("GAE_MODULE_NAME"); s != "" { - return s - } - return string(mustGetMetadata("instance/attributes/gae_backend_name")) -} - -func VersionID(_ netcontext.Context) string { - if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { - return s1 + "." + s2 - } - return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version")) -} - -func InstanceID() string { - if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" { - return s - } - return string(mustGetMetadata("instance/attributes/gae_backend_instance")) -} - -func partitionlessAppID() string { - // gae_project has everything except the partition prefix. 
- appID := os.Getenv("GAE_LONG_APP_ID") - if appID == "" { - appID = string(mustGetMetadata("instance/attributes/gae_project")) - } - return appID -} - -func fullyQualifiedAppID(_ netcontext.Context) string { - appID := partitionlessAppID() - - part := os.Getenv("GAE_PARTITION") - if part == "" { - part = string(mustGetMetadata("instance/attributes/gae_partition")) - } - - if part != "" { - appID = part + "~" + appID - } - return appID -} - -func IsDevAppServer() bool { - return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" -} diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go deleted file mode 100644 index 66e8d7686..000000000 --- a/vendor/google.golang.org/appengine/internal/internal.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package internal provides support for package appengine. -// -// Programs should not use this package directly. Its API is not stable. -// Use packages appengine and appengine/* instead. -package internal - -import ( - "fmt" - "io" - "log" - "net/http" - "net/url" - "os" - - "github.com/golang/protobuf/proto" - - remotepb "google.golang.org/appengine/internal/remote_api" -) - -// errorCodeMaps is a map of service name to the error code map for the service. -var errorCodeMaps = make(map[string]map[int32]string) - -// RegisterErrorCodeMap is called from API implementations to register their -// error code map. This should only be called from init functions. -func RegisterErrorCodeMap(service string, m map[int32]string) { - errorCodeMaps[service] = m -} - -type timeoutCodeKey struct { - service string - code int32 -} - -// timeoutCodes is the set of service+code pairs that represent timeouts. -var timeoutCodes = make(map[timeoutCodeKey]bool) - -func RegisterTimeoutErrorCode(service string, code int32) { - timeoutCodes[timeoutCodeKey{service, code}] = true -} - -// APIError is the type returned by appengine.Context's Call method -// when an API call fails in an API-specific way. This may be, for instance, -// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE. -type APIError struct { - Service string - Detail string - Code int32 // API-specific error code -} - -func (e *APIError) Error() string { - if e.Code == 0 { - if e.Detail == "" { - return "APIError <empty>" - } - return e.Detail - } - s := fmt.Sprintf("API error %d", e.Code) - if m, ok := errorCodeMaps[e.Service]; ok { - s += " (" + e.Service + ": " + m[e.Code] + ")" - } else { - // Shouldn't happen, but provide a bit more detail if it does. - s = e.Service + " " + s - } - if e.Detail != "" { - s += ": " + e.Detail - } - return s -} - -func (e *APIError) IsTimeout() bool { - return timeoutCodes[timeoutCodeKey{e.Service, e.Code}] -} - -// CallError is the type returned by appengine.Context's Call method when an -// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED. -type CallError struct { - Detail string - Code int32 - // TODO: Remove this if we get a distinguishable error code. 
- Timeout bool -} - -func (e *CallError) Error() string { - var msg string - switch remotepb.RpcError_ErrorCode(e.Code) { - case remotepb.RpcError_UNKNOWN: - return e.Detail - case remotepb.RpcError_OVER_QUOTA: - msg = "Over quota" - case remotepb.RpcError_CAPABILITY_DISABLED: - msg = "Capability disabled" - case remotepb.RpcError_CANCELLED: - msg = "Canceled" - default: - msg = fmt.Sprintf("Call error %d", e.Code) - } - s := msg + ": " + e.Detail - if e.Timeout { - s += " (timeout)" - } - return s -} - -func (e *CallError) IsTimeout() bool { - return e.Timeout -} - -func Main() { - installHealthChecker(http.DefaultServeMux) - - port := "8080" - if s := os.Getenv("PORT"); s != "" { - port = s - } - - if err := http.ListenAndServe(":"+port, http.HandlerFunc(handleHTTP)); err != nil { - log.Fatalf("http.ListenAndServe: %v", err) - } -} - -func installHealthChecker(mux *http.ServeMux) { - // If no health check handler has been installed by this point, add a trivial one. - const healthPath = "/_ah/health" - hreq := &http.Request{ - Method: "GET", - URL: &url.URL{ - Path: healthPath, - }, - } - if _, pat := mux.Handler(hreq); pat != healthPath { - mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, "ok") - }) - } -} - -// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace. -// The function should be prepared to be called on the same message more than once; it should only modify the -// RPC request the first time. -var NamespaceMods = make(map[string]func(m proto.Message, namespace string)) diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go deleted file mode 100644 index 20c595be3..000000000 --- a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go +++ /dev/null @@ -1,899 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/log/log_service.proto -// DO NOT EDIT! - -/* -Package log is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/log/log_service.proto - -It has these top-level messages: - LogServiceError - UserAppLogLine - UserAppLogGroup - FlushRequest - SetStatusRequest - LogOffset - LogLine - RequestLog - LogModuleVersion - LogReadRequest - LogReadResponse - LogUsageRecord - LogUsageRequest - LogUsageResponse -*/ -package log - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type LogServiceError_ErrorCode int32 - -const ( - LogServiceError_OK LogServiceError_ErrorCode = 0 - LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1 - LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2 -) - -var LogServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INVALID_REQUEST", - 2: "STORAGE_ERROR", -} -var LogServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INVALID_REQUEST": 1, - "STORAGE_ERROR": 2, -} - -func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode { - p := new(LogServiceError_ErrorCode) - *p = x - return p -} -func (x LogServiceError_ErrorCode) String() string { - return proto.EnumName(LogServiceError_ErrorCode_name, int32(x)) -} -func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode") - if err != nil { - return err - } - *x = LogServiceError_ErrorCode(value) - return nil -} - -type LogServiceError struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogServiceError) Reset() { *m = LogServiceError{} } -func (m *LogServiceError) String() string { return proto.CompactTextString(m) } -func (*LogServiceError) ProtoMessage() {} - -type UserAppLogLine struct { - TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec" json:"timestamp_usec,omitempty"` - Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` - Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} } -func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) } -func (*UserAppLogLine) ProtoMessage() {} - -func (m *UserAppLogLine) GetTimestampUsec() int64 { - if m != nil && m.TimestampUsec != nil { - return *m.TimestampUsec - } - return 0 -} - -func (m *UserAppLogLine) GetLevel() int64 { - if m != nil && m.Level != nil { - return *m.Level - } - return 0 -} - -func (m *UserAppLogLine) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -type UserAppLogGroup struct { - LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line" json:"log_line,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} } -func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) } -func (*UserAppLogGroup) ProtoMessage() {} - -func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine { - if m != nil { - return m.LogLine - } - return nil -} - -type FlushRequest struct { - Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FlushRequest) Reset() { *m = FlushRequest{} } -func (m *FlushRequest) String() string { return proto.CompactTextString(m) } -func (*FlushRequest) ProtoMessage() {} - -func (m *FlushRequest) GetLogs() []byte { - if m != nil { - return m.Logs - } - return nil -} - -type SetStatusRequest struct { - Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} } -func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) } -func (*SetStatusRequest) ProtoMessage() {} - -func (m *SetStatusRequest) GetStatus() string { - if m != nil && m.Status != nil { - return *m.Status - } - 
return "" -} - -type LogOffset struct { - RequestId []byte `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogOffset) Reset() { *m = LogOffset{} } -func (m *LogOffset) String() string { return proto.CompactTextString(m) } -func (*LogOffset) ProtoMessage() {} - -func (m *LogOffset) GetRequestId() []byte { - if m != nil { - return m.RequestId - } - return nil -} - -type LogLine struct { - Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"` - Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` - LogMessage *string `protobuf:"bytes,3,req,name=log_message" json:"log_message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogLine) Reset() { *m = LogLine{} } -func (m *LogLine) String() string { return proto.CompactTextString(m) } -func (*LogLine) ProtoMessage() {} - -func (m *LogLine) GetTime() int64 { - if m != nil && m.Time != nil { - return *m.Time - } - return 0 -} - -func (m *LogLine) GetLevel() int32 { - if m != nil && m.Level != nil { - return *m.Level - } - return 0 -} - -func (m *LogLine) GetLogMessage() string { - if m != nil && m.LogMessage != nil { - return *m.LogMessage - } - return "" -} - -type RequestLog struct { - AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` - ModuleId *string `protobuf:"bytes,37,opt,name=module_id,def=default" json:"module_id,omitempty"` - VersionId *string `protobuf:"bytes,2,req,name=version_id" json:"version_id,omitempty"` - RequestId []byte `protobuf:"bytes,3,req,name=request_id" json:"request_id,omitempty"` - Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"` - Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"` - Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"` - StartTime *int64 `protobuf:"varint,6,req,name=start_time" json:"start_time,omitempty"` - EndTime *int64 `protobuf:"varint,7,req,name=end_time" json:"end_time,omitempty"` - Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"` - Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"` - Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"` - Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"` - HttpVersion *string `protobuf:"bytes,12,req,name=http_version" json:"http_version,omitempty"` - Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"` - ResponseSize *int64 `protobuf:"varint,14,req,name=response_size" json:"response_size,omitempty"` - Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"` - UserAgent *string `protobuf:"bytes,16,opt,name=user_agent" json:"user_agent,omitempty"` - UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry" json:"url_map_entry,omitempty"` - Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"` - ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles" json:"api_mcycles,omitempty"` - Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"` - Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"` - TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name" json:"task_queue_name,omitempty"` - TaskName *string `protobuf:"bytes,23,opt,name=task_name" json:"task_name,omitempty"` - WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request" json:"was_loading_request,omitempty"` - PendingTime *int64 
`protobuf:"varint,25,opt,name=pending_time" json:"pending_time,omitempty"` - ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,def=-1" json:"replica_index,omitempty"` - Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"` - CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key" json:"clone_key,omitempty"` - Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"` - LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete" json:"lines_incomplete,omitempty"` - AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release" json:"app_engine_release,omitempty"` - ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason" json:"exit_reason,omitempty"` - WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time" json:"was_throttled_for_time,omitempty"` - WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests" json:"was_throttled_for_requests,omitempty"` - ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time" json:"throttled_time,omitempty"` - ServerName []byte `protobuf:"bytes,34,opt,name=server_name" json:"server_name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RequestLog) Reset() { *m = RequestLog{} } -func (m *RequestLog) String() string { return proto.CompactTextString(m) } -func (*RequestLog) ProtoMessage() {} - -const Default_RequestLog_ModuleId string = "default" -const Default_RequestLog_ReplicaIndex int32 = -1 -const Default_RequestLog_Finished bool = true - -func (m *RequestLog) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *RequestLog) GetModuleId() string { - if m != nil && m.ModuleId != nil { - return *m.ModuleId - } - return Default_RequestLog_ModuleId -} - -func (m *RequestLog) GetVersionId() string { - if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -func (m *RequestLog) GetRequestId() []byte { - if m != nil { - return m.RequestId - } - return nil -} - -func (m *RequestLog) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *RequestLog) GetIp() string { - if m != nil && m.Ip != nil { - return *m.Ip - } - return "" -} - -func (m *RequestLog) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *RequestLog) GetStartTime() int64 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *RequestLog) GetEndTime() int64 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *RequestLog) GetLatency() int64 { - if m != nil && m.Latency != nil { - return *m.Latency - } - return 0 -} - -func (m *RequestLog) GetMcycles() int64 { - if m != nil && m.Mcycles != nil { - return *m.Mcycles - } - return 0 -} - -func (m *RequestLog) GetMethod() string { - if m != nil && m.Method != nil { - return *m.Method - } - return "" -} - -func (m *RequestLog) GetResource() string { - if m != nil && m.Resource != nil { - return *m.Resource - } - return "" -} - -func (m *RequestLog) GetHttpVersion() string { - if m != nil && m.HttpVersion != nil { - return *m.HttpVersion - } - return "" -} - -func (m *RequestLog) GetStatus() int32 { - if m != nil && m.Status != nil { - return *m.Status - } - return 0 -} - -func (m *RequestLog) GetResponseSize() int64 { - if m != nil && m.ResponseSize != nil { - return *m.ResponseSize - } - return 0 -} - -func (m *RequestLog) GetReferrer() string { - if m != nil 
&& m.Referrer != nil { - return *m.Referrer - } - return "" -} - -func (m *RequestLog) GetUserAgent() string { - if m != nil && m.UserAgent != nil { - return *m.UserAgent - } - return "" -} - -func (m *RequestLog) GetUrlMapEntry() string { - if m != nil && m.UrlMapEntry != nil { - return *m.UrlMapEntry - } - return "" -} - -func (m *RequestLog) GetCombined() string { - if m != nil && m.Combined != nil { - return *m.Combined - } - return "" -} - -func (m *RequestLog) GetApiMcycles() int64 { - if m != nil && m.ApiMcycles != nil { - return *m.ApiMcycles - } - return 0 -} - -func (m *RequestLog) GetHost() string { - if m != nil && m.Host != nil { - return *m.Host - } - return "" -} - -func (m *RequestLog) GetCost() float64 { - if m != nil && m.Cost != nil { - return *m.Cost - } - return 0 -} - -func (m *RequestLog) GetTaskQueueName() string { - if m != nil && m.TaskQueueName != nil { - return *m.TaskQueueName - } - return "" -} - -func (m *RequestLog) GetTaskName() string { - if m != nil && m.TaskName != nil { - return *m.TaskName - } - return "" -} - -func (m *RequestLog) GetWasLoadingRequest() bool { - if m != nil && m.WasLoadingRequest != nil { - return *m.WasLoadingRequest - } - return false -} - -func (m *RequestLog) GetPendingTime() int64 { - if m != nil && m.PendingTime != nil { - return *m.PendingTime - } - return 0 -} - -func (m *RequestLog) GetReplicaIndex() int32 { - if m != nil && m.ReplicaIndex != nil { - return *m.ReplicaIndex - } - return Default_RequestLog_ReplicaIndex -} - -func (m *RequestLog) GetFinished() bool { - if m != nil && m.Finished != nil { - return *m.Finished - } - return Default_RequestLog_Finished -} - -func (m *RequestLog) GetCloneKey() []byte { - if m != nil { - return m.CloneKey - } - return nil -} - -func (m *RequestLog) GetLine() []*LogLine { - if m != nil { - return m.Line - } - return nil -} - -func (m *RequestLog) GetLinesIncomplete() bool { - if m != nil && m.LinesIncomplete != nil { - return *m.LinesIncomplete - } - return false -} - -func (m *RequestLog) GetAppEngineRelease() []byte { - if m != nil { - return m.AppEngineRelease - } - return nil -} - -func (m *RequestLog) GetExitReason() int32 { - if m != nil && m.ExitReason != nil { - return *m.ExitReason - } - return 0 -} - -func (m *RequestLog) GetWasThrottledForTime() bool { - if m != nil && m.WasThrottledForTime != nil { - return *m.WasThrottledForTime - } - return false -} - -func (m *RequestLog) GetWasThrottledForRequests() bool { - if m != nil && m.WasThrottledForRequests != nil { - return *m.WasThrottledForRequests - } - return false -} - -func (m *RequestLog) GetThrottledTime() int64 { - if m != nil && m.ThrottledTime != nil { - return *m.ThrottledTime - } - return 0 -} - -func (m *RequestLog) GetServerName() []byte { - if m != nil { - return m.ServerName - } - return nil -} - -type LogModuleVersion struct { - ModuleId *string `protobuf:"bytes,1,opt,name=module_id,def=default" json:"module_id,omitempty"` - VersionId *string `protobuf:"bytes,2,opt,name=version_id" json:"version_id,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} } -func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) } -func (*LogModuleVersion) ProtoMessage() {} - -const Default_LogModuleVersion_ModuleId string = "default" - -func (m *LogModuleVersion) GetModuleId() string { - if m != nil && m.ModuleId != nil { - return *m.ModuleId - } - return Default_LogModuleVersion_ModuleId -} - -func (m *LogModuleVersion) GetVersionId() string { - 
if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -type LogReadRequest struct { - AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` - VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"` - ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version" json:"module_version,omitempty"` - StartTime *int64 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"` - EndTime *int64 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"` - Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"` - RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id" json:"request_id,omitempty"` - MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level" json:"minimum_log_level,omitempty"` - IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete" json:"include_incomplete,omitempty"` - Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"` - CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex" json:"combined_log_regex,omitempty"` - HostRegex *string `protobuf:"bytes,15,opt,name=host_regex" json:"host_regex,omitempty"` - ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index" json:"replica_index,omitempty"` - IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs" json:"include_app_logs,omitempty"` - AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request" json:"app_logs_per_request,omitempty"` - IncludeHost *bool `protobuf:"varint,11,opt,name=include_host" json:"include_host,omitempty"` - IncludeAll *bool `protobuf:"varint,12,opt,name=include_all" json:"include_all,omitempty"` - CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator" json:"cache_iterator,omitempty"` - NumShards *int32 `protobuf:"varint,18,opt,name=num_shards" json:"num_shards,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogReadRequest) Reset() { *m = LogReadRequest{} } -func (m *LogReadRequest) String() string { return proto.CompactTextString(m) } -func (*LogReadRequest) ProtoMessage() {} - -func (m *LogReadRequest) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *LogReadRequest) GetVersionId() []string { - if m != nil { - return m.VersionId - } - return nil -} - -func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion { - if m != nil { - return m.ModuleVersion - } - return nil -} - -func (m *LogReadRequest) GetStartTime() int64 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogReadRequest) GetEndTime() int64 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogReadRequest) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *LogReadRequest) GetRequestId() [][]byte { - if m != nil { - return m.RequestId - } - return nil -} - -func (m *LogReadRequest) GetMinimumLogLevel() int32 { - if m != nil && m.MinimumLogLevel != nil { - return *m.MinimumLogLevel - } - return 0 -} - -func (m *LogReadRequest) GetIncludeIncomplete() bool { - if m != nil && m.IncludeIncomplete != nil { - return *m.IncludeIncomplete - } - return false -} - -func (m *LogReadRequest) GetCount() int64 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *LogReadRequest) GetCombinedLogRegex() string { - if m != nil && m.CombinedLogRegex != nil { - return *m.CombinedLogRegex - } - return 
"" -} - -func (m *LogReadRequest) GetHostRegex() string { - if m != nil && m.HostRegex != nil { - return *m.HostRegex - } - return "" -} - -func (m *LogReadRequest) GetReplicaIndex() int32 { - if m != nil && m.ReplicaIndex != nil { - return *m.ReplicaIndex - } - return 0 -} - -func (m *LogReadRequest) GetIncludeAppLogs() bool { - if m != nil && m.IncludeAppLogs != nil { - return *m.IncludeAppLogs - } - return false -} - -func (m *LogReadRequest) GetAppLogsPerRequest() int32 { - if m != nil && m.AppLogsPerRequest != nil { - return *m.AppLogsPerRequest - } - return 0 -} - -func (m *LogReadRequest) GetIncludeHost() bool { - if m != nil && m.IncludeHost != nil { - return *m.IncludeHost - } - return false -} - -func (m *LogReadRequest) GetIncludeAll() bool { - if m != nil && m.IncludeAll != nil { - return *m.IncludeAll - } - return false -} - -func (m *LogReadRequest) GetCacheIterator() bool { - if m != nil && m.CacheIterator != nil { - return *m.CacheIterator - } - return false -} - -func (m *LogReadRequest) GetNumShards() int32 { - if m != nil && m.NumShards != nil { - return *m.NumShards - } - return 0 -} - -type LogReadResponse struct { - Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"` - Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"` - LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time" json:"last_end_time,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogReadResponse) Reset() { *m = LogReadResponse{} } -func (m *LogReadResponse) String() string { return proto.CompactTextString(m) } -func (*LogReadResponse) ProtoMessage() {} - -func (m *LogReadResponse) GetLog() []*RequestLog { - if m != nil { - return m.Log - } - return nil -} - -func (m *LogReadResponse) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *LogReadResponse) GetLastEndTime() int64 { - if m != nil && m.LastEndTime != nil { - return *m.LastEndTime - } - return 0 -} - -type LogUsageRecord struct { - VersionId *string `protobuf:"bytes,1,opt,name=version_id" json:"version_id,omitempty"` - StartTime *int32 `protobuf:"varint,2,opt,name=start_time" json:"start_time,omitempty"` - EndTime *int32 `protobuf:"varint,3,opt,name=end_time" json:"end_time,omitempty"` - Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` - TotalSize *int64 `protobuf:"varint,5,opt,name=total_size" json:"total_size,omitempty"` - Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} } -func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) } -func (*LogUsageRecord) ProtoMessage() {} - -func (m *LogUsageRecord) GetVersionId() string { - if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -func (m *LogUsageRecord) GetStartTime() int32 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogUsageRecord) GetEndTime() int32 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogUsageRecord) GetCount() int64 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *LogUsageRecord) GetTotalSize() int64 { - if m != nil && m.TotalSize != nil { - return *m.TotalSize - } - return 0 -} - -func (m *LogUsageRecord) GetRecords() int32 { - if m != nil && m.Records != nil { - return *m.Records - } - return 0 -} - -type LogUsageRequest struct { - AppId *string 
`protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"` - VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"` - StartTime *int32 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"` - EndTime *int32 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"` - ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,def=1" json:"resolution_hours,omitempty"` - CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions" json:"combine_versions,omitempty"` - UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version" json:"usage_version,omitempty"` - VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only" json:"versions_only,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} } -func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) } -func (*LogUsageRequest) ProtoMessage() {} - -const Default_LogUsageRequest_ResolutionHours uint32 = 1 - -func (m *LogUsageRequest) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *LogUsageRequest) GetVersionId() []string { - if m != nil { - return m.VersionId - } - return nil -} - -func (m *LogUsageRequest) GetStartTime() int32 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogUsageRequest) GetEndTime() int32 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogUsageRequest) GetResolutionHours() uint32 { - if m != nil && m.ResolutionHours != nil { - return *m.ResolutionHours - } - return Default_LogUsageRequest_ResolutionHours -} - -func (m *LogUsageRequest) GetCombineVersions() bool { - if m != nil && m.CombineVersions != nil { - return *m.CombineVersions - } - return false -} - -func (m *LogUsageRequest) GetUsageVersion() int32 { - if m != nil && m.UsageVersion != nil { - return *m.UsageVersion - } - return 0 -} - -func (m *LogUsageRequest) GetVersionsOnly() bool { - if m != nil && m.VersionsOnly != nil { - return *m.VersionsOnly - } - return false -} - -type LogUsageResponse struct { - Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"` - Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} } -func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) } -func (*LogUsageResponse) ProtoMessage() {} - -func (m *LogUsageResponse) GetUsage() []*LogUsageRecord { - if m != nil { - return m.Usage - } - return nil -} - -func (m *LogUsageResponse) GetSummary() *LogUsageRecord { - if m != nil { - return m.Summary - } - return nil -} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto deleted file mode 100644 index 8981dc475..000000000 --- a/vendor/google.golang.org/appengine/internal/log/log_service.proto +++ /dev/null @@ -1,150 +0,0 @@ -syntax = "proto2"; -option go_package = "log"; - -package appengine; - -message LogServiceError { - enum ErrorCode { - OK = 0; - INVALID_REQUEST = 1; - STORAGE_ERROR = 2; - } -} - -message UserAppLogLine { - required int64 timestamp_usec = 1; - required int64 level = 2; - required string message = 3; -} - -message UserAppLogGroup { - repeated UserAppLogLine log_line = 2; -} - -message FlushRequest { - optional bytes logs = 1; 
-} - -message SetStatusRequest { - required string status = 1; -} - - -message LogOffset { - optional bytes request_id = 1; -} - -message LogLine { - required int64 time = 1; - required int32 level = 2; - required string log_message = 3; -} - -message RequestLog { - required string app_id = 1; - optional string module_id = 37 [default="default"]; - required string version_id = 2; - required bytes request_id = 3; - optional LogOffset offset = 35; - required string ip = 4; - optional string nickname = 5; - required int64 start_time = 6; - required int64 end_time = 7; - required int64 latency = 8; - required int64 mcycles = 9; - required string method = 10; - required string resource = 11; - required string http_version = 12; - required int32 status = 13; - required int64 response_size = 14; - optional string referrer = 15; - optional string user_agent = 16; - required string url_map_entry = 17; - required string combined = 18; - optional int64 api_mcycles = 19; - optional string host = 20; - optional double cost = 21; - - optional string task_queue_name = 22; - optional string task_name = 23; - - optional bool was_loading_request = 24; - optional int64 pending_time = 25; - optional int32 replica_index = 26 [default = -1]; - optional bool finished = 27 [default = true]; - optional bytes clone_key = 28; - - repeated LogLine line = 29; - - optional bool lines_incomplete = 36; - optional bytes app_engine_release = 38; - - optional int32 exit_reason = 30; - optional bool was_throttled_for_time = 31; - optional bool was_throttled_for_requests = 32; - optional int64 throttled_time = 33; - - optional bytes server_name = 34; -} - -message LogModuleVersion { - optional string module_id = 1 [default="default"]; - optional string version_id = 2; -} - -message LogReadRequest { - required string app_id = 1; - repeated string version_id = 2; - repeated LogModuleVersion module_version = 19; - - optional int64 start_time = 3; - optional int64 end_time = 4; - optional LogOffset offset = 5; - repeated bytes request_id = 6; - - optional int32 minimum_log_level = 7; - optional bool include_incomplete = 8; - optional int64 count = 9; - - optional string combined_log_regex = 14; - optional string host_regex = 15; - optional int32 replica_index = 16; - - optional bool include_app_logs = 10; - optional int32 app_logs_per_request = 17; - optional bool include_host = 11; - optional bool include_all = 12; - optional bool cache_iterator = 13; - optional int32 num_shards = 18; -} - -message LogReadResponse { - repeated RequestLog log = 1; - optional LogOffset offset = 2; - optional int64 last_end_time = 3; -} - -message LogUsageRecord { - optional string version_id = 1; - optional int32 start_time = 2; - optional int32 end_time = 3; - optional int64 count = 4; - optional int64 total_size = 5; - optional int32 records = 6; -} - -message LogUsageRequest { - required string app_id = 1; - repeated string version_id = 2; - optional int32 start_time = 3; - optional int32 end_time = 4; - optional uint32 resolution_hours = 5 [default = 1]; - optional bool combine_versions = 6; - optional int32 usage_version = 7; - optional bool versions_only = 8; -} - -message LogUsageResponse { - repeated LogUsageRecord usage = 1; - optional LogUsageRecord summary = 2; -} diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go deleted file mode 100644 index 9cc1f71d1..000000000 --- a/vendor/google.golang.org/appengine/internal/metadata.go +++ /dev/null @@ -1,61 +0,0 @@ -// 
Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file has code for accessing metadata. -// -// References: -// https://cloud.google.com/compute/docs/metadata - -import ( - "fmt" - "io/ioutil" - "log" - "net/http" - "net/url" -) - -const ( - metadataHost = "metadata" - metadataPath = "/computeMetadata/v1/" -) - -var ( - metadataRequestHeaders = http.Header{ - "Metadata-Flavor": []string{"Google"}, - } -) - -// TODO(dsymonds): Do we need to support default values, like Python? -func mustGetMetadata(key string) []byte { - b, err := getMetadata(key) - if err != nil { - log.Fatalf("Metadata fetch failed: %v", err) - } - return b -} - -func getMetadata(key string) ([]byte, error) { - // TODO(dsymonds): May need to use url.Parse to support keys with query args. - req := &http.Request{ - Method: "GET", - URL: &url.URL{ - Scheme: "http", - Host: metadataHost, - Path: metadataPath + key, - }, - Header: metadataRequestHeaders, - Host: metadataHost, - } - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode != 200 { - return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode) - } - return ioutil.ReadAll(resp.Body) -} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go deleted file mode 100644 index a0145ed31..000000000 --- a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go +++ /dev/null @@ -1,375 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/modules/modules_service.proto -// DO NOT EDIT! - -/* -Package modules is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/modules/modules_service.proto - -It has these top-level messages: - ModulesServiceError - GetModulesRequest - GetModulesResponse - GetVersionsRequest - GetVersionsResponse - GetDefaultVersionRequest - GetDefaultVersionResponse - GetNumInstancesRequest - GetNumInstancesResponse - SetNumInstancesRequest - SetNumInstancesResponse - StartModuleRequest - StartModuleResponse - StopModuleRequest - StopModuleResponse - GetHostnameRequest - GetHostnameResponse -*/ -package modules - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type ModulesServiceError_ErrorCode int32 - -const ( - ModulesServiceError_OK ModulesServiceError_ErrorCode = 0 - ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1 - ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2 - ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3 - ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4 - ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5 -) - -var ModulesServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INVALID_MODULE", - 2: "INVALID_VERSION", - 3: "INVALID_INSTANCES", - 4: "TRANSIENT_ERROR", - 5: "UNEXPECTED_STATE", -} -var ModulesServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INVALID_MODULE": 1, - "INVALID_VERSION": 2, - "INVALID_INSTANCES": 3, - "TRANSIENT_ERROR": 4, - "UNEXPECTED_STATE": 5, -} - -func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode { - p := new(ModulesServiceError_ErrorCode) - *p = x - return p -} -func (x ModulesServiceError_ErrorCode) String() string { - return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x)) -} -func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode") - if err != nil { - return err - } - *x = ModulesServiceError_ErrorCode(value) - return nil -} - -type ModulesServiceError struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} } -func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) } -func (*ModulesServiceError) ProtoMessage() {} - -type GetModulesRequest struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} } -func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) } -func (*GetModulesRequest) ProtoMessage() {} - -type GetModulesResponse struct { - Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} } -func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) } -func (*GetModulesResponse) ProtoMessage() {} - -func (m *GetModulesResponse) GetModule() []string { - if m != nil { - return m.Module - } - return nil -} - -type GetVersionsRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} } -func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) } -func (*GetVersionsRequest) ProtoMessage() {} - -func (m *GetVersionsRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -type GetVersionsResponse struct { - Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} } -func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) } -func (*GetVersionsResponse) ProtoMessage() {} - -func (m *GetVersionsResponse) GetVersion() []string { - if m != nil { - return m.Version - } - return nil -} - -type GetDefaultVersionRequest struct { - Module *string 
`protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} } -func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) } -func (*GetDefaultVersionRequest) ProtoMessage() {} - -func (m *GetDefaultVersionRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -type GetDefaultVersionResponse struct { - Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} } -func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) } -func (*GetDefaultVersionResponse) ProtoMessage() {} - -func (m *GetDefaultVersionResponse) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type GetNumInstancesRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} } -func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) } -func (*GetNumInstancesRequest) ProtoMessage() {} - -func (m *GetNumInstancesRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *GetNumInstancesRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type GetNumInstancesResponse struct { - Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} } -func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) } -func (*GetNumInstancesResponse) ProtoMessage() {} - -func (m *GetNumInstancesResponse) GetInstances() int64 { - if m != nil && m.Instances != nil { - return *m.Instances - } - return 0 -} - -type SetNumInstancesRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} } -func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) } -func (*SetNumInstancesRequest) ProtoMessage() {} - -func (m *SetNumInstancesRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *SetNumInstancesRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m *SetNumInstancesRequest) GetInstances() int64 { - if m != nil && m.Instances != nil { - return *m.Instances - } - return 0 -} - -type SetNumInstancesResponse struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} } -func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) } -func (*SetNumInstancesResponse) ProtoMessage() {} - -type StartModuleRequest struct { - Module *string 
`protobuf:"bytes,1,req,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} } -func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) } -func (*StartModuleRequest) ProtoMessage() {} - -func (m *StartModuleRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *StartModuleRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type StartModuleResponse struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} } -func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) } -func (*StartModuleResponse) ProtoMessage() {} - -type StopModuleRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} } -func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) } -func (*StopModuleRequest) ProtoMessage() {} - -func (m *StopModuleRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *StopModuleRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type StopModuleResponse struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} } -func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) } -func (*StopModuleResponse) ProtoMessage() {} - -type GetHostnameRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} } -func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) } -func (*GetHostnameRequest) ProtoMessage() {} - -func (m *GetHostnameRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *GetHostnameRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m *GetHostnameRequest) GetInstance() string { - if m != nil && m.Instance != nil { - return *m.Instance - } - return "" -} - -type GetHostnameResponse struct { - Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} } -func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) } -func (*GetHostnameResponse) ProtoMessage() {} - -func (m *GetHostnameResponse) GetHostname() string { - if m != nil && m.Hostname != nil { - return *m.Hostname - } - return "" -} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto deleted file mode 100644 index d29f0065a..000000000 --- 
a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto +++ /dev/null @@ -1,80 +0,0 @@ -syntax = "proto2"; -option go_package = "modules"; - -package appengine; - -message ModulesServiceError { - enum ErrorCode { - OK = 0; - INVALID_MODULE = 1; - INVALID_VERSION = 2; - INVALID_INSTANCES = 3; - TRANSIENT_ERROR = 4; - UNEXPECTED_STATE = 5; - } -} - -message GetModulesRequest { -} - -message GetModulesResponse { - repeated string module = 1; -} - -message GetVersionsRequest { - optional string module = 1; -} - -message GetVersionsResponse { - repeated string version = 1; -} - -message GetDefaultVersionRequest { - optional string module = 1; -} - -message GetDefaultVersionResponse { - required string version = 1; -} - -message GetNumInstancesRequest { - optional string module = 1; - optional string version = 2; -} - -message GetNumInstancesResponse { - required int64 instances = 1; -} - -message SetNumInstancesRequest { - optional string module = 1; - optional string version = 2; - required int64 instances = 3; -} - -message SetNumInstancesResponse {} - -message StartModuleRequest { - required string module = 1; - required string version = 2; -} - -message StartModuleResponse {} - -message StopModuleRequest { - optional string module = 1; - optional string version = 2; -} - -message StopModuleResponse {} - -message GetHostnameRequest { - optional string module = 1; - optional string version = 2; - optional string instance = 3; -} - -message GetHostnameResponse { - required string hostname = 1; -} - diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go deleted file mode 100644 index 3b94cf0c6..000000000 --- a/vendor/google.golang.org/appengine/internal/net.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file implements a network dialer that limits the number of concurrent connections. -// It is only used for API calls. - -import ( - "log" - "net" - "runtime" - "sync" - "time" -) - -var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable. - -func limitRelease() { - // non-blocking - select { - case <-limitSem: - default: - // This should not normally happen. - log.Print("appengine: unbalanced limitSem release!") - } -} - -func limitDial(network, addr string) (net.Conn, error) { - limitSem <- 1 - - // Dial with a timeout in case the API host is MIA. - // The connection should normally be very fast. - conn, err := net.DialTimeout(network, addr, 500*time.Millisecond) - if err != nil { - limitRelease() - return nil, err - } - lc := &limitConn{Conn: conn} - runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required - return lc, nil -} - -type limitConn struct { - close sync.Once - net.Conn -} - -func (lc *limitConn) Close() error { - defer lc.close.Do(func() { - limitRelease() - runtime.SetFinalizer(lc, nil) - }) - return lc.Conn.Close() -} diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh deleted file mode 100755 index 2fdb546a6..000000000 --- a/vendor/google.golang.org/appengine/internal/regen.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -e -# -# This script rebuilds the generated code for the protocol buffers. 
-# To run this you will need protoc and goprotobuf installed; -# see https://github.com/golang/protobuf for instructions. - -PKG=google.golang.org/appengine - -function die() { - echo 1>&2 $* - exit 1 -} - -# Sanity check that the right tools are accessible. -for tool in go protoc protoc-gen-go; do - q=$(which $tool) || die "didn't find $tool" - echo 1>&2 "$tool: $q" -done - -echo -n 1>&2 "finding package dir... " -pkgdir=$(go list -f '{{.Dir}}' $PKG) -echo 1>&2 $pkgdir -base=$(echo $pkgdir | sed "s,/$PKG\$,,") -echo 1>&2 "base: $base" -cd $base - -# Run protoc once per package. -for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do - echo 1>&2 "* $dir" - protoc --go_out=. $dir/*.proto -done - -for f in $(find $PKG/internal -name '*.pb.go'); do - # Remove proto.RegisterEnum calls. - # These cause duplicate registration panics when these packages - # are used on classic App Engine. proto.RegisterEnum only affects - # parsing the text format; we don't care about that. - # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17 - sed -i '/proto.RegisterEnum/d' $f -done diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go deleted file mode 100644 index 526bd39e6..000000000 --- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go +++ /dev/null @@ -1,231 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/appengine/internal/remote_api/remote_api.proto -// DO NOT EDIT! - -/* -Package remote_api is a generated protocol buffer package. - -It is generated from these files: - google.golang.org/appengine/internal/remote_api/remote_api.proto - -It has these top-level messages: - Request - ApplicationError - RpcError - Response -*/ -package remote_api - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type RpcError_ErrorCode int32 - -const ( - RpcError_UNKNOWN RpcError_ErrorCode = 0 - RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1 - RpcError_PARSE_ERROR RpcError_ErrorCode = 2 - RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3 - RpcError_OVER_QUOTA RpcError_ErrorCode = 4 - RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5 - RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6 - RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7 - RpcError_BAD_REQUEST RpcError_ErrorCode = 8 - RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9 - RpcError_CANCELLED RpcError_ErrorCode = 10 - RpcError_REPLAY_ERROR RpcError_ErrorCode = 11 - RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12 -) - -var RpcError_ErrorCode_name = map[int32]string{ - 0: "UNKNOWN", - 1: "CALL_NOT_FOUND", - 2: "PARSE_ERROR", - 3: "SECURITY_VIOLATION", - 4: "OVER_QUOTA", - 5: "REQUEST_TOO_LARGE", - 6: "CAPABILITY_DISABLED", - 7: "FEATURE_DISABLED", - 8: "BAD_REQUEST", - 9: "RESPONSE_TOO_LARGE", - 10: "CANCELLED", - 11: "REPLAY_ERROR", - 12: "DEADLINE_EXCEEDED", -} -var RpcError_ErrorCode_value = map[string]int32{ - "UNKNOWN": 0, - "CALL_NOT_FOUND": 1, - "PARSE_ERROR": 2, - "SECURITY_VIOLATION": 3, - "OVER_QUOTA": 4, - "REQUEST_TOO_LARGE": 5, - "CAPABILITY_DISABLED": 6, - "FEATURE_DISABLED": 7, - "BAD_REQUEST": 8, - "RESPONSE_TOO_LARGE": 9, - "CANCELLED": 10, - "REPLAY_ERROR": 11, - "DEADLINE_EXCEEDED": 12, -} - -func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode { - p := new(RpcError_ErrorCode) - *p = x - return p -} -func (x RpcError_ErrorCode) String() string { - return proto.EnumName(RpcError_ErrorCode_name, int32(x)) -} -func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode") - if err != nil { - return err - } - *x = RpcError_ErrorCode(value) - return nil -} - -type Request struct { - ServiceName *string `protobuf:"bytes,2,req,name=service_name" json:"service_name,omitempty"` - Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"` - Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"` - RequestId *string `protobuf:"bytes,5,opt,name=request_id" json:"request_id,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Request) Reset() { *m = Request{} } -func (m *Request) String() string { return proto.CompactTextString(m) } -func (*Request) ProtoMessage() {} - -func (m *Request) GetServiceName() string { - if m != nil && m.ServiceName != nil { - return *m.ServiceName - } - return "" -} - -func (m *Request) GetMethod() string { - if m != nil && m.Method != nil { - return *m.Method - } - return "" -} - -func (m *Request) GetRequest() []byte { - if m != nil { - return m.Request - } - return nil -} - -func (m *Request) GetRequestId() string { - if m != nil && m.RequestId != nil { - return *m.RequestId - } - return "" -} - -type ApplicationError struct { - Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` - Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ApplicationError) Reset() { *m = ApplicationError{} } -func (m *ApplicationError) String() string { return proto.CompactTextString(m) } -func (*ApplicationError) ProtoMessage() {} - -func (m *ApplicationError) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *ApplicationError) GetDetail() string { - if m != nil && m.Detail 
!= nil { - return *m.Detail - } - return "" -} - -type RpcError struct { - Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` - Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RpcError) Reset() { *m = RpcError{} } -func (m *RpcError) String() string { return proto.CompactTextString(m) } -func (*RpcError) ProtoMessage() {} - -func (m *RpcError) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *RpcError) GetDetail() string { - if m != nil && m.Detail != nil { - return *m.Detail - } - return "" -} - -type Response struct { - Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"` - Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"` - ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error" json:"application_error,omitempty"` - JavaException []byte `protobuf:"bytes,4,opt,name=java_exception" json:"java_exception,omitempty"` - RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error" json:"rpc_error,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Response) Reset() { *m = Response{} } -func (m *Response) String() string { return proto.CompactTextString(m) } -func (*Response) ProtoMessage() {} - -func (m *Response) GetResponse() []byte { - if m != nil { - return m.Response - } - return nil -} - -func (m *Response) GetException() []byte { - if m != nil { - return m.Exception - } - return nil -} - -func (m *Response) GetApplicationError() *ApplicationError { - if m != nil { - return m.ApplicationError - } - return nil -} - -func (m *Response) GetJavaException() []byte { - if m != nil { - return m.JavaException - } - return nil -} - -func (m *Response) GetRpcError() *RpcError { - if m != nil { - return m.RpcError - } - return nil -} - -func init() { -} diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto deleted file mode 100644 index f21763a4e..000000000 --- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto2"; -option go_package = "remote_api"; - -package remote_api; - -message Request { - required string service_name = 2; - required string method = 3; - required bytes request = 4; - optional string request_id = 5; -} - -message ApplicationError { - required int32 code = 1; - required string detail = 2; -} - -message RpcError { - enum ErrorCode { - UNKNOWN = 0; - CALL_NOT_FOUND = 1; - PARSE_ERROR = 2; - SECURITY_VIOLATION = 3; - OVER_QUOTA = 4; - REQUEST_TOO_LARGE = 5; - CAPABILITY_DISABLED = 6; - FEATURE_DISABLED = 7; - BAD_REQUEST = 8; - RESPONSE_TOO_LARGE = 9; - CANCELLED = 10; - REPLAY_ERROR = 11; - DEADLINE_EXCEEDED = 12; - } - required int32 code = 1; - optional string detail = 2; -} - -message Response { - optional bytes response = 1; - optional bytes exception = 2; - optional ApplicationError application_error = 3; - optional bytes java_exception = 4; - optional RpcError rpc_error = 5; -} diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go deleted file mode 100644 index 28a6d1812..000000000 --- a/vendor/google.golang.org/appengine/internal/transaction.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. 
-// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file implements hooks for applying datastore transactions. - -import ( - "errors" - "reflect" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" - - basepb "google.golang.org/appengine/internal/base" - pb "google.golang.org/appengine/internal/datastore" -) - -var transactionSetters = make(map[reflect.Type]reflect.Value) - -// RegisterTransactionSetter registers a function that sets transaction information -// in a protocol buffer message. f should be a function with two arguments, -// the first being a protocol buffer type, and the second being *datastore.Transaction. -func RegisterTransactionSetter(f interface{}) { - v := reflect.ValueOf(f) - transactionSetters[v.Type().In(0)] = v -} - -// applyTransaction applies the transaction t to message pb -// by using the relevant setter passed to RegisterTransactionSetter. -func applyTransaction(pb proto.Message, t *pb.Transaction) { - v := reflect.ValueOf(pb) - if f, ok := transactionSetters[v.Type()]; ok { - f.Call([]reflect.Value{v, reflect.ValueOf(t)}) - } -} - -var transactionKey = "used for *Transaction" - -func transactionFromContext(ctx netcontext.Context) *transaction { - t, _ := ctx.Value(&transactionKey).(*transaction) - return t -} - -func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { - return netcontext.WithValue(ctx, &transactionKey, t) -} - -type transaction struct { - transaction pb.Transaction - finished bool -} - -var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") - -func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool) error { - if transactionFromContext(c) != nil { - return errors.New("nested transactions are not supported") - } - - // Begin the transaction. - t := &transaction{} - req := &pb.BeginTransactionRequest{ - App: proto.String(FullyQualifiedAppID(c)), - } - if xg { - req.AllowMultipleEg = proto.Bool(true) - } - if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil { - return err - } - - // Call f, rolling back the transaction if f returns a non-nil error, or panics. - // The panic is not recovered. - defer func() { - if t.finished { - return - } - t.finished = true - // Ignore the error return value, since we are already returning a non-nil - // error (or we're panicking). - Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{}) - }() - if err := f(withTransaction(c, t)); err != nil { - return err - } - t.finished = true - - // Commit the transaction. - res := &pb.CommitResponse{} - err := Call(c, "datastore_v3", "Commit", &t.transaction, res) - if ae, ok := err.(*APIError); ok { - /* TODO: restore this conditional - if appengine.IsDevAppServer() { - */ - // The Python Dev AppServer raises an ApplicationError with error code 2 (which is - // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.". - if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." 
{ - return ErrConcurrentTransaction - } - if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) { - return ErrConcurrentTransaction - } - } - return err -} diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go deleted file mode 100644 index 21860ca08..000000000 --- a/vendor/google.golang.org/appengine/namespace.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import ( - "fmt" - "regexp" - - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// Namespace returns a replacement context that operates within the given namespace. -func Namespace(c context.Context, namespace string) (context.Context, error) { - if !validNamespace.MatchString(namespace) { - return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace) - } - return internal.NamespacedContext(c, namespace), nil -} - -// validNamespace matches valid namespace names. -var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`) diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go deleted file mode 100644 index 05642a992..000000000 --- a/vendor/google.golang.org/appengine/timeout.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2013 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import "golang.org/x/net/context" - -// IsTimeoutError reports whether err is a timeout error. -func IsTimeoutError(err error) bool { - if err == context.DeadlineExceeded { - return true - } - if t, ok := err.(interface { - IsTimeout() bool - }); ok { - return t.IsTimeout() - } - return false -} diff --git a/vendor/google.golang.org/cloud/LICENSE b/vendor/google.golang.org/cloud/LICENSE deleted file mode 100644 index a4c5efd82..000000000 --- a/vendor/google.golang.org/cloud/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2014 Google Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/google.golang.org/cloud/compute/metadata/metadata.go b/vendor/google.golang.org/cloud/compute/metadata/metadata.go deleted file mode 100644 index 3b537ef0c..000000000 --- a/vendor/google.golang.org/cloud/compute/metadata/metadata.go +++ /dev/null @@ -1,437 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package metadata provides access to Google Compute Engine (GCE) -// metadata and API service accounts. 
-// -// This package is a wrapper around the GCE metadata service, -// as documented at https://developers.google.com/compute/docs/metadata. -package metadata - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/context/ctxhttp" - - "google.golang.org/cloud/internal" -) - -const ( - // metadataIP is the documented metadata server IP address. - metadataIP = "169.254.169.254" - - // metadataHostEnv is the environment variable specifying the - // GCE metadata hostname. If empty, the default value of - // metadataIP ("169.254.169.254") is used instead. - // This is variable name is not defined by any spec, as far as - // I know; it was made up for the Go package. - metadataHostEnv = "GCE_METADATA_HOST" -) - -type cachedValue struct { - k string - trim bool - mu sync.Mutex - v string -} - -var ( - projID = &cachedValue{k: "project/project-id", trim: true} - projNum = &cachedValue{k: "project/numeric-project-id", trim: true} - instID = &cachedValue{k: "instance/id", trim: true} -) - -var ( - metaClient = &http.Client{ - Transport: &internal.Transport{ - Base: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - ResponseHeaderTimeout: 2 * time.Second, - }, - }, - } - subscribeClient = &http.Client{ - Transport: &internal.Transport{ - Base: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - }, - }, - } -) - -// NotDefinedError is returned when requested metadata is not defined. -// -// The underlying string is the suffix after "/computeMetadata/v1/". -// -// This error is not returned if the value is defined to be the empty -// string. -type NotDefinedError string - -func (suffix NotDefinedError) Error() string { - return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) -} - -// Get returns a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// -// If the GCE_METADATA_HOST environment variable is not defined, a default of -// 169.254.169.254 will be used instead. -// -// If the requested metadata is not defined, the returned error will -// be of type NotDefinedError. -func Get(suffix string) (string, error) { - val, _, err := getETag(metaClient, suffix) - return val, err -} - -// getETag returns a value from the metadata service as well as the associated -// ETag using the provided client. This func is otherwise equivalent to Get. -func getETag(client *http.Client, suffix string) (value, etag string, err error) { - // Using a fixed IP makes it very difficult to spoof the metadata service in - // a container, which is an important use-case for local testing of cloud - // deployments. To enable spoofing of the metadata service, the environment - // variable GCE_METADATA_HOST is first inspected to decide where metadata - // requests shall go. - host := os.Getenv(metadataHostEnv) - if host == "" { - // Using 169.254.169.254 instead of "metadata" here because Go - // binaries built with the "netgo" tag and without cgo won't - // know the search suffix for "metadata" is - // ".google.internal", and this IP address is documented as - // being stable anyway. 
- host = metadataIP - } - url := "http://" + host + "/computeMetadata/v1/" + suffix - req, _ := http.NewRequest("GET", url, nil) - req.Header.Set("Metadata-Flavor", "Google") - res, err := client.Do(req) - if err != nil { - return "", "", err - } - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - return "", "", NotDefinedError(suffix) - } - if res.StatusCode != 200 { - return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) - } - all, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", err - } - return string(all), res.Header.Get("Etag"), nil -} - -func getTrimmed(suffix string) (s string, err error) { - s, err = Get(suffix) - s = strings.TrimSpace(s) - return -} - -func (c *cachedValue) get() (v string, err error) { - defer c.mu.Unlock() - c.mu.Lock() - if c.v != "" { - return c.v, nil - } - if c.trim { - v, err = getTrimmed(c.k) - } else { - v, err = Get(c.k) - } - if err == nil { - c.v = v - } - return -} - -var ( - onGCEOnce sync.Once - onGCE bool -) - -// OnGCE reports whether this process is running on Google Compute Engine. -func OnGCE() bool { - onGCEOnce.Do(initOnGCE) - return onGCE -} - -func initOnGCE() { - onGCE = testOnGCE() -} - -func testOnGCE() bool { - // The user explicitly said they're on GCE, so trust them. - if os.Getenv(metadataHostEnv) != "" { - return true - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - resc := make(chan bool, 2) - - // Try two strategies in parallel. - // See https://github.com/GoogleCloudPlatform/gcloud-golang/issues/194 - go func() { - res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP) - if err != nil { - resc <- false - return - } - defer res.Body.Close() - resc <- res.Header.Get("Metadata-Flavor") == "Google" - }() - - go func() { - addrs, err := net.LookupHost("metadata.google.internal") - if err != nil || len(addrs) == 0 { - resc <- false - return - } - resc <- strsContains(addrs, metadataIP) - }() - - tryHarder := systemInfoSuggestsGCE() - if tryHarder { - res := <-resc - if res { - // The first strategy succeeded, so let's use it. - return true - } - // Wait for either the DNS or metadata server probe to - // contradict the other one and say we are running on - // GCE. Give it a lot of time to do so, since the system - // info already suggests we're running on a GCE BIOS. - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() - select { - case res = <-resc: - return res - case <-timer.C: - // Too slow. Who knows what this system is. - return false - } - } - - // There's no hint from the system info that we're running on - // GCE, so use the first probe's result as truth, whether it's - // true or false. The goal here is to optimize for speed for - // users who are NOT running on GCE. We can't assume that - // either a DNS lookup or an HTTP request to a blackholed IP - // address is fast. Worst case this should return when the - // metaClient's Transport.ResponseHeaderTimeout or - // Transport.Dial.Timeout fires (in two seconds). - return <-resc -} - -// systemInfoSuggestsGCE reports whether the local system (without -// doing network requests) suggests that we're running on GCE. If this -// returns true, testOnGCE tries a bit harder to reach its metadata -// server. -func systemInfoSuggestsGCE() bool { - if runtime.GOOS != "linux" { - // We don't have any non-Linux clues available, at least yet. 
- return false - } - slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") - return strings.TrimSpace(string(slurp)) == "Google" -} - -// Subscribe subscribes to a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// The suffix may contain query parameters. -// -// Subscribe calls fn with the latest metadata value indicated by the provided -// suffix. If the metadata value is deleted, fn is called with the empty string -// and ok false. Subscribe blocks until fn returns a non-nil error or the value -// is deleted. Subscribe returns the error value returned from the last call to -// fn, which may be nil when ok == false. -func Subscribe(suffix string, fn func(v string, ok bool) error) error { - const failedSubscribeSleep = time.Second * 5 - - // First check to see if the metadata value exists at all. - val, lastETag, err := getETag(subscribeClient, suffix) - if err != nil { - return err - } - - if err := fn(val, true); err != nil { - return err - } - - ok := true - if strings.ContainsRune(suffix, '?') { - suffix += "&wait_for_change=true&last_etag=" - } else { - suffix += "?wait_for_change=true&last_etag=" - } - for { - val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag)) - if err != nil { - if _, deleted := err.(NotDefinedError); !deleted { - time.Sleep(failedSubscribeSleep) - continue // Retry on other errors. - } - ok = false - } - lastETag = etag - - if err := fn(val, ok); err != nil || !ok { - return err - } - } -} - -// ProjectID returns the current instance's project ID string. -func ProjectID() (string, error) { return projID.get() } - -// NumericProjectID returns the current instance's numeric project ID. -func NumericProjectID() (string, error) { return projNum.get() } - -// InternalIP returns the instance's primary internal IP address. -func InternalIP() (string, error) { - return getTrimmed("instance/network-interfaces/0/ip") -} - -// ExternalIP returns the instance's primary external (public) IP address. -func ExternalIP() (string, error) { - return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") -} - -// Hostname returns the instance's hostname. This will be of the form -// "<instanceID>.c.<projID>.internal". -func Hostname() (string, error) { - return getTrimmed("instance/hostname") -} - -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. -func InstanceTags() ([]string, error) { - var s []string - j, err := Get("instance/tags") - if err != nil { - return nil, err - } - if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { - return nil, err - } - return s, nil -} - -// InstanceID returns the current VM's numeric instance ID. -func InstanceID() (string, error) { - return instID.get() -} - -// InstanceName returns the current VM's instance ID string. -func InstanceName() (string, error) { - host, err := Hostname() - if err != nil { - return "", err - } - return strings.Split(host, ".")[0], nil -} - -// Zone returns the current VM's zone, such as "us-central1-b". -func Zone() (string, error) { - zone, err := getTrimmed("instance/zone") - // zone is of the form "projects/<projNum>/zones/<zoneName>". - if err != nil { - return "", err - } - return zone[strings.LastIndex(zone, "/")+1:], nil -} - -// InstanceAttributes returns the list of user-defined attributes, -// assigned when initially creating a GCE VM instance. 
The value of an -// attribute can be obtained with InstanceAttributeValue. -func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") } - -// ProjectAttributes returns the list of user-defined attributes -// applying to the project as a whole, not just this VM. The value of -// an attribute can be obtained with ProjectAttributeValue. -func ProjectAttributes() ([]string, error) { return lines("project/attributes/") } - -func lines(suffix string) ([]string, error) { - j, err := Get(suffix) - if err != nil { - return nil, err - } - s := strings.Split(strings.TrimSpace(j), "\n") - for i := range s { - s[i] = strings.TrimSpace(s[i]) - } - return s, nil -} - -// InstanceAttributeValue returns the value of the provided VM -// instance attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// InstanceAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func InstanceAttributeValue(attr string) (string, error) { - return Get("instance/attributes/" + attr) -} - -// ProjectAttributeValue returns the value of the provided -// project attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// ProjectAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func ProjectAttributeValue(attr string) (string, error) { - return Get("project/attributes/" + attr) -} - -// Scopes returns the service account scopes for the given account. -// The account may be empty or the string "default" to use the instance's -// main account. -func Scopes(serviceAccount string) ([]string, error) { - if serviceAccount == "" { - serviceAccount = "default" - } - return lines("instance/service-accounts/" + serviceAccount + "/scopes") -} - -func strsContains(ss []string, s string) bool { - for _, v := range ss { - if v == s { - return true - } - } - return false -} diff --git a/vendor/google.golang.org/cloud/internal/cloud.go b/vendor/google.golang.org/cloud/internal/cloud.go deleted file mode 100644 index 59428803d..000000000 --- a/vendor/google.golang.org/cloud/internal/cloud.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package internal provides support for the cloud packages. -// -// Users should not import this package directly. 
-package internal - -import ( - "fmt" - "net/http" - "sync" - - "golang.org/x/net/context" -) - -type contextKey struct{} - -func WithContext(parent context.Context, projID string, c *http.Client) context.Context { - if c == nil { - panic("nil *http.Client passed to WithContext") - } - if projID == "" { - panic("empty project ID passed to WithContext") - } - return context.WithValue(parent, contextKey{}, &cloudContext{ - ProjectID: projID, - HTTPClient: c, - }) -} - -const userAgent = "gcloud-golang/0.1" - -type cloudContext struct { - ProjectID string - HTTPClient *http.Client - - mu sync.Mutex // guards svc - svc map[string]interface{} // e.g. "storage" => *rawStorage.Service -} - -// Service returns the result of the fill function if it's never been -// called before for the given name (which is assumed to be an API -// service name, like "datastore"). If it has already been cached, the fill -// func is not run. -// It's safe for concurrent use by multiple goroutines. -func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} { - return cc(ctx).service(name, fill) -} - -func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} { - c.mu.Lock() - defer c.mu.Unlock() - - if c.svc == nil { - c.svc = make(map[string]interface{}) - } else if v, ok := c.svc[name]; ok { - return v - } - v := fill(c.HTTPClient) - c.svc[name] = v - return v -} - -// Transport is an http.RoundTripper that appends -// Google Cloud client's user-agent to the original -// request's user-agent header. -type Transport struct { - // Base is the actual http.RoundTripper - // requests will use. It must not be nil. - Base http.RoundTripper -} - -// RoundTrip appends a user-agent to the existing user-agent -// header and delegates the request to the base http.RoundTripper. -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - req = cloneRequest(req) - ua := req.Header.Get("User-Agent") - if ua == "" { - ua = userAgent - } else { - ua = fmt.Sprintf("%s %s", ua, userAgent) - } - req.Header.Set("User-Agent", ua) - return t.Base.RoundTrip(req) -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return r2 -} - -func ProjID(ctx context.Context) string { - return cc(ctx).ProjectID -} - -func HTTPClient(ctx context.Context) *http.Client { - return cc(ctx).HTTPClient -} - -// cc returns the internal *cloudContext (cc) state for a context.Context. -// It panics if the user did it wrong. -func cc(ctx context.Context) *cloudContext { - if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok { - return c - } - panic("invalid context.Context type; it should be created with cloud.NewContext") -} diff --git a/vendor/k8s.io/apimachinery/LICENSE b/vendor/k8s.io/apimachinery/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/k8s.io/apimachinery/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS new file mode 100755 index 000000000..ae828ad25 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS @@ -0,0 +1,27 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- nikhiljindal +- gmarek +- erictune +- saad-ali +- janetkuo +- timstclair +- eparis +- timothysc +- dims +- spxtr +- hongchaodeng +- krousey +- satnam6502 +- cjcullen +- david-mcmahon +- goltermann diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go new file mode 100644 index 000000000..58751ed0e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package errors provides detailed error types for api field validation. +package errors diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go new file mode 100644 index 000000000..560c889b9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -0,0 +1,478 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// HTTP Status codes not in the golang http package. +const ( + StatusUnprocessableEntity = 422 + StatusTooManyRequests = 429 + // StatusServerTimeout is an indication that a transient server error has + // occurred and the client *should* retry, with an optional Retry-After + // header to specify the back off window. + StatusServerTimeout = 504 +) + +// StatusError is an error intended for consumption by a REST API server; it can also be +// reconstructed by clients from a REST response. Public to allow easy type switches. +type StatusError struct { + ErrStatus metav1.Status +} + +// APIStatus is exposed by errors that can be converted to an api.Status object +// for finer grained details. +type APIStatus interface { + Status() metav1.Status +} + +var _ error = &StatusError{} + +// Error implements the Error interface. 
+func (e *StatusError) Error() string { + return e.ErrStatus.Message +} + +// Status allows access to e's status without having to know the detailed workings +// of StatusError. +func (e *StatusError) Status() metav1.Status { + return e.ErrStatus +} + +// DebugError reports extended info about the error to debug output. +func (e *StatusError) DebugError() (string, []interface{}) { + if out, err := json.MarshalIndent(e.ErrStatus, "", " "); err == nil { + return "server response object: %s", []interface{}{string(out)} + } + return "server response object: %#v", []interface{}{e.ErrStatus} +} + +// UnexpectedObjectError can be returned by FromObject if it's passed a non-status object. +type UnexpectedObjectError struct { + Object runtime.Object +} + +// Error returns an error message describing 'u'. +func (u *UnexpectedObjectError) Error() string { + return fmt.Sprintf("unexpected object: %v", u.Object) +} + +// FromObject generates an StatusError from an metav1.Status, if that is the type of obj; otherwise, +// returns an UnexpecteObjectError. +func FromObject(obj runtime.Object) error { + switch t := obj.(type) { + case *metav1.Status: + return &StatusError{*t} + } + return &UnexpectedObjectError{obj} +} + +// NewNotFound returns a new error which indicates that the resource of the kind and the name was not found. +func NewNotFound(qualifiedResource schema.GroupResource, name string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusNotFound, + Reason: metav1.StatusReasonNotFound, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + }, + Message: fmt.Sprintf("%s %q not found", qualifiedResource.String(), name), + }} +} + +// NewAlreadyExists returns an error indicating the item requested exists by that identifier. +func NewAlreadyExists(qualifiedResource schema.GroupResource, name string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusConflict, + Reason: metav1.StatusReasonAlreadyExists, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + }, + Message: fmt.Sprintf("%s %q already exists", qualifiedResource.String(), name), + }} +} + +// NewUnauthorized returns an error indicating the client is not authorized to perform the requested +// action. +func NewUnauthorized(reason string) *StatusError { + message := reason + if len(message) == 0 { + message = "not authorized" + } + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusUnauthorized, + Reason: metav1.StatusReasonUnauthorized, + Message: message, + }} +} + +// NewForbidden returns an error indicating the requested action was forbidden +func NewForbidden(qualifiedResource schema.GroupResource, name string, err error) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusForbidden, + Reason: metav1.StatusReasonForbidden, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + }, + Message: fmt.Sprintf("%s %q is forbidden: %v", qualifiedResource.String(), name, err), + }} +} + +// NewConflict returns an error indicating the item can't be updated as provided. 
+func NewConflict(qualifiedResource schema.GroupResource, name string, err error) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusConflict, + Reason: metav1.StatusReasonConflict, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + }, + Message: fmt.Sprintf("Operation cannot be fulfilled on %s %q: %v", qualifiedResource.String(), name, err), + }} +} + +// NewGone returns an error indicating the item no longer available at the server and no forwarding address is known. +func NewGone(message string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusGone, + Reason: metav1.StatusReasonGone, + Message: message, + }} +} + +// NewInvalid returns an error indicating the item is invalid and cannot be processed. +func NewInvalid(qualifiedKind schema.GroupKind, name string, errs field.ErrorList) *StatusError { + causes := make([]metav1.StatusCause, 0, len(errs)) + for i := range errs { + err := errs[i] + causes = append(causes, metav1.StatusCause{ + Type: metav1.CauseType(err.Type), + Message: err.ErrorBody(), + Field: err.Field, + }) + } + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: StatusUnprocessableEntity, // RFC 4918: StatusUnprocessableEntity + Reason: metav1.StatusReasonInvalid, + Details: &metav1.StatusDetails{ + Group: qualifiedKind.Group, + Kind: qualifiedKind.Kind, + Name: name, + Causes: causes, + }, + Message: fmt.Sprintf("%s %q is invalid: %v", qualifiedKind.String(), name, errs.ToAggregate()), + }} +} + +// NewBadRequest creates an error that indicates that the request is invalid and can not be processed. +func NewBadRequest(reason string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusBadRequest, + Reason: metav1.StatusReasonBadRequest, + Message: reason, + }} +} + +// NewServiceUnavailable creates an error that indicates that the requested service is unavailable. +func NewServiceUnavailable(reason string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusServiceUnavailable, + Reason: metav1.StatusReasonServiceUnavailable, + Message: reason, + }} +} + +// NewMethodNotSupported returns an error indicating the requested action is not supported on this kind. +func NewMethodNotSupported(qualifiedResource schema.GroupResource, action string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusMethodNotAllowed, + Reason: metav1.StatusReasonMethodNotAllowed, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + }, + Message: fmt.Sprintf("%s is not supported on resources of kind %q", action, qualifiedResource.String()), + }} +} + +// NewServerTimeout returns an error indicating the requested action could not be completed due to a +// transient error, and the client should try again. 
+func NewServerTimeout(qualifiedResource schema.GroupResource, operation string, retryAfterSeconds int) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusInternalServerError, + Reason: metav1.StatusReasonServerTimeout, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: operation, + RetryAfterSeconds: int32(retryAfterSeconds), + }, + Message: fmt.Sprintf("The %s operation against %s could not be completed at this time, please try again.", operation, qualifiedResource.String()), + }} +} + +// NewServerTimeoutForKind should not exist. Server timeouts happen when accessing resources, the Kind is just what we +// happened to be looking at when the request failed. This delegates to keep code sane, but we should work towards removing this. +func NewServerTimeoutForKind(qualifiedKind schema.GroupKind, operation string, retryAfterSeconds int) *StatusError { + return NewServerTimeout(schema.GroupResource{Group: qualifiedKind.Group, Resource: qualifiedKind.Kind}, operation, retryAfterSeconds) +} + +// NewInternalError returns an error indicating the item is invalid and cannot be processed. +func NewInternalError(err error) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusInternalServerError, + Reason: metav1.StatusReasonInternalError, + Details: &metav1.StatusDetails{ + Causes: []metav1.StatusCause{{Message: err.Error()}}, + }, + Message: fmt.Sprintf("Internal error occurred: %v", err), + }} +} + +// NewTimeoutError returns an error indicating that a timeout occurred before the request +// could be completed. Clients may retry, but the operation may still complete. +func NewTimeoutError(message string, retryAfterSeconds int) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: StatusServerTimeout, + Reason: metav1.StatusReasonTimeout, + Message: fmt.Sprintf("Timeout: %s", message), + Details: &metav1.StatusDetails{ + RetryAfterSeconds: int32(retryAfterSeconds), + }, + }} +} + +// NewGenericServerResponse returns a new error for server responses that are not in a recognizable form. +func NewGenericServerResponse(code int, verb string, qualifiedResource schema.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError { + reason := metav1.StatusReasonUnknown + message := fmt.Sprintf("the server responded with the status code %d but did not return more information", code) + switch code { + case http.StatusConflict: + if verb == "POST" { + reason = metav1.StatusReasonAlreadyExists + } else { + reason = metav1.StatusReasonConflict + } + message = "the server reported a conflict" + case http.StatusNotFound: + reason = metav1.StatusReasonNotFound + message = "the server could not find the requested resource" + case http.StatusBadRequest: + reason = metav1.StatusReasonBadRequest + message = "the server rejected our request for an unknown reason" + case http.StatusUnauthorized: + reason = metav1.StatusReasonUnauthorized + message = "the server has asked for the client to provide credentials" + case http.StatusForbidden: + reason = metav1.StatusReasonForbidden + // the server message has details about who is trying to perform what action. Keep its message. 
+ message = serverMessage + case http.StatusMethodNotAllowed: + reason = metav1.StatusReasonMethodNotAllowed + message = "the server does not allow this method on the requested resource" + case StatusUnprocessableEntity: + reason = metav1.StatusReasonInvalid + message = "the server rejected our request due to an error in our request" + case StatusServerTimeout: + reason = metav1.StatusReasonServerTimeout + message = "the server cannot complete the requested operation at this time, try again later" + case StatusTooManyRequests: + reason = metav1.StatusReasonTimeout + message = "the server has received too many requests and has asked us to try again later" + default: + if code >= 500 { + reason = metav1.StatusReasonInternalError + message = fmt.Sprintf("an error on the server (%q) has prevented the request from succeeding", serverMessage) + } + } + switch { + case !qualifiedResource.Empty() && len(name) > 0: + message = fmt.Sprintf("%s (%s %s %s)", message, strings.ToLower(verb), qualifiedResource.String(), name) + case !qualifiedResource.Empty(): + message = fmt.Sprintf("%s (%s %s)", message, strings.ToLower(verb), qualifiedResource.String()) + } + var causes []metav1.StatusCause + if isUnexpectedResponse { + causes = []metav1.StatusCause{ + { + Type: metav1.CauseTypeUnexpectedServerResponse, + Message: serverMessage, + }, + } + } else { + causes = nil + } + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: int32(code), + Reason: reason, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + + Causes: causes, + RetryAfterSeconds: int32(retryAfterSeconds), + }, + Message: message, + }} +} + +// IsNotFound returns true if the specified error was created by NewNotFound. +func IsNotFound(err error) bool { + return reasonForError(err) == metav1.StatusReasonNotFound +} + +// IsAlreadyExists determines if the err is an error which indicates that a specified resource already exists. +func IsAlreadyExists(err error) bool { + return reasonForError(err) == metav1.StatusReasonAlreadyExists +} + +// IsConflict determines if the err is an error which indicates the provided update conflicts. +func IsConflict(err error) bool { + return reasonForError(err) == metav1.StatusReasonConflict +} + +// IsInvalid determines if the err is an error which indicates the provided resource is not valid. +func IsInvalid(err error) bool { + return reasonForError(err) == metav1.StatusReasonInvalid +} + +// IsMethodNotSupported determines if the err is an error which indicates the provided action could not +// be performed because it is not supported by the server. +func IsMethodNotSupported(err error) bool { + return reasonForError(err) == metav1.StatusReasonMethodNotAllowed +} + +// IsBadRequest determines if err is an error which indicates that the request is invalid. +func IsBadRequest(err error) bool { + return reasonForError(err) == metav1.StatusReasonBadRequest +} + +// IsUnauthorized determines if err is an error which indicates that the request is unauthorized and +// requires authentication by the user. +func IsUnauthorized(err error) bool { + return reasonForError(err) == metav1.StatusReasonUnauthorized +} + +// IsForbidden determines if err is an error which indicates that the request is forbidden and cannot +// be completed as requested. 
+func IsForbidden(err error) bool { + return reasonForError(err) == metav1.StatusReasonForbidden +} + +// IsTimeout determines if err is an error which indicates that request times out due to long +// processing. +func IsTimeout(err error) bool { + return reasonForError(err) == metav1.StatusReasonTimeout +} + +// IsServerTimeout determines if err is an error which indicates that the request needs to be retried +// by the client. +func IsServerTimeout(err error) bool { + return reasonForError(err) == metav1.StatusReasonServerTimeout +} + +// IsInternalError determines if err is an error which indicates an internal server error. +func IsInternalError(err error) bool { + return reasonForError(err) == metav1.StatusReasonInternalError +} + +// IsTooManyRequests determines if err is an error which indicates that there are too many requests +// that the server cannot handle. +// TODO: update IsTooManyRequests() when the TooManyRequests(429) error returned from the API server has a non-empty Reason field +func IsTooManyRequests(err error) bool { + switch t := err.(type) { + case APIStatus: + return t.Status().Code == StatusTooManyRequests + } + return false +} + +// IsUnexpectedServerError returns true if the server response was not in the expected API format, +// and may be the result of another HTTP actor. +func IsUnexpectedServerError(err error) bool { + switch t := err.(type) { + case APIStatus: + if d := t.Status().Details; d != nil { + for _, cause := range d.Causes { + if cause.Type == metav1.CauseTypeUnexpectedServerResponse { + return true + } + } + } + } + return false +} + +// IsUnexpectedObjectError determines if err is due to an unexpected object from the master. +func IsUnexpectedObjectError(err error) bool { + _, ok := err.(*UnexpectedObjectError) + return err != nil && ok +} + +// SuggestsClientDelay returns true if this error suggests a client delay as well as the +// suggested seconds to wait, or false if the error does not imply a wait. +func SuggestsClientDelay(err error) (int, bool) { + switch t := err.(type) { + case APIStatus: + if t.Status().Details != nil { + switch t.Status().Reason { + case metav1.StatusReasonServerTimeout, metav1.StatusReasonTimeout: + return int(t.Status().Details.RetryAfterSeconds), true + } + } + } + return 0, false +} + +func reasonForError(err error) metav1.StatusReason { + switch t := err.(type) { + case APIStatus: + return t.Status().Reason + } + return metav1.StatusReasonUnknown +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS new file mode 100755 index 000000000..6044c031e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS @@ -0,0 +1,26 @@ +reviewers: +- thockin +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- nikhiljindal +- gmarek +- kargakis +- janetkuo +- ncdc +- eparis +- dims +- krousey +- markturansky +- fabioy +- resouer +- david-mcmahon +- mfojtik +- jianhuiz +- feihujiang +- ghodss diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/default.go b/vendor/k8s.io/apimachinery/pkg/api/meta/default.go new file mode 100644 index 000000000..5ea906a2a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/default.go @@ -0,0 +1,51 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +// NewDefaultRESTMapperFromScheme instantiates a DefaultRESTMapper based on types registered in the given scheme. +func NewDefaultRESTMapperFromScheme(defaultGroupVersions []schema.GroupVersion, interfacesFunc VersionInterfacesFunc, + importPathPrefix string, ignoredKinds, rootScoped sets.String, scheme *runtime.Scheme) *DefaultRESTMapper { + + mapper := NewDefaultRESTMapper(defaultGroupVersions, interfacesFunc) + // enumerate all supported versions, get the kinds, and register with the mapper how to address + // our resources. + for _, gv := range defaultGroupVersions { + for kind, oType := range scheme.KnownTypes(gv) { + gvk := gv.WithKind(kind) + // TODO: Remove import path check. + // We check the import path because we currently stuff both "api" and "extensions" objects + // into the same group within Scheme since Scheme has no notion of groups yet. + if !strings.Contains(oType.PkgPath(), importPathPrefix) || ignoredKinds.Has(kind) { + continue + } + scope := RESTScopeNamespace + if rootScoped.Has(kind) { + scope = RESTScopeRoot + } + mapper.Add(gvk, scope) + } + } + return mapper +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go b/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go new file mode 100644 index 000000000..a3b18a5c9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package meta provides functions for retrieving API metadata from objects +// belonging to the Kubernetes API +package meta diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go b/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go new file mode 100644 index 000000000..1503bd6d8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go @@ -0,0 +1,105 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package meta + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// AmbiguousResourceError is returned if the RESTMapper finds multiple matches for a resource +type AmbiguousResourceError struct { + PartialResource schema.GroupVersionResource + + MatchingResources []schema.GroupVersionResource + MatchingKinds []schema.GroupVersionKind +} + +func (e *AmbiguousResourceError) Error() string { + switch { + case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0: + return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialResource, e.MatchingResources, e.MatchingKinds) + case len(e.MatchingKinds) > 0: + return fmt.Sprintf("%v matches multiple kinds %v", e.PartialResource, e.MatchingKinds) + case len(e.MatchingResources) > 0: + return fmt.Sprintf("%v matches multiple resources %v", e.PartialResource, e.MatchingResources) + } + return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialResource) +} + +// AmbiguousKindError is returned if the RESTMapper finds multiple matches for a kind +type AmbiguousKindError struct { + PartialKind schema.GroupVersionKind + + MatchingResources []schema.GroupVersionResource + MatchingKinds []schema.GroupVersionKind +} + +func (e *AmbiguousKindError) Error() string { + switch { + case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0: + return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialKind, e.MatchingResources, e.MatchingKinds) + case len(e.MatchingKinds) > 0: + return fmt.Sprintf("%v matches multiple kinds %v", e.PartialKind, e.MatchingKinds) + case len(e.MatchingResources) > 0: + return fmt.Sprintf("%v matches multiple resources %v", e.PartialKind, e.MatchingResources) + } + return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialKind) +} + +func IsAmbiguousError(err error) bool { + if err == nil { + return false + } + switch err.(type) { + case *AmbiguousResourceError, *AmbiguousKindError: + return true + default: + return false + } +} + +// NoResourceMatchError is returned if the RESTMapper can't find any match for a resource +type NoResourceMatchError struct { + PartialResource schema.GroupVersionResource +} + +func (e *NoResourceMatchError) Error() string { + return fmt.Sprintf("no matches for %v", e.PartialResource) +} + +// NoKindMatchError is returned if the RESTMapper can't find any match for a kind +type NoKindMatchError struct { + PartialKind schema.GroupVersionKind +} + +func (e *NoKindMatchError) Error() string { + return fmt.Sprintf("no matches for %v", e.PartialKind) +} + +func IsNoMatchError(err error) bool { + if err == nil { + return false + } + switch err.(type) { + case *NoResourceMatchError, *NoKindMatchError: + return true + default: + return false + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go new file mode 100644 index 000000000..fd2210022 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go @@ -0,0 +1,97 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" +) + +// FirstHitRESTMapper is a wrapper for multiple RESTMappers which returns the +// first successful result for the singular requests +type FirstHitRESTMapper struct { + MultiRESTMapper +} + +func (m FirstHitRESTMapper) String() string { + return fmt.Sprintf("FirstHitRESTMapper{\n\t%v\n}", m.MultiRESTMapper) +} + +func (m FirstHitRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { + errors := []error{} + for _, t := range m.MultiRESTMapper { + ret, err := t.ResourceFor(resource) + if err == nil { + return ret, nil + } + errors = append(errors, err) + } + + return schema.GroupVersionResource{}, collapseAggregateErrors(errors) +} + +func (m FirstHitRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + errors := []error{} + for _, t := range m.MultiRESTMapper { + ret, err := t.KindFor(resource) + if err == nil { + return ret, nil + } + errors = append(errors, err) + } + + return schema.GroupVersionKind{}, collapseAggregateErrors(errors) +} + +// RESTMapping provides the REST mapping for the resource based on the +// kind and version. This implementation supports multiple REST schemas and +// return the first match. +func (m FirstHitRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { + errors := []error{} + for _, t := range m.MultiRESTMapper { + ret, err := t.RESTMapping(gk, versions...) + if err == nil { + return ret, nil + } + errors = append(errors, err) + } + + return nil, collapseAggregateErrors(errors) +} + +// collapseAggregateErrors returns the minimal errors. it handles empty as nil, handles one item in a list +// by returning the item, and collapses all NoMatchErrors to a single one (since they should all be the same) +func collapseAggregateErrors(errors []error) error { + if len(errors) == 0 { + return nil + } + if len(errors) == 1 { + return errors[0] + } + + allNoMatchErrors := true + for _, err := range errors { + allNoMatchErrors = allNoMatchErrors && IsNoMatchError(err) + } + if allNoMatchErrors { + return errors[0] + } + + return utilerrors.NewAggregate(errors) +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go new file mode 100644 index 000000000..930441fa6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go @@ -0,0 +1,199 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" +) + +// IsListType returns true if the provided Object has a slice called Items +func IsListType(obj runtime.Object) bool { + // if we're a runtime.Unstructured, check whether this is a list. 
+ // TODO: refactor GetItemsPtr to use an interface that returns []runtime.Object + if unstructured, ok := obj.(runtime.Unstructured); ok { + return unstructured.IsList() + } + + _, err := GetItemsPtr(obj) + return err == nil +} + +// GetItemsPtr returns a pointer to the list object's Items member. +// If 'list' doesn't have an Items member, it's not really a list type +// and an error will be returned. +// This function will either return a pointer to a slice, or an error, but not both. +func GetItemsPtr(list runtime.Object) (interface{}, error) { + v, err := conversion.EnforcePtr(list) + if err != nil { + return nil, err + } + + items := v.FieldByName("Items") + if !items.IsValid() { + return nil, fmt.Errorf("no Items field in %#v", list) + } + switch items.Kind() { + case reflect.Interface, reflect.Ptr: + target := reflect.TypeOf(items.Interface()).Elem() + if target.Kind() != reflect.Slice { + return nil, fmt.Errorf("items: Expected slice, got %s", target.Kind()) + } + return items.Interface(), nil + case reflect.Slice: + return items.Addr().Interface(), nil + default: + return nil, fmt.Errorf("items: Expected slice, got %s", items.Kind()) + } +} + +// EachListItem invokes fn on each runtime.Object in the list. Any error immediately terminates +// the loop. +func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error { + // TODO: Change to an interface call? + itemsPtr, err := GetItemsPtr(obj) + if err != nil { + return err + } + items, err := conversion.EnforcePtr(itemsPtr) + if err != nil { + return err + } + len := items.Len() + if len == 0 { + return nil + } + takeAddr := false + if elemType := items.Type().Elem(); elemType.Kind() != reflect.Ptr && elemType.Kind() != reflect.Interface { + if !items.Index(0).CanAddr() { + return fmt.Errorf("unable to take address of items in %T for EachListItem", obj) + } + takeAddr = true + } + + for i := 0; i < len; i++ { + raw := items.Index(i) + if takeAddr { + raw = raw.Addr() + } + switch item := raw.Interface().(type) { + case *runtime.RawExtension: + if err := fn(item.Object); err != nil { + return err + } + case runtime.Object: + if err := fn(item); err != nil { + return err + } + default: + obj, ok := item.(runtime.Object) + if !ok { + return fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind()) + } + if err := fn(obj); err != nil { + return err + } + } + } + return nil +} + +// ExtractList returns obj's Items element as an array of runtime.Objects. +// Returns an error if obj is not a List type (does not have an Items member). +func ExtractList(obj runtime.Object) ([]runtime.Object, error) { + itemsPtr, err := GetItemsPtr(obj) + if err != nil { + return nil, err + } + items, err := conversion.EnforcePtr(itemsPtr) + if err != nil { + return nil, err + } + list := make([]runtime.Object, items.Len()) + for i := range list { + raw := items.Index(i) + switch item := raw.Interface().(type) { + case runtime.RawExtension: + switch { + case item.Object != nil: + list[i] = item.Object + case item.Raw != nil: + // TODO: Set ContentEncoding and ContentType correctly. 
+ list[i] = &runtime.Unknown{Raw: item.Raw} + default: + list[i] = nil + } + case runtime.Object: + list[i] = item + default: + var found bool + if list[i], found = raw.Addr().Interface().(runtime.Object); !found { + return nil, fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind()) + } + } + } + return list, nil +} + +// objectSliceType is the type of a slice of Objects +var objectSliceType = reflect.TypeOf([]runtime.Object{}) + +// SetList sets the given list object's Items member have the elements given in +// objects. +// Returns an error if list is not a List type (does not have an Items member), +// or if any of the objects are not of the right type. +func SetList(list runtime.Object, objects []runtime.Object) error { + itemsPtr, err := GetItemsPtr(list) + if err != nil { + return err + } + items, err := conversion.EnforcePtr(itemsPtr) + if err != nil { + return err + } + if items.Type() == objectSliceType { + items.Set(reflect.ValueOf(objects)) + return nil + } + slice := reflect.MakeSlice(items.Type(), len(objects), len(objects)) + for i := range objects { + dest := slice.Index(i) + + // check to see if you're directly assignable + if reflect.TypeOf(objects[i]).AssignableTo(dest.Type()) { + dest.Set(reflect.ValueOf(objects[i])) + continue + } + + src, err := conversion.EnforcePtr(objects[i]) + if err != nil { + return err + } + if src.Type().AssignableTo(dest.Type()) { + dest.Set(src) + } else if src.Type().ConvertibleTo(dest.Type()) { + dest.Set(src.Convert(dest.Type())) + } else { + return fmt.Errorf("item[%d]: can't assign or convert %v into %v", i, src.Type(), dest.Type()) + } + } + items.Set(slice) + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go new file mode 100644 index 000000000..524b1ebd8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go @@ -0,0 +1,147 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +// VersionInterfaces contains the interfaces one should use for dealing with types of a particular version. +type VersionInterfaces struct { + runtime.ObjectConvertor + MetadataAccessor +} + +type ListMetaAccessor interface { + GetListMeta() List +} + +// List lets you work with list metadata from any of the versioned or +// internal API objects. Attempting to set or retrieve a field on an object that does +// not support that field will be a no-op and return a default value. +type List metav1.List + +// Type exposes the type and APIVersion of versioned or internal API objects. +type Type metav1.Type + +// MetadataAccessor lets you work with object and list metadata from any of the versioned or +// internal API objects. 
Attempting to set or retrieve a field on an object that does +// not support that field (Name, UID, Namespace on lists) will be a no-op and return +// a default value. +// +// MetadataAccessor exposes Interface in a way that can be used with multiple objects. +type MetadataAccessor interface { + APIVersion(obj runtime.Object) (string, error) + SetAPIVersion(obj runtime.Object, version string) error + + Kind(obj runtime.Object) (string, error) + SetKind(obj runtime.Object, kind string) error + + Namespace(obj runtime.Object) (string, error) + SetNamespace(obj runtime.Object, namespace string) error + + Name(obj runtime.Object) (string, error) + SetName(obj runtime.Object, name string) error + + GenerateName(obj runtime.Object) (string, error) + SetGenerateName(obj runtime.Object, name string) error + + UID(obj runtime.Object) (types.UID, error) + SetUID(obj runtime.Object, uid types.UID) error + + SelfLink(obj runtime.Object) (string, error) + SetSelfLink(obj runtime.Object, selfLink string) error + + Labels(obj runtime.Object) (map[string]string, error) + SetLabels(obj runtime.Object, labels map[string]string) error + + Annotations(obj runtime.Object) (map[string]string, error) + SetAnnotations(obj runtime.Object, annotations map[string]string) error + + runtime.ResourceVersioner +} + +type RESTScopeName string + +const ( + RESTScopeNameNamespace RESTScopeName = "namespace" + RESTScopeNameRoot RESTScopeName = "root" +) + +// RESTScope contains the information needed to deal with REST resources that are in a resource hierarchy +type RESTScope interface { + // Name of the scope + Name() RESTScopeName + // ParamName is the optional name of the parameter that should be inserted in the resource url + // If empty, no param will be inserted + ParamName() string + // ArgumentName is the optional name that should be used for the variable holding the value. + ArgumentName() string + // ParamDescription is the optional description to use to document the parameter in api documentation + ParamDescription() string +} + +// RESTMapping contains the information needed to deal with objects of a specific +// resource and kind in a RESTful manner. +type RESTMapping struct { + // Resource is a string representing the name of this resource as a REST client would see it + Resource string + + GroupVersionKind schema.GroupVersionKind + + // Scope contains the information needed to deal with REST Resources that are in a resource hierarchy + Scope RESTScope + + runtime.ObjectConvertor + MetadataAccessor +} + +// RESTMapper allows clients to map resources to kind, and map kind and version +// to interfaces for manipulating those objects. It is primarily intended for +// consumers of Kubernetes compatible REST APIs as defined in docs/devel/api-conventions.md. +// +// The Kubernetes API provides versioned resources and object kinds which are scoped +// to API groups. In other words, kinds and resources should not be assumed to be +// unique across groups. +// +// TODO: split into sub-interfaces +type RESTMapper interface { + // KindFor takes a partial resource and returns the single match. Returns an error if there are multiple matches + KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) + + // KindsFor takes a partial resource and returns the list of potential kinds in priority order + KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) + + // ResourceFor takes a partial resource and returns the single match. 
Returns an error if there are multiple matches + ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) + + // ResourcesFor takes a partial resource and returns the list of potential resource in priority order + ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) + + // RESTMapping identifies a preferred resource mapping for the provided group kind. + RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) + // RESTMappings returns all resource mappings for the provided group kind if no + // version search is provided. Otherwise identifies a preferred resource mapping for + // the provided version(s). + RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) + + AliasesForResource(resource string) ([]string, bool) + ResourceSingularizer(resource string) (singular string, err error) +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go new file mode 100644 index 000000000..7492324ed --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -0,0 +1,581 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "fmt" + "reflect" + + "github.com/golang/glog" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +// errNotList is returned when an object implements the Object style interfaces but not the List style +// interfaces. +var errNotList = fmt.Errorf("object does not implement the List interfaces") + +// ListAccessor returns a List interface for the provided object or an error if the object does +// not provide List. +// IMPORTANT: Objects are a superset of lists, so all Objects return List metadata. Do not use this +// check to determine whether an object *is* a List. +// TODO: return bool instead of error +func ListAccessor(obj interface{}) (List, error) { + switch t := obj.(type) { + case List: + return t, nil + case metav1.List: + return t, nil + case ListMetaAccessor: + if m := t.GetListMeta(); m != nil { + return m, nil + } + return nil, errNotList + case metav1.ListMetaAccessor: + if m := t.GetListMeta(); m != nil { + return m, nil + } + return nil, errNotList + case metav1.Object: + return t, nil + case metav1.ObjectMetaAccessor: + if m := t.GetObjectMeta(); m != nil { + return m, nil + } + return nil, errNotList + default: + return nil, errNotList + } +} + +// errNotObject is returned when an object implements the List style interfaces but not the Object style +// interfaces. +var errNotObject = fmt.Errorf("object does not implement the Object interfaces") + +// Accessor takes an arbitrary object pointer and returns meta.Interface. +// obj must be a pointer to an API type. An error is returned if the minimum +// required fields are missing. 
Fields that are not required return the default +// value and are a no-op if set. +// TODO: return bool instead of error +func Accessor(obj interface{}) (metav1.Object, error) { + switch t := obj.(type) { + case metav1.Object: + return t, nil + case metav1.ObjectMetaAccessor: + if m := t.GetObjectMeta(); m != nil { + return m, nil + } + return nil, errNotObject + case List, metav1.List, ListMetaAccessor, metav1.ListMetaAccessor: + return nil, errNotObject + default: + return nil, errNotObject + } +} + +// TypeAccessor returns an interface that allows retrieving and modifying the APIVersion +// and Kind of an in-memory internal object. +// TODO: this interface is used to test code that does not have ObjectMeta or ListMeta +// in round tripping (objects which can use apiVersion/kind, but do not fit the Kube +// api conventions). +func TypeAccessor(obj interface{}) (Type, error) { + if typed, ok := obj.(runtime.Object); ok { + return objectAccessor{typed}, nil + } + v, err := conversion.EnforcePtr(obj) + if err != nil { + return nil, err + } + t := v.Type() + if v.Kind() != reflect.Struct { + return nil, fmt.Errorf("expected struct, but got %v: %v (%#v)", v.Kind(), t, v.Interface()) + } + + typeMeta := v.FieldByName("TypeMeta") + if !typeMeta.IsValid() { + return nil, fmt.Errorf("struct %v lacks embedded TypeMeta type", t) + } + a := &genericAccessor{} + if err := extractFromTypeMeta(typeMeta, a); err != nil { + return nil, fmt.Errorf("unable to find type fields on %#v: %v", typeMeta, err) + } + return a, nil +} + +type objectAccessor struct { + runtime.Object +} + +func (obj objectAccessor) GetKind() string { + return obj.GetObjectKind().GroupVersionKind().Kind +} + +func (obj objectAccessor) SetKind(kind string) { + gvk := obj.GetObjectKind().GroupVersionKind() + gvk.Kind = kind + obj.GetObjectKind().SetGroupVersionKind(gvk) +} + +func (obj objectAccessor) GetAPIVersion() string { + return obj.GetObjectKind().GroupVersionKind().GroupVersion().String() +} + +func (obj objectAccessor) SetAPIVersion(version string) { + gvk := obj.GetObjectKind().GroupVersionKind() + gv, err := schema.ParseGroupVersion(version) + if err != nil { + gv = schema.GroupVersion{Version: version} + } + gvk.Group, gvk.Version = gv.Group, gv.Version + obj.GetObjectKind().SetGroupVersionKind(gvk) +} + +// NewAccessor returns a MetadataAccessor that can retrieve +// or manipulate resource version on objects derived from core API +// metadata concepts. +func NewAccessor() MetadataAccessor { + return resourceAccessor{} +} + +// resourceAccessor implements ResourceVersioner and SelfLinker. 
+type resourceAccessor struct{} + +func (resourceAccessor) Kind(obj runtime.Object) (string, error) { + return objectAccessor{obj}.GetKind(), nil +} + +func (resourceAccessor) SetKind(obj runtime.Object, kind string) error { + objectAccessor{obj}.SetKind(kind) + return nil +} + +func (resourceAccessor) APIVersion(obj runtime.Object) (string, error) { + return objectAccessor{obj}.GetAPIVersion(), nil +} + +func (resourceAccessor) SetAPIVersion(obj runtime.Object, version string) error { + objectAccessor{obj}.SetAPIVersion(version) + return nil +} + +func (resourceAccessor) Namespace(obj runtime.Object) (string, error) { + accessor, err := Accessor(obj) + if err != nil { + return "", err + } + return accessor.GetNamespace(), nil +} + +func (resourceAccessor) SetNamespace(obj runtime.Object, namespace string) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetNamespace(namespace) + return nil +} + +func (resourceAccessor) Name(obj runtime.Object) (string, error) { + accessor, err := Accessor(obj) + if err != nil { + return "", err + } + return accessor.GetName(), nil +} + +func (resourceAccessor) SetName(obj runtime.Object, name string) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetName(name) + return nil +} + +func (resourceAccessor) GenerateName(obj runtime.Object) (string, error) { + accessor, err := Accessor(obj) + if err != nil { + return "", err + } + return accessor.GetGenerateName(), nil +} + +func (resourceAccessor) SetGenerateName(obj runtime.Object, name string) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetGenerateName(name) + return nil +} + +func (resourceAccessor) UID(obj runtime.Object) (types.UID, error) { + accessor, err := Accessor(obj) + if err != nil { + return "", err + } + return accessor.GetUID(), nil +} + +func (resourceAccessor) SetUID(obj runtime.Object, uid types.UID) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetUID(uid) + return nil +} + +func (resourceAccessor) SelfLink(obj runtime.Object) (string, error) { + accessor, err := ListAccessor(obj) + if err != nil { + return "", err + } + return accessor.GetSelfLink(), nil +} + +func (resourceAccessor) SetSelfLink(obj runtime.Object, selfLink string) error { + accessor, err := ListAccessor(obj) + if err != nil { + return err + } + accessor.SetSelfLink(selfLink) + return nil +} + +func (resourceAccessor) Labels(obj runtime.Object) (map[string]string, error) { + accessor, err := Accessor(obj) + if err != nil { + return nil, err + } + return accessor.GetLabels(), nil +} + +func (resourceAccessor) SetLabels(obj runtime.Object, labels map[string]string) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetLabels(labels) + return nil +} + +func (resourceAccessor) Annotations(obj runtime.Object) (map[string]string, error) { + accessor, err := Accessor(obj) + if err != nil { + return nil, err + } + return accessor.GetAnnotations(), nil +} + +func (resourceAccessor) SetAnnotations(obj runtime.Object, annotations map[string]string) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetAnnotations(annotations) + return nil +} + +func (resourceAccessor) ResourceVersion(obj runtime.Object) (string, error) { + accessor, err := ListAccessor(obj) + if err != nil { + return "", err + } + return accessor.GetResourceVersion(), nil +} + +func (resourceAccessor) SetResourceVersion(obj 
runtime.Object, version string) error { + accessor, err := ListAccessor(obj) + if err != nil { + return err + } + accessor.SetResourceVersion(version) + return nil +} + +// extractFromOwnerReference extracts v to o. v is the OwnerReferences field of an object. +func extractFromOwnerReference(v reflect.Value, o *metav1.OwnerReference) error { + if err := runtime.Field(v, "APIVersion", &o.APIVersion); err != nil { + return err + } + if err := runtime.Field(v, "Kind", &o.Kind); err != nil { + return err + } + if err := runtime.Field(v, "Name", &o.Name); err != nil { + return err + } + if err := runtime.Field(v, "UID", &o.UID); err != nil { + return err + } + var controllerPtr *bool + if err := runtime.Field(v, "Controller", &controllerPtr); err != nil { + return err + } + if controllerPtr != nil { + controller := *controllerPtr + o.Controller = &controller + } + var blockOwnerDeletionPtr *bool + if err := runtime.Field(v, "BlockOwnerDeletion", &blockOwnerDeletionPtr); err != nil { + return err + } + if blockOwnerDeletionPtr != nil { + block := *blockOwnerDeletionPtr + o.BlockOwnerDeletion = &block + } + return nil +} + +// setOwnerReference sets v to o. v is the OwnerReferences field of an object. +func setOwnerReference(v reflect.Value, o *metav1.OwnerReference) error { + if err := runtime.SetField(o.APIVersion, v, "APIVersion"); err != nil { + return err + } + if err := runtime.SetField(o.Kind, v, "Kind"); err != nil { + return err + } + if err := runtime.SetField(o.Name, v, "Name"); err != nil { + return err + } + if err := runtime.SetField(o.UID, v, "UID"); err != nil { + return err + } + if o.Controller != nil { + controller := *(o.Controller) + if err := runtime.SetField(&controller, v, "Controller"); err != nil { + return err + } + } + if o.BlockOwnerDeletion != nil { + block := *(o.BlockOwnerDeletion) + if err := runtime.SetField(&block, v, "BlockOwnerDeletion"); err != nil { + return err + } + } + return nil +} + +// genericAccessor contains pointers to strings that can modify an arbitrary +// struct and implements the Accessor interface. 
+type genericAccessor struct { + namespace *string + name *string + generateName *string + uid *types.UID + apiVersion *string + kind *string + resourceVersion *string + selfLink *string + creationTimestamp *metav1.Time + deletionTimestamp **metav1.Time + labels *map[string]string + annotations *map[string]string + ownerReferences reflect.Value + finalizers *[]string +} + +func (a genericAccessor) GetNamespace() string { + if a.namespace == nil { + return "" + } + return *a.namespace +} + +func (a genericAccessor) SetNamespace(namespace string) { + if a.namespace == nil { + return + } + *a.namespace = namespace +} + +func (a genericAccessor) GetName() string { + if a.name == nil { + return "" + } + return *a.name +} + +func (a genericAccessor) SetName(name string) { + if a.name == nil { + return + } + *a.name = name +} + +func (a genericAccessor) GetGenerateName() string { + if a.generateName == nil { + return "" + } + return *a.generateName +} + +func (a genericAccessor) SetGenerateName(generateName string) { + if a.generateName == nil { + return + } + *a.generateName = generateName +} + +func (a genericAccessor) GetUID() types.UID { + if a.uid == nil { + return "" + } + return *a.uid +} + +func (a genericAccessor) SetUID(uid types.UID) { + if a.uid == nil { + return + } + *a.uid = uid +} + +func (a genericAccessor) GetAPIVersion() string { + return *a.apiVersion +} + +func (a genericAccessor) SetAPIVersion(version string) { + *a.apiVersion = version +} + +func (a genericAccessor) GetKind() string { + return *a.kind +} + +func (a genericAccessor) SetKind(kind string) { + *a.kind = kind +} + +func (a genericAccessor) GetResourceVersion() string { + return *a.resourceVersion +} + +func (a genericAccessor) SetResourceVersion(version string) { + *a.resourceVersion = version +} + +func (a genericAccessor) GetSelfLink() string { + return *a.selfLink +} + +func (a genericAccessor) SetSelfLink(selfLink string) { + *a.selfLink = selfLink +} + +func (a genericAccessor) GetCreationTimestamp() metav1.Time { + return *a.creationTimestamp +} + +func (a genericAccessor) SetCreationTimestamp(timestamp metav1.Time) { + *a.creationTimestamp = timestamp +} + +func (a genericAccessor) GetDeletionTimestamp() *metav1.Time { + return *a.deletionTimestamp +} + +func (a genericAccessor) SetDeletionTimestamp(timestamp *metav1.Time) { + *a.deletionTimestamp = timestamp +} + +func (a genericAccessor) GetLabels() map[string]string { + if a.labels == nil { + return nil + } + return *a.labels +} + +func (a genericAccessor) SetLabels(labels map[string]string) { + *a.labels = labels +} + +func (a genericAccessor) GetAnnotations() map[string]string { + if a.annotations == nil { + return nil + } + return *a.annotations +} + +func (a genericAccessor) SetAnnotations(annotations map[string]string) { + if a.annotations == nil { + emptyAnnotations := make(map[string]string) + a.annotations = &emptyAnnotations + } + *a.annotations = annotations +} + +func (a genericAccessor) GetFinalizers() []string { + if a.finalizers == nil { + return nil + } + return *a.finalizers +} + +func (a genericAccessor) SetFinalizers(finalizers []string) { + *a.finalizers = finalizers +} + +func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { + var ret []metav1.OwnerReference + s := a.ownerReferences + if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { + glog.Errorf("expect %v to be a pointer to slice", s) + return ret + } + s = s.Elem() + // Set the capacity to one element greater to avoid copy if the caller later 
append an element. + ret = make([]metav1.OwnerReference, s.Len(), s.Len()+1) + for i := 0; i < s.Len(); i++ { + if err := extractFromOwnerReference(s.Index(i), &ret[i]); err != nil { + glog.Errorf("extractFromOwnerReference failed: %v", err) + return ret + } + } + return ret +} + +func (a genericAccessor) SetOwnerReferences(references []metav1.OwnerReference) { + s := a.ownerReferences + if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { + glog.Errorf("expect %v to be a pointer to slice", s) + } + s = s.Elem() + newReferences := reflect.MakeSlice(s.Type(), len(references), len(references)) + for i := 0; i < len(references); i++ { + if err := setOwnerReference(newReferences.Index(i), &references[i]); err != nil { + glog.Errorf("setOwnerReference failed: %v", err) + return + } + } + s.Set(newReferences) +} + +// extractFromTypeMeta extracts pointers to version and kind fields from an object +func extractFromTypeMeta(v reflect.Value, a *genericAccessor) error { + if err := runtime.FieldPtr(v, "APIVersion", &a.apiVersion); err != nil { + return err + } + if err := runtime.FieldPtr(v, "Kind", &a.kind); err != nil { + return err + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go new file mode 100644 index 000000000..d32b0dbf7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go @@ -0,0 +1,231 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" +) + +// MultiRESTMapper is a wrapper for multiple RESTMappers. +type MultiRESTMapper []RESTMapper + +func (m MultiRESTMapper) String() string { + nested := []string{} + for _, t := range m { + currString := fmt.Sprintf("%v", t) + splitStrings := strings.Split(currString, "\n") + nested = append(nested, strings.Join(splitStrings, "\n\t")) + } + + return fmt.Sprintf("MultiRESTMapper{\n\t%s\n}", strings.Join(nested, "\n\t")) +} + +// ResourceSingularizer converts a REST resource name from plural to singular (e.g., from pods to pod) +// This implementation supports multiple REST schemas and return the first match. 
+func (m MultiRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { + for _, t := range m { + singular, err = t.ResourceSingularizer(resource) + if err == nil { + return + } + } + return +} + +func (m MultiRESTMapper) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + allGVRs := []schema.GroupVersionResource{} + for _, t := range m { + gvrs, err := t.ResourcesFor(resource) + // ignore "no match" errors, but any other error percolates back up + if IsNoMatchError(err) { + continue + } + if err != nil { + return nil, err + } + + // walk the existing values to de-dup + for _, curr := range gvrs { + found := false + for _, existing := range allGVRs { + if curr == existing { + found = true + break + } + } + + if !found { + allGVRs = append(allGVRs, curr) + } + } + } + + if len(allGVRs) == 0 { + return nil, &NoResourceMatchError{PartialResource: resource} + } + + return allGVRs, nil +} + +func (m MultiRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) { + allGVKs := []schema.GroupVersionKind{} + for _, t := range m { + gvks, err := t.KindsFor(resource) + // ignore "no match" errors, but any other error percolates back up + if IsNoMatchError(err) { + continue + } + if err != nil { + return nil, err + } + + // walk the existing values to de-dup + for _, curr := range gvks { + found := false + for _, existing := range allGVKs { + if curr == existing { + found = true + break + } + } + + if !found { + allGVKs = append(allGVKs, curr) + } + } + } + + if len(allGVKs) == 0 { + return nil, &NoResourceMatchError{PartialResource: resource} + } + + return allGVKs, nil +} + +func (m MultiRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { + resources, err := m.ResourcesFor(resource) + if err != nil { + return schema.GroupVersionResource{}, err + } + if len(resources) == 1 { + return resources[0], nil + } + + return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources} +} + +func (m MultiRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + kinds, err := m.KindsFor(resource) + if err != nil { + return schema.GroupVersionKind{}, err + } + if len(kinds) == 1 { + return kinds[0], nil + } + + return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds} +} + +// RESTMapping provides the REST mapping for the resource based on the +// kind and version. This implementation supports multiple REST schemas and +// return the first match. +func (m MultiRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { + allMappings := []*RESTMapping{} + errors := []error{} + + for _, t := range m { + currMapping, err := t.RESTMapping(gk, versions...) 
+ // ignore "no match" errors, but any other error percolates back up + if IsNoMatchError(err) { + continue + } + if err != nil { + errors = append(errors, err) + continue + } + + allMappings = append(allMappings, currMapping) + } + + // if we got exactly one mapping, then use it even if other requested failed + if len(allMappings) == 1 { + return allMappings[0], nil + } + if len(allMappings) > 1 { + var kinds []schema.GroupVersionKind + for _, m := range allMappings { + kinds = append(kinds, m.GroupVersionKind) + } + return nil, &AmbiguousKindError{PartialKind: gk.WithVersion(""), MatchingKinds: kinds} + } + if len(errors) > 0 { + return nil, utilerrors.NewAggregate(errors) + } + return nil, &NoKindMatchError{PartialKind: gk.WithVersion("")} +} + +// RESTMappings returns all possible RESTMappings for the provided group kind, or an error +// if the type is not recognized. +func (m MultiRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { + var allMappings []*RESTMapping + var errors []error + + for _, t := range m { + currMappings, err := t.RESTMappings(gk, versions...) + // ignore "no match" errors, but any other error percolates back up + if IsNoMatchError(err) { + continue + } + if err != nil { + errors = append(errors, err) + continue + } + allMappings = append(allMappings, currMappings...) + } + if len(errors) > 0 { + return nil, utilerrors.NewAggregate(errors) + } + if len(allMappings) == 0 { + return nil, &NoKindMatchError{PartialKind: gk.WithVersion("")} + } + return allMappings, nil +} + +// AliasesForResource finds the first alias response for the provided mappers. +func (m MultiRESTMapper) AliasesForResource(alias string) ([]string, bool) { + seenAliases := sets.NewString() + allAliases := []string{} + handled := false + + for _, t := range m { + if currAliases, currOk := t.AliasesForResource(alias); currOk { + for _, currAlias := range currAliases { + if !seenAliases.Has(currAlias) { + allAliases = append(allAliases, currAlias) + seenAliases.Insert(currAlias) + } + } + handled = true + } + } + return allAliases, handled +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go new file mode 100644 index 000000000..91203d11f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go @@ -0,0 +1,226 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + AnyGroup = "*" + AnyVersion = "*" + AnyResource = "*" + AnyKind = "*" +) + +// PriorityRESTMapper is a wrapper for automatically choosing a particular Resource or Kind +// when multiple matches are possible +type PriorityRESTMapper struct { + // Delegate is the RESTMapper to use to locate all the Kind and Resource matches + Delegate RESTMapper + + // ResourcePriority is a list of priority patterns to apply to matching resources. 
+ // The list of all matching resources is narrowed based on the patterns until only one remains. + // A pattern with no matches is skipped. A pattern with more than one match uses its + // matches as the list to continue matching against. + ResourcePriority []schema.GroupVersionResource + + // KindPriority is a list of priority patterns to apply to matching kinds. + // The list of all matching kinds is narrowed based on the patterns until only one remains. + // A pattern with no matches is skipped. A pattern with more than one match uses its + // matches as the list to continue matching against. + KindPriority []schema.GroupVersionKind +} + +func (m PriorityRESTMapper) String() string { + return fmt.Sprintf("PriorityRESTMapper{\n\t%v\n\t%v\n\t%v\n}", m.ResourcePriority, m.KindPriority, m.Delegate) +} + +// ResourceFor finds all resources, then passes them through the ResourcePriority patterns to find a single matching hit. +func (m PriorityRESTMapper) ResourceFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionResource, error) { + originalGVRs, err := m.Delegate.ResourcesFor(partiallySpecifiedResource) + if err != nil { + return schema.GroupVersionResource{}, err + } + if len(originalGVRs) == 1 { + return originalGVRs[0], nil + } + + remainingGVRs := append([]schema.GroupVersionResource{}, originalGVRs...) + for _, pattern := range m.ResourcePriority { + matchedGVRs := []schema.GroupVersionResource{} + for _, gvr := range remainingGVRs { + if resourceMatches(pattern, gvr) { + matchedGVRs = append(matchedGVRs, gvr) + } + } + + switch len(matchedGVRs) { + case 0: + // if you have no matches, then nothing matched this pattern just move to the next + continue + case 1: + // one match, return + return matchedGVRs[0], nil + default: + // more than one match, use the matched hits as the list moving to the next pattern. + // this way you can have a series of selection criteria + remainingGVRs = matchedGVRs + } + } + + return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingResources: originalGVRs} +} + +// KindFor finds all kinds, then passes them through the KindPriority patterns to find a single matching hit. +func (m PriorityRESTMapper) KindFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + originalGVKs, err := m.Delegate.KindsFor(partiallySpecifiedResource) + if err != nil { + return schema.GroupVersionKind{}, err + } + if len(originalGVKs) == 1 { + return originalGVKs[0], nil + } + + remainingGVKs := append([]schema.GroupVersionKind{}, originalGVKs...) + for _, pattern := range m.KindPriority { + matchedGVKs := []schema.GroupVersionKind{} + for _, gvr := range remainingGVKs { + if kindMatches(pattern, gvr) { + matchedGVKs = append(matchedGVKs, gvr) + } + } + + switch len(matchedGVKs) { + case 0: + // if you have no matches, then nothing matched this pattern just move to the next + continue + case 1: + // one match, return + return matchedGVKs[0], nil + default: + // more than one match, use the matched hits as the list moving to the next pattern. 
+ // this way you can have a series of selection criteria + remainingGVKs = matchedGVKs + } + } + + return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingKinds: originalGVKs} +} + +func resourceMatches(pattern schema.GroupVersionResource, resource schema.GroupVersionResource) bool { + if pattern.Group != AnyGroup && pattern.Group != resource.Group { + return false + } + if pattern.Version != AnyVersion && pattern.Version != resource.Version { + return false + } + if pattern.Resource != AnyResource && pattern.Resource != resource.Resource { + return false + } + + return true +} + +func kindMatches(pattern schema.GroupVersionKind, kind schema.GroupVersionKind) bool { + if pattern.Group != AnyGroup && pattern.Group != kind.Group { + return false + } + if pattern.Version != AnyVersion && pattern.Version != kind.Version { + return false + } + if pattern.Kind != AnyKind && pattern.Kind != kind.Kind { + return false + } + + return true +} + +func (m PriorityRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (mapping *RESTMapping, err error) { + mappings, err := m.Delegate.RESTMappings(gk) + if err != nil { + return nil, err + } + + // any versions the user provides take priority + priorities := m.KindPriority + if len(versions) > 0 { + priorities = make([]schema.GroupVersionKind, 0, len(m.KindPriority)+len(versions)) + for _, version := range versions { + gv := schema.GroupVersion{ + Version: version, + Group: gk.Group, + } + priorities = append(priorities, gv.WithKind(AnyKind)) + } + priorities = append(priorities, m.KindPriority...) + } + + remaining := append([]*RESTMapping{}, mappings...) + for _, pattern := range priorities { + var matching []*RESTMapping + for _, m := range remaining { + if kindMatches(pattern, m.GroupVersionKind) { + matching = append(matching, m) + } + } + + switch len(matching) { + case 0: + // if you have no matches, then nothing matched this pattern just move to the next + continue + case 1: + // one match, return + return matching[0], nil + default: + // more than one match, use the matched hits as the list moving to the next pattern. + // this way you can have a series of selection criteria + remaining = matching + } + } + if len(remaining) == 1 { + return remaining[0], nil + } + + var kinds []schema.GroupVersionKind + for _, m := range mappings { + kinds = append(kinds, m.GroupVersionKind) + } + return nil, &AmbiguousKindError{PartialKind: gk.WithVersion(""), MatchingKinds: kinds} +} + +func (m PriorityRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { + return m.Delegate.RESTMappings(gk, versions...) 
+} + +func (m PriorityRESTMapper) AliasesForResource(alias string) (aliases []string, ok bool) { + return m.Delegate.AliasesForResource(alias) +} + +func (m PriorityRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { + return m.Delegate.ResourceSingularizer(resource) +} + +func (m PriorityRESTMapper) ResourcesFor(partiallySpecifiedResource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + return m.Delegate.ResourcesFor(partiallySpecifiedResource) +} + +func (m PriorityRESTMapper) KindsFor(partiallySpecifiedResource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) { + return m.Delegate.KindsFor(partiallySpecifiedResource) +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go new file mode 100644 index 000000000..bf4c77a82 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go @@ -0,0 +1,566 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// TODO: move everything in this file to pkg/api/rest +package meta + +import ( + "fmt" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// Implements RESTScope interface +type restScope struct { + name RESTScopeName + paramName string + argumentName string + paramDescription string +} + +func (r *restScope) Name() RESTScopeName { + return r.name +} +func (r *restScope) ParamName() string { + return r.paramName +} +func (r *restScope) ArgumentName() string { + return r.argumentName +} +func (r *restScope) ParamDescription() string { + return r.paramDescription +} + +var RESTScopeNamespace = &restScope{ + name: RESTScopeNameNamespace, + paramName: "namespaces", + argumentName: "namespace", + paramDescription: "object name and auth scope, such as for teams and projects", +} + +var RESTScopeRoot = &restScope{ + name: RESTScopeNameRoot, +} + +// DefaultRESTMapper exposes mappings between the types defined in a +// runtime.Scheme. It assumes that all types defined the provided scheme +// can be mapped with the provided MetadataAccessor and Codec interfaces. +// +// The resource name of a Kind is defined as the lowercase, +// English-plural version of the Kind string. +// When converting from resource to Kind, the singular version of the +// resource name is also accepted for convenience. +// +// TODO: Only accept plural for some operations for increased control? 
+// (`get pod bar` vs `get pods bar`) +type DefaultRESTMapper struct { + defaultGroupVersions []schema.GroupVersion + + resourceToKind map[schema.GroupVersionResource]schema.GroupVersionKind + kindToPluralResource map[schema.GroupVersionKind]schema.GroupVersionResource + kindToScope map[schema.GroupVersionKind]RESTScope + singularToPlural map[schema.GroupVersionResource]schema.GroupVersionResource + pluralToSingular map[schema.GroupVersionResource]schema.GroupVersionResource + + interfacesFunc VersionInterfacesFunc + + // aliasToResource is used for mapping aliases to resources + aliasToResource map[string][]string +} + +func (m *DefaultRESTMapper) String() string { + return fmt.Sprintf("DefaultRESTMapper{kindToPluralResource=%v}", m.kindToPluralResource) +} + +var _ RESTMapper = &DefaultRESTMapper{} + +// VersionInterfacesFunc returns the appropriate typer, and metadata accessor for a +// given api version, or an error if no such api version exists. +type VersionInterfacesFunc func(version schema.GroupVersion) (*VersionInterfaces, error) + +// NewDefaultRESTMapper initializes a mapping between Kind and APIVersion +// to a resource name and back based on the objects in a runtime.Scheme +// and the Kubernetes API conventions. Takes a group name, a priority list of the versions +// to search when an object has no default version (set empty to return an error), +// and a function that retrieves the correct metadata for a given version. +func NewDefaultRESTMapper(defaultGroupVersions []schema.GroupVersion, f VersionInterfacesFunc) *DefaultRESTMapper { + resourceToKind := make(map[schema.GroupVersionResource]schema.GroupVersionKind) + kindToPluralResource := make(map[schema.GroupVersionKind]schema.GroupVersionResource) + kindToScope := make(map[schema.GroupVersionKind]RESTScope) + singularToPlural := make(map[schema.GroupVersionResource]schema.GroupVersionResource) + pluralToSingular := make(map[schema.GroupVersionResource]schema.GroupVersionResource) + aliasToResource := make(map[string][]string) + // TODO: verify name mappings work correctly when versions differ + + return &DefaultRESTMapper{ + resourceToKind: resourceToKind, + kindToPluralResource: kindToPluralResource, + kindToScope: kindToScope, + defaultGroupVersions: defaultGroupVersions, + singularToPlural: singularToPlural, + pluralToSingular: pluralToSingular, + aliasToResource: aliasToResource, + interfacesFunc: f, + } +} + +func (m *DefaultRESTMapper) Add(kind schema.GroupVersionKind, scope RESTScope) { + plural, singular := KindToResource(kind) + + m.singularToPlural[singular] = plural + m.pluralToSingular[plural] = singular + + m.resourceToKind[singular] = kind + m.resourceToKind[plural] = kind + + m.kindToPluralResource[kind] = plural + m.kindToScope[kind] = scope +} + +// unpluralizedSuffixes is a list of resource suffixes that are the same plural and singular +// This is only is only necessary because some bits of code are lazy and don't actually use the RESTMapper like they should. +// TODO eliminate this so that different callers can correctly map to resources. This probably means updating all +// callers to use the RESTMapper they mean. +var unpluralizedSuffixes = []string{ + "endpoints", +} + +// KindToResource converts Kind to a resource name. +// Broken. This method only "sort of" works when used outside of this package. It assumes that Kinds and Resources match +// and they aren't guaranteed to do so. 
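+// For illustration only, the naive rules below map "Pod" -> pods/pod,
+// "Ingress" -> ingresses/ingress, "NetworkPolicy" -> networkpolicies/networkpolicy,
+// while "Endpoints" stays "endpoints" for both forms via unpluralizedSuffixes.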
+func KindToResource(kind schema.GroupVersionKind) ( /*plural*/ schema.GroupVersionResource /*singular*/, schema.GroupVersionResource) { + kindName := kind.Kind + if len(kindName) == 0 { + return schema.GroupVersionResource{}, schema.GroupVersionResource{} + } + singularName := strings.ToLower(kindName) + singular := kind.GroupVersion().WithResource(singularName) + + for _, skip := range unpluralizedSuffixes { + if strings.HasSuffix(singularName, skip) { + return singular, singular + } + } + + switch string(singularName[len(singularName)-1]) { + case "s": + return kind.GroupVersion().WithResource(singularName + "es"), singular + case "y": + return kind.GroupVersion().WithResource(strings.TrimSuffix(singularName, "y") + "ies"), singular + } + + return kind.GroupVersion().WithResource(singularName + "s"), singular +} + +// ResourceSingularizer implements RESTMapper +// It converts a resource name from plural to singular (e.g., from pods to pod) +func (m *DefaultRESTMapper) ResourceSingularizer(resourceType string) (string, error) { + partialResource := schema.GroupVersionResource{Resource: resourceType} + resources, err := m.ResourcesFor(partialResource) + if err != nil { + return resourceType, err + } + + singular := schema.GroupVersionResource{} + for _, curr := range resources { + currSingular, ok := m.pluralToSingular[curr] + if !ok { + continue + } + if singular.Empty() { + singular = currSingular + continue + } + + if currSingular.Resource != singular.Resource { + return resourceType, fmt.Errorf("multiple possible singular resources (%v) found for %v", resources, resourceType) + } + } + + if singular.Empty() { + return resourceType, fmt.Errorf("no singular of resource %v has been defined", resourceType) + } + + return singular.Resource, nil +} + +// coerceResourceForMatching makes the resource lower case and converts internal versions to unspecified (legacy behavior) +func coerceResourceForMatching(resource schema.GroupVersionResource) schema.GroupVersionResource { + resource.Resource = strings.ToLower(resource.Resource) + if resource.Version == runtime.APIVersionInternal { + resource.Version = "" + } + + return resource +} + +func (m *DefaultRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + resource := coerceResourceForMatching(input) + + hasResource := len(resource.Resource) > 0 + hasGroup := len(resource.Group) > 0 + hasVersion := len(resource.Version) > 0 + + if !hasResource { + return nil, fmt.Errorf("a resource must be present, got: %v", resource) + } + + ret := []schema.GroupVersionResource{} + switch { + case hasGroup && hasVersion: + // fully qualified. Find the exact match + for plural, singular := range m.pluralToSingular { + if singular == resource { + ret = append(ret, plural) + break + } + if plural == resource { + ret = append(ret, plural) + break + } + } + + case hasGroup: + // given a group, prefer an exact match. If you don't find one, resort to a prefix match on group + foundExactMatch := false + requestedGroupResource := resource.GroupResource() + for plural, singular := range m.pluralToSingular { + if singular.GroupResource() == requestedGroupResource { + foundExactMatch = true + ret = append(ret, plural) + } + if plural.GroupResource() == requestedGroupResource { + foundExactMatch = true + ret = append(ret, plural) + } + } + + // if you didn't find an exact match, match on group prefixing. 
This allows storageclass.storage to match + // storageclass.storage.k8s.io + if !foundExactMatch { + for plural, singular := range m.pluralToSingular { + if !strings.HasPrefix(plural.Group, requestedGroupResource.Group) { + continue + } + if singular.Resource == requestedGroupResource.Resource { + ret = append(ret, plural) + } + if plural.Resource == requestedGroupResource.Resource { + ret = append(ret, plural) + } + } + + } + + case hasVersion: + for plural, singular := range m.pluralToSingular { + if singular.Version == resource.Version && singular.Resource == resource.Resource { + ret = append(ret, plural) + } + if plural.Version == resource.Version && plural.Resource == resource.Resource { + ret = append(ret, plural) + } + } + + default: + for plural, singular := range m.pluralToSingular { + if singular.Resource == resource.Resource { + ret = append(ret, plural) + } + if plural.Resource == resource.Resource { + ret = append(ret, plural) + } + } + } + + if len(ret) == 0 { + return nil, &NoResourceMatchError{PartialResource: resource} + } + + sort.Sort(resourceByPreferredGroupVersion{ret, m.defaultGroupVersions}) + return ret, nil +} + +func (m *DefaultRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { + resources, err := m.ResourcesFor(resource) + if err != nil { + return schema.GroupVersionResource{}, err + } + if len(resources) == 1 { + return resources[0], nil + } + + return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources} +} + +func (m *DefaultRESTMapper) KindsFor(input schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + resource := coerceResourceForMatching(input) + + hasResource := len(resource.Resource) > 0 + hasGroup := len(resource.Group) > 0 + hasVersion := len(resource.Version) > 0 + + if !hasResource { + return nil, fmt.Errorf("a resource must be present, got: %v", resource) + } + + ret := []schema.GroupVersionKind{} + switch { + // fully qualified. Find the exact match + case hasGroup && hasVersion: + kind, exists := m.resourceToKind[resource] + if exists { + ret = append(ret, kind) + } + + case hasGroup: + foundExactMatch := false + requestedGroupResource := resource.GroupResource() + for currResource, currKind := range m.resourceToKind { + if currResource.GroupResource() == requestedGroupResource { + foundExactMatch = true + ret = append(ret, currKind) + } + } + + // if you didn't find an exact match, match on group prefixing. 
This allows storageclass.storage to match + // storageclass.storage.k8s.io + if !foundExactMatch { + for currResource, currKind := range m.resourceToKind { + if !strings.HasPrefix(currResource.Group, requestedGroupResource.Group) { + continue + } + if currResource.Resource == requestedGroupResource.Resource { + ret = append(ret, currKind) + } + } + + } + + case hasVersion: + for currResource, currKind := range m.resourceToKind { + if currResource.Version == resource.Version && currResource.Resource == resource.Resource { + ret = append(ret, currKind) + } + } + + default: + for currResource, currKind := range m.resourceToKind { + if currResource.Resource == resource.Resource { + ret = append(ret, currKind) + } + } + } + + if len(ret) == 0 { + return nil, &NoResourceMatchError{PartialResource: input} + } + + sort.Sort(kindByPreferredGroupVersion{ret, m.defaultGroupVersions}) + return ret, nil +} + +func (m *DefaultRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + kinds, err := m.KindsFor(resource) + if err != nil { + return schema.GroupVersionKind{}, err + } + if len(kinds) == 1 { + return kinds[0], nil + } + + return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds} +} + +type kindByPreferredGroupVersion struct { + list []schema.GroupVersionKind + sortOrder []schema.GroupVersion +} + +func (o kindByPreferredGroupVersion) Len() int { return len(o.list) } +func (o kindByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] } +func (o kindByPreferredGroupVersion) Less(i, j int) bool { + lhs := o.list[i] + rhs := o.list[j] + if lhs == rhs { + return false + } + + if lhs.GroupVersion() == rhs.GroupVersion() { + return lhs.Kind < rhs.Kind + } + + // otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order + lhsIndex := -1 + rhsIndex := -1 + + for i := range o.sortOrder { + if o.sortOrder[i] == lhs.GroupVersion() { + lhsIndex = i + } + if o.sortOrder[i] == rhs.GroupVersion() { + rhsIndex = i + } + } + + if rhsIndex == -1 { + return true + } + + return lhsIndex < rhsIndex +} + +type resourceByPreferredGroupVersion struct { + list []schema.GroupVersionResource + sortOrder []schema.GroupVersion +} + +func (o resourceByPreferredGroupVersion) Len() int { return len(o.list) } +func (o resourceByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] } +func (o resourceByPreferredGroupVersion) Less(i, j int) bool { + lhs := o.list[i] + rhs := o.list[j] + if lhs == rhs { + return false + } + + if lhs.GroupVersion() == rhs.GroupVersion() { + return lhs.Resource < rhs.Resource + } + + // otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order + lhsIndex := -1 + rhsIndex := -1 + + for i := range o.sortOrder { + if o.sortOrder[i] == lhs.GroupVersion() { + lhsIndex = i + } + if o.sortOrder[i] == rhs.GroupVersion() { + rhsIndex = i + } + } + + if rhsIndex == -1 { + return true + } + + return lhsIndex < rhsIndex +} + +// RESTMapping returns a struct representing the resource path and conversion interfaces a +// RESTClient should use to operate on the provided group/kind in order of versions. If a version search +// order is not provided, the search order provided to DefaultRESTMapper will be used to resolve which +// version should be used to access the named group/kind. 
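+// A minimal usage sketch (hypothetical names; assumes the kind was added via Add):
+//
+//	mapping, err := mapper.RESTMapping(schema.GroupKind{Kind: "Pod"}, "v1")
+//	// mapping.Resource is "pods"; mapping.Scope distinguishes namespaced from root-scoped kinds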
+func (m *DefaultRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { + mappings, err := m.RESTMappings(gk, versions...) + if err != nil { + return nil, err + } + if len(mappings) == 0 { + return nil, &NoKindMatchError{PartialKind: gk.WithVersion("")} + } + // since we rely on RESTMappings method + // take the first match and return to the caller + // as this was the existing behavior. + return mappings[0], nil +} + +// RESTMappings returns the RESTMappings for the provided group kind. If a version search order +// is not provided, the search order provided to DefaultRESTMapper will be used. +func (m *DefaultRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { + mappings := make([]*RESTMapping, 0) + potentialGVK := make([]schema.GroupVersionKind, 0) + hadVersion := false + + // Pick an appropriate version + for _, version := range versions { + if len(version) == 0 || version == runtime.APIVersionInternal { + continue + } + currGVK := gk.WithVersion(version) + hadVersion = true + if _, ok := m.kindToPluralResource[currGVK]; ok { + potentialGVK = append(potentialGVK, currGVK) + break + } + } + // Use the default preferred versions + if !hadVersion && len(potentialGVK) == 0 { + for _, gv := range m.defaultGroupVersions { + if gv.Group != gk.Group { + continue + } + potentialGVK = append(potentialGVK, gk.WithVersion(gv.Version)) + } + } + + if len(potentialGVK) == 0 { + return nil, &NoKindMatchError{PartialKind: gk.WithVersion("")} + } + + for _, gvk := range potentialGVK { + //Ensure we have a REST mapping + res, ok := m.kindToPluralResource[gvk] + if !ok { + continue + } + + // Ensure we have a REST scope + scope, ok := m.kindToScope[gvk] + if !ok { + return nil, fmt.Errorf("the provided version %q and kind %q cannot be mapped to a supported scope", gvk.GroupVersion(), gvk.Kind) + } + + interfaces, err := m.interfacesFunc(gvk.GroupVersion()) + if err != nil { + return nil, fmt.Errorf("the provided version %q has no relevant versions: %v", gvk.GroupVersion().String(), err) + } + + mappings = append(mappings, &RESTMapping{ + Resource: res.Resource, + GroupVersionKind: gvk, + Scope: scope, + + ObjectConvertor: interfaces.ObjectConvertor, + MetadataAccessor: interfaces.MetadataAccessor, + }) + } + + if len(mappings) == 0 { + return nil, &NoResourceMatchError{PartialResource: schema.GroupVersionResource{Group: gk.Group, Resource: gk.Kind}} + } + return mappings, nil +} + +// AddResourceAlias maps aliases to resources +func (m *DefaultRESTMapper) AddResourceAlias(alias string, resources ...string) { + if len(resources) == 0 { + return + } + m.aliasToResource[alias] = resources +} + +// AliasesForResource returns whether a resource has an alias or not +func (m *DefaultRESTMapper) AliasesForResource(alias string) ([]string, bool) { + if res, ok := m.aliasToResource[alias]; ok { + return res, true + } + return nil, false +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go b/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go new file mode 100644 index 000000000..3ebf24815 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go @@ -0,0 +1,31 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// InterfacesForUnstructured returns VersionInterfaces suitable for +// dealing with unstructured.Unstructured objects. +func InterfacesForUnstructured(schema.GroupVersion) (*VersionInterfaces, error) { + return &VersionInterfaces{ + ObjectConvertor: &unstructured.UnstructuredObjectConverter{}, + MetadataAccessor: NewAccessor(), + }, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS new file mode 100755 index 000000000..b905e57f0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS @@ -0,0 +1,17 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- derekwaynecarr +- mikedanese +- saad-ali +- janetkuo +- timstclair +- eparis +- timothysc +- jbeda +- xiang90 +- mbohlool +- david-mcmahon +- goltermann diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/amount.go b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/api/resource/amount.go rename to vendor/k8s.io/apimachinery/pkg/api/resource/amount.go index 6ae823a02..a8866a43e 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/resource/amount.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -54,6 +54,7 @@ var ( // int64Amount represents a fixed precision numerator and arbitrary scale exponent. It is faster // than operations on inf.Dec for values that can be represented as int64. +// +k8s:openapi-gen=true type int64Amount struct { value int64 scale Scale diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go new file mode 100644 index 000000000..87fb5f580 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -0,0 +1,71 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +// DO NOT EDIT! + +/* + Package resource is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto + + It has these top-level messages: + Quantity +*/ +package resource + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *Quantity) Reset() { *m = Quantity{} } +func (*Quantity) ProtoMessage() {} +func (*Quantity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func init() { + proto.RegisterType((*Quantity)(nil), "k8s.io.apimachinery.pkg.api.resource.Quantity") +} + +var fileDescriptorGenerated = []byte{ + // 253 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x8f, 0xb1, 0x4a, 0x03, 0x41, + 0x10, 0x86, 0x77, 0x1b, 0x89, 0x57, 0x06, 0x11, 0x49, 0xb1, 0x17, 0xc4, 0x42, 0x04, 0x77, 0x0a, + 0x9b, 0x60, 0x69, 0x6f, 0xa1, 0xa5, 0xdd, 0xdd, 0x65, 0xdc, 0x2c, 0x67, 0x76, 0x8f, 0xd9, 0x59, + 0x21, 0x5d, 0x4a, 0xcb, 0x94, 0x96, 0xb9, 0xb7, 0x49, 0x99, 0xd2, 0xc2, 0xc2, 0x3b, 0x5f, 0x44, + 0x72, 0xc9, 0x81, 0x08, 0x76, 0xf3, 0xfd, 0xc3, 0x37, 0xfc, 0x93, 0xdc, 0x97, 0x93, 0xa0, 0xad, + 0x87, 0x32, 0xe6, 0x48, 0x0e, 0x19, 0x03, 0xbc, 0xa2, 0x9b, 0x7a, 0x82, 0xc3, 0x22, 0xab, 0xec, + 0x3c, 0x2b, 0x66, 0xd6, 0x21, 0x2d, 0xa0, 0x2a, 0xcd, 0x2e, 0x00, 0xc2, 0xe0, 0x23, 0x15, 0x08, + 0x06, 0x1d, 0x52, 0xc6, 0x38, 0xd5, 0x15, 0x79, 0xf6, 0xc3, 0x8b, 0xbd, 0xa5, 0x7f, 0x5b, 0xba, + 0x2a, 0xcd, 0x2e, 0xd0, 0xbd, 0x35, 0xba, 0x36, 0x96, 0x67, 0x31, 0xd7, 0x85, 0x9f, 0x83, 0xf1, + 0xc6, 0x43, 0x27, 0xe7, 0xf1, 0xb9, 0xa3, 0x0e, 0xba, 0x69, 0x7f, 0x74, 0x74, 0xf3, 0x5f, 0x95, + 0xc8, 0xf6, 0x05, 0xac, 0xe3, 0xc0, 0xf4, 0xb7, 0xc9, 0xf9, 0x24, 0x19, 0x3c, 0xc4, 0xcc, 0xb1, + 0xe5, 0xc5, 0xf0, 0x34, 0x39, 0x0a, 0x4c, 0xd6, 0x99, 0x33, 0x39, 0x96, 0x97, 0xc7, 0x8f, 0x07, + 0xba, 0x3d, 0x79, 0x5f, 0xa7, 0xe2, 0xad, 0x4e, 0xc5, 0xaa, 0x4e, 0xc5, 0xba, 0x4e, 0xc5, 0xf2, + 0x73, 0x2c, 0xee, 0xae, 0x36, 0x8d, 0x12, 0xdb, 0x46, 0x89, 0x8f, 0x46, 0x89, 0x65, 0xab, 0xe4, + 0xa6, 0x55, 0x72, 0xdb, 0x2a, 0xf9, 0xd5, 0x2a, 0xb9, 0xfa, 0x56, 0xe2, 0x69, 0xd0, 0x7f, 0xf2, + 0x13, 0x00, 0x00, 0xff, 0xff, 0xdf, 0x3c, 0xf3, 0xc9, 0x3f, 0x01, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto new file mode 100644 index 000000000..608299da4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+ +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.api.resource; + +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "resource"; + +// Quantity is a fixed-point representation of a number. +// It provides convenient marshaling/unmarshaling in JSON and YAML, +// in addition to String() and Int64() accessors. +// +// The serialization format is: +// +// <quantity> ::= <signedNumber><suffix> +// (Note that <suffix> may be empty, from the "" case in <decimalSI>.) +// <digit> ::= 0 | 1 | ... | 9 +// <digits> ::= <digit> | <digit><digits> +// <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> +// <sign> ::= "+" | "-" +// <signedNumber> ::= <number> | <sign><number> +// <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> +// <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei +// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) +// <decimalSI> ::= m | "" | k | M | G | T | P | E +// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) +// <decimalExponent> ::= "e" <signedNumber> | "E" <signedNumber> +// +// No matter which of the three exponent forms is used, no quantity may represent +// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal +// places. Numbers larger or more precise will be capped or rounded up. +// (E.g.: 0.1m will rounded up to 1m.) +// This may be extended in the future if we require larger or smaller quantities. +// +// When a Quantity is parsed from a string, it will remember the type of suffix +// it had, and will use the same type again when it is serialized. +// +// Before serializing, Quantity will be put in "canonical form". +// This means that Exponent/suffix will be adjusted up or down (with a +// corresponding increase or decrease in Mantissa) such that: +// a. No precision is lost +// b. No fractional digits will be emitted +// c. The exponent (or suffix) is as large as possible. +// The sign will be omitted unless the number is negative. +// +// Examples: +// 1.5 will be serialized as "1500m" +// 1.5Gi will be serialized as "1536Mi" +// +// NOTE: We reserve the right to amend this canonical format, perhaps to +// allow 1.5 to be canonical. +// TODO: Remove above disclaimer after all bikeshedding about format is over, +// or after March 2015. +// +// Note that the quantity will NEVER be internally represented by a +// floating point number. That is the whole point of this exercise. +// +// Non-canonical values will still parse as long as they are well formed, +// but will be re-emitted in their canonical form. (So always use canonical +// form, or don't diff.) +// +// This format is intended to make it difficult to use these numbers without +// writing some sort of special handling code in the hopes that that will +// cause implementors to also use a fixed point implementation. 
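+//
+// A short worked example against the grammar above (illustrative only):
+// "1536Mi" is <digits><binarySI> and denotes 1536 * 2^20, while "1500m" is
+// <digits><decimalSI> and denotes 1.5, matching the canonical serializations
+// shown in the examples above.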
+// +// +protobuf=true +// +protobuf.embed=string +// +protobuf.options.marshal=false +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:openapi-gen=true +message Quantity { + optional string string = 1; +} + diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/math.go b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/api/resource/math.go rename to vendor/k8s.io/apimachinery/pkg/api/resource/math.go index 163aafa5d..887ac74c9 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/resource/math.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go similarity index 97% rename from vendor/k8s.io/kubernetes/pkg/api/resource/quantity.go rename to vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index 96877b4c9..3a9560882 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -27,7 +27,9 @@ import ( flag "github.com/spf13/pflag" + "github.com/go-openapi/spec" inf "gopkg.in/inf.v0" + "k8s.io/apimachinery/pkg/openapi" ) // Quantity is a fixed-point representation of a number. @@ -87,11 +89,11 @@ import ( // writing some sort of special handling code in the hopes that that will // cause implementors to also use a fixed point implementation. // -// +gencopy=false // +protobuf=true // +protobuf.embed=string // +protobuf.options.marshal=false // +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:openapi-gen=true type Quantity struct { // i is the quantity in int64 scaled form, if d.Dec == nil i int64Amount @@ -386,6 +388,28 @@ func ParseQuantity(str string) (Quantity, error) { return Quantity{d: infDecAmount{amount}, Format: format}, nil } +// DeepCopy returns a deep-copy of the Quantity value. Note that the method +// receiver is a value, so we can mutate it in-place and return it. +func (q Quantity) DeepCopy() Quantity { + if q.d.Dec != nil { + tmp := &inf.Dec{} + q.d.Dec = tmp.Set(q.d.Dec) + } + return q +} + +// OpenAPIDefinition returns openAPI definition for this type. +func (_ Quantity) OpenAPIDefinition() openapi.OpenAPIDefinition { + return openapi.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + } +} + // CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity). 
// // Note about BinarySI: diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/quantity_proto.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/api/resource/quantity_proto.go rename to vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go index 240294682..74dfb4e4b 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/resource/quantity_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/scale_int.go b/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go similarity index 97% rename from vendor/k8s.io/kubernetes/pkg/api/resource/scale_int.go rename to vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go index 173de1a21..55e177b0e 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/resource/scale_int.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/suffix.go b/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/api/resource/suffix.go rename to vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go index 0aa2ce2bf..5ed7abe66 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/resource/suffix.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go new file mode 100644 index 000000000..2c8568c1f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go @@ -0,0 +1,99 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package announced contains tools for announcing API group factories. This is +// distinct from registration (in the 'registered' package) in that it's safe +// to announce every possible group linked in, but only groups requested at +// runtime should be registered. This package contains both a registry, and +// factory code (which was formerly copy-pasta in every install package). 
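+//
+// A hypothetical end-to-end sketch (names are illustrative; install packages
+// normally make the announce calls from their init functions):
+//
+//	factories := make(APIGroupFactoryRegistry)
+//	_ = factories.AnnounceGroup(&GroupMetaFactoryArgs{GroupName: "example.k8s.io", VersionPreferenceOrder: []string{"v1"}})
+//	_ = factories.AnnounceGroupVersion(&GroupVersionFactoryArgs{GroupName: "example.k8s.io", VersionName: "v1", AddToScheme: addToScheme})
+//	err := factories.RegisterAndEnableAll(registrationManager, scheme)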
+package announced + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" +) + +// APIGroupFactoryRegistry allows for groups and versions to announce themselves, +// which simply makes them available and doesn't take other actions. Later, +// users of the registry can select which groups and versions they'd actually +// like to register with an APIRegistrationManager. +// +// (Right now APIRegistrationManager has separate 'registration' and 'enabled' +// concepts-- APIGroupFactory is going to take over the former function; +// they will overlap untill the refactoring is finished.) +// +// The key is the group name. After initialization, this should be treated as +// read-only. It is implemented as a map from group name to group factory, and +// it is safe to use this knowledge to manually pick out groups to register +// (e.g., for testing). +type APIGroupFactoryRegistry map[string]*GroupMetaFactory + +func (gar APIGroupFactoryRegistry) group(groupName string) *GroupMetaFactory { + gmf, ok := gar[groupName] + if !ok { + gmf = &GroupMetaFactory{VersionArgs: map[string]*GroupVersionFactoryArgs{}} + gar[groupName] = gmf + } + return gmf +} + +// AnnounceGroupVersion adds the particular arguments for this group version to the group factory. +func (gar APIGroupFactoryRegistry) AnnounceGroupVersion(gvf *GroupVersionFactoryArgs) error { + gmf := gar.group(gvf.GroupName) + if _, ok := gmf.VersionArgs[gvf.VersionName]; ok { + return fmt.Errorf("version %q in group %q has already been announced", gvf.VersionName, gvf.GroupName) + } + gmf.VersionArgs[gvf.VersionName] = gvf + return nil +} + +// AnnounceGroup adds the group-wide arguments to the group factory. +func (gar APIGroupFactoryRegistry) AnnounceGroup(args *GroupMetaFactoryArgs) error { + gmf := gar.group(args.GroupName) + if gmf.GroupArgs != nil { + return fmt.Errorf("group %q has already been announced", args.GroupName) + } + gmf.GroupArgs = args + return nil +} + +// RegisterAndEnableAll throws every factory at the specified API registration +// manager, and lets it decide which to register. (If you want to do this a la +// cart, you may look through gar itself-- it's just a map.) +func (gar APIGroupFactoryRegistry) RegisterAndEnableAll(m *registered.APIRegistrationManager, scheme *runtime.Scheme) error { + for groupName, gmf := range gar { + if err := gmf.Register(m); err != nil { + return fmt.Errorf("error registering %v: %v", groupName, err) + } + if err := gmf.Enable(m, scheme); err != nil { + return fmt.Errorf("error enabling %v: %v", groupName, err) + } + } + return nil +} + +// AnnouncePreconstructedFactory announces a factory which you've manually assembled. +// You may call this instead of calling AnnounceGroup and AnnounceGroupVersion. +func (gar APIGroupFactoryRegistry) AnnouncePreconstructedFactory(gmf *GroupMetaFactory) error { + name := gmf.GroupArgs.GroupName + if _, exists := gar[name]; exists { + return fmt.Errorf("the group %q has already been announced.", name) + } + gar[name] = gmf + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go new file mode 100644 index 000000000..afb03295d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go @@ -0,0 +1,252 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package announced + +import ( + "fmt" + + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apimachinery" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +type SchemeFunc func(*runtime.Scheme) error +type VersionToSchemeFunc map[string]SchemeFunc + +// GroupVersionFactoryArgs contains all the per-version parts of a GroupMetaFactory. +type GroupVersionFactoryArgs struct { + GroupName string + VersionName string + + AddToScheme SchemeFunc +} + +// GroupMetaFactoryArgs contains the group-level args of a GroupMetaFactory. +type GroupMetaFactoryArgs struct { + // GroupName is the name of the API-Group + // + // example: 'servicecatalog.k8s.io' + GroupName string + VersionPreferenceOrder []string + // ImportPrefix is the base go package of the API-Group + // + // example: 'k8s.io/kubernetes/pkg/apis/autoscaling' + ImportPrefix string + // RootScopedKinds are resources that are not namespaced. + RootScopedKinds sets.String // nil is allowed + IgnoredKinds sets.String // nil is allowed + + // May be nil if there are no internal objects. + AddInternalObjectsToScheme SchemeFunc +} + +// NewGroupMetaFactory builds the args for you. This is for if you're +// constructing a factory all at once and not using the registry. +func NewGroupMetaFactory(groupArgs *GroupMetaFactoryArgs, versions VersionToSchemeFunc) *GroupMetaFactory { + gmf := &GroupMetaFactory{ + GroupArgs: groupArgs, + VersionArgs: map[string]*GroupVersionFactoryArgs{}, + } + for v, f := range versions { + gmf.VersionArgs[v] = &GroupVersionFactoryArgs{ + GroupName: groupArgs.GroupName, + VersionName: v, + AddToScheme: f, + } + } + return gmf +} + +// Announce adds this Group factory to the global factory registry. It should +// only be called if you constructed the GroupMetaFactory yourself via +// NewGroupMetadFactory. +// Note that this will panic on an error, since it's expected that you'll be +// calling this at initialization time and any error is a result of a +// programmer importing the wrong set of packages. If this assumption doesn't +// work for you, just call DefaultGroupFactoryRegistry.AnnouncePreconstructedFactory +// yourself. +func (gmf *GroupMetaFactory) Announce(groupFactoryRegistry APIGroupFactoryRegistry) *GroupMetaFactory { + if err := groupFactoryRegistry.AnnouncePreconstructedFactory(gmf); err != nil { + panic(err) + } + return gmf +} + +// GroupMetaFactory has the logic for actually assembling and registering a group. +// +// There are two ways of obtaining one of these. +// 1. You can announce your group and versions separately, and then let the +// GroupFactoryRegistry assemble this object for you. (This allows group and +// versions to be imported separately, without referencing each other, to +// keep import trees small.) +// 2. 
You can call NewGroupMetaFactory(), which is mostly a drop-in replacement +// for the old, bad way of doing things. You can then call .Announce() to +// announce your constructed factory to any code that would like to do +// things the new, better way. +// +// Note that GroupMetaFactory actually does construct GroupMeta objects, but +// currently it does so in a way that's very entangled with an +// APIRegistrationManager. It's a TODO item to cleanly separate that interface. +type GroupMetaFactory struct { + GroupArgs *GroupMetaFactoryArgs + // map of version name to version factory + VersionArgs map[string]*GroupVersionFactoryArgs + + // assembled by Register() + prioritizedVersionList []schema.GroupVersion +} + +// Register constructs the finalized prioritized version list and sanity checks +// the announced group & versions. Then it calls register. +func (gmf *GroupMetaFactory) Register(m *registered.APIRegistrationManager) error { + if gmf.GroupArgs == nil { + return fmt.Errorf("partially announced groups are not allowed, only got versions: %#v", gmf.VersionArgs) + } + if len(gmf.VersionArgs) == 0 { + return fmt.Errorf("group %v announced but no versions announced", gmf.GroupArgs.GroupName) + } + + pvSet := sets.NewString(gmf.GroupArgs.VersionPreferenceOrder...) + if pvSet.Len() != len(gmf.GroupArgs.VersionPreferenceOrder) { + return fmt.Errorf("preference order for group %v has duplicates: %v", gmf.GroupArgs.GroupName, gmf.GroupArgs.VersionPreferenceOrder) + } + prioritizedVersions := []schema.GroupVersion{} + for _, v := range gmf.GroupArgs.VersionPreferenceOrder { + prioritizedVersions = append( + prioritizedVersions, + schema.GroupVersion{ + Group: gmf.GroupArgs.GroupName, + Version: v, + }, + ) + } + + // Go through versions that weren't explicitly prioritized. + unprioritizedVersions := []schema.GroupVersion{} + for _, v := range gmf.VersionArgs { + if v.GroupName != gmf.GroupArgs.GroupName { + return fmt.Errorf("found %v/%v in group %v?", v.GroupName, v.VersionName, gmf.GroupArgs.GroupName) + } + if pvSet.Has(v.VersionName) { + pvSet.Delete(v.VersionName) + continue + } + unprioritizedVersions = append(unprioritizedVersions, schema.GroupVersion{Group: v.GroupName, Version: v.VersionName}) + } + if len(unprioritizedVersions) > 1 { + glog.Warningf("group %v has multiple unprioritized versions: %#v. They will have an arbitrary preference order!", gmf.GroupArgs.GroupName, unprioritizedVersions) + } + if pvSet.Len() != 0 { + return fmt.Errorf("group %v has versions in the priority list that were never announced: %s", gmf.GroupArgs.GroupName, pvSet) + } + prioritizedVersions = append(prioritizedVersions, unprioritizedVersions...) 
+ m.RegisterVersions(prioritizedVersions) + gmf.prioritizedVersionList = prioritizedVersions + return nil +} + +func (gmf *GroupMetaFactory) newRESTMapper(scheme *runtime.Scheme, externalVersions []schema.GroupVersion, groupMeta *apimachinery.GroupMeta) meta.RESTMapper { + // the list of kinds that are scoped at the root of the api hierarchy + // if a kind is not enumerated here, it is assumed to have a namespace scope + rootScoped := sets.NewString() + if gmf.GroupArgs.RootScopedKinds != nil { + rootScoped = gmf.GroupArgs.RootScopedKinds + } + ignoredKinds := sets.NewString() + if gmf.GroupArgs.IgnoredKinds != nil { + ignoredKinds = gmf.GroupArgs.IgnoredKinds + } + + return meta.NewDefaultRESTMapperFromScheme( + externalVersions, + groupMeta.InterfacesFor, + gmf.GroupArgs.ImportPrefix, + ignoredKinds, + rootScoped, + scheme, + ) +} + +// Enable enables group versions that are allowed, adds methods to the scheme, etc. +func (gmf *GroupMetaFactory) Enable(m *registered.APIRegistrationManager, scheme *runtime.Scheme) error { + externalVersions := []schema.GroupVersion{} + for _, v := range gmf.prioritizedVersionList { + if !m.IsAllowedVersion(v) { + continue + } + externalVersions = append(externalVersions, v) + if err := m.EnableVersions(v); err != nil { + return err + } + gmf.VersionArgs[v.Version].AddToScheme(scheme) + } + if len(externalVersions) == 0 { + glog.V(4).Infof("No version is registered for group %v", gmf.GroupArgs.GroupName) + return nil + } + + if gmf.GroupArgs.AddInternalObjectsToScheme != nil { + gmf.GroupArgs.AddInternalObjectsToScheme(scheme) + } + + preferredExternalVersion := externalVersions[0] + accessor := meta.NewAccessor() + + groupMeta := &apimachinery.GroupMeta{ + GroupVersion: preferredExternalVersion, + GroupVersions: externalVersions, + SelfLinker: runtime.SelfLinker(accessor), + } + for _, v := range externalVersions { + gvf := gmf.VersionArgs[v.Version] + if err := groupMeta.AddVersionInterfaces( + schema.GroupVersion{Group: gvf.GroupName, Version: gvf.VersionName}, + &meta.VersionInterfaces{ + ObjectConvertor: scheme, + MetadataAccessor: accessor, + }, + ); err != nil { + return err + } + } + groupMeta.InterfacesFor = groupMeta.DefaultInterfacesFor + groupMeta.RESTMapper = gmf.newRESTMapper(scheme, externalVersions, groupMeta) + + if err := m.RegisterGroup(*groupMeta); err != nil { + return err + } + return nil +} + +// RegisterAndEnable is provided only to allow this code to get added in multiple steps. +// It's really bad that this is called in init() methods, but supporting this +// temporarily lets us do the change incrementally. +func (gmf *GroupMetaFactory) RegisterAndEnable(registry *registered.APIRegistrationManager, scheme *runtime.Scheme) error { + if err := gmf.Register(registry); err != nil { + return err + } + if err := gmf.Enable(registry, scheme); err != nil { + return err + } + + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go new file mode 100644 index 000000000..ede22b3d6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package apimachinery contains the generic API machinery code that +// is common to both server and clients. +// This package should never import specific API objects. +package apimachinery diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go new file mode 100644 index 000000000..f2e32c88c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go @@ -0,0 +1,376 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package to keep track of API Versions that can be registered and are enabled in api.Scheme. +package registered + +import ( + "fmt" + "sort" + "strings" + + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apimachinery" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +// APIRegistrationManager provides the concept of what API groups are enabled. +// +// TODO: currently, it also provides a "registered" concept. But it's wrong to +// have both concepts in the same object. Therefore the "announced" package is +// going to take over the registered concept. After all the install packages +// are switched to using the announce package instead of this package, then we +// can combine the registered/enabled concepts in this object. Simplifying this +// isn't easy right now because there are so many callers of this package. +type APIRegistrationManager struct { + // registeredGroupVersions stores all API group versions for which RegisterGroup is called. + registeredVersions map[schema.GroupVersion]struct{} + + // thirdPartyGroupVersions are API versions which are dynamically + // registered (and unregistered) via API calls to the apiserver + thirdPartyGroupVersions []schema.GroupVersion + + // enabledVersions represents all enabled API versions. It should be a + // subset of registeredVersions. Please call EnableVersions() to add + // enabled versions. + enabledVersions map[schema.GroupVersion]struct{} + + // map of group meta for all groups. + groupMetaMap map[string]*apimachinery.GroupMeta + + // envRequestedVersions represents the versions requested via the + // KUBE_API_VERSIONS environment variable. The install package of each group + // checks this list before add their versions to the latest package and + // Scheme. This list is small and order matters, so represent as a slice + envRequestedVersions []schema.GroupVersion +} + +// NewAPIRegistrationManager constructs a new manager. 
The argument ought to be +// the value of the KUBE_API_VERSIONS env var, or a value of this which you +// wish to test. +func NewAPIRegistrationManager(kubeAPIVersions string) (*APIRegistrationManager, error) { + m := &APIRegistrationManager{ + registeredVersions: map[schema.GroupVersion]struct{}{}, + thirdPartyGroupVersions: []schema.GroupVersion{}, + enabledVersions: map[schema.GroupVersion]struct{}{}, + groupMetaMap: map[string]*apimachinery.GroupMeta{}, + envRequestedVersions: []schema.GroupVersion{}, + } + + if len(kubeAPIVersions) != 0 { + for _, version := range strings.Split(kubeAPIVersions, ",") { + gv, err := schema.ParseGroupVersion(version) + if err != nil { + return nil, fmt.Errorf("invalid api version: %s in KUBE_API_VERSIONS: %s.", + version, kubeAPIVersions) + } + m.envRequestedVersions = append(m.envRequestedVersions, gv) + } + } + return m, nil +} + +func NewOrDie(kubeAPIVersions string) *APIRegistrationManager { + m, err := NewAPIRegistrationManager(kubeAPIVersions) + if err != nil { + glog.Fatalf("Could not construct version manager: %v (KUBE_API_VERSIONS=%q)", err, kubeAPIVersions) + } + return m +} + +// RegisterVersions adds the given group versions to the list of registered group versions. +func (m *APIRegistrationManager) RegisterVersions(availableVersions []schema.GroupVersion) { + for _, v := range availableVersions { + m.registeredVersions[v] = struct{}{} + } +} + +// RegisterGroup adds the given group to the list of registered groups. +func (m *APIRegistrationManager) RegisterGroup(groupMeta apimachinery.GroupMeta) error { + groupName := groupMeta.GroupVersion.Group + if _, found := m.groupMetaMap[groupName]; found { + return fmt.Errorf("group %q is already registered in groupsMap: %v", groupName, m.groupMetaMap) + } + m.groupMetaMap[groupName] = &groupMeta + return nil +} + +// EnableVersions adds the versions for the given group to the list of enabled versions. +// Note that the caller should call RegisterGroup before calling this method. +// The caller of this function is responsible to add the versions to scheme and RESTMapper. +func (m *APIRegistrationManager) EnableVersions(versions ...schema.GroupVersion) error { + var unregisteredVersions []schema.GroupVersion + for _, v := range versions { + if _, found := m.registeredVersions[v]; !found { + unregisteredVersions = append(unregisteredVersions, v) + } + m.enabledVersions[v] = struct{}{} + } + if len(unregisteredVersions) != 0 { + return fmt.Errorf("Please register versions before enabling them: %v", unregisteredVersions) + } + return nil +} + +// IsAllowedVersion returns if the version is allowed by the KUBE_API_VERSIONS +// environment variable. If the environment variable is empty, then it always +// returns true. +func (m *APIRegistrationManager) IsAllowedVersion(v schema.GroupVersion) bool { + if len(m.envRequestedVersions) == 0 { + return true + } + for _, envGV := range m.envRequestedVersions { + if v == envGV { + return true + } + } + return false +} + +// IsEnabledVersion returns if a version is enabled. +func (m *APIRegistrationManager) IsEnabledVersion(v schema.GroupVersion) bool { + _, found := m.enabledVersions[v] + return found +} + +// EnabledVersions returns all enabled versions. 
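A minimal sketch, not part of this diff, of the register-then-enable lifecycle these methods enforce: a version must be registered, and allowed by KUBE_API_VERSIONS, before EnableVersions will accept it. The group and version used are hypothetical; the import paths follow the vendored file paths in this diff.

package main

import (
	"fmt"
	"os"

	"k8s.io/apimachinery/pkg/apimachinery/registered"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Build the manager from KUBE_API_VERSIONS, as the constructor documents.
	m := registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS"))

	gv := schema.GroupVersion{Group: "example.k8s.io", Version: "v1"}

	// Versions must be registered before they can be enabled.
	m.RegisterVersions([]schema.GroupVersion{gv})
	if m.IsAllowedVersion(gv) { // false only if KUBE_API_VERSIONS excludes it
		if err := m.EnableVersions(gv); err != nil {
			panic(err)
		}
	}
	fmt.Println(m.IsEnabledVersion(gv)) // true when the version was allowed
}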
Groups are randomly ordered, but versions within groups +// are priority order from best to worst +func (m *APIRegistrationManager) EnabledVersions() []schema.GroupVersion { + ret := []schema.GroupVersion{} + for _, groupMeta := range m.groupMetaMap { + for _, version := range groupMeta.GroupVersions { + if m.IsEnabledVersion(version) { + ret = append(ret, version) + } + } + } + return ret +} + +// EnabledVersionsForGroup returns all enabled versions for a group in order of best to worst +func (m *APIRegistrationManager) EnabledVersionsForGroup(group string) []schema.GroupVersion { + groupMeta, ok := m.groupMetaMap[group] + if !ok { + return []schema.GroupVersion{} + } + + ret := []schema.GroupVersion{} + for _, version := range groupMeta.GroupVersions { + if m.IsEnabledVersion(version) { + ret = append(ret, version) + } + } + return ret +} + +// Group returns the metadata of a group if the group is registered, otherwise +// an error is returned. +func (m *APIRegistrationManager) Group(group string) (*apimachinery.GroupMeta, error) { + groupMeta, found := m.groupMetaMap[group] + if !found { + return nil, fmt.Errorf("group %v has not been registered", group) + } + groupMetaCopy := *groupMeta + return &groupMetaCopy, nil +} + +// IsRegistered takes a string and determines if it's one of the registered groups +func (m *APIRegistrationManager) IsRegistered(group string) bool { + _, found := m.groupMetaMap[group] + return found +} + +// IsRegisteredVersion returns if a version is registered. +func (m *APIRegistrationManager) IsRegisteredVersion(v schema.GroupVersion) bool { + _, found := m.registeredVersions[v] + return found +} + +// RegisteredGroupVersions returns all registered group versions. +func (m *APIRegistrationManager) RegisteredGroupVersions() []schema.GroupVersion { + ret := []schema.GroupVersion{} + for groupVersion := range m.registeredVersions { + ret = append(ret, groupVersion) + } + return ret +} + +// IsThirdPartyAPIGroupVersion returns true if the api version is a user-registered group/version. +func (m *APIRegistrationManager) IsThirdPartyAPIGroupVersion(gv schema.GroupVersion) bool { + for ix := range m.thirdPartyGroupVersions { + if m.thirdPartyGroupVersions[ix] == gv { + return true + } + } + return false +} + +// AddThirdPartyAPIGroupVersions sets the list of third party versions, +// registers them in the API machinery and enables them. +// Skips GroupVersions that are already registered. +// Returns the list of GroupVersions that were skipped. +func (m *APIRegistrationManager) AddThirdPartyAPIGroupVersions(gvs ...schema.GroupVersion) []schema.GroupVersion { + filteredGVs := []schema.GroupVersion{} + skippedGVs := []schema.GroupVersion{} + for ix := range gvs { + if !m.IsRegisteredVersion(gvs[ix]) { + filteredGVs = append(filteredGVs, gvs[ix]) + } else { + glog.V(3).Infof("Skipping %s, because its already registered", gvs[ix].String()) + skippedGVs = append(skippedGVs, gvs[ix]) + } + } + if len(filteredGVs) == 0 { + return skippedGVs + } + m.RegisterVersions(filteredGVs) + m.EnableVersions(filteredGVs...) + m.thirdPartyGroupVersions = append(m.thirdPartyGroupVersions, filteredGVs...) 
+ + return skippedGVs +} + +// InterfacesFor is a union meta.VersionInterfacesFunc func for all registered types +func (m *APIRegistrationManager) InterfacesFor(version schema.GroupVersion) (*meta.VersionInterfaces, error) { + groupMeta, err := m.Group(version.Group) + if err != nil { + return nil, err + } + return groupMeta.InterfacesFor(version) +} + +// TODO: This is an expedient function, because we don't check if a Group is +// supported throughout the code base. We will abandon this function and +// checking the error returned by the Group() function. +func (m *APIRegistrationManager) GroupOrDie(group string) *apimachinery.GroupMeta { + groupMeta, found := m.groupMetaMap[group] + if !found { + if group == "" { + panic("The legacy v1 API is not registered.") + } else { + panic(fmt.Sprintf("Group %s is not registered.", group)) + } + } + groupMetaCopy := *groupMeta + return &groupMetaCopy +} + +// RESTMapper returns a union RESTMapper of all known types with priorities chosen in the following order: +// 1. if KUBE_API_VERSIONS is specified, then KUBE_API_VERSIONS in order, OR +// 1. legacy kube group preferred version, extensions preferred version, metrics perferred version, legacy +// kube any version, extensions any version, metrics any version, all other groups alphabetical preferred version, +// all other groups alphabetical. +func (m *APIRegistrationManager) RESTMapper(versionPatterns ...schema.GroupVersion) meta.RESTMapper { + unionMapper := meta.MultiRESTMapper{} + unionedGroups := sets.NewString() + for enabledVersion := range m.enabledVersions { + if !unionedGroups.Has(enabledVersion.Group) { + unionedGroups.Insert(enabledVersion.Group) + groupMeta := m.groupMetaMap[enabledVersion.Group] + unionMapper = append(unionMapper, groupMeta.RESTMapper) + } + } + + if len(versionPatterns) != 0 { + resourcePriority := []schema.GroupVersionResource{} + kindPriority := []schema.GroupVersionKind{} + for _, versionPriority := range versionPatterns { + resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource)) + kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind)) + } + + return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} + } + + if len(m.envRequestedVersions) != 0 { + resourcePriority := []schema.GroupVersionResource{} + kindPriority := []schema.GroupVersionKind{} + + for _, versionPriority := range m.envRequestedVersions { + resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource)) + kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind)) + } + + return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} + } + + prioritizedGroups := []string{"", "extensions", "metrics"} + resourcePriority, kindPriority := m.prioritiesForGroups(prioritizedGroups...) + + prioritizedGroupsSet := sets.NewString(prioritizedGroups...) + remainingGroups := sets.String{} + for enabledVersion := range m.enabledVersions { + if !prioritizedGroupsSet.Has(enabledVersion.Group) { + remainingGroups.Insert(enabledVersion.Group) + } + } + + remainingResourcePriority, remainingKindPriority := m.prioritiesForGroups(remainingGroups.List()...) + resourcePriority = append(resourcePriority, remainingResourcePriority...) + kindPriority = append(kindPriority, remainingKindPriority...) 
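A sketch, not part of this diff, of consuming the priority mapper built above: with no explicit version patterns, lookups fall back to the KUBE_API_VERSIONS order or the built-in group priorities described in the comment. The RESTMapping method signature and the GroupVersionKind field on its result come from the vendored meta package and are assumptions here, as is the example kind.

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apimachinery/registered"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// printDeploymentMapping asks the priority mapper which group/version serves
// the Deployment kind, following the preference order documented above.
func printDeploymentMapping(m *registered.APIRegistrationManager) error {
	mapper := m.RESTMapper() // no patterns: KUBE_API_VERSIONS order, else built-in group priorities
	mapping, err := mapper.RESTMapping(schema.GroupKind{Group: "extensions", Kind: "Deployment"})
	if err != nil {
		return err // no enabled version of the group knows this kind
	}
	fmt.Println(mapping.GroupVersionKind)
	return nil
}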
+ + return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} +} + +// prioritiesForGroups returns the resource and kind priorities for a PriorityRESTMapper, preferring the preferred version of each group first, +// then any non-preferred version of the group second. +func (m *APIRegistrationManager) prioritiesForGroups(groups ...string) ([]schema.GroupVersionResource, []schema.GroupVersionKind) { + resourcePriority := []schema.GroupVersionResource{} + kindPriority := []schema.GroupVersionKind{} + + for _, group := range groups { + availableVersions := m.EnabledVersionsForGroup(group) + if len(availableVersions) > 0 { + resourcePriority = append(resourcePriority, availableVersions[0].WithResource(meta.AnyResource)) + kindPriority = append(kindPriority, availableVersions[0].WithKind(meta.AnyKind)) + } + } + for _, group := range groups { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{Group: group, Version: meta.AnyVersion, Resource: meta.AnyResource}) + kindPriority = append(kindPriority, schema.GroupVersionKind{Group: group, Version: meta.AnyVersion, Kind: meta.AnyKind}) + } + + return resourcePriority, kindPriority +} + +// AllPreferredGroupVersions returns the preferred versions of all registered +// groups in the form of "group1/version1,group2/version2,..." +func (m *APIRegistrationManager) AllPreferredGroupVersions() string { + if len(m.groupMetaMap) == 0 { + return "" + } + var defaults []string + for _, groupMeta := range m.groupMetaMap { + defaults = append(defaults, groupMeta.GroupVersion.String()) + } + sort.Strings(defaults) + return strings.Join(defaults, ",") +} + +// ValidateEnvRequestedVersions returns a list of versions that are requested in +// the KUBE_API_VERSIONS environment variable, but not enabled. +func (m *APIRegistrationManager) ValidateEnvRequestedVersions() []schema.GroupVersion { + var missingVersions []schema.GroupVersion + for _, v := range m.envRequestedVersions { + if _, found := m.enabledVersions[v]; !found { + missingVersions = append(missingVersions, v) + } + } + return missingVersions +} diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go new file mode 100644 index 000000000..213e34bc0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go @@ -0,0 +1,87 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apimachinery + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupMeta stores the metadata of a group. +type GroupMeta struct { + // GroupVersion represents the preferred version of the group. + GroupVersion schema.GroupVersion + + // GroupVersions is Group + all versions in that group. + GroupVersions []schema.GroupVersion + + // SelfLinker can set or get the SelfLink field of all API types. 
+ // TODO: when versioning changes, make this part of each API definition. + // TODO(lavalamp): Combine SelfLinker & ResourceVersioner interfaces, force all uses + // to go through the InterfacesFor method below. + SelfLinker runtime.SelfLinker + + // RESTMapper provides the default mapping between REST paths and the objects declared in api.Scheme and all known + // versions. + RESTMapper meta.RESTMapper + + // InterfacesFor returns the default Codec and ResourceVersioner for a given version + // string, or an error if the version is not known. + // TODO: make this stop being a func pointer and always use the default + // function provided below once every place that populates this field has been changed. + InterfacesFor func(version schema.GroupVersion) (*meta.VersionInterfaces, error) + + // InterfacesByVersion stores the per-version interfaces. + InterfacesByVersion map[schema.GroupVersion]*meta.VersionInterfaces +} + +// DefaultInterfacesFor returns the default Codec and ResourceVersioner for a given version +// string, or an error if the version is not known. +// TODO: Remove the "Default" prefix. +func (gm *GroupMeta) DefaultInterfacesFor(version schema.GroupVersion) (*meta.VersionInterfaces, error) { + if v, ok := gm.InterfacesByVersion[version]; ok { + return v, nil + } + return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, gm.GroupVersions) +} + +// AddVersionInterfaces adds the given version to the group. Only call during +// init, after that GroupMeta objects should be immutable. Not thread safe. +// (If you use this, be sure to set .InterfacesFor = .DefaultInterfacesFor) +// TODO: remove the "Interfaces" suffix and make this also maintain the +// .GroupVersions member. +func (gm *GroupMeta) AddVersionInterfaces(version schema.GroupVersion, interfaces *meta.VersionInterfaces) error { + if e, a := gm.GroupVersion.Group, version.Group; a != e { + return fmt.Errorf("got a version in group %v, but am in group %v", a, e) + } + if gm.InterfacesByVersion == nil { + gm.InterfacesByVersion = make(map[schema.GroupVersion]*meta.VersionInterfaces) + } + gm.InterfacesByVersion[version] = interfaces + + // TODO: refactor to make the below error not possible, this function + // should *set* GroupVersions rather than depend on it. + for _, v := range gm.GroupVersions { + if v == version { + return nil + } + } + return fmt.Errorf("added a version interface without the corresponding version %v being in the list %#v", version, gm.GroupVersions) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS new file mode 100755 index 000000000..381a52509 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -0,0 +1,33 @@ +reviewers: +- thockin +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- caesarxuchao +- liggitt +- nikhiljindal +- gmarek +- erictune +- davidopp +- sttts +- quinton-hoole +- kargakis +- luxas +- janetkuo +- justinsb +- ncdc +- timothysc +- soltysh +- dims +- madhusudancs +- hongchaodeng +- krousey +- mml +- mbohlool +- david-mcmahon +- therc +- mqliang +- kevin-wangzefeng +- jianhuiz +- feihujiang diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go new file mode 100644 index 000000000..b9a8fd02d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go @@ -0,0 +1,264 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func AddConversionFuncs(scheme *runtime.Scheme) error { + return scheme.AddConversionFuncs( + Convert_v1_TypeMeta_To_v1_TypeMeta, + + Convert_unversioned_ListMeta_To_unversioned_ListMeta, + + Convert_intstr_IntOrString_To_intstr_IntOrString, + + Convert_unversioned_Time_To_unversioned_Time, + + Convert_Slice_string_To_unversioned_Time, + + Convert_resource_Quantity_To_resource_Quantity, + + Convert_string_To_labels_Selector, + Convert_labels_Selector_To_string, + + Convert_string_To_fields_Selector, + Convert_fields_Selector_To_string, + + Convert_Pointer_bool_To_bool, + Convert_bool_To_Pointer_bool, + + Convert_Pointer_string_To_string, + Convert_string_To_Pointer_string, + + Convert_Pointer_int64_To_int, + Convert_int_To_Pointer_int64, + + Convert_Pointer_int32_To_int32, + Convert_int32_To_Pointer_int32, + + Convert_Pointer_float64_To_float64, + Convert_float64_To_Pointer_float64, + + Convert_map_to_unversioned_LabelSelector, + Convert_unversioned_LabelSelector_to_map, + + Convert_Slice_string_To_Slice_int32, + ) +} + +func Convert_Pointer_float64_To_float64(in **float64, out *float64, s conversion.Scope) error { + if *in == nil { + *out = 0 + return nil + } + *out = float64(**in) + return nil +} + +func Convert_float64_To_Pointer_float64(in *float64, out **float64, s conversion.Scope) error { + temp := float64(*in) + *out = &temp + return nil +} + +func Convert_Pointer_int32_To_int32(in **int32, out *int32, s conversion.Scope) error { + if *in == nil { + *out = 0 + return nil + } + *out = int32(**in) + return nil +} + +func Convert_int32_To_Pointer_int32(in *int32, out **int32, s conversion.Scope) error { + temp := int32(*in) + *out = &temp + return nil +} + +func Convert_Pointer_int64_To_int(in **int64, out *int, s conversion.Scope) error { + if *in == nil { + *out = 0 + return nil + } + *out = int(**in) + return nil +} + +func Convert_int_To_Pointer_int64(in *int, out **int64, s conversion.Scope) error { + temp := int64(*in) + *out = &temp + return nil +} + +func Convert_Pointer_string_To_string(in **string, out *string, s conversion.Scope) error { + if *in == nil { + *out = "" + return nil + } + *out = **in + return nil +} + +func Convert_string_To_Pointer_string(in *string, out **string, s conversion.Scope) error { + if in == nil { + stringVar := "" + *out = &stringVar + return nil + } + *out = in + return nil +} + +func Convert_Pointer_bool_To_bool(in **bool, out *bool, s conversion.Scope) error { + if *in == nil { + *out = false + return nil + } + *out = **in + return nil +} + +func Convert_bool_To_Pointer_bool(in *bool, out **bool, s conversion.Scope) error { + if in == nil { + boolVar := false + *out = &boolVar + return nil + } + *out = in + return nil +} + +// 
+k8s:conversion-fn=drop +func Convert_v1_TypeMeta_To_v1_TypeMeta(in, out *TypeMeta, s conversion.Scope) error { + // These values are explicitly not copied + //out.APIVersion = in.APIVersion + //out.Kind = in.Kind + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_unversioned_ListMeta_To_unversioned_ListMeta(in, out *ListMeta, s conversion.Scope) error { + *out = *in + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_intstr_IntOrString_To_intstr_IntOrString(in, out *intstr.IntOrString, s conversion.Scope) error { + *out = *in + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_unversioned_Time_To_unversioned_Time(in *Time, out *Time, s conversion.Scope) error { + // Cannot deep copy these, because time.Time has unexported fields. + *out = *in + return nil +} + +// Convert_Slice_string_To_unversioned_Time allows converting a URL query parameter value +func Convert_Slice_string_To_unversioned_Time(input *[]string, out *Time, s conversion.Scope) error { + str := "" + if len(*input) > 0 { + str = (*input)[0] + } + return out.UnmarshalQueryParameter(str) +} + +func Convert_string_To_labels_Selector(in *string, out *labels.Selector, s conversion.Scope) error { + selector, err := labels.Parse(*in) + if err != nil { + return err + } + *out = selector + return nil +} + +func Convert_string_To_fields_Selector(in *string, out *fields.Selector, s conversion.Scope) error { + selector, err := fields.ParseSelector(*in) + if err != nil { + return err + } + *out = selector + return nil +} + +func Convert_labels_Selector_To_string(in *labels.Selector, out *string, s conversion.Scope) error { + if *in == nil { + return nil + } + *out = (*in).String() + return nil +} + +func Convert_fields_Selector_To_string(in *fields.Selector, out *string, s conversion.Scope) error { + if *in == nil { + return nil + } + *out = (*in).String() + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_resource_Quantity_To_resource_Quantity(in *resource.Quantity, out *resource.Quantity, s conversion.Scope) error { + *out = *in + return nil +} + +func Convert_map_to_unversioned_LabelSelector(in *map[string]string, out *LabelSelector, s conversion.Scope) error { + if in == nil { + return nil + } + out = new(LabelSelector) + for labelKey, labelValue := range *in { + AddLabelToSelector(out, labelKey, labelValue) + } + return nil +} + +func Convert_unversioned_LabelSelector_to_map(in *LabelSelector, out *map[string]string, s conversion.Scope) error { + var err error + *out, err = LabelSelectorAsMap(in) + return err +} + +// Convert_Slice_string_To_Slice_int32 converts multiple query parameters or +// a single query parameter with a comma delimited value to multiple int32. +// This is used for port forwarding which needs the ports as int32. +func Convert_Slice_string_To_Slice_int32(in *[]string, out *[]int32, s conversion.Scope) error { + for _, s := range *in { + for _, v := range strings.Split(s, ",") { + x, err := strconv.ParseUint(v, 10, 16) + if err != nil { + return fmt.Errorf("cannot convert to []int32: %v", err) + } + *out = append(*out, int32(x)) + } + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go new file mode 100644 index 000000000..52273240f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2016 The Kubernetes Authors. 
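A small usage sketch, not part of this diff, of the query-parameter conversion helpers above: a comma-delimited string parameter becomes a list of int32 ports. Passing nil for the conversion.Scope argument relies on the function ignoring it, as the vendored body shows; the sample values are illustrative only.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// "8080,8443" arrives as a single query parameter value.
	in := []string{"8080,8443"}
	var ports []int32
	if err := metav1.Convert_Slice_string_To_Slice_int32(&in, &ports, nil); err != nil {
		panic(err)
	}
	fmt.Println(ports) // [8080 8443]
}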
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=meta.k8s.io +package v1 diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go new file mode 100644 index 000000000..fea458dfb --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go @@ -0,0 +1,47 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "time" +) + +// Duration is a wrapper around time.Duration which supports correct +// marshaling to YAML and JSON. In particular, it marshals into strings, which +// can be used as map keys in json. +type Duration struct { + time.Duration `protobuf:"varint,1,opt,name=duration,casttype=time.Duration"` +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (d *Duration) UnmarshalJSON(b []byte) error { + var str string + json.Unmarshal(b, &str) + + pd, err := time.ParseDuration(str) + if err != nil { + return err + } + d.Duration = pd + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (d Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(d.Duration.String()) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go new file mode 100644 index 000000000..72282474d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -0,0 +1,6915 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. 
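A brief round-trip sketch, not part of this diff, of the Duration wrapper defined just above: it marshals to a Go duration string, so it can be embedded in JSON documents and used as a map key. The expected outputs follow directly from the MarshalJSON/UnmarshalJSON implementations as vendored here.

package main

import (
	"encoding/json"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	d := metav1.Duration{Duration: 90 * time.Second}
	b, err := json.Marshal(d)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "1m30s"

	var back metav1.Duration
	if err := json.Unmarshal([]byte(`"250ms"`), &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Duration) // 250ms
}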
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto + + It has these top-level messages: + APIGroup + APIGroupList + APIResource + APIResourceList + APIVersions + DeleteOptions + Duration + ExportOptions + GetOptions + GroupKind + GroupResource + GroupVersion + GroupVersionForDiscovery + GroupVersionKind + GroupVersionResource + LabelSelector + LabelSelectorRequirement + ListMeta + ListOptions + ObjectMeta + OwnerReference + Preconditions + RootPaths + ServerAddressByClientCIDR + Status + StatusCause + StatusDetails + Time + Timestamp + TypeMeta + Verbs + WatchEvent +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import time "time" +import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *APIGroup) Reset() { *m = APIGroup{} } +func (*APIGroup) ProtoMessage() {} +func (*APIGroup) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *APIGroupList) Reset() { *m = APIGroupList{} } +func (*APIGroupList) ProtoMessage() {} +func (*APIGroupList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *APIResource) Reset() { *m = APIResource{} } +func (*APIResource) ProtoMessage() {} +func (*APIResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *APIResourceList) Reset() { *m = APIResourceList{} } +func (*APIResourceList) ProtoMessage() {} +func (*APIResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *APIVersions) Reset() { *m = APIVersions{} } +func (*APIVersions) ProtoMessage() {} +func (*APIVersions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } +func (*DeleteOptions) ProtoMessage() {} +func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *Duration) Reset() { *m = Duration{} } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *ExportOptions) Reset() { *m = ExportOptions{} } +func (*ExportOptions) ProtoMessage() {} +func (*ExportOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *GetOptions) Reset() { *m = GetOptions{} } +func (*GetOptions) ProtoMessage() {} +func (*GetOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *GroupKind) Reset() { *m = GroupKind{} } +func (*GroupKind) ProtoMessage() {} +func (*GroupKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *GroupResource) Reset() { *m = GroupResource{} } +func (*GroupResource) ProtoMessage() {} +func (*GroupResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *GroupVersion) Reset() { *m = GroupVersion{} } +func (*GroupVersion) ProtoMessage() {} +func (*GroupVersion) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{11} } + +func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } +func (*GroupVersionForDiscovery) ProtoMessage() {} +func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{12} +} + +func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } +func (*GroupVersionKind) ProtoMessage() {} +func (*GroupVersionKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } +func (*GroupVersionResource) ProtoMessage() {} +func (*GroupVersionResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + +func (m *LabelSelector) Reset() { *m = LabelSelector{} } +func (*LabelSelector) ProtoMessage() {} +func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + +func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } +func (*LabelSelectorRequirement) ProtoMessage() {} +func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{16} +} + +func (m *ListMeta) Reset() { *m = ListMeta{} } +func (*ListMeta) ProtoMessage() {} +func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } + +func (m *ListOptions) Reset() { *m = ListOptions{} } +func (*ListOptions) ProtoMessage() {} +func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } + +func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } +func (*ObjectMeta) ProtoMessage() {} +func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } + +func (m *OwnerReference) Reset() { *m = OwnerReference{} } +func (*OwnerReference) ProtoMessage() {} +func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } + +func (m *Preconditions) Reset() { *m = Preconditions{} } +func (*Preconditions) ProtoMessage() {} +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } + +func (m *RootPaths) Reset() { *m = RootPaths{} } +func (*RootPaths) ProtoMessage() {} +func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } + +func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } +func (*ServerAddressByClientCIDR) ProtoMessage() {} +func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{23} +} + +func (m *Status) Reset() { *m = Status{} } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } + +func (m *StatusCause) Reset() { *m = StatusCause{} } +func (*StatusCause) ProtoMessage() {} +func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } + +func (m *StatusDetails) Reset() { *m = StatusDetails{} } +func (*StatusDetails) ProtoMessage() {} +func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } + +func (m *Time) Reset() { *m = Time{} } +func (*Time) ProtoMessage() {} +func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } + +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) 
ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } + +func (m *Verbs) Reset() { *m = Verbs{} } +func (*Verbs) ProtoMessage() {} +func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } + +func (m *WatchEvent) Reset() { *m = WatchEvent{} } +func (*WatchEvent) ProtoMessage() {} +func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } + +func init() { + proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup") + proto.RegisterType((*APIGroupList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroupList") + proto.RegisterType((*APIResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResource") + proto.RegisterType((*APIResourceList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResourceList") + proto.RegisterType((*APIVersions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIVersions") + proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions") + proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration") + proto.RegisterType((*ExportOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ExportOptions") + proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions") + proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind") + proto.RegisterType((*GroupResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupResource") + proto.RegisterType((*GroupVersion)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersion") + proto.RegisterType((*GroupVersionForDiscovery)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery") + proto.RegisterType((*GroupVersionKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind") + proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource") + proto.RegisterType((*LabelSelector)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector") + proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement") + proto.RegisterType((*ListMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta") + proto.RegisterType((*ListOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions") + proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta") + proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference") + proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions") + proto.RegisterType((*RootPaths)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.RootPaths") + proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR") + proto.RegisterType((*Status)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Status") + proto.RegisterType((*StatusCause)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusCause") + proto.RegisterType((*StatusDetails)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusDetails") + proto.RegisterType((*Time)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Time") + proto.RegisterType((*Timestamp)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Timestamp") + proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TypeMeta") + proto.RegisterType((*Verbs)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Verbs") + proto.RegisterType((*WatchEvent)(nil), 
"k8s.io.apimachinery.pkg.apis.meta.v1.WatchEvent") +} +func (m *APIGroup) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *APIGroup) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + if len(m.Versions) > 0 { + for _, msg := range m.Versions { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.PreferredVersion.Size())) + n1, err := m.PreferredVersion.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, msg := range m.ServerAddressByClientCIDRs { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *APIGroupList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *APIGroupList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Groups) > 0 { + for _, msg := range m.Groups { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *APIResource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *APIResource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x10 + i++ + if m.Namespaced { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + if m.Verbs != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Verbs.Size())) + n2, err := m.Verbs.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if len(m.ShortNames) > 0 { + for _, s := range m.ShortNames { + data[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *APIResourceList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *APIResourceList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.GroupVersion))) + i += copy(data[i:], m.GroupVersion) + if len(m.APIResources) > 0 { + for _, msg := range m.APIResources { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *APIVersions) Marshal() 
(data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *APIVersions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Versions) > 0 { + for _, s := range m.Versions { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, msg := range m.ServerAddressByClientCIDRs { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DeleteOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeleteOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.GracePeriodSeconds != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.GracePeriodSeconds)) + } + if m.Preconditions != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Preconditions.Size())) + n3, err := m.Preconditions.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.OrphanDependents != nil { + data[i] = 0x18 + i++ + if *m.OrphanDependents { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + if m.PropagationPolicy != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.PropagationPolicy))) + i += copy(data[i:], *m.PropagationPolicy) + } + return i, nil +} + +func (m *Duration) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Duration) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Duration)) + return i, nil +} + +func (m *ExportOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ExportOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Export { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x10 + i++ + if m.Exact { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *GetOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GetOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) + i += copy(data[i:], m.ResourceVersion) + return i, nil +} + +func (m *GroupKind) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GroupKind) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += 
copy(data[i:], m.Group) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + return i, nil +} + +func (m *GroupResource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GroupResource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Resource))) + i += copy(data[i:], m.Resource) + return i, nil +} + +func (m *GroupVersion) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GroupVersion) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Version))) + i += copy(data[i:], m.Version) + return i, nil +} + +func (m *GroupVersionForDiscovery) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GroupVersionForDiscovery) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.GroupVersion))) + i += copy(data[i:], m.GroupVersion) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Version))) + i += copy(data[i:], m.Version) + return i, nil +} + +func (m *GroupVersionKind) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GroupVersionKind) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Version))) + i += copy(data[i:], m.Version) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + return i, nil +} + +func (m *GroupVersionResource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GroupVersionResource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Version))) + i += copy(data[i:], m.Version) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Resource))) + i += copy(data[i:], m.Resource) + return i, nil +} + +func (m *LabelSelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LabelSelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + 
if len(m.MatchLabels) > 0 { + for k := range m.MatchLabels { + data[i] = 0xa + i++ + v := m.MatchLabels[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if len(m.MatchExpressions) > 0 { + for _, msg := range m.MatchExpressions { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LabelSelectorRequirement) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LabelSelectorRequirement) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) + i += copy(data[i:], m.Operator) + if len(m.Values) > 0 { + for _, s := range m.Values { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *ListMeta) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ListMeta) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SelfLink))) + i += copy(data[i:], m.SelfLink) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) + i += copy(data[i:], m.ResourceVersion) + return i, nil +} + +func (m *ListOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ListOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.LabelSelector))) + i += copy(data[i:], m.LabelSelector) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FieldSelector))) + i += copy(data[i:], m.FieldSelector) + data[i] = 0x18 + i++ + if m.Watch { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) + i += copy(data[i:], m.ResourceVersion) + if m.TimeoutSeconds != nil { + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TimeoutSeconds)) + } + return i, nil +} + +func (m *ObjectMeta) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectMeta) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.GenerateName))) + i 
+= copy(data[i:], m.GenerateName) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) + i += copy(data[i:], m.Namespace) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SelfLink))) + i += copy(data[i:], m.SelfLink) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.UID))) + i += copy(data[i:], m.UID) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) + i += copy(data[i:], m.ResourceVersion) + data[i] = 0x38 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Generation)) + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CreationTimestamp.Size())) + n4, err := m.CreationTimestamp.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + if m.DeletionTimestamp != nil { + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(m.DeletionTimestamp.Size())) + n5, err := m.DeletionTimestamp.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if m.DeletionGracePeriodSeconds != nil { + data[i] = 0x50 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.DeletionGracePeriodSeconds)) + } + if len(m.Labels) > 0 { + for k := range m.Labels { + data[i] = 0x5a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if len(m.Annotations) > 0 { + for k := range m.Annotations { + data[i] = 0x62 + i++ + v := m.Annotations[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if len(m.OwnerReferences) > 0 { + for _, msg := range m.OwnerReferences { + data[i] = 0x6a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + data[i] = 0x72 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x7a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ClusterName))) + i += copy(data[i:], m.ClusterName) + return i, nil +} + +func (m *OwnerReference) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *OwnerReference) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.UID))) + i += copy(data[i:], m.UID) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + if m.Controller != nil { + data[i] = 0x30 + i++ + 
if *m.Controller { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + if m.BlockOwnerDeletion != nil { + data[i] = 0x38 + i++ + if *m.BlockOwnerDeletion { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *Preconditions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Preconditions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.UID != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.UID))) + i += copy(data[i:], *m.UID) + } + return i, nil +} + +func (m *RootPaths) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RootPaths) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *ServerAddressByClientCIDR) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServerAddressByClientCIDR) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ClientCIDR))) + i += copy(data[i:], m.ClientCIDR) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ServerAddress))) + i += copy(data[i:], m.ServerAddress) + return i, nil +} + +func (m *Status) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Status) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + if m.Details != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Details.Size())) + n7, err := m.Details.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Code)) + return i, nil +} + +func (m *StatusCause) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StatusCause) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x1a + i++ + i = 
encodeVarintGenerated(data, i, uint64(len(m.Field))) + i += copy(data[i:], m.Field) + return i, nil +} + +func (m *StatusDetails) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StatusDetails) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + if len(m.Causes) > 0 { + for _, msg := range m.Causes { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RetryAfterSeconds)) + return i, nil +} + +func (m *Timestamp) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Timestamp) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Seconds)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Nanos)) + return i, nil +} + +func (m *TypeMeta) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *TypeMeta) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + return i, nil +} + +func (m Verbs) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m Verbs) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *WatchEvent) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *WatchEvent) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Object.Size())) + n8, err := m.Object.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 
48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *APIGroup) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.PreferredVersion.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, e := range m.ServerAddressByClientCIDRs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIGroupList) Size() (n int) { + var l int + _ = l + if len(m.Groups) > 0 { + for _, e := range m.Groups { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIResource) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + if m.Verbs != nil { + l = m.Verbs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ShortNames) > 0 { + for _, s := range m.ShortNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIResourceList) Size() (n int) { + var l int + _ = l + l = len(m.GroupVersion) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.APIResources) > 0 { + for _, e := range m.APIResources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIVersions) Size() (n int) { + var l int + _ = l + if len(m.Versions) > 0 { + for _, s := range m.Versions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, e := range m.ServerAddressByClientCIDRs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeleteOptions) Size() (n int) { + var l int + _ = l + if m.GracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.GracePeriodSeconds)) + } + if m.Preconditions != nil { + l = m.Preconditions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.OrphanDependents != nil { + n += 2 + } + if m.PropagationPolicy != nil { + l = len(*m.PropagationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Duration) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Duration)) + return n +} + +func (m *ExportOptions) Size() (n int) { + var l int + _ = l + n += 2 + n += 2 + return n +} + +func (m *GetOptions) Size() (n int) { + var l int + _ = l + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupKind) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupResource) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersion) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + return n 
+} + +func (m *GroupVersionForDiscovery) Size() (n int) { + var l int + _ = l + l = len(m.GroupVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersionKind) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersionResource) Size() (n int) { + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LabelSelector) Size() (n int) { + var l int + _ = l + if len(m.MatchLabels) > 0 { + for k, v := range m.MatchLabels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LabelSelectorRequirement) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ListMeta) Size() (n int) { + var l int + _ = l + l = len(m.SelfLink) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ListOptions) Size() (n int) { + var l int + _ = l + l = len(m.LabelSelector) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldSelector) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + return n +} + +func (m *ObjectMeta) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.GenerateName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SelfLink) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + l = m.CreationTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.DeletionTimestamp != nil { + l = m.DeletionTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DeletionGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.DeletionGracePeriodSeconds)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.OwnerReferences) > 0 { + for _, e := range m.OwnerReferences { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Finalizers) > 0 { + for _, s := 
range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ClusterName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *OwnerReference) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + if m.Controller != nil { + n += 2 + } + if m.BlockOwnerDeletion != nil { + n += 2 + } + return n +} + +func (m *Preconditions) Size() (n int) { + var l int + _ = l + if m.UID != nil { + l = len(*m.UID) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RootPaths) Size() (n int) { + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServerAddressByClientCIDR) Size() (n int) { + var l int + _ = l + l = len(m.ClientCIDR) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ServerAddress) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Status) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + if m.Details != nil { + l = m.Details.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Code)) + return n +} + +func (m *StatusCause) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Field) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatusDetails) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Causes) > 0 { + for _, e := range m.Causes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.RetryAfterSeconds)) + return n +} + +func (m *Timestamp) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Seconds)) + n += 1 + sovGenerated(uint64(m.Nanos)) + return n +} + +func (m *TypeMeta) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m Verbs) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *WatchEvent) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *APIGroup) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIGroup{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, + `PreferredVersion:` + 
strings.Replace(strings.Replace(this.PreferredVersion.String(), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, + `ServerAddressByClientCIDRs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServerAddressByClientCIDRs), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *APIGroupList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIGroupList{`, + `Groups:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Groups), "APIGroup", "APIGroup", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *APIResource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIResource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespaced:` + fmt.Sprintf("%v", this.Namespaced) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Verbs:` + strings.Replace(fmt.Sprintf("%v", this.Verbs), "Verbs", "Verbs", 1) + `,`, + `ShortNames:` + fmt.Sprintf("%v", this.ShortNames) + `,`, + `}`, + }, "") + return s +} +func (this *APIResourceList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIResourceList{`, + `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, + `APIResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.APIResources), "APIResource", "APIResource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeleteOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteOptions{`, + `GracePeriodSeconds:` + valueToStringGenerated(this.GracePeriodSeconds) + `,`, + `Preconditions:` + strings.Replace(fmt.Sprintf("%v", this.Preconditions), "Preconditions", "Preconditions", 1) + `,`, + `OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`, + `PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *Duration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Duration{`, + `Duration:` + fmt.Sprintf("%v", this.Duration) + `,`, + `}`, + }, "") + return s +} +func (this *ExportOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExportOptions{`, + `Export:` + fmt.Sprintf("%v", this.Export) + `,`, + `Exact:` + fmt.Sprintf("%v", this.Exact) + `,`, + `}`, + }, "") + return s +} +func (this *GetOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetOptions{`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `}`, + }, "") + return s +} +func (this *GroupVersionForDiscovery) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GroupVersionForDiscovery{`, + `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `}`, + }, "") + return s +} +func (this *LabelSelector) String() string { + if this == nil { + return "nil" + } + keysForMatchLabels := make([]string, 0, len(this.MatchLabels)) + for k := range this.MatchLabels { + keysForMatchLabels = append(keysForMatchLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels) + mapStringForMatchLabels := "map[string]string{" + for _, k := range keysForMatchLabels { + mapStringForMatchLabels += fmt.Sprintf("%v: %v,", k, this.MatchLabels[k]) + } + mapStringForMatchLabels += "}" + s := 
strings.Join([]string{`&LabelSelector{`, + `MatchLabels:` + mapStringForMatchLabels + `,`, + `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "LabelSelectorRequirement", "LabelSelectorRequirement", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LabelSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelSelectorRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *ListMeta) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListMeta{`, + `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `}`, + }, "") + return s +} +func (this *ListOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListOptions{`, + `LabelSelector:` + fmt.Sprintf("%v", this.LabelSelector) + `,`, + `FieldSelector:` + fmt.Sprintf("%v", this.FieldSelector) + `,`, + `Watch:` + fmt.Sprintf("%v", this.Watch) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMeta) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&ObjectMeta{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `GenerateName:` + fmt.Sprintf("%v", this.GenerateName) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `CreationTimestamp:` + strings.Replace(strings.Replace(this.CreationTimestamp.String(), "Time", "Time", 1), `&`, ``, 1) + `,`, + `DeletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.DeletionTimestamp), "Time", "Time", 1) + `,`, + `DeletionGracePeriodSeconds:` + valueToStringGenerated(this.DeletionGracePeriodSeconds) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `OwnerReferences:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OwnerReferences), "OwnerReference", "OwnerReference", 1), `&`, ``, 1) + `,`, + `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, + `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, + `}`, + }, "") + return s +} +func (this *OwnerReference) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&OwnerReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Controller:` + valueToStringGenerated(this.Controller) + `,`, + `BlockOwnerDeletion:` + valueToStringGenerated(this.BlockOwnerDeletion) + `,`, + `}`, + }, "") + return s +} +func (this *Preconditions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Preconditions{`, + `UID:` + valueToStringGenerated(this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *RootPaths) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RootPaths{`, + `Paths:` + fmt.Sprintf("%v", this.Paths) + `,`, + `}`, + }, "") + return s +} +func (this *ServerAddressByClientCIDR) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServerAddressByClientCIDR{`, + `ClientCIDR:` + fmt.Sprintf("%v", this.ClientCIDR) + `,`, + `ServerAddress:` + fmt.Sprintf("%v", this.ServerAddress) + `,`, + `}`, + }, "") + return s +} +func (this *Status) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Status{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Details:` + strings.Replace(fmt.Sprintf("%v", this.Details), "StatusDetails", "StatusDetails", 1) + `,`, + `Code:` + fmt.Sprintf("%v", this.Code) + `,`, + `}`, + }, "") + return s +} +func (this *StatusCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatusCause{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Field:` + fmt.Sprintf("%v", this.Field) + `,`, + `}`, + }, "") + return s +} +func (this *StatusDetails) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatusDetails{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Causes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Causes), "StatusCause", "StatusCause", 1), `&`, ``, 1) + `,`, + `RetryAfterSeconds:` + fmt.Sprintf("%v", this.RetryAfterSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *Timestamp) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Timestamp{`, + `Seconds:` + fmt.Sprintf("%v", this.Seconds) + `,`, + `Nanos:` + fmt.Sprintf("%v", this.Nanos) + `,`, + `}`, + }, "") + return s +} +func (this *TypeMeta) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TypeMeta{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `}`, + }, "") + return s +} +func (this *WatchEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchEvent{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(strings.Replace(this.Object.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := 
reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *APIGroup) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIGroup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIGroup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, GroupVersionForDiscovery{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PreferredVersion.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) + if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIGroupList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIGroupList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIGroupList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, APIGroup{}) + if err := m.Groups[len(m.Groups)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIResource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaced", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Namespaced = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Verbs == nil { + m.Verbs = Verbs{} + } + if err := m.Verbs.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShortNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShortNames = append(m.ShortNames, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIResourceList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIResourceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIResourceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIResources = append(m.APIResources, APIResource{}) + if err := m.APIResources[len(m.APIResources)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIVersions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIVersions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIVersions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) + if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + 
return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.GracePeriodSeconds = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Preconditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Preconditions == nil { + m.Preconditions = &Preconditions{} + } + if err := m.Preconditions.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OrphanDependents", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OrphanDependents = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PropagationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := DeletionPropagation(data[iNdEx:postIndex]) + m.PropagationPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Duration) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Duration |= (time.Duration(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExportOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Export", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Export = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Exact = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: GetOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupKind) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupKind: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupKind: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupResource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersion) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersionForDiscovery) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersionForDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersionForDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersionKind) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersionKind: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersionKind: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersionResource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 
{ + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + 
stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.MatchLabels == nil { + m.MatchLabels = make(map[string]string) + } + m.MatchLabels[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelectorRequirement) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) 
<< shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = LabelSelectorOperator(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListMeta) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelfLink = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen 
|= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelSelector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldSelector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Watch", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Watch = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMeta) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GenerateName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelfLink = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Generation |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreationTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CreationTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeletionTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeletionTimestamp == nil { + m.DeletionTimestamp = &Time{} + } + if err := m.DeletionTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex 
+ case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeletionGracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DeletionGracePeriodSeconds = &v + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Labels == nil { + m.Labels = make(map[string]string) + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OwnerReferences = append(m.OwnerReferences, OwnerReference{}) + if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Finalizers = append(m.Finalizers, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OwnerReference) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OwnerReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OwnerReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen 
< 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Controller = &b + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockOwnerDeletion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.BlockOwnerDeletion = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Preconditions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Preconditions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Preconditions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_apimachinery_pkg_types.UID(data[iNdEx:postIndex]) + m.UID = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RootPaths) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RootPaths: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RootPaths: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServerAddressByClientCIDR) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServerAddressByClientCIDR: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServerAddressByClientCIDR: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientCIDR", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientCIDR = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddress = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Status) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Status: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = StatusReason(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Details == nil { + m.Details = &StatusDetails{} + } + if err := m.Details.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Code |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusCause) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = CauseType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Field", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Field = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusDetails) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusDetails: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusDetails: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Causes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Causes = append(m.Causes, StatusCause{}) + if err := m.Causes[len(m.Causes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RetryAfterSeconds", wireType) + } + m.RetryAfterSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.RetryAfterSeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Timestamp) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Seconds |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Nanos |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TypeMeta) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Verbs) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Verbs: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Verbs: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchEvent) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 2160 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 
0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x19, 0xcd, 0x6f, 0x23, 0x57, + 0x3d, 0x63, 0xc7, 0x5e, 0xfb, 0xe7, 0x38, 0x1f, 0xaf, 0x59, 0x70, 0x23, 0x61, 0xa7, 0xd3, 0x0a, + 0xa5, 0xb0, 0xb5, 0x49, 0x0a, 0xd5, 0xb2, 0xc0, 0x42, 0x26, 0xce, 0x46, 0x51, 0x37, 0x9b, 0xe8, + 0xa5, 0xbb, 0x88, 0x65, 0x85, 0x98, 0xcc, 0xbc, 0x38, 0x43, 0xc6, 0x33, 0xc3, 0x7b, 0x63, 0x6f, + 0x4c, 0x0f, 0x54, 0x2a, 0x48, 0x1c, 0x10, 0xda, 0x23, 0x07, 0x84, 0xba, 0x82, 0x1b, 0x37, 0xfe, + 0x06, 0x24, 0xf6, 0x58, 0x89, 0x0b, 0x07, 0x64, 0xb1, 0xee, 0x81, 0x23, 0xf7, 0x9c, 0xd0, 0x7b, + 0xf3, 0xe6, 0xcb, 0x8e, 0x9b, 0x31, 0xed, 0xa1, 0xa7, 0x78, 0x7e, 0xdf, 0xef, 0xf7, 0xfd, 0x5e, + 0xe0, 0xe0, 0xfc, 0x36, 0x6b, 0x5a, 0x6e, 0xeb, 0xbc, 0x77, 0x42, 0xa8, 0x43, 0x7c, 0xc2, 0x5a, + 0x7d, 0xe2, 0x98, 0x2e, 0x6d, 0x49, 0x84, 0xee, 0x59, 0x5d, 0xdd, 0x38, 0xb3, 0x1c, 0x42, 0x07, + 0x2d, 0xef, 0xbc, 0xc3, 0x01, 0xac, 0xd5, 0x25, 0xbe, 0xde, 0xea, 0x6f, 0xb6, 0x3a, 0xc4, 0x21, + 0x54, 0xf7, 0x89, 0xd9, 0xf4, 0xa8, 0xeb, 0xbb, 0xe8, 0x8d, 0x80, 0xab, 0x99, 0xe4, 0x6a, 0x7a, + 0xe7, 0x1d, 0x0e, 0x60, 0x4d, 0xce, 0xd5, 0xec, 0x6f, 0xae, 0xbd, 0xd5, 0xb1, 0xfc, 0xb3, 0xde, + 0x49, 0xd3, 0x70, 0xbb, 0xad, 0x8e, 0xdb, 0x71, 0x5b, 0x82, 0xf9, 0xa4, 0x77, 0x2a, 0xbe, 0xc4, + 0x87, 0xf8, 0x15, 0x08, 0x5d, 0x9b, 0x6a, 0x0a, 0xed, 0x39, 0xbe, 0xd5, 0x25, 0xe3, 0x56, 0xac, + 0xbd, 0x73, 0x1d, 0x03, 0x33, 0xce, 0x48, 0x57, 0x9f, 0xe0, 0x7b, 0x7b, 0x1a, 0x5f, 0xcf, 0xb7, + 0xec, 0x96, 0xe5, 0xf8, 0xcc, 0xa7, 0xe3, 0x4c, 0xea, 0xdf, 0xf3, 0x50, 0xda, 0x3e, 0xda, 0xdf, + 0xa3, 0x6e, 0xcf, 0x43, 0xeb, 0x30, 0xef, 0xe8, 0x5d, 0x52, 0x53, 0xd6, 0x95, 0x8d, 0xb2, 0xb6, + 0xf0, 0x62, 0xd8, 0x98, 0x1b, 0x0d, 0x1b, 0xf3, 0x0f, 0xf4, 0x2e, 0xc1, 0x02, 0x83, 0x6c, 0x28, + 0xf5, 0x09, 0x65, 0x96, 0xeb, 0xb0, 0x5a, 0x6e, 0x3d, 0xbf, 0x51, 0xd9, 0xba, 0xdb, 0xcc, 0xe2, + 0xb4, 0xa6, 0x50, 0xf0, 0x28, 0x60, 0xbd, 0xe7, 0xd2, 0xb6, 0xc5, 0x0c, 0xb7, 0x4f, 0xe8, 0x40, + 0x5b, 0x96, 0x5a, 0x4a, 0x12, 0xc9, 0x70, 0xa4, 0x01, 0xfd, 0x4a, 0x81, 0x65, 0x8f, 0x92, 0x53, + 0x42, 0x29, 0x31, 0x25, 0xbe, 0x96, 0x5f, 0x57, 0x3e, 0x07, 0xb5, 0x35, 0xa9, 0x76, 0xf9, 0x68, + 0x4c, 0x3e, 0x9e, 0xd0, 0x88, 0xfe, 0xa4, 0xc0, 0x1a, 0x23, 0xb4, 0x4f, 0xe8, 0xb6, 0x69, 0x52, + 0xc2, 0x98, 0x36, 0xd8, 0xb1, 0x2d, 0xe2, 0xf8, 0x3b, 0xfb, 0x6d, 0xcc, 0x6a, 0xf3, 0xc2, 0x0f, + 0xdf, 0xcf, 0x66, 0xd0, 0xf1, 0x34, 0x39, 0x9a, 0x2a, 0x2d, 0x5a, 0x9b, 0x4a, 0xc2, 0xf0, 0xa7, + 0x98, 0xa1, 0x9e, 0xc2, 0x42, 0x18, 0xc8, 0xfb, 0x16, 0xf3, 0xd1, 0x23, 0x28, 0x76, 0xf8, 0x07, + 0xab, 0x29, 0xc2, 0xc0, 0x66, 0x36, 0x03, 0x43, 0x19, 0xda, 0xa2, 0xb4, 0xa7, 0x28, 0x3e, 0x19, + 0x96, 0xd2, 0xd4, 0x0f, 0x73, 0x50, 0xd9, 0x3e, 0xda, 0xc7, 0x84, 0xb9, 0x3d, 0x6a, 0x90, 0x0c, + 0x49, 0xb3, 0x05, 0xc0, 0xff, 0x32, 0x4f, 0x37, 0x88, 0x59, 0xcb, 0xad, 0x2b, 0x1b, 0x25, 0x0d, + 0x49, 0x3a, 0x78, 0x10, 0x61, 0x70, 0x82, 0x8a, 0x4b, 0x3d, 0xb7, 0x1c, 0x53, 0x44, 0x3b, 0x21, + 0xf5, 0x5d, 0xcb, 0x31, 0xb1, 0xc0, 0xa0, 0xfb, 0x50, 0xe8, 0x13, 0x7a, 0xc2, 0xfd, 0xcf, 0x13, + 0xe2, 0xeb, 0xd9, 0x8e, 0xf7, 0x88, 0xb3, 0x68, 0xe5, 0xd1, 0xb0, 0x51, 0x10, 0x3f, 0x71, 0x20, + 0x04, 0x35, 0x01, 0xd8, 0x99, 0x4b, 0x7d, 0x61, 0x4e, 0xad, 0xb0, 0x9e, 0xdf, 0x28, 0x6b, 0x8b, + 0xdc, 0xbe, 0xe3, 0x08, 0x8a, 0x13, 0x14, 0xea, 0x5f, 0x15, 0x58, 0x4a, 0x78, 0x41, 0x78, 0xfc, + 0x36, 0x2c, 0x74, 0x12, 0xf9, 0x26, 0x3d, 0xb2, 0x2a, 0x6d, 0x5f, 0x48, 0xe6, 0x22, 0x4e, 0x51, + 0x22, 0x02, 0x65, 0x2a, 0x25, 0x85, 0x75, 0xb5, 0x99, 0x39, 0x5c, 0xa1, 0x0d, 0xb1, 0xa6, 0x04, + 0x90, 0xe1, 0x58, 0xb2, 0xfa, 0x1f, 
0x45, 0x84, 0x2e, 0xac, 0x34, 0xb4, 0x91, 0xa8, 0x66, 0x45, + 0x1c, 0x79, 0x61, 0x4a, 0x25, 0x5e, 0x53, 0x02, 0xb9, 0x2f, 0x44, 0x09, 0xdc, 0x29, 0xfd, 0xfe, + 0xa3, 0xc6, 0xdc, 0x07, 0xff, 0x5a, 0x9f, 0x53, 0x3f, 0xc9, 0x41, 0xb5, 0x4d, 0x6c, 0xe2, 0x93, + 0x43, 0xcf, 0x17, 0x27, 0xb8, 0x07, 0xa8, 0x43, 0x75, 0x83, 0x1c, 0x11, 0x6a, 0xb9, 0xe6, 0x31, + 0x31, 0x5c, 0xc7, 0x64, 0x22, 0x44, 0x79, 0xed, 0x4b, 0xa3, 0x61, 0x03, 0xed, 0x4d, 0x60, 0xf1, + 0x15, 0x1c, 0xc8, 0x86, 0xaa, 0x47, 0xc5, 0x6f, 0xcb, 0x97, 0x6d, 0x90, 0xa7, 0xdf, 0xdb, 0xd9, + 0xce, 0x7e, 0x94, 0x64, 0xd5, 0x56, 0x46, 0xc3, 0x46, 0x35, 0x05, 0xc2, 0x69, 0xe1, 0xe8, 0x07, + 0xb0, 0xec, 0x52, 0xef, 0x4c, 0x77, 0xda, 0xc4, 0x23, 0x8e, 0x49, 0x1c, 0x9f, 0x89, 0x92, 0x28, + 0x69, 0xab, 0xbc, 0x79, 0x1d, 0x8e, 0xe1, 0xf0, 0x04, 0x35, 0x7a, 0x0c, 0x2b, 0x1e, 0x75, 0x3d, + 0xbd, 0xa3, 0x73, 0x89, 0x47, 0xae, 0x6d, 0x19, 0x03, 0x51, 0x32, 0x65, 0xed, 0xd6, 0x68, 0xd8, + 0x58, 0x39, 0x1a, 0x47, 0x5e, 0x0e, 0x1b, 0xaf, 0x08, 0xd7, 0x71, 0x48, 0x8c, 0xc4, 0x93, 0x62, + 0xd4, 0x7d, 0x28, 0xb5, 0x7b, 0x54, 0x40, 0xd0, 0xf7, 0xa0, 0x64, 0xca, 0xdf, 0xd2, 0xab, 0xaf, + 0x85, 0x9d, 0x3d, 0xa4, 0xb9, 0x1c, 0x36, 0xaa, 0x7c, 0x80, 0x35, 0x43, 0x00, 0x8e, 0x58, 0xd4, + 0x27, 0x50, 0xdd, 0xbd, 0xf0, 0x5c, 0xea, 0x87, 0xf1, 0xfa, 0x2a, 0x14, 0x89, 0x00, 0x08, 0x69, + 0xa5, 0xb8, 0x1d, 0x05, 0x64, 0x58, 0x62, 0xd1, 0xeb, 0x50, 0x20, 0x17, 0xba, 0xe1, 0xcb, 0xbe, + 0x52, 0x95, 0x64, 0x85, 0x5d, 0x0e, 0xc4, 0x01, 0x4e, 0x3d, 0x04, 0xd8, 0x23, 0x91, 0xe8, 0x6d, + 0x58, 0x0a, 0x6b, 0x22, 0x5d, 0xaa, 0x5f, 0x96, 0xcc, 0x4b, 0x38, 0x8d, 0xc6, 0xe3, 0xf4, 0xea, + 0x13, 0x28, 0x8b, 0x72, 0xe6, 0xfd, 0x88, 0x9b, 0x20, 0xaa, 0x59, 0x4a, 0x89, 0x4c, 0x10, 0x14, + 0x38, 0xc0, 0x45, 0x0d, 0x2d, 0x37, 0xad, 0xa1, 0x25, 0xb2, 0xd7, 0x86, 0x6a, 0xc0, 0x1b, 0xf6, + 0xd8, 0x4c, 0x1a, 0x6e, 0x41, 0x29, 0x34, 0x53, 0x6a, 0x89, 0x66, 0x6b, 0x28, 0x08, 0x47, 0x14, + 0x09, 0x6d, 0x67, 0x90, 0x6a, 0x4d, 0xd9, 0x94, 0xbd, 0x09, 0x37, 0x64, 0x73, 0x90, 0xba, 0x96, + 0x24, 0xd9, 0x8d, 0xd0, 0x67, 0x21, 0x3e, 0xa1, 0xe9, 0x97, 0x50, 0x9b, 0x36, 0x90, 0x3f, 0x43, + 0xf3, 0xcc, 0x6e, 0x8a, 0xfa, 0x3b, 0x05, 0x96, 0x93, 0x92, 0xb2, 0x87, 0x2f, 0xbb, 0x92, 0xeb, + 0x47, 0x57, 0xc2, 0x23, 0x7f, 0x54, 0x60, 0x35, 0x75, 0xb4, 0x99, 0x22, 0x3e, 0x83, 0x51, 0xc9, + 0xe4, 0xc8, 0xcf, 0x90, 0x1c, 0xff, 0xc8, 0x41, 0xf5, 0xbe, 0x7e, 0x42, 0xec, 0x63, 0x62, 0x13, + 0xc3, 0x77, 0x29, 0x7a, 0x1f, 0x2a, 0x5d, 0xdd, 0x37, 0xce, 0x04, 0x34, 0x5c, 0x2e, 0xda, 0xd9, + 0xda, 0x5f, 0x4a, 0x52, 0xf3, 0x20, 0x16, 0xb3, 0xeb, 0xf8, 0x74, 0xa0, 0xbd, 0x22, 0x4d, 0xaa, + 0x24, 0x30, 0x38, 0xa9, 0x4d, 0x6c, 0x84, 0xe2, 0x7b, 0xf7, 0xc2, 0xe3, 0xfd, 0x7f, 0xf6, 0x45, + 0x34, 0x65, 0x02, 0x26, 0x3f, 0xef, 0x59, 0x94, 0x74, 0x89, 0xe3, 0xc7, 0x1b, 0xe1, 0xc1, 0x98, + 0x7c, 0x3c, 0xa1, 0x71, 0xed, 0x2e, 0x2c, 0x8f, 0x1b, 0x8f, 0x96, 0x21, 0x7f, 0x4e, 0x06, 0x41, + 0xbc, 0x30, 0xff, 0x89, 0x56, 0xa1, 0xd0, 0xd7, 0xed, 0x9e, 0xac, 0x46, 0x1c, 0x7c, 0xdc, 0xc9, + 0xdd, 0x56, 0xd4, 0x3f, 0x2b, 0x50, 0x9b, 0x66, 0x08, 0xfa, 0x4a, 0x42, 0x90, 0x56, 0x91, 0x56, + 0xe5, 0xdf, 0x25, 0x83, 0x40, 0xea, 0x2e, 0x94, 0x5c, 0x8f, 0xef, 0xf0, 0x2e, 0x95, 0x51, 0x7f, + 0x33, 0x8c, 0xe4, 0xa1, 0x84, 0x5f, 0x0e, 0x1b, 0x37, 0x53, 0xe2, 0x43, 0x04, 0x8e, 0x58, 0x91, + 0x0a, 0x45, 0x61, 0x0f, 0x9f, 0x27, 0x7c, 0xf2, 0x03, 0xef, 0xad, 0x8f, 0x04, 0x04, 0x4b, 0x8c, + 0xfa, 0x3e, 0x94, 0xf8, 0x62, 0x73, 0x40, 0x7c, 0x9d, 0x27, 0x10, 0x23, 0xf6, 0xe9, 0x7d, 0xcb, + 0x39, 0x97, 0xa6, 0x45, 0x09, 0x74, 0x2c, 0xe1, 0x38, 0xa2, 
0xb8, 0xaa, 0xc5, 0xe6, 0x66, 0x6c, + 0xb1, 0x7f, 0xc9, 0x41, 0x85, 0x6b, 0x0f, 0xbb, 0xf6, 0x77, 0xa0, 0x6a, 0x27, 0xcf, 0x24, 0xad, + 0xb8, 0x29, 0x05, 0xa6, 0xb3, 0x14, 0xa7, 0x69, 0x39, 0xf3, 0xa9, 0x45, 0x6c, 0x33, 0x62, 0xce, + 0xa5, 0x99, 0xef, 0x25, 0x91, 0x38, 0x4d, 0xcb, 0x6b, 0xf1, 0x29, 0x8f, 0xb6, 0x9c, 0xbc, 0x51, + 0x2d, 0xfe, 0x90, 0x03, 0x71, 0x80, 0xbb, 0xea, 0xc4, 0xf3, 0xb3, 0x9d, 0x18, 0xdd, 0x81, 0x45, + 0x3e, 0x1e, 0xdd, 0x9e, 0x1f, 0xae, 0x27, 0x05, 0x31, 0x48, 0xd1, 0x68, 0xd8, 0x58, 0x7c, 0x2f, + 0x85, 0xc1, 0x63, 0x94, 0xea, 0x87, 0x00, 0x70, 0x78, 0xf2, 0x33, 0x62, 0x04, 0xd1, 0xba, 0x7e, + 0x29, 0xe7, 0xfd, 0x56, 0xde, 0x05, 0x39, 0x54, 0x3a, 0x24, 0xee, 0xb7, 0x09, 0x1c, 0x4e, 0x51, + 0xa2, 0x16, 0x94, 0xa3, 0x45, 0x5d, 0xf6, 0x92, 0x15, 0xc9, 0x56, 0x8e, 0xb6, 0x79, 0x1c, 0xd3, + 0xa4, 0x52, 0x67, 0xfe, 0xda, 0xd4, 0xd1, 0x20, 0xdf, 0xb3, 0x4c, 0x71, 0xf4, 0xb2, 0xf6, 0x8d, + 0x30, 0xfd, 0x1f, 0xee, 0xb7, 0x2f, 0x87, 0x8d, 0xd7, 0xa6, 0x5d, 0x71, 0xfd, 0x81, 0x47, 0x58, + 0xf3, 0xe1, 0x7e, 0x1b, 0x73, 0xe6, 0xab, 0x82, 0x51, 0x9c, 0x31, 0x18, 0x5b, 0x00, 0xf2, 0xd4, + 0x9c, 0xfb, 0x46, 0x10, 0x88, 0xf0, 0xd2, 0xb2, 0x17, 0x61, 0x70, 0x82, 0x0a, 0x31, 0x58, 0x31, + 0x28, 0x11, 0xbf, 0x79, 0xb8, 0x98, 0xaf, 0x77, 0xbd, 0x5a, 0x49, 0xec, 0x87, 0x5f, 0xcb, 0xd6, + 0x9d, 0x38, 0x9b, 0xf6, 0xaa, 0x54, 0xb3, 0xb2, 0x33, 0x2e, 0x0c, 0x4f, 0xca, 0x47, 0x2e, 0xac, + 0x98, 0x72, 0x5d, 0x8b, 0x95, 0x96, 0x67, 0x56, 0x7a, 0x93, 0x2b, 0x6c, 0x8f, 0x0b, 0xc2, 0x93, + 0xb2, 0xd1, 0x4f, 0x60, 0x2d, 0x04, 0x4e, 0xee, 0xcc, 0x35, 0x10, 0x9e, 0xaa, 0xf3, 0x2d, 0xbe, + 0x3d, 0x95, 0x0a, 0x7f, 0x8a, 0x04, 0x64, 0x42, 0xd1, 0x0e, 0x66, 0x4b, 0x45, 0x34, 0xf6, 0xef, + 0x66, 0x3b, 0x45, 0x9c, 0xfd, 0xcd, 0xe4, 0x4c, 0x89, 0xf6, 0x46, 0x39, 0x4e, 0xa4, 0x6c, 0x74, + 0x01, 0x15, 0xdd, 0x71, 0x5c, 0x5f, 0x0f, 0xb6, 0xf8, 0x05, 0xa1, 0x6a, 0x7b, 0x66, 0x55, 0xdb, + 0xb1, 0x8c, 0xb1, 0x19, 0x96, 0xc0, 0xe0, 0xa4, 0x2a, 0xf4, 0x14, 0x96, 0xdc, 0xa7, 0x0e, 0xa1, + 0x98, 0x9c, 0x12, 0x4a, 0x1c, 0x7e, 0xe5, 0xab, 0x0a, 0xed, 0xdf, 0xcc, 0xa8, 0x3d, 0xc5, 0x1c, + 0xa7, 0x74, 0x1a, 0xce, 0xf0, 0xb8, 0x16, 0x7e, 0xc7, 0x3d, 0xb5, 0x1c, 0xdd, 0xb6, 0x7e, 0x41, + 0x28, 0xab, 0x2d, 0xc6, 0x77, 0xdc, 0x7b, 0x11, 0x14, 0x27, 0x28, 0xd0, 0xb7, 0xa0, 0x62, 0xd8, + 0x3d, 0xe6, 0x13, 0x2a, 0x3a, 0xc4, 0x92, 0xa8, 0xa0, 0xe8, 0x7c, 0x3b, 0x31, 0x0a, 0x27, 0xe9, + 0xd6, 0xbe, 0x0d, 0x95, 0xff, 0x73, 0x2e, 0xf2, 0xb9, 0x3a, 0xee, 0xd0, 0x99, 0xe6, 0xea, 0xdf, + 0x72, 0xb0, 0x98, 0x76, 0x43, 0xb4, 0x8d, 0x29, 0x53, 0x1f, 0x12, 0xc2, 0x5e, 0x99, 0x9f, 0xda, + 0x2b, 0x65, 0x4b, 0x9a, 0xff, 0x2c, 0x2d, 0x69, 0x0b, 0x40, 0xf7, 0xac, 0xb0, 0x1b, 0x05, 0xdd, + 0x2d, 0xea, 0x27, 0xf1, 0xa5, 0x1c, 0x27, 0xa8, 0x78, 0xc0, 0x0c, 0xd7, 0xf1, 0xa9, 0x6b, 0xdb, + 0x84, 0x8a, 0x0e, 0x56, 0x0a, 0x02, 0xb6, 0x13, 0x41, 0x71, 0x82, 0x82, 0xdf, 0x71, 0x4f, 0x6c, + 0xd7, 0x38, 0x17, 0x2e, 0x08, 0xab, 0x4f, 0xf4, 0xae, 0x52, 0x70, 0xc7, 0xd5, 0x26, 0xb0, 0xf8, + 0x0a, 0x0e, 0xf5, 0x10, 0xd2, 0xb7, 0x52, 0x74, 0x37, 0x70, 0x80, 0x12, 0x5d, 0x1b, 0x67, 0x3b, + 0xbc, 0x7a, 0x0b, 0xca, 0xd8, 0x75, 0xfd, 0x23, 0xdd, 0x3f, 0x63, 0xa8, 0x01, 0x05, 0x8f, 0xff, + 0x90, 0x4f, 0x0e, 0xe2, 0x2d, 0x46, 0x60, 0x70, 0x00, 0x57, 0x7f, 0xab, 0xc0, 0xab, 0x53, 0x5f, + 0x00, 0xb8, 0x23, 0x8d, 0xe8, 0x4b, 0x9a, 0x14, 0x39, 0x32, 0xa6, 0xc3, 0x09, 0x2a, 0x3e, 0xfe, + 0x53, 0xcf, 0x06, 0xe3, 0xe3, 0x3f, 0xa5, 0x0d, 0xa7, 0x69, 0xd5, 0xff, 0xe6, 0xa0, 0x78, 0xec, + 0xeb, 0x7e, 0x8f, 0xa1, 0x27, 0x50, 0xe2, 0x55, 0x68, 0xea, 0xbe, 0x2e, 0x34, 0x67, 
0x7e, 0x55, + 0x0b, 0xd7, 0xa8, 0x78, 0xf2, 0x85, 0x10, 0x1c, 0x49, 0xe4, 0x57, 0x5e, 0x26, 0xf4, 0x48, 0xf3, + 0xa2, 0xd6, 0x15, 0x68, 0xc7, 0x12, 0xcb, 0xd7, 0xfe, 0x2e, 0x61, 0x4c, 0xef, 0x84, 0x39, 0x1b, + 0xad, 0xfd, 0x07, 0x01, 0x18, 0x87, 0x78, 0xf4, 0x0e, 0x14, 0x29, 0xd1, 0x59, 0xb4, 0x8c, 0xd4, + 0x43, 0x91, 0x58, 0x40, 0x2f, 0x87, 0x8d, 0x05, 0x29, 0x5c, 0x7c, 0x63, 0x49, 0x8d, 0x1e, 0xc3, + 0x0d, 0x93, 0xf8, 0xba, 0x65, 0x07, 0x3b, 0x48, 0xe6, 0xf7, 0x8d, 0x40, 0x58, 0x3b, 0x60, 0xd5, + 0x2a, 0xdc, 0x26, 0xf9, 0x81, 0x43, 0x81, 0xbc, 0xde, 0x0c, 0xd7, 0x24, 0x22, 0x9f, 0x0b, 0x71, + 0xbd, 0xed, 0xb8, 0x26, 0xc1, 0x02, 0xa3, 0x3e, 0x53, 0xa0, 0x12, 0x48, 0xda, 0xd1, 0x7b, 0x8c, + 0xa0, 0xcd, 0xe8, 0x14, 0x41, 0xb8, 0xc3, 0x01, 0x39, 0xff, 0xde, 0xc0, 0x23, 0x97, 0xc3, 0x46, + 0x59, 0x90, 0xf1, 0x8f, 0xe8, 0x00, 0x09, 0x1f, 0xe5, 0xae, 0xf1, 0xd1, 0xeb, 0x50, 0x10, 0xfb, + 0x9e, 0x74, 0x66, 0xb4, 0xde, 0x89, 0x9d, 0x10, 0x07, 0x38, 0xf5, 0x0f, 0x39, 0xa8, 0xa6, 0x0e, + 0x97, 0x61, 0xc5, 0x8a, 0xee, 0x70, 0xb9, 0x0c, 0xef, 0x02, 0xd3, 0x1f, 0x3a, 0x7f, 0x04, 0x45, + 0x83, 0x9f, 0x2f, 0x7c, 0x69, 0xde, 0x9c, 0x25, 0x14, 0xc2, 0x33, 0x71, 0x26, 0x89, 0x4f, 0x86, + 0xa5, 0x40, 0xb4, 0x07, 0x2b, 0x94, 0xf8, 0x74, 0xb0, 0x7d, 0xea, 0x13, 0x9a, 0x5c, 0x3a, 0x0b, + 0xf1, 0x12, 0x82, 0xc7, 0x09, 0xf0, 0x24, 0x8f, 0x6a, 0xc3, 0x3c, 0x5f, 0x10, 0xb8, 0xdb, 0x59, + 0xea, 0x69, 0x2d, 0x72, 0x7b, 0xc8, 0x1c, 0xe2, 0xb9, 0x77, 0x1c, 0xdd, 0x71, 0x83, 0x64, 0x2f, + 0xc4, 0xde, 0x79, 0xc0, 0x81, 0x38, 0xc0, 0xdd, 0x59, 0xe5, 0x17, 0xd1, 0xdf, 0x3c, 0x6f, 0xcc, + 0x3d, 0x7b, 0xde, 0x98, 0xfb, 0xe8, 0xb9, 0xbc, 0x94, 0xfe, 0x18, 0xca, 0xf1, 0x3a, 0xf2, 0x39, + 0xab, 0x54, 0x7f, 0x0a, 0x25, 0x9e, 0x49, 0xe1, 0x1a, 0x7d, 0xcd, 0xf0, 0x48, 0xb7, 0xf5, 0x5c, + 0x96, 0xb6, 0xae, 0x6e, 0x41, 0xf0, 0xf6, 0xcc, 0x3b, 0xa1, 0xe5, 0x93, 0x6e, 0xaa, 0x13, 0xee, + 0x73, 0x00, 0x0e, 0xe0, 0x89, 0x7b, 0xf8, 0xaf, 0x15, 0x00, 0x71, 0xdf, 0xd8, 0xed, 0xf3, 0x3b, + 0xe2, 0x3a, 0xcc, 0xf3, 0x16, 0x3b, 0x6e, 0x98, 0x28, 0x01, 0x81, 0x41, 0x0f, 0xa1, 0xe8, 0x8a, + 0x35, 0x45, 0x3e, 0x50, 0xbe, 0x35, 0x35, 0x6b, 0xe4, 0xbf, 0x95, 0x9a, 0x58, 0x7f, 0xba, 0x7b, + 0xe1, 0x13, 0x87, 0xdb, 0x18, 0x67, 0x4c, 0xb0, 0xeb, 0x60, 0x29, 0x4c, 0x7b, 0xe3, 0xc5, 0xcb, + 0xfa, 0xdc, 0xc7, 0x2f, 0xeb, 0x73, 0xff, 0x7c, 0x59, 0x9f, 0xfb, 0x60, 0x54, 0x57, 0x5e, 0x8c, + 0xea, 0xca, 0xc7, 0xa3, 0xba, 0xf2, 0xef, 0x51, 0x5d, 0x79, 0xf6, 0x49, 0x7d, 0xee, 0x71, 0xae, + 0xbf, 0xf9, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb1, 0xe8, 0xc1, 0x2f, 0x98, 0x1b, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto new file mode 100644 index 000000000..9d42018ec --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -0,0 +1,689 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+ +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.apis.meta.v1; + +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// APIGroup contains the name, the supported versions, and the preferred version +// of a group. +message APIGroup { + // name is the name of the group. + optional string name = 1; + + // versions are the versions supported in this group. + repeated GroupVersionForDiscovery versions = 2; + + // preferredVersion is the version preferred by the API server, which + // probably is the storage version. + // +optional + optional GroupVersionForDiscovery preferredVersion = 3; + + // a map of client CIDR to server address that is serving this group. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. + // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4; +} + +// APIGroupList is a list of APIGroup, to allow clients to discover the API at +// /apis. +message APIGroupList { + // groups is a list of APIGroup. + repeated APIGroup groups = 1; +} + +// APIResource specifies the name of a resource and whether it is namespaced. +message APIResource { + // name is the name of the resource. + optional string name = 1; + + // namespaced indicates if a resource is namespaced or not. + optional bool namespaced = 2; + + // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') + optional string kind = 3; + + // verbs is a list of supported kube verbs (this includes get, list, watch, create, + // update, patch, delete, deletecollection, and proxy) + optional Verbs verbs = 4; + + // shortNames is a list of suggested short names of the resource. + repeated string shortNames = 5; +} + +// APIResourceList is a list of APIResource, it is used to expose the name of the +// resources supported in a specific group and version, and if the resource +// is namespaced. +message APIResourceList { + // groupVersion is the group and version this APIResourceList is for. + optional string groupVersion = 1; + + // resources contains the name of the resources and if they are namespaced. + repeated APIResource resources = 2; +} + +// APIVersions lists the versions that are available, to allow clients to +// discover the API at /api, which is the root path of the legacy v1 API. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message APIVersions { + // versions are the api versions that are available. + repeated string versions = 1; + + // a map of client CIDR to server address that is serving this group. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. 
+ // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. + // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 2; +} + +// DeleteOptions may be provided when deleting an API object. +message DeleteOptions { + // The duration in seconds before the object should be deleted. Value must be non-negative integer. + // The value zero indicates delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + // Defaults to a per object value if not specified. zero means delete immediately. + // +optional + optional int64 gracePeriodSeconds = 1; + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. + // +optional + optional Preconditions preconditions = 2; + + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. + // +optional + optional bool orphanDependents = 3; + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // +optional + optional string propagationPolicy = 4; +} + +// Duration is a wrapper around time.Duration which supports correct +// marshaling to YAML and JSON. In particular, it marshals into strings, which +// can be used as map keys in json. +message Duration { + optional int64 duration = 1; +} + +// ExportOptions is the query options to the standard REST get call. +message ExportOptions { + // Should this value be exported. Export strips fields that a user can not specify. + optional bool export = 1; + + // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. + optional bool exact = 2; +} + +// GetOptions is the standard query options to the standard REST get call. +message GetOptions { + // When specified: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + optional string resourceVersion = 1; +} + +// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupKind { + optional string group = 1; + + optional string kind = 2; +} + +// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupResource { + optional string group = 1; + + optional string resource = 2; +} + +// GroupVersion contains the "group" and the "version", which uniquely identifies the API. 
+// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupVersion { + optional string group = 1; + + optional string version = 2; +} + +// GroupVersion contains the "group/version" and "version" string of a version. +// It is made a struct to keep extensibility. +message GroupVersionForDiscovery { + // groupVersion specifies the API group and version in the form "group/version" + optional string groupVersion = 1; + + // version specifies the version in the form of "version". This is to save + // the clients the trouble of splitting the GroupVersion. + optional string version = 2; +} + +// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion +// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupVersionKind { + optional string group = 1; + + optional string version = 2; + + optional string kind = 3; +} + +// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion +// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupVersionResource { + optional string group = 1; + + optional string version = 2; + + optional string resource = 3; +} + +// A label selector is a label query over a set of resources. The result of matchLabels and +// matchExpressions are ANDed. An empty label selector matches all objects. A null +// label selector matches no objects. +message LabelSelector { + // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + // map is equivalent to an element of matchExpressions, whose key field is "key", the + // operator is "In", and the values array contains only "value". The requirements are ANDed. + // +optional + map<string, string> matchLabels = 1; + + // matchExpressions is a list of label selector requirements. The requirements are ANDed. + // +optional + repeated LabelSelectorRequirement matchExpressions = 2; +} + +// A label selector requirement is a selector that contains values, a key, and an operator that +// relates the key and values. +message LabelSelectorRequirement { + // key is the label key that the selector applies to. + optional string key = 1; + + // operator represents a key's relationship to a set of values. + // Valid operators ard In, NotIn, Exists and DoesNotExist. + optional string operator = 2; + + // values is an array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. This array is replaced during a strategic + // merge patch. + // +optional + repeated string values = 3; +} + +// ListMeta describes metadata that synthetic resources must have, including lists and +// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. +message ListMeta { + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // +optional + optional string selfLink = 1; + + // String that identifies the server's internal version of this object that + // can be used by clients to determine when objects have changed. + // Value must be treated as opaque by clients and passed unmodified back to the server. + // Populated by the system. + // Read-only. 
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + optional string resourceVersion = 2; +} + +// ListOptions is the query options to a standard REST list call. +message ListOptions { + // A selector to restrict the list of returned objects by their labels. + // Defaults to everything. + // +optional + optional string labelSelector = 1; + + // A selector to restrict the list of returned objects by their fields. + // Defaults to everything. + // +optional + optional string fieldSelector = 2; + + // Watch for changes to the described resources and return them as a stream of + // add, update, and remove notifications. Specify resourceVersion. + // +optional + optional bool watch = 3; + + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + // +optional + optional string resourceVersion = 4; + + // Timeout for the list/watch call. + // +optional + optional int64 timeoutSeconds = 5; +} + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +message ObjectMeta { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + optional string name = 1; + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // than the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. + // + // If this field is specified and the generated name exists, the server will + // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // + // Applied only if Name is not specified. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency + // +optional + optional string generateName = 2; + + // Namespace defines the space within each name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // + // Must be a DNS_LABEL. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + optional string namespace = 3; + + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. 
+ // +optional + optional string selfLink = 4; + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // + // Populated by the system. + // Read-only. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + optional string uid = 5; + + // An opaque value that represents the internal version of this object that can + // be used by clients to determine when objects have changed. May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and passed unmodified back to the server. + // They may only be valid for a particular resource or set of resources. + // + // Populated by the system. + // Read-only. + // Value must be treated as opaque by clients and . + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + optional string resourceVersion = 6; + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. Read-only. + // +optional + optional int64 generation = 7; + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. + // + // Populated by the system. + // Read-only. + // Null for lists. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional Time creationTimestamp = 8; + + // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This + // field is set by the server when a graceful deletion is requested by the user, and is not + // directly settable by a client. The resource is expected to be deleted (no longer visible + // from resource lists, and not reachable by name) after the time in this field. Once set, + // this value may not be unset or be set further into the future, although it may be shortened + // or the resource may be deleted prior to this time. For example, a user may request that + // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination + // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard + // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the + // API. In the presence of network partitions, this object may still exist after this + // timestamp, until an administrator or automated process can determine the resource is + // fully terminated. + // If not set, graceful deletion of the object has not been requested. + // + // Populated by the system when a graceful deletion is requested. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional Time deletionTimestamp = 9; + + // Number of seconds allowed for this object to gracefully terminate before + // it will be removed from the system. Only set when deletionTimestamp is also set. + // May only be shortened. + // Read-only. + // +optional + optional int64 deletionGracePeriodSeconds = 10; + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. 
May match selectors of replication controllers + // and services. + // More info: http://kubernetes.io/docs/user-guide/labels + // +optional + map<string, string> labels = 11; + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + map<string, string> annotations = 12; + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + repeated OwnerReference ownerReferences = 13; + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + // +optional + repeated string finalizers = 14; + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + // +optional + optional string clusterName = 15; +} + +// OwnerReference contains enough information to let you identify an owning +// object. Currently, an owning object must be in the same namespace, so there +// is no namespace field. +message OwnerReference { + // API version of the referent. + optional string apiVersion = 5; + + // Kind of the referent. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + optional string kind = 1; + + // Name of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + optional string name = 3; + + // UID of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + optional string uid = 4; + + // If true, this reference points to the managing controller. + // +optional + optional bool controller = 6; + + // If true, AND if the owner has the "foregroundDeletion" finalizer, then + // the owner cannot be deleted from the key-value store until this + // reference is removed. + // Defaults to false. + // To set this field, a user needs "delete" permission of the owner, + // otherwise 422 (Unprocessable Entity) will be returned. + // +optional + optional bool blockOwnerDeletion = 7; +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +message Preconditions { + // Specifies the target UID. + // +optional + optional string uid = 1; +} + +// RootPaths lists the paths available at root. +// For example: "/healthz", "/apis". +message RootPaths { + // paths are the paths available at root. + repeated string paths = 1; +} + +// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. +message ServerAddressByClientCIDR { + // The CIDR with which clients can match their IP to figure out the server address that they should use. + optional string clientCIDR = 1; + + // Address of this server, suitable for a client that matches the above CIDR. 
+ // This can be a hostname, hostname:port, IP or IP:port. + optional string serverAddress = 2; +} + +// Status is a return value for calls that don't return other objects. +message Status { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional ListMeta metadata = 1; + + // Status of the operation. + // One of: "Success" or "Failure". + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional string status = 2; + + // A human-readable description of the status of this operation. + // +optional + optional string message = 3; + + // A machine-readable description of why this operation is in the + // "Failure" status. If this value is empty there + // is no information available. A Reason clarifies an HTTP status + // code but does not override it. + // +optional + optional string reason = 4; + + // Extended data associated with the reason. Each reason may define its + // own extended details. This field is optional and the data returned + // is not guaranteed to conform to any schema except that defined by + // the reason type. + // +optional + optional StatusDetails details = 5; + + // Suggested HTTP return code for this status, 0 if not set. + // +optional + optional int32 code = 6; +} + +// StatusCause provides more information about an api.Status failure, including +// cases when multiple errors are encountered. +message StatusCause { + // A machine-readable description of the cause of the error. If this value is + // empty there is no information available. + // +optional + optional string reason = 1; + + // A human-readable description of the cause of the error. This field may be + // presented as-is to a reader. + // +optional + optional string message = 2; + + // The field of the resource that has caused this error, as named by its JSON + // serialization. May include dot and postfix notation for nested attributes. + // Arrays are zero-indexed. Fields may appear more than once in an array of + // causes due to fields having multiple errors. + // Optional. + // + // Examples: + // "name" - the field "name" on the current resource + // "items[0].name" - the field "name" on the first array entry in "items" + // +optional + optional string field = 3; +} + +// StatusDetails is a set of additional properties that MAY be set by the +// server to provide additional information about a response. The Reason +// field of a Status object defines what attributes will be set. Clients +// must ignore fields that do not match the defined type of each attribute, +// and should assume that any attribute may be empty, invalid, or under +// defined. +message StatusDetails { + // The name attribute of the resource associated with the status StatusReason + // (when there is a single name which can be described). + // +optional + optional string name = 1; + + // The group attribute of the resource associated with the status StatusReason. + // +optional + optional string group = 2; + + // The kind attribute of the resource associated with the status StatusReason. + // On some operations may differ from the requested resource Kind. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional string kind = 3; + + // The Causes array includes more details associated with the StatusReason + // failure. Not all StatusReasons may provide detailed causes. 
+ // +optional + repeated StatusCause causes = 4; + + // If specified, the time in seconds before the operation should be retried. + // +optional + optional int32 retryAfterSeconds = 5; +} + +// Time is a wrapper around time.Time which supports correct +// marshaling to YAML and JSON. Wrappers are provided for many +// of the factory methods that the time package offers. +// +// +protobuf.options.marshal=false +// +protobuf.as=Timestamp +// +protobuf.options.(gogoproto.goproto_stringer)=false +message Time { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + optional int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. + optional int32 nanos = 2; +} + +// Timestamp is a struct that is equivalent to Time, but intended for +// protobuf marshalling/unmarshalling. It is generated into a serialization +// that matches Time. Do not use in Go structs. +message Timestamp { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + optional int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. + optional int32 nanos = 2; +} + +// TypeMeta describes an individual object in an API response or request +// with strings representing the type of the object and its API schema version. +// Structures that are versioned or persisted should inline TypeMeta. +message TypeMeta { + // Kind is a string value representing the REST resource this object represents. + // Servers may infer this from the endpoint the client submits requests to. + // Cannot be updated. + // In CamelCase. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional string kind = 1; + + // APIVersion defines the versioned schema of this representation of an object. + // Servers should convert recognized schemas to the latest internal value, and + // may reject unrecognized values. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources + // +optional + optional string apiVersion = 2; +} + +// Verbs masks the value so protobuf can generate +// +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message Verbs { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// Event represents a single event to a watched resource. +// +// +protobuf=true +message WatchEvent { + optional string type = 1; + + // Object is: + // * If Type is Added or Modified: the new state of the object. + // * If Type is Deleted: the state of the object immediately before deletion. + // * If Type is Error: *Status is recommended; other types may make sense + // depending on context. 
+ optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2; +} + diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go new file mode 100644 index 000000000..8b6fdef5a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go @@ -0,0 +1,148 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupResource struct { + Group string `protobuf:"bytes,1,opt,name=group"` + Resource string `protobuf:"bytes,2,opt,name=resource"` +} + +func (gr *GroupResource) String() string { + if len(gr.Group) == 0 { + return gr.Resource + } + return gr.Resource + "." + gr.Group +} + +// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion +// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupVersionResource struct { + Group string `protobuf:"bytes,1,opt,name=group"` + Version string `protobuf:"bytes,2,opt,name=version"` + Resource string `protobuf:"bytes,3,opt,name=resource"` +} + +func (gvr *GroupVersionResource) String() string { + return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") +} + +// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupKind struct { + Group string `protobuf:"bytes,1,opt,name=group"` + Kind string `protobuf:"bytes,2,opt,name=kind"` +} + +func (gk *GroupKind) String() string { + if len(gk.Group) == 0 { + return gk.Kind + } + return gk.Kind + "." + gk.Group +} + +// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion +// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupVersionKind struct { + Group string `protobuf:"bytes,1,opt,name=group"` + Version string `protobuf:"bytes,2,opt,name=version"` + Kind string `protobuf:"bytes,3,opt,name=kind"` +} + +func (gvk GroupVersionKind) String() string { + return gvk.Group + "/" + gvk.Version + ", Kind=" + gvk.Kind +} + +// GroupVersion contains the "group" and the "version", which uniquely identifies the API. 
+// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupVersion struct { + Group string `protobuf:"bytes,1,opt,name=group"` + Version string `protobuf:"bytes,2,opt,name=version"` +} + +// Empty returns true if group and version are empty +func (gv GroupVersion) Empty() bool { + return len(gv.Group) == 0 && len(gv.Version) == 0 +} + +// String puts "group" and "version" into a single "group/version" string. For the legacy v1 +// it returns "v1". +func (gv GroupVersion) String() string { + // special case the internal apiVersion for the legacy kube types + if gv.Empty() { + return "" + } + + // special case of "v1" for backward compatibility + if len(gv.Group) == 0 && gv.Version == "v1" { + return gv.Version + } + if len(gv.Group) > 0 { + return gv.Group + "/" + gv.Version + } + return gv.Version +} + +// MarshalJSON implements the json.Marshaller interface. +func (gv GroupVersion) MarshalJSON() ([]byte, error) { + s := gv.String() + if strings.Count(s, "/") > 1 { + return []byte{}, fmt.Errorf("illegal GroupVersion %v: contains more than one /", s) + } + return json.Marshal(s) +} + +func (gv *GroupVersion) unmarshal(value []byte) error { + var s string + if err := json.Unmarshal(value, &s); err != nil { + return err + } + parsed, err := schema.ParseGroupVersion(s) + if err != nil { + return err + } + gv.Group, gv.Version = parsed.Group, parsed.Version + return nil +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (gv *GroupVersion) UnmarshalJSON(value []byte) error { + return gv.unmarshal(value) +} + +// UnmarshalTEXT implements the Ugorji's encoding.TextUnmarshaler interface. +func (gv *GroupVersion) UnmarshalText(value []byte) error { + return gv.unmarshal(value) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go new file mode 100644 index 000000000..b62dd9ee0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -0,0 +1,234 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" +) + +// LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements +// labels.Selector +// Note: This function should be kept in sync with the selector methods in pkg/labels/selector.go +func LabelSelectorAsSelector(ps *LabelSelector) (labels.Selector, error) { + if ps == nil { + return labels.Nothing(), nil + } + if len(ps.MatchLabels)+len(ps.MatchExpressions) == 0 { + return labels.Everything(), nil + } + selector := labels.NewSelector() + for k, v := range ps.MatchLabels { + r, err := labels.NewRequirement(k, selection.Equals, []string{v}) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + for _, expr := range ps.MatchExpressions { + var op selection.Operator + switch expr.Operator { + case LabelSelectorOpIn: + op = selection.In + case LabelSelectorOpNotIn: + op = selection.NotIn + case LabelSelectorOpExists: + op = selection.Exists + case LabelSelectorOpDoesNotExist: + op = selection.DoesNotExist + default: + return nil, fmt.Errorf("%q is not a valid pod selector operator", expr.Operator) + } + r, err := labels.NewRequirement(expr.Key, op, append([]string(nil), expr.Values...)) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + return selector, nil +} + +// LabelSelectorAsMap converts the LabelSelector api type into a map of strings, ie. the +// original structure of a label selector. Operators that cannot be converted into plain +// labels (Exists, DoesNotExist, NotIn, and In with more than one value) will result in +// an error. +func LabelSelectorAsMap(ps *LabelSelector) (map[string]string, error) { + if ps == nil { + return nil, nil + } + selector := map[string]string{} + for k, v := range ps.MatchLabels { + selector[k] = v + } + for _, expr := range ps.MatchExpressions { + switch expr.Operator { + case LabelSelectorOpIn: + if len(expr.Values) != 1 { + return selector, fmt.Errorf("operator %q without a single value cannot be converted into the old label selector format", expr.Operator) + } + // Should we do anything in case this will override a previous key-value pair? + selector[expr.Key] = expr.Values[0] + case LabelSelectorOpNotIn, LabelSelectorOpExists, LabelSelectorOpDoesNotExist: + return selector, fmt.Errorf("operator %q cannot be converted into the old label selector format", expr.Operator) + default: + return selector, fmt.Errorf("%q is not a valid selector operator", expr.Operator) + } + } + return selector, nil +} + +// ParseToLabelSelector parses a string representing a selector into a LabelSelector object. 
+// Note: This function should be kept in sync with the parser in pkg/labels/selector.go +func ParseToLabelSelector(selector string) (*LabelSelector, error) { + reqs, err := labels.ParseToRequirements(selector) + if err != nil { + return nil, fmt.Errorf("couldn't parse the selector string \"%s\": %v", selector, err) + } + + labelSelector := &LabelSelector{ + MatchLabels: map[string]string{}, + MatchExpressions: []LabelSelectorRequirement{}, + } + for _, req := range reqs { + var op LabelSelectorOperator + switch req.Operator() { + case selection.Equals, selection.DoubleEquals: + vals := req.Values() + if vals.Len() != 1 { + return nil, fmt.Errorf("equals operator must have exactly one value") + } + val, ok := vals.PopAny() + if !ok { + return nil, fmt.Errorf("equals operator has exactly one value but it cannot be retrieved") + } + labelSelector.MatchLabels[req.Key()] = val + continue + case selection.In: + op = LabelSelectorOpIn + case selection.NotIn: + op = LabelSelectorOpNotIn + case selection.Exists: + op = LabelSelectorOpExists + case selection.DoesNotExist: + op = LabelSelectorOpDoesNotExist + case selection.GreaterThan, selection.LessThan: + // Adding a separate case for these operators to indicate that this is deliberate + return nil, fmt.Errorf("%q isn't supported in label selectors", req.Operator()) + default: + return nil, fmt.Errorf("%q is not a valid label selector operator", req.Operator()) + } + labelSelector.MatchExpressions = append(labelSelector.MatchExpressions, LabelSelectorRequirement{ + Key: req.Key(), + Operator: op, + Values: req.Values().List(), + }) + } + return labelSelector, nil +} + +// SetAsLabelSelector converts the labels.Set object into a LabelSelector api object. +func SetAsLabelSelector(ls labels.Set) *LabelSelector { + if ls == nil { + return nil + } + + selector := &LabelSelector{ + MatchLabels: make(map[string]string), + } + for label, value := range ls { + selector.MatchLabels[label] = value + } + + return selector +} + +// FormatLabelSelector convert labelSelector into plain string +func FormatLabelSelector(labelSelector *LabelSelector) string { + selector, err := LabelSelectorAsSelector(labelSelector) + if err != nil { + return "<error>" + } + + l := selector.String() + if len(l) == 0 { + l = "<none>" + } + return l +} + +func ExtractGroupVersions(l *APIGroupList) []string { + var groupVersions []string + for _, g := range l.Groups { + for _, gv := range g.Versions { + groupVersions = append(groupVersions, gv.GroupVersion) + } + } + return groupVersions +} + +// HasAnnotation returns a bool if passed in annotation exists +func HasAnnotation(obj ObjectMeta, ann string) bool { + _, found := obj.Annotations[ann] + return found +} + +// SetMetaDataAnnotation sets the annotation and value +func SetMetaDataAnnotation(obj *ObjectMeta, ann string, value string) { + if obj.Annotations == nil { + obj.Annotations = make(map[string]string) + } + obj.Annotations[ann] = value +} + +// SingleObject returns a ListOptions for watching a single object. +func SingleObject(meta ObjectMeta) ListOptions { + return ListOptions{ + FieldSelector: fields.OneTermEqualSelector("metadata.name", meta.Name).String(), + ResourceVersion: meta.ResourceVersion, + } +} + +// NewDeleteOptions returns a DeleteOptions indicating the resource should +// be deleted within the specified grace period. Use zero to indicate +// immediate deletion. If you would prefer to use the default grace period, +// use &metav1.DeleteOptions{} directly. 
+func NewDeleteOptions(grace int64) *DeleteOptions { + return &DeleteOptions{GracePeriodSeconds: &grace} +} + +// NewPreconditionDeleteOptions returns a DeleteOptions with a UID precondition set. +func NewPreconditionDeleteOptions(uid string) *DeleteOptions { + u := types.UID(uid) + p := Preconditions{UID: &u} + return &DeleteOptions{Preconditions: &p} +} + +// NewUIDPreconditions returns a Preconditions with UID set. +func NewUIDPreconditions(uid string) *Preconditions { + u := types.UID(uid) + return &Preconditions{UID: &u} +} + +// HasObjectMetaSystemFieldValues returns true if fields that are managed by the system on ObjectMeta have values. +func HasObjectMetaSystemFieldValues(meta *ObjectMeta) bool { + return !meta.CreationTimestamp.Time.IsZero() || + len(meta.UID) != 0 +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go new file mode 100644 index 000000000..8b4c0423e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go @@ -0,0 +1,75 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// Clones the given selector and returns a new selector with the given key and value added. +// Returns the given selector, if labelKey is empty. +func CloneSelectorAndAddLabel(selector *LabelSelector, labelKey, labelValue string) *LabelSelector { + if labelKey == "" { + // Don't need to add a label. + return selector + } + + // Clone. + newSelector := new(LabelSelector) + + // TODO(madhusudancs): Check if you can use deepCopy_extensions_LabelSelector here. + newSelector.MatchLabels = make(map[string]string) + if selector.MatchLabels != nil { + for key, val := range selector.MatchLabels { + newSelector.MatchLabels[key] = val + } + } + newSelector.MatchLabels[labelKey] = labelValue + + if selector.MatchExpressions != nil { + newMExps := make([]LabelSelectorRequirement, len(selector.MatchExpressions)) + for i, me := range selector.MatchExpressions { + newMExps[i].Key = me.Key + newMExps[i].Operator = me.Operator + if me.Values != nil { + newMExps[i].Values = make([]string, len(me.Values)) + copy(newMExps[i].Values, me.Values) + } else { + newMExps[i].Values = nil + } + } + newSelector.MatchExpressions = newMExps + } else { + newSelector.MatchExpressions = nil + } + + return newSelector +} + +// AddLabelToSelector returns a selector with the given key and value added to the given selector's MatchLabels. +func AddLabelToSelector(selector *LabelSelector, labelKey, labelValue string) *LabelSelector { + if labelKey == "" { + // Don't need to add a label. 
+ return selector + } + if selector.MatchLabels == nil { + selector.MatchLabels = make(map[string]string) + } + selector.MatchLabels[labelKey] = labelValue + return selector +} + +// SelectorHasLabel checks if the given selector contains the given label key in its MatchLabels +func SelectorHasLabel(selector *LabelSelector, labelKey string) bool { + return len(selector.MatchLabels[labelKey]) > 0 +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go new file mode 100644 index 000000000..108e34f0f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go @@ -0,0 +1,209 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +// ObjectMetaFor returns a pointer to a provided object's ObjectMeta. +// TODO: allow runtime.Unknown to extract this object +// TODO: Remove this function and use meta.ObjectMetaAccessor() instead. +func ObjectMetaFor(obj runtime.Object) (*ObjectMeta, error) { + v, err := conversion.EnforcePtr(obj) + if err != nil { + return nil, err + } + var meta *ObjectMeta + err = runtime.FieldPtr(v, "ObjectMeta", &meta) + return meta, err +} + +// ListMetaFor returns a pointer to a provided object's ListMeta, +// or an error if the object does not have that pointer. +// TODO: allow runtime.Unknown to extract this object +// TODO: Remove this function and use meta.ObjectMetaAccessor() instead. +func ListMetaFor(obj runtime.Object) (*ListMeta, error) { + v, err := conversion.EnforcePtr(obj) + if err != nil { + return nil, err + } + var meta *ListMeta + err = runtime.FieldPtr(v, "ListMeta", &meta) + return meta, err +} + +// TODO: move this, Object, List, and Type to a different package +type ObjectMetaAccessor interface { + GetObjectMeta() Object +} + +// Object lets you work with object metadata from any of the versioned or +// internal API objects. Attempting to set or retrieve a field on an object that does +// not support that field (Name, UID, Namespace on lists) will be a no-op and return +// a default value. 
+type Object interface { + GetNamespace() string + SetNamespace(namespace string) + GetName() string + SetName(name string) + GetGenerateName() string + SetGenerateName(name string) + GetUID() types.UID + SetUID(uid types.UID) + GetResourceVersion() string + SetResourceVersion(version string) + GetSelfLink() string + SetSelfLink(selfLink string) + GetCreationTimestamp() Time + SetCreationTimestamp(timestamp Time) + GetDeletionTimestamp() *Time + SetDeletionTimestamp(timestamp *Time) + GetLabels() map[string]string + SetLabels(labels map[string]string) + GetAnnotations() map[string]string + SetAnnotations(annotations map[string]string) + GetFinalizers() []string + SetFinalizers(finalizers []string) + GetOwnerReferences() []OwnerReference + SetOwnerReferences([]OwnerReference) + GetClusterName() string + SetClusterName(clusterName string) +} + +// ListMetaAccessor retrieves the list interface from an object +type ListMetaAccessor interface { + GetListMeta() List +} + +// List lets you work with list metadata from any of the versioned or +// internal API objects. Attempting to set or retrieve a field on an object that does +// not support that field will be a no-op and return a default value. +// TODO: move this, and TypeMeta and ListMeta, to a different package +type List interface { + GetResourceVersion() string + SetResourceVersion(version string) + GetSelfLink() string + SetSelfLink(selfLink string) +} + +// Type exposes the type and APIVersion of versioned or internal API objects. +// TODO: move this, and TypeMeta and ListMeta, to a different package +type Type interface { + GetAPIVersion() string + SetAPIVersion(version string) + GetKind() string + SetKind(kind string) +} + +func (meta *ListMeta) GetResourceVersion() string { return meta.ResourceVersion } +func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } +func (meta *ListMeta) GetSelfLink() string { return meta.SelfLink } +func (meta *ListMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } + +func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj } + +// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta +func (obj *TypeMeta) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} + +// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta +func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *ListMeta) GetListMeta() List { return obj } + +func (obj *ObjectMeta) GetObjectMeta() Object { return obj } + +// Namespace implements metav1.Object for any object with an ObjectMeta typed field. Allows +// fast, direct access to metadata fields for API objects. 
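+// +// Illustrative sketch only: a pointer to any struct that embeds ObjectMeta by value picks up +// these accessors, so callers can handle metadata generically, e.g. +// +// func qualifiedName(o Object) string { return o.GetNamespace() + "/" + o.GetName() } +//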
+func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace } +func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace } +func (meta *ObjectMeta) GetName() string { return meta.Name } +func (meta *ObjectMeta) SetName(name string) { meta.Name = name } +func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName } +func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName } +func (meta *ObjectMeta) GetUID() types.UID { return meta.UID } +func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid } +func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion } +func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } +func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink } +func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } +func (meta *ObjectMeta) GetCreationTimestamp() Time { return meta.CreationTimestamp } +func (meta *ObjectMeta) SetCreationTimestamp(creationTimestamp Time) { + meta.CreationTimestamp = creationTimestamp +} +func (meta *ObjectMeta) GetDeletionTimestamp() *Time { return meta.DeletionTimestamp } +func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *Time) { + meta.DeletionTimestamp = deletionTimestamp +} +func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels } +func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels } +func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations } +func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations } +func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } +func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } + +func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference { + ret := make([]OwnerReference, len(meta.OwnerReferences)) + for i := 0; i < len(meta.OwnerReferences); i++ { + ret[i].Kind = meta.OwnerReferences[i].Kind + ret[i].Name = meta.OwnerReferences[i].Name + ret[i].UID = meta.OwnerReferences[i].UID + ret[i].APIVersion = meta.OwnerReferences[i].APIVersion + if meta.OwnerReferences[i].Controller != nil { + value := *meta.OwnerReferences[i].Controller + ret[i].Controller = &value + } + if meta.OwnerReferences[i].BlockOwnerDeletion != nil { + value := *meta.OwnerReferences[i].BlockOwnerDeletion + ret[i].BlockOwnerDeletion = &value + } + } + return ret +} + +func (meta *ObjectMeta) SetOwnerReferences(references []OwnerReference) { + newReferences := make([]OwnerReference, len(references)) + for i := 0; i < len(references); i++ { + newReferences[i].Kind = references[i].Kind + newReferences[i].Name = references[i].Name + newReferences[i].UID = references[i].UID + newReferences[i].APIVersion = references[i].APIVersion + if references[i].Controller != nil { + value := *references[i].Controller + newReferences[i].Controller = &value + } + if references[i].BlockOwnerDeletion != nil { + value := *references[i].BlockOwnerDeletion + newReferences[i].BlockOwnerDeletion = &value + } + } + meta.OwnerReferences = newReferences +} + +func (meta *ObjectMeta) GetClusterName() string { + return meta.ClusterName +} +func (meta *ObjectMeta) SetClusterName(clusterName string) { + meta.ClusterName = clusterName +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go new file mode 100644 index 
000000000..8645d1abc --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go @@ -0,0 +1,82 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "meta.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// WatchEventKind is name reserved for serializing watch events. +const WatchEventKind = "WatchEvent" + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// AddToGroupVersion registers common meta types into schemas. +func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) { + scheme.AddKnownTypeWithName(groupVersion.WithKind(WatchEventKind), &WatchEvent{}) + scheme.AddKnownTypeWithName( + schema.GroupVersion{Group: groupVersion.Group, Version: runtime.APIVersionInternal}.WithKind(WatchEventKind), + &InternalEvent{}, + ) + // Supports legacy code paths, most callers should use metav1.ParameterCodec for now + scheme.AddKnownTypes(groupVersion, + &ListOptions{}, + &ExportOptions{}, + &GetOptions{}, + &DeleteOptions{}, + ) + scheme.AddConversionFuncs( + Convert_versioned_Event_to_watch_Event, + Convert_versioned_InternalEvent_to_versioned_Event, + Convert_watch_Event_to_versioned_Event, + Convert_versioned_Event_to_versioned_InternalEvent, + ) + + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. + scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) + AddConversionFuncs(scheme) + RegisterDefaults(scheme) +} + +// scheme is the registry for the common types that adhere to the meta v1 API spec. +var scheme = runtime.NewScheme() + +// ParameterCodec knows about query parameters used with the meta v1 API spec. +var ParameterCodec = runtime.NewParameterCodec(scheme) + +func init() { + scheme.AddUnversionedTypes(SchemeGroupVersion, + &ListOptions{}, + &ExportOptions{}, + &GetOptions{}, + &DeleteOptions{}, + ) + + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. + scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) + RegisterDefaults(scheme) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go new file mode 100644 index 000000000..a1e01f344 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -0,0 +1,180 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "time" + + "k8s.io/apimachinery/pkg/openapi" + + "github.com/go-openapi/spec" + "github.com/google/gofuzz" +) + +// Time is a wrapper around time.Time which supports correct +// marshaling to YAML and JSON. Wrappers are provided for many +// of the factory methods that the time package offers. +// +// +protobuf.options.marshal=false +// +protobuf.as=Timestamp +// +protobuf.options.(gogoproto.goproto_stringer)=false +type Time struct { + time.Time `protobuf:"-"` +} + +// DeepCopy returns a deep-copy of the Time value. The underlying time.Time +// type is effectively immutable in the time API, so it is safe to +// copy-by-assign, despite the presence of (unexported) Pointer fields. +func (t Time) DeepCopy() Time { + return t +} + +// String returns the representation of the time. +func (t Time) String() string { + return t.Time.String() +} + +// NewTime returns a wrapped instance of the provided time +func NewTime(time time.Time) Time { + return Time{time} +} + +// Date returns the Time corresponding to the supplied parameters +// by wrapping time.Date. +func Date(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) Time { + return Time{time.Date(year, month, day, hour, min, sec, nsec, loc)} +} + +// Now returns the current local time. +func Now() Time { + return Time{time.Now()} +} + +// IsZero returns true if the value is nil or time is zero. +func (t *Time) IsZero() bool { + if t == nil { + return true + } + return t.Time.IsZero() +} + +// Before reports whether the time instant t is before u. +func (t Time) Before(u Time) bool { + return t.Time.Before(u.Time) +} + +// Equal reports whether the time instant t is equal to u. +func (t Time) Equal(u Time) bool { + return t.Time.Equal(u.Time) +} + +// Unix returns the local time corresponding to the given Unix time +// by wrapping time.Unix. +func Unix(sec int64, nsec int64) Time { + return Time{time.Unix(sec, nsec)} +} + +// Rfc3339Copy returns a copy of the Time at second-level precision. +func (t Time) Rfc3339Copy() Time { + copied, _ := time.Parse(time.RFC3339, t.Format(time.RFC3339)) + return Time{copied} +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (t *Time) UnmarshalJSON(b []byte) error { + if len(b) == 4 && string(b) == "null" { + t.Time = time.Time{} + return nil + } + + var str string + json.Unmarshal(b, &str) + + pt, err := time.Parse(time.RFC3339, str) + if err != nil { + return err + } + + t.Time = pt.Local() + return nil +} + +// UnmarshalQueryParameter converts from a URL query parameter value to an object +func (t *Time) UnmarshalQueryParameter(str string) error { + if len(str) == 0 { + t.Time = time.Time{} + return nil + } + // Tolerate requests from older clients that used JSON serialization to build query params + if len(str) == 4 && str == "null" { + t.Time = time.Time{} + return nil + } + + pt, err := time.Parse(time.RFC3339, str) + if err != nil { + return err + } + + t.Time = pt.Local() + return nil +} + +// MarshalJSON implements the json.Marshaler interface. 
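+// +// A behavioral sketch (the timestamp is an arbitrary example value; encoding/json is assumed to be imported by the caller): +// +// json.Marshal(Date(2016, time.January, 2, 15, 4, 5, 0, time.UTC)) // yields the quoted JSON string "2016-01-02T15:04:05Z" +// json.Marshal(Time{}) // yields the JSON literal null +//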
+func (t Time) MarshalJSON() ([]byte, error) { + if t.IsZero() { + // Encode unset/nil objects as JSON's "null". + return []byte("null"), nil + } + + return json.Marshal(t.UTC().Format(time.RFC3339)) +} + +func (_ Time) OpenAPIDefinition() openapi.OpenAPIDefinition { + return openapi.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "date-time", + }, + }, + } +} + +// MarshalQueryParameter converts to a URL query parameter value +func (t Time) MarshalQueryParameter() (string, error) { + if t.IsZero() { + // Encode unset/nil objects as an empty string + return "", nil + } + + return t.UTC().Format(time.RFC3339), nil +} + +// Fuzz satisfies fuzz.Interface. +func (t *Time) Fuzz(c fuzz.Continue) { + if t == nil { + return + } + // Allow for about 1000 years of randomness. Leave off nanoseconds + // because JSON doesn't represent them so they can't round-trip + // properly. + t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0) +} + +var _ fuzz.Interface = &Time{} diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go similarity index 96% rename from vendor/k8s.io/kubernetes/pkg/api/unversioned/time_proto.go rename to vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go index 496d5d98c..aea28e410 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/time_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package unversioned +package v1 import ( "time" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go new file mode 100644 index 000000000..c6cf1fab8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -0,0 +1,792 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package unversioned contains API types that are common to all versions. +// +// The package contains two categories of types: +// - external (serialized) types that lack their own version (e.g TypeMeta) +// - internal (never-serialized) types that are needed by several different +// api groups, and so live here, to avoid duplication and/or import loops +// (e.g. LabelSelector). +// In the future, we will probably move these categories of objects into +// separate packages. +package v1 + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/types" +) + +// TypeMeta describes an individual object in an API response or request +// with strings representing the type of the object and its API schema version. +// Structures that are versioned or persisted should inline TypeMeta. 
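+// +// For illustration (Widget is a hypothetical type, not part of this package), a versioned +// object typically inlines TypeMeta next to its ObjectMeta: +// +// type Widget struct { +// TypeMeta `json:",inline"` +// ObjectMeta `json:"metadata,omitempty"` +// } +//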
+type TypeMeta struct { + // Kind is a string value representing the REST resource this object represents. + // Servers may infer this from the endpoint the client submits requests to. + // Cannot be updated. + // In CamelCase. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` + + // APIVersion defines the versioned schema of this representation of an object. + // Servers should convert recognized schemas to the latest internal value, and + // may reject unrecognized values. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` +} + +// ListMeta describes metadata that synthetic resources must have, including lists and +// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. +type ListMeta struct { + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // +optional + SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,1,opt,name=selfLink"` + + // String that identifies the server's internal version of this object that + // can be used by clients to determine when objects have changed. + // Value must be treated as opaque by clients and passed unmodified back to the server. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` +} + +// These are internal finalizer values for Kubernetes-like APIs, must be qualified name unless defined here +const ( + FinalizerOrphanDependents string = "orphan" + FinalizerDeleteDependents string = "foregroundDeletion" +) + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +type ObjectMeta struct { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // than the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. + // + // If this field is specified and the generated name exists, the server will + // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // + // Applied only if Name is not specified. 
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency + // +optional + GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` + + // Namespace defines the space within which each name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // + // Must be a DNS_LABEL. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` + + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // +optional + SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"` + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // + // Populated by the system. + // Read-only. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` + + // An opaque value that represents the internal version of this object that can + // be used by clients to determine when objects have changed. May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and pass them unmodified back to the server. + // They may only be valid for a particular resource or set of resources. + // + // Populated by the system. + // Read-only. + // Value must be treated as opaque by clients. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. Read-only. + // +optional + Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"` + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. + // + // Populated by the system. + // Read-only. + // Null for lists. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + CreationTimestamp Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"` + + // DeletionTimestamp is the RFC 3339 date and time at which this resource will be deleted. This + // field is set by the server when a graceful deletion is requested by the user, and is not + // directly settable by a client. The resource is expected to be deleted (no longer visible + // from resource lists, and not reachable by name) after the time in this field. Once set, + // this value may not be unset or be set further into the future, although it may be shortened + // or the resource may be deleted prior to this time. For example, a user may request that + // a pod is deleted in 30 seconds.
The Kubelet will react by sending a graceful termination + // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard + // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the + // API. In the presence of network partitions, this object may still exist after this + // timestamp, until an administrator or automated process can determine the resource is + // fully terminated. + // If not set, graceful deletion of the object has not been requested. + // + // Populated by the system when a graceful deletion is requested. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + DeletionTimestamp *Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` + + // Number of seconds allowed for this object to gracefully terminate before + // it will be removed from the system. Only set when deletionTimestamp is also set. + // May only be shortened. + // Read-only. + // +optional + DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: http://kubernetes.io/docs/user-guide/labels + // +optional + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + // +optional + Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"` + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. 
+ // +optional + ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"` +} + +const ( + // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients + NamespaceDefault string = "default" + // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces + NamespaceAll string = "" + // NamespaceNone is the argument for a context when there is no namespace. + NamespaceNone string = "" + // NamespaceSystem is the system namespace where we place system components. + NamespaceSystem string = "kube-system" + // NamespacePublic is the namespace where we place public info (ConfigMaps) + NamespacePublic string = "kube-public" +) + +// OwnerReference contains enough information to let you identify an owning +// object. Currently, an owning object must be in the same namespace, so there +// is no namespace field. +type OwnerReference struct { + // API version of the referent. + APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` + // Kind of the referent. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // Name of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` + // UID of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + UID types.UID `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + // If true, this reference points to the managing controller. + // +optional + Controller *bool `json:"controller,omitempty" protobuf:"varint,6,opt,name=controller"` + // If true, AND if the owner has the "foregroundDeletion" finalizer, then + // the owner cannot be deleted from the key-value store until this + // reference is removed. + // Defaults to false. + // To set this field, a user needs "delete" permission of the owner, + // otherwise 422 (Unprocessable Entity) will be returned. + // +optional + BlockOwnerDeletion *bool `json:"blockOwnerDeletion,omitempty" protobuf:"varint,7,opt,name=blockOwnerDeletion"` +} + +// ListOptions is the query options to a standard REST list call. +type ListOptions struct { + TypeMeta `json:",inline"` + + // A selector to restrict the list of returned objects by their labels. + // Defaults to everything. + // +optional + LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` + // A selector to restrict the list of returned objects by their fields. + // Defaults to everything. + // +optional + FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"` + // Watch for changes to the described resources and return them as a stream of + // add, update, and remove notifications. Specify resourceVersion. + // +optional + Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"` + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. 
+ // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"` + // Timeout for the list/watch call. + // +optional + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"` +} + +// ExportOptions is the query options to the standard REST get call. +type ExportOptions struct { + TypeMeta `json:",inline"` + // Should this value be exported. Export strips fields that a user cannot specify. + Export bool `json:"export" protobuf:"varint,1,opt,name=export"` + // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. + Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"` +} + +// GetOptions is the standard query options to the standard REST get call. +type GetOptions struct { + TypeMeta `json:",inline"` + // When specified: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,1,opt,name=resourceVersion"` +} + +// DeletionPropagation decides if a deletion will propagate to the dependents of +// the object, and how the garbage collector will handle the propagation. +type DeletionPropagation string + +const ( + // Orphans the dependents. + DeletePropagationOrphan DeletionPropagation = "Orphan" + // Deletes the object from the key-value store, the garbage collector will + // delete the dependents in the background. + DeletePropagationBackground DeletionPropagation = "Background" + // The object exists in the key-value store until the garbage collector + // deletes all the dependents whose ownerReference.blockOwnerDeletion=true + // from the key-value store. The API server will put the "foregroundDeletion" + // finalizer on the object, and sets its deletionTimestamp. This policy is + // cascading, i.e., the dependents will be deleted with Foreground. + DeletePropagationForeground DeletionPropagation = "Foreground" +) + +// DeleteOptions may be provided when deleting an API object. +type DeleteOptions struct { + TypeMeta `json:",inline"` + + // The duration in seconds before the object should be deleted. Value must be a non-negative integer. + // The value zero indicates delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + // Defaults to a per object value if not specified. zero means delete immediately. + // +optional + GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=gracePeriodSeconds"` + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. + // +optional + Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"` + + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. + // +optional + OrphanDependents *bool `json:"orphanDependents,omitempty" protobuf:"varint,3,opt,name=orphanDependents"` + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both.
+ // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // +optional + PropagationPolicy *DeletionPropagation `json:"propagationPolicy,omitempty" protobuf:"varint,4,opt,name=propagationPolicy"` +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions struct { + // Specifies the target UID. + // +optional + UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` +} + +// Status is a return value for calls that don't return other objects. +type Status struct { + TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Status of the operation. + // One of: "Success" or "Failure". + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status string `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // A human-readable description of the status of this operation. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` + // A machine-readable description of why this operation is in the + // "Failure" status. If this value is empty there + // is no information available. A Reason clarifies an HTTP status + // code but does not override it. + // +optional + Reason StatusReason `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason,casttype=StatusReason"` + // Extended data associated with the reason. Each reason may define its + // own extended details. This field is optional and the data returned + // is not guaranteed to conform to any schema except that defined by + // the reason type. + // +optional + Details *StatusDetails `json:"details,omitempty" protobuf:"bytes,5,opt,name=details"` + // Suggested HTTP return code for this status, 0 if not set. + // +optional + Code int32 `json:"code,omitempty" protobuf:"varint,6,opt,name=code"` +} + +// StatusDetails is a set of additional properties that MAY be set by the +// server to provide additional information about a response. The Reason +// field of a Status object defines what attributes will be set. Clients +// must ignore fields that do not match the defined type of each attribute, +// and should assume that any attribute may be empty, invalid, or under +// defined. +type StatusDetails struct { + // The name attribute of the resource associated with the status StatusReason + // (when there is a single name which can be described). + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // The group attribute of the resource associated with the status StatusReason. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"` + // The kind attribute of the resource associated with the status StatusReason. + // On some operations may differ from the requested resource Kind. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` + // The Causes array includes more details associated with the StatusReason + // failure. Not all StatusReasons may provide detailed causes. 
+ // +optional + Causes []StatusCause `json:"causes,omitempty" protobuf:"bytes,4,rep,name=causes"` + // If specified, the time in seconds before the operation should be retried. + // +optional + RetryAfterSeconds int32 `json:"retryAfterSeconds,omitempty" protobuf:"varint,5,opt,name=retryAfterSeconds"` +} + +// Values of Status.Status +const ( + StatusSuccess = "Success" + StatusFailure = "Failure" +) + +// StatusReason is an enumeration of possible failure causes. Each StatusReason +// must map to a single HTTP status code, but multiple reasons may map +// to the same HTTP status code. +// TODO: move to apiserver +type StatusReason string + +const ( + // StatusReasonUnknown means the server has declined to indicate a specific reason. + // The details field may contain other information about this error. + // Status code 500. + StatusReasonUnknown StatusReason = "" + + // StatusReasonUnauthorized means the server can be reached and understood the request, but requires + // the user to present appropriate authorization credentials (identified by the WWW-Authenticate header) + // in order for the action to be completed. If the user has specified credentials on the request, the + // server considers them insufficient. + // Status code 401 + StatusReasonUnauthorized StatusReason = "Unauthorized" + + // StatusReasonForbidden means the server can be reached and understood the request, but refuses + // to take any further action. It is the result of the server being configured to deny access for some reason + // to the requested resource by the client. + // Details (optional): + // "kind" string - the kind attribute of the forbidden resource + // on some operations may differ from the requested + // resource. + // "id" string - the identifier of the forbidden resource + // Status code 403 + StatusReasonForbidden StatusReason = "Forbidden" + + // StatusReasonNotFound means one or more resources required for this operation + // could not be found. + // Details (optional): + // "kind" string - the kind attribute of the missing resource + // on some operations may differ from the requested + // resource. + // "id" string - the identifier of the missing resource + // Status code 404 + StatusReasonNotFound StatusReason = "NotFound" + + // StatusReasonAlreadyExists means the resource you are creating already exists. + // Details (optional): + // "kind" string - the kind attribute of the conflicting resource + // "id" string - the identifier of the conflicting resource + // Status code 409 + StatusReasonAlreadyExists StatusReason = "AlreadyExists" + + // StatusReasonConflict means the requested operation cannot be completed + // due to a conflict in the operation. The client may need to alter the + // request. Each resource may define custom details that indicate the + // nature of the conflict. + // Status code 409 + StatusReasonConflict StatusReason = "Conflict" + + // StatusReasonGone means the item is no longer available at the server and no + // forwarding address is known. + // Status code 410 + StatusReasonGone StatusReason = "Gone" + + // StatusReasonInvalid means the requested create or update operation cannot be + // completed due to invalid data provided as part of the request. The client may + // need to alter the request. When set, the client may use the StatusDetails + // message field as a summary of the issues encountered. 
+ // Details (optional): + // "kind" string - the kind attribute of the invalid resource + // "id" string - the identifier of the invalid resource + // "causes" - one or more StatusCause entries indicating the data in the + // provided resource that was invalid. The code, message, and + // field attributes will be set. + // Status code 422 + StatusReasonInvalid StatusReason = "Invalid" + + // StatusReasonServerTimeout means the server can be reached and understood the request, + // but cannot complete the action in a reasonable time. The client should retry the request. + // This may be due to temporary server load or a transient communication issue with + // another server. Status code 500 is used because the HTTP spec provides no suitable + // server-requested client retry and the 5xx class represents actionable errors. + // Details (optional): + // "kind" string - the kind attribute of the resource being acted on. + // "id" string - the operation that is being attempted. + // "retryAfterSeconds" int32 - the number of seconds before the operation should be retried + // Status code 500 + StatusReasonServerTimeout StatusReason = "ServerTimeout" + + // StatusReasonTimeout means that the request could not be completed within the given time. + // Clients can get this response only when they specified a timeout param in the request, + // or if the server cannot complete the operation within a reasonable amount of time. + // The request might succeed with an increased value of timeout param. The client *should* + // wait at least the number of seconds specified by the retryAfterSeconds field. + // Details (optional): + // "retryAfterSeconds" int32 - the number of seconds before the operation should be retried + // Status code 504 + StatusReasonTimeout StatusReason = "Timeout" + + // StatusReasonBadRequest means that the request itself was invalid, because the request + // doesn't make any sense, for example deleting a read-only object. This is different from + // StatusReasonInvalid above, which indicates that the API call could possibly succeed, but the + // data was invalid. API calls that return BadRequest can never succeed. + StatusReasonBadRequest StatusReason = "BadRequest" + + // StatusReasonMethodNotAllowed means that the action the client attempted to perform on the + // resource was not supported by the code - for instance, attempting to delete a resource that + // can only be created. API calls that return MethodNotAllowed can never succeed. + StatusReasonMethodNotAllowed StatusReason = "MethodNotAllowed" + + // StatusReasonInternalError indicates that an internal error occurred, it is unexpected + // and the outcome of the call is unknown. + // Details (optional): + // "causes" - The original error + // Status code 500 + StatusReasonInternalError StatusReason = "InternalError" + + // StatusReasonExpired indicates that the request is invalid because the content you are requesting + // has expired and is no longer available. It is typically associated with watches that can't be + // serviced. + // Status code 410 (gone) + StatusReasonExpired StatusReason = "Expired" + + // StatusReasonServiceUnavailable means that the request itself was valid, + // but the requested service is unavailable at this time. + // Retrying the request after some time might succeed. + // Status code 503 + StatusReasonServiceUnavailable StatusReason = "ServiceUnavailable" +) + +// StatusCause provides more information about an api.Status failure, including +// cases when multiple errors are encountered.
+type StatusCause struct { + // A machine-readable description of the cause of the error. If this value is + // empty there is no information available. + // +optional + Type CauseType `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason,casttype=CauseType"` + // A human-readable description of the cause of the error. This field may be + // presented as-is to a reader. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // The field of the resource that has caused this error, as named by its JSON + // serialization. May include dot and postfix notation for nested attributes. + // Arrays are zero-indexed. Fields may appear more than once in an array of + // causes due to fields having multiple errors. + // Optional. + // + // Examples: + // "name" - the field "name" on the current resource + // "items[0].name" - the field "name" on the first array entry in "items" + // +optional + Field string `json:"field,omitempty" protobuf:"bytes,3,opt,name=field"` +} + +// CauseType is a machine readable value providing more detail about what +// occurred in a status response. An operation may have multiple causes for a +// status (whether Failure or Success). +type CauseType string + +const ( + // CauseTypeFieldValueNotFound is used to report failure to find a requested value + // (e.g. looking up an ID). + CauseTypeFieldValueNotFound CauseType = "FieldValueNotFound" + // CauseTypeFieldValueRequired is used to report required values that are not + // provided (e.g. empty strings, null values, or empty arrays). + CauseTypeFieldValueRequired CauseType = "FieldValueRequired" + // CauseTypeFieldValueDuplicate is used to report collisions of values that must be + // unique (e.g. unique IDs). + CauseTypeFieldValueDuplicate CauseType = "FieldValueDuplicate" + // CauseTypeFieldValueInvalid is used to report malformed values (e.g. failed regex + // match). + CauseTypeFieldValueInvalid CauseType = "FieldValueInvalid" + // CauseTypeFieldValueNotSupported is used to report valid (as per formatting rules) + // values that can not be handled (e.g. an enumerated string). + CauseTypeFieldValueNotSupported CauseType = "FieldValueNotSupported" + // CauseTypeUnexpectedServerResponse is used to report when the server responded to the client + // without the expected return type. The presence of this cause indicates the error may be + // due to an intervening proxy or the server software malfunctioning. + CauseTypeUnexpectedServerResponse CauseType = "UnexpectedServerResponse" +) + +// APIVersions lists the versions that are available, to allow clients to +// discover the API at /api, which is the root path of the legacy v1 API. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type APIVersions struct { + TypeMeta `json:",inline"` + // versions are the api versions that are available. + Versions []string `json:"versions" protobuf:"bytes,1,rep,name=versions"` + // a map of client CIDR to server address that is serving this group. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. 
+ // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,2,rep,name=serverAddressByClientCIDRs"` +} + +// APIGroupList is a list of APIGroup, to allow clients to discover the API at +// /apis. +type APIGroupList struct { + TypeMeta `json:",inline"` + // groups is a list of APIGroup. + Groups []APIGroup `json:"groups" protobuf:"bytes,1,rep,name=groups"` +} + +// APIGroup contains the name, the supported versions, and the preferred version +// of a group. +type APIGroup struct { + TypeMeta `json:",inline"` + // name is the name of the group. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // versions are the versions supported in this group. + Versions []GroupVersionForDiscovery `json:"versions" protobuf:"bytes,2,rep,name=versions"` + // preferredVersion is the version preferred by the API server, which + // probably is the storage version. + // +optional + PreferredVersion GroupVersionForDiscovery `json:"preferredVersion,omitempty" protobuf:"bytes,3,opt,name=preferredVersion"` + // a map of client CIDR to server address that is serving this group. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. + // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"` +} + +// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. +type ServerAddressByClientCIDR struct { + // The CIDR with which clients can match their IP to figure out the server address that they should use. + ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"` + // Address of this server, suitable for a client that matches the above CIDR. + // This can be a hostname, hostname:port, IP or IP:port. + ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"` +} + +// GroupVersion contains the "group/version" and "version" string of a version. +// It is made a struct to keep extensibility. +type GroupVersionForDiscovery struct { + // groupVersion specifies the API group and version in the form "group/version" + GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"` + // version specifies the version in the form of "version". This is to save + // the clients the trouble of splitting the GroupVersion. + Version string `json:"version" protobuf:"bytes,2,opt,name=version"` +} + +// APIResource specifies the name of a resource and whether it is namespaced. +type APIResource struct { + // name is the name of the resource. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // namespaced indicates if a resource is namespaced or not. + Namespaced bool `json:"namespaced" protobuf:"varint,2,opt,name=namespaced"` + // kind is the kind for the resource (e.g. 
'Foo' is the kind for a resource 'foo') + Kind string `json:"kind" protobuf:"bytes,3,opt,name=kind"` + // verbs is a list of supported kube verbs (this includes get, list, watch, create, + // update, patch, delete, deletecollection, and proxy) + Verbs Verbs `json:"verbs" protobuf:"bytes,4,opt,name=verbs"` + // shortNames is a list of suggested short names of the resource. + ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,5,rep,name=shortNames"` +} + +// Verbs masks the value so protobuf can generate +// +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type Verbs []string + +func (vs Verbs) String() string { + return fmt.Sprintf("%v", []string(vs)) +} + +// APIResourceList is a list of APIResource, it is used to expose the name of the +// resources supported in a specific group and version, and if the resource +// is namespaced. +type APIResourceList struct { + TypeMeta `json:",inline"` + // groupVersion is the group and version this APIResourceList is for. + GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"` + // resources contains the name of the resources and if they are namespaced. + APIResources []APIResource `json:"resources" protobuf:"bytes,2,rep,name=resources"` +} + +// RootPaths lists the paths available at root. +// For example: "/healthz", "/apis". +type RootPaths struct { + // paths are the paths available at root. + Paths []string `json:"paths" protobuf:"bytes,1,rep,name=paths"` +} + +// TODO: remove me when watch is refactored +func LabelSelectorQueryParam(version string) string { + return "labelSelector" +} + +// TODO: remove me when watch is refactored +func FieldSelectorQueryParam(version string) string { + return "fieldSelector" +} + +// String returns available api versions as a human-friendly version string. +func (apiVersions APIVersions) String() string { + return strings.Join(apiVersions.Versions, ",") +} + +func (apiVersions APIVersions) GoString() string { + return apiVersions.String() +} + +// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body. +type Patch struct{} + +// Note: +// There are two different styles of label selectors used in versioned types: +// an older style which is represented as just a string in versioned types, and a +// newer style that is structured. LabelSelector is an internal representation for the +// latter style. + +// A label selector is a label query over a set of resources. The result of matchLabels and +// matchExpressions are ANDed. An empty label selector matches all objects. A null +// label selector matches no objects. +type LabelSelector struct { + // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + // map is equivalent to an element of matchExpressions, whose key field is "key", the + // operator is "In", and the values array contains only "value". The requirements are ANDed. + // +optional + MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"` + // matchExpressions is a list of label selector requirements. The requirements are ANDed. + // +optional + MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"` +} + +// A label selector requirement is a selector that contains values, a key, and an operator that +// relates the key and values. +type LabelSelectorRequirement struct { + // key is the label key that the selector applies to. 
+ Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + // operator represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists and DoesNotExist. + Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"` + // values is an array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. This array is replaced during a strategic + // merge patch. + // +optional + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` +} + +// A label selector operator is the set of operators that can be used in a selector requirement. +type LabelSelectorOperator string + +const ( + LabelSelectorOpIn LabelSelectorOperator = "In" + LabelSelectorOpNotIn LabelSelectorOperator = "NotIn" + LabelSelectorOpExists LabelSelectorOperator = "Exists" + LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" +) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000..950c00285 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -0,0 +1,290 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_APIGroup = map[string]string{ + "": "APIGroup contains the name, the supported versions, and the preferred version of a group.", + "name": "name is the name of the group.", + "versions": "versions are the versions supported in this group.", + "preferredVersion": "preferredVersion is the version preferred by the API server, which probably is the storage version.", + "serverAddressByClientCIDRs": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. 
Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", +} + +func (APIGroup) SwaggerDoc() map[string]string { + return map_APIGroup +} + +var map_APIGroupList = map[string]string{ + "": "APIGroupList is a list of APIGroup, to allow clients to discover the API at /apis.", + "groups": "groups is a list of APIGroup.", +} + +func (APIGroupList) SwaggerDoc() map[string]string { + return map_APIGroupList +} + +var map_APIResource = map[string]string{ + "": "APIResource specifies the name of a resource and whether it is namespaced.", + "name": "name is the name of the resource.", + "namespaced": "namespaced indicates if a resource is namespaced or not.", + "kind": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", + "verbs": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", + "shortNames": "shortNames is a list of suggested short names of the resource.", +} + +func (APIResource) SwaggerDoc() map[string]string { + return map_APIResource +} + +var map_APIResourceList = map[string]string{ + "": "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.", + "groupVersion": "groupVersion is the group and version this APIResourceList is for.", + "resources": "resources contains the name of the resources and if they are namespaced.", +} + +func (APIResourceList) SwaggerDoc() map[string]string { + return map_APIResourceList +} + +var map_APIVersions = map[string]string{ + "": "APIVersions lists the versions that are available, to allow clients to discover the API at /api, which is the root path of the legacy v1 API.", + "versions": "versions are the api versions that are available.", + "serverAddressByClientCIDRs": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", +} + +func (APIVersions) SwaggerDoc() map[string]string { + return map_APIVersions +} + +var map_DeleteOptions = map[string]string{ + "": "DeleteOptions may be provided when deleting an API object.", + "gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", + "preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", + "orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. 
Either this field or PropagationPolicy may be set, but not both.", + "propagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.", +} + +func (DeleteOptions) SwaggerDoc() map[string]string { + return map_DeleteOptions +} + +var map_ExportOptions = map[string]string{ + "": "ExportOptions is the query options to the standard REST get call.", + "export": "Should this value be exported. Export strips fields that a user can not specify.", + "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.", +} + +func (ExportOptions) SwaggerDoc() map[string]string { + return map_ExportOptions +} + +var map_GetOptions = map[string]string{ + "": "GetOptions is the standard query options to the standard REST get call.", + "resourceVersion": "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", +} + +func (GetOptions) SwaggerDoc() map[string]string { + return map_GetOptions +} + +var map_GroupVersionForDiscovery = map[string]string{ + "": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.", + "groupVersion": "groupVersion specifies the API group and version in the form \"group/version\"", + "version": "version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion.", +} + +func (GroupVersionForDiscovery) SwaggerDoc() map[string]string { + return map_GroupVersionForDiscovery +} + +var map_LabelSelector = map[string]string{ + "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", + "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", + "matchExpressions": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", +} + +func (LabelSelector) SwaggerDoc() map[string]string { + return map_LabelSelector +} + +var map_LabelSelectorRequirement = map[string]string{ + "": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", + "key": "key is the label key that the selector applies to.", + "operator": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", + "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", +} + +func (LabelSelectorRequirement) SwaggerDoc() map[string]string { + return map_LabelSelectorRequirement +} + +var map_ListMeta = map[string]string{ + "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. 
A resource may have only one of {ObjectMeta, ListMeta}.", + "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", + "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency", +} + +func (ListMeta) SwaggerDoc() map[string]string { + return map_ListMeta +} + +var map_ListOptions = map[string]string{ + "": "ListOptions is the query options to a standard REST list call.", + "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "timeoutSeconds": "Timeout for the list/watch call.", +} + +func (ListOptions) SwaggerDoc() map[string]string { + return map_ListOptions +} + +var map_ObjectMeta = map[string]string{ + "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", + "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency", + "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/namespaces", + "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", + "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and pass them unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency", + "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", + "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", + "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", + "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
More info: http://kubernetes.io/docs/user-guide/annotations", + "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", + "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", + "clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", +} + +func (ObjectMeta) SwaggerDoc() map[string]string { + return map_ObjectMeta +} + +var map_OwnerReference = map[string]string{ + "": "OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field.", + "apiVersion": "API version of the referent.", + "kind": "Kind of the referent. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "uid": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "controller": "If true, this reference points to the managing controller.", + "blockOwnerDeletion": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", +} + +func (OwnerReference) SwaggerDoc() map[string]string { + return map_OwnerReference +} + +var map_Patch = map[string]string{ + "": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.", +} + +func (Patch) SwaggerDoc() map[string]string { + return map_Patch +} + +var map_Preconditions = map[string]string{ + "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", + "uid": "Specifies the target UID.", +} + +func (Preconditions) SwaggerDoc() map[string]string { + return map_Preconditions +} + +var map_RootPaths = map[string]string{ + "": "RootPaths lists the paths available at root. For example: \"/healthz\", \"/apis\".", + "paths": "paths are the paths available at root.", +} + +func (RootPaths) SwaggerDoc() map[string]string { + return map_RootPaths +} + +var map_ServerAddressByClientCIDR = map[string]string{ + "": "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.", + "clientCIDR": "The CIDR with which clients can match their IP to figure out the server address that they should use.", + "serverAddress": "Address of this server, suitable for a client that matches the above CIDR. 
This can be a hostname, hostname:port, IP or IP:port.", +} + +func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string { + return map_ServerAddressByClientCIDR +} + +var map_Status = map[string]string{ + "": "Status is a return value for calls that don't return other objects.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "message": "A human-readable description of the status of this operation.", + "reason": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.", + "details": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.", + "code": "Suggested HTTP return code for this status, 0 if not set.", +} + +func (Status) SwaggerDoc() map[string]string { + return map_Status +} + +var map_StatusCause = map[string]string{ + "": "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.", + "reason": "A machine-readable description of the cause of the error. If this value is empty there is no information available.", + "message": "A human-readable description of the cause of the error. This field may be presented as-is to a reader.", + "field": "The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"", +} + +func (StatusCause) SwaggerDoc() map[string]string { + return map_StatusCause +} + +var map_StatusDetails = map[string]string{ + "": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", + "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", + "group": "The group attribute of the resource associated with the status StatusReason.", + "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "causes": "The Causes array includes more details associated with the StatusReason failure. 
Not all StatusReasons may provide detailed causes.", + "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried.", +} + +func (StatusDetails) SwaggerDoc() map[string]string { + return map_StatusDetails +} + +var map_TypeMeta = map[string]string{ + "": "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.", + "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources", +} + +func (TypeMeta) SwaggerDoc() map[string]string { + return map_TypeMeta +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go new file mode 100644 index 000000000..ae20726b6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -0,0 +1,689 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unstructured + +import ( + "bytes" + gojson "encoding/json" + "errors" + "fmt" + "io" + "strings" + + "github.com/golang/glog" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" +) + +// Unstructured allows objects that do not have Golang structs registered to be manipulated +// generically. This can be used to deal with the API objects from a plug-in. Unstructured +// objects still have functioning TypeMeta features-- kind, version, etc. +// +// WARNING: This object has accessors for the v1 standard metadata. You *MUST NOT* use this +// type if you are dealing with objects that are not in the server meta v1 schema. +// +// TODO: make the serialization part of this type distinct from the field accessors. +type Unstructured struct { + // Object is a JSON compatible map with string, float, int, bool, []interface{}, or + // map[string]interface{} + // children. 
+ Object map[string]interface{} +} + +var _ metav1.Object = &Unstructured{} +var _ runtime.Unstructured = &Unstructured{} +var _ runtime.Unstructured = &UnstructuredList{} + +func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj } +func (obj *UnstructuredList) GetObjectKind() schema.ObjectKind { return obj } + +func (obj *Unstructured) IsUnstructuredObject() {} +func (obj *UnstructuredList) IsUnstructuredObject() {} + +func (obj *Unstructured) IsList() bool { + if obj.Object != nil { + _, ok := obj.Object["items"] + return ok + } + return false +} +func (obj *UnstructuredList) IsList() bool { return true } + +func (obj *Unstructured) UnstructuredContent() map[string]interface{} { + if obj.Object == nil { + obj.Object = make(map[string]interface{}) + } + return obj.Object +} + +// UnstructuredContent returns a map containing an overlay of the Items field onto +// the Object field. Items always overwrites the overlay. Changing "items" in the +// returned object will affect items in the underlying Items field, but changing +// the "items" slice itself will have no effect. +// TODO: expose SetUnstructuredContent on runtime.Unstructured that allows +// items to be changed. +func (obj *UnstructuredList) UnstructuredContent() map[string]interface{} { + out := obj.Object + if out == nil { + out = make(map[string]interface{}) + } + items := make([]interface{}, len(obj.Items)) + for i, item := range obj.Items { + items[i] = item.Object + } + out["items"] = items + return out +} + +// MarshalJSON ensures that the unstructured object produces proper +// JSON when passed to Go's standard JSON library. +func (u *Unstructured) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + err := UnstructuredJSONScheme.Encode(u, &buf) + return buf.Bytes(), err +} + +// UnmarshalJSON ensures that the unstructured object properly decodes +// JSON when passed to Go's standard JSON library. 
+func (u *Unstructured) UnmarshalJSON(b []byte) error { + _, _, err := UnstructuredJSONScheme.Decode(b, nil, u) + return err +} + +func getNestedField(obj map[string]interface{}, fields ...string) interface{} { + var val interface{} = obj + for _, field := range fields { + if _, ok := val.(map[string]interface{}); !ok { + return nil + } + val = val.(map[string]interface{})[field] + } + return val +} + +func getNestedString(obj map[string]interface{}, fields ...string) string { + if str, ok := getNestedField(obj, fields...).(string); ok { + return str + } + return "" +} + +func getNestedSlice(obj map[string]interface{}, fields ...string) []string { + if m, ok := getNestedField(obj, fields...).([]interface{}); ok { + strSlice := make([]string, 0, len(m)) + for _, v := range m { + if str, ok := v.(string); ok { + strSlice = append(strSlice, str) + } + } + return strSlice + } + return nil +} + +func getNestedMap(obj map[string]interface{}, fields ...string) map[string]string { + if m, ok := getNestedField(obj, fields...).(map[string]interface{}); ok { + strMap := make(map[string]string, len(m)) + for k, v := range m { + if str, ok := v.(string); ok { + strMap[k] = str + } + } + return strMap + } + return nil +} + +func setNestedField(obj map[string]interface{}, value interface{}, fields ...string) { + m := obj + if len(fields) > 1 { + for _, field := range fields[0 : len(fields)-1] { + if _, ok := m[field].(map[string]interface{}); !ok { + m[field] = make(map[string]interface{}) + } + m = m[field].(map[string]interface{}) + } + } + m[fields[len(fields)-1]] = value +} + +func setNestedSlice(obj map[string]interface{}, value []string, fields ...string) { + m := make([]interface{}, 0, len(value)) + for _, v := range value { + m = append(m, v) + } + setNestedField(obj, m, fields...) +} + +func setNestedMap(obj map[string]interface{}, value map[string]string, fields ...string) { + m := make(map[string]interface{}, len(value)) + for k, v := range value { + m[k] = v + } + setNestedField(obj, m, fields...) +} + +func (u *Unstructured) setNestedField(value interface{}, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + setNestedField(u.Object, value, fields...) +} + +func (u *Unstructured) setNestedSlice(value []string, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + setNestedSlice(u.Object, value, fields...) +} + +func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + setNestedMap(u.Object, value, fields...) +} + +func extractOwnerReference(src interface{}) metav1.OwnerReference { + v := src.(map[string]interface{}) + // though this field is a *bool, but when decoded from JSON, it's + // unmarshalled as bool. 
+ var controllerPtr *bool + controller, ok := (getNestedField(v, "controller")).(bool) + if !ok { + controllerPtr = nil + } else { + controllerCopy := controller + controllerPtr = &controllerCopy + } + var blockOwnerDeletionPtr *bool + blockOwnerDeletion, ok := (getNestedField(v, "blockOwnerDeletion")).(bool) + if !ok { + blockOwnerDeletionPtr = nil + } else { + blockOwnerDeletionCopy := blockOwnerDeletion + blockOwnerDeletionPtr = &blockOwnerDeletionCopy + } + return metav1.OwnerReference{ + Kind: getNestedString(v, "kind"), + Name: getNestedString(v, "name"), + APIVersion: getNestedString(v, "apiVersion"), + UID: (types.UID)(getNestedString(v, "uid")), + Controller: controllerPtr, + BlockOwnerDeletion: blockOwnerDeletionPtr, + } +} + +func setOwnerReference(src metav1.OwnerReference) map[string]interface{} { + ret := make(map[string]interface{}) + controllerPtr := src.Controller + if controllerPtr != nil { + controller := *controllerPtr + controllerPtr = &controller + } + blockOwnerDeletionPtr := src.BlockOwnerDeletion + if blockOwnerDeletionPtr != nil { + blockOwnerDeletion := *blockOwnerDeletionPtr + blockOwnerDeletionPtr = &blockOwnerDeletion + } + setNestedField(ret, src.Kind, "kind") + setNestedField(ret, src.Name, "name") + setNestedField(ret, src.APIVersion, "apiVersion") + setNestedField(ret, string(src.UID), "uid") + setNestedField(ret, controllerPtr, "controller") + setNestedField(ret, blockOwnerDeletionPtr, "blockOwnerDeletion") + return ret +} + +func getOwnerReferences(object map[string]interface{}) ([]map[string]interface{}, error) { + field := getNestedField(object, "metadata", "ownerReferences") + if field == nil { + return nil, fmt.Errorf("cannot find field metadata.ownerReferences in %v", object) + } + ownerReferences, ok := field.([]map[string]interface{}) + if ok { + return ownerReferences, nil + } + // TODO: This is hacky... 
+ interfaces, ok := field.([]interface{}) + if !ok { + return nil, fmt.Errorf("expect metadata.ownerReferences to be a slice in %#v", object) + } + ownerReferences = make([]map[string]interface{}, 0, len(interfaces)) + for i := 0; i < len(interfaces); i++ { + r, ok := interfaces[i].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("expect element metadata.ownerReferences to be a map[string]interface{} in %#v", object) + } + ownerReferences = append(ownerReferences, r) + } + return ownerReferences, nil +} + +func (u *Unstructured) GetOwnerReferences() []metav1.OwnerReference { + original, err := getOwnerReferences(u.Object) + if err != nil { + glog.V(6).Info(err) + return nil + } + ret := make([]metav1.OwnerReference, 0, len(original)) + for i := 0; i < len(original); i++ { + ret = append(ret, extractOwnerReference(original[i])) + } + return ret +} + +func (u *Unstructured) SetOwnerReferences(references []metav1.OwnerReference) { + var newReferences = make([]map[string]interface{}, 0, len(references)) + for i := 0; i < len(references); i++ { + newReferences = append(newReferences, setOwnerReference(references[i])) + } + u.setNestedField(newReferences, "metadata", "ownerReferences") +} + +func (u *Unstructured) GetAPIVersion() string { + return getNestedString(u.Object, "apiVersion") +} + +func (u *Unstructured) SetAPIVersion(version string) { + u.setNestedField(version, "apiVersion") +} + +func (u *Unstructured) GetKind() string { + return getNestedString(u.Object, "kind") +} + +func (u *Unstructured) SetKind(kind string) { + u.setNestedField(kind, "kind") +} + +func (u *Unstructured) GetNamespace() string { + return getNestedString(u.Object, "metadata", "namespace") +} + +func (u *Unstructured) SetNamespace(namespace string) { + u.setNestedField(namespace, "metadata", "namespace") +} + +func (u *Unstructured) GetName() string { + return getNestedString(u.Object, "metadata", "name") +} + +func (u *Unstructured) SetName(name string) { + u.setNestedField(name, "metadata", "name") +} + +func (u *Unstructured) GetGenerateName() string { + return getNestedString(u.Object, "metadata", "generateName") +} + +func (u *Unstructured) SetGenerateName(name string) { + u.setNestedField(name, "metadata", "generateName") +} + +func (u *Unstructured) GetUID() types.UID { + return types.UID(getNestedString(u.Object, "metadata", "uid")) +} + +func (u *Unstructured) SetUID(uid types.UID) { + u.setNestedField(string(uid), "metadata", "uid") +} + +func (u *Unstructured) GetResourceVersion() string { + return getNestedString(u.Object, "metadata", "resourceVersion") +} + +func (u *Unstructured) SetResourceVersion(version string) { + u.setNestedField(version, "metadata", "resourceVersion") +} + +func (u *Unstructured) GetSelfLink() string { + return getNestedString(u.Object, "metadata", "selfLink") +} + +func (u *Unstructured) SetSelfLink(selfLink string) { + u.setNestedField(selfLink, "metadata", "selfLink") +} + +func (u *Unstructured) GetCreationTimestamp() metav1.Time { + var timestamp metav1.Time + timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "creationTimestamp")) + return timestamp +} + +func (u *Unstructured) SetCreationTimestamp(timestamp metav1.Time) { + ts, _ := timestamp.MarshalQueryParameter() + u.setNestedField(ts, "metadata", "creationTimestamp") +} + +func (u *Unstructured) GetDeletionTimestamp() *metav1.Time { + var timestamp metav1.Time + timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "deletionTimestamp")) + if timestamp.IsZero() { + 
return nil + } + return &timestamp +} + +func (u *Unstructured) SetDeletionTimestamp(timestamp *metav1.Time) { + ts, _ := timestamp.MarshalQueryParameter() + u.setNestedField(ts, "metadata", "deletionTimestamp") +} + +func (u *Unstructured) GetLabels() map[string]string { + return getNestedMap(u.Object, "metadata", "labels") +} + +func (u *Unstructured) SetLabels(labels map[string]string) { + u.setNestedMap(labels, "metadata", "labels") +} + +func (u *Unstructured) GetAnnotations() map[string]string { + return getNestedMap(u.Object, "metadata", "annotations") +} + +func (u *Unstructured) SetAnnotations(annotations map[string]string) { + u.setNestedMap(annotations, "metadata", "annotations") +} + +func (u *Unstructured) SetGroupVersionKind(gvk schema.GroupVersionKind) { + u.SetAPIVersion(gvk.GroupVersion().String()) + u.SetKind(gvk.Kind) +} + +func (u *Unstructured) GroupVersionKind() schema.GroupVersionKind { + gv, err := schema.ParseGroupVersion(u.GetAPIVersion()) + if err != nil { + return schema.GroupVersionKind{} + } + gvk := gv.WithKind(u.GetKind()) + return gvk +} + +func (u *Unstructured) GetFinalizers() []string { + return getNestedSlice(u.Object, "metadata", "finalizers") +} + +func (u *Unstructured) SetFinalizers(finalizers []string) { + u.setNestedSlice(finalizers, "metadata", "finalizers") +} + +func (u *Unstructured) GetClusterName() string { + return getNestedString(u.Object, "metadata", "clusterName") +} + +func (u *Unstructured) SetClusterName(clusterName string) { + u.setNestedField(clusterName, "metadata", "clusterName") +} + +// UnstructuredList allows lists that do not have Golang structs +// registered to be manipulated generically. This can be used to deal +// with the API lists from a plug-in. +type UnstructuredList struct { + Object map[string]interface{} + + // Items is a list of unstructured objects. + Items []*Unstructured `json:"items"` +} + +// MarshalJSON ensures that the unstructured list object produces proper +// JSON when passed to Go's standard JSON library. +func (u *UnstructuredList) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + err := UnstructuredJSONScheme.Encode(u, &buf) + return buf.Bytes(), err +} + +// UnmarshalJSON ensures that the unstructured list object properly +// decodes JSON when passed to Go's standard JSON library. +func (u *UnstructuredList) UnmarshalJSON(b []byte) error { + _, _, err := UnstructuredJSONScheme.Decode(b, nil, u) + return err +} + +func (u *UnstructuredList) setNestedField(value interface{}, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + setNestedField(u.Object, value, fields...) 
+} + +func (u *UnstructuredList) GetAPIVersion() string { + return getNestedString(u.Object, "apiVersion") +} + +func (u *UnstructuredList) SetAPIVersion(version string) { + u.setNestedField(version, "apiVersion") +} + +func (u *UnstructuredList) GetKind() string { + return getNestedString(u.Object, "kind") +} + +func (u *UnstructuredList) SetKind(kind string) { + u.setNestedField(kind, "kind") +} + +func (u *UnstructuredList) GetResourceVersion() string { + return getNestedString(u.Object, "metadata", "resourceVersion") +} + +func (u *UnstructuredList) SetResourceVersion(version string) { + u.setNestedField(version, "metadata", "resourceVersion") +} + +func (u *UnstructuredList) GetSelfLink() string { + return getNestedString(u.Object, "metadata", "selfLink") +} + +func (u *UnstructuredList) SetSelfLink(selfLink string) { + u.setNestedField(selfLink, "metadata", "selfLink") +} + +func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) { + u.SetAPIVersion(gvk.GroupVersion().String()) + u.SetKind(gvk.Kind) +} + +func (u *UnstructuredList) GroupVersionKind() schema.GroupVersionKind { + gv, err := schema.ParseGroupVersion(u.GetAPIVersion()) + if err != nil { + return schema.GroupVersionKind{} + } + gvk := gv.WithKind(u.GetKind()) + return gvk +} + +// UnstructuredJSONScheme is capable of converting JSON data into the Unstructured +// type, which can be used for generic access to objects without a predefined scheme. +// TODO: move into serializer/json. +var UnstructuredJSONScheme runtime.Codec = unstructuredJSONScheme{} + +type unstructuredJSONScheme struct{} + +func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + var err error + if obj != nil { + err = s.decodeInto(data, obj) + } else { + obj, err = s.decode(data) + } + + if err != nil { + return nil, nil, err + } + + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return nil, &gvk, runtime.NewMissingKindErr(string(data)) + } + + return obj, &gvk, nil +} + +func (unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error { + switch t := obj.(type) { + case *Unstructured: + return json.NewEncoder(w).Encode(t.Object) + case *UnstructuredList: + items := make([]map[string]interface{}, 0, len(t.Items)) + for _, i := range t.Items { + items = append(items, i.Object) + } + t.Object["items"] = items + defer func() { delete(t.Object, "items") }() + return json.NewEncoder(w).Encode(t.Object) + case *runtime.Unknown: + // TODO: Unstructured needs to deal with ContentType. + _, err := w.Write(t.Raw) + return err + default: + return json.NewEncoder(w).Encode(t) + } +} + +func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) { + type detector struct { + Items gojson.RawMessage + } + var det detector + if err := json.Unmarshal(data, &det); err != nil { + return nil, err + } + + if det.Items != nil { + list := &UnstructuredList{} + err := s.decodeToList(data, list) + return list, err + } + + // No Items field, so it wasn't a list. 
+ unstruct := &Unstructured{} + err := s.decodeToUnstructured(data, unstruct) + return unstruct, err +} + +func (s unstructuredJSONScheme) decodeInto(data []byte, obj runtime.Object) error { + switch x := obj.(type) { + case *Unstructured: + return s.decodeToUnstructured(data, x) + case *UnstructuredList: + return s.decodeToList(data, x) + case *runtime.VersionedObjects: + o, err := s.decode(data) + if err == nil { + x.Objects = []runtime.Object{o} + } + return err + default: + return json.Unmarshal(data, x) + } +} + +func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstructured) error { + m := make(map[string]interface{}) + if err := json.Unmarshal(data, &m); err != nil { + return err + } + + unstruct.Object = m + + return nil +} + +func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error { + type decodeList struct { + Items []gojson.RawMessage + } + + var dList decodeList + if err := json.Unmarshal(data, &dList); err != nil { + return err + } + + if err := json.Unmarshal(data, &list.Object); err != nil { + return err + } + + // For typed lists, e.g., a PodList, API server doesn't set each item's + // APIVersion and Kind. We need to set it. + listAPIVersion := list.GetAPIVersion() + listKind := list.GetKind() + itemKind := strings.TrimSuffix(listKind, "List") + + delete(list.Object, "items") + list.Items = nil + for _, i := range dList.Items { + unstruct := &Unstructured{} + if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil { + return err + } + // This is hacky. Set the item's Kind and APIVersion to those inferred + // from the List. + if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 { + unstruct.SetKind(itemKind) + unstruct.SetAPIVersion(listAPIVersion) + } + list.Items = append(list.Items, unstruct) + } + return nil +} + +// UnstructuredObjectConverter is an ObjectConverter for use with +// Unstructured objects. Since it has no schema or type information, +// it will only succeed for no-op conversions. This is provided as a +// sane implementation for APIs that require an object converter. +type UnstructuredObjectConverter struct{} + +func (UnstructuredObjectConverter) Convert(in, out, context interface{}) error { + unstructIn, ok := in.(*Unstructured) + if !ok { + return fmt.Errorf("input type %T is not valid for unstructured conversion", in) + } + + unstructOut, ok := out.(*Unstructured) + if !ok { + return fmt.Errorf("output type %T is not valid for unstructured conversion", out) + } + + // maybe deep copy the map? It is documented in the + // ObjectConverter interface that this function is not + // guaranteed to not mutate the input. Or maybe set the input + // object to nil. + unstructOut.Object = unstructIn.Object + return nil +} + +func (UnstructuredObjectConverter) ConvertToVersion(in runtime.Object, target runtime.GroupVersioner) (runtime.Object, error) { + if kind := in.GetObjectKind().GroupVersionKind(); !kind.Empty() { + gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{kind}) + if !ok { + // TODO: should this be a typed error? 
+ return nil, fmt.Errorf("%v is unstructured and is not suitable for converting to %q", kind, target) + } + in.GetObjectKind().SetGroupVersionKind(gvk) + } + return in, nil +} + +func (UnstructuredObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { + return "", "", errors.New("unstructured cannot convert field labels") +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go new file mode 100644 index 000000000..a645501a1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go @@ -0,0 +1,80 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" +) + +// Event represents a single event to a watched resource. +// +// +protobuf=true +type WatchEvent struct { + Type string `json:"type" protobuf:"bytes,1,opt,name=type"` + + // Object is: + // * If Type is Added or Modified: the new state of the object. + // * If Type is Deleted: the state of the object immediately before deletion. + // * If Type is Error: *Status is recommended; other types may make sense + // depending on context. 
+ Object runtime.RawExtension `json:"object" protobuf:"bytes,2,opt,name=object"` +} + +func Convert_watch_Event_to_versioned_Event(in *watch.Event, out *WatchEvent, s conversion.Scope) error { + out.Type = string(in.Type) + switch t := in.Object.(type) { + case *runtime.Unknown: + // TODO: handle other fields on Unknown and detect type + out.Object.Raw = t.Raw + case nil: + default: + out.Object.Object = in.Object + } + return nil +} + +func Convert_versioned_InternalEvent_to_versioned_Event(in *InternalEvent, out *WatchEvent, s conversion.Scope) error { + return Convert_watch_Event_to_versioned_Event((*watch.Event)(in), out, s) +} + +func Convert_versioned_Event_to_watch_Event(in *WatchEvent, out *watch.Event, s conversion.Scope) error { + out.Type = watch.EventType(in.Type) + if in.Object.Object != nil { + out.Object = in.Object.Object + } else if in.Object.Raw != nil { + // TODO: handle other fields on Unknown and detect type + out.Object = &runtime.Unknown{ + Raw: in.Object.Raw, + ContentType: runtime.ContentTypeJSON, + } + } + return nil +} + +func Convert_versioned_Event_to_versioned_InternalEvent(in *WatchEvent, out *InternalEvent, s conversion.Scope) error { + return Convert_versioned_Event_to_watch_Event(in, (*watch.Event)(out), s) +} + +// InternalEvent makes watch.Event versioned +// +protobuf=false +type InternalEvent watch.Event + +func (e *InternalEvent) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } +func (e *WatchEvent) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/well_known_labels.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/well_known_labels.go new file mode 100644 index 000000000..bd17546a4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/well_known_labels.go @@ -0,0 +1,84 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + // If you add a new topology domain here, also consider adding it to the set of default values + // for the scheduler's --failure-domain command-line argument. + LabelHostname = "kubernetes.io/hostname" + LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" + LabelZoneRegion = "failure-domain.beta.kubernetes.io/region" + + LabelInstanceType = "beta.kubernetes.io/instance-type" + + LabelOS = "beta.kubernetes.io/os" + LabelArch = "beta.kubernetes.io/arch" + + // Historically fluentd was a manifest pod that was migrated to a DaemonSet. + // To avoid a situation during cluster upgrade where there are two instances + // of fluentd running on a node, the kubelet needs to mark nodes on which + // fluentd is not running as a manifest pod with LabelFluentdDsReady. + LabelFluentdDsReady = "alpha.kubernetes.io/fluentd-ds-ready" + + // When feature-gate for TaintBasedEvictions=true flag is enabled, + // TaintNodeNotReady would be automatically added by node controller + // when node is not ready, and removed when node becomes ready. 
+ TaintNodeNotReady = "node.alpha.kubernetes.io/notReady" + + // When feature-gate for TaintBasedEvictions=true flag is enabled, + // TaintNodeUnreachable would be automatically added by node controller + // when node becomes unreachable (corresponding to NodeReady status ConditionUnknown) + // and removed when node becomes reachable (NodeReady status ConditionTrue). + TaintNodeUnreachable = "node.alpha.kubernetes.io/unreachable" +) + +// Role labels are applied to Nodes to mark their purpose. In particular, we +// usually want to distinguish the master, so that we can isolate privileged +// pods and operations. +// +// Originally we relied on not registering the master, on the fact that the +// master was Unschedulable, and on static manifests for master components. +// But we now do register masters in many environments, are generally moving +// away from static manifests (for better manageability), and working towards +// deprecating the unschedulable field (replacing it with taints & tolerations +// instead). +// +// Even with tainting, a label remains the easiest way of making a positive +// selection, so that pods can schedule only to master nodes for example, and +// thus installations will likely define a label for their master nodes. +// +// So that we can recognize master nodes in consequent places though (such as +// kubectl get nodes), we encourage installations to use the well-known labels. +// We define NodeLabelRole, which is the preferred form, but we will also recognize +// other forms that are known to be in widespread use (NodeLabelKubeadmAlphaRole). + +const ( + // NodeLabelRole is the preferred label applied to a Node as a hint that it has a particular purpose (defined by the value). + NodeLabelRole = "kubernetes.io/role" + + // NodeLabelKubeadmAlphaRole is a label that kubeadm applies to a Node as a hint that it has a particular purpose. + // Use of NodeLabelRole is preferred. + NodeLabelKubeadmAlphaRole = "kubeadm.alpha.kubernetes.io/role" + + // NodeLabelRoleMaster is the value of a NodeLabelRole or NodeLabelKubeadmAlphaRole label, indicating a master node. + // A master node typically runs kubernetes system components and will not typically run user workloads. + NodeLabelRoleMaster = "master" + + // NodeLabelRoleNode is the value of a NodeLabelRole or NodeLabelKubeadmAlphaRole label, indicating a "normal" node, + // as opposed to a RoleMaster node. + NodeLabelRoleNode = "node" +) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..b5f7360e2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -0,0 +1,554 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package v1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + reflect "reflect" +) + +// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. +func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { + return []conversion.GeneratedDeepCopyFunc{ + {Fn: DeepCopy_v1_APIGroup, InType: reflect.TypeOf(&APIGroup{})}, + {Fn: DeepCopy_v1_APIGroupList, InType: reflect.TypeOf(&APIGroupList{})}, + {Fn: DeepCopy_v1_APIResource, InType: reflect.TypeOf(&APIResource{})}, + {Fn: DeepCopy_v1_APIResourceList, InType: reflect.TypeOf(&APIResourceList{})}, + {Fn: DeepCopy_v1_APIVersions, InType: reflect.TypeOf(&APIVersions{})}, + {Fn: DeepCopy_v1_DeleteOptions, InType: reflect.TypeOf(&DeleteOptions{})}, + {Fn: DeepCopy_v1_Duration, InType: reflect.TypeOf(&Duration{})}, + {Fn: DeepCopy_v1_ExportOptions, InType: reflect.TypeOf(&ExportOptions{})}, + {Fn: DeepCopy_v1_GetOptions, InType: reflect.TypeOf(&GetOptions{})}, + {Fn: DeepCopy_v1_GroupKind, InType: reflect.TypeOf(&GroupKind{})}, + {Fn: DeepCopy_v1_GroupResource, InType: reflect.TypeOf(&GroupResource{})}, + {Fn: DeepCopy_v1_GroupVersion, InType: reflect.TypeOf(&GroupVersion{})}, + {Fn: DeepCopy_v1_GroupVersionForDiscovery, InType: reflect.TypeOf(&GroupVersionForDiscovery{})}, + {Fn: DeepCopy_v1_GroupVersionKind, InType: reflect.TypeOf(&GroupVersionKind{})}, + {Fn: DeepCopy_v1_GroupVersionResource, InType: reflect.TypeOf(&GroupVersionResource{})}, + {Fn: DeepCopy_v1_InternalEvent, InType: reflect.TypeOf(&InternalEvent{})}, + {Fn: DeepCopy_v1_LabelSelector, InType: reflect.TypeOf(&LabelSelector{})}, + {Fn: DeepCopy_v1_LabelSelectorRequirement, InType: reflect.TypeOf(&LabelSelectorRequirement{})}, + {Fn: DeepCopy_v1_ListMeta, InType: reflect.TypeOf(&ListMeta{})}, + {Fn: DeepCopy_v1_ListOptions, InType: reflect.TypeOf(&ListOptions{})}, + {Fn: DeepCopy_v1_ObjectMeta, InType: reflect.TypeOf(&ObjectMeta{})}, + {Fn: DeepCopy_v1_OwnerReference, InType: reflect.TypeOf(&OwnerReference{})}, + {Fn: DeepCopy_v1_Patch, InType: reflect.TypeOf(&Patch{})}, + {Fn: DeepCopy_v1_Preconditions, InType: reflect.TypeOf(&Preconditions{})}, + {Fn: DeepCopy_v1_RootPaths, InType: reflect.TypeOf(&RootPaths{})}, + {Fn: DeepCopy_v1_ServerAddressByClientCIDR, InType: reflect.TypeOf(&ServerAddressByClientCIDR{})}, + {Fn: DeepCopy_v1_Status, InType: reflect.TypeOf(&Status{})}, + {Fn: DeepCopy_v1_StatusCause, InType: reflect.TypeOf(&StatusCause{})}, + {Fn: DeepCopy_v1_StatusDetails, InType: reflect.TypeOf(&StatusDetails{})}, + {Fn: DeepCopy_v1_Time, InType: reflect.TypeOf(&Time{})}, + {Fn: DeepCopy_v1_Timestamp, InType: reflect.TypeOf(&Timestamp{})}, + {Fn: DeepCopy_v1_TypeMeta, InType: reflect.TypeOf(&TypeMeta{})}, + {Fn: DeepCopy_v1_WatchEvent, InType: reflect.TypeOf(&WatchEvent{})}, + } +} + +func DeepCopy_v1_APIGroup(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*APIGroup) + out := out.(*APIGroup) + *out = *in + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]GroupVersionForDiscovery, len(*in)) + copy(*out, *in) + } + if in.ServerAddressByClientCIDRs != nil { + in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_APIGroupList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*APIGroupList) + out := 
out.(*APIGroupList) + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]APIGroup, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*APIGroup) + } + } + } + return nil + } +} + +func DeepCopy_v1_APIResource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*APIResource) + out := out.(*APIResource) + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make(Verbs, len(*in)) + copy(*out, *in) + } + if in.ShortNames != nil { + in, out := &in.ShortNames, &out.ShortNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_APIResourceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*APIResourceList) + out := out.(*APIResourceList) + *out = *in + if in.APIResources != nil { + in, out := &in.APIResources, &out.APIResources + *out = make([]APIResource, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*APIResource) + } + } + } + return nil + } +} + +func DeepCopy_v1_APIVersions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*APIVersions) + out := out.(*APIVersions) + *out = *in + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ServerAddressByClientCIDRs != nil { + in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_DeleteOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeleteOptions) + out := out.(*DeleteOptions) + *out = *in + if in.GracePeriodSeconds != nil { + in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Preconditions != nil { + in, out := &in.Preconditions, &out.Preconditions + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*Preconditions) + } + } + if in.OrphanDependents != nil { + in, out := &in.OrphanDependents, &out.OrphanDependents + *out = new(bool) + **out = **in + } + if in.PropagationPolicy != nil { + in, out := &in.PropagationPolicy, &out.PropagationPolicy + *out = new(DeletionPropagation) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_Duration(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Duration) + out := out.(*Duration) + *out = *in + return nil + } +} + +func DeepCopy_v1_ExportOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExportOptions) + out := out.(*ExportOptions) + *out = *in + return nil + } +} + +func DeepCopy_v1_GetOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GetOptions) + out := out.(*GetOptions) + *out = *in + return nil + } +} + +func DeepCopy_v1_GroupKind(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GroupKind) + out := out.(*GroupKind) + *out = *in + return nil + } +} + +func DeepCopy_v1_GroupResource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GroupResource) + out := out.(*GroupResource) + *out = *in + return nil + } +} + +func DeepCopy_v1_GroupVersion(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GroupVersion) + 
out := out.(*GroupVersion) + *out = *in + return nil + } +} + +func DeepCopy_v1_GroupVersionForDiscovery(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GroupVersionForDiscovery) + out := out.(*GroupVersionForDiscovery) + *out = *in + return nil + } +} + +func DeepCopy_v1_GroupVersionKind(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GroupVersionKind) + out := out.(*GroupVersionKind) + *out = *in + return nil + } +} + +func DeepCopy_v1_GroupVersionResource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GroupVersionResource) + out := out.(*GroupVersionResource) + *out = *in + return nil + } +} + +func DeepCopy_v1_InternalEvent(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*InternalEvent) + out := out.(*InternalEvent) + *out = *in + // in.Object is kind 'Interface' + if in.Object != nil { + if newVal, err := c.DeepCopy(&in.Object); err != nil { + return err + } else { + out.Object = *newVal.(*runtime.Object) + } + } + return nil + } +} + +func DeepCopy_v1_LabelSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LabelSelector) + out := out.(*LabelSelector) + *out = *in + if in.MatchLabels != nil { + in, out := &in.MatchLabels, &out.MatchLabels + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]LabelSelectorRequirement, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*LabelSelectorRequirement) + } + } + } + return nil + } +} + +func DeepCopy_v1_LabelSelectorRequirement(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LabelSelectorRequirement) + out := out.(*LabelSelectorRequirement) + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_ListMeta(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ListMeta) + out := out.(*ListMeta) + *out = *in + return nil + } +} + +func DeepCopy_v1_ListOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ListOptions) + out := out.(*ListOptions) + *out = *in + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_ObjectMeta(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMeta) + out := out.(*ObjectMeta) + *out = *in + out.CreationTimestamp = in.CreationTimestamp.DeepCopy() + if in.DeletionTimestamp != nil { + in, out := &in.DeletionTimestamp, &out.DeletionTimestamp + *out = new(Time) + **out = (*in).DeepCopy() + } + if in.DeletionGracePeriodSeconds != nil { + in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]OwnerReference, len(*in)) + 
for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*OwnerReference) + } + } + } + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_OwnerReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*OwnerReference) + out := out.(*OwnerReference) + *out = *in + if in.Controller != nil { + in, out := &in.Controller, &out.Controller + *out = new(bool) + **out = **in + } + if in.BlockOwnerDeletion != nil { + in, out := &in.BlockOwnerDeletion, &out.BlockOwnerDeletion + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_Patch(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Patch) + out := out.(*Patch) + *out = *in + return nil + } +} + +func DeepCopy_v1_Preconditions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Preconditions) + out := out.(*Preconditions) + *out = *in + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(types.UID) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_RootPaths(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RootPaths) + out := out.(*RootPaths) + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_ServerAddressByClientCIDR(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServerAddressByClientCIDR) + out := out.(*ServerAddressByClientCIDR) + *out = *in + return nil + } +} + +func DeepCopy_v1_Status(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Status) + out := out.(*Status) + *out = *in + if in.Details != nil { + in, out := &in.Details, &out.Details + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*StatusDetails) + } + } + return nil + } +} + +func DeepCopy_v1_StatusCause(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatusCause) + out := out.(*StatusCause) + *out = *in + return nil + } +} + +func DeepCopy_v1_StatusDetails(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatusDetails) + out := out.(*StatusDetails) + *out = *in + if in.Causes != nil { + in, out := &in.Causes, &out.Causes + *out = make([]StatusCause, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_Time(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Time) + out := out.(*Time) + *out = in.DeepCopy() + return nil + } +} + +func DeepCopy_v1_Timestamp(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Timestamp) + out := out.(*Timestamp) + *out = *in + return nil + } +} + +func DeepCopy_v1_TypeMeta(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TypeMeta) + out := out.(*TypeMeta) + *out = *in + return nil + } +} + +func DeepCopy_v1_WatchEvent(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*WatchEvent) + out := out.(*WatchEvent) + *out = *in + if newVal, err := c.DeepCopy(&in.Object); err != nil { + return err + } else { + out.Object = *newVal.(*runtime.RawExtension) + } + return nil + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go 
b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go new file mode 100644 index 000000000..6df448eb9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/OWNERS b/vendor/k8s.io/apimachinery/pkg/conversion/OWNERS new file mode 100644 index 000000000..c25e7faf2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/OWNERS @@ -0,0 +1,10 @@ +approvers: +- derekwaynecarr +- lavalamp +- smarterclayton +- wojtek-t +reviewers: +- derekwaynecarr +- lavalamp +- smarterclayton +- wojtek-t diff --git a/vendor/k8s.io/kubernetes/pkg/conversion/cloner.go b/vendor/k8s.io/apimachinery/pkg/conversion/cloner.go similarity index 82% rename from vendor/k8s.io/kubernetes/pkg/conversion/cloner.go rename to vendor/k8s.io/apimachinery/pkg/conversion/cloner.go index a8c574713..c5dec1f31 100644 --- a/vendor/k8s.io/kubernetes/pkg/conversion/cloner.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/cloner.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -25,14 +25,14 @@ import ( type Cloner struct { // Map from the type to a function which can do the deep copy. deepCopyFuncs map[reflect.Type]reflect.Value - generatedDeepCopyFuncs map[reflect.Type]reflect.Value + generatedDeepCopyFuncs map[reflect.Type]func(in interface{}, out interface{}, c *Cloner) error } // NewCloner creates a new Cloner object. func NewCloner() *Cloner { c := &Cloner{ deepCopyFuncs: map[reflect.Type]reflect.Value{}, - generatedDeepCopyFuncs: map[reflect.Type]reflect.Value{}, + generatedDeepCopyFuncs: map[reflect.Type]func(in interface{}, out interface{}, c *Cloner) error{}, } if err := c.RegisterDeepCopyFunc(byteSliceDeepCopy); err != nil { // If one of the deep-copy functions is malformed, detect it immediately. @@ -42,10 +42,10 @@ func NewCloner() *Cloner { } // Prevent recursing into every byte... 
-func byteSliceDeepCopy(in []byte, out *[]byte, c *Cloner) error { - if in != nil { - *out = make([]byte, len(in)) - copy(*out, in) +func byteSliceDeepCopy(in *[]byte, out *[]byte, c *Cloner) error { + if *in != nil { + *out = make([]byte, len(*in)) + copy(*out, *in) } else { *out = nil } @@ -63,10 +63,10 @@ func verifyDeepCopyFunctionSignature(ft reflect.Type) error { if ft.NumOut() != 1 { return fmt.Errorf("expected one 'out' param, got %v", ft) } - if ft.In(1).Kind() != reflect.Ptr { - return fmt.Errorf("expected pointer arg for 'in' param 1, got: %v", ft) + if ft.In(0).Kind() != reflect.Ptr { + return fmt.Errorf("expected pointer arg for 'in' param 0, got: %v", ft) } - if ft.In(1).Elem() != ft.In(0) { + if ft.In(1) != ft.In(0) { return fmt.Errorf("expected 'in' param 0 the same as param 1, got: %v", ft) } var forClonerType Cloner @@ -103,15 +103,17 @@ func (c *Cloner) RegisterDeepCopyFunc(deepCopyFunc interface{}) error { return nil } +// GeneratedDeepCopyFunc bundles an untyped generated deep-copy function of a type +// with a reflection type object used as a key to lookup the deep-copy function. +type GeneratedDeepCopyFunc struct { + Fn func(in interface{}, out interface{}, c *Cloner) error + InType reflect.Type +} + // Similar to RegisterDeepCopyFunc, but registers deep copy function that were // automatically generated. -func (c *Cloner) RegisterGeneratedDeepCopyFunc(deepCopyFunc interface{}) error { - fv := reflect.ValueOf(deepCopyFunc) - ft := fv.Type() - if err := verifyDeepCopyFunctionSignature(ft); err != nil { - return err - } - c.generatedDeepCopyFuncs[ft.In(0)] = fv +func (c *Cloner) RegisterGeneratedDeepCopyFunc(fn GeneratedDeepCopyFunc) error { + c.generatedDeepCopyFuncs[fn.InType] = fn.Fn return nil } @@ -135,25 +137,35 @@ func (c *Cloner) DeepCopy(in interface{}) (interface{}, error) { func (c *Cloner) deepCopy(src reflect.Value) (reflect.Value, error) { inType := src.Type() + switch src.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + if src.IsNil() { + return src, nil + } + } + if fv, ok := c.deepCopyFuncs[inType]; ok { return c.customDeepCopy(src, fv) } if fv, ok := c.generatedDeepCopyFuncs[inType]; ok { - return c.customDeepCopy(src, fv) + var outValue reflect.Value + outValue = reflect.New(inType.Elem()) + err := fv(src.Interface(), outValue.Interface(), c) + return outValue, err } return c.defaultDeepCopy(src) } func (c *Cloner) customDeepCopy(src, fv reflect.Value) (reflect.Value, error) { - outValue := reflect.New(src.Type()) + outValue := reflect.New(src.Type().Elem()) args := []reflect.Value{src, outValue, reflect.ValueOf(c)} result := fv.Call(args)[0].Interface() // This convolution is necessary because nil interfaces won't convert // to error. if result == nil { - return outValue.Elem(), nil + return outValue, nil } - return outValue.Elem(), result.(error) + return outValue, result.(error) } func (c *Cloner) defaultDeepCopy(src reflect.Value) (reflect.Value, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/conversion/converter.go b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/conversion/converter.go rename to vendor/k8s.io/apimachinery/pkg/conversion/converter.go index e045dcd2f..9ab468ebe 100644 --- a/vendor/k8s.io/kubernetes/pkg/conversion/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. 
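Illustrative usage sketch (not part of the vendored upstream files): the cloner.go hunk above replaces the reflection-based RegisterGeneratedDeepCopyFunc with a variant that accepts a conversion.GeneratedDeepCopyFunc value, which is exactly what GetGeneratedDeepCopyFuncs in zz_generated.deepcopy.go returns. A minimal wiring of the two, assuming only the packages vendored in this diff:

// Hypothetical example, not vendored code: register generated deep-copy funcs with a Cloner.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/conversion"
)

func main() {
	cloner := conversion.NewCloner()
	// Register each generated func via the new struct-based API.
	for _, fn := range metav1.GetGeneratedDeepCopyFuncs() {
		if err := cloner.RegisterGeneratedDeepCopyFunc(fn); err != nil {
			panic(err)
		}
	}
	in := &metav1.ObjectMeta{Name: "example", Labels: map[string]string{"app": "demo"}}
	out, err := cloner.DeepCopy(in)
	if err != nil {
		panic(err)
	}
	// The copy is independent of the original.
	fmt.Println(out.(*metav1.ObjectMeta).Labels["app"]) // demo
}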
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -213,6 +213,8 @@ type Meta struct { // KeyNameMapping is an optional function which may map the listed key (field name) // into a source and destination value. KeyNameMapping FieldMappingFunc + // Context is an optional field that callers may use to pass info to conversion functions. + Context interface{} } // scope contains information about an ongoing conversion. @@ -429,10 +431,10 @@ func (c *Converter) SetStructFieldCopy(srcFieldType interface{}, srcFieldName st } // RegisterDefaultingFunc registers a value-defaulting func with the Converter. -// defaultingFunc must take one parameters: a pointer to the input type. +// defaultingFunc must take one parameter: a pointer to the input type. // // Example: -// c.RegisteDefaultingFunc( +// c.RegisterDefaultingFunc( // func(in *v1.Pod) { // // defaulting logic... // }) diff --git a/vendor/k8s.io/kubernetes/pkg/conversion/deep_equal.go b/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go similarity index 86% rename from vendor/k8s.io/kubernetes/pkg/conversion/deep_equal.go rename to vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go index 7c3ed7cda..f21abe1e5 100644 --- a/vendor/k8s.io/kubernetes/pkg/conversion/deep_equal.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,7 +17,7 @@ limitations under the License. package conversion import ( - "k8s.io/kubernetes/third_party/forked/reflect" + "k8s.io/apimachinery/third_party/forked/golang/reflect" ) // The code for this type must be located in third_party, since it forks from @@ -26,7 +26,7 @@ type Equalities struct { reflect.Equalities } -// For convenience, panics on errrors +// For convenience, panics on errors func EqualitiesOrDie(funcs ...interface{}) Equalities { e := Equalities{reflect.Equalities{}} if err := e.AddFuncs(funcs...); err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/doc.go new file mode 100644 index 000000000..0c46ef2d1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package conversion provides go object versioning. +// +// Specifically, conversion provides a way for you to define multiple versions +// of the same object. You may write functions which implement conversion logic, +// but for the fields which did not change, copying is automated. This makes it +// easy to modify the structures you use in memory without affecting the format +// you store on disk or respond to in your external API calls. 
+package conversion diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/helper.go b/vendor/k8s.io/apimachinery/pkg/conversion/helper.go new file mode 100644 index 000000000..4ebc1ebc5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/helper.go @@ -0,0 +1,39 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conversion + +import ( + "fmt" + "reflect" +) + +// EnforcePtr ensures that obj is a pointer of some sort. Returns a reflect.Value +// of the dereferenced pointer, ensuring that it is settable/addressable. +// Returns an error if this is not possible. +func EnforcePtr(obj interface{}) (reflect.Value, error) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + if v.Kind() == reflect.Invalid { + return reflect.Value{}, fmt.Errorf("expected pointer, but got invalid kind") + } + return reflect.Value{}, fmt.Errorf("expected pointer, but got %v type", v.Type()) + } + if v.IsNil() { + return reflect.Value{}, fmt.Errorf("expected pointer, but got nil") + } + return v.Elem(), nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go new file mode 100644 index 000000000..30f717b2c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go @@ -0,0 +1,188 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
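Illustrative usage sketch (not part of the vendored upstream files): conversion.EnforcePtr in helper.go above returns the dereferenced, addressable reflect.Value of a pointer argument and errors out on nil or non-pointer input.

// Hypothetical example, not vendored code.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/conversion"
)

func main() {
	v, err := conversion.EnforcePtr(&metav1.ObjectMeta{Name: "example"})
	if err != nil {
		panic(err)
	}
	fmt.Println(v.Kind(), v.FieldByName("Name").String()) // struct example

	// Non-pointer input is rejected.
	if _, err := conversion.EnforcePtr(metav1.ObjectMeta{}); err != nil {
		fmt.Println(err) // expected pointer, but got v1.ObjectMeta type
	}
}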
+*/ + +package queryparams + +import ( + "fmt" + "net/url" + "reflect" + "strings" +) + +// Marshaler converts an object to a query parameter string representation +type Marshaler interface { + MarshalQueryParameter() (string, error) +} + +// Unmarshaler converts a string representation to an object +type Unmarshaler interface { + UnmarshalQueryParameter(string) error +} + +func jsonTag(field reflect.StructField) (string, bool) { + structTag := field.Tag.Get("json") + if len(structTag) == 0 { + return "", false + } + parts := strings.Split(structTag, ",") + tag := parts[0] + if tag == "-" { + tag = "" + } + omitempty := false + parts = parts[1:] + for _, part := range parts { + if part == "omitempty" { + omitempty = true + break + } + } + return tag, omitempty +} + +func formatValue(value interface{}) string { + return fmt.Sprintf("%v", value) +} + +func isPointerKind(kind reflect.Kind) bool { + return kind == reflect.Ptr +} + +func isStructKind(kind reflect.Kind) bool { + return kind == reflect.Struct +} + +func isValueKind(kind reflect.Kind) bool { + switch kind { + case reflect.String, reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, + reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, + reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, + reflect.Float64, reflect.Complex64, reflect.Complex128: + return true + default: + return false + } +} + +func zeroValue(value reflect.Value) bool { + return reflect.DeepEqual(reflect.Zero(value.Type()).Interface(), value.Interface()) +} + +func customMarshalValue(value reflect.Value) (reflect.Value, bool) { + // Return unless we implement a custom query marshaler + if !value.CanInterface() { + return reflect.Value{}, false + } + + marshaler, ok := value.Interface().(Marshaler) + if !ok { + return reflect.Value{}, false + } + + // Don't invoke functions on nil pointers + // If the type implements MarshalQueryParameter, AND the tag is not omitempty, AND the value is a nil pointer, "" seems like a reasonable response + if isPointerKind(value.Kind()) && zeroValue(value) { + return reflect.ValueOf(""), true + } + + // Get the custom marshalled value + v, err := marshaler.MarshalQueryParameter() + if err != nil { + return reflect.Value{}, false + } + return reflect.ValueOf(v), true +} + +func addParam(values url.Values, tag string, omitempty bool, value reflect.Value) { + if omitempty && zeroValue(value) { + return + } + val := "" + iValue := fmt.Sprintf("%v", value.Interface()) + + if iValue != "<nil>" { + val = iValue + } + values.Add(tag, val) +} + +func addListOfParams(values url.Values, tag string, omitempty bool, list reflect.Value) { + for i := 0; i < list.Len(); i++ { + addParam(values, tag, omitempty, list.Index(i)) + } +} + +// Convert takes an object and converts it to a url.Values object using JSON tags as +// parameter names. Only top-level simple values, arrays, and slices are serialized. +// Embedded structs, maps, etc. will not be serialized. 
+func Convert(obj interface{}) (url.Values, error) { + result := url.Values{} + if obj == nil { + return result, nil + } + var sv reflect.Value + switch reflect.TypeOf(obj).Kind() { + case reflect.Ptr, reflect.Interface: + sv = reflect.ValueOf(obj).Elem() + default: + return nil, fmt.Errorf("expecting a pointer or interface") + } + st := sv.Type() + if !isStructKind(st.Kind()) { + return nil, fmt.Errorf("expecting a pointer to a struct") + } + + // Check all object fields + convertStruct(result, st, sv) + + return result, nil +} + +func convertStruct(result url.Values, st reflect.Type, sv reflect.Value) { + for i := 0; i < st.NumField(); i++ { + field := sv.Field(i) + tag, omitempty := jsonTag(st.Field(i)) + if len(tag) == 0 { + continue + } + ft := field.Type() + + kind := ft.Kind() + if isPointerKind(kind) { + ft = ft.Elem() + kind = ft.Kind() + if !field.IsNil() { + field = reflect.Indirect(field) + } + } + + switch { + case isValueKind(kind): + addParam(result, tag, omitempty, field) + case kind == reflect.Array || kind == reflect.Slice: + if isValueKind(ft.Elem().Kind()) { + addListOfParams(result, tag, omitempty, field) + } + case isStructKind(kind) && !(zeroValue(field) && omitempty): + if marshalValue, ok := customMarshalValue(field); ok { + addParam(result, tag, omitempty, marshalValue) + } else { + convertStruct(result, ft, field) + } + } + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go new file mode 100644 index 000000000..4c1002a4c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package queryparams provides conversion from versioned +// runtime objects to URL query values +package queryparams diff --git a/vendor/k8s.io/apimachinery/pkg/fields/doc.go b/vendor/k8s.io/apimachinery/pkg/fields/doc.go new file mode 100644 index 000000000..49059e263 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/fields/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fields implements a simple field system, parsing and matching +// selectors with sets of fields. 
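Illustrative usage sketch (not part of the vendored upstream files): queryparams.Convert above serializes the JSON-tagged, top-level fields of a pointer-to-struct into url.Values; slices of simple values become repeated parameters. The options struct below is hypothetical, not a real API type.

// Hypothetical example, not vendored code.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/conversion/queryparams"
)

// demoOptions is a made-up struct; only its JSON tags matter here.
type demoOptions struct {
	LabelSelector  string   `json:"labelSelector,omitempty"`
	Watch          bool     `json:"watch,omitempty"`
	TimeoutSeconds *int64   `json:"timeoutSeconds,omitempty"`
	Kinds          []string `json:"kinds,omitempty"`
}

func main() {
	timeout := int64(30)
	opts := &demoOptions{
		LabelSelector:  "app=nginx",
		Watch:          true,
		TimeoutSeconds: &timeout,
		Kinds:          []string{"pods", "services"},
	}
	values, err := queryparams.Convert(opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(values.Encode())
	// kinds=pods&kinds=services&labelSelector=app%3Dnginx&timeoutSeconds=30&watch=true
}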
+package fields diff --git a/vendor/k8s.io/apimachinery/pkg/fields/fields.go b/vendor/k8s.io/apimachinery/pkg/fields/fields.go new file mode 100644 index 000000000..623b27e95 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/fields/fields.go @@ -0,0 +1,62 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fields + +import ( + "sort" + "strings" +) + +// Fields allows you to present fields independently from their storage. +type Fields interface { + // Has returns whether the provided field exists. + Has(field string) (exists bool) + + // Get returns the value for the provided field. + Get(field string) (value string) +} + +// Set is a map of field:value. It implements Fields. +type Set map[string]string + +// String returns all fields listed as a human readable string. +// Conveniently, exactly the format that ParseSelector takes. +func (ls Set) String() string { + selector := make([]string, 0, len(ls)) + for key, value := range ls { + selector = append(selector, key+"="+value) + } + // Sort for determinism. + sort.StringSlice(selector).Sort() + return strings.Join(selector, ",") +} + +// Has returns whether the provided field exists in the map. +func (ls Set) Has(field string) bool { + _, exists := ls[field] + return exists +} + +// Get returns the value in the map for the provided field. +func (ls Set) Get(field string) string { + return ls[field] +} + +// AsSelector converts fields into a selectors. +func (ls Set) AsSelector() Selector { + return SelectorFromSet(ls) +} diff --git a/vendor/k8s.io/apimachinery/pkg/fields/requirements.go b/vendor/k8s.io/apimachinery/pkg/fields/requirements.go new file mode 100644 index 000000000..70d94ded8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/fields/requirements.go @@ -0,0 +1,30 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fields + +import "k8s.io/apimachinery/pkg/selection" + +// Requirements is AND of all requirements. +type Requirements []Requirement + +// Requirement contains a field, a value, and an operator that relates the field and value. +// This is currently for reading internal selection information of field selector. 
+type Requirement struct { + Operator selection.Operator + Field string + Value string +} diff --git a/vendor/k8s.io/apimachinery/pkg/fields/selector.go b/vendor/k8s.io/apimachinery/pkg/fields/selector.go new file mode 100644 index 000000000..bb156b4cb --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/fields/selector.go @@ -0,0 +1,413 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fields + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/selection" +) + +// Selector represents a field selector. +type Selector interface { + // Matches returns true if this selector matches the given set of fields. + Matches(Fields) bool + + // Empty returns true if this selector does not restrict the selection space. + Empty() bool + + // RequiresExactMatch allows a caller to introspect whether a given selector + // requires a single specific field to be set, and if so returns the value it + // requires. + RequiresExactMatch(field string) (value string, found bool) + + // Transform returns a new copy of the selector after TransformFunc has been + // applied to the entire selector, or an error if fn returns an error. + Transform(fn TransformFunc) (Selector, error) + + // Requirements converts this interface to Requirements to expose + // more detailed selection information. + Requirements() Requirements + + // String returns a human readable string that represents this selector. + String() string +} + +// Everything returns a selector that matches all fields. 
+func Everything() Selector { + return andTerm{} +} + +type hasTerm struct { + field, value string +} + +func (t *hasTerm) Matches(ls Fields) bool { + return ls.Get(t.field) == t.value +} + +func (t *hasTerm) Empty() bool { + return false +} + +func (t *hasTerm) RequiresExactMatch(field string) (value string, found bool) { + if t.field == field { + return t.value, true + } + return "", false +} + +func (t *hasTerm) Transform(fn TransformFunc) (Selector, error) { + field, value, err := fn(t.field, t.value) + if err != nil { + return nil, err + } + return &hasTerm{field, value}, nil +} + +func (t *hasTerm) Requirements() Requirements { + return []Requirement{{ + Field: t.field, + Operator: selection.Equals, + Value: t.value, + }} +} + +func (t *hasTerm) String() string { + return fmt.Sprintf("%v=%v", t.field, EscapeValue(t.value)) +} + +type notHasTerm struct { + field, value string +} + +func (t *notHasTerm) Matches(ls Fields) bool { + return ls.Get(t.field) != t.value +} + +func (t *notHasTerm) Empty() bool { + return false +} + +func (t *notHasTerm) RequiresExactMatch(field string) (value string, found bool) { + return "", false +} + +func (t *notHasTerm) Transform(fn TransformFunc) (Selector, error) { + field, value, err := fn(t.field, t.value) + if err != nil { + return nil, err + } + return ¬HasTerm{field, value}, nil +} + +func (t *notHasTerm) Requirements() Requirements { + return []Requirement{{ + Field: t.field, + Operator: selection.NotEquals, + Value: t.value, + }} +} + +func (t *notHasTerm) String() string { + return fmt.Sprintf("%v!=%v", t.field, EscapeValue(t.value)) +} + +type andTerm []Selector + +func (t andTerm) Matches(ls Fields) bool { + for _, q := range t { + if !q.Matches(ls) { + return false + } + } + return true +} + +func (t andTerm) Empty() bool { + if t == nil { + return true + } + if len([]Selector(t)) == 0 { + return true + } + for i := range t { + if !t[i].Empty() { + return false + } + } + return true +} + +func (t andTerm) RequiresExactMatch(field string) (string, bool) { + if t == nil || len([]Selector(t)) == 0 { + return "", false + } + for i := range t { + if value, found := t[i].RequiresExactMatch(field); found { + return value, found + } + } + return "", false +} + +func (t andTerm) Transform(fn TransformFunc) (Selector, error) { + next := make([]Selector, len([]Selector(t))) + for i, s := range []Selector(t) { + n, err := s.Transform(fn) + if err != nil { + return nil, err + } + next[i] = n + } + return andTerm(next), nil +} + +func (t andTerm) Requirements() Requirements { + reqs := make([]Requirement, 0, len(t)) + for _, s := range []Selector(t) { + rs := s.Requirements() + reqs = append(reqs, rs...) + } + return reqs +} + +func (t andTerm) String() string { + var terms []string + for _, q := range t { + terms = append(terms, q.String()) + } + return strings.Join(terms, ",") +} + +// SelectorFromSet returns a Selector which will match exactly the given Set. A +// nil Set is considered equivalent to Everything(). 
+func SelectorFromSet(ls Set) Selector { + if ls == nil { + return Everything() + } + items := make([]Selector, 0, len(ls)) + for field, value := range ls { + items = append(items, &hasTerm{field: field, value: value}) + } + if len(items) == 1 { + return items[0] + } + return andTerm(items) +} + +// valueEscaper prefixes \,= characters with a backslash +var valueEscaper = strings.NewReplacer( + // escape \ characters + `\`, `\\`, + // then escape , and = characters to allow unambiguous parsing of the value in a fieldSelector + `,`, `\,`, + `=`, `\=`, +) + +// Escapes an arbitrary literal string for use as a fieldSelector value +func EscapeValue(s string) string { + return valueEscaper.Replace(s) +} + +// InvalidEscapeSequence indicates an error occurred unescaping a field selector +type InvalidEscapeSequence struct { + sequence string +} + +func (i InvalidEscapeSequence) Error() string { + return fmt.Sprintf("invalid field selector: invalid escape sequence: %s", i.sequence) +} + +// UnescapedRune indicates an error occurred unescaping a field selector +type UnescapedRune struct { + r rune +} + +func (i UnescapedRune) Error() string { + return fmt.Sprintf("invalid field selector: unescaped character in value: %v", i.r) +} + +// Unescapes a fieldSelector value and returns the original literal value. +// May return the original string if it contains no escaped or special characters. +func UnescapeValue(s string) (string, error) { + // if there's no escaping or special characters, just return to avoid allocation + if !strings.ContainsAny(s, `\,=`) { + return s, nil + } + + v := bytes.NewBuffer(make([]byte, 0, len(s))) + inSlash := false + for _, c := range s { + if inSlash { + switch c { + case '\\', ',', '=': + // omit the \ for recognized escape sequences + v.WriteRune(c) + default: + // error on unrecognized escape sequences + return "", InvalidEscapeSequence{sequence: string([]rune{'\\', c})} + } + inSlash = false + continue + } + + switch c { + case '\\': + inSlash = true + case ',', '=': + // unescaped , and = characters are not allowed in field selector values + return "", UnescapedRune{r: c} + default: + v.WriteRune(c) + } + } + + // Ending with a single backslash is an invalid sequence + if inSlash { + return "", InvalidEscapeSequence{sequence: "\\"} + } + + return v.String(), nil +} + +// ParseSelectorOrDie takes a string representing a selector and returns an +// object suitable for matching, or panic when an error occur. +func ParseSelectorOrDie(s string) Selector { + selector, err := ParseSelector(s) + if err != nil { + panic(err) + } + return selector +} + +// ParseSelector takes a string representing a selector and returns an +// object suitable for matching, or an error. +func ParseSelector(selector string) (Selector, error) { + return parseSelector(selector, + func(lhs, rhs string) (newLhs, newRhs string, err error) { + return lhs, rhs, nil + }) +} + +// Parses the selector and runs them through the given TransformFunc. +func ParseAndTransformSelector(selector string, fn TransformFunc) (Selector, error) { + return parseSelector(selector, fn) +} + +// Function to transform selectors. +type TransformFunc func(field, value string) (newField, newValue string, err error) + +// splitTerms returns the comma-separated terms contained in the given fieldSelector. +// Backslash-escaped commas are treated as data instead of delimiters, and are included in the returned terms, with the leading backslash preserved. 
+func splitTerms(fieldSelector string) []string { + if len(fieldSelector) == 0 { + return nil + } + + terms := make([]string, 0, 1) + startIndex := 0 + inSlash := false + for i, c := range fieldSelector { + switch { + case inSlash: + inSlash = false + case c == '\\': + inSlash = true + case c == ',': + terms = append(terms, fieldSelector[startIndex:i]) + startIndex = i + 1 + } + } + + terms = append(terms, fieldSelector[startIndex:]) + + return terms +} + +const ( + notEqualOperator = "!=" + doubleEqualOperator = "==" + equalOperator = "=" +) + +// termOperators holds the recognized operators supported in fieldSelectors. +// doubleEqualOperator and equal are equivalent, but doubleEqualOperator is checked first +// to avoid leaving a leading = character on the rhs value. +var termOperators = []string{notEqualOperator, doubleEqualOperator, equalOperator} + +// splitTerm returns the lhs, operator, and rhs parsed from the given term, along with an indicator of whether the parse was successful. +// no escaping of special characters is supported in the lhs value, so the first occurance of a recognized operator is used as the split point. +// the literal rhs is returned, and the caller is responsible for applying any desired unescaping. +func splitTerm(term string) (lhs, op, rhs string, ok bool) { + for i := range term { + remaining := term[i:] + for _, op := range termOperators { + if strings.HasPrefix(remaining, op) { + return term[0:i], op, term[i+len(op):], true + } + } + } + return "", "", "", false +} + +func parseSelector(selector string, fn TransformFunc) (Selector, error) { + parts := splitTerms(selector) + sort.StringSlice(parts).Sort() + var items []Selector + for _, part := range parts { + if part == "" { + continue + } + lhs, op, rhs, ok := splitTerm(part) + if !ok { + return nil, fmt.Errorf("invalid selector: '%s'; can't understand '%s'", selector, part) + } + unescapedRHS, err := UnescapeValue(rhs) + if err != nil { + return nil, err + } + switch op { + case notEqualOperator: + items = append(items, ¬HasTerm{field: lhs, value: unescapedRHS}) + case doubleEqualOperator: + items = append(items, &hasTerm{field: lhs, value: unescapedRHS}) + case equalOperator: + items = append(items, &hasTerm{field: lhs, value: unescapedRHS}) + default: + return nil, fmt.Errorf("invalid selector: '%s'; can't understand '%s'", selector, part) + } + } + if len(items) == 1 { + return items[0].Transform(fn) + } + return andTerm(items).Transform(fn) +} + +// OneTermEqualSelector returns an object that matches objects where one field/field equals one value. +// Cannot return an error. +func OneTermEqualSelector(k, v string) Selector { + return &hasTerm{field: k, value: v} +} + +// AndSelectors creates a selector that is the logical AND of all the given selectors +func AndSelectors(selectors ...Selector) Selector { + return andTerm(selectors) +} diff --git a/vendor/k8s.io/apimachinery/pkg/labels/doc.go b/vendor/k8s.io/apimachinery/pkg/labels/doc.go new file mode 100644 index 000000000..35ba78809 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/labels/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
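Illustrative usage sketch (not part of the vendored upstream files): the field-selector parser above accepts comma-separated =, == and != terms, with backslash escaping for commas and equals signs inside values.

// Hypothetical example, not vendored code.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	sel, err := fields.ParseSelector("metadata.namespace=default,status.phase!=Failed")
	if err != nil {
		panic(err)
	}
	pod := fields.Set{
		"metadata.namespace": "default",
		"status.phase":       "Running",
	}
	fmt.Println(sel.Matches(pod)) // true
	fmt.Println(sel.String())     // metadata.namespace=default,status.phase!=Failed
}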
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package labels implements a simple label system, parsing and matching +// selectors with sets of labels. +package labels diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go new file mode 100644 index 000000000..0d0caa77d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go @@ -0,0 +1,181 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package labels + +import ( + "fmt" + "sort" + "strings" +) + +// Labels allows you to present labels independently from their storage. +type Labels interface { + // Has returns whether the provided label exists. + Has(label string) (exists bool) + + // Get returns the value for the provided label. + Get(label string) (value string) +} + +// Set is a map of label:value. It implements Labels. +type Set map[string]string + +// String returns all labels listed as a human readable string. +// Conveniently, exactly the format that ParseSelector takes. +func (ls Set) String() string { + selector := make([]string, 0, len(ls)) + for key, value := range ls { + selector = append(selector, key+"="+value) + } + // Sort for determinism. + sort.StringSlice(selector).Sort() + return strings.Join(selector, ",") +} + +// Has returns whether the provided label exists in the map. +func (ls Set) Has(label string) bool { + _, exists := ls[label] + return exists +} + +// Get returns the value in the map for the provided label. +func (ls Set) Get(label string) string { + return ls[label] +} + +// AsSelector converts labels into a selectors. +func (ls Set) AsSelector() Selector { + return SelectorFromSet(ls) +} + +// AsSelectorPreValidated converts labels into a selector, but +// assumes that labels are already validated and thus don't +// preform any validation. +// According to our measurements this is significantly faster +// in codepaths that matter at high sccale. 
+func (ls Set) AsSelectorPreValidated() Selector { + return SelectorFromValidatedSet(ls) +} + +// FormatLables convert label map into plain string +func FormatLabels(labelMap map[string]string) string { + l := Set(labelMap).String() + if l == "" { + l = "<none>" + } + return l +} + +// Conflicts takes 2 maps and returns true if there a key match between +// the maps but the value doesn't match, and returns false in other cases +func Conflicts(labels1, labels2 Set) bool { + small := labels1 + big := labels2 + if len(labels2) < len(labels1) { + small = labels2 + big = labels1 + } + + for k, v := range small { + if val, match := big[k]; match { + if val != v { + return true + } + } + } + + return false +} + +// Merge combines given maps, and does not check for any conflicts +// between the maps. In case of conflicts, second map (labels2) wins +func Merge(labels1, labels2 Set) Set { + mergedMap := Set{} + + for k, v := range labels1 { + mergedMap[k] = v + } + for k, v := range labels2 { + mergedMap[k] = v + } + return mergedMap +} + +// Equals returns true if the given maps are equal +func Equals(labels1, labels2 Set) bool { + if len(labels1) != len(labels2) { + return false + } + + for k, v := range labels1 { + value, ok := labels2[k] + if !ok { + return false + } + if value != v { + return false + } + } + return true +} + +// AreLabelsInWhiteList verifies if the provided label list +// is in the provided whitelist and returns true, otherwise false. +func AreLabelsInWhiteList(labels, whitelist Set) bool { + if len(whitelist) == 0 { + return true + } + + for k, v := range labels { + value, ok := whitelist[k] + if !ok { + return false + } + if value != v { + return false + } + } + return true +} + +// ConvertSelectorToLabelsMap converts selector string to labels map +// and validates keys and values +func ConvertSelectorToLabelsMap(selector string) (Set, error) { + labelsMap := Set{} + + if len(selector) == 0 { + return labelsMap, nil + } + + labels := strings.Split(selector, ",") + for _, label := range labels { + l := strings.Split(label, "=") + if len(l) != 2 { + return labelsMap, fmt.Errorf("invalid selector: %s", l) + } + key := strings.TrimSpace(l[0]) + if err := validateLabelKey(key); err != nil { + return labelsMap, err + } + value := strings.TrimSpace(l[1]) + if err := validateLabelValue(value); err != nil { + return labelsMap, err + } + labelsMap[key] = value + } + return labelsMap, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go new file mode 100644 index 000000000..9bddc35a6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -0,0 +1,836 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package labels + +import ( + "bytes" + "fmt" + "sort" + "strconv" + "strings" + + "github.com/golang/glog" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" +) + +// Requirements is AND of all requirements. 
+type Requirements []Requirement + +// Selector represents a label selector. +type Selector interface { + // Matches returns true if this selector matches the given set of labels. + Matches(Labels) bool + + // Empty returns true if this selector does not restrict the selection space. + Empty() bool + + // String returns a human readable string that represents this selector. + String() string + + // Add adds requirements to the Selector + Add(r ...Requirement) Selector + + // Requirements converts this interface into Requirements to expose + // more detailed selection information. + // If there are querying parameters, it will return converted requirements and selectable=true. + // If this selector doesn't want to select anything, it will return selectable=false. + Requirements() (requirements Requirements, selectable bool) +} + +// Everything returns a selector that matches all labels. +func Everything() Selector { + return internalSelector{} +} + +type nothingSelector struct{} + +func (n nothingSelector) Matches(_ Labels) bool { return false } +func (n nothingSelector) Empty() bool { return false } +func (n nothingSelector) String() string { return "" } +func (n nothingSelector) Add(_ ...Requirement) Selector { return n } +func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false } + +// Nothing returns a selector that matches no labels +func Nothing() Selector { + return nothingSelector{} +} + +func NewSelector() Selector { + return internalSelector(nil) +} + +type internalSelector []Requirement + +// Sort by key to obtain determisitic parser +type ByKey []Requirement + +func (a ByKey) Len() int { return len(a) } + +func (a ByKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func (a ByKey) Less(i, j int) bool { return a[i].key < a[j].key } + +// Requirement contains values, a key, and an operator that relates the key and values. +// The zero value of Requirement is invalid. +// Requirement implements both set based match and exact match +// Requirement should be initialized via NewRequirement constructor for creating a valid Requirement. +type Requirement struct { + key string + operator selection.Operator + // In huge majority of cases we have at most one value here. + // It is generally faster to operate on a single-element slice + // than on a single-element map, so we have a slice here. + strValues []string +} + +// NewRequirement is the constructor for a Requirement. +// If any of these rules is violated, an error is returned: +// (1) The operator can only be In, NotIn, Equals, DoubleEquals, NotEquals, Exists, or DoesNotExist. +// (2) If the operator is In or NotIn, the values set must be non-empty. +// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. +// (4) If the operator is Exists or DoesNotExist, the value set must be empty. +// (5) If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. +// (6) The key is invalid due to its length, or sequence +// of characters. See validateLabelKey for more details. +// +// The empty string is a valid value in the input values set. 
+func NewRequirement(key string, op selection.Operator, vals []string) (*Requirement, error) { + if err := validateLabelKey(key); err != nil { + return nil, err + } + switch op { + case selection.In, selection.NotIn: + if len(vals) == 0 { + return nil, fmt.Errorf("for 'in', 'notin' operators, values set can't be empty") + } + case selection.Equals, selection.DoubleEquals, selection.NotEquals: + if len(vals) != 1 { + return nil, fmt.Errorf("exact-match compatibility requires one single value") + } + case selection.Exists, selection.DoesNotExist: + if len(vals) != 0 { + return nil, fmt.Errorf("values set must be empty for exists and does not exist") + } + case selection.GreaterThan, selection.LessThan: + if len(vals) != 1 { + return nil, fmt.Errorf("for 'Gt', 'Lt' operators, exactly one value is required") + } + for i := range vals { + if _, err := strconv.ParseInt(vals[i], 10, 64); err != nil { + return nil, fmt.Errorf("for 'Gt', 'Lt' operators, the value must be an integer") + } + } + default: + return nil, fmt.Errorf("operator '%v' is not recognized", op) + } + + for i := range vals { + if err := validateLabelValue(vals[i]); err != nil { + return nil, err + } + } + sort.Strings(vals) + return &Requirement{key: key, operator: op, strValues: vals}, nil +} + +func (r *Requirement) hasValue(value string) bool { + for i := range r.strValues { + if r.strValues[i] == value { + return true + } + } + return false +} + +// Matches returns true if the Requirement matches the input Labels. +// There is a match in the following cases: +// (1) The operator is Exists and Labels has the Requirement's key. +// (2) The operator is In, Labels has the Requirement's key and Labels' +// value for that key is in Requirement's value set. +// (3) The operator is NotIn, Labels has the Requirement's key and +// Labels' value for that key is not in Requirement's value set. +// (4) The operator is DoesNotExist or NotIn and Labels does not have the +// Requirement's key. +// (5) The operator is GreaterThanOperator or LessThanOperator, and Labels has +// the Requirement's key and the corresponding value satisfies mathematical inequality. +func (r *Requirement) Matches(ls Labels) bool { + switch r.operator { + case selection.In, selection.Equals, selection.DoubleEquals: + if !ls.Has(r.key) { + return false + } + return r.hasValue(ls.Get(r.key)) + case selection.NotIn, selection.NotEquals: + if !ls.Has(r.key) { + return true + } + return !r.hasValue(ls.Get(r.key)) + case selection.Exists: + return ls.Has(r.key) + case selection.DoesNotExist: + return !ls.Has(r.key) + case selection.GreaterThan, selection.LessThan: + if !ls.Has(r.key) { + return false + } + lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64) + if err != nil { + glog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) + return false + } + + // There should be only one strValue in r.strValues, and can be converted to a integer. 
+ if len(r.strValues) != 1 { + glog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) + return false + } + + var rValue int64 + for i := range r.strValues { + rValue, err = strconv.ParseInt(r.strValues[i], 10, 64) + if err != nil { + glog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) + return false + } + } + return (r.operator == selection.GreaterThan && lsValue > rValue) || (r.operator == selection.LessThan && lsValue < rValue) + default: + return false + } +} + +func (r *Requirement) Key() string { + return r.key +} +func (r *Requirement) Operator() selection.Operator { + return r.operator +} +func (r *Requirement) Values() sets.String { + ret := sets.String{} + for i := range r.strValues { + ret.Insert(r.strValues[i]) + } + return ret +} + +// Return true if the internalSelector doesn't restrict selection space +func (lsel internalSelector) Empty() bool { + if lsel == nil { + return true + } + return len(lsel) == 0 +} + +// String returns a human-readable string that represents this +// Requirement. If called on an invalid Requirement, an error is +// returned. See NewRequirement for creating a valid Requirement. +func (r *Requirement) String() string { + var buffer bytes.Buffer + if r.operator == selection.DoesNotExist { + buffer.WriteString("!") + } + buffer.WriteString(r.key) + + switch r.operator { + case selection.Equals: + buffer.WriteString("=") + case selection.DoubleEquals: + buffer.WriteString("==") + case selection.NotEquals: + buffer.WriteString("!=") + case selection.In: + buffer.WriteString(" in ") + case selection.NotIn: + buffer.WriteString(" notin ") + case selection.GreaterThan: + buffer.WriteString(">") + case selection.LessThan: + buffer.WriteString("<") + case selection.Exists, selection.DoesNotExist: + return buffer.String() + } + + switch r.operator { + case selection.In, selection.NotIn: + buffer.WriteString("(") + } + if len(r.strValues) == 1 { + buffer.WriteString(r.strValues[0]) + } else { // only > 1 since == 0 prohibited by NewRequirement + buffer.WriteString(strings.Join(r.strValues, ",")) + } + + switch r.operator { + case selection.In, selection.NotIn: + buffer.WriteString(")") + } + return buffer.String() +} + +// Add adds requirements to the selector. It copies the current selector returning a new one +func (lsel internalSelector) Add(reqs ...Requirement) Selector { + var sel internalSelector + for ix := range lsel { + sel = append(sel, lsel[ix]) + } + for _, r := range reqs { + sel = append(sel, r) + } + sort.Sort(ByKey(sel)) + return sel +} + +// Matches for a internalSelector returns true if all +// its Requirements match the input Labels. If any +// Requirement does not match, false is returned. +func (lsel internalSelector) Matches(l Labels) bool { + for ix := range lsel { + if matches := lsel[ix].Matches(l); !matches { + return false + } + } + return true +} + +func (lsel internalSelector) Requirements() (Requirements, bool) { return Requirements(lsel), true } + +// String returns a comma-separated string of all +// the internalSelector Requirements' human-readable strings. 
+func (lsel internalSelector) String() string { + var reqs []string + for ix := range lsel { + reqs = append(reqs, lsel[ix].String()) + } + return strings.Join(reqs, ",") +} + +// constants definition for lexer token +type Token int + +const ( + ErrorToken Token = iota + EndOfStringToken + ClosedParToken + CommaToken + DoesNotExistToken + DoubleEqualsToken + EqualsToken + GreaterThanToken + IdentifierToken // to represent keys and values + InToken + LessThanToken + NotEqualsToken + NotInToken + OpenParToken +) + +// string2token contains the mapping between lexer Token and token literal +// (except IdentifierToken, EndOfStringToken and ErrorToken since it makes no sense) +var string2token = map[string]Token{ + ")": ClosedParToken, + ",": CommaToken, + "!": DoesNotExistToken, + "==": DoubleEqualsToken, + "=": EqualsToken, + ">": GreaterThanToken, + "in": InToken, + "<": LessThanToken, + "!=": NotEqualsToken, + "notin": NotInToken, + "(": OpenParToken, +} + +// The item produced by the lexer. It contains the Token and the literal. +type ScannedItem struct { + tok Token + literal string +} + +// isWhitespace returns true if the rune is a space, tab, or newline. +func isWhitespace(ch byte) bool { + return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n' +} + +// isSpecialSymbol detect if the character ch can be an operator +func isSpecialSymbol(ch byte) bool { + switch ch { + case '=', '!', '(', ')', ',', '>', '<': + return true + } + return false +} + +// Lexer represents the Lexer struct for label selector. +// It contains necessary informationt to tokenize the input string +type Lexer struct { + // s stores the string to be tokenized + s string + // pos is the position currently tokenized + pos int +} + +// read return the character currently lexed +// increment the position and check the buffer overflow +func (l *Lexer) read() (b byte) { + b = 0 + if l.pos < len(l.s) { + b = l.s[l.pos] + l.pos++ + } + return b +} + +// unread 'undoes' the last read character +func (l *Lexer) unread() { + l.pos-- +} + +// scanIdOrKeyword scans string to recognize literal token (for example 'in') or an identifier. +func (l *Lexer) scanIdOrKeyword() (tok Token, lit string) { + var buffer []byte +IdentifierLoop: + for { + switch ch := l.read(); { + case ch == 0: + break IdentifierLoop + case isSpecialSymbol(ch) || isWhitespace(ch): + l.unread() + break IdentifierLoop + default: + buffer = append(buffer, ch) + } + } + s := string(buffer) + if val, ok := string2token[s]; ok { // is a literal token? + return val, s + } + return IdentifierToken, s // otherwise is an identifier +} + +// scanSpecialSymbol scans string starting with special symbol. +// special symbol identify non literal operators. 
"!=", "==", "=" +func (l *Lexer) scanSpecialSymbol() (Token, string) { + lastScannedItem := ScannedItem{} + var buffer []byte +SpecialSymbolLoop: + for { + switch ch := l.read(); { + case ch == 0: + break SpecialSymbolLoop + case isSpecialSymbol(ch): + buffer = append(buffer, ch) + if token, ok := string2token[string(buffer)]; ok { + lastScannedItem = ScannedItem{tok: token, literal: string(buffer)} + } else if lastScannedItem.tok != 0 { + l.unread() + break SpecialSymbolLoop + } + default: + l.unread() + break SpecialSymbolLoop + } + } + if lastScannedItem.tok == 0 { + return ErrorToken, fmt.Sprintf("error expected: keyword found '%s'", buffer) + } + return lastScannedItem.tok, lastScannedItem.literal +} + +// skipWhiteSpaces consumes all blank characters +// returning the first non blank character +func (l *Lexer) skipWhiteSpaces(ch byte) byte { + for { + if !isWhitespace(ch) { + return ch + } + ch = l.read() + } +} + +// Lex returns a pair of Token and the literal +// literal is meaningfull only for IdentifierToken token +func (l *Lexer) Lex() (tok Token, lit string) { + switch ch := l.skipWhiteSpaces(l.read()); { + case ch == 0: + return EndOfStringToken, "" + case isSpecialSymbol(ch): + l.unread() + return l.scanSpecialSymbol() + default: + l.unread() + return l.scanIdOrKeyword() + } +} + +// Parser data structure contains the label selector parser data structure +type Parser struct { + l *Lexer + scannedItems []ScannedItem + position int +} + +// Parser context represents context during parsing: +// some literal for example 'in' and 'notin' can be +// recognized as operator for example 'x in (a)' but +// it can be recognized as value for example 'value in (in)' +type ParserContext int + +const ( + KeyAndOperator ParserContext = iota + Values +) + +// lookahead func returns the current token and string. No increment of current position +func (p *Parser) lookahead(context ParserContext) (Token, string) { + tok, lit := p.scannedItems[p.position].tok, p.scannedItems[p.position].literal + if context == Values { + switch tok { + case InToken, NotInToken: + tok = IdentifierToken + } + } + return tok, lit +} + +// consume returns current token and string. Increments the the position +func (p *Parser) consume(context ParserContext) (Token, string) { + p.position++ + tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal + if context == Values { + switch tok { + case InToken, NotInToken: + tok = IdentifierToken + } + } + return tok, lit +} + +// scan runs through the input string and stores the ScannedItem in an array +// Parser can now lookahead and consume the tokens +func (p *Parser) scan() { + for { + token, literal := p.l.Lex() + p.scannedItems = append(p.scannedItems, ScannedItem{token, literal}) + if token == EndOfStringToken { + break + } + } +} + +// parse runs the left recursive descending algorithm +// on input string. It returns a list of Requirement objects. 
+func (p *Parser) parse() (internalSelector, error) { + p.scan() // init scannedItems + + var requirements internalSelector + for { + tok, lit := p.lookahead(Values) + switch tok { + case IdentifierToken, DoesNotExistToken: + r, err := p.parseRequirement() + if err != nil { + return nil, fmt.Errorf("unable to parse requirement: %v", err) + } + requirements = append(requirements, *r) + t, l := p.consume(Values) + switch t { + case EndOfStringToken: + return requirements, nil + case CommaToken: + t2, l2 := p.lookahead(Values) + if t2 != IdentifierToken && t2 != DoesNotExistToken { + return nil, fmt.Errorf("found '%s', expected: identifier after ','", l2) + } + default: + return nil, fmt.Errorf("found '%s', expected: ',' or 'end of string'", l) + } + case EndOfStringToken: + return requirements, nil + default: + return nil, fmt.Errorf("found '%s', expected: !, identifier, or 'end of string'", lit) + } + } +} + +func (p *Parser) parseRequirement() (*Requirement, error) { + key, operator, err := p.parseKeyAndInferOperator() + if err != nil { + return nil, err + } + if operator == selection.Exists || operator == selection.DoesNotExist { // operator found lookahead set checked + return NewRequirement(key, operator, []string{}) + } + operator, err = p.parseOperator() + if err != nil { + return nil, err + } + var values sets.String + switch operator { + case selection.In, selection.NotIn: + values, err = p.parseValues() + case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.GreaterThan, selection.LessThan: + values, err = p.parseExactValue() + } + if err != nil { + return nil, err + } + return NewRequirement(key, operator, values.List()) + +} + +// parseKeyAndInferOperator parse literals. +// in case of no operator '!, in, notin, ==, =, !=' are found +// the 'exists' operator is inferred +func (p *Parser) parseKeyAndInferOperator() (string, selection.Operator, error) { + var operator selection.Operator + tok, literal := p.consume(Values) + if tok == DoesNotExistToken { + operator = selection.DoesNotExist + tok, literal = p.consume(Values) + } + if tok != IdentifierToken { + err := fmt.Errorf("found '%s', expected: identifier", literal) + return "", "", err + } + if err := validateLabelKey(literal); err != nil { + return "", "", err + } + if t, _ := p.lookahead(Values); t == EndOfStringToken || t == CommaToken { + if operator != selection.DoesNotExist { + operator = selection.Exists + } + } + return literal, operator, nil +} + +// parseOperator return operator and eventually matchType +// matchType can be exact +func (p *Parser) parseOperator() (op selection.Operator, err error) { + tok, lit := p.consume(KeyAndOperator) + switch tok { + // DoesNotExistToken shouldn't be here because it's a unary operator, not a binary operator + case InToken: + op = selection.In + case EqualsToken: + op = selection.Equals + case DoubleEqualsToken: + op = selection.DoubleEquals + case GreaterThanToken: + op = selection.GreaterThan + case LessThanToken: + op = selection.LessThan + case NotInToken: + op = selection.NotIn + case NotEqualsToken: + op = selection.NotEquals + default: + return "", fmt.Errorf("found '%s', expected: '=', '!=', '==', 'in', notin'", lit) + } + return op, nil +} + +// parseValues parses the values for set based matching (x,y,z) +func (p *Parser) parseValues() (sets.String, error) { + tok, lit := p.consume(Values) + if tok != OpenParToken { + return nil, fmt.Errorf("found '%s' expected: '('", lit) + } + tok, lit = p.lookahead(Values) + switch tok { + case 
IdentifierToken, CommaToken: + s, err := p.parseIdentifiersList() // handles general cases + if err != nil { + return s, err + } + if tok, _ = p.consume(Values); tok != ClosedParToken { + return nil, fmt.Errorf("found '%s', expected: ')'", lit) + } + return s, nil + case ClosedParToken: // handles "()" + p.consume(Values) + return sets.NewString(""), nil + default: + return nil, fmt.Errorf("found '%s', expected: ',', ')' or identifier", lit) + } +} + +// parseIdentifiersList parses a (possibly empty) list of +// of comma separated (possibly empty) identifiers +func (p *Parser) parseIdentifiersList() (sets.String, error) { + s := sets.NewString() + for { + tok, lit := p.consume(Values) + switch tok { + case IdentifierToken: + s.Insert(lit) + tok2, lit2 := p.lookahead(Values) + switch tok2 { + case CommaToken: + continue + case ClosedParToken: + return s, nil + default: + return nil, fmt.Errorf("found '%s', expected: ',' or ')'", lit2) + } + case CommaToken: // handled here since we can have "(," + if s.Len() == 0 { + s.Insert("") // to handle (, + } + tok2, _ := p.lookahead(Values) + if tok2 == ClosedParToken { + s.Insert("") // to handle ,) Double "" removed by StringSet + return s, nil + } + if tok2 == CommaToken { + p.consume(Values) + s.Insert("") // to handle ,, Double "" removed by StringSet + } + default: // it can be operator + return s, fmt.Errorf("found '%s', expected: ',', or identifier", lit) + } + } +} + +// parseExactValue parses the only value for exact match style +func (p *Parser) parseExactValue() (sets.String, error) { + s := sets.NewString() + tok, lit := p.lookahead(Values) + if tok == EndOfStringToken || tok == CommaToken { + s.Insert("") + return s, nil + } + tok, lit = p.consume(Values) + if tok == IdentifierToken { + s.Insert(lit) + return s, nil + } + return nil, fmt.Errorf("found '%s', expected: identifier", lit) +} + +// Parse takes a string representing a selector and returns a selector +// object, or an error. This parsing function differs from ParseSelector +// as they parse different selectors with different syntaxes. +// The input will cause an error if it does not follow this form: +// +// <selector-syntax> ::= <requirement> | <requirement> "," <selector-syntax> +// <requirement> ::= [!] KEY [ <set-based-restriction> | <exact-match-restriction> ] +// <set-based-restriction> ::= "" | <inclusion-exclusion> <value-set> +// <inclusion-exclusion> ::= <inclusion> | <exclusion> +// <exclusion> ::= "notin" +// <inclusion> ::= "in" +// <value-set> ::= "(" <values> ")" +// <values> ::= VALUE | VALUE "," <values> +// <exact-match-restriction> ::= ["="|"=="|"!="] VALUE +// +// KEY is a sequence of one or more characters following [ DNS_SUBDOMAIN "/" ] DNS_LABEL. Max length is 63 characters. +// VALUE is a sequence of zero or more characters "([A-Za-z0-9_-\.])". Max length is 63 characters. +// Delimiter is white space: (' ', '\t') +// Example of valid syntax: +// "x in (foo,,baz),y,z notin ()" +// +// Note: +// (1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the +// VALUEs in its requirement +// (2) Exclusion - " notin " - denotes that the KEY is not equal to any +// of the VALUEs in its requirement or does not exist +// (3) The empty string is a valid VALUE +// (4) A requirement with just a KEY - as in "y" above - denotes that +// the KEY exists and can be any VALUE. +// (5) A requirement with just !KEY requires that the KEY not exist. 
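Editor's note: a concrete illustration of the grammar documented above, using the example selector string from these notes; this is hypothetical caller code, not part of the vendored file.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	sel, err := labels.Parse("x in (foo,,baz),y,z notin ()")
	if err != nil {
		panic(err)
	}
	// x matches "foo", y merely needs to exist, and z must be absent or not equal to "".
	fmt.Println(sel.Matches(labels.Set{"x": "foo", "y": "anything"})) // true
	fmt.Println(sel.Matches(labels.Set{"x": "bar", "y": "anything"})) // false: "bar" not in (foo,,baz)
}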
+// +func Parse(selector string) (Selector, error) { + parsedSelector, err := parse(selector) + if err == nil { + return parsedSelector, nil + } + return nil, err +} + +// parse parses the string representation of the selector and returns the internalSelector struct. +// The callers of this method can then decide how to return the internalSelector struct to their +// callers. This function has two callers now, one returns a Selector interface and the other +// returns a list of requirements. +func parse(selector string) (internalSelector, error) { + p := &Parser{l: &Lexer{s: selector, pos: 0}} + items, err := p.parse() + if err != nil { + return nil, err + } + sort.Sort(ByKey(items)) // sort to grant determistic parsing + return internalSelector(items), err +} + +func validateLabelKey(k string) error { + if errs := validation.IsQualifiedName(k); len(errs) != 0 { + return fmt.Errorf("invalid label key %q: %s", k, strings.Join(errs, "; ")) + } + return nil +} + +func validateLabelValue(v string) error { + if errs := validation.IsValidLabelValue(v); len(errs) != 0 { + return fmt.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; ")) + } + return nil +} + +// SelectorFromSet returns a Selector which will match exactly the given Set. A +// nil and empty Sets are considered equivalent to Everything(). +func SelectorFromSet(ls Set) Selector { + if ls == nil || len(ls) == 0 { + return internalSelector{} + } + var requirements internalSelector + for label, value := range ls { + if r, err := NewRequirement(label, selection.Equals, []string{value}); err != nil { + //TODO: double check errors when input comes from serialization? + return internalSelector{} + } else { + requirements = append(requirements, *r) + } + } + // sort to have deterministic string representation + sort.Sort(ByKey(requirements)) + return requirements +} + +// SelectorFromValidatedSet returns a Selector which will match exactly the given Set. +// A nil and empty Sets are considered equivalent to Everything(). +// It assumes that Set is already validated and doesn't do any validation. +func SelectorFromValidatedSet(ls Set) Selector { + if ls == nil || len(ls) == 0 { + return internalSelector{} + } + var requirements internalSelector + for label, value := range ls { + requirements = append(requirements, Requirement{key: label, operator: selection.Equals, strValues: []string{value}}) + } + // sort to have deterministic string representation + sort.Sort(ByKey(requirements)) + return requirements +} + +// ParseToRequirements takes a string representing a selector and returns a list of +// requirements. This function is suitable for those callers that perform additional +// processing on selector requirements. +// See the documentation for Parse() function for more details. +// TODO: Consider exporting the internalSelector type instead. +func ParseToRequirements(selector string) ([]Requirement, error) { + return parse(selector) +} diff --git a/vendor/k8s.io/apimachinery/pkg/openapi/common.go b/vendor/k8s.io/apimachinery/pkg/openapi/common.go new file mode 100644 index 000000000..605776ed8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/openapi/common.go @@ -0,0 +1,160 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package openapi + +import ( + "github.com/emicklei/go-restful" + "github.com/go-openapi/spec" + "strings" +) + +// OpenAPIDefinition describes single type. Normally these definitions are auto-generated using gen-openapi. +type OpenAPIDefinition struct { + Schema spec.Schema + Dependencies []string +} + +type ReferenceCallback func(path string) spec.Ref + +// OpenAPIDefinitions is collection of all definitions. +type GetOpenAPIDefinitions func(ReferenceCallback) map[string]OpenAPIDefinition + +// OpenAPIDefinitionGetter gets openAPI definitions for a given type. If a type implements this interface, +// the definition returned by it will be used, otherwise the auto-generated definitions will be used. See +// GetOpenAPITypeFormat for more information about trade-offs of using this interface or GetOpenAPITypeFormat method when +// possible. +type OpenAPIDefinitionGetter interface { + OpenAPIDefinition() *OpenAPIDefinition +} + +// Config is set of configuration for openAPI spec generation. +type Config struct { + // List of supported protocols such as https, http, etc. + ProtocolList []string + + // Info is general information about the API. + Info *spec.Info + + // DefaultResponse will be used if an operation does not have any responses listed. It + // will show up as ... "responses" : {"default" : $DefaultResponse} in the spec. + DefaultResponse *spec.Response + + // CommonResponses will be added as a response to all operation specs. This is a good place to add common + // responses such as authorization failed. + CommonResponses map[int]spec.Response + + // List of webservice's path prefixes to ignore + IgnorePrefixes []string + + // OpenAPIDefinitions should provide definition for all models used by routes. Failure to provide this map + // or any of the models will result in spec generation failure. + GetDefinitions GetOpenAPIDefinitions + + // GetOperationIDAndTags returns operation id and tags for a restful route. It is an optional function to customize operation IDs. + GetOperationIDAndTags func(servePath string, r *restful.Route) (string, []string, error) + + // GetDefinitionName returns a friendly name for a definition base on the serving path. parameter `name` is the full name of the definition. + // It is an optional function to customize model names. + GetDefinitionName func(servePath string, name string) (string, spec.Extensions) + + // PostProcessSpec runs after the spec is ready to serve. It allows a final modification to the spec before serving. + PostProcessSpec func(*spec.Swagger) (*spec.Swagger, error) + + // SecurityDefinitions is list of all security definitions for OpenAPI service. If this is not nil, the user of config + // is responsible to provide DefaultSecurity and (maybe) add unauthorized response to CommonResponses. + SecurityDefinitions *spec.SecurityDefinitions + + // DefaultSecurity for all operations. This will pass as spec.SwaggerProps.Security to OpenAPI. + // For most cases, this will be list of acceptable definitions in SecurityDefinitions. 
+ DefaultSecurity []map[string][]string +} + +// This function is a reference for converting go (or any custom type) to a simple open API type,format pair. There are +// two ways to customize spec for a type. If you add it here, a type will be converted to a simple type and the type +// comment (the comment that is added before type definition) will be lost. The spec will still have the property +// comment. The second way is to implement OpenAPIDefinitionGetter interface. That function can customize the spec (so +// the spec does not need to be simple type,format) or can even return a simple type,format (e.g. IntOrString). For simple +// type formats, the benefit of adding OpenAPIDefinitionGetter interface is to keep both type and property documentation. +// Example: +// type Sample struct { +// ... +// // port of the server +// port IntOrString +// ... +// } +// // IntOrString documentation... +// type IntOrString { ... } +// +// Adding IntOrString to this function: +// "port" : { +// format: "string", +// type: "int-or-string", +// Description: "port of the server" +// } +// +// Implement OpenAPIDefinitionGetter for IntOrString: +// +// "port" : { +// $Ref: "#/definitions/IntOrString" +// Description: "port of the server" +// } +// ... +// definitions: +// { +// "IntOrString": { +// format: "string", +// type: "int-or-string", +// Description: "IntOrString documentation..." // new +// } +// } +// +func GetOpenAPITypeFormat(typeName string) (string, string) { + schemaTypeFormatMap := map[string][]string{ + "uint": {"integer", "int32"}, + "uint8": {"integer", "byte"}, + "uint16": {"integer", "int32"}, + "uint32": {"integer", "int64"}, + "uint64": {"integer", "int64"}, + "int": {"integer", "int32"}, + "int8": {"integer", "byte"}, + "int16": {"integer", "int32"}, + "int32": {"integer", "int32"}, + "int64": {"integer", "int64"}, + "byte": {"integer", "byte"}, + "float64": {"number", "double"}, + "float32": {"number", "float"}, + "bool": {"boolean", ""}, + "time.Time": {"string", "date-time"}, + "string": {"string", ""}, + "integer": {"integer", ""}, + "number": {"number", ""}, + "boolean": {"boolean", ""}, + "[]byte": {"string", "byte"}, // base64 encoded characters + } + mapped, ok := schemaTypeFormatMap[typeName] + if !ok { + return "", "" + } + return mapped[0], mapped[1] +} + +func EscapeJsonPointer(p string) string { + // Escaping reference name using rfc6901 + p = strings.Replace(p, "~", "~0", -1) + p = strings.Replace(p, "/", "~1", -1) + return p +} diff --git a/vendor/k8s.io/apimachinery/pkg/openapi/doc.go b/vendor/k8s.io/apimachinery/pkg/openapi/doc.go new file mode 100644 index 000000000..5ed572cc1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/openapi/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package openapi holds shared codes and types between open API code generator and spec generator. 
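Editor's note: a small sketch of how a spec generator might consult GetOpenAPITypeFormat and EscapeJsonPointer, assuming the vendored import path k8s.io/apimachinery/pkg/openapi; the type names passed in are illustrative.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/openapi"
)

func main() {
	t, f := openapi.GetOpenAPITypeFormat("int64")
	fmt.Println(t, f) // "integer int64"

	t, f = openapi.GetOpenAPITypeFormat("mypkg.MyStruct")
	fmt.Println(t == "" && f == "") // true: not a simple type, needs its own OpenAPIDefinition

	fmt.Println(openapi.EscapeJsonPointer("a/b~c")) // "a~1b~0c" per RFC 6901 escaping
}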
+package openapi diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/OWNERS b/vendor/k8s.io/apimachinery/pkg/runtime/OWNERS new file mode 100644 index 000000000..a49419f70 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/OWNERS @@ -0,0 +1,19 @@ +approvers: +- caesarxuchao +- deads2k +- lavalamp +- smarterclayton +reviewers: +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- derekwaynecarr +- caesarxuchao +- mikedanese +- nikhiljindal +- gmarek +- krousey +- timothysc +- piosz +- mbohlool diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go new file mode 100644 index 000000000..d9748f066 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go @@ -0,0 +1,316 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "net/url" + "reflect" + + "k8s.io/apimachinery/pkg/conversion/queryparams" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// codec binds an encoder and decoder. +type codec struct { + Encoder + Decoder +} + +// NewCodec creates a Codec from an Encoder and Decoder. +func NewCodec(e Encoder, d Decoder) Codec { + return codec{e, d} +} + +// Encode is a convenience wrapper for encoding to a []byte from an Encoder +func Encode(e Encoder, obj Object) ([]byte, error) { + // TODO: reuse buffer + buf := &bytes.Buffer{} + if err := e.Encode(obj, buf); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// Decode is a convenience wrapper for decoding data into an Object. +func Decode(d Decoder, data []byte) (Object, error) { + obj, _, err := d.Decode(data, nil, nil) + return obj, err +} + +// DecodeInto performs a Decode into the provided object. +func DecodeInto(d Decoder, data []byte, into Object) error { + out, gvk, err := d.Decode(data, nil, into) + if err != nil { + return err + } + if out != into { + return fmt.Errorf("unable to decode %s into %v", gvk, reflect.TypeOf(into)) + } + return nil +} + +// EncodeOrDie is a version of Encode which will panic instead of returning an error. For tests. +func EncodeOrDie(e Encoder, obj Object) string { + bytes, err := Encode(e, obj) + if err != nil { + panic(err) + } + return string(bytes) +} + +// DefaultingSerializer invokes defaulting after decoding. +type DefaultingSerializer struct { + Defaulter ObjectDefaulter + Decoder Decoder + // Encoder is optional to allow this type to be used as both a Decoder and an Encoder + Encoder +} + +// Decode performs a decode and then allows the defaulter to act on the provided object. 
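Editor's note: a minimal sketch of the round trip that the Encode/Decode/DecodeInto helpers above wrap; `codec` and `obj` are assumed to come from the surrounding caller (any runtime.Codec and runtime.Object), so this is illustrative rather than a complete program.

	// codec is any runtime.Codec; obj is any runtime.Object (both assumed).
	data, err := runtime.Encode(codec, obj)
	if err != nil {
		return err
	}
	decoded, err := runtime.Decode(codec, data)
	if err != nil {
		return err
	}
	// DecodeInto(codec, data, target) does the same but fills an existing object,
	// returning an error if the bytes decode into a different object than target.
	_ = decoded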
+func (d DefaultingSerializer) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { + obj, gvk, err := d.Decoder.Decode(data, defaultGVK, into) + if err != nil { + return obj, gvk, err + } + d.Defaulter.Default(obj) + return obj, gvk, nil +} + +// UseOrCreateObject returns obj if the canonical ObjectKind returned by the provided typer matches gvk, or +// invokes the ObjectCreator to instantiate a new gvk. Returns an error if the typer cannot find the object. +func UseOrCreateObject(t ObjectTyper, c ObjectCreater, gvk schema.GroupVersionKind, obj Object) (Object, error) { + if obj != nil { + kinds, _, err := t.ObjectKinds(obj) + if err != nil { + return nil, err + } + for _, kind := range kinds { + if gvk == kind { + return obj, nil + } + } + } + return c.New(gvk) +} + +// NoopEncoder converts an Decoder to a Serializer or Codec for code that expects them but only uses decoding. +type NoopEncoder struct { + Decoder +} + +var _ Serializer = NoopEncoder{} + +func (n NoopEncoder) Encode(obj Object, w io.Writer) error { + return fmt.Errorf("encoding is not allowed for this codec: %v", reflect.TypeOf(n.Decoder)) +} + +// NoopDecoder converts an Encoder to a Serializer or Codec for code that expects them but only uses encoding. +type NoopDecoder struct { + Encoder +} + +var _ Serializer = NoopDecoder{} + +func (n NoopDecoder) Decode(data []byte, gvk *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { + return nil, nil, fmt.Errorf("decoding is not allowed for this codec: %v", reflect.TypeOf(n.Encoder)) +} + +// NewParameterCodec creates a ParameterCodec capable of transforming url values into versioned objects and back. +func NewParameterCodec(scheme *Scheme) ParameterCodec { + return ¶meterCodec{ + typer: scheme, + convertor: scheme, + creator: scheme, + } +} + +// parameterCodec implements conversion to and from query parameters and objects. +type parameterCodec struct { + typer ObjectTyper + convertor ObjectConvertor + creator ObjectCreater +} + +var _ ParameterCodec = ¶meterCodec{} + +// DecodeParameters converts the provided url.Values into an object of type From with the kind of into, and then +// converts that object to into (if necessary). Returns an error if the operation cannot be completed. +func (c *parameterCodec) DecodeParameters(parameters url.Values, from schema.GroupVersion, into Object) error { + if len(parameters) == 0 { + return nil + } + targetGVKs, _, err := c.typer.ObjectKinds(into) + if err != nil { + return err + } + for i := range targetGVKs { + if targetGVKs[i].GroupVersion() == from { + return c.convertor.Convert(¶meters, into, nil) + } + } + input, err := c.creator.New(from.WithKind(targetGVKs[0].Kind)) + if err != nil { + return err + } + if err := c.convertor.Convert(¶meters, input, nil); err != nil { + return err + } + return c.convertor.Convert(input, into, nil) +} + +// EncodeParameters converts the provided object into the to version, then converts that object to url.Values. +// Returns an error if conversion is not possible. 
+func (c *parameterCodec) EncodeParameters(obj Object, to schema.GroupVersion) (url.Values, error) { + gvks, _, err := c.typer.ObjectKinds(obj) + if err != nil { + return nil, err + } + gvk := gvks[0] + if to != gvk.GroupVersion() { + out, err := c.convertor.ConvertToVersion(obj, to) + if err != nil { + return nil, err + } + obj = out + } + return queryparams.Convert(obj) +} + +type base64Serializer struct { + Encoder + Decoder +} + +func NewBase64Serializer(e Encoder, d Decoder) Serializer { + return &base64Serializer{e, d} +} + +func (s base64Serializer) Encode(obj Object, stream io.Writer) error { + e := base64.NewEncoder(base64.StdEncoding, stream) + err := s.Encoder.Encode(obj, e) + e.Close() + return err +} + +func (s base64Serializer) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { + out := make([]byte, base64.StdEncoding.DecodedLen(len(data))) + n, err := base64.StdEncoding.Decode(out, data) + if err != nil { + return nil, nil, err + } + return s.Decoder.Decode(out[:n], defaults, into) +} + +// SerializerInfoForMediaType returns the first info in types that has a matching media type (which cannot +// include media-type parameters), or the first info with an empty media type, or false if no type matches. +func SerializerInfoForMediaType(types []SerializerInfo, mediaType string) (SerializerInfo, bool) { + for _, info := range types { + if info.MediaType == mediaType { + return info, true + } + } + for _, info := range types { + if len(info.MediaType) == 0 { + return info, true + } + } + return SerializerInfo{}, false +} + +var ( + // InternalGroupVersioner will always prefer the internal version for a given group version kind. + InternalGroupVersioner GroupVersioner = internalGroupVersioner{} + // DisabledGroupVersioner will reject all kinds passed to it. + DisabledGroupVersioner GroupVersioner = disabledGroupVersioner{} +) + +type internalGroupVersioner struct{} + +// KindForGroupVersionKinds returns an internal Kind if one is found, or converts the first provided kind to the internal version. +func (internalGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { + for _, kind := range kinds { + if kind.Version == APIVersionInternal { + return kind, true + } + } + for _, kind := range kinds { + return schema.GroupVersionKind{Group: kind.Group, Version: APIVersionInternal, Kind: kind.Kind}, true + } + return schema.GroupVersionKind{}, false +} + +type disabledGroupVersioner struct{} + +// KindForGroupVersionKinds returns false for any input. +func (disabledGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { + return schema.GroupVersionKind{}, false +} + +// GroupVersioners implements GroupVersioner and resolves to the first exact match for any kind. +type GroupVersioners []GroupVersioner + +// KindForGroupVersionKinds returns the first match of any of the group versioners, or false if no match occured. 
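Editor's note: a hedged sketch of SerializerInfoForMediaType's lookup behaviour; `supported` stands for a caller-supplied []runtime.SerializerInfo and is assumed, not taken from this file.

	info, ok := runtime.SerializerInfoForMediaType(supported, "application/json")
	if !ok {
		// neither an exact "application/json" entry nor a wildcard (empty MediaType) entry exists
	}
	_ = info.MediaType // on a wildcard hit this may be "" rather than the requested media type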
+func (gvs GroupVersioners) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { + for _, gv := range gvs { + target, ok := gv.KindForGroupVersionKinds(kinds) + if !ok { + continue + } + return target, true + } + return schema.GroupVersionKind{}, false +} + +// Assert that schema.GroupVersion and GroupVersions implement GroupVersioner +var _ GroupVersioner = schema.GroupVersion{} +var _ GroupVersioner = schema.GroupVersions{} +var _ GroupVersioner = multiGroupVersioner{} + +type multiGroupVersioner struct { + target schema.GroupVersion + acceptedGroupKinds []schema.GroupKind +} + +// NewMultiGroupVersioner returns the provided group version for any kind that matches one of the provided group kinds. +// Kind may be empty in the provided group kind, in which case any kind will match. +func NewMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKind) GroupVersioner { + if len(groupKinds) == 0 || (len(groupKinds) == 1 && groupKinds[0].Group == gv.Group) { + return gv + } + return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds} +} + +// KindForGroupVersionKinds returns the target group version if any kind matches any of the original group kinds. It will +// use the originating kind where possible. +func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { + for _, src := range kinds { + for _, kind := range v.acceptedGroupKinds { + if kind.Group != src.Group { + continue + } + if len(kind.Kind) > 0 && kind.Kind != src.Kind { + continue + } + return v.target.WithKind(src.Kind), true + } + } + return schema.GroupVersionKind{}, false +} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/codec_check.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go similarity index 88% rename from vendor/k8s.io/kubernetes/pkg/runtime/codec_check.go rename to vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go index 09e7d51ad..1d34ec1a8 100644 --- a/vendor/k8s.io/kubernetes/pkg/runtime/codec_check.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,14 +20,14 @@ import ( "fmt" "reflect" - "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/apimachinery/pkg/runtime/schema" ) // CheckCodec makes sure that the codec can encode objects like internalType, // decode all of the external types listed, and also decode them into the given // object. (Will modify internalObject.) (Assumes JSON serialization.) // TODO: verify that the correct external version is chosen on encode... -func CheckCodec(c Codec, internalType Object, externalTypes ...unversioned.GroupVersionKind) error { +func CheckCodec(c Codec, internalType Object, externalTypes ...schema.GroupVersionKind) error { _, err := Encode(c, internalType) if err != nil { return fmt.Errorf("Internal type not encodable: %v", err) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go new file mode 100644 index 000000000..8eedffc9c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go @@ -0,0 +1,98 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Defines conversions between generic types and structs to map query strings +// to struct objects. +package runtime + +import ( + "reflect" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/conversion" +) + +// JSONKeyMapper uses the struct tags on a conversion to determine the key value for +// the other side. Use when mapping from a map[string]* to a struct or vice versa. +func JSONKeyMapper(key string, sourceTag, destTag reflect.StructTag) (string, string) { + if s := destTag.Get("json"); len(s) > 0 { + return strings.SplitN(s, ",", 2)[0], key + } + if s := sourceTag.Get("json"); len(s) > 0 { + return key, strings.SplitN(s, ",", 2)[0] + } + return key, key +} + +// DefaultStringConversions are helpers for converting []string and string to real values. +var DefaultStringConversions = []interface{}{ + Convert_Slice_string_To_string, + Convert_Slice_string_To_int, + Convert_Slice_string_To_bool, + Convert_Slice_string_To_int64, +} + +func Convert_Slice_string_To_string(input *[]string, out *string, s conversion.Scope) error { + if len(*input) == 0 { + *out = "" + } + *out = (*input)[0] + return nil +} + +func Convert_Slice_string_To_int(input *[]string, out *int, s conversion.Scope) error { + if len(*input) == 0 { + *out = 0 + } + str := (*input)[0] + i, err := strconv.Atoi(str) + if err != nil { + return err + } + *out = i + return nil +} + +// Conver_Slice_string_To_bool will convert a string parameter to boolean. +// Only the absence of a value, a value of "false", or a value of "0" resolve to false. +// Any other value (including empty string) resolves to true. +func Convert_Slice_string_To_bool(input *[]string, out *bool, s conversion.Scope) error { + if len(*input) == 0 { + *out = false + return nil + } + switch strings.ToLower((*input)[0]) { + case "false", "0": + *out = false + default: + *out = true + } + return nil +} + +func Convert_Slice_string_To_int64(input *[]string, out *int64, s conversion.Scope) error { + if len(*input) == 0 { + *out = 0 + } + str := (*input)[0] + i, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return err + } + *out = i + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go new file mode 100644 index 000000000..a9d084d9f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go @@ -0,0 +1,45 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package runtime includes helper functions for working with API objects +// that follow the kubernetes API object conventions, which are: +// +// 0. 
Your API objects have a common metadata struct member, TypeMeta. +// 1. Your code refers to an internal set of API objects. +// 2. In a separate package, you have an external set of API objects. +// 3. The external set is considered to be versioned, and no breaking +// changes are ever made to it (fields may be added but not changed +// or removed). +// 4. As your api evolves, you'll make an additional versioned package +// with every major change. +// 5. Versioned packages have conversion functions which convert to +// and from the internal version. +// 6. You'll continue to support older versions according to your +// deprecation policy, and you can easily provide a program/library +// to update old versions into new versions because of 5. +// 7. All of your serializations and deserializations are handled in a +// centralized place. +// +// Package runtime provides a conversion helper to make 5 easy, and the +// Encode/Decode/DecodeInto trio to accomplish 7. You can also register +// additional "codecs" which use a version of your choice. It's +// recommended that you register your types with runtime in your +// package's init function. +// +// As a bonus, a few common types useful from all api objects and versions +// are provided in types.go. + +package runtime diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/embedded.go b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go similarity index 88% rename from vendor/k8s.io/kubernetes/pkg/runtime/embedded.go rename to vendor/k8s.io/apimachinery/pkg/runtime/embedded.go index a62080e39..e8825a787 100644 --- a/vendor/k8s.io/kubernetes/pkg/runtime/embedded.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,21 +19,21 @@ package runtime import ( "errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/conversion" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime/schema" ) type encodable struct { E Encoder `json:"-"` obj Object - versions []unversioned.GroupVersion + versions []schema.GroupVersion } -func (e encodable) GetObjectKind() unversioned.ObjectKind { return e.obj.GetObjectKind() } +func (e encodable) GetObjectKind() schema.ObjectKind { return e.obj.GetObjectKind() } // NewEncodable creates an object that will be encoded with the provided codec on demand. // Provided as a convenience for test cases dealing with internal objects. -func NewEncodable(e Encoder, obj Object, versions ...unversioned.GroupVersion) Object { +func NewEncodable(e Encoder, obj Object, versions ...schema.GroupVersion) Object { if _, ok := obj.(*Unknown); ok { return obj } @@ -52,7 +52,7 @@ func (re encodable) MarshalJSON() ([]byte, error) { // NewEncodableList creates an object that will be encoded with the provided codec on demand. // Provided as a convenience for test cases dealing with internal objects. 
-func NewEncodableList(e Encoder, objects []Object, versions ...unversioned.GroupVersion) []Object { +func NewEncodableList(e Encoder, objects []Object, versions ...schema.GroupVersion) []Object { out := make([]Object, len(objects)) for i := range objects { if _, ok := objects[i].(*Unknown); ok { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/error.go b/vendor/k8s.io/apimachinery/pkg/runtime/error.go new file mode 100644 index 000000000..c9a0e1696 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/error.go @@ -0,0 +1,102 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type notRegisteredErr struct { + gvk schema.GroupVersionKind + t reflect.Type +} + +// NewNotRegisteredErr is exposed for testing. +func NewNotRegisteredErr(gvk schema.GroupVersionKind, t reflect.Type) error { + return ¬RegisteredErr{gvk: gvk, t: t} +} + +func (k *notRegisteredErr) Error() string { + if k.t != nil { + return fmt.Sprintf("no kind is registered for the type %v", k.t) + } + if len(k.gvk.Kind) == 0 { + return fmt.Sprintf("no version %q has been registered", k.gvk.GroupVersion()) + } + if k.gvk.Version == APIVersionInternal { + return fmt.Sprintf("no kind %q is registered for the internal version of group %q", k.gvk.Kind, k.gvk.Group) + } + + return fmt.Sprintf("no kind %q is registered for version %q", k.gvk.Kind, k.gvk.GroupVersion()) +} + +// IsNotRegisteredError returns true if the error indicates the provided +// object or input data is not registered. +func IsNotRegisteredError(err error) bool { + if err == nil { + return false + } + _, ok := err.(*notRegisteredErr) + return ok +} + +type missingKindErr struct { + data string +} + +func NewMissingKindErr(data string) error { + return &missingKindErr{data} +} + +func (k *missingKindErr) Error() string { + return fmt.Sprintf("Object 'Kind' is missing in '%s'", k.data) +} + +// IsMissingKind returns true if the error indicates that the provided object +// is missing a 'Kind' field. +func IsMissingKind(err error) bool { + if err == nil { + return false + } + _, ok := err.(*missingKindErr) + return ok +} + +type missingVersionErr struct { + data string +} + +// IsMissingVersion returns true if the error indicates that the provided object +// is missing a 'Version' field. 
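Editor's note: the predicate helpers in this file let callers branch on decode failures without referring to the unexported error types; a hedged sketch, with `codec` and `data` assumed to exist in the caller.

	obj, err := runtime.Decode(codec, data)
	switch {
	case runtime.IsMissingKind(err):
		// payload had no "kind" field
	case runtime.IsMissingVersion(err):
		// payload had no "apiVersion" field
	case runtime.IsNotRegisteredError(err):
		// the decoded kind is not registered with the scheme backing the codec
	case err != nil:
		return err
	}
	_ = obj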
+func NewMissingVersionErr(data string) error { + return &missingVersionErr{data} +} + +func (k *missingVersionErr) Error() string { + return fmt.Sprintf("Object 'apiVersion' is missing in '%s'", k.data) +} + +func IsMissingVersion(err error) bool { + if err == nil { + return false + } + _, ok := err.(*missingVersionErr) + return ok +} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go similarity index 96% rename from vendor/k8s.io/kubernetes/pkg/runtime/extension.go rename to vendor/k8s.io/apimachinery/pkg/runtime/extension.go index eca82986e..4d23ee9ee 100644 --- a/vendor/k8s.io/kubernetes/pkg/runtime/extension.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go new file mode 100644 index 000000000..9947bd8e1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -0,0 +1,767 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto +// DO NOT EDIT! + +/* + Package runtime is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto + + It has these top-level messages: + RawExtension + TypeMeta + Unknown +*/ +package runtime + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *RawExtension) Reset() { *m = RawExtension{} } +func (*RawExtension) ProtoMessage() {} +func (*RawExtension) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *Unknown) Reset() { *m = Unknown{} } +func (*Unknown) ProtoMessage() {} +func (*Unknown) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func init() { + proto.RegisterType((*RawExtension)(nil), "k8s.io.apimachinery.pkg.runtime.RawExtension") + proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.runtime.TypeMeta") + proto.RegisterType((*Unknown)(nil), "k8s.io.apimachinery.pkg.runtime.Unknown") +} +func (m *RawExtension) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RawExtension) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Raw != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Raw))) + i += copy(data[i:], m.Raw) + } + return i, nil +} + +func (m *TypeMeta) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *TypeMeta) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + return i, nil +} + +func (m *Unknown) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Unknown) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.TypeMeta.Size())) + n1, err := m.TypeMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + if m.Raw != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Raw))) + i += copy(data[i:], m.Raw) + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) + i += copy(data[i:], m.ContentEncoding) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) + i += copy(data[i:], m.ContentType) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m 
*RawExtension) Size() (n int) { + var l int + _ = l + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TypeMeta) Size() (n int) { + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Unknown) Size() (n int) { + var l int + _ = l + l = m.TypeMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.ContentEncoding) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContentType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *RawExtension) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RawExtension{`, + `Raw:` + valueToStringGenerated(this.Raw) + `,`, + `}`, + }, "") + return s +} +func (this *TypeMeta) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TypeMeta{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *Unknown) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Unknown{`, + `TypeMeta:` + strings.Replace(strings.Replace(this.TypeMeta.String(), "TypeMeta", "TypeMeta", 1), `&`, ``, 1) + `,`, + `Raw:` + valueToStringGenerated(this.Raw) + `,`, + `ContentEncoding:` + fmt.Sprintf("%v", this.ContentEncoding) + `,`, + `ContentType:` + fmt.Sprintf("%v", this.ContentType) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *RawExtension) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RawExtension: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RawExtension: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], data[iNdEx:postIndex]...) 
+ if m.Raw == nil { + m.Raw = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TypeMeta) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Unknown) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Unknown: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Unknown: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TypeMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], data[iNdEx:postIndex]...) + if m.Raw == nil { + m.Raw = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContentEncoding", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContentEncoding = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContentType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContentType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + 
length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 391 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x90, 0x4f, 0x8b, 0xd3, 0x40, + 0x18, 0xc6, 0x93, 0x6d, 0xa1, 0xeb, 0xb4, 0xb0, 0x32, 0x1e, 0x8c, 0x7b, 0x98, 0x2c, 0x3d, 0xd9, + 0x83, 0x33, 0xb0, 0x22, 0x78, 0xdd, 0x94, 0x82, 0x22, 0x82, 0x0c, 0xfe, 0x01, 0x4f, 0x4e, 0x93, + 0x31, 0x1d, 0x62, 0xdf, 0x09, 0x93, 0x89, 0xb1, 0x37, 0x3f, 0x82, 0x1f, 0xab, 0xc7, 0x1e, 0x3d, + 0x15, 0x1b, 0x3f, 0x84, 0x57, 0xe9, 0x74, 0x5a, 0x6b, 0x45, 0xf6, 0x96, 0x79, 0x9f, 0xe7, 0xf7, + 0xbc, 0xcf, 0x1b, 0xf4, 0xac, 0x78, 0x5a, 0x51, 0xa5, 0x59, 0x51, 0x4f, 0xa5, 0x01, 0x69, 0x65, + 0xc5, 0x3e, 0x4b, 0xc8, 0xb4, 0x61, 0x5e, 0x10, 0xa5, 0x9a, 0x8b, 0x74, 0xa6, 0x40, 0x9a, 0x05, + 0x2b, 0x8b, 0x9c, 0x99, 0x1a, 0xac, 0x9a, 0x4b, 0x96, 0x4b, 0x90, 0x46, 0x58, 0x99, 0xd1, 0xd2, + 0x68, 0xab, 0x71, 0xbc, 0x03, 0xe8, 0x31, 0x40, 0xcb, 0x22, 0xa7, 0x1e, 0xb8, 0x7c, 0x94, 0x2b, + 0x3b, 0xab, 0xa7, 0x34, 0xd5, 0x73, 0x96, 0xeb, 0x5c, 0x33, 0xc7, 0x4d, 0xeb, 0x8f, 0xee, 0xe5, + 0x1e, 0xee, 0x6b, 0x97, 0x77, 0xf9, 0xf8, 0x7f, 0x05, 0x6a, 0xab, 0x3e, 0x31, 0x05, 0xb6, 0xb2, + 0xe6, 0xb4, 0xc4, 0x70, 0x84, 0x06, 0x5c, 0x34, 0x93, 0x2f, 0x56, 0x42, 0xa5, 0x34, 0xe0, 0x07, + 0xa8, 0x63, 0x44, 0x13, 0x85, 0x57, 0xe1, 0xc3, 0x41, 0xd2, 0x6b, 0xd7, 0x71, 0x87, 0x8b, 0x86, + 0x6f, 0x67, 0xc3, 0x0f, 0xe8, 0xfc, 0xf5, 0xa2, 0x94, 0x2f, 0xa5, 0x15, 0xf8, 0x1a, 0x21, 0x51, + 0xaa, 0xb7, 0xd2, 0x6c, 0x21, 0xe7, 0xbe, 0x93, 0xe0, 0xe5, 0x3a, 0x0e, 0xda, 0x75, 0x8c, 0x6e, + 0x5e, 0x3d, 0xf7, 0x0a, 0x3f, 0x72, 0xe1, 0x2b, 0xd4, 0x2d, 0x14, 0x64, 0xd1, 0x99, 0x73, 0x0f, + 0xbc, 0xbb, 0xfb, 0x42, 0x41, 0xc6, 0x9d, 0x32, 0xfc, 0x15, 0xa2, 0xde, 0x1b, 0x28, 0x40, 0x37, + 0x80, 0xdf, 0xa1, 0x73, 0xeb, 0xb7, 0xb9, 0xfc, 0xfe, 0xf5, 0x88, 0xde, 0xf2, 0xc3, 0xe8, 0xbe, + 0x5e, 0x72, 0xd7, 0x87, 0x1f, 0x0a, 0xf3, 0x43, 0xd8, 0xfe, 0xc2, 0xb3, 0x7f, 0x2f, 0xc4, 0x37, + 0xe8, 0x22, 0xd5, 0x60, 0x25, 0xd8, 0x09, 0xa4, 0x3a, 0x53, 0x90, 0x47, 0x1d, 0x57, 0xf6, 0xbe, + 0xcf, 0xbb, 0x18, 0xff, 0x2d, 0xf3, 0x53, 0x3f, 0x7e, 0x82, 0xfa, 0x7e, 0xb4, 0x5d, 0x1d, 0x75, + 0x1d, 0x7e, 0xcf, 0xe3, 0xfd, 0xf1, 0x1f, 0x89, 0x1f, 0xfb, 0x92, 0xd1, 0x72, 0x43, 0x82, 0xd5, + 0x86, 0x04, 0xdf, 0x37, 0x24, 0xf8, 0xda, 0x92, 0x70, 0xd9, 0x92, 0x70, 0xd5, 0x92, 0xf0, 0x47, + 0x4b, 0xc2, 0x6f, 0x3f, 0x49, 0xf0, 0xbe, 0xe7, 0x8f, 0xfc, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x5d, + 0x24, 0xc6, 0x1a, 0x81, 0x02, 0x00, 0x00, +} diff --git 
a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto new file mode 100644 index 000000000..57fc84078 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.runtime; + +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "runtime"; + +// RawExtension is used to hold extensions in external versions. +// +// To use this, make a field which has RawExtension as its type in your external, versioned +// struct, and Object in your internal struct. You also need to register your +// various plugin types. +// +// // Internal package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.Object `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // External package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.RawExtension `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // On the wire, the JSON will look something like this: +// { +// "kind":"MyAPIObject", +// "apiVersion":"v1", +// "myPlugin": { +// "kind":"PluginA", +// "aOption":"foo", +// }, +// } +// +// So what happens? Decode first uses json or yaml to unmarshal the serialized data into +// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. +// The next step is to copy (using pkg/conversion) into the internal struct. The runtime +// package's DefaultScheme has conversion functions installed which will unpack the +// JSON stored in RawExtension, turning it into the correct object type, and storing it +// in the Object. (TODO: In the case where the object is of an unknown type, a +// runtime.Unknown object will be created and stored.) +// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +message RawExtension { + // Raw is the underlying serialization of this object. + // + // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. + optional bytes raw = 1; +} + +// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, +// like this: +// type MyAwesomeAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// ... // other fields +// } +// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind +// +// TypeMeta is provided here for convenience. You may use it directly from this package or define +// your own with the same fields. 
+// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +message TypeMeta { + // +optional + optional string apiVersion = 1; + + // +optional + optional string kind = 2; +} + +// Unknown allows api objects with unknown types to be passed-through. This can be used +// to deal with the API objects from a plug-in. Unknown objects still have functioning +// TypeMeta features-- kind, version, etc. +// TODO: Make this object have easy access to field based accessors and settors for +// metadata and field mutatation. +// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +message Unknown { + optional TypeMeta typeMeta = 1; + + // Raw will hold the complete serialized object which couldn't be matched + // with a registered type. Most likely, nothing should be done with this + // except for passing it through the system. + optional bytes raw = 2; + + // ContentEncoding is encoding used to encode 'Raw' data. + // Unspecified means no encoding. + optional string contentEncoding = 3; + + // ContentType is serialization method used to serialize 'Raw'. + // Unspecified means ContentTypeJSON. + optional string contentType = 4; +} + diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go new file mode 100644 index 000000000..a6c1a8d34 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go @@ -0,0 +1,212 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" + "io" + "reflect" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/errors" +) + +// unsafeObjectConvertor implements ObjectConvertor using the unsafe conversion path. +type unsafeObjectConvertor struct { + *Scheme +} + +var _ ObjectConvertor = unsafeObjectConvertor{} + +// ConvertToVersion converts in to the provided outVersion without copying the input first, which +// is only safe if the output object is not mutated or reused. +func (c unsafeObjectConvertor) ConvertToVersion(in Object, outVersion GroupVersioner) (Object, error) { + return c.Scheme.UnsafeConvertToVersion(in, outVersion) +} + +// UnsafeObjectConvertor performs object conversion without copying the object structure, +// for use when the converted object will not be reused or mutated. Primarily for use within +// versioned codecs, which use the external object for serialization but do not return it. +func UnsafeObjectConvertor(scheme *Scheme) ObjectConvertor { + return unsafeObjectConvertor{scheme} +} + +// SetField puts the value of src, into fieldName, which must be a member of v. +// The value of src must be assignable to the field. 
+func SetField(src interface{}, v reflect.Value, fieldName string) error { + field := v.FieldByName(fieldName) + if !field.IsValid() { + return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + } + srcValue := reflect.ValueOf(src) + if srcValue.Type().AssignableTo(field.Type()) { + field.Set(srcValue) + return nil + } + if srcValue.Type().ConvertibleTo(field.Type()) { + field.Set(srcValue.Convert(field.Type())) + return nil + } + return fmt.Errorf("couldn't assign/convert %v to %v", srcValue.Type(), field.Type()) +} + +// Field puts the value of fieldName, which must be a member of v, into dest, +// which must be a variable to which this field's value can be assigned. +func Field(v reflect.Value, fieldName string, dest interface{}) error { + field := v.FieldByName(fieldName) + if !field.IsValid() { + return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + } + destValue, err := conversion.EnforcePtr(dest) + if err != nil { + return err + } + if field.Type().AssignableTo(destValue.Type()) { + destValue.Set(field) + return nil + } + if field.Type().ConvertibleTo(destValue.Type()) { + destValue.Set(field.Convert(destValue.Type())) + return nil + } + return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), destValue.Type()) +} + +// fieldPtr puts the address of fieldName, which must be a member of v, +// into dest, which must be an address of a variable to which this field's +// address can be assigned. +func FieldPtr(v reflect.Value, fieldName string, dest interface{}) error { + field := v.FieldByName(fieldName) + if !field.IsValid() { + return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) + } + v, err := conversion.EnforcePtr(dest) + if err != nil { + return err + } + field = field.Addr() + if field.Type().AssignableTo(v.Type()) { + v.Set(field) + return nil + } + if field.Type().ConvertibleTo(v.Type()) { + v.Set(field.Convert(v.Type())) + return nil + } + return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), v.Type()) +} + +// EncodeList ensures that each object in an array is converted to a Unknown{} in serialized form. +// TODO: accept a content type. +func EncodeList(e Encoder, objects []Object) error { + var errs []error + for i := range objects { + data, err := Encode(e, objects[i]) + if err != nil { + errs = append(errs, err) + continue + } + // TODO: Set ContentEncoding and ContentType. + objects[i] = &Unknown{Raw: data} + } + return errors.NewAggregate(errs) +} + +func decodeListItem(obj *Unknown, decoders []Decoder) (Object, error) { + for _, decoder := range decoders { + // TODO: Decode based on ContentType. + obj, err := Decode(decoder, obj.Raw) + if err != nil { + if IsNotRegisteredError(err) { + continue + } + return nil, err + } + return obj, nil + } + // could not decode, so leave the object as Unknown, but give the decoders the + // chance to set Unknown.TypeMeta if it is available. + for _, decoder := range decoders { + if err := DecodeInto(decoder, obj.Raw, obj); err == nil { + return obj, nil + } + } + return obj, nil +} + +// DecodeList alters the list in place, attempting to decode any objects found in +// the list that have the Unknown type. Any errors that occur are returned +// after the entire list is processed. Decoders are tried in order. 
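// Editor's note (hedged sketch, not part of the vendored file): EncodeList and
// DecodeList above are symmetric helpers. Assuming a codec obtained elsewhere
// (any runtime.Codec, i.e. something implementing both Encoder and Decoder),
// caller-side usage might look like:
//
//     // Serialize every element in place into a runtime.Unknown wrapper.
//     if err := runtime.EncodeList(codec, objs); err != nil {
//         return err
//     }
//     // Later, turn the Unknown wrappers back into typed objects; errors are
//     // collected per element rather than aborting the whole list.
//     if errs := runtime.DecodeList(objs, codec); len(errs) > 0 {
//         return errs[0]
//     }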
+func DecodeList(objects []Object, decoders ...Decoder) []error { + errs := []error(nil) + for i, obj := range objects { + switch t := obj.(type) { + case *Unknown: + decoded, err := decodeListItem(t, decoders) + if err != nil { + errs = append(errs, err) + break + } + objects[i] = decoded + } + } + return errs +} + +// MultiObjectTyper returns the types of objects across multiple schemes in order. +type MultiObjectTyper []ObjectTyper + +var _ ObjectTyper = MultiObjectTyper{} + +func (m MultiObjectTyper) ObjectKinds(obj Object) (gvks []schema.GroupVersionKind, unversionedType bool, err error) { + for _, t := range m { + gvks, unversionedType, err = t.ObjectKinds(obj) + if err == nil { + return + } + } + return +} + +func (m MultiObjectTyper) Recognizes(gvk schema.GroupVersionKind) bool { + for _, t := range m { + if t.Recognizes(gvk) { + return true + } + } + return false +} + +// SetZeroValue would set the object of objPtr to zero value of its type. +func SetZeroValue(objPtr Object) error { + v, err := conversion.EnforcePtr(objPtr) + if err != nil { + return err + } + v.Set(reflect.Zero(v.Type())) + return nil +} + +// DefaultFramer is valid for any stream that can read objects serially without +// any separation in the stream. +var DefaultFramer = defaultFramer{} + +type defaultFramer struct{} + +func (defaultFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { return r } +func (defaultFramer) NewFrameWriter(w io.Writer) io.Writer { return w } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go new file mode 100644 index 000000000..fcb18ba11 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -0,0 +1,251 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "io" + "net/url" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + // APIVersionInternal may be used if you are registering a type that should not + // be considered stable or serialized - it is a convention only and has no + // special behavior in this package. + APIVersionInternal = "__internal" +) + +// GroupVersioner refines a set of possible conversion targets into a single option. +type GroupVersioner interface { + // KindForGroupVersionKinds returns a desired target group version kind for the given input, or returns ok false if no + // target is known. In general, if the return target is not in the input list, the caller is expected to invoke + // Scheme.New(target) and then perform a conversion between the current Go type and the destination Go type. + // Sophisticated implementations may use additional information about the input kinds to pick a destination kind. + KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (target schema.GroupVersionKind, ok bool) +} + +// Encoders write objects to a serialized form +type Encoder interface { + // Encode writes an object to a stream. 
Implementations may return errors if the versions are + // incompatible, or if no conversion is defined. + Encode(obj Object, w io.Writer) error +} + +// Decoders attempt to load an object from data. +type Decoder interface { + // Decode attempts to deserialize the provided data using either the innate typing of the scheme or the + // default kind, group, and version provided. It returns a decoded object as well as the kind, group, and + // version from the serialized data, or an error. If into is non-nil, it will be used as the target type + // and implementations may choose to use it rather than reallocating an object. However, the object is not + // guaranteed to be populated. The returned object is not guaranteed to match into. If defaults are + // provided, they are applied to the data by default. If no defaults or partial defaults are provided, the + // type of the into may be used to guide conversion decisions. + Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) +} + +// Serializer is the core interface for transforming objects into a serialized format and back. +// Implementations may choose to perform conversion of the object, but no assumptions should be made. +type Serializer interface { + Encoder + Decoder +} + +// Codec is a Serializer that deals with the details of versioning objects. It offers the same +// interface as Serializer, so this is a marker to consumers that care about the version of the objects +// they receive. +type Codec Serializer + +// ParameterCodec defines methods for serializing and deserializing API objects to url.Values and +// performing any necessary conversion. Unlike the normal Codec, query parameters are not self describing +// and the desired version must be specified. +type ParameterCodec interface { + // DecodeParameters takes the given url.Values in the specified group version and decodes them + // into the provided object, or returns an error. + DecodeParameters(parameters url.Values, from schema.GroupVersion, into Object) error + // EncodeParameters encodes the provided object as query parameters or returns an error. + EncodeParameters(obj Object, to schema.GroupVersion) (url.Values, error) +} + +// Framer is a factory for creating readers and writers that obey a particular framing pattern. +type Framer interface { + NewFrameReader(r io.ReadCloser) io.ReadCloser + NewFrameWriter(w io.Writer) io.Writer +} + +// SerializerInfo contains information about a specific serialization format +type SerializerInfo struct { + // MediaType is the value that represents this serializer over the wire. + MediaType string + // EncodesAsText indicates this serializer can be encoded to UTF-8 safely. + EncodesAsText bool + // Serializer is the individual object serializer for this media type. + Serializer Serializer + // PrettySerializer, if set, can serialize this object in a form biased towards + // readability. + PrettySerializer Serializer + // StreamSerializer, if set, describes the streaming serialization format + // for this media type. + StreamSerializer *StreamSerializerInfo +} + +// StreamSerializerInfo contains information about a specific stream serialization format +type StreamSerializerInfo struct { + // EncodesAsText indicates this serializer can be encoded to UTF-8 safely. 
+ EncodesAsText bool + // Serializer is the top level object serializer for this type when streaming + Serializer + // Framer is the factory for retrieving streams that separate objects on the wire + Framer +} + +// NegotiatedSerializer is an interface used for obtaining encoders, decoders, and serializers +// for multiple supported media types. This would commonly be accepted by a server component +// that performs HTTP content negotiation to accept multiple formats. +type NegotiatedSerializer interface { + // SupportedMediaTypes is the media types supported for reading and writing single objects. + SupportedMediaTypes() []SerializerInfo + + // EncoderForVersion returns an encoder that ensures objects being written to the provided + // serializer are in the provided group version. + EncoderForVersion(serializer Encoder, gv GroupVersioner) Encoder + // DecoderForVersion returns a decoder that ensures objects being read by the provided + // serializer are in the provided group version by default. + DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder +} + +// StorageSerializer is an interface used for obtaining encoders, decoders, and serializers +// that can read and write data at rest. This would commonly be used by client tools that must +// read files, or server side storage interfaces that persist restful objects. +type StorageSerializer interface { + // SupportedMediaTypes are the media types supported for reading and writing objects. + SupportedMediaTypes() []SerializerInfo + + // UniversalDeserializer returns a Serializer that can read objects in multiple supported formats + // by introspecting the data at rest. + UniversalDeserializer() Decoder + + // EncoderForVersion returns an encoder that ensures objects being written to the provided + // serializer are in the provided group version. + EncoderForVersion(serializer Encoder, gv GroupVersioner) Encoder + // DecoderForVersion returns a decoder that ensures objects being read by the provided + // serializer are in the provided group version by default. + DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder +} + +// NestedObjectEncoder is an optional interface that objects may implement to be given +// an opportunity to encode any nested Objects / RawExtensions during serialization. +type NestedObjectEncoder interface { + EncodeNestedObjects(e Encoder) error +} + +// NestedObjectDecoder is an optional interface that objects may implement to be given +// an opportunity to decode any nested Objects / RawExtensions during serialization. +type NestedObjectDecoder interface { + DecodeNestedObjects(d Decoder) error +} + +/////////////////////////////////////////////////////////////////////////////// +// Non-codec interfaces + +type ObjectDefaulter interface { + // Default takes an object (must be a pointer) and applies any default values. + // Defaulters may not error. + Default(in Object) +} + +type ObjectVersioner interface { + ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error) +} + +// ObjectConvertor converts an object to a different version. +type ObjectConvertor interface { + // Convert attempts to convert one object into another, or returns an error. This method does + // not guarantee the in object is not mutated. The context argument will be passed to + // all nested conversions. + Convert(in, out, context interface{}) error + // ConvertToVersion takes the provided object and converts it the provided version. This + // method does not guarantee that the in object is not mutated. 
This method is similar to + // Convert() but handles specific details of choosing the correct output version. + ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error) + ConvertFieldLabel(version, kind, label, value string) (string, string, error) +} + +// ObjectTyper contains methods for extracting the APIVersion and Kind +// of objects. +type ObjectTyper interface { + // ObjectKinds returns the all possible group,version,kind of the provided object, true if + // the object is unversioned, or an error if the object is not recognized + // (IsNotRegisteredError will return true). + ObjectKinds(Object) ([]schema.GroupVersionKind, bool, error) + // Recognizes returns true if the scheme is able to handle the provided version and kind, + // or more precisely that the provided version is a possible conversion or decoding + // target. + Recognizes(gvk schema.GroupVersionKind) bool +} + +// ObjectCreater contains methods for instantiating an object by kind and version. +type ObjectCreater interface { + New(kind schema.GroupVersionKind) (out Object, err error) +} + +// ObjectCopier duplicates an object. +type ObjectCopier interface { + // Copy returns an exact copy of the provided Object, or an error if the + // copy could not be completed. + Copy(Object) (Object, error) +} + +// ResourceVersioner provides methods for setting and retrieving +// the resource version from an API object. +type ResourceVersioner interface { + SetResourceVersion(obj Object, version string) error + ResourceVersion(obj Object) (string, error) +} + +// SelfLinker provides methods for setting and retrieving the SelfLink field of an API object. +type SelfLinker interface { + SetSelfLink(obj Object, selfLink string) error + SelfLink(obj Object) (string, error) + + // Knowing Name is sometimes necessary to use a SelfLinker. + Name(obj Object) (string, error) + // Knowing Namespace is sometimes necessary to use a SelfLinker + Namespace(obj Object) (string, error) +} + +// All API types registered with Scheme must support the Object interface. Since objects in a scheme are +// expected to be serialized to the wire, the interface an Object must provide to the Scheme allows +// serializers to set the kind, version, and group the object is represented as. An Object may choose +// to return a no-op ObjectKindAccessor in cases where it is not expected to be serialized. +type Object interface { + GetObjectKind() schema.ObjectKind +} + +// Unstructured objects store values as map[string]interface{}, with only values that can be serialized +// to JSON allowed. +type Unstructured interface { + // IsUnstructuredObject is a marker interface to allow objects that can be serialized but not introspected + // to bypass conversion. + IsUnstructuredObject() + // IsList returns true if this type is a list or matches the list convention - has an array called "items". + IsList() bool + // UnstructuredContent returns a non-nil, mutable map of the contents of this object. Values may be + // []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to + // and from JSON. + UnstructuredContent() map[string]interface{} +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/register.go b/vendor/k8s.io/apimachinery/pkg/runtime/register.go new file mode 100644 index 000000000..2ec6db820 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/register.go @@ -0,0 +1,61 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import "k8s.io/apimachinery/pkg/runtime/schema" + +// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta +func (obj *TypeMeta) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} + +// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta +func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *Unknown) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } + +// GetObjectKind implements Object for VersionedObjects, returning an empty ObjectKind +// interface if no objects are provided, or the ObjectKind interface of the object in the +// highest array position. +func (obj *VersionedObjects) GetObjectKind() schema.ObjectKind { + last := obj.Last() + if last == nil { + return schema.EmptyObjectKind + } + return last.GetObjectKind() +} + +// First returns the leftmost object in the VersionedObjects array, which is usually the +// object as serialized on the wire. +func (obj *VersionedObjects) First() Object { + if len(obj.Objects) == 0 { + return nil + } + return obj.Objects[0] +} + +// Last is the rightmost object in the VersionedObjects array, which is the object after +// all transformations have been applied. This is the same object that would be returned +// by Decode in a normal invocation (without VersionedObjects in the into argument). +func (obj *VersionedObjects) Last() Object { + if len(obj.Objects) == 0 { + return nil + } + return obj.Objects[len(obj.Objects)-1] +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go new file mode 100644 index 000000000..dfe4c5f53 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go @@ -0,0 +1,59 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto +// DO NOT EDIT! + +/* + Package schema is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto + + It has these top-level messages: +*/ +package schema + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +var fileDescriptorGenerated = []byte{ + // 199 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0xce, 0x2f, 0x4e, 0x05, 0x31, + 0x10, 0xc7, 0xf1, 0xd6, 0x20, 0x90, 0xc8, 0x27, 0x46, 0x12, 0x0c, 0x1d, 0x81, 0x41, 0x73, 0x01, + 0x3c, 0xae, 0xbb, 0x6f, 0xe8, 0x36, 0xa5, 0x7f, 0xd2, 0x4e, 0x49, 0x70, 0x1c, 0x81, 0x63, 0xad, + 0x5c, 0x89, 0x64, 0xcb, 0x45, 0x48, 0xda, 0x15, 0x84, 0x04, 0xd7, 0x5f, 0x9a, 0xcf, 0xe4, 0x7b, + 0xf9, 0xe8, 0xee, 0x8b, 0xb2, 0x11, 0x5d, 0x9d, 0x28, 0x07, 0x62, 0x2a, 0xf8, 0x4a, 0xe1, 0x1c, + 0x33, 0x1e, 0x1f, 0x3a, 0x59, 0xaf, 0xe7, 0xc5, 0x06, 0xca, 0x6f, 0x98, 0x9c, 0xc1, 0x5c, 0x03, + 0x5b, 0x4f, 0x58, 0xe6, 0x85, 0xbc, 0x46, 0x43, 0x81, 0xb2, 0x66, 0x3a, 0xab, 0x94, 0x23, 0xc7, + 0xab, 0xeb, 0xe1, 0xd4, 0x6f, 0xa7, 0x92, 0x33, 0xea, 0x70, 0x6a, 0xb8, 0xd3, 0xad, 0xb1, 0xbc, + 0xd4, 0x49, 0xcd, 0xd1, 0xa3, 0x89, 0x26, 0x62, 0xe7, 0x53, 0x7d, 0xee, 0xab, 0x8f, 0xfe, 0x1a, + 0x67, 0x4f, 0x77, 0xff, 0xe5, 0x54, 0xb6, 0x2f, 0x68, 0x03, 0x17, 0xce, 0x7f, 0x5b, 0x1e, 0x6e, + 0xd6, 0x1d, 0xc4, 0xb6, 0x83, 0xf8, 0xdc, 0x41, 0xbc, 0x37, 0x90, 0x6b, 0x03, 0xb9, 0x35, 0x90, + 0x5f, 0x0d, 0xe4, 0xc7, 0x37, 0x88, 0xa7, 0x8b, 0x51, 0xf3, 0x13, 0x00, 0x00, 0xff, 0xff, 0xd9, + 0x82, 0x09, 0xbe, 0x07, 0x01, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto new file mode 100644 index 000000000..ebc1a263d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto @@ -0,0 +1,28 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.runtime.schema; + +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "schema"; + diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go new file mode 100644 index 000000000..1a9bba106 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -0,0 +1,277 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "fmt" + "strings" +) + +// ParseResourceArg takes the common style of string which may be either `resource.group.com` or `resource.version.group.com` +// and parses it out into both possibilities. This code takes no responsibility for knowing which representation was intended +// but with a knowledge of all GroupVersions, calling code can take a very good guess. If there are only two segments, then +// `*GroupVersionResource` is nil. +// `resource.group.com` -> `group=com, version=group, resource=resource` and `group=group.com, resource=resource` +func ParseResourceArg(arg string) (*GroupVersionResource, GroupResource) { + var gvr *GroupVersionResource + if strings.Count(arg, ".") >= 2 { + s := strings.SplitN(arg, ".", 3) + gvr = &GroupVersionResource{Group: s[2], Version: s[1], Resource: s[0]} + } + + return gvr, ParseGroupResource(arg) +} + +// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +type GroupResource struct { + Group string + Resource string +} + +func (gr GroupResource) WithVersion(version string) GroupVersionResource { + return GroupVersionResource{Group: gr.Group, Version: version, Resource: gr.Resource} +} + +func (gr GroupResource) Empty() bool { + return len(gr.Group) == 0 && len(gr.Resource) == 0 +} + +func (gr *GroupResource) String() string { + if len(gr.Group) == 0 { + return gr.Resource + } + return gr.Resource + "." + gr.Group +} + +// ParseGroupResource turns "resource.group" string into a GroupResource struct. Empty strings are allowed +// for each field. +func ParseGroupResource(gr string) GroupResource { + if i := strings.Index(gr, "."); i == -1 { + return GroupResource{Resource: gr} + } else { + return GroupResource{Group: gr[i+1:], Resource: gr[:i]} + } +} + +// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion +// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling +type GroupVersionResource struct { + Group string + Version string + Resource string +} + +func (gvr GroupVersionResource) Empty() bool { + return len(gvr.Group) == 0 && len(gvr.Version) == 0 && len(gvr.Resource) == 0 +} + +func (gvr GroupVersionResource) GroupResource() GroupResource { + return GroupResource{Group: gvr.Group, Resource: gvr.Resource} +} + +func (gvr GroupVersionResource) GroupVersion() GroupVersion { + return GroupVersion{Group: gvr.Group, Version: gvr.Version} +} + +func (gvr *GroupVersionResource) String() string { + return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") +} + +// GroupKind specifies a Group and a Kind, but does not force a version. 
This is useful for identifying +// concepts during lookup stages without having partially valid types +type GroupKind struct { + Group string + Kind string +} + +func (gk GroupKind) Empty() bool { + return len(gk.Group) == 0 && len(gk.Kind) == 0 +} + +func (gk GroupKind) WithVersion(version string) GroupVersionKind { + return GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind} +} + +func (gk *GroupKind) String() string { + if len(gk.Group) == 0 { + return gk.Kind + } + return gk.Kind + "." + gk.Group +} + +// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion +// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling +type GroupVersionKind struct { + Group string + Version string + Kind string +} + +// Empty returns true if group, version, and kind are empty +func (gvk GroupVersionKind) Empty() bool { + return len(gvk.Group) == 0 && len(gvk.Version) == 0 && len(gvk.Kind) == 0 +} + +func (gvk GroupVersionKind) GroupKind() GroupKind { + return GroupKind{Group: gvk.Group, Kind: gvk.Kind} +} + +func (gvk GroupVersionKind) GroupVersion() GroupVersion { + return GroupVersion{Group: gvk.Group, Version: gvk.Version} +} + +func (gvk GroupVersionKind) String() string { + return gvk.Group + "/" + gvk.Version + ", Kind=" + gvk.Kind +} + +// GroupVersion contains the "group" and the "version", which uniquely identifies the API. +type GroupVersion struct { + Group string + Version string +} + +// Empty returns true if group and version are empty +func (gv GroupVersion) Empty() bool { + return len(gv.Group) == 0 && len(gv.Version) == 0 +} + +// String puts "group" and "version" into a single "group/version" string. For the legacy v1 +// it returns "v1". +func (gv GroupVersion) String() string { + // special case the internal apiVersion for the legacy kube types + if gv.Empty() { + return "" + } + + // special case of "v1" for backward compatibility + if len(gv.Group) == 0 && gv.Version == "v1" { + return gv.Version + } + if len(gv.Group) > 0 { + return gv.Group + "/" + gv.Version + } + return gv.Version +} + +// KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false +// if none of the options match the group. It prefers a match to group and version over just group. +// TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme. +// TODO: Introduce an adapter type between GroupVersion and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) +// in fewer places. +func (gv GroupVersion) KindForGroupVersionKinds(kinds []GroupVersionKind) (target GroupVersionKind, ok bool) { + for _, gvk := range kinds { + if gvk.Group == gv.Group && gvk.Version == gv.Version { + return gvk, true + } + } + for _, gvk := range kinds { + if gvk.Group == gv.Group { + return gv.WithKind(gvk.Kind), true + } + } + return GroupVersionKind{}, false +} + +// ParseGroupVersion turns "group/version" string into a GroupVersion struct. It reports error +// if it cannot parse the string. +func ParseGroupVersion(gv string) (GroupVersion, error) { + // this can be the internal version for the legacy kube types + // TODO once we've cleared the last uses as strings, this special case should be removed. 
+ if (len(gv) == 0) || (gv == "/") { + return GroupVersion{}, nil + } + + switch strings.Count(gv, "/") { + case 0: + return GroupVersion{"", gv}, nil + case 1: + i := strings.Index(gv, "/") + return GroupVersion{gv[:i], gv[i+1:]}, nil + default: + return GroupVersion{}, fmt.Errorf("unexpected GroupVersion string: %v", gv) + } +} + +// WithKind creates a GroupVersionKind based on the method receiver's GroupVersion and the passed Kind. +func (gv GroupVersion) WithKind(kind string) GroupVersionKind { + return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind} +} + +// WithResource creates a GroupVersionResource based on the method receiver's GroupVersion and the passed Resource. +func (gv GroupVersion) WithResource(resource string) GroupVersionResource { + return GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: resource} +} + +// GroupVersions can be used to represent a set of desired group versions. +// TODO: Move GroupVersions to a package under pkg/runtime, since it's used by scheme. +// TODO: Introduce an adapter type between GroupVersions and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) +// in fewer places. +type GroupVersions []GroupVersion + +// KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false +// if none of the options match the group. +func (gvs GroupVersions) KindForGroupVersionKinds(kinds []GroupVersionKind) (GroupVersionKind, bool) { + var targets []GroupVersionKind + for _, gv := range gvs { + target, ok := gv.KindForGroupVersionKinds(kinds) + if !ok { + continue + } + targets = append(targets, target) + } + if len(targets) == 1 { + return targets[0], true + } + if len(targets) > 1 { + return bestMatch(kinds, targets), true + } + return GroupVersionKind{}, false +} + +// bestMatch tries to pick best matching GroupVersionKind and falls back to the first +// found if no exact match exists. +func bestMatch(kinds []GroupVersionKind, targets []GroupVersionKind) GroupVersionKind { + for _, gvk := range targets { + for _, k := range kinds { + if k == gvk { + return k + } + } + } + return targets[0] +} + +// ToAPIVersionAndKind is a convenience method for satisfying runtime.Object on types that +// do not use TypeMeta. +func (gvk *GroupVersionKind) ToAPIVersionAndKind() (string, string) { + if gvk == nil { + return "", "" + } + return gvk.GroupVersion().String(), gvk.Kind +} + +// FromAPIVersionAndKind returns a GVK representing the provided fields for types that +// do not use TypeMeta. This method exists to support test types and legacy serializations +// that have a distinct group and kind. +// TODO: further reduce usage of this method. +func FromAPIVersionAndKind(apiVersion, kind string) GroupVersionKind { + if gv, err := ParseGroupVersion(apiVersion); err == nil { + return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind} + } + return GroupVersionKind{Kind: kind} +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go new file mode 100644 index 000000000..b57066845 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go @@ -0,0 +1,40 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +// All objects that are serialized from a Scheme encode their type information. This interface is used +// by serialization to set type information from the Scheme onto the serialized version of an object. +// For objects that cannot be serialized or have unique requirements, this interface may be a no-op. +type ObjectKind interface { + // SetGroupVersionKind sets or clears the intended serialized kind of an object. Passing kind nil + // should clear the current setting. + SetGroupVersionKind(kind GroupVersionKind) + // GroupVersionKind returns the stored group, version, and kind of an object, or nil if the object does + // not expose or provide these fields. + GroupVersionKind() GroupVersionKind +} + +// EmptyObjectKind implements the ObjectKind interface as a noop +var EmptyObjectKind = emptyObjectKind{} + +type emptyObjectKind struct{} + +// SetGroupVersionKind implements the ObjectKind interface +func (emptyObjectKind) SetGroupVersionKind(gvk GroupVersionKind) {} + +// GroupVersionKind implements the ObjectKind interface +func (emptyObjectKind) GroupVersionKind() GroupVersionKind { return GroupVersionKind{} } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go new file mode 100644 index 000000000..fbec6ad9b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -0,0 +1,601 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" + "net/url" + "reflect" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// Scheme defines methods for serializing and deserializing API objects, a type +// registry for converting group, version, and kind information to and from Go +// schemas, and mappings between Go schemas of different versions. A scheme is the +// foundation for a versioned API and versioned configuration over time. +// +// In a Scheme, a Type is a particular Go struct, a Version is a point-in-time +// identifier for a particular representation of that Type (typically backwards +// compatible), a Kind is the unique name for that Type within the Version, and a +// Group identifies a set of Versions, Kinds, and Types that evolve over time. An +// Unversioned Type is one that is not yet formally bound to a type and is promised +// to be backwards compatible (effectively a "v1" of a Type that does not expect +// to break in the future). +// +// Schemes are not expected to change at runtime and are only threadsafe after +// registration is complete. 
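// Editor's note (hedged sketch, not part of the vendored file): the schema
// helpers vendored above are the identifiers a Scheme maps to Go types. Using
// only functions defined in this diff (ParseGroupVersion, WithKind,
// ToAPIVersionAndKind, FromAPIVersionAndKind), a minimal round trip is:
//
//     gv, err := schema.ParseGroupVersion("example.com/v1")
//     if err != nil {
//         panic(err)
//     }
//     gvk := gv.WithKind("Widget")              // {Group:"example.com" Version:"v1" Kind:"Widget"}
//     apiVersion, kind := gvk.ToAPIVersionAndKind()
//     back := schema.FromAPIVersionAndKind(apiVersion, kind)
//     _ = back                                  // equal to gvk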
+type Scheme struct { + // versionMap allows one to figure out the go type of an object with + // the given version and name. + gvkToType map[schema.GroupVersionKind]reflect.Type + + // typeToGroupVersion allows one to find metadata for a given go object. + // The reflect.Type we index by should *not* be a pointer. + typeToGVK map[reflect.Type][]schema.GroupVersionKind + + // unversionedTypes are transformed without conversion in ConvertToVersion. + unversionedTypes map[reflect.Type]schema.GroupVersionKind + + // unversionedKinds are the names of kinds that can be created in the context of any group + // or version + // TODO: resolve the status of unversioned types. + unversionedKinds map[string]reflect.Type + + // Map from version and resource to the corresponding func to convert + // resource field labels in that version to internal version. + fieldLabelConversionFuncs map[string]map[string]FieldLabelConversionFunc + + // defaulterFuncs is an array of interfaces to be called with an object to provide defaulting + // the provided object must be a pointer. + defaulterFuncs map[reflect.Type]func(interface{}) + + // converter stores all registered conversion functions. It also has + // default coverting behavior. + converter *conversion.Converter + + // cloner stores all registered copy functions. It also has default + // deep copy behavior. + cloner *conversion.Cloner +} + +// Function to convert a field selector to internal representation. +type FieldLabelConversionFunc func(label, value string) (internalLabel, internalValue string, err error) + +// NewScheme creates a new Scheme. This scheme is pluggable by default. +func NewScheme() *Scheme { + s := &Scheme{ + gvkToType: map[schema.GroupVersionKind]reflect.Type{}, + typeToGVK: map[reflect.Type][]schema.GroupVersionKind{}, + unversionedTypes: map[reflect.Type]schema.GroupVersionKind{}, + unversionedKinds: map[string]reflect.Type{}, + cloner: conversion.NewCloner(), + fieldLabelConversionFuncs: map[string]map[string]FieldLabelConversionFunc{}, + defaulterFuncs: map[reflect.Type]func(interface{}){}, + } + s.converter = conversion.NewConverter(s.nameFunc) + + s.AddConversionFuncs(DefaultEmbeddedConversions()...) + + // Enable map[string][]string conversions by default + if err := s.AddConversionFuncs(DefaultStringConversions...); err != nil { + panic(err) + } + if err := s.RegisterInputDefaults(&map[string][]string{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields); err != nil { + panic(err) + } + if err := s.RegisterInputDefaults(&url.Values{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields); err != nil { + panic(err) + } + return s +} + +// nameFunc returns the name of the type that we wish to use to determine when two types attempt +// a conversion. Defaults to the go name of the type if the type is not registered. +func (s *Scheme) nameFunc(t reflect.Type) string { + // find the preferred names for this type + gvks, ok := s.typeToGVK[t] + if !ok { + return t.Name() + } + + for _, gvk := range gvks { + internalGV := gvk.GroupVersion() + internalGV.Version = "__internal" // this is hacky and maybe should be passed in + internalGVK := internalGV.WithKind(gvk.Kind) + + if internalType, exists := s.gvkToType[internalGVK]; exists { + return s.typeToGVK[internalType][0].Kind + } + } + + return gvks[0].Kind +} + +// fromScope gets the input version, desired output version, and desired Scheme +// from a conversion.Scope. 
+func (s *Scheme) fromScope(scope conversion.Scope) *Scheme { + return s +} + +// Converter allows access to the converter for the scheme +func (s *Scheme) Converter() *conversion.Converter { + return s.converter +} + +// AddUnversionedTypes registers the provided types as "unversioned", which means that they follow special rules. +// Whenever an object of this type is serialized, it is serialized with the provided group version and is not +// converted. Thus unversioned objects are expected to remain backwards compatible forever, as if they were in an +// API group and version that would never be updated. +// +// TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into +// every version with particular schemas. Resolve this method at that point. +func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Object) { + s.AddKnownTypes(version, types...) + for _, obj := range types { + t := reflect.TypeOf(obj).Elem() + gvk := version.WithKind(t.Name()) + s.unversionedTypes[t] = gvk + if _, ok := s.unversionedKinds[gvk.Kind]; ok { + panic(fmt.Sprintf("%v has already been registered as unversioned kind %q - kind name must be unique", reflect.TypeOf(t), gvk.Kind)) + } + s.unversionedKinds[gvk.Kind] = t + } +} + +// AddKnownTypes registers all types passed in 'types' as being members of version 'version'. +// All objects passed to types should be pointers to structs. The name that go reports for +// the struct becomes the "kind" field when encoding. Version may not be empty - use the +// APIVersionInternal constant if you have a type that does not have a formal version. +func (s *Scheme) AddKnownTypes(gv schema.GroupVersion, types ...Object) { + for _, obj := range types { + t := reflect.TypeOf(obj) + if t.Kind() != reflect.Ptr { + panic("All types must be pointers to structs.") + } + t = t.Elem() + s.AddKnownTypeWithName(gv.WithKind(t.Name()), obj) + } +} + +// AddKnownTypeWithName is like AddKnownTypes, but it lets you specify what this type should +// be encoded as. Useful for testing when you don't want to make multiple packages to define +// your structs. Version may not be empty - use the APIVersionInternal constant if you have a +// type that does not have a formal version. +func (s *Scheme) AddKnownTypeWithName(gvk schema.GroupVersionKind, obj Object) { + t := reflect.TypeOf(obj) + if len(gvk.Version) == 0 { + panic(fmt.Sprintf("version is required on all types: %s %v", gvk, t)) + } + if t.Kind() != reflect.Ptr { + panic("All types must be pointers to structs.") + } + t = t.Elem() + if t.Kind() != reflect.Struct { + panic("All types must be pointers to structs.") + } + + if oldT, found := s.gvkToType[gvk]; found && oldT != t { + panic(fmt.Sprintf("Double registration of different types for %v: old=%v.%v, new=%v.%v", gvk, oldT.PkgPath(), oldT.Name(), t.PkgPath(), t.Name())) + } + + s.gvkToType[gvk] = t + + for _, existingGvk := range s.typeToGVK[t] { + if existingGvk == gvk { + return + } + } + s.typeToGVK[t] = append(s.typeToGVK[t], gvk) +} + +// KnownTypes returns the types known for the given version. +func (s *Scheme) KnownTypes(gv schema.GroupVersion) map[string]reflect.Type { + types := make(map[string]reflect.Type) + for gvk, t := range s.gvkToType { + if gv != gvk.GroupVersion() { + continue + } + + types[gvk.Kind] = t + } + return types +} + +// AllKnownTypes returns the all known types. 
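A minimal sketch of the registration flow described above, assuming a hypothetical Widget type in the invented example.com/v1 group, and assuming (as in this snapshot of the library) that runtime.Object only requires GetObjectKind() and that *runtime.TypeMeta implements schema.ObjectKind:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// Widget is a stand-in API type; embedding runtime.TypeMeta provides the
// apiVersion/kind fields that carry its serialized type information.
type Widget struct {
	runtime.TypeMeta `json:",inline"`
	Name             string `json:"name,omitempty"`
}

// GetObjectKind satisfies runtime.Object by exposing the embedded TypeMeta.
func (w *Widget) GetObjectKind() schema.ObjectKind { return &w.TypeMeta }

var widgetGV = schema.GroupVersion{Group: "example.com", Version: "v1"}

func main() {
	s := runtime.NewScheme()

	// Register *Widget; its Go type name ("Widget") becomes the kind.
	s.AddKnownTypes(widgetGV, &Widget{})

	// Resolve the registration from an instance of the type...
	gvk, unversioned, err := s.ObjectKind(&Widget{})
	fmt.Println(gvk.Kind, unversioned, err) // Widget false <nil>

	// ...and create a fresh instance from the GVK by reflection.
	obj, err := s.New(widgetGV.WithKind("Widget"))
	fmt.Printf("%T %v\n", obj, err) // *main.Widget <nil>
}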
+func (s *Scheme) AllKnownTypes() map[schema.GroupVersionKind]reflect.Type { + return s.gvkToType +} + +// ObjectKind returns the group,version,kind of the go object and true if this object +// is considered unversioned, or an error if it's not a pointer or is unregistered. +func (s *Scheme) ObjectKind(obj Object) (schema.GroupVersionKind, bool, error) { + gvks, unversionedType, err := s.ObjectKinds(obj) + if err != nil { + return schema.GroupVersionKind{}, false, err + } + return gvks[0], unversionedType, nil +} + +// ObjectKinds returns all possible group,version,kind of the go object, true if the +// object is considered unversioned, or an error if it's not a pointer or is unregistered. +func (s *Scheme) ObjectKinds(obj Object) ([]schema.GroupVersionKind, bool, error) { + v, err := conversion.EnforcePtr(obj) + if err != nil { + return nil, false, err + } + t := v.Type() + + gvks, ok := s.typeToGVK[t] + if !ok { + return nil, false, NewNotRegisteredErr(schema.GroupVersionKind{}, t) + } + _, unversionedType := s.unversionedTypes[t] + + return gvks, unversionedType, nil +} + +// Recognizes returns true if the scheme is able to handle the provided group,version,kind +// of an object. +func (s *Scheme) Recognizes(gvk schema.GroupVersionKind) bool { + _, exists := s.gvkToType[gvk] + return exists +} + +func (s *Scheme) IsUnversioned(obj Object) (bool, bool) { + v, err := conversion.EnforcePtr(obj) + if err != nil { + return false, false + } + t := v.Type() + + if _, ok := s.typeToGVK[t]; !ok { + return false, false + } + _, ok := s.unversionedTypes[t] + return ok, true +} + +// New returns a new API object of the given version and name, or an error if it hasn't +// been registered. The version and kind fields must be specified. +func (s *Scheme) New(kind schema.GroupVersionKind) (Object, error) { + if t, exists := s.gvkToType[kind]; exists { + return reflect.New(t).Interface().(Object), nil + } + + if t, exists := s.unversionedKinds[kind.Kind]; exists { + return reflect.New(t).Interface().(Object), nil + } + return nil, NewNotRegisteredErr(kind, nil) +} + +// AddGenericConversionFunc adds a function that accepts the ConversionFunc call pattern +// (for two conversion types) to the converter. These functions are checked first during +// a normal conversion, but are otherwise not called. Use AddConversionFuncs when registering +// typed conversions. +func (s *Scheme) AddGenericConversionFunc(fn conversion.GenericConversionFunc) { + s.converter.AddGenericConversionFunc(fn) +} + +// Log sets a logger on the scheme. For test purposes only +func (s *Scheme) Log(l conversion.DebugLogger) { + s.converter.Debug = l +} + +// AddIgnoredConversionType identifies a pair of types that should be skipped by +// conversion (because the data inside them is explicitly dropped during +// conversion). +func (s *Scheme) AddIgnoredConversionType(from, to interface{}) error { + return s.converter.RegisterIgnoredConversion(from, to) +} + +// AddConversionFuncs adds functions to the list of conversion functions. The given +// functions should know how to convert between two of your API objects, or their +// sub-objects. We deduce how to call these functions from the types of their two +// parameters; see the comment for Converter.Register. +// +// Note that, if you need to copy sub-objects that didn't change, you can use the +// conversion.Scope object that will be passed to your conversion function. 
+// Additionally, all conversions started by Scheme will set the SrcVersion and +// DestVersion fields on the Meta object. Example: +// +// s.AddConversionFuncs( +// func(in *InternalObject, out *ExternalObject, scope conversion.Scope) error { +// // You can depend on Meta() being non-nil, and this being set to +// // the source version, e.g., "" +// s.Meta().SrcVersion +// // You can depend on this being set to the destination version, +// // e.g., "v1". +// s.Meta().DestVersion +// // Call scope.Convert to copy sub-fields. +// s.Convert(&in.SubFieldThatMoved, &out.NewLocation.NewName, 0) +// return nil +// }, +// ) +// +// (For more detail about conversion functions, see Converter.Register's comment.) +// +// Also note that the default behavior, if you don't add a conversion function, is to +// sanely copy fields that have the same names and same type names. It's OK if the +// destination type has extra fields, but it must not remove any. So you only need to +// add conversion functions for things with changed/removed fields. +func (s *Scheme) AddConversionFuncs(conversionFuncs ...interface{}) error { + for _, f := range conversionFuncs { + if err := s.converter.RegisterConversionFunc(f); err != nil { + return err + } + } + return nil +} + +// Similar to AddConversionFuncs, but registers conversion functions that were +// automatically generated. +func (s *Scheme) AddGeneratedConversionFuncs(conversionFuncs ...interface{}) error { + for _, f := range conversionFuncs { + if err := s.converter.RegisterGeneratedConversionFunc(f); err != nil { + return err + } + } + return nil +} + +// AddDeepCopyFuncs adds a function to the list of deep-copy functions. +// For the expected format of deep-copy function, see the comment for +// Copier.RegisterDeepCopyFunction. +func (s *Scheme) AddDeepCopyFuncs(deepCopyFuncs ...interface{}) error { + for _, f := range deepCopyFuncs { + if err := s.cloner.RegisterDeepCopyFunc(f); err != nil { + return err + } + } + return nil +} + +// Similar to AddDeepCopyFuncs, but registers deep-copy functions that were +// automatically generated. +func (s *Scheme) AddGeneratedDeepCopyFuncs(deepCopyFuncs ...conversion.GeneratedDeepCopyFunc) error { + for _, fn := range deepCopyFuncs { + if err := s.cloner.RegisterGeneratedDeepCopyFunc(fn); err != nil { + return err + } + } + return nil +} + +// AddFieldLabelConversionFunc adds a conversion function to convert field selectors +// of the given kind from the given version to internal version representation. +func (s *Scheme) AddFieldLabelConversionFunc(version, kind string, conversionFunc FieldLabelConversionFunc) error { + if s.fieldLabelConversionFuncs[version] == nil { + s.fieldLabelConversionFuncs[version] = map[string]FieldLabelConversionFunc{} + } + + s.fieldLabelConversionFuncs[version][kind] = conversionFunc + return nil +} + +// AddStructFieldConversion allows you to specify a mechanical copy for a moved +// or renamed struct field without writing an entire conversion function. See +// the comment in conversion.Converter.SetStructFieldCopy for parameter details. +// Call as many times as needed, even on the same fields. +func (s *Scheme) AddStructFieldConversion(srcFieldType interface{}, srcFieldName string, destFieldType interface{}, destFieldName string) error { + return s.converter.SetStructFieldCopy(srcFieldType, srcFieldName, destFieldType, destFieldName) +} + +// RegisterInputDefaults sets the provided field mapping function and field matching +// as the defaults for the provided input type. 
The fn may be nil, in which case no +// mapping will happen by default. Use this method to register a mechanism for handling +// a specific input type in conversion, such as a map[string]string to structs. +func (s *Scheme) RegisterInputDefaults(in interface{}, fn conversion.FieldMappingFunc, defaultFlags conversion.FieldMatchingFlags) error { + return s.converter.RegisterInputDefaults(in, fn, defaultFlags) +} + +// AddDefaultingFuncs adds functions to the list of default-value functions. +// Each of the given functions is responsible for applying default values +// when converting an instance of a versioned API object into an internal +// API object. These functions do not need to handle sub-objects. We deduce +// how to call these functions from the types of their two parameters. +// +// s.AddDefaultingFuncs( +// func(obj *v1.Pod) { +// if obj.OptionalField == "" { +// obj.OptionalField = "DefaultValue" +// } +// }, +// ) +func (s *Scheme) AddDefaultingFuncs(defaultingFuncs ...interface{}) error { + for _, f := range defaultingFuncs { + err := s.converter.RegisterDefaultingFunc(f) + if err != nil { + return err + } + } + return nil +} + +// AddTypeDefaultingFuncs registers a function that is passed a pointer to an +// object and can default fields on the object. These functions will be invoked +// when Default() is called. The function will never be called unless the +// defaulted object matches srcType. If this function is invoked twice with the +// same srcType, the fn passed to the later call will be used instead. +func (s *Scheme) AddTypeDefaultingFunc(srcType Object, fn func(interface{})) { + s.defaulterFuncs[reflect.TypeOf(srcType)] = fn +} + +// Default sets defaults on the provided Object. +func (s *Scheme) Default(src Object) { + if fn, ok := s.defaulterFuncs[reflect.TypeOf(src)]; ok { + fn(src) + } +} + +// Copy does a deep copy of an API object. +func (s *Scheme) Copy(src Object) (Object, error) { + dst, err := s.DeepCopy(src) + if err != nil { + return nil, err + } + return dst.(Object), nil +} + +// Performs a deep copy of the given object. +func (s *Scheme) DeepCopy(src interface{}) (interface{}, error) { + return s.cloner.DeepCopy(src) +} + +// Convert will attempt to convert in into out. Both must be pointers. For easy +// testing of conversion functions. Returns an error if the conversion isn't +// possible. You can call this with types that haven't been registered (for example, +// a to test conversion of types that are nested within registered types). The +// context interface is passed to the convertor. +// TODO: identify whether context should be hidden, or behind a formal context/scope +// interface +func (s *Scheme) Convert(in, out interface{}, context interface{}) error { + flags, meta := s.generateConvertMeta(in) + meta.Context = context + if flags == 0 { + flags = conversion.AllowDifferentFieldTypeNames + } + return s.converter.Convert(in, out, flags, meta) +} + +// Converts the given field label and value for an kind field selector from +// versioned representation to an unversioned one. 
+func (s *Scheme) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { + if s.fieldLabelConversionFuncs[version] == nil { + return "", "", fmt.Errorf("No field label conversion function found for version: %s", version) + } + conversionFunc, ok := s.fieldLabelConversionFuncs[version][kind] + if !ok { + return "", "", fmt.Errorf("No field label conversion function found for version %s and kind %s", version, kind) + } + return conversionFunc(label, value) +} + +// ConvertToVersion attempts to convert an input object to its matching Kind in another +// version within this scheme. Will return an error if the provided version does not +// contain the inKind (or a mapping by name defined with AddKnownTypeWithName). Will also +// return an error if the conversion does not result in a valid Object being +// returned. Passes target down to the conversion methods as the Context on the scope. +func (s *Scheme) ConvertToVersion(in Object, target GroupVersioner) (Object, error) { + return s.convertToVersion(true, in, target) +} + +// UnsafeConvertToVersion will convert in to the provided target if such a conversion is possible, +// but does not guarantee the output object does not share fields with the input object. It attempts to be as +// efficient as possible when doing conversion. +func (s *Scheme) UnsafeConvertToVersion(in Object, target GroupVersioner) (Object, error) { + return s.convertToVersion(false, in, target) +} + +// convertToVersion handles conversion with an optional copy. +func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) (Object, error) { + // determine the incoming kinds with as few allocations as possible. + t := reflect.TypeOf(in) + if t.Kind() != reflect.Ptr { + return nil, fmt.Errorf("only pointer types may be converted: %v", t) + } + t = t.Elem() + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("only pointers to struct types may be converted: %v", t) + } + kinds, ok := s.typeToGVK[t] + if !ok || len(kinds) == 0 { + return nil, NewNotRegisteredErr(schema.GroupVersionKind{}, t) + } + + gvk, ok := target.KindForGroupVersionKinds(kinds) + if !ok { + // try to see if this type is listed as unversioned (for legacy support) + // TODO: when we move to server API versions, we should completely remove the unversioned concept + if unversionedKind, ok := s.unversionedTypes[t]; ok { + if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok { + return copyAndSetTargetKind(copy, s, in, gvk) + } + return copyAndSetTargetKind(copy, s, in, unversionedKind) + } + + // TODO: should this be a typed error? 
+ return nil, fmt.Errorf("%v is not suitable for converting to %q", t, target) + } + + // target wants to use the existing type, set kind and return (no conversion necessary) + for _, kind := range kinds { + if gvk == kind { + return copyAndSetTargetKind(copy, s, in, gvk) + } + } + + // type is unversioned, no conversion necessary + if unversionedKind, ok := s.unversionedTypes[t]; ok { + if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok { + return copyAndSetTargetKind(copy, s, in, gvk) + } + return copyAndSetTargetKind(copy, s, in, unversionedKind) + } + + out, err := s.New(gvk) + if err != nil { + return nil, err + } + + if copy { + copied, err := s.Copy(in) + if err != nil { + return nil, err + } + in = copied + } + + flags, meta := s.generateConvertMeta(in) + meta.Context = target + if err := s.converter.Convert(in, out, flags, meta); err != nil { + return nil, err + } + + setTargetKind(out, gvk) + return out, nil +} + +// generateConvertMeta constructs the meta value we pass to Convert. +func (s *Scheme) generateConvertMeta(in interface{}) (conversion.FieldMatchingFlags, *conversion.Meta) { + return s.converter.DefaultMeta(reflect.TypeOf(in)) +} + +// copyAndSetTargetKind performs a conditional copy before returning the object, or an error if copy was not successful. +func copyAndSetTargetKind(copy bool, copier ObjectCopier, obj Object, kind schema.GroupVersionKind) (Object, error) { + if copy { + copied, err := copier.Copy(obj) + if err != nil { + return nil, err + } + obj = copied + } + setTargetKind(obj, kind) + return obj, nil +} + +// setTargetKind sets the kind on an object, taking into account whether the target kind is the internal version. +func setTargetKind(obj Object, kind schema.GroupVersionKind) { + if kind.Version == APIVersionInternal { + // internal is a special case + // TODO: look at removing the need to special case this + obj.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{}) + return + } + obj.GetObjectKind().SetGroupVersionKind(kind) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go new file mode 100644 index 000000000..944db4818 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go @@ -0,0 +1,48 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +// SchemeBuilder collects functions that add things to a scheme. It's to allow +// code to compile without explicitly referencing generated types. You should +// declare one in each package that will have generated deep copy or conversion +// functions. +type SchemeBuilder []func(*Scheme) error + +// AddToScheme applies all the stored functions to the scheme. A non-nil error +// indicates that one function failed and the attempt was abandoned. 
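The SchemeBuilder above is typically used as a package-level registration hook; a sketch of that convention, again with the invented example.com/v1 group and the same stand-in Widget type:

package widget

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// SchemeGroupVersion is the group/version this hypothetical package serves.
var SchemeGroupVersion = schema.GroupVersion{Group: "example.com", Version: "v1"}

var (
	// SchemeBuilder collects this package's registration functions.
	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	// AddToScheme is the hook callers invoke to register these types.
	AddToScheme = SchemeBuilder.AddToScheme
)

// addKnownTypes adds the package's types to the given scheme.
func addKnownTypes(s *runtime.Scheme) error {
	s.AddKnownTypes(SchemeGroupVersion, &Widget{})
	return nil
}

// Widget is the same stand-in type used in the earlier Scheme sketch.
type Widget struct {
	runtime.TypeMeta `json:",inline"`
}

// GetObjectKind satisfies runtime.Object.
func (w *Widget) GetObjectKind() schema.ObjectKind { return &w.TypeMeta }

A consumer would then call widget.AddToScheme(s) once while wiring up its runtime.Scheme.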
+func (sb *SchemeBuilder) AddToScheme(s *Scheme) error { + for _, f := range *sb { + if err := f(s); err != nil { + return err + } + } + return nil +} + +// Register adds a scheme setup function to the list. +func (sb *SchemeBuilder) Register(funcs ...func(*Scheme) error) { + for _, f := range funcs { + *sb = append(*sb, f) + } +} + +// NewSchemeBuilder calls Register for you. +func NewSchemeBuilder(funcs ...func(*Scheme) error) SchemeBuilder { + var sb SchemeBuilder + sb.Register(funcs...) + return sb +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go new file mode 100644 index 000000000..65f451124 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -0,0 +1,237 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serializer + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" + "k8s.io/apimachinery/pkg/runtime/serializer/versioning" +) + +// serializerExtensions are for serializers that are conditionally compiled in +var serializerExtensions = []func(*runtime.Scheme) (serializerType, bool){} + +type serializerType struct { + AcceptContentTypes []string + ContentType string + FileExtensions []string + // EncodesAsText should be true if this content type can be represented safely in UTF-8 + EncodesAsText bool + + Serializer runtime.Serializer + PrettySerializer runtime.Serializer + + AcceptStreamContentTypes []string + StreamContentType string + + Framer runtime.Framer + StreamSerializer runtime.Serializer +} + +func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory) []serializerType { + jsonSerializer := json.NewSerializer(mf, scheme, scheme, false) + jsonPrettySerializer := json.NewSerializer(mf, scheme, scheme, true) + yamlSerializer := json.NewYAMLSerializer(mf, scheme, scheme) + + serializers := []serializerType{ + { + AcceptContentTypes: []string{"application/json"}, + ContentType: "application/json", + FileExtensions: []string{"json"}, + EncodesAsText: true, + Serializer: jsonSerializer, + PrettySerializer: jsonPrettySerializer, + + Framer: json.Framer, + StreamSerializer: jsonSerializer, + }, + { + AcceptContentTypes: []string{"application/yaml"}, + ContentType: "application/yaml", + FileExtensions: []string{"yaml"}, + EncodesAsText: true, + Serializer: yamlSerializer, + }, + } + + for _, fn := range serializerExtensions { + if serializer, ok := fn(scheme); ok { + serializers = append(serializers, serializer) + } + } + return serializers +} + +// CodecFactory provides methods for retrieving codecs and serializers for specific +// versions and content types. 
+type CodecFactory struct { + scheme *runtime.Scheme + serializers []serializerType + universal runtime.Decoder + accepts []runtime.SerializerInfo + + legacySerializer runtime.Serializer +} + +// NewCodecFactory provides methods for retrieving serializers for the supported wire formats +// and conversion wrappers to define preferred internal and external versions. In the future, +// as the internal version is used less, callers may instead use a defaulting serializer and +// only convert objects which are shared internally (Status, common API machinery). +// TODO: allow other codecs to be compiled in? +// TODO: accept a scheme interface +func NewCodecFactory(scheme *runtime.Scheme) CodecFactory { + serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory) + return newCodecFactory(scheme, serializers) +} + +// newCodecFactory is a helper for testing that allows a different metafactory to be specified. +func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) CodecFactory { + decoders := make([]runtime.Decoder, 0, len(serializers)) + var accepts []runtime.SerializerInfo + alreadyAccepted := make(map[string]struct{}) + + var legacySerializer runtime.Serializer + for _, d := range serializers { + decoders = append(decoders, d.Serializer) + for _, mediaType := range d.AcceptContentTypes { + if _, ok := alreadyAccepted[mediaType]; ok { + continue + } + alreadyAccepted[mediaType] = struct{}{} + info := runtime.SerializerInfo{ + MediaType: d.ContentType, + EncodesAsText: d.EncodesAsText, + Serializer: d.Serializer, + PrettySerializer: d.PrettySerializer, + } + if d.StreamSerializer != nil { + info.StreamSerializer = &runtime.StreamSerializerInfo{ + Serializer: d.StreamSerializer, + EncodesAsText: d.EncodesAsText, + Framer: d.Framer, + } + } + accepts = append(accepts, info) + if mediaType == runtime.ContentTypeJSON { + legacySerializer = d.Serializer + } + } + } + if legacySerializer == nil { + legacySerializer = serializers[0].Serializer + } + + return CodecFactory{ + scheme: scheme, + serializers: serializers, + universal: recognizer.NewDecoder(decoders...), + + accepts: accepts, + + legacySerializer: legacySerializer, + } +} + +// SupportedMediaTypes returns the RFC2046 media types that this factory has serializers for. +func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo { + return f.accepts +} + +// LegacyCodec encodes output to a given API versions, and decodes output into the internal form from +// any recognized source. The returned codec will always encode output to JSON. If a type is not +// found in the list of versions an error will be returned. +// +// This method is deprecated - clients and servers should negotiate a serializer by mime-type and +// invoke CodecForVersions. Callers that need only to read data should use UniversalDecoder(). +// +// TODO: make this call exist only in pkg/api, and initialize it with the set of default versions. +// All other callers will be forced to request a Codec directly. +func (f CodecFactory) LegacyCodec(version ...schema.GroupVersion) runtime.Codec { + return versioning.NewDefaultingCodecForScheme(f.scheme, f.legacySerializer, f.universal, schema.GroupVersions(version), runtime.InternalGroupVersioner) +} + +// UniversalDeserializer can convert any stored data recognized by this factory into a Go object that satisfies +// runtime.Object. It does not perform conversion. It does not perform defaulting. 
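A small, self-contained sketch of the factory in use; it decodes into *runtime.Unknown, so no Go types need to be registered, and the example.com/v1 Widget payload is invented:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

func main() {
	// An empty scheme is enough here, because decoding into *runtime.Unknown
	// never needs a registered Go type.
	codecs := serializer.NewCodecFactory(runtime.NewScheme())

	raw := []byte(`{"apiVersion":"example.com/v1","kind":"Widget","name":"a"}`)

	into := &runtime.Unknown{}
	_, gvk, err := codecs.UniversalDeserializer().Decode(raw, nil, into)
	if err != nil {
		panic(err)
	}
	fmt.Println(gvk.Group, gvk.Version, gvk.Kind) // example.com v1 Widget
	fmt.Println(len(into.Raw) == len(raw))        // true: raw payload preserved
}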
+func (f CodecFactory) UniversalDeserializer() runtime.Decoder { + return f.universal +} + +// UniversalDecoder returns a runtime.Decoder capable of decoding all known API objects in all known formats. Used +// by clients that do not need to encode objects but want to deserialize API objects stored on disk. Only decodes +// objects in groups registered with the scheme. The GroupVersions passed may be used to select alternate +// versions of objects to return - by default, runtime.APIVersionInternal is used. If any versions are specified, +// unrecognized groups will be returned in the version they are encoded as (no conversion). This decoder performs +// defaulting. +// +// TODO: the decoder will eventually be removed in favor of dealing with objects in their versioned form +// TODO: only accept a group versioner +func (f CodecFactory) UniversalDecoder(versions ...schema.GroupVersion) runtime.Decoder { + var versioner runtime.GroupVersioner + if len(versions) == 0 { + versioner = runtime.InternalGroupVersioner + } else { + versioner = schema.GroupVersions(versions) + } + return f.CodecForVersions(nil, f.universal, nil, versioner) +} + +// CodecForVersions creates a codec with the provided serializer. If an object is decoded and its group is not in the list, +// it will default to runtime.APIVersionInternal. If encode is not specified for an object's group, the object is not +// converted. If encode or decode are nil, no conversion is performed. +func (f CodecFactory) CodecForVersions(encoder runtime.Encoder, decoder runtime.Decoder, encode runtime.GroupVersioner, decode runtime.GroupVersioner) runtime.Codec { + // TODO: these are for backcompat, remove them in the future + if encode == nil { + encode = runtime.DisabledGroupVersioner + } + if decode == nil { + decode = runtime.InternalGroupVersioner + } + return versioning.NewDefaultingCodecForScheme(f.scheme, encoder, decoder, encode, decode) +} + +// DecoderToVersion returns a decoder that targets the provided group version. +func (f CodecFactory) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder { + return f.CodecForVersions(nil, decoder, nil, gv) +} + +// EncoderForVersion returns an encoder that targets the provided group version. +func (f CodecFactory) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { + return f.CodecForVersions(encoder, nil, gv, nil) +} + +// DirectCodecFactory provides methods for retrieving "DirectCodec"s, which do not do conversion. +type DirectCodecFactory struct { + CodecFactory +} + +// EncoderForVersion returns an encoder that does not do conversion. +func (f DirectCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder { + return versioning.DirectEncoder{ + Version: version, + Encoder: serializer, + ObjectTyper: f.CodecFactory.scheme, + } +} + +// DecoderToVersion returns an decoder that does not do conversion. gv is ignored. +func (f DirectCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { + return versioning.DirectDecoder{ + Decoder: serializer, + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go new file mode 100644 index 000000000..28bb91b53 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -0,0 +1,245 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package json + +import ( + "encoding/json" + "io" + + "github.com/ghodss/yaml" + "github.com/ugorji/go/codec" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" + "k8s.io/apimachinery/pkg/util/framer" + utilyaml "k8s.io/apimachinery/pkg/util/yaml" +) + +// NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer +// is not nil, the object has the group, version, and kind fields set. +func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer { + return &Serializer{ + meta: meta, + creater: creater, + typer: typer, + yaml: false, + pretty: pretty, + } +} + +// NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer +// is not nil, the object has the group, version, and kind fields set. This serializer supports only the subset of YAML that +// matches JSON, and will error if constructs are used that do not serialize to JSON. +func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { + return &Serializer{ + meta: meta, + creater: creater, + typer: typer, + yaml: true, + } +} + +type Serializer struct { + meta MetaFactory + creater runtime.ObjectCreater + typer runtime.ObjectTyper + yaml bool + pretty bool +} + +// Serializer implements Serializer +var _ runtime.Serializer = &Serializer{} +var _ recognizer.RecognizingDecoder = &Serializer{} + +// Decode attempts to convert the provided data into YAML or JSON, extract the stored schema kind, apply the provided default gvk, and then +// load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, the raw data will be +// extracted and no decoding will be performed. If into is not registered with the typer, then the object will be straight decoded using +// normal JSON/YAML unmarshalling. If into is provided and the original data is not fully qualified with kind/version/group, the type of +// the into will be used to alter the returned gvk. On success or most errors, the method will return the calculated schema kind. 
+func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + if versioned, ok := into.(*runtime.VersionedObjects); ok { + into = versioned.Last() + obj, actual, err := s.Decode(originalData, gvk, into) + if err != nil { + return nil, actual, err + } + versioned.Objects = []runtime.Object{obj} + return versioned, actual, nil + } + + data := originalData + if s.yaml { + altered, err := yaml.YAMLToJSON(data) + if err != nil { + return nil, nil, err + } + data = altered + } + + actual, err := s.meta.Interpret(data) + if err != nil { + return nil, nil, err + } + + if gvk != nil { + // apply kind and version defaulting from provided default + if len(actual.Kind) == 0 { + actual.Kind = gvk.Kind + } + if len(actual.Version) == 0 && len(actual.Group) == 0 { + actual.Group = gvk.Group + actual.Version = gvk.Version + } + if len(actual.Version) == 0 && actual.Group == gvk.Group { + actual.Version = gvk.Version + } + } + + if unk, ok := into.(*runtime.Unknown); ok && unk != nil { + unk.Raw = originalData + unk.ContentType = runtime.ContentTypeJSON + unk.GetObjectKind().SetGroupVersionKind(*actual) + return unk, actual, nil + } + + if into != nil { + types, _, err := s.typer.ObjectKinds(into) + switch { + case runtime.IsNotRegisteredError(err): + if err := codec.NewDecoderBytes(data, new(codec.JsonHandle)).Decode(into); err != nil { + return nil, actual, err + } + return into, actual, nil + case err != nil: + return nil, actual, err + default: + typed := types[0] + if len(actual.Kind) == 0 { + actual.Kind = typed.Kind + } + if len(actual.Version) == 0 && len(actual.Group) == 0 { + actual.Group = typed.Group + actual.Version = typed.Version + } + if len(actual.Version) == 0 && actual.Group == typed.Group { + actual.Version = typed.Version + } + } + } + + if len(actual.Kind) == 0 { + return nil, actual, runtime.NewMissingKindErr(string(originalData)) + } + if len(actual.Version) == 0 { + return nil, actual, runtime.NewMissingVersionErr(string(originalData)) + } + + // use the target if necessary + obj, err := runtime.UseOrCreateObject(s.typer, s.creater, *actual, into) + if err != nil { + return nil, actual, err + } + + if err := codec.NewDecoderBytes(data, new(codec.JsonHandle)).Decode(obj); err != nil { + return nil, actual, err + } + return obj, actual, nil +} + +// Encode serializes the provided object to the given writer. +func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { + if s.yaml { + json, err := json.Marshal(obj) + if err != nil { + return err + } + data, err := yaml.JSONToYAML(json) + if err != nil { + return err + } + _, err = w.Write(data) + return err + } + + if s.pretty { + data, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return err + } + _, err = w.Write(data) + return err + } + encoder := json.NewEncoder(w) + return encoder.Encode(obj) +} + +// RecognizesData implements the RecognizingDecoder interface. +func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) { + if s.yaml { + // we could potentially look for '---' + return false, true, nil + } + _, _, ok = utilyaml.GuessJSONStream(peek, 2048) + return ok, false, nil +} + +// Framer is the default JSON framing behavior, with newlines delimiting individual objects. 
+var Framer = jsonFramer{} + +type jsonFramer struct{} + +// NewFrameWriter implements stream framing for this serializer +func (jsonFramer) NewFrameWriter(w io.Writer) io.Writer { + // we can write JSON objects directly to the writer, because they are self-framing + return w +} + +// NewFrameReader implements stream framing for this serializer +func (jsonFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { + // we need to extract the JSON chunks of data to pass to Decode() + return framer.NewJSONFramedReader(r) +} + +// Framer is the default JSON framing behavior, with newlines delimiting individual objects. +var YAMLFramer = yamlFramer{} + +type yamlFramer struct{} + +// NewFrameWriter implements stream framing for this serializer +func (yamlFramer) NewFrameWriter(w io.Writer) io.Writer { + return yamlFrameWriter{w} +} + +// NewFrameReader implements stream framing for this serializer +func (yamlFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { + // extract the YAML document chunks directly + return utilyaml.NewDocumentDecoder(r) +} + +type yamlFrameWriter struct { + w io.Writer +} + +// Write separates each document with the YAML document separator (`---` followed by line +// break). Writers must write well formed YAML documents (include a final line break). +func (w yamlFrameWriter) Write(data []byte) (n int, err error) { + if _, err := w.w.Write([]byte("---\n")); err != nil { + return 0, err + } + return w.w.Write(data) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go new file mode 100644 index 000000000..df3f5f989 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go @@ -0,0 +1,63 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package json + +import ( + "encoding/json" + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// MetaFactory is used to store and retrieve the version and kind +// information for JSON objects in a serializer. +type MetaFactory interface { + // Interpret should return the version and kind of the wire-format of + // the object. + Interpret(data []byte) (*schema.GroupVersionKind, error) +} + +// DefaultMetaFactory is a default factory for versioning objects in JSON. The object +// in memory and in the default JSON serialization will use the "kind" and "apiVersion" +// fields. +var DefaultMetaFactory = SimpleMetaFactory{} + +// SimpleMetaFactory provides default methods for retrieving the type and version of objects +// that are identified with an "apiVersion" and "kind" fields in their JSON +// serialization. It may be parameterized with the names of the fields in memory, or an +// optional list of base structs to search for those fields in memory. +type SimpleMetaFactory struct { +} + +// Interpret will return the APIVersion and Kind of the JSON wire-format +// encoding of an object, or an error. 
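For instance, the default MetaFactory can be used on its own to peek at type information without fully decoding a document (the example.com/v1 Widget payload is again invented):

package main

import (
	"fmt"

	serializerjson "k8s.io/apimachinery/pkg/runtime/serializer/json"
)

func main() {
	raw := []byte(`{"apiVersion":"example.com/v1","kind":"Widget"}`)

	// Interpret only reads apiVersion and kind; the rest of the document is ignored.
	gvk, err := serializerjson.DefaultMetaFactory.Interpret(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(gvk.Group, gvk.Version, gvk.Kind) // example.com v1 Widget
}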
+func (SimpleMetaFactory) Interpret(data []byte) (*schema.GroupVersionKind, error) { + findKind := struct { + // +optional + APIVersion string `json:"apiVersion,omitempty"` + // +optional + Kind string `json:"kind,omitempty"` + }{} + if err := json.Unmarshal(data, &findKind); err != nil { + return nil, fmt.Errorf("couldn't get version/kind; json parse error: %v", err) + } + gv, err := schema.ParseGroupVersion(findKind.APIVersion) + if err != nil { + return nil, err + } + return &schema.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: findKind.Kind}, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go new file mode 100644 index 000000000..a42b4a41a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go @@ -0,0 +1,43 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serializer + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// TODO: We should split negotiated serializers that we can change versions on from those we can change +// serialization formats on +type negotiatedSerializerWrapper struct { + info runtime.SerializerInfo +} + +func NegotiatedSerializerWrapper(info runtime.SerializerInfo) runtime.NegotiatedSerializer { + return &negotiatedSerializerWrapper{info} +} + +func (n *negotiatedSerializerWrapper) SupportedMediaTypes() []runtime.SerializerInfo { + return []runtime.SerializerInfo{n.info} +} + +func (n *negotiatedSerializerWrapper) EncoderForVersion(e runtime.Encoder, _ runtime.GroupVersioner) runtime.Encoder { + return e +} + +func (n *negotiatedSerializerWrapper) DecoderToVersion(d runtime.Decoder, _gv runtime.GroupVersioner) runtime.Decoder { + return d +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go new file mode 100644 index 000000000..381748d69 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package protobuf provides a Kubernetes serializer for the protobuf format. 
+package protobuf diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go similarity index 89% rename from vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/protobuf.go rename to vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go index a202a18d6..8d4ea7118 100644 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/protobuf.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -24,9 +24,10 @@ import ( "github.com/gogo/protobuf/proto" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/framer" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" + "k8s.io/apimachinery/pkg/util/framer" ) var ( @@ -35,7 +36,7 @@ var ( // byte being reserved for the encoding style. The only encoding style defined is 0x00, which means that // the rest of the byte stream is a message of type k8s.io.kubernetes.pkg.runtime.Unknown (proto2). // - // See k8s.io/kubernetes/pkg/runtime/generated.proto for details of the runtime.Unknown message. + // See k8s.io/apimachinery/pkg/runtime/generated.proto for details of the runtime.Unknown message. // // This encoding scheme is experimental, and is subject to change at any time. protoEncodingPrefix = []byte{0x6b, 0x38, 0x73, 0x00} @@ -76,6 +77,7 @@ type Serializer struct { } var _ runtime.Serializer = &Serializer{} +var _ recognizer.RecognizingDecoder = &Serializer{} // Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default // gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, @@ -83,7 +85,7 @@ var _ runtime.Serializer = &Serializer{} // be straight decoded using normal protobuf unmarshalling (the MarshalTo interface). If into is provided and the original data is // not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most // errors, the method will return the calculated schema kind. -func (s *Serializer) Decode(originalData []byte, gvk *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { +func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { if versioned, ok := into.(*runtime.VersionedObjects); ok { into = versioned.Last() obj, actual, err := s.Decode(originalData, gvk, into) @@ -124,7 +126,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *unversioned.GroupVersionKi if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil { *intoUnknown = unk - if len(intoUnknown.ContentType) == 0 { + if ok, _, _ := s.RecognizesData(bytes.NewBuffer(unk.Raw)); ok { intoUnknown.ContentType = s.contentType } return intoUnknown, &actual, nil @@ -167,17 +169,30 @@ func (s *Serializer) Decode(originalData []byte, gvk *unversioned.GroupVersionKi // Encode serializes the provided object to the given writer. 
func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { + prefixSize := uint64(len(s.prefix)) + var unk runtime.Unknown - kind := obj.GetObjectKind().GroupVersionKind() - unk = runtime.Unknown{ - TypeMeta: runtime.TypeMeta{ - Kind: kind.Kind, - APIVersion: kind.GroupVersion().String(), - }, + switch t := obj.(type) { + case *runtime.Unknown: + estimatedSize := prefixSize + uint64(t.Size()) + data := make([]byte, estimatedSize) + i, err := t.MarshalTo(data[prefixSize:]) + if err != nil { + return err + } + copy(data, s.prefix) + _, err = w.Write(data[:prefixSize+uint64(i)]) + return err + default: + kind := obj.GetObjectKind().GroupVersionKind() + unk = runtime.Unknown{ + TypeMeta: runtime.TypeMeta{ + Kind: kind.Kind, + APIVersion: kind.GroupVersion().String(), + }, + } } - prefixSize := uint64(len(s.prefix)) - switch t := obj.(type) { case bufferedMarshaller: // this path performs a single allocation during write but requires the caller to implement @@ -224,23 +239,23 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { } // RecognizesData implements the RecognizingDecoder interface. -func (s *Serializer) RecognizesData(peek io.Reader) (bool, error) { +func (s *Serializer) RecognizesData(peek io.Reader) (bool, bool, error) { prefix := make([]byte, 4) n, err := peek.Read(prefix) if err != nil { if err == io.EOF { - return false, nil + return false, false, nil } - return false, err + return false, false, err } if n != 4 { - return false, nil + return false, false, nil } - return bytes.Equal(s.prefix, prefix), nil + return bytes.Equal(s.prefix, prefix), false, nil } // copyKindDefaults defaults dst to the value in src if dst does not have a value set. -func copyKindDefaults(dst, src *unversioned.GroupVersionKind) { +func copyKindDefaults(dst, src *schema.GroupVersionKind) { if src == nil { return } @@ -301,7 +316,7 @@ var _ runtime.Serializer = &RawSerializer{} // be straight decoded using normal protobuf unmarshalling (the MarshalTo interface). If into is provided and the original data is // not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most // errors, the method will return the calculated schema kind. -func (s *RawSerializer) Decode(originalData []byte, gvk *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { +func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { if into == nil { return nil, nil, fmt.Errorf("this serializer requires an object to decode into: %#v", s) } @@ -326,7 +341,7 @@ func (s *RawSerializer) Decode(originalData []byte, gvk *unversioned.GroupVersio } data := originalData - actual := &unversioned.GroupVersionKind{} + actual := &schema.GroupVersionKind{} copyKindDefaults(actual, gvk) if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil { @@ -371,7 +386,7 @@ func (s *RawSerializer) Decode(originalData []byte, gvk *unversioned.GroupVersio } // unmarshalToObject is the common code between decode in the raw and normal serializer. 
-func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater, actual *unversioned.GroupVersionKind, into runtime.Object, data []byte) (runtime.Object, *unversioned.GroupVersionKind, error) { +func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater, actual *schema.GroupVersionKind, into runtime.Object, data []byte) (runtime.Object, *schema.GroupVersionKind, error) { // use the target if necessary obj, err := runtime.UseOrCreateObject(typer, creater, *actual, into) if err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go new file mode 100644 index 000000000..545cf78df --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go @@ -0,0 +1,48 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serializer + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/protobuf" +) + +const ( + // contentTypeProtobuf is the protobuf type exposed for Kubernetes. It is private to prevent others from + // depending on it unintentionally. + // TODO: potentially move to pkg/api (since it's part of the Kube public API) and pass it in to the + // CodecFactory on initialization. + contentTypeProtobuf = "application/vnd.kubernetes.protobuf" +) + +func protobufSerializer(scheme *runtime.Scheme) (serializerType, bool) { + serializer := protobuf.NewSerializer(scheme, scheme, contentTypeProtobuf) + raw := protobuf.NewRawSerializer(scheme, scheme, contentTypeProtobuf) + return serializerType{ + AcceptContentTypes: []string{contentTypeProtobuf}, + ContentType: contentTypeProtobuf, + FileExtensions: []string{"pb"}, + Serializer: serializer, + + Framer: protobuf.LengthDelimitedFramer, + StreamSerializer: raw, + }, true +} + +func init() { + serializerExtensions = append(serializerExtensions, protobufSerializer) +} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/recognizer/recognizer.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/runtime/serializer/recognizer/recognizer.go rename to vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go index 4b8b1e204..38497ab53 100644 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/recognizer/recognizer.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -22,8 +22,8 @@ import ( "fmt" "io" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) type RecognizingDecoder interface { @@ -81,7 +81,7 @@ func (d *decoder) RecognizesData(peek io.Reader) (bool, bool, error) { return false, anyUnknown, lastErr } -func (d *decoder) Decode(data []byte, gvk *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { +func (d *decoder) Decode(data []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { var ( lastErr error skipped []runtime.Decoder diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go similarity index 89% rename from vendor/k8s.io/kubernetes/pkg/runtime/serializer/streaming/streaming.go rename to vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go index c34f9a570..91fd4ed4f 100644 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/streaming/streaming.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,8 +23,8 @@ import ( "fmt" "io" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // Encoder is a runtime.Encoder on a stream. @@ -37,7 +37,7 @@ type Encoder interface { // Decoder is a runtime.Decoder from a stream. type Decoder interface { // Decode will return io.EOF when no more objects are available. - Decode(defaults *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) + Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) // Close closes the underlying stream. Close() error } @@ -71,7 +71,7 @@ func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder { var ErrObjectTooLarge = fmt.Errorf("object to decode was longer than maximum allowed size") // Decode reads the next object from the stream and decodes it. -func (d *decoder) Decode(defaults *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { +func (d *decoder) Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { base := 0 for { n, err := d.reader.Read(d.buf[base:]) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go new file mode 100644 index 000000000..829d4fa89 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -0,0 +1,273 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package versioning + +import ( + "io" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +// NewCodecForScheme is a convenience method for callers that are using a scheme. +func NewCodecForScheme( + // TODO: I should be a scheme interface? + scheme *runtime.Scheme, + encoder runtime.Encoder, + decoder runtime.Decoder, + encodeVersion runtime.GroupVersioner, + decodeVersion runtime.GroupVersioner, +) runtime.Codec { + return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, nil, encodeVersion, decodeVersion) +} + +// NewDefaultingCodecForScheme is a convenience method for callers that are using a scheme. +func NewDefaultingCodecForScheme( + // TODO: I should be a scheme interface? + scheme *runtime.Scheme, + encoder runtime.Encoder, + decoder runtime.Decoder, + encodeVersion runtime.GroupVersioner, + decodeVersion runtime.GroupVersioner, +) runtime.Codec { + return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, scheme, encodeVersion, decodeVersion) +} + +// NewCodec takes objects in their internal versions and converts them to external versions before +// serializing them. It assumes the serializer provided to it only deals with external versions. +// This class is also a serializer, but is generally used with a specific version. +func NewCodec( + encoder runtime.Encoder, + decoder runtime.Decoder, + convertor runtime.ObjectConvertor, + creater runtime.ObjectCreater, + copier runtime.ObjectCopier, + typer runtime.ObjectTyper, + defaulter runtime.ObjectDefaulter, + encodeVersion runtime.GroupVersioner, + decodeVersion runtime.GroupVersioner, +) runtime.Codec { + internal := &codec{ + encoder: encoder, + decoder: decoder, + convertor: convertor, + creater: creater, + copier: copier, + typer: typer, + defaulter: defaulter, + + encodeVersion: encodeVersion, + decodeVersion: decodeVersion, + } + return internal +} + +type codec struct { + encoder runtime.Encoder + decoder runtime.Decoder + convertor runtime.ObjectConvertor + creater runtime.ObjectCreater + copier runtime.ObjectCopier + typer runtime.ObjectTyper + defaulter runtime.ObjectDefaulter + + encodeVersion runtime.GroupVersioner + decodeVersion runtime.GroupVersioner +} + +// Decode attempts a decode of the object, then tries to convert it to the internal version. If into is provided and the decoding is +// successful, the returned runtime.Object will be the value passed as into. Note that this may bypass conversion if you pass an +// into that matches the serialized version. 
+func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + versioned, isVersioned := into.(*runtime.VersionedObjects) + if isVersioned { + into = versioned.Last() + } + + obj, gvk, err := c.decoder.Decode(data, defaultGVK, into) + if err != nil { + return nil, gvk, err + } + + if d, ok := obj.(runtime.NestedObjectDecoder); ok { + if err := d.DecodeNestedObjects(DirectDecoder{c.decoder}); err != nil { + return nil, gvk, err + } + } + + // if we specify a target, use generic conversion. + if into != nil { + if into == obj { + if isVersioned { + return versioned, gvk, nil + } + return into, gvk, nil + } + + // perform defaulting if requested + if c.defaulter != nil { + // create a copy to ensure defaulting is not applied to the original versioned objects + if isVersioned { + copied, err := c.copier.Copy(obj) + if err != nil { + utilruntime.HandleError(err) + copied = obj + } + versioned.Objects = []runtime.Object{copied} + } + c.defaulter.Default(obj) + } else { + if isVersioned { + versioned.Objects = []runtime.Object{obj} + } + } + + if err := c.convertor.Convert(obj, into, c.decodeVersion); err != nil { + return nil, gvk, err + } + + if isVersioned { + versioned.Objects = append(versioned.Objects, into) + return versioned, gvk, nil + } + return into, gvk, nil + } + + // Convert if needed. + if isVersioned { + // create a copy, because ConvertToVersion does not guarantee non-mutation of objects + copied, err := c.copier.Copy(obj) + if err != nil { + utilruntime.HandleError(err) + copied = obj + } + versioned.Objects = []runtime.Object{copied} + } + + // perform defaulting if requested + if c.defaulter != nil { + c.defaulter.Default(obj) + } + + out, err := c.convertor.ConvertToVersion(obj, c.decodeVersion) + if err != nil { + return nil, gvk, err + } + if isVersioned { + if versioned.Last() != out { + versioned.Objects = append(versioned.Objects, out) + } + return versioned, gvk, nil + } + return out, gvk, nil +} + +// Encode ensures the provided object is output in the appropriate group and version, invoking +// conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is. 
+func (c *codec) Encode(obj runtime.Object, w io.Writer) error { + switch obj.(type) { + case *runtime.Unknown, runtime.Unstructured: + return c.encoder.Encode(obj, w) + } + + gvks, isUnversioned, err := c.typer.ObjectKinds(obj) + if err != nil { + return err + } + + if c.encodeVersion == nil || isUnversioned { + if e, ok := obj.(runtime.NestedObjectEncoder); ok { + if err := e.EncodeNestedObjects(DirectEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { + return err + } + } + objectKind := obj.GetObjectKind() + old := objectKind.GroupVersionKind() + objectKind.SetGroupVersionKind(gvks[0]) + err = c.encoder.Encode(obj, w) + objectKind.SetGroupVersionKind(old) + return err + } + + // Perform a conversion if necessary + objectKind := obj.GetObjectKind() + old := objectKind.GroupVersionKind() + out, err := c.convertor.ConvertToVersion(obj, c.encodeVersion) + if err != nil { + return err + } + + if e, ok := out.(runtime.NestedObjectEncoder); ok { + if err := e.EncodeNestedObjects(DirectEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { + return err + } + } + + // Conversion is responsible for setting the proper group, version, and kind onto the outgoing object + err = c.encoder.Encode(out, w) + // restore the old GVK, in case conversion returned the same object + objectKind.SetGroupVersionKind(old) + return err +} + +// DirectEncoder serializes an object and ensures the GVK is set. +type DirectEncoder struct { + Version runtime.GroupVersioner + runtime.Encoder + runtime.ObjectTyper +} + +// Encode does not do conversion. It sets the gvk during serialization. +func (e DirectEncoder) Encode(obj runtime.Object, stream io.Writer) error { + gvks, _, err := e.ObjectTyper.ObjectKinds(obj) + if err != nil { + if runtime.IsNotRegisteredError(err) { + return e.Encoder.Encode(obj, stream) + } + return err + } + kind := obj.GetObjectKind() + oldGVK := kind.GroupVersionKind() + gvk := gvks[0] + if e.Version != nil { + preferredGVK, ok := e.Version.KindForGroupVersionKinds(gvks) + if ok { + gvk = preferredGVK + } + } + kind.SetGroupVersionKind(gvk) + err = e.Encoder.Encode(obj, stream) + kind.SetGroupVersionKind(oldGVK) + return err +} + +// DirectDecoder clears the group version kind of a deserialized object. +type DirectDecoder struct { + runtime.Decoder +} + +// Decode does not do conversion. It removes the gvk during deserialization. +func (d DirectDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + obj, gvk, err := d.Decoder.Decode(data, defaults, into) + if obj != nil { + kind := obj.GetObjectKind() + // clearing the gvk is just a convention of a codec + kind.SetGroupVersionKind(schema.GroupVersionKind{}) + } + return obj, gvk, err +} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/swagger_doc_generator.go b/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/runtime/swagger_doc_generator.go rename to vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go index 19b8378a4..29722d52e 100644 --- a/vendor/k8s.io/kubernetes/pkg/runtime/swagger_doc_generator.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
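For orientation, the codec defined above is normally constructed from a Scheme plus a plain serializer that only understands external versions. A minimal sketch, assuming the serializer/json package and a Scheme with types already registered; buildCodec and the chosen group versions are illustrative, not part of this change:

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	serializerjson "k8s.io/apimachinery/pkg/runtime/serializer/json"
	"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
)

// buildCodec wires a JSON serializer into the versioning codec above so that
// objects are converted to `external` on encode and to the internal version on decode.
func buildCodec(scheme *runtime.Scheme, external schema.GroupVersion) runtime.Codec {
	jsonSerializer := serializerjson.NewSerializer(serializerjson.DefaultMetaFactory, scheme, scheme, false)
	internal := schema.GroupVersion{Group: external.Group, Version: runtime.APIVersionInternal}
	return versioning.NewDefaultingCodecForScheme(scheme, jsonSerializer, jsonSerializer, external, internal)
}

Callers would then use runtime.Encode(codec, obj) and runtime.Decode(codec, data) with the returned codec.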
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go new file mode 100644 index 000000000..f972c5e69 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -0,0 +1,133 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +// Note that the types provided in this file are not versioned and are intended to be +// safe to use from within all versions of every API object. + +// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, +// like this: +// type MyAwesomeAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// ... // other fields +// } +// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind +// +// TypeMeta is provided here for convenience. You may use it directly from this package or define +// your own with the same fields. +// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +type TypeMeta struct { + // +optional + APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"` + // +optional + Kind string `json:"kind,omitempty" yaml:"kind,omitempty" protobuf:"bytes,2,opt,name=kind"` +} + +const ( + ContentTypeJSON string = "application/json" +) + +// RawExtension is used to hold extensions in external versions. +// +// To use this, make a field which has RawExtension as its type in your external, versioned +// struct, and Object in your internal struct. You also need to register your +// various plugin types. +// +// // Internal package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.Object `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // External package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.RawExtension `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // On the wire, the JSON will look something like this: +// { +// "kind":"MyAPIObject", +// "apiVersion":"v1", +// "myPlugin": { +// "kind":"PluginA", +// "aOption":"foo", +// }, +// } +// +// So what happens? Decode first uses json or yaml to unmarshal the serialized data into +// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. +// The next step is to copy (using pkg/conversion) into the internal struct. The runtime +// package's DefaultScheme has conversion functions installed which will unpack the +// JSON stored in RawExtension, turning it into the correct object type, and storing it +// in the Object. (TODO: In the case where the object is of an unknown type, a +// runtime.Unknown object will be created and stored.) 
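The behaviour described above is easy to see with plain encoding/json: because RawExtension (defined below, with its JSON marshalling in a sibling file of this package) keeps the nested document as raw bytes, decoding the external struct does not unpack the plugin payload. A small sketch reusing the MyAPIObject/PluginA names from the comment; the types here are illustrative only:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

// External, versioned shape: the plugin payload stays opaque until conversion.
type MyAPIObject struct {
	runtime.TypeMeta `json:",inline"`
	MyPlugin         runtime.RawExtension `json:"myPlugin"`
}

func main() {
	data := []byte(`{"kind":"MyAPIObject","apiVersion":"v1","myPlugin":{"kind":"PluginA","aOption":"foo"}}`)
	var obj MyAPIObject
	if err := json.Unmarshal(data, &obj); err != nil {
		panic(err)
	}
	// Raw holds the nested JSON verbatim; Object stays nil until a scheme converts it.
	fmt.Printf("kind=%s raw=%s\n", obj.Kind, obj.MyPlugin.Raw)
}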
+// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +type RawExtension struct { + // Raw is the underlying serialization of this object. + // + // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. + Raw []byte `protobuf:"bytes,1,opt,name=raw"` + // Object can hold a representation of this extension - useful for working with versioned + // structs. + Object Object `json:"-"` +} + +// Unknown allows api objects with unknown types to be passed-through. This can be used +// to deal with the API objects from a plug-in. Unknown objects still have functioning +// TypeMeta features-- kind, version, etc. +// TODO: Make this object have easy access to field based accessors and settors for +// metadata and field mutatation. +// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +type Unknown struct { + TypeMeta `json:",inline" protobuf:"bytes,1,opt,name=typeMeta"` + // Raw will hold the complete serialized object which couldn't be matched + // with a registered type. Most likely, nothing should be done with this + // except for passing it through the system. + Raw []byte `protobuf:"bytes,2,opt,name=raw"` + // ContentEncoding is encoding used to encode 'Raw' data. + // Unspecified means no encoding. + ContentEncoding string `protobuf:"bytes,3,opt,name=contentEncoding"` + // ContentType is serialization method used to serialize 'Raw'. + // Unspecified means ContentTypeJSON. + ContentType string `protobuf:"bytes,4,opt,name=contentType"` +} + +// VersionedObjects is used by Decoders to give callers a way to access all versions +// of an object during the decoding process. +type VersionedObjects struct { + // Objects is the set of objects retrieved during decoding, in order of conversion. + // The 0 index is the object as serialized on the wire. If conversion has occurred, + // other objects may be present. The right most object is the same as would be returned + // by a normal Decode call. + Objects []Object +} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/types_proto.go b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go similarity index 96% rename from vendor/k8s.io/kubernetes/pkg/runtime/types_proto.go rename to vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go index 142dd05da..ead96ee05 100644 --- a/vendor/k8s.io/kubernetes/pkg/runtime/types_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go new file mode 100644 index 000000000..b75b3ebb2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go @@ -0,0 +1,80 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package runtime + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + reflect "reflect" +) + +// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. +func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { + return []conversion.GeneratedDeepCopyFunc{ + {Fn: DeepCopy_runtime_RawExtension, InType: reflect.TypeOf(&RawExtension{})}, + {Fn: DeepCopy_runtime_TypeMeta, InType: reflect.TypeOf(&TypeMeta{})}, + {Fn: DeepCopy_runtime_Unknown, InType: reflect.TypeOf(&Unknown{})}, + } +} + +func DeepCopy_runtime_RawExtension(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RawExtension) + out := out.(*RawExtension) + *out = *in + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) + } + // in.Object is kind 'Interface' + if in.Object != nil { + if newVal, err := c.DeepCopy(&in.Object); err != nil { + return err + } else { + out.Object = *newVal.(*Object) + } + } + return nil + } +} + +func DeepCopy_runtime_TypeMeta(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TypeMeta) + out := out.(*TypeMeta) + *out = *in + return nil + } +} + +func DeepCopy_runtime_Unknown(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Unknown) + out := out.(*Unknown) + *out = *in + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return nil + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/selection/operator.go b/vendor/k8s.io/apimachinery/pkg/selection/operator.go new file mode 100644 index 000000000..298f798c4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/selection/operator.go @@ -0,0 +1,33 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package selection + +// Operator represents a key/field's relationship to value(s). +// See labels.Requirement and fields.Requirement for more details. +type Operator string + +const ( + DoesNotExist Operator = "!" + Equals Operator = "=" + DoubleEquals Operator = "==" + In Operator = "in" + NotEquals Operator = "!=" + NotIn Operator = "notin" + Exists Operator = "exists" + GreaterThan Operator = "gt" + LessThan Operator = "lt" +) diff --git a/vendor/k8s.io/apimachinery/pkg/types/doc.go b/vendor/k8s.io/apimachinery/pkg/types/doc.go new file mode 100644 index 000000000..783cbcdc8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package types implements various generic types used throughout kubernetes. +package types diff --git a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go new file mode 100644 index 000000000..1e2130da0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go @@ -0,0 +1,60 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "fmt" + "strings" +) + +// NamespacedName comprises a resource name, with a mandatory namespace, +// rendered as "<namespace>/<name>". Being a type captures intent and +// helps make sure that UIDs, namespaced names and non-namespaced names +// do not get conflated in code. For most use cases, namespace and name +// will already have been format validated at the API entry point, so we +// don't do that here. Where that's not the case (e.g. in testing), +// consider using NamespacedNameOrDie() in testing.go in this package. + +type NamespacedName struct { + Namespace string + Name string +} + +const ( + Separator = '/' +) + +// String returns the general purpose string representation +func (n NamespacedName) String() string { + return fmt.Sprintf("%s%c%s", n.Namespace, Separator, n.Name) +} + +// NewNamespacedNameFromString parses the provided string and returns a NamespacedName. +// The expected format is as per String() above. +// If the input string is invalid, the returned NamespacedName has all empty string field values. +// This allows a single-value return from this function, while still allowing error checks in the caller. +// Note that an input string which does not include exactly one Separator is not a valid input (as it could never +// have neem returned by String() ) +func NewNamespacedNameFromString(s string) NamespacedName { + nn := NamespacedName{} + result := strings.Split(s, string(Separator)) + if len(result) == 2 { + nn.Namespace = result[0] + nn.Name = result[1] + } + return nn +} diff --git a/vendor/k8s.io/apimachinery/pkg/types/nodename.go b/vendor/k8s.io/apimachinery/pkg/types/nodename.go new file mode 100644 index 000000000..fee348d7e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/nodename.go @@ -0,0 +1,43 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
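As a quick illustration of the NamespacedName parsing rules above (the namespace and name values are arbitrary):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
)

func main() {
	nn := types.NamespacedName{Namespace: "kube-system", Name: "kube-dns"}
	fmt.Println(nn.String()) // kube-system/kube-dns

	// Round-trips the "<namespace>/<name>" form; malformed input yields empty fields.
	parsed := types.NewNamespacedNameFromString("default/web")
	bad := types.NewNamespacedNameFromString("no-separator")
	fmt.Println(parsed.Namespace, parsed.Name, bad == (types.NamespacedName{}))
}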
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +// NodeName is a type that holds a api.Node's Name identifier. +// Being a type captures intent and helps make sure that the node name +// is not confused with similar concepts (the hostname, the cloud provider id, +// the cloud provider name etc) +// +// To clarify the various types: +// +// * Node.Name is the Name field of the Node in the API. This should be stored in a NodeName. +// Unfortunately, because Name is part of ObjectMeta, we can't store it as a NodeName at the API level. +// +// * Hostname is the hostname of the local machine (from uname -n). +// However, some components allow the user to pass in a --hostname-override flag, +// which will override this in most places. In the absence of anything more meaningful, +// kubelet will use Hostname as the Node.Name when it creates the Node. +// +// * The cloudproviders have the own names: GCE has InstanceName, AWS has InstanceId. +// +// For GCE, InstanceName is the Name of an Instance object in the GCE API. On GCE, Instance.Name becomes the +// Hostname, and thus it makes sense also to use it as the Node.Name. But that is GCE specific, and it is up +// to the cloudprovider how to do this mapping. +// +// For AWS, the InstanceID is not yet suitable for use as a Node.Name, so we actually use the +// PrivateDnsName for the Node.Name. And this is _not_ always the same as the hostname: if +// we are using a custom DHCP domain it won't be. +type NodeName string diff --git a/vendor/k8s.io/apimachinery/pkg/types/patch.go b/vendor/k8s.io/apimachinery/pkg/types/patch.go new file mode 100644 index 000000000..d522d1dbd --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/patch.go @@ -0,0 +1,28 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +// Similarly to above, these are constants to support HTTP PATCH utilized by +// both the client and server that didn't make sense for a whole package to be +// dedicated to. +type PatchType string + +const ( + JSONPatchType PatchType = "application/json-patch+json" + MergePatchType PatchType = "application/merge-patch+json" + StrategicMergePatchType PatchType = "application/strategic-merge-patch+json" +) diff --git a/vendor/k8s.io/kubernetes/pkg/types/uid.go b/vendor/k8s.io/apimachinery/pkg/types/uid.go similarity index 93% rename from vendor/k8s.io/kubernetes/pkg/types/uid.go rename to vendor/k8s.io/apimachinery/pkg/types/uid.go index de6cd18fb..869339222 100644 --- a/vendor/k8s.io/kubernetes/pkg/types/uid.go +++ b/vendor/k8s.io/apimachinery/pkg/types/uid.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. 
+Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/kubernetes/pkg/types/unix_user_id.go b/vendor/k8s.io/apimachinery/pkg/types/unix_user_id.go similarity index 92% rename from vendor/k8s.io/kubernetes/pkg/types/unix_user_id.go rename to vendor/k8s.io/apimachinery/pkg/types/unix_user_id.go index b59792abf..dc770c11e 100644 --- a/vendor/k8s.io/kubernetes/pkg/types/unix_user_id.go +++ b/vendor/k8s.io/apimachinery/pkg/types/unix_user_id.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go new file mode 100644 index 000000000..0f730875e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go @@ -0,0 +1,280 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package diff + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "sort" + "strings" + "text/tabwriter" + + "github.com/davecgh/go-spew/spew" + + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// StringDiff diffs a and b and returns a human readable diff. +func StringDiff(a, b string) string { + ba := []byte(a) + bb := []byte(b) + out := []byte{} + i := 0 + for ; i < len(ba) && i < len(bb); i++ { + if ba[i] != bb[i] { + break + } + out = append(out, ba[i]) + } + out = append(out, []byte("\n\nA: ")...) + out = append(out, ba[i:]...) + out = append(out, []byte("\n\nB: ")...) + out = append(out, bb[i:]...) + out = append(out, []byte("\n\n")...) + return string(out) +} + +// ObjectDiff writes the two objects out as JSON and prints out the identical part of +// the objects followed by the remaining part of 'a' and finally the remaining part of 'b'. +// For debugging tests. +func ObjectDiff(a, b interface{}) string { + ab, err := json.Marshal(a) + if err != nil { + panic(fmt.Sprintf("a: %v", err)) + } + bb, err := json.Marshal(b) + if err != nil { + panic(fmt.Sprintf("b: %v", err)) + } + return StringDiff(string(ab), string(bb)) +} + +// ObjectGoPrintDiff is like ObjectDiff, but uses go-spew to print the objects, +// which shows absolutely everything by recursing into every single pointer +// (go's %#v formatters OTOH stop at a certain point). This is needed when you +// can't figure out why reflect.DeepEqual is returning false and nothing is +// showing you differences. This will. 
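A short usage sketch for the diff helpers in this file, handy in test failure messages (the pod struct below is a stand-in, not an API type):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/diff"
)

type pod struct {
	Name   string
	Labels map[string]string
}

func main() {
	a := pod{Name: "web", Labels: map[string]string{"tier": "frontend"}}
	b := pod{Name: "web", Labels: map[string]string{"tier": "backend"}}
	fmt.Println(diff.ObjectDiff(a, b))        // JSON prefix diff of the two objects
	fmt.Println(diff.ObjectReflectDiff(a, b)) // field-by-field diff with field paths
}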
+func ObjectGoPrintDiff(a, b interface{}) string { + s := spew.ConfigState{DisableMethods: true} + return StringDiff( + s.Sprintf("%#v", a), + s.Sprintf("%#v", b), + ) +} + +func ObjectReflectDiff(a, b interface{}) string { + vA, vB := reflect.ValueOf(a), reflect.ValueOf(b) + if vA.Type() != vB.Type() { + return fmt.Sprintf("type A %T and type B %T do not match", a, b) + } + diffs := objectReflectDiff(field.NewPath("object"), vA, vB) + if len(diffs) == 0 { + return "<no diffs>" + } + out := []string{""} + for _, d := range diffs { + out = append(out, + fmt.Sprintf("%s:", d.path), + limit(fmt.Sprintf(" a: %#v", d.a), 80), + limit(fmt.Sprintf(" b: %#v", d.b), 80), + ) + } + return strings.Join(out, "\n") +} + +func limit(s string, max int) string { + if len(s) > max { + return s[:max] + } + return s +} + +func public(s string) bool { + if len(s) == 0 { + return false + } + return s[:1] == strings.ToUpper(s[:1]) +} + +type diff struct { + path *field.Path + a, b interface{} +} + +type orderedDiffs []diff + +func (d orderedDiffs) Len() int { return len(d) } +func (d orderedDiffs) Swap(i, j int) { d[i], d[j] = d[j], d[i] } +func (d orderedDiffs) Less(i, j int) bool { + a, b := d[i].path.String(), d[j].path.String() + if a < b { + return true + } + return false +} + +func objectReflectDiff(path *field.Path, a, b reflect.Value) []diff { + switch a.Type().Kind() { + case reflect.Struct: + var changes []diff + for i := 0; i < a.Type().NumField(); i++ { + if !public(a.Type().Field(i).Name) { + if reflect.DeepEqual(a.Interface(), b.Interface()) { + continue + } + return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}} + } + if sub := objectReflectDiff(path.Child(a.Type().Field(i).Name), a.Field(i), b.Field(i)); len(sub) > 0 { + changes = append(changes, sub...) 
+ } else { + if !reflect.DeepEqual(a.Field(i).Interface(), b.Field(i).Interface()) { + changes = append(changes, diff{path: path, a: a.Field(i).Interface(), b: b.Field(i).Interface()}) + } + } + } + return changes + case reflect.Ptr, reflect.Interface: + if a.IsNil() || b.IsNil() { + switch { + case a.IsNil() && b.IsNil(): + return nil + case a.IsNil(): + return []diff{{path: path, a: nil, b: b.Interface()}} + default: + return []diff{{path: path, a: a.Interface(), b: nil}} + } + } + return objectReflectDiff(path, a.Elem(), b.Elem()) + case reflect.Chan: + if !reflect.DeepEqual(a.Interface(), b.Interface()) { + return []diff{{path: path, a: a.Interface(), b: b.Interface()}} + } + return nil + case reflect.Slice: + lA, lB := a.Len(), b.Len() + l := lA + if lB < lA { + l = lB + } + if lA == lB && lA == 0 { + if a.IsNil() != b.IsNil() { + return []diff{{path: path, a: a.Interface(), b: b.Interface()}} + } + return nil + } + for i := 0; i < l; i++ { + if !reflect.DeepEqual(a.Index(i), b.Index(i)) { + return objectReflectDiff(path.Index(i), a.Index(i), b.Index(i)) + } + } + var diffs []diff + for i := l; i < lA; i++ { + diffs = append(diffs, diff{path: path.Index(i), a: a.Index(i), b: nil}) + } + for i := l; i < lB; i++ { + diffs = append(diffs, diff{path: path.Index(i), a: nil, b: b.Index(i)}) + } + if len(diffs) == 0 { + diffs = append(diffs, diff{path: path, a: a, b: b}) + } + return diffs + case reflect.Map: + if reflect.DeepEqual(a.Interface(), b.Interface()) { + return nil + } + aKeys := make(map[interface{}]interface{}) + for _, key := range a.MapKeys() { + aKeys[key.Interface()] = a.MapIndex(key).Interface() + } + var missing []diff + for _, key := range b.MapKeys() { + if _, ok := aKeys[key.Interface()]; ok { + delete(aKeys, key.Interface()) + if reflect.DeepEqual(a.MapIndex(key).Interface(), b.MapIndex(key).Interface()) { + continue + } + missing = append(missing, objectReflectDiff(path.Key(fmt.Sprintf("%s", key.Interface())), a.MapIndex(key), b.MapIndex(key))...) + continue + } + missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key.Interface())), a: nil, b: b.MapIndex(key).Interface()}) + } + for key, value := range aKeys { + missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key)), a: value, b: nil}) + } + if len(missing) == 0 { + missing = append(missing, diff{path: path, a: a.Interface(), b: b.Interface()}) + } + sort.Sort(orderedDiffs(missing)) + return missing + default: + if reflect.DeepEqual(a.Interface(), b.Interface()) { + return nil + } + if !a.CanInterface() { + return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}} + } + return []diff{{path: path, a: a.Interface(), b: b.Interface()}} + } +} + +// ObjectGoPrintSideBySide prints a and b as textual dumps side by side, +// enabling easy visual scanning for mismatches. +func ObjectGoPrintSideBySide(a, b interface{}) string { + s := spew.ConfigState{ + Indent: " ", + // Extra deep spew. 
+ DisableMethods: true, + } + sA := s.Sdump(a) + sB := s.Sdump(b) + + linesA := strings.Split(sA, "\n") + linesB := strings.Split(sB, "\n") + width := 0 + for _, s := range linesA { + l := len(s) + if l > width { + width = l + } + } + for _, s := range linesB { + l := len(s) + if l > width { + width = l + } + } + buf := &bytes.Buffer{} + w := tabwriter.NewWriter(buf, width, 0, 1, ' ', 0) + max := len(linesA) + if len(linesB) > max { + max = len(linesB) + } + for i := 0; i < max; i++ { + var a, b string + if i < len(linesA) { + a = linesA[i] + } + if i < len(linesB) { + b = linesB[i] + } + fmt.Fprintf(w, "%s\t%s\n", a, b) + } + w.Flush() + return buf.String() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go new file mode 100644 index 000000000..b3b39bc38 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package errors implements various utility functions and types around errors. +package errors diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go new file mode 100644 index 000000000..de62fe399 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go @@ -0,0 +1,182 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "errors" + "fmt" +) + +// Aggregate represents an object that contains multiple errors, but does not +// necessarily have singular semantic meaning. +type Aggregate interface { + error + Errors() []error +} + +// NewAggregate converts a slice of errors into an Aggregate interface, which +// is itself an implementation of the error interface. If the slice is empty, +// this returns nil. +// It will check if any of the element of input error list is nil, to avoid +// nil pointer panic when call Error(). +func NewAggregate(errlist []error) Aggregate { + if len(errlist) == 0 { + return nil + } + // In case of input error list contains nil + var errs []error + for _, e := range errlist { + if e != nil { + errs = append(errs, e) + } + } + if len(errs) == 0 { + return nil + } + return aggregate(errs) +} + +// This helper implements the error and Errors interfaces. Keeping it private +// prevents people from making an aggregate of 0 errors, which is not +// an error, but does satisfy the error interface. +type aggregate []error + +// Error is part of the error interface. 
+func (agg aggregate) Error() string { + if len(agg) == 0 { + // This should never happen, really. + return "" + } + if len(agg) == 1 { + return agg[0].Error() + } + result := fmt.Sprintf("[%s", agg[0].Error()) + for i := 1; i < len(agg); i++ { + result += fmt.Sprintf(", %s", agg[i].Error()) + } + result += "]" + return result +} + +// Errors is part of the Aggregate interface. +func (agg aggregate) Errors() []error { + return []error(agg) +} + +// Matcher is used to match errors. Returns true if the error matches. +type Matcher func(error) bool + +// FilterOut removes all errors that match any of the matchers from the input +// error. If the input is a singular error, only that error is tested. If the +// input implements the Aggregate interface, the list of errors will be +// processed recursively. +// +// This can be used, for example, to remove known-OK errors (such as io.EOF or +// os.PathNotFound) from a list of errors. +func FilterOut(err error, fns ...Matcher) error { + if err == nil { + return nil + } + if agg, ok := err.(Aggregate); ok { + return NewAggregate(filterErrors(agg.Errors(), fns...)) + } + if !matchesError(err, fns...) { + return err + } + return nil +} + +// matchesError returns true if any Matcher returns true +func matchesError(err error, fns ...Matcher) bool { + for _, fn := range fns { + if fn(err) { + return true + } + } + return false +} + +// filterErrors returns any errors (or nested errors, if the list contains +// nested Errors) for which all fns return false. If no errors +// remain a nil list is returned. The resulting silec will have all +// nested slices flattened as a side effect. +func filterErrors(list []error, fns ...Matcher) []error { + result := []error{} + for _, err := range list { + r := FilterOut(err, fns...) + if r != nil { + result = append(result, r) + } + } + return result +} + +// Flatten takes an Aggregate, which may hold other Aggregates in arbitrary +// nesting, and flattens them all into a single Aggregate, recursively. +func Flatten(agg Aggregate) Aggregate { + result := []error{} + if agg == nil { + return nil + } + for _, err := range agg.Errors() { + if a, ok := err.(Aggregate); ok { + r := Flatten(a) + if r != nil { + result = append(result, r.Errors()...) + } + } else { + if err != nil { + result = append(result, err) + } + } + } + return NewAggregate(result) +} + +// Reduce will return err or, if err is an Aggregate and only has one item, +// the first item in the aggregate. +func Reduce(err error) error { + if agg, ok := err.(Aggregate); ok && err != nil { + switch len(agg.Errors()) { + case 1: + return agg.Errors()[0] + case 0: + return nil + } + } + return err +} + +// AggregateGoroutines runs the provided functions in parallel, stuffing all +// non-nil errors into the returned Aggregate. +// Returns nil if all the functions complete successfully. 
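Putting the aggregate helpers above together (the error values are illustrative):

package main

import (
	"fmt"
	"io"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	// nil entries are dropped; the remaining errors are reported as one value.
	agg := utilerrors.NewAggregate([]error{nil, io.EOF, fmt.Errorf("fetch failed")})
	fmt.Println(agg) // [EOF, fetch failed]

	// Known-OK errors can be filtered back out, e.g. treating io.EOF as benign.
	remaining := utilerrors.FilterOut(agg, func(err error) bool { return err == io.EOF })
	fmt.Println(remaining) // fetch failed
}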
+func AggregateGoroutines(funcs ...func() error) Aggregate { + errChan := make(chan error, len(funcs)) + for _, f := range funcs { + go func(f func() error) { errChan <- f() }(f) + } + errs := make([]error, 0) + for i := 0; i < cap(errChan); i++ { + if err := <-errChan; err != nil { + errs = append(errs, err) + } + } + return NewAggregate(errs) +} + +// ErrPreconditionViolated is returned when the precondition is violated +var ErrPreconditionViolated = errors.New("precondition is violated") diff --git a/vendor/k8s.io/kubernetes/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/util/framer/framer.go rename to vendor/k8s.io/apimachinery/pkg/util/framer/framer.go index 7ca806fa0..066680f44 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/framer/framer.go +++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go new file mode 100644 index 000000000..7b9c554e0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -0,0 +1,374 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto +// DO NOT EDIT! + +/* + Package intstr is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto + + It has these top-level messages: + IntOrString +*/ +package intstr + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *IntOrString) Reset() { *m = IntOrString{} } +func (*IntOrString) ProtoMessage() {} +func (*IntOrString) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func init() { + proto.RegisterType((*IntOrString)(nil), "k8s.io.apimachinery.pkg.util.intstr.IntOrString") +} +func (m *IntOrString) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IntOrString) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Type)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.IntVal)) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.StrVal))) + i += copy(data[i:], m.StrVal) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *IntOrString) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Type)) + n += 1 + sovGenerated(uint64(m.IntVal)) + l = len(m.StrVal) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *IntOrString) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IntOrString: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IntOrString: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Type |= (Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntVal", wireType) + } + m.IntVal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.IntVal |= (int32(b) & 0x7F) << shift + if b < 
0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StrVal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StrVal = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 288 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0xf4, 0x30, + 0x1c, 0xc6, 0x93, 0xf7, 0xee, 0x3d, 0xb4, 0x82, 0x43, 0x71, 0x38, 0x1c, 0xd2, 0xa2, 0x20, 0x5d, + 0x4c, 0x56, 0x71, 0xec, 0x76, 0x20, 0x08, 0x3d, 0x71, 0x70, 0x6b, 0xef, 0x62, 0x2e, 0xf4, 0x2e, + 0x09, 0xe9, 0xbf, 0x42, 0xb7, 0xfb, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x3a, 0xde, 0xe8, 0x20, 0x87, + 0xad, 0xdf, 0xc2, 0x49, 0x9a, 0x16, 0x74, 0x4a, 0x9e, 0xe7, 0xf9, 0xfd, 0x02, 0xf1, 0x6e, 0xf2, + 0xab, 0x82, 0x4a, 0xcd, 0xf2, 0x32, 
0xe3, 0x56, 0x71, 0xe0, 0x05, 0x7b, 0xe2, 0x6a, 0xa9, 0x2d, + 0x1b, 0x86, 0xd4, 0xc8, 0x4d, 0xba, 0x58, 0x49, 0xc5, 0x6d, 0xc5, 0x4c, 0x2e, 0x58, 0x09, 0x72, + 0xcd, 0xa4, 0x82, 0x02, 0x2c, 0x13, 0x5c, 0x71, 0x9b, 0x02, 0x5f, 0x52, 0x63, 0x35, 0x68, 0xff, + 0xbc, 0x97, 0xe8, 0x5f, 0x89, 0x9a, 0x5c, 0xd0, 0x4e, 0xa2, 0xbd, 0x74, 0x7a, 0x29, 0x24, 0xac, + 0xca, 0x8c, 0x2e, 0xf4, 0x86, 0x09, 0x2d, 0x34, 0x73, 0x6e, 0x56, 0x3e, 0xba, 0xe4, 0x82, 0xbb, + 0xf5, 0x6f, 0x9e, 0xbd, 0x60, 0xef, 0x68, 0xa6, 0xe0, 0xd6, 0xce, 0xc1, 0x4a, 0x25, 0xfc, 0xc8, + 0x1b, 0x43, 0x65, 0xf8, 0x14, 0x87, 0x38, 0x1a, 0xc5, 0x27, 0xf5, 0x3e, 0x40, 0xed, 0x3e, 0x18, + 0xdf, 0x55, 0x86, 0x7f, 0x0f, 0x67, 0xe2, 0x08, 0xff, 0xc2, 0x9b, 0x48, 0x05, 0xf7, 0xe9, 0x7a, + 0xfa, 0x2f, 0xc4, 0xd1, 0xff, 0xf8, 0x78, 0x60, 0x27, 0x33, 0xd7, 0x26, 0xc3, 0xda, 0x71, 0x05, + 0xd8, 0x8e, 0x1b, 0x85, 0x38, 0x3a, 0xfc, 0xe5, 0xe6, 0xae, 0x4d, 0x86, 0xf5, 0xfa, 0xe0, 0xf5, + 0x2d, 0x40, 0xdb, 0x8f, 0x10, 0xc5, 0x51, 0xdd, 0x10, 0xb4, 0x6b, 0x08, 0x7a, 0x6f, 0x08, 0xda, + 0xb6, 0x04, 0xd7, 0x2d, 0xc1, 0xbb, 0x96, 0xe0, 0xcf, 0x96, 0xe0, 0xe7, 0x2f, 0x82, 0x1e, 0x26, + 0xfd, 0x67, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x55, 0xdf, 0x2a, 0x60, 0x01, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto new file mode 100644 index 000000000..cccaf6f68 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto @@ -0,0 +1,43 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.util.intstr; + +// Package-wide variables from generator "generated". +option go_package = "intstr"; + +// IntOrString is a type that can hold an int32 or a string. When used in +// JSON or YAML marshalling and unmarshalling, it produces or consumes the +// inner type. This allows you to have, for example, a JSON field that can +// accept a name or number. +// TODO: Rename to Int32OrString +// +// +protobuf=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:openapi-gen=true +message IntOrString { + optional int64 type = 1; + + optional int32 intVal = 2; + + optional string strVal = 3; +} + diff --git a/vendor/k8s.io/kubernetes/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go similarity index 82% rename from vendor/k8s.io/kubernetes/pkg/util/intstr/intstr.go rename to vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 53bc0fc7e..02586b348 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
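The IntOrString type described in the generated proto above is typically consumed through its JSON round-trip and the helpers in intstr.go; a brief sketch (the rule struct is illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

type rule struct {
	// Accepts either a port number or a named port in the same field.
	Port intstr.IntOrString `json:"port"`
}

func main() {
	var a, b rule
	_ = json.Unmarshal([]byte(`{"port": 8080}`), &a)
	_ = json.Unmarshal([]byte(`{"port": "https"}`), &b)
	fmt.Println(a.Port.IntVal, b.Port.StrVal) // 8080 https

	// Parse (added in this change) prefers the integer form when the string is numeric.
	p, q := intstr.Parse("50"), intstr.Parse("50%")
	fmt.Println(p.IntVal, q.StrVal) // 50 50%
}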
@@ -20,9 +20,14 @@ import ( "encoding/json" "fmt" "math" + "runtime/debug" "strconv" "strings" + "k8s.io/apimachinery/pkg/openapi" + + "github.com/go-openapi/spec" + "github.com/golang/glog" "github.com/google/gofuzz" ) @@ -32,9 +37,9 @@ import ( // accept a name or number. // TODO: Rename to Int32OrString // -// +gencopy=true // +protobuf=true // +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:openapi-gen=true type IntOrString struct { Type Type `protobuf:"varint,1,opt,name=type,casttype=Type"` IntVal int32 `protobuf:"varint,2,opt,name=intVal"` @@ -54,6 +59,9 @@ const ( // than int32. // TODO: convert to (val int32) func FromInt(val int) IntOrString { + if val > math.MaxInt32 || val < math.MinInt32 { + glog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) + } return IntOrString{Type: Int, IntVal: int32(val)} } @@ -62,6 +70,16 @@ func FromString(val string) IntOrString { return IntOrString{Type: String, StrVal: val} } +// Parse the given string and try to convert it to an integer before +// setting it as a string value. +func Parse(val string) IntOrString { + i, err := strconv.Atoi(val) + if err != nil { + return FromString(val) + } + return FromInt(i) +} + // UnmarshalJSON implements the json.Unmarshaller interface. func (intstr *IntOrString) UnmarshalJSON(value []byte) error { if value[0] == '"' { @@ -102,6 +120,17 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) { } } +func (_ IntOrString) OpenAPIDefinition() openapi.OpenAPIDefinition { + return openapi.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "int-or-string", + }, + }, + } +} + func (intstr *IntOrString) Fuzz(c fuzz.Continue) { if intstr == nil { return @@ -144,5 +173,5 @@ func getIntOrPercentValue(intOrStr *IntOrString) (int, bool, error) { } return int(v), true, nil } - return 0, false, fmt.Errorf("invalid value: neither int nor percentage") + return 0, false, fmt.Errorf("invalid type: neither int nor percentage") } diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go new file mode 100644 index 000000000..e8054a12e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go @@ -0,0 +1,107 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package json + +import ( + "bytes" + "encoding/json" + "io" +) + +// NewEncoder delegates to json.NewEncoder +// It is only here so this package can be a drop-in for common encoding/json uses +func NewEncoder(w io.Writer) *json.Encoder { + return json.NewEncoder(w) +} + +// Marshal delegates to json.Marshal +// It is only here so this package can be a drop-in for common encoding/json uses +func Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// Unmarshal unmarshals the given data +// If v is a *map[string]interface{}, numbers are converted to int64 or float64 +func Unmarshal(data []byte, v interface{}) error { + switch v := v.(type) { + case *map[string]interface{}: + // Build a decoder from the given data + decoder := json.NewDecoder(bytes.NewBuffer(data)) + // Preserve numbers, rather than casting to float64 automatically + decoder.UseNumber() + // Run the decode + if err := decoder.Decode(v); err != nil { + return err + } + // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 + return convertMapNumbers(*v) + + default: + return json.Unmarshal(data, v) + } +} + +// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. +// values which are map[string]interface{} or []interface{} are recursively visited +func convertMapNumbers(m map[string]interface{}) error { + var err error + for k, v := range m { + switch v := v.(type) { + case json.Number: + m[k], err = convertNumber(v) + case map[string]interface{}: + err = convertMapNumbers(v) + case []interface{}: + err = convertSliceNumbers(v) + } + if err != nil { + return err + } + } + return nil +} + +// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. +// values which are map[string]interface{} or []interface{} are recursively visited +func convertSliceNumbers(s []interface{}) error { + var err error + for i, v := range s { + switch v := v.(type) { + case json.Number: + s[i], err = convertNumber(v) + case map[string]interface{}: + err = convertMapNumbers(v) + case []interface{}: + err = convertSliceNumbers(v) + } + if err != nil { + return err + } + } + return nil +} + +// convertNumber converts a json.Number to an int64 or float64, or returns an error +func convertNumber(n json.Number) (interface{}, error) { + // Attempt to convert to an int64 first + if i, err := n.Int64(); err == nil { + return i, nil + } + // Return a float64 (default json.Decode() behavior) + // An overflow will return an error + return n.Float64() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go new file mode 100644 index 000000000..c32082e93 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -0,0 +1,269 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package net + +import ( + "crypto/tls" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "strconv" + "strings" + + "github.com/golang/glog" + "golang.org/x/net/http2" +) + +// IsProbableEOF returns true if the given error resembles a connection termination +// scenario that would justify assuming that the watch is empty. +// These errors are what the Go http stack returns back to us which are general +// connection closure errors (strongly correlated) and callers that need to +// differentiate probable errors in connection behavior between normal "this is +// disconnected" should use the method. +func IsProbableEOF(err error) bool { + if uerr, ok := err.(*url.Error); ok { + err = uerr.Err + } + switch { + case err == io.EOF: + return true + case err.Error() == "http: can't write HTTP request on broken connection": + return true + case strings.Contains(err.Error(), "connection reset by peer"): + return true + case strings.Contains(strings.ToLower(err.Error()), "use of closed network connection"): + return true + } + return false +} + +var defaultTransport = http.DefaultTransport.(*http.Transport) + +// SetOldTransportDefaults applies the defaults from http.DefaultTransport +// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset +func SetOldTransportDefaults(t *http.Transport) *http.Transport { + if t.Proxy == nil || isDefault(t.Proxy) { + // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings + // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY + t.Proxy = NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) + } + if t.Dial == nil { + t.Dial = defaultTransport.Dial + } + if t.TLSHandshakeTimeout == 0 { + t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout + } + return t +} + +// SetTransportDefaults applies the defaults from http.DefaultTransport +// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset +func SetTransportDefaults(t *http.Transport) *http.Transport { + t = SetOldTransportDefaults(t) + // Allow clients to disable http2 if needed. + if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 { + glog.Infof("HTTP2 has been explicitly disabled") + } else { + if err := http2.ConfigureTransport(t); err != nil { + glog.Warningf("Transport failed http2 configuration: %v", err) + } + } + return t +} + +type RoundTripperWrapper interface { + http.RoundTripper + WrappedRoundTripper() http.RoundTripper +} + +type DialFunc func(net, addr string) (net.Conn, error) + +func Dialer(transport http.RoundTripper) (DialFunc, error) { + if transport == nil { + return nil, nil + } + + switch transport := transport.(type) { + case *http.Transport: + return transport.Dial, nil + case RoundTripperWrapper: + return Dialer(transport.WrappedRoundTripper()) + default: + return nil, fmt.Errorf("unknown transport type: %v", transport) + } +} + +// CloneTLSConfig returns a tls.Config with all exported fields except SessionTicketsDisabled and SessionTicketKey copied. +// This makes it safe to call CloneTLSConfig on a config in active use by a server. 
+// TODO: replace with tls.Config#Clone when we move to go1.8 +func CloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} + +type TLSClientConfigHolder interface { + TLSClientConfig() *tls.Config +} + +func TLSClientConfig(transport http.RoundTripper) (*tls.Config, error) { + if transport == nil { + return nil, nil + } + + switch transport := transport.(type) { + case *http.Transport: + return transport.TLSClientConfig, nil + case TLSClientConfigHolder: + return transport.TLSClientConfig(), nil + case RoundTripperWrapper: + return TLSClientConfig(transport.WrappedRoundTripper()) + default: + return nil, fmt.Errorf("unknown transport type: %v", transport) + } +} + +func FormatURL(scheme string, host string, port int, path string) *url.URL { + return &url.URL{ + Scheme: scheme, + Host: net.JoinHostPort(host, strconv.Itoa(port)), + Path: path, + } +} + +func GetHTTPClient(req *http.Request) string { + if userAgent, ok := req.Header["User-Agent"]; ok { + if len(userAgent) > 0 { + return userAgent[0] + } + } + return "unknown" +} + +// Extracts and returns the clients IP from the given request. +// Looks at X-Forwarded-For header, X-Real-Ip header and request.RemoteAddr in that order. +// Returns nil if none of them are set or is set to an invalid value. +func GetClientIP(req *http.Request) net.IP { + hdr := req.Header + // First check the X-Forwarded-For header for requests via proxy. + hdrForwardedFor := hdr.Get("X-Forwarded-For") + if hdrForwardedFor != "" { + // X-Forwarded-For can be a csv of IPs in case of multiple proxies. + // Use the first valid one. + parts := strings.Split(hdrForwardedFor, ",") + for _, part := range parts { + ip := net.ParseIP(strings.TrimSpace(part)) + if ip != nil { + return ip + } + } + } + + // Try the X-Real-Ip header. + hdrRealIp := hdr.Get("X-Real-Ip") + if hdrRealIp != "" { + ip := net.ParseIP(hdrRealIp) + if ip != nil { + return ip + } + } + + // Fallback to Remote Address in request, which will give the correct client IP when there is no proxy. + // Remote Address in Go's HTTP server is in the form host:port so we need to split that first. + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err == nil { + return net.ParseIP(host) + } + + // Fallback if Remote Address was just IP. 
+ return net.ParseIP(req.RemoteAddr) +} + +var defaultProxyFuncPointer = fmt.Sprintf("%p", http.ProxyFromEnvironment) + +// isDefault checks to see if the transportProxierFunc is pointing to the default one +func isDefault(transportProxier func(*http.Request) (*url.URL, error)) bool { + transportProxierPointer := fmt.Sprintf("%p", transportProxier) + return transportProxierPointer == defaultProxyFuncPointer +} + +// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if +// no matching CIDRs are found +func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { + // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it + noProxyEnv := os.Getenv("NO_PROXY") + noProxyRules := strings.Split(noProxyEnv, ",") + + cidrs := []*net.IPNet{} + for _, noProxyRule := range noProxyRules { + _, cidr, _ := net.ParseCIDR(noProxyRule) + if cidr != nil { + cidrs = append(cidrs, cidr) + } + } + + if len(cidrs) == 0 { + return delegate + } + + return func(req *http.Request) (*url.URL, error) { + host := req.URL.Host + // for some urls, the Host is already the host, not the host:port + if net.ParseIP(host) == nil { + var err error + host, _, err = net.SplitHostPort(req.URL.Host) + if err != nil { + return delegate(req) + } + } + + ip := net.ParseIP(host) + if ip == nil { + return delegate(req) + } + + for _, cidr := range cidrs { + if cidr.Contains(ip) { + return nil, nil + } + } + + return delegate(req) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go new file mode 100644 index 000000000..a1e53d2e4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go @@ -0,0 +1,278 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
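// Illustrative sketch (not part of the vendored files): the header precedence
// implemented by GetClientIP above - X-Forwarded-For first, then X-Real-Ip,
// then RemoteAddr. Uses the vendored k8s.io/apimachinery/pkg/util/net package.
package main

import (
    "fmt"
    "net/http"

    utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
    req, _ := http.NewRequest("GET", "http://example.com", nil)
    req.RemoteAddr = "10.0.0.1:54321"
    req.Header.Set("X-Real-Ip", "172.16.0.9")
    req.Header.Set("X-Forwarded-For", "203.0.113.7, 10.0.0.2")

    // The first valid entry in X-Forwarded-For wins.
    fmt.Println(utilnet.GetClientIP(req)) // 203.0.113.7
}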
+*/ + +package net + +import ( + "bufio" + "encoding/hex" + "fmt" + "io" + "net" + "os" + + "strings" + + "github.com/golang/glog" +) + +type Route struct { + Interface string + Destination net.IP + Gateway net.IP + // TODO: add more fields here if needed +} + +func getRoutes(input io.Reader) ([]Route, error) { + routes := []Route{} + if input == nil { + return nil, fmt.Errorf("input is nil") + } + scanner := bufio.NewReader(input) + for { + line, err := scanner.ReadString('\n') + if err == io.EOF { + break + } + //ignore the headers in the route info + if strings.HasPrefix(line, "Iface") { + continue + } + fields := strings.Fields(line) + routes = append(routes, Route{}) + route := &routes[len(routes)-1] + route.Interface = fields[0] + ip, err := parseIP(fields[1]) + if err != nil { + return nil, err + } + route.Destination = ip + ip, err = parseIP(fields[2]) + if err != nil { + return nil, err + } + route.Gateway = ip + } + return routes, nil +} + +func parseIP(str string) (net.IP, error) { + if str == "" { + return nil, fmt.Errorf("input is nil") + } + bytes, err := hex.DecodeString(str) + if err != nil { + return nil, err + } + //TODO add ipv6 support + if len(bytes) != net.IPv4len { + return nil, fmt.Errorf("only IPv4 is supported") + } + bytes[0], bytes[1], bytes[2], bytes[3] = bytes[3], bytes[2], bytes[1], bytes[0] + return net.IP(bytes), nil +} + +func isInterfaceUp(intf *net.Interface) bool { + if intf == nil { + return false + } + if intf.Flags&net.FlagUp != 0 { + glog.V(4).Infof("Interface %v is up", intf.Name) + return true + } + return false +} + +//getFinalIP method receives all the IP addrs of a Interface +//and returns a nil if the address is Loopback, Ipv6, link-local or nil. +//It returns a valid IPv4 if an Ipv4 address is found in the array. 
+func getFinalIP(addrs []net.Addr) (net.IP, error) { + if len(addrs) > 0 { + for i := range addrs { + glog.V(4).Infof("Checking addr %s.", addrs[i].String()) + ip, _, err := net.ParseCIDR(addrs[i].String()) + if err != nil { + return nil, err + } + //Only IPv4 + //TODO : add IPv6 support + if ip.To4() != nil { + if !ip.IsLoopback() && !ip.IsLinkLocalMulticast() && !ip.IsLinkLocalUnicast() { + glog.V(4).Infof("IP found %v", ip) + return ip, nil + } else { + glog.V(4).Infof("Loopback/link-local found %v", ip) + } + } else { + glog.V(4).Infof("%v is not a valid IPv4 address", ip) + } + + } + } + return nil, nil +} + +func getIPFromInterface(intfName string, nw networkInterfacer) (net.IP, error) { + intf, err := nw.InterfaceByName(intfName) + if err != nil { + return nil, err + } + if isInterfaceUp(intf) { + addrs, err := nw.Addrs(intf) + if err != nil { + return nil, err + } + glog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs) + finalIP, err := getFinalIP(addrs) + if err != nil { + return nil, err + } + if finalIP != nil { + glog.V(4).Infof("valid IPv4 address for interface %q found as %v.", intfName, finalIP) + return finalIP, nil + } + } + + return nil, nil +} + +func flagsSet(flags net.Flags, test net.Flags) bool { + return flags&test != 0 +} + +func flagsClear(flags net.Flags, test net.Flags) bool { + return flags&test == 0 +} + +func chooseHostInterfaceNativeGo() (net.IP, error) { + intfs, err := net.Interfaces() + if err != nil { + return nil, err + } + i := 0 + var ip net.IP + for i = range intfs { + if flagsSet(intfs[i].Flags, net.FlagUp) && flagsClear(intfs[i].Flags, net.FlagLoopback|net.FlagPointToPoint) { + addrs, err := intfs[i].Addrs() + if err != nil { + return nil, err + } + if len(addrs) > 0 { + for _, addr := range addrs { + if addrIP, _, err := net.ParseCIDR(addr.String()); err == nil { + if addrIP.To4() != nil { + ip = addrIP.To4() + if !ip.IsLinkLocalMulticast() && !ip.IsLinkLocalUnicast() { + break + } + } + } + } + if ip != nil { + // This interface should suffice. + break + } + } + } + } + if ip == nil { + return nil, fmt.Errorf("no acceptable interface from host") + } + glog.V(4).Infof("Choosing interface %s (IP %v) as default", intfs[i].Name, ip) + return ip, nil +} + +//ChooseHostInterface is a method used fetch an IP for a daemon. +//It uses data from /proc/net/route file. +//For a node with no internet connection ,it returns error +//For a multi n/w interface node it returns the IP of the interface with gateway on it. 
+func ChooseHostInterface() (net.IP, error) { + inFile, err := os.Open("/proc/net/route") + if err != nil { + if os.IsNotExist(err) { + return chooseHostInterfaceNativeGo() + } + return nil, err + } + defer inFile.Close() + var nw networkInterfacer = networkInterface{} + return chooseHostInterfaceFromRoute(inFile, nw) +} + +type networkInterfacer interface { + InterfaceByName(intfName string) (*net.Interface, error) + Addrs(intf *net.Interface) ([]net.Addr, error) +} + +type networkInterface struct{} + +func (_ networkInterface) InterfaceByName(intfName string) (*net.Interface, error) { + intf, err := net.InterfaceByName(intfName) + if err != nil { + return nil, err + } + return intf, nil +} + +func (_ networkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) { + addrs, err := intf.Addrs() + if err != nil { + return nil, err + } + return addrs, nil +} + +func chooseHostInterfaceFromRoute(inFile io.Reader, nw networkInterfacer) (net.IP, error) { + routes, err := getRoutes(inFile) + if err != nil { + return nil, err + } + zero := net.IP{0, 0, 0, 0} + var finalIP net.IP + for i := range routes { + //find interface with gateway + if routes[i].Destination.Equal(zero) { + glog.V(4).Infof("Default route transits interface %q", routes[i].Interface) + finalIP, err := getIPFromInterface(routes[i].Interface, nw) + if err != nil { + return nil, err + } + if finalIP != nil { + glog.V(4).Infof("Choosing IP %v ", finalIP) + return finalIP, nil + } + } + } + glog.V(4).Infof("No valid IP found") + if finalIP == nil { + return nil, fmt.Errorf("Unable to select an IP.") + } + return nil, nil +} + +// If bind-address is usable, return it directly +// If bind-address is not usable (unset, 0.0.0.0, or loopback), we will use the host's default +// interface. +func ChooseBindAddress(bindAddress net.IP) (net.IP, error) { + if bindAddress == nil || bindAddress.IsUnspecified() || bindAddress.IsLoopback() { + hostIP, err := ChooseHostInterface() + if err != nil { + return nil, err + } + bindAddress = hostIP + } + return bindAddress, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/net/port_range.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go similarity index 92% rename from vendor/k8s.io/kubernetes/pkg/util/net/port_range.go rename to vendor/k8s.io/apimachinery/pkg/util/net/port_range.go index 527552571..6a50e6186 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/net/port_range.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
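// Illustrative sketch (not part of the vendored files): ChooseBindAddress,
// defined above, replaces a nil, unspecified, or loopback address with an IP
// taken from the host's default-route interface (it reads /proc/net/route
// when present, otherwise falls back to inspecting interfaces directly).
package main

import (
    "fmt"
    "net"

    utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
    // 0.0.0.0 is "unspecified", so a concrete host IP is chosen instead.
    ip, err := utilnet.ChooseBindAddress(net.ParseIP("0.0.0.0"))
    if err != nil {
        fmt.Println("could not pick a bind address:", err)
        return
    }
    fmt.Println("binding to", ip)
}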
@@ -71,12 +71,17 @@ func (pr *PortRange) Set(value string) error { high, err = strconv.Atoi(value[hyphenIndex+1:]) } if err != nil { - return fmt.Errorf("unable to parse port range: %s", value) + return fmt.Errorf("unable to parse port range: %s: %v", value, err) + } + + if low > 65535 || high > 65535 { + return fmt.Errorf("the port range cannot be greater than 65535: %s", value) } if high < low { return fmt.Errorf("end port cannot be less than start port: %s", value) } + pr.Base = low pr.Size = 1 + high - low return nil diff --git a/vendor/k8s.io/kubernetes/pkg/util/net/port_split.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/util/net/port_split.go rename to vendor/k8s.io/apimachinery/pkg/util/net/port_split.go index be40eb75f..c0fd4e20f 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/net/port_split.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ package net import ( "strings" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/sets" ) var validSchemes = sets.NewString("http", "https", "") diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/util.go b/vendor/k8s.io/apimachinery/pkg/util/net/util.go new file mode 100644 index 000000000..461144f0b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/net/util.go @@ -0,0 +1,46 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "net" + "reflect" + "syscall" +) + +// IPNetEqual checks if the two input IPNets are representing the same subnet. +// For example, +// 10.0.0.1/24 and 10.0.0.0/24 are the same subnet. +// 10.0.0.1/24 and 10.0.0.0/25 are not the same subnet. +func IPNetEqual(ipnet1, ipnet2 *net.IPNet) bool { + if ipnet1 == nil || ipnet2 == nil { + return false + } + if reflect.DeepEqual(ipnet1.Mask, ipnet2.Mask) && ipnet1.Contains(ipnet2.IP) && ipnet2.Contains(ipnet1.IP) { + return true + } + return false +} + +// Returns if the given err is "connection reset by peer" error. +func IsConnectionReset(err error) bool { + opErr, ok := err.(*net.OpError) + if ok && opErr.Err.Error() == syscall.ECONNRESET.Error() { + return true + } + return false +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/rand/rand.go b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go similarity index 82% rename from vendor/k8s.io/kubernetes/pkg/util/rand/rand.go rename to vendor/k8s.io/apimachinery/pkg/util/rand/rand.go index f2647af33..db109c2cd 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/rand/rand.go +++ b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2015 The Kubernetes Authors. 
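// Illustrative sketch (not part of the vendored files): IPNetEqual from the
// new util.go compares subnets rather than literal CIDR strings.
package main

import (
    "fmt"
    "net"

    utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
    _, a, _ := net.ParseCIDR("10.0.0.1/24")
    _, b, _ := net.ParseCIDR("10.0.0.0/24")
    _, c, _ := net.ParseCIDR("10.0.0.0/25")

    fmt.Println(utilnet.IPNetEqual(a, b)) // true  - same /24 subnet
    fmt.Println(utilnet.IPNetEqual(a, c)) // false - different mask
}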
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,8 +23,6 @@ import ( "time" ) -var letters = []rune("abcdefghijklmnopqrstuvwxyz0123456789") -var numLetters = len(letters) var rng = struct { sync.Mutex rand *rand.Rand @@ -72,12 +70,16 @@ func Perm(n int) []int { return rng.rand.Perm(n) } -// String generates a random alphanumeric string n characters long. This will -// panic if n is less than zero. +// We omit vowels from the set of available characters to reduce the chances +// of "bad words" being formed. +var alphanums = []rune("bcdfghjklmnpqrstvwxz0123456789") + +// String generates a random alphanumeric string, without vowels, which is n +// characters long. This will panic if n is less than zero. func String(length int) string { b := make([]rune, length) for i := range b { - b[i] = letters[Intn(numLetters)] + b[i] = alphanums[Intn(len(alphanums))] } return string(b) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go new file mode 100644 index 000000000..748174e19 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -0,0 +1,161 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" + "runtime" + "sync" + "time" + + "github.com/golang/glog" +) + +var ( + // ReallyCrash controls the behavior of HandleCrash and now defaults + // true. It's still exposed so components can optionally set to false + // to restore prior behavior. + ReallyCrash = true +) + +// PanicHandlers is a list of functions which will be invoked when a panic happens. +var PanicHandlers = []func(interface{}){logPanic} + +// HandleCrash simply catches a crash and logs an error. Meant to be called via +// defer. Additional context-specific handlers can be provided, and will be +// called in case of panic. HandleCrash actually crashes, after calling the +// handlers and logging the panic message. +// +// TODO: remove this function. We are switching to a world where it's safe for +// apiserver to panic, since it will be restarted by kubelet. At the beginning +// of the Kubernetes project, nothing was going to restart apiserver and so +// catching panics was important. But it's actually much simpler for montoring +// software if we just exit when an unexpected panic happens. +func HandleCrash(additionalHandlers ...func(interface{})) { + if r := recover(); r != nil { + for _, fn := range PanicHandlers { + fn(r) + } + for _, fn := range additionalHandlers { + fn(r) + } + if ReallyCrash { + // Actually proceed to panic. + panic(r) + } + } +} + +// logPanic logs the caller tree when a panic occurs. 
+func logPanic(r interface{}) { + callers := getCallers(r) + glog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers) +} + +func getCallers(r interface{}) string { + callers := "" + for i := 0; true; i++ { + _, file, line, ok := runtime.Caller(i) + if !ok { + break + } + callers = callers + fmt.Sprintf("%v:%v\n", file, line) + } + + return callers +} + +// ErrorHandlers is a list of functions which will be invoked when an unreturnable +// error occurs. +// TODO(lavalamp): for testability, this and the below HandleError function +// should be packaged up into a testable and reusable object. +var ErrorHandlers = []func(error){ + logError, + (&rudimentaryErrorBackoff{ + lastErrorTime: time.Now(), + // 1ms was the number folks were able to stomach as a global rate limit. + // If you need to log errors more than 1000 times a second you + // should probably consider fixing your code instead. :) + minPeriod: time.Millisecond, + }).OnError, +} + +// HandlerError is a method to invoke when a non-user facing piece of code cannot +// return an error and needs to indicate it has been ignored. Invoking this method +// is preferable to logging the error - the default behavior is to log but the +// errors may be sent to a remote server for analysis. +func HandleError(err error) { + // this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead + if err == nil { + return + } + + for _, fn := range ErrorHandlers { + fn(err) + } +} + +// logError prints an error with the call stack of the location it was reported +func logError(err error) { + glog.ErrorDepth(2, err) +} + +type rudimentaryErrorBackoff struct { + minPeriod time.Duration // immutable + // TODO(lavalamp): use the clock for testability. Need to move that + // package for that to be accessible here. + lastErrorTimeLock sync.Mutex + lastErrorTime time.Time +} + +// OnError will block if it is called more often than the embedded period time. +// This will prevent overly tight hot error loops. +func (r *rudimentaryErrorBackoff) OnError(error) { + r.lastErrorTimeLock.Lock() + defer r.lastErrorTimeLock.Unlock() + d := time.Since(r.lastErrorTime) + if d < r.minPeriod { + time.Sleep(r.minPeriod - d) + } + r.lastErrorTime = time.Now() +} + +// GetCaller returns the caller of the function that calls it. +func GetCaller() string { + var pc [1]uintptr + runtime.Callers(3, pc[:]) + f := runtime.FuncForPC(pc[0]) + if f == nil { + return fmt.Sprintf("Unable to find caller") + } + return f.Name() +} + +// RecoverFromPanic replaces the specified error with an error containing the +// original error, and the call tree when a panic occurs. This enables error +// handlers to handle errors and panics the same way. +func RecoverFromPanic(err *error) { + if r := recover(); r != nil { + callers := getCallers(r) + + *err = fmt.Errorf( + "recovered from panic %q. (err=%v) Call stack:\n%v", + r, + *err, + callers) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/sets/byte.go b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go similarity index 94% rename from vendor/k8s.io/kubernetes/pkg/util/sets/byte.go rename to vendor/k8s.io/apimachinery/pkg/util/sets/byte.go index fe1068cdf..a460e4b1f 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/sets/byte.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors All rights reserved. +Copyright 2017 The Kubernetes Authors. 
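// Illustrative sketch (not part of the vendored files): the crash/error
// helpers vendored above (k8s.io/apimachinery/pkg/util/runtime). HandleCrash
// logs a panic through the registered PanicHandlers and, with ReallyCrash at
// its default of true, re-panics; HandleError logs errors that have no caller
// to return to, with a rudimentary rate limit.
package main

import (
    "fmt"

    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func doWork() {
    // Log the panic (and re-panic, because ReallyCrash defaults to true).
    defer utilruntime.HandleCrash()
    panic("boom")
}

func main() {
    // Errors that cannot be returned go through HandleError.
    utilruntime.HandleError(fmt.Errorf("background sync failed"))

    defer func() { recover() }() // keep this demo process alive
    doWork()
}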
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -174,6 +174,15 @@ func (s Byte) List() []byte { return []byte(res) } +// UnsortedList returns the slice with contents in random order. +func (s Byte) UnsortedList() []byte { + res := make([]byte, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + // Returns a single element from the set. func (s Byte) PopAny() (byte, bool) { for key := range s { diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go new file mode 100644 index 000000000..28a6a7d5c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by set-gen. Do not edit it manually! + +// Package sets has auto-generated set types. +package sets diff --git a/vendor/k8s.io/kubernetes/pkg/util/sets/empty.go b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go similarity index 93% rename from vendor/k8s.io/kubernetes/pkg/util/sets/empty.go rename to vendor/k8s.io/apimachinery/pkg/util/sets/empty.go index 73ac74c14..cd22b953a 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/sets/empty.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors All rights reserved. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/kubernetes/pkg/util/sets/int.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go similarity index 94% rename from vendor/k8s.io/kubernetes/pkg/util/sets/int.go rename to vendor/k8s.io/apimachinery/pkg/util/sets/int.go index e7a2b5db1..0614e9fb0 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/sets/int.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors All rights reserved. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -174,6 +174,15 @@ func (s Int) List() []int { return []int(res) } +// UnsortedList returns the slice with contents in random order. +func (s Int) UnsortedList() []int { + res := make([]int, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + // Returns a single element from the set. 
func (s Int) PopAny() (int, bool) { for key := range s { diff --git a/vendor/k8s.io/kubernetes/pkg/util/sets/int64.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go similarity index 94% rename from vendor/k8s.io/kubernetes/pkg/util/sets/int64.go rename to vendor/k8s.io/apimachinery/pkg/util/sets/int64.go index f31da7750..82e1ba782 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/sets/int64.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors All rights reserved. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -174,6 +174,15 @@ func (s Int64) List() []int64 { return []int64(res) } +// UnsortedList returns the slice with contents in random order. +func (s Int64) UnsortedList() []int64 { + res := make([]int64, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + // Returns a single element from the set. func (s Int64) PopAny() (int64, bool) { for key := range s { diff --git a/vendor/k8s.io/kubernetes/pkg/util/sets/string.go b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go similarity index 94% rename from vendor/k8s.io/kubernetes/pkg/util/sets/string.go rename to vendor/k8s.io/apimachinery/pkg/util/sets/string.go index 572aa9157..baef7a6a2 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/sets/string.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors All rights reserved. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -174,6 +174,15 @@ func (s String) List() []string { return []string(res) } +// UnsortedList returns the slice with contents in random order. +func (s String) UnsortedList() []string { + res := make([]string, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + // Returns a single element from the set. func (s String) PopAny() (string, bool) { for key := range s { diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go new file mode 100644 index 000000000..43c779a11 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -0,0 +1,254 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package field + +import ( + "fmt" + "reflect" + "strings" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Error is an implementation of the 'error' interface, which represents a +// field-level validation error. +type Error struct { + Type ErrorType + Field string + BadValue interface{} + Detail string +} + +var _ error = &Error{} + +// Error implements the error interface. 
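// Illustrative sketch (not part of the vendored files): the generated string
// set from k8s.io/apimachinery/pkg/util/sets, including the UnsortedList
// method added in this diff. List() is sorted; UnsortedList() is not.
package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/sets"
)

func main() {
    s := sets.NewString("pods", "services")
    s.Insert("nodes")

    fmt.Println(s.Has("pods"))    // true
    fmt.Println(s.List())         // [nodes pods services], sorted
    fmt.Println(s.UnsortedList()) // same elements, arbitrary order
}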
+func (v *Error) Error() string { + return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody()) +} + +// ErrorBody returns the error message without the field name. This is useful +// for building nice-looking higher-level error reporting. +func (v *Error) ErrorBody() string { + var s string + switch v.Type { + case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal: + s = fmt.Sprintf("%s", v.Type) + default: + value := v.BadValue + valueType := reflect.TypeOf(value) + if value == nil || valueType == nil { + value = "null" + } else if valueType.Kind() == reflect.Ptr { + if reflectValue := reflect.ValueOf(value); reflectValue.IsNil() { + value = "null" + } else { + value = reflectValue.Elem().Interface() + } + } + switch t := value.(type) { + case int64, int32, float64, float32, bool: + // use simple printer for simple types + s = fmt.Sprintf("%s: %v", v.Type, value) + case string: + s = fmt.Sprintf("%s: %q", v.Type, t) + case fmt.Stringer: + // anything that defines String() is better than raw struct + s = fmt.Sprintf("%s: %s", v.Type, t.String()) + default: + // fallback to raw struct + // TODO: internal types have panic guards against json.Marshalling to prevent + // accidental use of internal types in external serialized form. For now, use + // %#v, although it would be better to show a more expressive output in the future + s = fmt.Sprintf("%s: %#v", v.Type, value) + } + } + if len(v.Detail) != 0 { + s += fmt.Sprintf(": %s", v.Detail) + } + return s +} + +// ErrorType is a machine readable value providing more detail about why +// a field is invalid. These values are expected to match 1-1 with +// CauseType in api/types.go. +type ErrorType string + +// TODO: These values are duplicated in api/types.go, but there's a circular dep. Fix it. +const ( + // ErrorTypeNotFound is used to report failure to find a requested value + // (e.g. looking up an ID). See NotFound(). + ErrorTypeNotFound ErrorType = "FieldValueNotFound" + // ErrorTypeRequired is used to report required values that are not + // provided (e.g. empty strings, null values, or empty arrays). See + // Required(). + ErrorTypeRequired ErrorType = "FieldValueRequired" + // ErrorTypeDuplicate is used to report collisions of values that must be + // unique (e.g. unique IDs). See Duplicate(). + ErrorTypeDuplicate ErrorType = "FieldValueDuplicate" + // ErrorTypeInvalid is used to report malformed values (e.g. failed regex + // match, too long, out of bounds). See Invalid(). + ErrorTypeInvalid ErrorType = "FieldValueInvalid" + // ErrorTypeNotSupported is used to report unknown values for enumerated + // fields (e.g. a list of valid values). See NotSupported(). + ErrorTypeNotSupported ErrorType = "FieldValueNotSupported" + // ErrorTypeForbidden is used to report valid (as per formatting rules) + // values which would be accepted under some conditions, but which are not + // permitted by the current conditions (such as security policy). See + // Forbidden(). + ErrorTypeForbidden ErrorType = "FieldValueForbidden" + // ErrorTypeTooLong is used to report that the given value is too long. + // This is similar to ErrorTypeInvalid, but the error will not include the + // too-long value. See TooLong(). + ErrorTypeTooLong ErrorType = "FieldValueTooLong" + // ErrorTypeInternal is used to report other errors that are not related + // to user input. See InternalError(). + ErrorTypeInternal ErrorType = "InternalError" +) + +// String converts a ErrorType into its corresponding canonical error message. 
+func (t ErrorType) String() string { + switch t { + case ErrorTypeNotFound: + return "Not found" + case ErrorTypeRequired: + return "Required value" + case ErrorTypeDuplicate: + return "Duplicate value" + case ErrorTypeInvalid: + return "Invalid value" + case ErrorTypeNotSupported: + return "Unsupported value" + case ErrorTypeForbidden: + return "Forbidden" + case ErrorTypeTooLong: + return "Too long" + case ErrorTypeInternal: + return "Internal error" + default: + panic(fmt.Sprintf("unrecognized validation error: %q", string(t))) + } +} + +// NotFound returns a *Error indicating "value not found". This is +// used to report failure to find a requested value (e.g. looking up an ID). +func NotFound(field *Path, value interface{}) *Error { + return &Error{ErrorTypeNotFound, field.String(), value, ""} +} + +// Required returns a *Error indicating "value required". This is used +// to report required values that are not provided (e.g. empty strings, null +// values, or empty arrays). +func Required(field *Path, detail string) *Error { + return &Error{ErrorTypeRequired, field.String(), "", detail} +} + +// Duplicate returns a *Error indicating "duplicate value". This is +// used to report collisions of values that must be unique (e.g. names or IDs). +func Duplicate(field *Path, value interface{}) *Error { + return &Error{ErrorTypeDuplicate, field.String(), value, ""} +} + +// Invalid returns a *Error indicating "invalid value". This is used +// to report malformed values (e.g. failed regex match, too long, out of bounds). +func Invalid(field *Path, value interface{}, detail string) *Error { + return &Error{ErrorTypeInvalid, field.String(), value, detail} +} + +// NotSupported returns a *Error indicating "unsupported value". +// This is used to report unknown values for enumerated fields (e.g. a list of +// valid values). +func NotSupported(field *Path, value interface{}, validValues []string) *Error { + detail := "" + if validValues != nil && len(validValues) > 0 { + detail = "supported values: " + strings.Join(validValues, ", ") + } + return &Error{ErrorTypeNotSupported, field.String(), value, detail} +} + +// Forbidden returns a *Error indicating "forbidden". This is used to +// report valid (as per formatting rules) values which would be accepted under +// some conditions, but which are not permitted by current conditions (e.g. +// security policy). +func Forbidden(field *Path, detail string) *Error { + return &Error{ErrorTypeForbidden, field.String(), "", detail} +} + +// TooLong returns a *Error indicating "too long". This is used to +// report that the given value is too long. This is similar to +// Invalid, but the returned error will not include the too-long +// value. +func TooLong(field *Path, value interface{}, maxLength int) *Error { + return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d characters", maxLength)} +} + +// InternalError returns a *Error indicating "internal error". This is used +// to signal that an error was found that was not directly related to user +// input. The err argument must be non-nil. +func InternalError(field *Path, err error) *Error { + return &Error{ErrorTypeInternal, field.String(), nil, err.Error()} +} + +// ErrorList holds a set of Errors. It is plausible that we might one day have +// non-field errors in this same umbrella package, but for now we don't, so +// we can keep it simple and leave ErrorList here. 
+type ErrorList []*Error + +// NewErrorTypeMatcher returns an errors.Matcher that returns true +// if the provided error is a Error and has the provided ErrorType. +func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher { + return func(err error) bool { + if e, ok := err.(*Error); ok { + return e.Type == t + } + return false + } +} + +// ToAggregate converts the ErrorList into an errors.Aggregate. +func (list ErrorList) ToAggregate() utilerrors.Aggregate { + errs := make([]error, 0, len(list)) + errorMsgs := sets.NewString() + for _, err := range list { + msg := fmt.Sprintf("%v", err) + if errorMsgs.Has(msg) { + continue + } + errorMsgs.Insert(msg) + errs = append(errs, err) + } + return utilerrors.NewAggregate(errs) +} + +func fromAggregate(agg utilerrors.Aggregate) ErrorList { + errs := agg.Errors() + list := make(ErrorList, len(errs)) + for i := range errs { + list[i] = errs[i].(*Error) + } + return list +} + +// Filter removes items from the ErrorList that match the provided fns. +func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList { + err := utilerrors.FilterOut(list.ToAggregate(), fns...) + if err == nil { + return nil + } + // FilterOut takes an Aggregate and returns an Aggregate + return fromAggregate(err.(utilerrors.Aggregate)) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go new file mode 100644 index 000000000..2efc8eec7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go @@ -0,0 +1,91 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package field + +import ( + "bytes" + "fmt" + "strconv" +) + +// Path represents the path from some root to a particular field. +type Path struct { + name string // the name of this field or "" if this is an index + index string // if name == "", this is a subscript (index or map key) of the previous element + parent *Path // nil if this is the root element +} + +// NewPath creates a root Path object. +func NewPath(name string, moreNames ...string) *Path { + r := &Path{name: name, parent: nil} + for _, anotherName := range moreNames { + r = &Path{name: anotherName, parent: r} + } + return r +} + +// Root returns the root element of this Path. +func (p *Path) Root() *Path { + for ; p.parent != nil; p = p.parent { + // Do nothing. + } + return p +} + +// Child creates a new Path that is a child of the method receiver. +func (p *Path) Child(name string, moreNames ...string) *Path { + r := NewPath(name, moreNames...) + r.Root().parent = p + return r +} + +// Index indicates that the previous Path is to be subscripted by an int. +// This sets the same underlying value as Key. +func (p *Path) Index(index int) *Path { + return &Path{index: strconv.Itoa(index), parent: p} +} + +// Key indicates that the previous Path is to be subscripted by a string. +// This sets the same underlying value as Index. 
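// Illustrative sketch (not part of the vendored files): field.Path and the
// error constructors above combine into structured validation errors
// (vendored package k8s.io/apimachinery/pkg/util/validation/field).
package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
    spec := field.NewPath("spec")

    errs := field.ErrorList{
        field.Required(spec.Child("selector"), "a selector is required"),
        field.Invalid(spec.Child("containers").Index(0).Child("name"), "Bad_Name", "must be a DNS-1123 label"),
    }

    // e.g. "spec.selector: Required value: a selector is required"
    for _, e := range errs {
        fmt.Println(e)
    }
    fmt.Println(errs.ToAggregate())
}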
+func (p *Path) Key(key string) *Path { + return &Path{index: key, parent: p} +} + +// String produces a string representation of the Path. +func (p *Path) String() string { + // make a slice to iterate + elems := []*Path{} + for ; p != nil; p = p.parent { + elems = append(elems, p) + } + + // iterate, but it has to be backwards + buf := bytes.NewBuffer(nil) + for i := range elems { + p := elems[len(elems)-1-i] + if p.parent != nil && len(p.name) > 0 { + // This is either the root or it is a subscript. + buf.WriteString(".") + } + if len(p.name) > 0 { + buf.WriteString(p.name) + } else { + fmt.Fprintf(buf, "[%s]", p.index) + } + } + return buf.String() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go new file mode 100644 index 000000000..a0afc26e7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -0,0 +1,343 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "math" + "net" + "regexp" + "strings" +) + +const qnameCharFmt string = "[A-Za-z0-9]" +const qnameExtCharFmt string = "[-A-Za-z0-9_.]" +const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" + qnameCharFmt +const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" +const qualifiedNameMaxLength int = 63 + +var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$") + +// IsQualifiedName tests whether the value passed is what Kubernetes calls a +// "qualified name". This is a format used in various places throughout the +// system. If the value is not valid, a list of error strings is returned. +// Otherwise an empty list (or nil) is returned. +func IsQualifiedName(value string) []string { + var errs []string + parts := strings.Split(value, "/") + var name string + switch len(parts) { + case 1: + name = parts[0] + case 2: + var prefix string + prefix, name = parts[0], parts[1] + if len(prefix) == 0 { + errs = append(errs, "prefix part "+EmptyError()) + } else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 { + errs = append(errs, prefixEach(msgs, "prefix part ")...) + } + default: + return append(errs, "a qualified name "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")+ + " with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')") + } + + if len(name) == 0 { + errs = append(errs, "name part "+EmptyError()) + } else if len(name) > qualifiedNameMaxLength { + errs = append(errs, "name part "+MaxLenError(qualifiedNameMaxLength)) + } + if !qualifiedNameRegexp.MatchString(name) { + errs = append(errs, "name part "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")) + } + return errs +} + +const labelValueFmt string = "(" + qualifiedNameFmt + ")?" 
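// Illustrative sketch (not part of the vendored files): IsQualifiedName,
// shown above, validates the optional "prefix/name" format used for label and
// annotation keys. It returns a list of problems, empty (or nil) when valid.
package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/validation"
)

func main() {
    fmt.Println(validation.IsQualifiedName("example.com/my-key")) // []
    fmt.Println(validation.IsQualifiedName("-bad-"))              // must start/end alphanumeric
    fmt.Println(validation.IsQualifiedName("a/b/c"))              // too many '/' separators
}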
+const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" +const LabelValueMaxLength int = 63 + +var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$") + +// IsValidLabelValue tests whether the value passed is a valid label value. If +// the value is not valid, a list of error strings is returned. Otherwise an +// empty list (or nil) is returned. +func IsValidLabelValue(value string) []string { + var errs []string + if len(value) > LabelValueMaxLength { + errs = append(errs, MaxLenError(LabelValueMaxLength)) + } + if !labelValueRegexp.MatchString(value) { + errs = append(errs, RegexError(labelValueErrMsg, labelValueFmt, "MyValue", "my_value", "12345")) + } + return errs +} + +const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" +const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" +const DNS1123LabelMaxLength int = 63 + +var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$") + +// IsDNS1123Label tests for a string that conforms to the definition of a label in +// DNS (RFC 1123). +func IsDNS1123Label(value string) []string { + var errs []string + if len(value) > DNS1123LabelMaxLength { + errs = append(errs, MaxLenError(DNS1123LabelMaxLength)) + } + if !dns1123LabelRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) + } + return errs +} + +const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" +const dns1123SubdomainErrorMsg string = "a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" +const DNS1123SubdomainMaxLength int = 253 + +var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") + +// IsDNS1123Subdomain tests for a string that conforms to the definition of a +// subdomain in DNS (RFC 1123). +func IsDNS1123Subdomain(value string) []string { + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !dns1123SubdomainRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1123SubdomainErrorMsg, dns1123SubdomainFmt, "example.com")) + } + return errs +} + +const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" +const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" +const DNS1035LabelMaxLength int = 63 + +var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$") + +// IsDNS1035Label tests for a string that conforms to the definition of a label in +// DNS (RFC 1035). +func IsDNS1035Label(value string) []string { + var errs []string + if len(value) > DNS1035LabelMaxLength { + errs = append(errs, MaxLenError(DNS1035LabelMaxLength)) + } + if !dns1035LabelRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1035LabelErrMsg, dns1035LabelFmt, "my-name", "abc-123")) + } + return errs +} + +// wildcard definition - RFC 1034 section 4.3.3. +// examples: +// - valid: *.bar.com, *.foo.bar.com +// - invalid: *.*.bar.com, *.foo.*.com, *bar.com, f*.bar.com, * +const wildcardDNS1123SubdomainFmt = "\\*\\." 
+ dns1123SubdomainFmt +const wildcardDNS1123SubdomainErrMsg = "a wildcard DNS-1123 subdomain must start with '*.', followed by a valid DNS subdomain, which must consist of lower case alphanumeric characters, '-' or '.' and end with an alphanumeric character" + +// IsWildcardDNS1123Subdomain tests for a string that conforms to the definition of a +// wildcard subdomain in DNS (RFC 1034 section 4.3.3). +func IsWildcardDNS1123Subdomain(value string) []string { + wildcardDNS1123SubdomainRegexp := regexp.MustCompile("^" + wildcardDNS1123SubdomainFmt + "$") + + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !wildcardDNS1123SubdomainRegexp.MatchString(value) { + errs = append(errs, RegexError(wildcardDNS1123SubdomainErrMsg, wildcardDNS1123SubdomainFmt, "*.example.com")) + } + return errs +} + +const cIdentifierFmt string = "[A-Za-z_][A-Za-z0-9_]*" +const identifierErrMsg string = "a valid C identifier must start with alphabetic character or '_', followed by a string of alphanumeric characters or '_'" + +var cIdentifierRegexp = regexp.MustCompile("^" + cIdentifierFmt + "$") + +// IsCIdentifier tests for a string that conforms the definition of an identifier +// in C. This checks the format, but not the length. +func IsCIdentifier(value string) []string { + if !cIdentifierRegexp.MatchString(value) { + return []string{RegexError(identifierErrMsg, cIdentifierFmt, "my_name", "MY_NAME", "MyName")} + } + return nil +} + +// IsValidPortNum tests that the argument is a valid, non-zero port number. +func IsValidPortNum(port int) []string { + if 1 <= port && port <= 65535 { + return nil + } + return []string{InclusiveRangeError(1, 65535)} +} + +// Now in libcontainer UID/GID limits is 0 ~ 1<<31 - 1 +// TODO: once we have a type for UID/GID we should make these that type. +const ( + minUserID = 0 + maxUserID = math.MaxInt32 + minGroupID = 0 + maxGroupID = math.MaxInt32 +) + +// IsValidGroupId tests that the argument is a valid Unix GID. +func IsValidGroupId(gid int64) []string { + if minGroupID <= gid && gid <= maxGroupID { + return nil + } + return []string{InclusiveRangeError(minGroupID, maxGroupID)} +} + +// IsValidUserId tests that the argument is a valid Unix UID. +func IsValidUserId(uid int64) []string { + if minUserID <= uid && uid <= maxUserID { + return nil + } + return []string{InclusiveRangeError(minUserID, maxUserID)} +} + +var portNameCharsetRegex = regexp.MustCompile("^[-a-z0-9]+$") +var portNameOneLetterRegexp = regexp.MustCompile("[a-z]") + +// IsValidPortName check that the argument is valid syntax. It must be +// non-empty and no more than 15 characters long. It may contain only [-a-z0-9] +// and must contain at least one letter [a-z]. It must not start or end with a +// hyphen, nor contain adjacent hyphens. +// +// Note: We only allow lower-case characters, even though RFC 6335 is case +// insensitive. 
+func IsValidPortName(port string) []string { + var errs []string + if len(port) > 15 { + errs = append(errs, MaxLenError(15)) + } + if !portNameCharsetRegex.MatchString(port) { + errs = append(errs, "must contain only alpha-numeric characters (a-z, 0-9), and hyphens (-)") + } + if !portNameOneLetterRegexp.MatchString(port) { + errs = append(errs, "must contain at least one letter or number (a-z, 0-9)") + } + if strings.Contains(port, "--") { + errs = append(errs, "must not contain consecutive hyphens") + } + if len(port) > 0 && (port[0] == '-' || port[len(port)-1] == '-') { + errs = append(errs, "must not begin or end with a hyphen") + } + return errs +} + +// IsValidIP tests that the argument is a valid IP address. +func IsValidIP(value string) []string { + if net.ParseIP(value) == nil { + return []string{"must be a valid IP address, (e.g. 10.9.8.7)"} + } + return nil +} + +const percentFmt string = "[0-9]+%" +const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'" + +var percentRegexp = regexp.MustCompile("^" + percentFmt + "$") + +func IsValidPercent(percent string) []string { + if !percentRegexp.MatchString(percent) { + return []string{RegexError(percentErrMsg, percentFmt, "1%", "93%")} + } + return nil +} + +const httpHeaderNameFmt string = "[-A-Za-z0-9]+" +const httpHeaderNameErrMsg string = "a valid HTTP header must consist of alphanumeric characters or '-'" + +var httpHeaderNameRegexp = regexp.MustCompile("^" + httpHeaderNameFmt + "$") + +// IsHTTPHeaderName checks that a string conforms to the Go HTTP library's +// definition of a valid header field name (a stricter subset than RFC7230). +func IsHTTPHeaderName(value string) []string { + if !httpHeaderNameRegexp.MatchString(value) { + return []string{RegexError(httpHeaderNameErrMsg, httpHeaderNameFmt, "X-Header-Name")} + } + return nil +} + +const configMapKeyFmt = `[-._a-zA-Z0-9]+` +const configMapKeyErrMsg string = "a valid config key must consist of alphanumeric characters, '-', '_' or '.'" + +var configMapKeyRegexp = regexp.MustCompile("^" + configMapKeyFmt + "$") + +// IsConfigMapKey tests for a string that is a valid key for a ConfigMap or Secret +func IsConfigMapKey(value string) []string { + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !configMapKeyRegexp.MatchString(value) { + errs = append(errs, RegexError(configMapKeyErrMsg, configMapKeyFmt, "key.name", "KEY_NAME", "key-name")) + } + if value == "." { + errs = append(errs, `must not be '.'`) + } else if value == ".." { + errs = append(errs, `must not be '..'`) + } else if strings.HasPrefix(value, "..") { + errs = append(errs, `must not start with '..'`) + } + return errs +} + +// MaxLenError returns a string explanation of a "string too long" validation +// failure. +func MaxLenError(length int) string { + return fmt.Sprintf("must be no more than %d characters", length) +} + +// RegexError returns a string explanation of a regex validation failure. +func RegexError(msg string, fmt string, examples ...string) string { + if len(examples) == 0 { + return msg + " (regex used for validation is '" + fmt + "')" + } + msg += " (e.g. " + for i := range examples { + if i > 0 { + msg += " or " + } + msg += "'" + examples[i] + "', " + } + msg += "regex used for validation is '" + fmt + "')" + return msg +} + +// EmptyError returns a string explanation of a "must not be empty" validation +// failure. 
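// Illustrative sketch (not part of the vendored files): the simpler
// validators above follow the same convention - nil means valid, otherwise a
// slice of human-readable reasons.
package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/validation"
)

func main() {
    fmt.Println(validation.IsValidPortNum(8080))       // []
    fmt.Println(validation.IsValidPortNum(70000))      // must be between 1 and 65535, inclusive
    fmt.Println(validation.IsValidIP("10.9.8.7"))      // []
    fmt.Println(validation.IsValidPercent("93%"))      // []
    fmt.Println(validation.IsDNS1123Label("My_Node"))  // uppercase and '_' are rejected
}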
+func EmptyError() string { + return "must be non-empty" +} + +func prefixEach(msgs []string, prefix string) []string { + for i := range msgs { + msgs[i] = prefix + msgs[i] + } + return msgs +} + +// InclusiveRangeError returns a string explanation of a numeric "must be +// between" validation failure. +func InclusiveRangeError(lo, hi int) string { + return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go b/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go new file mode 100644 index 000000000..ff89dc170 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package wait provides tools for polling or listening for changes +// to a condition. +package wait diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go new file mode 100644 index 000000000..4704afd91 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -0,0 +1,332 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package wait + +import ( + "errors" + "math/rand" + "time" + + "k8s.io/apimachinery/pkg/util/runtime" +) + +// For any test of the style: +// ... +// <- time.After(timeout): +// t.Errorf("Timed out") +// The value for timeout should effectively be "forever." Obviously we don't want our tests to truly lock up forever, but 30s +// is long enough that it is effectively forever for the things that can slow down a run on a heavily contended machine +// (GC, seeks, etc), but not so long as to make a developer ctrl-c a test run if they do happen to break that test. +var ForeverTestTimeout = time.Second * 30 + +// NeverStop may be passed to Until to make it never stop. +var NeverStop <-chan struct{} = make(chan struct{}) + +// Forever calls f every period for ever. +// +// Forever is syntactic sugar on top of Until. +func Forever(f func(), period time.Duration) { + Until(f, period, NeverStop) +} + +// Until loops until stop channel is closed, running f every period. +// +// Until is syntactic sugar on top of JitterUntil with zero jitter factor and +// with sliding = true (which means the timer for period starts after the f +// completes). +func Until(f func(), period time.Duration, stopCh <-chan struct{}) { + JitterUntil(f, period, 0.0, true, stopCh) +} + +// NonSlidingUntil loops until stop channel is closed, running f every +// period. 
+// +// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter +// factor, with sliding = false (meaning the timer for period starts at the same +// time as the function starts). +func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) { + JitterUntil(f, period, 0.0, false, stopCh) +} + +// JitterUntil loops until stop channel is closed, running f every period. +// +// If jitterFactor is positive, the period is jittered before every run of f. +// If jitterFactor is not positive, the period is unchanged and not jittered. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. +// +// Close stopCh to stop. f may not be invoked if stop channel is already +// closed. Pass NeverStop if you don't want it to stop. +func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) { + for { + + select { + case <-stopCh: + return + default: + } + + jitteredPeriod := period + if jitterFactor > 0.0 { + jitteredPeriod = Jitter(period, jitterFactor) + } + + var t *time.Timer + if !sliding { + t = time.NewTimer(jitteredPeriod) + } + + func() { + defer runtime.HandleCrash() + f() + }() + + if sliding { + t = time.NewTimer(jitteredPeriod) + } + + // NOTE: b/c there is no priority selection in golang + // it is possible for this to race, meaning we could + // trigger t.C and stopCh, and t.C select falls through. + // In order to mitigate we re-check stopCh at the beginning + // of every loop to prevent extra executions of f(). + select { + case <-stopCh: + return + case <-t.C: + } + } +} + +// Jitter returns a time.Duration between duration and duration + maxFactor * +// duration. +// +// This allows clients to avoid converging on periodic behavior. If maxFactor +// is 0.0, a suggested default value will be chosen. +func Jitter(duration time.Duration, maxFactor float64) time.Duration { + if maxFactor <= 0.0 { + maxFactor = 1.0 + } + wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration)) + return wait +} + +// ErrWaitTimeout is returned when the condition exited without success. +var ErrWaitTimeout = errors.New("timed out waiting for the condition") + +// ConditionFunc returns true if the condition is satisfied, or an error +// if the loop should be aborted. +type ConditionFunc func() (done bool, err error) + +// Backoff holds parameters applied to a Backoff function. +type Backoff struct { + Duration time.Duration // the base duration + Factor float64 // Duration is multiplied by factor each iteration + Jitter float64 // The amount of jitter applied each iteration + Steps int // Exit with error after this many steps +} + +// ExponentialBackoff repeats a condition check with exponential backoff. +// +// It checks the condition up to Steps times, increasing the wait by multiplying +// the previous duration by Factor. +// +// If Jitter is greater than zero, a random amount of each duration is added +// (between duration and duration*(1+jitter)). +// +// If the condition never returns true, ErrWaitTimeout is returned. All other +// errors terminate immediately.
+func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { + duration := backoff.Duration + for i := 0; i < backoff.Steps; i++ { + if i != 0 { + adjusted := duration + if backoff.Jitter > 0.0 { + adjusted = Jitter(duration, backoff.Jitter) + } + time.Sleep(adjusted) + duration = time.Duration(float64(duration) * backoff.Factor) + } + if ok, err := condition(); err != nil || ok { + return err + } + } + return ErrWaitTimeout +} + +// Poll tries a condition func until it returns true, an error, or the timeout +// is reached. +// +// Poll always waits the interval before the run of 'condition'. +// 'condition' will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to Poll something forever, see PollInfinite. +func Poll(interval, timeout time.Duration, condition ConditionFunc) error { + return pollInternal(poller(interval, timeout), condition) +} + +func pollInternal(wait WaitFunc, condition ConditionFunc) error { + return WaitFor(wait, condition, NeverStop) +} + +// PollImmediate tries a condition func until it returns true, an error, or the timeout +// is reached. +// +// Poll always checks 'condition' before waiting for the interval. 'condition' +// will always be invoked at least once. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +// +// If you want to Poll something forever, see PollInfinite. +func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error { + return pollImmediateInternal(poller(interval, timeout), condition) +} + +func pollImmediateInternal(wait WaitFunc, condition ConditionFunc) error { + done, err := condition() + if err != nil { + return err + } + if done { + return nil + } + return pollInternal(wait, condition) +} + +// PollInfinite tries a condition func until it returns true or an error +// +// PollInfinite always waits the interval before the run of 'condition'. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +func PollInfinite(interval time.Duration, condition ConditionFunc) error { + done := make(chan struct{}) + defer close(done) + return PollUntil(interval, condition, done) +} + +// PollImmediateInfinite tries a condition func until it returns true or an error +// +// PollImmediateInfinite runs the 'condition' before waiting for the interval. +// +// Some intervals may be missed if the condition takes too long or the time +// window is too short. +func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error { + done, err := condition() + if err != nil { + return err + } + if done { + return nil + } + return PollInfinite(interval, condition) +} + +// PollUntil tries a condition func until it returns true, an error or stopCh is +// closed. +// +// PolUntil always waits interval before the first run of 'condition'. +// 'condition' will always be invoked at least once. +func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { + return WaitFor(poller(interval, 0), condition, stopCh) +} + +// WaitFunc creates a channel that receives an item every time a test +// should be executed and is closed when the last test should be invoked. +type WaitFunc func(done <-chan struct{}) <-chan struct{} + +// WaitFor continually checks 'fn' as driven by 'wait'. 
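As a rough illustration of the polling helpers defined above, here is a sketch of a caller; it assumes the package vendors as k8s.io/apimachinery/pkg/util/wait, as the diff header indicates, and is not part of this change:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	start := time.Now()

	// Poll waits interval between checks and gives up with ErrWaitTimeout after timeout.
	err := wait.Poll(100*time.Millisecond, 2*time.Second, func() (bool, error) {
		return time.Since(start) > 300*time.Millisecond, nil
	})
	fmt.Println("poll:", err) // nil once the condition reports done

	// ExponentialBackoff sleeps 10ms, 20ms, 40ms, ... between at most Steps attempts.
	backoff := wait.Backoff{Duration: 10 * time.Millisecond, Factor: 2.0, Steps: 4}
	err = wait.ExponentialBackoff(backoff, func() (bool, error) { return true, nil })
	fmt.Println("backoff:", err) // nil, the first attempt succeeded
}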
+// +// WaitFor gets a channel from 'wait()'', and then invokes 'fn' once for every value +// placed on the channel and once more when the channel is closed. +// +// If 'fn' returns an error the loop ends and that error is returned, and if +// 'fn' returns true the loop ends and nil is returned. +// +// ErrWaitTimeout will be returned if the channel is closed without fn ever +// returning true. +func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { + c := wait(done) + for { + _, open := <-c + ok, err := fn() + if err != nil { + return err + } + if ok { + return nil + } + if !open { + break + } + } + return ErrWaitTimeout +} + +// poller returns a WaitFunc that will send to the channel every interval until +// timeout has elapsed and then closes the channel. +// +// Over very short intervals you may receive no ticks before the channel is +// closed. A timeout of 0 is interpreted as an infinity. +// +// Output ticks are not buffered. If the channel is not ready to receive an +// item, the tick is skipped. +func poller(interval, timeout time.Duration) WaitFunc { + return WaitFunc(func(done <-chan struct{}) <-chan struct{} { + ch := make(chan struct{}) + + go func() { + defer close(ch) + + tick := time.NewTicker(interval) + defer tick.Stop() + + var after <-chan time.Time + if timeout != 0 { + // time.After is more convenient, but it + // potentially leaves timers around much longer + // than necessary if we exit early. + timer := time.NewTimer(timeout) + after = timer.C + defer timer.Stop() + } + + for { + select { + case <-tick.C: + // If the consumer isn't ready for this signal drop it and + // check the other channels. + select { + case ch <- struct{}{}: + default: + } + case <-after: + return + case <-done: + return + } + } + }() + + return ch + }) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go new file mode 100644 index 000000000..6ebfaea70 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go @@ -0,0 +1,346 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package yaml + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "strings" + "unicode" + + "github.com/ghodss/yaml" + "github.com/golang/glog" +) + +// ToJSON converts a single YAML document into a JSON document +// or returns an error. If the document appears to be JSON the +// YAML decoding path is not used (so that error messages are +// JSON specific). +func ToJSON(data []byte) ([]byte, error) { + if hasJSONPrefix(data) { + return data, nil + } + return yaml.YAMLToJSON(data) +} + +// YAMLToJSONDecoder decodes YAML documents from an io.Reader by +// separating individual documents. It first converts the YAML +// body to JSON, then unmarshals the JSON. 
+type YAMLToJSONDecoder struct { + reader Reader +} + +// NewYAMLToJSONDecoder decodes YAML documents from the provided +// stream in chunks by converting each document (as defined by +// the YAML spec) into its own chunk, converting it to JSON via +// yaml.YAMLToJSON, and then passing it to json.Decoder. +func NewYAMLToJSONDecoder(r io.Reader) *YAMLToJSONDecoder { + reader := bufio.NewReader(r) + return &YAMLToJSONDecoder{ + reader: NewYAMLReader(reader), + } +} + +// Decode reads a YAML document as JSON from the stream or returns +// an error. The decoding rules match json.Unmarshal, not +// yaml.Unmarshal. +func (d *YAMLToJSONDecoder) Decode(into interface{}) error { + bytes, err := d.reader.Read() + if err != nil && err != io.EOF { + return err + } + + if len(bytes) != 0 { + err := yaml.Unmarshal(bytes, into) + if err != nil { + return YAMLSyntaxError{err} + } + } + return err +} + +// YAMLDecoder reads chunks of objects and returns ErrShortBuffer if +// the data is not sufficient. +type YAMLDecoder struct { + r io.ReadCloser + scanner *bufio.Scanner + remaining []byte +} + +// NewDocumentDecoder decodes YAML documents from the provided +// stream in chunks by converting each document (as defined by +// the YAML spec) into its own chunk. io.ErrShortBuffer will be +// returned if the entire buffer could not be read to assist +// the caller in framing the chunk. +func NewDocumentDecoder(r io.ReadCloser) io.ReadCloser { + scanner := bufio.NewScanner(r) + scanner.Split(splitYAMLDocument) + return &YAMLDecoder{ + r: r, + scanner: scanner, + } +} + +// Read reads the previous slice into the buffer, or attempts to read +// the next chunk. +// TODO: switch to readline approach. +func (d *YAMLDecoder) Read(data []byte) (n int, err error) { + left := len(d.remaining) + if left == 0 { + // return the next chunk from the stream + if !d.scanner.Scan() { + err := d.scanner.Err() + if err == nil { + err = io.EOF + } + return 0, err + } + out := d.scanner.Bytes() + d.remaining = out + left = len(out) + } + + // fits within data + if left <= len(data) { + copy(data, d.remaining) + d.remaining = nil + return len(d.remaining), nil + } + + // caller will need to reread + copy(data, d.remaining[:left]) + d.remaining = d.remaining[left:] + return len(data), io.ErrShortBuffer +} + +func (d *YAMLDecoder) Close() error { + return d.r.Close() +} + +const yamlSeparator = "\n---" +const separator = "---" + +// splitYAMLDocument is a bufio.SplitFunc for splitting YAML streams into individual documents. +func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + sep := len([]byte(yamlSeparator)) + if i := bytes.Index(data, []byte(yamlSeparator)); i >= 0 { + // We have a potential document terminator + i += sep + after := data[i:] + if len(after) == 0 { + // we can't read any more characters + if atEOF { + return len(data), data[:len(data)-sep], nil + } + return 0, nil, nil + } + if j := bytes.IndexByte(after, '\n'); j >= 0 { + return i + j + 1, data[0 : i-sep], nil + } + return 0, nil, nil + } + // If we're at EOF, we have a final, non-terminated line. Return it. + if atEOF { + return len(data), data, nil + } + // Request more data. + return 0, nil, nil +} + +// decoder is a convenience interface for Decode. +type decoder interface { + Decode(into interface{}) error +} + +// YAMLOrJSONDecoder attempts to decode a stream of JSON documents or +// YAML documents by sniffing for a leading { character. 
+type YAMLOrJSONDecoder struct { + r io.Reader + bufferSize int + + decoder decoder + rawData []byte +} + +type JSONSyntaxError struct { + Line int + Err error +} + +func (e JSONSyntaxError) Error() string { + return fmt.Sprintf("json: line %d: %s", e.Line, e.Err.Error()) +} + +type YAMLSyntaxError struct { + err error +} + +func (e YAMLSyntaxError) Error() string { + return e.err.Error() +} + +// NewYAMLOrJSONDecoder returns a decoder that will process YAML documents +// or JSON documents from the given reader as a stream. bufferSize determines +// how far into the stream the decoder will look to figure out whether this +// is a JSON stream (has whitespace followed by an open brace). +func NewYAMLOrJSONDecoder(r io.Reader, bufferSize int) *YAMLOrJSONDecoder { + return &YAMLOrJSONDecoder{ + r: r, + bufferSize: bufferSize, + } +} + +// Decode unmarshals the next object from the underlying stream into the +// provide object, or returns an error. +func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { + if d.decoder == nil { + buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize) + if isJSON { + glog.V(4).Infof("decoding stream as JSON") + d.decoder = json.NewDecoder(buffer) + d.rawData = origData + } else { + glog.V(4).Infof("decoding stream as YAML") + d.decoder = NewYAMLToJSONDecoder(buffer) + } + } + err := d.decoder.Decode(into) + if jsonDecoder, ok := d.decoder.(*json.Decoder); ok { + if syntax, ok := err.(*json.SyntaxError); ok { + data, readErr := ioutil.ReadAll(jsonDecoder.Buffered()) + if readErr != nil { + glog.V(4).Infof("reading stream failed: %v", readErr) + } + js := string(data) + + // if contents from io.Reader are not complete, + // use the original raw data to prevent panic + if int64(len(js)) <= syntax.Offset { + js = string(d.rawData) + } + + start := strings.LastIndex(js[:syntax.Offset], "\n") + 1 + line := strings.Count(js[:start], "\n") + return JSONSyntaxError{ + Line: line, + Err: fmt.Errorf(syntax.Error()), + } + } + } + return err +} + +type Reader interface { + Read() ([]byte, error) +} + +type YAMLReader struct { + reader Reader +} + +func NewYAMLReader(r *bufio.Reader) *YAMLReader { + return &YAMLReader{ + reader: &LineReader{reader: r}, + } +} + +// Read returns a full YAML document. +func (r *YAMLReader) Read() ([]byte, error) { + var buffer bytes.Buffer + for { + line, err := r.reader.Read() + if err != nil && err != io.EOF { + return nil, err + } + + sep := len([]byte(separator)) + if i := bytes.Index(line, []byte(separator)); i == 0 { + // We have a potential document terminator + i += sep + after := line[i:] + if len(strings.TrimRightFunc(string(after), unicode.IsSpace)) == 0 { + if buffer.Len() != 0 { + return buffer.Bytes(), nil + } + if err == io.EOF { + return nil, err + } + } + } + if err == io.EOF { + if buffer.Len() != 0 { + // If we're at EOF, we have a final, non-terminated line. Return it. + return buffer.Bytes(), nil + } + return nil, err + } + buffer.Write(line) + } +} + +type LineReader struct { + reader *bufio.Reader +} + +// Read returns a single line (with '\n' ended) from the underlying reader. +// An error is returned iff there is an error with the underlying reader. 
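A short sketch of the decoder above in use, assuming the vendored import path from the diff header (k8s.io/apimachinery/pkg/util/yaml); the same call path handles a JSON body, since the sniffer only looks for a leading '{':

package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/util/yaml"
)

func main() {
	doc := "name: demo\nreplicas: 3\n"
	dec := yaml.NewYAMLOrJSONDecoder(strings.NewReader(doc), 4096)

	// Decode converts the YAML document to JSON internally, so json rules apply.
	var obj map[string]interface{}
	if err := dec.Decode(&obj); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(obj["name"], obj["replicas"]) // demo 3
}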
+func (r *LineReader) Read() ([]byte, error) { + var ( + isPrefix bool = true + err error = nil + line []byte + buffer bytes.Buffer + ) + + for isPrefix && err == nil { + line, isPrefix, err = r.reader.ReadLine() + buffer.Write(line) + } + buffer.WriteByte('\n') + return buffer.Bytes(), err +} + +// GuessJSONStream scans the provided reader up to size, looking +// for an open brace indicating this is JSON. It will return the +// bufio.Reader it creates for the consumer. +func GuessJSONStream(r io.Reader, size int) (io.Reader, []byte, bool) { + buffer := bufio.NewReaderSize(r, size) + b, _ := buffer.Peek(size) + return buffer, b, hasJSONPrefix(b) +} + +var jsonPrefix = []byte("{") + +// hasJSONPrefix returns true if the provided buffer appears to start with +// a JSON open brace. +func hasJSONPrefix(buf []byte) bool { + return hasPrefix(buf, jsonPrefix) +} + +// Return true if the first non-whitespace bytes in buf is +// prefix. +func hasPrefix(buf []byte, prefix []byte) bool { + trim := bytes.TrimLeftFunc(buf, unicode.IsSpace) + return bytes.HasPrefix(trim, prefix) +} diff --git a/vendor/k8s.io/apimachinery/pkg/version/doc.go b/vendor/k8s.io/apimachinery/pkg/version/doc.go new file mode 100644 index 000000000..00431d489 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/version/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package version supplies the type for version information collected at build time. +// +k8s:openapi-gen=true +package version diff --git a/vendor/k8s.io/apimachinery/pkg/version/types.go b/vendor/k8s.io/apimachinery/pkg/version/types.go new file mode 100644 index 000000000..72727b503 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/version/types.go @@ -0,0 +1,37 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +// Info contains versioning information. +// TODO: Add []string of api versions supported? It's still unclear +// how we'll want to distribute that information. +type Info struct { + Major string `json:"major"` + Minor string `json:"minor"` + GitVersion string `json:"gitVersion"` + GitCommit string `json:"gitCommit"` + GitTreeState string `json:"gitTreeState"` + BuildDate string `json:"buildDate"` + GoVersion string `json:"goVersion"` + Compiler string `json:"compiler"` + Platform string `json:"platform"` +} + +// String returns info as a human-friendly version string. 
+func (info Info) String() string { + return info.GitVersion +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/doc.go b/vendor/k8s.io/apimachinery/pkg/watch/doc.go new file mode 100644 index 000000000..5fde5e742 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package watch contains a generic watchable interface, and a fake for +// testing code that uses the watch interface. +package watch diff --git a/vendor/k8s.io/kubernetes/pkg/watch/filter.go b/vendor/k8s.io/apimachinery/pkg/watch/filter.go similarity index 97% rename from vendor/k8s.io/kubernetes/pkg/watch/filter.go rename to vendor/k8s.io/apimachinery/pkg/watch/filter.go index 1eff5b949..3ca27f22c 100644 --- a/vendor/k8s.io/kubernetes/pkg/watch/filter.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/filter.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/kubernetes/pkg/watch/mux.go b/vendor/k8s.io/apimachinery/pkg/watch/mux.go similarity index 96% rename from vendor/k8s.io/kubernetes/pkg/watch/mux.go rename to vendor/k8s.io/apimachinery/pkg/watch/mux.go index 700c26bca..fafccd78e 100644 --- a/vendor/k8s.io/kubernetes/pkg/watch/mux.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/mux.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,8 +19,8 @@ package watch import ( "sync" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" ) // FullChannelBehavior controls how the Broadcaster reacts if a watcher's watch @@ -81,8 +81,8 @@ const internalRunFunctionMarker = "internal-do-function" // a function type we can shoehorn into the queue. type functionFakeRuntimeObject func() -func (obj functionFakeRuntimeObject) GetObjectKind() unversioned.ObjectKind { - return unversioned.EmptyObjectKind +func (obj functionFakeRuntimeObject) GetObjectKind() schema.ObjectKind { + return schema.EmptyObjectKind } // Execute f, blocking the incoming queue (and waiting for it to drain first). diff --git a/vendor/k8s.io/kubernetes/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go similarity index 94% rename from vendor/k8s.io/kubernetes/pkg/watch/streamwatcher.go rename to vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go index 2802a9e01..93bb1cdf7 100644 --- a/vendor/k8s.io/kubernetes/pkg/watch/streamwatcher.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,9 +21,9 @@ import ( "sync" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/net" - utilruntime "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/net" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) // Decoder allows StreamWatcher to watch any stream for which a Decoder can be written. diff --git a/vendor/k8s.io/kubernetes/pkg/watch/until.go b/vendor/k8s.io/apimachinery/pkg/watch/until.go similarity index 86% rename from vendor/k8s.io/kubernetes/pkg/watch/until.go rename to vendor/k8s.io/apimachinery/pkg/watch/until.go index 9f34f9d00..6e139de59 100644 --- a/vendor/k8s.io/kubernetes/pkg/watch/until.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/until.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,9 +17,10 @@ limitations under the License. package watch import ( + "errors" "time" - "k8s.io/kubernetes/pkg/util/wait" + "k8s.io/apimachinery/pkg/util/wait" ) // ConditionFunc returns true if the condition has been reached, false if it has not been reached yet, @@ -28,10 +29,14 @@ import ( // from false to true). type ConditionFunc func(event Event) (bool, error) +// errWatchClosed is returned when the watch channel is closed before timeout in Until. +var errWatchClosed = errors.New("watch closed before Until timeout") + // Until reads items from the watch until each provided condition succeeds, and then returns the last watch // encountered. The first condition that returns an error terminates the watch (and the event is also returned). // If no event has been received, the returned event will be nil. // Conditions are satisfied sequentially so as to provide a useful primitive for higher level composition. +// A zero timeout means to wait forever. func Until(timeout time.Duration, watcher Interface, conditions ...ConditionFunc) (*Event, error) { ch := watcher.ResultChan() defer watcher.Stop() @@ -40,7 +45,7 @@ func Until(timeout time.Duration, watcher Interface, conditions ...ConditionFunc after = time.After(timeout) } else { ch := make(chan time.Time) - close(ch) + defer close(ch) after = ch } var lastEvent *Event @@ -52,7 +57,7 @@ func Until(timeout time.Duration, watcher Interface, conditions ...ConditionFunc return lastEvent, err } if done { - break + continue } } ConditionSucceeded: @@ -60,7 +65,7 @@ func Until(timeout time.Duration, watcher Interface, conditions ...ConditionFunc select { case event, ok := <-ch: if !ok { - return lastEvent, wait.ErrWaitTimeout + return lastEvent, errWatchClosed } lastEvent = &event diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go new file mode 100644 index 000000000..dd49c41f9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -0,0 +1,269 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "fmt" + "sync" + + "k8s.io/apimachinery/pkg/runtime" + + "github.com/golang/glog" +) + +// Interface can be implemented by anything that knows how to watch and report changes. +type Interface interface { + // Stops watching. Will close the channel returned by ResultChan(). Releases + // any resources used by the watch. + Stop() + + // Returns a chan which will receive all the events. If an error occurs + // or Stop() is called, this channel will be closed, in which case the + // watch should be completely cleaned up. + ResultChan() <-chan Event +} + +// EventType defines the possible types of events. +type EventType string + +const ( + Added EventType = "ADDED" + Modified EventType = "MODIFIED" + Deleted EventType = "DELETED" + Error EventType = "ERROR" + + DefaultChanSize int32 = 100 +) + +// Event represents a single event to a watched resource. +type Event struct { + Type EventType + + // Object is: + // * If Type is Added or Modified: the new state of the object. + // * If Type is Deleted: the state of the object immediately before deletion. + // * If Type is Error: *api.Status is recommended; other types may make sense + // depending on context. + Object runtime.Object +} + +type emptyWatch chan Event + +// NewEmptyWatch returns a watch interface that returns no results and is closed. +// May be used in certain error conditions where no information is available but +// an error is not warranted. +func NewEmptyWatch() Interface { + ch := make(chan Event) + close(ch) + return emptyWatch(ch) +} + +// Stop implements Interface +func (w emptyWatch) Stop() { +} + +// ResultChan implements Interface +func (w emptyWatch) ResultChan() <-chan Event { + return chan Event(w) +} + +// FakeWatcher lets you test anything that consumes a watch.Interface; threadsafe. +type FakeWatcher struct { + result chan Event + Stopped bool + sync.Mutex +} + +func NewFake() *FakeWatcher { + return &FakeWatcher{ + result: make(chan Event), + } +} + +func NewFakeWithChanSize(size int, blocking bool) *FakeWatcher { + return &FakeWatcher{ + result: make(chan Event, size), + } +} + +// Stop implements Interface.Stop(). +func (f *FakeWatcher) Stop() { + f.Lock() + defer f.Unlock() + if !f.Stopped { + glog.V(4).Infof("Stopping fake watcher.") + close(f.result) + f.Stopped = true + } +} + +func (f *FakeWatcher) IsStopped() bool { + f.Lock() + defer f.Unlock() + return f.Stopped +} + +// Reset prepares the watcher to be reused. +func (f *FakeWatcher) Reset() { + f.Lock() + defer f.Unlock() + f.Stopped = false + f.result = make(chan Event) +} + +func (f *FakeWatcher) ResultChan() <-chan Event { + return f.result +} + +// Add sends an add event. +func (f *FakeWatcher) Add(obj runtime.Object) { + f.result <- Event{Added, obj} +} + +// Modify sends a modify event. +func (f *FakeWatcher) Modify(obj runtime.Object) { + f.result <- Event{Modified, obj} +} + +// Delete sends a delete event. +func (f *FakeWatcher) Delete(lastValue runtime.Object) { + f.result <- Event{Deleted, lastValue} +} + +// Error sends an Error event. 
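To make the event flow concrete, here is a minimal consumer of the FakeWatcher defined in this file (a sketch only; it assumes the vendored path k8s.io/apimachinery/pkg/watch and passes nil where a real test would pass a runtime.Object):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	w := watch.NewFake()

	// FakeWatcher sends on an unbuffered channel, so produce events from a goroutine.
	go func() {
		w.Add(nil) // a real test would pass a runtime.Object here
		w.Stop()   // closes the result channel and ends the range below
	}()

	for event := range w.ResultChan() {
		fmt.Println("event:", event.Type) // ADDED
	}
}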
+func (f *FakeWatcher) Error(errValue runtime.Object) { + f.result <- Event{Error, errValue} +} + +// Action sends an event of the requested type, for table-based testing. +func (f *FakeWatcher) Action(action EventType, obj runtime.Object) { + f.result <- Event{action, obj} +} + +// RaceFreeFakeWatcher lets you test anything that consumes a watch.Interface; threadsafe. +type RaceFreeFakeWatcher struct { + result chan Event + Stopped bool + sync.Mutex +} + +func NewRaceFreeFake() *RaceFreeFakeWatcher { + return &RaceFreeFakeWatcher{ + result: make(chan Event, DefaultChanSize), + } +} + +// Stop implements Interface.Stop(). +func (f *RaceFreeFakeWatcher) Stop() { + f.Lock() + defer f.Unlock() + if !f.Stopped { + glog.V(4).Infof("Stopping fake watcher.") + close(f.result) + f.Stopped = true + } +} + +func (f *RaceFreeFakeWatcher) IsStopped() bool { + f.Lock() + defer f.Unlock() + return f.Stopped +} + +// Reset prepares the watcher to be reused. +func (f *RaceFreeFakeWatcher) Reset() { + f.Lock() + defer f.Unlock() + f.Stopped = false + f.result = make(chan Event, DefaultChanSize) +} + +func (f *RaceFreeFakeWatcher) ResultChan() <-chan Event { + f.Lock() + defer f.Unlock() + return f.result +} + +// Add sends an add event. +func (f *RaceFreeFakeWatcher) Add(obj runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{Added, obj}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// Modify sends a modify event. +func (f *RaceFreeFakeWatcher) Modify(obj runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{Modified, obj}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// Delete sends a delete event. +func (f *RaceFreeFakeWatcher) Delete(lastValue runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{Deleted, lastValue}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// Error sends an Error event. +func (f *RaceFreeFakeWatcher) Error(errValue runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{Error, errValue}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// Action sends an event of the requested type, for table-based testing. +func (f *RaceFreeFakeWatcher) Action(action EventType, obj runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{action, obj}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} diff --git a/vendor/k8s.io/kubernetes/third_party/forked/reflect/deep_equal.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go similarity index 100% rename from vendor/k8s.io/kubernetes/third_party/forked/reflect/deep_equal.go rename to vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go new file mode 100644 index 000000000..67957ee33 --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/type.go @@ -0,0 +1,91 @@ +//This package is copied from Go library reflect/type.go. 
+//The struct tag library provides no way to extract the list of struct tags, only +//a specific tag +package reflect + +import ( + "fmt" + + "strconv" + "strings" +) + +type StructTag struct { + Name string + Value string +} + +func (t StructTag) String() string { + return fmt.Sprintf("%s:%q", t.Name, t.Value) +} + +type StructTags []StructTag + +func (tags StructTags) String() string { + s := make([]string, 0, len(tags)) + for _, tag := range tags { + s = append(s, tag.String()) + } + return "`" + strings.Join(s, " ") + "`" +} + +func (tags StructTags) Has(name string) bool { + for i := range tags { + if tags[i].Name == name { + return true + } + } + return false +} + +// ParseStructTags returns the full set of fields in a struct tag in the order they appear in +// the struct tag. +func ParseStructTags(tag string) (StructTags, error) { + tags := StructTags{} + for tag != "" { + // Skip leading space. + i := 0 + for i < len(tag) && tag[i] == ' ' { + i++ + } + tag = tag[i:] + if tag == "" { + break + } + + // Scan to colon. A space, a quote or a control character is a syntax error. + // Strictly speaking, control chars include the range [0x7f, 0x9f], not just + // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters + // as it is simpler to inspect the tag's bytes than the tag's runes. + i = 0 + for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f { + i++ + } + if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' { + break + } + name := string(tag[:i]) + tag = tag[i+1:] + + // Scan quoted string to find value. + i = 1 + for i < len(tag) && tag[i] != '"' { + if tag[i] == '\\' { + i++ + } + i++ + } + if i >= len(tag) { + break + } + qvalue := string(tag[:i+1]) + tag = tag[i+1:] + + value, err := strconv.Unquote(qvalue) + if err != nil { + return nil, err + } + tags = append(tags, StructTag{Name: name, Value: value}) + } + return tags, nil +} diff --git a/vendor/k8s.io/client-go/LICENSE b/vendor/k8s.io/client-go/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/k8s.io/client-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go new file mode 100644 index 000000000..ff4c57a4f --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -0,0 +1,439 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package discovery + +import ( + "encoding/json" + "fmt" + "net/url" + "sort" + "strings" + + "github.com/emicklei/go-restful/swagger" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/api/v1" + restclient "k8s.io/client-go/rest" +) + +// defaultRetries is the number of times a resource discovery is repeated if an api group disappears on the fly (e.g. ThirdPartyResources). +const defaultRetries = 2 + +// DiscoveryInterface holds the methods that discover server-supported API groups, +// versions and resources. +type DiscoveryInterface interface { + RESTClient() restclient.Interface + ServerGroupsInterface + ServerResourcesInterface + ServerVersionInterface + SwaggerSchemaInterface +} + +// CachedDiscoveryInterface is a DiscoveryInterface with cache invalidation and freshness. +type CachedDiscoveryInterface interface { + DiscoveryInterface + // Fresh returns true if no cached data was used that had been retrieved before the instantiation. + Fresh() bool + // Invalidate enforces that no cached data is used in the future that is older than the current time. + Invalidate() +} + +// ServerGroupsInterface has methods for obtaining supported groups on the API server +type ServerGroupsInterface interface { + // ServerGroups returns the supported groups, with information like supported versions and the + // preferred version. + ServerGroups() (*metav1.APIGroupList, error) +} + +// ServerResourcesInterface has methods for obtaining supported resources on the API server +type ServerResourcesInterface interface { + // ServerResourcesForGroupVersion returns the supported resources for a group and version. + ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) + // ServerResources returns the supported resources for all groups and versions. + ServerResources() ([]*metav1.APIResourceList, error) + // ServerPreferredResources returns the supported resources with the version preferred by the + // server. + ServerPreferredResources() ([]*metav1.APIResourceList, error) + // ServerPreferredNamespacedResources returns the supported namespaced resources with the + // version preferred by the server. + ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) +} + +// ServerVersionInterface has a method for retrieving the server's version. +type ServerVersionInterface interface { + // ServerVersion retrieves and parses the server's version (git version). + ServerVersion() (*version.Info, error) +} + +// SwaggerSchemaInterface has a method to retrieve the swagger schema. +type SwaggerSchemaInterface interface { + // SwaggerSchema retrieves and parses the swagger API schema the server supports. + SwaggerSchema(version schema.GroupVersion) (*swagger.ApiDeclaration, error) +} + +// DiscoveryClient implements the functions that discover server-supported API groups, +// versions and resources. +type DiscoveryClient struct { + restClient restclient.Interface + + LegacyPrefix string +} + +// Convert metav1.APIVersions to metav1.APIGroup. APIVersions is used by legacy v1, so +// group would be "". 
+func apiVersionsToAPIGroup(apiVersions *metav1.APIVersions) (apiGroup metav1.APIGroup) { + groupVersions := []metav1.GroupVersionForDiscovery{} + for _, version := range apiVersions.Versions { + groupVersion := metav1.GroupVersionForDiscovery{ + GroupVersion: version, + Version: version, + } + groupVersions = append(groupVersions, groupVersion) + } + apiGroup.Versions = groupVersions + // There should be only one groupVersion returned at /api + apiGroup.PreferredVersion = groupVersions[0] + return +} + +// ServerGroups returns the supported groups, with information like supported versions and the +// preferred version. +func (d *DiscoveryClient) ServerGroups() (apiGroupList *metav1.APIGroupList, err error) { + // Get the groupVersions exposed at /api + v := &metav1.APIVersions{} + err = d.restClient.Get().AbsPath(d.LegacyPrefix).Do().Into(v) + apiGroup := metav1.APIGroup{} + if err == nil && len(v.Versions) != 0 { + apiGroup = apiVersionsToAPIGroup(v) + } + if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) { + return nil, err + } + + // Get the groupVersions exposed at /apis + apiGroupList = &metav1.APIGroupList{} + err = d.restClient.Get().AbsPath("/apis").Do().Into(apiGroupList) + if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) { + return nil, err + } + // to be compatible with a v1.0 server, if it's a 403 or 404, ignore and return whatever we got from /api + if err != nil && (errors.IsNotFound(err) || errors.IsForbidden(err)) { + apiGroupList = &metav1.APIGroupList{} + } + + // append the group retrieved from /api to the list if not empty + if len(v.Versions) != 0 { + apiGroupList.Groups = append(apiGroupList.Groups, apiGroup) + } + return apiGroupList, nil +} + +// ServerResourcesForGroupVersion returns the supported resources for a group and version. +func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (resources *metav1.APIResourceList, err error) { + url := url.URL{} + if len(groupVersion) == 0 { + return nil, fmt.Errorf("groupVersion shouldn't be empty") + } + if len(d.LegacyPrefix) > 0 && groupVersion == "v1" { + url.Path = d.LegacyPrefix + "/" + groupVersion + } else { + url.Path = "/apis/" + groupVersion + } + resources = &metav1.APIResourceList{ + GroupVersion: groupVersion, + } + err = d.restClient.Get().AbsPath(url.String()).Do().Into(resources) + if err != nil { + // ignore 403 or 404 error to be compatible with an v1.0 server. + if groupVersion == "v1" && (errors.IsNotFound(err) || errors.IsForbidden(err)) { + return resources, nil + } + return nil, err + } + return resources, nil +} + +// serverResources returns the supported resources for all groups and versions. 
+func (d *DiscoveryClient) serverResources(failEarly bool) ([]*metav1.APIResourceList, error) { + apiGroups, err := d.ServerGroups() + if err != nil { + return nil, err + } + + result := []*metav1.APIResourceList{} + failedGroups := make(map[schema.GroupVersion]error) + + for _, apiGroup := range apiGroups.Groups { + for _, version := range apiGroup.Versions { + gv := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} + resources, err := d.ServerResourcesForGroupVersion(version.GroupVersion) + if err != nil { + // TODO: maybe restrict this to NotFound errors + failedGroups[gv] = err + if failEarly { + return nil, &ErrGroupDiscoveryFailed{Groups: failedGroups} + } + continue + } + + result = append(result, resources) + } + } + + if len(failedGroups) == 0 { + return result, nil + } + + return result, &ErrGroupDiscoveryFailed{Groups: failedGroups} +} + +// ServerResources returns the supported resources for all groups and versions. +func (d *DiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) { + return withRetries(defaultRetries, d.serverResources) +} + +// ErrGroupDiscoveryFailed is returned if one or more API groups fail to load. +type ErrGroupDiscoveryFailed struct { + // Groups is a list of the groups that failed to load and the error cause + Groups map[schema.GroupVersion]error +} + +// Error implements the error interface +func (e *ErrGroupDiscoveryFailed) Error() string { + var groups []string + for k, v := range e.Groups { + groups = append(groups, fmt.Sprintf("%s: %v", k, v)) + } + sort.Strings(groups) + return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(groups, ", ")) +} + +// IsGroupDiscoveryFailedError returns true if the provided error indicates the server was unable to discover +// a complete list of APIs for the client to use. +func IsGroupDiscoveryFailedError(err error) bool { + _, ok := err.(*ErrGroupDiscoveryFailed) + return err != nil && ok +} + +// serverPreferredResources returns the supported resources with the version preferred by the server. 
+func (d *DiscoveryClient) serverPreferredResources(failEarly bool) ([]*metav1.APIResourceList, error) { + serverGroupList, err := d.ServerGroups() + if err != nil { + return nil, err + } + + result := []*metav1.APIResourceList{} + failedGroups := make(map[schema.GroupVersion]error) + + grVersions := map[schema.GroupResource]string{} // selected version of a GroupResource + grApiResources := map[schema.GroupResource]*metav1.APIResource{} // selected APIResource for a GroupResource + gvApiResourceLists := map[schema.GroupVersion]*metav1.APIResourceList{} // blueprint for a APIResourceList for later grouping + + for _, apiGroup := range serverGroupList.Groups { + for _, version := range apiGroup.Versions { + groupVersion := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} + apiResourceList, err := d.ServerResourcesForGroupVersion(version.GroupVersion) + if err != nil { + // TODO: maybe restrict this to NotFound errors + failedGroups[groupVersion] = err + if failEarly { + return nil, &ErrGroupDiscoveryFailed{Groups: failedGroups} + } + continue + } + + // create empty list which is filled later in another loop + emptyApiResourceList := metav1.APIResourceList{ + GroupVersion: version.GroupVersion, + } + gvApiResourceLists[groupVersion] = &emptyApiResourceList + result = append(result, &emptyApiResourceList) + + for i := range apiResourceList.APIResources { + apiResource := &apiResourceList.APIResources[i] + if strings.Contains(apiResource.Name, "/") { + continue + } + gv := schema.GroupResource{Group: apiGroup.Name, Resource: apiResource.Name} + if _, ok := grApiResources[gv]; ok && version.Version != apiGroup.PreferredVersion.Version { + // only override with preferred version + continue + } + grVersions[gv] = version.Version + grApiResources[gv] = apiResource + } + } + } + + // group selected APIResources according to GroupVersion into APIResourceLists + for groupResource, apiResource := range grApiResources { + version := grVersions[groupResource] + groupVersion := schema.GroupVersion{Group: groupResource.Group, Version: version} + apiResourceList := gvApiResourceLists[groupVersion] + apiResourceList.APIResources = append(apiResourceList.APIResources, *apiResource) + } + + if len(failedGroups) == 0 { + return result, nil + } + + return result, &ErrGroupDiscoveryFailed{Groups: failedGroups} +} + +// ServerPreferredResources returns the supported resources with the version preferred by the +// server. +func (d *DiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { + return withRetries(defaultRetries, func(retryEarly bool) ([]*metav1.APIResourceList, error) { + return d.serverPreferredResources(retryEarly) + }) +} + +// ServerPreferredNamespacedResources returns the supported namespaced resources with the +// version preferred by the server. +func (d *DiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { + all, err := d.ServerPreferredResources() + return FilteredBy(ResourcePredicateFunc(func(groupVersion string, r *metav1.APIResource) bool { + return r.Namespaced + }), all), err +} + +// ServerVersion retrieves and parses the server's version (git version). 
+func (d *DiscoveryClient) ServerVersion() (*version.Info, error) { + body, err := d.restClient.Get().AbsPath("/version").Do().Raw() + if err != nil { + return nil, err + } + var info version.Info + err = json.Unmarshal(body, &info) + if err != nil { + return nil, fmt.Errorf("got '%s': %v", string(body), err) + } + return &info, nil +} + +// SwaggerSchema retrieves and parses the swagger API schema the server supports. +func (d *DiscoveryClient) SwaggerSchema(version schema.GroupVersion) (*swagger.ApiDeclaration, error) { + if version.Empty() { + return nil, fmt.Errorf("groupVersion cannot be empty") + } + + groupList, err := d.ServerGroups() + if err != nil { + return nil, err + } + groupVersions := metav1.ExtractGroupVersions(groupList) + // This check also takes care the case that kubectl is newer than the running endpoint + if stringDoesntExistIn(version.String(), groupVersions) { + return nil, fmt.Errorf("API version: %v is not supported by the server. Use one of: %v", version, groupVersions) + } + var path string + if len(d.LegacyPrefix) > 0 && version == v1.SchemeGroupVersion { + path = "/swaggerapi" + d.LegacyPrefix + "/" + version.Version + } else { + path = "/swaggerapi/apis/" + version.Group + "/" + version.Version + } + + body, err := d.restClient.Get().AbsPath(path).Do().Raw() + if err != nil { + return nil, err + } + var schema swagger.ApiDeclaration + err = json.Unmarshal(body, &schema) + if err != nil { + return nil, fmt.Errorf("got '%s': %v", string(body), err) + } + return &schema, nil +} + +// withRetries retries the given recovery function in case the groups supported by the server change after ServerGroup() returns. +func withRetries(maxRetries int, f func(failEarly bool) ([]*metav1.APIResourceList, error)) ([]*metav1.APIResourceList, error) { + var result []*metav1.APIResourceList + var err error + for i := 0; i < maxRetries; i++ { + failEarly := i < maxRetries-1 + result, err = f(failEarly) + if err == nil { + return result, nil + } + if _, ok := err.(*ErrGroupDiscoveryFailed); !ok { + return nil, err + } + } + return result, err +} + +func setDiscoveryDefaults(config *restclient.Config) error { + config.APIPath = "" + config.GroupVersion = nil + codec := runtime.NoopEncoder{Decoder: api.Codecs.UniversalDecoder()} + config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec}) + if len(config.UserAgent) == 0 { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + return nil +} + +// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. This client +// can be used to discover supported resources in the API server. +func NewDiscoveryClientForConfig(c *restclient.Config) (*DiscoveryClient, error) { + config := *c + if err := setDiscoveryDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.UnversionedRESTClientFor(&config) + return &DiscoveryClient{restClient: client, LegacyPrefix: "/api"}, err +} + +// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. If +// there is an error, it panics. +func NewDiscoveryClientForConfigOrDie(c *restclient.Config) *DiscoveryClient { + client, err := NewDiscoveryClientForConfig(c) + if err != nil { + panic(err) + } + return client + +} + +// New creates a new DiscoveryClient for the given RESTClient. 
+func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient { + return &DiscoveryClient{restClient: c, LegacyPrefix: "/api"} +} + +func stringDoesntExistIn(str string, slice []string) bool { + for _, s := range slice { + if s == str { + return false + } + } + return true +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *DiscoveryClient) RESTClient() restclient.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/discovery/helper.go b/vendor/k8s.io/client-go/discovery/helper.go new file mode 100644 index 000000000..f184bc929 --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/helper.go @@ -0,0 +1,162 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + apimachineryversion "k8s.io/apimachinery/pkg/version" +) + +// MatchesServerVersion queries the server to compares the build version +// (git hash) of the client with the server's build version. It returns an error +// if it failed to contact the server or if the versions are not an exact match. +func MatchesServerVersion(clientVersion apimachineryversion.Info, client DiscoveryInterface) error { + sVer, err := client.ServerVersion() + if err != nil { + return fmt.Errorf("couldn't read version from server: %v\n", err) + } + // GitVersion includes GitCommit and GitTreeState, but best to be safe? + if clientVersion.GitVersion != sVer.GitVersion || clientVersion.GitCommit != sVer.GitCommit || clientVersion.GitTreeState != sVer.GitTreeState { + return fmt.Errorf("server version (%#v) differs from client version (%#v)!\n", sVer, clientVersion) + } + + return nil +} + +// NegotiateVersion queries the server's supported api versions to find +// a version that both client and server support. +// - If no version is provided, try registered client versions in order of +// preference. +// - If version is provided and the server does not support it, +// return an error. +// TODO negotiation should be reserved for cases where we need a version for a given group. In those cases, it should return an ordered list of +// server preferences. From that list, a separate function can match from an ordered list of client versions. +// This is not what the function has ever done before, but it makes more logical sense. +func NegotiateVersion(client DiscoveryInterface, requiredGV *schema.GroupVersion, clientRegisteredGVs []schema.GroupVersion) (*schema.GroupVersion, error) { + clientVersions := sets.String{} + for _, gv := range clientRegisteredGVs { + clientVersions.Insert(gv.String()) + } + groups, err := client.ServerGroups() + if err != nil { + // This is almost always a connection error, and higher level code should treat this as a generic error, + // not a negotiation specific error. 
+ return nil, err + } + versions := metav1.ExtractGroupVersions(groups) + serverVersions := sets.String{} + for _, v := range versions { + serverVersions.Insert(v) + } + + // If version explicitly requested verify that both client and server support it. + // If server does not support warn, but try to negotiate a lower version. + if requiredGV != nil { + if !clientVersions.Has(requiredGV.String()) { + return nil, fmt.Errorf("client does not support API version %q; client supported API versions: %v", requiredGV, clientVersions) + + } + // If the server supports no versions, then we should just use the preferredGV + // This can happen because discovery fails due to 403 Forbidden errors + if len(serverVersions) == 0 { + return requiredGV, nil + } + if serverVersions.Has(requiredGV.String()) { + return requiredGV, nil + } + // If we are using an explicit config version the server does not support, fail. + return nil, fmt.Errorf("server does not support API version %q", requiredGV) + } + + for _, clientGV := range clientRegisteredGVs { + if serverVersions.Has(clientGV.String()) { + // Version was not explicitly requested in command config (--api-version). + // Ok to fall back to a supported version with a warning. + // TODO: caesarxuchao: enable the warning message when we have + // proper fix. Please refer to issue #14895. + // if len(version) != 0 { + // glog.Warningf("Server does not support API version '%s'. Falling back to '%s'.", version, clientVersion) + // } + t := clientGV + return &t, nil + } + } + + // if we have no server versions and we have no required version, choose the first clientRegisteredVersion + if len(serverVersions) == 0 && len(clientRegisteredGVs) > 0 { + return &clientRegisteredGVs[0], nil + } + + // fall back to an empty GroupVersion. Most client commands no longer respect a GroupVersion anyway + return &schema.GroupVersion{}, nil +} + +// GroupVersionResources converts APIResourceLists to the GroupVersionResources. +func GroupVersionResources(rls []*metav1.APIResourceList) (map[schema.GroupVersionResource]struct{}, error) { + gvrs := map[schema.GroupVersionResource]struct{}{} + for _, rl := range rls { + gv, err := schema.ParseGroupVersion(rl.GroupVersion) + if err != nil { + return nil, err + } + for i := range rl.APIResources { + gvrs[schema.GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: rl.APIResources[i].Name}] = struct{}{} + } + } + return gvrs, nil +} + +// FilteredBy filters by the given predicate. Empty APIResourceLists are dropped. +func FilteredBy(pred ResourcePredicate, rls []*metav1.APIResourceList) []*metav1.APIResourceList { + result := []*metav1.APIResourceList{} + for _, rl := range rls { + filtered := *rl + filtered.APIResources = nil + for i := range rl.APIResources { + if pred.Match(rl.GroupVersion, &rl.APIResources[i]) { + filtered.APIResources = append(filtered.APIResources, rl.APIResources[i]) + } + } + if filtered.APIResources != nil { + result = append(result, &filtered) + } + } + return result +} + +type ResourcePredicate interface { + Match(groupVersion string, r *metav1.APIResource) bool +} + +type ResourcePredicateFunc func(groupVersion string, r *metav1.APIResource) bool + +func (fn ResourcePredicateFunc) Match(groupVersion string, r *metav1.APIResource) bool { + return fn(groupVersion, r) +} + +// SupportsAllVerbs is a predicate matching a resource iff all given verbs are supported. 
+type SupportsAllVerbs struct { + Verbs []string +} + +func (p SupportsAllVerbs) Match(groupVersion string, r *metav1.APIResource) bool { + return sets.NewString([]string(r.Verbs)...).HasAll(p.Verbs...) +} diff --git a/vendor/k8s.io/client-go/discovery/restmapper.go b/vendor/k8s.io/client-go/discovery/restmapper.go new file mode 100644 index 000000000..9b0769a18 --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/restmapper.go @@ -0,0 +1,320 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "fmt" + "sync" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/golang/glog" +) + +// APIGroupResources is an API group with a mapping of versions to +// resources. +type APIGroupResources struct { + Group metav1.APIGroup + // A mapping of version string to a slice of APIResources for + // that version. + VersionedResources map[string][]metav1.APIResource +} + +// NewRESTMapper returns a PriorityRESTMapper based on the discovered +// groups and resources passed in. +func NewRESTMapper(groupResources []*APIGroupResources, versionInterfaces meta.VersionInterfacesFunc) meta.RESTMapper { + unionMapper := meta.MultiRESTMapper{} + + var groupPriority []string + // /v1 is special. 
It should always come first + resourcePriority := []schema.GroupVersionResource{{Group: "", Version: "v1", Resource: meta.AnyResource}} + kindPriority := []schema.GroupVersionKind{{Group: "", Version: "v1", Kind: meta.AnyKind}} + + for _, group := range groupResources { + groupPriority = append(groupPriority, group.Group.Name) + + if len(group.Group.PreferredVersion.Version) != 0 { + preferred := group.Group.PreferredVersion.Version + if _, ok := group.VersionedResources[preferred]; ok { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group.Group.Name, + Version: group.Group.PreferredVersion.Version, + Resource: meta.AnyResource, + }) + + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group.Group.Name, + Version: group.Group.PreferredVersion.Version, + Kind: meta.AnyKind, + }) + } + } + + for _, discoveryVersion := range group.Group.Versions { + resources, ok := group.VersionedResources[discoveryVersion.Version] + if !ok { + continue + } + + gv := schema.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version} + versionMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv}, versionInterfaces) + + for _, resource := range resources { + scope := meta.RESTScopeNamespace + if !resource.Namespaced { + scope = meta.RESTScopeRoot + } + versionMapper.Add(gv.WithKind(resource.Kind), scope) + // TODO only do this if it supports listing + versionMapper.Add(gv.WithKind(resource.Kind+"List"), scope) + } + // TODO why is this type not in discovery (at least for "v1") + versionMapper.Add(gv.WithKind("List"), meta.RESTScopeRoot) + unionMapper = append(unionMapper, versionMapper) + } + } + + for _, group := range groupPriority { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group, + Version: meta.AnyVersion, + Resource: meta.AnyResource, + }) + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group, + Version: meta.AnyVersion, + Kind: meta.AnyKind, + }) + } + + return meta.PriorityRESTMapper{ + Delegate: unionMapper, + ResourcePriority: resourcePriority, + KindPriority: kindPriority, + } +} + +// GetAPIGroupResources uses the provided discovery client to gather +// discovery information and populate a slice of APIGroupResources. +func GetAPIGroupResources(cl DiscoveryInterface) ([]*APIGroupResources, error) { + apiGroups, err := cl.ServerGroups() + if err != nil { + return nil, err + } + var result []*APIGroupResources + for _, group := range apiGroups.Groups { + groupResources := &APIGroupResources{ + Group: group, + VersionedResources: make(map[string][]metav1.APIResource), + } + for _, version := range group.Versions { + resources, err := cl.ServerResourcesForGroupVersion(version.GroupVersion) + if err != nil { + if errors.IsNotFound(err) { + continue // ignore as this can race with deletion of 3rd party APIs + } + return nil, err + } + groupResources.VersionedResources[version.Version] = resources.APIResources + } + result = append(result, groupResources) + } + return result, nil +} + +// DeferredDiscoveryRESTMapper is a RESTMapper that will defer +// initialization of the RESTMapper until the first mapping is +// requested. +type DeferredDiscoveryRESTMapper struct { + initMu sync.Mutex + delegate meta.RESTMapper + cl CachedDiscoveryInterface + versionInterface meta.VersionInterfacesFunc +} + +// NewDeferredDiscoveryRESTMapper returns a +// DeferredDiscoveryRESTMapper that will lazily query the provided +// client for discovery information to do REST mappings. 
+func NewDeferredDiscoveryRESTMapper(cl CachedDiscoveryInterface, versionInterface meta.VersionInterfacesFunc) *DeferredDiscoveryRESTMapper { + return &DeferredDiscoveryRESTMapper{ + cl: cl, + versionInterface: versionInterface, + } +} + +func (d *DeferredDiscoveryRESTMapper) getDelegate() (meta.RESTMapper, error) { + d.initMu.Lock() + defer d.initMu.Unlock() + + if d.delegate != nil { + return d.delegate, nil + } + + groupResources, err := GetAPIGroupResources(d.cl) + if err != nil { + return nil, err + } + + d.delegate = NewRESTMapper(groupResources, d.versionInterface) + return d.delegate, err +} + +// Reset resets the internally cached Discovery information and will +// cause the next mapping request to re-discover. +func (d *DeferredDiscoveryRESTMapper) Reset() { + glog.V(5).Info("Invalidating discovery information") + + d.initMu.Lock() + defer d.initMu.Unlock() + + d.cl.Invalidate() + d.delegate = nil +} + +// KindFor takes a partial resource and returns back the single match. +// It returns an error if there are multiple matches. +func (d *DeferredDiscoveryRESTMapper) KindFor(resource schema.GroupVersionResource) (gvk schema.GroupVersionKind, err error) { + del, err := d.getDelegate() + if err != nil { + return schema.GroupVersionKind{}, err + } + gvk, err = del.KindFor(resource) + if err != nil && !d.cl.Fresh() { + d.Reset() + gvk, err = d.KindFor(resource) + } + return +} + +// KindsFor takes a partial resource and returns back the list of +// potential kinds in priority order. +func (d *DeferredDiscoveryRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvks []schema.GroupVersionKind, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + gvks, err = del.KindsFor(resource) + if len(gvks) == 0 && !d.cl.Fresh() { + d.Reset() + gvks, err = d.KindsFor(resource) + } + return +} + +// ResourceFor takes a partial resource and returns back the single +// match. It returns an error if there are multiple matches. +func (d *DeferredDiscoveryRESTMapper) ResourceFor(input schema.GroupVersionResource) (gvr schema.GroupVersionResource, err error) { + del, err := d.getDelegate() + if err != nil { + return schema.GroupVersionResource{}, err + } + gvr, err = del.ResourceFor(input) + if err != nil && !d.cl.Fresh() { + d.Reset() + gvr, err = d.ResourceFor(input) + } + return +} + +// ResourcesFor takes a partial resource and returns back the list of +// potential resource in priority order. +func (d *DeferredDiscoveryRESTMapper) ResourcesFor(input schema.GroupVersionResource) (gvrs []schema.GroupVersionResource, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + gvrs, err = del.ResourcesFor(input) + if len(gvrs) == 0 && !d.cl.Fresh() { + d.Reset() + gvrs, err = d.ResourcesFor(input) + } + return +} + +// RESTMapping identifies a preferred resource mapping for the +// provided group kind. +func (d *DeferredDiscoveryRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (m *meta.RESTMapping, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + m, err = del.RESTMapping(gk, versions...) + if err != nil && !d.cl.Fresh() { + d.Reset() + m, err = d.RESTMapping(gk, versions...) + } + return +} + +// RESTMappings returns the RESTMappings for the provided group kind +// in a rough internal preferred order. If no kind is found, it will +// return a NoResourceMatchError. 
+func (d *DeferredDiscoveryRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) (ms []*meta.RESTMapping, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + ms, err = del.RESTMappings(gk, versions...) + if len(ms) == 0 && !d.cl.Fresh() { + d.Reset() + ms, err = d.RESTMappings(gk, versions...) + } + return +} + +// AliasesForResource returns whether a resource has an alias or not. +func (d *DeferredDiscoveryRESTMapper) AliasesForResource(resource string) (as []string, ok bool) { + del, err := d.getDelegate() + if err != nil { + return nil, false + } + as, ok = del.AliasesForResource(resource) + if len(as) == 0 && !d.cl.Fresh() { + d.Reset() + as, ok = d.AliasesForResource(resource) + } + return +} + +// ResourceSingularizer converts a resource name from plural to +// singular (e.g., from pods to pod). +func (d *DeferredDiscoveryRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { + del, err := d.getDelegate() + if err != nil { + return resource, err + } + singular, err = del.ResourceSingularizer(resource) + if err != nil && !d.cl.Fresh() { + d.Reset() + singular, err = d.ResourceSingularizer(resource) + } + return +} + +func (d *DeferredDiscoveryRESTMapper) String() string { + del, err := d.getDelegate() + if err != nil { + return fmt.Sprintf("DeferredDiscoveryRESTMapper{%v}", err) + } + return fmt.Sprintf("DeferredDiscoveryRESTMapper{\n\t%v\n}", del) +} + +// Make sure it satisfies the interface +var _ meta.RESTMapper = &DeferredDiscoveryRESTMapper{} diff --git a/vendor/k8s.io/client-go/discovery/unstructured.go b/vendor/k8s.io/client-go/discovery/unstructured.go new file mode 100644 index 000000000..ee72d668b --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/unstructured.go @@ -0,0 +1,95 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// UnstructuredObjectTyper provides a runtime.ObjectTyper implmentation for +// runtime.Unstructured object based on discovery information. +type UnstructuredObjectTyper struct { + registered map[schema.GroupVersionKind]bool +} + +// NewUnstructuredObjectTyper returns a runtime.ObjectTyper for +// unstructred objects based on discovery information. 
+func NewUnstructuredObjectTyper(groupResources []*APIGroupResources) *UnstructuredObjectTyper { + dot := &UnstructuredObjectTyper{registered: make(map[schema.GroupVersionKind]bool)} + for _, group := range groupResources { + for _, discoveryVersion := range group.Group.Versions { + resources, ok := group.VersionedResources[discoveryVersion.Version] + if !ok { + continue + } + + gv := schema.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version} + for _, resource := range resources { + dot.registered[gv.WithKind(resource.Kind)] = true + } + } + } + return dot +} + +// ObjectKind returns the group,version,kind of the provided object, or an error +// if the object in not runtime.Unstructured or has no group,version,kind +// information. +func (d *UnstructuredObjectTyper) ObjectKind(obj runtime.Object) (schema.GroupVersionKind, error) { + if _, ok := obj.(runtime.Unstructured); !ok { + return schema.GroupVersionKind{}, fmt.Errorf("type %T is invalid for dynamic object typer", obj) + } + + return obj.GetObjectKind().GroupVersionKind(), nil +} + +// ObjectKinds returns a slice of one element with the group,version,kind of the +// provided object, or an error if the object is not runtime.Unstructured or +// has no group,version,kind information. unversionedType will always be false +// because runtime.Unstructured object should always have group,version,kind +// information set. +func (d *UnstructuredObjectTyper) ObjectKinds(obj runtime.Object) (gvks []schema.GroupVersionKind, unversionedType bool, err error) { + gvk, err := d.ObjectKind(obj) + if err != nil { + return nil, false, err + } + + return []schema.GroupVersionKind{gvk}, false, nil +} + +// Recognizes returns true if the provided group,version,kind was in the +// discovery information. +func (d *UnstructuredObjectTyper) Recognizes(gvk schema.GroupVersionKind) bool { + return d.registered[gvk] +} + +// IsUnversioned returns false always because runtime.Unstructured objects +// should always have group,version,kind information set. ok will be true if the +// object's group,version,kind is api.Registry. +func (d *UnstructuredObjectTyper) IsUnversioned(obj runtime.Object) (unversioned bool, ok bool) { + gvk, err := d.ObjectKind(obj) + if err != nil { + return false, false + } + + return false, d.registered[gvk] +} + +var _ runtime.ObjectTyper = &UnstructuredObjectTyper{} diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go new file mode 100644 index 000000000..5fd0c54e8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -0,0 +1,514 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubernetes + +import ( + glog "github.com/golang/glog" + discovery "k8s.io/client-go/discovery" + appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" + authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" + authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" + authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" + authorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" + autoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1" + autoscalingv2alpha1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1" + batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" + batchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1" + certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" + policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" + rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" + rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" + settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" + storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1" + storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + CoreV1() corev1.CoreV1Interface + // Deprecated: please explicitly pick a version if possible. + Core() corev1.CoreV1Interface + AppsV1beta1() appsv1beta1.AppsV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Apps() appsv1beta1.AppsV1beta1Interface + AuthenticationV1() authenticationv1.AuthenticationV1Interface + // Deprecated: please explicitly pick a version if possible. + Authentication() authenticationv1.AuthenticationV1Interface + AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface + AuthorizationV1() authorizationv1.AuthorizationV1Interface + // Deprecated: please explicitly pick a version if possible. + Authorization() authorizationv1.AuthorizationV1Interface + AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface + AutoscalingV1() autoscalingv1.AutoscalingV1Interface + // Deprecated: please explicitly pick a version if possible. + Autoscaling() autoscalingv1.AutoscalingV1Interface + AutoscalingV2alpha1() autoscalingv2alpha1.AutoscalingV2alpha1Interface + BatchV1() batchv1.BatchV1Interface + // Deprecated: please explicitly pick a version if possible. + Batch() batchv1.BatchV1Interface + BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface + CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Certificates() certificatesv1beta1.CertificatesV1beta1Interface + ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Extensions() extensionsv1beta1.ExtensionsV1beta1Interface + PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Policy() policyv1beta1.PolicyV1beta1Interface + RbacV1beta1() rbacv1beta1.RbacV1beta1Interface + // Deprecated: please explicitly pick a version if possible. 
+ Rbac() rbacv1beta1.RbacV1beta1Interface + RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface + SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface + // Deprecated: please explicitly pick a version if possible. + Settings() settingsv1alpha1.SettingsV1alpha1Interface + StorageV1beta1() storagev1beta1.StorageV1beta1Interface + StorageV1() storagev1.StorageV1Interface + // Deprecated: please explicitly pick a version if possible. + Storage() storagev1.StorageV1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + *corev1.CoreV1Client + *appsv1beta1.AppsV1beta1Client + *authenticationv1.AuthenticationV1Client + *authenticationv1beta1.AuthenticationV1beta1Client + *authorizationv1.AuthorizationV1Client + *authorizationv1beta1.AuthorizationV1beta1Client + *autoscalingv1.AutoscalingV1Client + *autoscalingv2alpha1.AutoscalingV2alpha1Client + *batchv1.BatchV1Client + *batchv2alpha1.BatchV2alpha1Client + *certificatesv1beta1.CertificatesV1beta1Client + *extensionsv1beta1.ExtensionsV1beta1Client + *policyv1beta1.PolicyV1beta1Client + *rbacv1beta1.RbacV1beta1Client + *rbacv1alpha1.RbacV1alpha1Client + *settingsv1alpha1.SettingsV1alpha1Client + *storagev1beta1.StorageV1beta1Client + *storagev1.StorageV1Client +} + +// CoreV1 retrieves the CoreV1Client +func (c *Clientset) CoreV1() corev1.CoreV1Interface { + if c == nil { + return nil + } + return c.CoreV1Client +} + +// Deprecated: Core retrieves the default version of CoreClient. +// Please explicitly pick a version. +func (c *Clientset) Core() corev1.CoreV1Interface { + if c == nil { + return nil + } + return c.CoreV1Client +} + +// AppsV1beta1 retrieves the AppsV1beta1Client +func (c *Clientset) AppsV1beta1() appsv1beta1.AppsV1beta1Interface { + if c == nil { + return nil + } + return c.AppsV1beta1Client +} + +// Deprecated: Apps retrieves the default version of AppsClient. +// Please explicitly pick a version. +func (c *Clientset) Apps() appsv1beta1.AppsV1beta1Interface { + if c == nil { + return nil + } + return c.AppsV1beta1Client +} + +// AuthenticationV1 retrieves the AuthenticationV1Client +func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interface { + if c == nil { + return nil + } + return c.AuthenticationV1Client +} + +// Deprecated: Authentication retrieves the default version of AuthenticationClient. +// Please explicitly pick a version. +func (c *Clientset) Authentication() authenticationv1.AuthenticationV1Interface { + if c == nil { + return nil + } + return c.AuthenticationV1Client +} + +// AuthenticationV1beta1 retrieves the AuthenticationV1beta1Client +func (c *Clientset) AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface { + if c == nil { + return nil + } + return c.AuthenticationV1beta1Client +} + +// AuthorizationV1 retrieves the AuthorizationV1Client +func (c *Clientset) AuthorizationV1() authorizationv1.AuthorizationV1Interface { + if c == nil { + return nil + } + return c.AuthorizationV1Client +} + +// Deprecated: Authorization retrieves the default version of AuthorizationClient. +// Please explicitly pick a version. 
+func (c *Clientset) Authorization() authorizationv1.AuthorizationV1Interface { + if c == nil { + return nil + } + return c.AuthorizationV1Client +} + +// AuthorizationV1beta1 retrieves the AuthorizationV1beta1Client +func (c *Clientset) AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface { + if c == nil { + return nil + } + return c.AuthorizationV1beta1Client +} + +// AutoscalingV1 retrieves the AutoscalingV1Client +func (c *Clientset) AutoscalingV1() autoscalingv1.AutoscalingV1Interface { + if c == nil { + return nil + } + return c.AutoscalingV1Client +} + +// Deprecated: Autoscaling retrieves the default version of AutoscalingClient. +// Please explicitly pick a version. +func (c *Clientset) Autoscaling() autoscalingv1.AutoscalingV1Interface { + if c == nil { + return nil + } + return c.AutoscalingV1Client +} + +// AutoscalingV2alpha1 retrieves the AutoscalingV2alpha1Client +func (c *Clientset) AutoscalingV2alpha1() autoscalingv2alpha1.AutoscalingV2alpha1Interface { + if c == nil { + return nil + } + return c.AutoscalingV2alpha1Client +} + +// BatchV1 retrieves the BatchV1Client +func (c *Clientset) BatchV1() batchv1.BatchV1Interface { + if c == nil { + return nil + } + return c.BatchV1Client +} + +// Deprecated: Batch retrieves the default version of BatchClient. +// Please explicitly pick a version. +func (c *Clientset) Batch() batchv1.BatchV1Interface { + if c == nil { + return nil + } + return c.BatchV1Client +} + +// BatchV2alpha1 retrieves the BatchV2alpha1Client +func (c *Clientset) BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface { + if c == nil { + return nil + } + return c.BatchV2alpha1Client +} + +// CertificatesV1beta1 retrieves the CertificatesV1beta1Client +func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface { + if c == nil { + return nil + } + return c.CertificatesV1beta1Client +} + +// Deprecated: Certificates retrieves the default version of CertificatesClient. +// Please explicitly pick a version. +func (c *Clientset) Certificates() certificatesv1beta1.CertificatesV1beta1Interface { + if c == nil { + return nil + } + return c.CertificatesV1beta1Client +} + +// ExtensionsV1beta1 retrieves the ExtensionsV1beta1Client +func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface { + if c == nil { + return nil + } + return c.ExtensionsV1beta1Client +} + +// Deprecated: Extensions retrieves the default version of ExtensionsClient. +// Please explicitly pick a version. +func (c *Clientset) Extensions() extensionsv1beta1.ExtensionsV1beta1Interface { + if c == nil { + return nil + } + return c.ExtensionsV1beta1Client +} + +// PolicyV1beta1 retrieves the PolicyV1beta1Client +func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface { + if c == nil { + return nil + } + return c.PolicyV1beta1Client +} + +// Deprecated: Policy retrieves the default version of PolicyClient. +// Please explicitly pick a version. +func (c *Clientset) Policy() policyv1beta1.PolicyV1beta1Interface { + if c == nil { + return nil + } + return c.PolicyV1beta1Client +} + +// RbacV1beta1 retrieves the RbacV1beta1Client +func (c *Clientset) RbacV1beta1() rbacv1beta1.RbacV1beta1Interface { + if c == nil { + return nil + } + return c.RbacV1beta1Client +} + +// Deprecated: Rbac retrieves the default version of RbacClient. +// Please explicitly pick a version. 
+func (c *Clientset) Rbac() rbacv1beta1.RbacV1beta1Interface { + if c == nil { + return nil + } + return c.RbacV1beta1Client +} + +// RbacV1alpha1 retrieves the RbacV1alpha1Client +func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { + if c == nil { + return nil + } + return c.RbacV1alpha1Client +} + +// SettingsV1alpha1 retrieves the SettingsV1alpha1Client +func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface { + if c == nil { + return nil + } + return c.SettingsV1alpha1Client +} + +// Deprecated: Settings retrieves the default version of SettingsClient. +// Please explicitly pick a version. +func (c *Clientset) Settings() settingsv1alpha1.SettingsV1alpha1Interface { + if c == nil { + return nil + } + return c.SettingsV1alpha1Client +} + +// StorageV1beta1 retrieves the StorageV1beta1Client +func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface { + if c == nil { + return nil + } + return c.StorageV1beta1Client +} + +// StorageV1 retrieves the StorageV1Client +func (c *Clientset) StorageV1() storagev1.StorageV1Interface { + if c == nil { + return nil + } + return c.StorageV1Client +} + +// Deprecated: Storage retrieves the default version of StorageClient. +// Please explicitly pick a version. +func (c *Clientset) Storage() storagev1.StorageV1Interface { + if c == nil { + return nil + } + return c.StorageV1Client +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.CoreV1Client, err = corev1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.AppsV1beta1Client, err = appsv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.AuthenticationV1Client, err = authenticationv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.AuthenticationV1beta1Client, err = authenticationv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.AuthorizationV1Client, err = authorizationv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.AuthorizationV1beta1Client, err = authorizationv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.AutoscalingV1Client, err = autoscalingv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.AutoscalingV2alpha1Client, err = autoscalingv2alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.BatchV1Client, err = batchv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.BatchV2alpha1Client, err = batchv2alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.CertificatesV1beta1Client, err = certificatesv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.ExtensionsV1beta1Client, err = extensionsv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.PolicyV1beta1Client, err = policyv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + 
cs.RbacV1beta1Client, err = rbacv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.RbacV1alpha1Client, err = rbacv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.SettingsV1alpha1Client, err = settingsv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.StorageV1beta1Client, err = storagev1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.StorageV1Client, err = storagev1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + glog.Errorf("failed to create the DiscoveryClient: %v", err) + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.CoreV1Client = corev1.NewForConfigOrDie(c) + cs.AppsV1beta1Client = appsv1beta1.NewForConfigOrDie(c) + cs.AuthenticationV1Client = authenticationv1.NewForConfigOrDie(c) + cs.AuthenticationV1beta1Client = authenticationv1beta1.NewForConfigOrDie(c) + cs.AuthorizationV1Client = authorizationv1.NewForConfigOrDie(c) + cs.AuthorizationV1beta1Client = authorizationv1beta1.NewForConfigOrDie(c) + cs.AutoscalingV1Client = autoscalingv1.NewForConfigOrDie(c) + cs.AutoscalingV2alpha1Client = autoscalingv2alpha1.NewForConfigOrDie(c) + cs.BatchV1Client = batchv1.NewForConfigOrDie(c) + cs.BatchV2alpha1Client = batchv2alpha1.NewForConfigOrDie(c) + cs.CertificatesV1beta1Client = certificatesv1beta1.NewForConfigOrDie(c) + cs.ExtensionsV1beta1Client = extensionsv1beta1.NewForConfigOrDie(c) + cs.PolicyV1beta1Client = policyv1beta1.NewForConfigOrDie(c) + cs.RbacV1beta1Client = rbacv1beta1.NewForConfigOrDie(c) + cs.RbacV1alpha1Client = rbacv1alpha1.NewForConfigOrDie(c) + cs.SettingsV1alpha1Client = settingsv1alpha1.NewForConfigOrDie(c) + cs.StorageV1beta1Client = storagev1beta1.NewForConfigOrDie(c) + cs.StorageV1Client = storagev1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. 
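End to end, the generated Clientset is usually built from a kubeconfig-derived rest.Config and then used through its typed group clients; a minimal sketch (the kubeconfig path is a placeholder):

package main

import (
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// The kubeconfig path is illustrative; callers normally derive the
	// rest.Config from their own configuration or from in-cluster settings.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
	if err != nil {
		log.Fatal(err)
	}

	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	pods, err := clientset.CoreV1().Pods("default").List(metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d pods in the default namespace\n", len(pods.Items))
}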
+func New(c rest.Interface) *Clientset { + var cs Clientset + cs.CoreV1Client = corev1.New(c) + cs.AppsV1beta1Client = appsv1beta1.New(c) + cs.AuthenticationV1Client = authenticationv1.New(c) + cs.AuthenticationV1beta1Client = authenticationv1beta1.New(c) + cs.AuthorizationV1Client = authorizationv1.New(c) + cs.AuthorizationV1beta1Client = authorizationv1beta1.New(c) + cs.AutoscalingV1Client = autoscalingv1.New(c) + cs.AutoscalingV2alpha1Client = autoscalingv2alpha1.New(c) + cs.BatchV1Client = batchv1.New(c) + cs.BatchV2alpha1Client = batchv2alpha1.New(c) + cs.CertificatesV1beta1Client = certificatesv1beta1.New(c) + cs.ExtensionsV1beta1Client = extensionsv1beta1.New(c) + cs.PolicyV1beta1Client = policyv1beta1.New(c) + cs.RbacV1beta1Client = rbacv1beta1.New(c) + cs.RbacV1alpha1Client = rbacv1alpha1.New(c) + cs.SettingsV1alpha1Client = settingsv1alpha1.New(c) + cs.StorageV1beta1Client = storagev1beta1.New(c) + cs.StorageV1Client = storagev1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/k8s.io/client-go/kubernetes/doc.go b/vendor/k8s.io/client-go/kubernetes/doc.go new file mode 100644 index 000000000..2af84c669 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated clientset. +package kubernetes diff --git a/vendor/k8s.io/client-go/kubernetes/import_known_versions.go b/vendor/k8s.io/client-go/kubernetes/import_known_versions.go new file mode 100644 index 000000000..297466b01 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/import_known_versions.go @@ -0,0 +1,42 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubernetes + +// These imports are the API groups the client will support. 
+import ( + "fmt" + + "k8s.io/client-go/pkg/api" + _ "k8s.io/client-go/pkg/api/install" + _ "k8s.io/client-go/pkg/apis/apps/install" + _ "k8s.io/client-go/pkg/apis/authentication/install" + _ "k8s.io/client-go/pkg/apis/authorization/install" + _ "k8s.io/client-go/pkg/apis/autoscaling/install" + _ "k8s.io/client-go/pkg/apis/batch/install" + _ "k8s.io/client-go/pkg/apis/certificates/install" + _ "k8s.io/client-go/pkg/apis/extensions/install" + _ "k8s.io/client-go/pkg/apis/policy/install" + _ "k8s.io/client-go/pkg/apis/rbac/install" + _ "k8s.io/client-go/pkg/apis/settings/install" + _ "k8s.io/client-go/pkg/apis/storage/install" +) + +func init() { + if missingVersions := api.Registry.ValidateEnvRequestedVersions(); len(missingVersions) != 0 { + panic(fmt.Sprintf("KUBE_API_VERSIONS contains versions that are not installed: %q.", missingVersions)) + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/doc.go b/vendor/k8s.io/client-go/kubernetes/scheme/doc.go new file mode 100644 index 000000000..5d8ec824f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go new file mode 100644 index 000000000..1fe5dbe4c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package scheme + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + corev1 "k8s.io/client-go/pkg/api/v1" + appsv1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1" + authenticationv1 "k8s.io/client-go/pkg/apis/authentication/v1" + authenticationv1beta1 "k8s.io/client-go/pkg/apis/authentication/v1beta1" + authorizationv1 "k8s.io/client-go/pkg/apis/authorization/v1" + authorizationv1beta1 "k8s.io/client-go/pkg/apis/authorization/v1beta1" + autoscalingv1 "k8s.io/client-go/pkg/apis/autoscaling/v1" + autoscalingv2alpha1 "k8s.io/client-go/pkg/apis/autoscaling/v2alpha1" + batchv1 "k8s.io/client-go/pkg/apis/batch/v1" + batchv2alpha1 "k8s.io/client-go/pkg/apis/batch/v2alpha1" + certificatesv1beta1 "k8s.io/client-go/pkg/apis/certificates/v1beta1" + extensionsv1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + policyv1beta1 "k8s.io/client-go/pkg/apis/policy/v1beta1" + rbacv1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" + rbacv1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" + settingsv1alpha1 "k8s.io/client-go/pkg/apis/settings/v1alpha1" + storagev1 "k8s.io/client-go/pkg/apis/storage/v1" + storagev1beta1 "k8s.io/client-go/pkg/apis/storage/v1beta1" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + AddToScheme(Scheme) +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kuberentes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +func AddToScheme(scheme *runtime.Scheme) { + corev1.AddToScheme(scheme) + appsv1beta1.AddToScheme(scheme) + authenticationv1.AddToScheme(scheme) + authenticationv1beta1.AddToScheme(scheme) + authorizationv1.AddToScheme(scheme) + authorizationv1beta1.AddToScheme(scheme) + autoscalingv1.AddToScheme(scheme) + autoscalingv2alpha1.AddToScheme(scheme) + batchv1.AddToScheme(scheme) + batchv2alpha1.AddToScheme(scheme) + certificatesv1beta1.AddToScheme(scheme) + extensionsv1beta1.AddToScheme(scheme) + policyv1beta1.AddToScheme(scheme) + rbacv1beta1.AddToScheme(scheme) + rbacv1alpha1.AddToScheme(scheme) + settingsv1alpha1.AddToScheme(scheme) + storagev1beta1.AddToScheme(scheme) + storagev1.AddToScheme(scheme) + +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go new file mode 100644 index 000000000..a520f87fc --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go @@ -0,0 +1,98 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1" + rest "k8s.io/client-go/rest" +) + +type AppsV1beta1Interface interface { + RESTClient() rest.Interface + DeploymentsGetter + ScalesGetter + StatefulSetsGetter +} + +// AppsV1beta1Client is used to interact with features provided by the apps group. +type AppsV1beta1Client struct { + restClient rest.Interface +} + +func (c *AppsV1beta1Client) Deployments(namespace string) DeploymentInterface { + return newDeployments(c, namespace) +} + +func (c *AppsV1beta1Client) Scales(namespace string) ScaleInterface { + return newScales(c, namespace) +} + +func (c *AppsV1beta1Client) StatefulSets(namespace string) StatefulSetInterface { + return newStatefulSets(c, namespace) +} + +// NewForConfig creates a new AppsV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*AppsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AppsV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new AppsV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AppsV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AppsV1beta1Client for the given RESTClient. +func New(c rest.Interface) *AppsV1beta1Client { + return &AppsV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AppsV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go new file mode 100644 index 000000000..b30965ee7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1" + rest "k8s.io/client-go/rest" +) + +// DeploymentsGetter has a method to return a DeploymentInterface. +// A group's client should implement this interface. +type DeploymentsGetter interface { + Deployments(namespace string) DeploymentInterface +} + +// DeploymentInterface has methods to work with Deployment resources. +type DeploymentInterface interface { + Create(*v1beta1.Deployment) (*v1beta1.Deployment, error) + Update(*v1beta1.Deployment) (*v1beta1.Deployment, error) + UpdateStatus(*v1beta1.Deployment) (*v1beta1.Deployment, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Deployment, error) + List(opts v1.ListOptions) (*v1beta1.DeploymentList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) + DeploymentExpansion +} + +// deployments implements DeploymentInterface +type deployments struct { + client rest.Interface + ns string +} + +// newDeployments returns a Deployments +func newDeployments(c *AppsV1beta1Client, namespace string) *deployments { + return &deployments{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deployments"). + Body(deployment). + Do(). + Into(result) + return +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + Body(deployment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + SubResource("status"). + Body(deployment). + Do(). + Into(result) + return +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). 
+ Error() +} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { + result = &v1beta1.DeploymentList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched deployment. +func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("deployments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go new file mode 100644 index 000000000..11b523897 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go new file mode 100644 index 000000000..deca5c866 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +type DeploymentExpansion interface{} + +type ScaleExpansion interface{} + +type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go new file mode 100644 index 000000000..d3bf9e103 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// ScalesGetter has a method to return a ScaleInterface. +// A group's client should implement this interface. +type ScalesGetter interface { + Scales(namespace string) ScaleInterface +} + +// ScaleInterface has methods to work with Scale resources. +type ScaleInterface interface { + ScaleExpansion +} + +// scales implements ScaleInterface +type scales struct { + client rest.Interface + ns string +} + +// newScales returns a Scales +func newScales(c *AppsV1beta1Client, namespace string) *scales { + return &scales{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go new file mode 100644 index 000000000..8b5a7822f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1" + rest "k8s.io/client-go/rest" +) + +// StatefulSetsGetter has a method to return a StatefulSetInterface. +// A group's client should implement this interface. +type StatefulSetsGetter interface { + StatefulSets(namespace string) StatefulSetInterface +} + +// StatefulSetInterface has methods to work with StatefulSet resources. 
+type StatefulSetInterface interface { + Create(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) + Update(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) + UpdateStatus(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.StatefulSet, error) + List(opts v1.ListOptions) (*v1beta1.StatefulSetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) + StatefulSetExpansion +} + +// statefulSets implements StatefulSetInterface +type statefulSets struct { + client rest.Interface + ns string +} + +// newStatefulSets returns a StatefulSets +func newStatefulSets(c *AppsV1beta1Client, namespace string) *statefulSets { + return &statefulSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Create(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { + result = &v1beta1.StatefulSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("statefulsets"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Update(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { + result = &v1beta1.StatefulSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + Body(statefulSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *statefulSets) UpdateStatus(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { + result = &v1beta1.StatefulSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + SubResource("status"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. +func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. +func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { + result = &v1beta1.StatefulSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. +func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { + result = &v1beta1.StatefulSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested statefulSets. +func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched statefulSet. +func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) { + result = &v1beta1.StatefulSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("statefulsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go new file mode 100644 index 000000000..bd53fc249 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/apis/authentication/v1" + rest "k8s.io/client-go/rest" +) + +type AuthenticationV1Interface interface { + RESTClient() rest.Interface + TokenReviewsGetter +} + +// AuthenticationV1Client is used to interact with features provided by the authentication.k8s.io group. +type AuthenticationV1Client struct { + restClient rest.Interface +} + +func (c *AuthenticationV1Client) TokenReviews() TokenReviewInterface { + return newTokenReviews(c) +} + +// NewForConfig creates a new AuthenticationV1Client for the given config. +func NewForConfig(c *rest.Config) (*AuthenticationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AuthenticationV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AuthenticationV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AuthenticationV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuthenticationV1Client for the given RESTClient. 
+func New(c rest.Interface) *AuthenticationV1Client { + return &AuthenticationV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AuthenticationV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go new file mode 100644 index 000000000..54673bfa7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go new file mode 100644 index 000000000..42e76d5e4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go @@ -0,0 +1,17 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go new file mode 100644 index 000000000..9cfef4e6a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + rest "k8s.io/client-go/rest" +) + +// TokenReviewsGetter has a method to return a TokenReviewInterface. +// A group's client should implement this interface. +type TokenReviewsGetter interface { + TokenReviews() TokenReviewInterface +} + +// TokenReviewInterface has methods to work with TokenReview resources. +type TokenReviewInterface interface { + TokenReviewExpansion +} + +// tokenReviews implements TokenReviewInterface +type tokenReviews struct { + client rest.Interface +} + +// newTokenReviews returns a TokenReviews +func newTokenReviews(c *AuthenticationV1Client) *tokenReviews { + return &tokenReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go new file mode 100644 index 000000000..fb41782fe --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go @@ -0,0 +1,35 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + authenticationapi "k8s.io/client-go/pkg/apis/authentication/v1" +) + +type TokenReviewExpansion interface { + Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) +} + +func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { + result = &authenticationapi.TokenReview{} + err = c.client.Post(). + Resource("tokenreviews"). + Body(tokenReview). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go new file mode 100644 index 000000000..419dc2cb6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/authentication/v1beta1" + rest "k8s.io/client-go/rest" +) + +type AuthenticationV1beta1Interface interface { + RESTClient() rest.Interface + TokenReviewsGetter +} + +// AuthenticationV1beta1Client is used to interact with features provided by the authentication.k8s.io group. 
+type AuthenticationV1beta1Client struct { + restClient rest.Interface +} + +func (c *AuthenticationV1beta1Client) TokenReviews() TokenReviewInterface { + return newTokenReviews(c) +} + +// NewForConfig creates a new AuthenticationV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*AuthenticationV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AuthenticationV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new AuthenticationV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AuthenticationV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuthenticationV1beta1Client for the given RESTClient. +func New(c rest.Interface) *AuthenticationV1beta1Client { + return &AuthenticationV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AuthenticationV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go new file mode 100644 index 000000000..11b523897 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go new file mode 100644 index 000000000..2b7e8ca0b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go @@ -0,0 +1,17 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go new file mode 100644 index 000000000..7f9f1e9fa --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// TokenReviewsGetter has a method to return a TokenReviewInterface. +// A group's client should implement this interface. +type TokenReviewsGetter interface { + TokenReviews() TokenReviewInterface +} + +// TokenReviewInterface has methods to work with TokenReview resources. +type TokenReviewInterface interface { + TokenReviewExpansion +} + +// tokenReviews implements TokenReviewInterface +type tokenReviews struct { + client rest.Interface +} + +// newTokenReviews returns a TokenReviews +func newTokenReviews(c *AuthenticationV1beta1Client) *tokenReviews { + return &tokenReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go new file mode 100644 index 000000000..375b6f637 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go @@ -0,0 +1,35 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + authenticationapi "k8s.io/client-go/pkg/apis/authentication/v1beta1" +) + +type TokenReviewExpansion interface { + Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) +} + +func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { + result = &authenticationapi.TokenReview{} + err = c.client.Post(). + Resource("tokenreviews"). + Body(tokenReview). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go new file mode 100644 index 000000000..af2924a30 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go @@ -0,0 +1,98 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/apis/authorization/v1" + rest "k8s.io/client-go/rest" +) + +type AuthorizationV1Interface interface { + RESTClient() rest.Interface + LocalSubjectAccessReviewsGetter + SelfSubjectAccessReviewsGetter + SubjectAccessReviewsGetter +} + +// AuthorizationV1Client is used to interact with features provided by the authorization.k8s.io group. +type AuthorizationV1Client struct { + restClient rest.Interface +} + +func (c *AuthorizationV1Client) LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface { + return newLocalSubjectAccessReviews(c, namespace) +} + +func (c *AuthorizationV1Client) SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface { + return newSelfSubjectAccessReviews(c) +} + +func (c *AuthorizationV1Client) SubjectAccessReviews() SubjectAccessReviewInterface { + return newSubjectAccessReviews(c) +} + +// NewForConfig creates a new AuthorizationV1Client for the given config. +func NewForConfig(c *rest.Config) (*AuthorizationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AuthorizationV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AuthorizationV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AuthorizationV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuthorizationV1Client for the given RESTClient. +func New(c rest.Interface) *AuthorizationV1Client { + return &AuthorizationV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *AuthorizationV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go new file mode 100644 index 000000000..54673bfa7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go new file mode 100644 index 000000000..42e76d5e4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go @@ -0,0 +1,17 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go new file mode 100644 index 000000000..b2085bceb --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + rest "k8s.io/client-go/rest" +) + +// LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface. +// A group's client should implement this interface. +type LocalSubjectAccessReviewsGetter interface { + LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface +} + +// LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources. 
+type LocalSubjectAccessReviewInterface interface { + LocalSubjectAccessReviewExpansion +} + +// localSubjectAccessReviews implements LocalSubjectAccessReviewInterface +type localSubjectAccessReviews struct { + client rest.Interface + ns string +} + +// newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews +func newLocalSubjectAccessReviews(c *AuthorizationV1Client, namespace string) *localSubjectAccessReviews { + return &localSubjectAccessReviews{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go new file mode 100644 index 000000000..c3b487c72 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go @@ -0,0 +1,36 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1" +) + +type LocalSubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) +} + +func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { + result = &authorizationapi.LocalSubjectAccessReview{} + err = c.client.Post(). + Namespace(c.ns). + Resource("localsubjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go new file mode 100644 index 000000000..cfb019eaa --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + rest "k8s.io/client-go/rest" +) + +// SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface. +// A group's client should implement this interface. +type SelfSubjectAccessReviewsGetter interface { + SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface +} + +// SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources. 
+type SelfSubjectAccessReviewInterface interface { + SelfSubjectAccessReviewExpansion +} + +// selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface +type selfSubjectAccessReviews struct { + client rest.Interface +} + +// newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews +func newSelfSubjectAccessReviews(c *AuthorizationV1Client) *selfSubjectAccessReviews { + return &selfSubjectAccessReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go new file mode 100644 index 000000000..107615080 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go @@ -0,0 +1,35 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1" +) + +type SelfSubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) +} + +func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { + result = &authorizationapi.SelfSubjectAccessReview{} + err = c.client.Post(). + Resource("selfsubjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go new file mode 100644 index 000000000..08f6d6095 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + rest "k8s.io/client-go/rest" +) + +// SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface. +// A group's client should implement this interface. +type SubjectAccessReviewsGetter interface { + SubjectAccessReviews() SubjectAccessReviewInterface +} + +// SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources. 
+type SubjectAccessReviewInterface interface { + SubjectAccessReviewExpansion +} + +// subjectAccessReviews implements SubjectAccessReviewInterface +type subjectAccessReviews struct { + client rest.Interface +} + +// newSubjectAccessReviews returns a SubjectAccessReviews +func newSubjectAccessReviews(c *AuthorizationV1Client) *subjectAccessReviews { + return &subjectAccessReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go new file mode 100644 index 000000000..dfdf6521a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go @@ -0,0 +1,36 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1" +) + +// The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface. +type SubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) +} + +func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { + result = &authorizationapi.SubjectAccessReview{} + err = c.client.Post(). + Resource("subjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go new file mode 100644 index 000000000..b49b3b30b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go @@ -0,0 +1,98 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/authorization/v1beta1" + rest "k8s.io/client-go/rest" +) + +type AuthorizationV1beta1Interface interface { + RESTClient() rest.Interface + LocalSubjectAccessReviewsGetter + SelfSubjectAccessReviewsGetter + SubjectAccessReviewsGetter +} + +// AuthorizationV1beta1Client is used to interact with features provided by the authorization.k8s.io group. 
+type AuthorizationV1beta1Client struct { + restClient rest.Interface +} + +func (c *AuthorizationV1beta1Client) LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface { + return newLocalSubjectAccessReviews(c, namespace) +} + +func (c *AuthorizationV1beta1Client) SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface { + return newSelfSubjectAccessReviews(c) +} + +func (c *AuthorizationV1beta1Client) SubjectAccessReviews() SubjectAccessReviewInterface { + return newSubjectAccessReviews(c) +} + +// NewForConfig creates a new AuthorizationV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*AuthorizationV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AuthorizationV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new AuthorizationV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AuthorizationV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuthorizationV1beta1Client for the given RESTClient. +func New(c rest.Interface) *AuthorizationV1beta1Client { + return &AuthorizationV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AuthorizationV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go new file mode 100644 index 000000000..11b523897 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go new file mode 100644 index 000000000..2b7e8ca0b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go @@ -0,0 +1,17 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go new file mode 100644 index 000000000..9b8e10341 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface. +// A group's client should implement this interface. +type LocalSubjectAccessReviewsGetter interface { + LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface +} + +// LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources. +type LocalSubjectAccessReviewInterface interface { + LocalSubjectAccessReviewExpansion +} + +// localSubjectAccessReviews implements LocalSubjectAccessReviewInterface +type localSubjectAccessReviews struct { + client rest.Interface + ns string +} + +// newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews +func newLocalSubjectAccessReviews(c *AuthorizationV1beta1Client, namespace string) *localSubjectAccessReviews { + return &localSubjectAccessReviews{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go new file mode 100644 index 000000000..d2ce4f0d7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go @@ -0,0 +1,36 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1beta1" +) + +type LocalSubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) +} + +func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { + result = &authorizationapi.LocalSubjectAccessReview{} + err = c.client.Post(). + Namespace(c.ns). + Resource("localsubjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go new file mode 100644 index 000000000..1ef3e49af --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface. +// A group's client should implement this interface. +type SelfSubjectAccessReviewsGetter interface { + SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface +} + +// SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources. +type SelfSubjectAccessReviewInterface interface { + SelfSubjectAccessReviewExpansion +} + +// selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface +type selfSubjectAccessReviews struct { + client rest.Interface +} + +// newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews +func newSelfSubjectAccessReviews(c *AuthorizationV1beta1Client) *selfSubjectAccessReviews { + return &selfSubjectAccessReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go new file mode 100644 index 000000000..d341eb14a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go @@ -0,0 +1,35 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1beta1" +) + +type SelfSubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) +} + +func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { + result = &authorizationapi.SelfSubjectAccessReview{} + err = c.client.Post(). + Resource("selfsubjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go new file mode 100644 index 000000000..cd60e9df6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go @@ -0,0 +1,44 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface. +// A group's client should implement this interface. +type SubjectAccessReviewsGetter interface { + SubjectAccessReviews() SubjectAccessReviewInterface +} + +// SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources. +type SubjectAccessReviewInterface interface { + SubjectAccessReviewExpansion +} + +// subjectAccessReviews implements SubjectAccessReviewInterface +type subjectAccessReviews struct { + client rest.Interface +} + +// newSubjectAccessReviews returns a SubjectAccessReviews +func newSubjectAccessReviews(c *AuthorizationV1beta1Client) *subjectAccessReviews { + return &subjectAccessReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go new file mode 100644 index 000000000..8d03b0811 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go @@ -0,0 +1,36 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + authorizationapi "k8s.io/client-go/pkg/apis/authorization/v1beta1" +) + +// The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface. +type SubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) +} + +func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { + result = &authorizationapi.SubjectAccessReview{} + err = c.client.Post(). + Resource("subjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go new file mode 100644 index 000000000..b235891c9 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/apis/autoscaling/v1" + rest "k8s.io/client-go/rest" +) + +type AutoscalingV1Interface interface { + RESTClient() rest.Interface + HorizontalPodAutoscalersGetter +} + +// AutoscalingV1Client is used to interact with features provided by the autoscaling group. +type AutoscalingV1Client struct { + restClient rest.Interface +} + +func (c *AutoscalingV1Client) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { + return newHorizontalPodAutoscalers(c, namespace) +} + +// NewForConfig creates a new AutoscalingV1Client for the given config. +func NewForConfig(c *rest.Config) (*AutoscalingV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AutoscalingV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AutoscalingV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AutoscalingV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AutoscalingV1Client for the given RESTClient. +func New(c rest.Interface) *AutoscalingV1Client { + return &AutoscalingV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *AutoscalingV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go new file mode 100644 index 000000000..54673bfa7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go new file mode 100644 index 000000000..effefbd50 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +type HorizontalPodAutoscalerExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go new file mode 100644 index 000000000..f9c790af2 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/apis/autoscaling/v1" + rest "k8s.io/client-go/rest" +) + +// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. +// A group's client should implement this interface. 
+type HorizontalPodAutoscalersGetter interface { + HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface +} + +// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. +type HorizontalPodAutoscalerInterface interface { + Create(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) + Update(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) + UpdateStatus(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.HorizontalPodAutoscaler, error) + List(opts meta_v1.ListOptions) (*v1.HorizontalPodAutoscalerList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) + HorizontalPodAutoscalerExpansion +} + +// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type horizontalPodAutoscalers struct { + client rest.Interface + ns string +} + +// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers +func newHorizontalPodAutoscalers(c *AutoscalingV1Client, namespace string) *horizontalPodAutoscalers { + return &horizontalPodAutoscalers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Post(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + SubResource("status"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. +func (c *horizontalPodAutoscalers) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *horizontalPodAutoscalers) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. +func (c *horizontalPodAutoscalers) Get(name string, options meta_v1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. +func (c *horizontalPodAutoscalers) List(opts meta_v1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { + result = &v1.HorizontalPodAutoscalerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *horizontalPodAutoscalers) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched horizontalPodAutoscaler. +func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/autoscaling_client.go new file mode 100644 index 000000000..0d16aa3e9 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/autoscaling_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v2alpha1 "k8s.io/client-go/pkg/apis/autoscaling/v2alpha1" + rest "k8s.io/client-go/rest" +) + +type AutoscalingV2alpha1Interface interface { + RESTClient() rest.Interface + HorizontalPodAutoscalersGetter +} + +// AutoscalingV2alpha1Client is used to interact with features provided by the autoscaling group. 
+type AutoscalingV2alpha1Client struct { + restClient rest.Interface +} + +func (c *AutoscalingV2alpha1Client) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { + return newHorizontalPodAutoscalers(c, namespace) +} + +// NewForConfig creates a new AutoscalingV2alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*AutoscalingV2alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AutoscalingV2alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new AutoscalingV2alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AutoscalingV2alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AutoscalingV2alpha1Client for the given RESTClient. +func New(c rest.Interface) *AutoscalingV2alpha1Client { + return &AutoscalingV2alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v2alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AutoscalingV2alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/doc.go new file mode 100644 index 000000000..d29bd3f4e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v2alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/generated_expansion.go new file mode 100644 index 000000000..e40f2c5a1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +type HorizontalPodAutoscalerExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/horizontalpodautoscaler.go new file mode 100644 index 000000000..c85bfd9f3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/horizontalpodautoscaler.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v2alpha1 "k8s.io/client-go/pkg/apis/autoscaling/v2alpha1" + rest "k8s.io/client-go/rest" +) + +// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. +// A group's client should implement this interface. +type HorizontalPodAutoscalersGetter interface { + HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface +} + +// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. +type HorizontalPodAutoscalerInterface interface { + Create(*v2alpha1.HorizontalPodAutoscaler) (*v2alpha1.HorizontalPodAutoscaler, error) + Update(*v2alpha1.HorizontalPodAutoscaler) (*v2alpha1.HorizontalPodAutoscaler, error) + UpdateStatus(*v2alpha1.HorizontalPodAutoscaler) (*v2alpha1.HorizontalPodAutoscaler, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v2alpha1.HorizontalPodAutoscaler, error) + List(opts v1.ListOptions) (*v2alpha1.HorizontalPodAutoscalerList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.HorizontalPodAutoscaler, err error) + HorizontalPodAutoscalerExpansion +} + +// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type horizontalPodAutoscalers struct { + client rest.Interface + ns string +} + +// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers +func newHorizontalPodAutoscalers(c *AutoscalingV2alpha1Client, namespace string) *horizontalPodAutoscalers { + return &horizontalPodAutoscalers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2alpha1.HorizontalPodAutoscaler) (result *v2alpha1.HorizontalPodAutoscaler, err error) { + result = &v2alpha1.HorizontalPodAutoscaler{} + err = c.client.Post(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). 
+ Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2alpha1.HorizontalPodAutoscaler) (result *v2alpha1.HorizontalPodAutoscaler, err error) { + result = &v2alpha1.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2alpha1.HorizontalPodAutoscaler) (result *v2alpha1.HorizontalPodAutoscaler, err error) { + result = &v2alpha1.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + SubResource("status"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. +func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. +func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2alpha1.HorizontalPodAutoscaler, err error) { + result = &v2alpha1.HorizontalPodAutoscaler{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. +func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2alpha1.HorizontalPodAutoscalerList, err error) { + result = &v2alpha1.HorizontalPodAutoscalerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched horizontalPodAutoscaler. 
+func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.HorizontalPodAutoscaler, err error) { + result = &v2alpha1.HorizontalPodAutoscaler{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go new file mode 100644 index 000000000..c8766f5b6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/apis/batch/v1" + rest "k8s.io/client-go/rest" +) + +type BatchV1Interface interface { + RESTClient() rest.Interface + JobsGetter +} + +// BatchV1Client is used to interact with features provided by the batch group. +type BatchV1Client struct { + restClient rest.Interface +} + +func (c *BatchV1Client) Jobs(namespace string) JobInterface { + return newJobs(c, namespace) +} + +// NewForConfig creates a new BatchV1Client for the given config. +func NewForConfig(c *rest.Config) (*BatchV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &BatchV1Client{client}, nil +} + +// NewForConfigOrDie creates a new BatchV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *BatchV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new BatchV1Client for the given RESTClient. +func New(c rest.Interface) *BatchV1Client { + return &BatchV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *BatchV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go new file mode 100644 index 000000000..54673bfa7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go new file mode 100644 index 000000000..68d7741fa --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +type JobExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go new file mode 100644 index 000000000..c8120f6d8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/apis/batch/v1" + rest "k8s.io/client-go/rest" +) + +// JobsGetter has a method to return a JobInterface. +// A group's client should implement this interface. +type JobsGetter interface { + Jobs(namespace string) JobInterface +} + +// JobInterface has methods to work with Job resources. 
+type JobInterface interface { + Create(*v1.Job) (*v1.Job, error) + Update(*v1.Job) (*v1.Job, error) + UpdateStatus(*v1.Job) (*v1.Job, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Job, error) + List(opts meta_v1.ListOptions) (*v1.JobList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error) + JobExpansion +} + +// jobs implements JobInterface +type jobs struct { + client rest.Interface + ns string +} + +// newJobs returns a Jobs +func newJobs(c *BatchV1Client, namespace string) *jobs { + return &jobs{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. +func (c *jobs) Create(job *v1.Job) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Post(). + Namespace(c.ns). + Resource("jobs"). + Body(job). + Do(). + Into(result) + return +} + +// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. +func (c *jobs) Update(job *v1.Job) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Put(). + Namespace(c.ns). + Resource("jobs"). + Name(job.Name). + Body(job). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *jobs) UpdateStatus(job *v1.Job) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Put(). + Namespace(c.ns). + Resource("jobs"). + Name(job.Name). + SubResource("status"). + Body(job). + Do(). + Into(result) + return +} + +// Delete takes name of the job and deletes it. Returns an error if one occurs. +func (c *jobs) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("jobs"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *jobs) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the job, and returns the corresponding job object, and an error if there is any. +func (c *jobs) Get(name string, options meta_v1.GetOptions) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Jobs that match those selectors. +func (c *jobs) List(opts meta_v1.ListOptions) (result *v1.JobList, err error) { + result = &v1.JobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested jobs. +func (c *jobs) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("jobs"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched job. +func (c *jobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("jobs"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go new file mode 100644 index 000000000..e2b2dd5cd --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v2alpha1 "k8s.io/client-go/pkg/apis/batch/v2alpha1" + rest "k8s.io/client-go/rest" +) + +type BatchV2alpha1Interface interface { + RESTClient() rest.Interface + CronJobsGetter +} + +// BatchV2alpha1Client is used to interact with features provided by the batch group. +type BatchV2alpha1Client struct { + restClient rest.Interface +} + +func (c *BatchV2alpha1Client) CronJobs(namespace string) CronJobInterface { + return newCronJobs(c, namespace) +} + +// NewForConfig creates a new BatchV2alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*BatchV2alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &BatchV2alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new BatchV2alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *BatchV2alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new BatchV2alpha1Client for the given RESTClient. +func New(c rest.Interface) *BatchV2alpha1Client { + return &BatchV2alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v2alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *BatchV2alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go new file mode 100644 index 000000000..2447f2add --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v2alpha1 "k8s.io/client-go/pkg/apis/batch/v2alpha1" + rest "k8s.io/client-go/rest" +) + +// CronJobsGetter has a method to return a CronJobInterface. +// A group's client should implement this interface. +type CronJobsGetter interface { + CronJobs(namespace string) CronJobInterface +} + +// CronJobInterface has methods to work with CronJob resources. +type CronJobInterface interface { + Create(*v2alpha1.CronJob) (*v2alpha1.CronJob, error) + Update(*v2alpha1.CronJob) (*v2alpha1.CronJob, error) + UpdateStatus(*v2alpha1.CronJob) (*v2alpha1.CronJob, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v2alpha1.CronJob, error) + List(opts v1.ListOptions) (*v2alpha1.CronJobList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) + CronJobExpansion +} + +// cronJobs implements CronJobInterface +type cronJobs struct { + client rest.Interface + ns string +} + +// newCronJobs returns a CronJobs +func newCronJobs(c *BatchV2alpha1Client, namespace string) *cronJobs { + return &cronJobs{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. +func (c *cronJobs) Create(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { + result = &v2alpha1.CronJob{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cronjobs"). + Body(cronJob). + Do(). + Into(result) + return +} + +// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. +func (c *cronJobs) Update(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { + result = &v2alpha1.CronJob{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cronjobs"). + Name(cronJob.Name). + Body(cronJob). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). 
+ +func (c *cronJobs) UpdateStatus(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { + result = &v2alpha1.CronJob{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cronjobs"). + Name(cronJob.Name). + SubResource("status"). + Body(cronJob). + Do(). + Into(result) + return +} + +// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. +func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cronjobs"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cronjobs"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. +func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v2alpha1.CronJob, err error) { + result = &v2alpha1.CronJob{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cronjobs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CronJobs that match those selectors. +func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) { + result = &v2alpha1.CronJobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cronjobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cronJobs. +func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cronjobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched cronJob. +func (c *cronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) { + result = &v2alpha1.CronJob{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cronjobs"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go new file mode 100644 index 000000000..d29bd3f4e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. 
+package v2alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go new file mode 100644 index 000000000..078027ef4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +type CronJobExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go new file mode 100644 index 000000000..c9c39acb2 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/certificates/v1beta1" + rest "k8s.io/client-go/rest" +) + +type CertificatesV1beta1Interface interface { + RESTClient() rest.Interface + CertificateSigningRequestsGetter +} + +// CertificatesV1beta1Client is used to interact with features provided by the certificates.k8s.io group. +type CertificatesV1beta1Client struct { + restClient rest.Interface +} + +func (c *CertificatesV1beta1Client) CertificateSigningRequests() CertificateSigningRequestInterface { + return newCertificateSigningRequests(c) +} + +// NewForConfig creates a new CertificatesV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*CertificatesV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CertificatesV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new CertificatesV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CertificatesV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CertificatesV1beta1Client for the given RESTClient. 
+func New(c rest.Interface) *CertificatesV1beta1Client { + return &CertificatesV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CertificatesV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go new file mode 100644 index 000000000..7407ef068 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go @@ -0,0 +1,161 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/certificates/v1beta1" + rest "k8s.io/client-go/rest" +) + +// CertificateSigningRequestsGetter has a method to return a CertificateSigningRequestInterface. +// A group's client should implement this interface. +type CertificateSigningRequestsGetter interface { + CertificateSigningRequests() CertificateSigningRequestInterface +} + +// CertificateSigningRequestInterface has methods to work with CertificateSigningRequest resources. 
+type CertificateSigningRequestInterface interface { + Create(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) + Update(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) + UpdateStatus(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.CertificateSigningRequest, error) + List(opts v1.ListOptions) (*v1beta1.CertificateSigningRequestList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) + CertificateSigningRequestExpansion +} + +// certificateSigningRequests implements CertificateSigningRequestInterface +type certificateSigningRequests struct { + client rest.Interface +} + +// newCertificateSigningRequests returns a CertificateSigningRequests +func newCertificateSigningRequests(c *CertificatesV1beta1Client) *certificateSigningRequests { + return &certificateSigningRequests{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. +func (c *certificateSigningRequests) Create(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { + result = &v1beta1.CertificateSigningRequest{} + err = c.client.Post(). + Resource("certificatesigningrequests"). + Body(certificateSigningRequest). + Do(). + Into(result) + return +} + +// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. +func (c *certificateSigningRequests) Update(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { + result = &v1beta1.CertificateSigningRequest{} + err = c.client.Put(). + Resource("certificatesigningrequests"). + Name(certificateSigningRequest.Name). + Body(certificateSigningRequest). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *certificateSigningRequests) UpdateStatus(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { + result = &v1beta1.CertificateSigningRequest{} + err = c.client.Put(). + Resource("certificatesigningrequests"). + Name(certificateSigningRequest.Name). + SubResource("status"). + Body(certificateSigningRequest). + Do(). + Into(result) + return +} + +// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. +func (c *certificateSigningRequests) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("certificatesigningrequests"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *certificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("certificatesigningrequests"). 
+ VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. +func (c *certificateSigningRequests) Get(name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { + result = &v1beta1.CertificateSigningRequest{} + err = c.client.Get(). + Resource("certificatesigningrequests"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. +func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { + result = &v1beta1.CertificateSigningRequestList{} + err = c.client.Get(). + Resource("certificatesigningrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested certificateSigningRequests. +func (c *certificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("certificatesigningrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched certificateSigningRequest. +func (c *certificateSigningRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { + result = &v1beta1.CertificateSigningRequest{} + err = c.client.Patch(pt). + Resource("certificatesigningrequests"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go new file mode 100644 index 000000000..4765bba8a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go @@ -0,0 +1,37 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + certificates "k8s.io/client-go/pkg/apis/certificates/v1beta1" +) + +type CertificateSigningRequestExpansion interface { + UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) +} + +func (c *certificateSigningRequests) UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) { + result = &certificates.CertificateSigningRequest{} + err = c.client.Put(). + Resource("certificatesigningrequests"). + Name(certificateSigningRequest.Name). + Body(certificateSigningRequest). + SubResource("approval"). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go new file mode 100644 index 000000000..11b523897 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go new file mode 100644 index 000000000..2b7e8ca0b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go @@ -0,0 +1,17 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go new file mode 100644 index 000000000..50671976a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// ComponentStatusesGetter has a method to return a ComponentStatusInterface. +// A group's client should implement this interface. +type ComponentStatusesGetter interface { + ComponentStatuses() ComponentStatusInterface +} + +// ComponentStatusInterface has methods to work with ComponentStatus resources. 
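[Editor's note, not part of the vendored patch] The UpdateApproval expansion added just above is the hook callers use to approve or deny a CertificateSigningRequest through its "approval" subresource. The following is a minimal, hypothetical sketch of that flow; the CertificatesV1beta1Interface name, the function name approveCSR, and the condition Reason/Message strings are assumptions for illustration and do not appear in this patch.

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	certclient "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
	certv1beta1 "k8s.io/client-go/pkg/apis/certificates/v1beta1"
)

// approveCSR fetches a pending CSR, appends an approval condition, and writes it
// back through the "approval" subresource exposed by UpdateApproval above.
func approveCSR(c certclient.CertificatesV1beta1Interface, name string) error {
	csr, err := c.CertificateSigningRequests().Get(name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	csr.Status.Conditions = append(csr.Status.Conditions, certv1beta1.CertificateSigningRequestCondition{
		Type:    certv1beta1.CertificateApproved,
		Reason:  "ExampleApproval",
		Message: "approved for illustration only",
	})
	_, err = c.CertificateSigningRequests().UpdateApproval(csr)
	return err
}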
+type ComponentStatusInterface interface { + Create(*v1.ComponentStatus) (*v1.ComponentStatus, error) + Update(*v1.ComponentStatus) (*v1.ComponentStatus, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ComponentStatus, error) + List(opts meta_v1.ListOptions) (*v1.ComponentStatusList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ComponentStatus, err error) + ComponentStatusExpansion +} + +// componentStatuses implements ComponentStatusInterface +type componentStatuses struct { + client rest.Interface +} + +// newComponentStatuses returns a ComponentStatuses +func newComponentStatuses(c *CoreV1Client) *componentStatuses { + return &componentStatuses{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. +func (c *componentStatuses) Create(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Post(). + Resource("componentstatuses"). + Body(componentStatus). + Do(). + Into(result) + return +} + +// Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. +func (c *componentStatuses) Update(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Put(). + Resource("componentstatuses"). + Name(componentStatus.Name). + Body(componentStatus). + Do(). + Into(result) + return +} + +// Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. +func (c *componentStatuses) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Resource("componentstatuses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *componentStatuses) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Resource("componentstatuses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. +func (c *componentStatuses) Get(name string, options meta_v1.GetOptions) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Get(). + Resource("componentstatuses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. +func (c *componentStatuses) List(opts meta_v1.ListOptions) (result *v1.ComponentStatusList, err error) { + result = &v1.ComponentStatusList{} + err = c.client.Get(). + Resource("componentstatuses"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested componentStatuses. +func (c *componentStatuses) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). 
+ Resource("componentstatuses"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched componentStatus. +func (c *componentStatuses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Patch(pt). + Resource("componentstatuses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go new file mode 100644 index 000000000..bb21636e2 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// ConfigMapsGetter has a method to return a ConfigMapInterface. +// A group's client should implement this interface. +type ConfigMapsGetter interface { + ConfigMaps(namespace string) ConfigMapInterface +} + +// ConfigMapInterface has methods to work with ConfigMap resources. +type ConfigMapInterface interface { + Create(*v1.ConfigMap) (*v1.ConfigMap, error) + Update(*v1.ConfigMap) (*v1.ConfigMap, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ConfigMap, error) + List(opts meta_v1.ListOptions) (*v1.ConfigMapList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error) + ConfigMapExpansion +} + +// configMaps implements ConfigMapInterface +type configMaps struct { + client rest.Interface + ns string +} + +// newConfigMaps returns a ConfigMaps +func newConfigMaps(c *CoreV1Client, namespace string) *configMaps { + return &configMaps{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. +func (c *configMaps) Create(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Post(). + Namespace(c.ns). + Resource("configmaps"). + Body(configMap). + Do(). + Into(result) + return +} + +// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. +func (c *configMaps) Update(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Put(). + Namespace(c.ns). 
+ Resource("configmaps"). + Name(configMap.Name). + Body(configMap). + Do(). + Into(result) + return +} + +// Delete takes name of the configMap and deletes it. Returns an error if one occurs. +func (c *configMaps) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("configmaps"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *configMaps) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. +func (c *configMaps) Get(name string, options meta_v1.GetOptions) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Get(). + Namespace(c.ns). + Resource("configmaps"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. +func (c *configMaps) List(opts meta_v1.ListOptions) (result *v1.ConfigMapList, err error) { + result = &v1.ConfigMapList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested configMaps. +func (c *configMaps) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched configMap. +func (c *configMaps) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("configmaps"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go new file mode 100644 index 000000000..0972960d1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go @@ -0,0 +1,163 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +type CoreV1Interface interface { + RESTClient() rest.Interface + ComponentStatusesGetter + ConfigMapsGetter + EndpointsGetter + EventsGetter + LimitRangesGetter + NamespacesGetter + NodesGetter + PersistentVolumesGetter + PersistentVolumeClaimsGetter + PodsGetter + PodTemplatesGetter + ReplicationControllersGetter + ResourceQuotasGetter + SecretsGetter + ServicesGetter + ServiceAccountsGetter +} + +// CoreV1Client is used to interact with features provided by the group. +type CoreV1Client struct { + restClient rest.Interface +} + +func (c *CoreV1Client) ComponentStatuses() ComponentStatusInterface { + return newComponentStatuses(c) +} + +func (c *CoreV1Client) ConfigMaps(namespace string) ConfigMapInterface { + return newConfigMaps(c, namespace) +} + +func (c *CoreV1Client) Endpoints(namespace string) EndpointsInterface { + return newEndpoints(c, namespace) +} + +func (c *CoreV1Client) Events(namespace string) EventInterface { + return newEvents(c, namespace) +} + +func (c *CoreV1Client) LimitRanges(namespace string) LimitRangeInterface { + return newLimitRanges(c, namespace) +} + +func (c *CoreV1Client) Namespaces() NamespaceInterface { + return newNamespaces(c) +} + +func (c *CoreV1Client) Nodes() NodeInterface { + return newNodes(c) +} + +func (c *CoreV1Client) PersistentVolumes() PersistentVolumeInterface { + return newPersistentVolumes(c) +} + +func (c *CoreV1Client) PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface { + return newPersistentVolumeClaims(c, namespace) +} + +func (c *CoreV1Client) Pods(namespace string) PodInterface { + return newPods(c, namespace) +} + +func (c *CoreV1Client) PodTemplates(namespace string) PodTemplateInterface { + return newPodTemplates(c, namespace) +} + +func (c *CoreV1Client) ReplicationControllers(namespace string) ReplicationControllerInterface { + return newReplicationControllers(c, namespace) +} + +func (c *CoreV1Client) ResourceQuotas(namespace string) ResourceQuotaInterface { + return newResourceQuotas(c, namespace) +} + +func (c *CoreV1Client) Secrets(namespace string) SecretInterface { + return newSecrets(c, namespace) +} + +func (c *CoreV1Client) Services(namespace string) ServiceInterface { + return newServices(c, namespace) +} + +func (c *CoreV1Client) ServiceAccounts(namespace string) ServiceAccountInterface { + return newServiceAccounts(c, namespace) +} + +// NewForConfig creates a new CoreV1Client for the given config. +func NewForConfig(c *rest.Config) (*CoreV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoreV1Client{client}, nil +} + +// NewForConfigOrDie creates a new CoreV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CoreV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreV1Client for the given RESTClient. 
+func New(c rest.Interface) *CoreV1Client { + return &CoreV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/api" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CoreV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go new file mode 100644 index 000000000..54673bfa7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go new file mode 100644 index 000000000..3580742a8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// EndpointsGetter has a method to return a EndpointsInterface. +// A group's client should implement this interface. +type EndpointsGetter interface { + Endpoints(namespace string) EndpointsInterface +} + +// EndpointsInterface has methods to work with Endpoints resources. 
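[Editor's note, not part of the vendored patch] As a rough usage sketch of the CoreV1Client defined above: NewForConfig builds the client (setConfigDefaults fills in the group version, API path, and codecs), after which the per-resource getters are used directly. The in-cluster config, the "default" namespace, and the listConfigMaps function name are placeholders assumed for this example.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/rest"
)

// listConfigMaps constructs a CoreV1Client and lists ConfigMaps in one namespace.
// A kubeconfig-derived rest.Config would work the same way as the in-cluster one.
func listConfigMaps() error {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return err
	}
	core, err := corev1client.NewForConfig(cfg)
	if err != nil {
		return err
	}
	cms, err := core.ConfigMaps("default").List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, cm := range cms.Items {
		fmt.Println(cm.Name)
	}
	return nil
}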
+type EndpointsInterface interface { + Create(*v1.Endpoints) (*v1.Endpoints, error) + Update(*v1.Endpoints) (*v1.Endpoints, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Endpoints, error) + List(opts meta_v1.ListOptions) (*v1.EndpointsList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error) + EndpointsExpansion +} + +// endpoints implements EndpointsInterface +type endpoints struct { + client rest.Interface + ns string +} + +// newEndpoints returns a Endpoints +func newEndpoints(c *CoreV1Client, namespace string) *endpoints { + return &endpoints{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. +func (c *endpoints) Create(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Post(). + Namespace(c.ns). + Resource("endpoints"). + Body(endpoints). + Do(). + Into(result) + return +} + +// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. +func (c *endpoints) Update(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Put(). + Namespace(c.ns). + Resource("endpoints"). + Name(endpoints.Name). + Body(endpoints). + Do(). + Into(result) + return +} + +// Delete takes name of the endpoints and deletes it. Returns an error if one occurs. +func (c *endpoints) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpoints"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *endpoints) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. +func (c *endpoints) Get(name string, options meta_v1.GetOptions) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Endpoints that match those selectors. +func (c *endpoints) List(opts meta_v1.ListOptions) (result *v1.EndpointsList, err error) { + result = &v1.EndpointsList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested endpoints. +func (c *endpoints) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched endpoints. 
+func (c *endpoints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("endpoints"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go new file mode 100644 index 000000000..c4ac11006 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// EventsGetter has a method to return a EventInterface. +// A group's client should implement this interface. +type EventsGetter interface { + Events(namespace string) EventInterface +} + +// EventInterface has methods to work with Event resources. +type EventInterface interface { + Create(*v1.Event) (*v1.Event, error) + Update(*v1.Event) (*v1.Event, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Event, error) + List(opts meta_v1.ListOptions) (*v1.EventList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error) + EventExpansion +} + +// events implements EventInterface +type events struct { + client rest.Interface + ns string +} + +// newEvents returns a Events +func newEvents(c *CoreV1Client, namespace string) *events { + return &events{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Create(event *v1.Event) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Post(). + Namespace(c.ns). + Resource("events"). + Body(event). + Do(). + Into(result) + return +} + +// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Update(event *v1.Event) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Put(). + Namespace(c.ns). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return +} + +// Delete takes name of the event and deletes it. Returns an error if one occurs. +func (c *events) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + Name(name). + Body(options). + Do(). 
+ Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *events) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the event, and returns the corresponding event object, and an error if there is any. +func (c *events) Get(name string, options meta_v1.GetOptions) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Events that match those selectors. +func (c *events) List(opts meta_v1.ListOptions) (result *v1.EventList, err error) { + result = &v1.EventList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested events. +func (c *events) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched event. +func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("events"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go new file mode 100644 index 000000000..9b4490ea9 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go @@ -0,0 +1,163 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/pkg/api/v1" +) + +// The EventExpansion interface allows manually adding extra methods to the EventInterface. +type EventExpansion interface { + // CreateWithEventNamespace is the same as a Create, except that it sends the request to the event.Namespace. + CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) + // UpdateWithEventNamespace is the same as a Update, except that it sends the request to the event.Namespace. 
+ UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) + PatchWithEventNamespace(event *v1.Event, data []byte) (*v1.Event, error) + // Search finds events about the specified object + Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) + // Returns the appropriate field selector based on the API version being used to communicate with the server. + // The returned field selector can be used with List and Watch to filter desired events. + GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector +} + +// CreateWithEventNamespace makes a new event. Returns the copy of the event the server returns, +// or an error. The namespace to create the event within is deduced from the +// event; it must either match this event client's namespace, or this event +// client must have been created with the "" namespace. +func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { + if e.ns != "" && event.Namespace != e.ns { + return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + } + result := &v1.Event{} + err := e.client.Post(). + NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Body(event). + Do(). + Into(result) + return result, err +} + +// UpdateWithEventNamespace modifies an existing event. It returns the copy of the event that the server returns, +// or an error. The namespace and key to update the event within is deduced from the event. The +// namespace must either match this event client's namespace, or this event client must have been +// created with the "" namespace. Update also requires the ResourceVersion to be set in the event +// object. +func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { + result := &v1.Event{} + err := e.client.Put(). + NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return result, err +} + +// PatchWithEventNamespace modifies an existing event. It returns the copy of +// the event that the server returns, or an error. The namespace and name of the +// target event is deduced from the incompleteEvent. The namespace must either +// match this event client's namespace, or this event client must have been +// created with the "" namespace. +func (e *events) PatchWithEventNamespace(incompleteEvent *v1.Event, data []byte) (*v1.Event, error) { + if e.ns != "" && incompleteEvent.Namespace != e.ns { + return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", incompleteEvent.Namespace, e.ns) + } + result := &v1.Event{} + err := e.client.Patch(types.StrategicMergePatchType). + NamespaceIfScoped(incompleteEvent.Namespace, len(incompleteEvent.Namespace) > 0). + Resource("events"). + Name(incompleteEvent.Name). + Body(data). + Do(). + Into(result) + return result, err +} + +// Search finds events about the specified object. The namespace of the +// object must match this event's client namespace unless the event client +// was made with the "" namespace. 
+func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) { + ref, err := v1.GetReference(scheme, objOrRef) + if err != nil { + return nil, err + } + if e.ns != "" && ref.Namespace != e.ns { + return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.ns) + } + stringRefKind := string(ref.Kind) + var refKind *string + if stringRefKind != "" { + refKind = &stringRefKind + } + stringRefUID := string(ref.UID) + var refUID *string + if stringRefUID != "" { + refUID = &stringRefUID + } + fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID) + return e.List(metav1.ListOptions{FieldSelector: fieldSelector.String()}) +} + +// Returns the appropriate field selector based on the API version being used to communicate with the server. +// The returned field selector can be used with List and Watch to filter desired events. +func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { + apiVersion := e.client.APIVersion().String() + field := fields.Set{} + if involvedObjectName != nil { + field[GetInvolvedObjectNameFieldLabel(apiVersion)] = *involvedObjectName + } + if involvedObjectNamespace != nil { + field["involvedObject.namespace"] = *involvedObjectNamespace + } + if involvedObjectKind != nil { + field["involvedObject.kind"] = *involvedObjectKind + } + if involvedObjectUID != nil { + field["involvedObject.uid"] = *involvedObjectUID + } + return field.AsSelector() +} + +// Returns the appropriate field label to use for name of the involved object as per the given API version. +func GetInvolvedObjectNameFieldLabel(version string) string { + return "involvedObject.name" +} + +// TODO: This is a temporary arrangement and will be removed once all clients are moved to use the clientset. +type EventSinkImpl struct { + Interface EventInterface +} + +func (e *EventSinkImpl) Create(event *v1.Event) (*v1.Event, error) { + return e.Interface.CreateWithEventNamespace(event) +} + +func (e *EventSinkImpl) Update(event *v1.Event) (*v1.Event, error) { + return e.Interface.UpdateWithEventNamespace(event) +} + +func (e *EventSinkImpl) Patch(event *v1.Event, data []byte) (*v1.Event, error) { + return e.Interface.PatchWithEventNamespace(event, data) +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go new file mode 100644 index 000000000..5fe0585b4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go @@ -0,0 +1,39 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
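[Editor's note, not part of the vendored patch] The event expansion methods above (GetFieldSelector and Search) exist so callers can narrow event lists to one involved object. A short hypothetical sketch follows; the podEvents function name and the Pod kind/namespace arguments are assumptions for illustration only.

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
	v1 "k8s.io/client-go/pkg/api/v1"
)

// podEvents lists events whose involvedObject references the named Pod, using the
// GetFieldSelector expansion method defined above to build the field selector.
func podEvents(core corev1client.CoreV1Interface, namespace, podName string) (*v1.EventList, error) {
	kind := "Pod"
	sel := core.Events(namespace).GetFieldSelector(&podName, &namespace, &kind, nil)
	return core.Events(namespace).List(metav1.ListOptions{FieldSelector: sel.String()})
}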
+*/ + +package v1 + +type ComponentStatusExpansion interface{} + +type ConfigMapExpansion interface{} + +type EndpointsExpansion interface{} + +type LimitRangeExpansion interface{} + +type PersistentVolumeExpansion interface{} + +type PersistentVolumeClaimExpansion interface{} + +type PodTemplateExpansion interface{} + +type ReplicationControllerExpansion interface{} + +type ResourceQuotaExpansion interface{} + +type SecretExpansion interface{} + +type ServiceAccountExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go new file mode 100644 index 000000000..998f03452 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// LimitRangesGetter has a method to return a LimitRangeInterface. +// A group's client should implement this interface. +type LimitRangesGetter interface { + LimitRanges(namespace string) LimitRangeInterface +} + +// LimitRangeInterface has methods to work with LimitRange resources. +type LimitRangeInterface interface { + Create(*v1.LimitRange) (*v1.LimitRange, error) + Update(*v1.LimitRange) (*v1.LimitRange, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.LimitRange, error) + List(opts meta_v1.ListOptions) (*v1.LimitRangeList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.LimitRange, err error) + LimitRangeExpansion +} + +// limitRanges implements LimitRangeInterface +type limitRanges struct { + client rest.Interface + ns string +} + +// newLimitRanges returns a LimitRanges +func newLimitRanges(c *CoreV1Client, namespace string) *limitRanges { + return &limitRanges{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. +func (c *limitRanges) Create(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Post(). + Namespace(c.ns). + Resource("limitranges"). + Body(limitRange). + Do(). + Into(result) + return +} + +// Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. +func (c *limitRanges) Update(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Put(). 
+ Namespace(c.ns). + Resource("limitranges"). + Name(limitRange.Name). + Body(limitRange). + Do(). + Into(result) + return +} + +// Delete takes name of the limitRange and deletes it. Returns an error if one occurs. +func (c *limitRanges) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("limitranges"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *limitRanges) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. +func (c *limitRanges) Get(name string, options meta_v1.GetOptions) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Get(). + Namespace(c.ns). + Resource("limitranges"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of LimitRanges that match those selectors. +func (c *limitRanges) List(opts meta_v1.ListOptions) (result *v1.LimitRangeList, err error) { + result = &v1.LimitRangeList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested limitRanges. +func (c *limitRanges) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched limitRange. +func (c *limitRanges) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("limitranges"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go new file mode 100644 index 000000000..3092bd8e1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go @@ -0,0 +1,161 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// NamespacesGetter has a method to return a NamespaceInterface. +// A group's client should implement this interface. 
+type NamespacesGetter interface { + Namespaces() NamespaceInterface +} + +// NamespaceInterface has methods to work with Namespace resources. +type NamespaceInterface interface { + Create(*v1.Namespace) (*v1.Namespace, error) + Update(*v1.Namespace) (*v1.Namespace, error) + UpdateStatus(*v1.Namespace) (*v1.Namespace, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Namespace, error) + List(opts meta_v1.ListOptions) (*v1.NamespaceList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) + NamespaceExpansion +} + +// namespaces implements NamespaceInterface +type namespaces struct { + client rest.Interface +} + +// newNamespaces returns a Namespaces +func newNamespaces(c *CoreV1Client) *namespaces { + return &namespaces{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. +func (c *namespaces) Create(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Post(). + Resource("namespaces"). + Body(namespace). + Do(). + Into(result) + return +} + +// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. +func (c *namespaces) Update(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Put(). + Resource("namespaces"). + Name(namespace.Name). + Body(namespace). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *namespaces) UpdateStatus(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Put(). + Resource("namespaces"). + Name(namespace.Name). + SubResource("status"). + Body(namespace). + Do(). + Into(result) + return +} + +// Delete takes name of the namespace and deletes it. Returns an error if one occurs. +func (c *namespaces) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Resource("namespaces"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *namespaces) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Resource("namespaces"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. +func (c *namespaces) Get(name string, options meta_v1.GetOptions) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Get(). + Resource("namespaces"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Namespaces that match those selectors. +func (c *namespaces) List(opts meta_v1.ListOptions) (result *v1.NamespaceList, err error) { + result = &v1.NamespaceList{} + err = c.client.Get(). + Resource("namespaces"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested namespaces. +func (c *namespaces) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("namespaces"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched namespace. +func (c *namespaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Patch(pt). + Resource("namespaces"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/namespace_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go similarity index 76% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/namespace_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go index 8f47aec48..203430000 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/namespace_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,18 +14,18 @@ See the License for the specific language governing permissions and limitations under the License. */ -package unversioned +package v1 -import "k8s.io/kubernetes/pkg/api" +import "k8s.io/client-go/pkg/api/v1" // The NamespaceExpansion interface allows manually adding extra methods to the NamespaceInterface. type NamespaceExpansion interface { - Finalize(item *api.Namespace) (*api.Namespace, error) + Finalize(item *v1.Namespace) (*v1.Namespace, error) } // Finalize takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. -func (c *namespaces) Finalize(namespace *api.Namespace) (result *api.Namespace, err error) { - result = &api.Namespace{} +func (c *namespaces) Finalize(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} err = c.client.Put().Resource("namespaces").Name(namespace.Name).SubResource("finalize").Body(namespace).Do().Into(result) return } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go new file mode 100644 index 000000000..6b82d4fab --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go @@ -0,0 +1,161 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// NodesGetter has a method to return a NodeInterface. +// A group's client should implement this interface. +type NodesGetter interface { + Nodes() NodeInterface +} + +// NodeInterface has methods to work with Node resources. +type NodeInterface interface { + Create(*v1.Node) (*v1.Node, error) + Update(*v1.Node) (*v1.Node, error) + UpdateStatus(*v1.Node) (*v1.Node, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Node, error) + List(opts meta_v1.ListOptions) (*v1.NodeList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error) + NodeExpansion +} + +// nodes implements NodeInterface +type nodes struct { + client rest.Interface +} + +// newNodes returns a Nodes +func newNodes(c *CoreV1Client) *nodes { + return &nodes{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. +func (c *nodes) Create(node *v1.Node) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Post(). + Resource("nodes"). + Body(node). + Do(). + Into(result) + return +} + +// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. +func (c *nodes) Update(node *v1.Node) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Put(). + Resource("nodes"). + Name(node.Name). + Body(node). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *nodes) UpdateStatus(node *v1.Node) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Put(). + Resource("nodes"). + Name(node.Name). + SubResource("status"). + Body(node). + Do(). + Into(result) + return +} + +// Delete takes name of the node and deletes it. Returns an error if one occurs. +func (c *nodes) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Resource("nodes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *nodes) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Resource("nodes"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the node, and returns the corresponding node object, and an error if there is any. +func (c *nodes) Get(name string, options meta_v1.GetOptions) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Get(). + Resource("nodes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Nodes that match those selectors. 
+func (c *nodes) List(opts meta_v1.ListOptions) (result *v1.NodeList, err error) { + result = &v1.NodeList{} + err = c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested nodes. +func (c *nodes) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched node. +func (c *nodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Patch(pt). + Resource("nodes"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/node_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go similarity index 75% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/node_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go index 3146cdb35..29c12aabc 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/node_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,22 +14,25 @@ See the License for the specific language governing permissions and limitations under the License. */ -package unversioned +package v1 -import "k8s.io/kubernetes/pkg/api" +import ( + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/pkg/api/v1" +) // The NodeExpansion interface allows manually adding extra methods to the NodeInterface. type NodeExpansion interface { // PatchStatus modifies the status of an existing node. It returns the copy // of the node that the server returns, or an error. - PatchStatus(nodeName string, data []byte) (*api.Node, error) + PatchStatus(nodeName string, data []byte) (*v1.Node, error) } // PatchStatus modifies the status of an existing node. It returns the copy of // the node that the server returns, or an error. -func (c *nodes) PatchStatus(nodeName string, data []byte) (*api.Node, error) { - result := &api.Node{} - err := c.client.Patch(api.StrategicMergePatchType). +func (c *nodes) PatchStatus(nodeName string, data []byte) (*v1.Node, error) { + result := &v1.Node{} + err := c.client.Patch(types.StrategicMergePatchType). Resource("nodes"). Name(nodeName). SubResource("status"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go new file mode 100644 index 000000000..16a4b7316 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go @@ -0,0 +1,161 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// PersistentVolumesGetter has a method to return a PersistentVolumeInterface. +// A group's client should implement this interface. +type PersistentVolumesGetter interface { + PersistentVolumes() PersistentVolumeInterface +} + +// PersistentVolumeInterface has methods to work with PersistentVolume resources. +type PersistentVolumeInterface interface { + Create(*v1.PersistentVolume) (*v1.PersistentVolume, error) + Update(*v1.PersistentVolume) (*v1.PersistentVolume, error) + UpdateStatus(*v1.PersistentVolume) (*v1.PersistentVolume, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.PersistentVolume, error) + List(opts meta_v1.ListOptions) (*v1.PersistentVolumeList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolume, err error) + PersistentVolumeExpansion +} + +// persistentVolumes implements PersistentVolumeInterface +type persistentVolumes struct { + client rest.Interface +} + +// newPersistentVolumes returns a PersistentVolumes +func newPersistentVolumes(c *CoreV1Client) *persistentVolumes { + return &persistentVolumes{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. +func (c *persistentVolumes) Create(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Post(). + Resource("persistentvolumes"). + Body(persistentVolume). + Do(). + Into(result) + return +} + +// Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. +func (c *persistentVolumes) Update(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Put(). + Resource("persistentvolumes"). + Name(persistentVolume.Name). + Body(persistentVolume). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *persistentVolumes) UpdateStatus(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Put(). + Resource("persistentvolumes"). + Name(persistentVolume.Name). + SubResource("status"). + Body(persistentVolume). + Do(). + Into(result) + return +} + +// Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. 
+func (c *persistentVolumes) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Resource("persistentvolumes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *persistentVolumes) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Resource("persistentvolumes"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. +func (c *persistentVolumes) Get(name string, options meta_v1.GetOptions) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Get(). + Resource("persistentvolumes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. +func (c *persistentVolumes) List(opts meta_v1.ListOptions) (result *v1.PersistentVolumeList, err error) { + result = &v1.PersistentVolumeList{} + err = c.client.Get(). + Resource("persistentvolumes"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested persistentVolumes. +func (c *persistentVolumes) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("persistentvolumes"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched persistentVolume. +func (c *persistentVolumes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Patch(pt). + Resource("persistentvolumes"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go new file mode 100644 index 000000000..ae7cc4a4f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// PersistentVolumeClaimsGetter has a method to return a PersistentVolumeClaimInterface. +// A group's client should implement this interface. 
+type PersistentVolumeClaimsGetter interface { + PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface +} + +// PersistentVolumeClaimInterface has methods to work with PersistentVolumeClaim resources. +type PersistentVolumeClaimInterface interface { + Create(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) + Update(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) + UpdateStatus(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.PersistentVolumeClaim, error) + List(opts meta_v1.ListOptions) (*v1.PersistentVolumeClaimList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolumeClaim, err error) + PersistentVolumeClaimExpansion +} + +// persistentVolumeClaims implements PersistentVolumeClaimInterface +type persistentVolumeClaims struct { + client rest.Interface + ns string +} + +// newPersistentVolumeClaims returns a PersistentVolumeClaims +func newPersistentVolumeClaims(c *CoreV1Client, namespace string) *persistentVolumeClaims { + return &persistentVolumeClaims{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. +func (c *persistentVolumeClaims) Create(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { + result = &v1.PersistentVolumeClaim{} + err = c.client.Post(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Body(persistentVolumeClaim). + Do(). + Into(result) + return +} + +// Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. +func (c *persistentVolumeClaims) Update(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { + result = &v1.PersistentVolumeClaim{} + err = c.client.Put(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Name(persistentVolumeClaim.Name). + Body(persistentVolumeClaim). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *persistentVolumeClaims) UpdateStatus(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { + result = &v1.PersistentVolumeClaim{} + err = c.client.Put(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Name(persistentVolumeClaim.Name). + SubResource("status"). + Body(persistentVolumeClaim). + Do(). + Into(result) + return +} + +// Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs. +func (c *persistentVolumeClaims) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *persistentVolumeClaims) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). 
+ Namespace(c.ns). + Resource("persistentvolumeclaims"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. +func (c *persistentVolumeClaims) Get(name string, options meta_v1.GetOptions) (result *v1.PersistentVolumeClaim, err error) { + result = &v1.PersistentVolumeClaim{} + err = c.client.Get(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. +func (c *persistentVolumeClaims) List(opts meta_v1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { + result = &v1.PersistentVolumeClaimList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested persistentVolumeClaims. +func (c *persistentVolumeClaims) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched persistentVolumeClaim. +func (c *persistentVolumeClaims) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolumeClaim, err error) { + result = &v1.PersistentVolumeClaim{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go new file mode 100644 index 000000000..5648750ea --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// PodsGetter has a method to return a PodInterface. +// A group's client should implement this interface. +type PodsGetter interface { + Pods(namespace string) PodInterface +} + +// PodInterface has methods to work with Pod resources. 
+type PodInterface interface { + Create(*v1.Pod) (*v1.Pod, error) + Update(*v1.Pod) (*v1.Pod, error) + UpdateStatus(*v1.Pod) (*v1.Pod, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Pod, error) + List(opts meta_v1.ListOptions) (*v1.PodList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) + PodExpansion +} + +// pods implements PodInterface +type pods struct { + client rest.Interface + ns string +} + +// newPods returns a Pods +func newPods(c *CoreV1Client, namespace string) *pods { + return &pods{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *pods) Create(pod *v1.Pod) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Post(). + Namespace(c.ns). + Resource("pods"). + Body(pod). + Do(). + Into(result) + return +} + +// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *pods) Update(pod *v1.Pod) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pods"). + Name(pod.Name). + Body(pod). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *pods) UpdateStatus(pod *v1.Pod) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pods"). + Name(pod.Name). + SubResource("status"). + Body(pod). + Do(). + Into(result) + return +} + +// Delete takes name of the pod and deletes it. Returns an error if one occurs. +func (c *pods) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pods"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *pods) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. +func (c *pods) Get(name string, options meta_v1.GetOptions) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Pods that match those selectors. +func (c *pods) List(opts meta_v1.ListOptions) (result *v1.PodList, err error) { + result = &v1.PodList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested pods. +func (c *pods) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("pods"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched pod. +func (c *pods) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("pods"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go new file mode 100644 index 000000000..82f3b7fba --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go @@ -0,0 +1,45 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/api/v1" + policy "k8s.io/client-go/pkg/apis/policy/v1beta1" + restclient "k8s.io/client-go/rest" +) + +// The PodExpansion interface allows manually adding extra methods to the PodInterface. +type PodExpansion interface { + Bind(binding *v1.Binding) error + Evict(eviction *policy.Eviction) error + GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request +} + +// Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored). +func (c *pods) Bind(binding *v1.Binding) error { + return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).SubResource("binding").Body(binding).Do().Error() +} + +func (c *pods) Evict(eviction *policy.Eviction) error { + return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do().Error() +} + +// Get constructs a request for getting the logs for a pod +func (c *pods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request { + return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, api.ParameterCodec) +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go new file mode 100644 index 000000000..19c82f17b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
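For context, a minimal sketch of how a consumer such as kubewatch might drive the generated Pod client and PodExpansion methods above: build a clientset from a kubeconfig, list pods, then watch them through the same typed interface. This is not part of the vendored diff; the kubeconfig path and the "default" namespace are illustrative assumptions, and the import paths follow the client-go layout used in this vendor tree.

package main

import (
	"fmt"

	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	api_v1 "k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from a kubeconfig file; the path is an assumption.
	config, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
	if err != nil {
		panic(err)
	}

	// NewForConfig wires up the typed clients, including the CoreV1 Pod client
	// generated in pod.go above.
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	pods := clientset.CoreV1().Pods("default")

	// List pods in the namespace using the generated List method.
	list, err := pods.List(meta_v1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, p := range list.Items {
		fmt.Println("existing pod:", p.Name)
	}

	// Watch pods; each event carries the changed object, which is the kind of
	// signal a watcher like kubewatch turns into notifications.
	w, err := pods.Watch(meta_v1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for ev := range w.ResultChan() {
		if pod, ok := ev.Object.(*api_v1.Pod); ok {
			fmt.Println(ev.Type, pod.Name)
		}
	}
}

The same List/Watch pattern applies to every typed client in this diff (secrets, services, replication controllers, and so on), since they all expose the identical generated method set.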
+*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// PodTemplatesGetter has a method to return a PodTemplateInterface. +// A group's client should implement this interface. +type PodTemplatesGetter interface { + PodTemplates(namespace string) PodTemplateInterface +} + +// PodTemplateInterface has methods to work with PodTemplate resources. +type PodTemplateInterface interface { + Create(*v1.PodTemplate) (*v1.PodTemplate, error) + Update(*v1.PodTemplate) (*v1.PodTemplate, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.PodTemplate, error) + List(opts meta_v1.ListOptions) (*v1.PodTemplateList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodTemplate, err error) + PodTemplateExpansion +} + +// podTemplates implements PodTemplateInterface +type podTemplates struct { + client rest.Interface + ns string +} + +// newPodTemplates returns a PodTemplates +func newPodTemplates(c *CoreV1Client, namespace string) *podTemplates { + return &podTemplates{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. +func (c *podTemplates) Create(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podtemplates"). + Body(podTemplate). + Do(). + Into(result) + return +} + +// Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. +func (c *podTemplates) Update(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podtemplates"). + Name(podTemplate.Name). + Body(podTemplate). + Do(). + Into(result) + return +} + +// Delete takes name of the podTemplate and deletes it. Returns an error if one occurs. +func (c *podTemplates) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podtemplates"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podTemplates) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. +func (c *podTemplates) Get(name string, options meta_v1.GetOptions) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podtemplates"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodTemplates that match those selectors. 
+func (c *podTemplates) List(opts meta_v1.ListOptions) (result *v1.PodTemplateList, err error) { + result = &v1.PodTemplateList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podTemplates. +func (c *podTemplates) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched podTemplate. +func (c *podTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("podtemplates"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go new file mode 100644 index 000000000..2f4f4fa9e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// ReplicationControllersGetter has a method to return a ReplicationControllerInterface. +// A group's client should implement this interface. +type ReplicationControllersGetter interface { + ReplicationControllers(namespace string) ReplicationControllerInterface +} + +// ReplicationControllerInterface has methods to work with ReplicationController resources. 
+type ReplicationControllerInterface interface { + Create(*v1.ReplicationController) (*v1.ReplicationController, error) + Update(*v1.ReplicationController) (*v1.ReplicationController, error) + UpdateStatus(*v1.ReplicationController) (*v1.ReplicationController, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ReplicationController, error) + List(opts meta_v1.ListOptions) (*v1.ReplicationControllerList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) + ReplicationControllerExpansion +} + +// replicationControllers implements ReplicationControllerInterface +type replicationControllers struct { + client rest.Interface + ns string +} + +// newReplicationControllers returns a ReplicationControllers +func newReplicationControllers(c *CoreV1Client, namespace string) *replicationControllers { + return &replicationControllers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. +func (c *replicationControllers) Create(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Body(replicationController). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. +func (c *replicationControllers) Update(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(replicationController.Name). + Body(replicationController). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *replicationControllers) UpdateStatus(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(replicationController.Name). + SubResource("status"). + Body(replicationController). + Do(). + Into(result) + return +} + +// Delete takes name of the replicationController and deletes it. Returns an error if one occurs. +func (c *replicationControllers) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicationControllers) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). 
+ Error() +} + +// Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. +func (c *replicationControllers) Get(name string, options meta_v1.GetOptions) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. +func (c *replicationControllers) List(opts meta_v1.ListOptions) (result *v1.ReplicationControllerList, err error) { + result = &v1.ReplicationControllerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicationControllers. +func (c *replicationControllers) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched replicationController. +func (c *replicationControllers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("replicationcontrollers"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go new file mode 100644 index 000000000..565fe1e6d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// ResourceQuotasGetter has a method to return a ResourceQuotaInterface. +// A group's client should implement this interface. +type ResourceQuotasGetter interface { + ResourceQuotas(namespace string) ResourceQuotaInterface +} + +// ResourceQuotaInterface has methods to work with ResourceQuota resources. 
+type ResourceQuotaInterface interface { + Create(*v1.ResourceQuota) (*v1.ResourceQuota, error) + Update(*v1.ResourceQuota) (*v1.ResourceQuota, error) + UpdateStatus(*v1.ResourceQuota) (*v1.ResourceQuota, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ResourceQuota, error) + List(opts meta_v1.ListOptions) (*v1.ResourceQuotaList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResourceQuota, err error) + ResourceQuotaExpansion +} + +// resourceQuotas implements ResourceQuotaInterface +type resourceQuotas struct { + client rest.Interface + ns string +} + +// newResourceQuotas returns a ResourceQuotas +func newResourceQuotas(c *CoreV1Client, namespace string) *resourceQuotas { + return &resourceQuotas{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. +func (c *resourceQuotas) Create(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Post(). + Namespace(c.ns). + Resource("resourcequotas"). + Body(resourceQuota). + Do(). + Into(result) + return +} + +// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. +func (c *resourceQuotas) Update(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(resourceQuota.Name). + Body(resourceQuota). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *resourceQuotas) UpdateStatus(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(resourceQuota.Name). + SubResource("status"). + Body(resourceQuota). + Do(). + Into(result) + return +} + +// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. +func (c *resourceQuotas) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *resourceQuotas) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. +func (c *resourceQuotas) Get(name string, options meta_v1.GetOptions) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. +func (c *resourceQuotas) List(opts meta_v1.ListOptions) (result *v1.ResourceQuotaList, err error) { + result = &v1.ResourceQuotaList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resourceQuotas. +func (c *resourceQuotas) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched resourceQuota. +func (c *resourceQuotas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("resourcequotas"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go new file mode 100644 index 000000000..fbcede818 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// SecretsGetter has a method to return a SecretInterface. +// A group's client should implement this interface. +type SecretsGetter interface { + Secrets(namespace string) SecretInterface +} + +// SecretInterface has methods to work with Secret resources. +type SecretInterface interface { + Create(*v1.Secret) (*v1.Secret, error) + Update(*v1.Secret) (*v1.Secret, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Secret, error) + List(opts meta_v1.ListOptions) (*v1.SecretList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) + SecretExpansion +} + +// secrets implements SecretInterface +type secrets struct { + client rest.Interface + ns string +} + +// newSecrets returns a Secrets +func newSecrets(c *CoreV1Client, namespace string) *secrets { + return &secrets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. 
+func (c *secrets) Create(secret *v1.Secret) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Post(). + Namespace(c.ns). + Resource("secrets"). + Body(secret). + Do(). + Into(result) + return +} + +// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. +func (c *secrets) Update(secret *v1.Secret) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Put(). + Namespace(c.ns). + Resource("secrets"). + Name(secret.Name). + Body(secret). + Do(). + Into(result) + return +} + +// Delete takes name of the secret and deletes it. Returns an error if one occurs. +func (c *secrets) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("secrets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *secrets) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. +func (c *secrets) Get(name string, options meta_v1.GetOptions) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Secrets that match those selectors. +func (c *secrets) List(opts meta_v1.ListOptions) (result *v1.SecretList, err error) { + result = &v1.SecretList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested secrets. +func (c *secrets) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched secret. +func (c *secrets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("secrets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go new file mode 100644 index 000000000..0eccf79a8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// ServicesGetter has a method to return a ServiceInterface. +// A group's client should implement this interface. +type ServicesGetter interface { + Services(namespace string) ServiceInterface +} + +// ServiceInterface has methods to work with Service resources. +type ServiceInterface interface { + Create(*v1.Service) (*v1.Service, error) + Update(*v1.Service) (*v1.Service, error) + UpdateStatus(*v1.Service) (*v1.Service, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Service, error) + List(opts meta_v1.ListOptions) (*v1.ServiceList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) + ServiceExpansion +} + +// services implements ServiceInterface +type services struct { + client rest.Interface + ns string +} + +// newServices returns a Services +func newServices(c *CoreV1Client, namespace string) *services { + return &services{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Create(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Post(). + Namespace(c.ns). + Resource("services"). + Body(service). + Do(). + Into(result) + return +} + +// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Update(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + Body(service). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *services) UpdateStatus(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + SubResource("status"). + Body(service). + Do(). + Into(result) + return +} + +// Delete takes name of the service and deletes it. Returns an error if one occurs. +func (c *services) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *services) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the service, and returns the corresponding service object, and an error if there is any. 
+func (c *services) Get(name string, options meta_v1.GetOptions) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Services that match those selectors. +func (c *services) List(opts meta_v1.ListOptions) (result *v1.ServiceList, err error) { + result = &v1.ServiceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested services. +func (c *services) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched service. +func (c *services) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("services"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/service_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go similarity index 87% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/service_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go index 89266e6cd..4937fd1a3 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/service_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,11 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -package unversioned +package v1 import ( - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/util/net" + "k8s.io/apimachinery/pkg/util/net" + restclient "k8s.io/client-go/rest" ) // The ServiceExpansion interface allows manually adding extra methods to the ServiceInterface. @@ -29,9 +29,9 @@ type ServiceExpansion interface { // ProxyGet returns a response of the service by calling it through the proxy. func (c *services) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { request := c.client.Get(). - Prefix("proxy"). Namespace(c.ns). Resource("services"). + SubResource("proxy"). Name(net.JoinSchemeNamePort(scheme, name, port)). Suffix(path) for k, v := range params { diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go new file mode 100644 index 000000000..f71789f6a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/api/v1" + rest "k8s.io/client-go/rest" +) + +// ServiceAccountsGetter has a method to return a ServiceAccountInterface. +// A group's client should implement this interface. +type ServiceAccountsGetter interface { + ServiceAccounts(namespace string) ServiceAccountInterface +} + +// ServiceAccountInterface has methods to work with ServiceAccount resources. +type ServiceAccountInterface interface { + Create(*v1.ServiceAccount) (*v1.ServiceAccount, error) + Update(*v1.ServiceAccount) (*v1.ServiceAccount, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ServiceAccount, error) + List(opts meta_v1.ListOptions) (*v1.ServiceAccountList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceAccount, err error) + ServiceAccountExpansion +} + +// serviceAccounts implements ServiceAccountInterface +type serviceAccounts struct { + client rest.Interface + ns string +} + +// newServiceAccounts returns a ServiceAccounts +func newServiceAccounts(c *CoreV1Client, namespace string) *serviceAccounts { + return &serviceAccounts{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. +func (c *serviceAccounts) Create(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Post(). + Namespace(c.ns). + Resource("serviceaccounts"). + Body(serviceAccount). + Do(). + Into(result) + return +} + +// Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. +func (c *serviceAccounts) Update(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Put(). + Namespace(c.ns). + Resource("serviceaccounts"). + Name(serviceAccount.Name). + Body(serviceAccount). + Do(). + Into(result) + return +} + +// Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. +func (c *serviceAccounts) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("serviceaccounts"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *serviceAccounts) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). 
+ Resource("serviceaccounts"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. +func (c *serviceAccounts) Get(name string, options meta_v1.GetOptions) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serviceaccounts"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. +func (c *serviceAccounts) List(opts meta_v1.ListOptions) (result *v1.ServiceAccountList, err error) { + result = &v1.ServiceAccountList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested serviceAccounts. +func (c *serviceAccounts) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched serviceAccount. +func (c *serviceAccounts) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("serviceaccounts"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go new file mode 100644 index 000000000..8c132db9b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + rest "k8s.io/client-go/rest" +) + +// DaemonSetsGetter has a method to return a DaemonSetInterface. +// A group's client should implement this interface. +type DaemonSetsGetter interface { + DaemonSets(namespace string) DaemonSetInterface +} + +// DaemonSetInterface has methods to work with DaemonSet resources. 
+type DaemonSetInterface interface { + Create(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) + Update(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) + UpdateStatus(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.DaemonSet, error) + List(opts v1.ListOptions) (*v1beta1.DaemonSetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) + DaemonSetExpansion +} + +// daemonSets implements DaemonSetInterface +type daemonSets struct { + client rest.Interface + ns string +} + +// newDaemonSets returns a DaemonSets +func newDaemonSets(c *ExtensionsV1beta1Client, namespace string) *daemonSets { + return &daemonSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Create(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("daemonsets"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Update(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + Body(daemonSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *daemonSets) UpdateStatus(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + SubResource("status"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. +func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. +func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. 
+func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { + result = &v1beta1.DaemonSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested daemonSets. +func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched daemonSet. +func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("daemonsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go new file mode 100644 index 000000000..7d0122c2a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + rest "k8s.io/client-go/rest" +) + +// DeploymentsGetter has a method to return a DeploymentInterface. +// A group's client should implement this interface. +type DeploymentsGetter interface { + Deployments(namespace string) DeploymentInterface +} + +// DeploymentInterface has methods to work with Deployment resources. +type DeploymentInterface interface { + Create(*v1beta1.Deployment) (*v1beta1.Deployment, error) + Update(*v1beta1.Deployment) (*v1beta1.Deployment, error) + UpdateStatus(*v1beta1.Deployment) (*v1beta1.Deployment, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Deployment, error) + List(opts v1.ListOptions) (*v1beta1.DeploymentList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) + DeploymentExpansion +} + +// deployments implements DeploymentInterface +type deployments struct { + client rest.Interface + ns string +} + +// newDeployments returns a Deployments +func newDeployments(c *ExtensionsV1beta1Client, namespace string) *deployments { + return &deployments{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a deployment and creates it. 
Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deployments"). + Body(deployment). + Do(). + Into(result) + return +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + Body(deployment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + SubResource("status"). + Body(deployment). + Do(). + Into(result) + return +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { + result = &v1beta1.DeploymentList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched deployment. +func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("deployments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/deployment_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go similarity index 78% rename from vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/deployment_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go index 9969aecc9..e737f09ae 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/deployment_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,16 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -package unversioned +package v1beta1 -import "k8s.io/kubernetes/pkg/apis/extensions" +import "k8s.io/client-go/pkg/apis/extensions/v1beta1" // The DeploymentExpansion interface allows manually adding extra methods to the DeploymentInterface. type DeploymentExpansion interface { - Rollback(*extensions.DeploymentRollback) error + Rollback(*v1beta1.DeploymentRollback) error } // Rollback applied the provided DeploymentRollback to the named deployment in the current namespace. -func (c *deployments) Rollback(deploymentRollback *extensions.DeploymentRollback) error { +func (c *deployments) Rollback(deploymentRollback *v1beta1.DeploymentRollback) error { return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).SubResource("rollback").Body(deploymentRollback).Do().Error() } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go new file mode 100644 index 000000000..11b523897 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go new file mode 100644 index 000000000..5284346e1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -0,0 +1,118 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + rest "k8s.io/client-go/rest" +) + +type ExtensionsV1beta1Interface interface { + RESTClient() rest.Interface + DaemonSetsGetter + DeploymentsGetter + IngressesGetter + PodSecurityPoliciesGetter + ReplicaSetsGetter + ScalesGetter + ThirdPartyResourcesGetter +} + +// ExtensionsV1beta1Client is used to interact with features provided by the extensions group. +type ExtensionsV1beta1Client struct { + restClient rest.Interface +} + +func (c *ExtensionsV1beta1Client) DaemonSets(namespace string) DaemonSetInterface { + return newDaemonSets(c, namespace) +} + +func (c *ExtensionsV1beta1Client) Deployments(namespace string) DeploymentInterface { + return newDeployments(c, namespace) +} + +func (c *ExtensionsV1beta1Client) Ingresses(namespace string) IngressInterface { + return newIngresses(c, namespace) +} + +func (c *ExtensionsV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { + return newPodSecurityPolicies(c) +} + +func (c *ExtensionsV1beta1Client) ReplicaSets(namespace string) ReplicaSetInterface { + return newReplicaSets(c, namespace) +} + +func (c *ExtensionsV1beta1Client) Scales(namespace string) ScaleInterface { + return newScales(c, namespace) +} + +func (c *ExtensionsV1beta1Client) ThirdPartyResources() ThirdPartyResourceInterface { + return newThirdPartyResources(c) +} + +// NewForConfig creates a new ExtensionsV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*ExtensionsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &ExtensionsV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new ExtensionsV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ExtensionsV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ExtensionsV1beta1Client for the given RESTClient. +func New(c rest.Interface) *ExtensionsV1beta1Client { + return &ExtensionsV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
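Aside, not part of the vendored files: the ExtensionsV1beta1Client above is consumed through NewForConfig and the per-resource getters. A minimal sketch of listing Deployments, assuming a *rest.Config obtained elsewhere; the helper name and namespace argument are hypothetical.

package example

import (
	"fmt"

	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	extv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
	"k8s.io/client-go/rest"
)

// listDeployments builds the typed extensions client from an existing rest.Config
// (NewForConfig fills in GroupVersion, APIPath and the serializer via setConfigDefaults)
// and prints the names of the Deployments found in the given namespace.
func listDeployments(cfg *rest.Config, namespace string) error {
	client, err := extv1beta1.NewForConfig(cfg)
	if err != nil {
		return err
	}
	list, err := client.Deployments(namespace).List(meta_v1.ListOptions{})
	if err != nil {
		return err
	}
	for _, d := range list.Items {
		fmt.Println(d.Name)
	}
	return nil
}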
+func (c *ExtensionsV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go new file mode 100644 index 000000000..d0a3d64bc --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +type DaemonSetExpansion interface{} + +type IngressExpansion interface{} + +type PodSecurityPolicyExpansion interface{} + +type ReplicaSetExpansion interface{} + +type ThirdPartyResourceExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go new file mode 100644 index 000000000..86bf65b8f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + rest "k8s.io/client-go/rest" +) + +// IngressesGetter has a method to return a IngressInterface. +// A group's client should implement this interface. +type IngressesGetter interface { + Ingresses(namespace string) IngressInterface +} + +// IngressInterface has methods to work with Ingress resources. 
+type IngressInterface interface { + Create(*v1beta1.Ingress) (*v1beta1.Ingress, error) + Update(*v1beta1.Ingress) (*v1beta1.Ingress, error) + UpdateStatus(*v1beta1.Ingress) (*v1beta1.Ingress, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Ingress, error) + List(opts v1.ListOptions) (*v1beta1.IngressList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) + IngressExpansion +} + +// ingresses implements IngressInterface +type ingresses struct { + client rest.Interface + ns string +} + +// newIngresses returns a Ingresses +func newIngresses(c *ExtensionsV1beta1Client, namespace string) *ingresses { + return &ingresses{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *ingresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Post(). + Namespace(c.ns). + Resource("ingresses"). + Body(ingress). + Do(). + Into(result) + return +} + +// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *ingresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ingresses"). + Name(ingress.Name). + Body(ingress). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *ingresses) UpdateStatus(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ingresses"). + Name(ingress.Name). + SubResource("status"). + Body(ingress). + Do(). + Into(result) + return +} + +// Delete takes name of the ingress and deletes it. Returns an error if one occurs. +func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("ingresses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. +func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Ingresses that match those selectors. +func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { + result = &v1beta1.IngressList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested ingresses. +func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched ingress. +func (c *ingresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("ingresses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go new file mode 100644 index 000000000..ad57a6cab --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + rest "k8s.io/client-go/rest" +) + +// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. +// A group's client should implement this interface. +type PodSecurityPoliciesGetter interface { + PodSecurityPolicies() PodSecurityPolicyInterface +} + +// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. +type PodSecurityPolicyInterface interface { + Create(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) + Update(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) + List(opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) + PodSecurityPolicyExpansion +} + +// podSecurityPolicies implements PodSecurityPolicyInterface +type podSecurityPolicies struct { + client rest.Interface +} + +// newPodSecurityPolicies returns a PodSecurityPolicies +func newPodSecurityPolicies(c *ExtensionsV1beta1Client) *podSecurityPolicies { + return &podSecurityPolicies{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. 
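Aside, not part of the vendored files: PodSecurityPolicies are cluster-scoped, so the podSecurityPolicies struct holds only a rest.Interface with no namespace, and none of its request chains set one. A minimal sketch of fetching a policy by name, assuming an already-constructed *ExtensionsV1beta1Client and a hypothetical policy name.

package example

import (
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	extv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
	extensionsv1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1"
)

// getPodSecurityPolicy reads a single cluster-scoped PodSecurityPolicy; note that
// the getter takes no namespace argument, unlike Deployments or Ingresses.
func getPodSecurityPolicy(client *extv1beta1.ExtensionsV1beta1Client, name string) (*extensionsv1beta1.PodSecurityPolicy, error) {
	return client.PodSecurityPolicies().Get(name, meta_v1.GetOptions{})
}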
+func (c *podSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Post(). + Resource("podsecuritypolicies"). + Body(podSecurityPolicy). + Do(). + Into(result) + return +} + +// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *podSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Put(). + Resource("podsecuritypolicies"). + Name(podSecurityPolicy.Name). + Body(podSecurityPolicy). + Do(). + Into(result) + return +} + +// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. +func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("podsecuritypolicies"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("podsecuritypolicies"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. +func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Get(). + Resource("podsecuritypolicies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. +func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { + result = &v1beta1.PodSecurityPolicyList{} + err = c.client.Get(). + Resource("podsecuritypolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podSecurityPolicies. +func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("podsecuritypolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched podSecurityPolicy. +func (c *podSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Patch(pt). + Resource("podsecuritypolicies"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go new file mode 100644 index 000000000..aa6f505a2 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + rest "k8s.io/client-go/rest" +) + +// ReplicaSetsGetter has a method to return a ReplicaSetInterface. +// A group's client should implement this interface. +type ReplicaSetsGetter interface { + ReplicaSets(namespace string) ReplicaSetInterface +} + +// ReplicaSetInterface has methods to work with ReplicaSet resources. +type ReplicaSetInterface interface { + Create(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) + Update(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) + UpdateStatus(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ReplicaSet, error) + List(opts v1.ListOptions) (*v1beta1.ReplicaSetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) + ReplicaSetExpansion +} + +// replicaSets implements ReplicaSetInterface +type replicaSets struct { + client rest.Interface + ns string +} + +// newReplicaSets returns a ReplicaSets +func newReplicaSets(c *ExtensionsV1beta1Client, namespace string) *replicaSets { + return &replicaSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Create(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicasets"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Update(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + Body(replicaSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). + +func (c *replicaSets) UpdateStatus(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + SubResource("status"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. +func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). 
+ Resource("replicasets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. +func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. +func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { + result = &v1beta1.ReplicaSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicaSets. +func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched replicaSet. +func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("replicasets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go new file mode 100644 index 000000000..733012ade --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// ScalesGetter has a method to return a ScaleInterface. +// A group's client should implement this interface. +type ScalesGetter interface { + Scales(namespace string) ScaleInterface +} + +// ScaleInterface has methods to work with Scale resources. 
+type ScaleInterface interface { + ScaleExpansion +} + +// scales implements ScaleInterface +type scales struct { + client rest.Interface + ns string +} + +// newScales returns a Scales +func newScales(c *ExtensionsV1beta1Client, namespace string) *scales { + return &scales{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go new file mode 100644 index 000000000..efeae4b01 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go @@ -0,0 +1,65 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/pkg/apis/extensions/v1beta1" +) + +// The ScaleExpansion interface allows manually adding extra methods to the ScaleInterface. +type ScaleExpansion interface { + Get(kind string, name string) (*v1beta1.Scale, error) + Update(kind string, scale *v1beta1.Scale) (*v1beta1.Scale, error) +} + +// Get takes the reference to scale subresource and returns the subresource or error, if one occurs. +func (c *scales) Get(kind string, name string) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + + // TODO this method needs to take a proper unambiguous kind + fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} + resource, _ := meta.KindToResource(fullyQualifiedKind) + + err = c.client.Get(). + Namespace(c.ns). + Resource(resource.Resource). + Name(name). + SubResource("scale"). + Do(). + Into(result) + return +} + +func (c *scales) Update(kind string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + + // TODO this method needs to take a proper unambiguous kind + fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} + resource, _ := meta.KindToResource(fullyQualifiedKind) + + err = c.client.Put(). + Namespace(scale.Namespace). + Resource(resource.Resource). + Name(scale.Name). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go new file mode 100644 index 000000000..617c2069b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/extensions/v1beta1" + rest "k8s.io/client-go/rest" +) + +// ThirdPartyResourcesGetter has a method to return a ThirdPartyResourceInterface. +// A group's client should implement this interface. +type ThirdPartyResourcesGetter interface { + ThirdPartyResources() ThirdPartyResourceInterface +} + +// ThirdPartyResourceInterface has methods to work with ThirdPartyResource resources. +type ThirdPartyResourceInterface interface { + Create(*v1beta1.ThirdPartyResource) (*v1beta1.ThirdPartyResource, error) + Update(*v1beta1.ThirdPartyResource) (*v1beta1.ThirdPartyResource, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ThirdPartyResource, error) + List(opts v1.ListOptions) (*v1beta1.ThirdPartyResourceList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ThirdPartyResource, err error) + ThirdPartyResourceExpansion +} + +// thirdPartyResources implements ThirdPartyResourceInterface +type thirdPartyResources struct { + client rest.Interface +} + +// newThirdPartyResources returns a ThirdPartyResources +func newThirdPartyResources(c *ExtensionsV1beta1Client) *thirdPartyResources { + return &thirdPartyResources{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a thirdPartyResource and creates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. +func (c *thirdPartyResources) Create(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { + result = &v1beta1.ThirdPartyResource{} + err = c.client.Post(). + Resource("thirdpartyresources"). + Body(thirdPartyResource). + Do(). + Into(result) + return +} + +// Update takes the representation of a thirdPartyResource and updates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. +func (c *thirdPartyResources) Update(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { + result = &v1beta1.ThirdPartyResource{} + err = c.client.Put(). + Resource("thirdpartyresources"). + Name(thirdPartyResource.Name). + Body(thirdPartyResource). + Do(). + Into(result) + return +} + +// Delete takes name of the thirdPartyResource and deletes it. Returns an error if one occurs. +func (c *thirdPartyResources) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("thirdpartyresources"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *thirdPartyResources) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("thirdpartyresources"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the thirdPartyResource, and returns the corresponding thirdPartyResource object, and an error if there is any. 
+func (c *thirdPartyResources) Get(name string, options v1.GetOptions) (result *v1beta1.ThirdPartyResource, err error) { + result = &v1beta1.ThirdPartyResource{} + err = c.client.Get(). + Resource("thirdpartyresources"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ThirdPartyResources that match those selectors. +func (c *thirdPartyResources) List(opts v1.ListOptions) (result *v1beta1.ThirdPartyResourceList, err error) { + result = &v1beta1.ThirdPartyResourceList{} + err = c.client.Get(). + Resource("thirdpartyresources"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested thirdPartyResources. +func (c *thirdPartyResources) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("thirdpartyresources"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched thirdPartyResource. +func (c *thirdPartyResources) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ThirdPartyResource, err error) { + result = &v1beta1.ThirdPartyResource{} + err = c.client.Patch(pt). + Resource("thirdpartyresources"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go new file mode 100644 index 000000000..11b523897 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go new file mode 100644 index 000000000..9c4133e36 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// EvictionsGetter has a method to return a EvictionInterface. 
+// A group's client should implement this interface. +type EvictionsGetter interface { + Evictions(namespace string) EvictionInterface +} + +// EvictionInterface has methods to work with Eviction resources. +type EvictionInterface interface { + EvictionExpansion +} + +// evictions implements EvictionInterface +type evictions struct { + client rest.Interface + ns string +} + +// newEvictions returns a Evictions +func newEvictions(c *PolicyV1beta1Client, namespace string) *evictions { + return &evictions{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go new file mode 100644 index 000000000..bde1baca6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + policy "k8s.io/client-go/pkg/apis/policy/v1beta1" +) + +// The EvictionExpansion interface allows manually adding extra methods to the ScaleInterface. +type EvictionExpansion interface { + Evict(eviction *policy.Eviction) error +} + +func (c *evictions) Evict(eviction *policy.Eviction) error { + return c.client.Post(). + AbsPath("/api/v1"). + Namespace(eviction.Namespace). + Resource("pods"). + Name(eviction.Name). + SubResource("eviction"). + Body(eviction). + Do(). + Error() +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go new file mode 100644 index 000000000..511adc6ef --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +type PodDisruptionBudgetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go new file mode 100644 index 000000000..8088dd019 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/policy/v1beta1" + rest "k8s.io/client-go/rest" +) + +// PodDisruptionBudgetsGetter has a method to return a PodDisruptionBudgetInterface. +// A group's client should implement this interface. +type PodDisruptionBudgetsGetter interface { + PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface +} + +// PodDisruptionBudgetInterface has methods to work with PodDisruptionBudget resources. +type PodDisruptionBudgetInterface interface { + Create(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) + Update(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) + UpdateStatus(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.PodDisruptionBudget, error) + List(opts v1.ListOptions) (*v1beta1.PodDisruptionBudgetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) + PodDisruptionBudgetExpansion +} + +// podDisruptionBudgets implements PodDisruptionBudgetInterface +type podDisruptionBudgets struct { + client rest.Interface + ns string +} + +// newPodDisruptionBudgets returns a PodDisruptionBudgets +func newPodDisruptionBudgets(c *PolicyV1beta1Client, namespace string) *podDisruptionBudgets { + return &podDisruptionBudgets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. +func (c *podDisruptionBudgets) Create(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { + result = &v1beta1.PodDisruptionBudget{} + err = c.client.Post(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + Body(podDisruptionBudget). + Do(). + Into(result) + return +} + +// Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. +func (c *podDisruptionBudgets) Update(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { + result = &v1beta1.PodDisruptionBudget{} + err = c.client.Put(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + Name(podDisruptionBudget.Name). + Body(podDisruptionBudget). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus(). 
+ +func (c *podDisruptionBudgets) UpdateStatus(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { + result = &v1beta1.PodDisruptionBudget{} + err = c.client.Put(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + Name(podDisruptionBudget.Name). + SubResource("status"). + Body(podDisruptionBudget). + Do(). + Into(result) + return +} + +// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. +func (c *podDisruptionBudgets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. +func (c *podDisruptionBudgets) Get(name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { + result = &v1beta1.PodDisruptionBudget{} + err = c.client.Get(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. +func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { + result = &v1beta1.PodDisruptionBudgetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. +func (c *podDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched podDisruptionBudget. +func (c *podDisruptionBudgets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { + result = &v1beta1.PodDisruptionBudget{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go new file mode 100644 index 000000000..9d62aad04 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go @@ -0,0 +1,93 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/policy/v1beta1" + rest "k8s.io/client-go/rest" +) + +type PolicyV1beta1Interface interface { + RESTClient() rest.Interface + EvictionsGetter + PodDisruptionBudgetsGetter +} + +// PolicyV1beta1Client is used to interact with features provided by the policy group. +type PolicyV1beta1Client struct { + restClient rest.Interface +} + +func (c *PolicyV1beta1Client) Evictions(namespace string) EvictionInterface { + return newEvictions(c, namespace) +} + +func (c *PolicyV1beta1Client) PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface { + return newPodDisruptionBudgets(c, namespace) +} + +// NewForConfig creates a new PolicyV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*PolicyV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &PolicyV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new PolicyV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *PolicyV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new PolicyV1beta1Client for the given RESTClient. +func New(c rest.Interface) *PolicyV1beta1Client { + return &PolicyV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *PolicyV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go new file mode 100644 index 000000000..241061025 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" + rest "k8s.io/client-go/rest" +) + +// ClusterRolesGetter has a method to return a ClusterRoleInterface. +// A group's client should implement this interface. +type ClusterRolesGetter interface { + ClusterRoles() ClusterRoleInterface +} + +// ClusterRoleInterface has methods to work with ClusterRole resources. +type ClusterRoleInterface interface { + Create(*v1alpha1.ClusterRole) (*v1alpha1.ClusterRole, error) + Update(*v1alpha1.ClusterRole) (*v1alpha1.ClusterRole, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.ClusterRole, error) + List(opts v1.ListOptions) (*v1alpha1.ClusterRoleList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) + ClusterRoleExpansion +} + +// clusterRoles implements ClusterRoleInterface +type clusterRoles struct { + client rest.Interface +} + +// newClusterRoles returns a ClusterRoles +func newClusterRoles(c *RbacV1alpha1Client) *clusterRoles { + return &clusterRoles{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Create(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { + result = &v1alpha1.ClusterRole{} + err = c.client.Post(). + Resource("clusterroles"). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Update(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { + result = &v1alpha1.ClusterRole{} + err = c.client.Put(). + Resource("clusterroles"). + Name(clusterRole.Name). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. +func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterroles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("clusterroles"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. +func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { + result = &v1alpha1.ClusterRole{} + err = c.client.Get(). + Resource("clusterroles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. 
+func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { + result = &v1alpha1.ClusterRoleList{} + err = c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched clusterRole. +func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) { + result = &v1alpha1.ClusterRole{} + err = c.client.Patch(pt). + Resource("clusterroles"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go new file mode 100644 index 000000000..bc48873a5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" + rest "k8s.io/client-go/rest" +) + +// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. +// A group's client should implement this interface. +type ClusterRoleBindingsGetter interface { + ClusterRoleBindings() ClusterRoleBindingInterface +} + +// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. 
+type ClusterRoleBindingInterface interface { + Create(*v1alpha1.ClusterRoleBinding) (*v1alpha1.ClusterRoleBinding, error) + Update(*v1alpha1.ClusterRoleBinding) (*v1alpha1.ClusterRoleBinding, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.ClusterRoleBinding, error) + List(opts v1.ListOptions) (*v1alpha1.ClusterRoleBindingList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) + ClusterRoleBindingExpansion +} + +// clusterRoleBindings implements ClusterRoleBindingInterface +type clusterRoleBindings struct { + client rest.Interface +} + +// newClusterRoleBindings returns a ClusterRoleBindings +func newClusterRoleBindings(c *RbacV1alpha1Client) *clusterRoleBindings { + return &clusterRoleBindings{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Create(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { + result = &v1alpha1.ClusterRoleBinding{} + err = c.client.Post(). + Resource("clusterrolebindings"). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Update(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { + result = &v1alpha1.ClusterRoleBinding{} + err = c.client.Put(). + Resource("clusterrolebindings"). + Name(clusterRoleBinding.Name). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. +func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. +func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { + result = &v1alpha1.ClusterRoleBinding{} + err = c.client.Get(). + Resource("clusterrolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. +func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { + result = &v1alpha1.ClusterRoleBindingList{} + err = c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. +func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched clusterRoleBinding. +func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { + result = &v1alpha1.ClusterRoleBinding{} + err = c.client.Patch(pt). + Resource("clusterrolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go new file mode 100644 index 000000000..ba8d10d3b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..f506fc346 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go @@ -0,0 +1,25 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +type ClusterRoleExpansion interface{} + +type ClusterRoleBindingExpansion interface{} + +type RoleExpansion interface{} + +type RoleBindingExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go new file mode 100644 index 000000000..67256d50f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go @@ -0,0 +1,103 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" + rest "k8s.io/client-go/rest" +) + +type RbacV1alpha1Interface interface { + RESTClient() rest.Interface + ClusterRolesGetter + ClusterRoleBindingsGetter + RolesGetter + RoleBindingsGetter +} + +// RbacV1alpha1Client is used to interact with features provided by the rbac.authorization.k8s.io group. +type RbacV1alpha1Client struct { + restClient rest.Interface +} + +func (c *RbacV1alpha1Client) ClusterRoles() ClusterRoleInterface { + return newClusterRoles(c) +} + +func (c *RbacV1alpha1Client) ClusterRoleBindings() ClusterRoleBindingInterface { + return newClusterRoleBindings(c) +} + +func (c *RbacV1alpha1Client) Roles(namespace string) RoleInterface { + return newRoles(c, namespace) +} + +func (c *RbacV1alpha1Client) RoleBindings(namespace string) RoleBindingInterface { + return newRoleBindings(c, namespace) +} + +// NewForConfig creates a new RbacV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*RbacV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &RbacV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new RbacV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *RbacV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new RbacV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *RbacV1alpha1Client { + return &RbacV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *RbacV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go new file mode 100644 index 000000000..9c4fa915e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" + rest "k8s.io/client-go/rest" +) + +// RolesGetter has a method to return a RoleInterface. +// A group's client should implement this interface. +type RolesGetter interface { + Roles(namespace string) RoleInterface +} + +// RoleInterface has methods to work with Role resources. +type RoleInterface interface { + Create(*v1alpha1.Role) (*v1alpha1.Role, error) + Update(*v1alpha1.Role) (*v1alpha1.Role, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.Role, error) + List(opts v1.ListOptions) (*v1alpha1.RoleList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) + RoleExpansion +} + +// roles implements RoleInterface +type roles struct { + client rest.Interface + ns string +} + +// newRoles returns a Roles +func newRoles(c *RbacV1alpha1Client, namespace string) *roles { + return &roles{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Create(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { + result = &v1alpha1.Role{} + err = c.client.Post(). + Namespace(c.ns). + Resource("roles"). + Body(role). + Do(). + Into(result) + return +} + +// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Update(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { + result = &v1alpha1.Role{} + err = c.client.Put(). + Namespace(c.ns). + Resource("roles"). + Name(role.Name). + Body(role). + Do(). + Into(result) + return +} + +// Delete takes name of the role and deletes it. Returns an error if one occurs. +func (c *roles) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the role, and returns the corresponding role object, and an error if there is any. +func (c *roles) Get(name string, options v1.GetOptions) (result *v1alpha1.Role, err error) { + result = &v1alpha1.Role{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Roles that match those selectors. +func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { + result = &v1alpha1.RoleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roles. +func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched role. +func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) { + result = &v1alpha1.Role{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("roles"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go new file mode 100644 index 000000000..5e81967f1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1" + rest "k8s.io/client-go/rest" +) + +// RoleBindingsGetter has a method to return a RoleBindingInterface. +// A group's client should implement this interface. +type RoleBindingsGetter interface { + RoleBindings(namespace string) RoleBindingInterface +} + +// RoleBindingInterface has methods to work with RoleBinding resources. +type RoleBindingInterface interface { + Create(*v1alpha1.RoleBinding) (*v1alpha1.RoleBinding, error) + Update(*v1alpha1.RoleBinding) (*v1alpha1.RoleBinding, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.RoleBinding, error) + List(opts v1.ListOptions) (*v1alpha1.RoleBindingList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) + RoleBindingExpansion +} + +// roleBindings implements RoleBindingInterface +type roleBindings struct { + client rest.Interface + ns string +} + +// newRoleBindings returns a RoleBindings +func newRoleBindings(c *RbacV1alpha1Client, namespace string) *roleBindings { + return &roleBindings{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Create(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { + result = &v1alpha1.RoleBinding{} + err = c.client.Post(). + Namespace(c.ns). + Resource("rolebindings"). + Body(roleBinding). + Do(). 
+ Into(result) + return +} + +// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Update(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { + result = &v1alpha1.RoleBinding{} + err = c.client.Put(). + Namespace(c.ns). + Resource("rolebindings"). + Name(roleBinding.Name). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. +func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. +func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) { + result = &v1alpha1.RoleBinding{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. +func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { + result = &v1alpha1.RoleBindingList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched roleBinding. +func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) { + result = &v1alpha1.RoleBinding{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("rolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go new file mode 100644 index 000000000..6a63571f3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" + rest "k8s.io/client-go/rest" +) + +// ClusterRolesGetter has a method to return a ClusterRoleInterface. +// A group's client should implement this interface. +type ClusterRolesGetter interface { + ClusterRoles() ClusterRoleInterface +} + +// ClusterRoleInterface has methods to work with ClusterRole resources. +type ClusterRoleInterface interface { + Create(*v1beta1.ClusterRole) (*v1beta1.ClusterRole, error) + Update(*v1beta1.ClusterRole) (*v1beta1.ClusterRole, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ClusterRole, error) + List(opts v1.ListOptions) (*v1beta1.ClusterRoleList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) + ClusterRoleExpansion +} + +// clusterRoles implements ClusterRoleInterface +type clusterRoles struct { + client rest.Interface +} + +// newClusterRoles returns a ClusterRoles +func newClusterRoles(c *RbacV1beta1Client) *clusterRoles { + return &clusterRoles{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Create(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) { + result = &v1beta1.ClusterRole{} + err = c.client.Post(). + Resource("clusterroles"). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Update(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) { + result = &v1beta1.ClusterRole{} + err = c.client.Put(). + Resource("clusterroles"). + Name(clusterRole.Name). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. +func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterroles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("clusterroles"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. +func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) { + result = &v1beta1.ClusterRole{} + err = c.client.Get(). + Resource("clusterroles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. 
+func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { + result = &v1beta1.ClusterRoleList{} + err = c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched clusterRole. +func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) { + result = &v1beta1.ClusterRole{} + err = c.client.Patch(pt). + Resource("clusterroles"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go new file mode 100644 index 000000000..a2f387509 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" + rest "k8s.io/client-go/rest" +) + +// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. +// A group's client should implement this interface. +type ClusterRoleBindingsGetter interface { + ClusterRoleBindings() ClusterRoleBindingInterface +} + +// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. 
+type ClusterRoleBindingInterface interface { + Create(*v1beta1.ClusterRoleBinding) (*v1beta1.ClusterRoleBinding, error) + Update(*v1beta1.ClusterRoleBinding) (*v1beta1.ClusterRoleBinding, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ClusterRoleBinding, error) + List(opts v1.ListOptions) (*v1beta1.ClusterRoleBindingList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) + ClusterRoleBindingExpansion +} + +// clusterRoleBindings implements ClusterRoleBindingInterface +type clusterRoleBindings struct { + client rest.Interface +} + +// newClusterRoleBindings returns a ClusterRoleBindings +func newClusterRoleBindings(c *RbacV1beta1Client) *clusterRoleBindings { + return &clusterRoleBindings{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Create(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) { + result = &v1beta1.ClusterRoleBinding{} + err = c.client.Post(). + Resource("clusterrolebindings"). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Update(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) { + result = &v1beta1.ClusterRoleBinding{} + err = c.client.Put(). + Resource("clusterrolebindings"). + Name(clusterRoleBinding.Name). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. +func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. +func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) { + result = &v1beta1.ClusterRoleBinding{} + err = c.client.Get(). + Resource("clusterrolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. +func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { + result = &v1beta1.ClusterRoleBindingList{} + err = c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. +func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched clusterRoleBinding. +func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) { + result = &v1beta1.ClusterRoleBinding{} + err = c.client.Patch(pt). + Resource("clusterrolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go new file mode 100644 index 000000000..11b523897 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go new file mode 100644 index 000000000..d7f80c004 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go @@ -0,0 +1,25 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +type ClusterRoleExpansion interface{} + +type ClusterRoleBindingExpansion interface{} + +type RoleExpansion interface{} + +type RoleBindingExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go new file mode 100644 index 000000000..a24ef4f9a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go @@ -0,0 +1,103 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" + rest "k8s.io/client-go/rest" +) + +type RbacV1beta1Interface interface { + RESTClient() rest.Interface + ClusterRolesGetter + ClusterRoleBindingsGetter + RolesGetter + RoleBindingsGetter +} + +// RbacV1beta1Client is used to interact with features provided by the rbac.authorization.k8s.io group. +type RbacV1beta1Client struct { + restClient rest.Interface +} + +func (c *RbacV1beta1Client) ClusterRoles() ClusterRoleInterface { + return newClusterRoles(c) +} + +func (c *RbacV1beta1Client) ClusterRoleBindings() ClusterRoleBindingInterface { + return newClusterRoleBindings(c) +} + +func (c *RbacV1beta1Client) Roles(namespace string) RoleInterface { + return newRoles(c, namespace) +} + +func (c *RbacV1beta1Client) RoleBindings(namespace string) RoleBindingInterface { + return newRoleBindings(c, namespace) +} + +// NewForConfig creates a new RbacV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*RbacV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &RbacV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new RbacV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *RbacV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new RbacV1beta1Client for the given RESTClient. +func New(c rest.Interface) *RbacV1beta1Client { + return &RbacV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *RbacV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go new file mode 100644 index 000000000..aa57ac319 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" + rest "k8s.io/client-go/rest" +) + +// RolesGetter has a method to return a RoleInterface. +// A group's client should implement this interface. +type RolesGetter interface { + Roles(namespace string) RoleInterface +} + +// RoleInterface has methods to work with Role resources. +type RoleInterface interface { + Create(*v1beta1.Role) (*v1beta1.Role, error) + Update(*v1beta1.Role) (*v1beta1.Role, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Role, error) + List(opts v1.ListOptions) (*v1beta1.RoleList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) + RoleExpansion +} + +// roles implements RoleInterface +type roles struct { + client rest.Interface + ns string +} + +// newRoles returns a Roles +func newRoles(c *RbacV1beta1Client, namespace string) *roles { + return &roles{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Create(role *v1beta1.Role) (result *v1beta1.Role, err error) { + result = &v1beta1.Role{} + err = c.client.Post(). + Namespace(c.ns). + Resource("roles"). + Body(role). + Do(). + Into(result) + return +} + +// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Update(role *v1beta1.Role) (result *v1beta1.Role, err error) { + result = &v1beta1.Role{} + err = c.client.Put(). + Namespace(c.ns). + Resource("roles"). + Name(role.Name). + Body(role). + Do(). + Into(result) + return +} + +// Delete takes name of the role and deletes it. Returns an error if one occurs. +func (c *roles) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the role, and returns the corresponding role object, and an error if there is any. +func (c *roles) Get(name string, options v1.GetOptions) (result *v1beta1.Role, err error) { + result = &v1beta1.Role{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Roles that match those selectors. +func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) { + result = &v1beta1.RoleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roles. +func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched role. +func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) { + result = &v1beta1.Role{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("roles"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go new file mode 100644 index 000000000..3dee9107e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/rbac/v1beta1" + rest "k8s.io/client-go/rest" +) + +// RoleBindingsGetter has a method to return a RoleBindingInterface. +// A group's client should implement this interface. +type RoleBindingsGetter interface { + RoleBindings(namespace string) RoleBindingInterface +} + +// RoleBindingInterface has methods to work with RoleBinding resources. +type RoleBindingInterface interface { + Create(*v1beta1.RoleBinding) (*v1beta1.RoleBinding, error) + Update(*v1beta1.RoleBinding) (*v1beta1.RoleBinding, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.RoleBinding, error) + List(opts v1.ListOptions) (*v1beta1.RoleBindingList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) + RoleBindingExpansion +} + +// roleBindings implements RoleBindingInterface +type roleBindings struct { + client rest.Interface + ns string +} + +// newRoleBindings returns a RoleBindings +func newRoleBindings(c *RbacV1beta1Client, namespace string) *roleBindings { + return &roleBindings{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Create(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) { + result = &v1beta1.RoleBinding{} + err = c.client.Post(). + Namespace(c.ns). + Resource("rolebindings"). + Body(roleBinding). + Do(). 
+ Into(result) + return +} + +// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Update(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) { + result = &v1beta1.RoleBinding{} + err = c.client.Put(). + Namespace(c.ns). + Resource("rolebindings"). + Name(roleBinding.Name). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. +func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. +func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) { + result = &v1beta1.RoleBinding{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. +func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { + result = &v1beta1.RoleBindingList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched roleBinding. +func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) { + result = &v1beta1.RoleBinding{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("rolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go new file mode 100644 index 000000000..ba8d10d3b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..d599b2935 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +type PodPresetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go new file mode 100644 index 000000000..b8fc0a022 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1alpha1 "k8s.io/client-go/pkg/apis/settings/v1alpha1" + rest "k8s.io/client-go/rest" +) + +// PodPresetsGetter has a method to return a PodPresetInterface. +// A group's client should implement this interface. +type PodPresetsGetter interface { + PodPresets(namespace string) PodPresetInterface +} + +// PodPresetInterface has methods to work with PodPreset resources. 
+type PodPresetInterface interface { + Create(*v1alpha1.PodPreset) (*v1alpha1.PodPreset, error) + Update(*v1alpha1.PodPreset) (*v1alpha1.PodPreset, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.PodPreset, error) + List(opts v1.ListOptions) (*v1alpha1.PodPresetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) + PodPresetExpansion +} + +// podPresets implements PodPresetInterface +type podPresets struct { + client rest.Interface + ns string +} + +// newPodPresets returns a PodPresets +func newPodPresets(c *SettingsV1alpha1Client, namespace string) *podPresets { + return &podPresets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a podPreset and creates it. Returns the server's representation of the podPreset, and an error, if there is any. +func (c *podPresets) Create(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) { + result = &v1alpha1.PodPreset{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podpresets"). + Body(podPreset). + Do(). + Into(result) + return +} + +// Update takes the representation of a podPreset and updates it. Returns the server's representation of the podPreset, and an error, if there is any. +func (c *podPresets) Update(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) { + result = &v1alpha1.PodPreset{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podpresets"). + Name(podPreset.Name). + Body(podPreset). + Do(). + Into(result) + return +} + +// Delete takes name of the podPreset and deletes it. Returns an error if one occurs. +func (c *podPresets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podpresets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podpresets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the podPreset, and returns the corresponding podPreset object, and an error if there is any. +func (c *podPresets) Get(name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) { + result = &v1alpha1.PodPreset{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podpresets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodPresets that match those selectors. +func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { + result = &v1alpha1.PodPresetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podpresets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podPresets. +func (c *podPresets) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podpresets"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Watch() +} + +// Patch applies the patch and returns the patched podPreset. +func (c *podPresets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) { + result = &v1alpha1.PodPreset{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("podpresets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go new file mode 100644 index 000000000..1a90034bc --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1alpha1 "k8s.io/client-go/pkg/apis/settings/v1alpha1" + rest "k8s.io/client-go/rest" +) + +type SettingsV1alpha1Interface interface { + RESTClient() rest.Interface + PodPresetsGetter +} + +// SettingsV1alpha1Client is used to interact with features provided by the settings.k8s.io group. +type SettingsV1alpha1Client struct { + restClient rest.Interface +} + +func (c *SettingsV1alpha1Client) PodPresets(namespace string) PodPresetInterface { + return newPodPresets(c, namespace) +} + +// NewForConfig creates a new SettingsV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*SettingsV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &SettingsV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new SettingsV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *SettingsV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new SettingsV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *SettingsV1alpha1Client { + return &SettingsV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *SettingsV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go new file mode 100644 index 000000000..54673bfa7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go new file mode 100644 index 000000000..39df9fb87 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +type StorageClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go new file mode 100644 index 000000000..a95d30b2d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/apis/storage/v1" + rest "k8s.io/client-go/rest" +) + +type StorageV1Interface interface { + RESTClient() rest.Interface + StorageClassesGetter +} + +// StorageV1Client is used to interact with features provided by the storage.k8s.io group. 
+type StorageV1Client struct { + restClient rest.Interface +} + +func (c *StorageV1Client) StorageClasses() StorageClassInterface { + return newStorageClasses(c) +} + +// NewForConfig creates a new StorageV1Client for the given config. +func NewForConfig(c *rest.Config) (*StorageV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &StorageV1Client{client}, nil +} + +// NewForConfigOrDie creates a new StorageV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *StorageV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new StorageV1Client for the given RESTClient. +func New(c rest.Interface) *StorageV1Client { + return &StorageV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *StorageV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go new file mode 100644 index 000000000..0283c9970 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1 "k8s.io/client-go/pkg/apis/storage/v1" + rest "k8s.io/client-go/rest" +) + +// StorageClassesGetter has a method to return a StorageClassInterface. +// A group's client should implement this interface. +type StorageClassesGetter interface { + StorageClasses() StorageClassInterface +} + +// StorageClassInterface has methods to work with StorageClass resources. 
+type StorageClassInterface interface { + Create(*v1.StorageClass) (*v1.StorageClass, error) + Update(*v1.StorageClass) (*v1.StorageClass, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.StorageClass, error) + List(opts meta_v1.ListOptions) (*v1.StorageClassList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StorageClass, err error) + StorageClassExpansion +} + +// storageClasses implements StorageClassInterface +type storageClasses struct { + client rest.Interface +} + +// newStorageClasses returns a StorageClasses +func newStorageClasses(c *StorageV1Client) *storageClasses { + return &storageClasses{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *storageClasses) Create(storageClass *v1.StorageClass) (result *v1.StorageClass, err error) { + result = &v1.StorageClass{} + err = c.client.Post(). + Resource("storageclasses"). + Body(storageClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *storageClasses) Update(storageClass *v1.StorageClass) (result *v1.StorageClass, err error) { + result = &v1.StorageClass{} + err = c.client.Put(). + Resource("storageclasses"). + Name(storageClass.Name). + Body(storageClass). + Do(). + Into(result) + return +} + +// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. +func (c *storageClasses) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Resource("storageclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *storageClasses) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Resource("storageclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. +func (c *storageClasses) Get(name string, options meta_v1.GetOptions) (result *v1.StorageClass, err error) { + result = &v1.StorageClass{} + err = c.client.Get(). + Resource("storageclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. +func (c *storageClasses) List(opts meta_v1.ListOptions) (result *v1.StorageClassList, err error) { + result = &v1.StorageClassList{} + err = c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested storageClasses. +func (c *storageClasses) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched storageClass. 
+func (c *storageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StorageClass, err error) { + result = &v1.StorageClass{} + err = c.client.Patch(pt). + Resource("storageclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go new file mode 100644 index 000000000..11b523897 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with custom arguments. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go new file mode 100644 index 000000000..6f3f0c55e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +type StorageClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go new file mode 100644 index 000000000..7abb99e57 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/storage/v1beta1" + rest "k8s.io/client-go/rest" +) + +type StorageV1beta1Interface interface { + RESTClient() rest.Interface + StorageClassesGetter +} + +// StorageV1beta1Client is used to interact with features provided by the storage.k8s.io group. +type StorageV1beta1Client struct { + restClient rest.Interface +} + +func (c *StorageV1beta1Client) StorageClasses() StorageClassInterface { + return newStorageClasses(c) +} + +// NewForConfig creates a new StorageV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*StorageV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &StorageV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new StorageV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *StorageV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new StorageV1beta1Client for the given RESTClient. +func New(c rest.Interface) *StorageV1beta1Client { + return &StorageV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *StorageV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go new file mode 100644 index 000000000..6980fc78b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + v1beta1 "k8s.io/client-go/pkg/apis/storage/v1beta1" + rest "k8s.io/client-go/rest" +) + +// StorageClassesGetter has a method to return a StorageClassInterface. +// A group's client should implement this interface. +type StorageClassesGetter interface { + StorageClasses() StorageClassInterface +} + +// StorageClassInterface has methods to work with StorageClass resources. 
+type StorageClassInterface interface { + Create(*v1beta1.StorageClass) (*v1beta1.StorageClass, error) + Update(*v1beta1.StorageClass) (*v1beta1.StorageClass, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.StorageClass, error) + List(opts v1.ListOptions) (*v1beta1.StorageClassList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) + StorageClassExpansion +} + +// storageClasses implements StorageClassInterface +type storageClasses struct { + client rest.Interface +} + +// newStorageClasses returns a StorageClasses +func newStorageClasses(c *StorageV1beta1Client) *storageClasses { + return &storageClasses{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *storageClasses) Create(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { + result = &v1beta1.StorageClass{} + err = c.client.Post(). + Resource("storageclasses"). + Body(storageClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *storageClasses) Update(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { + result = &v1beta1.StorageClass{} + err = c.client.Put(). + Resource("storageclasses"). + Name(storageClass.Name). + Body(storageClass). + Do(). + Into(result) + return +} + +// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. +func (c *storageClasses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("storageclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *storageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("storageclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. +func (c *storageClasses) Get(name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { + result = &v1beta1.StorageClass{} + err = c.client.Get(). + Resource("storageclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. +func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { + result = &v1beta1.StorageClassList{} + err = c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested storageClasses. +func (c *storageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Watch() +} + +// Patch applies the patch and returns the patched storageClass. +func (c *storageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) { + result = &v1beta1.StorageClass{} + err = c.client.Patch(pt). + Resource("storageclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/pkg/api/OWNERS b/vendor/k8s.io/client-go/pkg/api/OWNERS new file mode 100644 index 000000000..3a9b0c6d1 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/OWNERS @@ -0,0 +1,44 @@ +approvers: +- erictune +- lavalamp +- smarterclayton +- thockin +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- yujuhong +- brendandburns +- derekwaynecarr +- caesarxuchao +- vishh +- mikedanese +- liggitt +- nikhiljindal +- bprashanth +- gmarek +- erictune +- davidopp +- pmorie +- sttts +- kargakis +- dchen1107 +- saad-ali +- zmerlynn +- luxas +- janetkuo +- justinsb +- pwittrock +- roberthbailey +- ncdc +- timstclair +- yifan-gu +- eparis +- mwielgus +- timothysc +- soltysh +- piosz +- jsafrane +- jbeda diff --git a/vendor/k8s.io/client-go/pkg/api/defaults.go b/vendor/k8s.io/client-go/pkg/api/defaults.go new file mode 100644 index 000000000..baa49a8d7 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/defaults.go @@ -0,0 +1,36 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return scheme.AddDefaultingFuncs( + func(obj *ListOptions) { + if obj.LabelSelector == nil { + obj.LabelSelector = labels.Everything() + } + if obj.FieldSelector == nil { + obj.FieldSelector = fields.Everything() + } + }, + ) +} diff --git a/vendor/k8s.io/client-go/pkg/api/doc.go b/vendor/k8s.io/client-go/pkg/api/doc.go new file mode 100644 index 000000000..a4262ab36 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package api contains the latest (or "internal") version of the +// Kubernetes API objects. This is the API objects as represented in memory. +// The contract presented to clients is located in the versioned packages, +// which are sub-directories. The first one is "v1". 
Those packages +// describe how a particular version is serialized to storage/network. +package api diff --git a/vendor/k8s.io/kubernetes/pkg/api/field_constants.go b/vendor/k8s.io/client-go/pkg/api/field_constants.go similarity index 96% rename from vendor/k8s.io/kubernetes/pkg/api/field_constants.go rename to vendor/k8s.io/client-go/pkg/api/field_constants.go index 94a825caf..5ead0f13f 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/field_constants.go +++ b/vendor/k8s.io/client-go/pkg/api/field_constants.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/client-go/pkg/api/helpers.go b/vendor/k8s.io/client-go/pkg/api/helpers.go new file mode 100644 index 000000000..faa7df7c0 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/helpers.go @@ -0,0 +1,691 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "crypto/md5" + "encoding/json" + "fmt" + "reflect" + "strings" + "time" + + "github.com/davecgh/go-spew/spew" + + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Conversion error conveniently packages up errors in conversions. +type ConversionError struct { + In, Out interface{} + Message string +} + +// Return a helpful string about the error +func (c *ConversionError) Error() string { + return spew.Sprintf( + "Conversion error: %s. (in: %v(%+v) out: %v)", + c.Message, reflect.TypeOf(c.In), c.In, reflect.TypeOf(c.Out), + ) +} + +const ( + // annotation key prefix used to identify non-convertible json paths. + NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" +) + +// NonConvertibleFields iterates over the provided map and filters out all but +// any keys with the "non-convertible.kubernetes.io" prefix. +func NonConvertibleFields(annotations map[string]string) map[string]string { + nonConvertibleKeys := map[string]string{} + for key, value := range annotations { + if strings.HasPrefix(key, NonConvertibleAnnotationPrefix) { + nonConvertibleKeys[key] = value + } + } + return nonConvertibleKeys +} + +// Semantic can do semantic deep equality checks for api objects. +// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true +var Semantic = conversion.EqualitiesOrDie( + func(a, b resource.Quantity) bool { + // Ignore formatting, only care that numeric value stayed the same. + // TODO: if we decide it's important, it should be safe to start comparing the format. + // + // Uninitialized quantities are equivalent to 0 quantities. 
+ return a.Cmp(b) == 0 + }, + func(a, b metav1.Time) bool { + return a.UTC() == b.UTC() + }, + func(a, b labels.Selector) bool { + return a.String() == b.String() + }, + func(a, b fields.Selector) bool { + return a.String() == b.String() + }, +) + +var standardResourceQuotaScopes = sets.NewString( + string(ResourceQuotaScopeTerminating), + string(ResourceQuotaScopeNotTerminating), + string(ResourceQuotaScopeBestEffort), + string(ResourceQuotaScopeNotBestEffort), +) + +// IsStandardResourceQuotaScope returns true if the scope is a standard value +func IsStandardResourceQuotaScope(str string) bool { + return standardResourceQuotaScopes.Has(str) +} + +var podObjectCountQuotaResources = sets.NewString( + string(ResourcePods), +) + +var podComputeQuotaResources = sets.NewString( + string(ResourceCPU), + string(ResourceMemory), + string(ResourceLimitsCPU), + string(ResourceLimitsMemory), + string(ResourceRequestsCPU), + string(ResourceRequestsMemory), +) + +// IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope +func IsResourceQuotaScopeValidForResource(scope ResourceQuotaScope, resource string) bool { + switch scope { + case ResourceQuotaScopeTerminating, ResourceQuotaScopeNotTerminating, ResourceQuotaScopeNotBestEffort: + return podObjectCountQuotaResources.Has(resource) || podComputeQuotaResources.Has(resource) + case ResourceQuotaScopeBestEffort: + return podObjectCountQuotaResources.Has(resource) + default: + return true + } +} + +var standardContainerResources = sets.NewString( + string(ResourceCPU), + string(ResourceMemory), +) + +// IsStandardContainerResourceName returns true if the container can make a resource request +// for the specified resource +func IsStandardContainerResourceName(str string) bool { + return standardContainerResources.Has(str) +} + +// IsOpaqueIntResourceName returns true if the resource name has the opaque +// integer resource prefix. +func IsOpaqueIntResourceName(name ResourceName) bool { + return strings.HasPrefix(string(name), ResourceOpaqueIntPrefix) +} + +// OpaqueIntResourceName returns a ResourceName with the canonical opaque +// integer prefix prepended. If the argument already has the prefix, it is +// returned unmodified. 
+func OpaqueIntResourceName(name string) ResourceName { + if IsOpaqueIntResourceName(ResourceName(name)) { + return ResourceName(name) + } + return ResourceName(fmt.Sprintf("%s%s", ResourceOpaqueIntPrefix, name)) +} + +var standardLimitRangeTypes = sets.NewString( + string(LimitTypePod), + string(LimitTypeContainer), + string(LimitTypePersistentVolumeClaim), +) + +// IsStandardLimitRangeType returns true if the type is Pod or Container +func IsStandardLimitRangeType(str string) bool { + return standardLimitRangeTypes.Has(str) +} + +var standardQuotaResources = sets.NewString( + string(ResourceCPU), + string(ResourceMemory), + string(ResourceRequestsCPU), + string(ResourceRequestsMemory), + string(ResourceRequestsStorage), + string(ResourceLimitsCPU), + string(ResourceLimitsMemory), + string(ResourcePods), + string(ResourceQuotas), + string(ResourceServices), + string(ResourceReplicationControllers), + string(ResourceSecrets), + string(ResourcePersistentVolumeClaims), + string(ResourceConfigMaps), + string(ResourceServicesNodePorts), + string(ResourceServicesLoadBalancers), +) + +// IsStandardQuotaResourceName returns true if the resource is known to +// the quota tracking system +func IsStandardQuotaResourceName(str string) bool { + return standardQuotaResources.Has(str) +} + +var standardResources = sets.NewString( + string(ResourceCPU), + string(ResourceMemory), + string(ResourceRequestsCPU), + string(ResourceRequestsMemory), + string(ResourceLimitsCPU), + string(ResourceLimitsMemory), + string(ResourcePods), + string(ResourceQuotas), + string(ResourceServices), + string(ResourceReplicationControllers), + string(ResourceSecrets), + string(ResourceConfigMaps), + string(ResourcePersistentVolumeClaims), + string(ResourceStorage), + string(ResourceRequestsStorage), +) + +// IsStandardResourceName returns true if the resource is known to the system +func IsStandardResourceName(str string) bool { + return standardResources.Has(str) +} + +var integerResources = sets.NewString( + string(ResourcePods), + string(ResourceQuotas), + string(ResourceServices), + string(ResourceReplicationControllers), + string(ResourceSecrets), + string(ResourceConfigMaps), + string(ResourcePersistentVolumeClaims), + string(ResourceServicesNodePorts), + string(ResourceServicesLoadBalancers), +) + +// IsIntegerResourceName returns true if the resource is measured in integer values +func IsIntegerResourceName(str string) bool { + return integerResources.Has(str) || IsOpaqueIntResourceName(ResourceName(str)) +} + +// this function aims to check if the service's ClusterIP is set or not +// the objective is not to perform validation here +func IsServiceIPSet(service *Service) bool { + return service.Spec.ClusterIP != ClusterIPNone && service.Spec.ClusterIP != "" +} + +// this function aims to check if the service's cluster IP is requested or not +func IsServiceIPRequested(service *Service) bool { + // ExternalName services are CNAME aliases to external ones. Ignore the IP. 
+ if service.Spec.Type == ServiceTypeExternalName { + return false + } + return service.Spec.ClusterIP == "" +} + +var standardFinalizers = sets.NewString( + string(FinalizerKubernetes), + metav1.FinalizerOrphanDependents, +) + +// HasAnnotation returns a bool if passed in annotation exists +func HasAnnotation(obj ObjectMeta, ann string) bool { + _, found := obj.Annotations[ann] + return found +} + +// SetMetaDataAnnotation sets the annotation and value +func SetMetaDataAnnotation(obj *ObjectMeta, ann string, value string) { + if obj.Annotations == nil { + obj.Annotations = make(map[string]string) + } + obj.Annotations[ann] = value +} + +func IsStandardFinalizerName(str string) bool { + return standardFinalizers.Has(str) +} + +// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, +// only if they do not already exist +func AddToNodeAddresses(addresses *[]NodeAddress, addAddresses ...NodeAddress) { + for _, add := range addAddresses { + exists := false + for _, existing := range *addresses { + if existing.Address == add.Address && existing.Type == add.Type { + exists = true + break + } + } + if !exists { + *addresses = append(*addresses, add) + } + } +} + +func HashObject(obj runtime.Object, codec runtime.Codec) (string, error) { + data, err := runtime.Encode(codec, obj) + if err != nil { + return "", err + } + return fmt.Sprintf("%x", md5.Sum(data)), nil +} + +// TODO: make method on LoadBalancerStatus? +func LoadBalancerStatusEqual(l, r *LoadBalancerStatus) bool { + return ingressSliceEqual(l.Ingress, r.Ingress) +} + +func ingressSliceEqual(lhs, rhs []LoadBalancerIngress) bool { + if len(lhs) != len(rhs) { + return false + } + for i := range lhs { + if !ingressEqual(&lhs[i], &rhs[i]) { + return false + } + } + return true +} + +func ingressEqual(lhs, rhs *LoadBalancerIngress) bool { + if lhs.IP != rhs.IP { + return false + } + if lhs.Hostname != rhs.Hostname { + return false + } + return true +} + +// TODO: make method on LoadBalancerStatus? +func LoadBalancerStatusDeepCopy(lb *LoadBalancerStatus) *LoadBalancerStatus { + c := &LoadBalancerStatus{} + c.Ingress = make([]LoadBalancerIngress, len(lb.Ingress)) + for i := range lb.Ingress { + c.Ingress[i] = lb.Ingress[i] + } + return c +} + +// GetAccessModesAsString returns a string representation of an array of access modes. +// modes, when present, are always in the same order: RWO,ROX,RWX. 
+func GetAccessModesAsString(modes []PersistentVolumeAccessMode) string { + modes = removeDuplicateAccessModes(modes) + modesStr := []string{} + if containsAccessMode(modes, ReadWriteOnce) { + modesStr = append(modesStr, "RWO") + } + if containsAccessMode(modes, ReadOnlyMany) { + modesStr = append(modesStr, "ROX") + } + if containsAccessMode(modes, ReadWriteMany) { + modesStr = append(modesStr, "RWX") + } + return strings.Join(modesStr, ",") +} + +// GetAccessModesAsString returns an array of AccessModes from a string created by GetAccessModesAsString +func GetAccessModesFromString(modes string) []PersistentVolumeAccessMode { + strmodes := strings.Split(modes, ",") + accessModes := []PersistentVolumeAccessMode{} + for _, s := range strmodes { + s = strings.Trim(s, " ") + switch { + case s == "RWO": + accessModes = append(accessModes, ReadWriteOnce) + case s == "ROX": + accessModes = append(accessModes, ReadOnlyMany) + case s == "RWX": + accessModes = append(accessModes, ReadWriteMany) + } + } + return accessModes +} + +// removeDuplicateAccessModes returns an array of access modes without any duplicates +func removeDuplicateAccessModes(modes []PersistentVolumeAccessMode) []PersistentVolumeAccessMode { + accessModes := []PersistentVolumeAccessMode{} + for _, m := range modes { + if !containsAccessMode(accessModes, m) { + accessModes = append(accessModes, m) + } + } + return accessModes +} + +func containsAccessMode(modes []PersistentVolumeAccessMode, mode PersistentVolumeAccessMode) bool { + for _, m := range modes { + if m == mode { + return true + } + } + return false +} + +// ParseRFC3339 parses an RFC3339 date in either RFC3339Nano or RFC3339 format. +func ParseRFC3339(s string, nowFn func() metav1.Time) (metav1.Time, error) { + if t, timeErr := time.Parse(time.RFC3339Nano, s); timeErr == nil { + return metav1.Time{Time: t}, nil + } + t, err := time.Parse(time.RFC3339, s) + if err != nil { + return metav1.Time{}, err + } + return metav1.Time{Time: t}, nil +} + +// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements +// labels.Selector. +func NodeSelectorRequirementsAsSelector(nsm []NodeSelectorRequirement) (labels.Selector, error) { + if len(nsm) == 0 { + return labels.Nothing(), nil + } + selector := labels.NewSelector() + for _, expr := range nsm { + var op selection.Operator + switch expr.Operator { + case NodeSelectorOpIn: + op = selection.In + case NodeSelectorOpNotIn: + op = selection.NotIn + case NodeSelectorOpExists: + op = selection.Exists + case NodeSelectorOpDoesNotExist: + op = selection.DoesNotExist + case NodeSelectorOpGt: + op = selection.GreaterThan + case NodeSelectorOpLt: + op = selection.LessThan + default: + return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) + } + r, err := labels.NewRequirement(expr.Key, op, expr.Values) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + return selector, nil +} + +const ( + // TolerationsAnnotationKey represents the key of tolerations data (json serialized) + // in the Annotations of a Pod. + TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations" + + // TaintsAnnotationKey represents the key of taints data (json serialized) + // in the Annotations of a Node. + TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints" + + // SeccompPodAnnotationKey represents the key of a seccomp profile applied + // to all containers of a pod. 
+ SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" + + // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied + // to one container of a pod. + SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" + + // CreatedByAnnotation represents the key used to store the spec(json) + // used to create the resource. + CreatedByAnnotation = "kubernetes.io/created-by" + + // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) + // in the Annotations of a Node. + PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" + + // SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure + // container of a pod. The annotation value is a comma separated list of sysctl_name=value + // key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by + // the kubelet. Pods with other sysctls will fail to launch. + SysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/sysctls" + + // UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure + // container of a pod. The annotation value is a comma separated list of sysctl_name=value + // key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly + // namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use + // is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet + // will fail to launch. + UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls" + + // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache + // an object (e.g. secret, config map) before fetching it again from apiserver. + // This annotation can be attached to node. + ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" + + // AffinityAnnotationKey represents the key of affinity data (json serialized) + // in the Annotations of a Pod. + // TODO: remove when alpha support for affinity is removed + AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity" +) + +// GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations +// and converts it to the []Toleration type in api. +func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]Toleration, error) { + var tolerations []Toleration + if len(annotations) > 0 && annotations[TolerationsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[TolerationsAnnotationKey]), &tolerations) + if err != nil { + return tolerations, err + } + } + return tolerations, nil +} + +// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list. +// Returns true if something was updated, false otherwise. 
+func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error) { + podTolerations := pod.Spec.Tolerations + + var newTolerations []Toleration + updated := false + for i := range podTolerations { + if toleration.MatchToleration(&podTolerations[i]) { + if Semantic.DeepEqual(toleration, podTolerations[i]) { + return false, nil + } + newTolerations = append(newTolerations, *toleration) + updated = true + continue + } + + newTolerations = append(newTolerations, podTolerations[i]) + } + + if !updated { + newTolerations = append(newTolerations, *toleration) + } + + pod.Spec.Tolerations = newTolerations + return true, nil +} + +// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>, +// if the two tolerations have same <key,effect,operator,value> combination, regard as they match. +// TODO: uniqueness check for tolerations in api validations. +func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { + return t.Key == tolerationToMatch.Key && + t.Effect == tolerationToMatch.Effect && + t.Operator == tolerationToMatch.Operator && + t.Value == tolerationToMatch.Value +} + +// TolerationToleratesTaint checks if the toleration tolerates the taint. +func TolerationToleratesTaint(toleration *Toleration, taint *Taint) bool { + if len(toleration.Effect) != 0 && toleration.Effect != taint.Effect { + return false + } + + if toleration.Key != taint.Key { + return false + } + // TODO: Use proper defaulting when Toleration becomes a field of PodSpec + if (len(toleration.Operator) == 0 || toleration.Operator == TolerationOpEqual) && toleration.Value == taint.Value { + return true + } + if toleration.Operator == TolerationOpExists { + return true + } + return false +} + +// TaintToleratedByTolerations checks if taint is tolerated by any of the tolerations. +func TaintToleratedByTolerations(taint *Taint, tolerations []Toleration) bool { + tolerated := false + for i := range tolerations { + if TolerationToleratesTaint(&tolerations[i], taint) { + tolerated = true + break + } + } + return tolerated +} + +// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect, +// if the two taints have same key:effect, regard as they match. +func (t *Taint) MatchTaint(taintToMatch Taint) bool { + return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect +} + +// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. +func (t *Taint) ToString() string { + if len(t.Value) == 0 { + return fmt.Sprintf("%v:%v", t.Key, t.Effect) + } + return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect) +} + +// GetTaintsFromNodeAnnotations gets the json serialized taints data from Pod.Annotations +// and converts it to the []Taint type in api. +func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]Taint, error) { + var taints []Taint + if len(annotations) > 0 && annotations[TaintsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[TaintsAnnotationKey]), &taints) + if err != nil { + return []Taint{}, err + } + } + return taints, nil +} + +// SysctlsFromPodAnnotations parses the sysctl annotations into a slice of safe Sysctls +// and a slice of unsafe Sysctls. This is only a convenience wrapper around +// SysctlsFromPodAnnotation. 
+func SysctlsFromPodAnnotations(a map[string]string) ([]Sysctl, []Sysctl, error) { + safe, err := SysctlsFromPodAnnotation(a[SysctlsPodAnnotationKey]) + if err != nil { + return nil, nil, err + } + unsafe, err := SysctlsFromPodAnnotation(a[UnsafeSysctlsPodAnnotationKey]) + if err != nil { + return nil, nil, err + } + + return safe, unsafe, nil +} + +// SysctlsFromPodAnnotation parses an annotation value into a slice of Sysctls. +func SysctlsFromPodAnnotation(annotation string) ([]Sysctl, error) { + if len(annotation) == 0 { + return nil, nil + } + + kvs := strings.Split(annotation, ",") + sysctls := make([]Sysctl, len(kvs)) + for i, kv := range kvs { + cs := strings.Split(kv, "=") + if len(cs) != 2 || len(cs[0]) == 0 { + return nil, fmt.Errorf("sysctl %q not of the format sysctl_name=value", kv) + } + sysctls[i].Name = cs[0] + sysctls[i].Value = cs[1] + } + return sysctls, nil +} + +// PodAnnotationsFromSysctls creates an annotation value for a slice of Sysctls. +func PodAnnotationsFromSysctls(sysctls []Sysctl) string { + if len(sysctls) == 0 { + return "" + } + + kvs := make([]string, len(sysctls)) + for i := range sysctls { + kvs[i] = fmt.Sprintf("%s=%s", sysctls[i].Name, sysctls[i].Value) + } + return strings.Join(kvs, ",") +} + +// GetAffinityFromPodAnnotations gets the json serialized affinity data from Pod.Annotations +// and converts it to the Affinity type in api. +// TODO: remove when alpha support for affinity is removed +func GetAffinityFromPodAnnotations(annotations map[string]string) (*Affinity, error) { + if len(annotations) > 0 && annotations[AffinityAnnotationKey] != "" { + var affinity Affinity + err := json.Unmarshal([]byte(annotations[AffinityAnnotationKey]), &affinity) + if err != nil { + return nil, err + } + return &affinity, nil + } + return nil, nil +} + +// GetPersistentVolumeClass returns StorageClassName. +func GetPersistentVolumeClass(volume *PersistentVolume) string { + // Use beta annotation first + if class, found := volume.Annotations[BetaStorageClassAnnotation]; found { + return class + } + + return volume.Spec.StorageClassName +} + +// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was +// requested, it returns "". +func GetPersistentVolumeClaimClass(claim *PersistentVolumeClaim) string { + // Use beta annotation first + if class, found := claim.Annotations[BetaStorageClassAnnotation]; found { + return class + } + + if claim.Spec.StorageClassName != nil { + return *claim.Spec.StorageClassName + } + + return "" +} + +// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field. 
+func PersistentVolumeClaimHasClass(claim *PersistentVolumeClaim) bool { + // Use beta annotation first + if _, found := claim.Annotations[BetaStorageClassAnnotation]; found { + return true + } + + if claim.Spec.StorageClassName != nil { + return true + } + + return false +} diff --git a/vendor/k8s.io/client-go/pkg/api/install/OWNERS b/vendor/k8s.io/client-go/pkg/api/install/OWNERS new file mode 100755 index 000000000..01d6b3702 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/install/OWNERS @@ -0,0 +1,11 @@ +reviewers: +- lavalamp +- smarterclayton +- deads2k +- caesarxuchao +- liggitt +- nikhiljindal +- dims +- krousey +- david-mcmahon +- feihujiang diff --git a/vendor/k8s.io/client-go/pkg/api/install/install.go b/vendor/k8s.io/client-go/pkg/api/install/install.go new file mode 100644 index 000000000..1b9553b70 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/install/install.go @@ -0,0 +1,70 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the v1 monolithic api, making it available as an +// option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/api/v1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: api.GroupName, + VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/api", + AddInternalObjectsToScheme: api.AddToScheme, + RootScopedKinds: sets.NewString( + "Node", + "Namespace", + "PersistentVolume", + "ComponentStatus", + ), + IgnoredKinds: sets.NewString( + "ListOptions", + "DeleteOptions", + "Status", + "PodLogOptions", + "PodExecOptions", + "PodAttachOptions", + "PodPortForwardOptions", + "PodProxyOptions", + "NodeProxyOptions", + "ServiceProxyOptions", + "ThirdPartyResource", + "ThirdPartyResourceData", + "ThirdPartyResourceList", + ), + }, + announced.VersionToSchemeFunc{ + v1.SchemeGroupVersion.Version: v1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/api/json.go b/vendor/k8s.io/client-go/pkg/api/json.go new file mode 100644 index 000000000..3a6e04c18 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/json.go @@ -0,0 +1,28 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import "encoding/json" + +// This file implements json marshaling/unmarshaling interfaces on objects that are currently marshaled into annotations +// to prevent anyone from marshaling these internal structs. + +var _ = json.Marshaler(&AvoidPods{}) +var _ = json.Unmarshaler(&AvoidPods{}) + +func (AvoidPods) MarshalJSON() ([]byte, error) { panic("do not marshal internal struct") } +func (*AvoidPods) UnmarshalJSON([]byte) error { panic("do not unmarshal to internal struct") } diff --git a/vendor/k8s.io/client-go/pkg/api/ref.go b/vendor/k8s.io/client-go/pkg/api/ref.go new file mode 100644 index 000000000..370cf5513 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/ref.go @@ -0,0 +1,132 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "errors" + "fmt" + "net/url" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + // Errors that could be returned by GetReference. + ErrNilObject = errors.New("can't reference a nil object") + ErrNoSelfLink = errors.New("selfLink was empty, can't make reference") +) + +// GetReference returns an ObjectReference which refers to the given +// object, or an error if the object doesn't follow the conventions +// that would allow this. +// TODO: should take a meta.Interface see http://issue.k8s.io/7127 +func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference, error) { + if obj == nil { + return nil, ErrNilObject + } + if ref, ok := obj.(*ObjectReference); ok { + // Don't make a reference to a reference. 
+ return ref, nil + } + + gvk := obj.GetObjectKind().GroupVersionKind() + + // if the object referenced is actually persisted, we can just get kind from meta + // if we are building an object reference to something not yet persisted, we should fallback to scheme + kind := gvk.Kind + if len(kind) == 0 { + // TODO: this is wrong + gvks, _, err := scheme.ObjectKinds(obj) + if err != nil { + return nil, err + } + kind = gvks[0].Kind + } + + // An object that implements only List has enough metadata to build a reference + var listMeta meta.List + objectMeta, err := meta.Accessor(obj) + if err != nil { + listMeta, err = meta.ListAccessor(obj) + if err != nil { + return nil, err + } + } else { + listMeta = objectMeta + } + + // if the object referenced is actually persisted, we can also get version from meta + version := gvk.GroupVersion().String() + if len(version) == 0 { + selfLink := listMeta.GetSelfLink() + if len(selfLink) == 0 { + return nil, ErrNoSelfLink + } + selfLinkUrl, err := url.Parse(selfLink) + if err != nil { + return nil, err + } + // example paths: /<prefix>/<version>/* + parts := strings.Split(selfLinkUrl.Path, "/") + if len(parts) < 3 { + return nil, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version) + } + version = parts[2] + } + + // only has list metadata + if objectMeta == nil { + return &ObjectReference{ + Kind: kind, + APIVersion: version, + ResourceVersion: listMeta.GetResourceVersion(), + }, nil + } + + return &ObjectReference{ + Kind: kind, + APIVersion: version, + Name: objectMeta.GetName(), + Namespace: objectMeta.GetNamespace(), + UID: objectMeta.GetUID(), + ResourceVersion: objectMeta.GetResourceVersion(), + }, nil +} + +// GetPartialReference is exactly like GetReference, but allows you to set the FieldPath. +func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*ObjectReference, error) { + ref, err := GetReference(scheme, obj) + if err != nil { + return nil, err + } + ref.FieldPath = fieldPath + return ref, nil +} + +// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that +// intend only to get a reference to that object. This simplifies the event recording interface. +func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} +func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/vendor/k8s.io/client-go/pkg/api/register.go b/vendor/k8s.io/client-go/pkg/api/register.go new file mode 100644 index 000000000..bd842b182 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/register.go @@ -0,0 +1,135 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
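The selfLink fallback inside GetReference simply takes the second path segment as the API version. A minimal standalone sketch of that parsing step follows; the example selfLink in main is invented for illustration and the helper name is not part of the vendored package.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// versionFromSelfLink mirrors the fallback in GetReference: for paths of the
// form /<prefix>/<version>/..., the version is the second path segment.
func versionFromSelfLink(selfLink string) (string, error) {
	u, err := url.Parse(selfLink)
	if err != nil {
		return "", err
	}
	parts := strings.Split(u.Path, "/")
	if len(parts) < 3 {
		return "", fmt.Errorf("unexpected self link format: %q", selfLink)
	}
	return parts[2], nil
}

func main() {
	// Hypothetical selfLink in the shape an apiserver would set it.
	v, err := versionFromSelfLink("/api/v1/namespaces/default/pods/example")
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // "v1"
}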
+*/ + +package api + +import ( + "os" + + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +// GroupFactoryRegistry is the APIGroupFactoryRegistry (overlaps a bit with Registry, see comments in package for details) +var GroupFactoryRegistry = make(announced.APIGroupFactoryRegistry) + +// Registry is an instance of an API registry. This is an interim step to start removing the idea of a global +// API registry. +var Registry = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS")) + +// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered. +// NOTE: If you are copying this file to start a new api group, STOP! Copy the +// extensions group instead. This Scheme is special and should appear ONLY in +// the api group, unless you really know what you're doing. +// TODO(lavalamp): make the above error impossible. +var Scheme = runtime.NewScheme() + +// Codecs provides access to encoding and decoding for the scheme +var Codecs = serializer.NewCodecFactory(Scheme) + +// GroupName is the group name use in this package +const GroupName = "" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Unversioned is group version for unversioned API objects +// TODO: this should be v1 probably +var Unversioned = schema.GroupVersion{Group: "", Version: "v1"} + +// ParameterCodec handles versioning of objects that are converted to query parameters. +var ParameterCodec = runtime.NewParameterCodec(Scheme) + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { + return err + } + scheme.AddKnownTypes(SchemeGroupVersion, + &Pod{}, + &PodList{}, + &PodStatusResult{}, + &PodTemplate{}, + &PodTemplateList{}, + &ReplicationControllerList{}, + &ReplicationController{}, + &ServiceList{}, + &Service{}, + &ServiceProxyOptions{}, + &NodeList{}, + &Node{}, + &NodeProxyOptions{}, + &Endpoints{}, + &EndpointsList{}, + &Binding{}, + &Event{}, + &EventList{}, + &List{}, + &LimitRange{}, + &LimitRangeList{}, + &ResourceQuota{}, + &ResourceQuotaList{}, + &Namespace{}, + &NamespaceList{}, + &ServiceAccount{}, + &ServiceAccountList{}, + &Secret{}, + &SecretList{}, + &PersistentVolume{}, + &PersistentVolumeList{}, + &PersistentVolumeClaim{}, + &PersistentVolumeClaimList{}, + &PodAttachOptions{}, + &PodLogOptions{}, + &PodExecOptions{}, + &PodPortForwardOptions{}, + &PodProxyOptions{}, + &ComponentStatus{}, + &ComponentStatusList{}, + &SerializedReference{}, + &RangeAllocation{}, + &ConfigMap{}, + &ConfigMapList{}, + ) + + // Register Unversioned types under their own special group + scheme.AddUnversionedTypes(Unversioned, 
+ &metav1.Status{}, + &metav1.APIVersions{}, + &metav1.APIGroupList{}, + &metav1.APIGroup{}, + &metav1.APIResourceList{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/api/resource_helpers.go b/vendor/k8s.io/client-go/pkg/api/resource_helpers.go new file mode 100644 index 000000000..88d0f80d7 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/resource_helpers.go @@ -0,0 +1,229 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "time" + + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Returns string version of ResourceName. +func (self ResourceName) String() string { + return string(self) +} + +// Returns the CPU limit if specified. +func (self *ResourceList) Cpu() *resource.Quantity { + if val, ok := (*self)[ResourceCPU]; ok { + return &val + } + return &resource.Quantity{Format: resource.DecimalSI} +} + +// Returns the Memory limit if specified. +func (self *ResourceList) Memory() *resource.Quantity { + if val, ok := (*self)[ResourceMemory]; ok { + return &val + } + return &resource.Quantity{Format: resource.BinarySI} +} + +func (self *ResourceList) Pods() *resource.Quantity { + if val, ok := (*self)[ResourcePods]; ok { + return &val + } + return &resource.Quantity{} +} + +func (self *ResourceList) NvidiaGPU() *resource.Quantity { + if val, ok := (*self)[ResourceNvidiaGPU]; ok { + return &val + } + return &resource.Quantity{} +} + +func GetContainerStatus(statuses []ContainerStatus, name string) (ContainerStatus, bool) { + for i := range statuses { + if statuses[i].Name == name { + return statuses[i], true + } + } + return ContainerStatus{}, false +} + +func GetExistingContainerStatus(statuses []ContainerStatus, name string) ContainerStatus { + for i := range statuses { + if statuses[i].Name == name { + return statuses[i] + } + } + return ContainerStatus{} +} + +// IsPodAvailable returns true if a pod is available; false otherwise. +// Precondition for an available pod is that it must be ready. On top +// of that, there are two cases when a pod can be considered available: +// 1. minReadySeconds == 0, or +// 2. LastTransitionTime (is set) + minReadySeconds < current time +func IsPodAvailable(pod *Pod, minReadySeconds int32, now metav1.Time) bool { + if !IsPodReady(pod) { + return false + } + + c := GetPodReadyCondition(pod.Status) + minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second + if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) { + return true + } + return false +} + +// IsPodReady returns true if a pod is ready; false otherwise. +func IsPodReady(pod *Pod) bool { + return IsPodReadyConditionTrue(pod.Status) +} + +// IsPodReady retruns true if a pod is ready; false otherwise. 
+func IsPodReadyConditionTrue(status PodStatus) bool { + condition := GetPodReadyCondition(status) + return condition != nil && condition.Status == ConditionTrue +} + +// Extracts the pod ready condition from the given status and returns that. +// Returns nil if the condition is not present. +func GetPodReadyCondition(status PodStatus) *PodCondition { + _, condition := GetPodCondition(&status, PodReady) + return condition +} + +// GetPodCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func GetPodCondition(status *PodStatus, conditionType PodConditionType) (int, *PodCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} + +// GetNodeCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func GetNodeCondition(status *NodeStatus, conditionType NodeConditionType) (int, *NodeCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} + +// Updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the +// status has changed. +// Returns true if pod condition has changed or has been added. +func UpdatePodCondition(status *PodStatus, condition *PodCondition) bool { + condition.LastTransitionTime = metav1.Now() + // Try to find this pod condition. + conditionIndex, oldCondition := GetPodCondition(status, condition.Type) + + if oldCondition == nil { + // We are adding new pod condition. + status.Conditions = append(status.Conditions, *condition) + return true + } else { + // We are updating an existing condition, so we need to check if it has changed. + if condition.Status == oldCondition.Status { + condition.LastTransitionTime = oldCondition.LastTransitionTime + } + + isEqual := condition.Status == oldCondition.Status && + condition.Reason == oldCondition.Reason && + condition.Message == oldCondition.Message && + condition.LastProbeTime.Equal(oldCondition.LastProbeTime) && + condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime) + + status.Conditions[conditionIndex] = *condition + // Return true if one of the fields have changed. + return !isEqual + } +} + +// IsNodeReady returns true if a node is ready; false otherwise. +func IsNodeReady(node *Node) bool { + for _, c := range node.Status.Conditions { + if c.Type == NodeReady { + return c.Status == ConditionTrue + } + } + return false +} + +// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all +// containers of the pod. 
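The pattern UpdatePodCondition uses (keep the previous LastTransitionTime when the status did not actually flip, overwrite the stored condition, and report whether anything changed) can be sketched standalone. The condition struct below is a trimmed, made-up stand-in that uses time.Time instead of metav1.Time and compares only a subset of the fields the vendored code checks.

package main

import (
	"fmt"
	"time"
)

// condition is a trimmed stand-in for the vendored PodCondition type.
type condition struct {
	Type               string
	Status             string
	Reason             string
	Message            string
	LastTransitionTime time.Time
}

// updateCondition mirrors the shape of UpdatePodCondition: preserve the old
// transition time when the status is unchanged, store the new condition, and
// report whether any compared field actually changed.
func updateCondition(conditions []condition, c condition) ([]condition, bool) {
	c.LastTransitionTime = time.Now()
	for i, old := range conditions {
		if old.Type != c.Type {
			continue
		}
		if c.Status == old.Status {
			c.LastTransitionTime = old.LastTransitionTime
		}
		changed := c.Status != old.Status || c.Reason != old.Reason || c.Message != old.Message
		conditions[i] = c
		return conditions, changed
	}
	// No existing condition of this type: append it.
	return append(conditions, c), true
}

func main() {
	conds := []condition{{Type: "Ready", Status: "False", Reason: "ContainersNotReady"}}
	conds, changed := updateCondition(conds, condition{Type: "Ready", Status: "True"})
	fmt.Println(changed, conds[0].Status) // true True
}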
+func PodRequestsAndLimits(pod *Pod) (reqs map[ResourceName]resource.Quantity, limits map[ResourceName]resource.Quantity, err error) { + reqs, limits = map[ResourceName]resource.Quantity{}, map[ResourceName]resource.Quantity{} + for _, container := range pod.Spec.Containers { + for name, quantity := range container.Resources.Requests { + if value, ok := reqs[name]; !ok { + reqs[name] = *quantity.Copy() + } else { + value.Add(quantity) + reqs[name] = value + } + } + for name, quantity := range container.Resources.Limits { + if value, ok := limits[name]; !ok { + limits[name] = *quantity.Copy() + } else { + value.Add(quantity) + limits[name] = value + } + } + } + // init containers define the minimum of any resource + for _, container := range pod.Spec.InitContainers { + for name, quantity := range container.Resources.Requests { + value, ok := reqs[name] + if !ok { + reqs[name] = *quantity.Copy() + continue + } + if quantity.Cmp(value) > 0 { + reqs[name] = *quantity.Copy() + } + } + for name, quantity := range container.Resources.Limits { + value, ok := limits[name] + if !ok { + limits[name] = *quantity.Copy() + continue + } + if quantity.Cmp(value) > 0 { + limits[name] = *quantity.Copy() + } + } + } + return +} diff --git a/vendor/k8s.io/client-go/pkg/api/types.go b/vendor/k8s.io/client-go/pkg/api/types.go new file mode 100644 index 000000000..4f4236950 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/types.go @@ -0,0 +1,3822 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// Common string formats +// --------------------- +// Many fields in this API have formatting requirements. The commonly used +// formats are defined here. +// +// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier" +// in the C language. This is captured by the following regex: +// [A-Za-z_][A-Za-z0-9_]* +// This defines the format, but not the length restriction, which should be +// specified at the definition of any field of this type. +// +// DNS_LABEL: This is a string, no more than 63 characters long, that conforms +// to the definition of a "label" in RFCs 1035 and 1123. This is captured +// by the following regex: +// [a-z0-9]([-a-z0-9]*[a-z0-9])? +// +// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms +// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured +// by the following regex: +// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* +// or more simply: +// DNS_LABEL(\.DNS_LABEL)* +// +// IANA_SVC_NAME: This is a string, no more than 15 characters long, that +// conforms to the definition of IANA service name in RFC 6335. 
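PodRequestsAndLimits sums requests and limits across the regular containers and then raises each entry to at least the per-init-container value, since init containers run one at a time and only define a floor. A standalone sketch of that aggregation using plain int64 values instead of resource.Quantity (the container figures below are invented):

package main

import "fmt"

// aggregate mirrors the shape of PodRequestsAndLimits: sum the values of the
// regular containers, then treat each init container as a floor (max).
func aggregate(containers, initContainers []map[string]int64) map[string]int64 {
	total := map[string]int64{}
	for _, c := range containers {
		for name, v := range c {
			total[name] += v
		}
	}
	for _, c := range initContainers {
		for name, v := range c {
			if v > total[name] {
				total[name] = v
			}
		}
	}
	return total
}

func main() {
	containers := []map[string]int64{
		{"cpu": 100, "memory": 64},
		{"cpu": 200, "memory": 128},
	}
	initContainers := []map[string]int64{
		{"cpu": 500}, // a heavy init step raises the cpu floor
	}
	fmt.Println(aggregate(containers, initContainers)) // map[cpu:500 memory:192]
}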
+// It must contains at least one letter [a-z] and it must contains only [a-z0-9-]. +// Hypens ('-') cannot be leading or trailing character of the string +// and cannot be adjacent to other hyphens. + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. +type ObjectMeta struct { + // Name is unique within a namespace. Name is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // +optional + Name string + + // GenerateName indicates that the name should be made unique by the server prior to persisting + // it. A non-empty value for the field indicates the name will be made unique (and the name + // returned to the client will be different than the name passed). The value of this field will + // be combined with a unique suffix on the server if the Name field has not been provided. + // The provided value must be valid within the rules for Name, and may be truncated by the length + // of the suffix required to make the value unique on the server. + // + // If this field is specified, and Name is not present, the server will NOT return a 409 if the + // generated name exists - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // +optional + GenerateName string + + // Namespace defines the space within which name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // +optional + Namespace string + + // SelfLink is a URL representing this object. + // +optional + SelfLink string + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // +optional + UID types.UID + + // An opaque value that represents the version of this resource. May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and values may only be valid for a particular + // resource or set of resources. Only servers will generate resource versions. + // +optional + ResourceVersion string + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. Read-only. + // +optional + Generation int64 + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. + // +optional + CreationTimestamp metav1.Time + + // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This + // field is set by the server when a graceful deletion is requested by the user, and is not + // directly settable by a client. 
The resource is expected to be deleted (no longer visible + // from resource lists, and not reachable by name) after the time in this field. Once set, + // this value may not be unset or be set further into the future, although it may be shortened + // or the resource may be deleted prior to this time. For example, a user may request that + // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination + // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard + // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the + // API. In the presence of network partitions, this object may still exist after this + // timestamp, until an administrator or automated process can determine the resource is + // fully terminated. + // If not set, graceful deletion of the object has not been requested. + // + // Populated by the system when a graceful deletion is requested. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + DeletionTimestamp *metav1.Time + + // DeletionGracePeriodSeconds records the graceful deletion value set when graceful deletion + // was requested. Represents the most recent grace period, and may only be shortened once set. + // +optional + DeletionGracePeriodSeconds *int64 + + // Labels are key value pairs that may be used to scope and select individual resources. + // Label keys are of the form: + // label-key ::= prefixed-name | name + // prefixed-name ::= prefix '/' name + // prefix ::= DNS_SUBDOMAIN + // name ::= DNS_LABEL + // The prefix is optional. If the prefix is not specified, the key is assumed to be private + // to the user. Other system components that wish to use labels must specify a prefix. The + // "kubernetes.io/" prefix is reserved for use by kubernetes components. + // +optional + Labels map[string]string + + // Annotations are unstructured key value data stored with a resource that may be set by + // external tooling. They are not queryable and should be preserved when modifying + // objects. Annotation keys have the same formatting restrictions as Label keys. See the + // comments on Labels for details. + // +optional + Annotations map[string]string + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + OwnerReferences []metav1.OwnerReference + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + // +optional + Finalizers []string + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. 
+ // +optional + ClusterName string +} + +const ( + // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients + NamespaceDefault string = "default" + // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces + NamespaceAll string = "" + // NamespaceNone is the argument for a context when there is no namespace. + NamespaceNone string = "" + // NamespaceSystem is the system namespace where we place system components. + NamespaceSystem string = "kube-system" + // NamespacePublic is the namespace where we place public info (ConfigMaps) + NamespacePublic string = "kube-public" + // TerminationMessagePathDefault means the default path to capture the application termination message running in a container + TerminationMessagePathDefault string = "/dev/termination-log" +) + +// Volume represents a named volume in a pod that may be accessed by any containers in the pod. +type Volume struct { + // Required: This must be a DNS_LABEL. Each volume in a pod must have + // a unique name. + Name string + // The VolumeSource represents the location and type of a volume to mount. + // This is optional for now. If not specified, the Volume is implied to be an EmptyDir. + // This implied behavior is deprecated and will be removed in a future version. + // +optional + VolumeSource +} + +// VolumeSource represents the source location of a volume to mount. +// Only one of its members may be specified. +type VolumeSource struct { + // HostPath represents file or directory on the host machine that is + // directly exposed to the container. This is generally used for system + // agents or other privileged things that are allowed to see the host + // machine. Most containers will NOT need this. + // --- + // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + // mount host directories as read/write. + // +optional + HostPath *HostPathVolumeSource + // EmptyDir represents a temporary directory that shares a pod's lifetime. + // +optional + EmptyDir *EmptyDirVolumeSource + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource + // AWSElasticBlockStore represents an AWS EBS disk that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource + // GitRepo represents a git repository at a particular revision. + // +optional + GitRepo *GitRepoVolumeSource + // Secret represents a secret that should populate this volume. + // +optional + Secret *SecretVolumeSource + // NFS represents an NFS mount on the host that shares a pod's lifetime + // +optional + NFS *NFSVolumeSource + // ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. 
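VolumeSource and PersistentVolumeSource are tagged unions in spirit: only one of their pointer members is supposed to be set. As a rough, standalone sketch of how such a constraint could be checked generically with reflection (this is not how the vendored validation code works; the miniSource type and its three members are made up):

package main

import (
	"fmt"
	"reflect"
)

// miniSource is a made-up, three-member stand-in for a VolumeSource-like union.
type miniSource struct {
	HostPath *string
	EmptyDir *struct{}
	NFS      *string
}

// countSet returns how many pointer fields of the given struct value are non-nil.
func countSet(v interface{}) int {
	rv := reflect.ValueOf(v)
	n := 0
	for i := 0; i < rv.NumField(); i++ {
		f := rv.Field(i)
		if f.Kind() == reflect.Ptr && !f.IsNil() {
			n++
		}
	}
	return n
}

func main() {
	path := "/data"
	src := miniSource{HostPath: &path}
	if countSet(src) != 1 {
		fmt.Println("exactly one volume source must be set")
		return
	}
	fmt.Println("ok: exactly one member set")
}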
+ // +optional + ISCSI *ISCSIVolumeSource + // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime + // +optional + Glusterfs *GlusterfsVolumeSource + // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace + // +optional + PersistentVolumeClaim *PersistentVolumeClaimVolumeSource + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime + // +optional + RBD *RBDVolumeSource + + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource + + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. + // +optional + FlexVolume *FlexVolumeSource + + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // +optional + Cinder *CinderVolumeSource + + // CephFS represents a Cephfs mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSVolumeSource + + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource + + // DownwardAPI represents metadata about the pod that should populate this volume + // +optional + DownwardAPI *DownwardAPIVolumeSource + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + AzureFile *AzureFileVolumeSource + // ConfigMap represents a configMap that should populate this volume + // +optional + ConfigMap *ConfigMapVolumeSource + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + AzureDisk *AzureDiskVolumeSource + // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource + // Items for all in one resources secrets, configmaps, and downward API + Projected *ProjectedVolumeSource + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource +} + +// Similar to VolumeSource but meant for the administrator who creates PVs. +// Exactly one of its members must be set. +type PersistentVolumeSource struct { + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource + // AWSElasticBlockStore represents an AWS EBS disk that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource + // HostPath represents a directory on the host. + // Provisioned by a developer or tester. + // This is useful for single-node development and testing only! + // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. 
+ // +optional + HostPath *HostPathVolumeSource + // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod + // +optional + Glusterfs *GlusterfsVolumeSource + // NFS represents an NFS mount on the host that shares a pod's lifetime + // +optional + NFS *NFSVolumeSource + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime + // +optional + RBD *RBDVolumeSource + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource + // ISCSIVolumeSource represents an ISCSI resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + ISCSI *ISCSIVolumeSource + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. + // +optional + FlexVolume *FlexVolumeSource + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // +optional + Cinder *CinderVolumeSource + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSVolumeSource + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + AzureFile *AzureFileVolumeSource + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + AzureDisk *AzureDiskVolumeSource + // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource +} + +type PersistentVolumeClaimVolumeSource struct { + // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume + ClaimName string + // Optional: Defaults to false (read/write). ReadOnly here + // will force the ReadOnly setting in VolumeMounts + // +optional + ReadOnly bool +} + +const ( + // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. + // It's currently still used and will be held for backwards compatibility + BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" +) + +// +genclient=true +// +nonNamespaced=true + +type PersistentVolume struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + //Spec defines a persistent volume owned by the cluster + // +optional + Spec PersistentVolumeSpec + + // Status represents the current information about persistent volume. 
+ // +optional + Status PersistentVolumeStatus +} + +type PersistentVolumeSpec struct { + // Resources represents the actual resources of the volume + Capacity ResourceList + // Source represents the location and type of a volume to mount. + PersistentVolumeSource + // AccessModes contains all ways the volume can be mounted + // +optional + AccessModes []PersistentVolumeAccessMode + // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. + // ClaimRef is expected to be non-nil when bound. + // claim.VolumeName is the authoritative bind between PV and PVC. + // When set to non-nil value, PVC.Spec.Selector of the referenced PVC is + // ignored, i.e. labels of this PV do not need to match PVC selector. + // +optional + ClaimRef *ObjectReference + // Optional: what happens to a persistent volume when released from its claim. + // +optional + PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy + // Name of StorageClass to which this persistent volume belongs. Empty value + // means that this volume does not belong to any StorageClass. + // +optional + StorageClassName string +} + +// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes +type PersistentVolumeReclaimPolicy string + +const ( + // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim. + // The volume plugin must support Recycling. + PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle" + // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. + // The volume plugin must support Deletion. + PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" + // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. + // The default policy is Retain. + PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" +) + +type PersistentVolumeStatus struct { + // Phase indicates if a volume is available, bound to a claim, or released by a claim + // +optional + Phase PersistentVolumePhase + // A human-readable message indicating details about why the volume is in this state. + // +optional + Message string + // Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI + // +optional + Reason string +} + +type PersistentVolumeList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + Items []PersistentVolume +} + +// +genclient=true + +// PersistentVolumeClaim is a user's request for and claim to a persistent volume +type PersistentVolumeClaim struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the volume requested by a pod author + // +optional + Spec PersistentVolumeClaimSpec + + // Status represents the current information about a claim + // +optional + Status PersistentVolumeClaimStatus +} + +type PersistentVolumeClaimList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + Items []PersistentVolumeClaim +} + +// PersistentVolumeClaimSpec describes the common attributes of storage devices +// and allows a Source for provider-specific attributes +type PersistentVolumeClaimSpec struct { + // Contains the types of access modes required + // +optional + AccessModes []PersistentVolumeAccessMode + // A label query over volumes to consider for binding. 
This selector is + // ignored when VolumeName is set + // +optional + Selector *metav1.LabelSelector + // Resources represents the minimum resources required + // +optional + Resources ResourceRequirements + // VolumeName is the binding reference to the PersistentVolume backing this + // claim. When set to non-empty value Selector is not evaluated + // +optional + VolumeName string + // Name of the StorageClass required by the claim. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1 + // +optional + StorageClassName *string +} + +type PersistentVolumeClaimStatus struct { + // Phase represents the current phase of PersistentVolumeClaim + // +optional + Phase PersistentVolumeClaimPhase + // AccessModes contains all ways the volume backing the PVC can be mounted + // +optional + AccessModes []PersistentVolumeAccessMode + // Represents the actual resources of the underlying volume + // +optional + Capacity ResourceList +} + +type PersistentVolumeAccessMode string + +const ( + // can be mounted read/write mode to exactly 1 host + ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" + // can be mounted in read-only mode to many hosts + ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" + // can be mounted in read/write mode to many hosts + ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany" +) + +type PersistentVolumePhase string + +const ( + // used for PersistentVolumes that are not available + VolumePending PersistentVolumePhase = "Pending" + // used for PersistentVolumes that are not yet bound + // Available volumes are held by the binder and matched to PersistentVolumeClaims + VolumeAvailable PersistentVolumePhase = "Available" + // used for PersistentVolumes that are bound + VolumeBound PersistentVolumePhase = "Bound" + // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted + // released volumes must be recycled before becoming available again + // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource + VolumeReleased PersistentVolumePhase = "Released" + // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim + VolumeFailed PersistentVolumePhase = "Failed" +) + +type PersistentVolumeClaimPhase string + +const ( + // used for PersistentVolumeClaims that are not yet bound + ClaimPending PersistentVolumeClaimPhase = "Pending" + // used for PersistentVolumeClaims that are bound + ClaimBound PersistentVolumeClaimPhase = "Bound" + // used for PersistentVolumeClaims that lost their underlying + // PersistentVolume. The claim was bound to a PersistentVolume and this + // volume does not exist any longer and all data on it was lost. + ClaimLost PersistentVolumeClaimPhase = "Lost" +) + +// Represents a host path mapped into a pod. +// Host path volumes do not support ownership management or SELinux relabeling. +type HostPathVolumeSource struct { + Path string +} + +// Represents an empty directory for a pod. +// Empty directory volumes support ownership management and SELinux relabeling. +type EmptyDirVolumeSource struct { + // TODO: Longer term we want to represent the selection of underlying + // media more like a scheduling problem - user says what traits they + // need, we give them a backing store that satisfies that. For now + // this will cover the most common needs. + // Optional: what type of storage medium should back this directory. + // The default is "" which means to use the node's default medium. 
+ // +optional + Medium StorageMedium +} + +// StorageMedium defines ways that storage can be allocated to a volume. +type StorageMedium string + +const ( + StorageMediumDefault StorageMedium = "" // use whatever the default is for the node + StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) +) + +// Protocol defines network protocols supported for things like container ports. +type Protocol string + +const ( + // ProtocolTCP is the TCP protocol. + ProtocolTCP Protocol = "TCP" + // ProtocolUDP is the UDP protocol. + ProtocolUDP Protocol = "UDP" +) + +// Represents a Persistent Disk resource in Google Compute Engine. +// +// A GCE PD must exist before mounting to a container. The disk must +// also be in the same GCE project and zone as the kubelet. A GCE PD +// can only be mounted as read/write once or read-only many times. GCE +// PDs support ownership management and SELinux relabeling. +type GCEPersistentDiskVolumeSource struct { + // Unique name of the PD resource. Used to identify the disk in GCE + PDName string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Partition on the disk to mount. + // If omitted, kubelet will attempt to mount the device name. + // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. + // +optional + Partition int32 + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +type ISCSIVolumeSource struct { + // Required: iSCSI target portal + // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) + // +optional + TargetPortal string + // Required: target iSCSI Qualified Name + // +optional + IQN string + // Required: iSCSI target lun number + // +optional + Lun int32 + // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // +optional + ISCSIInterface string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // Required: list of iSCSI target portal ips for high availability. + // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) + // +optional + Portals []string +} + +// Represents a Fibre Channel volume. +// Fibre Channel volumes can only be mounted as read/write once. +// Fibre Channel volumes support ownership management and SELinux relabeling. +type FCVolumeSource struct { + // Required: FC target worldwide names (WWNs) + TargetWWNs []string + // Required: FC target lun number + Lun *int32 + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// FlexVolume represents a generic volume resource that is +// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. +type FlexVolumeSource struct { + // Driver is the name of the driver to use for this volume. + Driver string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + // +optional + FSType string + // Optional: SecretRef is reference to the secret object containing + // sensitive information to pass to the plugin scripts. This may be + // empty if no secret object is specified. If the secret object + // contains more than one secret, all secrets are passed to the plugin + // scripts. + // +optional + SecretRef *LocalObjectReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // Optional: Extra driver options if any. + // +optional + Options map[string]string +} + +// Represents a Persistent Disk resource in AWS. +// +// An AWS EBS disk must exist before mounting to a container. The disk +// must also be in the same AWS zone as the kubelet. An AWS EBS disk +// can only be mounted as read/write once. AWS EBS volumes support +// ownership management and SELinux relabeling. +type AWSElasticBlockStoreVolumeSource struct { + // Unique id of the persistent disk resource. Used to identify the disk in AWS + VolumeID string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Partition on the disk to mount. + // If omitted, kubelet will attempt to mount the device name. + // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. + // +optional + Partition int32 + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a volume that is populated with the contents of a git repository. +// Git repo volumes do not support ownership management. +// Git repo volumes support SELinux relabeling. +type GitRepoVolumeSource struct { + // Repository URL + Repository string + // Commit hash, this is optional + // +optional + Revision string + // Clone target, this is optional + // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + // git repository. Otherwise, if specified, the volume will contain the git repository in + // the subdirectory with the given name. + // +optional + Directory string + // TODO: Consider credentials here. +} + +// Adapts a Secret into a volume. +// +// The contents of the target Secret's Data field will be presented in a volume +// as files using the keys in the Data field as the file names. +// Secret volumes support ownership management and SELinux relabeling. +type SecretVolumeSource struct { + // Name of the secret in the pod's namespace to use. 
+ // +optional + SecretName string + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool +} + +// Adapts a secret into a projected volume. +// +// The contents of the target Secret's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names. +// Note that this is identical to a secret volume source without the default +// mode. +type SecretProjection struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool +} + +// Represents an NFS mount that lasts the lifetime of a pod. +// NFS volumes do not support ownership management or SELinux relabeling. +type NFSVolumeSource struct { + // Server is the hostname or IP address of the NFS server + Server string + + // Path is the exported NFS share + Path string + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the NFS export to be mounted with read-only permissions + // +optional + ReadOnly bool +} + +// Represents a Quobyte mount that lasts the lifetime of a pod. +// Quobyte volumes do not support ownership management or SELinux relabeling. +type QuobyteVolumeSource struct { + // Registry represents a single or multiple Quobyte Registry services + // specified as a string as host:port pair (multiple entries are separated with commas) + // which acts as the central registry for volumes + Registry string + + // Volume is a string that references an already created Quobyte volume by name. + Volume string + + // Defaults to false (read/write). ReadOnly here will force + // the Quobyte to be mounted with read-only permissions + // +optional + ReadOnly bool + + // User to map volume access to + // Defaults to the root user + // +optional + User string + + // Group to map volume access to + // Default is no group + // +optional + Group string +} + +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. 
+type GlusterfsVolumeSource struct { + // Required: EndpointsName is the endpoint name that details Glusterfs topology + EndpointsName string + + // Required: Path is the Glusterfs volume path + Path string + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the Glusterfs to be mounted with read-only permissions + // +optional + ReadOnly bool +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +type RBDVolumeSource struct { + // Required: CephMonitors is a collection of Ceph monitors + CephMonitors []string + // Required: RBDImage is the rados image name + RBDImage string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: RadosPool is the rados pool name,default is rbd + // +optional + RBDPool string + // Optional: RBDUser is the rados user name, default is admin + // +optional + RadosUser string + // Optional: Keyring is the path to key ring for RBDUser, default is /etc/ceph/keyring + // +optional + Keyring string + // Optional: SecretRef is name of the authentication secret for RBDUser, default is nil. + // +optional + SecretRef *LocalObjectReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a cinder volume resource in Openstack. A Cinder volume +// must exist before mounting to a container. The volume must also be +// in the same region as the kubelet. Cinder volumes support ownership +// management and SELinux relabeling. +type CinderVolumeSource struct { + // Unique id of the volume used to identify the cinder volume + VolumeID string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a Ceph Filesystem mount that lasts the lifetime of a pod +// Cephfs volumes do not support ownership management or SELinux relabeling. +type CephFSVolumeSource struct { + // Required: Monitors is a collection of Ceph monitors + Monitors []string + // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + // +optional + Path string + // Optional: User is the rados user name, default is admin + // +optional + User string + // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // +optional + SecretFile string + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // +optional + SecretRef *LocalObjectReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a Flocker volume mounted by the Flocker agent. +// One and only one of datasetName and datasetUUID should be set. +// Flocker volumes do not support ownership management or SELinux relabeling. 
+type FlockerVolumeSource struct { + // Name of the dataset stored as metadata -> name on the dataset for Flocker + // should be considered as deprecated + // +optional + DatasetName string + // UUID of the dataset. This is unique identifier of a Flocker dataset + // +optional + DatasetUUID string +} + +// Represents a volume containing downward API info. +// Downward API volumes support ownership management and SELinux relabeling. +type DownwardAPIVolumeSource struct { + // Items is a list of DownwardAPIVolume file + // +optional + Items []DownwardAPIVolumeFile + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 +} + +// Represents a single file containing information from the downward API +type DownwardAPIVolumeFile struct { + // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' + Path string + // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. + // +optional + FieldRef *ObjectFieldSelector + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // +optional + ResourceFieldRef *ResourceFieldSelector + // Optional: mode bits to use on this file, must be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + Mode *int32 +} + +// Represents downward API info for projecting into a projected volume. +// Note that this is identical to a downwardAPI volume source without the default +// mode. +type DownwardAPIProjection struct { + // Items is a list of DownwardAPIVolume file + // +optional + Items []DownwardAPIVolumeFile +} + +// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. +type AzureFileVolumeSource struct { + // the name of secret that contains Azure Storage Account Name and Key + SecretName string + // Share Name + ShareName string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a vSphere volume resource. +type VsphereVirtualDiskVolumeSource struct { + // Path that identifies vSphere volume vmdk + VolumePath string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string +} + +// Represents a Photon Controller persistent disk resource. +type PhotonPersistentDiskVolumeSource struct { + // ID that identifies Photon Controller persistent disk + PdID string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + FSType string +} + +// PortworxVolumeSource represents a Portworx volume resource. 
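// Editor's note (illustrative sketch, not part of the vendored patch):
// labelsDownwardAPIVolume projects the pod's labels into a file named
// "labels" using the DownwardAPIVolumeSource and DownwardAPIVolumeFile types
// above; the 0644 mode and the field path are example values.
func labelsDownwardAPIVolume() DownwardAPIVolumeSource {
	mode := int32(0644) // within the allowed 0..0777 range
	return DownwardAPIVolumeSource{
		Items: []DownwardAPIVolumeFile{{
			Path:     "labels",                                           // relative path of the projected file
			FieldRef: &ObjectFieldSelector{FieldPath: "metadata.labels"}, // labels are a supported field
			Mode:     &mode,                                              // per-file override of DefaultMode
		}},
	}
}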
+type PortworxVolumeSource struct { + // VolumeID uniquely identifies a Portworx volume + VolumeID string + // FSType represents the filesystem type to mount + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +type AzureDataDiskCachingMode string + +const ( + AzureDataDiskCachingNone AzureDataDiskCachingMode = "None" + AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly" + AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite" +) + +// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. +type AzureDiskVolumeSource struct { + // The Name of the data disk in the blob storage + DiskName string + // The URI of the data disk in the blob storage + DataDiskURI string + // Host Caching mode: None, Read Only, Read Write. + // +optional + CachingMode *AzureDataDiskCachingMode + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType *string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly *bool +} + +// ScaleIOVolumeSource represents a persistent ScaleIO volume +type ScaleIOVolumeSource struct { + // The host address of the ScaleIO API Gateway. + Gateway string + // The name of the storage system as configured in ScaleIO. + System string + // SecretRef references the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + SecretRef *LocalObjectReference + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + SSLEnabled bool + // The name of the Protection Domain for the configured storage (defaults to "default"). + // +optional + ProtectionDomain string + // The Storage Pool associated with the protection domain (defaults to "default"). + // +optional + StoragePool string + // Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). + // +optional + StorageMode string + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. + VolumeName string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Adapts a ConfigMap into a volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// volume as files using the keys in the Data field as the file names, unless +// the items element is populated with specific mappings of keys to paths. +// ConfigMap volumes support ownership management and SELinux relabeling. +type ConfigMapVolumeSource struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value.
If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 + // Specify whether the ConfigMap or its keys must be defined + // +optional + Optional *bool +} + +// Adapts a ConfigMap into a projected volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names, +// unless the items element is populated with specific mappings of keys to paths. +// Note that this is identical to a configmap volume source without the default +// mode. +type ConfigMapProjection struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Specify whether the ConfigMap or its keys must be defined + // +optional + Optional *bool +} + +// Represents a projected volume source +type ProjectedVolumeSource struct { + // list of volume projections + Sources []VolumeProjection + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 +} + +// Projection that may be projected along with other supported volume types +type VolumeProjection struct { + // all types below are the supported types for projection into the same volume + + // information about the secret data to project + Secret *SecretProjection + // information about the downwardAPI data to project + DownwardAPI *DownwardAPIProjection + // information about the configMap data to project + ConfigMap *ConfigMapProjection +} + +// Maps a string key to a path within a volume. +type KeyToPath struct { + // The key to project. + Key string + + // The relative path of the file to map the key to. + // May not be an absolute path. + // May not contain the path element '..'. + // May not start with the string '..'. + Path string + // Optional: mode bits to use on this file, should be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set.
+ // +optional + Mode *int32 +} + +// ContainerPort represents a network port in a single container +type ContainerPort struct { + // Optional: If specified, this must be an IANA_SVC_NAME Each named port + // in a pod must have a unique name. + // +optional + Name string + // Optional: If specified, this must be a valid port number, 0 < x < 65536. + // If HostNetwork is specified, this must match ContainerPort. + // +optional + HostPort int32 + // Required: This must be a valid port number, 0 < x < 65536. + ContainerPort int32 + // Required: Supports "TCP" and "UDP". + // +optional + Protocol Protocol + // Optional: What host IP to bind the external port to. + // +optional + HostIP string +} + +// VolumeMount describes a mounting of a Volume within a container. +type VolumeMount struct { + // Required: This must match the Name of a Volume [above]. + Name string + // Optional: Defaults to false (read-write). + // +optional + ReadOnly bool + // Required. Must not contain ':'. + MountPath string + // Path within the volume from which the container's volume should be mounted. + // Defaults to "" (volume's root). + // +optional + SubPath string +} + +// EnvVar represents an environment variable present in a Container. +type EnvVar struct { + // Required: This must be a C_IDENTIFIER. + Name string + // Optional: no more than one of the following may be specified. + // Optional: Defaults to ""; variable references $(VAR_NAME) are expanded + // using the previous defined environment variables in the container and + // any service environment variables. If a variable cannot be resolved, + // the reference in the input string will be unchanged. The $(VAR_NAME) + // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped + // references will never be expanded, regardless of whether the variable + // exists or not. + // +optional + Value string + // Optional: Specifies a source the value of this var should come from. + // +optional + ValueFrom *EnvVarSource +} + +// EnvVarSource represents a source for the value of an EnvVar. +// Only one of its fields may be set. +type EnvVarSource struct { + // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, + // spec.nodeName, spec.serviceAccountName, status.podIP. + // +optional + FieldRef *ObjectFieldSelector + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // +optional + ResourceFieldRef *ResourceFieldSelector + // Selects a key of a ConfigMap. + // +optional + ConfigMapKeyRef *ConfigMapKeySelector + // Selects a key of a secret in the pod's namespace. + // +optional + SecretKeyRef *SecretKeySelector +} + +// ObjectFieldSelector selects an APIVersioned field of an object. +type ObjectFieldSelector struct { + // Required: Version of the schema the FieldPath is written in terms of. + // If no value is specified, it will be defaulted to the APIVersion of the + // enclosing object. 
+ APIVersion string + // Required: Path of the field to select in the specified API version + FieldPath string +} + +// ResourceFieldSelector represents container resources (cpu, memory) and their output format +type ResourceFieldSelector struct { + // Container name: required for volumes, optional for env vars + // +optional + ContainerName string + // Required: resource to select + Resource string + // Specifies the output format of the exposed resources, defaults to "1" + // +optional + Divisor resource.Quantity +} + +// Selects a key from a ConfigMap. +type ConfigMapKeySelector struct { + // The ConfigMap to select from. + LocalObjectReference + // The key to select. + Key string + // Specify whether the ConfigMap or its key must be defined + // +optional + Optional *bool +} + +// SecretKeySelector selects a key of a Secret. +type SecretKeySelector struct { + // The name of the secret in the pod's namespace to select from. + LocalObjectReference + // The key of the secret to select from. Must be a valid secret key. + Key string + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool +} + +// EnvFromSource represents the source of a set of ConfigMaps +type EnvFromSource struct { + // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + // +optional + Prefix string + // The ConfigMap to select from. + //+optional + ConfigMapRef *ConfigMapEnvSource + // The Secret to select from. + //+optional + SecretRef *SecretEnvSource +} + +// ConfigMapEnvSource selects a ConfigMap to populate the environment +// variables with. +// +// The contents of the target ConfigMap's Data field will represent the +// key-value pairs as environment variables. +type ConfigMapEnvSource struct { + // The ConfigMap to select from. + LocalObjectReference + // Specify whether the ConfigMap must be defined + // +optional + Optional *bool +} + +// SecretEnvSource selects a Secret to populate the environment +// variables with. +// +// The contents of the target Secret's Data field will represent the +// key-value pairs as environment variables. +type SecretEnvSource struct { + // The Secret to select from. + LocalObjectReference + // Specify whether the Secret must be defined + // +optional + Optional *bool +} + +// HTTPHeader describes a custom header to be used in HTTP probes +type HTTPHeader struct { + // The header field name + Name string + // The header field value + Value string +} + +// HTTPGetAction describes an action based on HTTP Get requests. +type HTTPGetAction struct { + // Optional: Path to access on the HTTP server. + // +optional + Path string + // Required: Name or number of the port to access on the container. + // +optional + Port intstr.IntOrString + // Optional: Host name to connect to, defaults to the pod IP. You + // probably want to set "Host" in httpHeaders instead. + // +optional + Host string + // Optional: Scheme to use for connecting to the host, defaults to HTTP. + // +optional + Scheme URIScheme + // Optional: Custom headers to set in the request. HTTP allows repeated headers.
+ // +optional + HTTPHeaders []HTTPHeader +} + +// URIScheme identifies the scheme used for connection to a host for Get actions +type URIScheme string + +const ( + // URISchemeHTTP means that the scheme used will be http:// + URISchemeHTTP URIScheme = "HTTP" + // URISchemeHTTPS means that the scheme used will be https:// + URISchemeHTTPS URIScheme = "HTTPS" +) + +// TCPSocketAction describes an action based on opening a socket +type TCPSocketAction struct { + // Required: Port to connect to. + // +optional + Port intstr.IntOrString +} + +// ExecAction describes a "run in container" action. +type ExecAction struct { + // Command is the command line to execute inside the container, the working directory for the + // command is root ('/') in the container's filesystem. The command is simply exec'd, it is + // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + // a shell, you need to explicitly call out to that shell. + // +optional + Command []string +} + +// Probe describes a health check to be performed against a container to determine whether it is +// alive or ready to receive traffic. +type Probe struct { + // The action taken to determine the health of a container + Handler + // Length of time before health checking is activated. In seconds. + // +optional + InitialDelaySeconds int32 + // Length of time before health checking times out. In seconds. + // +optional + TimeoutSeconds int32 + // How often (in seconds) to perform the probe. + // +optional + PeriodSeconds int32 + // Minimum consecutive successes for the probe to be considered successful after having failed. + // Must be 1 for liveness. + // +optional + SuccessThreshold int32 + // Minimum consecutive failures for the probe to be considered failed after having succeeded. + // +optional + FailureThreshold int32 +} + +// PullPolicy describes a policy for if/when to pull a container image +type PullPolicy string + +const ( + // PullAlways means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. + PullAlways PullPolicy = "Always" + // PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present + PullNever PullPolicy = "Never" + // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. + PullIfNotPresent PullPolicy = "IfNotPresent" +) + +// TerminationMessagePolicy describes how termination messages are retrieved from a container. +type TerminationMessagePolicy string + +const ( + // TerminationMessageReadFile is the default behavior and will set the container status message to + // the contents of the container's terminationMessagePath when the container exits. + TerminationMessageReadFile TerminationMessagePolicy = "File" + // TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs + // for the container status message when the container exits with an error and the + // terminationMessagePath has no contents. + TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError" +) + +// Capability represent POSIX capabilities type +type Capability string + +// Capabilities represent POSIX capabilities that can be added or removed to a running container. 
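// Editor's note (illustrative sketch, not part of the vendored patch):
// httpLivenessProbe builds a Probe whose embedded Handler issues an HTTP GET;
// the path and port are placeholders, and intstr.FromInt comes from the
// intstr utility package this file already imports for IntOrString.
func httpLivenessProbe() Probe {
	return Probe{
		Handler: Handler{
			HTTPGet: &HTTPGetAction{
				Path:   "/healthz",           // endpoint probed on the container
				Port:   intstr.FromInt(8080), // numeric port wrapped as an IntOrString
				Scheme: URISchemeHTTP,
			},
		},
		InitialDelaySeconds: 10, // grace period before the first check
		PeriodSeconds:       5,  // probe every five seconds
		SuccessThreshold:    1,  // must be 1 for liveness probes
		FailureThreshold:    3,  // considered failed after three consecutive misses
	}
}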
+type Capabilities struct { + // Added capabilities + // +optional + Add []Capability + // Removed capabilities + // +optional + Drop []Capability +} + +// ResourceRequirements describes the compute resource requirements. +type ResourceRequirements struct { + // Limits describes the maximum amount of compute resources allowed. + // +optional + Limits ResourceList + // Requests describes the minimum amount of compute resources required. + // If Request is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to an implementation-defined value + // +optional + Requests ResourceList +} + +// Container represents a single container that is expected to be run on the host. +type Container struct { + // Required: This must be a DNS_LABEL. Each container in a pod must + // have a unique name. + Name string + // Required. + Image string + // Optional: The docker image's entrypoint is used if this is not provided; cannot be updated. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // +optional + Command []string + // Optional: The docker image's cmd is used if this is not provided; cannot be updated. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // +optional + Args []string + // Optional: Defaults to Docker's default. + // +optional + WorkingDir string + // +optional + Ports []ContainerPort + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + EnvFrom []EnvFromSource + // +optional + Env []EnvVar + // Compute resource requirements. + // +optional + Resources ResourceRequirements + // +optional + VolumeMounts []VolumeMount + // +optional + LivenessProbe *Probe + // +optional + ReadinessProbe *Probe + // +optional + Lifecycle *Lifecycle + // Required. + // +optional + TerminationMessagePath string + // +optional + TerminationMessagePolicy TerminationMessagePolicy + // Required: Policy for pulling images for this container + ImagePullPolicy PullPolicy + // Optional: SecurityContext defines the security options the container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + // +optional + SecurityContext *SecurityContext + + // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) + // and shouldn't be used for general purpose containers. + // +optional + Stdin bool + // +optional + StdinOnce bool + // +optional + TTY bool +} + +// Handler defines a specific action that should be taken +// TODO: pass structured data to these actions, and document that data here. 
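// Editor's note (illustrative sketch, not part of the vendored patch):
// exampleContainer populates the most common Container fields defined above;
// the image reference, port, and environment variable are placeholders, and
// httpLivenessProbe is the hypothetical helper sketched earlier.
func exampleContainer() Container {
	probe := httpLivenessProbe()
	return Container{
		Name:            "web",        // must be a DNS_LABEL, unique within the pod
		Image:           "nginx:1.11", // placeholder image reference
		Ports:           []ContainerPort{{ContainerPort: 80, Protocol: "TCP"}},
		Env:             []EnvVar{{Name: "MODE", Value: "production"}},
		LivenessProbe:   &probe,
		ImagePullPolicy: PullIfNotPresent, // pull only when the image is absent locally
	}
}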
+type Handler struct { + // One and only one of the following should be specified. + // Exec specifies the action to take. + // +optional + Exec *ExecAction + // HTTPGet specifies the http request to perform. + // +optional + HTTPGet *HTTPGetAction + // TCPSocket specifies an action involving a TCP port. + // TODO: implement a realistic TCP lifecycle hook + // +optional + TCPSocket *TCPSocketAction +} + +// Lifecycle describes actions that the management system should take in response to container lifecycle +// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks +// until the action is complete, unless the container process fails, in which case the handler is aborted. +type Lifecycle struct { + // PostStart is called immediately after a container is created. If the handler fails, the container + // is terminated and restarted. + // +optional + PostStart *Handler + // PreStop is called immediately before a container is terminated. The reason for termination is + // passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. + // +optional + PreStop *Handler +} + +// The below types are used by kube_client and api_server. + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition; +// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +type ContainerStateWaiting struct { + // A brief CamelCase string indicating details about why the container is in waiting state. + // +optional + Reason string + // A human-readable message indicating details about why the container is in waiting state. + // +optional + Message string +} + +type ContainerStateRunning struct { + // +optional + StartedAt metav1.Time +} + +type ContainerStateTerminated struct { + ExitCode int32 + // +optional + Signal int32 + // +optional + Reason string + // +optional + Message string + // +optional + StartedAt metav1.Time + // +optional + FinishedAt metav1.Time + // +optional + ContainerID string +} + +// ContainerState holds a possible state of container. +// Only one of its members may be specified. +// If none of them is specified, the default one is ContainerStateWaiting. +type ContainerState struct { + // +optional + Waiting *ContainerStateWaiting + // +optional + Running *ContainerStateRunning + // +optional + Terminated *ContainerStateTerminated +} + +type ContainerStatus struct { + // Each container in a pod must have a unique name. + Name string + // +optional + State ContainerState + // +optional + LastTerminationState ContainerState + // Ready specifies whether the container has passed its readiness check. + Ready bool + // Note that this is calculated from dead containers. But those containers are subject to + // garbage collection. This value will get capped at 5 by GC. + RestartCount int32 + Image string + ImageID string + // +optional + ContainerID string +} + +// PodPhase is a label for the condition of a pod at the current time. +type PodPhase string + +// These are the valid statuses of pods. +const ( + // PodPending means the pod has been accepted by the system, but one or more of the containers + // has not been started. 
This includes time before being bound to a node, as well as time spent + // pulling images onto the host. + PodPending PodPhase = "Pending" + // PodRunning means the pod has been bound to a node and all of the containers have been started. + // At least one container is still running or is in the process of being restarted. + PodRunning PodPhase = "Running" + // PodSucceeded means that all containers in the pod have voluntarily terminated + // with a container exit code of 0, and the system is not going to restart any of these containers. + PodSucceeded PodPhase = "Succeeded" + // PodFailed means that all containers in the pod have terminated, and at least one container has + // terminated in a failure (exited with a non-zero exit code or was stopped by the system). + PodFailed PodPhase = "Failed" + // PodUnknown means that for some reason the state of the pod could not be obtained, typically due + // to an error in communicating with the host of the pod. + PodUnknown PodPhase = "Unknown" +) + +type PodConditionType string + +// These are valid conditions of pod. +const ( + // PodScheduled represents status of the scheduling process for this pod. + PodScheduled PodConditionType = "PodScheduled" + // PodReady means the pod is able to service requests and should be added to the + // load balancing pools of all matching services. + PodReady PodConditionType = "Ready" + // PodInitialized means that all init containers in the pod have started successfully. + PodInitialized PodConditionType = "Initialized" + // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler + // can't schedule the pod right now, for example due to insufficient resources in the cluster. + PodReasonUnschedulable = "Unschedulable" +) + +type PodCondition struct { + Type PodConditionType + Status ConditionStatus + // +optional + LastProbeTime metav1.Time + // +optional + LastTransitionTime metav1.Time + // +optional + Reason string + // +optional + Message string +} + +// RestartPolicy describes how the container should be restarted. +// Only one of the following restart policies may be specified. +// If none of the following policies is specified, the default one +// is RestartPolicyAlways. +type RestartPolicy string + +const ( + RestartPolicyAlways RestartPolicy = "Always" + RestartPolicyOnFailure RestartPolicy = "OnFailure" + RestartPolicyNever RestartPolicy = "Never" +) + +// PodList is a list of Pods. +type PodList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Pod +} + +// DNSPolicy defines how a pod's DNS will be configured. +type DNSPolicy string + +const ( + // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS + // first, if it is available, then fall back on the default + // (as determined by kubelet) DNS settings. + DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet" + + // DNSClusterFirst indicates that the pod should use cluster DNS + // first unless hostNetwork is true, if it is available, then + // fall back on the default (as determined by kubelet) DNS settings. + DNSClusterFirst DNSPolicy = "ClusterFirst" + + // DNSDefault indicates that the pod should use the default (as + // determined by kubelet) DNS settings. + DNSDefault DNSPolicy = "Default" +) + +// A node selector represents the union of the results of one or more label queries +// over a set of nodes; that is, it represents the OR of the selectors represented +// by the node selector terms. +type NodeSelector struct { + //Required. 
A list of node selector terms. The terms are ORed. + NodeSelectorTerms []NodeSelectorTerm +} + +// A null or empty node selector term matches no objects. +type NodeSelectorTerm struct { + //Required. A list of node selector requirements. The requirements are ANDed. + MatchExpressions []NodeSelectorRequirement +} + +// A node selector requirement is a selector that contains values, a key, and an operator +// that relates the key and values. +type NodeSelectorRequirement struct { + // The label key that the selector applies to. + Key string + // Represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + Operator NodeSelectorOperator + // An array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. If the operator is Gt or Lt, the values + // array must have a single element, which will be interpreted as an integer. + // This array is replaced during a strategic merge patch. + // +optional + Values []string +} + +// A node selector operator is the set of operators that can be used in +// a node selector requirement. +type NodeSelectorOperator string + +const ( + NodeSelectorOpIn NodeSelectorOperator = "In" + NodeSelectorOpNotIn NodeSelectorOperator = "NotIn" + NodeSelectorOpExists NodeSelectorOperator = "Exists" + NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist" + NodeSelectorOpGt NodeSelectorOperator = "Gt" + NodeSelectorOpLt NodeSelectorOperator = "Lt" +) + +// Affinity is a group of affinity scheduling rules. +type Affinity struct { + // Describes node affinity scheduling rules for the pod. + // +optional + NodeAffinity *NodeAffinity + // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + // +optional + PodAffinity *PodAffinity + // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + // +optional + PodAntiAffinity *PodAntiAffinity +} + +// Pod affinity is a group of inter pod affinity scheduling rules. +type PodAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ // +optional + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm +} + +// Pod anti affinity is a group of inter pod anti affinity scheduling rules. +type PodAntiAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm + // The scheduler will prefer to schedule pods to nodes that satisfy + // the anti-affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling anti-affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm +} + +// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +type WeightedPodAffinityTerm struct { + // weight associated with matching the corresponding podAffinityTerm, + // in the range 1-100. + Weight int32 + // Required. A pod affinity term, associated with the corresponding weight. 
+ PodAffinityTerm PodAffinityTerm +} + +// Defines a set of pods (namely those matching the labelSelector +// relative to the given namespace(s)) that this pod should be +// co-located (affinity) or not co-located (anti-affinity) with, +// where co-located is defined as running on a node whose value of +// the label with key <topologyKey> matches that of any node on which +// a pod of the set of pods is running. +type PodAffinityTerm struct { + // A label query over a set of resources, in this case pods. + // +optional + LabelSelector *metav1.LabelSelector + // namespaces specifies which namespaces the labelSelector applies to (matches against); + // null or empty list means "this pod's namespace" + Namespaces []string + // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + // the labelSelector in the specified namespaces, where co-located is defined as running on a node + // whose value of the label with key topologyKey matches that of any node on which any of the + // selected pods is running. + // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" + // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); + // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. + // +optional + TopologyKey string +} + +// Node affinity is a group of node affinity scheduling rules. +type NodeAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // will try to eventually evict the pod from its node. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector + + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // may or may not try to eventually evict the pod from its node. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node matches the corresponding matchExpressions; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm +} + +// An empty preferred scheduling term matches all objects with implicit weight 0 +// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). +type PreferredSchedulingTerm struct { + // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. 
+ Weight int32 + // A node selector term, associated with the corresponding weight. + Preference NodeSelectorTerm +} + +// The node this Taint is attached to has the effect "effect" on +// any pod that does not tolerate the Taint. +type Taint struct { + // Required. The taint key to be applied to a node. + Key string + // Required. The taint value corresponding to the taint key. + // +optional + Value string + // Required. The effect of the taint on pods + // that do not tolerate the taint. + // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + Effect TaintEffect + // TimeAdded represents the time at which the taint was added. + // It is only written for NoExecute taints. + // +optional + TimeAdded metav1.Time +} + +type TaintEffect string + +const ( + // Do not allow new pods to schedule onto the node unless they tolerate the taint, + // but allow all pods submitted to Kubelet without going through the scheduler + // to start, and allow all already-running pods to continue running. + // Enforced by the scheduler. + TaintEffectNoSchedule TaintEffect = "NoSchedule" + // Like TaintEffectNoSchedule, but the scheduler tries not to schedule + // new pods onto the node, rather than prohibiting new pods from scheduling + // onto the node entirely. Enforced by the scheduler. + TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule" + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // Like TaintEffectNoSchedule, but additionally do not allow pods submitted to + // Kubelet without going through the scheduler to start. + // Enforced by Kubelet and the scheduler. + // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit" + // Evict any already-running pods that do not tolerate the taint. + // Currently enforced by NodeController. + TaintEffectNoExecute TaintEffect = "NoExecute" +) + +// The pod this Toleration is attached to tolerates any taint that matches +// the triple <key,value,effect> using the matching operator <operator>. +type Toleration struct { + // Key is the taint key that the toleration applies to. Empty means match all taint keys. + // If the key is empty, operator must be Exists; this combination means to match all values and all keys. + // +optional + Key string + // Operator represents a key's relationship to the value. + // Valid operators are Exists and Equal. Defaults to Equal. + // Exists is equivalent to wildcard for value, so that a pod can + // tolerate all taints of a particular category. + // +optional + Operator TolerationOperator + // Value is the taint value the toleration matches to. + // If the operator is Exists, the value should be empty, otherwise just a regular string. + // +optional + Value string + // Effect indicates the taint effect to match. Empty means match all taint effects. + // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + // +optional + Effect TaintEffect + // TolerationSeconds represents the period of time the toleration (which must be + // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + // it is not set, which means tolerate the taint forever (do not evict). Zero and + // negative values will be treated as 0 (evict immediately) by the system. + // +optional + TolerationSeconds *int64 +} + +// A toleration operator is the set of operators that can be used in a toleration.
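// Editor's note (illustrative sketch, not part of the vendored patch):
// notReadyToleration tolerates a NoExecute taint for five minutes before the
// pod is evicted; the taint key is a placeholder.
func notReadyToleration() Toleration {
	seconds := int64(300)
	return Toleration{
		Key:               "example.com/not-ready", // placeholder taint key
		Operator:          TolerationOpExists,      // match any value for this key
		Effect:            TaintEffectNoExecute,
		TolerationSeconds: &seconds, // evict 300s after a matching taint appears
	}
}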
+type TolerationOperator string + +const ( + TolerationOpExists TolerationOperator = "Exists" + TolerationOpEqual TolerationOperator = "Equal" +) + +// PodSpec is a description of a pod +type PodSpec struct { + Volumes []Volume + // List of initialization containers belonging to the pod. + InitContainers []Container + // List of containers belonging to the pod. + Containers []Container + // +optional + RestartPolicy RestartPolicy + // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + // Value must be non-negative integer. The value zero indicates delete immediately. + // If this value is nil, the default grace period will be used instead. + // The grace period is the duration in seconds after the processes running in the pod are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // +optional + TerminationGracePeriodSeconds *int64 + // Optional duration in seconds relative to the StartTime that the pod may be active on a node + // before the system actively tries to terminate the pod; value must be positive integer + // +optional + ActiveDeadlineSeconds *int64 + // Required: Set DNS policy. + // +optional + DNSPolicy DNSPolicy + // NodeSelector is a selector which must be true for the pod to fit on a node + // +optional + NodeSelector map[string]string + + // ServiceAccountName is the name of the ServiceAccount to use to run this pod + // The pod will be allowed to use secrets referenced by the ServiceAccount + ServiceAccountName string + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + // +optional + AutomountServiceAccountToken *bool + + // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + // the scheduler simply schedules this pod onto that node, assuming that it fits resource + // requirements. + // +optional + NodeName string + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. See type description for default values of each field. + // +optional + SecurityContext *PodSecurityContext + // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + // If specified, these secrets will be passed to individual puller implementations for them to use. For example, + // in the case of docker, only DockerConfig type secrets are honored. + // +optional + ImagePullSecrets []LocalObjectReference + // Specifies the hostname of the Pod. + // If not specified, the pod's hostname will be set to a system-defined value. + // +optional + Hostname string + // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". + // If not specified, the pod will not have a domainname at all. + // +optional + Subdomain string + // If specified, the pod's scheduling constraints + // +optional + Affinity *Affinity + // If specified, the pod will be dispatched by specified scheduler. + // If not specified, the pod will be dispatched by default scheduler. + // +optional + SchedulerName string + // If specified, the pod's tolerations. 
+ // +optional + Tolerations []Toleration +} + +// Sysctl defines a kernel parameter to be set +type Sysctl struct { + // Name of a property to set + Name string + // Value of a property to set + Value string +} + +// PodSecurityContext holds pod-level security attributes and common container settings. +// Some fields are also present in container.securityContext. Field values of +// container.securityContext take precedence over field values of PodSecurityContext. +type PodSecurityContext struct { + // Use the host's network namespace. If this option is set, the ports that will be + // used must be specified. + // Optional: Default to false + // +k8s:conversion-gen=false + // +optional + HostNetwork bool + // Use the host's pid namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostPID bool + // Use the host's ipc namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostIPC bool + // The SELinux context to be applied to all containers. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in SecurityContext. If set in + // both SecurityContext and PodSecurityContext, the value specified in SecurityContext + // takes precedence for that container. + // +optional + SELinuxOptions *SELinuxOptions + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence + // for that container. + // +optional + RunAsUser *int64 + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsNonRoot *bool + // A list of groups applied to the first process run in each container, in addition + // to the container's primary GID. If unspecified, no groups will be added to + // any container. + // +optional + SupplementalGroups []int64 + // A special supplemental group that applies to all containers in a pod. + // Some volume types allow the Kubelet to change the ownership of that volume + // to be owned by the pod: + // + // 1. The owning GID will be the FSGroup + // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + // 3. The permission bits are OR'd with rw-rw---- + // + // If unset, the Kubelet will not modify the ownership and permissions of any volume. + // +optional + FSGroup *int64 +} + +// PodQOSClass defines the supported qos classes of Pods. +type PodQOSClass string + +const ( + // PodQOSGuaranteed is the Guaranteed qos class. + PodQOSGuaranteed PodQOSClass = "Guaranteed" + // PodQOSBurstable is the Burstable qos class. + PodQOSBurstable PodQOSClass = "Burstable" + // PodQOSBestEffort is the BestEffort qos class. + PodQOSBestEffort PodQOSClass = "BestEffort" +) + +// PodStatus represents information about the status of a pod. Status may trail the actual +// state of a system. 
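// Editor's note (illustrative sketch, not part of the vendored patch):
// examplePodSpec ties the pieces above together; exampleContainer and
// notReadyToleration are the hypothetical helpers sketched earlier, and the
// non-root requirement shows one pod-level security setting.
func examplePodSpec() PodSpec {
	nonRoot := true
	return PodSpec{
		Containers:    []Container{exampleContainer()},
		RestartPolicy: RestartPolicyAlways,
		DNSPolicy:     DNSClusterFirst,
		Tolerations:   []Toleration{notReadyToleration()},
		SecurityContext: &PodSecurityContext{
			RunAsNonRoot: &nonRoot, // kubelet refuses to start containers running as UID 0
		},
	}
}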
+type PodStatus struct { + // +optional + Phase PodPhase + // +optional + Conditions []PodCondition + // A human readable message indicating details about why the pod is in this state. + // +optional + Message string + // A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk' + // +optional + Reason string + + // +optional + HostIP string + // +optional + PodIP string + + // Date and time at which the object was acknowledged by the Kubelet. + // This is before the Kubelet pulled the container image(s) for the pod. + // +optional + StartTime *metav1.Time + // +optional + QOSClass PodQOSClass + + // The list has one entry per init container in the manifest. The most recent successful + // init container will have ready = true, the most recently started container will have + // startTime set. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses + InitContainerStatuses []ContainerStatus + // The list has one entry per container in the manifest. Each entry is + // currently the output of `docker inspect`. This output format is *not* + // final and should not be relied upon. + // TODO: Make real decisions about what our info should look like. Re-enable fuzz test + // when we have done this. + // +optional + ContainerStatuses []ContainerStatus +} + +// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded +type PodStatusResult struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + // Status represents the current information about a pod. This data may not be up + // to date. + // +optional + Status PodStatus +} + +// +genclient=true + +// Pod is a collection of containers, used as either input (create, update) or as output (list, get). +type Pod struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a pod. + // +optional + Spec PodSpec + + // Status represents the current information about a pod. This data may not be up + // to date. + // +optional + Status PodStatus +} + +// PodTemplateSpec describes the data a pod should have when created from a template +type PodTemplateSpec struct { + // Metadata of the pods created from this template. + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a pod. + // +optional + Spec PodSpec +} + +// +genclient=true + +// PodTemplate describes a template for creating copies of a predefined pod. +type PodTemplate struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Template defines the pods that will be created from this pod template + // +optional + Template PodTemplateSpec +} + +// PodTemplateList is a list of PodTemplates. +type PodTemplateList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []PodTemplate +} + +// ReplicationControllerSpec is the specification of a replication controller. +// As the internal representation of a replication controller, it may have either +// a TemplateRef or a Template set. +type ReplicationControllerSpec struct { + // Replicas is the number of desired replicas. + Replicas int32 + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 + + // Selector is a label query over pods that should match the Replicas count. 
+ Selector map[string]string + + // TemplateRef is a reference to an object that describes the pod that will be created if + // insufficient replicas are detected. This reference is ignored if a Template is set. + // Must be set before converting to a versioned API object + // +optional + //TemplateRef *ObjectReference + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. Internally, this takes precedence over a + // TemplateRef. + // +optional + Template *PodTemplateSpec +} + +// ReplicationControllerStatus represents the current status of a replication +// controller. +type ReplicationControllerStatus struct { + // Replicas is the number of actual replicas. + Replicas int32 + + // The number of pods that have labels matching the labels of the pod template of the replication controller. + // +optional + FullyLabeledReplicas int32 + + // The number of ready replicas for this replication controller. + // +optional + ReadyReplicas int32 + + // The number of available replicas (ready for at least minReadySeconds) for this replication controller. + // +optional + AvailableReplicas int32 + + // ObservedGeneration is the most recent generation observed by the controller. + // +optional + ObservedGeneration int64 + + // Represents the latest available observations of a replication controller's current state. + // +optional + Conditions []ReplicationControllerCondition +} + +type ReplicationControllerConditionType string + +// These are valid conditions of a replication controller. +const ( + // ReplicationControllerReplicaFailure is added in a replication controller when one of its pods + // fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors, + // etc. or deleted due to kubelet being down or finalizers are failing. + ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure" +) + +// ReplicationControllerCondition describes the state of a replication controller at a certain point. +type ReplicationControllerCondition struct { + // Type of replication controller condition. + Type ReplicationControllerConditionType + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time + // The reason for the condition's last transition. + // +optional + Reason string + // A human readable message indicating details about the transition. + // +optional + Message string +} + +// +genclient=true + +// ReplicationController represents the configuration of a replication controller. +type ReplicationController struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired behavior of this replication controller. + // +optional + Spec ReplicationControllerSpec + + // Status is the current status of this replication controller. This data may be + // out of date by some window of time. + // +optional + Status ReplicationControllerStatus +} + +// ReplicationControllerList is a collection of replication controllers. +type ReplicationControllerList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ReplicationController +} + +const ( + // ClusterIPNone - do not assign a cluster IP + // no proxying required and no environment variables should be created for pods + ClusterIPNone = "None" +) + +// ServiceList holds a list of services. 
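// Editor's note (illustrative sketch, not part of the vendored patch):
// exampleRCSpec shows a ReplicationControllerSpec whose Selector matches the
// labels placed on the pod template; metav1 is the ObjectMeta package this
// file already imports (its Labels field is assumed), and examplePodSpec is
// the hypothetical helper sketched earlier.
func exampleRCSpec() ReplicationControllerSpec {
	labels := map[string]string{"app": "web"} // placeholder label set
	return ReplicationControllerSpec{
		Replicas: 3,
		Selector: labels, // must match the template's labels
		Template: &PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{Labels: labels},
			Spec:       examplePodSpec(),
		},
	}
}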
+type ServiceList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Service +} + +// Session Affinity Type string +type ServiceAffinity string + +const ( + // ServiceAffinityClientIP is the Client IP based. + ServiceAffinityClientIP ServiceAffinity = "ClientIP" + + // ServiceAffinityNone - no session affinity. + ServiceAffinityNone ServiceAffinity = "None" +) + +// Service Type string describes ingress methods for a service +type ServiceType string + +const ( + // ServiceTypeClusterIP means a service will only be accessible inside the + // cluster, via the ClusterIP. + ServiceTypeClusterIP ServiceType = "ClusterIP" + + // ServiceTypeNodePort means a service will be exposed on one port of + // every node, in addition to 'ClusterIP' type. + ServiceTypeNodePort ServiceType = "NodePort" + + // ServiceTypeLoadBalancer means a service will be exposed via an + // external load balancer (if the cloud provider supports it), in addition + // to 'NodePort' type. + ServiceTypeLoadBalancer ServiceType = "LoadBalancer" + + // ServiceTypeExternalName means a service consists of only a reference to + // an external name that kubedns or equivalent will return as a CNAME + // record, with no exposing or proxying of any pods involved. + ServiceTypeExternalName ServiceType = "ExternalName" +) + +// ServiceStatus represents the current status of a service +type ServiceStatus struct { + // LoadBalancer contains the current status of the load-balancer, + // if one is present. + // +optional + LoadBalancer LoadBalancerStatus +} + +// LoadBalancerStatus represents the status of a load-balancer +type LoadBalancerStatus struct { + // Ingress is a list containing ingress points for the load-balancer; + // traffic intended for the service should be sent to these ingress points. + // +optional + Ingress []LoadBalancerIngress +} + +// LoadBalancerIngress represents the status of a load-balancer ingress point: +// traffic intended for the service should be sent to an ingress point. +type LoadBalancerIngress struct { + // IP is set for load-balancer ingress points that are IP based + // (typically GCE or OpenStack load-balancers) + // +optional + IP string + + // Hostname is set for load-balancer ingress points that are DNS based + // (typically AWS load-balancers) + // +optional + Hostname string +} + +// ServiceSpec describes the attributes that a user creates on a service +type ServiceSpec struct { + // Type determines how the Service is exposed. Defaults to ClusterIP. Valid + // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + // "ExternalName" maps to the specified externalName. + // "ClusterIP" allocates a cluster-internal IP address for load-balancing to + // endpoints. Endpoints are determined by the selector or if that is not + // specified, by manual construction of an Endpoints object. If clusterIP is + // "None", no virtual IP is allocated and the endpoints are published as a + // set of endpoints rather than a stable IP. + // "NodePort" builds on ClusterIP and allocates a port on every node which + // routes to the clusterIP. + // "LoadBalancer" builds on NodePort and creates an + // external load-balancer (if supported in the current cloud) which routes + // to the clusterIP. + // More info: http://kubernetes.io/docs/user-guide/services#overview + // +optional + Type ServiceType + + // Required: The list of ports that are exposed by this service. + Ports []ServicePort + + // Route service traffic to pods with label keys and values matching this + // selector. 
If empty or not present, the service is assumed to have an + // external process managing its endpoints, which Kubernetes will not + // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + // Ignored if type is ExternalName. + // More info: http://kubernetes.io/docs/user-guide/services#overview + Selector map[string]string + + // ClusterIP is the IP address of the service and is usually assigned + // randomly by the master. If an address is specified manually and is not in + // use by others, it will be allocated to the service; otherwise, creation + // of the service will fail. This field can not be changed through updates. + // Valid values are "None", empty string (""), or a valid IP address. "None" + // can be specified for headless services when proxying is not required. + // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if + // type is ExternalName. + // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + // +optional + ClusterIP string + + // ExternalName is the external reference that kubedns or equivalent will + // return as a CNAME record for this service. No proxying will be involved. + // Must be a valid DNS name and requires Type to be ExternalName. + ExternalName string + + // ExternalIPs are used by external load balancers, or can be set by + // users to handle external traffic that arrives at a node. + // +optional + ExternalIPs []string + + // Only applies to Service Type: LoadBalancer + // LoadBalancer will get created with the IP specified in this field. + // This feature depends on whether the underlying cloud-provider supports specifying + // the loadBalancerIP when a load balancer is created. + // This field will be ignored if the cloud-provider does not support the feature. + // +optional + LoadBalancerIP string + + // Optional: Supports "ClientIP" and "None". Used to maintain session affinity. + // +optional + SessionAffinity ServiceAffinity + + // Optional: If specified and supported by the platform, traffic through the cloud-provider + // load-balancer will be restricted to the specified client IPs. This field will be ignored if the + // cloud-provider does not support the feature. + // +optional + LoadBalancerSourceRanges []string +} + +type ServicePort struct { + // Optional if only one ServicePort is defined on this service: The + // name of this port within the service. This must be a DNS_LABEL. + // All ports within a ServiceSpec must have unique names. This maps to + // the 'Name' field in EndpointPort objects. + Name string + + // The IP protocol for this port. Supports "TCP" and "UDP". + Protocol Protocol + + // The port that will be exposed on the service. + Port int32 + + // Optional: The target port on pods selected by this service. If this + // is a string, it will be looked up as a named port in the target + // Pod's container ports. If this is not specified, the value + // of the 'port' field is used (an identity map). + // This field is ignored for services with clusterIP=None, and should be + // omitted or set equal to the 'port' field. + TargetPort intstr.IntOrString + + // The port on each node on which this service is exposed. + // Default is to auto-allocate a port if the ServiceType of this Service requires one. 
+ NodePort int32 +} + +// +genclient=true + +// Service is a named abstraction of software service (for example, mysql) consisting of local port +// (for example 3306) that the proxy listens on, and the selector that determines which pods +// will answer requests sent through the proxy. +type Service struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a service. + // +optional + Spec ServiceSpec + + // Status represents the current status of a service. + // +optional + Status ServiceStatus +} + +// +genclient=true + +// ServiceAccount binds together: +// * a name, understood by users, and perhaps by peripheral systems, for an identity +// * a principal that can be authenticated and authorized +// * a set of secrets +type ServiceAccount struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount + Secrets []ObjectReference + + // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images + // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets + // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. + // +optional + ImagePullSecrets []LocalObjectReference + + // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. + // Can be overridden at the pod level. + // +optional + AutomountServiceAccountToken *bool +} + +// ServiceAccountList is a list of ServiceAccount objects +type ServiceAccountList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ServiceAccount +} + +// +genclient=true + +// Endpoints is a collection of endpoints that implement the actual service. Example: +// Name: "mysvc", +// Subsets: [ +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// }, +// { +// Addresses: [{"ip": "10.10.3.3"}], +// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] +// }, +// ] +type Endpoints struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // The set of all endpoints is the union of all subsets. + Subsets []EndpointSubset +} + +// EndpointSubset is a group of addresses with a common set of ports. The +// expanded set of endpoints is the Cartesian product of Addresses x Ports. +// For example, given: +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// } +// The resulting set of endpoints can be viewed as: +// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], +// b: [ 10.10.1.1:309, 10.10.2.2:309 ] +type EndpointSubset struct { + Addresses []EndpointAddress + NotReadyAddresses []EndpointAddress + Ports []EndpointPort +} + +// EndpointAddress is a tuple that describes single IP address. +type EndpointAddress struct { + // The IP of this endpoint. + // IPv6 is also accepted but not fully supported on all platforms. Also, certain + // kubernetes components, like kube-proxy, are not IPv6 ready. + // TODO: This should allow hostname or IP, see #4447. + IP string + // Optional: Hostname of this endpoint + // Meant to be used by DNS servers etc. + // +optional + Hostname string + // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. 
+ // +optional + NodeName *string + // Optional: The kubernetes object related to the entry point. + TargetRef *ObjectReference +} + +// EndpointPort is a tuple that describes a single port. +type EndpointPort struct { + // The name of this port (corresponds to ServicePort.Name). Optional + // if only one port is defined. Must be a DNS_LABEL. + Name string + + // The port number. + Port int32 + + // The IP protocol for this port. + Protocol Protocol +} + +// EndpointsList is a list of endpoints. +type EndpointsList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Endpoints +} + +// NodeSpec describes the attributes that a node is created with. +type NodeSpec struct { + // PodCIDR represents the pod IP range assigned to the node + // Note: assigning IP ranges to nodes might need to be revisited when we support migratable IPs. + // +optional + PodCIDR string + + // External ID of the node assigned by some machine database (e.g. a cloud provider) + // +optional + ExternalID string + + // ID of the node assigned by the cloud provider + // Note: format is "<ProviderName>://<ProviderSpecificNodeID>" + // +optional + ProviderID string + + // Unschedulable controls node schedulability of new pods. By default node is schedulable. + // +optional + Unschedulable bool + + // If specified, the node's taints. + // +optional + Taints []Taint +} + +// DaemonEndpoint contains information about a single Daemon endpoint. +type DaemonEndpoint struct { + /* + The port tag was not properly in quotes in earlier releases, so it must be + uppercased for backwards compat (since it was falling back to var name of + 'Port'). + */ + + // Port number of the given endpoint. + Port int32 +} + +// NodeDaemonEndpoints lists ports opened by daemons running on the Node. +type NodeDaemonEndpoints struct { + // Endpoint on which Kubelet is listening. + // +optional + KubeletEndpoint DaemonEndpoint +} + +// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. +type NodeSystemInfo struct { + // MachineID reported by the node. For unique machine identification + // in the cluster this field is preferred. Learn more from man(5) + // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html + MachineID string + // SystemUUID reported by the node. For unique machine identification + // MachineID is preferred. This field is specific to Red Hat hosts + // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html + SystemUUID string + // Boot ID reported by the node. + BootID string + // Kernel Version reported by the node. + KernelVersion string + // OS Image reported by the node. + OSImage string + // ContainerRuntime Version reported by the node. + ContainerRuntimeVersion string + // Kubelet Version reported by the node. + KubeletVersion string + // KubeProxy Version reported by the node. + KubeProxyVersion string + // The Operating System reported by the node + OperatingSystem string + // The Architecture reported by the node + Architecture string +} + +// NodeStatus is information about the current status of a node. +type NodeStatus struct { + // Capacity represents the total resources of a node. + // +optional + Capacity ResourceList + // Allocatable represents the resources of a node that are available for scheduling. + // +optional + Allocatable ResourceList + // NodePhase is the current lifecycle phase of the node. + // +optional + Phase NodePhase + // Conditions is an array of current node conditions. 
+ // +optional + Conditions []NodeCondition + // Queried from cloud provider, if available. + // +optional + Addresses []NodeAddress + // Endpoints of daemons running on the Node. + // +optional + DaemonEndpoints NodeDaemonEndpoints + // Set of ids/uuids to uniquely identify the node. + // +optional + NodeInfo NodeSystemInfo + // List of container images on this node + // +optional + Images []ContainerImage + // List of attachable volumes in use (mounted) by the node. + // +optional + VolumesInUse []UniqueVolumeName + // List of volumes that are attached to the node. + // +optional + VolumesAttached []AttachedVolume +} + +type UniqueVolumeName string + +// AttachedVolume describes a volume attached to a node +type AttachedVolume struct { + // Name of the attached volume + Name UniqueVolumeName + + // DevicePath represents the device path where the volume should be available + DevicePath string +} + +// AvoidPods describes pods that should avoid this node. This is the value for a +// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and +// will eventually become a field of NodeStatus. +type AvoidPods struct { + // Bounded-sized list of signatures of pods that should avoid this node, sorted + // in timestamp order from oldest to newest. Size of the slice is unspecified. + // +optional + PreferAvoidPods []PreferAvoidPodsEntry +} + +// Describes a class of pods that should avoid this node. +type PreferAvoidPodsEntry struct { + // The class of pods. + PodSignature PodSignature + // Time at which this entry was added to the list. + // +optional + EvictionTime metav1.Time + // (brief) reason why this entry was added to the list. + // +optional + Reason string + // Human readable message indicating why this entry was added to the list. + // +optional + Message string +} + +// Describes the class of pods that should avoid this node. +// Exactly one field should be set. +type PodSignature struct { + // Reference to controller whose pods should avoid this node. + // +optional + PodController *metav1.OwnerReference +} + +// Describe a container image +type ContainerImage struct { + // Names by which this image is known. + Names []string + // The size of the image in bytes. + // +optional + SizeBytes int64 +} + +type NodePhase string + +// These are the valid phases of node. +const ( + // NodePending means the node has been created/added by the system, but not configured. + NodePending NodePhase = "Pending" + // NodeRunning means the node has been configured and has Kubernetes components running. + NodeRunning NodePhase = "Running" + // NodeTerminated means the node has been removed from the cluster. + NodeTerminated NodePhase = "Terminated" +) + +type NodeConditionType string + +// These are valid conditions of node. Currently, we don't have enough information to decide +// node condition. In the future, we will add more. The proposed set of conditions are: +// NodeReady, NodeReachable +const ( + // NodeReady means kubelet is healthy and ready to accept pods. + NodeReady NodeConditionType = "Ready" + // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk + // space on the node. + NodeOutOfDisk NodeConditionType = "OutOfDisk" + // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. + NodeMemoryPressure NodeConditionType = "MemoryPressure" + // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. 
+ NodeDiskPressure NodeConditionType = "DiskPressure" + // NodeNetworkUnavailable means that network for the node is not correctly configured. + NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" +) + +type NodeCondition struct { + Type NodeConditionType + Status ConditionStatus + // +optional + LastHeartbeatTime metav1.Time + // +optional + LastTransitionTime metav1.Time + // +optional + Reason string + // +optional + Message string +} + +type NodeAddressType string + +// These are valid address types of node. NodeLegacyHostIP is used to transit +// from out-dated HostIP field to NodeAddress. +const ( + // Deprecated: NodeLegacyHostIP will be removed in 1.7. + NodeLegacyHostIP NodeAddressType = "LegacyHostIP" + NodeHostName NodeAddressType = "Hostname" + NodeExternalIP NodeAddressType = "ExternalIP" + NodeInternalIP NodeAddressType = "InternalIP" + NodeExternalDNS NodeAddressType = "ExternalDNS" + NodeInternalDNS NodeAddressType = "InternalDNS" +) + +type NodeAddress struct { + Type NodeAddressType + Address string +} + +// NodeResources is an object for conveying resource information about a node. +// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. +type NodeResources struct { + // Capacity represents the available resources of a node + // +optional + Capacity ResourceList +} + +// ResourceName is the name identifying various resources in a ResourceList. +type ResourceName string + +// Resource names must be no more than 63 characters, consisting of upper- or lower-case alphanumeric characters, +// with the -, _, and . characters allowed anywhere, except the first or last character. +// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than +// camel case, separating compound words. +// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name. +const ( + // CPU, in cores. (500m = .5 cores) + ResourceCPU ResourceName = "cpu" + // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceMemory ResourceName = "memory" + // Volume size, in bytes (e.g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) + ResourceStorage ResourceName = "storage" + // NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned. + ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu" + // Number of Pods that may be running on this Node: see ResourcePods +) + +const ( + // Namespace prefix for opaque counted resources (alpha). + ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-" +) + +// ResourceList is a set of (resource name, quantity) pairs. +type ResourceList map[ResourceName]resource.Quantity + +// +genclient=true +// +nonNamespaced=true + +// Node is a worker node in Kubernetes +// The name of the node according to etcd is in ObjectMeta.Name. +type Node struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a node. + // +optional + Spec NodeSpec + + // Status describes the current status of a Node + // +optional + Status NodeStatus +} + +// NodeList is a list of nodes. 
+type NodeList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Node +} + +// NamespaceSpec describes the attributes on a Namespace +type NamespaceSpec struct { + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage + Finalizers []FinalizerName +} + +// FinalizerName is the name identifying a finalizer during namespace lifecycle. +type FinalizerName string + +// These are internal finalizer values to Kubernetes, must be a qualified name unless defined here or +// in metav1. +const ( + FinalizerKubernetes FinalizerName = "kubernetes" +) + +// NamespaceStatus is information about the current status of a Namespace. +type NamespaceStatus struct { + // Phase is the current lifecycle phase of the namespace. + // +optional + Phase NamespacePhase +} + +type NamespacePhase string + +// These are the valid phases of a namespace. +const ( + // NamespaceActive means the namespace is available for use in the system + NamespaceActive NamespacePhase = "Active" + // NamespaceTerminating means the namespace is undergoing graceful termination + NamespaceTerminating NamespacePhase = "Terminating" +) + +// +genclient=true +// +nonNamespaced=true + +// A namespace provides a scope for Names. +// Use of multiple namespaces is optional +type Namespace struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of the Namespace. + // +optional + Spec NamespaceSpec + + // Status describes the current status of a Namespace + // +optional + Status NamespaceStatus +} + +// NamespaceList is a list of Namespaces. +type NamespaceList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Namespace +} + +// Binding ties one object to another - for example, a pod is bound to a node by a scheduler. +type Binding struct { + metav1.TypeMeta + // ObjectMeta describes the object that is being bound. + // +optional + metav1.ObjectMeta + + // Target is the object to bind to. + Target ObjectReference +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions struct { + // Specifies the target UID. + // +optional + UID *types.UID +} + +// DeletionPropagation decides whether and how garbage collection will be performed. +type DeletionPropagation string + +const ( + // Orphans the dependents. + DeletePropagationOrphan DeletionPropagation = "Orphan" + // Deletes the object from the key-value store, the garbage collector will delete the dependents in the background. + DeletePropagationBackground DeletionPropagation = "Background" + // The object exists in the key-value store until the garbage collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the key-value store. + // API server will put the "DeletingDependents" finalizer on the object, and set its deletionTimestamp. + // This policy is cascading, i.e., the dependents will be deleted with Foreground. + DeletePropagationForeground DeletionPropagation = "Foreground" +) + +// DeleteOptions may be provided when deleting an API object +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +type DeleteOptions struct { + metav1.TypeMeta + + // Optional duration in seconds before the object should be deleted. Value must be a non-negative integer. + // The value zero indicates delete immediately. If this value is nil, the default grace period for the + // specified type will be used. 
+ // +optional + GracePeriodSeconds *int64 + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. + // +optional + Preconditions *Preconditions + + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. + // +optional + OrphanDependents *bool + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // +optional + PropagationPolicy *DeletionPropagation +} + +// ListOptions is the query options to a standard REST list call, and has future support for +// watch calls. +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +type ListOptions struct { + metav1.TypeMeta + + // A selector based on labels + LabelSelector labels.Selector + // A selector based on fields + FieldSelector fields.Selector + // If true, watch for changes to this list + Watch bool + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + ResourceVersion string + // Timeout for the list/watch call. + TimeoutSeconds *int64 +} + +// PodLogOptions is the query options for a Pod's logs REST call +type PodLogOptions struct { + metav1.TypeMeta + + // Container for which to return logs + Container string + // If true, follow the logs for the pod + Follow bool + // If true, return previous terminated container logs + Previous bool + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceSeconds *int64 + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceTime *metav1.Time + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. + Timestamps bool + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + TailLines *int64 + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. 
+ LimitBytes *int64 +} + +// PodAttachOptions is the query options to a Pod's remote attach call +// TODO: merge w/ PodExecOptions below for stdin, stdout, etc +type PodAttachOptions struct { + metav1.TypeMeta + + // Stdin if true indicates that stdin is to be redirected for the attach call + // +optional + Stdin bool + + // Stdout if true indicates that stdout is to be redirected for the attach call + // +optional + Stdout bool + + // Stderr if true indicates that stderr is to be redirected for the attach call + // +optional + Stderr bool + + // TTY if true indicates that a tty will be allocated for the attach call + // +optional + TTY bool + + // Container to attach to. + // +optional + Container string +} + +// PodExecOptions is the query options to a Pod's remote exec call +type PodExecOptions struct { + metav1.TypeMeta + + // Stdin if true indicates that stdin is to be redirected for the exec call + Stdin bool + + // Stdout if true indicates that stdout is to be redirected for the exec call + Stdout bool + + // Stderr if true indicates that stderr is to be redirected for the exec call + Stderr bool + + // TTY if true indicates that a tty will be allocated for the exec call + TTY bool + + // Container in which to execute the command. + Container string + + // Command is the remote command to execute; argv array; not executed within a shell. + Command []string +} + +// PodPortForwardOptions is the query options to a Pod's port forward call +type PodPortForwardOptions struct { + metav1.TypeMeta + + // The list of ports to forward + // +optional + Ports []int32 +} + +// PodProxyOptions is the query options to a Pod's proxy call +type PodProxyOptions struct { + metav1.TypeMeta + + // Path is the URL path to use for the current proxy request + Path string +} + +// NodeProxyOptions is the query options to a Node's proxy call +type NodeProxyOptions struct { + metav1.TypeMeta + + // Path is the URL path to use for the current proxy request + Path string +} + +// ServiceProxyOptions is the query options to a Service's proxy call. +type ServiceProxyOptions struct { + metav1.TypeMeta + + // Path is the part of URLs that include service endpoints, suffixes, + // and parameters to use for the current proxy request to service. + // For example, the whole request URL is + // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. + // Path is _search?q=user:kimchy. + Path string +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +type ObjectReference struct { + // +optional + Kind string + // +optional + Namespace string + // +optional + Name string + // +optional + UID types.UID + // +optional + APIVersion string + // +optional + ResourceVersion string + + // Optional. If referring to a piece of an object instead of an entire object, this string + // should contain information to identify the sub-object. For example, if the object + // reference is to a container within a pod, this would take on a value like: + // "spec.containers{name}" (where "name" refers to the name of the container that triggered + // the event) or if no container name is specified "spec.containers[2]" (container with + // index 2 in this pod). This syntax is chosen only to have some well-defined way of + // referencing a part of an object. + // TODO: this design is not final and this field is subject to change in the future. 
+ // +optional + FieldPath string +} + +// LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. +type LocalObjectReference struct { + //TODO: Add other useful fields. apiVersion, kind, uid? + Name string +} + +type SerializedReference struct { + metav1.TypeMeta + // +optional + Reference ObjectReference +} + +type EventSource struct { + // Component from which the event is generated. + // +optional + Component string + // Node name on which the event is generated. + // +optional + Host string +} + +// Valid values for event types (new types could be added in future) +const ( + // Information only and will not cause any problems + EventTypeNormal string = "Normal" + // These events are to warn that something might go wrong + EventTypeWarning string = "Warning" +) + +// +genclient=true + +// Event is a report of an event somewhere in the cluster. +// TODO: Decide whether to store these separately or with the object they apply to. +type Event struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Required. The object that this event is about. + // +optional + InvolvedObject ObjectReference + + // Optional; this should be a short, machine understandable string that gives the reason + // for this event being generated. For example, if the event is reporting that a container + // can't start, the Reason might be "ImageNotFound". + // TODO: provide exact specification for format. + // +optional + Reason string + + // Optional. A human-readable description of the status of this operation. + // TODO: decide on maximum length. + // +optional + Message string + + // Optional. The component reporting this event. Should be a short machine understandable string. + // +optional + Source EventSource + + // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) + // +optional + FirstTimestamp metav1.Time + + // The time at which the most recent occurrence of this event was recorded. + // +optional + LastTimestamp metav1.Time + + // The number of times this event has occurred. + // +optional + Count int32 + + // Type of this event (Normal, Warning), new types could be added in the future. + // +optional + Type string +} + +// EventList is a list of events. +type EventList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Event +} + +// List holds a list of objects, which may not be known by the server. +type List struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []runtime.Object +} + +// A type of object that is limited +type LimitType string + +const ( + // Limit that applies to all pods in a namespace + LimitTypePod LimitType = "Pod" + // Limit that applies to all containers in a namespace + LimitTypeContainer LimitType = "Container" + // Limit that applies to all persistent volume claims in a namespace + LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" +) + +// LimitRangeItem defines a min/max usage limit for any resource that matches on kind +type LimitRangeItem struct { + // Type of resource that this limit applies to + // +optional + Type LimitType + // Max usage constraints on this kind by resource name + // +optional + Max ResourceList + // Min usage constraints on this kind by resource name + // +optional + Min ResourceList + // Default resource requirement limit value by resource name. + // +optional + Default ResourceList + // DefaultRequest resource requirement request value by resource name. 
+ // +optional + DefaultRequest ResourceList + // MaxLimitRequestRatio represents the max burst value for the named resource + // +optional + MaxLimitRequestRatio ResourceList +} + +// LimitRangeSpec defines a min/max usage limit for resources that match on kind +type LimitRangeSpec struct { + // Limits is the list of LimitRangeItem objects that are enforced + Limits []LimitRangeItem +} + +// +genclient=true + +// LimitRange sets resource usage limits for each kind of resource in a Namespace +type LimitRange struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the limits enforced + // +optional + Spec LimitRangeSpec +} + +// LimitRangeList is a list of LimitRange items. +type LimitRangeList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is a list of LimitRange objects + Items []LimitRange +} + +// The following identify resource constants for Kubernetes object types +const ( + // Pods, number + ResourcePods ResourceName = "pods" + // Services, number + ResourceServices ResourceName = "services" + // ReplicationControllers, number + ResourceReplicationControllers ResourceName = "replicationcontrollers" + // ResourceQuotas, number + ResourceQuotas ResourceName = "resourcequotas" + // ResourceSecrets, number + ResourceSecrets ResourceName = "secrets" + // ResourceConfigMaps, number + ResourceConfigMaps ResourceName = "configmaps" + // ResourcePersistentVolumeClaims, number + ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" + // ResourceServicesNodePorts, number + ResourceServicesNodePorts ResourceName = "services.nodeports" + // ResourceServicesLoadBalancers, number + ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" + // CPU request, in cores. (500m = .5 cores) + ResourceRequestsCPU ResourceName = "requests.cpu" + // Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceRequestsMemory ResourceName = "requests.memory" + // Storage request, in bytes + ResourceRequestsStorage ResourceName = "requests.storage" + // CPU limit, in cores. (500m = .5 cores) + ResourceLimitsCPU ResourceName = "limits.cpu" + // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceLimitsMemory ResourceName = "limits.memory" +) + +// A ResourceQuotaScope defines a filter that must match each object tracked by a quota +type ResourceQuotaScope string + +const ( + // Match all pod objects where spec.activeDeadlineSeconds + ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" + // Match all pod objects where !spec.activeDeadlineSeconds + ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating" + // Match all pod objects that have best effort quality of service + ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" + // Match all pod objects that do not have best effort quality of service + ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" +) + +// ResourceQuotaSpec defines the desired hard limits to enforce for Quota +type ResourceQuotaSpec struct { + // Hard is the set of desired hard limits for each named resource + // +optional + Hard ResourceList + // A collection of filters that must match each object tracked by a quota. + // If not specified, the quota matches all objects. 
+ // +optional + Scopes []ResourceQuotaScope +} + +// ResourceQuotaStatus defines the enforced hard limits and observed use +type ResourceQuotaStatus struct { + // Hard is the set of enforced hard limits for each named resource + // +optional + Hard ResourceList + // Used is the current observed total usage of the resource in the namespace + // +optional + Used ResourceList +} + +// +genclient=true + +// ResourceQuota sets aggregate quota restrictions enforced per namespace +type ResourceQuota struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired quota + // +optional + Spec ResourceQuotaSpec + + // Status defines the actual enforced quota and its current usage + // +optional + Status ResourceQuotaStatus +} + +// ResourceQuotaList is a list of ResourceQuota items +type ResourceQuotaList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is a list of ResourceQuota objects + Items []ResourceQuota +} + +// +genclient=true + +// Secret holds secret data of a certain type. The total bytes of the values in +// the Data field must be less than MaxSecretSize bytes. +type Secret struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN + // or leading dot followed by valid DNS_SUBDOMAIN. + // The serialized form of the secret data is a base64 encoded string, + // representing the arbitrary (possibly non-string) data value here. + // +optional + Data map[string][]byte + + // Used to facilitate programmatic handling of secret data. + // +optional + Type SecretType +} + +const MaxSecretSize = 1 * 1024 * 1024 + +type SecretType string + +const ( + // SecretTypeOpaque is the default; arbitrary user-defined data + SecretTypeOpaque SecretType = "Opaque" + + // SecretTypeServiceAccountToken contains a token that identifies a service account to the API + // + // Required fields: + // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies + // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies + // - Secret.Data["token"] - a token that identifies the service account to the API + SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token" + + // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountNameKey = "kubernetes.io/service-account.name" + // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountUIDKey = "kubernetes.io/service-account.uid" + // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets + ServiceAccountTokenKey = "token" + // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets + ServiceAccountKubeconfigKey = "kubernetes.kubeconfig" + // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets + ServiceAccountRootCAKey = "ca.crt" + // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls + ServiceAccountNamespaceKey = "namespace" + + // SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg + // + // Required fields: + // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file + SecretTypeDockercfg SecretType = 
"kubernetes.io/dockercfg" + + // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets + DockerConfigKey = ".dockercfg" + + // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json + // + // Required fields: + // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file + SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson" + + // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets + DockerConfigJsonKey = ".dockerconfigjson" + + // SecretTypeBasicAuth contains data needed for basic authentication. + // + // Required at least one of fields: + // - Secret.Data["username"] - username used for authentication + // - Secret.Data["password"] - password or token needed for authentication + SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth" + + // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets + BasicAuthUsernameKey = "username" + // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets + BasicAuthPasswordKey = "password" + + // SecretTypeSSHAuth contains data needed for SSH authetication. + // + // Required field: + // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication + SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" + + // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets + SSHAuthPrivateKey = "ssh-privatekey" + + // SecretTypeTLS contains information about a TLS client or server secret. It + // is primarily used with TLS termination of the Ingress resource, but may be + // used in other types. + // + // Required fields: + // - Secret.Data["tls.key"] - TLS private key. + // Secret.Data["tls.crt"] - TLS certificate. + // TODO: Consider supporting different formats, specifying CA/destinationCA. + SecretTypeTLS SecretType = "kubernetes.io/tls" + + // TLSCertKey is the key for tls certificates in a TLS secret. + TLSCertKey = "tls.crt" + // TLSPrivateKeyKey is the key for the private key field in a TLS secret. + TLSPrivateKeyKey = "tls.key" +) + +type SecretList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Secret +} + +// +genclient=true + +// ConfigMap holds configuration data for components or applications to consume. +type ConfigMap struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Data contains the configuration data. + // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot. + // +optional + Data map[string]string +} + +// ConfigMapList is a resource containing a list of ConfigMap objects. +type ConfigMapList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is the list of ConfigMaps. + Items []ConfigMap +} + +// These constants are for remote command execution and port forwarding and are +// used by both the client side and server side components. +// +// This is probably not the ideal place for them, but it didn't seem worth it +// to create pkg/exec and pkg/portforward just to contain a single file with +// constants in it. Suggestions for more appropriate alternatives are +// definitely welcome! 
+const ( + // Enable stdin for remote command execution + ExecStdinParam = "input" + // Enable stdout for remote command execution + ExecStdoutParam = "output" + // Enable stderr for remote command execution + ExecStderrParam = "error" + // Enable TTY for remote command execution + ExecTTYParam = "tty" + // Command to run for remote command execution + ExecCommandParamm = "command" + + // Name of header that specifies stream type + StreamType = "streamType" + // Value for streamType header for stdin stream + StreamTypeStdin = "stdin" + // Value for streamType header for stdout stream + StreamTypeStdout = "stdout" + // Value for streamType header for stderr stream + StreamTypeStderr = "stderr" + // Value for streamType header for data stream + StreamTypeData = "data" + // Value for streamType header for error stream + StreamTypeError = "error" + // Value for streamType header for terminal resize stream + StreamTypeResize = "resize" + + // Name of header that specifies the port being forwarded + PortHeader = "port" + // Name of header that specifies a request ID used to associate the error + // and data streams for a single forwarded connection + PortForwardRequestIDHeader = "requestID" +) + +// Type and constants for component health validation. +type ComponentConditionType string + +// These are the valid conditions for the component. +const ( + ComponentHealthy ComponentConditionType = "Healthy" +) + +type ComponentCondition struct { + Type ComponentConditionType + Status ConditionStatus + // +optional + Message string + // +optional + Error string +} + +// +genclient=true +// +nonNamespaced=true + +// ComponentStatus (and ComponentStatusList) holds the cluster validation info. +type ComponentStatus struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // +optional + Conditions []ComponentCondition +} + +type ComponentStatusList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ComponentStatus +} + +// SecurityContext holds security configuration that will be applied to a container. +// Some fields are present in both SecurityContext and PodSecurityContext. When both +// are set, the values in SecurityContext take precedence. +type SecurityContext struct { + // The capabilities to add/drop when running containers. + // Defaults to the default set of capabilities granted by the container runtime. + // +optional + Capabilities *Capabilities + // Run container in privileged mode. + // Processes in privileged containers are essentially equivalent to root on the host. + // Defaults to false. + // +optional + Privileged *bool + // The SELinux context to be applied to the container. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + SELinuxOptions *SELinuxOptions + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsUser *int64 + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. 
+ // If unset or false, no such validation will be performed. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsNonRoot *bool + // The read-only root filesystem allows you to restrict the locations that an application can write + // files to, ensuring the persistent data can only be written to mounts. + // +optional + ReadOnlyRootFilesystem *bool +} + +// SELinuxOptions are the labels to be applied to the container. +type SELinuxOptions struct { + // SELinux user label + // +optional + User string + // SELinux role label + // +optional + Role string + // SELinux type label + // +optional + Type string + // SELinux level label. + // +optional + Level string +} + +// RangeAllocation is an opaque API object (not exposed to end users) that can be persisted to record +// the global allocation state of the cluster. The schema of Range and Data is generic, in that Range +// should be a string representation of the inputs to a range (for instance, for IP allocation it +// might be a CIDR) and Data is an opaque blob understood by an allocator which is typically a +// binary range. Consumers should use annotations to record additional information (schema version, +// data encoding hints). A range allocation should *ALWAYS* be recreatable at any time by observation +// of the cluster, thus the object is less strongly typed than most. +type RangeAllocation struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + // A string representing a unique label for a range of resources, such as a CIDR "10.0.0.0/8" or + // port range "10000-30000". Range is not strongly schema'd here. The Range is expected to define + // a start and end unless there is an implicit end. + Range string + // A byte array representing the serialized state of a range allocation. Additional clarifiers on + // the type or format of data should be represented with annotations. For IP allocations, this is + // represented as a bit array starting at the base IP of the CIDR in Range, with each bit representing + // a single allocated address (the fifth bit on CIDR 10.0.0.0/8 is 10.0.0.4). + Data []byte +} + +const ( + // "default-scheduler" is the name of the default scheduler. + DefaultSchedulerName = "default-scheduler" + + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // When the --hard-pod-affinity-weight scheduler flag is not specified, + // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. + DefaultHardPodAffinitySymmetricWeight int = 1 + + // When the --failure-domains scheduler flag is not specified, + // DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity. 
+ DefaultFailureDomains string = metav1.LabelHostname + "," + metav1.LabelZoneFailureDomain + "," + metav1.LabelZoneRegion +) diff --git a/vendor/k8s.io/client-go/pkg/api/v1/OWNERS b/vendor/k8s.io/client-go/pkg/api/v1/OWNERS new file mode 100755 index 000000000..fdb84b24a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/OWNERS @@ -0,0 +1,41 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- yujuhong +- brendandburns +- derekwaynecarr +- caesarxuchao +- vishh +- mikedanese +- liggitt +- nikhiljindal +- bprashanth +- gmarek +- erictune +- davidopp +- pmorie +- sttts +- kargakis +- dchen1107 +- saad-ali +- zmerlynn +- luxas +- janetkuo +- justinsb +- roberthbailey +- ncdc +- timstclair +- eparis +- timothysc +- piosz +- jsafrane +- dims +- errordeveloper +- madhusudancs +- krousey +- jayunit100 +- rootfs +- markturansky diff --git a/vendor/k8s.io/client-go/pkg/api/v1/conversion.go b/vendor/k8s.io/client-go/pkg/api/v1/conversion.go new file mode 100644 index 000000000..041517819 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/conversion.go @@ -0,0 +1,785 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "fmt" + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/extensions" +) + +// This is a "fast-path" that avoids reflection for common types. It focuses on the objects that are +// converted the most in the cluster. 
+// TODO: generate one of these for every external API group - this is to prove the impact +func addFastPathConversionFuncs(scheme *runtime.Scheme) error { + scheme.AddGenericConversionFunc(func(objA, objB interface{}, s conversion.Scope) (bool, error) { + switch a := objA.(type) { + case *Pod: + switch b := objB.(type) { + case *api.Pod: + return true, Convert_v1_Pod_To_api_Pod(a, b, s) + } + case *api.Pod: + switch b := objB.(type) { + case *Pod: + return true, Convert_api_Pod_To_v1_Pod(a, b, s) + } + + case *Event: + switch b := objB.(type) { + case *api.Event: + return true, Convert_v1_Event_To_api_Event(a, b, s) + } + case *api.Event: + switch b := objB.(type) { + case *Event: + return true, Convert_api_Event_To_v1_Event(a, b, s) + } + + case *ReplicationController: + switch b := objB.(type) { + case *api.ReplicationController: + return true, Convert_v1_ReplicationController_To_api_ReplicationController(a, b, s) + } + case *api.ReplicationController: + switch b := objB.(type) { + case *ReplicationController: + return true, Convert_api_ReplicationController_To_v1_ReplicationController(a, b, s) + } + + case *Node: + switch b := objB.(type) { + case *api.Node: + return true, Convert_v1_Node_To_api_Node(a, b, s) + } + case *api.Node: + switch b := objB.(type) { + case *Node: + return true, Convert_api_Node_To_v1_Node(a, b, s) + } + + case *Namespace: + switch b := objB.(type) { + case *api.Namespace: + return true, Convert_v1_Namespace_To_api_Namespace(a, b, s) + } + case *api.Namespace: + switch b := objB.(type) { + case *Namespace: + return true, Convert_api_Namespace_To_v1_Namespace(a, b, s) + } + + case *Service: + switch b := objB.(type) { + case *api.Service: + return true, Convert_v1_Service_To_api_Service(a, b, s) + } + case *api.Service: + switch b := objB.(type) { + case *Service: + return true, Convert_api_Service_To_v1_Service(a, b, s) + } + + case *Endpoints: + switch b := objB.(type) { + case *api.Endpoints: + return true, Convert_v1_Endpoints_To_api_Endpoints(a, b, s) + } + case *api.Endpoints: + switch b := objB.(type) { + case *Endpoints: + return true, Convert_api_Endpoints_To_v1_Endpoints(a, b, s) + } + + case *metav1.WatchEvent: + switch b := objB.(type) { + case *metav1.InternalEvent: + return true, metav1.Convert_versioned_Event_to_versioned_InternalEvent(a, b, s) + } + case *metav1.InternalEvent: + switch b := objB.(type) { + case *metav1.WatchEvent: + return true, metav1.Convert_versioned_InternalEvent_to_versioned_Event(a, b, s) + } + } + return false, nil + }) + return nil +} + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + err := scheme.AddConversionFuncs( + Convert_api_Pod_To_v1_Pod, + Convert_api_PodSpec_To_v1_PodSpec, + Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, + Convert_api_ServiceSpec_To_v1_ServiceSpec, + Convert_v1_Pod_To_api_Pod, + Convert_v1_PodSpec_To_api_PodSpec, + Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec, + Convert_v1_Secret_To_api_Secret, + Convert_v1_ServiceSpec_To_api_ServiceSpec, + Convert_v1_ResourceList_To_api_ResourceList, + Convert_v1_ReplicationController_to_extensions_ReplicaSet, + Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec, + Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus, + Convert_extensions_ReplicaSet_to_v1_ReplicationController, + Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec, + Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus, + ) + if 
err != nil { + return err + } + + // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. + for _, k := range []string{ + "Endpoints", + "ResourceQuota", + "PersistentVolumeClaim", + "Service", + "ServiceAccount", + "ConfigMap", + } { + kind := k // don't close over range variables + err = scheme.AddFieldLabelConversionFunc("v1", kind, + func(label, value string) (string, string, error) { + switch label { + case "metadata.namespace", + "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label %q not supported for %q", label, kind) + } + }, + ) + if err != nil { + return err + } + } + + // Add field conversion funcs. + err = scheme.AddFieldLabelConversionFunc("v1", "Pod", + func(label, value string) (string, string, error) { + switch label { + case "metadata.annotations", + "metadata.labels", + "metadata.name", + "metadata.namespace", + "spec.nodeName", + "spec.restartPolicy", + "spec.serviceAccountName", + "status.phase", + "status.podIP": + return label, value, nil + // This is for backwards compatibility with old v1 clients which send spec.host + case "spec.host": + return "spec.nodeName", value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + err = scheme.AddFieldLabelConversionFunc("v1", "Node", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + case "spec.unschedulable": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + err = scheme.AddFieldLabelConversionFunc("v1", "ReplicationController", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", + "metadata.namespace", + "status.replicas": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) + if err != nil { + return err + } + err = scheme.AddFieldLabelConversionFunc("v1", "PersistentVolume", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + if err := AddFieldLabelConversionsForEvent(scheme); err != nil { + return err + } + if err := AddFieldLabelConversionsForNamespace(scheme); err != nil { + return err + } + if err := AddFieldLabelConversionsForSecret(scheme); err != nil { + return err + } + return nil +} + +func Convert_v1_ReplicationController_to_extensions_ReplicaSet(in *ReplicationController, out *extensions.ReplicaSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec(in *ReplicationControllerSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { + out.Replicas = *in.Replicas + if in.Selector != nil { + metav1.Convert_map_to_unversioned_LabelSelector(&in.Selector, out.Selector, s) + } + if in.Template != nil { + if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, &out.Template, s); err != nil { + 
return err + } + } + return nil +} + +func Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus(in *ReplicationControllerStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + return nil +} + +func Convert_extensions_ReplicaSet_to_v1_ReplicationController(in *extensions.ReplicaSet, out *ReplicationController, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + fieldErr, ok := err.(*field.Error) + if !ok { + return err + } + if out.Annotations == nil { + out.Annotations = make(map[string]string) + } + out.Annotations[api.NonConvertibleAnnotationPrefix+"/"+fieldErr.Field] = reflect.ValueOf(fieldErr.BadValue).String() + } + if err := Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec(in *extensions.ReplicaSetSpec, out *ReplicationControllerSpec, s conversion.Scope) error { + out.Replicas = new(int32) + *out.Replicas = in.Replicas + out.MinReadySeconds = in.MinReadySeconds + var invalidErr error + if in.Selector != nil { + invalidErr = metav1.Convert_unversioned_LabelSelector_to_map(in.Selector, &out.Selector, s) + } + out.Template = new(PodTemplateSpec) + if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, out.Template, s); err != nil { + return err + } + return invalidErr +} + +func Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus(in *extensions.ReplicaSetStatus, out *ReplicationControllerStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + return nil +} + +func Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *ReplicationControllerSpec, s conversion.Scope) error { + out.Replicas = &in.Replicas + out.MinReadySeconds = in.MinReadySeconds + out.Selector = in.Selector + if in.Template != nil { + out.Template = new(PodTemplateSpec) + if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error { + if in.Replicas != nil { + out.Replicas = *in.Replicas + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = in.Selector + if in.Template != nil { + out.Template = new(api.PodTemplateSpec) + if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, out.Template, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func Convert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error { + if err := autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in, out, s); err != nil { + return err + } + + if old := out.Annotations; old != nil { + out.Annotations = 
make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + } + if len(out.Status.InitContainerStatuses) > 0 { + if out.Annotations == nil { + out.Annotations = make(map[string]string) + } + value, err := json.Marshal(out.Status.InitContainerStatuses) + if err != nil { + return err + } + out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value) + out.Annotations[PodInitContainerStatusesBetaAnnotationKey] = string(value) + } else { + delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) + } + return nil +} + +func Convert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { + // TODO: sometime after we move init container to stable, remove these conversions + // If there is a beta annotation, copy to alpha key. + // See commit log for PR #31026 for why we do this. + if valueBeta, okBeta := in.Annotations[PodInitContainerStatusesBetaAnnotationKey]; okBeta { + in.Annotations[PodInitContainerStatusesAnnotationKey] = valueBeta + } + // Move the annotation to the internal repr. field + if value, ok := in.Annotations[PodInitContainerStatusesAnnotationKey]; ok { + var values []ContainerStatus + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + // Conversion from external to internal version exists more to + // satisfy the needs of the decoder than it does to be a general + // purpose tool. And Decode always creates an intermediate object + // to decode to. Thus the caller of UnsafeConvertToVersion is + // taking responsibility to ensure mutation of in is not exposed + // back to the caller. + in.Status.InitContainerStatuses = values + } + + if err := autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in, out, s); err != nil { + return err + } + if len(out.Annotations) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) + } + return nil +} + +func Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error { + if err := autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil { + return err + } + + // TODO: sometime after we move init container to stable, remove these conversions. + if old := out.Annotations; old != nil { + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + } + if len(out.Spec.InitContainers) > 0 { + if out.Annotations == nil { + out.Annotations = make(map[string]string) + } + value, err := json.Marshal(out.Spec.InitContainers) + if err != nil { + return err + } + out.Annotations[PodInitContainersAnnotationKey] = string(value) + out.Annotations[PodInitContainersBetaAnnotationKey] = string(value) + } else { + delete(out.Annotations, PodInitContainersAnnotationKey) + delete(out.Annotations, PodInitContainersBetaAnnotationKey) + } + return nil +} + +func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { + // TODO: sometime after we move init container to stable, remove these conversions + // If there is a beta annotation, copy to alpha key. + // See commit log for PR #31026 for why we do this. 
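The conversion functions above mirror init containers and their statuses into pod annotations so that older clients, which only understand the annotation form, still see the data. The following standalone sketch (simplified, hypothetical types and annotation keys; not the vendored code itself) illustrates the round trip those functions perform: the slice is JSON-marshalled under both the alpha and beta keys on the way out to v1, and both keys are stripped again after decoding back to the internal form.

package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical, simplified stand-ins for the real annotation keys and types.
const (
	initStatusesKey     = "pod.alpha.example/init-container-statuses"
	initStatusesBetaKey = "pod.beta.example/init-container-statuses"
)

type containerStatus struct {
	Name  string `json:"name"`
	Ready bool   `json:"ready"`
}

// toAnnotations mirrors the statuses into both annotation keys, as the
// internal->v1 conversions above do for real Pods.
func toAnnotations(statuses []containerStatus, ann map[string]string) (map[string]string, error) {
	if ann == nil {
		ann = map[string]string{}
	}
	raw, err := json.Marshal(statuses)
	if err != nil {
		return nil, err
	}
	ann[initStatusesKey] = string(raw)
	ann[initStatusesBetaKey] = string(raw)
	return ann, nil
}

// fromAnnotations decodes the statuses (preferring the beta key) and removes
// both keys, as the v1->internal conversions above do.
func fromAnnotations(ann map[string]string) ([]containerStatus, error) {
	value, ok := ann[initStatusesBetaKey]
	if !ok {
		value = ann[initStatusesKey]
	}
	var statuses []containerStatus
	if value != "" {
		if err := json.Unmarshal([]byte(value), &statuses); err != nil {
			return nil, err
		}
	}
	delete(ann, initStatusesKey)
	delete(ann, initStatusesBetaKey)
	return statuses, nil
}

func main() {
	ann, _ := toAnnotations([]containerStatus{{Name: "init-db", Ready: true}}, nil)
	fmt.Println(ann)
	statuses, _ := fromAnnotations(ann)
	fmt.Println(statuses)
}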
+ if valueBeta, okBeta := in.Annotations[PodInitContainersBetaAnnotationKey]; okBeta { + in.Annotations[PodInitContainersAnnotationKey] = valueBeta + } + // Move the annotation to the internal repr. field + if value, ok := in.Annotations[PodInitContainersAnnotationKey]; ok { + var values []Container + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + // Conversion from external to internal version exists more to + // satisfy the needs of the decoder than it does to be a general + // purpose tool. And Decode always creates an intermediate object + // to decode to. Thus the caller of UnsafeConvertToVersion is + // taking responsibility to ensure mutation of in is not exposed + // back to the caller. + in.Spec.InitContainers = values + + // Call defaulters explicitly until annotations are removed + for i := range in.Spec.InitContainers { + c := &in.Spec.InitContainers[i] + SetDefaults_Container(c) + } + } + + if err := autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in, out, s); err != nil { + return err + } + if len(out.Annotations) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + delete(out.Annotations, PodInitContainersAnnotationKey) + delete(out.Annotations, PodInitContainersBetaAnnotationKey) + } + return nil +} + +// The following two PodSpec conversions are done here to support ServiceAccount +// as an alias for ServiceAccountName. +func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversion.Scope) error { + if err := autoConvert_api_PodSpec_To_v1_PodSpec(in, out, s); err != nil { + return err + } + + // DeprecatedServiceAccount is an alias for ServiceAccountName. + out.DeprecatedServiceAccount = in.ServiceAccountName + + if in.SecurityContext != nil { + // the host namespace fields have to be handled here for backward compatibility + // with v1.0.0 + out.HostPID = in.SecurityContext.HostPID + out.HostNetwork = in.SecurityContext.HostNetwork + out.HostIPC = in.SecurityContext.HostIPC + } + + return nil +} + +func Convert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversion.Scope) error { + if err := autoConvert_v1_PodSpec_To_api_PodSpec(in, out, s); err != nil { + return err + } + + // We support DeprecatedServiceAccount as an alias for ServiceAccountName. + // If both are specified, ServiceAccountName (the new field) wins. 
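The PodSpec conversions above encode two backward compatibility rules: DeprecatedServiceAccount is published as an alias for ServiceAccountName (with the new field winning when both are set), and the host namespace flags live at the top level in v1 but inside SecurityContext internally. A standalone sketch of those rules, using simplified hypothetical structs rather than the real PodSpec types; the vendored code that follows applies the same logic to the real fields.

package main

import "fmt"

// Simplified, hypothetical stand-ins for the internal and v1 pod specs; only
// the fields relevant to the compatibility rules above are included.
type internalSpec struct {
	ServiceAccountName string
	HostNetwork        bool
	HostPID            bool
}

type v1Spec struct {
	ServiceAccountName       string
	DeprecatedServiceAccount string // old alias, still published for old clients
	HostNetwork              bool
	HostPID                  bool
}

// toV1 mirrors ServiceAccountName into the deprecated alias and copies the
// host-namespace flags to the v1 top-level fields.
func toV1(in internalSpec) v1Spec {
	return v1Spec{
		ServiceAccountName:       in.ServiceAccountName,
		DeprecatedServiceAccount: in.ServiceAccountName,
		HostNetwork:              in.HostNetwork,
		HostPID:                  in.HostPID,
	}
}

// toInternal honors the alias only when the new field is unset: when both are
// specified, ServiceAccountName wins.
func toInternal(in v1Spec) internalSpec {
	name := in.ServiceAccountName
	if name == "" {
		name = in.DeprecatedServiceAccount
	}
	return internalSpec{
		ServiceAccountName: name,
		HostNetwork:        in.HostNetwork,
		HostPID:            in.HostPID,
	}
}

func main() {
	old := v1Spec{DeprecatedServiceAccount: "builder", HostNetwork: true}
	fmt.Printf("%+v\n", toInternal(old)) // alias is used because the new field is empty
	fmt.Printf("%+v\n", toV1(internalSpec{ServiceAccountName: "ci", HostPID: true}))
}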
+ if in.ServiceAccountName == "" { + out.ServiceAccountName = in.DeprecatedServiceAccount + } + + // the host namespace fields have to be handled specially for backward compatibility + // with v1.0.0 + if out.SecurityContext == nil { + out.SecurityContext = new(api.PodSecurityContext) + } + out.SecurityContext.HostNetwork = in.HostNetwork + out.SecurityContext.HostPID = in.HostPID + out.SecurityContext.HostIPC = in.HostIPC + + return nil +} + +func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error { + if err := autoConvert_api_Pod_To_v1_Pod(in, out, s); err != nil { + return err + } + + // TODO: sometime after we move init container to stable, remove these conversions + if len(out.Spec.InitContainers) > 0 || len(out.Status.InitContainerStatuses) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + delete(out.Annotations, PodInitContainersAnnotationKey) + delete(out.Annotations, PodInitContainersBetaAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) + } + if len(out.Spec.InitContainers) > 0 { + value, err := json.Marshal(out.Spec.InitContainers) + if err != nil { + return err + } + out.Annotations[PodInitContainersAnnotationKey] = string(value) + out.Annotations[PodInitContainersBetaAnnotationKey] = string(value) + } + if len(out.Status.InitContainerStatuses) > 0 { + value, err := json.Marshal(out.Status.InitContainerStatuses) + if err != nil { + return err + } + out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value) + out.Annotations[PodInitContainerStatusesBetaAnnotationKey] = string(value) + } + + return nil +} + +func Convert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error { + // If there is a beta annotation, copy to alpha key. + // See commit log for PR #31026 for why we do this. + if valueBeta, okBeta := in.Annotations[PodInitContainersBetaAnnotationKey]; okBeta { + in.Annotations[PodInitContainersAnnotationKey] = valueBeta + } + // TODO: sometime after we move init container to stable, remove these conversions + // Move the annotation to the internal repr. field + if value, ok := in.Annotations[PodInitContainersAnnotationKey]; ok { + var values []Container + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + // Conversion from external to internal version exists more to + // satisfy the needs of the decoder than it does to be a general + // purpose tool. And Decode always creates an intermediate object + // to decode to. Thus the caller of UnsafeConvertToVersion is + // taking responsibility to ensure mutation of in is not exposed + // back to the caller. + in.Spec.InitContainers = values + // Call defaulters explicitly until annotations are removed + for i := range in.Spec.InitContainers { + c := &in.Spec.InitContainers[i] + SetDefaults_Container(c) + } + } + // If there is a beta annotation, copy to alpha key. + // See commit log for PR #31026 for why we do this. 
+ if valueBeta, okBeta := in.Annotations[PodInitContainerStatusesBetaAnnotationKey]; okBeta { + in.Annotations[PodInitContainerStatusesAnnotationKey] = valueBeta + } + if value, ok := in.Annotations[PodInitContainerStatusesAnnotationKey]; ok { + var values []ContainerStatus + if err := json.Unmarshal([]byte(value), &values); err != nil { + return err + } + // Conversion from external to internal version exists more to + // satisfy the needs of the decoder than it does to be a general + // purpose tool. And Decode always creates an intermediate object + // to decode to. Thus the caller of UnsafeConvertToVersion is + // taking responsibility to ensure mutation of in is not exposed + // back to the caller. + in.Status.InitContainerStatuses = values + } + + if err := autoConvert_v1_Pod_To_api_Pod(in, out, s); err != nil { + return err + } + if len(out.Annotations) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)) + for k, v := range old { + out.Annotations[k] = v + } + delete(out.Annotations, PodInitContainersAnnotationKey) + delete(out.Annotations, PodInitContainersBetaAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) + } + return nil +} + +func Convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *ServiceSpec, s conversion.Scope) error { + if err := autoConvert_api_ServiceSpec_To_v1_ServiceSpec(in, out, s); err != nil { + return err + } + // Publish both externalIPs and deprecatedPublicIPs fields in v1. + out.DeprecatedPublicIPs = in.ExternalIPs + return nil +} + +func Convert_v1_Secret_To_api_Secret(in *Secret, out *api.Secret, s conversion.Scope) error { + if err := autoConvert_v1_Secret_To_api_Secret(in, out, s); err != nil { + return err + } + + // StringData overwrites Data + if len(in.StringData) > 0 { + if out.Data == nil { + out.Data = map[string][]byte{} + } + for k, v := range in.StringData { + out.Data[k] = []byte(v) + } + } + + return nil +} + +func Convert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error { + if err := autoConvert_v1_ServiceSpec_To_api_ServiceSpec(in, out, s); err != nil { + return err + } + // Prefer the legacy deprecatedPublicIPs field, if provided. 
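The Secret conversion above folds the write-only StringData field into Data, with StringData entries overwriting any existing byte value for the same key. A minimal sketch of that merge, using plain maps rather than the real Secret types, to show the precedence.

package main

import "fmt"

// mergeStringData folds string entries into the byte-valued data map,
// overwriting existing keys, as the Secret conversion above does.
func mergeStringData(data map[string][]byte, stringData map[string]string) map[string][]byte {
	if len(stringData) == 0 {
		return data
	}
	if data == nil {
		data = map[string][]byte{}
	}
	for k, v := range stringData {
		data[k] = []byte(v)
	}
	return data
}

func main() {
	data := map[string][]byte{"password": []byte("old")}
	data = mergeStringData(data, map[string]string{"password": "new", "token": "abc"})
	for k, v := range data {
		fmt.Printf("%s=%s\n", k, v)
	}
}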
+ if len(in.DeprecatedPublicIPs) > 0 { + out.ExternalIPs = in.DeprecatedPublicIPs + } + return nil +} + +func Convert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurityContext, out *PodSecurityContext, s conversion.Scope) error { + out.SupplementalGroups = in.SupplementalGroups + if in.SELinuxOptions != nil { + out.SELinuxOptions = new(SELinuxOptions) + if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { + return err + } + } else { + out.SELinuxOptions = nil + } + out.RunAsUser = in.RunAsUser + out.RunAsNonRoot = in.RunAsNonRoot + out.FSGroup = in.FSGroup + return nil +} + +func Convert_v1_PodSecurityContext_To_api_PodSecurityContext(in *PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error { + out.SupplementalGroups = in.SupplementalGroups + if in.SELinuxOptions != nil { + out.SELinuxOptions = new(api.SELinuxOptions) + if err := Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { + return err + } + } else { + out.SELinuxOptions = nil + } + out.RunAsUser = in.RunAsUser + out.RunAsNonRoot = in.RunAsNonRoot + out.FSGroup = in.FSGroup + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_v1_ResourceList_To_api_ResourceList(in *ResourceList, out *api.ResourceList, s conversion.Scope) error { + if *in == nil { + return nil + } + if *out == nil { + *out = make(api.ResourceList, len(*in)) + } + for key, val := range *in { + // Moved to defaults + // TODO(#18538): We round up resource values to milli scale to maintain API compatibility. + // In the future, we should instead reject values that need rounding. + // const milliScale = -3 + // val.RoundUp(milliScale) + + (*out)[api.ResourceName(key)] = val + } + return nil +} + +func AddFieldLabelConversionsForEvent(scheme *runtime.Scheme) error { + return scheme.AddFieldLabelConversionFunc("v1", "Event", + func(label, value string) (string, string, error) { + switch label { + case "involvedObject.kind", + "involvedObject.namespace", + "involvedObject.name", + "involvedObject.uid", + "involvedObject.apiVersion", + "involvedObject.resourceVersion", + "involvedObject.fieldPath", + "reason", + "source", + "type", + "metadata.namespace", + "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) +} + +func AddFieldLabelConversionsForNamespace(scheme *runtime.Scheme) error { + return scheme.AddFieldLabelConversionFunc("v1", "Namespace", + func(label, value string) (string, string, error) { + switch label { + case "status.phase", + "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) +} + +func AddFieldLabelConversionsForSecret(scheme *runtime.Scheme) error { + return scheme.AddFieldLabelConversionFunc("v1", "Secret", + func(label, value string) (string, string, error) { + switch label { + case "type", + "metadata.namespace", + "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/defaults.go b/vendor/k8s.io/client-go/pkg/api/v1/defaults.go new file mode 100644 index 000000000..17b0deb01 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/defaults.go @@ -0,0 +1,389 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/pkg/util" + "k8s.io/client-go/pkg/util/parsers" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + RegisterDefaults(scheme) + return scheme.AddDefaultingFuncs( + SetDefaults_PodExecOptions, + SetDefaults_PodAttachOptions, + SetDefaults_ReplicationController, + SetDefaults_Volume, + SetDefaults_ContainerPort, + SetDefaults_Container, + SetDefaults_ServiceSpec, + SetDefaults_Pod, + SetDefaults_PodSpec, + SetDefaults_Probe, + SetDefaults_SecretVolumeSource, + SetDefaults_ConfigMapVolumeSource, + SetDefaults_DownwardAPIVolumeSource, + SetDefaults_ProjectedVolumeSource, + SetDefaults_Secret, + SetDefaults_PersistentVolume, + SetDefaults_PersistentVolumeClaim, + SetDefaults_ISCSIVolumeSource, + SetDefaults_Endpoints, + SetDefaults_HTTPGetAction, + SetDefaults_NamespaceStatus, + SetDefaults_Node, + SetDefaults_NodeStatus, + SetDefaults_ObjectFieldSelector, + SetDefaults_LimitRangeItem, + SetDefaults_ConfigMap, + SetDefaults_RBDVolumeSource, + SetDefaults_ResourceList, + ) +} + +func SetDefaults_ResourceList(obj *ResourceList) { + for key, val := range *obj { + // TODO(#18538): We round up resource values to milli scale to maintain API compatibility. + // In the future, we should instead reject values that need rounding. 
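SetDefaults_ResourceList, whose body continues below, rounds every quantity up to milli precision, so a CPU request of 0.3001 cores becomes 301m. A small sketch of the same rounding using the apimachinery resource package vendored alongside this code (the import path and RoundUp call match what the defaulting loop below uses; the printed value is an assumption about canonical formatting).

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// A CPU request with more precision than the API wants to keep.
	q := resource.MustParse("0.3001")

	// Round up to milli scale (10^-3), as the defaulting loop below does
	// for every entry in a ResourceList.
	q.RoundUp(resource.Milli)

	fmt.Println(q.String()) // expected to print 301m: rounded up, never down
}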
+ const milliScale = -3 + val.RoundUp(milliScale) + + (*obj)[ResourceName(key)] = val + } +} + +func SetDefaults_PodExecOptions(obj *PodExecOptions) { + obj.Stdout = true + obj.Stderr = true +} +func SetDefaults_PodAttachOptions(obj *PodAttachOptions) { + obj.Stdout = true + obj.Stderr = true +} +func SetDefaults_ReplicationController(obj *ReplicationController) { + var labels map[string]string + if obj.Spec.Template != nil { + labels = obj.Spec.Template.Labels + } + // TODO: support templates defined elsewhere when we support them in the API + if labels != nil { + if len(obj.Spec.Selector) == 0 { + obj.Spec.Selector = labels + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } +} +func SetDefaults_Volume(obj *Volume) { + if util.AllPtrFieldsNil(&obj.VolumeSource) { + obj.VolumeSource = VolumeSource{ + EmptyDir: &EmptyDirVolumeSource{}, + } + } +} +func SetDefaults_ContainerPort(obj *ContainerPort) { + if obj.Protocol == "" { + obj.Protocol = ProtocolTCP + } +} +func SetDefaults_Container(obj *Container) { + if obj.ImagePullPolicy == "" { + // Ignore error and assume it has been validated elsewhere + _, tag, _, _ := parsers.ParseImageName(obj.Image) + + // Check image tag + if tag == "latest" { + obj.ImagePullPolicy = PullAlways + } else { + obj.ImagePullPolicy = PullIfNotPresent + } + } + if obj.TerminationMessagePath == "" { + obj.TerminationMessagePath = TerminationMessagePathDefault + } + if obj.TerminationMessagePolicy == "" { + obj.TerminationMessagePolicy = TerminationMessageReadFile + } +} +func SetDefaults_ServiceSpec(obj *ServiceSpec) { + if obj.SessionAffinity == "" { + obj.SessionAffinity = ServiceAffinityNone + } + if obj.Type == "" { + obj.Type = ServiceTypeClusterIP + } + for i := range obj.Ports { + sp := &obj.Ports[i] + if sp.Protocol == "" { + sp.Protocol = ProtocolTCP + } + if sp.TargetPort == intstr.FromInt(0) || sp.TargetPort == intstr.FromString("") { + sp.TargetPort = intstr.FromInt(int(sp.Port)) + } + } +} +func SetDefaults_Pod(obj *Pod) { + // If limits are specified, but requests are not, default requests to limits + // This is done here rather than a more specific defaulting pass on ResourceRequirements + // because we only want this defaulting semantic to take place on a Pod and not a PodTemplate + for i := range obj.Spec.Containers { + // set requests to limits if requests are not specified, but limits are + if obj.Spec.Containers[i].Resources.Limits != nil { + if obj.Spec.Containers[i].Resources.Requests == nil { + obj.Spec.Containers[i].Resources.Requests = make(ResourceList) + } + for key, value := range obj.Spec.Containers[i].Resources.Limits { + if _, exists := obj.Spec.Containers[i].Resources.Requests[key]; !exists { + obj.Spec.Containers[i].Resources.Requests[key] = *(value.Copy()) + } + } + } + } + for i := range obj.Spec.InitContainers { + if obj.Spec.InitContainers[i].Resources.Limits != nil { + if obj.Spec.InitContainers[i].Resources.Requests == nil { + obj.Spec.InitContainers[i].Resources.Requests = make(ResourceList) + } + for key, value := range obj.Spec.InitContainers[i].Resources.Limits { + if _, exists := obj.Spec.InitContainers[i].Resources.Requests[key]; !exists { + obj.Spec.InitContainers[i].Resources.Requests[key] = *(value.Copy()) + } + } + } + } +} +func SetDefaults_PodSpec(obj *PodSpec) { + if obj.DNSPolicy == "" { + obj.DNSPolicy = DNSClusterFirst + } + if obj.RestartPolicy == "" { + obj.RestartPolicy = RestartPolicyAlways + } + if 
obj.HostNetwork { + defaultHostNetworkPorts(&obj.Containers) + defaultHostNetworkPorts(&obj.InitContainers) + } + if obj.SecurityContext == nil { + obj.SecurityContext = &PodSecurityContext{} + } + if obj.TerminationGracePeriodSeconds == nil { + period := int64(DefaultTerminationGracePeriodSeconds) + obj.TerminationGracePeriodSeconds = &period + } + if obj.SchedulerName == "" { + obj.SchedulerName = DefaultSchedulerName + } +} +func SetDefaults_Probe(obj *Probe) { + if obj.TimeoutSeconds == 0 { + obj.TimeoutSeconds = 1 + } + if obj.PeriodSeconds == 0 { + obj.PeriodSeconds = 10 + } + if obj.SuccessThreshold == 0 { + obj.SuccessThreshold = 1 + } + if obj.FailureThreshold == 0 { + obj.FailureThreshold = 3 + } +} +func SetDefaults_SecretVolumeSource(obj *SecretVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(SecretVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} +func SetDefaults_ConfigMapVolumeSource(obj *ConfigMapVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(ConfigMapVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} +func SetDefaults_DownwardAPIVolumeSource(obj *DownwardAPIVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(DownwardAPIVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} +func SetDefaults_Secret(obj *Secret) { + if obj.Type == "" { + obj.Type = SecretTypeOpaque + } +} +func SetDefaults_ProjectedVolumeSource(obj *ProjectedVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(ProjectedVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} +func SetDefaults_PersistentVolume(obj *PersistentVolume) { + if obj.Status.Phase == "" { + obj.Status.Phase = VolumePending + } + if obj.Spec.PersistentVolumeReclaimPolicy == "" { + obj.Spec.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimRetain + } +} +func SetDefaults_PersistentVolumeClaim(obj *PersistentVolumeClaim) { + if obj.Status.Phase == "" { + obj.Status.Phase = ClaimPending + } +} +func SetDefaults_ISCSIVolumeSource(obj *ISCSIVolumeSource) { + if obj.ISCSIInterface == "" { + obj.ISCSIInterface = "default" + } +} +func SetDefaults_AzureDiskVolumeSource(obj *AzureDiskVolumeSource) { + if obj.CachingMode == nil { + obj.CachingMode = new(AzureDataDiskCachingMode) + *obj.CachingMode = AzureDataDiskCachingNone + } + if obj.FSType == nil { + obj.FSType = new(string) + *obj.FSType = "ext4" + } + if obj.ReadOnly == nil { + obj.ReadOnly = new(bool) + *obj.ReadOnly = false + } +} +func SetDefaults_Endpoints(obj *Endpoints) { + for i := range obj.Subsets { + ss := &obj.Subsets[i] + for i := range ss.Ports { + ep := &ss.Ports[i] + if ep.Protocol == "" { + ep.Protocol = ProtocolTCP + } + } + } +} +func SetDefaults_HTTPGetAction(obj *HTTPGetAction) { + if obj.Path == "" { + obj.Path = "/" + } + if obj.Scheme == "" { + obj.Scheme = URISchemeHTTP + } +} +func SetDefaults_NamespaceStatus(obj *NamespaceStatus) { + if obj.Phase == "" { + obj.Phase = NamespaceActive + } +} +func SetDefaults_Node(obj *Node) { + if obj.Spec.ExternalID == "" { + obj.Spec.ExternalID = obj.Name + } +} +func SetDefaults_NodeStatus(obj *NodeStatus) { + if obj.Allocatable == nil && obj.Capacity != nil { + obj.Allocatable = make(ResourceList, len(obj.Capacity)) + for key, value := range obj.Capacity { + obj.Allocatable[key] = *(value.Copy()) + } + obj.Allocatable = obj.Capacity + } +} +func SetDefaults_ObjectFieldSelector(obj *ObjectFieldSelector) { + if obj.APIVersion == "" { + obj.APIVersion = "v1" + } +} +func SetDefaults_LimitRangeItem(obj *LimitRangeItem) { + // for container limits, we apply 
default values + if obj.Type == LimitTypeContainer { + + if obj.Default == nil { + obj.Default = make(ResourceList) + } + if obj.DefaultRequest == nil { + obj.DefaultRequest = make(ResourceList) + } + + // If a default limit is unspecified, but the max is specified, default the limit to the max + for key, value := range obj.Max { + if _, exists := obj.Default[key]; !exists { + obj.Default[key] = *(value.Copy()) + } + } + // If a default limit is specified, but the default request is not, default request to limit + for key, value := range obj.Default { + if _, exists := obj.DefaultRequest[key]; !exists { + obj.DefaultRequest[key] = *(value.Copy()) + } + } + // If a default request is not specified, but the min is provided, default request to the min + for key, value := range obj.Min { + if _, exists := obj.DefaultRequest[key]; !exists { + obj.DefaultRequest[key] = *(value.Copy()) + } + } + } +} +func SetDefaults_ConfigMap(obj *ConfigMap) { + if obj.Data == nil { + obj.Data = make(map[string]string) + } +} + +// With host networking default all container ports to host ports. +func defaultHostNetworkPorts(containers *[]Container) { + for i := range *containers { + for j := range (*containers)[i].Ports { + if (*containers)[i].Ports[j].HostPort == 0 { + (*containers)[i].Ports[j].HostPort = (*containers)[i].Ports[j].ContainerPort + } + } + } +} + +func SetDefaults_RBDVolumeSource(obj *RBDVolumeSource) { + if obj.RBDPool == "" { + obj.RBDPool = "rbd" + } + if obj.RadosUser == "" { + obj.RadosUser = "admin" + } + if obj.Keyring == "" { + obj.Keyring = "/etc/ceph/keyring" + } +} + +func SetDefaults_ScaleIOVolumeSource(obj *ScaleIOVolumeSource) { + if obj.ProtectionDomain == "" { + obj.ProtectionDomain = "default" + } + if obj.StoragePool == "" { + obj.StoragePool = "default" + } + if obj.StorageMode == "" { + obj.StorageMode = "ThinProvisioned" + } + if obj.FSType == "" { + obj.FSType = "xfs" + } +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/doc.go b/vendor/k8s.io/client-go/pkg/api/v1/doc.go new file mode 100644 index 000000000..0fdd87f75 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/k8s.io/client-go/pkg/api/v1/generate.go b/vendor/k8s.io/client-go/pkg/api/v1/generate.go new file mode 100644 index 000000000..b8c44e4c7 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/generate.go @@ -0,0 +1,64 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + utilrand "k8s.io/apimachinery/pkg/util/rand" +) + +// NameGenerator generates names for objects. Some backends may have more information +// available to guide selection of new names and this interface hides those details. +type NameGenerator interface { + // GenerateName generates a valid name from the base name, adding a random suffix to the + // the base. If base is valid, the returned name must also be valid. The generator is + // responsible for knowing the maximum valid name length. + GenerateName(base string) string +} + +// GenerateName will resolve the object name of the provided ObjectMeta to a generated version if +// necessary. It expects that validation for ObjectMeta has already completed (that Base is a +// valid name) and that the NameGenerator generates a name that is also valid. +func GenerateName(u NameGenerator, meta *ObjectMeta) { + if len(meta.GenerateName) == 0 || len(meta.Name) != 0 { + return + } + meta.Name = u.GenerateName(meta.GenerateName) +} + +// simpleNameGenerator generates random names. +type simpleNameGenerator struct{} + +// SimpleNameGenerator is a generator that returns the name plus a random suffix of five alphanumerics +// when a name is requested. The string is guaranteed to not exceed the length of a standard Kubernetes +// name (63 characters) +var SimpleNameGenerator NameGenerator = simpleNameGenerator{} + +const ( + // TODO: make this flexible for non-core resources with alternate naming rules. + maxNameLength = 63 + randomLength = 5 + maxGeneratedNameLength = maxNameLength - randomLength +) + +func (simpleNameGenerator) GenerateName(base string) string { + if len(base) > maxGeneratedNameLength { + base = base[:maxGeneratedNameLength] + } + return fmt.Sprintf("%s%s", base, utilrand.String(randomLength)) +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/generated.pb.go b/vendor/k8s.io/client-go/pkg/api/v1/generated.pb.go new file mode 100644 index 000000000..20f632dc9 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/generated.pb.go @@ -0,0 +1,43238 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/api/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. 
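The generate.go file above caps generated object names at 63 characters by truncating the base so that a five-character random suffix always fits. A small sketch of the same behavior, reusing the vendored rand utility and copying the length constants from the code above; the long base name here is only an illustration.

package main

import (
	"fmt"
	"strings"

	utilrand "k8s.io/apimachinery/pkg/util/rand"
)

const (
	maxNameLength          = 63
	randomLength           = 5
	maxGeneratedNameLength = maxNameLength - randomLength
)

// generateName mirrors simpleNameGenerator.GenerateName above: truncate the
// base so that base + 5 random alphanumerics never exceeds 63 characters.
func generateName(base string) string {
	if len(base) > maxGeneratedNameLength {
		base = base[:maxGeneratedNameLength]
	}
	return base + utilrand.String(randomLength)
}

func main() {
	long := strings.Repeat("replicaset-", 8) // 88 characters, longer than the limit
	name := generateName(long)
	fmt.Println(name, len(name)) // always 63 characters for an over-long base
}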
+ + It is generated from these files: + k8s.io/kubernetes/pkg/api/v1/generated.proto + + It has these top-level messages: + AWSElasticBlockStoreVolumeSource + Affinity + AttachedVolume + AvoidPods + AzureDiskVolumeSource + AzureFileVolumeSource + Binding + Capabilities + CephFSVolumeSource + CinderVolumeSource + ComponentCondition + ComponentStatus + ComponentStatusList + ConfigMap + ConfigMapEnvSource + ConfigMapKeySelector + ConfigMapList + ConfigMapProjection + ConfigMapVolumeSource + Container + ContainerImage + ContainerPort + ContainerState + ContainerStateRunning + ContainerStateTerminated + ContainerStateWaiting + ContainerStatus + DaemonEndpoint + DeleteOptions + DownwardAPIProjection + DownwardAPIVolumeFile + DownwardAPIVolumeSource + EmptyDirVolumeSource + EndpointAddress + EndpointPort + EndpointSubset + Endpoints + EndpointsList + EnvFromSource + EnvVar + EnvVarSource + Event + EventList + EventSource + ExecAction + FCVolumeSource + FlexVolumeSource + FlockerVolumeSource + GCEPersistentDiskVolumeSource + GitRepoVolumeSource + GlusterfsVolumeSource + HTTPGetAction + HTTPHeader + Handler + HostPathVolumeSource + ISCSIVolumeSource + KeyToPath + Lifecycle + LimitRange + LimitRangeItem + LimitRangeList + LimitRangeSpec + List + ListOptions + LoadBalancerIngress + LoadBalancerStatus + LocalObjectReference + NFSVolumeSource + Namespace + NamespaceList + NamespaceSpec + NamespaceStatus + Node + NodeAddress + NodeAffinity + NodeCondition + NodeDaemonEndpoints + NodeList + NodeProxyOptions + NodeResources + NodeSelector + NodeSelectorRequirement + NodeSelectorTerm + NodeSpec + NodeStatus + NodeSystemInfo + ObjectFieldSelector + ObjectMeta + ObjectReference + PersistentVolume + PersistentVolumeClaim + PersistentVolumeClaimList + PersistentVolumeClaimSpec + PersistentVolumeClaimStatus + PersistentVolumeClaimVolumeSource + PersistentVolumeList + PersistentVolumeSource + PersistentVolumeSpec + PersistentVolumeStatus + PhotonPersistentDiskVolumeSource + Pod + PodAffinity + PodAffinityTerm + PodAntiAffinity + PodAttachOptions + PodCondition + PodExecOptions + PodList + PodLogOptions + PodPortForwardOptions + PodProxyOptions + PodSecurityContext + PodSignature + PodSpec + PodStatus + PodStatusResult + PodTemplate + PodTemplateList + PodTemplateSpec + PortworxVolumeSource + Preconditions + PreferAvoidPodsEntry + PreferredSchedulingTerm + Probe + ProjectedVolumeSource + QuobyteVolumeSource + RBDVolumeSource + RangeAllocation + ReplicationController + ReplicationControllerCondition + ReplicationControllerList + ReplicationControllerSpec + ReplicationControllerStatus + ResourceFieldSelector + ResourceQuota + ResourceQuotaList + ResourceQuotaSpec + ResourceQuotaStatus + ResourceRequirements + SELinuxOptions + ScaleIOVolumeSource + Secret + SecretEnvSource + SecretKeySelector + SecretList + SecretProjection + SecretVolumeSource + SecurityContext + SerializedReference + Service + ServiceAccount + ServiceAccountList + ServiceList + ServicePort + ServiceProxyOptions + ServiceSpec + ServiceStatus + Sysctl + TCPSocketAction + Taint + Toleration + Volume + VolumeMount + VolumeProjection + VolumeSource + VsphereVirtualDiskVolumeSource + WeightedPodAffinityTerm +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" + 
+import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *AWSElasticBlockStoreVolumeSource) Reset() { *m = AWSElasticBlockStoreVolumeSource{} } +func (*AWSElasticBlockStoreVolumeSource) ProtoMessage() {} +func (*AWSElasticBlockStoreVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{0} +} + +func (m *Affinity) Reset() { *m = Affinity{} } +func (*Affinity) ProtoMessage() {} +func (*Affinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *AttachedVolume) Reset() { *m = AttachedVolume{} } +func (*AttachedVolume) ProtoMessage() {} +func (*AttachedVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *AvoidPods) Reset() { *m = AvoidPods{} } +func (*AvoidPods) ProtoMessage() {} +func (*AvoidPods) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} } +func (*AzureDiskVolumeSource) ProtoMessage() {} +func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} } +func (*AzureFileVolumeSource) ProtoMessage() {} +func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *Binding) Reset() { *m = Binding{} } +func (*Binding) ProtoMessage() {} +func (*Binding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *Capabilities) Reset() { *m = Capabilities{} } +func (*Capabilities) ProtoMessage() {} +func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } +func (*CephFSVolumeSource) ProtoMessage() {} +func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } +func (*CinderVolumeSource) ProtoMessage() {} +func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } +func (*ComponentCondition) ProtoMessage() {} +func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } +func (*ComponentStatus) ProtoMessage() {} +func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } +func (*ComponentStatusList) ProtoMessage() {} +func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func (m *ConfigMap) Reset() { *m = ConfigMap{} } +func (*ConfigMap) ProtoMessage() {} +func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } 
+func (*ConfigMapEnvSource) ProtoMessage() {} +func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + +func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } +func (*ConfigMapKeySelector) ProtoMessage() {} +func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + +func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } +func (*ConfigMapList) ProtoMessage() {} +func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } + +func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } +func (*ConfigMapProjection) ProtoMessage() {} +func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } + +func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } +func (*ConfigMapVolumeSource) ProtoMessage() {} +func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } + +func (m *Container) Reset() { *m = Container{} } +func (*Container) ProtoMessage() {} +func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } + +func (m *ContainerImage) Reset() { *m = ContainerImage{} } +func (*ContainerImage) ProtoMessage() {} +func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } + +func (m *ContainerPort) Reset() { *m = ContainerPort{} } +func (*ContainerPort) ProtoMessage() {} +func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } + +func (m *ContainerState) Reset() { *m = ContainerState{} } +func (*ContainerState) ProtoMessage() {} +func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } + +func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } +func (*ContainerStateRunning) ProtoMessage() {} +func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } + +func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } +func (*ContainerStateTerminated) ProtoMessage() {} +func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{24} +} + +func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } +func (*ContainerStateWaiting) ProtoMessage() {} +func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } + +func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } +func (*ContainerStatus) ProtoMessage() {} +func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } + +func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } +func (*DaemonEndpoint) ProtoMessage() {} +func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } + +func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } +func (*DeleteOptions) ProtoMessage() {} +func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } + +func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } +func (*DownwardAPIProjection) ProtoMessage() {} +func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } + +func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } +func (*DownwardAPIVolumeFile) ProtoMessage() {} +func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { 
return fileDescriptorGenerated, []int{30} } + +func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } +func (*DownwardAPIVolumeSource) ProtoMessage() {} +func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{31} +} + +func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } +func (*EmptyDirVolumeSource) ProtoMessage() {} +func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } + +func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } +func (*EndpointAddress) ProtoMessage() {} +func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } + +func (m *EndpointPort) Reset() { *m = EndpointPort{} } +func (*EndpointPort) ProtoMessage() {} +func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } + +func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } +func (*EndpointSubset) ProtoMessage() {} +func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } + +func (m *Endpoints) Reset() { *m = Endpoints{} } +func (*Endpoints) ProtoMessage() {} +func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } + +func (m *EndpointsList) Reset() { *m = EndpointsList{} } +func (*EndpointsList) ProtoMessage() {} +func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } + +func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } +func (*EnvFromSource) ProtoMessage() {} +func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } + +func (m *EnvVar) Reset() { *m = EnvVar{} } +func (*EnvVar) ProtoMessage() {} +func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } + +func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } +func (*EnvVarSource) ProtoMessage() {} +func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } + +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } + +func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } + +func (m *EventSource) Reset() { *m = EventSource{} } +func (*EventSource) ProtoMessage() {} +func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } + +func (m *ExecAction) Reset() { *m = ExecAction{} } +func (*ExecAction) ProtoMessage() {} +func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } + +func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } +func (*FCVolumeSource) ProtoMessage() {} +func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } + +func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } +func (*FlexVolumeSource) ProtoMessage() {} +func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } + +func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } +func (*FlockerVolumeSource) ProtoMessage() {} +func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } + +func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } +func 
(*GCEPersistentDiskVolumeSource) ProtoMessage() {} +func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{48} +} + +func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } +func (*GitRepoVolumeSource) ProtoMessage() {} +func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } + +func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } +func (*GlusterfsVolumeSource) ProtoMessage() {} +func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } + +func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } +func (*HTTPGetAction) ProtoMessage() {} +func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } + +func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } +func (*HTTPHeader) ProtoMessage() {} +func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } + +func (m *Handler) Reset() { *m = Handler{} } +func (*Handler) ProtoMessage() {} +func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } + +func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } +func (*HostPathVolumeSource) ProtoMessage() {} +func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } + +func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } +func (*ISCSIVolumeSource) ProtoMessage() {} +func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } + +func (m *KeyToPath) Reset() { *m = KeyToPath{} } +func (*KeyToPath) ProtoMessage() {} +func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } + +func (m *Lifecycle) Reset() { *m = Lifecycle{} } +func (*Lifecycle) ProtoMessage() {} +func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } + +func (m *LimitRange) Reset() { *m = LimitRange{} } +func (*LimitRange) ProtoMessage() {} +func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } + +func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } +func (*LimitRangeItem) ProtoMessage() {} +func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } + +func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } +func (*LimitRangeList) ProtoMessage() {} +func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } + +func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } +func (*LimitRangeSpec) ProtoMessage() {} +func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } + +func (m *List) Reset() { *m = List{} } +func (*List) ProtoMessage() {} +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } + +func (m *ListOptions) Reset() { *m = ListOptions{} } +func (*ListOptions) ProtoMessage() {} +func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } + +func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } +func (*LoadBalancerIngress) ProtoMessage() {} +func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } + +func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } +func (*LoadBalancerStatus) ProtoMessage() {} +func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{65} } + +func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } +func (*LocalObjectReference) ProtoMessage() {} +func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } + +func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } +func (*NFSVolumeSource) ProtoMessage() {} +func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } + +func (m *Namespace) Reset() { *m = Namespace{} } +func (*Namespace) ProtoMessage() {} +func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } + +func (m *NamespaceList) Reset() { *m = NamespaceList{} } +func (*NamespaceList) ProtoMessage() {} +func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } + +func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } +func (*NamespaceSpec) ProtoMessage() {} +func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } + +func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } +func (*NamespaceStatus) ProtoMessage() {} +func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } + +func (m *Node) Reset() { *m = Node{} } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } + +func (m *NodeAddress) Reset() { *m = NodeAddress{} } +func (*NodeAddress) ProtoMessage() {} +func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } + +func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } +func (*NodeAffinity) ProtoMessage() {} +func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } + +func (m *NodeCondition) Reset() { *m = NodeCondition{} } +func (*NodeCondition) ProtoMessage() {} +func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } + +func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } +func (*NodeDaemonEndpoints) ProtoMessage() {} +func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } + +func (m *NodeList) Reset() { *m = NodeList{} } +func (*NodeList) ProtoMessage() {} +func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } + +func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } +func (*NodeProxyOptions) ProtoMessage() {} +func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } + +func (m *NodeResources) Reset() { *m = NodeResources{} } +func (*NodeResources) ProtoMessage() {} +func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } + +func (m *NodeSelector) Reset() { *m = NodeSelector{} } +func (*NodeSelector) ProtoMessage() {} +func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } + +func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } +func (*NodeSelectorRequirement) ProtoMessage() {} +func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{81} +} + +func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } +func (*NodeSelectorTerm) ProtoMessage() {} +func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } + +func (m *NodeSpec) Reset() { *m = NodeSpec{} } +func (*NodeSpec) ProtoMessage() {} 
+func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } + +func (m *NodeStatus) Reset() { *m = NodeStatus{} } +func (*NodeStatus) ProtoMessage() {} +func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } + +func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } +func (*NodeSystemInfo) ProtoMessage() {} +func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } + +func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } +func (*ObjectFieldSelector) ProtoMessage() {} +func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } + +func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } +func (*ObjectMeta) ProtoMessage() {} +func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } + +func (m *ObjectReference) Reset() { *m = ObjectReference{} } +func (*ObjectReference) ProtoMessage() {} +func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } + +func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } +func (*PersistentVolume) ProtoMessage() {} +func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } + +func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } +func (*PersistentVolumeClaim) ProtoMessage() {} +func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } + +func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } +func (*PersistentVolumeClaimList) ProtoMessage() {} +func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{91} +} + +func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } +func (*PersistentVolumeClaimSpec) ProtoMessage() {} +func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{92} +} + +func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } +func (*PersistentVolumeClaimStatus) ProtoMessage() {} +func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{93} +} + +func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } +func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} +func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{94} +} + +func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } +func (*PersistentVolumeList) ProtoMessage() {} +func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } + +func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } +func (*PersistentVolumeSource) ProtoMessage() {} +func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } + +func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } +func (*PersistentVolumeSpec) ProtoMessage() {} +func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } + +func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } +func (*PersistentVolumeStatus) ProtoMessage() {} +func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } + +func (m *PhotonPersistentDiskVolumeSource) 
Reset() { *m = PhotonPersistentDiskVolumeSource{} } +func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} +func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{99} +} + +func (m *Pod) Reset() { *m = Pod{} } +func (*Pod) ProtoMessage() {} +func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} } + +func (m *PodAffinity) Reset() { *m = PodAffinity{} } +func (*PodAffinity) ProtoMessage() {} +func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{101} } + +func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } +func (*PodAffinityTerm) ProtoMessage() {} +func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{102} } + +func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } +func (*PodAntiAffinity) ProtoMessage() {} +func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{103} } + +func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } +func (*PodAttachOptions) ProtoMessage() {} +func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{104} } + +func (m *PodCondition) Reset() { *m = PodCondition{} } +func (*PodCondition) ProtoMessage() {} +func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{105} } + +func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } +func (*PodExecOptions) ProtoMessage() {} +func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{106} } + +func (m *PodList) Reset() { *m = PodList{} } +func (*PodList) ProtoMessage() {} +func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{107} } + +func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } +func (*PodLogOptions) ProtoMessage() {} +func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} } + +func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } +func (*PodPortForwardOptions) ProtoMessage() {} +func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{109} } + +func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } +func (*PodProxyOptions) ProtoMessage() {} +func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{110} } + +func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } +func (*PodSecurityContext) ProtoMessage() {} +func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} } + +func (m *PodSignature) Reset() { *m = PodSignature{} } +func (*PodSignature) ProtoMessage() {} +func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } + +func (m *PodSpec) Reset() { *m = PodSpec{} } +func (*PodSpec) ProtoMessage() {} +func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } + +func (m *PodStatus) Reset() { *m = PodStatus{} } +func (*PodStatus) ProtoMessage() {} +func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } + +func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } +func (*PodStatusResult) ProtoMessage() {} +func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } + +func (m *PodTemplate) Reset() { *m = PodTemplate{} } +func (*PodTemplate) ProtoMessage() {} +func (*PodTemplate) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{116} } + +func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } +func (*PodTemplateList) ProtoMessage() {} +func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } + +func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } +func (*PodTemplateSpec) ProtoMessage() {} +func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } + +func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } +func (*PortworxVolumeSource) ProtoMessage() {} +func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } + +func (m *Preconditions) Reset() { *m = Preconditions{} } +func (*Preconditions) ProtoMessage() {} +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } + +func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } +func (*PreferAvoidPodsEntry) ProtoMessage() {} +func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } + +func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } +func (*PreferredSchedulingTerm) ProtoMessage() {} +func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{122} +} + +func (m *Probe) Reset() { *m = Probe{} } +func (*Probe) ProtoMessage() {} +func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } + +func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } +func (*ProjectedVolumeSource) ProtoMessage() {} +func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } + +func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } +func (*QuobyteVolumeSource) ProtoMessage() {} +func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } + +func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } +func (*RBDVolumeSource) ProtoMessage() {} +func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } + +func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } +func (*RangeAllocation) ProtoMessage() {} +func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } + +func (m *ReplicationController) Reset() { *m = ReplicationController{} } +func (*ReplicationController) ProtoMessage() {} +func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } + +func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } +func (*ReplicationControllerCondition) ProtoMessage() {} +func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{129} +} + +func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } +func (*ReplicationControllerList) ProtoMessage() {} +func (*ReplicationControllerList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{130} +} + +func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } +func (*ReplicationControllerSpec) ProtoMessage() {} +func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{131} +} + +func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } +func (*ReplicationControllerStatus) ProtoMessage() 
{} +func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{132} +} + +func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } +func (*ResourceFieldSelector) ProtoMessage() {} +func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } + +func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } +func (*ResourceQuota) ProtoMessage() {} +func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } + +func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } +func (*ResourceQuotaList) ProtoMessage() {} +func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } + +func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } +func (*ResourceQuotaSpec) ProtoMessage() {} +func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} } + +func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } +func (*ResourceQuotaStatus) ProtoMessage() {} +func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } + +func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } +func (*ResourceRequirements) ProtoMessage() {} +func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{138} } + +func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } +func (*SELinuxOptions) ProtoMessage() {} +func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } + +func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } +func (*ScaleIOVolumeSource) ProtoMessage() {} +func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} } + +func (m *Secret) Reset() { *m = Secret{} } +func (*Secret) ProtoMessage() {} +func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } + +func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } +func (*SecretEnvSource) ProtoMessage() {} +func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{142} } + +func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } +func (*SecretKeySelector) ProtoMessage() {} +func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } + +func (m *SecretList) Reset() { *m = SecretList{} } +func (*SecretList) ProtoMessage() {} +func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{144} } + +func (m *SecretProjection) Reset() { *m = SecretProjection{} } +func (*SecretProjection) ProtoMessage() {} +func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{145} } + +func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } +func (*SecretVolumeSource) ProtoMessage() {} +func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{146} } + +func (m *SecurityContext) Reset() { *m = SecurityContext{} } +func (*SecurityContext) ProtoMessage() {} +func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{147} } + +func (m *SerializedReference) Reset() { *m = SerializedReference{} } +func (*SerializedReference) ProtoMessage() {} +func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{148} } + +func (m *Service) Reset() { *m = 
Service{} } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } + +func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } +func (*ServiceAccount) ProtoMessage() {} +func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } + +func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } +func (*ServiceAccountList) ProtoMessage() {} +func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } + +func (m *ServiceList) Reset() { *m = ServiceList{} } +func (*ServiceList) ProtoMessage() {} +func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } + +func (m *ServicePort) Reset() { *m = ServicePort{} } +func (*ServicePort) ProtoMessage() {} +func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } + +func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } +func (*ServiceProxyOptions) ProtoMessage() {} +func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } + +func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } +func (*ServiceSpec) ProtoMessage() {} +func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} } + +func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } +func (*ServiceStatus) ProtoMessage() {} +func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } + +func (m *Sysctl) Reset() { *m = Sysctl{} } +func (*Sysctl) ProtoMessage() {} +func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } + +func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } +func (*TCPSocketAction) ProtoMessage() {} +func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } + +func (m *Taint) Reset() { *m = Taint{} } +func (*Taint) ProtoMessage() {} +func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } + +func (m *Toleration) Reset() { *m = Toleration{} } +func (*Toleration) ProtoMessage() {} +func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{160} } + +func (m *Volume) Reset() { *m = Volume{} } +func (*Volume) ProtoMessage() {} +func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } + +func (m *VolumeMount) Reset() { *m = VolumeMount{} } +func (*VolumeMount) ProtoMessage() {} +func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } + +func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } +func (*VolumeProjection) ProtoMessage() {} +func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } + +func (m *VolumeSource) Reset() { *m = VolumeSource{} } +func (*VolumeSource) ProtoMessage() {} +func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } + +func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } +func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} +func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{165} +} + +func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } +func (*WeightedPodAffinityTerm) ProtoMessage() {} +func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { + return 
fileDescriptorGenerated, []int{166} +} + +func init() { + proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.AWSElasticBlockStoreVolumeSource") + proto.RegisterType((*Affinity)(nil), "k8s.io.client-go.pkg.api.v1.Affinity") + proto.RegisterType((*AttachedVolume)(nil), "k8s.io.client-go.pkg.api.v1.AttachedVolume") + proto.RegisterType((*AvoidPods)(nil), "k8s.io.client-go.pkg.api.v1.AvoidPods") + proto.RegisterType((*AzureDiskVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.AzureDiskVolumeSource") + proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.AzureFileVolumeSource") + proto.RegisterType((*Binding)(nil), "k8s.io.client-go.pkg.api.v1.Binding") + proto.RegisterType((*Capabilities)(nil), "k8s.io.client-go.pkg.api.v1.Capabilities") + proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.CephFSVolumeSource") + proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.CinderVolumeSource") + proto.RegisterType((*ComponentCondition)(nil), "k8s.io.client-go.pkg.api.v1.ComponentCondition") + proto.RegisterType((*ComponentStatus)(nil), "k8s.io.client-go.pkg.api.v1.ComponentStatus") + proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.client-go.pkg.api.v1.ComponentStatusList") + proto.RegisterType((*ConfigMap)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMap") + proto.RegisterType((*ConfigMapEnvSource)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMapEnvSource") + proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMapKeySelector") + proto.RegisterType((*ConfigMapList)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMapList") + proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMapProjection") + proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.ConfigMapVolumeSource") + proto.RegisterType((*Container)(nil), "k8s.io.client-go.pkg.api.v1.Container") + proto.RegisterType((*ContainerImage)(nil), "k8s.io.client-go.pkg.api.v1.ContainerImage") + proto.RegisterType((*ContainerPort)(nil), "k8s.io.client-go.pkg.api.v1.ContainerPort") + proto.RegisterType((*ContainerState)(nil), "k8s.io.client-go.pkg.api.v1.ContainerState") + proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.client-go.pkg.api.v1.ContainerStateRunning") + proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.client-go.pkg.api.v1.ContainerStateTerminated") + proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.client-go.pkg.api.v1.ContainerStateWaiting") + proto.RegisterType((*ContainerStatus)(nil), "k8s.io.client-go.pkg.api.v1.ContainerStatus") + proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.client-go.pkg.api.v1.DaemonEndpoint") + proto.RegisterType((*DeleteOptions)(nil), "k8s.io.client-go.pkg.api.v1.DeleteOptions") + proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.client-go.pkg.api.v1.DownwardAPIProjection") + proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.client-go.pkg.api.v1.DownwardAPIVolumeFile") + proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.DownwardAPIVolumeSource") + proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.EmptyDirVolumeSource") + proto.RegisterType((*EndpointAddress)(nil), "k8s.io.client-go.pkg.api.v1.EndpointAddress") + proto.RegisterType((*EndpointPort)(nil), "k8s.io.client-go.pkg.api.v1.EndpointPort") + proto.RegisterType((*EndpointSubset)(nil), "k8s.io.client-go.pkg.api.v1.EndpointSubset") + 
proto.RegisterType((*Endpoints)(nil), "k8s.io.client-go.pkg.api.v1.Endpoints") + proto.RegisterType((*EndpointsList)(nil), "k8s.io.client-go.pkg.api.v1.EndpointsList") + proto.RegisterType((*EnvFromSource)(nil), "k8s.io.client-go.pkg.api.v1.EnvFromSource") + proto.RegisterType((*EnvVar)(nil), "k8s.io.client-go.pkg.api.v1.EnvVar") + proto.RegisterType((*EnvVarSource)(nil), "k8s.io.client-go.pkg.api.v1.EnvVarSource") + proto.RegisterType((*Event)(nil), "k8s.io.client-go.pkg.api.v1.Event") + proto.RegisterType((*EventList)(nil), "k8s.io.client-go.pkg.api.v1.EventList") + proto.RegisterType((*EventSource)(nil), "k8s.io.client-go.pkg.api.v1.EventSource") + proto.RegisterType((*ExecAction)(nil), "k8s.io.client-go.pkg.api.v1.ExecAction") + proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.FCVolumeSource") + proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.FlexVolumeSource") + proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.FlockerVolumeSource") + proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.GCEPersistentDiskVolumeSource") + proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.GitRepoVolumeSource") + proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.GlusterfsVolumeSource") + proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.client-go.pkg.api.v1.HTTPGetAction") + proto.RegisterType((*HTTPHeader)(nil), "k8s.io.client-go.pkg.api.v1.HTTPHeader") + proto.RegisterType((*Handler)(nil), "k8s.io.client-go.pkg.api.v1.Handler") + proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.HostPathVolumeSource") + proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.ISCSIVolumeSource") + proto.RegisterType((*KeyToPath)(nil), "k8s.io.client-go.pkg.api.v1.KeyToPath") + proto.RegisterType((*Lifecycle)(nil), "k8s.io.client-go.pkg.api.v1.Lifecycle") + proto.RegisterType((*LimitRange)(nil), "k8s.io.client-go.pkg.api.v1.LimitRange") + proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.client-go.pkg.api.v1.LimitRangeItem") + proto.RegisterType((*LimitRangeList)(nil), "k8s.io.client-go.pkg.api.v1.LimitRangeList") + proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.client-go.pkg.api.v1.LimitRangeSpec") + proto.RegisterType((*List)(nil), "k8s.io.client-go.pkg.api.v1.List") + proto.RegisterType((*ListOptions)(nil), "k8s.io.client-go.pkg.api.v1.ListOptions") + proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.client-go.pkg.api.v1.LoadBalancerIngress") + proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.client-go.pkg.api.v1.LoadBalancerStatus") + proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.client-go.pkg.api.v1.LocalObjectReference") + proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.NFSVolumeSource") + proto.RegisterType((*Namespace)(nil), "k8s.io.client-go.pkg.api.v1.Namespace") + proto.RegisterType((*NamespaceList)(nil), "k8s.io.client-go.pkg.api.v1.NamespaceList") + proto.RegisterType((*NamespaceSpec)(nil), "k8s.io.client-go.pkg.api.v1.NamespaceSpec") + proto.RegisterType((*NamespaceStatus)(nil), "k8s.io.client-go.pkg.api.v1.NamespaceStatus") + proto.RegisterType((*Node)(nil), "k8s.io.client-go.pkg.api.v1.Node") + proto.RegisterType((*NodeAddress)(nil), "k8s.io.client-go.pkg.api.v1.NodeAddress") + proto.RegisterType((*NodeAffinity)(nil), "k8s.io.client-go.pkg.api.v1.NodeAffinity") + proto.RegisterType((*NodeCondition)(nil), 
"k8s.io.client-go.pkg.api.v1.NodeCondition") + proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.client-go.pkg.api.v1.NodeDaemonEndpoints") + proto.RegisterType((*NodeList)(nil), "k8s.io.client-go.pkg.api.v1.NodeList") + proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.client-go.pkg.api.v1.NodeProxyOptions") + proto.RegisterType((*NodeResources)(nil), "k8s.io.client-go.pkg.api.v1.NodeResources") + proto.RegisterType((*NodeSelector)(nil), "k8s.io.client-go.pkg.api.v1.NodeSelector") + proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.client-go.pkg.api.v1.NodeSelectorRequirement") + proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.client-go.pkg.api.v1.NodeSelectorTerm") + proto.RegisterType((*NodeSpec)(nil), "k8s.io.client-go.pkg.api.v1.NodeSpec") + proto.RegisterType((*NodeStatus)(nil), "k8s.io.client-go.pkg.api.v1.NodeStatus") + proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.client-go.pkg.api.v1.NodeSystemInfo") + proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.client-go.pkg.api.v1.ObjectFieldSelector") + proto.RegisterType((*ObjectMeta)(nil), "k8s.io.client-go.pkg.api.v1.ObjectMeta") + proto.RegisterType((*ObjectReference)(nil), "k8s.io.client-go.pkg.api.v1.ObjectReference") + proto.RegisterType((*PersistentVolume)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolume") + proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeClaim") + proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeClaimList") + proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeClaimSpec") + proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeClaimStatus") + proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeClaimVolumeSource") + proto.RegisterType((*PersistentVolumeList)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeList") + proto.RegisterType((*PersistentVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeSource") + proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeSpec") + proto.RegisterType((*PersistentVolumeStatus)(nil), "k8s.io.client-go.pkg.api.v1.PersistentVolumeStatus") + proto.RegisterType((*PhotonPersistentDiskVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.PhotonPersistentDiskVolumeSource") + proto.RegisterType((*Pod)(nil), "k8s.io.client-go.pkg.api.v1.Pod") + proto.RegisterType((*PodAffinity)(nil), "k8s.io.client-go.pkg.api.v1.PodAffinity") + proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.client-go.pkg.api.v1.PodAffinityTerm") + proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.client-go.pkg.api.v1.PodAntiAffinity") + proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.client-go.pkg.api.v1.PodAttachOptions") + proto.RegisterType((*PodCondition)(nil), "k8s.io.client-go.pkg.api.v1.PodCondition") + proto.RegisterType((*PodExecOptions)(nil), "k8s.io.client-go.pkg.api.v1.PodExecOptions") + proto.RegisterType((*PodList)(nil), "k8s.io.client-go.pkg.api.v1.PodList") + proto.RegisterType((*PodLogOptions)(nil), "k8s.io.client-go.pkg.api.v1.PodLogOptions") + proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.client-go.pkg.api.v1.PodPortForwardOptions") + proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.client-go.pkg.api.v1.PodProxyOptions") + proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.client-go.pkg.api.v1.PodSecurityContext") + 
proto.RegisterType((*PodSignature)(nil), "k8s.io.client-go.pkg.api.v1.PodSignature") + proto.RegisterType((*PodSpec)(nil), "k8s.io.client-go.pkg.api.v1.PodSpec") + proto.RegisterType((*PodStatus)(nil), "k8s.io.client-go.pkg.api.v1.PodStatus") + proto.RegisterType((*PodStatusResult)(nil), "k8s.io.client-go.pkg.api.v1.PodStatusResult") + proto.RegisterType((*PodTemplate)(nil), "k8s.io.client-go.pkg.api.v1.PodTemplate") + proto.RegisterType((*PodTemplateList)(nil), "k8s.io.client-go.pkg.api.v1.PodTemplateList") + proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.client-go.pkg.api.v1.PodTemplateSpec") + proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.PortworxVolumeSource") + proto.RegisterType((*Preconditions)(nil), "k8s.io.client-go.pkg.api.v1.Preconditions") + proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.client-go.pkg.api.v1.PreferAvoidPodsEntry") + proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.client-go.pkg.api.v1.PreferredSchedulingTerm") + proto.RegisterType((*Probe)(nil), "k8s.io.client-go.pkg.api.v1.Probe") + proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.ProjectedVolumeSource") + proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.QuobyteVolumeSource") + proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.RBDVolumeSource") + proto.RegisterType((*RangeAllocation)(nil), "k8s.io.client-go.pkg.api.v1.RangeAllocation") + proto.RegisterType((*ReplicationController)(nil), "k8s.io.client-go.pkg.api.v1.ReplicationController") + proto.RegisterType((*ReplicationControllerCondition)(nil), "k8s.io.client-go.pkg.api.v1.ReplicationControllerCondition") + proto.RegisterType((*ReplicationControllerList)(nil), "k8s.io.client-go.pkg.api.v1.ReplicationControllerList") + proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.client-go.pkg.api.v1.ReplicationControllerSpec") + proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.client-go.pkg.api.v1.ReplicationControllerStatus") + proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.client-go.pkg.api.v1.ResourceFieldSelector") + proto.RegisterType((*ResourceQuota)(nil), "k8s.io.client-go.pkg.api.v1.ResourceQuota") + proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.client-go.pkg.api.v1.ResourceQuotaList") + proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.client-go.pkg.api.v1.ResourceQuotaSpec") + proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.client-go.pkg.api.v1.ResourceQuotaStatus") + proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.client-go.pkg.api.v1.ResourceRequirements") + proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.client-go.pkg.api.v1.SELinuxOptions") + proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.ScaleIOVolumeSource") + proto.RegisterType((*Secret)(nil), "k8s.io.client-go.pkg.api.v1.Secret") + proto.RegisterType((*SecretEnvSource)(nil), "k8s.io.client-go.pkg.api.v1.SecretEnvSource") + proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.client-go.pkg.api.v1.SecretKeySelector") + proto.RegisterType((*SecretList)(nil), "k8s.io.client-go.pkg.api.v1.SecretList") + proto.RegisterType((*SecretProjection)(nil), "k8s.io.client-go.pkg.api.v1.SecretProjection") + proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.SecretVolumeSource") + proto.RegisterType((*SecurityContext)(nil), "k8s.io.client-go.pkg.api.v1.SecurityContext") + proto.RegisterType((*SerializedReference)(nil), 
"k8s.io.client-go.pkg.api.v1.SerializedReference") + proto.RegisterType((*Service)(nil), "k8s.io.client-go.pkg.api.v1.Service") + proto.RegisterType((*ServiceAccount)(nil), "k8s.io.client-go.pkg.api.v1.ServiceAccount") + proto.RegisterType((*ServiceAccountList)(nil), "k8s.io.client-go.pkg.api.v1.ServiceAccountList") + proto.RegisterType((*ServiceList)(nil), "k8s.io.client-go.pkg.api.v1.ServiceList") + proto.RegisterType((*ServicePort)(nil), "k8s.io.client-go.pkg.api.v1.ServicePort") + proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.client-go.pkg.api.v1.ServiceProxyOptions") + proto.RegisterType((*ServiceSpec)(nil), "k8s.io.client-go.pkg.api.v1.ServiceSpec") + proto.RegisterType((*ServiceStatus)(nil), "k8s.io.client-go.pkg.api.v1.ServiceStatus") + proto.RegisterType((*Sysctl)(nil), "k8s.io.client-go.pkg.api.v1.Sysctl") + proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.client-go.pkg.api.v1.TCPSocketAction") + proto.RegisterType((*Taint)(nil), "k8s.io.client-go.pkg.api.v1.Taint") + proto.RegisterType((*Toleration)(nil), "k8s.io.client-go.pkg.api.v1.Toleration") + proto.RegisterType((*Volume)(nil), "k8s.io.client-go.pkg.api.v1.Volume") + proto.RegisterType((*VolumeMount)(nil), "k8s.io.client-go.pkg.api.v1.VolumeMount") + proto.RegisterType((*VolumeProjection)(nil), "k8s.io.client-go.pkg.api.v1.VolumeProjection") + proto.RegisterType((*VolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.VolumeSource") + proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.client-go.pkg.api.v1.VsphereVirtualDiskVolumeSource") + proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.client-go.pkg.api.v1.WeightedPodAffinityTerm") +} +func (m *AWSElasticBlockStoreVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *AWSElasticBlockStoreVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.VolumeID))) + i += copy(data[i:], m.VolumeID) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Partition)) + data[i] = 0x20 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *Affinity) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Affinity) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NodeAffinity != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.NodeAffinity.Size())) + n1, err := m.NodeAffinity.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.PodAffinity != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodAffinity.Size())) + n2, err := m.PodAffinity.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.PodAntiAffinity != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodAntiAffinity.Size())) + n3, err := m.PodAntiAffinity.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} + +func (m *AttachedVolume) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := 
m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *AttachedVolume) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.DevicePath))) + i += copy(data[i:], m.DevicePath) + return i, nil +} + +func (m *AvoidPods) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *AvoidPods) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.PreferAvoidPods) > 0 { + for _, msg := range m.PreferAvoidPods { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *AzureDiskVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *AzureDiskVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.DiskName))) + i += copy(data[i:], m.DiskName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.DataDiskURI))) + i += copy(data[i:], m.DataDiskURI) + if m.CachingMode != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.CachingMode))) + i += copy(data[i:], *m.CachingMode) + } + if m.FSType != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.FSType))) + i += copy(data[i:], *m.FSType) + } + if m.ReadOnly != nil { + data[i] = 0x28 + i++ + if *m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *AzureFileVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *AzureFileVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SecretName))) + i += copy(data[i:], m.SecretName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ShareName))) + i += copy(data[i:], m.ShareName) + data[i] = 0x18 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *Binding) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Binding) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) + n5, err := m.Target.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *Capabilities) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, 
err + } + return data[:n], nil +} + +func (m *Capabilities) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Add) > 0 { + for _, s := range m.Add { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Drop) > 0 { + for _, s := range m.Drop { + data[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *CephFSVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CephFSVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.User))) + i += copy(data[i:], m.User) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SecretFile))) + i += copy(data[i:], m.SecretFile) + if m.SecretRef != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) + n6, err := m.SecretRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + } + data[i] = 0x30 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *CinderVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CinderVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.VolumeID))) + i += copy(data[i:], m.VolumeID) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x18 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *ComponentCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ComponentCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Error))) + i += copy(data[i:], m.Error) + return i, nil +} + +func (m *ComponentStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ComponentStatus) MarshalTo(data []byte) 
(int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ComponentStatusList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ComponentStatusList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n8, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ConfigMap) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ConfigMap) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + if len(m.Data) > 0 { + for k := range m.Data { + data[i] = 0x12 + i++ + v := m.Data[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + return i, nil +} + +func (m *ConfigMapEnvSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ConfigMapEnvSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) + n10, err := m.LocalObjectReference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + if m.Optional != nil { + data[i] = 0x10 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ConfigMapKeySelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ConfigMapKeySelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) + n11, err := m.LocalObjectReference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + if m.Optional != nil { + data[i] = 0x18 + i++ + if 
*m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ConfigMapList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ConfigMapList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n12, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ConfigMapProjection) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ConfigMapProjection) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) + n13, err := m.LocalObjectReference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Optional != nil { + data[i] = 0x20 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ConfigMapVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ConfigMapVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) + n14, err := m.LocalObjectReference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n14 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.DefaultMode != nil { + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.DefaultMode)) + } + if m.Optional != nil { + data[i] = 0x20 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *Container) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Container) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Image))) + i += copy(data[i:], m.Image) + if len(m.Command) > 0 { + for _, s := range m.Command { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + data[i] 
= 0x22 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.WorkingDir))) + i += copy(data[i:], m.WorkingDir) + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Env) > 0 { + for _, msg := range m.Env { + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Resources.Size())) + n15, err := m.Resources.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n15 + if len(m.VolumeMounts) > 0 { + for _, msg := range m.VolumeMounts { + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.LivenessProbe != nil { + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LivenessProbe.Size())) + n16, err := m.LivenessProbe.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.ReadinessProbe != nil { + data[i] = 0x5a + i++ + i = encodeVarintGenerated(data, i, uint64(m.ReadinessProbe.Size())) + n17, err := m.ReadinessProbe.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if m.Lifecycle != nil { + data[i] = 0x62 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Lifecycle.Size())) + n18, err := m.Lifecycle.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n18 + } + data[i] = 0x6a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.TerminationMessagePath))) + i += copy(data[i:], m.TerminationMessagePath) + data[i] = 0x72 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ImagePullPolicy))) + i += copy(data[i:], m.ImagePullPolicy) + if m.SecurityContext != nil { + data[i] = 0x7a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecurityContext.Size())) + n19, err := m.SecurityContext.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n19 + } + data[i] = 0x80 + i++ + data[i] = 0x1 + i++ + if m.Stdin { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x88 + i++ + data[i] = 0x1 + i++ + if m.StdinOnce { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x90 + i++ + data[i] = 0x1 + i++ + if m.TTY { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if len(m.EnvFrom) > 0 { + for _, msg := range m.EnvFrom { + data[i] = 0x9a + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0xa2 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.TerminationMessagePolicy))) + i += copy(data[i:], m.TerminationMessagePolicy) + return i, nil +} + +func (m *ContainerImage) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerImage) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = 
uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SizeBytes)) + return i, nil +} + +func (m *ContainerPort) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerPort) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.HostPort)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ContainerPort)) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Protocol))) + i += copy(data[i:], m.Protocol) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.HostIP))) + i += copy(data[i:], m.HostIP) + return i, nil +} + +func (m *ContainerState) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerState) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Waiting != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Waiting.Size())) + n20, err := m.Waiting.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n20 + } + if m.Running != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Running.Size())) + n21, err := m.Running.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if m.Terminated != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Terminated.Size())) + n22, err := m.Terminated.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n22 + } + return i, nil +} + +func (m *ContainerStateRunning) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerStateRunning) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.StartedAt.Size())) + n23, err := m.StartedAt.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n23 + return i, nil +} + +func (m *ContainerStateTerminated) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerStateTerminated) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ExitCode)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Signal)) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.StartedAt.Size())) + n24, err := m.StartedAt.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n24 + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FinishedAt.Size())) + n25, err := 
m.FinishedAt.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n25 + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContainerID))) + i += copy(data[i:], m.ContainerID) + return i, nil +} + +func (m *ContainerStateWaiting) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerStateWaiting) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *ContainerStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ContainerStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.State.Size())) + n26, err := m.State.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n26 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTerminationState.Size())) + n27, err := m.LastTerminationState.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n27 + data[i] = 0x20 + i++ + if m.Ready { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RestartCount)) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Image))) + i += copy(data[i:], m.Image) + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ImageID))) + i += copy(data[i:], m.ImageID) + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContainerID))) + i += copy(data[i:], m.ContainerID) + return i, nil +} + +func (m *DaemonEndpoint) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DaemonEndpoint) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Port)) + return i, nil +} + +func (m *DeleteOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeleteOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.GracePeriodSeconds != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.GracePeriodSeconds)) + } + if m.Preconditions != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Preconditions.Size())) + n28, err := m.Preconditions.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n28 + } + if m.OrphanDependents != nil { + data[i] = 0x18 + i++ + if *m.OrphanDependents { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + if m.PropagationPolicy != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.PropagationPolicy))) + i += copy(data[i:], *m.PropagationPolicy) + } + return i, 
nil +} + +func (m *DownwardAPIProjection) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DownwardAPIProjection) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DownwardAPIVolumeFile) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DownwardAPIVolumeFile) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + if m.FieldRef != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FieldRef.Size())) + n29, err := m.FieldRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n29 + } + if m.ResourceFieldRef != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.ResourceFieldRef.Size())) + n30, err := m.ResourceFieldRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n30 + } + if m.Mode != nil { + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Mode)) + } + return i, nil +} + +func (m *DownwardAPIVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DownwardAPIVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.DefaultMode != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.DefaultMode)) + } + return i, nil +} + +func (m *EmptyDirVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EmptyDirVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Medium))) + i += copy(data[i:], m.Medium) + return i, nil +} + +func (m *EndpointAddress) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EndpointAddress) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.IP))) + i += copy(data[i:], m.IP) + if m.TargetRef != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetRef.Size())) + n31, err := m.TargetRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n31 + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Hostname))) + i += copy(data[i:], m.Hostname) + if m.NodeName != nil { + data[i] = 0x22 + i++ + i = 
encodeVarintGenerated(data, i, uint64(len(*m.NodeName))) + i += copy(data[i:], *m.NodeName) + } + return i, nil +} + +func (m *EndpointPort) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EndpointPort) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Port)) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Protocol))) + i += copy(data[i:], m.Protocol) + return i, nil +} + +func (m *EndpointSubset) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EndpointSubset) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Addresses) > 0 { + for _, msg := range m.Addresses { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.NotReadyAddresses) > 0 { + for _, msg := range m.NotReadyAddresses { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Endpoints) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Endpoints) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n32, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n32 + if len(m.Subsets) > 0 { + for _, msg := range m.Subsets { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *EndpointsList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EndpointsList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n33, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n33 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *EnvFromSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EnvFromSource) MarshalTo(data []byte) (int, error) { + var i int 
+ _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Prefix))) + i += copy(data[i:], m.Prefix) + if m.ConfigMapRef != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ConfigMapRef.Size())) + n34, err := m.ConfigMapRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n34 + } + if m.SecretRef != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) + n35, err := m.SecretRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n35 + } + return i, nil +} + +func (m *EnvVar) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EnvVar) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Value))) + i += copy(data[i:], m.Value) + if m.ValueFrom != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.ValueFrom.Size())) + n36, err := m.ValueFrom.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n36 + } + return i, nil +} + +func (m *EnvVarSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EnvVarSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.FieldRef != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.FieldRef.Size())) + n37, err := m.FieldRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n37 + } + if m.ResourceFieldRef != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ResourceFieldRef.Size())) + n38, err := m.ResourceFieldRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n38 + } + if m.ConfigMapKeyRef != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.ConfigMapKeyRef.Size())) + n39, err := m.ConfigMapKeyRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n39 + } + if m.SecretKeyRef != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecretKeyRef.Size())) + n40, err := m.SecretKeyRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n40 + } + return i, nil +} + +func (m *Event) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Event) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n41, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n41 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.InvolvedObject.Size())) + n42, err := m.InvolvedObject.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n42 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, 
uint64(m.Source.Size())) + n43, err := m.Source.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n43 + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FirstTimestamp.Size())) + n44, err := m.FirstTimestamp.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n44 + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTimestamp.Size())) + n45, err := m.LastTimestamp.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n45 + data[i] = 0x40 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Count)) + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + return i, nil +} + +func (m *EventList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EventList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n46, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n46 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *EventSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *EventSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Component))) + i += copy(data[i:], m.Component) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Host))) + i += copy(data[i:], m.Host) + return i, nil +} + +func (m *ExecAction) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ExecAction) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *FCVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *FCVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TargetWWNs) > 0 { + for _, s := range m.TargetWWNs { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if m.Lun != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Lun)) + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x20 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *FlexVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := 
m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *FlexVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Driver))) + i += copy(data[i:], m.Driver) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + if m.SecretRef != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) + n47, err := m.SecretRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n47 + } + data[i] = 0x20 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if len(m.Options) > 0 { + for k := range m.Options { + data[i] = 0x2a + i++ + v := m.Options[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + return i, nil +} + +func (m *FlockerVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *FlockerVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.DatasetName))) + i += copy(data[i:], m.DatasetName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.DatasetUUID))) + i += copy(data[i:], m.DatasetUUID) + return i, nil +} + +func (m *GCEPersistentDiskVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GCEPersistentDiskVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.PDName))) + i += copy(data[i:], m.PDName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Partition)) + data[i] = 0x20 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *GitRepoVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GitRepoVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Repository))) + i += copy(data[i:], m.Repository) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Revision))) + i += copy(data[i:], m.Revision) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Directory))) + i += copy(data[i:], m.Directory) + return i, nil +} + +func (m *GlusterfsVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *GlusterfsVolumeSource) MarshalTo(data []byte) (int, error) { + 
var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.EndpointsName))) + i += copy(data[i:], m.EndpointsName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + data[i] = 0x18 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *HTTPGetAction) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HTTPGetAction) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Port.Size())) + n48, err := m.Port.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n48 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Host))) + i += copy(data[i:], m.Host) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Scheme))) + i += copy(data[i:], m.Scheme) + if len(m.HTTPHeaders) > 0 { + for _, msg := range m.HTTPHeaders { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HTTPHeader) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HTTPHeader) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Value))) + i += copy(data[i:], m.Value) + return i, nil +} + +func (m *Handler) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Handler) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Exec != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Exec.Size())) + n49, err := m.Exec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n49 + } + if m.HTTPGet != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.HTTPGet.Size())) + n50, err := m.HTTPGet.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n50 + } + if m.TCPSocket != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.TCPSocket.Size())) + n51, err := m.TCPSocket.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n51 + } + return i, nil +} + +func (m *HostPathVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HostPathVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + return i, nil +} + +func (m *ISCSIVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if 
err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ISCSIVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.TargetPortal))) + i += copy(data[i:], m.TargetPortal) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.IQN))) + i += copy(data[i:], m.IQN) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Lun)) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ISCSIInterface))) + i += copy(data[i:], m.ISCSIInterface) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x30 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if len(m.Portals) > 0 { + for _, s := range m.Portals { + data[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *KeyToPath) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *KeyToPath) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + if m.Mode != nil { + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Mode)) + } + return i, nil +} + +func (m *Lifecycle) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Lifecycle) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PostStart != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.PostStart.Size())) + n52, err := m.PostStart.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n52 + } + if m.PreStop != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PreStop.Size())) + n53, err := m.PreStop.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n53 + } + return i, nil +} + +func (m *LimitRange) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LimitRange) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n54, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n54 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n55, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n55 + return i, nil +} + +func (m *LimitRangeItem) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LimitRangeItem) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], 
m.Type) + if len(m.Max) > 0 { + for k := range m.Max { + data[i] = 0x12 + i++ + v := m.Max[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n56, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n56 + } + } + if len(m.Min) > 0 { + for k := range m.Min { + data[i] = 0x1a + i++ + v := m.Min[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n57, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n57 + } + } + if len(m.Default) > 0 { + for k := range m.Default { + data[i] = 0x22 + i++ + v := m.Default[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n58, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n58 + } + } + if len(m.DefaultRequest) > 0 { + for k := range m.DefaultRequest { + data[i] = 0x2a + i++ + v := m.DefaultRequest[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n59, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n59 + } + } + if len(m.MaxLimitRequestRatio) > 0 { + for k := range m.MaxLimitRequestRatio { + data[i] = 0x32 + i++ + v := m.MaxLimitRequestRatio[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n60, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n60 + } + } + return i, nil +} + +func (m *LimitRangeList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LimitRangeList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n61, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n61 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } 
+ } + return i, nil +} + +func (m *LimitRangeSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LimitRangeSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Limits) > 0 { + for _, msg := range m.Limits { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *List) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *List) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n62, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n62 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ListOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ListOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.LabelSelector))) + i += copy(data[i:], m.LabelSelector) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FieldSelector))) + i += copy(data[i:], m.FieldSelector) + data[i] = 0x18 + i++ + if m.Watch { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) + i += copy(data[i:], m.ResourceVersion) + if m.TimeoutSeconds != nil { + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TimeoutSeconds)) + } + return i, nil +} + +func (m *LoadBalancerIngress) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LoadBalancerIngress) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.IP))) + i += copy(data[i:], m.IP) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Hostname))) + i += copy(data[i:], m.Hostname) + return i, nil +} + +func (m *LoadBalancerStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LoadBalancerStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ingress) > 0 { + for _, msg := range m.Ingress { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *LocalObjectReference) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + 
} + return data[:n], nil +} + +func (m *LocalObjectReference) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + return i, nil +} + +func (m *NFSVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NFSVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Server))) + i += copy(data[i:], m.Server) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + data[i] = 0x18 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *Namespace) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Namespace) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n63, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n63 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n64, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n64 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n65, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n65 + return i, nil +} + +func (m *NamespaceList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NamespaceList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n66, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n66 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NamespaceSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NamespaceSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *NamespaceStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NamespaceStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) + i += copy(data[i:], m.Phase) + return i, nil +} + +func (m *Node) 
Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Node) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n67, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n67 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n68, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n68 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n69, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n69 + return i, nil +} + +func (m *NodeAddress) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeAddress) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Address))) + i += copy(data[i:], m.Address) + return i, nil +} + +func (m *NodeAffinity) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeAffinity) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) + n70, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n70 + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastHeartbeatTime.Size())) + n71, err := m.LastHeartbeatTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n71 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n72, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n72 + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m 
*NodeDaemonEndpoints) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeDaemonEndpoints) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.KubeletEndpoint.Size())) + n73, err := m.KubeletEndpoint.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n73 + return i, nil +} + +func (m *NodeList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n74, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n74 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeProxyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeProxyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + return i, nil +} + +func (m *NodeResources) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeResources) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Capacity) > 0 { + for k := range m.Capacity { + data[i] = 0xa + i++ + v := m.Capacity[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n75, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n75 + } + } + return i, nil +} + +func (m *NodeSelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeSelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeSelectorTerms) > 0 { + for _, msg := range m.NodeSelectorTerms { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeSelectorRequirement) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeSelectorRequirement) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = 
encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) + i += copy(data[i:], m.Operator) + if len(m.Values) > 0 { + for _, s := range m.Values { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *NodeSelectorTerm) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeSelectorTerm) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for _, msg := range m.MatchExpressions { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.PodCIDR))) + i += copy(data[i:], m.PodCIDR) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ExternalID))) + i += copy(data[i:], m.ExternalID) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ProviderID))) + i += copy(data[i:], m.ProviderID) + data[i] = 0x20 + i++ + if m.Unschedulable { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if len(m.Taints) > 0 { + for _, msg := range m.Taints { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Capacity) > 0 { + for k := range m.Capacity { + data[i] = 0xa + i++ + v := m.Capacity[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n76, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n76 + } + } + if len(m.Allocatable) > 0 { + for k := range m.Allocatable { + data[i] = 0x12 + i++ + v := m.Allocatable[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n77, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n77 + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) + 
i += copy(data[i:], m.Phase) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Addresses) > 0 { + for _, msg := range m.Addresses { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DaemonEndpoints.Size())) + n78, err := m.DaemonEndpoints.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n78 + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.NodeInfo.Size())) + n79, err := m.NodeInfo.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n79 + if len(m.Images) > 0 { + for _, msg := range m.Images { + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.VolumesInUse) > 0 { + for _, s := range m.VolumesInUse { + data[i] = 0x4a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.VolumesAttached) > 0 { + for _, msg := range m.VolumesAttached { + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeSystemInfo) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NodeSystemInfo) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MachineID))) + i += copy(data[i:], m.MachineID) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SystemUUID))) + i += copy(data[i:], m.SystemUUID) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.BootID))) + i += copy(data[i:], m.BootID) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.KernelVersion))) + i += copy(data[i:], m.KernelVersion) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.OSImage))) + i += copy(data[i:], m.OSImage) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContainerRuntimeVersion))) + i += copy(data[i:], m.ContainerRuntimeVersion) + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.KubeletVersion))) + i += copy(data[i:], m.KubeletVersion) + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.KubeProxyVersion))) + i += copy(data[i:], m.KubeProxyVersion) + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.OperatingSystem))) + i += copy(data[i:], m.OperatingSystem) + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Architecture))) + i += copy(data[i:], m.Architecture) + return i, nil +} + +func (m *ObjectFieldSelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectFieldSelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + 
data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FieldPath))) + i += copy(data[i:], m.FieldPath) + return i, nil +} + +func (m *ObjectMeta) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectMeta) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.GenerateName))) + i += copy(data[i:], m.GenerateName) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) + i += copy(data[i:], m.Namespace) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SelfLink))) + i += copy(data[i:], m.SelfLink) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.UID))) + i += copy(data[i:], m.UID) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) + i += copy(data[i:], m.ResourceVersion) + data[i] = 0x38 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Generation)) + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CreationTimestamp.Size())) + n80, err := m.CreationTimestamp.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n80 + if m.DeletionTimestamp != nil { + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(m.DeletionTimestamp.Size())) + n81, err := m.DeletionTimestamp.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n81 + } + if m.DeletionGracePeriodSeconds != nil { + data[i] = 0x50 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.DeletionGracePeriodSeconds)) + } + if len(m.Labels) > 0 { + for k := range m.Labels { + data[i] = 0x5a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if len(m.Annotations) > 0 { + for k := range m.Annotations { + data[i] = 0x62 + i++ + v := m.Annotations[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if len(m.OwnerReferences) > 0 { + for _, msg := range m.OwnerReferences { + data[i] = 0x6a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + data[i] = 0x72 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x7a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ClusterName))) + i += copy(data[i:], m.ClusterName) + return i, nil +} + +func (m *ObjectReference) Marshal() (data 
[]byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectReference) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) + i += copy(data[i:], m.Namespace) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.UID))) + i += copy(data[i:], m.UID) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) + i += copy(data[i:], m.ResourceVersion) + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FieldPath))) + i += copy(data[i:], m.FieldPath) + return i, nil +} + +func (m *PersistentVolume) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolume) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n82, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n82 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n83, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n83 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n84, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n84 + return i, nil +} + +func (m *PersistentVolumeClaim) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeClaim) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n85, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n85 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n86, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n86 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n87, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n87 + return i, nil +} + +func (m *PersistentVolumeClaimList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeClaimList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n88, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n88 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := 
msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PersistentVolumeClaimSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeClaimSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Resources.Size())) + n89, err := m.Resources.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n89 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.VolumeName))) + i += copy(data[i:], m.VolumeName) + if m.Selector != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n90, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n90 + } + if m.StorageClassName != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.StorageClassName))) + i += copy(data[i:], *m.StorageClassName) + } + return i, nil +} + +func (m *PersistentVolumeClaimStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeClaimStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) + i += copy(data[i:], m.Phase) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + data[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Capacity) > 0 { + for k := range m.Capacity { + data[i] = 0x1a + i++ + v := m.Capacity[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n91, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n91 + } + } + return i, nil +} + +func (m *PersistentVolumeClaimVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeClaimVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ClaimName))) + i += copy(data[i:], m.ClaimName) + data[i] = 0x10 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *PersistentVolumeList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + 
_ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n92, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n92 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PersistentVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.GCEPersistentDisk != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.GCEPersistentDisk.Size())) + n93, err := m.GCEPersistentDisk.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n93 + } + if m.AWSElasticBlockStore != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AWSElasticBlockStore.Size())) + n94, err := m.AWSElasticBlockStore.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n94 + } + if m.HostPath != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.HostPath.Size())) + n95, err := m.HostPath.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n95 + } + if m.Glusterfs != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Glusterfs.Size())) + n96, err := m.Glusterfs.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n96 + } + if m.NFS != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.NFS.Size())) + n97, err := m.NFS.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n97 + } + if m.RBD != nil { + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RBD.Size())) + n98, err := m.RBD.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n98 + } + if m.ISCSI != nil { + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.ISCSI.Size())) + n99, err := m.ISCSI.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n99 + } + if m.Cinder != nil { + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Cinder.Size())) + n100, err := m.Cinder.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n100 + } + if m.CephFS != nil { + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CephFS.Size())) + n101, err := m.CephFS.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n101 + } + if m.FC != nil { + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FC.Size())) + n102, err := m.FC.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n102 + } + if m.Flocker != nil { + data[i] = 0x5a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Flocker.Size())) + n103, err := m.Flocker.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n103 + } + if m.FlexVolume != nil { + data[i] = 0x62 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FlexVolume.Size())) + n104, err := m.FlexVolume.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n104 + } + if m.AzureFile != nil { + data[i] = 0x6a + i++ + i = encodeVarintGenerated(data, i, uint64(m.AzureFile.Size())) + n105, err := m.AzureFile.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n105 + } + if m.VsphereVolume != nil { + data[i] = 0x72 + 
i++ + i = encodeVarintGenerated(data, i, uint64(m.VsphereVolume.Size())) + n106, err := m.VsphereVolume.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n106 + } + if m.Quobyte != nil { + data[i] = 0x7a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Quobyte.Size())) + n107, err := m.Quobyte.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n107 + } + if m.AzureDisk != nil { + data[i] = 0x82 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AzureDisk.Size())) + n108, err := m.AzureDisk.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n108 + } + if m.PhotonPersistentDisk != nil { + data[i] = 0x8a + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PhotonPersistentDisk.Size())) + n109, err := m.PhotonPersistentDisk.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n109 + } + if m.PortworxVolume != nil { + data[i] = 0x92 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PortworxVolume.Size())) + n110, err := m.PortworxVolume.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n110 + } + if m.ScaleIO != nil { + data[i] = 0x9a + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ScaleIO.Size())) + n111, err := m.ScaleIO.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n111 + } + return i, nil +} + +func (m *PersistentVolumeSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Capacity) > 0 { + for k := range m.Capacity { + data[i] = 0xa + i++ + v := m.Capacity[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n112, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n112 + } + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PersistentVolumeSource.Size())) + n113, err := m.PersistentVolumeSource.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n113 + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if m.ClaimRef != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ClaimRef.Size())) + n114, err := m.ClaimRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n114 + } + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.PersistentVolumeReclaimPolicy))) + i += copy(data[i:], m.PersistentVolumeReclaimPolicy) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.StorageClassName))) + i += copy(data[i:], m.StorageClassName) + return i, nil +} + +func (m *PersistentVolumeStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PersistentVolumeStatus) MarshalTo(data []byte) (int, error) { + 
var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) + i += copy(data[i:], m.Phase) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + return i, nil +} + +func (m *PhotonPersistentDiskVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PhotonPersistentDiskVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.PdID))) + i += copy(data[i:], m.PdID) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + return i, nil +} + +func (m *Pod) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Pod) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n115, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n115 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n116, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n116 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n117, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n117 + return i, nil +} + +func (m *PodAffinity) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodAffinity) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodAffinityTerm) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodAffinityTerm) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.LabelSelector != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LabelSelector.Size())) + n118, err := m.LabelSelector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n118 + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + data[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += 
copy(data[i:], s) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.TopologyKey))) + i += copy(data[i:], m.TopologyKey) + return i, nil +} + +func (m *PodAntiAffinity) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodAntiAffinity) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodAttachOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodAttachOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Stdin { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x10 + i++ + if m.Stdout { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x18 + i++ + if m.Stderr { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x20 + i++ + if m.TTY { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Container))) + i += copy(data[i:], m.Container) + return i, nil +} + +func (m *PodCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) + n119, err := m.LastProbeTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n119 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n120, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n120 + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *PodExecOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodExecOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Stdin { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x10 + i++ + if m.Stdout { + 
data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x18 + i++ + if m.Stderr { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x20 + i++ + if m.TTY { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Container))) + i += copy(data[i:], m.Container) + if len(m.Command) > 0 { + for _, s := range m.Command { + data[i] = 0x32 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *PodList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n121, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n121 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodLogOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodLogOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Container))) + i += copy(data[i:], m.Container) + data[i] = 0x10 + i++ + if m.Follow { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x18 + i++ + if m.Previous { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if m.SinceSeconds != nil { + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SinceTime.Size())) + n122, err := m.SinceTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n122 + } + data[i] = 0x30 + i++ + if m.Timestamps { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if m.TailLines != nil { + data[i] = 0x38 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + data[i] = 0x40 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.LimitBytes)) + } + return i, nil +} + +func (m *PodPortForwardOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodPortForwardOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for _, num := range m.Ports { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(num)) + } + } + return i, nil +} + +func (m *PodProxyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodProxyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + return i, nil +} + +func (m 
*PodSecurityContext) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodSecurityContext) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.SELinuxOptions != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size())) + n123, err := m.SELinuxOptions.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n123 + } + if m.RunAsUser != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + data[i] = 0x18 + i++ + if *m.RunAsNonRoot { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + if len(m.SupplementalGroups) > 0 { + for _, num := range m.SupplementalGroups { + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(num)) + } + } + if m.FSGroup != nil { + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.FSGroup)) + } + return i, nil +} + +func (m *PodSignature) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodSignature) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PodController != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodController.Size())) + n124, err := m.PodController.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n124 + } + return i, nil +} + +func (m *PodSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Volumes) > 0 { + for _, msg := range m.Volumes { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Containers) > 0 { + for _, msg := range m.Containers { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.RestartPolicy))) + i += copy(data[i:], m.RestartPolicy) + if m.TerminationGracePeriodSeconds != nil { + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TerminationGracePeriodSeconds)) + } + if m.ActiveDeadlineSeconds != nil { + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ActiveDeadlineSeconds)) + } + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.DNSPolicy))) + i += copy(data[i:], m.DNSPolicy) + if len(m.NodeSelector) > 0 { + for k := range m.NodeSelector { + data[i] = 0x3a + i++ + v := m.NodeSelector[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ServiceAccountName))) + i += copy(data[i:], m.ServiceAccountName) + data[i] 
= 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.DeprecatedServiceAccount))) + i += copy(data[i:], m.DeprecatedServiceAccount) + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.NodeName))) + i += copy(data[i:], m.NodeName) + data[i] = 0x58 + i++ + if m.HostNetwork { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x60 + i++ + if m.HostPID { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x68 + i++ + if m.HostIPC { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if m.SecurityContext != nil { + data[i] = 0x72 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecurityContext.Size())) + n125, err := m.SecurityContext.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n125 + } + if len(m.ImagePullSecrets) > 0 { + for _, msg := range m.ImagePullSecrets { + data[i] = 0x7a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x82 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Hostname))) + i += copy(data[i:], m.Hostname) + data[i] = 0x8a + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Subdomain))) + i += copy(data[i:], m.Subdomain) + if m.Affinity != nil { + data[i] = 0x92 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Affinity.Size())) + n126, err := m.Affinity.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n126 + } + data[i] = 0x9a + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SchedulerName))) + i += copy(data[i:], m.SchedulerName) + if len(m.InitContainers) > 0 { + for _, msg := range m.InitContainers { + data[i] = 0xa2 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.AutomountServiceAccountToken != nil { + data[i] = 0xa8 + i++ + data[i] = 0x1 + i++ + if *m.AutomountServiceAccountToken { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + if len(m.Tolerations) > 0 { + for _, msg := range m.Tolerations { + data[i] = 0xb2 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) + i += copy(data[i:], m.Phase) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.HostIP))) + i += copy(data[i:], m.HostIP) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.PodIP))) + i += copy(data[i:], m.PodIP) + if m.StartTime != nil { + 
data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size())) + n127, err := m.StartTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n127 + } + if len(m.ContainerStatuses) > 0 { + for _, msg := range m.ContainerStatuses { + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.QOSClass))) + i += copy(data[i:], m.QOSClass) + if len(m.InitContainerStatuses) > 0 { + for _, msg := range m.InitContainerStatuses { + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodStatusResult) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodStatusResult) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n128, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n128 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n129, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n129 + return i, nil +} + +func (m *PodTemplate) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodTemplate) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n130, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n130 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n131, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n131 + return i, nil +} + +func (m *PodTemplateList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodTemplateList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n132, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n132 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodTemplateSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodTemplateSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n133, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n133 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, 
uint64(m.Spec.Size())) + n134, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n134 + return i, nil +} + +func (m *PortworxVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PortworxVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.VolumeID))) + i += copy(data[i:], m.VolumeID) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x18 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *Preconditions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Preconditions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.UID != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.UID))) + i += copy(data[i:], *m.UID) + } + return i, nil +} + +func (m *PreferAvoidPodsEntry) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PreferAvoidPodsEntry) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodSignature.Size())) + n135, err := m.PodSignature.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n135 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.EvictionTime.Size())) + n136, err := m.EvictionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n136 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *PreferredSchedulingTerm) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PreferredSchedulingTerm) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Weight)) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Preference.Size())) + n137, err := m.Preference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n137 + return i, nil +} + +func (m *Probe) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Probe) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Handler.Size())) + n138, err := m.Handler.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n138 + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.InitialDelaySeconds)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TimeoutSeconds)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, 
uint64(m.PeriodSeconds)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SuccessThreshold)) + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FailureThreshold)) + return i, nil +} + +func (m *ProjectedVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ProjectedVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Sources) > 0 { + for _, msg := range m.Sources { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.DefaultMode != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.DefaultMode)) + } + return i, nil +} + +func (m *QuobyteVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *QuobyteVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Registry))) + i += copy(data[i:], m.Registry) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Volume))) + i += copy(data[i:], m.Volume) + data[i] = 0x18 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.User))) + i += copy(data[i:], m.User) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + return i, nil +} + +func (m *RBDVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RBDVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.RBDImage))) + i += copy(data[i:], m.RBDImage) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.RBDPool))) + i += copy(data[i:], m.RBDPool) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.RadosUser))) + i += copy(data[i:], m.RadosUser) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Keyring))) + i += copy(data[i:], m.Keyring) + if m.SecretRef != nil { + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) + n139, err := m.SecretRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n139 + } + data[i] = 0x40 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *RangeAllocation) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RangeAllocation) MarshalTo(data []byte) (int, error) { + 
var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n140, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n140 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Range))) + i += copy(data[i:], m.Range) + if m.Data != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Data))) + i += copy(data[i:], m.Data) + } + return i, nil +} + +func (m *ReplicationController) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicationController) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n141, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n141 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n142, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n142 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n143, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n143 + return i, nil +} + +func (m *ReplicationControllerCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicationControllerCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n144, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n144 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *ReplicationControllerList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicationControllerList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n145, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n145 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ReplicationControllerSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicationControllerSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + data[i] = 0x8 + i++ + i 
= encodeVarintGenerated(data, i, uint64(*m.Replicas)) + } + if len(m.Selector) > 0 { + for k := range m.Selector { + data[i] = 0x12 + i++ + v := m.Selector[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + if m.Template != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n146, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n146 + } + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) + return i, nil +} + +func (m *ReplicationControllerStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicationControllerStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FullyLabeledReplicas)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ReadyReplicas)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ResourceFieldSelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceFieldSelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContainerName))) + i += copy(data[i:], m.ContainerName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Resource))) + i += copy(data[i:], m.Resource) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Divisor.Size())) + n147, err := m.Divisor.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n147 + return i, nil +} + +func (m *ResourceQuota) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceQuota) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n148, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n148 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n149, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n149 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n150, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n150 + return i, nil +} + +func (m *ResourceQuotaList) 
Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceQuotaList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n151, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n151 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ResourceQuotaSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceQuotaSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hard) > 0 { + for k := range m.Hard { + data[i] = 0xa + i++ + v := m.Hard[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n152, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n152 + } + } + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + data[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *ResourceQuotaStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceQuotaStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hard) > 0 { + for k := range m.Hard { + data[i] = 0xa + i++ + v := m.Hard[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n153, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n153 + } + } + if len(m.Used) > 0 { + for k := range m.Used { + data[i] = 0x12 + i++ + v := m.Used[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n154, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n154 + } + } + return i, nil +} + +func (m *ResourceRequirements) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceRequirements) MarshalTo(data []byte) (int, 
error) { + var i int + _ = i + var l int + _ = l + if len(m.Limits) > 0 { + for k := range m.Limits { + data[i] = 0xa + i++ + v := m.Limits[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n155, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n155 + } + } + if len(m.Requests) > 0 { + for k := range m.Requests { + data[i] = 0x12 + i++ + v := m.Requests[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n156, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n156 + } + } + return i, nil +} + +func (m *SELinuxOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SELinuxOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.User))) + i += copy(data[i:], m.User) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Role))) + i += copy(data[i:], m.Role) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Level))) + i += copy(data[i:], m.Level) + return i, nil +} + +func (m *ScaleIOVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScaleIOVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Gateway))) + i += copy(data[i:], m.Gateway) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.System))) + i += copy(data[i:], m.System) + if m.SecretRef != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) + n157, err := m.SecretRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n157 + } + data[i] = 0x20 + i++ + if m.SSLEnabled { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ProtectionDomain))) + i += copy(data[i:], m.ProtectionDomain) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.StoragePool))) + i += copy(data[i:], m.StoragePool) + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.StorageMode))) + i += copy(data[i:], m.StorageMode) + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.VolumeName))) + i += copy(data[i:], m.VolumeName) + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + data[i] = 0x50 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + 
+func (m *Secret) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Secret) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n158, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n158 + if len(m.Data) > 0 { + for k := range m.Data { + data[i] = 0x12 + i++ + v := m.Data[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if len(m.StringData) > 0 { + for k := range m.StringData { + data[i] = 0x22 + i++ + v := m.StringData[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + return i, nil +} + +func (m *SecretEnvSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SecretEnvSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) + n159, err := m.LocalObjectReference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n159 + if m.Optional != nil { + data[i] = 0x10 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SecretKeySelector) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SecretKeySelector) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) + n160, err := m.LocalObjectReference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n160 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + if m.Optional != nil { + data[i] = 0x18 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SecretList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SecretList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n161, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n161 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = 
encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *SecretProjection) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SecretProjection) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) + n162, err := m.LocalObjectReference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n162 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Optional != nil { + data[i] = 0x20 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SecretVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SecretVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SecretName))) + i += copy(data[i:], m.SecretName) + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.DefaultMode != nil { + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.DefaultMode)) + } + if m.Optional != nil { + data[i] = 0x20 + i++ + if *m.Optional { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SecurityContext) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SecurityContext) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Capabilities != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Capabilities.Size())) + n163, err := m.Capabilities.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n163 + } + if m.Privileged != nil { + data[i] = 0x10 + i++ + if *m.Privileged { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + if m.SELinuxOptions != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size())) + n164, err := m.SELinuxOptions.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n164 + } + if m.RunAsUser != nil { + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + data[i] = 0x28 + i++ + if *m.RunAsNonRoot { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + if m.ReadOnlyRootFilesystem != nil { + data[i] = 0x30 + i++ + if *m.ReadOnlyRootFilesystem { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SerializedReference) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SerializedReference) MarshalTo(data []byte) (int, 
error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Reference.Size())) + n165, err := m.Reference.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n165 + return i, nil +} + +func (m *Service) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Service) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n166, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n166 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n167, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n167 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n168, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n168 + return i, nil +} + +func (m *ServiceAccount) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServiceAccount) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n169, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n169 + if len(m.Secrets) > 0 { + for _, msg := range m.Secrets { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.ImagePullSecrets) > 0 { + for _, msg := range m.ImagePullSecrets { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.AutomountServiceAccountToken != nil { + data[i] = 0x20 + i++ + if *m.AutomountServiceAccountToken { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ServiceAccountList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServiceAccountList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n170, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n170 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ServiceList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServiceList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n171, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n171 + if len(m.Items) > 0 { + for _, msg := 
range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ServicePort) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServicePort) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Protocol))) + i += copy(data[i:], m.Protocol) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Port)) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetPort.Size())) + n172, err := m.TargetPort.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n172 + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NodePort)) + return i, nil +} + +func (m *ServiceProxyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServiceProxyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + return i, nil +} + +func (m *ServiceSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServiceSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Selector) > 0 { + for k := range m.Selector { + data[i] = 0x12 + i++ + v := m.Selector[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ClusterIP))) + i += copy(data[i:], m.ClusterIP) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if len(m.ExternalIPs) > 0 { + for _, s := range m.ExternalIPs { + data[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.DeprecatedPublicIPs) > 0 { + for _, s := range m.DeprecatedPublicIPs { + data[i] = 0x32 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SessionAffinity))) + i += copy(data[i:], m.SessionAffinity) + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.LoadBalancerIP))) + i += copy(data[i:], m.LoadBalancerIP) + if 
len(m.LoadBalancerSourceRanges) > 0 { + for _, s := range m.LoadBalancerSourceRanges { + data[i] = 0x4a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ExternalName))) + i += copy(data[i:], m.ExternalName) + return i, nil +} + +func (m *ServiceStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ServiceStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LoadBalancer.Size())) + n173, err := m.LoadBalancer.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n173 + return i, nil +} + +func (m *Sysctl) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Sysctl) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Value))) + i += copy(data[i:], m.Value) + return i, nil +} + +func (m *TCPSocketAction) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *TCPSocketAction) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Port.Size())) + n174, err := m.Port.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n174 + return i, nil +} + +func (m *Taint) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Taint) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Value))) + i += copy(data[i:], m.Value) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Effect))) + i += copy(data[i:], m.Effect) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TimeAdded.Size())) + n175, err := m.TimeAdded.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n175 + return i, nil +} + +func (m *Toleration) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Toleration) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Key))) + i += copy(data[i:], m.Key) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) + i += copy(data[i:], m.Operator) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Value))) + i += copy(data[i:], m.Value) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Effect))) + i += 
copy(data[i:], m.Effect) + if m.TolerationSeconds != nil { + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TolerationSeconds)) + } + return i, nil +} + +func (m *Volume) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Volume) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.VolumeSource.Size())) + n176, err := m.VolumeSource.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n176 + return i, nil +} + +func (m *VolumeMount) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *VolumeMount) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x10 + i++ + if m.ReadOnly { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MountPath))) + i += copy(data[i:], m.MountPath) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SubPath))) + i += copy(data[i:], m.SubPath) + return i, nil +} + +func (m *VolumeProjection) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *VolumeProjection) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Secret != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Secret.Size())) + n177, err := m.Secret.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n177 + } + if m.DownwardAPI != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DownwardAPI.Size())) + n178, err := m.DownwardAPI.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n178 + } + if m.ConfigMap != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.ConfigMap.Size())) + n179, err := m.ConfigMap.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n179 + } + return i, nil +} + +func (m *VolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *VolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.HostPath != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.HostPath.Size())) + n180, err := m.HostPath.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n180 + } + if m.EmptyDir != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.EmptyDir.Size())) + n181, err := m.EmptyDir.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n181 + } + if m.GCEPersistentDisk != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.GCEPersistentDisk.Size())) + n182, err := m.GCEPersistentDisk.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n182 + } + if m.AWSElasticBlockStore != nil { + data[i] = 
0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AWSElasticBlockStore.Size())) + n183, err := m.AWSElasticBlockStore.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n183 + } + if m.GitRepo != nil { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.GitRepo.Size())) + n184, err := m.GitRepo.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n184 + } + if m.Secret != nil { + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Secret.Size())) + n185, err := m.Secret.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n185 + } + if m.NFS != nil { + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.NFS.Size())) + n186, err := m.NFS.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n186 + } + if m.ISCSI != nil { + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ISCSI.Size())) + n187, err := m.ISCSI.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n187 + } + if m.Glusterfs != nil { + data[i] = 0x4a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Glusterfs.Size())) + n188, err := m.Glusterfs.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n188 + } + if m.PersistentVolumeClaim != nil { + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PersistentVolumeClaim.Size())) + n189, err := m.PersistentVolumeClaim.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n189 + } + if m.RBD != nil { + data[i] = 0x5a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RBD.Size())) + n190, err := m.RBD.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n190 + } + if m.FlexVolume != nil { + data[i] = 0x62 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FlexVolume.Size())) + n191, err := m.FlexVolume.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n191 + } + if m.Cinder != nil { + data[i] = 0x6a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Cinder.Size())) + n192, err := m.Cinder.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n192 + } + if m.CephFS != nil { + data[i] = 0x72 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CephFS.Size())) + n193, err := m.CephFS.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n193 + } + if m.Flocker != nil { + data[i] = 0x7a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Flocker.Size())) + n194, err := m.Flocker.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n194 + } + if m.DownwardAPI != nil { + data[i] = 0x82 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DownwardAPI.Size())) + n195, err := m.DownwardAPI.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n195 + } + if m.FC != nil { + data[i] = 0x8a + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FC.Size())) + n196, err := m.FC.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n196 + } + if m.AzureFile != nil { + data[i] = 0x92 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AzureFile.Size())) + n197, err := m.AzureFile.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n197 + } + if m.ConfigMap != nil { + data[i] = 0x9a + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ConfigMap.Size())) + n198, err := m.ConfigMap.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n198 + } + if m.VsphereVolume != nil { + data[i] = 0xa2 + i++ + data[i] = 0x1 + i++ + i = 
encodeVarintGenerated(data, i, uint64(m.VsphereVolume.Size())) + n199, err := m.VsphereVolume.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n199 + } + if m.Quobyte != nil { + data[i] = 0xaa + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Quobyte.Size())) + n200, err := m.Quobyte.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n200 + } + if m.AzureDisk != nil { + data[i] = 0xb2 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AzureDisk.Size())) + n201, err := m.AzureDisk.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n201 + } + if m.PhotonPersistentDisk != nil { + data[i] = 0xba + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PhotonPersistentDisk.Size())) + n202, err := m.PhotonPersistentDisk.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n202 + } + if m.PortworxVolume != nil { + data[i] = 0xc2 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PortworxVolume.Size())) + n203, err := m.PortworxVolume.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n203 + } + if m.ScaleIO != nil { + data[i] = 0xca + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ScaleIO.Size())) + n204, err := m.ScaleIO.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n204 + } + if m.Projected != nil { + data[i] = 0xd2 + i++ + data[i] = 0x1 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Projected.Size())) + n205, err := m.Projected.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n205 + } + return i, nil +} + +func (m *VsphereVirtualDiskVolumeSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *VsphereVirtualDiskVolumeSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.VolumePath))) + i += copy(data[i:], m.VolumePath) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) + i += copy(data[i:], m.FSType) + return i, nil +} + +func (m *WeightedPodAffinityTerm) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *WeightedPodAffinityTerm) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Weight)) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodAffinityTerm.Size())) + n206, err := m.PodAffinityTerm.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n206 + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = 
uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *AWSElasticBlockStoreVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Partition)) + n += 2 + return n +} + +func (m *Affinity) Size() (n int) { + var l int + _ = l + if m.NodeAffinity != nil { + l = m.NodeAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PodAffinity != nil { + l = m.PodAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PodAntiAffinity != nil { + l = m.PodAntiAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AttachedVolume) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DevicePath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AvoidPods) Size() (n int) { + var l int + _ = l + if len(m.PreferAvoidPods) > 0 { + for _, e := range m.PreferAvoidPods { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *AzureDiskVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.DiskName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DataDiskURI) + n += 1 + l + sovGenerated(uint64(l)) + if m.CachingMode != nil { + l = len(*m.CachingMode) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FSType != nil { + l = len(*m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReadOnly != nil { + n += 2 + } + return n +} + +func (m *AzureFileVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ShareName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Binding) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Capabilities) Size() (n int) { + var l int + _ = l + if len(m.Add) > 0 { + for _, s := range m.Add { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Drop) > 0 { + for _, s := range m.Drop { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CephFSVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SecretFile) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *CinderVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *ComponentCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ComponentStatus) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + 
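The encode helpers above are followed by the Size() methods, which precompute the exact buffer length that Marshal allocates before MarshalTo writes into it. Each length-delimited field contributes one tag byte, the payload bytes, and the varint-encoded length of the payload; sovGenerated (defined elsewhere in this generated file, as in stock gogo/protobuf output) returns how many bytes that length varint needs. A small sketch of the accounting, under that assumption:

package main

import "fmt"

// sov returns the number of bytes x occupies as a base-128 varint
// (the role played by sovGenerated in the generated file).
func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

func main() {
	// A field whose serialized payload is 300 bytes costs:
	// 1 tag byte + 2 bytes for the length varint (300 needs two) + 300 bytes.
	payloadLen := 300
	fmt.Println(1 + payloadLen + sov(uint64(payloadLen))) // 303
}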
sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ComponentStatusList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ConfigMap) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Data) > 0 { + for k, v := range m.Data { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ConfigMapEnvSource) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *ConfigMapKeySelector) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *ConfigMapList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ConfigMapProjection) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *ConfigMapVolumeSource) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *Container) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.WorkingDir) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.LivenessProbe != nil { + l = m.LivenessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReadinessProbe != nil { + l = m.ReadinessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Lifecycle != nil { + l = m.Lifecycle.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.TerminationMessagePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImagePullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 3 + 
n += 3 + n += 3 + if len(m.EnvFrom) > 0 { + for _, e := range m.EnvFrom { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + l = len(m.TerminationMessagePolicy) + n += 2 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerImage) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.SizeBytes)) + return n +} + +func (m *ContainerPort) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HostPort)) + n += 1 + sovGenerated(uint64(m.ContainerPort)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HostIP) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerState) Size() (n int) { + var l int + _ = l + if m.Waiting != nil { + l = m.Waiting.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Running != nil { + l = m.Running.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Terminated != nil { + l = m.Terminated.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ContainerStateRunning) Size() (n int) { + var l int + _ = l + l = m.StartedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerStateTerminated) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ExitCode)) + n += 1 + sovGenerated(uint64(m.Signal)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.StartedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FinishedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerStateWaiting) Size() (n int) { + var l int + _ = l + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerStatus) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.State.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTerminationState.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 1 + sovGenerated(uint64(m.RestartCount)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImageID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DaemonEndpoint) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Port)) + return n +} + +func (m *DeleteOptions) Size() (n int) { + var l int + _ = l + if m.GracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.GracePeriodSeconds)) + } + if m.Preconditions != nil { + l = m.Preconditions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.OrphanDependents != nil { + n += 2 + } + if m.PropagationPolicy != nil { + l = len(*m.PropagationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DownwardAPIProjection) Size() (n int) { + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DownwardAPIVolumeFile) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.FieldRef != nil { + l = m.FieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResourceFieldRef != nil { + l = 
m.ResourceFieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Mode != nil { + n += 1 + sovGenerated(uint64(*m.Mode)) + } + return n +} + +func (m *DownwardAPIVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + return n +} + +func (m *EmptyDirVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Medium) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EndpointAddress) Size() (n int) { + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetRef != nil { + l = m.TargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + if m.NodeName != nil { + l = len(*m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EndpointPort) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EndpointSubset) Size() (n int) { + var l int + _ = l + if len(m.Addresses) > 0 { + for _, e := range m.Addresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NotReadyAddresses) > 0 { + for _, e := range m.NotReadyAddresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Endpoints) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subsets) > 0 { + for _, e := range m.Subsets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EndpointsList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EnvFromSource) Size() (n int) { + var l int + _ = l + l = len(m.Prefix) + n += 1 + l + sovGenerated(uint64(l)) + if m.ConfigMapRef != nil { + l = m.ConfigMapRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EnvVar) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + if m.ValueFrom != nil { + l = m.ValueFrom.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EnvVarSource) Size() (n int) { + var l int + _ = l + if m.FieldRef != nil { + l = m.FieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResourceFieldRef != nil { + l = m.ResourceFieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ConfigMapKeyRef != nil { + l = m.ConfigMapKeyRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SecretKeyRef != nil { + l = m.SecretKeyRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Event) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.InvolvedObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + 
sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FirstTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Count)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EventList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EventSource) Size() (n int) { + var l int + _ = l + l = len(m.Component) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExecAction) Size() (n int) { + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FCVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.TargetWWNs) > 0 { + for _, s := range m.TargetWWNs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Lun != nil { + n += 1 + sovGenerated(uint64(*m.Lun)) + } + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *FlexVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *FlockerVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.DatasetName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DatasetUUID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GCEPersistentDiskVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.PDName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Partition)) + n += 2 + return n +} + +func (m *GitRepoVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Repository) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Revision) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Directory) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GlusterfsVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.EndpointsName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *HTTPGetAction) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Scheme) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.HTTPHeaders) > 0 { + for _, e := range m.HTTPHeaders { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HTTPHeader) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Handler) Size() (n int) { + var l int + _ = l + if m.Exec != nil { + l = m.Exec.Size() + 
n += 1 + l + sovGenerated(uint64(l)) + } + if m.HTTPGet != nil { + l = m.HTTPGet.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TCPSocket != nil { + l = m.TCPSocket.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *HostPathVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ISCSIVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.TargetPortal) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.IQN) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Lun)) + l = len(m.ISCSIInterface) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Portals) > 0 { + for _, s := range m.Portals { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *KeyToPath) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.Mode != nil { + n += 1 + sovGenerated(uint64(*m.Mode)) + } + return n +} + +func (m *Lifecycle) Size() (n int) { + var l int + _ = l + if m.PostStart != nil { + l = m.PostStart.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PreStop != nil { + l = m.PreStop.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *LimitRange) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LimitRangeItem) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Max) > 0 { + for k, v := range m.Max { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Min) > 0 { + for k, v := range m.Min { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Default) > 0 { + for k, v := range m.Default { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.DefaultRequest) > 0 { + for k, v := range m.DefaultRequest { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.MaxLimitRequestRatio) > 0 { + for k, v := range m.MaxLimitRequestRatio { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *LimitRangeList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LimitRangeSpec) Size() (n int) { + var l int + _ = l + if len(m.Limits) > 0 { + for _, e := range m.Limits { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *List) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ListOptions) Size() (n int) { + var l int + _ = l + l = len(m.LabelSelector) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldSelector) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + return n +} + +func (m *LoadBalancerIngress) Size() (n int) { + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LoadBalancerStatus) Size() (n int) { + var l int + _ = l + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LocalObjectReference) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NFSVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Server) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Namespace) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NamespaceList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NamespaceSpec) Size() (n int) { + var l int + _ = l + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NamespaceStatus) Size() (n int) { + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Node) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeAddress) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Address) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeAffinity) Size() (n int) { + var l int + _ = l + if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { + l = m.RequiredDuringSchedulingIgnoredDuringExecution.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastHeartbeatTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeDaemonEndpoints) Size() (n int) { + var l int + _ = l + l 
= m.KubeletEndpoint.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeProxyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeResources) Size() (n int) { + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NodeSelector) Size() (n int) { + var l int + _ = l + if len(m.NodeSelectorTerms) > 0 { + for _, e := range m.NodeSelectorTerms { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeSelectorRequirement) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeSelectorTerm) Size() (n int) { + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeSpec) Size() (n int) { + var l int + _ = l + l = len(m.PodCIDR) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ExternalID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ProviderID) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Taints) > 0 { + for _, e := range m.Taints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeStatus) Size() (n int) { + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Allocatable) > 0 { + for k, v := range m.Allocatable { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Addresses) > 0 { + for _, e := range m.Addresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.DaemonEndpoints.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.NodeInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.VolumesInUse) > 0 { + for _, s := range m.VolumesInUse { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.VolumesAttached) > 0 { + for _, e := range m.VolumesAttached { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeSystemInfo) Size() (n int) { + var l int + _ = l + l = len(m.MachineID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SystemUUID) + n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.BootID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KernelVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.OSImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerRuntimeVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeletVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeProxyVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.OperatingSystem) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Architecture) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectFieldSelector) Size() (n int) { + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectMeta) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.GenerateName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SelfLink) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + l = m.CreationTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.DeletionTimestamp != nil { + l = m.DeletionTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DeletionGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.DeletionGracePeriodSeconds)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.OwnerReferences) > 0 { + for _, e := range m.OwnerReferences { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ClusterName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectReference) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PersistentVolume) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PersistentVolumeClaim) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PersistentVolumeClaimList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 
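ObjectMeta.Size() above also shows how map fields (Labels, Annotations, and earlier Selector) are costed: each key/value pair is a nested entry message with the key as field 1 and the value as field 2, and the outer field then pays its own tag plus a varint for the entry length. A self-contained sketch of that computation (function names are mine, not from the vendored file):

package main

import "fmt"

// sov returns the number of bytes x occupies as a base-128 varint.
func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// mapFieldSize mirrors the sizing of a map[string]string field whose outer
// tag fits in one byte (field number <= 15), matching the "+ 1 +" seen in
// the generated loops above.
func mapFieldSize(m map[string]string) (n int) {
	for k, v := range m {
		// nested entry: key as field 1, value as field 2
		entry := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v)))
		// outer field: one tag byte, the entry length as a varint, the entry itself
		n += 1 + sov(uint64(entry)) + entry
	}
	return n
}

func main() {
	fmt.Println(mapFieldSize(map[string]string{"app": "nginx"})) // 14
}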
0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PersistentVolumeClaimSpec) Size() (n int) { + var l int + _ = l + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.StorageClassName != nil { + l = len(*m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PersistentVolumeClaimStatus) Size() (n int) { + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *PersistentVolumeClaimVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.ClaimName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *PersistentVolumeList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PersistentVolumeSource) Size() (n int) { + var l int + _ = l + if m.GCEPersistentDisk != nil { + l = m.GCEPersistentDisk.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AWSElasticBlockStore != nil { + l = m.AWSElasticBlockStore.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HostPath != nil { + l = m.HostPath.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Glusterfs != nil { + l = m.Glusterfs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NFS != nil { + l = m.NFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RBD != nil { + l = m.RBD.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ISCSI != nil { + l = m.ISCSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Cinder != nil { + l = m.Cinder.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CephFS != nil { + l = m.CephFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FC != nil { + l = m.FC.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Flocker != nil { + l = m.Flocker.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FlexVolume != nil { + l = m.FlexVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AzureFile != nil { + l = m.AzureFile.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VsphereVolume != nil { + l = m.VsphereVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Quobyte != nil { + l = m.Quobyte.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AzureDisk != nil { + l = m.AzureDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PhotonPersistentDisk != nil { + l = m.PhotonPersistentDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PortworxVolume != nil { + l = m.PortworxVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ScaleIO != nil { + l = m.ScaleIO.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PersistentVolumeSpec) Size() (n int) { + var l 
int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = m.PersistentVolumeSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ClaimRef != nil { + l = m.ClaimRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.PersistentVolumeReclaimPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PersistentVolumeStatus) Size() (n int) { + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PhotonPersistentDiskVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.PdID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Pod) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodAffinity) Size() (n int) { + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodAffinityTerm) Size() (n int) { + var l int + _ = l + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.TopologyKey) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodAntiAffinity) Size() (n int) { + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodAttachOptions) Size() (n int) { + var l int + _ = l + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodExecOptions) Size() (n int) { + var l int + _ = l + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Container) + n += 1 + l + 
sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodLogOptions) Size() (n int) { + var l int + _ = l + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if m.SinceSeconds != nil { + n += 1 + sovGenerated(uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + l = m.SinceTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.TailLines != nil { + n += 1 + sovGenerated(uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + n += 1 + sovGenerated(uint64(*m.LimitBytes)) + } + return n +} + +func (m *PodPortForwardOptions) Size() (n int) { + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + n += 1 + sovGenerated(uint64(e)) + } + } + return n +} + +func (m *PodProxyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityContext) Size() (n int) { + var l int + _ = l + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUser != nil { + n += 1 + sovGenerated(uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + n += 2 + } + if len(m.SupplementalGroups) > 0 { + for _, e := range m.SupplementalGroups { + n += 1 + sovGenerated(uint64(e)) + } + } + if m.FSGroup != nil { + n += 1 + sovGenerated(uint64(*m.FSGroup)) + } + return n +} + +func (m *PodSignature) Size() (n int) { + var l int + _ = l + if m.PodController != nil { + l = m.PodController.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodSpec) Size() (n int) { + var l int + _ = l + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RestartPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.TerminationGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds)) + } + if m.ActiveDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + l = len(m.DNSPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeprecatedServiceAccount) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + n += 2 + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Hostname) + n += 2 + l + sovGenerated(uint64(l)) + l = len(m.Subdomain) + n += 2 + l + sovGenerated(uint64(l)) + if m.Affinity != nil { + l = m.Affinity.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + l = len(m.SchedulerName) + n += 2 + l + 
sovGenerated(uint64(l)) + if len(m.InitContainers) > 0 { + for _, e := range m.InitContainers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.AutomountServiceAccountToken != nil { + n += 3 + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodStatus) Size() (n int) { + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HostIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodIP) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartTime != nil { + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ContainerStatuses) > 0 { + for _, e := range m.ContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.QOSClass) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.InitContainerStatuses) > 0 { + for _, e := range m.InitContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodStatusResult) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodTemplate) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodTemplateList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodTemplateSpec) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PortworxVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Preconditions) Size() (n int) { + var l int + _ = l + if m.UID != nil { + l = len(*m.UID) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PreferAvoidPodsEntry) Size() (n int) { + var l int + _ = l + l = m.PodSignature.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.EvictionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PreferredSchedulingTerm) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Weight)) + l = m.Preference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Probe) Size() (n int) { + var l int + _ = l + l = m.Handler.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.InitialDelaySeconds)) + n += 1 + sovGenerated(uint64(m.TimeoutSeconds)) + n += 1 + sovGenerated(uint64(m.PeriodSeconds)) + n += 1 + sovGenerated(uint64(m.SuccessThreshold)) + n += 1 + sovGenerated(uint64(m.FailureThreshold)) + return n +} + +func (m *ProjectedVolumeSource) Size() (n int) { + var l int + _ = l + if 
len(m.Sources) > 0 { + for _, e := range m.Sources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + return n +} + +func (m *QuobyteVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Registry) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Volume) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RBDVolumeSource) Size() (n int) { + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RBDImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RBDPool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RadosUser) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Keyring) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *RangeAllocation) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Range) + n += 1 + l + sovGenerated(uint64(l)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ReplicationController) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicationControllerCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicationControllerList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ReplicationControllerSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Template != nil { + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + return n +} + +func (m *ReplicationControllerStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceFieldSelector) Size() (n int) { + var l int + _ = l + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Divisor.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceQuota) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceQuotaList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceQuotaSpec) Size() (n int) { + var l int + _ = l + if len(m.Hard) > 0 { + for k, v := range m.Hard { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceQuotaStatus) Size() (n int) { + var l int + _ = l + if len(m.Hard) > 0 { + for k, v := range m.Hard { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Used) > 0 { + for k, v := range m.Used { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ResourceRequirements) Size() (n int) { + var l int + _ = l + if len(m.Limits) > 0 { + for k, v := range m.Limits { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Requests) > 0 { + for k, v := range m.Requests { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SELinuxOptions) Size() (n int) { + var l int + _ = l + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Role) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleIOVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Gateway) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.System) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + l = len(m.ProtectionDomain) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageMode) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Secret) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Data) > 0 { + for k, v := range m.Data { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + 
sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.StringData) > 0 { + for k, v := range m.StringData { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SecretEnvSource) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretKeySelector) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SecretProjection) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecurityContext) Size() (n int) { + var l int + _ = l + if m.Capabilities != nil { + l = m.Capabilities.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Privileged != nil { + n += 2 + } + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUser != nil { + n += 1 + sovGenerated(uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + n += 2 + } + if m.ReadOnlyRootFilesystem != nil { + n += 2 + } + return n +} + +func (m *SerializedReference) Size() (n int) { + var l int + _ = l + l = m.Reference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Service) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceAccount) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AutomountServiceAccountToken != nil { + n += 2 + } + return n +} + +func (m *ServiceAccountList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { 
+ l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServicePort) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Port)) + l = m.TargetPort.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.NodePort)) + return n +} + +func (m *ServiceProxyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceSpec) Size() (n int) { + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.ClusterIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ExternalIPs) > 0 { + for _, s := range m.ExternalIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.DeprecatedPublicIPs) > 0 { + for _, s := range m.DeprecatedPublicIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SessionAffinity) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.LoadBalancerIP) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.LoadBalancerSourceRanges) > 0 { + for _, s := range m.LoadBalancerSourceRanges { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ExternalName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceStatus) Size() (n int) { + var l int + _ = l + l = m.LoadBalancer.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Sysctl) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TCPSocketAction) Size() (n int) { + var l int + _ = l + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Taint) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TimeAdded.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Toleration) Size() (n int) { + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + if m.TolerationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TolerationSeconds)) + } + return n +} + +func (m *Volume) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.VolumeSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeMount) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.MountPath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SubPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeProjection) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DownwardAPI != 
nil { + l = m.DownwardAPI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeSource) Size() (n int) { + var l int + _ = l + if m.HostPath != nil { + l = m.HostPath.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EmptyDir != nil { + l = m.EmptyDir.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GCEPersistentDisk != nil { + l = m.GCEPersistentDisk.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AWSElasticBlockStore != nil { + l = m.AWSElasticBlockStore.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitRepo != nil { + l = m.GitRepo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NFS != nil { + l = m.NFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ISCSI != nil { + l = m.ISCSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Glusterfs != nil { + l = m.Glusterfs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PersistentVolumeClaim != nil { + l = m.PersistentVolumeClaim.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RBD != nil { + l = m.RBD.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FlexVolume != nil { + l = m.FlexVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Cinder != nil { + l = m.Cinder.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CephFS != nil { + l = m.CephFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Flocker != nil { + l = m.Flocker.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DownwardAPI != nil { + l = m.DownwardAPI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.FC != nil { + l = m.FC.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.AzureFile != nil { + l = m.AzureFile.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.VsphereVolume != nil { + l = m.VsphereVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Quobyte != nil { + l = m.Quobyte.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.AzureDisk != nil { + l = m.AzureDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PhotonPersistentDisk != nil { + l = m.PhotonPersistentDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PortworxVolume != nil { + l = m.PortworxVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ScaleIO != nil { + l = m.ScaleIO.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Projected != nil { + l = m.Projected.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VsphereVirtualDiskVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WeightedPodAffinityTerm) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Weight)) + l = m.PodAffinityTerm.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AWSElasticBlockStoreVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AWSElasticBlockStoreVolumeSource{`, + `VolumeID:` + 
fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Affinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Affinity{`, + `NodeAffinity:` + strings.Replace(fmt.Sprintf("%v", this.NodeAffinity), "NodeAffinity", "NodeAffinity", 1) + `,`, + `PodAffinity:` + strings.Replace(fmt.Sprintf("%v", this.PodAffinity), "PodAffinity", "PodAffinity", 1) + `,`, + `PodAntiAffinity:` + strings.Replace(fmt.Sprintf("%v", this.PodAntiAffinity), "PodAntiAffinity", "PodAntiAffinity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AttachedVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AttachedVolume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, + `}`, + }, "") + return s +} +func (this *AvoidPods) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AvoidPods{`, + `PreferAvoidPods:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferAvoidPods), "PreferAvoidPodsEntry", "PreferAvoidPodsEntry", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *AzureDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureDiskVolumeSource{`, + `DiskName:` + fmt.Sprintf("%v", this.DiskName) + `,`, + `DataDiskURI:` + fmt.Sprintf("%v", this.DataDiskURI) + `,`, + `CachingMode:` + valueToStringGenerated(this.CachingMode) + `,`, + `FSType:` + valueToStringGenerated(this.FSType) + `,`, + `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *AzureFileVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureFileVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Binding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Binding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Capabilities) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Capabilities{`, + `Add:` + fmt.Sprintf("%v", this.Add) + `,`, + `Drop:` + fmt.Sprintf("%v", this.Drop) + `,`, + `}`, + }, "") + return s +} +func (this *CephFSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CephFSVolumeSource{`, + `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *CinderVolumeSource) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&CinderVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *ComponentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ComponentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *ComponentStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ComponentStatus{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ComponentCondition", "ComponentCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ComponentStatusList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ComponentStatusList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ComponentStatus", "ComponentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMap) String() string { + if this == nil { + return "nil" + } + keysForData := make([]string, 0, len(this.Data)) + for k := range this.Data { + keysForData = append(keysForData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + mapStringForData := "map[string]string{" + for _, k := range keysForData { + mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + } + mapStringForData += "}" + s := strings.Join([]string{`&ConfigMap{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + mapStringForData + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapEnvSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapEnvSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapKeySelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapKeySelector{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ConfigMap", "ConfigMap", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + 
return s +} +func (this *ConfigMapProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapProjection{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapVolumeSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *Container) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Container{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + `,`, + `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "EnvVar", 1), `&`, ``, 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeMounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeMounts), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + `,`, + `LivenessProbe:` + strings.Replace(fmt.Sprintf("%v", this.LivenessProbe), "Probe", "Probe", 1) + `,`, + `ReadinessProbe:` + strings.Replace(fmt.Sprintf("%v", this.ReadinessProbe), "Probe", "Probe", 1) + `,`, + `Lifecycle:` + strings.Replace(fmt.Sprintf("%v", this.Lifecycle), "Lifecycle", "Lifecycle", 1) + `,`, + `TerminationMessagePath:` + fmt.Sprintf("%v", this.TerminationMessagePath) + `,`, + `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, + `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "SecurityContext", "SecurityContext", 1) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + `,`, + `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerImage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerImage{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `SizeBytes:` + fmt.Sprintf("%v", this.SizeBytes) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerPort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `HostPort:` + fmt.Sprintf("%v", 
this.HostPort) + `,`, + `ContainerPort:` + fmt.Sprintf("%v", this.ContainerPort) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerState) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerState{`, + `Waiting:` + strings.Replace(fmt.Sprintf("%v", this.Waiting), "ContainerStateWaiting", "ContainerStateWaiting", 1) + `,`, + `Running:` + strings.Replace(fmt.Sprintf("%v", this.Running), "ContainerStateRunning", "ContainerStateRunning", 1) + `,`, + `Terminated:` + strings.Replace(fmt.Sprintf("%v", this.Terminated), "ContainerStateTerminated", "ContainerStateTerminated", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateRunning) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateRunning{`, + `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateTerminated) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateTerminated{`, + `ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`, + `Signal:` + fmt.Sprintf("%v", this.Signal) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `FinishedAt:` + strings.Replace(strings.Replace(this.FinishedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateWaiting) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateWaiting{`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `State:` + strings.Replace(strings.Replace(this.State.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, + `LastTerminationState:` + strings.Replace(strings.Replace(this.LastTerminationState.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, + `Ready:` + fmt.Sprintf("%v", this.Ready) + `,`, + `RestartCount:` + fmt.Sprintf("%v", this.RestartCount) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonEndpoint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonEndpoint{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *DeleteOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteOptions{`, + `GracePeriodSeconds:` + valueToStringGenerated(this.GracePeriodSeconds) + `,`, + `Preconditions:` + strings.Replace(fmt.Sprintf("%v", this.Preconditions), "Preconditions", "Preconditions", 1) + `,`, + `OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`, + 
`PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DownwardAPIProjection{`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIVolumeFile) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DownwardAPIVolumeFile{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `FieldRef:` + strings.Replace(fmt.Sprintf("%v", this.FieldRef), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, + `ResourceFieldRef:` + strings.Replace(fmt.Sprintf("%v", this.ResourceFieldRef), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, + `Mode:` + valueToStringGenerated(this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DownwardAPIVolumeSource{`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `}`, + }, "") + return s +} +func (this *EmptyDirVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EmptyDirVolumeSource{`, + `Medium:` + fmt.Sprintf("%v", this.Medium) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointAddress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `TargetRef:` + strings.Replace(fmt.Sprintf("%v", this.TargetRef), "ObjectReference", "ObjectReference", 1) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointPort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSubset) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointSubset{`, + `Addresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Addresses), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + `,`, + `NotReadyAddresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NotReadyAddresses), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + `,`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Endpoints) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Endpoints{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subsets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subsets), "EndpointSubset", "EndpointSubset", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointsList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointsList{`, + 
`ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Endpoints", "Endpoints", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EnvFromSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvFromSource{`, + `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, + `ConfigMapRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapRef), "ConfigMapEnvSource", "ConfigMapEnvSource", 1) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretEnvSource", "SecretEnvSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EnvVar) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvVar{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `ValueFrom:` + strings.Replace(fmt.Sprintf("%v", this.ValueFrom), "EnvVarSource", "EnvVarSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EnvVarSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvVarSource{`, + `FieldRef:` + strings.Replace(fmt.Sprintf("%v", this.FieldRef), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, + `ResourceFieldRef:` + strings.Replace(fmt.Sprintf("%v", this.ResourceFieldRef), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, + `ConfigMapKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapKeyRef), "ConfigMapKeySelector", "ConfigMapKeySelector", 1) + `,`, + `SecretKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretKeyRef), "SecretKeySelector", "SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Event) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Event{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `InvolvedObject:` + strings.Replace(strings.Replace(this.InvolvedObject.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "EventSource", "EventSource", 1), `&`, ``, 1) + `,`, + `FirstTimestamp:` + strings.Replace(strings.Replace(this.FirstTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTimestamp:` + strings.Replace(strings.Replace(this.LastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *EventList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Event", "Event", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EventSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSource{`, + `Component:` + fmt.Sprintf("%v", this.Component) + `,`, + `Host:` + fmt.Sprintf("%v", 
this.Host) + `,`, + `}`, + }, "") + return s +} +func (this *ExecAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecAction{`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s +} +func (this *FCVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FCVolumeSource{`, + `TargetWWNs:` + fmt.Sprintf("%v", this.TargetWWNs) + `,`, + `Lun:` + valueToStringGenerated(this.Lun) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *FlexVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k := range this.Options { + keysForOptions = append(keysForOptions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&FlexVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Options:` + mapStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *FlockerVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlockerVolumeSource{`, + `DatasetName:` + fmt.Sprintf("%v", this.DatasetName) + `,`, + `DatasetUUID:` + fmt.Sprintf("%v", this.DatasetUUID) + `,`, + `}`, + }, "") + return s +} +func (this *GCEPersistentDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GCEPersistentDiskVolumeSource{`, + `PDName:` + fmt.Sprintf("%v", this.PDName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *GitRepoVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitRepoVolumeSource{`, + `Repository:` + fmt.Sprintf("%v", this.Repository) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `Directory:` + fmt.Sprintf("%v", this.Directory) + `,`, + `}`, + }, "") + return s +} +func (this *GlusterfsVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GlusterfsVolumeSource{`, + `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPGetAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPGetAction{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Port:` + strings.Replace(strings.Replace(this.Port.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `Scheme:` + fmt.Sprintf("%v", this.Scheme) + `,`, + `HTTPHeaders:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HTTPHeaders), "HTTPHeader", "HTTPHeader", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPHeader) 
String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPHeader{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Handler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Handler{`, + `Exec:` + strings.Replace(fmt.Sprintf("%v", this.Exec), "ExecAction", "ExecAction", 1) + `,`, + `HTTPGet:` + strings.Replace(fmt.Sprintf("%v", this.HTTPGet), "HTTPGetAction", "HTTPGetAction", 1) + `,`, + `TCPSocket:` + strings.Replace(fmt.Sprintf("%v", this.TCPSocket), "TCPSocketAction", "TCPSocketAction", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HostPathVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostPathVolumeSource{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *ISCSIVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ISCSIVolumeSource{`, + `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, + `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, + `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, + `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, + `}`, + }, "") + return s +} +func (this *KeyToPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KeyToPath{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Mode:` + valueToStringGenerated(this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *Lifecycle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Lifecycle{`, + `PostStart:` + strings.Replace(fmt.Sprintf("%v", this.PostStart), "Handler", "Handler", 1) + `,`, + `PreStop:` + strings.Replace(fmt.Sprintf("%v", this.PreStop), "Handler", "Handler", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitRange{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LimitRangeSpec", "LimitRangeSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeItem) String() string { + if this == nil { + return "nil" + } + keysForMax := make([]string, 0, len(this.Max)) + for k := range this.Max { + keysForMax = append(keysForMax, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMax) + mapStringForMax := "ResourceList{" + for _, k := range keysForMax { + mapStringForMax += fmt.Sprintf("%v: %v,", k, this.Max[ResourceName(k)]) + } + mapStringForMax += "}" + keysForMin := make([]string, 0, len(this.Min)) + for k := range this.Min { + keysForMin = append(keysForMin, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMin) + mapStringForMin := "ResourceList{" + for _, k := range keysForMin { + mapStringForMin += fmt.Sprintf("%v: %v,", k, this.Min[ResourceName(k)]) + } + mapStringForMin += "}" + keysForDefault := make([]string, 0, len(this.Default)) + for k := range this.Default { + keysForDefault = append(keysForDefault, string(k)) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) + mapStringForDefault := "ResourceList{" + for _, k := range keysForDefault { + mapStringForDefault += fmt.Sprintf("%v: %v,", k, this.Default[ResourceName(k)]) + } + mapStringForDefault += "}" + keysForDefaultRequest := make([]string, 0, len(this.DefaultRequest)) + for k := range this.DefaultRequest { + keysForDefaultRequest = append(keysForDefaultRequest, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) + mapStringForDefaultRequest := "ResourceList{" + for _, k := range keysForDefaultRequest { + mapStringForDefaultRequest += fmt.Sprintf("%v: %v,", k, this.DefaultRequest[ResourceName(k)]) + } + mapStringForDefaultRequest += "}" + keysForMaxLimitRequestRatio := make([]string, 0, len(this.MaxLimitRequestRatio)) + for k := range this.MaxLimitRequestRatio { + keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) + mapStringForMaxLimitRequestRatio := "ResourceList{" + for _, k := range keysForMaxLimitRequestRatio { + mapStringForMaxLimitRequestRatio += fmt.Sprintf("%v: %v,", k, this.MaxLimitRequestRatio[ResourceName(k)]) + } + mapStringForMaxLimitRequestRatio += "}" + s := strings.Join([]string{`&LimitRangeItem{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Max:` + mapStringForMax + `,`, + `Min:` + mapStringForMin + `,`, + `Default:` + mapStringForDefault + `,`, + `DefaultRequest:` + mapStringForDefaultRequest + `,`, + `MaxLimitRequestRatio:` + mapStringForMaxLimitRequestRatio + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitRangeList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "LimitRange", "LimitRange", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitRangeSpec{`, + `Limits:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Limits), "LimitRangeItem", "LimitRangeItem", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *List) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&List{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListOptions{`, + `LabelSelector:` + fmt.Sprintf("%v", this.LabelSelector) + `,`, + `FieldSelector:` + fmt.Sprintf("%v", this.FieldSelector) + `,`, + `Watch:` + fmt.Sprintf("%v", this.Watch) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *LoadBalancerIngress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LoadBalancerIngress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostname:` + fmt.Sprintf("%v", 
this.Hostname) + `,`, + `}`, + }, "") + return s +} +func (this *LoadBalancerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LoadBalancerStatus{`, + `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "LoadBalancerIngress", "LoadBalancerIngress", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LocalObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalObjectReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *NFSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NFSVolumeSource{`, + `Server:` + fmt.Sprintf("%v", this.Server) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Namespace) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Namespace{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NamespaceSpec", "NamespaceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NamespaceStatus", "NamespaceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamespaceList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamespaceSpec{`, + `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamespaceStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `}`, + }, "") + return s +} +func (this *Node) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Node{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NodeSpec", "NodeSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NodeStatus", "NodeStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeAddress{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Address:` + fmt.Sprintf("%v", this.Address) + `,`, + `}`, + }, "") + return s +} +func (this *NodeAffinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "NodeSelector", "NodeSelector", 1) + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "PreferredSchedulingTerm", "PreferredSchedulingTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastHeartbeatTime:` + strings.Replace(strings.Replace(this.LastHeartbeatTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *NodeDaemonEndpoints) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeDaemonEndpoints{`, + `KubeletEndpoint:` + strings.Replace(strings.Replace(this.KubeletEndpoint.String(), "DaemonEndpoint", "DaemonEndpoint", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Node", "Node", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *NodeResources) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&NodeResources{`, + `Capacity:` + mapStringForCapacity + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSelector{`, + `NodeSelectorTerms:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NodeSelectorTerms), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSelectorRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelectorTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSelectorTerm{`, + `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSpec) String() string { + if this == nil { + 
return "nil" + } + s := strings.Join([]string{`&NodeSpec{`, + `PodCIDR:` + fmt.Sprintf("%v", this.PodCIDR) + `,`, + `ExternalID:` + fmt.Sprintf("%v", this.ExternalID) + `,`, + `ProviderID:` + fmt.Sprintf("%v", this.ProviderID) + `,`, + `Unschedulable:` + fmt.Sprintf("%v", this.Unschedulable) + `,`, + `Taints:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Taints), "Taint", "Taint", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeStatus) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + keysForAllocatable := make([]string, 0, len(this.Allocatable)) + for k := range this.Allocatable { + keysForAllocatable = append(keysForAllocatable, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) + mapStringForAllocatable := "ResourceList{" + for _, k := range keysForAllocatable { + mapStringForAllocatable += fmt.Sprintf("%v: %v,", k, this.Allocatable[ResourceName(k)]) + } + mapStringForAllocatable += "}" + s := strings.Join([]string{`&NodeStatus{`, + `Capacity:` + mapStringForCapacity + `,`, + `Allocatable:` + mapStringForAllocatable + `,`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "NodeCondition", "NodeCondition", 1), `&`, ``, 1) + `,`, + `Addresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Addresses), "NodeAddress", "NodeAddress", 1), `&`, ``, 1) + `,`, + `DaemonEndpoints:` + strings.Replace(strings.Replace(this.DaemonEndpoints.String(), "NodeDaemonEndpoints", "NodeDaemonEndpoints", 1), `&`, ``, 1) + `,`, + `NodeInfo:` + strings.Replace(strings.Replace(this.NodeInfo.String(), "NodeSystemInfo", "NodeSystemInfo", 1), `&`, ``, 1) + `,`, + `Images:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Images), "ContainerImage", "ContainerImage", 1), `&`, ``, 1) + `,`, + `VolumesInUse:` + fmt.Sprintf("%v", this.VolumesInUse) + `,`, + `VolumesAttached:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumesAttached), "AttachedVolume", "AttachedVolume", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSystemInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSystemInfo{`, + `MachineID:` + fmt.Sprintf("%v", this.MachineID) + `,`, + `SystemUUID:` + fmt.Sprintf("%v", this.SystemUUID) + `,`, + `BootID:` + fmt.Sprintf("%v", this.BootID) + `,`, + `KernelVersion:` + fmt.Sprintf("%v", this.KernelVersion) + `,`, + `OSImage:` + fmt.Sprintf("%v", this.OSImage) + `,`, + `ContainerRuntimeVersion:` + fmt.Sprintf("%v", this.ContainerRuntimeVersion) + `,`, + `KubeletVersion:` + fmt.Sprintf("%v", this.KubeletVersion) + `,`, + `KubeProxyVersion:` + fmt.Sprintf("%v", this.KubeProxyVersion) + `,`, + `OperatingSystem:` + fmt.Sprintf("%v", this.OperatingSystem) + `,`, + `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectFieldSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectFieldSelector{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + 
`FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMeta) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&ObjectMeta{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `GenerateName:` + fmt.Sprintf("%v", this.GenerateName) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `CreationTimestamp:` + strings.Replace(strings.Replace(this.CreationTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `DeletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.DeletionTimestamp), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `DeletionGracePeriodSeconds:` + valueToStringGenerated(this.DeletionGracePeriodSeconds) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `OwnerReferences:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OwnerReferences), "OwnerReference", "k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference", 1), `&`, ``, 1) + `,`, + `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, + `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolume{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeSpec", "PersistentVolumeSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeStatus", "PersistentVolumeStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaim) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&PersistentVolumeClaim{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeClaimSpec", "PersistentVolumeClaimSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeClaimStatus", "PersistentVolumeClaimStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PersistentVolumeClaim", "PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimSpec{`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimStatus) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&PersistentVolumeClaimStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `Capacity:` + mapStringForCapacity + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimVolumeSource{`, + `ClaimName:` + fmt.Sprintf("%v", this.ClaimName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PersistentVolume", "PersistentVolume", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeSource{`, + `GCEPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) 
+ `,`, + `AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, + `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, + `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, + `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDVolumeSource", "RBDVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIVolumeSource", "ISCSIVolumeSource", 1) + `,`, + `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, + `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSVolumeSource", "CephFSVolumeSource", 1) + `,`, + `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, + `Flocker:` + strings.Replace(fmt.Sprintf("%v", this.Flocker), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, + `FlexVolume:` + strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexVolumeSource", "FlexVolumeSource", 1) + `,`, + `AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFileVolumeSource", "AzureFileVolumeSource", 1) + `,`, + `VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, + `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, + `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, + `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, + `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, + `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeSpec) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&PersistentVolumeSpec{`, + `Capacity:` + mapStringForCapacity + `,`, + `PersistentVolumeSource:` + strings.Replace(strings.Replace(this.PersistentVolumeSource.String(), "PersistentVolumeSource", "PersistentVolumeSource", 1), `&`, ``, 1) + `,`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `ClaimRef:` + strings.Replace(fmt.Sprintf("%v", this.ClaimRef), "ObjectReference", "ObjectReference", 1) + `,`, + `PersistentVolumeReclaimPolicy:` + fmt.Sprintf("%v", this.PersistentVolumeReclaimPolicy) + `,`, + `StorageClassName:` + fmt.Sprintf("%v", this.StorageClassName) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeStatus) String() string { + if this == nil { + return "nil" + } 
+ s := strings.Join([]string{`&PersistentVolumeStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `}`, + }, "") + return s +} +func (this *PhotonPersistentDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PhotonPersistentDiskVolumeSource{`, + `PdID:` + fmt.Sprintf("%v", this.PdID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `}`, + }, "") + return s +} +func (this *Pod) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Pod{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodAffinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodAffinityTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAffinityTerm{`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, + `}`, + }, "") + return s +} +func (this *PodAntiAffinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAntiAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodAttachOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAttachOptions{`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} +func (this *PodCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), 
`&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PodExecOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodExecOptions{`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s +} +func (this *PodList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Pod", "Pod", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodLogOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodLogOptions{`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, + `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, + `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, + `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, + `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, + `}`, + }, "") + return s +} +func (this *PodPortForwardOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodPortForwardOptions{`, + `Ports:` + fmt.Sprintf("%v", this.Ports) + `,`, + `}`, + }, "") + return s +} +func (this *PodProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityContext{`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "SELinuxOptions", 1) + `,`, + `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, + `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, + `SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`, + `FSGroup:` + valueToStringGenerated(this.FSGroup) + `,`, + `}`, + }, "") + return s +} +func (this *PodSignature) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSignature{`, + `PodController:` + strings.Replace(fmt.Sprintf("%v", this.PodController), "OwnerReference", "k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSpec) String() string { + if this == nil { + return "nil" + } + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&PodSpec{`, + `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "Volume", 1), `&`, ``, 1) + `,`, + `Containers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Containers), "Container", "Container", 1), `&`, ``, 1) + `,`, + `RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`, + `TerminationGracePeriodSeconds:` + valueToStringGenerated(this.TerminationGracePeriodSeconds) + `,`, + `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, + `DNSPolicy:` + fmt.Sprintf("%v", this.DNSPolicy) + `,`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, + `DeprecatedServiceAccount:` + fmt.Sprintf("%v", this.DeprecatedServiceAccount) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, + `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, + `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, + `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "PodSecurityContext", 1) + `,`, + `ImagePullSecrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ImagePullSecrets), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, + `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "Affinity", 1) + `,`, + `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, + `InitContainers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.InitContainers), "Container", "Container", 1), `&`, ``, 1) + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `Tolerations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Tolerations), "Toleration", "Toleration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "PodCondition", "PodCondition", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `PodIP:` + fmt.Sprintf("%v", this.PodIP) + `,`, + `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `ContainerStatuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ContainerStatuses), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + `,`, + `QOSClass:` + fmt.Sprintf("%v", this.QOSClass) + `,`, + `InitContainerStatuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.InitContainerStatuses), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodStatusResult) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodStatusResult{`, + `ObjectMeta:` + 
strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplateList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodTemplateList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodTemplate", "PodTemplate", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PortworxVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortworxVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Preconditions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Preconditions{`, + `UID:` + valueToStringGenerated(this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *PreferAvoidPodsEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PreferAvoidPodsEntry{`, + `PodSignature:` + strings.Replace(strings.Replace(this.PodSignature.String(), "PodSignature", "PodSignature", 1), `&`, ``, 1) + `,`, + `EvictionTime:` + strings.Replace(strings.Replace(this.EvictionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PreferredSchedulingTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PreferredSchedulingTerm{`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `Preference:` + strings.Replace(strings.Replace(this.Preference.String(), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Probe) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Probe{`, + `Handler:` + strings.Replace(strings.Replace(this.Handler.String(), "Handler", "Handler", 1), `&`, ``, 1) + `,`, + `InitialDelaySeconds:` + fmt.Sprintf("%v", this.InitialDelaySeconds) + `,`, + `TimeoutSeconds:` + fmt.Sprintf("%v", this.TimeoutSeconds) + `,`, + `PeriodSeconds:` + fmt.Sprintf("%v", 
this.PeriodSeconds) + `,`, + `SuccessThreshold:` + fmt.Sprintf("%v", this.SuccessThreshold) + `,`, + `FailureThreshold:` + fmt.Sprintf("%v", this.FailureThreshold) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectedVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProjectedVolumeSource{`, + `Sources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Sources), "VolumeProjection", "VolumeProjection", 1), `&`, ``, 1) + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `}`, + }, "") + return s +} +func (this *QuobyteVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QuobyteVolumeSource{`, + `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, + `Volume:` + fmt.Sprintf("%v", this.Volume) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `}`, + }, "") + return s +} +func (this *RBDVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RBDVolumeSource{`, + `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, + `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, + `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, + `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *RangeAllocation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RangeAllocation{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Range:` + fmt.Sprintf("%v", this.Range) + `,`, + `Data:` + valueToStringGenerated(this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationController) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationController{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicationControllerSpec", "ReplicationControllerSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicationControllerStatus", "ReplicationControllerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationControllerCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationControllerList{`, + `ListMeta:` + 
strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicationController", "ReplicationController", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerSpec) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ReplicationControllerSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `Template:` + strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "PodTemplateSpec", 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationControllerStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicationControllerCondition", "ReplicationControllerCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceFieldSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceFieldSelector{`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Divisor:` + strings.Replace(strings.Replace(this.Divisor.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuota) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceQuota{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceQuotaSpec", "ResourceQuotaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceQuotaStatus", "ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceQuotaList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ResourceQuota", "ResourceQuota", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaSpec) String() string { + if this == nil { + return "nil" + } + keysForHard := make([]string, 0, len(this.Hard)) + 
for k := range this.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + mapStringForHard := "ResourceList{" + for _, k := range keysForHard { + mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + } + mapStringForHard += "}" + s := strings.Join([]string{`&ResourceQuotaSpec{`, + `Hard:` + mapStringForHard + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaStatus) String() string { + if this == nil { + return "nil" + } + keysForHard := make([]string, 0, len(this.Hard)) + for k := range this.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + mapStringForHard := "ResourceList{" + for _, k := range keysForHard { + mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + } + mapStringForHard += "}" + keysForUsed := make([]string, 0, len(this.Used)) + for k := range this.Used { + keysForUsed = append(keysForUsed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) + mapStringForUsed := "ResourceList{" + for _, k := range keysForUsed { + mapStringForUsed += fmt.Sprintf("%v: %v,", k, this.Used[ResourceName(k)]) + } + mapStringForUsed += "}" + s := strings.Join([]string{`&ResourceQuotaStatus{`, + `Hard:` + mapStringForHard + `,`, + `Used:` + mapStringForUsed + `,`, + `}`, + }, "") + return s +} +func (this *ResourceRequirements) String() string { + if this == nil { + return "nil" + } + keysForLimits := make([]string, 0, len(this.Limits)) + for k := range this.Limits { + keysForLimits = append(keysForLimits, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + mapStringForLimits := "ResourceList{" + for _, k := range keysForLimits { + mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)]) + } + mapStringForLimits += "}" + keysForRequests := make([]string, 0, len(this.Requests)) + for k := range this.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + mapStringForRequests := "ResourceList{" + for _, k := range keysForRequests { + mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)]) + } + mapStringForRequests += "}" + s := strings.Join([]string{`&ResourceRequirements{`, + `Limits:` + mapStringForLimits + `,`, + `Requests:` + mapStringForRequests + `,`, + `}`, + }, "") + return s +} +func (this *SELinuxOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SELinuxOptions{`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleIOVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleIOVolumeSource{`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `System:` + fmt.Sprintf("%v", this.System) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, + `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, + `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, + `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, + `VolumeName:` + fmt.Sprintf("%v", 
this.VolumeName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Secret) String() string { + if this == nil { + return "nil" + } + keysForData := make([]string, 0, len(this.Data)) + for k := range this.Data { + keysForData = append(keysForData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + mapStringForData := "map[string][]byte{" + for _, k := range keysForData { + mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + } + mapStringForData += "}" + keysForStringData := make([]string, 0, len(this.StringData)) + for k := range this.StringData { + keysForStringData = append(keysForStringData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) + mapStringForStringData := "map[string]string{" + for _, k := range keysForStringData { + mapStringForStringData += fmt.Sprintf("%v: %v,", k, this.StringData[k]) + } + mapStringForStringData += "}" + s := strings.Join([]string{`&Secret{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + mapStringForData + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `StringData:` + mapStringForStringData + `,`, + `}`, + }, "") + return s +} +func (this *SecretEnvSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretEnvSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretKeySelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretKeySelector{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Secret", "Secret", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SecretProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretProjection{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, 
+ `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecurityContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecurityContext{`, + `Capabilities:` + strings.Replace(fmt.Sprintf("%v", this.Capabilities), "Capabilities", "Capabilities", 1) + `,`, + `Privileged:` + valueToStringGenerated(this.Privileged) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "SELinuxOptions", 1) + `,`, + `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, + `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, + `ReadOnlyRootFilesystem:` + valueToStringGenerated(this.ReadOnlyRootFilesystem) + `,`, + `}`, + }, "") + return s +} +func (this *SerializedReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SerializedReference{`, + `Reference:` + strings.Replace(strings.Replace(this.Reference.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Service) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Service{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceStatus", "ServiceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccount{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Secrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Secrets), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `ImagePullSecrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ImagePullSecrets), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ServiceAccount", "ServiceAccount", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Service", "Service", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServicePort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServicePort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Port:` + fmt.Sprintf("%v", 
this.Port) + `,`, + `TargetPort:` + strings.Replace(strings.Replace(this.TargetPort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceSpec) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ServiceSpec{`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "ServicePort", "ServicePort", 1), `&`, ``, 1) + `,`, + `Selector:` + mapStringForSelector + `,`, + `ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ExternalIPs:` + fmt.Sprintf("%v", this.ExternalIPs) + `,`, + `DeprecatedPublicIPs:` + fmt.Sprintf("%v", this.DeprecatedPublicIPs) + `,`, + `SessionAffinity:` + fmt.Sprintf("%v", this.SessionAffinity) + `,`, + `LoadBalancerIP:` + fmt.Sprintf("%v", this.LoadBalancerIP) + `,`, + `LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`, + `ExternalName:` + fmt.Sprintf("%v", this.ExternalName) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Sysctl) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Sysctl{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *TCPSocketAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TCPSocketAction{`, + `Port:` + strings.Replace(strings.Replace(this.Port.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Taint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Taint{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TimeAdded:` + strings.Replace(strings.Replace(this.TimeAdded.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Toleration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Toleration{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *Volume) String() string { + if 
this == nil { + return "nil" + } + s := strings.Join([]string{`&Volume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `VolumeSource:` + strings.Replace(strings.Replace(this.VolumeSource.String(), "VolumeSource", "VolumeSource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeMount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeMount{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, + `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeProjection{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretProjection", "SecretProjection", 1) + `,`, + `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, + `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeSource{`, + `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, + `EmptyDir:` + strings.Replace(fmt.Sprintf("%v", this.EmptyDir), "EmptyDirVolumeSource", "EmptyDirVolumeSource", 1) + `,`, + `GCEPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, + `AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, + `GitRepo:` + strings.Replace(fmt.Sprintf("%v", this.GitRepo), "GitRepoVolumeSource", "GitRepoVolumeSource", 1) + `,`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretVolumeSource", "SecretVolumeSource", 1) + `,`, + `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIVolumeSource", "ISCSIVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, + `PersistentVolumeClaim:` + strings.Replace(fmt.Sprintf("%v", this.PersistentVolumeClaim), "PersistentVolumeClaimVolumeSource", "PersistentVolumeClaimVolumeSource", 1) + `,`, + `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDVolumeSource", "RBDVolumeSource", 1) + `,`, + `FlexVolume:` + strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexVolumeSource", "FlexVolumeSource", 1) + `,`, + `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, + `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSVolumeSource", "CephFSVolumeSource", 1) + `,`, + `Flocker:` + strings.Replace(fmt.Sprintf("%v", this.Flocker), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, + `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIVolumeSource", "DownwardAPIVolumeSource", 1) + `,`, + `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, + `AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), 
"AzureFileVolumeSource", "AzureFileVolumeSource", 1) + `,`, + `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapVolumeSource", "ConfigMapVolumeSource", 1) + `,`, + `VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, + `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, + `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, + `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, + `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, + `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, + `Projected:` + strings.Replace(fmt.Sprintf("%v", this.Projected), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VsphereVirtualDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VsphereVirtualDiskVolumeSource{`, + `VolumePath:` + fmt.Sprintf("%v", this.VolumePath) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `}`, + }, "") + return s +} +func (this *WeightedPodAffinityTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WeightedPodAffinityTerm{`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `PodAffinityTerm:` + strings.Replace(strings.Replace(this.PodAffinityTerm.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + 
var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + m.Partition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Partition |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Affinity) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Affinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Affinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeAffinity == nil { + m.NodeAffinity = &NodeAffinity{} + } + if err := m.NodeAffinity.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAffinity == nil { + m.PodAffinity = 
&PodAffinity{} + } + if err := m.PodAffinity.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAntiAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAntiAffinity == nil { + m.PodAntiAffinity = &PodAntiAffinity{} + } + if err := m.PodAntiAffinity.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttachedVolume) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttachedVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttachedVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = UniqueVolumeName(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DevicePath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AvoidPods) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < 
l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AvoidPods: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AvoidPods: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferAvoidPods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferAvoidPods = append(m.PreferAvoidPods, PreferAvoidPodsEntry{}) + if err := m.PreferAvoidPods[len(m.PreferAvoidPods)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureDiskVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DiskName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DiskName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataDiskURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.DataDiskURI = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CachingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AzureDataDiskCachingMode(data[iNdEx:postIndex]) + m.CachingMode = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.FSType = &s + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnly = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureFileVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureFileVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureFileVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field ShareName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShareName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Binding) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Binding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Binding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Capabilities) 
Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Add = append(m.Add, Capability(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Drop = append(m.Drop, Capability(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CephFSVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } 
+ m.Monitors = append(m.Monitors, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretFile = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CinderVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + 
stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ComponentConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ComponentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentStatusList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentStatusList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ComponentStatus{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMap) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMap: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMap: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + 
intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Data == nil { + m.Data = make(map[string]string) + } + m.Data[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapEnvSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapEnvSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapKeySelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapKeySelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ConfigMap{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapProjection) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Container) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Container: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Container: illegal 
tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WorkingDir = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, ContainerPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMounts = append(m.VolumeMounts, VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LivenessProbe == nil { + m.LivenessProbe = &Probe{} + } + if err := m.LivenessProbe.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReadinessProbe == nil { + m.ReadinessProbe = &Probe{} + } + if err := m.ReadinessProbe.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Lifecycle == nil { + m.Lifecycle = &Lifecycle{} + } + if err := m.Lifecycle.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TerminationMessagePath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePullPolicy = PullPolicy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &SecurityContext{} + } + if err := m.SecurityContext.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.StdinOnce = bool(v != 0) + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TerminationMessagePolicy = TerminationMessagePolicy(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerImage) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names 
= append(m.Names, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType) + } + m.SizeBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.SizeBytes |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerPort) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) + } + m.HostPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.HostPort |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) + } + m.ContainerPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ContainerPort |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = Protocol(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostIP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerState) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Waiting == nil { + m.Waiting = &ContainerStateWaiting{} + } + if err := m.Waiting.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Running", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Running == nil { + m.Running = &ContainerStateRunning{} + } + if err := m.Running.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminated", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Terminated == nil { + m.Terminated = 
&ContainerStateTerminated{} + } + if err := m.Terminated.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStateRunning) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateRunning: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateRunning: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartedAt.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStateTerminated) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateTerminated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateTerminated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) + } + m.ExitCode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ExitCode |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) + } + m.Signal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Signal |= (int32(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartedAt.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FinishedAt.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*ContainerStateWaiting) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateWaiting: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateWaiting: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.State.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTerminationState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTerminationState.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Ready = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartCount", wireType) + } + m.RestartCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.RestartCount |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonEndpoint) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonEndpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonEndpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Port |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.GracePeriodSeconds = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Preconditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Preconditions == nil { + m.Preconditions = &Preconditions{} + } + if err := m.Preconditions.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OrphanDependents", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OrphanDependents = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PropagationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := DeletionPropagation(data[iNdEx:postIndex]) + m.PropagationPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DownwardAPIProjection) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DownwardAPIProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DownwardAPIProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
+func (m *DownwardAPIVolumeFile) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DownwardAPIVolumeFile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DownwardAPIVolumeFile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} + } + if err := m.FieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} + } + if err := m.ResourceFieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Mode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DownwardAPIVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + 
for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EmptyDirVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EmptyDirVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EmptyDirVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Medium", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Medium = StorageMedium(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointAddress) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetRef == nil { + m.TargetRef = &ObjectReference{} + } + if err := m.TargetRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.NodeName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if 
skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointPort) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Port |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = Protocol(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSubset) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, EndpointAddress{}) + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{}) + if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, EndpointPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Endpoints) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subsets = append(m.Subsets, EndpointSubset{}) + if err := m.Subsets[len(m.Subsets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointsList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Endpoints{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvFromSource) Unmarshal(data []byte) error { + l 
:= len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Prefix = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMapRef == nil { + m.ConfigMapRef = &ConfigMapEnvSource{} + } + if err := m.ConfigMapRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretEnvSource{} + } + if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvVar) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValueFrom == nil { + m.ValueFrom = &EnvVarSource{} + } + if err := m.ValueFrom.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvVarSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} + } + if err := m.FieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} + } + if err := m.ResourceFieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMapKeyRef == nil { + m.ConfigMapKeyRef = &ConfigMapKeySelector{} + } + if err := m.ConfigMapKeyRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretKeyRef == nil { + m.SecretKeyRef = &SecretKeySelector{} + } + if err := m.SecretKeyRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Event) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InvolvedObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.InvolvedObject.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Source.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FirstTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FirstTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Count |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Event{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Component", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Component = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecAction) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + 
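+ // The key varint just decoded packs the field number into the high bits and
+ // the wire type into the low three; a field number below 1 never comes from
+ // a valid encoder, so it is rejected as a corrupt stream.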
return fmt.Errorf("proto: ExecAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FCVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FCVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FCVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetWWNs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetWWNs = append(m.TargetWWNs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Lun = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", 
wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlexVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlexVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlexVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Options == nil { + m.Options = make(map[string]string) + } + m.Options[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlockerVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlockerVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlockerVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + 
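+ // DatasetName is length-delimited (wire type 2): a varint byte count
+ // followed by that many bytes of string data, sliced out below.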
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DatasetName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DatasetName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DatasetUUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DatasetUUID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GCEPersistentDiskVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PDName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PDName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = 
string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + m.Partition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Partition |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitRepoVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitRepoVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitRepoVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Repository = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Revision = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Directory", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) 
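+ // A length too large for int wraps negative in the conversion above; such
+ // values are rejected before they are used as a slice bound.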
+ if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Directory = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GlusterfsVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GlusterfsVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GlusterfsVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndpointsName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EndpointsName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPGetAction) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + 
iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPGetAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPGetAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Port.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheme", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scheme = URIScheme(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPHeaders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HTTPHeaders = append(m.HTTPHeaders, HTTPHeader{}) + if err := m.HTTPHeaders[len(m.HTTPHeaders)-1].Unmarshal(data[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPHeader) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Handler) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Handler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Handler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exec == nil { + m.Exec = &ExecAction{} + } + if err := m.Exec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPGet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HTTPGet == nil { + m.HTTPGet = &HTTPGetAction{} + } + if err := m.HTTPGet.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TCPSocket", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TCPSocket == nil { + m.TCPSocket = &TCPSocketAction{} + } + if err := m.TCPSocket.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostPathVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostPathVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostPathVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + 
} + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ISCSIVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ISCSIVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ISCSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetPortal = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IQN = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) + } + m.Lun = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Lun |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ISCSIInterface = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Portals = append(m.Portals, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyToPath) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyToPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyToPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = 
string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Mode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Lifecycle) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Lifecycle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Lifecycle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PostStart", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PostStart == nil { + m.PostStart = &Handler{} + } + if err := m.PostStart.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreStop", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PreStop == nil { + m.PreStop = &Handler{} + } + if err := m.PreStop.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitRange) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
wireType == 4 { + return fmt.Errorf("proto: LimitRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitRangeItem) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitRangeItem: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitRangeItem: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = LimitType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex 
:= iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Max == nil { + m.Max = make(ResourceList) + } + m.Max[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Min == nil { + m.Min = make(ResourceList) + } + m.Min[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Default == nil { + m.Default = make(ResourceList) + } + m.Default[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.DefaultRequest == nil { + m.DefaultRequest = make(ResourceList) + } + m.DefaultRequest[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxLimitRequestRatio", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey 
:= ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.MaxLimitRequestRatio == nil { + m.MaxLimitRequestRatio = make(ResourceList) + } + m.MaxLimitRequestRatio[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitRangeList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitRangeList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitRangeList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, LimitRange{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitRangeSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitRangeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitRangeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Limits = append(m.Limits, LimitRangeItem{}) + if err := m.Limits[len(m.Limits)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *List) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: List: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: List: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + 
iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, k8s_io_apimachinery_pkg_runtime.RawExtension{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelSelector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldSelector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Watch", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Watch = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LoadBalancerIngress) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LoadBalancerIngress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LoadBalancerStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) 
& 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LoadBalancerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ingress = append(m.Ingress, LoadBalancerIngress{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LocalObjectReference) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NFSVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NFSVolumeSource: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Server = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Namespace) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Namespace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamespaceList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamespaceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamespaceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Namespace{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } 
+ if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamespaceSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamespaceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamespaceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Finalizers = append(m.Finalizers, FinalizerName(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamespaceStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamespaceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamespaceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = NamespacePhase(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func 
(m *Node) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Node: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Node: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeAddress) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = NodeAddressType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeAffinity) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequiredDuringSchedulingIgnoredDuringExecution == nil { + m.RequiredDuringSchedulingIgnoredDuringExecution = &NodeSelector{} + } + if err := m.RequiredDuringSchedulingIgnoredDuringExecution.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } 
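Editorial aside, not part of the vendored file: the Max, Min, Default, DefaultRequest, MaxLimitRequestRatio and Capacity cases above all decode protobuf map fields. On the wire a map<K, V> is a repeated nested message whose field 1 is the key and field 2 is the value, which is why the generated code reads keykey, the key, valuekey, and then the value before storing the pair. A sketch of that shape, using map[string]string instead of ResourceList to stay short, and assuming the readVarint helper from the previous sketch:

// decodeStringMapEntry decodes one already length-delimited map entry whose
// key (field 1) and value (field 2) are both strings. Bounds checks are
// elided for brevity; the generated code above checks every offset.
func decodeStringMapEntry(entry []byte, out map[string]string) error {
	i := 0

	// key tag, then key length and key bytes
	_, i, err := readVarint(entry, i)
	if err != nil {
		return err
	}
	klen, i, err := readVarint(entry, i)
	if err != nil {
		return err
	}
	key := string(entry[i : i+int(klen)])
	i += int(klen)

	// value tag, then value length and value bytes
	_, i, err = readVarint(entry, i)
	if err != nil {
		return err
	}
	vlen, i, err := readVarint(entry, i)
	if err != nil {
		return err
	}
	out[key] = string(entry[i : i+int(vlen)])
	return nil
}

Called once per repeated map-entry message this yields the same key/value pair as the keykey/stringLenmapkey/valuekey sequence above; the vendored code additionally allocates the ResourceList map on first use and unmarshals the value bytes into a k8s_io_apimachinery_pkg_api_resource.Quantity.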
+ postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, PreferredSchedulingTerm{}) + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = NodeConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastHeartbeatTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastHeartbeatTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeDaemonEndpoints) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeDaemonEndpoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeDaemonEndpoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeletEndpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.KubeletEndpoint.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Node{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeProxyOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeProxyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeResources) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Capacity 
== nil { + m.Capacity = make(ResourceList) + } + m.Capacity[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelectorTerms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeSelectorTerms = append(m.NodeSelectorTerms, NodeSelectorTerm{}) + if err := m.NodeSelectorTerms[len(m.NodeSelectorTerms)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSelectorRequirement) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 
2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = NodeSelectorOperator(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSelectorTerm) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSelectorTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSelectorTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, NodeSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodCIDR", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodCIDR = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProviderID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Unschedulable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Unschedulable = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Taints = append(m.Taints, Taint{}) + if err := m.Taints[len(m.Taints)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if 
err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Capacity == nil { + m.Capacity = make(ResourceList) + } + m.Capacity[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Allocatable == nil { + m.Allocatable = make(ResourceList) + } + m.Allocatable[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = NodePhase(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, NodeCondition{}) + if err := 
m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, NodeAddress{}) + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DaemonEndpoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DaemonEndpoints.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.NodeInfo.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, ContainerImage{}) + if err := m.Images[len(m.Images)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumesInUse", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumesInUse = append(m.VolumesInUse, UniqueVolumeName(data[iNdEx:postIndex])) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumesAttached", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumesAttached = append(m.VolumesAttached, AttachedVolume{}) + if err := m.VolumesAttached[len(m.VolumesAttached)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSystemInfo) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSystemInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSystemInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MachineID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MachineID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SystemUUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SystemUUID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BootID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BootID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + 
case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KernelVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KernelVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OSImage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OSImage = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerRuntimeVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerRuntimeVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeletVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KubeletVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeProxyVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KubeProxyVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OperatingSystem", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OperatingSystem = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Architecture = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectFieldSelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectFieldSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldPath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 
skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMeta) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GenerateName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelfLink = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Generation |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreationTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CreationTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeletionTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeletionTimestamp == nil { + m.DeletionTimestamp = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.DeletionTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeletionGracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DeletionGracePeriodSeconds = &v + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Labels == nil { + m.Labels = make(map[string]string) + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OwnerReferences = append(m.OwnerReferences, k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference{}) + if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Finalizers = append(m.Finalizers, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectReference) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldPath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolume) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaim) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex 
> l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PersistentVolumeClaim{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + m.StorageClassName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = PersistentVolumeClaimPhase(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = 
postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Capacity == nil { + m.Capacity = make(ResourceList) + } + m.Capacity[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClaimName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClaimName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeList) Unmarshal(data 
[]byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PersistentVolume{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GCEPersistentDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GCEPersistentDisk == 
nil { + m.GCEPersistentDisk = &GCEPersistentDiskVolumeSource{} + } + if err := m.GCEPersistentDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AWSElasticBlockStore", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AWSElasticBlockStore == nil { + m.AWSElasticBlockStore = &AWSElasticBlockStoreVolumeSource{} + } + if err := m.AWSElasticBlockStore.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HostPath == nil { + m.HostPath = &HostPathVolumeSource{} + } + if err := m.HostPath.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Glusterfs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Glusterfs == nil { + m.Glusterfs = &GlusterfsVolumeSource{} + } + if err := m.Glusterfs.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NFS == nil { + m.NFS = &NFSVolumeSource{} + } + if err := m.NFS.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBD", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RBD == nil { + m.RBD = &RBDVolumeSource{} + } + 
if err := m.RBD.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ISCSI == nil { + m.ISCSI = &ISCSIVolumeSource{} + } + if err := m.ISCSI.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cinder", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cinder == nil { + m.Cinder = &CinderVolumeSource{} + } + if err := m.Cinder.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CephFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CephFS == nil { + m.CephFS = &CephFSVolumeSource{} + } + if err := m.CephFS.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FC", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FC == nil { + m.FC = &FCVolumeSource{} + } + if err := m.FC.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flocker", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Flocker == nil { + m.Flocker = &FlockerVolumeSource{} + } + if err := m.Flocker.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field FlexVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FlexVolume == nil { + m.FlexVolume = &FlexVolumeSource{} + } + if err := m.FlexVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureFile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AzureFile == nil { + m.AzureFile = &AzureFileVolumeSource{} + } + if err := m.AzureFile.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VsphereVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VsphereVolume == nil { + m.VsphereVolume = &VsphereVirtualDiskVolumeSource{} + } + if err := m.VsphereVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quobyte", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Quobyte == nil { + m.Quobyte = &QuobyteVolumeSource{} + } + if err := m.Quobyte.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AzureDisk == nil { + m.AzureDisk = &AzureDiskVolumeSource{} + } + if err := m.AzureDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PhotonPersistentDisk", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PhotonPersistentDisk == nil { + m.PhotonPersistentDisk = &PhotonPersistentDiskVolumeSource{} + } + if err := m.PhotonPersistentDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortworxVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PortworxVolume == nil { + m.PortworxVolume = &PortworxVolumeSource{} + } + if err := m.PortworxVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleIO", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScaleIO == nil { + m.ScaleIO = &ScaleIOVolumeSource{} + } + if err := m.ScaleIO.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Capacity == nil { + m.Capacity = make(ResourceList) + } + m.Capacity[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PersistentVolumeSource.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClaimRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClaimRef == nil { + m.ClaimRef = &ObjectReference{} + } + if err := m.ClaimRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeReclaimPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageClassName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = PersistentVolumePhase(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType 
= %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PhotonPersistentDiskVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PhotonPersistentDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PhotonPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PdID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PdID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Pod) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Pod: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Pod: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAffinity) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 
3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) + if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAffinityTerm) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAffinityTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyKey = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAntiAffinity) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAntiAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) + if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
PreferredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAttachOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAttachOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAttachOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdout = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stderr = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PodConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodExecOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodExecOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodExecOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdout = bool(v != 0) + case 3: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Stderr = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Pod{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodLogOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodLogOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodLogOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Follow = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Previous", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Previous = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + 
if b < 0x80 { + break + } + } + m.SinceSeconds = &v + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SinceTime == nil { + m.SinceTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.SinceTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Timestamps = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TailLines", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TailLines = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitBytes", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LimitBytes = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodPortForwardOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodPortForwardOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodPortForwardOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx 
+ skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodProxyOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodProxyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityContext) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxOptions == nil { + m.SELinuxOptions = &SELinuxOptions{} + } + if err := m.SELinuxOptions.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + m.RunAsUser = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.RunAsNonRoot = &b + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SupplementalGroups = append(m.SupplementalGroups, v) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.FSGroup = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSignature) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSignature: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSignature: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodController", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodController == nil { + m.PodController = &k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference{} + } + if err := m.PodController.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, Volume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, Container{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RestartPolicy = RestartPolicy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationGracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TerminationGracePeriodSeconds = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveDeadlineSeconds = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DNSPolicy = DNSPolicy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccountName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedServiceAccount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeprecatedServiceAccount = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostNetwork = bool(v != 0) + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostPID = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostIPC = bool(v != 0) + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &PodSecurityContext{} + } + if err := m.SecurityContext.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break 
+ } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{}) + if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subdomain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subdomain = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Affinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Affinity == nil { + m.Affinity = &Affinity{} + } + if err := m.Affinity.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchedulerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchedulerName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitContainers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InitContainers = append(m.InitContainers, 
Container{}) + if err := m.InitContainers[len(m.InitContainers)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AutomountServiceAccountToken = &b + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = PodPhase(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, PodCondition{}) + if err := 
m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostIP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodIP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartTime == nil { + m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerStatuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerStatuses = append(m.ContainerStatuses, ContainerStatus{}) + if err := m.ContainerStatuses[len(m.ContainerStatuses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QOSClass", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.QOSClass = PodQOSClass(data[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitContainerStatuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InitContainerStatuses = append(m.InitContainerStatuses, ContainerStatus{}) + if err := m.InitContainerStatuses[len(m.InitContainerStatuses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodStatusResult) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodStatusResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodStatusResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); 
err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodTemplate) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodTemplateList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + 
iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodTemplateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodTemplate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodTemplateSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortworxVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortworxVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortworxVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Preconditions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Preconditions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Preconditions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_apimachinery_pkg_types.UID(data[iNdEx:postIndex]) + m.UID = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PreferAvoidPodsEntry) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PreferAvoidPodsEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PreferAvoidPodsEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSignature", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PodSignature.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvictionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EvictionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PreferredSchedulingTerm) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PreferredSchedulingTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PreferredSchedulingTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + m.Weight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Weight |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Preference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Preference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF 
+ } + return nil +} +func (m *Probe) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Probe: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Probe: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Handler", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Handler.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialDelaySeconds", wireType) + } + m.InitialDelaySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.InitialDelaySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + m.TimeoutSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.TimeoutSeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PeriodSeconds", wireType) + } + m.PeriodSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.PeriodSeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessThreshold", wireType) + } + m.SuccessThreshold = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.SuccessThreshold |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureThreshold", wireType) + } + m.FailureThreshold = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.FailureThreshold |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectedVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectedVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectedVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sources = append(m.Sources, VolumeProjection{}) + if err := m.Sources[len(m.Sources)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QuobyteVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QuobyteVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QuobyteVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Registry", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Registry = string(data[iNdEx:postIndex]) + iNdEx = postIndex + 
case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volume = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RBDVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RBDVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RBDVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CephMonitors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CephMonitors = append(m.CephMonitors, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBDImage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RBDImage = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBDPool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RBDPool = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RadosUser", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RadosUser = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyring", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyring = 
string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RangeAllocation) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeAllocation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeAllocation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Range = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationController) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationController: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationController: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } 
+ if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ReplicationControllerConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ReplicationController{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + 
iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Selector == nil { + m.Selector = make(map[string]string) + } + m.Selector[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { 
+ break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Template == nil { + m.Template = &PodTemplateSpec{} + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ReplicationControllerCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceFieldSelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceFieldSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Divisor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF 
+ } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Divisor.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceQuota) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuota: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuota: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*ResourceQuotaList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuotaList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuotaList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceQuota{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceQuotaSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuotaSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuotaSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Hard == nil { + m.Hard = make(ResourceList) + } + m.Hard[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scopes = append(m.Scopes, ResourceQuotaScope(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceQuotaStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuotaStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuotaStatus: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Hard == nil { + m.Hard = make(ResourceList) + } + m.Hard[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Used", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Used == nil { + m.Used = make(ResourceList) + } + m.Used[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceRequirements) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceRequirements: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceRequirements: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return 
ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Limits == nil { + m.Limits = make(ResourceList) + } + m.Limits[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := 
mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Requests == nil { + m.Requests = make(ResourceList) + } + m.Requests[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SELinuxOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SELinuxOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SELinuxOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleIOVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleIOVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleIOVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gateway = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field System", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.System = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SSLEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + m.SSLEnabled = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProtectionDomain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProtectionDomain = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoragePool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoragePool = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageMode = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] 
+ iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Secret) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Secret: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + mapbyteLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthGenerated + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := make([]byte, mapbyteLen) + copy(mapvalue, data[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + if m.Data == nil { + m.Data = make(map[string][]byte) + } + m.Data[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = SecretType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringData", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.StringData == nil { + m.StringData = make(map[string]string) + } + m.StringData[mapkey] = mapvalue + iNdEx = 
postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretEnvSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretEnvSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretKeySelector) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretKeySelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Secret{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, 
err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretProjection) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretVolumeSource: wiretype 
end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecurityContext) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecurityContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capabilities", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capabilities == nil { + m.Capabilities = &Capabilities{} + } + if err := m.Capabilities.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Privileged = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxOptions == nil { + m.SELinuxOptions = &SELinuxOptions{} + } + if err := m.SELinuxOptions.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RunAsUser = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.RunAsNonRoot = &b + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnlyRootFilesystem = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SerializedReference) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SerializedReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SerializedReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Reference.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Service) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Service: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } 
+ postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccount) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = append(m.Secrets, ObjectReference{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{}) + if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AutomountServiceAccountToken = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ServiceAccount{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Service{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServicePort) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServicePort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServicePort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = Protocol(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Port |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetPort.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodePort", wireType) + } + m.NodePort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.NodePort |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceProxyOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceProxyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, ServicePort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Selector == nil { + m.Selector = make(map[string]string) + } + m.Selector[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ClusterIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterIP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ServiceType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalIPs = append(m.ExternalIPs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedPublicIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeprecatedPublicIPs = append(m.DeprecatedPublicIPs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinity", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionAffinity = ServiceAffinity(data[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LoadBalancerIP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LoadBalancer.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sysctl) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sysctl: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sysctl: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TCPSocketAction) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TCPSocketAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TCPSocketAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.Port.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Taint) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Taint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Taint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Effect = TaintEffect(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeAdded", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.TimeAdded.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Toleration) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Toleration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Toleration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = TolerationOperator(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Effect = TaintEffect(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TolerationSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TolerationSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Volume) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Volume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Volume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.VolumeSource.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeMount) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: VolumeMount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeMount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountPath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubPath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeProjection) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &SecretProjection{} + } + if err := m.Secret.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DownwardAPI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DownwardAPI == nil { + m.DownwardAPI = &DownwardAPIProjection{} + } + if err := m.DownwardAPI.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMap == nil { + m.ConfigMap = &ConfigMapProjection{} + } + if err := m.ConfigMap.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.HostPath == nil { + m.HostPath = &HostPathVolumeSource{} + } + if err := m.HostPath.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EmptyDir", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EmptyDir == nil { + m.EmptyDir = &EmptyDirVolumeSource{} + } + if err := m.EmptyDir.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GCEPersistentDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GCEPersistentDisk == nil { + m.GCEPersistentDisk = &GCEPersistentDiskVolumeSource{} + } + if err := m.GCEPersistentDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AWSElasticBlockStore", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AWSElasticBlockStore == nil { + m.AWSElasticBlockStore = &AWSElasticBlockStoreVolumeSource{} + } + if err := m.AWSElasticBlockStore.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitRepo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GitRepo == nil { + m.GitRepo = &GitRepoVolumeSource{} + } + if err := m.GitRepo.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &SecretVolumeSource{} + } + if err := m.Secret.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NFS == nil { + m.NFS = &NFSVolumeSource{} + } + if err := m.NFS.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ISCSI == nil { + m.ISCSI = &ISCSIVolumeSource{} + } + if err := m.ISCSI.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Glusterfs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Glusterfs == nil { + m.Glusterfs = &GlusterfsVolumeSource{} + } + if err := m.Glusterfs.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeClaim", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PersistentVolumeClaim == nil { + m.PersistentVolumeClaim = &PersistentVolumeClaimVolumeSource{} + } + if err := m.PersistentVolumeClaim.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBD", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RBD == nil { + m.RBD = &RBDVolumeSource{} + } + 
if err := m.RBD.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FlexVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FlexVolume == nil { + m.FlexVolume = &FlexVolumeSource{} + } + if err := m.FlexVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cinder", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cinder == nil { + m.Cinder = &CinderVolumeSource{} + } + if err := m.Cinder.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CephFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CephFS == nil { + m.CephFS = &CephFSVolumeSource{} + } + if err := m.CephFS.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flocker", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Flocker == nil { + m.Flocker = &FlockerVolumeSource{} + } + if err := m.Flocker.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DownwardAPI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DownwardAPI == nil { + m.DownwardAPI = &DownwardAPIVolumeSource{} + } + if err := m.DownwardAPI.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex 
+ case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FC", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FC == nil { + m.FC = &FCVolumeSource{} + } + if err := m.FC.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureFile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AzureFile == nil { + m.AzureFile = &AzureFileVolumeSource{} + } + if err := m.AzureFile.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMap == nil { + m.ConfigMap = &ConfigMapVolumeSource{} + } + if err := m.ConfigMap.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VsphereVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VsphereVolume == nil { + m.VsphereVolume = &VsphereVirtualDiskVolumeSource{} + } + if err := m.VsphereVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quobyte", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Quobyte == nil { + m.Quobyte = &QuobyteVolumeSource{} + } + if err := m.Quobyte.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
AzureDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AzureDisk == nil { + m.AzureDisk = &AzureDiskVolumeSource{} + } + if err := m.AzureDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PhotonPersistentDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PhotonPersistentDisk == nil { + m.PhotonPersistentDisk = &PhotonPersistentDiskVolumeSource{} + } + if err := m.PhotonPersistentDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortworxVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PortworxVolume == nil { + m.PortworxVolume = &PortworxVolumeSource{} + } + if err := m.PortworxVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleIO", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScaleIO == nil { + m.ScaleIO = &ScaleIOVolumeSource{} + } + if err := m.ScaleIO.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Projected", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Projected == nil { + m.Projected = &ProjectedVolumeSource{} + } + if err := m.Projected.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + 
return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VsphereVirtualDiskVolumeSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VsphereVirtualDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VsphereVirtualDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumePath = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WeightedPodAffinityTerm) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WeightedPodAffinityTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WeightedPodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + m.Weight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := data[iNdEx] + iNdEx++ + m.Weight |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAffinityTerm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PodAffinityTerm.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 11000 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0xbd, 0x7d, 0x70, 0x24, 0xc7, + 0x75, 0x18, 0xae, 0xd9, 0xc5, 0xd7, 0x3e, 0x7c, 0x37, 0x70, 0x47, 0x10, 0x22, 0x0f, 0xc7, 0xa1, + 0x48, 0x1d, 0xc9, 0x23, 0x20, 0x1e, 0x49, 0xf1, 0x24, 0xea, 0x47, 0x09, 0xc0, 0x02, 0x77, 0xd0, + 0x7d, 0x2d, 0x7b, 0x71, 0x77, 0x14, 0xc5, 0x9f, 0xc8, 0xb9, 0x9d, 0x06, 0x30, 0xbc, 0xc1, 0xcc, + 0x72, 0x66, 0x16, 0x77, 0x90, 0xa2, 0x2a, 0x5b, 0x51, 
0xc9, 0x49, 0x59, 0x49, 0xe4, 0x72, 0x54, + 0x95, 0x72, 0x52, 0xa5, 0x94, 0xab, 0xe2, 0x28, 0x9f, 0x8e, 0xa2, 0xb2, 0x24, 0x97, 0xe5, 0xa4, + 0xe2, 0x58, 0x8e, 0x5c, 0x95, 0x38, 0xaa, 0x72, 0x25, 0x76, 0xca, 0x15, 0xd8, 0x82, 0x2a, 0xfe, + 0x23, 0x7f, 0xe4, 0x8f, 0xf8, 0x3f, 0x24, 0x95, 0x4a, 0xf5, 0xe7, 0x74, 0xcf, 0xee, 0x62, 0x66, + 0xc1, 0x03, 0x7c, 0x52, 0xe5, 0xbf, 0xdd, 0x7e, 0xaf, 0x5f, 0x7f, 0x4c, 0xf7, 0xeb, 0xf7, 0x5e, + 0xbf, 0xf7, 0x1a, 0xce, 0xdf, 0xbd, 0x18, 0xcf, 0x7b, 0xe1, 0xc2, 0xdd, 0xd6, 0x1d, 0x12, 0x05, + 0x24, 0x21, 0xf1, 0x42, 0xf3, 0xee, 0xe6, 0x82, 0xd3, 0xf4, 0x16, 0x76, 0x5e, 0x58, 0xd8, 0x24, + 0x01, 0x89, 0x9c, 0x84, 0xb8, 0xf3, 0xcd, 0x28, 0x4c, 0x42, 0xf4, 0x18, 0xc7, 0x9e, 0x4f, 0xb1, + 0xe7, 0x9b, 0x77, 0x37, 0xe7, 0x9d, 0xa6, 0x37, 0xbf, 0xf3, 0xc2, 0xec, 0xf3, 0x9b, 0x5e, 0xb2, + 0xd5, 0xba, 0x33, 0xdf, 0x08, 0xb7, 0x17, 0x36, 0xc3, 0xcd, 0x70, 0x81, 0x55, 0xba, 0xd3, 0xda, + 0x60, 0xff, 0xd8, 0x1f, 0xf6, 0x8b, 0x13, 0x9b, 0x7d, 0x49, 0x34, 0xed, 0x34, 0xbd, 0x6d, 0xa7, + 0xb1, 0xe5, 0x05, 0x24, 0xda, 0x55, 0x8d, 0x47, 0x24, 0x0e, 0x5b, 0x51, 0x83, 0x64, 0xbb, 0x70, + 0x68, 0xad, 0x78, 0x61, 0x9b, 0x24, 0x4e, 0x87, 0x8e, 0xcf, 0x2e, 0x74, 0xab, 0x15, 0xb5, 0x82, + 0xc4, 0xdb, 0x6e, 0x6f, 0xe6, 0xa3, 0x79, 0x15, 0xe2, 0xc6, 0x16, 0xd9, 0x76, 0xda, 0xea, 0xbd, + 0xd8, 0xad, 0x5e, 0x2b, 0xf1, 0xfc, 0x05, 0x2f, 0x48, 0xe2, 0x24, 0x3a, 0x6c, 0x4c, 0x31, 0x89, + 0x76, 0x48, 0x94, 0x0e, 0x88, 0xdc, 0x77, 0xb6, 0x9b, 0x3e, 0xe9, 0x30, 0x26, 0xfb, 0x8f, 0x2c, + 0x38, 0xbb, 0x78, 0xbb, 0xbe, 0xe2, 0x3b, 0x71, 0xe2, 0x35, 0x96, 0xfc, 0xb0, 0x71, 0xb7, 0x9e, + 0x84, 0x11, 0xb9, 0x15, 0xfa, 0xad, 0x6d, 0x52, 0x67, 0xd3, 0x87, 0xce, 0xc3, 0xd0, 0x0e, 0xfb, + 0xbf, 0x56, 0x9d, 0xb1, 0xce, 0x5a, 0xe7, 0x2a, 0x4b, 0x13, 0x3f, 0xdc, 0x9b, 0xfb, 0xc0, 0xfe, + 0xde, 0xdc, 0xd0, 0x2d, 0x51, 0x8e, 0x15, 0x06, 0x7a, 0x1a, 0x06, 0x36, 0xe2, 0xf5, 0xdd, 0x26, + 0x99, 0x29, 0x31, 0xdc, 0x31, 0x81, 0x3b, 0xb0, 0x5a, 0xa7, 0xa5, 0x58, 0x40, 0xd1, 0x02, 0x54, + 0x9a, 0x4e, 0x94, 0x78, 0x89, 0x17, 0x06, 0x33, 0xe5, 0xb3, 0xd6, 0xb9, 0xfe, 0xa5, 0x49, 0x81, + 0x5a, 0xa9, 0x49, 0x00, 0x4e, 0x71, 0x68, 0x37, 0x22, 0xe2, 0xb8, 0x37, 0x02, 0x7f, 0x77, 0xa6, + 0xef, 0xac, 0x75, 0x6e, 0x28, 0xed, 0x06, 0x16, 0xe5, 0x58, 0x61, 0xd8, 0xdf, 0x2b, 0xc1, 0xd0, + 0xe2, 0xc6, 0x86, 0x17, 0x78, 0xc9, 0x2e, 0x7a, 0x07, 0x46, 0x82, 0xd0, 0x25, 0xf2, 0x3f, 0x1b, + 0xc5, 0xf0, 0x85, 0x67, 0xe7, 0x0f, 0x5b, 0x8a, 0xf3, 0xd7, 0xb5, 0x1a, 0x4b, 0x13, 0xfb, 0x7b, + 0x73, 0x23, 0x7a, 0x09, 0x36, 0x28, 0xa2, 0xb7, 0x60, 0xb8, 0x19, 0xba, 0xaa, 0x81, 0x12, 0x6b, + 0xe0, 0x99, 0xc3, 0x1b, 0xa8, 0xa5, 0x15, 0x96, 0xc6, 0xf7, 0xf7, 0xe6, 0x86, 0xb5, 0x02, 0xac, + 0x93, 0x43, 0x3e, 0x8c, 0xd3, 0xbf, 0x41, 0xe2, 0xa9, 0x16, 0xca, 0xac, 0x85, 0xe7, 0xf3, 0x5b, + 0xd0, 0x2a, 0x2d, 0x4d, 0xed, 0xef, 0xcd, 0x8d, 0x67, 0x0a, 0x71, 0x96, 0xb4, 0xfd, 0x79, 0x18, + 0x5b, 0x4c, 0x12, 0xa7, 0xb1, 0x45, 0x5c, 0xfe, 0x7d, 0xd1, 0x4b, 0xd0, 0x17, 0x38, 0xdb, 0x44, + 0x7c, 0xfd, 0xb3, 0x62, 0xda, 0xfb, 0xae, 0x3b, 0xdb, 0xe4, 0x60, 0x6f, 0x6e, 0xe2, 0x66, 0xe0, + 0xbd, 0xd7, 0x12, 0x6b, 0x86, 0x96, 0x61, 0x86, 0x8d, 0x2e, 0x00, 0xb8, 0x64, 0xc7, 0x6b, 0x90, + 0x9a, 0x93, 0x6c, 0x89, 0xd5, 0x80, 0x44, 0x5d, 0xa8, 0x2a, 0x08, 0xd6, 0xb0, 0xec, 0x2f, 0x59, + 0x50, 0x59, 0xdc, 0x09, 0x3d, 0xb7, 0x16, 0xba, 0x31, 0x6a, 0xc1, 0x78, 0x33, 0x22, 0x1b, 0x24, + 0x52, 0x45, 0x33, 0xd6, 0xd9, 0xf2, 0xb9, 0xe1, 0x0b, 0x17, 0x72, 0xc6, 0x6d, 0x56, 0x5a, 0x09, + 0x92, 0x68, 0x77, 0xe9, 0x11, 0xd1, 0xf4, 0x78, 0x06, 0x8a, 0xb3, 0x6d, 0xd8, 
0xbf, 0x54, 0x82, + 0x53, 0x8b, 0x9f, 0x6f, 0x45, 0xa4, 0xea, 0xc5, 0x77, 0xb3, 0x5b, 0xc1, 0xf5, 0xe2, 0xbb, 0xd7, + 0xd3, 0xc9, 0x50, 0x6b, 0xb0, 0x2a, 0xca, 0xb1, 0xc2, 0x40, 0xcf, 0xc3, 0x20, 0xfd, 0x7d, 0x13, + 0xaf, 0x89, 0xd1, 0x4f, 0x09, 0xe4, 0xe1, 0xaa, 0x93, 0x38, 0x55, 0x0e, 0xc2, 0x12, 0x07, 0x5d, + 0x83, 0xe1, 0x06, 0xdb, 0xef, 0x9b, 0xd7, 0x42, 0x97, 0xb0, 0x2f, 0x5c, 0x59, 0x7a, 0x8e, 0xa2, + 0x2f, 0xa7, 0xc5, 0x07, 0x7b, 0x73, 0x33, 0xbc, 0x6f, 0x82, 0x84, 0x06, 0xc3, 0x7a, 0x7d, 0x64, + 0xab, 0x8d, 0xd8, 0xc7, 0x28, 0x41, 0x87, 0x4d, 0x78, 0x4e, 0xdb, 0x53, 0xfd, 0x6c, 0x4f, 0x8d, + 0x74, 0xd9, 0x4f, 0xff, 0xd8, 0x12, 0x73, 0xb2, 0xea, 0xf9, 0x26, 0x7b, 0xb8, 0x00, 0x10, 0x93, + 0x46, 0x44, 0x12, 0x6d, 0x56, 0xd4, 0x67, 0xae, 0x2b, 0x08, 0xd6, 0xb0, 0xe8, 0xe6, 0x8f, 0xb7, + 0x9c, 0x88, 0xad, 0x16, 0x31, 0x37, 0x6a, 0xf3, 0xd7, 0x25, 0x00, 0xa7, 0x38, 0xc6, 0xe6, 0x2f, + 0xe7, 0x6e, 0xfe, 0xdf, 0xb1, 0x60, 0x70, 0xc9, 0x0b, 0x5c, 0x2f, 0xd8, 0x44, 0xef, 0xc0, 0x10, + 0xe5, 0xe8, 0xae, 0x93, 0x38, 0x62, 0xdf, 0x7f, 0x44, 0x2e, 0x1e, 0x9d, 0xc1, 0xca, 0xe5, 0x13, + 0xcf, 0x53, 0x6c, 0xba, 0x88, 0x6e, 0xdc, 0x79, 0x97, 0x34, 0x92, 0x6b, 0x24, 0x71, 0xd2, 0xe1, + 0xa4, 0x65, 0x58, 0x51, 0x45, 0x37, 0x61, 0x20, 0x71, 0xa2, 0x4d, 0x92, 0x88, 0x6d, 0x9f, 0xb3, + 0x29, 0x39, 0x0d, 0x4c, 0x97, 0x1c, 0x09, 0x1a, 0x24, 0x65, 0x90, 0xeb, 0x8c, 0x08, 0x16, 0xc4, + 0xec, 0x06, 0x8c, 0x2c, 0x3b, 0x4d, 0xe7, 0x8e, 0xe7, 0x7b, 0x89, 0x47, 0x62, 0xf4, 0x61, 0x28, + 0x3b, 0xae, 0xcb, 0x36, 0x40, 0x65, 0xe9, 0xd4, 0xfe, 0xde, 0x5c, 0x79, 0xd1, 0x75, 0x0f, 0xf6, + 0xe6, 0x40, 0x61, 0xed, 0x62, 0x8a, 0x81, 0x9e, 0x85, 0x3e, 0x37, 0x0a, 0x9b, 0x33, 0x25, 0x86, + 0x79, 0x9a, 0xee, 0xd4, 0x6a, 0x14, 0x36, 0x33, 0xa8, 0x0c, 0xc7, 0xfe, 0x41, 0x09, 0xd0, 0x32, + 0x69, 0x6e, 0xad, 0xd6, 0x8d, 0x6f, 0x7a, 0x0e, 0x86, 0xb6, 0xc3, 0xc0, 0x4b, 0xc2, 0x28, 0x16, + 0x0d, 0xb2, 0x75, 0x71, 0x4d, 0x94, 0x61, 0x05, 0x45, 0x67, 0xa1, 0xaf, 0x99, 0x6e, 0xef, 0x11, + 0xc9, 0x1a, 0xd8, 0xc6, 0x66, 0x10, 0x8a, 0xd1, 0x8a, 0x49, 0x24, 0xd6, 0xb3, 0xc2, 0xb8, 0x19, + 0x93, 0x08, 0x33, 0x48, 0xba, 0x82, 0xe8, 0xda, 0x12, 0xab, 0x35, 0xb3, 0x82, 0x28, 0x04, 0x6b, + 0x58, 0xe8, 0x6d, 0xa8, 0xf0, 0x7f, 0x98, 0x6c, 0xb0, 0xa5, 0x9b, 0xcb, 0x14, 0xae, 0x86, 0x0d, + 0xc7, 0xcf, 0x4e, 0xfe, 0x28, 0x5b, 0x71, 0x92, 0x10, 0x4e, 0x69, 0x1a, 0x2b, 0x6e, 0x20, 0x77, + 0xc5, 0xfd, 0x1d, 0x0b, 0xd0, 0xb2, 0x17, 0xb8, 0x24, 0x3a, 0x81, 0xa3, 0xb3, 0xb7, 0xcd, 0xf0, + 0x27, 0xb4, 0x6b, 0xe1, 0x76, 0x33, 0x0c, 0x48, 0x90, 0x2c, 0x87, 0x81, 0xcb, 0x8f, 0xd3, 0x8f, + 0x43, 0x5f, 0x42, 0x9b, 0xe2, 0xdd, 0x7a, 0x5a, 0x7e, 0x16, 0xda, 0xc0, 0xc1, 0xde, 0xdc, 0xe9, + 0xf6, 0x1a, 0xac, 0x0b, 0xac, 0x0e, 0xfa, 0x18, 0x0c, 0xc4, 0x89, 0x93, 0xb4, 0x62, 0xd1, 0xd1, + 0x27, 0x64, 0x47, 0xeb, 0xac, 0xf4, 0x60, 0x6f, 0x6e, 0x5c, 0x55, 0xe3, 0x45, 0x58, 0x54, 0x40, + 0xcf, 0xc0, 0xe0, 0x36, 0x89, 0x63, 0x67, 0x53, 0x32, 0xb8, 0x71, 0x51, 0x77, 0xf0, 0x1a, 0x2f, + 0xc6, 0x12, 0x8e, 0x9e, 0x84, 0x7e, 0x12, 0x45, 0x61, 0x24, 0x56, 0xc4, 0xa8, 0x40, 0xec, 0x5f, + 0xa1, 0x85, 0x98, 0xc3, 0xec, 0xff, 0x62, 0xc1, 0xb8, 0xea, 0x2b, 0x6f, 0xeb, 0x04, 0xb6, 0xbc, + 0x0b, 0xd0, 0x90, 0x03, 0x8c, 0xd9, 0x46, 0xd3, 0xda, 0xe8, 0xbc, 0xfc, 0xda, 0x27, 0x34, 0x6d, + 0x43, 0x15, 0xc5, 0x58, 0xa3, 0x6b, 0xff, 0x3b, 0x0b, 0xa6, 0x32, 0x63, 0xbb, 0xea, 0xc5, 0x09, + 0x7a, 0xab, 0x6d, 0x7c, 0xf3, 0xc5, 0xc6, 0x47, 0x6b, 0xb3, 0xd1, 0xa9, 0xf5, 0x22, 0x4b, 0xb4, + 0xb1, 0x61, 0xe8, 0xf7, 0x12, 0xb2, 0x2d, 0x87, 0xf5, 0x7c, 0xc1, 0x61, 0xf1, 0xfe, 0xa5, 0x5f, + 0x69, 
0x8d, 0xd2, 0xc0, 0x9c, 0x94, 0xfd, 0xbf, 0x2c, 0xa8, 0x2c, 0x87, 0xc1, 0x86, 0xb7, 0x79, + 0xcd, 0x69, 0x9e, 0xc0, 0xf7, 0xa9, 0x43, 0x1f, 0xa3, 0xce, 0x87, 0xf0, 0x42, 0xde, 0x10, 0x44, + 0xc7, 0xe6, 0xe9, 0x99, 0xca, 0x85, 0x05, 0xc5, 0xa6, 0x68, 0x11, 0x66, 0xc4, 0x66, 0x5f, 0x81, + 0x8a, 0x42, 0x40, 0x13, 0x50, 0xbe, 0x4b, 0xb8, 0x24, 0x59, 0xc1, 0xf4, 0x27, 0x9a, 0x86, 0xfe, + 0x1d, 0xc7, 0x6f, 0x89, 0xcd, 0x8b, 0xf9, 0x9f, 0x8f, 0x97, 0x2e, 0x5a, 0xf6, 0x0f, 0xd8, 0x0e, + 0x14, 0x8d, 0xac, 0x04, 0x3b, 0x82, 0x39, 0x7c, 0xd9, 0x82, 0x69, 0xbf, 0x03, 0x53, 0x12, 0x73, + 0x72, 0x14, 0x76, 0xf6, 0x98, 0xe8, 0xf6, 0x74, 0x27, 0x28, 0xee, 0xd8, 0x1a, 0xe5, 0xf5, 0x61, + 0x93, 0x2e, 0x38, 0xc7, 0x67, 0x5d, 0x17, 0x32, 0xc0, 0x0d, 0x51, 0x86, 0x15, 0xd4, 0xfe, 0x73, + 0x0b, 0xa6, 0xd5, 0x38, 0xae, 0x90, 0xdd, 0x3a, 0xf1, 0x49, 0x23, 0x09, 0xa3, 0x87, 0x65, 0x24, + 0x8f, 0xf3, 0x6f, 0xc2, 0x79, 0xd2, 0xb0, 0x20, 0x50, 0xbe, 0x42, 0x76, 0xf9, 0x07, 0xd2, 0x07, + 0x5a, 0x3e, 0x74, 0xa0, 0xbf, 0x65, 0xc1, 0xa8, 0x1a, 0xe8, 0x09, 0x6c, 0xb9, 0xab, 0xe6, 0x96, + 0xfb, 0x70, 0xc1, 0xf5, 0xda, 0x65, 0xb3, 0xfd, 0xed, 0x12, 0x65, 0x1b, 0x02, 0xa7, 0x16, 0x85, + 0x74, 0x92, 0x28, 0xc7, 0x7f, 0x48, 0xbe, 0x52, 0x6f, 0x83, 0xbd, 0x42, 0x76, 0xd7, 0x43, 0x2a, + 0x4d, 0x74, 0x1e, 0xac, 0xf1, 0x51, 0xfb, 0x0e, 0xfd, 0xa8, 0xbf, 0x5f, 0x82, 0x53, 0x6a, 0x5a, + 0x8c, 0x53, 0xfa, 0x67, 0x72, 0x62, 0x5e, 0x80, 0x61, 0x97, 0x6c, 0x38, 0x2d, 0x3f, 0x51, 0xda, + 0x44, 0x3f, 0x57, 0x33, 0xab, 0x69, 0x31, 0xd6, 0x71, 0x7a, 0x98, 0xcb, 0x6f, 0x0c, 0x33, 0x7e, + 0x9e, 0x38, 0x74, 0xd5, 0x53, 0x09, 0x4f, 0x53, 0x0f, 0x47, 0x74, 0xf5, 0x50, 0xa8, 0x82, 0x4f, + 0x42, 0xbf, 0xb7, 0x4d, 0xcf, 0xfc, 0x92, 0x79, 0x94, 0xaf, 0xd1, 0x42, 0xcc, 0x61, 0xe8, 0x29, + 0x18, 0x6c, 0x84, 0xdb, 0xdb, 0x4e, 0xe0, 0xce, 0x94, 0x99, 0xcc, 0x39, 0x4c, 0xc5, 0x82, 0x65, + 0x5e, 0x84, 0x25, 0x0c, 0x3d, 0x06, 0x7d, 0x4e, 0xb4, 0x19, 0xcf, 0xf4, 0x31, 0x9c, 0x21, 0xda, + 0xd2, 0x62, 0xb4, 0x19, 0x63, 0x56, 0x4a, 0x65, 0xc9, 0x7b, 0x61, 0x74, 0xd7, 0x0b, 0x36, 0xab, + 0x5e, 0xc4, 0x04, 0x43, 0x4d, 0x96, 0xbc, 0xad, 0x20, 0x58, 0xc3, 0x42, 0x35, 0xe8, 0x6f, 0x86, + 0x51, 0x12, 0xcf, 0x0c, 0xb0, 0x89, 0x7f, 0x2e, 0x77, 0xfb, 0xf1, 0x71, 0xd7, 0xc2, 0x28, 0x49, + 0x87, 0x42, 0xff, 0xc5, 0x98, 0x13, 0x42, 0xcb, 0x50, 0x26, 0xc1, 0xce, 0xcc, 0x20, 0xa3, 0xf7, + 0xa1, 0xc3, 0xe9, 0xad, 0x04, 0x3b, 0xb7, 0x9c, 0x28, 0xe5, 0x57, 0x2b, 0xc1, 0x0e, 0xa6, 0xb5, + 0x51, 0x03, 0x2a, 0xd2, 0x84, 0x15, 0xcf, 0x0c, 0x15, 0x59, 0x8a, 0x58, 0xa0, 0x63, 0xf2, 0x5e, + 0xcb, 0x8b, 0xc8, 0x36, 0x09, 0x92, 0x38, 0x55, 0xac, 0x24, 0x34, 0xc6, 0x29, 0x5d, 0xd4, 0x80, + 0x11, 0x2e, 0x7f, 0x5e, 0x0b, 0x5b, 0x41, 0x12, 0xcf, 0x54, 0x58, 0x97, 0x73, 0x2c, 0x17, 0xb7, + 0xd2, 0x1a, 0x4b, 0xd3, 0x82, 0xfc, 0x88, 0x56, 0x18, 0x63, 0x83, 0x28, 0x7a, 0x0b, 0x46, 0x7d, + 0x6f, 0x87, 0x04, 0x24, 0x8e, 0x6b, 0x51, 0x78, 0x87, 0xcc, 0x00, 0x1b, 0xcd, 0x93, 0x79, 0x5a, + 0x7c, 0x78, 0x87, 0x2c, 0x4d, 0xee, 0xef, 0xcd, 0x8d, 0x5e, 0xd5, 0x6b, 0x63, 0x93, 0x18, 0x7a, + 0x1b, 0xc6, 0xa8, 0xb0, 0xeb, 0xa5, 0xe4, 0x87, 0x8b, 0x93, 0x47, 0xfb, 0x7b, 0x73, 0x63, 0xd8, + 0xa8, 0x8e, 0x33, 0xe4, 0xd0, 0x3a, 0x54, 0x7c, 0x6f, 0x83, 0x34, 0x76, 0x1b, 0x3e, 0x99, 0x19, + 0x61, 0xb4, 0x73, 0x36, 0xe7, 0x55, 0x89, 0xce, 0x15, 0x0c, 0xf5, 0x17, 0xa7, 0x84, 0xd0, 0x2d, + 0x38, 0x9d, 0x90, 0x68, 0xdb, 0x0b, 0x1c, 0xba, 0xa9, 0x84, 0xf4, 0xcb, 0x4c, 0x25, 0xa3, 0x6c, + 0xd5, 0x9e, 0x11, 0x13, 0x7b, 0x7a, 0xbd, 0x23, 0x16, 0xee, 0x52, 0x1b, 0xdd, 0x80, 0x71, 0xb6, + 0x9f, 0x6a, 0x2d, 0xdf, 0xaf, 
0x85, 0xbe, 0xd7, 0xd8, 0x9d, 0x19, 0x63, 0x04, 0x9f, 0x92, 0x06, + 0x90, 0x35, 0x13, 0x4c, 0x15, 0xc3, 0xf4, 0x1f, 0xce, 0xd6, 0x46, 0x3e, 0x8c, 0xc7, 0xa4, 0xd1, + 0x8a, 0xbc, 0x64, 0x97, 0xae, 0x7d, 0x72, 0x3f, 0x99, 0x19, 0x2f, 0xa2, 0xe8, 0xd6, 0xcd, 0x4a, + 0xdc, 0xfa, 0x94, 0x29, 0xc4, 0x59, 0xd2, 0x94, 0x55, 0xc4, 0x89, 0xeb, 0x05, 0x33, 0x13, 0x8c, + 0x03, 0xa9, 0xfd, 0x55, 0xa7, 0x85, 0x98, 0xc3, 0x98, 0xfd, 0x80, 0xfe, 0xb8, 0x41, 0xb9, 0xf4, + 0x24, 0x43, 0x4c, 0xed, 0x07, 0x12, 0x80, 0x53, 0x1c, 0x2a, 0x1a, 0x24, 0xc9, 0xee, 0x0c, 0x62, + 0xa8, 0x6a, 0xab, 0xad, 0xaf, 0x7f, 0x06, 0xd3, 0x72, 0x74, 0x0b, 0x06, 0x49, 0xb0, 0xb3, 0x1a, + 0x85, 0xdb, 0x33, 0x53, 0x45, 0x78, 0xc0, 0x0a, 0x47, 0xe6, 0xe7, 0x47, 0xaa, 0xc2, 0x88, 0x62, + 0x2c, 0x89, 0xa1, 0xfb, 0x30, 0xd3, 0xe1, 0x2b, 0xf1, 0x8f, 0x32, 0xcd, 0x3e, 0xca, 0x27, 0x44, + 0xdd, 0x99, 0xf5, 0x2e, 0x78, 0x07, 0x87, 0xc0, 0x70, 0x57, 0xea, 0xf6, 0x1d, 0x18, 0x53, 0x8c, + 0x8a, 0x7d, 0x6f, 0x34, 0x07, 0xfd, 0x94, 0x17, 0x4b, 0x85, 0xbe, 0x42, 0x27, 0x95, 0xb2, 0xe8, + 0x18, 0xf3, 0x72, 0x36, 0xa9, 0xde, 0xe7, 0xc9, 0xd2, 0x6e, 0x42, 0xb8, 0x62, 0x57, 0xd6, 0x26, + 0x55, 0x02, 0x70, 0x8a, 0x63, 0xff, 0x1f, 0x2e, 0x26, 0xa5, 0xdc, 0xb0, 0xc0, 0x49, 0x70, 0x1e, + 0x86, 0xb6, 0xc2, 0x38, 0xa1, 0xd8, 0xac, 0x8d, 0xfe, 0x54, 0x30, 0xba, 0x2c, 0xca, 0xb1, 0xc2, + 0x40, 0xaf, 0xc2, 0x68, 0x43, 0x6f, 0x40, 0x1c, 0x63, 0xa7, 0x44, 0x15, 0xb3, 0x75, 0x6c, 0xe2, + 0xa2, 0x8b, 0x30, 0xc4, 0xac, 0xdc, 0x8d, 0xd0, 0x17, 0x2a, 0xa4, 0x3c, 0x95, 0x87, 0x6a, 0xa2, + 0xfc, 0x40, 0xfb, 0x8d, 0x15, 0x36, 0x55, 0xc4, 0x69, 0x17, 0xd6, 0x6a, 0xe2, 0x00, 0x51, 0x8a, + 0xf8, 0x65, 0x56, 0x8a, 0x05, 0xd4, 0xfe, 0x17, 0x25, 0x6d, 0x96, 0xa9, 0x02, 0x44, 0xd0, 0x9b, + 0x30, 0x78, 0xcf, 0xf1, 0x12, 0x2f, 0xd8, 0x14, 0xd2, 0xc3, 0x8b, 0x05, 0x4f, 0x13, 0x56, 0xfd, + 0x36, 0xaf, 0xca, 0x4f, 0x3e, 0xf1, 0x07, 0x4b, 0x82, 0x94, 0x76, 0xd4, 0x0a, 0x02, 0x4a, 0xbb, + 0xd4, 0x3b, 0x6d, 0xcc, 0xab, 0x72, 0xda, 0xe2, 0x0f, 0x96, 0x04, 0xd1, 0x06, 0x80, 0x5c, 0x4b, + 0xc4, 0x15, 0xd6, 0xe5, 0x8f, 0xf6, 0x42, 0x7e, 0x5d, 0xd5, 0x5e, 0x1a, 0xa3, 0x67, 0x6d, 0xfa, + 0x1f, 0x6b, 0x94, 0xed, 0x84, 0x09, 0x61, 0xed, 0xdd, 0x42, 0x9f, 0xa5, 0x5b, 0xda, 0x89, 0x12, + 0xe2, 0x2e, 0x26, 0x59, 0x03, 0xfd, 0xe1, 0x22, 0xf6, 0xba, 0xb7, 0x4d, 0xf4, 0xed, 0x2f, 0x88, + 0xe0, 0x94, 0x9e, 0xfd, 0xdd, 0x32, 0xcc, 0x74, 0xeb, 0x2e, 0x5d, 0x92, 0xe4, 0xbe, 0x97, 0x2c, + 0x53, 0x31, 0xc9, 0x32, 0x97, 0xe4, 0x8a, 0x28, 0xc7, 0x0a, 0x83, 0xae, 0x8d, 0xd8, 0xdb, 0x94, + 0xca, 0x52, 0x7f, 0xba, 0x36, 0xea, 0xac, 0x14, 0x0b, 0x28, 0xc5, 0x8b, 0x88, 0x13, 0x8b, 0xcb, + 0x0d, 0x6d, 0x0d, 0x61, 0x56, 0x8a, 0x05, 0x54, 0x37, 0x88, 0xf4, 0xe5, 0x18, 0x44, 0x8c, 0x29, + 0xea, 0x7f, 0xb0, 0x53, 0x84, 0x3e, 0x07, 0xb0, 0xe1, 0x05, 0x5e, 0xbc, 0xc5, 0xa8, 0x0f, 0xf4, + 0x4c, 0x5d, 0x09, 0x59, 0xab, 0x8a, 0x0a, 0xd6, 0x28, 0xa2, 0x97, 0x61, 0x58, 0x6d, 0xcf, 0xb5, + 0xea, 0xcc, 0xa0, 0x69, 0x10, 0x4f, 0x79, 0x55, 0x15, 0xeb, 0x78, 0xf6, 0xbb, 0xd9, 0xf5, 0x22, + 0x76, 0x85, 0x36, 0xbf, 0x56, 0xd1, 0xf9, 0x2d, 0x1d, 0x3e, 0xbf, 0xf6, 0x7f, 0x2e, 0xc3, 0xb8, + 0xd1, 0x58, 0x2b, 0x2e, 0xc0, 0xd1, 0x5e, 0xa7, 0x07, 0x96, 0x93, 0x10, 0xb1, 0x27, 0xcf, 0xf7, + 0xb2, 0x69, 0xf4, 0xe3, 0x8d, 0xee, 0x05, 0x4e, 0x09, 0x6d, 0x41, 0xc5, 0x77, 0x62, 0x66, 0x52, + 0x21, 0x62, 0x2f, 0xf6, 0x46, 0x36, 0x55, 0x3f, 0x9c, 0x38, 0xd1, 0x4e, 0x0f, 0xde, 0x4a, 0x4a, + 0x9c, 0x9e, 0xb6, 0x54, 0xd8, 0x91, 0x37, 0x6a, 0xaa, 0x3b, 0x54, 0x22, 0xda, 0xc5, 0x1c, 0x86, + 0x2e, 0xc2, 0x48, 0x44, 0xd8, 0x4a, 0x59, 0xa6, 0xf2, 
0x1c, 0x5b, 0x7a, 0xfd, 0xa9, 0xe0, 0x87, + 0x35, 0x18, 0x36, 0x30, 0x53, 0xb9, 0x7f, 0xe0, 0x10, 0xb9, 0xff, 0x19, 0x18, 0x64, 0x3f, 0xd4, + 0xaa, 0x50, 0x5f, 0x68, 0x8d, 0x17, 0x63, 0x09, 0xcf, 0x2e, 0xa2, 0xa1, 0x82, 0x8b, 0xe8, 0x59, + 0x18, 0xab, 0x3a, 0x64, 0x3b, 0x0c, 0x56, 0x02, 0xb7, 0x19, 0x7a, 0x41, 0x82, 0x66, 0xa0, 0x8f, + 0x9d, 0x27, 0x7c, 0xbf, 0xf7, 0x51, 0x0a, 0xb8, 0x8f, 0xca, 0xee, 0xf6, 0x9f, 0x94, 0x60, 0xb4, + 0x4a, 0x7c, 0x92, 0x10, 0xae, 0xf7, 0xc4, 0x68, 0x15, 0xd0, 0x66, 0xe4, 0x34, 0x48, 0x8d, 0x44, + 0x5e, 0xe8, 0xd6, 0x49, 0x23, 0x0c, 0xd8, 0x45, 0x14, 0x3d, 0x20, 0x4f, 0xef, 0xef, 0xcd, 0xa1, + 0x4b, 0x6d, 0x50, 0xdc, 0xa1, 0x06, 0x72, 0x61, 0xb4, 0x19, 0x11, 0xc3, 0x6e, 0x68, 0xe5, 0x8b, + 0x1a, 0x35, 0xbd, 0x0a, 0x97, 0x86, 0x8d, 0x22, 0x6c, 0x12, 0x45, 0x9f, 0x82, 0x89, 0x30, 0x6a, + 0x6e, 0x39, 0x41, 0x95, 0x34, 0x49, 0xe0, 0x52, 0x15, 0x40, 0x58, 0x3b, 0xa6, 0xf7, 0xf7, 0xe6, + 0x26, 0x6e, 0x64, 0x60, 0xb8, 0x0d, 0x1b, 0xbd, 0x09, 0x93, 0xcd, 0x28, 0x6c, 0x3a, 0x9b, 0x6c, + 0xc9, 0x08, 0x69, 0x85, 0xf3, 0xa6, 0xf3, 0xfb, 0x7b, 0x73, 0x93, 0xb5, 0x2c, 0xf0, 0x60, 0x6f, + 0x6e, 0x8a, 0x4d, 0x19, 0x2d, 0x49, 0x81, 0xb8, 0x9d, 0x8c, 0xfd, 0x1e, 0x9c, 0xaa, 0x86, 0xf7, + 0x82, 0x7b, 0x4e, 0xe4, 0x2e, 0xd6, 0xd6, 0x34, 0xe3, 0xc4, 0x1b, 0x52, 0xf9, 0xe5, 0x17, 0x7c, + 0x39, 0x27, 0x9b, 0x46, 0x83, 0xab, 0x1d, 0xab, 0x9e, 0x4f, 0xba, 0x98, 0x43, 0xfe, 0x49, 0xc9, + 0x68, 0x33, 0xc5, 0x57, 0x77, 0x17, 0x56, 0xd7, 0xbb, 0x8b, 0xcf, 0xc2, 0xd0, 0x86, 0x47, 0x7c, + 0x17, 0x93, 0x0d, 0xf1, 0xb5, 0x5e, 0x28, 0x72, 0xb9, 0xb3, 0x4a, 0xeb, 0x48, 0xeb, 0x18, 0x57, + 0xa2, 0x57, 0x05, 0x19, 0xac, 0x08, 0xa2, 0x16, 0x4c, 0x48, 0x3d, 0x4c, 0x42, 0xc5, 0x66, 0x7f, + 0xb1, 0x98, 0x9a, 0x67, 0x36, 0xc3, 0x3e, 0x2f, 0xce, 0x10, 0xc4, 0x6d, 0x4d, 0x50, 0xfd, 0x79, + 0x9b, 0x1e, 0x75, 0x7d, 0x6c, 0xe9, 0x33, 0xfd, 0x99, 0x99, 0x02, 0x58, 0xa9, 0xfd, 0x6b, 0x16, + 0x3c, 0xd2, 0x36, 0x5b, 0xc2, 0x4e, 0x72, 0x6c, 0xdf, 0x28, 0x6b, 0xac, 0x28, 0xe5, 0x1b, 0x2b, + 0xec, 0x1b, 0x30, 0xbd, 0xb2, 0xdd, 0x4c, 0x76, 0xab, 0x9e, 0x79, 0xe5, 0xf2, 0x0a, 0x0c, 0x6c, + 0x13, 0xd7, 0x6b, 0x6d, 0x8b, 0xcf, 0x3a, 0x27, 0xcf, 0x85, 0x6b, 0xac, 0xf4, 0x60, 0x6f, 0x6e, + 0xb4, 0x9e, 0x84, 0x91, 0xb3, 0x49, 0x78, 0x01, 0x16, 0xe8, 0xf6, 0x8f, 0x2d, 0x18, 0x97, 0xfc, + 0x61, 0xd1, 0x75, 0x23, 0x12, 0xc7, 0x68, 0x16, 0x4a, 0x5e, 0x53, 0x10, 0x02, 0x41, 0xa8, 0xb4, + 0x56, 0xc3, 0x25, 0xaf, 0x89, 0xde, 0x84, 0x0a, 0xbf, 0xa9, 0x4b, 0x17, 0x47, 0x8f, 0x37, 0x7f, + 0x4c, 0x37, 0x5c, 0x97, 0x34, 0x70, 0x4a, 0x4e, 0x4a, 0xc9, 0xec, 0xe4, 0x29, 0x9b, 0xf7, 0x46, + 0x97, 0x45, 0x39, 0x56, 0x18, 0xe8, 0x1c, 0x0c, 0x05, 0xa1, 0xcb, 0x2f, 0x53, 0xf9, 0x3e, 0x65, + 0x4b, 0xee, 0xba, 0x28, 0xc3, 0x0a, 0x6a, 0x7f, 0xd5, 0x82, 0x11, 0x39, 0xc6, 0x82, 0x02, 0x3b, + 0xdd, 0x24, 0xa9, 0xb0, 0x9e, 0x6e, 0x12, 0x2a, 0x70, 0x33, 0x88, 0x21, 0x67, 0x97, 0x7b, 0x91, + 0xb3, 0xed, 0xdf, 0x2c, 0xc1, 0x98, 0xec, 0x4e, 0xbd, 0x75, 0x27, 0x26, 0x54, 0x0c, 0xa9, 0x38, + 0x7c, 0xf2, 0x89, 0x5c, 0x67, 0xcf, 0xe7, 0xe9, 0x62, 0xc6, 0x37, 0x4b, 0xc5, 0x9c, 0x45, 0x49, + 0x07, 0xa7, 0x24, 0xd1, 0x0e, 0x4c, 0x06, 0x61, 0xc2, 0x8e, 0x37, 0x05, 0x2f, 0x76, 0xd3, 0x91, + 0x6d, 0xe7, 0x51, 0xd1, 0xce, 0xe4, 0xf5, 0x2c, 0x3d, 0xdc, 0xde, 0x04, 0xba, 0x21, 0x6d, 0x4c, + 0x65, 0xd6, 0xd6, 0xb3, 0xc5, 0xda, 0xea, 0x6e, 0x62, 0xb2, 0x7f, 0xcf, 0x82, 0x8a, 0x44, 0x3b, + 0x89, 0x2b, 0xaf, 0xdb, 0x30, 0x18, 0xb3, 0x4f, 0x24, 0xa7, 0xeb, 0x7c, 0xb1, 0x21, 0xf0, 0xef, + 0x9a, 0x9e, 0xe9, 0xfc, 0x7f, 0x8c, 0x25, 0x35, 0x66, 0x6c, 0x57, 0x03, 0x79, 
0xe8, 0x8c, 0xed, + 0xaa, 0x67, 0xdd, 0x6f, 0xb6, 0x46, 0x0d, 0x6b, 0x00, 0x15, 0x4c, 0x9b, 0x11, 0xd9, 0xf0, 0xee, + 0x67, 0x05, 0xd3, 0x1a, 0x2b, 0xc5, 0x02, 0x8a, 0x36, 0x60, 0xa4, 0x21, 0xcd, 0xd1, 0x29, 0x0b, + 0xf9, 0x48, 0x41, 0xdb, 0xbf, 0xba, 0x46, 0xe2, 0xae, 0x49, 0xcb, 0x1a, 0x25, 0x6c, 0xd0, 0xa5, + 0x7c, 0x2a, 0xbd, 0x29, 0x2f, 0x17, 0x34, 0xdc, 0x44, 0x24, 0x49, 0x5b, 0xe8, 0x7a, 0x49, 0x6e, + 0x7f, 0xd3, 0x82, 0x01, 0x6e, 0xbf, 0x2c, 0x66, 0x04, 0xd6, 0x2e, 0xc8, 0xd2, 0xf9, 0xbc, 0x45, + 0x0b, 0xc5, 0x7d, 0x19, 0xba, 0x0d, 0x15, 0xf6, 0x83, 0xd9, 0x62, 0xca, 0x45, 0xfc, 0xb4, 0x78, + 0xfb, 0x7a, 0x57, 0x6f, 0x49, 0x02, 0x38, 0xa5, 0x65, 0x7f, 0xbf, 0x4c, 0x59, 0x5f, 0x8a, 0x6a, + 0x9c, 0xed, 0xd6, 0x49, 0x9c, 0xed, 0xa5, 0xe3, 0x3f, 0xdb, 0xdf, 0x83, 0xf1, 0x86, 0x76, 0x41, + 0x97, 0x7e, 0xf1, 0x0b, 0x05, 0x97, 0x95, 0x76, 0xab, 0xc7, 0xed, 0x75, 0xcb, 0x26, 0x39, 0x9c, + 0xa5, 0x8f, 0x08, 0x8c, 0xf0, 0xf5, 0x20, 0xda, 0xeb, 0x63, 0xed, 0x2d, 0x14, 0x59, 0x61, 0x7a, + 0x63, 0x6c, 0x15, 0xd7, 0x35, 0x42, 0xd8, 0x20, 0x6b, 0xff, 0x4a, 0x3f, 0xf4, 0xaf, 0xec, 0x90, + 0x20, 0x39, 0x01, 0x56, 0xb7, 0x0d, 0x63, 0x5e, 0xb0, 0x13, 0xfa, 0x3b, 0xc4, 0xe5, 0xf0, 0xa3, + 0x1d, 0xef, 0xa7, 0x45, 0x23, 0x63, 0x6b, 0x06, 0x31, 0x9c, 0x21, 0x7e, 0x1c, 0x96, 0x82, 0xd7, + 0x61, 0x80, 0xaf, 0x0c, 0x61, 0x26, 0xc8, 0xb1, 0xe7, 0xb3, 0x89, 0x15, 0x3b, 0x28, 0xb5, 0x67, + 0xf0, 0xab, 0x04, 0x41, 0x08, 0xbd, 0x0b, 0x63, 0x1b, 0x5e, 0x14, 0x27, 0x54, 0xd9, 0x8f, 0x13, + 0x67, 0xbb, 0x79, 0x04, 0x1b, 0x81, 0x9a, 0x91, 0x55, 0x83, 0x12, 0xce, 0x50, 0x46, 0x9b, 0x30, + 0x4a, 0x55, 0xd4, 0xb4, 0xa9, 0xc1, 0x9e, 0x9b, 0x52, 0x26, 0xc2, 0xab, 0x3a, 0x21, 0x6c, 0xd2, + 0xa5, 0x2c, 0xa9, 0xc1, 0x54, 0xda, 0x21, 0x26, 0xdd, 0x28, 0x96, 0xc4, 0x75, 0x59, 0x0e, 0xa3, + 0x9c, 0x8d, 0x79, 0xca, 0x54, 0x4c, 0xce, 0x96, 0xfa, 0xc3, 0xd8, 0xdf, 0xa6, 0x67, 0x31, 0x9d, + 0xc3, 0x13, 0x38, 0xbe, 0x2e, 0x9b, 0xc7, 0xd7, 0x93, 0x05, 0xbe, 0x6c, 0x97, 0xa3, 0xeb, 0x1d, + 0x18, 0xd6, 0x3e, 0x3c, 0x5a, 0x80, 0x4a, 0x43, 0x3a, 0x73, 0x08, 0x2e, 0xae, 0x44, 0x29, 0xe5, + 0xe5, 0x81, 0x53, 0x1c, 0x3a, 0x2f, 0x54, 0x04, 0xcd, 0xba, 0x7e, 0x51, 0x01, 0x15, 0x33, 0x88, + 0xfd, 0x22, 0xc0, 0xca, 0x7d, 0xd2, 0x58, 0xe4, 0x2a, 0x9e, 0x76, 0xbf, 0x67, 0x75, 0xbf, 0xdf, + 0xb3, 0xbf, 0x65, 0xc1, 0xd8, 0xea, 0xb2, 0x21, 0xd3, 0xcf, 0x03, 0x70, 0xd9, 0xf8, 0xf6, 0xed, + 0xeb, 0xd2, 0x7e, 0xcd, 0x8d, 0x8c, 0xaa, 0x14, 0x6b, 0x18, 0xe8, 0x51, 0x28, 0xfb, 0xad, 0x40, + 0x88, 0xac, 0x83, 0xfb, 0x7b, 0x73, 0xe5, 0xab, 0xad, 0x00, 0xd3, 0x32, 0xcd, 0xc7, 0xaa, 0x5c, + 0xd8, 0xc7, 0x2a, 0xdf, 0xdb, 0xf8, 0xeb, 0x65, 0x98, 0x58, 0xf5, 0xc9, 0x7d, 0xa3, 0xd7, 0x4f, + 0xc3, 0x80, 0x1b, 0x79, 0x3b, 0x24, 0xca, 0x0a, 0x02, 0x55, 0x56, 0x8a, 0x05, 0xb4, 0xb0, 0xdb, + 0xd7, 0xdb, 0xed, 0x07, 0xf9, 0xf1, 0xb9, 0xbc, 0xe5, 0x8e, 0x19, 0x6d, 0xc0, 0x20, 0xbf, 0x0f, + 0x8e, 0x67, 0xfa, 0xd9, 0x52, 0x7c, 0xf5, 0xf0, 0xce, 0x64, 0xe7, 0x67, 0x5e, 0xd8, 0x57, 0xb8, + 0xc3, 0x8d, 0xe2, 0x65, 0xa2, 0x14, 0x4b, 0xe2, 0xb3, 0x1f, 0x87, 0x11, 0x1d, 0xb3, 0x27, 0xcf, + 0x9b, 0xbf, 0x6a, 0xc1, 0xd4, 0xaa, 0x1f, 0x36, 0xee, 0x66, 0xfc, 0xf2, 0x5e, 0x86, 0x61, 0xba, + 0x99, 0x62, 0xc3, 0x69, 0xd5, 0xf0, 0xce, 0x15, 0x20, 0xac, 0xe3, 0x69, 0xd5, 0x6e, 0xde, 0x5c, + 0xab, 0x76, 0x72, 0xea, 0x15, 0x20, 0xac, 0xe3, 0xd9, 0x7f, 0x60, 0xc1, 0xe3, 0x97, 0x96, 0x57, + 0x6a, 0x24, 0x8a, 0xbd, 0x38, 0x21, 0x41, 0xd2, 0xe6, 0x57, 0x4c, 0x65, 0x46, 0x57, 0xeb, 0x4a, + 0x2a, 0x33, 0x56, 0x59, 0x2f, 0x04, 0xf4, 0x61, 0x71, 0xae, 0xff, 0xa6, 0x05, 0x53, 0x97, 0xbc, + 0x04, 
0x93, 0x66, 0x98, 0x75, 0x05, 0x8e, 0x48, 0x33, 0x8c, 0xbd, 0x24, 0x8c, 0x76, 0xb3, 0xae, + 0xc0, 0x58, 0x41, 0xb0, 0x86, 0xc5, 0x5b, 0xde, 0xf1, 0x62, 0xda, 0xd3, 0x92, 0xa9, 0xea, 0x62, + 0x51, 0x8e, 0x15, 0x06, 0x1d, 0x98, 0xeb, 0x45, 0x4c, 0x64, 0xd8, 0x15, 0x3b, 0x58, 0x0d, 0xac, + 0x2a, 0x01, 0x38, 0xc5, 0xb1, 0xff, 0x9e, 0x05, 0xa7, 0x2e, 0xf9, 0xad, 0x38, 0x21, 0xd1, 0x46, + 0x6c, 0x74, 0xf6, 0x45, 0xa8, 0x10, 0x29, 0xdc, 0x8b, 0xbe, 0xaa, 0x43, 0x43, 0x49, 0xfd, 0xdc, + 0x0f, 0x59, 0xe1, 0x15, 0x70, 0x77, 0xed, 0xcd, 0x39, 0xf3, 0xb7, 0x4b, 0x30, 0x7a, 0x79, 0x7d, + 0xbd, 0x76, 0x89, 0x24, 0x82, 0x4b, 0xe6, 0x1b, 0xa5, 0xb0, 0xa6, 0x91, 0x1f, 0x26, 0xfc, 0xb4, + 0x12, 0xcf, 0x9f, 0xe7, 0xe1, 0x22, 0xf3, 0x6b, 0x41, 0x72, 0x23, 0xaa, 0x27, 0x91, 0x17, 0x6c, + 0x76, 0xd4, 0xe1, 0x25, 0x2f, 0x2f, 0x77, 0xe3, 0xe5, 0xe8, 0x45, 0x18, 0x60, 0xf1, 0x2a, 0x52, + 0xf8, 0xf8, 0xa0, 0x92, 0x13, 0x58, 0xe9, 0xc1, 0xde, 0x5c, 0xe5, 0x26, 0x5e, 0xe3, 0x7f, 0xb0, + 0x40, 0x45, 0x6f, 0xc3, 0xf0, 0x56, 0x92, 0x34, 0x2f, 0x13, 0xc7, 0x25, 0x91, 0xe4, 0x13, 0xe7, + 0x0e, 0xe7, 0x13, 0x74, 0x3a, 0x78, 0x85, 0x74, 0x6b, 0xa5, 0x65, 0x31, 0xd6, 0x29, 0xda, 0x75, + 0x80, 0x14, 0xf6, 0x80, 0x74, 0x10, 0xfb, 0xe7, 0x4b, 0x30, 0x78, 0xd9, 0x09, 0x5c, 0x9f, 0x44, + 0x68, 0x15, 0xfa, 0xc8, 0x7d, 0xd2, 0x10, 0x07, 0x79, 0x4e, 0xd7, 0xd3, 0xc3, 0x8e, 0xdb, 0xd5, + 0xe8, 0x7f, 0xcc, 0xea, 0x23, 0x0c, 0x83, 0xb4, 0xdf, 0x97, 0x94, 0x97, 0xf8, 0x73, 0xf9, 0xb3, + 0xa0, 0x16, 0x05, 0x3f, 0x29, 0x45, 0x11, 0x96, 0x84, 0x98, 0x05, 0xaa, 0xd1, 0xac, 0x53, 0xf6, + 0x96, 0x14, 0xd3, 0xec, 0xd6, 0x97, 0x6b, 0x1c, 0x5d, 0xd0, 0xe5, 0x16, 0x28, 0x59, 0x88, 0x53, + 0x72, 0xf6, 0x45, 0x98, 0x66, 0xf7, 0xb1, 0x4e, 0xb2, 0x65, 0xec, 0x9a, 0xdc, 0xe5, 0x69, 0xff, + 0xb0, 0x04, 0x93, 0x6b, 0xf5, 0xe5, 0xba, 0x69, 0x3b, 0xbc, 0x08, 0x23, 0xfc, 0x80, 0xa6, 0x8b, + 0xce, 0xf1, 0x45, 0x7d, 0x75, 0x87, 0xb0, 0xae, 0xc1, 0xb0, 0x81, 0x89, 0x1e, 0x87, 0xb2, 0xf7, + 0x5e, 0x90, 0xf5, 0xea, 0x5b, 0x7b, 0xfd, 0x3a, 0xa6, 0xe5, 0x14, 0x4c, 0xcf, 0x7a, 0xce, 0xe4, + 0x14, 0x58, 0x9d, 0xf7, 0xaf, 0xc1, 0x98, 0x17, 0x37, 0x62, 0x6f, 0x2d, 0xa0, 0x1c, 0xc0, 0x69, + 0xc8, 0xe5, 0x9b, 0x0a, 0xe7, 0xb4, 0xab, 0x0a, 0x8a, 0x33, 0xd8, 0x1a, 0xc7, 0xed, 0x2f, 0x2c, + 0x2f, 0xe4, 0xba, 0x8b, 0x53, 0x51, 0xa8, 0xc9, 0x46, 0x17, 0x33, 0x1f, 0x21, 0x21, 0x0a, 0xf1, + 0x01, 0xc7, 0x58, 0xc2, 0xec, 0x77, 0xa1, 0xa2, 0xdc, 0xbc, 0xa4, 0x77, 0xa3, 0xd5, 0xc5, 0xbb, + 0x31, 0x9f, 0x33, 0x49, 0xc3, 0x6f, 0xb9, 0xa3, 0xe1, 0xf7, 0x9f, 0x59, 0x90, 0xfa, 0xa9, 0x20, + 0x0c, 0x95, 0x66, 0xc8, 0x2e, 0x89, 0x22, 0x79, 0x1b, 0xfb, 0x54, 0xce, 0x82, 0xe5, 0x1b, 0x86, + 0x2f, 0xa9, 0x9a, 0xac, 0x8b, 0x53, 0x32, 0xe8, 0x2a, 0x0c, 0x36, 0x23, 0x52, 0x4f, 0x58, 0x68, + 0x42, 0x0f, 0x14, 0xf9, 0xdc, 0xf0, 0x9a, 0x58, 0x92, 0xb0, 0xff, 0x95, 0x05, 0x70, 0xd5, 0xdb, + 0xf6, 0x12, 0xec, 0x04, 0x9b, 0xe4, 0x04, 0xb4, 0xc2, 0xeb, 0xd0, 0x17, 0x37, 0x49, 0xa3, 0xd8, + 0x35, 0x5f, 0xda, 0xb3, 0x7a, 0x93, 0x34, 0xd2, 0xcf, 0x41, 0xff, 0x61, 0x46, 0xc7, 0xfe, 0x1e, + 0xc0, 0x58, 0x8a, 0x46, 0x25, 0x73, 0xf4, 0xbc, 0xe1, 0x93, 0xff, 0x68, 0xc6, 0x27, 0xbf, 0xc2, + 0xb0, 0x35, 0x37, 0xfc, 0x04, 0xca, 0xdb, 0xce, 0x7d, 0xa1, 0x08, 0xbc, 0x5c, 0xb4, 0x43, 0xb4, + 0xa5, 0xf9, 0x6b, 0xce, 0x7d, 0x2e, 0x77, 0x3d, 0x27, 0x17, 0xd2, 0x35, 0xe7, 0xfe, 0x01, 0xbf, + 0xcc, 0x63, 0x1b, 0x96, 0x6a, 0x1e, 0x5f, 0xfa, 0xd3, 0xf4, 0x3f, 0xe3, 0xa1, 0xb4, 0x39, 0xd6, + 0xaa, 0x17, 0x08, 0x3b, 0x66, 0x8f, 0xad, 0x7a, 0x41, 0xb6, 0x55, 0x2f, 0x28, 0xd0, 0xaa, 0xc7, + 0x9c, 0x57, 0x07, 0x85, 0xf9, 
0x9f, 0x79, 0xfe, 0x0d, 0x5f, 0xf8, 0x58, 0x4f, 0x4d, 0x8b, 0x7b, + 0x04, 0xde, 0xfc, 0x82, 0x14, 0x36, 0x45, 0x69, 0x6e, 0x17, 0x64, 0xd3, 0xe8, 0xef, 0x5b, 0x30, + 0x26, 0x7e, 0x63, 0xf2, 0x5e, 0x8b, 0xc4, 0x89, 0x38, 0xd4, 0x3e, 0x75, 0x94, 0xde, 0x08, 0x12, + 0xbc, 0x53, 0x1f, 0x95, 0x1c, 0xc9, 0x04, 0xe6, 0xf6, 0x2d, 0xd3, 0x1f, 0xf4, 0x3d, 0x0b, 0xa6, + 0xb7, 0x9d, 0xfb, 0xbc, 0x45, 0x5e, 0x86, 0x9d, 0xc4, 0x0b, 0x85, 0x77, 0xe3, 0x6a, 0xaf, 0xeb, + 0xa4, 0x8d, 0x10, 0xef, 0xae, 0x74, 0x5c, 0x9a, 0xee, 0x84, 0x92, 0xdb, 0xe9, 0x8e, 0x3d, 0x9c, + 0xdd, 0x80, 0x21, 0xb9, 0x30, 0x3b, 0x88, 0xf9, 0x55, 0xfd, 0xec, 0xce, 0x51, 0xaa, 0xe7, 0xa5, + 0x69, 0x6c, 0xfe, 0xf5, 0x96, 0x13, 0x24, 0x5e, 0xb2, 0xab, 0xa9, 0x05, 0xac, 0x1d, 0xb1, 0x14, + 0x8f, 0xb5, 0x9d, 0x77, 0x61, 0x44, 0x5f, 0x77, 0xc7, 0xda, 0xd6, 0x7b, 0x30, 0xd5, 0x61, 0x55, + 0x1d, 0x6b, 0x93, 0xf7, 0xe0, 0xd1, 0xae, 0xeb, 0xe3, 0x38, 0x1b, 0xb6, 0x7f, 0xdb, 0xd2, 0x59, + 0xe7, 0x09, 0x18, 0x5d, 0xae, 0x99, 0x46, 0x97, 0x73, 0x45, 0xf7, 0x50, 0x17, 0xcb, 0xcb, 0x86, + 0xde, 0x7d, 0x7a, 0x24, 0xa0, 0x75, 0x18, 0xf0, 0x69, 0x89, 0xbc, 0xf3, 0x3a, 0xdf, 0xcb, 0x2e, + 0x4d, 0x85, 0x12, 0x56, 0x1e, 0x63, 0x41, 0xcb, 0xfe, 0x9e, 0x05, 0x7d, 0x7f, 0x89, 0x11, 0x43, + 0x6d, 0xa4, 0x45, 0xe0, 0xfb, 0x3c, 0x76, 0xee, 0xad, 0xdc, 0x4f, 0x48, 0x10, 0x33, 0x19, 0xb4, + 0xdb, 0xad, 0xfd, 0x30, 0x6d, 0x4a, 0x3a, 0x61, 0xbc, 0x0a, 0xa3, 0xbe, 0x73, 0x87, 0xf8, 0xd2, + 0x60, 0x9c, 0xd5, 0xd8, 0xae, 0xea, 0x40, 0x6c, 0xe2, 0xd2, 0xca, 0x1b, 0xba, 0x3d, 0x5d, 0x08, + 0x49, 0xaa, 0xb2, 0x61, 0x6c, 0xc7, 0x26, 0x2e, 0x55, 0x19, 0xee, 0x39, 0x49, 0x63, 0x4b, 0x68, + 0x73, 0xaa, 0xbb, 0xb7, 0x69, 0x21, 0xe6, 0x30, 0xb4, 0x08, 0xe3, 0x72, 0xc5, 0xde, 0xa2, 0x6a, + 0x7e, 0x18, 0x08, 0x39, 0x53, 0x45, 0x1d, 0x63, 0x13, 0x8c, 0xb3, 0xf8, 0xe8, 0xe3, 0x30, 0x46, + 0x27, 0x27, 0x6c, 0x25, 0xd2, 0xc5, 0xa4, 0x9f, 0xb9, 0x98, 0x30, 0x0f, 0xe5, 0x75, 0x03, 0x82, + 0x33, 0x98, 0xf6, 0xdb, 0x30, 0x75, 0x35, 0x74, 0xdc, 0x25, 0xc7, 0x77, 0x82, 0x06, 0x89, 0xd6, + 0x82, 0xcd, 0xdc, 0xeb, 0x6b, 0xfd, 0x8a, 0xb9, 0x94, 0x77, 0xc5, 0x6c, 0x47, 0x80, 0xf4, 0x06, + 0x84, 0x73, 0xd4, 0x5b, 0x30, 0xe8, 0xf1, 0xa6, 0xc4, 0xb2, 0x7d, 0x21, 0xcf, 0x1e, 0xd5, 0xd6, + 0x47, 0xcd, 0xd9, 0x87, 0x17, 0x60, 0x49, 0x92, 0xaa, 0x20, 0x9d, 0x0c, 0x58, 0xf9, 0x5a, 0x9e, + 0xfd, 0xd7, 0x2d, 0x18, 0xbf, 0x9e, 0x09, 0x69, 0x7d, 0x1a, 0x06, 0x78, 0x62, 0x84, 0xac, 0x89, + 0xa5, 0xce, 0x4a, 0xb1, 0x80, 0x3e, 0x70, 0x0d, 0xff, 0x97, 0x4b, 0x50, 0x61, 0x6e, 0xb6, 0x4d, + 0xaa, 0x4e, 0x1c, 0xbf, 0x98, 0x7a, 0xcd, 0x10, 0x53, 0x73, 0xb4, 0x4c, 0xd5, 0xb1, 0x6e, 0x52, + 0x2a, 0xba, 0xa9, 0x42, 0x3d, 0x0b, 0x29, 0x98, 0x29, 0x41, 0x1e, 0x0e, 0x38, 0x66, 0x46, 0x86, + 0xca, 0x30, 0x50, 0x76, 0xe9, 0xab, 0x70, 0x1f, 0xba, 0x4b, 0x5f, 0xd5, 0xb3, 0x2e, 0xcc, 0xa9, + 0xa6, 0x75, 0x9e, 0xb1, 0xef, 0x4f, 0x32, 0xe7, 0x49, 0xc7, 0xf7, 0x3e, 0x4f, 0x54, 0xc4, 0xf4, + 0x9c, 0x70, 0x86, 0x14, 0xa5, 0x07, 0x8c, 0xcf, 0x88, 0x7f, 0x3c, 0x20, 0x3e, 0xad, 0x62, 0x5f, + 0x86, 0xf1, 0xcc, 0xd4, 0xa1, 0x97, 0xa1, 0xbf, 0xb9, 0xe5, 0xc4, 0x24, 0xe3, 0xc7, 0xd2, 0x5f, + 0xa3, 0x85, 0x07, 0x7b, 0x73, 0x63, 0xaa, 0x02, 0x2b, 0xc1, 0x1c, 0xdb, 0xfe, 0x72, 0x09, 0xfa, + 0xae, 0x87, 0xee, 0x49, 0x2c, 0xb5, 0xcb, 0xc6, 0x52, 0x7b, 0x3a, 0x3f, 0x9d, 0x46, 0xd7, 0x55, + 0x56, 0xcb, 0xac, 0xb2, 0x73, 0x05, 0x68, 0x1d, 0xbe, 0xc0, 0xb6, 0x61, 0x98, 0xa5, 0xeb, 0x10, + 0x8e, 0x3c, 0x2f, 0x1a, 0x9a, 0xd5, 0x5c, 0x46, 0xb3, 0x1a, 0xd7, 0x50, 0x35, 0xfd, 0xea, 0x19, + 0x18, 0x14, 0x8e, 0x23, 0x59, 0xd7, 0x51, 0x81, 0x8b, 
0x25, 0xdc, 0xfe, 0x97, 0x65, 0x30, 0xd2, + 0x83, 0xa0, 0xdf, 0xb3, 0x60, 0x3e, 0xe2, 0x61, 0x38, 0x6e, 0xb5, 0x15, 0x79, 0xc1, 0x66, 0xbd, + 0xb1, 0x45, 0xdc, 0x96, 0xef, 0x05, 0x9b, 0x6b, 0x9b, 0x41, 0xa8, 0x8a, 0x57, 0xee, 0x93, 0x46, + 0x8b, 0xd9, 0x69, 0x0b, 0x67, 0x25, 0x51, 0x97, 0xa6, 0x17, 0xf6, 0xf7, 0xe6, 0xe6, 0x71, 0x4f, + 0xad, 0xe0, 0x1e, 0x7b, 0x85, 0xfe, 0xd8, 0x82, 0x05, 0x9e, 0x20, 0xa3, 0xf8, 0x48, 0x0a, 0x69, + 0xa4, 0x35, 0x49, 0x34, 0x25, 0xb7, 0x4e, 0xa2, 0xed, 0xa5, 0x57, 0xc4, 0x24, 0x2f, 0xd4, 0x7a, + 0x6b, 0x15, 0xf7, 0xda, 0x4d, 0xfb, 0xdf, 0x94, 0x61, 0x94, 0xce, 0x67, 0x1a, 0x14, 0xff, 0xb2, + 0xb1, 0x4c, 0x9e, 0xc8, 0x2c, 0x93, 0x49, 0x03, 0xf9, 0xc1, 0xc4, 0xc3, 0xc7, 0x30, 0xe9, 0x3b, + 0x71, 0x72, 0x99, 0x38, 0x51, 0x72, 0x87, 0x38, 0xec, 0x6e, 0x32, 0xeb, 0xf7, 0x50, 0xe0, 0xba, + 0x53, 0x39, 0x23, 0x5d, 0xcd, 0x12, 0xc3, 0xed, 0xf4, 0xd1, 0x0e, 0x20, 0x76, 0x0f, 0x1a, 0x39, + 0x41, 0xcc, 0xc7, 0xe2, 0x09, 0xbb, 0x6e, 0x6f, 0xad, 0xce, 0x8a, 0x56, 0xd1, 0xd5, 0x36, 0x6a, + 0xb8, 0x43, 0x0b, 0xda, 0x4d, 0x77, 0x7f, 0xd1, 0x9b, 0xee, 0x81, 0x1c, 0x9f, 0xed, 0xaf, 0x58, + 0x30, 0x45, 0x3f, 0x8b, 0xe9, 0xdf, 0x1b, 0xa3, 0x10, 0xc6, 0xe9, 0xb2, 0xf3, 0x49, 0x22, 0xcb, + 0xc4, 0xfe, 0xca, 0x91, 0xac, 0x4d, 0x3a, 0xa9, 0xf8, 0x76, 0xc5, 0x24, 0x86, 0xb3, 0xd4, 0xed, + 0x6f, 0x59, 0xc0, 0x3c, 0xee, 0x4e, 0xe0, 0x30, 0xbb, 0x64, 0x1e, 0x66, 0x76, 0x3e, 0xc7, 0xe8, + 0x72, 0x8e, 0xbd, 0x04, 0x13, 0x14, 0x5a, 0x8b, 0xc2, 0xfb, 0xbb, 0x52, 0xd0, 0xce, 0x37, 0xf0, + 0x7e, 0xa5, 0xc4, 0xb7, 0x8d, 0x8a, 0x27, 0x44, 0xbf, 0x60, 0xc1, 0x50, 0xc3, 0x69, 0x3a, 0x0d, + 0x9e, 0x5c, 0xa9, 0x80, 0x75, 0xc6, 0xa8, 0x3f, 0xbf, 0x2c, 0xea, 0x72, 0xcb, 0xc2, 0x47, 0xe4, + 0xd0, 0x65, 0x71, 0xae, 0x35, 0x41, 0x35, 0x3e, 0x7b, 0x17, 0x46, 0x0d, 0x62, 0xc7, 0xaa, 0x86, + 0xfe, 0x82, 0xc5, 0x99, 0xbe, 0x52, 0x15, 0xee, 0xc1, 0x64, 0xa0, 0xfd, 0xa7, 0xec, 0x4c, 0x4a, + 0xc6, 0xf3, 0xc5, 0xd9, 0x3a, 0xe3, 0x82, 0x9a, 0x77, 0x61, 0x86, 0x20, 0x6e, 0x6f, 0xc3, 0xfe, + 0x55, 0x0b, 0x1e, 0xd1, 0x11, 0xb5, 0x00, 0xd0, 0x3c, 0xbb, 0x71, 0x15, 0x86, 0xc2, 0x26, 0x89, + 0x9c, 0x54, 0x2d, 0x3a, 0x27, 0xe7, 0xff, 0x86, 0x28, 0x3f, 0xd8, 0x9b, 0x9b, 0xd6, 0xa9, 0xcb, + 0x72, 0xac, 0x6a, 0x22, 0x1b, 0x06, 0xd8, 0xbc, 0xc4, 0x22, 0x74, 0x97, 0x25, 0x1b, 0x62, 0x97, + 0x2a, 0x31, 0x16, 0x10, 0xfb, 0x6f, 0x59, 0x7c, 0xb9, 0xe9, 0x5d, 0x47, 0x5f, 0x80, 0x89, 0x6d, + 0xaa, 0x41, 0xad, 0xdc, 0x6f, 0xd2, 0x83, 0x94, 0x5d, 0x27, 0x5b, 0x45, 0x8e, 0x8f, 0x2e, 0xc3, + 0x5d, 0x9a, 0x11, 0xbd, 0x9f, 0xb8, 0x96, 0x21, 0x8b, 0xdb, 0x1a, 0xb2, 0xff, 0x61, 0x89, 0xef, + 0x59, 0x26, 0xc3, 0x3d, 0x03, 0x83, 0xcd, 0xd0, 0x5d, 0x5e, 0xab, 0x62, 0x31, 0x57, 0x8a, 0xe9, + 0xd4, 0x78, 0x31, 0x96, 0x70, 0x74, 0x01, 0x80, 0xdc, 0x4f, 0x48, 0x14, 0x38, 0xbe, 0xba, 0x06, + 0x56, 0xa2, 0xd2, 0x8a, 0x82, 0x60, 0x0d, 0x8b, 0xd6, 0x69, 0x46, 0xe1, 0x8e, 0xe7, 0xb2, 0xc8, + 0x85, 0xb2, 0x59, 0xa7, 0xa6, 0x20, 0x58, 0xc3, 0xa2, 0x7a, 0x6b, 0x2b, 0x88, 0xf9, 0x31, 0xe6, + 0xdc, 0x11, 0xb9, 0x71, 0x86, 0x52, 0xbd, 0xf5, 0xa6, 0x0e, 0xc4, 0x26, 0x2e, 0xba, 0x02, 0x03, + 0x89, 0xc3, 0x2e, 0x37, 0xfb, 0x8b, 0x78, 0x8a, 0xac, 0x53, 0x5c, 0x3d, 0x19, 0x11, 0xad, 0x8a, + 0x05, 0x09, 0xfb, 0x3f, 0x55, 0x00, 0x52, 0xa9, 0x0b, 0x7d, 0xb9, 0x7d, 0xc3, 0x7f, 0xb4, 0xa8, + 0xc8, 0xf6, 0xe0, 0x76, 0x3b, 0xfa, 0x9a, 0x05, 0xc3, 0x8e, 0xef, 0x87, 0x0d, 0x27, 0x61, 0xd3, + 0x53, 0x2a, 0xca, 0x7a, 0x44, 0x4f, 0x16, 0xd3, 0xba, 0xbc, 0x33, 0x2f, 0xca, 0x0b, 0x47, 0x0d, + 0x92, 0xdb, 0x1f, 0xbd, 0x0b, 0xe8, 0x23, 0x52, 0x6a, 0xe7, 0x5f, 0x78, 0x36, 
0x2b, 0xb5, 0x57, + 0x18, 0xc3, 0xd5, 0x04, 0x76, 0xf4, 0xb6, 0x91, 0x4b, 0xa6, 0xaf, 0x48, 0xf8, 0xa9, 0x21, 0x87, + 0xe4, 0xa5, 0x91, 0x41, 0x6f, 0xea, 0x2e, 0xd5, 0xfd, 0x45, 0xe2, 0xbb, 0x35, 0x71, 0x38, 0xc7, + 0x9d, 0x3a, 0x81, 0x71, 0xd7, 0x3c, 0x79, 0x85, 0x5b, 0xd8, 0x0b, 0xf9, 0x2d, 0x64, 0x8e, 0xec, + 0xf4, 0xac, 0xcd, 0x00, 0x70, 0xb6, 0x09, 0xf4, 0x26, 0x77, 0x78, 0x5f, 0x0b, 0x36, 0x42, 0xe1, + 0x1a, 0x76, 0xbe, 0xc0, 0x37, 0xdf, 0x8d, 0x13, 0xb2, 0x4d, 0xeb, 0xa4, 0x87, 0xeb, 0x75, 0x41, + 0x05, 0x2b, 0x7a, 0x68, 0x1d, 0x06, 0x58, 0xb4, 0x51, 0x3c, 0x33, 0x54, 0xc4, 0x12, 0x67, 0x06, + 0xd9, 0xa6, 0xfb, 0x87, 0xfd, 0x8d, 0xb1, 0xa0, 0x85, 0x2e, 0xcb, 0x30, 0xfb, 0x78, 0x2d, 0xb8, + 0x19, 0x13, 0x16, 0x66, 0x5f, 0x59, 0xfa, 0x50, 0x1a, 0x37, 0xcf, 0xcb, 0x3b, 0x66, 0xd3, 0x33, + 0x6a, 0x52, 0xc1, 0x46, 0xfc, 0x97, 0x49, 0xfa, 0x66, 0xa0, 0x48, 0x47, 0xcd, 0x94, 0x7e, 0xe9, + 0x64, 0xdf, 0x32, 0x89, 0xe1, 0x2c, 0xf5, 0x13, 0x3d, 0x52, 0x67, 0x03, 0x98, 0xc8, 0x6e, 0xca, + 0x63, 0x3d, 0xc2, 0x7f, 0xd2, 0x07, 0x63, 0xe6, 0xe2, 0x40, 0x0b, 0x50, 0x11, 0x44, 0x54, 0xd2, + 0x2e, 0xb5, 0x07, 0xae, 0x49, 0x00, 0x4e, 0x71, 0x58, 0xfa, 0x32, 0x56, 0x5d, 0x73, 0x0a, 0x4a, + 0xd3, 0x97, 0x29, 0x08, 0xd6, 0xb0, 0xa8, 0x24, 0x7c, 0x27, 0x0c, 0x13, 0x75, 0x12, 0xa8, 0x75, + 0xb3, 0xc4, 0x4a, 0xb1, 0x80, 0xd2, 0x13, 0xe0, 0x2e, 0xfd, 0x98, 0xbe, 0x69, 0x55, 0x54, 0x27, + 0xc0, 0x15, 0x1d, 0x88, 0x4d, 0x5c, 0x7a, 0xa2, 0x85, 0x31, 0x5b, 0x88, 0x42, 0xde, 0x4e, 0x9d, + 0xac, 0xea, 0x3c, 0x02, 0x4f, 0xc2, 0xd1, 0x67, 0xe0, 0x11, 0x15, 0x30, 0x87, 0xb9, 0x95, 0x56, + 0xb6, 0x38, 0x60, 0xa8, 0xcc, 0x8f, 0x2c, 0x77, 0x46, 0xc3, 0xdd, 0xea, 0xa3, 0xd7, 0x60, 0x4c, + 0xc8, 0xca, 0x92, 0xe2, 0xa0, 0x79, 0x03, 0x7f, 0xc5, 0x80, 0xe2, 0x0c, 0x36, 0xaa, 0xc2, 0x04, + 0x2d, 0x61, 0x42, 0xaa, 0xa4, 0xc0, 0x03, 0xff, 0xd4, 0x51, 0x7f, 0x25, 0x03, 0xc7, 0x6d, 0x35, + 0xd0, 0x22, 0x8c, 0x73, 0x61, 0x85, 0x2a, 0x86, 0xec, 0x3b, 0x08, 0x7f, 0x4e, 0xb5, 0x11, 0x6e, + 0x98, 0x60, 0x9c, 0xc5, 0x47, 0x17, 0x61, 0xc4, 0x89, 0x1a, 0x5b, 0x5e, 0x42, 0x1a, 0x49, 0x2b, + 0xe2, 0x49, 0x2c, 0x34, 0x17, 0x86, 0x45, 0x0d, 0x86, 0x0d, 0x4c, 0xfb, 0xf3, 0x30, 0xd5, 0xc1, + 0x79, 0x9c, 0x2e, 0x1c, 0xa7, 0xe9, 0xc9, 0x31, 0x65, 0xdc, 0xa5, 0x16, 0x6b, 0x6b, 0x72, 0x34, + 0x1a, 0x16, 0x5d, 0x9d, 0xcc, 0x3c, 0xad, 0xe5, 0xd4, 0x54, 0xab, 0x73, 0x55, 0x02, 0x70, 0x8a, + 0x63, 0xff, 0x45, 0x05, 0x34, 0xeb, 0x4d, 0x01, 0x17, 0x99, 0x8b, 0x30, 0x22, 0xd3, 0xc4, 0x6a, + 0xe9, 0x19, 0xd5, 0x30, 0x2f, 0x69, 0x30, 0x6c, 0x60, 0xd2, 0xbe, 0x05, 0xd2, 0x26, 0x95, 0x75, + 0xce, 0x52, 0xc6, 0x2a, 0x9c, 0xe2, 0xa0, 0xf3, 0x30, 0x14, 0x13, 0x7f, 0xe3, 0xaa, 0x17, 0xdc, + 0x15, 0x0b, 0x5b, 0x71, 0xe6, 0xba, 0x28, 0xc7, 0x0a, 0x03, 0x2d, 0x41, 0xb9, 0xe5, 0xb9, 0x62, + 0x29, 0x4b, 0xb1, 0xa1, 0x7c, 0x73, 0xad, 0x7a, 0xb0, 0x37, 0xf7, 0x44, 0xb7, 0x9c, 0xb9, 0x54, + 0x3f, 0x8f, 0xe7, 0xe9, 0xf6, 0xa3, 0x95, 0x3b, 0xd9, 0xe9, 0x07, 0x7a, 0xb4, 0xd3, 0x5f, 0x00, + 0x10, 0xa3, 0x96, 0x6b, 0xb9, 0x9c, 0x7e, 0xb5, 0x4b, 0x0a, 0x82, 0x35, 0x2c, 0xaa, 0xe5, 0x37, + 0x22, 0xe2, 0x48, 0x45, 0x98, 0x3b, 0x35, 0x0f, 0x1d, 0x5d, 0xcb, 0x5f, 0xce, 0x12, 0xc3, 0xed, + 0xf4, 0x51, 0x08, 0x93, 0xae, 0x88, 0xca, 0x4c, 0x1b, 0xad, 0xf4, 0xee, 0x49, 0x4d, 0x1b, 0xac, + 0x66, 0x09, 0xe1, 0x76, 0xda, 0xe8, 0x73, 0x30, 0x2b, 0x0b, 0xdb, 0x43, 0x62, 0xd9, 0x76, 0x29, + 0x2f, 0x9d, 0xd9, 0xdf, 0x9b, 0x9b, 0xad, 0x76, 0xc5, 0xc2, 0x87, 0x50, 0x40, 0x6f, 0xc1, 0x00, + 0xbb, 0xd7, 0x89, 0x67, 0x86, 0xd9, 0x89, 0xf7, 0x52, 0x11, 0x7f, 0x7c, 0xba, 0xea, 0xe7, 0xd9, + 0xed, 
0x90, 0xf0, 0x34, 0x4d, 0x2f, 0xcb, 0x58, 0x21, 0x16, 0x34, 0x51, 0x13, 0x86, 0x9d, 0x20, + 0x08, 0x13, 0x87, 0x0b, 0x62, 0x23, 0x45, 0x64, 0x49, 0xad, 0x89, 0xc5, 0xb4, 0x2e, 0x6f, 0x47, + 0x39, 0xaf, 0x69, 0x10, 0xac, 0x37, 0x81, 0xee, 0xc1, 0x78, 0x78, 0x8f, 0x32, 0x4c, 0x79, 0xb5, + 0x11, 0xcf, 0x8c, 0x9a, 0x03, 0xcb, 0x31, 0xd4, 0x1a, 0x95, 0x35, 0x4e, 0x66, 0x12, 0xc5, 0xd9, + 0x56, 0xd0, 0xbc, 0x61, 0xae, 0x1e, 0x4b, 0xfd, 0xa9, 0x53, 0x73, 0xb5, 0x6e, 0x9d, 0x66, 0x61, + 0xd7, 0xdc, 0x87, 0x92, 0x71, 0x84, 0xf1, 0x4c, 0xd8, 0x75, 0x0a, 0xc2, 0x3a, 0xde, 0xec, 0xc7, + 0x60, 0x58, 0x9b, 0xf8, 0x5e, 0x1c, 0x77, 0x67, 0x5f, 0x83, 0x89, 0xec, 0x84, 0xf6, 0xe4, 0xf8, + 0xfb, 0x3f, 0x4b, 0x30, 0xde, 0xe1, 0xde, 0xe8, 0xae, 0xc7, 0x9c, 0xcf, 0x0d, 0xd6, 0x77, 0xc5, + 0x0b, 0x5c, 0xcc, 0x20, 0x26, 0x03, 0x2b, 0x15, 0x60, 0x60, 0x92, 0x9b, 0x96, 0xbb, 0x72, 0x53, + 0xc1, 0xb4, 0xfa, 0xde, 0x0f, 0xd3, 0x32, 0xcf, 0x89, 0xfe, 0x42, 0xe7, 0xc4, 0x03, 0x60, 0x74, + 0xc6, 0x51, 0x33, 0x58, 0xe0, 0xa8, 0xf9, 0x66, 0x09, 0x26, 0x52, 0x27, 0x67, 0x91, 0x3b, 0xfa, + 0xf8, 0xaf, 0x21, 0xd6, 0x8d, 0x6b, 0x88, 0xbc, 0xd4, 0xd0, 0x99, 0xfe, 0x75, 0xbd, 0x92, 0x78, + 0x2b, 0x73, 0x25, 0xf1, 0x52, 0x8f, 0x74, 0x0f, 0xbf, 0x9e, 0xf8, 0x6e, 0x09, 0x4e, 0x65, 0xab, + 0x2c, 0xfb, 0x8e, 0xb7, 0x7d, 0x02, 0xf3, 0xf5, 0x19, 0x63, 0xbe, 0x5e, 0xe9, 0x6d, 0x5c, 0xac, + 0x93, 0x5d, 0x27, 0xcd, 0xc9, 0x4c, 0xda, 0xc7, 0x8e, 0x42, 0xfc, 0xf0, 0x99, 0xfb, 0x43, 0x0b, + 0x1e, 0xed, 0x58, 0xef, 0x04, 0x0c, 0xaf, 0x6f, 0x98, 0x86, 0xd7, 0x17, 0x8f, 0x30, 0xba, 0x2e, + 0x96, 0xd8, 0x5f, 0x2b, 0x77, 0x19, 0x15, 0x33, 0x4d, 0xdd, 0x80, 0x61, 0xa7, 0xd1, 0x20, 0x71, + 0x7c, 0x2d, 0x74, 0x55, 0x02, 0xa7, 0xe7, 0xd9, 0xd9, 0x92, 0x16, 0x1f, 0xec, 0xcd, 0xcd, 0x66, + 0x49, 0xa4, 0x60, 0xac, 0x53, 0x30, 0x53, 0xcb, 0x95, 0x8e, 0x29, 0xb5, 0xdc, 0x05, 0x80, 0x1d, + 0xa5, 0xc5, 0x66, 0x2d, 0x5e, 0x9a, 0x7e, 0xab, 0x61, 0xa1, 0xff, 0x9f, 0x49, 0x84, 0xdc, 0x49, + 0xa3, 0xcf, 0x8c, 0x97, 0xcc, 0xf9, 0x7e, 0xba, 0xc3, 0x07, 0x0f, 0xcb, 0x54, 0xd6, 0x41, 0x45, + 0x12, 0x7d, 0x0a, 0x26, 0x62, 0x1e, 0xfc, 0xbf, 0xec, 0x3b, 0x31, 0xf3, 0xee, 0x17, 0xfc, 0x94, + 0x45, 0x58, 0xd6, 0x33, 0x30, 0xdc, 0x86, 0x6d, 0x7f, 0xa7, 0x0c, 0x1f, 0x3c, 0x64, 0xd9, 0xa2, + 0x45, 0xf3, 0xd6, 0xf6, 0xb9, 0xac, 0xfd, 0x67, 0xb6, 0x63, 0x65, 0xc3, 0x20, 0x94, 0xf9, 0xda, + 0xa5, 0xf7, 0xfd, 0xb5, 0xbf, 0xae, 0x5b, 0xeb, 0xb8, 0xdf, 0xe6, 0xa5, 0x23, 0x6f, 0xcc, 0x9f, + 0x56, 0x63, 0xfd, 0x97, 0x2c, 0x78, 0xa2, 0xe3, 0xb0, 0x0c, 0x2f, 0x91, 0x05, 0xa8, 0x34, 0x68, + 0xa1, 0x16, 0x8b, 0x93, 0x06, 0xc1, 0x49, 0x00, 0x4e, 0x71, 0x0c, 0x67, 0x90, 0x52, 0xae, 0x33, + 0xc8, 0xef, 0x5b, 0x30, 0x9d, 0xed, 0xc4, 0x09, 0xf0, 0xad, 0xba, 0xc9, 0xb7, 0xe6, 0x7b, 0xfb, + 0xf8, 0x5d, 0x58, 0xd6, 0x7f, 0x1f, 0x83, 0xd3, 0x6d, 0xa7, 0x1e, 0x9f, 0xc5, 0x9f, 0xb3, 0x60, + 0x72, 0x93, 0x49, 0xef, 0x5a, 0xc0, 0x93, 0x18, 0x57, 0x4e, 0x94, 0xd8, 0xa1, 0x71, 0x52, 0x5c, + 0x17, 0x69, 0x43, 0xc1, 0xed, 0x8d, 0xa1, 0xaf, 0x5a, 0x30, 0xed, 0xdc, 0x8b, 0xdb, 0x5e, 0x36, + 0x11, 0x0b, 0xe9, 0xb5, 0x1c, 0x63, 0x59, 0xce, 0x9b, 0x28, 0x4b, 0x33, 0xfb, 0x7b, 0x73, 0xd3, + 0x9d, 0xb0, 0x70, 0xc7, 0x56, 0xe9, 0xf7, 0xdd, 0x12, 0xe1, 0x14, 0xc5, 0x42, 0xf7, 0x3a, 0x05, + 0x5f, 0x70, 0xb6, 0x26, 0x21, 0x58, 0x51, 0x44, 0xef, 0x40, 0x65, 0x53, 0xc6, 0x38, 0x65, 0xd9, + 0x66, 0x97, 0x69, 0xee, 0x14, 0x12, 0xc5, 0x7d, 0xf7, 0x15, 0x08, 0xa7, 0x44, 0xd1, 0x65, 0x28, + 0x07, 0x1b, 0xb1, 0x88, 0x26, 0xce, 0xf3, 0x01, 0x32, 0x3d, 0xaf, 0x78, 0x00, 0xe6, 0xf5, 0xd5, + 0x3a, 0xa6, 0x24, 0x28, 0xa5, 
0xe8, 0x8e, 0x2b, 0xac, 0xc4, 0x39, 0x94, 0xf0, 0x52, 0xb5, 0x9d, + 0x12, 0x5e, 0xaa, 0x62, 0x4a, 0x02, 0xd5, 0xa0, 0x9f, 0x05, 0x6b, 0x08, 0x13, 0x70, 0x4e, 0xc8, + 0x79, 0x5b, 0x48, 0x0a, 0xcf, 0x80, 0xc8, 0x8a, 0x31, 0x27, 0x84, 0xd6, 0x61, 0xa0, 0xc1, 0x92, + 0xf8, 0x0b, 0xdd, 0x3c, 0x2f, 0x19, 0x43, 0x5b, 0xc2, 0x7f, 0x7e, 0xef, 0xc5, 0xcb, 0xb1, 0xa0, + 0xc5, 0xa8, 0x92, 0xe6, 0xd6, 0x46, 0x2c, 0x94, 0xef, 0x3c, 0xaa, 0x6d, 0xcf, 0x31, 0x08, 0xaa, + 0xac, 0x1c, 0x0b, 0x5a, 0xa8, 0x0a, 0xa5, 0x8d, 0x86, 0x48, 0xa4, 0x9a, 0x63, 0xfa, 0x35, 0xa3, + 0x69, 0x97, 0x06, 0xf6, 0xf7, 0xe6, 0x4a, 0xab, 0xcb, 0xb8, 0xb4, 0xd1, 0x40, 0x6f, 0xc0, 0xe0, + 0x06, 0x8f, 0x8f, 0x14, 0x49, 0x53, 0x5f, 0xc8, 0x0b, 0xe2, 0x6c, 0x0b, 0xa6, 0xe4, 0xf1, 0x19, + 0x02, 0x80, 0x25, 0x39, 0x96, 0x4f, 0x4e, 0x45, 0x7c, 0x8a, 0xac, 0xa9, 0xf3, 0xbd, 0x45, 0x88, + 0x0a, 0x9d, 0x54, 0x95, 0x62, 0x8d, 0x22, 0x5d, 0xf3, 0x8e, 0x7c, 0x8f, 0x84, 0x65, 0x4c, 0xcd, + 0x5d, 0xf3, 0x1d, 0x9f, 0x2f, 0xe1, 0x6b, 0x5e, 0x81, 0x70, 0x4a, 0x14, 0xb5, 0x60, 0x74, 0x27, + 0x6e, 0x6e, 0x11, 0xb9, 0xf5, 0x59, 0x1a, 0xd5, 0xe1, 0x0b, 0x9f, 0xc8, 0xc9, 0x8d, 0x2b, 0xaa, + 0x78, 0x51, 0xd2, 0x72, 0xfc, 0x36, 0x0e, 0xc6, 0x12, 0x78, 0xdd, 0xd2, 0xc9, 0x62, 0xb3, 0x15, + 0xfa, 0x49, 0xde, 0x6b, 0x85, 0x77, 0x76, 0x13, 0x22, 0xd2, 0xac, 0xe6, 0x7c, 0x92, 0xd7, 0x39, + 0x72, 0xfb, 0x27, 0x11, 0x00, 0x2c, 0xc9, 0xa9, 0x29, 0x63, 0xdc, 0x78, 0xa2, 0xf0, 0x94, 0xb5, + 0x8d, 0x21, 0x9d, 0x32, 0xc6, 0x7d, 0x53, 0xa2, 0x8c, 0xeb, 0x36, 0xb7, 0xc2, 0x24, 0x0c, 0x32, + 0xbc, 0x7f, 0xb2, 0x08, 0xd7, 0xad, 0x75, 0xa8, 0xd9, 0xce, 0x75, 0x3b, 0x61, 0xe1, 0x8e, 0xad, + 0xa2, 0x00, 0xc6, 0x9a, 0x61, 0x94, 0xdc, 0x0b, 0x23, 0xb9, 0x0e, 0x51, 0x21, 0x1d, 0xd1, 0xa8, + 0x23, 0xda, 0x66, 0x6e, 0xb8, 0x26, 0x04, 0x67, 0xa8, 0xd3, 0x4f, 0x17, 0x37, 0x1c, 0x9f, 0xac, + 0xdd, 0x98, 0x99, 0x2a, 0xf2, 0xe9, 0xea, 0x1c, 0xb9, 0xfd, 0xd3, 0x09, 0x00, 0x96, 0xe4, 0xec, + 0x5f, 0x1d, 0x68, 0x17, 0x1c, 0x98, 0x6a, 0xf0, 0x37, 0xdb, 0x6f, 0x62, 0x3f, 0xd5, 0xbb, 0x06, + 0xfc, 0x00, 0xef, 0x64, 0xbf, 0x6a, 0xc1, 0xe9, 0x66, 0x47, 0xb1, 0x40, 0x1c, 0xbd, 0xbd, 0x2a, + 0xd2, 0x7c, 0x5a, 0x54, 0x36, 0xe4, 0xce, 0x70, 0xdc, 0xa5, 0xcd, 0xac, 0x30, 0x5d, 0x7e, 0xdf, + 0xc2, 0xf4, 0x6d, 0x18, 0x62, 0xd2, 0x5f, 0x9a, 0xeb, 0xa4, 0xc7, 0xb4, 0x20, 0xec, 0x10, 0x5f, + 0x16, 0x24, 0xb0, 0x22, 0x46, 0x27, 0xee, 0xf1, 0xec, 0x20, 0x30, 0x61, 0x60, 0x91, 0x83, 0x8f, + 0x6b, 0x2a, 0xab, 0x62, 0x26, 0x1e, 0xaf, 0x1d, 0x86, 0x7c, 0x90, 0x87, 0x80, 0x0f, 0x6f, 0x0c, + 0x55, 0x3b, 0xa8, 0x4a, 0x03, 0xe6, 0xb5, 0x4b, 0xbe, 0xba, 0x74, 0xb2, 0x22, 0xfe, 0x3f, 0xb2, + 0x3a, 0x48, 0xa4, 0x5c, 0x2d, 0xfb, 0x84, 0xa9, 0x96, 0x3d, 0x9d, 0x55, 0xcb, 0xda, 0x8c, 0x31, + 0x86, 0x46, 0x56, 0x3c, 0x87, 0x68, 0xd1, 0x64, 0x2e, 0xb6, 0x0f, 0x67, 0xf3, 0xd8, 0x1d, 0x73, + 0xc5, 0x72, 0xd5, 0x25, 0x64, 0xea, 0x8a, 0xe5, 0xae, 0x55, 0x31, 0x83, 0x14, 0xcd, 0x07, 0x60, + 0xff, 0x7c, 0x09, 0xca, 0xb5, 0xd0, 0x3d, 0x01, 0xe3, 0xd2, 0x25, 0xc3, 0xb8, 0xf4, 0x54, 0xee, + 0xfb, 0x74, 0x5d, 0x4d, 0x49, 0x37, 0x32, 0xa6, 0xa4, 0x0f, 0xe7, 0x93, 0x3a, 0xdc, 0x70, 0xf4, + 0xbd, 0x32, 0xe8, 0x2f, 0xec, 0xa1, 0xff, 0x70, 0x14, 0x0f, 0xdd, 0x72, 0xb1, 0x47, 0xf7, 0x44, + 0x1b, 0xcc, 0x93, 0x4b, 0xc6, 0xf5, 0xfd, 0xd4, 0x3a, 0xea, 0xde, 0x26, 0xde, 0xe6, 0x56, 0x42, + 0xdc, 0xec, 0xc0, 0x4e, 0xce, 0x51, 0xf7, 0xcf, 0x2d, 0x18, 0xcf, 0xb4, 0x8e, 0xfc, 0x4e, 0x01, + 0x41, 0x47, 0x34, 0x17, 0x4d, 0xe6, 0x46, 0x10, 0xcd, 0x03, 0x28, 0xab, 0xbf, 0x34, 0xc9, 0x30, + 0xe9, 0x54, 0x5d, 0x0b, 0xc4, 0x58, 0xc3, 0x40, 0x2f, 
0xc3, 0x70, 0x12, 0x36, 0x43, 0x3f, 0xdc, + 0xdc, 0xbd, 0x42, 0x64, 0xa6, 0x0a, 0x75, 0x63, 0xb2, 0x9e, 0x82, 0xb0, 0x8e, 0x67, 0x7f, 0xbf, + 0x0c, 0xd9, 0xf7, 0x19, 0xff, 0xdf, 0x3a, 0xfd, 0xe9, 0x59, 0xa7, 0x7f, 0x64, 0xc1, 0x04, 0x6d, + 0x9d, 0xb9, 0xce, 0x48, 0x87, 0x5a, 0xf5, 0xa0, 0x81, 0x75, 0xc8, 0x83, 0x06, 0x4f, 0x53, 0x6e, + 0xe7, 0x86, 0xad, 0x44, 0x18, 0x91, 0x34, 0x26, 0x46, 0x4b, 0xb1, 0x80, 0x0a, 0x3c, 0x12, 0x45, + 0x22, 0xf2, 0x48, 0xc7, 0x23, 0x51, 0x84, 0x05, 0x54, 0xbe, 0x77, 0xd0, 0xd7, 0xe5, 0xbd, 0x03, + 0x96, 0xeb, 0x49, 0xb8, 0x6b, 0x08, 0xb1, 0x42, 0xcb, 0xf5, 0x24, 0xfd, 0x38, 0x52, 0x1c, 0xfb, + 0xdb, 0x65, 0x18, 0xa9, 0x85, 0x6e, 0xea, 0x29, 0xff, 0x92, 0xe1, 0x29, 0x7f, 0x36, 0xe3, 0x29, + 0x3f, 0xa1, 0xe3, 0x3e, 0x18, 0x47, 0x79, 0x91, 0x13, 0x8c, 0xbd, 0xc8, 0x71, 0x44, 0x27, 0x79, + 0x23, 0x27, 0x98, 0x22, 0x84, 0x4d, 0xba, 0x3f, 0x4b, 0xce, 0xf1, 0xff, 0xdb, 0x82, 0xb1, 0x5a, + 0xe8, 0xd2, 0x05, 0xfa, 0xb3, 0xb4, 0x1a, 0xf5, 0x4c, 0x62, 0x03, 0x87, 0x64, 0x12, 0xfb, 0x75, + 0x0b, 0x06, 0x6b, 0xa1, 0x7b, 0x02, 0x06, 0xd6, 0x55, 0xd3, 0xc0, 0xfa, 0x44, 0x2e, 0xe7, 0xed, + 0x62, 0x53, 0xfd, 0x4e, 0x19, 0x46, 0x69, 0x8f, 0xc3, 0x4d, 0xf9, 0xbd, 0x8c, 0xb9, 0xb1, 0x0a, + 0xcc, 0x0d, 0x15, 0x09, 0x43, 0xdf, 0x0f, 0xef, 0x65, 0xbf, 0xdd, 0x2a, 0x2b, 0xc5, 0x02, 0x8a, + 0xce, 0xc3, 0x50, 0x33, 0x22, 0x3b, 0x5e, 0xd8, 0x8a, 0xb3, 0x51, 0x8c, 0x35, 0x51, 0x8e, 0x15, + 0x06, 0x7a, 0x09, 0x46, 0x62, 0x2f, 0x68, 0x10, 0xe9, 0xcc, 0xd1, 0xc7, 0x9c, 0x39, 0x78, 0xd2, + 0x46, 0xad, 0x1c, 0x1b, 0x58, 0xe8, 0x36, 0x54, 0xd8, 0x7f, 0xb6, 0x83, 0x7a, 0x7f, 0xb0, 0x80, + 0x67, 0x2a, 0x93, 0x04, 0x70, 0x4a, 0x0b, 0x5d, 0x00, 0x48, 0xa4, 0xdb, 0x49, 0x2c, 0xf2, 0xad, + 0x28, 0xb9, 0x54, 0x39, 0xa4, 0xc4, 0x58, 0xc3, 0x42, 0xcf, 0x41, 0x25, 0x71, 0x3c, 0xff, 0xaa, + 0x17, 0x90, 0x58, 0xb8, 0xed, 0x88, 0x04, 0xcc, 0xa2, 0x10, 0xa7, 0x70, 0x7a, 0xde, 0xb3, 0x18, + 0x6a, 0xfe, 0x18, 0xca, 0x10, 0xc3, 0x66, 0xe7, 0xfd, 0x55, 0x55, 0x8a, 0x35, 0x0c, 0xfb, 0x22, + 0x9c, 0xaa, 0x85, 0x6e, 0x2d, 0x8c, 0x92, 0xd5, 0x30, 0xba, 0xe7, 0x44, 0xae, 0xfc, 0x7e, 0x73, + 0x32, 0xef, 0x2f, 0x3d, 0x93, 0xfb, 0xb9, 0xcd, 0xd1, 0xc8, 0xe3, 0xfb, 0x22, 0x3b, 0xf1, 0x7b, + 0x0c, 0xc1, 0xf8, 0x51, 0x09, 0x50, 0x8d, 0x39, 0xc6, 0x18, 0x6f, 0xe7, 0x6c, 0xc1, 0x58, 0x4c, + 0xae, 0x7a, 0x41, 0xeb, 0xbe, 0x20, 0x55, 0x2c, 0xe6, 0xa5, 0xbe, 0xa2, 0xd7, 0xe1, 0x96, 0x0e, + 0xb3, 0x0c, 0x67, 0xe8, 0xd2, 0xc9, 0x8c, 0x5a, 0xc1, 0x62, 0x7c, 0x33, 0x26, 0x91, 0x78, 0x2b, + 0x86, 0x4d, 0x26, 0x96, 0x85, 0x38, 0x85, 0xd3, 0xc5, 0xc3, 0xfe, 0x5c, 0x0f, 0x03, 0x1c, 0x86, + 0x89, 0x5c, 0x6e, 0xec, 0xed, 0x00, 0xad, 0x1c, 0x1b, 0x58, 0x68, 0x15, 0x50, 0xdc, 0x6a, 0x36, + 0x7d, 0x76, 0xd7, 0xe8, 0xf8, 0x97, 0xa2, 0xb0, 0xd5, 0xe4, 0xfe, 0xd1, 0x22, 0xed, 0x7e, 0xbd, + 0x0d, 0x8a, 0x3b, 0xd4, 0xa0, 0xcc, 0x62, 0x23, 0x66, 0xbf, 0x45, 0x40, 0x35, 0xb7, 0x57, 0xd6, + 0x59, 0x11, 0x96, 0x30, 0xfb, 0x8b, 0xec, 0x80, 0x63, 0x8f, 0x78, 0x24, 0xad, 0x88, 0xa0, 0x6d, + 0x18, 0x6d, 0xb2, 0x43, 0x2c, 0x89, 0x42, 0xdf, 0x27, 0x52, 0xbe, 0x3c, 0x9a, 0x6b, 0x0e, 0x4f, + 0xdb, 0xaf, 0x93, 0xc3, 0x26, 0x75, 0xfb, 0x17, 0xc7, 0x18, 0xaf, 0x12, 0xd7, 0xbd, 0x83, 0xc2, + 0x09, 0x57, 0x48, 0x72, 0x1f, 0x2a, 0xf2, 0x1c, 0x57, 0x7a, 0x0e, 0x08, 0x97, 0x5e, 0x2c, 0xa9, + 0xa0, 0xcf, 0x32, 0x17, 0x73, 0xce, 0x20, 0x8a, 0x3f, 0x32, 0xc8, 0xf1, 0x0d, 0xf7, 0x72, 0x41, + 0x02, 0x6b, 0xe4, 0xd0, 0x55, 0x18, 0x15, 0x6f, 0x3e, 0x08, 0x33, 0x45, 0xd9, 0x50, 0xb1, 0x47, + 0xb1, 0x0e, 0x3c, 0xc8, 0x16, 0x60, 0xb3, 0x32, 0xda, 0x84, 0xc7, 0xb5, 0x37, 
0x8d, 0x3a, 0xb8, + 0x91, 0x71, 0xce, 0xf3, 0xc4, 0xfe, 0xde, 0xdc, 0xe3, 0xeb, 0x87, 0x21, 0xe2, 0xc3, 0xe9, 0xa0, + 0x1b, 0x70, 0xca, 0x69, 0x24, 0xde, 0x0e, 0xa9, 0x12, 0xc7, 0xf5, 0xbd, 0x80, 0x98, 0x51, 0xf7, + 0x8f, 0xee, 0xef, 0xcd, 0x9d, 0x5a, 0xec, 0x84, 0x80, 0x3b, 0xd7, 0x43, 0x9f, 0x80, 0x8a, 0x1b, + 0xc4, 0x62, 0x0e, 0x06, 0x8c, 0x27, 0xbc, 0x2a, 0xd5, 0xeb, 0x75, 0x35, 0xfe, 0xf4, 0x0f, 0x4e, + 0x2b, 0xa0, 0xf7, 0xf8, 0x13, 0xf5, 0x4a, 0x9b, 0xe1, 0x4f, 0xc7, 0xbd, 0x52, 0x48, 0x7f, 0x36, + 0x62, 0x61, 0xb8, 0x05, 0x4f, 0xb9, 0x6b, 0x1a, 0x61, 0x32, 0x46, 0x13, 0xe8, 0xd3, 0x80, 0x62, + 0x12, 0xed, 0x78, 0x0d, 0xb2, 0xd8, 0x60, 0x99, 0x4e, 0x99, 0x8d, 0x67, 0xc8, 0x88, 0x5b, 0x40, + 0xf5, 0x36, 0x0c, 0xdc, 0xa1, 0x16, 0xba, 0x4c, 0x39, 0x8f, 0x5e, 0x2a, 0xbc, 0x6b, 0xa5, 0x60, + 0x38, 0x53, 0x25, 0xcd, 0x88, 0x34, 0x9c, 0x84, 0xb8, 0x26, 0x45, 0x9c, 0xa9, 0x47, 0xcf, 0x25, + 0x95, 0xcc, 0x1e, 0x4c, 0x9f, 0xd0, 0xf6, 0x84, 0xf6, 0x54, 0xcf, 0xda, 0x0a, 0xe3, 0xe4, 0x3a, + 0x49, 0xee, 0x85, 0xd1, 0x5d, 0x76, 0x87, 0x31, 0xa4, 0xa5, 0x8d, 0x4b, 0x41, 0x58, 0xc7, 0xa3, + 0x32, 0x14, 0xbb, 0x3c, 0x5b, 0xab, 0xb2, 0x9b, 0x89, 0xa1, 0x74, 0xef, 0x5c, 0xe6, 0xc5, 0x58, + 0xc2, 0x25, 0xea, 0x5a, 0x6d, 0x99, 0xdd, 0x32, 0x64, 0x50, 0xd7, 0x6a, 0xcb, 0x58, 0xc2, 0x51, + 0xd8, 0xfe, 0x50, 0xda, 0x58, 0x91, 0x1b, 0x9f, 0x76, 0x4e, 0x5e, 0xf0, 0xad, 0xb4, 0xfb, 0x30, + 0xa1, 0x1e, 0x6b, 0xe3, 0x19, 0x3d, 0xe3, 0x99, 0xf1, 0x22, 0x0f, 0xe4, 0x77, 0x4c, 0x0c, 0xaa, + 0xec, 0x7a, 0x6b, 0x19, 0x9a, 0xb8, 0xad, 0x15, 0x23, 0x7b, 0xc4, 0x44, 0xee, 0x03, 0x05, 0x0b, + 0x50, 0x89, 0x5b, 0x77, 0xdc, 0x70, 0xdb, 0xf1, 0x02, 0x76, 0x15, 0xa0, 0x3f, 0xf7, 0x2e, 0x01, + 0x38, 0xc5, 0x41, 0x35, 0x18, 0x72, 0x84, 0x0a, 0x27, 0x4c, 0xf6, 0x39, 0xd1, 0xe5, 0x52, 0xe1, + 0xe3, 0xd6, 0x55, 0xf9, 0x0f, 0x2b, 0x2a, 0xe8, 0x55, 0x18, 0x15, 0xc1, 0x51, 0xc2, 0x89, 0x71, + 0xca, 0x74, 0xa4, 0xaf, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0x4d, 0x18, 0xa3, 0x54, 0x52, 0x06, 0x38, + 0x33, 0xdd, 0x1b, 0x0f, 0xd5, 0x52, 0x41, 0xeb, 0x64, 0x70, 0x86, 0x2c, 0x72, 0xe1, 0x31, 0xa7, + 0x95, 0x84, 0xdb, 0x74, 0x27, 0x98, 0xfb, 0x64, 0x3d, 0xbc, 0x4b, 0x82, 0x99, 0x53, 0x6c, 0x05, + 0x9e, 0xdd, 0xdf, 0x9b, 0x7b, 0x6c, 0xf1, 0x10, 0x3c, 0x7c, 0x28, 0x15, 0xf4, 0x36, 0x0c, 0x27, + 0xa1, 0x2f, 0x7c, 0x93, 0xe3, 0x99, 0xd3, 0x45, 0x72, 0xda, 0xac, 0xab, 0x0a, 0xba, 0x19, 0x43, + 0x11, 0xc1, 0x3a, 0xc5, 0xd9, 0x4f, 0xc2, 0x64, 0x1b, 0x4b, 0xea, 0xc9, 0x7d, 0xf3, 0x3f, 0xf6, + 0x43, 0x45, 0x59, 0xf4, 0xd0, 0x82, 0x69, 0xbc, 0x7d, 0x34, 0x6b, 0xbc, 0x1d, 0xa2, 0x02, 0x94, + 0x6e, 0xaf, 0xfd, 0x5c, 0x87, 0xe7, 0xb9, 0x9f, 0xcd, 0xdd, 0x83, 0xc5, 0x23, 0xaa, 0x7a, 0x78, + 0xc4, 0x3c, 0xd5, 0xea, 0xfa, 0x0e, 0xd5, 0xea, 0x0a, 0x3e, 0x39, 0x47, 0xf5, 0xb7, 0x66, 0xe8, + 0xae, 0xd5, 0xb2, 0x2f, 0x2a, 0xd5, 0x68, 0x21, 0xe6, 0x30, 0x26, 0x77, 0xd3, 0x33, 0x95, 0xc9, + 0xdd, 0x83, 0x47, 0x94, 0xbb, 0x25, 0x01, 0x9c, 0xd2, 0x42, 0x3b, 0x30, 0xd9, 0x30, 0x1f, 0xc8, + 0x52, 0x71, 0x52, 0xcf, 0xf7, 0xf0, 0x40, 0x55, 0x4b, 0x7b, 0x3d, 0x63, 0x39, 0x4b, 0x0f, 0xb7, + 0x37, 0x81, 0x5e, 0x85, 0xa1, 0xf7, 0xc2, 0x98, 0x5d, 0x2b, 0x88, 0x83, 0x45, 0xc6, 0xa3, 0x0c, + 0xbd, 0x7e, 0xa3, 0xce, 0xca, 0x0f, 0xf6, 0xe6, 0x86, 0x6b, 0xa1, 0x2b, 0xff, 0x62, 0x55, 0x01, + 0x7d, 0xc9, 0x82, 0x53, 0xc6, 0x3e, 0x53, 0x3d, 0x87, 0xa3, 0xf4, 0xfc, 0x71, 0xd1, 0xf2, 0xa9, + 0xb5, 0x4e, 0x34, 0x71, 0xe7, 0xa6, 0xec, 0xdf, 0xe5, 0x26, 0x4c, 0x61, 0xd4, 0x20, 0x71, 0xcb, + 0x3f, 0x89, 0x4c, 0xf6, 0x37, 0x0c, 0x7b, 0xcb, 0x03, 0x30, 0xa2, 0xff, 0x7b, 0x8b, 0x19, 0xd1, + 0xd7, 
0xc9, 0x76, 0xd3, 0x77, 0x92, 0x93, 0xf0, 0xee, 0xfd, 0x2c, 0x0c, 0x25, 0xa2, 0xb5, 0x62, + 0x69, 0xf8, 0xb5, 0xee, 0xb1, 0xcb, 0x05, 0x75, 0x30, 0xc9, 0x52, 0xac, 0x08, 0xda, 0xff, 0x9a, + 0x7f, 0x15, 0x09, 0x39, 0x01, 0x4b, 0xc1, 0x75, 0xd3, 0x52, 0xf0, 0x4c, 0xe1, 0xb1, 0x74, 0xb1, + 0x18, 0x7c, 0xdf, 0x1c, 0x01, 0xd3, 0x1f, 0x7e, 0x7a, 0x6e, 0x79, 0xec, 0x5f, 0xb1, 0x60, 0xba, + 0xd3, 0x75, 0x3b, 0x15, 0x30, 0xb8, 0xf6, 0xa2, 0xee, 0xbf, 0xd4, 0xac, 0xde, 0x12, 0xe5, 0x58, + 0x61, 0x14, 0xce, 0x8b, 0xdd, 0x5b, 0xea, 0xa6, 0x1b, 0x60, 0x3e, 0xb5, 0x86, 0x5e, 0xe3, 0xce, + 0xfc, 0x96, 0x7a, 0x0b, 0xad, 0x37, 0x47, 0x7e, 0xfb, 0x37, 0x4a, 0x30, 0xcd, 0x8d, 0xd0, 0x8b, + 0x3b, 0xa1, 0xe7, 0xd6, 0x42, 0x57, 0x84, 0x36, 0xb8, 0x30, 0xd2, 0xd4, 0x94, 0xcf, 0x62, 0xa9, + 0x60, 0x74, 0x75, 0x35, 0x15, 0xf8, 0xf5, 0x52, 0x6c, 0x50, 0xa5, 0xad, 0x90, 0x1d, 0xaf, 0xa1, + 0x6c, 0x9a, 0xa5, 0x9e, 0x4f, 0x06, 0xd5, 0xca, 0x8a, 0x46, 0x07, 0x1b, 0x54, 0x8f, 0xe1, 0x39, + 0x0b, 0xfb, 0x1f, 0x58, 0xf0, 0x48, 0x97, 0x74, 0x31, 0xb4, 0xb9, 0x7b, 0xcc, 0xf0, 0x2f, 0xde, + 0xf2, 0x53, 0xcd, 0xf1, 0xeb, 0x00, 0x2c, 0xa0, 0xe8, 0x0e, 0x00, 0x37, 0xe7, 0xb3, 0x97, 0xdd, + 0x4b, 0x45, 0xfc, 0x91, 0xda, 0x92, 0x32, 0x68, 0xf1, 0xfa, 0xea, 0x2d, 0x77, 0x8d, 0xaa, 0xfd, + 0xad, 0x32, 0xf4, 0xf3, 0x27, 0xa3, 0x6b, 0x30, 0xb8, 0xc5, 0xd3, 0xd7, 0xf6, 0x96, 0x3d, 0x37, + 0x55, 0x2e, 0x78, 0x01, 0x96, 0x64, 0xd0, 0x35, 0x98, 0xa2, 0x27, 0x8b, 0xe7, 0xf8, 0x55, 0xe2, + 0x3b, 0xbb, 0x52, 0x5b, 0xe5, 0x6f, 0x1c, 0xc8, 0x64, 0xdc, 0x53, 0x6b, 0xed, 0x28, 0xb8, 0x53, + 0x3d, 0xf4, 0x5a, 0x5b, 0xb6, 0x39, 0x9e, 0x16, 0x58, 0x49, 0xaa, 0x87, 0x67, 0x9c, 0xa3, 0xf2, + 0x74, 0xb3, 0x4d, 0x2f, 0xd7, 0x5e, 0xe6, 0x35, 0x75, 0x71, 0x13, 0x97, 0xf9, 0x16, 0xb4, 0x98, + 0x4f, 0xc5, 0xfa, 0x56, 0x44, 0xe2, 0xad, 0xd0, 0x77, 0xc5, 0xa3, 0x92, 0xa9, 0x6f, 0x41, 0x06, + 0x8e, 0xdb, 0x6a, 0x50, 0x2a, 0x1b, 0x8e, 0xe7, 0xb7, 0x22, 0x92, 0x52, 0x19, 0x30, 0xa9, 0xac, + 0x66, 0xe0, 0xb8, 0xad, 0x06, 0x5d, 0x5b, 0xa7, 0xc4, 0x3b, 0x84, 0x32, 0x38, 0x5a, 0xb0, 0xa0, + 0xcf, 0xc0, 0xa0, 0x74, 0x91, 0x2f, 0x94, 0xc3, 0x43, 0x38, 0x0e, 0xa8, 0x37, 0x0d, 0xb5, 0x37, + 0xaf, 0x84, 0x73, 0xbc, 0xa4, 0x77, 0x94, 0xf7, 0xee, 0xfe, 0xcc, 0x82, 0xa9, 0x0e, 0xae, 0x5e, + 0x9c, 0xa5, 0x6d, 0x7a, 0x71, 0xa2, 0x32, 0xee, 0x6b, 0x2c, 0x8d, 0x97, 0x63, 0x85, 0x41, 0x77, + 0x0b, 0x67, 0x9a, 0x59, 0x46, 0x29, 0x5c, 0x40, 0x04, 0xb4, 0x37, 0x46, 0x89, 0xce, 0x42, 0x5f, + 0x2b, 0x26, 0x91, 0x7c, 0x7c, 0x4e, 0xf2, 0x79, 0x66, 0x07, 0x64, 0x10, 0x2a, 0xb6, 0x6e, 0x2a, + 0x13, 0x9c, 0x26, 0xb6, 0x72, 0x23, 0x1c, 0x87, 0xd9, 0x5f, 0x2f, 0xc3, 0x78, 0xc6, 0xe5, 0x93, + 0x76, 0x64, 0x3b, 0x0c, 0xbc, 0x24, 0x54, 0x79, 0xd5, 0xf8, 0x7b, 0x57, 0xa4, 0xb9, 0x75, 0x4d, + 0x94, 0x63, 0x85, 0x81, 0x9e, 0x96, 0xef, 0x8d, 0x66, 0x5f, 0x12, 0x58, 0xaa, 0x1a, 0x4f, 0x8e, + 0x16, 0x7d, 0x05, 0xe4, 0x49, 0xe8, 0x6b, 0x86, 0xea, 0xf9, 0x68, 0xf5, 0x3d, 0xf1, 0x52, 0xb5, + 0x16, 0x86, 0x3e, 0x66, 0x40, 0xf4, 0x94, 0x18, 0x7d, 0xe6, 0xe6, 0x02, 0x3b, 0x6e, 0x18, 0x6b, + 0x53, 0xf0, 0x0c, 0x0c, 0xde, 0x25, 0xbb, 0x91, 0x17, 0x6c, 0x66, 0xef, 0x6d, 0xae, 0xf0, 0x62, + 0x2c, 0xe1, 0xe6, 0x4b, 0x1f, 0x83, 0xc7, 0xfc, 0xd2, 0xc7, 0x50, 0xee, 0x39, 0xf8, 0x1d, 0x0b, + 0xc6, 0x59, 0xb2, 0x51, 0x11, 0x9a, 0xef, 0x85, 0xc1, 0x09, 0xc8, 0x18, 0x4f, 0x42, 0x7f, 0x44, + 0x1b, 0xcd, 0xa6, 0xea, 0x67, 0x3d, 0xc1, 0x1c, 0x86, 0x1e, 0x83, 0x3e, 0xd6, 0x05, 0xfa, 0x19, + 0x47, 0x78, 0x4e, 0xf3, 0xaa, 0x93, 0x38, 0x98, 0x95, 0xb2, 0x28, 0x2b, 0x4c, 0x9a, 0xbe, 0xc7, + 0x3b, 0x9d, 0x9a, 0x5b, 0x1f, 
0xb6, 0x28, 0xab, 0x8e, 0x9d, 0x7c, 0x50, 0x51, 0x56, 0x9d, 0x89, + 0x1f, 0x2e, 0xe7, 0xff, 0x8f, 0x12, 0x9c, 0xe9, 0x58, 0x2f, 0xbd, 0x01, 0x5e, 0x35, 0x6e, 0x80, + 0x2f, 0x64, 0x6e, 0x80, 0xed, 0xc3, 0x6b, 0x3f, 0x98, 0x3b, 0xe1, 0xce, 0x57, 0xb5, 0xe5, 0x13, + 0xbc, 0xaa, 0xed, 0x2b, 0x2a, 0xe2, 0xf4, 0xe7, 0x88, 0x38, 0x7f, 0x68, 0xc1, 0xa3, 0x1d, 0xa7, + 0xec, 0xa1, 0x0b, 0x6b, 0xeb, 0xd8, 0xcb, 0x2e, 0xda, 0xc9, 0x2f, 0x97, 0xbb, 0x8c, 0x8a, 0xe9, + 0x29, 0xe7, 0x28, 0x17, 0x62, 0xc0, 0x58, 0x08, 0x6f, 0x23, 0x9c, 0x03, 0xf1, 0x32, 0xac, 0xa0, + 0x28, 0xd6, 0xc2, 0xc2, 0x78, 0x27, 0x57, 0x8e, 0xb8, 0xa1, 0xe6, 0x4d, 0x3b, 0xb9, 0x9e, 0x6f, + 0x20, 0x1b, 0x2c, 0x76, 0x5b, 0xd3, 0x3c, 0xcb, 0x47, 0xd1, 0x3c, 0x47, 0x3a, 0x6b, 0x9d, 0x68, + 0x11, 0xc6, 0xb7, 0xbd, 0x80, 0x3d, 0x10, 0x6a, 0x4a, 0x4f, 0x2a, 0x36, 0xf7, 0x9a, 0x09, 0xc6, + 0x59, 0xfc, 0xd9, 0x57, 0x61, 0xf4, 0xe8, 0xd6, 0xb5, 0x1f, 0x97, 0xe1, 0x83, 0x87, 0x30, 0x05, + 0x7e, 0x3a, 0x18, 0xdf, 0x45, 0x3b, 0x1d, 0xda, 0xbe, 0x4d, 0x0d, 0xa6, 0x37, 0x5a, 0xbe, 0xbf, + 0xcb, 0xfc, 0xa7, 0x88, 0x2b, 0x31, 0x84, 0x50, 0xa3, 0x5e, 0x22, 0x5f, 0xed, 0x80, 0x83, 0x3b, + 0xd6, 0x44, 0x9f, 0x06, 0x14, 0xde, 0x61, 0xe9, 0x78, 0xdd, 0x34, 0x9f, 0x02, 0xfb, 0x04, 0xe5, + 0x74, 0xab, 0xde, 0x68, 0xc3, 0xc0, 0x1d, 0x6a, 0x51, 0x39, 0x95, 0x3d, 0x62, 0xae, 0xba, 0x95, + 0x91, 0x53, 0xb1, 0x0e, 0xc4, 0x26, 0x2e, 0xba, 0x04, 0x93, 0xce, 0x8e, 0xe3, 0xf1, 0xf4, 0x5a, + 0x92, 0x00, 0x17, 0x54, 0x95, 0xfd, 0x6a, 0x31, 0x8b, 0x80, 0xdb, 0xeb, 0xa0, 0xa6, 0x61, 0x90, + 0xe4, 0x89, 0xf8, 0x3f, 0x71, 0x84, 0x15, 0x5c, 0xd8, 0x44, 0x69, 0xff, 0x57, 0x8b, 0x1e, 0x7d, + 0x1d, 0xde, 0x92, 0xa4, 0x33, 0xa2, 0x0c, 0x6c, 0x5a, 0x98, 0x9b, 0x9a, 0x91, 0x65, 0x1d, 0x88, + 0x4d, 0x5c, 0xbe, 0x34, 0xe2, 0xd4, 0x9d, 0xdb, 0x90, 0x36, 0x45, 0x84, 0xa8, 0xc2, 0xa0, 0x12, + 0xb4, 0xeb, 0xed, 0x78, 0x71, 0x18, 0x89, 0x0d, 0xd4, 0xa3, 0x73, 0x6f, 0xca, 0x2f, 0xab, 0x9c, + 0x0c, 0x96, 0xf4, 0xec, 0x6f, 0x94, 0x60, 0x54, 0xb6, 0xf8, 0x7a, 0x2b, 0x4c, 0x9c, 0x13, 0x38, + 0xd2, 0x5f, 0x37, 0x8e, 0xf4, 0x85, 0x62, 0x01, 0xb3, 0xac, 0x73, 0x5d, 0x8f, 0xf2, 0xcf, 0x64, + 0x8e, 0xf2, 0x17, 0x7a, 0x21, 0x7a, 0xf8, 0x11, 0xfe, 0x6f, 0x2d, 0x98, 0x34, 0xf0, 0x4f, 0xe0, + 0x24, 0xa9, 0x99, 0x27, 0xc9, 0x73, 0x3d, 0x8c, 0xa6, 0xcb, 0x09, 0xf2, 0xed, 0x52, 0x66, 0x14, + 0xec, 0xe4, 0xf8, 0x02, 0xf4, 0x6d, 0x39, 0x91, 0x5b, 0x2c, 0xd7, 0x64, 0x5b, 0xf5, 0xf9, 0xcb, + 0x4e, 0xe4, 0x72, 0xfe, 0x7f, 0x5e, 0xbd, 0x74, 0xe5, 0x44, 0x6e, 0x6e, 0x94, 0x03, 0x6b, 0x14, + 0x5d, 0x84, 0x81, 0xb8, 0x11, 0x36, 0x95, 0x1f, 0xe8, 0x59, 0xfe, 0x0a, 0x16, 0x2d, 0x39, 0xd8, + 0x9b, 0x43, 0x66, 0x73, 0xb4, 0x18, 0x0b, 0xfc, 0xd9, 0x4d, 0xa8, 0xa8, 0xa6, 0x8f, 0xd5, 0x13, + 0xfe, 0xbf, 0x95, 0x61, 0xaa, 0xc3, 0x5a, 0x41, 0x5f, 0x34, 0xe6, 0xed, 0xd5, 0x9e, 0x17, 0xdb, + 0xfb, 0x9c, 0xb9, 0x2f, 0x32, 0x4d, 0xc9, 0x15, 0xab, 0xe3, 0x08, 0xcd, 0xdf, 0x8c, 0x49, 0xb6, + 0x79, 0x5a, 0x94, 0xdf, 0x3c, 0x6d, 0xf6, 0xc4, 0xa6, 0x9f, 0x36, 0xa4, 0x7a, 0x7a, 0xac, 0xdf, + 0xf9, 0xaf, 0xf5, 0xc1, 0x74, 0xa7, 0xc8, 0x7c, 0xf4, 0x15, 0x2b, 0xf3, 0xa0, 0xc4, 0x6b, 0xbd, + 0x87, 0xf7, 0xf3, 0x57, 0x26, 0x44, 0x36, 0x9b, 0x79, 0xf3, 0x89, 0x89, 0xdc, 0x19, 0x17, 0xad, + 0xb3, 0xf8, 0xa4, 0x88, 0x3f, 0x0e, 0x22, 0xb9, 0xc2, 0xa7, 0x8e, 0xd0, 0x15, 0xf1, 0xbe, 0x48, + 0x9c, 0x89, 0x4f, 0x92, 0xc5, 0xf9, 0xf1, 0x49, 0xb2, 0x0f, 0xb3, 0x1e, 0x0c, 0x6b, 0xe3, 0x3a, + 0xd6, 0x65, 0x70, 0x97, 0x1e, 0x51, 0x5a, 0xbf, 0x8f, 0x75, 0x29, 0xfc, 0x5d, 0x0b, 0x32, 0x4e, + 0x5b, 0xca, 0x2c, 0x63, 0x75, 0x35, 0xcb, 0x9c, 0x85, 
0xbe, 0x28, 0xf4, 0x49, 0xf6, 0xb1, 0x03, + 0x1c, 0xfa, 0x04, 0x33, 0x88, 0x7a, 0xfc, 0xb6, 0xdc, 0xed, 0xf1, 0x5b, 0xaa, 0xa7, 0xfb, 0x64, + 0x87, 0x48, 0x23, 0x89, 0x62, 0xe3, 0x57, 0x69, 0x21, 0xe6, 0x30, 0xfb, 0xb7, 0xfa, 0x60, 0xaa, + 0x43, 0xb4, 0x1b, 0xd5, 0x90, 0x36, 0x9d, 0x84, 0xdc, 0x73, 0x76, 0xb3, 0x49, 0x57, 0x2f, 0xf1, + 0x62, 0x2c, 0xe1, 0xcc, 0xd9, 0x94, 0x27, 0x6e, 0xcb, 0x98, 0xae, 0x44, 0xbe, 0x36, 0x01, 0x3d, + 0xfe, 0x67, 0x52, 0x2f, 0x00, 0xc4, 0xb1, 0xbf, 0x12, 0x50, 0x09, 0xcf, 0x15, 0x4e, 0xad, 0x69, + 0xbe, 0xbf, 0xfa, 0x55, 0x01, 0xc1, 0x1a, 0x16, 0xaa, 0xc2, 0x44, 0x33, 0x0a, 0x13, 0x6e, 0x18, + 0xac, 0x72, 0x47, 0x88, 0x7e, 0x33, 0x9a, 0xaa, 0x96, 0x81, 0xe3, 0xb6, 0x1a, 0xe8, 0x65, 0x18, + 0x16, 0x11, 0x56, 0xb5, 0x30, 0xf4, 0x85, 0x19, 0x49, 0x5d, 0xc7, 0xd7, 0x53, 0x10, 0xd6, 0xf1, + 0xb4, 0x6a, 0xcc, 0xda, 0x38, 0xd8, 0xb1, 0x1a, 0xb7, 0x38, 0x6a, 0x78, 0x99, 0xfc, 0x1d, 0x43, + 0x85, 0xf2, 0x77, 0xa4, 0x86, 0xb5, 0x4a, 0xe1, 0x8b, 0x18, 0xc8, 0x35, 0x40, 0xfd, 0x41, 0x19, + 0x06, 0xf8, 0xa7, 0x38, 0x01, 0x29, 0xaf, 0x26, 0x4c, 0x4a, 0x85, 0x72, 0x25, 0xf0, 0x5e, 0xcd, + 0x57, 0x9d, 0xc4, 0xe1, 0xac, 0x49, 0xed, 0x90, 0xd4, 0x0c, 0x85, 0xe6, 0x8d, 0x3d, 0x34, 0x9b, + 0xb1, 0x94, 0x00, 0xa7, 0xa1, 0xed, 0xa8, 0x2d, 0x80, 0x98, 0x3d, 0xd5, 0x49, 0x69, 0x88, 0x8c, + 0xb0, 0x2f, 0x15, 0xea, 0x47, 0x5d, 0x55, 0xe3, 0xbd, 0x49, 0x97, 0xa5, 0x02, 0x60, 0x8d, 0xf6, + 0xec, 0x2b, 0x50, 0x51, 0xc8, 0x79, 0x2a, 0xe4, 0x88, 0xce, 0xda, 0xfe, 0x3f, 0x18, 0xcf, 0xb4, + 0xd5, 0x93, 0x06, 0xfa, 0x3b, 0x16, 0x8c, 0xf3, 0x2e, 0xaf, 0x04, 0x3b, 0x82, 0x15, 0x7c, 0xd9, + 0x82, 0x69, 0xbf, 0xc3, 0x4e, 0x14, 0x9f, 0xf9, 0x28, 0x7b, 0x58, 0x29, 0x9f, 0x9d, 0xa0, 0xb8, + 0x63, 0x6b, 0xe8, 0x1c, 0x0c, 0xf1, 0x97, 0x87, 0x1d, 0x5f, 0x78, 0x50, 0x8f, 0xf0, 0x5c, 0xd8, + 0xbc, 0x0c, 0x2b, 0xa8, 0xfd, 0x13, 0x0b, 0x26, 0xdb, 0x1e, 0xb2, 0x7f, 0x58, 0x86, 0x21, 0xb2, + 0x7e, 0x97, 0xba, 0x64, 0xfd, 0xd6, 0x47, 0x59, 0x3e, 0x74, 0x94, 0xbf, 0x61, 0x81, 0x58, 0xa1, + 0x27, 0xa0, 0x3f, 0xac, 0x99, 0xfa, 0xc3, 0x87, 0x8a, 0x2c, 0xfa, 0x2e, 0x8a, 0xc3, 0x2f, 0x95, + 0x60, 0x82, 0x23, 0xa4, 0x37, 0x32, 0x0f, 0xcb, 0xc7, 0xe9, 0xed, 0x35, 0x1a, 0xf5, 0x04, 0x68, + 0xe7, 0x91, 0x1a, 0xdf, 0xb2, 0xef, 0xd0, 0x6f, 0xf9, 0x17, 0x16, 0x20, 0x3e, 0x27, 0xd9, 0x67, + 0x9b, 0xf9, 0xe9, 0xa6, 0x99, 0x03, 0x52, 0xce, 0xa1, 0x20, 0x58, 0xc3, 0x7a, 0xc0, 0x43, 0xc8, + 0xdc, 0x87, 0x95, 0xf3, 0xef, 0xc3, 0x7a, 0x18, 0xf5, 0xef, 0x96, 0x21, 0xeb, 0x4a, 0x89, 0xde, + 0x81, 0x91, 0x86, 0xd3, 0x74, 0xee, 0x78, 0xbe, 0x97, 0x78, 0x24, 0x2e, 0x76, 0xe1, 0xbe, 0xac, + 0xd5, 0x10, 0xd7, 0x50, 0x5a, 0x09, 0x36, 0x28, 0xa2, 0x79, 0x80, 0x66, 0xe4, 0xed, 0x78, 0x3e, + 0xd9, 0x64, 0x1a, 0x0f, 0x8b, 0xc5, 0xe0, 0x77, 0xc7, 0xb2, 0x14, 0x6b, 0x18, 0x1d, 0x7c, 0xf7, + 0xcb, 0x27, 0xe1, 0xbb, 0xdf, 0xd7, 0xa3, 0xef, 0x7e, 0x7f, 0x21, 0xdf, 0x7d, 0x0c, 0xa7, 0xe5, + 0xe1, 0x4d, 0xff, 0xaf, 0x7a, 0x3e, 0x11, 0xb2, 0x1b, 0x8f, 0xd5, 0x98, 0xdd, 0xdf, 0x9b, 0x3b, + 0x8d, 0x3b, 0x62, 0xe0, 0x2e, 0x35, 0xed, 0x16, 0x4c, 0xd5, 0x49, 0xe4, 0xb1, 0x9c, 0x94, 0x6e, + 0xba, 0x97, 0x3e, 0x07, 0x95, 0x28, 0xb3, 0x8d, 0x7b, 0x0c, 0xc8, 0xd7, 0xb2, 0x98, 0xc9, 0x6d, + 0x9b, 0x92, 0xb4, 0xff, 0x46, 0x09, 0x06, 0x85, 0x13, 0xe5, 0x09, 0x08, 0x1f, 0x57, 0x0c, 0x13, + 0xd3, 0x33, 0x79, 0xfc, 0x8f, 0x75, 0xab, 0xab, 0x71, 0xa9, 0x9e, 0x31, 0x2e, 0x3d, 0x57, 0x8c, + 0xdc, 0xe1, 0x66, 0xa5, 0x7f, 0x5a, 0x86, 0x31, 0xd3, 0xa9, 0xf4, 0x04, 0xa6, 0xe5, 0x0d, 0x18, + 0x8c, 0x85, 0x7f, 0x73, 0xa9, 0x88, 0xcf, 0x5e, 0xf6, 0x13, 0xa7, 0x37, 0xf1, 
0xc2, 0xa3, 0x59, + 0x92, 0xeb, 0xe8, 0x42, 0x5d, 0x3e, 0x11, 0x17, 0xea, 0x3c, 0x5f, 0xdf, 0xbe, 0x07, 0xe1, 0xeb, + 0x6b, 0xff, 0x80, 0xb1, 0x7c, 0xbd, 0xfc, 0x04, 0x8e, 0xf1, 0xd7, 0xcd, 0xc3, 0xe1, 0x7c, 0xa1, + 0x75, 0x27, 0xba, 0xd7, 0xe5, 0x38, 0xff, 0xae, 0x05, 0xc3, 0x02, 0xf1, 0x04, 0x06, 0xf0, 0x69, + 0x73, 0x00, 0x4f, 0x15, 0x1a, 0x40, 0x97, 0x9e, 0x7f, 0xa3, 0xa4, 0x7a, 0x5e, 0x13, 0x4f, 0xed, + 0xe7, 0x66, 0xe0, 0x1e, 0xa2, 0xaa, 0x5f, 0xd8, 0x08, 0x7d, 0x21, 0xc0, 0x3d, 0x96, 0x86, 0xe6, + 0xf1, 0xf2, 0x03, 0xed, 0x37, 0x56, 0xd8, 0x2c, 0x72, 0x2c, 0x8c, 0x12, 0x71, 0x80, 0x76, 0x7a, + 0xe8, 0xdf, 0x05, 0x48, 0x5f, 0x57, 0x17, 0x51, 0xad, 0xdd, 0x77, 0x6b, 0x2b, 0xf1, 0xfc, 0x79, + 0x2f, 0x48, 0xe2, 0x24, 0x9a, 0x5f, 0x0b, 0x92, 0x1b, 0x11, 0x17, 0xfa, 0xb5, 0x58, 0x3b, 0x45, + 0x0b, 0x6b, 0x74, 0x65, 0x10, 0x07, 0x6b, 0xa3, 0xdf, 0xbc, 0x41, 0xba, 0x2e, 0xca, 0xb1, 0xc2, + 0xb0, 0x5f, 0x61, 0x9c, 0x9d, 0x4d, 0x50, 0x6f, 0x61, 0x70, 0xbf, 0x38, 0xa0, 0xa6, 0x96, 0x99, + 0x85, 0xaf, 0xeb, 0xc1, 0x76, 0x45, 0xd9, 0x27, 0xed, 0x82, 0xee, 0x47, 0x9d, 0xc6, 0xe6, 0x21, + 0xd2, 0x76, 0xed, 0xf8, 0x4a, 0x61, 0x8e, 0xdc, 0xc3, 0x45, 0x23, 0x4b, 0x3a, 0xc8, 0x32, 0xad, + 0xad, 0xd5, 0xb2, 0x79, 0xd3, 0x97, 0x25, 0x00, 0xa7, 0x38, 0x68, 0x41, 0x28, 0x94, 0xdc, 0xe2, + 0xf2, 0xc1, 0x8c, 0x42, 0x29, 0xa7, 0x44, 0xd3, 0x28, 0x5f, 0x80, 0x61, 0xf5, 0x14, 0x4d, 0x8d, + 0x3f, 0x02, 0x52, 0xe1, 0xf2, 0xd5, 0x4a, 0x5a, 0x8c, 0x75, 0x1c, 0xb4, 0x06, 0x53, 0xae, 0x8a, + 0xd9, 0xa9, 0xb5, 0xee, 0xf8, 0x5e, 0x83, 0x56, 0xe5, 0xf1, 0xb6, 0x8f, 0xec, 0xef, 0xcd, 0x4d, + 0x55, 0xdb, 0xc1, 0xb8, 0x53, 0x1d, 0xb4, 0x0e, 0xe3, 0x31, 0x7f, 0x72, 0x47, 0x06, 0x66, 0x08, + 0x1b, 0xc4, 0xb3, 0xf2, 0xbe, 0xb3, 0x6e, 0x82, 0x0f, 0x58, 0x11, 0xe7, 0x0a, 0x32, 0x94, 0x23, + 0x4b, 0x02, 0xbd, 0x06, 0x63, 0xbe, 0xfe, 0x9e, 0x68, 0x4d, 0x98, 0x28, 0x94, 0x07, 0x9b, 0xf1, + 0xda, 0x68, 0x0d, 0x67, 0xb0, 0xd1, 0x1b, 0x30, 0xa3, 0x97, 0x88, 0x3c, 0x42, 0x4e, 0xb0, 0x49, + 0x62, 0xf1, 0x3c, 0xc7, 0x63, 0xfb, 0x7b, 0x73, 0x33, 0x57, 0xbb, 0xe0, 0xe0, 0xae, 0xb5, 0xd1, + 0x45, 0x18, 0x91, 0x33, 0xa9, 0x85, 0x31, 0xa5, 0xbe, 0x93, 0x1a, 0x0c, 0x1b, 0x98, 0xef, 0xef, + 0x5a, 0xf7, 0x0b, 0xb4, 0xb2, 0x76, 0x84, 0xa3, 0x77, 0x61, 0x44, 0xef, 0x63, 0xf6, 0x6c, 0xce, + 0x7f, 0xa3, 0x55, 0x88, 0x02, 0xaa, 0xe7, 0x3a, 0x0c, 0x1b, 0xb4, 0xed, 0x1b, 0x30, 0x50, 0xdf, + 0x8d, 0x1b, 0x89, 0x5f, 0x80, 0xbf, 0x3d, 0x69, 0x0c, 0x21, 0xdd, 0x7b, 0xec, 0xbd, 0x28, 0x31, + 0x22, 0x9b, 0xc0, 0xf8, 0xfa, 0x72, 0xad, 0x1e, 0x36, 0xee, 0x92, 0x64, 0x91, 0x6b, 0x6f, 0x58, + 0x70, 0x37, 0xeb, 0x88, 0x5c, 0xab, 0x03, 0x3f, 0xb4, 0xff, 0xd4, 0x82, 0x7e, 0xf6, 0xd6, 0x51, + 0xde, 0x3b, 0x59, 0x45, 0x3a, 0x8d, 0x5e, 0x86, 0x01, 0xb2, 0xb1, 0x41, 0x1a, 0x89, 0xd8, 0xc6, + 0x32, 0x56, 0x60, 0x60, 0x85, 0x95, 0xd2, 0xcd, 0xc9, 0x1a, 0xe3, 0x7f, 0xb1, 0x40, 0x46, 0x9f, + 0x85, 0x4a, 0xe2, 0x6d, 0x93, 0x45, 0xd7, 0x15, 0x06, 0xc5, 0xde, 0xdc, 0x57, 0x14, 0xb3, 0x58, + 0x97, 0x44, 0x70, 0x4a, 0xcf, 0xfe, 0x5a, 0x09, 0x20, 0x8d, 0xd4, 0xc9, 0x1b, 0xe6, 0x52, 0xdb, + 0x73, 0x60, 0x4f, 0x77, 0x78, 0x0e, 0x0c, 0xa5, 0x04, 0x3b, 0x3c, 0x06, 0xa6, 0xa6, 0xaa, 0x5c, + 0x68, 0xaa, 0xfa, 0x7a, 0x99, 0xaa, 0x65, 0x98, 0x4c, 0x23, 0x8d, 0xcc, 0x90, 0x4d, 0x96, 0x1c, + 0x74, 0x3d, 0x0b, 0xc4, 0xed, 0xf8, 0xf6, 0xd7, 0x2c, 0x10, 0x0e, 0x8f, 0x05, 0x56, 0xab, 0x2b, + 0x9f, 0xee, 0x31, 0xb2, 0x98, 0x3d, 0x5b, 0xc4, 0x17, 0x54, 0xe4, 0x2e, 0x53, 0xfb, 0xc7, 0xc8, + 0x58, 0x66, 0x50, 0xb5, 0x7f, 0xd3, 0x82, 0x61, 0x0e, 0xbe, 0xc6, 0x64, 0xea, 0xfc, 0x7e, 0xf5, + 0x94, 
0x79, 0x96, 0xbd, 0x6a, 0x43, 0x09, 0xab, 0x0c, 0xa4, 0xfa, 0xab, 0x36, 0x12, 0x80, 0x53, + 0x1c, 0xf4, 0x0c, 0x0c, 0xc6, 0xad, 0x3b, 0x0c, 0x3d, 0xe3, 0xfd, 0x58, 0xe7, 0xc5, 0x58, 0xc2, + 0xed, 0x7f, 0x5e, 0x82, 0x89, 0xac, 0xf3, 0x2b, 0xc2, 0x30, 0xc0, 0x65, 0xec, 0xac, 0x78, 0x76, + 0x98, 0x2d, 0x47, 0x73, 0x9e, 0x05, 0xfe, 0x36, 0x33, 0x33, 0xba, 0x0b, 0x4a, 0x68, 0x03, 0x86, + 0xdd, 0xf0, 0x5e, 0x70, 0xcf, 0x89, 0xdc, 0xc5, 0xda, 0x9a, 0xf8, 0x12, 0x39, 0xee, 0x4a, 0xd5, + 0xb4, 0x82, 0xee, 0x9a, 0xcb, 0x6c, 0x0b, 0x29, 0x08, 0xeb, 0x84, 0xa9, 0x4e, 0xd9, 0x08, 0x83, + 0x0d, 0x6f, 0xf3, 0x9a, 0xd3, 0x2c, 0x76, 0x31, 0xbf, 0x2c, 0xd1, 0xb5, 0x36, 0x46, 0x45, 0x8e, + 0x06, 0x0e, 0xc0, 0x29, 0x49, 0xfb, 0xd7, 0xa7, 0xc1, 0x58, 0x0b, 0x46, 0x7a, 0x58, 0xeb, 0x81, + 0xa7, 0x87, 0x7d, 0x0b, 0x86, 0xc8, 0x76, 0x33, 0xd9, 0xad, 0x7a, 0x51, 0xb1, 0x64, 0xdf, 0x2b, + 0x02, 0xbb, 0x9d, 0xba, 0x84, 0x60, 0x45, 0xb1, 0x4b, 0xb2, 0xdf, 0xf2, 0x43, 0x91, 0xec, 0xb7, + 0xef, 0x2f, 0x25, 0xd9, 0xef, 0x1b, 0x30, 0xb8, 0xe9, 0x25, 0x98, 0x34, 0x43, 0x91, 0xf4, 0x22, + 0x67, 0xf1, 0x5c, 0xe2, 0xc8, 0xed, 0x69, 0x20, 0x05, 0x00, 0x4b, 0x72, 0x68, 0x5d, 0x6d, 0xaa, + 0x81, 0x22, 0x67, 0x79, 0xbb, 0xad, 0xaf, 0xe3, 0xb6, 0x12, 0xc9, 0x7d, 0x07, 0xdf, 0x7f, 0x72, + 0x5f, 0x95, 0x92, 0x77, 0xe8, 0x41, 0xa5, 0xe4, 0x35, 0x52, 0x1b, 0x57, 0x8e, 0x23, 0xb5, 0xf1, + 0xd7, 0x2c, 0x38, 0xd5, 0xec, 0x94, 0x18, 0x5c, 0x24, 0xd7, 0xfd, 0xe4, 0x11, 0x52, 0xa5, 0x1b, + 0x4d, 0xb3, 0x54, 0x02, 0x1d, 0xd1, 0x70, 0xe7, 0x86, 0x65, 0x8e, 0xe4, 0xe1, 0xf7, 0x9f, 0x23, + 0xf9, 0xb8, 0xb3, 0xf0, 0xa6, 0x19, 0x93, 0x47, 0x8f, 0x25, 0x63, 0xf2, 0xd8, 0x03, 0xcc, 0x98, + 0xac, 0xe5, 0x3a, 0x1e, 0x7f, 0xb0, 0xb9, 0x8e, 0xb7, 0xcc, 0x73, 0x89, 0xa7, 0xd6, 0x7d, 0xb9, + 0xf0, 0xb9, 0x64, 0xb4, 0x70, 0xf8, 0xc9, 0xc4, 0xb3, 0x3e, 0x4f, 0xbe, 0xcf, 0xac, 0xcf, 0x46, + 0xee, 0x64, 0x74, 0x1c, 0xb9, 0x93, 0xdf, 0xd1, 0x4f, 0xd0, 0xa9, 0x22, 0x2d, 0xa8, 0x83, 0xb2, + 0xbd, 0x85, 0x4e, 0x67, 0x68, 0x7b, 0x76, 0xe6, 0xe9, 0x93, 0xce, 0xce, 0x7c, 0xea, 0x18, 0xb3, + 0x33, 0x9f, 0x3e, 0xd1, 0xec, 0xcc, 0x8f, 0x3c, 0x24, 0xd9, 0x99, 0x67, 0x4e, 0x2a, 0x3b, 0xf3, + 0xa3, 0x0f, 0x34, 0x3b, 0x33, 0xfd, 0x74, 0x4d, 0x19, 0x42, 0x36, 0x33, 0x5b, 0xe4, 0xd3, 0x75, + 0x8c, 0x38, 0xe3, 0x9f, 0x4e, 0x81, 0x70, 0x4a, 0xd4, 0xfe, 0x2b, 0x70, 0xe6, 0xf0, 0xa5, 0x9b, + 0x7a, 0x6b, 0xd4, 0x52, 0x9b, 0x59, 0xc6, 0x5b, 0x83, 0x89, 0x85, 0x1a, 0x56, 0xe1, 0xf4, 0xb1, + 0xdf, 0xb6, 0xe0, 0x91, 0x2e, 0xd9, 0x15, 0x0b, 0xc7, 0x5f, 0x36, 0x61, 0xbc, 0x69, 0x56, 0x2d, + 0x1c, 0xce, 0x6d, 0x64, 0x73, 0x54, 0x3e, 0xf2, 0x19, 0x00, 0xce, 0x92, 0x5f, 0xfa, 0xd0, 0x0f, + 0x7f, 0x7c, 0xe6, 0x03, 0x3f, 0xfa, 0xf1, 0x99, 0x0f, 0xfc, 0xf1, 0x8f, 0xcf, 0x7c, 0xe0, 0xe7, + 0xf6, 0xcf, 0x58, 0x3f, 0xdc, 0x3f, 0x63, 0xfd, 0x68, 0xff, 0x8c, 0xf5, 0x67, 0xfb, 0x67, 0xac, + 0xaf, 0xfd, 0xe4, 0xcc, 0x07, 0xde, 0x2c, 0xed, 0xbc, 0xf0, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, + 0x5f, 0x32, 0x87, 0xa4, 0xbe, 0xc8, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/generated.proto b/vendor/k8s.io/client-go/pkg/api/v1/generated.proto new file mode 100644 index 000000000..9c48148aa --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/generated.proto @@ -0,0 +1,3922 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.kubernetes.pkg.api.v1;
+
+import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
+import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// Represents a Persistent Disk resource in AWS.
+//
+// An AWS EBS disk must exist before mounting to a container. The disk
+// must also be in the same AWS zone as the kubelet. An AWS EBS disk
+// can only be mounted as read/write once. AWS EBS volumes support
+// ownership management and SELinux relabeling.
+message AWSElasticBlockStoreVolumeSource {
+  // Unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+  // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore
+  optional string volumeID = 1;
+
+  // Filesystem type of the volume that you want to mount.
+  // Tip: Ensure that the filesystem type is supported by the host operating system.
+  // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore
+  // TODO: how do we prevent errors in the filesystem from compromising the machine
+  // +optional
+  optional string fsType = 2;
+
+  // The partition in the volume that you want to mount.
+  // If omitted, the default is to mount by volume name.
+  // Examples: For volume /dev/sda1, you specify the partition as "1".
+  // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+  // +optional
+  optional int32 partition = 3;
+
+  // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true".
+  // If omitted, the default is "false".
+  // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore
+  // +optional
+  optional bool readOnly = 4;
+}
+
+// Affinity is a group of affinity scheduling rules.
+message Affinity {
+  // Describes node affinity scheduling rules for the pod.
+  // +optional
+  optional NodeAffinity nodeAffinity = 1;
+
+  // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+  // +optional
+  optional PodAffinity podAffinity = 2;
+
+  // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+  // +optional
+  optional PodAntiAffinity podAntiAffinity = 3;
+}
+
+// AttachedVolume describes a volume attached to a node
+message AttachedVolume {
+  // Name of the attached volume
+  optional string name = 1;
+
+  // DevicePath represents the device path where the volume should be available
+  optional string devicePath = 2;
+}
+
+// AvoidPods describes pods that should avoid this node. This is the value for a
+// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and
+// will eventually become a field of NodeStatus.
+message AvoidPods {
+  // Bounded-sized list of signatures of pods that should avoid this node, sorted
+  // in timestamp order from oldest to newest. Size of the slice is unspecified.
+  // +optional
+  repeated PreferAvoidPodsEntry preferAvoidPods = 1;
+}
+
+// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+message AzureDiskVolumeSource {
+  // The Name of the data disk in the blob storage
+  optional string diskName = 1;
+
+  // The URI the data disk in the blob storage
+  optional string diskURI = 2;
+
+  // Host Caching mode: None, Read Only, Read Write.
+  // +optional
+  optional string cachingMode = 3;
+
+  // Filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // +optional
+  optional string fsType = 4;
+
+  // Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 5;
+}
+
+// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+message AzureFileVolumeSource {
+  // the name of secret that contains Azure Storage Account Name and Key
+  optional string secretName = 1;
+
+  // Share Name
+  optional string shareName = 2;
+
+  // Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 3;
+}
+
+// Binding ties one object to another.
+// For example, a pod is bound to a node by a scheduler.
+message Binding {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // The target object that you want to bind to the standard object.
+  optional ObjectReference target = 2;
+}
+
+// Adds and removes POSIX capabilities from running containers.
+message Capabilities {
+  // Added capabilities
+  // +optional
+  repeated string add = 1;
+
+  // Removed capabilities
+  // +optional
+  repeated string drop = 2;
+}
+
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
+// Cephfs volumes do not support ownership management or SELinux relabeling.
+message CephFSVolumeSource {
+  // Required: Monitors is a collection of Ceph monitors
+  // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+  repeated string monitors = 1;
+
+  // Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+  // +optional
+  optional string path = 2;
+
+  // Optional: User is the rados user name, default is admin
+  // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+  // +optional
+  optional string user = 3;
+
+  // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+  // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+  // +optional
+  optional string secretFile = 4;
+
+  // Optional: SecretRef is reference to the authentication secret for User, default is empty.
+  // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+  // +optional
+  optional LocalObjectReference secretRef = 5;
+
+  // Optional: Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
+  // +optional
+  optional bool readOnly = 6;
+}
+
+// Represents a cinder volume resource in Openstack.
+// A Cinder volume must exist before mounting to a container.
+// The volume must also be in the same region as the kubelet.
+// Cinder volumes support ownership management and SELinux relabeling.
+message CinderVolumeSource {
+  // volume id used to identify the volume in cinder
+  // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+  optional string volumeID = 1;
+
+  // Filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+  // +optional
+  optional string fsType = 2;
+
+  // Optional: Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+  // +optional
+  optional bool readOnly = 3;
+}
+
+// Information about the condition of a component.
+message ComponentCondition {
+  // Type of condition for a component.
+  // Valid value: "Healthy"
+  optional string type = 1;
+
+  // Status of the condition for a component.
+  // Valid values for "Healthy": "True", "False", or "Unknown".
+  optional string status = 2;
+
+  // Message about the condition for a component.
+  // For example, information about a health check.
+  // +optional
+  optional string message = 3;
+
+  // Condition error code for a component.
+  // For example, a health check error code.
+  // +optional
+  optional string error = 4;
+}
+
+// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
+message ComponentStatus {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // List of component conditions observed
+  // +optional
+  repeated ComponentCondition conditions = 2;
+}
+
+// Status of all the conditions for the component as a list of ComponentStatus objects.
+message ComponentStatusList {
+  // Standard list metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of ComponentStatus objects.
+  repeated ComponentStatus items = 2;
+}
+
+// ConfigMap holds configuration data for pods to consume.
+message ConfigMap {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Data contains the configuration data.
+  // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot.
+  // +optional
+  map<string, string> data = 2;
+}
+
+// ConfigMapEnvSource selects a ConfigMap to populate the environment
+// variables with.
+//
+// The contents of the target ConfigMap's Data field will represent the
+// key-value pairs as environment variables.
+message ConfigMapEnvSource {
+  // The ConfigMap to select from.
+  optional LocalObjectReference localObjectReference = 1;
+
+  // Specify whether the ConfigMap must be defined
+  // +optional
+  optional bool optional = 2;
+}
+
+// Selects a key from a ConfigMap.
+message ConfigMapKeySelector {
+  // The ConfigMap to select from.
+  optional LocalObjectReference localObjectReference = 1;
+
+  // The key to select.
+  optional string key = 2;
+
+  // Specify whether the ConfigMap or it's key must be defined
+  // +optional
+  optional bool optional = 3;
+}
+
+// ConfigMapList is a resource containing a list of ConfigMap objects.
+message ConfigMapList {
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of ConfigMaps.
+  repeated ConfigMap items = 2;
+}
+
+// Adapts a ConfigMap into a projected volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// projected volume as files using the keys in the Data field as the file names,
+// unless the items element is populated with specific mappings of keys to paths.
+// Note that this is identical to a configmap volume source without the default
+// mode.
+message ConfigMapProjection {
+  optional LocalObjectReference localObjectReference = 1;
+
+  // If unspecified, each key-value pair in the Data field of the referenced
+  // ConfigMap will be projected into the volume as a file whose name is the
+  // key and content is the value. If specified, the listed keys will be
+  // projected into the specified paths, and unlisted keys will not be
+  // present. If a key is specified which is not present in the ConfigMap,
+  // the volume setup will error unless it is marked optional. Paths must be
+  // relative and may not contain the '..' path or start with '..'.
+  // +optional
+  repeated KeyToPath items = 2;
+
+  // Specify whether the ConfigMap or it's keys must be defined
+  // +optional
+  optional bool optional = 4;
+}
+
+// Adapts a ConfigMap into a volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// volume as files using the keys in the Data field as the file names, unless
+// the items element is populated with specific mappings of keys to paths.
+// ConfigMap volumes support ownership management and SELinux relabeling.
+message ConfigMapVolumeSource {
+  optional LocalObjectReference localObjectReference = 1;
+
+  // If unspecified, each key-value pair in the Data field of the referenced
+  // ConfigMap will be projected into the volume as a file whose name is the
+  // key and content is the value. If specified, the listed keys will be
+  // projected into the specified paths, and unlisted keys will not be
+  // present. If a key is specified which is not present in the ConfigMap,
+  // the volume setup will error unless it is marked optional. Paths must be
+  // relative and may not contain the '..' path or start with '..'.
+  // +optional
+  repeated KeyToPath items = 2;
+
+  // Optional: mode bits to use on created files by default. Must be a
+  // value between 0 and 0777. Defaults to 0644.
+  // Directories within the path are not affected by this setting.
+  // This might be in conflict with other options that affect the file
+  // mode, like fsGroup, and the result can be other mode bits set.
+ // +optional + optional int32 defaultMode = 3; + + // Specify whether the ConfigMap or it's keys must be defined + // +optional + optional bool optional = 4; +} + +// A single application container that you want to run within a pod. +message Container { + // Name of the container specified as a DNS_LABEL. + // Each container in a pod must have a unique name (DNS_LABEL). + // Cannot be updated. + optional string name = 1; + + // Docker image name. + // More info: http://kubernetes.io/docs/user-guide/images + // +optional + optional string image = 2; + + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands + // +optional + repeated string command = 3; + + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands + // +optional + repeated string args = 4; + + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + optional string workingDir = 5; + + // List of ports to expose from the container. Exposing a port here gives + // the system additional information about the network connections a + // container uses, but is primarily informational. Not specifying a port here + // DOES NOT prevent that port from being exposed. Any port which is + // listening on the default "0.0.0.0" address inside a container will be + // accessible from the network. + // Cannot be updated. + // +optional + repeated ContainerPort ports = 6; + + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + repeated EnvFromSource envFrom = 19; + + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + repeated EnvVar env = 7; + + // Compute Resources required by this container. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources + // +optional + optional ResourceRequirements resources = 8; + + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + repeated VolumeMount volumeMounts = 9; + + // Periodic probe of container liveness. 
+ // Container will be restarted if the probe fails. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // +optional + optional Probe livenessProbe = 10; + + // Periodic probe of container service readiness. + // Container will be removed from service endpoints if the probe fails. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // +optional + optional Probe readinessProbe = 11; + + // Actions that the management system should take in response to container lifecycle events. + // Cannot be updated. + // +optional + optional Lifecycle lifecycle = 12; + + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + optional string terminationMessagePath = 13; + + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + optional string terminationMessagePolicy = 20; + + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/images#updating-images + // +optional + optional string imagePullPolicy = 14; + + // Security options the pod should run with. + // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md + // +optional + optional SecurityContext securityContext = 15; + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + optional bool stdin = 16; + + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container processes that reads from stdin will never receive an EOF. + // Default is false + // +optional + optional bool stdinOnce = 17; + + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. + // +optional + optional bool tty = 18; +} + +// Describe a container image +message ContainerImage { + // Names by which this image is known. + // e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] + repeated string names = 1; + + // The size of the image in bytes. 
+ // +optional + optional int64 sizeBytes = 2; +} + +// ContainerPort represents a network port in a single container. +message ContainerPort { + // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + // named port in a pod must have a unique name. Name for the port that can be + // referred to by services. + // +optional + optional string name = 1; + + // Number of port to expose on the host. + // If specified, this must be a valid port number, 0 < x < 65536. + // If HostNetwork is specified, this must match ContainerPort. + // Most containers do not need this. + // +optional + optional int32 hostPort = 2; + + // Number of port to expose on the pod's IP address. + // This must be a valid port number, 0 < x < 65536. + optional int32 containerPort = 3; + + // Protocol for port. Must be UDP or TCP. + // Defaults to "TCP". + // +optional + optional string protocol = 4; + + // What host IP to bind the external port to. + // +optional + optional string hostIP = 5; +} + +// ContainerState holds a possible state of container. +// Only one of its members may be specified. +// If none of them is specified, the default one is ContainerStateWaiting. +message ContainerState { + // Details about a waiting container + // +optional + optional ContainerStateWaiting waiting = 1; + + // Details about a running container + // +optional + optional ContainerStateRunning running = 2; + + // Details about a terminated container + // +optional + optional ContainerStateTerminated terminated = 3; +} + +// ContainerStateRunning is a running state of a container. +message ContainerStateRunning { + // Time at which the container was last (re-)started + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 1; +} + +// ContainerStateTerminated is a terminated state of a container. +message ContainerStateTerminated { + // Exit status from the last termination of the container + optional int32 exitCode = 1; + + // Signal from the last termination of the container + // +optional + optional int32 signal = 2; + + // (brief) reason from the last termination of the container + // +optional + optional string reason = 3; + + // Message regarding the last termination of the container + // +optional + optional string message = 4; + + // Time at which previous execution of the container started + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 5; + + // Time at which the container last terminated + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 6; + + // Container's ID in the format 'docker://<container_id>' + // +optional + optional string containerID = 7; +} + +// ContainerStateWaiting is a waiting state of a container. +message ContainerStateWaiting { + // (brief) reason the container is not yet running. + // +optional + optional string reason = 1; + + // Message regarding why the container is not yet running. + // +optional + optional string message = 2; +} + +// ContainerStatus contains details for the current status of this container. +message ContainerStatus { + // This must be a DNS_LABEL. Each container in a pod must have a unique name. + // Cannot be updated. + optional string name = 1; + + // Details about the container's current condition. + // +optional + optional ContainerState state = 2; + + // Details about the container's last termination condition. + // +optional + optional ContainerState lastState = 3; + + // Specifies whether the container has passed its readiness probe. 
+ optional bool ready = 4; + + // The number of times the container has been restarted, currently based on + // the number of dead containers that have not yet been removed. + // Note that this is calculated from dead containers. But those containers are subject to + // garbage collection. This value will get capped at 5 by GC. + optional int32 restartCount = 5; + + // The image the container is running. + // More info: http://kubernetes.io/docs/user-guide/images + // TODO(dchen1107): Which image the container is running with? + optional string image = 6; + + // ImageID of the container's image. + optional string imageID = 7; + + // Container's ID in the format 'docker://<container_id>'. + // More info: http://kubernetes.io/docs/user-guide/container-environment#container-information + // +optional + optional string containerID = 8; +} + +// DaemonEndpoint contains information about a single Daemon endpoint. +message DaemonEndpoint { + // Port number of the given endpoint. + optional int32 Port = 1; +} + +// DeleteOptions may be provided when deleting an API object +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +// +k8s:openapi-gen=false +message DeleteOptions { + // The duration in seconds before the object should be deleted. Value must be non-negative integer. + // The value zero indicates delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + // Defaults to a per object value if not specified. zero means delete immediately. + // +optional + optional int64 gracePeriodSeconds = 1; + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. + // +optional + optional Preconditions preconditions = 2; + + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. + // +optional + optional bool orphanDependents = 3; + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // +optional + optional string propagationPolicy = 4; +} + +// Represents downward API info for projecting into a projected volume. +// Note that this is identical to a downwardAPI volume source without the default +// mode. +message DownwardAPIProjection { + // Items is a list of DownwardAPIVolume file + // +optional + repeated DownwardAPIVolumeFile items = 1; +} + +// DownwardAPIVolumeFile represents information to create the file containing the pod field +message DownwardAPIVolumeFile { + // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' + optional string path = 1; + + // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. + // +optional + optional ObjectFieldSelector fieldRef = 2; + + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+ // +optional + optional ResourceFieldSelector resourceFieldRef = 3; + + // Optional: mode bits to use on this file, must be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + optional int32 mode = 4; +} + +// DownwardAPIVolumeSource represents a volume containing downward API info. +// Downward API volumes support ownership management and SELinux relabeling. +message DownwardAPIVolumeSource { + // Items is a list of downward API volume file + // +optional + repeated DownwardAPIVolumeFile items = 1; + + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + optional int32 defaultMode = 2; +} + +// Represents an empty directory for a pod. +// Empty directory volumes support ownership management and SELinux relabeling. +message EmptyDirVolumeSource { + // What type of storage medium should back this directory. + // The default is "" which means to use the node's default medium. + // Must be an empty string (default) or Memory. + // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // +optional + optional string medium = 1; +} + +// EndpointAddress is a tuple that describes single IP address. +message EndpointAddress { + // The IP of this endpoint. + // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), + // or link-local multicast ((224.0.0.0/24). + // IPv6 is also accepted but not fully supported on all platforms. Also, certain + // kubernetes components, like kube-proxy, are not IPv6 ready. + // TODO: This should allow hostname or IP, See #4447. + optional string ip = 1; + + // The Hostname of this endpoint + // +optional + optional string hostname = 3; + + // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. + // +optional + optional string nodeName = 4; + + // Reference to object providing the endpoint. + // +optional + optional ObjectReference targetRef = 2; +} + +// EndpointPort is a tuple that describes a single port. +message EndpointPort { + // The name of this port (corresponds to ServicePort.Name). + // Must be a DNS_LABEL. + // Optional only if one port is defined. + // +optional + optional string name = 1; + + // The port number of the endpoint. + optional int32 port = 2; + + // The IP protocol for this port. + // Must be UDP or TCP. + // Default is TCP. + // +optional + optional string protocol = 3; +} + +// EndpointSubset is a group of addresses with a common set of ports. The +// expanded set of endpoints is the Cartesian product of Addresses x Ports. +// For example, given: +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// } +// The resulting set of endpoints can be viewed as: +// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], +// b: [ 10.10.1.1:309, 10.10.2.2:309 ] +message EndpointSubset { + // IP addresses which offer the related ports that are marked as ready. These endpoints + // should be considered safe for load balancers and clients to utilize. 
+ // +optional + repeated EndpointAddress addresses = 1; + + // IP addresses which offer the related ports but are not currently marked as ready + // because they have not yet finished starting, have recently failed a readiness check, + // or have recently failed a liveness check. + // +optional + repeated EndpointAddress notReadyAddresses = 2; + + // Port numbers available on the related IP addresses. + // +optional + repeated EndpointPort ports = 3; +} + +// Endpoints is a collection of endpoints that implement the actual service. Example: +// Name: "mysvc", +// Subsets: [ +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// }, +// { +// Addresses: [{"ip": "10.10.3.3"}], +// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] +// }, +// ] +message Endpoints { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The set of all endpoints is the union of all subsets. Addresses are placed into + // subsets according to the IPs they share. A single address with multiple ports, + // some of which are ready and some of which are not (because they come from + // different containers) will result in the address being displayed in different + // subsets for the different ports. No address will appear in both Addresses and + // NotReadyAddresses in the same subset. + // Sets of addresses and ports that comprise a service. + repeated EndpointSubset subsets = 2; +} + +// EndpointsList is a list of endpoints. +message EndpointsList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of endpoints. + repeated Endpoints items = 2; +} + +// EnvFromSource represents the source of a set of ConfigMaps +message EnvFromSource { + // An optional identifer to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + // +optional + optional string prefix = 1; + + // The ConfigMap to select from + // +optional + optional ConfigMapEnvSource configMapRef = 2; + + // The Secret to select from + // +optional + optional SecretEnvSource secretRef = 3; +} + +// EnvVar represents an environment variable present in a Container. +message EnvVar { + // Name of the environment variable. Must be a C_IDENTIFIER. + optional string name = 1; + + // Variable references $(VAR_NAME) are expanded + // using the previous defined environment variables in the container and + // any service environment variables. If a variable cannot be resolved, + // the reference in the input string will be unchanged. The $(VAR_NAME) + // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped + // references will never be expanded, regardless of whether the variable + // exists or not. + // Defaults to "". + // +optional + optional string value = 2; + + // Source for the environment variable's value. Cannot be used if value is not empty. + // +optional + optional EnvVarSource valueFrom = 3; +} + +// EnvVarSource represents a source for the value of an EnvVar. +message EnvVarSource { + // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, + // spec.nodeName, spec.serviceAccountName, status.podIP. 
+ // +optional + optional ObjectFieldSelector fieldRef = 1; + + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // +optional + optional ResourceFieldSelector resourceFieldRef = 2; + + // Selects a key of a ConfigMap. + // +optional + optional ConfigMapKeySelector configMapKeyRef = 3; + + // Selects a key of a secret in the pod's namespace + // +optional + optional SecretKeySelector secretKeyRef = 4; +} + +// Event is a report of an event somewhere in the cluster. +// TODO: Decide whether to store these separately or with the object they apply to. +message Event { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The object that this event is about. + optional ObjectReference involvedObject = 2; + + // This should be a short, machine understandable string that gives the reason + // for the transition into the object's current status. + // TODO: provide exact specification for format. + // +optional + optional string reason = 3; + + // A human-readable description of the status of this operation. + // TODO: decide on maximum length. + // +optional + optional string message = 4; + + // The component reporting this event. Should be a short machine understandable string. + // +optional + optional EventSource source = 5; + + // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time firstTimestamp = 6; + + // The time at which the most recent occurrence of this event was recorded. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTimestamp = 7; + + // The number of times this event has occurred. + // +optional + optional int32 count = 8; + + // Type of this event (Normal, Warning), new types could be added in the future + // +optional + optional string type = 9; +} + +// EventList is a list of events. +message EventList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of events + repeated Event items = 2; +} + +// EventSource contains information for an event. +message EventSource { + // Component from which the event is generated. + // +optional + optional string component = 1; + + // Node name on which the event is generated. + // +optional + optional string host = 2; +} + +// ExecAction describes a "run in container" action. +message ExecAction { + // Command is the command line to execute inside the container, the working directory for the + // command is root ('/') in the container's filesystem. The command is simply exec'd, it is + // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + // a shell, you need to explicitly call out to that shell. + // Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + // +optional + repeated string command = 1; +} + +// Represents a Fibre Channel volume. +// Fibre Channel volumes can only be mounted as read/write once. +// Fibre Channel volumes support ownership management and SELinux relabeling. 
+message FCVolumeSource { + // Required: FC target worldwide names (WWNs) + repeated string targetWWNs = 1; + + // Required: FC target lun number + optional int32 lun = 2; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 3; + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 4; +} + +// FlexVolume represents a generic volume resource that is +// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. +message FlexVolumeSource { + // Driver is the name of the driver to use for this volume. + optional string driver = 1; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + // +optional + optional string fsType = 2; + + // Optional: SecretRef is reference to the secret object containing + // sensitive information to pass to the plugin scripts. This may be + // empty if no secret object is specified. If the secret object + // contains more than one secret, all secrets are passed to the plugin + // scripts. + // +optional + optional LocalObjectReference secretRef = 3; + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 4; + + // Optional: Extra command options if any. + // +optional + map<string, string> options = 5; +} + +// Represents a Flocker volume mounted by the Flocker agent. +// One and only one of datasetName and datasetUUID should be set. +// Flocker volumes do not support ownership management or SELinux relabeling. +message FlockerVolumeSource { + // Name of the dataset stored as metadata -> name on the dataset for Flocker + // should be considered as deprecated + // +optional + optional string datasetName = 1; + + // UUID of the dataset. This is unique identifier of a Flocker dataset + // +optional + optional string datasetUUID = 2; +} + +// Represents a Persistent Disk resource in Google Compute Engine. +// +// A GCE PD must exist before mounting to a container. The disk must +// also be in the same GCE project and zone as the kubelet. A GCE PD +// can only be mounted as read/write once or read-only many times. GCE +// PDs support ownership management and SELinux relabeling. +message GCEPersistentDiskVolumeSource { + // Unique name of the PD resource in GCE. Used to identify the disk in GCE. + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + optional string pdName = 1; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 2; + + // The partition in the volume that you want to mount. + // If omitted, the default is to mount by volume name. + // Examples: For volume /dev/sda1, you specify the partition as "1". 
+ // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // +optional + optional int32 partition = 3; + + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // +optional + optional bool readOnly = 4; +} + +// Represents a volume that is populated with the contents of a git repository. +// Git repo volumes do not support ownership management. +// Git repo volumes support SELinux relabeling. +message GitRepoVolumeSource { + // Repository URL + optional string repository = 1; + + // Commit hash for the specified revision. + // +optional + optional string revision = 2; + + // Target directory name. + // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + // git repository. Otherwise, if specified, the volume will contain the git repository in + // the subdirectory with the given name. + // +optional + optional string directory = 3; +} + +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +message GlusterfsVolumeSource { + // EndpointsName is the endpoint name that details Glusterfs topology. + // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + optional string endpoints = 1; + + // Path is the Glusterfs volume path. + // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + optional string path = 2; + + // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // Defaults to false. + // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + optional bool readOnly = 3; +} + +// HTTPGetAction describes an action based on HTTP Get requests. +message HTTPGetAction { + // Path to access on the HTTP server. + // +optional + optional string path = 1; + + // Name or number of the port to access on the container. + // Number must be in the range 1 to 65535. + // Name must be an IANA_SVC_NAME. + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; + + // Host name to connect to, defaults to the pod IP. You probably want to set + // "Host" in httpHeaders instead. + // +optional + optional string host = 3; + + // Scheme to use for connecting to the host. + // Defaults to HTTP. + // +optional + optional string scheme = 4; + + // Custom headers to set in the request. HTTP allows repeated headers. + // +optional + repeated HTTPHeader httpHeaders = 5; +} + +// HTTPHeader describes a custom header to be used in HTTP probes +message HTTPHeader { + // The header field name + optional string name = 1; + + // The header field value + optional string value = 2; +} + +// Handler defines a specific action that should be taken +// TODO: pass structured data to these actions, and document that data here. +message Handler { + // One and only one of the following should be specified. + // Exec specifies the action to take. + // +optional + optional ExecAction exec = 1; + + // HTTPGet specifies the http request to perform. + // +optional + optional HTTPGetAction httpGet = 2; + + // TCPSocket specifies an action involving a TCP port. 
+ // TCP hooks not yet supported + // TODO: implement a realistic TCP lifecycle hook + // +optional + optional TCPSocketAction tcpSocket = 3; +} + +// Represents a host path mapped into a pod. +// Host path volumes do not support ownership management or SELinux relabeling. +message HostPathVolumeSource { + // Path of the directory on the host. + // More info: http://kubernetes.io/docs/user-guide/volumes#hostpath + optional string path = 1; +} + +// Represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +message ISCSIVolumeSource { + // iSCSI target portal. The portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + optional string targetPortal = 1; + + // Target iSCSI Qualified Name. + optional string iqn = 2; + + // iSCSI target lun number. + optional int32 lun = 3; + + // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // +optional + optional string iscsiInterface = 4; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: http://kubernetes.io/docs/user-guide/volumes#iscsi + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 5; + + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // +optional + optional bool readOnly = 6; + + // iSCSI target portal List. The portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + // +optional + repeated string portals = 7; +} + +// Maps a string key to a path within a volume. +message KeyToPath { + // The key to project. + optional string key = 1; + + // The relative path of the file to map the key to. + // May not be an absolute path. + // May not contain the path element '..'. + // May not start with the string '..'. + optional string path = 2; + + // Optional: mode bits to use on this file, must be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + optional int32 mode = 3; +} + +// Lifecycle describes actions that the management system should take in response to container lifecycle +// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks +// until the action is complete, unless the container process fails, in which case the handler is aborted. +message Lifecycle { + // PostStart is called immediately after a container is created. If the handler fails, + // the container is terminated and restarted according to its restart policy. + // Other management of the container blocks until the hook completes. + // More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details + // +optional + optional Handler postStart = 1; + + // PreStop is called immediately before a container is terminated. + // The container is terminated after the handler completes. + // The reason for termination is passed to the handler. + // Regardless of the outcome of the handler, the container is eventually terminated. 
+ // Other management of the container blocks until the hook completes. + // More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details + // +optional + optional Handler preStop = 2; +} + +// LimitRange sets resource usage limits for each kind of resource in a Namespace. +message LimitRange { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the limits enforced. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional LimitRangeSpec spec = 2; +} + +// LimitRangeItem defines a min/max usage limit for any resource that matches on kind. +message LimitRangeItem { + // Type of resource that this limit applies to. + // +optional + optional string type = 1; + + // Max usage constraints on this kind by resource name. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> max = 2; + + // Min usage constraints on this kind by resource name. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> min = 3; + + // Default resource requirement limit value by resource name if resource limit is omitted. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> default = 4; + + // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> defaultRequest = 5; + + // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> maxLimitRequestRatio = 6; +} + +// LimitRangeList is a list of LimitRange items. +message LimitRangeList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of LimitRange objects. + // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_limit_range.md + repeated LimitRange items = 2; +} + +// LimitRangeSpec defines a min/max usage limit for resources that match on kind. +message LimitRangeSpec { + // Limits is the list of LimitRangeItem objects that are enforced. + repeated LimitRangeItem limits = 1; +} + +// List holds a list of objects, which may not be known by the server. +message List { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of objects + repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; +} + +// ListOptions is the query options to a standard REST list call. +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +// +k8s:openapi-gen=false +message ListOptions { + // A selector to restrict the list of returned objects by their labels. + // Defaults to everything. + // +optional + optional string labelSelector = 1; + + // A selector to restrict the list of returned objects by their fields. + // Defaults to everything. 
+ // +optional + optional string fieldSelector = 2; + + // Watch for changes to the described resources and return them as a stream of + // add, update, and remove notifications. Specify resourceVersion. + // +optional + optional bool watch = 3; + + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + // +optional + optional string resourceVersion = 4; + + // Timeout for the list/watch call. + // +optional + optional int64 timeoutSeconds = 5; +} + +// LoadBalancerIngress represents the status of a load-balancer ingress point: +// traffic intended for the service should be sent to an ingress point. +message LoadBalancerIngress { + // IP is set for load-balancer ingress points that are IP based + // (typically GCE or OpenStack load-balancers) + // +optional + optional string ip = 1; + + // Hostname is set for load-balancer ingress points that are DNS based + // (typically AWS load-balancers) + // +optional + optional string hostname = 2; +} + +// LoadBalancerStatus represents the status of a load-balancer. +message LoadBalancerStatus { + // Ingress is a list containing ingress points for the load-balancer. + // Traffic intended for the service should be sent to these ingress points. + // +optional + repeated LoadBalancerIngress ingress = 1; +} + +// LocalObjectReference contains enough information to let you locate the +// referenced object inside the same namespace. +message LocalObjectReference { + // Name of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // TODO: Add other useful fields. apiVersion, kind, uid? + // +optional + optional string name = 1; +} + +// Represents an NFS mount that lasts the lifetime of a pod. +// NFS volumes do not support ownership management or SELinux relabeling. +message NFSVolumeSource { + // Server is the hostname or IP address of the NFS server. + // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + optional string server = 1; + + // Path that is exported by the NFS server. + // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + optional string path = 2; + + // ReadOnly here will force + // the NFS export to be mounted with read-only permissions. + // Defaults to false. + // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // +optional + optional bool readOnly = 3; +} + +// Namespace provides a scope for Names. +// Use of multiple namespaces is optional. +message Namespace { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the behavior of the Namespace. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional NamespaceSpec spec = 2; + + // Status describes the current status of a Namespace. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional NamespaceStatus status = 3; +} + +// NamespaceList is a list of Namespaces. +message NamespaceList { + // Standard list metadata. 
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Namespace objects in the list. + // More info: http://kubernetes.io/docs/user-guide/namespaces + repeated Namespace items = 2; +} + +// NamespaceSpec describes the attributes on a Namespace. +message NamespaceSpec { + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. + // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers + // +optional + repeated string finalizers = 1; +} + +// NamespaceStatus is information about the current status of a Namespace. +message NamespaceStatus { + // Phase is the current lifecycle phase of the namespace. + // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#phases + // +optional + optional string phase = 1; +} + +// Node is a worker node in Kubernetes. +// Each node will have a unique identifier in the cache (i.e. in etcd). +message Node { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the behavior of a node. + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional NodeSpec spec = 2; + + // Most recently observed status of the node. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional NodeStatus status = 3; +} + +// NodeAddress contains information for the node's address. +message NodeAddress { + // Node address type, one of Hostname, ExternalIP or InternalIP. + optional string type = 1; + + // The node address. + optional string address = 2; +} + +// Node affinity is a group of node affinity scheduling rules. +message NodeAffinity { + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // may or may not try to eventually evict the pod from its node. + // +optional + optional NodeSelector requiredDuringSchedulingIgnoredDuringExecution = 1; + + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node matches the corresponding matchExpressions; the + // node(s) with the highest sum are the most preferred. + // +optional + repeated PreferredSchedulingTerm preferredDuringSchedulingIgnoredDuringExecution = 2; +} + +// NodeCondition contains condition information for a node. +message NodeCondition { + // Type of node condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time we got an update on a given condition. 
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastHeartbeatTime = 3; + + // Last time the condition transit from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // (brief) reason for the condition's last transition. + // +optional + optional string reason = 5; + + // Human readable message indicating details about last transition. + // +optional + optional string message = 6; +} + +// NodeDaemonEndpoints lists ports opened by daemons running on the Node. +message NodeDaemonEndpoints { + // Endpoint on which Kubelet is listening. + // +optional + optional DaemonEndpoint kubeletEndpoint = 1; +} + +// NodeList is the whole list of all Nodes which have been registered with master. +message NodeList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of nodes + repeated Node items = 2; +} + +// NodeProxyOptions is the query options to a Node's proxy call. +message NodeProxyOptions { + // Path is the URL path to use for the current proxy request to node. + // +optional + optional string path = 1; +} + +// NodeResources is an object for conveying resource information about a node. +// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. +message NodeResources { + // Capacity represents the available resources of a node + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1; +} + +// A node selector represents the union of the results of one or more label queries +// over a set of nodes; that is, it represents the OR of the selectors represented +// by the node selector terms. +message NodeSelector { + // Required. A list of node selector terms. The terms are ORed. + repeated NodeSelectorTerm nodeSelectorTerms = 1; +} + +// A node selector requirement is a selector that contains values, a key, and an operator +// that relates the key and values. +message NodeSelectorRequirement { + // The label key that the selector applies to. + optional string key = 1; + + // Represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + optional string operator = 2; + + // An array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. If the operator is Gt or Lt, the values + // array must have a single element, which will be interpreted as an integer. + // This array is replaced during a strategic merge patch. + // +optional + repeated string values = 3; +} + +// A null or empty node selector term matches no objects. +message NodeSelectorTerm { + // Required. A list of node selector requirements. The requirements are ANDed. + repeated NodeSelectorRequirement matchExpressions = 1; +} + +// NodeSpec describes the attributes that a node is created with. +message NodeSpec { + // PodCIDR represents the pod IP range assigned to the node. + // +optional + optional string podCIDR = 1; + + // External ID of the node assigned by some machine database (e.g. a cloud provider). + // Deprecated. 
+ // +optional + optional string externalID = 2; + + // ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID> + // +optional + optional string providerID = 3; + + // Unschedulable controls node schedulability of new pods. By default, node is schedulable. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration + // +optional + optional bool unschedulable = 4; + + // If specified, the node's taints. + // +optional + repeated Taint taints = 5; +} + +// NodeStatus is information about the current status of a node. +message NodeStatus { + // Capacity represents the total resources of a node. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity for more details. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1; + + // Allocatable represents the resources of a node that are available for scheduling. + // Defaults to Capacity. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> allocatable = 2; + + // NodePhase is the recently observed lifecycle phase of the node. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-phase + // The field is never populated, and now is deprecated. + // +optional + optional string phase = 3; + + // Conditions is an array of current observed node conditions. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-condition + // +optional + repeated NodeCondition conditions = 4; + + // List of addresses reachable to the node. + // Queried from cloud provider, if available. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-addresses + // +optional + repeated NodeAddress addresses = 5; + + // Endpoints of daemons running on the Node. + // +optional + optional NodeDaemonEndpoints daemonEndpoints = 6; + + // Set of ids/uuids to uniquely identify the node. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info + // +optional + optional NodeSystemInfo nodeInfo = 7; + + // List of container images on this node + // +optional + repeated ContainerImage images = 8; + + // List of attachable volumes in use (mounted) by the node. + // +optional + repeated string volumesInUse = 9; + + // List of volumes that are attached to the node. + // +optional + repeated AttachedVolume volumesAttached = 10; +} + +// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. +message NodeSystemInfo { + // MachineID reported by the node. For unique machine identification + // in the cluster this field is prefered. Learn more from man(5) + // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html + optional string machineID = 1; + + // SystemUUID reported by the node. For unique machine identification + // MachineID is prefered. This field is specific to Red Hat hosts + // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html + optional string systemUUID = 2; + + // Boot ID reported by the node. + optional string bootID = 3; + + // Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64). + optional string kernelVersion = 4; + + // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). + optional string osImage = 5; + + // ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0). + optional string containerRuntimeVersion = 6; + + // Kubelet Version reported by the node. 
+ optional string kubeletVersion = 7; + + // KubeProxy Version reported by the node. + optional string kubeProxyVersion = 8; + + // The Operating System reported by the node + optional string operatingSystem = 9; + + // The Architecture reported by the node + optional string architecture = 10; +} + +// ObjectFieldSelector selects an APIVersioned field of an object. +message ObjectFieldSelector { + // Version of the schema the FieldPath is written in terms of, defaults to "v1". + // +optional + optional string apiVersion = 1; + + // Path of the field to select in the specified API version. + optional string fieldPath = 2; +} + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. +// +k8s:openapi-gen=false +message ObjectMeta { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + optional string name = 1; + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // from the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. + // + // If this field is specified and the generated name exists, the server will + // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // + // Applied only if Name is not specified. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency + // +optional + optional string generateName = 2; + + // Namespace defines the space within which each name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // + // Must be a DNS_LABEL. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + optional string namespace = 3; + + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // +optional + optional string selfLink = 4; + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // + // Populated by the system. + // Read-only. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + optional string uid = 5; + + // An opaque value that represents the internal version of this object that can + // be used by clients to determine when objects have changed.
May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and pass them unmodified back to the server. + // They may only be valid for a particular resource or set of resources. + // + // Populated by the system. + // Read-only. + // Value must be treated as opaque by clients. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + optional string resourceVersion = 6; + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. Read-only. + // +optional + optional int64 generation = 7; + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. + // + // Populated by the system. + // Read-only. + // Null for lists. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time creationTimestamp = 8; + + // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This + // field is set by the server when a graceful deletion is requested by the user, and is not + // directly settable by a client. The resource is expected to be deleted (no longer visible + // from resource lists, and not reachable by name) after the time in this field. Once set, + // this value may not be unset or be set further into the future, although it may be shortened + // or the resource may be deleted prior to this time. For example, a user may request that + // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination + // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard + // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the + // API. In the presence of network partitions, this object may still exist after this + // timestamp, until an administrator or automated process can determine the resource is + // fully terminated. + // If not set, graceful deletion of the object has not been requested. + // + // Populated by the system when a graceful deletion is requested. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deletionTimestamp = 9; + + // Number of seconds allowed for this object to gracefully terminate before + // it will be removed from the system. Only set when deletionTimestamp is also set. + // May only be shortened. + // Read-only. + // +optional + optional int64 deletionGracePeriodSeconds = 10; + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: http://kubernetes.io/docs/user-guide/labels + // +optional + map<string, string> labels = 11; + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects.
+ // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + map<string, string> annotations = 12; + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + repeated k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference ownerReferences = 13; + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + // +optional + repeated string finalizers = 14; + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + // +optional + optional string clusterName = 15; +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +message ObjectReference { + // Kind of the referent. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional string kind = 1; + + // Namespace of the referent. + // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + optional string namespace = 2; + + // Name of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + optional string name = 3; + + // UID of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + optional string uid = 4; + + // API version of the referent. + // +optional + optional string apiVersion = 5; + + // Specific resourceVersion to which this reference is made, if any. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + optional string resourceVersion = 6; + + // If referring to a piece of an object instead of an entire object, this string + // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + // For example, if the object reference is to a container within a pod, this would take on a value like: + // "spec.containers{name}" (where "name" refers to the name of the container that triggered + // the event) or if no container name is specified "spec.containers[2]" (container with + // index 2 in this pod). This syntax is chosen only to have some well-defined way of + // referencing a part of an object. + // TODO: this design is not final and this field is subject to change in the future. + // +optional + optional string fieldPath = 7; +} + +// PersistentVolume (PV) is a storage resource provisioned by an administrator. +// It is analogous to a node. +// More info: http://kubernetes.io/docs/user-guide/persistent-volumes +message PersistentVolume { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines a specification of a persistent volume owned by the cluster. + // Provisioned by an administrator. 
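The ObjectMeta fields above (name/generateName, namespace, labels, annotations) are the ones most client code touches. A minimal sketch using the apimachinery meta/v1 type that these comments recommend over the deprecated copy in this file; the label and annotation keys are illustrative only:

package example

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// newMeta returns metadata for an object to be created: GenerateName lets the
// server append a unique suffix, and labels/annotations are stored as-is.
func newMeta() metav1.ObjectMeta {
	return metav1.ObjectMeta{
		GenerateName: "kubewatch-",
		Namespace:    "default",
		Labels:       map[string]string{"app": "kubewatch"},
		Annotations:  map[string]string{"example.com/note": "illustrative annotation"},
	}
}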
+ // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes + // +optional + optional PersistentVolumeSpec spec = 2; + + // Status represents the current information/status for the persistent volume. + // Populated by the system. + // Read-only. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes + // +optional + optional PersistentVolumeStatus status = 3; +} + +// PersistentVolumeClaim is a user's request for and claim to a persistent volume +message PersistentVolumeClaim { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the desired characteristics of a volume requested by a pod author. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // +optional + optional PersistentVolumeClaimSpec spec = 2; + + // Status represents the current information/status of a persistent volume claim. + // Read-only. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // +optional + optional PersistentVolumeClaimStatus status = 3; +} + +// PersistentVolumeClaimList is a list of PersistentVolumeClaim items. +message PersistentVolumeClaimList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // A list of persistent volume claims. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + repeated PersistentVolumeClaim items = 2; +} + +// PersistentVolumeClaimSpec describes the common attributes of storage devices +// and allows a Source for provider-specific attributes +message PersistentVolumeClaimSpec { + // AccessModes contains the desired access modes the volume should have. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 + // +optional + repeated string accessModes = 1; + + // A label query over volumes to consider for binding. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + + // Resources represents the minimum resources the volume should have. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources + // +optional + optional ResourceRequirements resources = 2; + + // VolumeName is the binding reference to the PersistentVolume backing this claim. + // +optional + optional string volumeName = 3; + + // Name of the StorageClass required by the claim. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1 + // +optional + optional string storageClassName = 5; +} + +// PersistentVolumeClaimStatus is the current status of a persistent volume claim. +message PersistentVolumeClaimStatus { + // Phase represents the current phase of PersistentVolumeClaim. + // +optional + optional string phase = 1; + + // AccessModes contains the actual access modes the volume backing the PVC has. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 + // +optional + repeated string accessModes = 2; + + // Represents the actual resources of the underlying volume. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 3; +} + +// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. 
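A minimal sketch of a claim built from the PersistentVolumeClaimSpec fields above (access modes plus a storage request). It assumes the vendored client-go v1 and apimachinery packages; the name, namespace, and size are illustrative:

package example

import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/pkg/api/v1"
)

// newClaim requests a 1Gi ReadWriteOnce volume; the control plane binds it to
// a matching PersistentVolume and records the outcome in Status.
func newClaim() *v1.PersistentVolumeClaim {
	return &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "data", Namespace: "default"},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Gi")},
			},
		},
	}
}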
+// This volume finds the bound PV and mounts that volume for the pod. A +// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another +// type of volume that is owned by someone else (the system). +message PersistentVolumeClaimVolumeSource { + // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + optional string claimName = 1; + + // Will force the ReadOnly setting in VolumeMounts. + // Default false. + // +optional + optional bool readOnly = 2; +} + +// PersistentVolumeList is a list of PersistentVolume items. +message PersistentVolumeList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of persistent volumes. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes + repeated PersistentVolume items = 2; +} + +// PersistentVolumeSource is similar to VolumeSource but meant for the +// administrator who creates PVs. Exactly one of its members must be set. +message PersistentVolumeSource { + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. Provisioned by an admin. + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // +optional + optional GCEPersistentDiskVolumeSource gcePersistentDisk = 1; + + // AWSElasticBlockStore represents an AWS Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // +optional + optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 2; + + // HostPath represents a directory on the host. + // Provisioned by a developer or tester. + // This is useful for single-node development and testing only! + // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. + // More info: http://kubernetes.io/docs/user-guide/volumes#hostpath + // +optional + optional HostPathVolumeSource hostPath = 3; + + // Glusterfs represents a Glusterfs volume that is attached to a host and + // exposed to the pod. Provisioned by an admin. + // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // +optional + optional GlusterfsVolumeSource glusterfs = 4; + + // NFS represents an NFS mount on the host. Provisioned by an admin. + // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // +optional + optional NFSVolumeSource nfs = 5; + + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // +optional + optional RBDVolumeSource rbd = 6; + + // ISCSI represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. Provisioned by an admin. 
+ // +optional + optional ISCSIVolumeSource iscsi = 7; + + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + optional CinderVolumeSource cinder = 8; + + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + optional CephFSVolumeSource cephfs = 9; + + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + optional FCVolumeSource fc = 10; + + // Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running + // +optional + optional FlockerVolumeSource flocker = 11; + + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. This is an + // alpha feature and may change in future. + // +optional + optional FlexVolumeSource flexVolume = 12; + + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + optional AzureFileVolumeSource azureFile = 13; + + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + optional VsphereVirtualDiskVolumeSource vsphereVolume = 14; + + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + optional QuobyteVolumeSource quobyte = 15; + + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + optional AzureDiskVolumeSource azureDisk = 16; + + // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 17; + + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + optional PortworxVolumeSource portworxVolume = 18; + + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + optional ScaleIOVolumeSource scaleIO = 19; +} + +// PersistentVolumeSpec is the specification of a persistent volume. +message PersistentVolumeSpec { + // A description of the persistent volume's resources and capacity. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1; + + // The actual volume backing the persistent volume. + optional PersistentVolumeSource persistentVolumeSource = 2; + + // AccessModes contains all ways the volume can be mounted. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes + // +optional + repeated string accessModes = 3; + + // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. + // Expected to be non-nil when bound. + // claim.VolumeName is the authoritative bind between PV and PVC. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#binding + // +optional + optional ObjectReference claimRef = 4; + + // What happens to a persistent volume when released from its claim. + // Valid options are Retain (default) and Recycle. + // Recycling must be supported by the volume plugin underlying this persistent volume. 
+ // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy + // +optional + optional string persistentVolumeReclaimPolicy = 5; + + // Name of StorageClass to which this persistent volume belongs. Empty value + // means that this volume does not belong to any StorageClass. + // +optional + optional string storageClassName = 6; +} + +// PersistentVolumeStatus is the current status of a persistent volume. +message PersistentVolumeStatus { + // Phase indicates if a volume is available, bound to a claim, or released by a claim. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#phase + // +optional + optional string phase = 1; + + // A human-readable message indicating details about why the volume is in this state. + // +optional + optional string message = 2; + + // Reason is a brief CamelCase string that describes any failure and is meant + // for machine parsing and tidy display in the CLI. + // +optional + optional string reason = 3; +} + +// Represents a Photon Controller persistent disk resource. +message PhotonPersistentDiskVolumeSource { + // ID that identifies Photon Controller persistent disk + optional string pdID = 1; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + optional string fsType = 2; +} + +// Pod is a collection of containers that can run on a host. This resource is created +// by clients and scheduled onto hosts. +message Pod { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the pod. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional PodSpec spec = 2; + + // Most recently observed status of the pod. + // This data may not be up to date. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional PodStatus status = 3; +} + +// Pod affinity is a group of inter pod affinity scheduling rules. +message PodAffinity { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. 
all terms must be satisfied. + // +optional + repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1; + + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + // +optional + repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2; +} + +// Defines a set of pods (namely those matching the labelSelector +// relative to the given namespace(s)) that this pod should be +// co-located (affinity) or not co-located (anti-affinity) with, +// where co-located is defined as running on a node whose value of +// the label with key <topologyKey> matches that of any node on which +// a pod of the set of pods is running +message PodAffinityTerm { + // A label query over a set of resources, in this case pods. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1; + + // namespaces specifies which namespaces the labelSelector applies to (matches against); + // null or empty list means "this pod's namespace" + repeated string namespaces = 2; + + // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + // the labelSelector in the specified namespaces, where co-located is defined as running on a node + // whose value of the label with key topologyKey matches that of any node on which any of the + // selected pods is running. + // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" + // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); + // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. + // +optional + optional string topologyKey = 3; +} + +// Pod anti affinity is a group of inter pod anti affinity scheduling rules. +message PodAntiAffinity { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g.
due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1; + + // The scheduler will prefer to schedule pods to nodes that satisfy + // the anti-affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling anti-affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + // +optional + repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2; +} + +// PodAttachOptions is the query options to a Pod's remote attach call. +// --- +// TODO: merge w/ PodExecOptions below for stdin, stdout, etc +// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY +message PodAttachOptions { + // Stdin if true, redirects the standard input stream of the pod for this call. + // Defaults to false. + // +optional + optional bool stdin = 1; + + // Stdout if true indicates that stdout is to be redirected for the attach call. + // Defaults to true. + // +optional + optional bool stdout = 2; + + // Stderr if true indicates that stderr is to be redirected for the attach call. + // Defaults to true. + // +optional + optional bool stderr = 3; + + // TTY if true indicates that a tty will be allocated for the attach call. + // This is passed through the container runtime so the tty + // is allocated on the worker node by the container runtime. + // Defaults to false. + // +optional + optional bool tty = 4; + + // The container in which to execute the command. + // Defaults to only container if there is only one container in the pod. + // +optional + optional string container = 5; +} + +// PodCondition contains details for the current condition of this pod. +message PodCondition { + // Type is the type of the condition. + // Currently only Ready. + // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions + optional string type = 1; + + // Status is the status of the condition. + // Can be True, False, Unknown. + // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions + optional string status = 2; + + // Last time we probed the condition. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // Unique, one-word, CamelCase reason for the condition's last transition. + // +optional + optional string reason = 5; + + // Human-readable message indicating details about last transition. + // +optional + optional string message = 6; +} + +// PodExecOptions is the query options to a Pod's remote exec call. 
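The PodAffinity, PodAffinityTerm, and PodAntiAffinity messages above map onto the Affinity structs in the vendored Go types. A minimal sketch of a preferred anti-affinity rule that spreads pods carrying an illustrative app=nginx label across hostnames, assuming the vendored client-go v1 and apimachinery packages:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/pkg/api/v1"
)

// spreadByHost prefers (weight 1-100) that pods matching app=nginx do not
// share a node, using the kubernetes.io/hostname topology key.
func spreadByHost() *v1.Affinity {
	return &v1.Affinity{
		PodAntiAffinity: &v1.PodAntiAffinity{
			PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{{
				Weight: 100,
				PodAffinityTerm: v1.PodAffinityTerm{
					LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "nginx"}},
					TopologyKey:   "kubernetes.io/hostname",
				},
			}},
		},
	}
}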
+// --- +// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging +// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY +message PodExecOptions { + // Redirect the standard input stream of the pod for this call. + // Defaults to false. + // +optional + optional bool stdin = 1; + + // Redirect the standard output stream of the pod for this call. + // Defaults to true. + // +optional + optional bool stdout = 2; + + // Redirect the standard error stream of the pod for this call. + // Defaults to true. + // +optional + optional bool stderr = 3; + + // TTY if true indicates that a tty will be allocated for the exec call. + // Defaults to false. + // +optional + optional bool tty = 4; + + // Container in which to execute the command. + // Defaults to only container if there is only one container in the pod. + // +optional + optional string container = 5; + + // Command is the remote command to execute. argv array. Not executed within a shell. + repeated string command = 6; +} + +// PodList is a list of Pods. +message PodList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of pods. + // More info: http://kubernetes.io/docs/user-guide/pods + repeated Pod items = 2; +} + +// PodLogOptions is the query options for a Pod's logs REST call. +message PodLogOptions { + // The container for which to stream logs. Defaults to only container if there is one container in the pod. + // +optional + optional string container = 1; + + // Follow the log stream of the pod. Defaults to false. + // +optional + optional bool follow = 2; + + // Return previous terminated container logs. Defaults to false. + // +optional + optional bool previous = 3; + + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + // +optional + optional int64 sinceSeconds = 4; + + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5; + + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. Defaults to false. + // +optional + optional bool timestamps = 6; + + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + // +optional + optional int64 tailLines = 7; + + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + // +optional + optional int64 limitBytes = 8; +} + +// PodPortForwardOptions is the query options to a Pod's port forward call +// when using WebSockets. +// The `port` query parameter must specify the port or +// ports (comma separated) to forward over. 
+// Port forwarding over SPDY does not use these options. It requires the port +// to be passed in the `port` header as part of request. +message PodPortForwardOptions { + // List of ports to forward + // Required when using WebSockets + // +optional + repeated int32 ports = 1; +} + +// PodProxyOptions is the query options to a Pod's proxy call. +message PodProxyOptions { + // Path is the URL path to use for the current proxy request to pod. + // +optional + optional string path = 1; +} + +// PodSecurityContext holds pod-level security attributes and common container settings. +// Some fields are also present in container.securityContext. Field values of +// container.securityContext take precedence over field values of PodSecurityContext. +message PodSecurityContext { + // The SELinux context to be applied to all containers. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in SecurityContext. If set in + // both SecurityContext and PodSecurityContext, the value specified in SecurityContext + // takes precedence for that container. + // +optional + optional SELinuxOptions seLinuxOptions = 1; + + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence + // for that container. + // +optional + optional int64 runAsUser = 2; + + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional bool runAsNonRoot = 3; + + // A list of groups applied to the first process run in each container, in addition + // to the container's primary GID. If unspecified, no groups will be added to + // any container. + // +optional + repeated int64 supplementalGroups = 4; + + // A special supplemental group that applies to all containers in a pod. + // Some volume types allow the Kubelet to change the ownership of that volume + // to be owned by the pod: + // + // 1. The owning GID will be the FSGroup + // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + // 3. The permission bits are OR'd with rw-rw---- + // + // If unset, the Kubelet will not modify the ownership and permissions of any volume. + // +optional + optional int64 fsGroup = 5; +} + +// Describes the class of pods that should avoid this node. +// Exactly one field should be set. +message PodSignature { + // Reference to controller whose pods should avoid this node. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference podController = 1; +} + +// PodSpec is a description of a pod. +message PodSpec { + // List of volumes that can be mounted by containers belonging to the pod. + // More info: http://kubernetes.io/docs/user-guide/volumes + // +optional + repeated Volume volumes = 1; + + // List of initialization containers belonging to the pod. + // Init containers are executed in order prior to containers being started. 
If any + // init container fails, the pod is considered to have failed and is handled according + // to its restartPolicy. The name for an init container or normal container must be + // unique among all containers. + // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. + // The resourceRequirements of an init container are taken into account during scheduling + // by finding the highest request/limit for each resource type, and then using the max + // of that value or the sum of the normal containers. Limits are applied to init containers + // in a similar fashion. + // Init containers cannot currently be added or removed. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/containers + repeated Container initContainers = 20; + + // List of containers belonging to the pod. + // Containers cannot currently be added or removed. + // There must be at least one container in a Pod. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/containers + repeated Container containers = 2; + + // Restart policy for all containers within the pod. + // One of Always, OnFailure, Never. + // Defaults to Always. + // More info: http://kubernetes.io/docs/user-guide/pod-states#restartpolicy + // +optional + optional string restartPolicy = 3; + + // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + // Value must be a non-negative integer. The value zero indicates delete immediately. + // If this value is nil, the default grace period will be used instead. + // The grace period is the duration in seconds after the processes running in the pod are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // Defaults to 30 seconds. + // +optional + optional int64 terminationGracePeriodSeconds = 4; + + // Optional duration in seconds the pod may be active on the node relative to + // StartTime before the system will actively try to mark it failed and kill associated containers. + // Value must be a positive integer. + // +optional + optional int64 activeDeadlineSeconds = 5; + + // Set DNS policy for containers within the pod. + // One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. + // Defaults to "ClusterFirst". + // To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + // +optional + optional string dnsPolicy = 6; + + // NodeSelector is a selector which must be true for the pod to fit on a node. + // Selector which must match a node's labels for the pod to be scheduled on that node. + // More info: http://kubernetes.io/docs/user-guide/node-selection/README + // +optional + map<string, string> nodeSelector = 7; + + // ServiceAccountName is the name of the ServiceAccount to use to run this pod. + // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md + // +optional + optional string serviceAccountName = 8; + + // DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. + // Deprecated: Use serviceAccountName instead. + // +k8s:conversion-gen=false + // +optional + optional string serviceAccount = 9; + + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + // +optional + optional bool automountServiceAccountToken = 21; + + // NodeName is a request to schedule this pod onto a specific node.
If it is non-empty, + // the scheduler simply schedules this pod onto that node, assuming that it fits resource + // requirements. + // +optional + optional string nodeName = 10; + + // Host networking requested for this pod. Use the host's network namespace. + // If this option is set, the ports that will be used must be specified. + // Default to false. + // +k8s:conversion-gen=false + // +optional + optional bool hostNetwork = 11; + + // Use the host's pid namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + optional bool hostPID = 12; + + // Use the host's ipc namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + optional bool hostIPC = 13; + + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. See type description for default values of each field. + // +optional + optional PodSecurityContext securityContext = 14; + + // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + // If specified, these secrets will be passed to individual puller implementations for them to use. For example, + // in the case of docker, only DockerConfig type secrets are honored. + // More info: http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + // +optional + repeated LocalObjectReference imagePullSecrets = 15; + + // Specifies the hostname of the Pod + // If not specified, the pod's hostname will be set to a system-defined value. + // +optional + optional string hostname = 16; + + // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". + // If not specified, the pod will not have a domainname at all. + // +optional + optional string subdomain = 17; + + // If specified, the pod's scheduling constraints + // +optional + optional Affinity affinity = 18; + + // If specified, the pod will be dispatched by specified scheduler. + // If not specified, the pod will be dispatched by default scheduler. + // +optional + optional string schedulerName = 19; + + // If specified, the pod's tolerations. + // +optional + repeated Toleration tolerations = 22; +} + +// PodStatus represents information about the status of a pod. Status may trail the actual +// state of a system. +message PodStatus { + // Current condition of the pod. + // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-phase + // +optional + optional string phase = 1; + + // Current service state of pod. + // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions + // +optional + repeated PodCondition conditions = 2; + + // A human readable message indicating details about why the pod is in this condition. + // +optional + optional string message = 3; + + // A brief CamelCase message indicating details about why the pod is in this state. + // e.g. 'OutOfDisk' + // +optional + optional string reason = 4; + + // IP address of the host to which the pod is assigned. Empty if not yet scheduled. + // +optional + optional string hostIP = 5; + + // IP address allocated to the pod. Routable at least within the cluster. + // Empty if not yet allocated. + // +optional + optional string podIP = 6; + + // RFC 3339 date and time at which the object was acknowledged by the Kubelet. + // This is before the Kubelet pulled the container image(s) for the pod. 
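Tying the PodSpec fields above together, a minimal sketch of a pod with one container, the default restart policy spelled out, and a node selector. The image, labels, and selector values are illustrative, and the vendored client-go v1 and apimachinery packages are assumed:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/pkg/api/v1"
)

// newPod builds the smallest useful Pod: metadata plus a single container,
// schedulable only onto nodes labeled disktype=ssd.
func newPod() *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyAlways,
			NodeSelector:  map[string]string{"disktype": "ssd"},
			Containers:    []v1.Container{{Name: "nginx", Image: "nginx:1.11"}},
		},
	}
}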
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7; + + // The list has one entry per init container in the manifest. The most recent successful + // init container will have ready = true, the most recently started container will have + // startTime set. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses + repeated ContainerStatus initContainerStatuses = 10; + + // The list has one entry per container in the manifest. Each entry is currently the output + // of `docker inspect`. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses + // +optional + repeated ContainerStatus containerStatuses = 8; + + // The Quality of Service (QOS) classification assigned to the pod based on resource requirements + // See PodQOSClass type for available QOS classes + // More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md + // +optional + optional string qosClass = 9; +} + +// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded +message PodStatusResult { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Most recently observed status of the pod. + // This data may not be up to date. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional PodStatus status = 2; +} + +// PodTemplate describes a template for creating copies of a predefined pod. +message PodTemplate { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Template defines the pods that will be created from this pod template. + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional PodTemplateSpec template = 2; +} + +// PodTemplateList is a list of PodTemplates. +message PodTemplateList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of pod templates + repeated PodTemplate items = 2; +} + +// PodTemplateSpec describes the data a pod should have when created from a template +message PodTemplateSpec { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the pod. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional PodSpec spec = 2; +} + +// PortworxVolumeSource represents a Portworx volume resource. +message PortworxVolumeSource { + // VolumeID uniquely identifies a Portworx volume + optional string volumeID = 1; + + // FSType represents the filesystem type to mount + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + optional string fsType = 2; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. 
+ // +optional + optional bool readOnly = 3; +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +// +k8s:openapi-gen=false +message Preconditions { + // Specifies the target UID. + // +optional + optional string uid = 1; +} + +// Describes a class of pods that should avoid this node. +message PreferAvoidPodsEntry { + // The class of pods. + optional PodSignature podSignature = 1; + + // Time at which this entry was added to the list. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time evictionTime = 2; + + // (brief) reason why this entry was added to the list. + // +optional + optional string reason = 3; + + // Human readable message indicating why this entry was added to the list. + // +optional + optional string message = 4; +} + +// An empty preferred scheduling term matches all objects with implicit weight 0 +// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). +message PreferredSchedulingTerm { + // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + optional int32 weight = 1; + + // A node selector term, associated with the corresponding weight. + optional NodeSelectorTerm preference = 2; +} + +// Probe describes a health check to be performed against a container to determine whether it is +// alive or ready to receive traffic. +message Probe { + // The action taken to determine the health of a container + optional Handler handler = 1; + + // Number of seconds after the container has started before liveness probes are initiated. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // +optional + optional int32 initialDelaySeconds = 2; + + // Number of seconds after which the probe times out. + // Defaults to 1 second. Minimum value is 1. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // +optional + optional int32 timeoutSeconds = 3; + + // How often (in seconds) to perform the probe. + // Default to 10 seconds. Minimum value is 1. + // +optional + optional int32 periodSeconds = 4; + + // Minimum consecutive successes for the probe to be considered successful after having failed. + // Defaults to 1. Must be 1 for liveness. Minimum value is 1. + // +optional + optional int32 successThreshold = 5; + + // Minimum consecutive failures for the probe to be considered failed after having succeeded. + // Defaults to 3. Minimum value is 1. + // +optional + optional int32 failureThreshold = 6; +} + +// Represents a projected volume source +message ProjectedVolumeSource { + // list of volume projections + repeated VolumeProjection sources = 1; + + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + optional int32 defaultMode = 2; +} + +// Represents a Quobyte mount that lasts the lifetime of a pod. +// Quobyte volumes do not support ownership management or SELinux relabeling. 
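The Probe message above is what liveness and readiness checks are built from. A minimal sketch of an HTTP liveness probe with the documented defaults written out explicitly; the /healthz path and port are illustrative, and the vendored client-go v1 and apimachinery intstr packages are assumed:

package example

import (
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/pkg/api/v1"
)

// httpLiveness probes GET /healthz on port 8080 every 10s, restarting the
// container after 3 consecutive failures.
func httpLiveness() *v1.Probe {
	return &v1.Probe{
		Handler: v1.Handler{
			HTTPGet: &v1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080)},
		},
		InitialDelaySeconds: 10,
		PeriodSeconds:       10,
		TimeoutSeconds:      1,
		SuccessThreshold:    1,
		FailureThreshold:    3,
	}
}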
+message QuobyteVolumeSource { + // Registry represents a single or multiple Quobyte Registry services + // specified as a string as host:port pair (multiple entries are separated with commas) + // which acts as the central registry for volumes + optional string registry = 1; + + // Volume is a string that references an already created Quobyte volume by name. + optional string volume = 2; + + // ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. + // Defaults to false. + // +optional + optional bool readOnly = 3; + + // User to map volume access to + // Defaults to serviceaccount user + // +optional + optional string user = 4; + + // Group to map volume access to + // Default is no group + // +optional + optional string group = 5; +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +message RBDVolumeSource { + // A collection of Ceph monitors. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + repeated string monitors = 1; + + // The rados image name. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + optional string image = 2; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: http://kubernetes.io/docs/user-guide/volumes#rbd + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 3; + + // The rados pool name. + // Default is rbd. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it. + // +optional + optional string pool = 4; + + // The rados user name. + // Default is admin. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional string user = 5; + + // Keyring is the path to key ring for RBDUser. + // Default is /etc/ceph/keyring. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional string keyring = 6; + + // SecretRef is the name of the authentication secret for RBDUser. If provided, + // overrides keyring. + // Default is nil. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional LocalObjectReference secretRef = 7; + + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + optional bool readOnly = 8; +} + +// RangeAllocation is not a public type. +message RangeAllocation { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Range is a string that identifies the range represented by 'data'. + optional string range = 2; + + // Data is a bit array containing all allocated addresses in the previous segment. + optional bytes data = 3; +} + +// ReplicationController represents the configuration of a replication controller. +message ReplicationController { + // If the Labels of a ReplicationController are empty, they are defaulted to + // be the same as the Pod(s) that the replication controller manages.
+ // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the specification of the desired behavior of the replication controller. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional ReplicationControllerSpec spec = 2; + + // Status is the most recently observed status of the replication controller. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional ReplicationControllerStatus status = 3; +} + +// ReplicationControllerCondition describes the state of a replication controller at a certain point. +message ReplicationControllerCondition { + // Type of replication controller condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// ReplicationControllerList is a collection of replication controllers. +message ReplicationControllerList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of replication controllers. + // More info: http://kubernetes.io/docs/user-guide/replication-controller + repeated ReplicationController items = 2; +} + +// ReplicationControllerSpec is the specification of a replication controller. +message ReplicationControllerSpec { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + // +optional + optional int32 replicas = 1; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 4; + + // Selector is a label query over pods that should match the Replicas count. + // If Selector is empty, it is defaulted to the labels present on the Pod template. + // Label keys and values that must match in order to be controlled by this replication + // controller, if empty defaulted to labels on Pod template. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + map<string, string> selector = 2; + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. This takes precedence over a TemplateRef. + // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template + // +optional + optional PodTemplateSpec template = 3; +} + +// ReplicationControllerStatus represents the current status of a replication +// controller. 
+message ReplicationControllerStatus {
+  // Replicas is the most recently observed number of replicas.
+  // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller
+  optional int32 replicas = 1;
+
+  // The number of pods that have labels matching the labels of the pod template of the replication controller.
+  // +optional
+  optional int32 fullyLabeledReplicas = 2;
+
+  // The number of ready replicas for this replication controller.
+  // +optional
+  optional int32 readyReplicas = 4;
+
+  // The number of available replicas (ready for at least minReadySeconds) for this replication controller.
+  // +optional
+  optional int32 availableReplicas = 5;
+
+  // ObservedGeneration reflects the generation of the most recently observed replication controller.
+  // +optional
+  optional int64 observedGeneration = 3;
+
+  // Represents the latest available observations of a replication controller's current state.
+  // +optional
+  repeated ReplicationControllerCondition conditions = 6;
+}
+
+// ResourceFieldSelector represents container resources (cpu, memory) and their output format
+message ResourceFieldSelector {
+  // Container name: required for volumes, optional for env vars
+  // +optional
+  optional string containerName = 1;
+
+  // Required: resource to select
+  optional string resource = 2;
+
+  // Specifies the output format of the exposed resources, defaults to "1"
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity divisor = 3;
+}
+
+// ResourceQuota sets aggregate quota restrictions enforced per namespace
+message ResourceQuota {
+  // Standard object's metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the desired quota.
+  // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional ResourceQuotaSpec spec = 2;
+
+  // Status defines the actual enforced quota and its current usage.
+  // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
+  // +optional
+  optional ResourceQuotaStatus status = 3;
+}
+
+// ResourceQuotaList is a list of ResourceQuota items.
+message ResourceQuotaList {
+  // Standard list metadata.
+  // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of ResourceQuota objects.
+  // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota
+  repeated ResourceQuota items = 2;
+}
+
+// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
+message ResourceQuotaSpec {
+  // Hard is the set of desired hard limits for each named resource.
+  // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota
+  // +optional
+  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1;
+
+  // A collection of filters that must match each object tracked by a quota.
+  // If not specified, the quota matches all objects.
+  // +optional
+  repeated string scopes = 2;
+}
+
+// ResourceQuotaStatus defines the enforced hard limits and observed use.
+message ResourceQuotaStatus {
+  // Hard is the set of enforced hard limits for each named resource.
+ // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1; + + // Used is the current observed total usage of the resource in the namespace. + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> used = 2; +} + +// ResourceRequirements describes the compute resource requirements. +message ResourceRequirements { + // Limits describes the maximum amount of compute resources allowed. + // More info: http://kubernetes.io/docs/user-guide/compute-resources/ + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> limits = 1; + + // Requests describes the minimum amount of compute resources required. + // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to an implementation-defined value. + // More info: http://kubernetes.io/docs/user-guide/compute-resources/ + // +optional + map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> requests = 2; +} + +// SELinuxOptions are the labels to be applied to the container +message SELinuxOptions { + // User is a SELinux user label that applies to the container. + // +optional + optional string user = 1; + + // Role is a SELinux role label that applies to the container. + // +optional + optional string role = 2; + + // Type is a SELinux type label that applies to the container. + // +optional + optional string type = 3; + + // Level is SELinux level label that applies to the container. + // +optional + optional string level = 4; +} + +// ScaleIOVolumeSource represents a persistent ScaleIO volume +message ScaleIOVolumeSource { + // The host address of the ScaleIO API Gateway. + optional string gateway = 1; + + // The name of the storage system as configured in ScaleIO. + optional string system = 2; + + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + optional LocalObjectReference secretRef = 3; + + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + optional bool sslEnabled = 4; + + // The name of the Protection Domain for the configured storage (defaults to "default"). + // +optional + optional string protectionDomain = 5; + + // The Storage Pool associated with the protection domain (defaults to "default"). + // +optional + optional string storagePool = 6; + + // Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). + // +optional + optional string storageMode = 7; + + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. + optional string volumeName = 8; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + optional string fsType = 9; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 10; +} + +// Secret holds secret data of a certain type. The total bytes of the values in +// the Data field must be less than MaxSecretSize bytes. +message Secret { + // Standard object's metadata. 
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN + // or leading dot followed by valid DNS_SUBDOMAIN. + // The serialized form of the secret data is a base64 encoded string, + // representing the arbitrary (possibly non-string) data value here. + // Described in https://tools.ietf.org/html/rfc4648#section-4 + // +optional + map<string, bytes> data = 2; + + // stringData allows specifying non-binary secret data in string form. + // It is provided as a write-only convenience method. + // All keys and values are merged into the data field on write, overwriting any existing values. + // It is never output when reading from the API. + // +k8s:conversion-gen=false + // +optional + map<string, string> stringData = 4; + + // Used to facilitate programmatic handling of secret data. + // +optional + optional string type = 3; +} + +// SecretEnvSource selects a Secret to populate the environment +// variables with. +// +// The contents of the target Secret's Data field will represent the +// key-value pairs as environment variables. +message SecretEnvSource { + // The Secret to select from. + optional LocalObjectReference localObjectReference = 1; + + // Specify whether the Secret must be defined + // +optional + optional bool optional = 2; +} + +// SecretKeySelector selects a key of a Secret. +message SecretKeySelector { + // The name of the secret in the pod's namespace to select from. + optional LocalObjectReference localObjectReference = 1; + + // The key of the secret to select from. Must be a valid secret key. + optional string key = 2; + + // Specify whether the Secret or it's key must be defined + // +optional + optional bool optional = 3; +} + +// SecretList is a list of Secret. +message SecretList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of secret objects. + // More info: http://kubernetes.io/docs/user-guide/secrets + repeated Secret items = 2; +} + +// Adapts a secret into a projected volume. +// +// The contents of the target Secret's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names. +// Note that this is identical to a secret volume source without the default +// mode. +message SecretProjection { + optional LocalObjectReference localObjectReference = 1; + + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + repeated KeyToPath items = 2; + + // Specify whether the Secret or its key must be defined + // +optional + optional bool optional = 4; +} + +// Adapts a Secret into a volume. +// +// The contents of the target Secret's Data field will be presented in a volume +// as files using the keys in the Data field as the file names. 
+// Secret volumes support ownership management and SELinux relabeling. +message SecretVolumeSource { + // Name of the secret in the pod's namespace to use. + // More info: http://kubernetes.io/docs/user-guide/volumes#secrets + // +optional + optional string secretName = 1; + + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + repeated KeyToPath items = 2; + + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + optional int32 defaultMode = 3; + + // Specify whether the Secret or it's keys must be defined + // +optional + optional bool optional = 4; +} + +// SecurityContext holds security configuration that will be applied to a container. +// Some fields are present in both SecurityContext and PodSecurityContext. When both +// are set, the values in SecurityContext take precedence. +message SecurityContext { + // The capabilities to add/drop when running containers. + // Defaults to the default set of capabilities granted by the container runtime. + // +optional + optional Capabilities capabilities = 1; + + // Run container in privileged mode. + // Processes in privileged containers are essentially equivalent to root on the host. + // Defaults to false. + // +optional + optional bool privileged = 2; + + // The SELinux context to be applied to the container. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional SELinuxOptions seLinuxOptions = 3; + + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional int64 runAsUser = 4; + + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional bool runAsNonRoot = 5; + + // Whether this container has a read-only root filesystem. + // Default is false. + // +optional + optional bool readOnlyRootFilesystem = 6; +} + +// SerializedReference is a reference to serialized object. +message SerializedReference { + // The reference to an object in the system. 
+ // +optional + optional ObjectReference reference = 1; +} + +// Service is a named abstraction of software service (for example, mysql) consisting of local port +// (for example 3306) that the proxy listens on, and the selector that determines which pods +// will answer requests sent through the proxy. +message Service { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the behavior of a service. + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional ServiceSpec spec = 2; + + // Most recently observed status of the service. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional ServiceStatus status = 3; +} + +// ServiceAccount binds together: +// * a name, understood by users, and perhaps by peripheral systems, for an identity +// * a principal that can be authenticated and authorized +// * a set of secrets +message ServiceAccount { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. + // More info: http://kubernetes.io/docs/user-guide/secrets + // +optional + repeated ObjectReference secrets = 2; + + // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images + // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets + // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. + // More info: http://kubernetes.io/docs/user-guide/secrets#manually-specifying-an-imagepullsecret + // +optional + repeated LocalObjectReference imagePullSecrets = 3; + + // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. + // Can be overridden at the pod level. + // +optional + optional bool automountServiceAccountToken = 4; +} + +// ServiceAccountList is a list of ServiceAccount objects +message ServiceAccountList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ServiceAccounts. + // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts + repeated ServiceAccount items = 2; +} + +// ServiceList holds a list of services. +message ServiceList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of services + repeated Service items = 2; +} + +// ServicePort contains information on service's port. +message ServicePort { + // The name of this port within the service. This must be a DNS_LABEL. + // All ports within a ServiceSpec must have unique names. This maps to + // the 'Name' field in EndpointPort objects. + // Optional if only one ServicePort is defined on this service. + // +optional + optional string name = 1; + + // The IP protocol for this port. Supports "TCP" and "UDP". 
+ // Default is TCP. + // +optional + optional string protocol = 2; + + // The port that will be exposed by this service. + optional int32 port = 3; + + // Number or name of the port to access on the pods targeted by the service. + // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + // If this is a string, it will be looked up as a named port in the + // target Pod's container ports. If this is not specified, the value + // of the 'port' field is used (an identity map). + // This field is ignored for services with clusterIP=None, and should be + // omitted or set equal to the 'port' field. + // More info: http://kubernetes.io/docs/user-guide/services#defining-a-service + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4; + + // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. + // Usually assigned by the system. If specified, it will be allocated to the service + // if unused or else creation of the service will fail. + // Default is to auto-allocate a port if the ServiceType of this Service requires one. + // More info: http://kubernetes.io/docs/user-guide/services#type--nodeport + // +optional + optional int32 nodePort = 5; +} + +// ServiceProxyOptions is the query options to a Service's proxy call. +message ServiceProxyOptions { + // Path is the part of URLs that include service endpoints, suffixes, + // and parameters to use for the current proxy request to service. + // For example, the whole request URL is + // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. + // Path is _search?q=user:kimchy. + // +optional + optional string path = 1; +} + +// ServiceSpec describes the attributes that a user creates on a service. +message ServiceSpec { + // The list of ports that are exposed by this service. + // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + repeated ServicePort ports = 1; + + // Route service traffic to pods with label keys and values matching this + // selector. If empty or not present, the service is assumed to have an + // external process managing its endpoints, which Kubernetes will not + // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + // Ignored if type is ExternalName. + // More info: http://kubernetes.io/docs/user-guide/services#overview + // +optional + map<string, string> selector = 2; + + // clusterIP is the IP address of the service and is usually assigned + // randomly by the master. If an address is specified manually and is not in + // use by others, it will be allocated to the service; otherwise, creation + // of the service will fail. This field can not be changed through updates. + // Valid values are "None", empty string (""), or a valid IP address. "None" + // can be specified for headless services when proxying is not required. + // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if + // type is ExternalName. + // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + // +optional + optional string clusterIP = 3; + + // type determines how the Service is exposed. Defaults to ClusterIP. Valid + // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + // "ExternalName" maps to the specified externalName. + // "ClusterIP" allocates a cluster-internal IP address for load-balancing to + // endpoints. 
Endpoints are determined by the selector or if that is not
+  // specified, by manual construction of an Endpoints object. If clusterIP is
+  // "None", no virtual IP is allocated and the endpoints are published as a
+  // set of endpoints rather than a stable IP.
+  // "NodePort" builds on ClusterIP and allocates a port on every node which
+  // routes to the clusterIP.
+  // "LoadBalancer" builds on NodePort and creates an
+  // external load-balancer (if supported in the current cloud) which routes
+  // to the clusterIP.
+  // More info: http://kubernetes.io/docs/user-guide/services#overview
+  // +optional
+  optional string type = 4;
+
+  // externalIPs is a list of IP addresses for which nodes in the cluster
+  // will also accept traffic for this service. These IPs are not managed by
+  // Kubernetes. The user is responsible for ensuring that traffic arrives
+  // at a node with this IP. A common example is external load-balancers
+  // that are not part of the Kubernetes system. A previous form of this
+  // functionality exists as the deprecatedPublicIPs field. When using this
+  // field, callers should also clear the deprecatedPublicIPs field.
+  // +optional
+  repeated string externalIPs = 5;
+
+  // deprecatedPublicIPs is deprecated and replaced by the externalIPs field
+  // with almost the exact same semantics. This field is retained in the v1
+  // API for compatibility until at least 8/20/2016. It will be removed from
+  // any new API revisions. If both deprecatedPublicIPs *and* externalIPs are
+  // set, deprecatedPublicIPs is used.
+  // +k8s:conversion-gen=false
+  // +optional
+  repeated string deprecatedPublicIPs = 6;
+
+  // Supports "ClientIP" and "None". Used to maintain session affinity.
+  // Enable client IP based session affinity.
+  // Must be ClientIP or None.
+  // Defaults to None.
+  // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies
+  // +optional
+  optional string sessionAffinity = 7;
+
+  // Only applies to Service Type: LoadBalancer
+  // LoadBalancer will get created with the IP specified in this field.
+  // This feature depends on whether the underlying cloud-provider supports specifying
+  // the loadBalancerIP when a load balancer is created.
+  // This field will be ignored if the cloud-provider does not support the feature.
+  // +optional
+  optional string loadBalancerIP = 8;
+
+  // If specified and supported by the platform, traffic through the cloud-provider
+  // load-balancer will be restricted to the specified client IPs. This field will be
+  // ignored if the cloud-provider does not support the feature.
+  // More info: http://kubernetes.io/docs/user-guide/services-firewalls
+  // +optional
+  repeated string loadBalancerSourceRanges = 9;
+
+  // externalName is the external reference that kubedns or equivalent will
+  // return as a CNAME record for this service. No proxying will be involved.
+  // Must be a valid DNS name and requires Type to be ExternalName.
+  // +optional
+  optional string externalName = 10;
+}
+
+// ServiceStatus represents the current status of a service.
+message ServiceStatus {
+  // LoadBalancer contains the current status of the load-balancer,
+  // if one is present.
+  // +optional
+  optional LoadBalancerStatus loadBalancer = 1;
+}
+
+message Sysctl {
+  optional string name = 1;
+
+  optional string value = 2;
+}
+
+// TCPSocketAction describes an action based on opening a socket
+message TCPSocketAction {
+  // Number or name of the port to access on the container.
+  // Number must be in the range 1 to 65535.
+  // Name must be an IANA_SVC_NAME.
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 1;
+}
+
+// The node this Taint is attached to has the effect "effect" on
+// any pod that does not tolerate the Taint.
+message Taint {
+  // Required. The taint key to be applied to a node.
+  optional string key = 1;
+
+  // Required. The taint value corresponding to the taint key.
+  // +optional
+  optional string value = 2;
+
+  // Required. The effect of the taint on pods
+  // that do not tolerate the taint.
+  // Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
+  optional string effect = 3;
+
+  // TimeAdded represents the time at which the taint was added.
+  // It is only written for NoExecute taints.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4;
+}
+
+// The pod this Toleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
+message Toleration {
+  // Key is the taint key that the toleration applies to. Empty means match all taint keys.
+  // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+  // +optional
+  optional string key = 1;
+
+  // Operator represents a key's relationship to the value.
+  // Valid operators are Exists and Equal. Defaults to Equal.
+  // Exists is equivalent to wildcard for value, so that a pod can
+  // tolerate all taints of a particular category.
+  // +optional
+  optional string operator = 2;
+
+  // Value is the taint value the toleration matches to.
+  // If the operator is Exists, the value should be empty, otherwise just a regular string.
+  // +optional
+  optional string value = 3;
+
+  // Effect indicates the taint effect to match. Empty means match all taint effects.
+  // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+  // +optional
+  optional string effect = 4;
+
+  // TolerationSeconds represents the period of time the toleration (which must be
+  // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+  // it is not set, which means tolerate the taint forever (do not evict). Zero and
+  // negative values will be treated as 0 (evict immediately) by the system.
+  // +optional
+  optional int64 tolerationSeconds = 5;
+}
+
+// Volume represents a named volume in a pod that may be accessed by any container in the pod.
+message Volume {
+  // Volume's name.
+  // Must be a DNS_LABEL and unique within the pod.
+  // More info: http://kubernetes.io/docs/user-guide/identifiers#names
+  optional string name = 1;
+
+  // VolumeSource represents the location and type of the mounted volume.
+  // If not specified, the Volume is implied to be an EmptyDir.
+  // This implied behavior is deprecated and will be removed in a future version.
+  optional VolumeSource volumeSource = 2;
+}
+
+// VolumeMount describes a mounting of a Volume within a container.
+message VolumeMount {
+  // This must match the Name of a Volume.
+  optional string name = 1;
+
+  // Mounted read-only if true, read-write otherwise (false or unspecified).
+  // Defaults to false.
+  // +optional
+  optional bool readOnly = 2;
+
+  // Path within the container at which the volume should be mounted. Must
+  // not contain ':'.
+  optional string mountPath = 3;
+
+  // Path within the volume from which the container's volume should be mounted.
+  // Defaults to "" (volume's root).
+ // +optional + optional string subPath = 4; +} + +// Projection that may be projected along with other supported volume types +message VolumeProjection { + // information about the secret data to project + optional SecretProjection secret = 1; + + // information about the downwardAPI data to project + optional DownwardAPIProjection downwardAPI = 2; + + // information about the configMap data to project + optional ConfigMapProjection configMap = 3; +} + +// Represents the source of a volume to mount. +// Only one of its members may be specified. +message VolumeSource { + // HostPath represents a pre-existing file or directory on the host + // machine that is directly exposed to the container. This is generally + // used for system agents or other privileged things that are allowed + // to see the host machine. Most containers will NOT need this. + // More info: http://kubernetes.io/docs/user-guide/volumes#hostpath + // --- + // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + // mount host directories as read/write. + // +optional + optional HostPathVolumeSource hostPath = 1; + + // EmptyDir represents a temporary directory that shares a pod's lifetime. + // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // +optional + optional EmptyDirVolumeSource emptyDir = 2; + + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // +optional + optional GCEPersistentDiskVolumeSource gcePersistentDisk = 3; + + // AWSElasticBlockStore represents an AWS Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // +optional + optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4; + + // GitRepo represents a git repository at a particular revision. + // +optional + optional GitRepoVolumeSource gitRepo = 5; + + // Secret represents a secret that should populate this volume. + // More info: http://kubernetes.io/docs/user-guide/volumes#secrets + // +optional + optional SecretVolumeSource secret = 6; + + // NFS represents an NFS mount on the host that shares a pod's lifetime + // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // +optional + optional NFSVolumeSource nfs = 7; + + // ISCSI represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: http://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md + // +optional + optional ISCSIVolumeSource iscsi = 8; + + // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // +optional + optional GlusterfsVolumeSource glusterfs = 9; + + // PersistentVolumeClaimVolumeSource represents a reference to a + // PersistentVolumeClaim in the same namespace. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // +optional + optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10; + + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. 
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // +optional + optional RBDVolumeSource rbd = 11; + + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. This is an + // alpha feature and may change in future. + // +optional + optional FlexVolumeSource flexVolume = 12; + + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + optional CinderVolumeSource cinder = 13; + + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + optional CephFSVolumeSource cephfs = 14; + + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + optional FlockerVolumeSource flocker = 15; + + // DownwardAPI represents downward API about the pod that should populate this volume + // +optional + optional DownwardAPIVolumeSource downwardAPI = 16; + + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + optional FCVolumeSource fc = 17; + + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + optional AzureFileVolumeSource azureFile = 18; + + // ConfigMap represents a configMap that should populate this volume + // +optional + optional ConfigMapVolumeSource configMap = 19; + + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + optional VsphereVirtualDiskVolumeSource vsphereVolume = 20; + + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + optional QuobyteVolumeSource quobyte = 21; + + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + optional AzureDiskVolumeSource azureDisk = 22; + + // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 23; + + // Items for all in one resources secrets, configmaps, and downward API + optional ProjectedVolumeSource projected = 26; + + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + optional PortworxVolumeSource portworxVolume = 24; + + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + optional ScaleIOVolumeSource scaleIO = 25; +} + +// Represents a vSphere volume resource. +message VsphereVirtualDiskVolumeSource { + // Path that identifies vSphere volume vmdk + optional string volumePath = 1; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + optional string fsType = 2; +} + +// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +message WeightedPodAffinityTerm { + // weight associated with matching the corresponding podAffinityTerm, + // in the range 1-100. + optional int32 weight = 1; + + // Required. A pod affinity term, associated with the corresponding weight. 
+ optional PodAffinityTerm podAffinityTerm = 2; +} + diff --git a/vendor/k8s.io/client-go/pkg/api/v1/helpers.go b/vendor/k8s.io/client-go/pkg/api/v1/helpers.go new file mode 100644 index 000000000..01f4ef470 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/helpers.go @@ -0,0 +1,632 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "fmt" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + + "k8s.io/client-go/pkg/api" +) + +// IsOpaqueIntResourceName returns true if the resource name has the opaque +// integer resource prefix. +func IsOpaqueIntResourceName(name ResourceName) bool { + return strings.HasPrefix(string(name), ResourceOpaqueIntPrefix) +} + +// OpaqueIntResourceName returns a ResourceName with the canonical opaque +// integer prefix prepended. If the argument already has the prefix, it is +// returned unmodified. +func OpaqueIntResourceName(name string) ResourceName { + if IsOpaqueIntResourceName(ResourceName(name)) { + return ResourceName(name) + } + return ResourceName(fmt.Sprintf("%s%s", api.ResourceOpaqueIntPrefix, name)) +} + +// NewDeleteOptions returns a DeleteOptions indicating the resource should +// be deleted within the specified grace period. Use zero to indicate +// immediate deletion. If you would prefer to use the default grace period, +// use &metav1.DeleteOptions{} directly. +func NewDeleteOptions(grace int64) *DeleteOptions { + return &DeleteOptions{GracePeriodSeconds: &grace} +} + +// NewPreconditionDeleteOptions returns a DeleteOptions with a UID precondition set. +func NewPreconditionDeleteOptions(uid string) *DeleteOptions { + u := types.UID(uid) + p := Preconditions{UID: &u} + return &DeleteOptions{Preconditions: &p} +} + +// NewUIDPreconditions returns a Preconditions with UID set. +func NewUIDPreconditions(uid string) *Preconditions { + u := types.UID(uid) + return &Preconditions{UID: &u} +} + +// this function aims to check if the service's ClusterIP is set or not +// the objective is not to perform validation here +func IsServiceIPSet(service *Service) bool { + return service.Spec.ClusterIP != ClusterIPNone && service.Spec.ClusterIP != "" +} + +// this function aims to check if the service's cluster IP is requested or not +func IsServiceIPRequested(service *Service) bool { + // ExternalName services are CNAME aliases to external ones. Ignore the IP. 
+	if service.Spec.Type == ServiceTypeExternalName {
+		return false
+	}
+	return service.Spec.ClusterIP == ""
+}
+
+var standardFinalizers = sets.NewString(
+	string(FinalizerKubernetes),
+	metav1.FinalizerOrphanDependents,
+)
+
+func IsStandardFinalizerName(str string) bool {
+	return standardFinalizers.Has(str)
+}
+
+// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice,
+// only if they do not already exist
+func AddToNodeAddresses(addresses *[]NodeAddress, addAddresses ...NodeAddress) {
+	for _, add := range addAddresses {
+		exists := false
+		for _, existing := range *addresses {
+			if existing.Address == add.Address && existing.Type == add.Type {
+				exists = true
+				break
+			}
+		}
+		if !exists {
+			*addresses = append(*addresses, add)
+		}
+	}
+}
+
+// TODO: make method on LoadBalancerStatus?
+func LoadBalancerStatusEqual(l, r *LoadBalancerStatus) bool {
+	return ingressSliceEqual(l.Ingress, r.Ingress)
+}
+
+func ingressSliceEqual(lhs, rhs []LoadBalancerIngress) bool {
+	if len(lhs) != len(rhs) {
+		return false
+	}
+	for i := range lhs {
+		if !ingressEqual(&lhs[i], &rhs[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+func ingressEqual(lhs, rhs *LoadBalancerIngress) bool {
+	if lhs.IP != rhs.IP {
+		return false
+	}
+	if lhs.Hostname != rhs.Hostname {
+		return false
+	}
+	return true
+}
+
+// TODO: make method on LoadBalancerStatus?
+func LoadBalancerStatusDeepCopy(lb *LoadBalancerStatus) *LoadBalancerStatus {
+	c := &LoadBalancerStatus{}
+	c.Ingress = make([]LoadBalancerIngress, len(lb.Ingress))
+	for i := range lb.Ingress {
+		c.Ingress[i] = lb.Ingress[i]
+	}
+	return c
+}
+
+// GetAccessModesAsString returns a string representation of an array of access modes.
+// modes, when present, are always in the same order: RWO,ROX,RWX.
+func GetAccessModesAsString(modes []PersistentVolumeAccessMode) string {
+	modes = removeDuplicateAccessModes(modes)
+	modesStr := []string{}
+	if containsAccessMode(modes, ReadWriteOnce) {
+		modesStr = append(modesStr, "RWO")
+	}
+	if containsAccessMode(modes, ReadOnlyMany) {
+		modesStr = append(modesStr, "ROX")
+	}
+	if containsAccessMode(modes, ReadWriteMany) {
+		modesStr = append(modesStr, "RWX")
+	}
+	return strings.Join(modesStr, ",")
+}
+
+// GetAccessModesFromString returns an array of AccessModes from a string created by GetAccessModesAsString
+func GetAccessModesFromString(modes string) []PersistentVolumeAccessMode {
+	strmodes := strings.Split(modes, ",")
+	accessModes := []PersistentVolumeAccessMode{}
+	for _, s := range strmodes {
+		s = strings.Trim(s, " ")
+		switch {
+		case s == "RWO":
+			accessModes = append(accessModes, ReadWriteOnce)
+		case s == "ROX":
+			accessModes = append(accessModes, ReadOnlyMany)
+		case s == "RWX":
+			accessModes = append(accessModes, ReadWriteMany)
+		}
+	}
+	return accessModes
+}
+
+// removeDuplicateAccessModes returns an array of access modes without any duplicates
+func removeDuplicateAccessModes(modes []PersistentVolumeAccessMode) []PersistentVolumeAccessMode {
+	accessModes := []PersistentVolumeAccessMode{}
+	for _, m := range modes {
+		if !containsAccessMode(accessModes, m) {
+			accessModes = append(accessModes, m)
+		}
+	}
+	return accessModes
+}
+
+func containsAccessMode(modes []PersistentVolumeAccessMode, mode PersistentVolumeAccessMode) bool {
+	for _, m := range modes {
+		if m == mode {
+			return true
+		}
+	}
+	return false
+}
+
+// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements
+// labels.Selector.
+func NodeSelectorRequirementsAsSelector(nsm []NodeSelectorRequirement) (labels.Selector, error) { + if len(nsm) == 0 { + return labels.Nothing(), nil + } + selector := labels.NewSelector() + for _, expr := range nsm { + var op selection.Operator + switch expr.Operator { + case NodeSelectorOpIn: + op = selection.In + case NodeSelectorOpNotIn: + op = selection.NotIn + case NodeSelectorOpExists: + op = selection.Exists + case NodeSelectorOpDoesNotExist: + op = selection.DoesNotExist + case NodeSelectorOpGt: + op = selection.GreaterThan + case NodeSelectorOpLt: + op = selection.LessThan + default: + return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) + } + r, err := labels.NewRequirement(expr.Key, op, expr.Values) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + return selector, nil +} + +const ( + // SeccompPodAnnotationKey represents the key of a seccomp profile applied + // to all containers of a pod. + SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" + + // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied + // to one container of a pod. + SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" + + // CreatedByAnnotation represents the key used to store the spec(json) + // used to create the resource. + CreatedByAnnotation = "kubernetes.io/created-by" + + // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) + // in the Annotations of a Node. + PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" + + // SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure + // container of a pod. The annotation value is a comma separated list of sysctl_name=value + // key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by + // the kubelet. Pods with other sysctls will fail to launch. + SysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/sysctls" + + // UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure + // container of a pod. The annotation value is a comma separated list of sysctl_name=value + // key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly + // namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use + // is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet + // will fail to launch. + UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls" + + // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache + // an object (e.g. secret, config map) before fetching it again from apiserver. + // This annotation can be attached to node. + ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" + + // AffinityAnnotationKey represents the key of affinity data (json serialized) + // in the Annotations of a Pod. + // TODO: remove when alpha support for affinity is removed + AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity" +) + +// Tries to add a toleration to annotations list. Returns true if something was updated +// false otherwise. 
+func AddOrUpdateTolerationInPod(pod *Pod, toleration *Toleration) (bool, error) { + podTolerations := pod.Spec.Tolerations + + var newTolerations []Toleration + updated := false + for i := range podTolerations { + if toleration.MatchToleration(&podTolerations[i]) { + if api.Semantic.DeepEqual(toleration, podTolerations[i]) { + return false, nil + } + newTolerations = append(newTolerations, *toleration) + updated = true + continue + } + + newTolerations = append(newTolerations, podTolerations[i]) + } + + if !updated { + newTolerations = append(newTolerations, *toleration) + } + + pod.Spec.Tolerations = newTolerations + return true, nil +} + +// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>, +// if the two tolerations have same <key,effect,operator,value> combination, regard as they match. +// TODO: uniqueness check for tolerations in api validations. +func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { + return t.Key == tolerationToMatch.Key && + t.Effect == tolerationToMatch.Effect && + t.Operator == tolerationToMatch.Operator && + t.Value == tolerationToMatch.Value +} + +// ToleratesTaint checks if the toleration tolerates the taint. +// The matching follows the rules below: +// (1) Empty toleration.effect means to match all taint effects, +// otherwise taint effect must equal to toleration.effect. +// (2) If toleration.operator is 'Exists', it means to match all taint values. +// (3) Empty toleration.key means to match all taint keys. +// If toleration.key is empty, toleration.operator must be 'Exists'; +// this combination means to match all taint values and all taint keys. +func (t *Toleration) ToleratesTaint(taint *Taint) bool { + if len(t.Effect) > 0 && t.Effect != taint.Effect { + return false + } + + if len(t.Key) > 0 && t.Key != taint.Key { + return false + } + + // TODO: Use proper defaulting when Toleration becomes a field of PodSpec + switch t.Operator { + // empty operator means Equal + case "", TolerationOpEqual: + return t.Value == taint.Value + case TolerationOpExists: + return true + default: + return false + } +} + +// TolerationsTolerateTaint checks if taint is tolerated by any of the tolerations. +func TolerationsTolerateTaint(tolerations []Toleration, taint *Taint) bool { + for i := range tolerations { + if tolerations[i].ToleratesTaint(taint) { + return true + } + } + return false +} + +type taintsFilterFunc func(*Taint) bool + +// TolerationsTolerateTaintsWithFilter checks if given tolerations tolerates +// all the taints that apply to the filter in given taint list. +func TolerationsTolerateTaintsWithFilter(tolerations []Toleration, taints []Taint, applyFilter taintsFilterFunc) bool { + if len(taints) == 0 { + return true + } + + for i := range taints { + if applyFilter != nil && !applyFilter(&taints[i]) { + continue + } + + if !TolerationsTolerateTaint(tolerations, &taints[i]) { + return false + } + } + + return true +} + +// DeleteTaintsByKey removes all the taints that have the same key to given taintKey +func DeleteTaintsByKey(taints []Taint, taintKey string) ([]Taint, bool) { + newTaints := []Taint{} + deleted := false + for i := range taints { + if taintKey == taints[i].Key { + deleted = true + continue + } + newTaints = append(newTaints, taints[i]) + } + return newTaints, deleted +} + +// DeleteTaint removes all the the taints that have the same key and effect to given taintToDelete. 
+func DeleteTaint(taints []Taint, taintToDelete *Taint) ([]Taint, bool) { + newTaints := []Taint{} + deleted := false + for i := range taints { + if taintToDelete.MatchTaint(&taints[i]) { + deleted = true + continue + } + newTaints = append(newTaints, taints[i]) + } + return newTaints, deleted +} + +// Returns true and list of Tolerations matching all Taints if all are tolerated, or false otherwise. +func GetMatchingTolerations(taints []Taint, tolerations []Toleration) (bool, []Toleration) { + if len(taints) == 0 { + return true, []Toleration{} + } + if len(tolerations) == 0 && len(taints) > 0 { + return false, []Toleration{} + } + result := []Toleration{} + for i := range taints { + tolerated := false + for j := range tolerations { + if tolerations[j].ToleratesTaint(&taints[i]) { + result = append(result, tolerations[j]) + tolerated = true + break + } + } + if !tolerated { + return false, []Toleration{} + } + } + return true, result +} + +// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect, +// if the two taints have same key:effect, regard as they match. +func (t *Taint) MatchTaint(taintToMatch *Taint) bool { + return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect +} + +// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. +func (t *Taint) ToString() string { + if len(t.Value) == 0 { + return fmt.Sprintf("%v:%v", t.Key, t.Effect) + } + return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect) +} + +func GetAvoidPodsFromNodeAnnotations(annotations map[string]string) (AvoidPods, error) { + var avoidPods AvoidPods + if len(annotations) > 0 && annotations[PreferAvoidPodsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[PreferAvoidPodsAnnotationKey]), &avoidPods) + if err != nil { + return avoidPods, err + } + } + return avoidPods, nil +} + +// SysctlsFromPodAnnotations parses the sysctl annotations into a slice of safe Sysctls +// and a slice of unsafe Sysctls. This is only a convenience wrapper around +// SysctlsFromPodAnnotation. +func SysctlsFromPodAnnotations(a map[string]string) ([]Sysctl, []Sysctl, error) { + safe, err := SysctlsFromPodAnnotation(a[SysctlsPodAnnotationKey]) + if err != nil { + return nil, nil, err + } + unsafe, err := SysctlsFromPodAnnotation(a[UnsafeSysctlsPodAnnotationKey]) + if err != nil { + return nil, nil, err + } + + return safe, unsafe, nil +} + +// SysctlsFromPodAnnotation parses an annotation value into a slice of Sysctls. +func SysctlsFromPodAnnotation(annotation string) ([]Sysctl, error) { + if len(annotation) == 0 { + return nil, nil + } + + kvs := strings.Split(annotation, ",") + sysctls := make([]Sysctl, len(kvs)) + for i, kv := range kvs { + cs := strings.Split(kv, "=") + if len(cs) != 2 || len(cs[0]) == 0 { + return nil, fmt.Errorf("sysctl %q not of the format sysctl_name=value", kv) + } + sysctls[i].Name = cs[0] + sysctls[i].Value = cs[1] + } + return sysctls, nil +} + +// PodAnnotationsFromSysctls creates an annotation value for a slice of Sysctls. +func PodAnnotationsFromSysctls(sysctls []Sysctl) string { + if len(sysctls) == 0 { + return "" + } + + kvs := make([]string, len(sysctls)) + for i := range sysctls { + kvs[i] = fmt.Sprintf("%s=%s", sysctls[i].Name, sysctls[i].Value) + } + return strings.Join(kvs, ",") +} + +type Sysctl struct { + Name string `protobuf:"bytes,1,opt,name=name"` + Value string `protobuf:"bytes,2,opt,name=value"` +} + +// NodeResources is an object for conveying resource information about a node. 
+// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. +type NodeResources struct { + // Capacity represents the available resources of a node + Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` +} + +// Tries to add a taint to annotations list. Returns a new copy of updated Node and true if something was updated +// false otherwise. +func AddOrUpdateTaint(node *Node, taint *Taint) (*Node, bool, error) { + objCopy, err := api.Scheme.DeepCopy(node) + if err != nil { + return nil, false, err + } + newNode := objCopy.(*Node) + nodeTaints := newNode.Spec.Taints + + var newTaints []Taint + updated := false + for i := range nodeTaints { + if taint.MatchTaint(&nodeTaints[i]) { + if api.Semantic.DeepEqual(taint, nodeTaints[i]) { + return newNode, false, nil + } + newTaints = append(newTaints, *taint) + updated = true + continue + } + + newTaints = append(newTaints, nodeTaints[i]) + } + + if !updated { + newTaints = append(newTaints, *taint) + } + + newNode.Spec.Taints = newTaints + return newNode, true, nil +} + +func TaintExists(taints []Taint, taintToFind *Taint) bool { + for _, taint := range taints { + if taint.MatchTaint(taintToFind) { + return true + } + } + return false +} + +// Tries to remove a taint from annotations list. Returns a new copy of updated Node and true if something was updated +// false otherwise. +func RemoveTaint(node *Node, taint *Taint) (*Node, bool, error) { + objCopy, err := api.Scheme.DeepCopy(node) + if err != nil { + return nil, false, err + } + newNode := objCopy.(*Node) + nodeTaints := newNode.Spec.Taints + if len(nodeTaints) == 0 { + return newNode, false, nil + } + + if !TaintExists(nodeTaints, taint) { + return newNode, false, nil + } + + newTaints, _ := DeleteTaint(nodeTaints, taint) + newNode.Spec.Taints = newTaints + return newNode, true, nil +} + +// GetAffinityFromPodAnnotations gets the json serialized affinity data from Pod.Annotations +// and converts it to the Affinity type in api. +// TODO: remove when alpha support for affinity is removed +func GetAffinityFromPodAnnotations(annotations map[string]string) (*Affinity, error) { + if len(annotations) > 0 && annotations[AffinityAnnotationKey] != "" { + var affinity Affinity + err := json.Unmarshal([]byte(annotations[AffinityAnnotationKey]), &affinity) + if err != nil { + return nil, err + } + return &affinity, nil + } + return nil, nil +} + +// GetPersistentVolumeClass returns StorageClassName. +func GetPersistentVolumeClass(volume *PersistentVolume) string { + // Use beta annotation first + if class, found := volume.Annotations[BetaStorageClassAnnotation]; found { + return class + } + + return volume.Spec.StorageClassName +} + +// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was +// requested, it returns "". +func GetPersistentVolumeClaimClass(claim *PersistentVolumeClaim) string { + // Use beta annotation first + if class, found := claim.Annotations[BetaStorageClassAnnotation]; found { + return class + } + + if claim.Spec.StorageClassName != nil { + return *claim.Spec.StorageClassName + } + + return "" +} + +// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field. 
+func PersistentVolumeClaimHasClass(claim *PersistentVolumeClaim) bool { + // Use beta annotation first + if _, found := claim.Annotations[BetaStorageClassAnnotation]; found { + return true + } + + if claim.Spec.StorageClassName != nil { + return true + } + + return false +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/meta.go b/vendor/k8s.io/client-go/pkg/api/v1/meta.go new file mode 100644 index 000000000..bb1ae2ff7 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/meta.go @@ -0,0 +1,98 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +func (obj *ObjectMeta) GetObjectMeta() metav1.Object { return obj } + +// Namespace implements metav1.Object for any object with an ObjectMeta typed field. Allows +// fast, direct access to metadata fields for API objects. +func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace } +func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace } +func (meta *ObjectMeta) GetName() string { return meta.Name } +func (meta *ObjectMeta) SetName(name string) { meta.Name = name } +func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName } +func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName } +func (meta *ObjectMeta) GetUID() types.UID { return meta.UID } +func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid } +func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion } +func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } +func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink } +func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } +func (meta *ObjectMeta) GetCreationTimestamp() metav1.Time { return meta.CreationTimestamp } +func (meta *ObjectMeta) SetCreationTimestamp(creationTimestamp metav1.Time) { + meta.CreationTimestamp = creationTimestamp +} +func (meta *ObjectMeta) GetDeletionTimestamp() *metav1.Time { return meta.DeletionTimestamp } +func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *metav1.Time) { + meta.DeletionTimestamp = deletionTimestamp +} +func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels } +func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels } +func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations } +func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations } +func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } +func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } + +func (meta *ObjectMeta) GetOwnerReferences() []metav1.OwnerReference { + ret := make([]metav1.OwnerReference, len(meta.OwnerReferences)) + for i := 0; i < len(meta.OwnerReferences); i++ { + ret[i].Kind = 
meta.OwnerReferences[i].Kind + ret[i].Name = meta.OwnerReferences[i].Name + ret[i].UID = meta.OwnerReferences[i].UID + ret[i].APIVersion = meta.OwnerReferences[i].APIVersion + if meta.OwnerReferences[i].Controller != nil { + value := *meta.OwnerReferences[i].Controller + ret[i].Controller = &value + } + if meta.OwnerReferences[i].BlockOwnerDeletion != nil { + value := *meta.OwnerReferences[i].BlockOwnerDeletion + ret[i].BlockOwnerDeletion = &value + } + } + return ret +} + +func (meta *ObjectMeta) SetOwnerReferences(references []metav1.OwnerReference) { + newReferences := make([]metav1.OwnerReference, len(references)) + for i := 0; i < len(references); i++ { + newReferences[i].Kind = references[i].Kind + newReferences[i].Name = references[i].Name + newReferences[i].UID = references[i].UID + newReferences[i].APIVersion = references[i].APIVersion + if references[i].Controller != nil { + value := *references[i].Controller + newReferences[i].Controller = &value + } + if references[i].BlockOwnerDeletion != nil { + value := *references[i].BlockOwnerDeletion + newReferences[i].BlockOwnerDeletion = &value + } + } + meta.OwnerReferences = newReferences +} + +func (meta *ObjectMeta) GetClusterName() string { + return meta.ClusterName +} +func (meta *ObjectMeta) SetClusterName(clusterName string) { + meta.ClusterName = clusterName +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/ref.go b/vendor/k8s.io/client-go/pkg/api/v1/ref.go new file mode 100644 index 000000000..5d33719fe --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/ref.go @@ -0,0 +1,133 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "errors" + "fmt" + "net/url" + "strings" + + "k8s.io/apimachinery/pkg/runtime/schema" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" +) + +var ( + // Errors that could be returned by GetReference. + ErrNilObject = errors.New("can't reference a nil object") + ErrNoSelfLink = errors.New("selfLink was empty, can't make reference") +) + +// GetReference returns an ObjectReference which refers to the given +// object, or an error if the object doesn't follow the conventions +// that would allow this. +// TODO: should take a meta.Interface see http://issue.k8s.io/7127 +func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*ObjectReference, error) { + if obj == nil { + return nil, ErrNilObject + } + if ref, ok := obj.(*ObjectReference); ok { + // Don't make a reference to a reference. 
+ return ref, nil + } + + gvk := obj.GetObjectKind().GroupVersionKind() + + // if the object referenced is actually persisted, we can just get kind from meta + // if we are building an object reference to something not yet persisted, we should fallback to scheme + kind := gvk.Kind + if len(kind) == 0 { + // TODO: this is wrong + gvks, _, err := scheme.ObjectKinds(obj) + if err != nil { + return nil, err + } + kind = gvks[0].Kind + } + + // An object that implements only List has enough metadata to build a reference + var listMeta meta.List + objectMeta, err := meta.Accessor(obj) + if err != nil { + listMeta, err = meta.ListAccessor(obj) + if err != nil { + return nil, err + } + } else { + listMeta = objectMeta + } + + // if the object referenced is actually persisted, we can also get version from meta + version := gvk.GroupVersion().String() + if len(version) == 0 { + selfLink := listMeta.GetSelfLink() + if len(selfLink) == 0 { + return nil, ErrNoSelfLink + } + selfLinkUrl, err := url.Parse(selfLink) + if err != nil { + return nil, err + } + // example paths: /<prefix>/<version>/* + parts := strings.Split(selfLinkUrl.Path, "/") + if len(parts) < 3 { + return nil, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version) + } + version = parts[2] + } + + // only has list metadata + if objectMeta == nil { + return &ObjectReference{ + Kind: kind, + APIVersion: version, + ResourceVersion: listMeta.GetResourceVersion(), + }, nil + } + + return &ObjectReference{ + Kind: kind, + APIVersion: version, + Name: objectMeta.GetName(), + Namespace: objectMeta.GetNamespace(), + UID: objectMeta.GetUID(), + ResourceVersion: objectMeta.GetResourceVersion(), + }, nil +} + +// GetPartialReference is exactly like GetReference, but allows you to set the FieldPath. +func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*ObjectReference, error) { + ref, err := GetReference(scheme, obj) + if err != nil { + return nil, err + } + ref.FieldPath = fieldPath + return ref, nil +} + +// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that +// intend only to get a reference to that object. This simplifies the event recording interface. +func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} +func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/vendor/k8s.io/client-go/pkg/api/v1/register.go b/vendor/k8s.io/client-go/pkg/api/v1/register.go new file mode 100644 index 000000000..5c2dfddd1 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/register.go @@ -0,0 +1,96 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs, addFastPathConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Pod{}, + &PodList{}, + &PodStatusResult{}, + &PodTemplate{}, + &PodTemplateList{}, + &ReplicationController{}, + &ReplicationControllerList{}, + &Service{}, + &ServiceProxyOptions{}, + &ServiceList{}, + &Endpoints{}, + &EndpointsList{}, + &Node{}, + &NodeList{}, + &NodeProxyOptions{}, + &Binding{}, + &Event{}, + &EventList{}, + &List{}, + &LimitRange{}, + &LimitRangeList{}, + &ResourceQuota{}, + &ResourceQuotaList{}, + &Namespace{}, + &NamespaceList{}, + &Secret{}, + &SecretList{}, + &ServiceAccount{}, + &ServiceAccountList{}, + &PersistentVolume{}, + &PersistentVolumeList{}, + &PersistentVolumeClaim{}, + &PersistentVolumeClaimList{}, + &PodAttachOptions{}, + &PodLogOptions{}, + &PodExecOptions{}, + &PodPortForwardOptions{}, + &PodProxyOptions{}, + &ComponentStatus{}, + &ComponentStatusList{}, + &SerializedReference{}, + &RangeAllocation{}, + &ConfigMap{}, + &ConfigMapList{}, + ) + + // Add common types + scheme.AddKnownTypes(SchemeGroupVersion, &metav1.Status{}) + + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/resource_helpers.go b/vendor/k8s.io/client-go/pkg/api/v1/resource_helpers.go new file mode 100644 index 000000000..ec8423276 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/resource_helpers.go @@ -0,0 +1,257 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Returns string version of ResourceName. +func (self ResourceName) String() string { + return string(self) +} + +// Returns the CPU limit if specified. +func (self *ResourceList) Cpu() *resource.Quantity { + if val, ok := (*self)[ResourceCPU]; ok { + return &val + } + return &resource.Quantity{Format: resource.DecimalSI} +} + +// Returns the Memory limit if specified. 
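// Illustrative usage sketch (not part of the vendored diff): reading the typed
// accessors that this part of resource_helpers.go defines on ResourceList
// (Cpu here, Memory just below). Import paths assume the vendored layout;
// the quantities are arbitrary example values.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	v1 "k8s.io/client-go/pkg/api/v1"
)

func main() {
	limits := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("500m"),
		v1.ResourceMemory: resource.MustParse("128Mi"),
	}

	// Cpu()/Memory() return a zero Quantity (never nil) when the key is absent,
	// so callers can chain MilliValue()/Value() without nil checks.
	fmt.Println(limits.Cpu().MilliValue()) // 500
	fmt.Println(limits.Memory().Value())   // 134217728

	var empty v1.ResourceList
	fmt.Println(empty.Cpu().MilliValue()) // 0
}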
+func (self *ResourceList) Memory() *resource.Quantity { + if val, ok := (*self)[ResourceMemory]; ok { + return &val + } + return &resource.Quantity{Format: resource.BinarySI} +} + +func (self *ResourceList) Pods() *resource.Quantity { + if val, ok := (*self)[ResourcePods]; ok { + return &val + } + return &resource.Quantity{} +} + +func (self *ResourceList) NvidiaGPU() *resource.Quantity { + if val, ok := (*self)[ResourceNvidiaGPU]; ok { + return &val + } + return &resource.Quantity{} +} + +func GetContainerStatus(statuses []ContainerStatus, name string) (ContainerStatus, bool) { + for i := range statuses { + if statuses[i].Name == name { + return statuses[i], true + } + } + return ContainerStatus{}, false +} + +func GetExistingContainerStatus(statuses []ContainerStatus, name string) ContainerStatus { + for i := range statuses { + if statuses[i].Name == name { + return statuses[i] + } + } + return ContainerStatus{} +} + +// IsPodAvailable returns true if a pod is available; false otherwise. +// Precondition for an available pod is that it must be ready. On top +// of that, there are two cases when a pod can be considered available: +// 1. minReadySeconds == 0, or +// 2. LastTransitionTime (is set) + minReadySeconds < current time +func IsPodAvailable(pod *Pod, minReadySeconds int32, now metav1.Time) bool { + if !IsPodReady(pod) { + return false + } + + c := GetPodReadyCondition(pod.Status) + minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second + if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) { + return true + } + return false +} + +// IsPodReady returns true if a pod is ready; false otherwise. +func IsPodReady(pod *Pod) bool { + return IsPodReadyConditionTrue(pod.Status) +} + +// IsPodReady retruns true if a pod is ready; false otherwise. +func IsPodReadyConditionTrue(status PodStatus) bool { + condition := GetPodReadyCondition(status) + return condition != nil && condition.Status == ConditionTrue +} + +// Extracts the pod ready condition from the given status and returns that. +// Returns nil if the condition is not present. +func GetPodReadyCondition(status PodStatus) *PodCondition { + _, condition := GetPodCondition(&status, PodReady) + return condition +} + +// GetPodCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func GetPodCondition(status *PodStatus, conditionType PodConditionType) (int, *PodCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} + +// GetNodeCondition extracts the provided condition from the given status and returns that. +// Returns nil and -1 if the condition is not present, and the index of the located condition. +func GetNodeCondition(status *NodeStatus, conditionType NodeConditionType) (int, *NodeCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} + +// Updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the +// status has changed. +// Returns true if pod condition has changed or has been added. 
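// Illustrative usage sketch (not part of the vendored diff): checking pod
// readiness with the condition helpers above (IsPodReady, GetPodCondition,
// IsPodAvailable). Import paths assume the vendored layout; the pod literal
// is a hypothetical example.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1 "k8s.io/client-go/pkg/api/v1"
)

func main() {
	pod := &v1.Pod{
		Status: v1.PodStatus{
			Conditions: []v1.PodCondition{
				{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: metav1.Now()},
			},
		},
	}

	fmt.Println(v1.IsPodReady(pod)) // true

	// GetPodCondition returns the index and a pointer into Status.Conditions,
	// or (-1, nil) when the condition type is not present.
	idx, cond := v1.GetPodCondition(&pod.Status, v1.PodReady)
	fmt.Println(idx, cond.Status) // 0 True

	// With minReadySeconds > 0 the pod only counts as available once the ready
	// condition has been true for that long.
	fmt.Println(v1.IsPodAvailable(pod, 30, metav1.Now())) // false: it just transitioned
}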
+func UpdatePodCondition(status *PodStatus, condition *PodCondition) bool { + condition.LastTransitionTime = metav1.Now() + // Try to find this pod condition. + conditionIndex, oldCondition := GetPodCondition(status, condition.Type) + + if oldCondition == nil { + // We are adding new pod condition. + status.Conditions = append(status.Conditions, *condition) + return true + } else { + // We are updating an existing condition, so we need to check if it has changed. + if condition.Status == oldCondition.Status { + condition.LastTransitionTime = oldCondition.LastTransitionTime + } + + isEqual := condition.Status == oldCondition.Status && + condition.Reason == oldCondition.Reason && + condition.Message == oldCondition.Message && + condition.LastProbeTime.Equal(oldCondition.LastProbeTime) && + condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime) + + status.Conditions[conditionIndex] = *condition + // Return true if one of the fields have changed. + return !isEqual + } +} + +// IsNodeReady returns true if a node is ready; false otherwise. +func IsNodeReady(node *Node) bool { + for _, c := range node.Status.Conditions { + if c.Type == NodeReady { + return c.Status == ConditionTrue + } + } + return false +} + +// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all +// containers of the pod. +func PodRequestsAndLimits(pod *Pod) (reqs map[ResourceName]resource.Quantity, limits map[ResourceName]resource.Quantity, err error) { + reqs, limits = map[ResourceName]resource.Quantity{}, map[ResourceName]resource.Quantity{} + for _, container := range pod.Spec.Containers { + for name, quantity := range container.Resources.Requests { + if value, ok := reqs[name]; !ok { + reqs[name] = *quantity.Copy() + } else { + value.Add(quantity) + reqs[name] = value + } + } + for name, quantity := range container.Resources.Limits { + if value, ok := limits[name]; !ok { + limits[name] = *quantity.Copy() + } else { + value.Add(quantity) + limits[name] = value + } + } + } + // init containers define the minimum of any resource + for _, container := range pod.Spec.InitContainers { + for name, quantity := range container.Resources.Requests { + value, ok := reqs[name] + if !ok { + reqs[name] = *quantity.Copy() + continue + } + if quantity.Cmp(value) > 0 { + reqs[name] = *quantity.Copy() + } + } + for name, quantity := range container.Resources.Limits { + value, ok := limits[name] + if !ok { + limits[name] = *quantity.Copy() + continue + } + if quantity.Cmp(value) > 0 { + limits[name] = *quantity.Copy() + } + } + } + return +} + +// finds and returns the request for a specific resource. 
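// Illustrative usage sketch (not part of the vendored diff): summing container
// requests with PodRequestsAndLimits, which treats init containers as a
// per-resource maximum rather than part of the sum. Import paths assume the
// vendored layout; container names and quantities are examples.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	v1 "k8s.io/client-go/pkg/api/v1"
)

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{Name: "app", Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m")},
				}},
				{Name: "sidecar", Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
				}},
			},
			// Init containers contribute the max, not the sum, of each resource.
			InitContainers: []v1.Container{
				{Name: "init", Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m")},
				}},
			},
		},
	}

	reqs, _, err := v1.PodRequestsAndLimits(pod)
	if err != nil {
		panic(err)
	}
	cpu := reqs[v1.ResourceCPU]
	fmt.Println(cpu.MilliValue()) // 500: max(250+100, 500)
}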
+func GetResourceRequest(pod *Pod, resource ResourceName) int64 { + if resource == ResourcePods { + return 1 + } + totalResources := int64(0) + for _, container := range pod.Spec.Containers { + if rQuantity, ok := container.Resources.Requests[resource]; ok { + if resource == ResourceCPU { + totalResources += rQuantity.MilliValue() + } else { + totalResources += rQuantity.Value() + } + } + } + // take max_resource(sum_pod, any_init_container) + for _, container := range pod.Spec.InitContainers { + if rQuantity, ok := container.Resources.Requests[resource]; ok { + if resource == ResourceCPU && rQuantity.MilliValue() > totalResources { + totalResources = rQuantity.MilliValue() + } else if rQuantity.Value() > totalResources { + totalResources = rQuantity.Value() + } + } + } + return totalResources +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/types.generated.go b/vendor/k8s.io/client-go/pkg/api/v1/types.generated.go new file mode 100644 index 000000000..c6fd805aa --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/types.generated.go @@ -0,0 +1,73800 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package v1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg3_resource "k8s.io/apimachinery/pkg/api/resource" + pkg2_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg5_runtime "k8s.io/apimachinery/pkg/runtime" + pkg1_types "k8s.io/apimachinery/pkg/types" + pkg4_intstr "k8s.io/apimachinery/pkg/util/intstr" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg3_resource.Quantity + var v1 pkg2_v1.Time + var v2 pkg5_runtime.RawExtension + var v3 pkg1_types.UID + var v4 pkg4_intstr.IntOrString + var v5 time.Time + _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 + } +} + +func (x *ObjectMeta) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [15]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = x.GenerateName != "" + yyq2[2] = x.Namespace != "" + yyq2[3] = x.SelfLink != "" + yyq2[4] = x.UID != "" + yyq2[5] = x.ResourceVersion != "" + yyq2[6] = x.Generation != 0 + yyq2[7] = true + yyq2[8] = x.DeletionTimestamp != nil + yyq2[9] = x.DeletionGracePeriodSeconds != nil + yyq2[10] = len(x.Labels) != 0 + yyq2[11] = len(x.Annotations) != 0 + yyq2[12] = len(x.OwnerReferences) != 0 + yyq2[13] = len(x.Finalizers) != 0 + yyq2[14] = x.ClusterName != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(15) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("generateName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, 
string(x.SelfLink)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selfLink")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(x.UID) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("uid")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.EncExt(x.UID) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeInt(int64(x.Generation)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("generation")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeInt(int64(x.Generation)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yy25 := &x.CreationTimestamp + yym26 := z.EncBinary() + _ = yym26 + if false { + } else if z.HasExtensions() && z.EncExt(yy25) { + } else if yym26 { + z.EncBinaryMarshal(yy25) + } else if !yym26 && z.IsJSONHandle() { + z.EncJSONMarshal(yy25) + } else { + z.EncFallback(yy25) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("creationTimestamp")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy27 := &x.CreationTimestamp + yym28 := z.EncBinary() + _ = yym28 + if false { + } else if z.HasExtensions() && z.EncExt(yy27) { + } else if yym28 { + z.EncBinaryMarshal(yy27) + } else if !yym28 && z.IsJSONHandle() { + z.EncJSONMarshal(yy27) + } else { + z.EncFallback(yy27) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.DeletionTimestamp == nil { + r.EncodeNil() + } else { + yym30 := z.EncBinary() + _ = yym30 + if false { + } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { + } else if yym30 { + 
z.EncBinaryMarshal(x.DeletionTimestamp) + } else if !yym30 && z.IsJSONHandle() { + z.EncJSONMarshal(x.DeletionTimestamp) + } else { + z.EncFallback(x.DeletionTimestamp) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("deletionTimestamp")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DeletionTimestamp == nil { + r.EncodeNil() + } else { + yym31 := z.EncBinary() + _ = yym31 + if false { + } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { + } else if yym31 { + z.EncBinaryMarshal(x.DeletionTimestamp) + } else if !yym31 && z.IsJSONHandle() { + z.EncJSONMarshal(x.DeletionTimestamp) + } else { + z.EncFallback(x.DeletionTimestamp) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.DeletionGracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy33 := *x.DeletionGracePeriodSeconds + yym34 := z.EncBinary() + _ = yym34 + if false { + } else { + r.EncodeInt(int64(yy33)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("deletionGracePeriodSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DeletionGracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy35 := *x.DeletionGracePeriodSeconds + yym36 := z.EncBinary() + _ = yym36 + if false { + } else { + r.EncodeInt(int64(yy35)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.Labels == nil { + r.EncodeNil() + } else { + yym38 := z.EncBinary() + _ = yym38 + if false { + } else { + z.F.EncMapStringStringV(x.Labels, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("labels")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Labels == nil { + r.EncodeNil() + } else { + yym39 := z.EncBinary() + _ = yym39 + if false { + } else { + z.F.EncMapStringStringV(x.Labels, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + if x.Annotations == nil { + r.EncodeNil() + } else { + yym41 := z.EncBinary() + _ = yym41 + if false { + } else { + z.F.EncMapStringStringV(x.Annotations, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("annotations")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Annotations == nil { + r.EncodeNil() + } else { + yym42 := z.EncBinary() + _ = yym42 + if false { + } else { + z.F.EncMapStringStringV(x.Annotations, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + if x.OwnerReferences == nil { + r.EncodeNil() + } else { + yym44 := z.EncBinary() + _ = yym44 + if false { + } else { + h.encSlicev1_OwnerReference(([]pkg2_v1.OwnerReference)(x.OwnerReferences), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ownerReferences")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.OwnerReferences == nil { + 
r.EncodeNil() + } else { + yym45 := z.EncBinary() + _ = yym45 + if false { + } else { + h.encSlicev1_OwnerReference(([]pkg2_v1.OwnerReference)(x.OwnerReferences), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + if x.Finalizers == nil { + r.EncodeNil() + } else { + yym47 := z.EncBinary() + _ = yym47 + if false { + } else { + z.F.EncSliceStringV(x.Finalizers, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("finalizers")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Finalizers == nil { + r.EncodeNil() + } else { + yym48 := z.EncBinary() + _ = yym48 + if false { + } else { + z.F.EncSliceStringV(x.Finalizers, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[14] { + yym50 := z.EncBinary() + _ = yym50 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[14] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("clusterName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym51 := z.EncBinary() + _ = yym51 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ObjectMeta) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ObjectMeta) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "generateName": + if r.TryDecodeAsNil() { + x.GenerateName = "" + } else { + yyv6 := &x.GenerateName + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv8 := &x.Namespace + yym9 
:= z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "selfLink": + if r.TryDecodeAsNil() { + x.SelfLink = "" + } else { + yyv10 := &x.SelfLink + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "uid": + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv12 := &x.UID + yym13 := z.DecBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.DecExt(yyv12) { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "resourceVersion": + if r.TryDecodeAsNil() { + x.ResourceVersion = "" + } else { + yyv14 := &x.ResourceVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "generation": + if r.TryDecodeAsNil() { + x.Generation = 0 + } else { + yyv16 := &x.Generation + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*int64)(yyv16)) = int64(r.DecodeInt(64)) + } + } + case "creationTimestamp": + if r.TryDecodeAsNil() { + x.CreationTimestamp = pkg2_v1.Time{} + } else { + yyv18 := &x.CreationTimestamp + yym19 := z.DecBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.DecExt(yyv18) { + } else if yym19 { + z.DecBinaryUnmarshal(yyv18) + } else if !yym19 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv18) + } else { + z.DecFallback(yyv18, false) + } + } + case "deletionTimestamp": + if r.TryDecodeAsNil() { + if x.DeletionTimestamp != nil { + x.DeletionTimestamp = nil + } + } else { + if x.DeletionTimestamp == nil { + x.DeletionTimestamp = new(pkg2_v1.Time) + } + yym21 := z.DecBinary() + _ = yym21 + if false { + } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { + } else if yym21 { + z.DecBinaryUnmarshal(x.DeletionTimestamp) + } else if !yym21 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.DeletionTimestamp) + } else { + z.DecFallback(x.DeletionTimestamp, false) + } + } + case "deletionGracePeriodSeconds": + if r.TryDecodeAsNil() { + if x.DeletionGracePeriodSeconds != nil { + x.DeletionGracePeriodSeconds = nil + } + } else { + if x.DeletionGracePeriodSeconds == nil { + x.DeletionGracePeriodSeconds = new(int64) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) + } + } + case "labels": + if r.TryDecodeAsNil() { + x.Labels = nil + } else { + yyv24 := &x.Labels + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + z.F.DecMapStringStringX(yyv24, false, d) + } + } + case "annotations": + if r.TryDecodeAsNil() { + x.Annotations = nil + } else { + yyv26 := &x.Annotations + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + z.F.DecMapStringStringX(yyv26, false, d) + } + } + case "ownerReferences": + if r.TryDecodeAsNil() { + x.OwnerReferences = nil + } else { + yyv28 := &x.OwnerReferences + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + h.decSlicev1_OwnerReference((*[]pkg2_v1.OwnerReference)(yyv28), d) + } + } + case "finalizers": + if r.TryDecodeAsNil() { + x.Finalizers = nil + } else { + yyv30 := &x.Finalizers + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + z.F.DecSliceStringX(yyv30, false, d) + } + } + case "clusterName": + if r.TryDecodeAsNil() { + x.ClusterName = "" + } else { + yyv32 := &x.ClusterName + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + *((*string)(yyv32)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj34 int + var yyb34 bool + var yyhl34 bool = l >= 0 + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv35 := &x.Name + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*string)(yyv35)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.GenerateName = "" + } else { + yyv37 := &x.GenerateName + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*string)(yyv37)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv39 := &x.Namespace + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*string)(yyv39)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SelfLink = "" + } else { + yyv41 := &x.SelfLink + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*string)(yyv41)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv43 := &x.UID + yym44 := z.DecBinary() + _ = yym44 + if false { + } else if z.HasExtensions() && z.DecExt(yyv43) { + } else { + *((*string)(yyv43)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ResourceVersion = "" + } else { + yyv45 := &x.ResourceVersion + yym46 := z.DecBinary() + _ = yym46 + if false { + } else { + *((*string)(yyv45)) = r.DecodeString() + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Generation = 0 + } else { + yyv47 := &x.Generation + yym48 := z.DecBinary() + _ = yym48 + if false { + } else { + *((*int64)(yyv47)) = int64(r.DecodeInt(64)) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { 
+ x.CreationTimestamp = pkg2_v1.Time{} + } else { + yyv49 := &x.CreationTimestamp + yym50 := z.DecBinary() + _ = yym50 + if false { + } else if z.HasExtensions() && z.DecExt(yyv49) { + } else if yym50 { + z.DecBinaryUnmarshal(yyv49) + } else if !yym50 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv49) + } else { + z.DecFallback(yyv49, false) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DeletionTimestamp != nil { + x.DeletionTimestamp = nil + } + } else { + if x.DeletionTimestamp == nil { + x.DeletionTimestamp = new(pkg2_v1.Time) + } + yym52 := z.DecBinary() + _ = yym52 + if false { + } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { + } else if yym52 { + z.DecBinaryUnmarshal(x.DeletionTimestamp) + } else if !yym52 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.DeletionTimestamp) + } else { + z.DecFallback(x.DeletionTimestamp, false) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DeletionGracePeriodSeconds != nil { + x.DeletionGracePeriodSeconds = nil + } + } else { + if x.DeletionGracePeriodSeconds == nil { + x.DeletionGracePeriodSeconds = new(int64) + } + yym54 := z.DecBinary() + _ = yym54 + if false { + } else { + *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Labels = nil + } else { + yyv55 := &x.Labels + yym56 := z.DecBinary() + _ = yym56 + if false { + } else { + z.F.DecMapStringStringX(yyv55, false, d) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Annotations = nil + } else { + yyv57 := &x.Annotations + yym58 := z.DecBinary() + _ = yym58 + if false { + } else { + z.F.DecMapStringStringX(yyv57, false, d) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.OwnerReferences = nil + } else { + yyv59 := &x.OwnerReferences + yym60 := z.DecBinary() + _ = yym60 + if false { + } else { + h.decSlicev1_OwnerReference((*[]pkg2_v1.OwnerReference)(yyv59), d) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Finalizers = nil + } else { + yyv61 := &x.Finalizers + yym62 := z.DecBinary() + _ = yym62 + if false { + } else { + z.F.DecSliceStringX(yyv61, false, d) + } + } + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ClusterName = "" + } else { + yyv63 := &x.ClusterName + yym64 := z.DecBinary() + _ = yym64 + if false { + } else { + *((*string)(yyv63)) = r.DecodeString() + } + } + for { + yyj34++ + if yyhl34 { + yyb34 = yyj34 > l + } else { + yyb34 = r.CheckBreak() + } + if yyb34 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj34-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [27]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.VolumeSource.HostPath != nil && x.HostPath != nil + yyq2[2] = x.VolumeSource.EmptyDir != nil && x.EmptyDir != nil + yyq2[3] = x.VolumeSource.GCEPersistentDisk != nil && x.GCEPersistentDisk != nil + yyq2[4] = x.VolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil + yyq2[5] = x.VolumeSource.GitRepo != nil && x.GitRepo != nil + yyq2[6] = x.VolumeSource.Secret != nil && x.Secret != nil + yyq2[7] = x.VolumeSource.NFS != nil && x.NFS != nil + yyq2[8] = x.VolumeSource.ISCSI != nil && x.ISCSI != nil + yyq2[9] = x.VolumeSource.Glusterfs != nil && x.Glusterfs != nil + yyq2[10] = x.VolumeSource.PersistentVolumeClaim != nil && x.PersistentVolumeClaim != nil + yyq2[11] = x.VolumeSource.RBD != nil && x.RBD != nil + yyq2[12] = x.VolumeSource.FlexVolume != nil && x.FlexVolume != nil + yyq2[13] = x.VolumeSource.Cinder != nil && x.Cinder != nil + yyq2[14] = x.VolumeSource.CephFS != nil && x.CephFS != nil + yyq2[15] = x.VolumeSource.Flocker != nil && x.Flocker != nil + yyq2[16] = x.VolumeSource.DownwardAPI != nil && x.DownwardAPI != nil + yyq2[17] = x.VolumeSource.FC != nil && x.FC != nil + yyq2[18] = x.VolumeSource.AzureFile != nil && x.AzureFile != nil + yyq2[19] = x.VolumeSource.ConfigMap != nil && x.ConfigMap != nil + yyq2[20] = x.VolumeSource.VsphereVolume != nil && x.VsphereVolume != nil + yyq2[21] = x.VolumeSource.Quobyte != nil && x.Quobyte != nil + yyq2[22] = x.VolumeSource.AzureDisk != nil && x.AzureDisk != nil + yyq2[23] = x.VolumeSource.PhotonPersistentDisk != nil && x.PhotonPersistentDisk != nil + yyq2[24] = x.VolumeSource.Projected != nil && x.Projected != nil + yyq2[25] = x.VolumeSource.PortworxVolume != nil && x.PortworxVolume != nil + yyq2[26] = x.VolumeSource.ScaleIO != nil && x.ScaleIO != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(27) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + var yyn6 bool + if 
x.VolumeSource.HostPath == nil { + yyn6 = true + goto LABEL6 + } + LABEL6: + if yyr2 || yy2arr2 { + if yyn6 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn6 { + r.EncodeNil() + } else { + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } + } + } + var yyn9 bool + if x.VolumeSource.EmptyDir == nil { + yyn9 = true + goto LABEL9 + } + LABEL9: + if yyr2 || yy2arr2 { + if yyn9 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.EmptyDir == nil { + r.EncodeNil() + } else { + x.EmptyDir.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("emptyDir")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn9 { + r.EncodeNil() + } else { + if x.EmptyDir == nil { + r.EncodeNil() + } else { + x.EmptyDir.CodecEncodeSelf(e) + } + } + } + } + var yyn12 bool + if x.VolumeSource.GCEPersistentDisk == nil { + yyn12 = true + goto LABEL12 + } + LABEL12: + if yyr2 || yy2arr2 { + if yyn12 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn12 { + r.EncodeNil() + } else { + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } + } + } + var yyn15 bool + if x.VolumeSource.AWSElasticBlockStore == nil { + yyn15 = true + goto LABEL15 + } + LABEL15: + if yyr2 || yy2arr2 { + if yyn15 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn15 { + r.EncodeNil() + } else { + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } + } + } + var yyn18 bool + if x.VolumeSource.GitRepo == nil { + yyn18 = true + goto LABEL18 + } + LABEL18: + if yyr2 || yy2arr2 { + if yyn18 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.GitRepo == nil { + r.EncodeNil() + } else { + x.GitRepo.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gitRepo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn18 { + r.EncodeNil() + } else { + if x.GitRepo == 
nil { + r.EncodeNil() + } else { + x.GitRepo.CodecEncodeSelf(e) + } + } + } + } + var yyn21 bool + if x.VolumeSource.Secret == nil { + yyn21 = true + goto LABEL21 + } + LABEL21: + if yyr2 || yy2arr2 { + if yyn21 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.Secret == nil { + r.EncodeNil() + } else { + x.Secret.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secret")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn21 { + r.EncodeNil() + } else { + if x.Secret == nil { + r.EncodeNil() + } else { + x.Secret.CodecEncodeSelf(e) + } + } + } + } + var yyn24 bool + if x.VolumeSource.NFS == nil { + yyn24 = true + goto LABEL24 + } + LABEL24: + if yyr2 || yy2arr2 { + if yyn24 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn24 { + r.EncodeNil() + } else { + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } + } + } + var yyn27 bool + if x.VolumeSource.ISCSI == nil { + yyn27 = true + goto LABEL27 + } + LABEL27: + if yyr2 || yy2arr2 { + if yyn27 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("iscsi")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn27 { + r.EncodeNil() + } else { + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } + } + } + var yyn30 bool + if x.VolumeSource.Glusterfs == nil { + yyn30 = true + goto LABEL30 + } + LABEL30: + if yyr2 || yy2arr2 { + if yyn30 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn30 { + r.EncodeNil() + } else { + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } + } + } + var yyn33 bool + if x.VolumeSource.PersistentVolumeClaim == nil { + yyn33 = true + goto LABEL33 + } + LABEL33: + if yyr2 || yy2arr2 { + if yyn33 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.PersistentVolumeClaim == nil { + r.EncodeNil() + } else { + x.PersistentVolumeClaim.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeClaim")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn33 { + r.EncodeNil() + } else { + if x.PersistentVolumeClaim 
== nil { + r.EncodeNil() + } else { + x.PersistentVolumeClaim.CodecEncodeSelf(e) + } + } + } + } + var yyn36 bool + if x.VolumeSource.RBD == nil { + yyn36 = true + goto LABEL36 + } + LABEL36: + if yyr2 || yy2arr2 { + if yyn36 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rbd")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn36 { + r.EncodeNil() + } else { + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } + } + } + var yyn39 bool + if x.VolumeSource.FlexVolume == nil { + yyn39 = true + goto LABEL39 + } + LABEL39: + if yyr2 || yy2arr2 { + if yyn39 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn39 { + r.EncodeNil() + } else { + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } + } + } + var yyn42 bool + if x.VolumeSource.Cinder == nil { + yyn42 = true + goto LABEL42 + } + LABEL42: + if yyr2 || yy2arr2 { + if yyn42 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cinder")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn42 { + r.EncodeNil() + } else { + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } + } + } + var yyn45 bool + if x.VolumeSource.CephFS == nil { + yyn45 = true + goto LABEL45 + } + LABEL45: + if yyr2 || yy2arr2 { + if yyn45 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[14] { + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[14] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cephfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn45 { + r.EncodeNil() + } else { + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } + } + } + var yyn48 bool + if x.VolumeSource.Flocker == nil { + yyn48 = true + goto LABEL48 + } + LABEL48: + if yyr2 || yy2arr2 { + if yyn48 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[15] { + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[15] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flocker")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn48 { + r.EncodeNil() + } else { + if x.Flocker == nil { + r.EncodeNil() + } else 
{ + x.Flocker.CodecEncodeSelf(e) + } + } + } + } + var yyn51 bool + if x.VolumeSource.DownwardAPI == nil { + yyn51 = true + goto LABEL51 + } + LABEL51: + if yyr2 || yy2arr2 { + if yyn51 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[16] { + if x.DownwardAPI == nil { + r.EncodeNil() + } else { + x.DownwardAPI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[16] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("downwardAPI")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn51 { + r.EncodeNil() + } else { + if x.DownwardAPI == nil { + r.EncodeNil() + } else { + x.DownwardAPI.CodecEncodeSelf(e) + } + } + } + } + var yyn54 bool + if x.VolumeSource.FC == nil { + yyn54 = true + goto LABEL54 + } + LABEL54: + if yyr2 || yy2arr2 { + if yyn54 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[17] { + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[17] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fc")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn54 { + r.EncodeNil() + } else { + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } + } + } + var yyn57 bool + if x.VolumeSource.AzureFile == nil { + yyn57 = true + goto LABEL57 + } + LABEL57: + if yyr2 || yy2arr2 { + if yyn57 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[18] { + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[18] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureFile")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn57 { + r.EncodeNil() + } else { + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } + } + } + var yyn60 bool + if x.VolumeSource.ConfigMap == nil { + yyn60 = true + goto LABEL60 + } + LABEL60: + if yyr2 || yy2arr2 { + if yyn60 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[19] { + if x.ConfigMap == nil { + r.EncodeNil() + } else { + x.ConfigMap.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[19] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("configMap")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn60 { + r.EncodeNil() + } else { + if x.ConfigMap == nil { + r.EncodeNil() + } else { + x.ConfigMap.CodecEncodeSelf(e) + } + } + } + } + var yyn63 bool + if x.VolumeSource.VsphereVolume == nil { + yyn63 = true + goto LABEL63 + } + LABEL63: + if yyr2 || yy2arr2 { + if yyn63 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[20] { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[20] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn63 { + r.EncodeNil() + } else { + if x.VsphereVolume == nil { + 
r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } + } + } + var yyn66 bool + if x.VolumeSource.Quobyte == nil { + yyn66 = true + goto LABEL66 + } + LABEL66: + if yyr2 || yy2arr2 { + if yyn66 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[21] { + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[21] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("quobyte")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn66 { + r.EncodeNil() + } else { + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } + } + } + var yyn69 bool + if x.VolumeSource.AzureDisk == nil { + yyn69 = true + goto LABEL69 + } + LABEL69: + if yyr2 || yy2arr2 { + if yyn69 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[22] { + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[22] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn69 { + r.EncodeNil() + } else { + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } + } + } + var yyn72 bool + if x.VolumeSource.PhotonPersistentDisk == nil { + yyn72 = true + goto LABEL72 + } + LABEL72: + if yyr2 || yy2arr2 { + if yyn72 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[23] { + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[23] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn72 { + r.EncodeNil() + } else { + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } + } + } + var yyn75 bool + if x.VolumeSource.Projected == nil { + yyn75 = true + goto LABEL75 + } + LABEL75: + if yyr2 || yy2arr2 { + if yyn75 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[24] { + if x.Projected == nil { + r.EncodeNil() + } else { + x.Projected.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[24] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("projected")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn75 { + r.EncodeNil() + } else { + if x.Projected == nil { + r.EncodeNil() + } else { + x.Projected.CodecEncodeSelf(e) + } + } + } + } + var yyn78 bool + if x.VolumeSource.PortworxVolume == nil { + yyn78 = true + goto LABEL78 + } + LABEL78: + if yyr2 || yy2arr2 { + if yyn78 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[25] { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[25] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("portworxVolume")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn78 { + r.EncodeNil() + } else { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } + } + } + var yyn81 bool + if x.VolumeSource.ScaleIO == nil { + yyn81 = true + goto LABEL81 + } + LABEL81: + if yyr2 || yy2arr2 { + if yyn81 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[26] { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[26] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleIO")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn81 { + r.EncodeNil() + } else { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Volume) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Volume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "hostPath": + if x.VolumeSource.HostPath == nil { + x.VolumeSource.HostPath = new(HostPathVolumeSource) + } + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + case "emptyDir": + if x.VolumeSource.EmptyDir == nil { + x.VolumeSource.EmptyDir = new(EmptyDirVolumeSource) + } + if r.TryDecodeAsNil() { + if x.EmptyDir != nil { + x.EmptyDir = nil + } + } else { + if x.EmptyDir == nil { + x.EmptyDir = new(EmptyDirVolumeSource) + } + x.EmptyDir.CodecDecodeSelf(d) + } + case "gcePersistentDisk": + if x.VolumeSource.GCEPersistentDisk == nil { + x.VolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + 
x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + case "awsElasticBlockStore": + if x.VolumeSource.AWSElasticBlockStore == nil { + x.VolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + case "gitRepo": + if x.VolumeSource.GitRepo == nil { + x.VolumeSource.GitRepo = new(GitRepoVolumeSource) + } + if r.TryDecodeAsNil() { + if x.GitRepo != nil { + x.GitRepo = nil + } + } else { + if x.GitRepo == nil { + x.GitRepo = new(GitRepoVolumeSource) + } + x.GitRepo.CodecDecodeSelf(d) + } + case "secret": + if x.VolumeSource.Secret == nil { + x.VolumeSource.Secret = new(SecretVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Secret != nil { + x.Secret = nil + } + } else { + if x.Secret == nil { + x.Secret = new(SecretVolumeSource) + } + x.Secret.CodecDecodeSelf(d) + } + case "nfs": + if x.VolumeSource.NFS == nil { + x.VolumeSource.NFS = new(NFSVolumeSource) + } + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + case "iscsi": + if x.VolumeSource.ISCSI == nil { + x.VolumeSource.ISCSI = new(ISCSIVolumeSource) + } + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + case "glusterfs": + if x.VolumeSource.Glusterfs == nil { + x.VolumeSource.Glusterfs = new(GlusterfsVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + case "persistentVolumeClaim": + if x.VolumeSource.PersistentVolumeClaim == nil { + x.VolumeSource.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) + } + if r.TryDecodeAsNil() { + if x.PersistentVolumeClaim != nil { + x.PersistentVolumeClaim = nil + } + } else { + if x.PersistentVolumeClaim == nil { + x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) + } + x.PersistentVolumeClaim.CodecDecodeSelf(d) + } + case "rbd": + if x.VolumeSource.RBD == nil { + x.VolumeSource.RBD = new(RBDVolumeSource) + } + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + case "flexVolume": + if x.VolumeSource.FlexVolume == nil { + x.VolumeSource.FlexVolume = new(FlexVolumeSource) + } + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + case "cinder": + if x.VolumeSource.Cinder == nil { + x.VolumeSource.Cinder = new(CinderVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + case "cephfs": + if x.VolumeSource.CephFS == nil { + x.VolumeSource.CephFS = new(CephFSVolumeSource) + } + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + case 
"flocker": + if x.VolumeSource.Flocker == nil { + x.VolumeSource.Flocker = new(FlockerVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + case "downwardAPI": + if x.VolumeSource.DownwardAPI == nil { + x.VolumeSource.DownwardAPI = new(DownwardAPIVolumeSource) + } + if r.TryDecodeAsNil() { + if x.DownwardAPI != nil { + x.DownwardAPI = nil + } + } else { + if x.DownwardAPI == nil { + x.DownwardAPI = new(DownwardAPIVolumeSource) + } + x.DownwardAPI.CodecDecodeSelf(d) + } + case "fc": + if x.VolumeSource.FC == nil { + x.VolumeSource.FC = new(FCVolumeSource) + } + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + case "azureFile": + if x.VolumeSource.AzureFile == nil { + x.VolumeSource.AzureFile = new(AzureFileVolumeSource) + } + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + case "configMap": + if x.VolumeSource.ConfigMap == nil { + x.VolumeSource.ConfigMap = new(ConfigMapVolumeSource) + } + if r.TryDecodeAsNil() { + if x.ConfigMap != nil { + x.ConfigMap = nil + } + } else { + if x.ConfigMap == nil { + x.ConfigMap = new(ConfigMapVolumeSource) + } + x.ConfigMap.CodecDecodeSelf(d) + } + case "vsphereVolume": + if x.VolumeSource.VsphereVolume == nil { + x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + case "quobyte": + if x.VolumeSource.Quobyte == nil { + x.VolumeSource.Quobyte = new(QuobyteVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + case "azureDisk": + if x.VolumeSource.AzureDisk == nil { + x.VolumeSource.AzureDisk = new(AzureDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + case "photonPersistentDisk": + if x.VolumeSource.PhotonPersistentDisk == nil { + x.VolumeSource.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + case "projected": + if x.VolumeSource.Projected == nil { + x.VolumeSource.Projected = new(ProjectedVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Projected != nil { + x.Projected = nil + } + } else { + if x.Projected == nil { + x.Projected = new(ProjectedVolumeSource) + } + x.Projected.CodecDecodeSelf(d) + } + case "portworxVolume": + if x.VolumeSource.PortworxVolume == nil { + x.VolumeSource.PortworxVolume = new(PortworxVolumeSource) + } + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + 
x.PortworxVolume.CodecDecodeSelf(d) + } + case "scaleIO": + if x.VolumeSource.ScaleIO == nil { + x.VolumeSource.ScaleIO = new(ScaleIOVolumeSource) + } + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj32 int + var yyb32 bool + var yyhl32 bool = l >= 0 + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv33 := &x.Name + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*string)(yyv33)) = r.DecodeString() + } + } + if x.VolumeSource.HostPath == nil { + x.VolumeSource.HostPath = new(HostPathVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + if x.VolumeSource.EmptyDir == nil { + x.VolumeSource.EmptyDir = new(EmptyDirVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.EmptyDir != nil { + x.EmptyDir = nil + } + } else { + if x.EmptyDir == nil { + x.EmptyDir = new(EmptyDirVolumeSource) + } + x.EmptyDir.CodecDecodeSelf(d) + } + if x.VolumeSource.GCEPersistentDisk == nil { + x.VolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + if x.VolumeSource.AWSElasticBlockStore == nil { + x.VolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + if x.VolumeSource.GitRepo == nil { + x.VolumeSource.GitRepo = new(GitRepoVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = 
r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.GitRepo != nil { + x.GitRepo = nil + } + } else { + if x.GitRepo == nil { + x.GitRepo = new(GitRepoVolumeSource) + } + x.GitRepo.CodecDecodeSelf(d) + } + if x.VolumeSource.Secret == nil { + x.VolumeSource.Secret = new(SecretVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Secret != nil { + x.Secret = nil + } + } else { + if x.Secret == nil { + x.Secret = new(SecretVolumeSource) + } + x.Secret.CodecDecodeSelf(d) + } + if x.VolumeSource.NFS == nil { + x.VolumeSource.NFS = new(NFSVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + if x.VolumeSource.ISCSI == nil { + x.VolumeSource.ISCSI = new(ISCSIVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + if x.VolumeSource.Glusterfs == nil { + x.VolumeSource.Glusterfs = new(GlusterfsVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + if x.VolumeSource.PersistentVolumeClaim == nil { + x.VolumeSource.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PersistentVolumeClaim != nil { + x.PersistentVolumeClaim = nil + } + } else { + if x.PersistentVolumeClaim == nil { + x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) + } + x.PersistentVolumeClaim.CodecDecodeSelf(d) + } + if x.VolumeSource.RBD == nil { + x.VolumeSource.RBD = new(RBDVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + if x.VolumeSource.FlexVolume == nil { + x.VolumeSource.FlexVolume = 
new(FlexVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + if x.VolumeSource.Cinder == nil { + x.VolumeSource.Cinder = new(CinderVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + if x.VolumeSource.CephFS == nil { + x.VolumeSource.CephFS = new(CephFSVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + if x.VolumeSource.Flocker == nil { + x.VolumeSource.Flocker = new(FlockerVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + if x.VolumeSource.DownwardAPI == nil { + x.VolumeSource.DownwardAPI = new(DownwardAPIVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DownwardAPI != nil { + x.DownwardAPI = nil + } + } else { + if x.DownwardAPI == nil { + x.DownwardAPI = new(DownwardAPIVolumeSource) + } + x.DownwardAPI.CodecDecodeSelf(d) + } + if x.VolumeSource.FC == nil { + x.VolumeSource.FC = new(FCVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + if x.VolumeSource.AzureFile == nil { + x.VolumeSource.AzureFile = new(AzureFileVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + if x.VolumeSource.ConfigMap == nil { + 
x.VolumeSource.ConfigMap = new(ConfigMapVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ConfigMap != nil { + x.ConfigMap = nil + } + } else { + if x.ConfigMap == nil { + x.ConfigMap = new(ConfigMapVolumeSource) + } + x.ConfigMap.CodecDecodeSelf(d) + } + if x.VolumeSource.VsphereVolume == nil { + x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + if x.VolumeSource.Quobyte == nil { + x.VolumeSource.Quobyte = new(QuobyteVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + if x.VolumeSource.AzureDisk == nil { + x.VolumeSource.AzureDisk = new(AzureDiskVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + if x.VolumeSource.PhotonPersistentDisk == nil { + x.VolumeSource.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + if x.VolumeSource.Projected == nil { + x.VolumeSource.Projected = new(ProjectedVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Projected != nil { + x.Projected = nil + } + } else { + if x.Projected == nil { + x.Projected = new(ProjectedVolumeSource) + } + x.Projected.CodecDecodeSelf(d) + } + if x.VolumeSource.PortworxVolume == nil { + x.VolumeSource.PortworxVolume = new(PortworxVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + if x.VolumeSource.ScaleIO == nil { + x.VolumeSource.ScaleIO = new(ScaleIOVolumeSource) + } + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + for { + yyj32++ + if yyhl32 { + yyb32 = yyj32 > l + } else { + yyb32 = r.CheckBreak() + } + if yyb32 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj32-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [26]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.HostPath != nil + yyq2[1] = x.EmptyDir != nil + yyq2[2] = x.GCEPersistentDisk != nil + yyq2[3] = x.AWSElasticBlockStore != nil + yyq2[4] = x.GitRepo != nil + yyq2[5] = x.Secret != nil + yyq2[6] = x.NFS != nil + yyq2[7] = x.ISCSI != nil + yyq2[8] = x.Glusterfs != nil + yyq2[9] = x.PersistentVolumeClaim != nil + yyq2[10] = x.RBD != nil + yyq2[11] = x.FlexVolume != nil + yyq2[12] = x.Cinder != nil + yyq2[13] = x.CephFS != nil + yyq2[14] = x.Flocker != nil + yyq2[15] = x.DownwardAPI != nil + yyq2[16] = x.FC != nil + yyq2[17] = x.AzureFile != nil + yyq2[18] = x.ConfigMap != nil + yyq2[19] = x.VsphereVolume != nil + yyq2[20] = x.Quobyte != nil + yyq2[21] = x.AzureDisk != nil + yyq2[22] = x.PhotonPersistentDisk != nil + yyq2[23] = x.Projected != nil + yyq2[24] = x.PortworxVolume != nil + yyq2[25] = x.ScaleIO != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(26) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.EmptyDir == nil { + r.EncodeNil() + } else { + x.EmptyDir.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("emptyDir")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.EmptyDir == nil { + r.EncodeNil() + } else { + 
x.EmptyDir.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.GitRepo == nil { + r.EncodeNil() + } else { + x.GitRepo.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gitRepo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.GitRepo == nil { + r.EncodeNil() + } else { + x.GitRepo.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.Secret == nil { + r.EncodeNil() + } else { + x.Secret.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secret")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Secret == nil { + r.EncodeNil() + } else { + x.Secret.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("iscsi")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.PersistentVolumeClaim == nil { + r.EncodeNil() + } else { + x.PersistentVolumeClaim.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeClaim")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PersistentVolumeClaim == nil { + r.EncodeNil() + } else { + x.PersistentVolumeClaim.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rbd")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cinder")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cephfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[14] { + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[14] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flocker")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[15] { + if x.DownwardAPI == nil { + r.EncodeNil() + } else { + 
x.DownwardAPI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[15] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("downwardAPI")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DownwardAPI == nil { + r.EncodeNil() + } else { + x.DownwardAPI.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[16] { + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[16] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fc")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[17] { + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[17] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureFile")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[18] { + if x.ConfigMap == nil { + r.EncodeNil() + } else { + x.ConfigMap.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[18] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("configMap")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ConfigMap == nil { + r.EncodeNil() + } else { + x.ConfigMap.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[19] { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[19] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[20] { + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[20] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("quobyte")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[21] { + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[21] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[22] { + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[22] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[23] { + if x.Projected == nil { + r.EncodeNil() + } else { + x.Projected.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[23] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("projected")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Projected == nil { + r.EncodeNil() + } else { + x.Projected.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[24] { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[24] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("portworxVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[25] { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[25] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleIO")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *VolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *VolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + 
yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "hostPath": + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + case "emptyDir": + if r.TryDecodeAsNil() { + if x.EmptyDir != nil { + x.EmptyDir = nil + } + } else { + if x.EmptyDir == nil { + x.EmptyDir = new(EmptyDirVolumeSource) + } + x.EmptyDir.CodecDecodeSelf(d) + } + case "gcePersistentDisk": + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + case "awsElasticBlockStore": + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + case "gitRepo": + if r.TryDecodeAsNil() { + if x.GitRepo != nil { + x.GitRepo = nil + } + } else { + if x.GitRepo == nil { + x.GitRepo = new(GitRepoVolumeSource) + } + x.GitRepo.CodecDecodeSelf(d) + } + case "secret": + if r.TryDecodeAsNil() { + if x.Secret != nil { + x.Secret = nil + } + } else { + if x.Secret == nil { + x.Secret = new(SecretVolumeSource) + } + x.Secret.CodecDecodeSelf(d) + } + case "nfs": + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + case "iscsi": + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + case "glusterfs": + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + case "persistentVolumeClaim": + if r.TryDecodeAsNil() { + if x.PersistentVolumeClaim != nil { + x.PersistentVolumeClaim = nil + } + } else { + if x.PersistentVolumeClaim == nil { + x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) + } + x.PersistentVolumeClaim.CodecDecodeSelf(d) + } + case "rbd": + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + case "flexVolume": + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + case "cinder": + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + case "cephfs": + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + case "flocker": + if r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + case "downwardAPI": + if r.TryDecodeAsNil() { + if x.DownwardAPI != nil { + x.DownwardAPI = nil + } + } else { + if x.DownwardAPI == nil { + 
x.DownwardAPI = new(DownwardAPIVolumeSource) + } + x.DownwardAPI.CodecDecodeSelf(d) + } + case "fc": + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + case "azureFile": + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + case "configMap": + if r.TryDecodeAsNil() { + if x.ConfigMap != nil { + x.ConfigMap = nil + } + } else { + if x.ConfigMap == nil { + x.ConfigMap = new(ConfigMapVolumeSource) + } + x.ConfigMap.CodecDecodeSelf(d) + } + case "vsphereVolume": + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + case "quobyte": + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + case "azureDisk": + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + case "photonPersistentDisk": + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + case "projected": + if r.TryDecodeAsNil() { + if x.Projected != nil { + x.Projected = nil + } + } else { + if x.Projected == nil { + x.Projected = new(ProjectedVolumeSource) + } + x.Projected.CodecDecodeSelf(d) + } + case "portworxVolume": + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + case "scaleIO": + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj30 int + var yyb30 bool + var yyhl30 bool = l >= 0 + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.EmptyDir != nil { + x.EmptyDir = nil + } + } else { + if x.EmptyDir == nil { + x.EmptyDir = new(EmptyDirVolumeSource) + } + x.EmptyDir.CodecDecodeSelf(d) + } + 
yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.GitRepo != nil { + x.GitRepo = nil + } + } else { + if x.GitRepo == nil { + x.GitRepo = new(GitRepoVolumeSource) + } + x.GitRepo.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Secret != nil { + x.Secret = nil + } + } else { + if x.Secret == nil { + x.Secret = new(SecretVolumeSource) + } + x.Secret.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PersistentVolumeClaim != nil { + x.PersistentVolumeClaim = nil + } + } else { + if x.PersistentVolumeClaim == nil { + x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) + } + x.PersistentVolumeClaim.CodecDecodeSelf(d) + } + yyj30++ + if 
yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DownwardAPI != nil { + x.DownwardAPI = nil + } + } else { + if x.DownwardAPI == nil { + x.DownwardAPI = new(DownwardAPIVolumeSource) + } + x.DownwardAPI.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + if x.ConfigMap != nil { + x.ConfigMap = nil + } + } else { + if x.ConfigMap == nil { + x.ConfigMap = new(ConfigMapVolumeSource) + } + x.ConfigMap.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Projected != nil { + x.Projected = nil + } + } else { + if x.Projected == nil { + x.Projected = new(ProjectedVolumeSource) + } + x.Projected.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + for { + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj30-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeClaimVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClaimName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("claimName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClaimName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeClaimVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "claimName": + if r.TryDecodeAsNil() { + x.ClaimName = "" + } else { + yyv4 := &x.ClaimName + yym5 := z.DecBinary() + _ = yym5 + if false { + 
} else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv6 := &x.ReadOnly + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ClaimName = "" + } else { + yyv9 := &x.ClaimName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv11 := &x.ReadOnly + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [19]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.GCEPersistentDisk != nil + yyq2[1] = x.AWSElasticBlockStore != nil + yyq2[2] = x.HostPath != nil + yyq2[3] = x.Glusterfs != nil + yyq2[4] = x.NFS != nil + yyq2[5] = x.RBD != nil + yyq2[6] = x.ISCSI != nil + yyq2[7] = x.Cinder != nil + yyq2[8] = x.CephFS != nil + yyq2[9] = x.FC != nil + yyq2[10] = x.Flocker != nil + yyq2[11] = x.FlexVolume != nil + yyq2[12] = x.AzureFile != nil + yyq2[13] = x.VsphereVolume != nil + yyq2[14] = x.Quobyte != nil + yyq2[15] = x.AzureDisk != nil + yyq2[16] = x.PhotonPersistentDisk != nil + yyq2[17] = x.PortworxVolume != nil + yyq2[18] = x.ScaleIO != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(19) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + 
x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rbd")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("iscsi")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cinder")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cephfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fc")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flocker")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureFile")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[14] { + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[14] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("quobyte")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[15] { + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[15] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[16] { + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[16] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[17] { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[17] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("portworxVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[18] { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[18] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleIO")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "gcePersistentDisk": + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + case "awsElasticBlockStore": + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + case "hostPath": + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + case "glusterfs": + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + case "nfs": + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + case "rbd": + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + case "iscsi": + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + case "cinder": + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + case "cephfs": + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + case "fc": + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + case "flocker": + if r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + case "flexVolume": + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + case "azureFile": + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + case "vsphereVolume": + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + case "quobyte": + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil 
+ } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + case "azureDisk": + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + case "photonPersistentDisk": + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + case "portworxVolume": + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + case "scaleIO": + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj23 int + var yyb23 bool + var yyhl23 bool = l >= 0 + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + 
if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + for { + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj23-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolume) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + 
yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolume) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := 
r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = PersistentVolumeSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = PersistentVolumeStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = 
r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = PersistentVolumeSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = PersistentVolumeStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [24]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Capacity) != 0 + yyq2[1] = x.PersistentVolumeSource.GCEPersistentDisk != nil && x.GCEPersistentDisk != nil + yyq2[2] = x.PersistentVolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil + yyq2[3] = x.PersistentVolumeSource.HostPath != nil && x.HostPath != nil + yyq2[4] = x.PersistentVolumeSource.Glusterfs != nil && x.Glusterfs != nil + yyq2[5] = x.PersistentVolumeSource.NFS != nil && x.NFS != nil + yyq2[6] = x.PersistentVolumeSource.RBD != nil && x.RBD != nil + yyq2[7] = x.PersistentVolumeSource.ISCSI != nil && x.ISCSI != nil + yyq2[8] = x.PersistentVolumeSource.Cinder != nil && x.Cinder != nil + yyq2[9] = x.PersistentVolumeSource.CephFS != nil && x.CephFS != nil + yyq2[10] = x.PersistentVolumeSource.FC != nil && x.FC != nil + yyq2[11] = x.PersistentVolumeSource.Flocker != nil && x.Flocker != nil + yyq2[12] = x.PersistentVolumeSource.FlexVolume != nil && x.FlexVolume != nil + yyq2[13] = x.PersistentVolumeSource.AzureFile != nil && x.AzureFile != nil + yyq2[14] = x.PersistentVolumeSource.VsphereVolume != nil && x.VsphereVolume != nil + yyq2[15] = x.PersistentVolumeSource.Quobyte != nil && x.Quobyte != nil + yyq2[16] = x.PersistentVolumeSource.AzureDisk != nil && x.AzureDisk != nil + yyq2[17] = x.PersistentVolumeSource.PhotonPersistentDisk != nil && x.PhotonPersistentDisk != nil + yyq2[18] = x.PersistentVolumeSource.PortworxVolume != nil && x.PortworxVolume != nil + yyq2[19] = x.PersistentVolumeSource.ScaleIO != nil && x.ScaleIO != nil + yyq2[20] = len(x.AccessModes) != 0 + yyq2[21] = x.ClaimRef != nil + yyq2[22] = x.PersistentVolumeReclaimPolicy != "" + yyq2[23] = x.StorageClassName != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(24) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Capacity == nil { + r.EncodeNil() + } else { + x.Capacity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("capacity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Capacity == nil { + r.EncodeNil() + } else { + x.Capacity.CodecEncodeSelf(e) + } + } + } + var yyn6 bool + if x.PersistentVolumeSource.GCEPersistentDisk == nil { + yyn6 = true + goto LABEL6 + } + LABEL6: + if yyr2 || yy2arr2 { + if yyn6 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn6 { + r.EncodeNil() + } else { + if x.GCEPersistentDisk == nil { + r.EncodeNil() + } else { + x.GCEPersistentDisk.CodecEncodeSelf(e) + } + } + } + } + var yyn9 bool + if x.PersistentVolumeSource.AWSElasticBlockStore == nil { + yyn9 = true + goto LABEL9 + } + LABEL9: + if yyr2 || yy2arr2 { + if yyn9 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn9 { + r.EncodeNil() + } else { + if x.AWSElasticBlockStore == nil { + r.EncodeNil() + } else { + x.AWSElasticBlockStore.CodecEncodeSelf(e) + } + } + } + } + var yyn12 bool + if x.PersistentVolumeSource.HostPath == nil { + yyn12 = true + goto LABEL12 + } + LABEL12: + if yyr2 || yy2arr2 { + if yyn12 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn12 { + r.EncodeNil() + } else { + if x.HostPath == nil { + r.EncodeNil() + } else { + x.HostPath.CodecEncodeSelf(e) + } + } + } + } + var yyn15 bool + if x.PersistentVolumeSource.Glusterfs == nil { + yyn15 = true + goto LABEL15 + } + LABEL15: + if yyr2 || yy2arr2 { + if yyn15 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn15 { + r.EncodeNil() + } else { + if x.Glusterfs == nil { + r.EncodeNil() + } else { + x.Glusterfs.CodecEncodeSelf(e) + } + } + } + } + var yyn18 bool + if x.PersistentVolumeSource.NFS == nil { + yyn18 = true + goto LABEL18 + } + LABEL18: + if yyr2 || yy2arr2 { + if yyn18 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } else { + 
r.EncodeNil() + } + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn18 { + r.EncodeNil() + } else { + if x.NFS == nil { + r.EncodeNil() + } else { + x.NFS.CodecEncodeSelf(e) + } + } + } + } + var yyn21 bool + if x.PersistentVolumeSource.RBD == nil { + yyn21 = true + goto LABEL21 + } + LABEL21: + if yyr2 || yy2arr2 { + if yyn21 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rbd")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn21 { + r.EncodeNil() + } else { + if x.RBD == nil { + r.EncodeNil() + } else { + x.RBD.CodecEncodeSelf(e) + } + } + } + } + var yyn24 bool + if x.PersistentVolumeSource.ISCSI == nil { + yyn24 = true + goto LABEL24 + } + LABEL24: + if yyr2 || yy2arr2 { + if yyn24 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("iscsi")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn24 { + r.EncodeNil() + } else { + if x.ISCSI == nil { + r.EncodeNil() + } else { + x.ISCSI.CodecEncodeSelf(e) + } + } + } + } + var yyn27 bool + if x.PersistentVolumeSource.Cinder == nil { + yyn27 = true + goto LABEL27 + } + LABEL27: + if yyr2 || yy2arr2 { + if yyn27 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cinder")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn27 { + r.EncodeNil() + } else { + if x.Cinder == nil { + r.EncodeNil() + } else { + x.Cinder.CodecEncodeSelf(e) + } + } + } + } + var yyn30 bool + if x.PersistentVolumeSource.CephFS == nil { + yyn30 = true + goto LABEL30 + } + LABEL30: + if yyr2 || yy2arr2 { + if yyn30 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cephfs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn30 { + r.EncodeNil() + } else { + if x.CephFS == nil { + r.EncodeNil() + } else { + x.CephFS.CodecEncodeSelf(e) + } + } + } + } + var yyn33 bool + if x.PersistentVolumeSource.FC == nil { + yyn33 = true + goto LABEL33 + } + LABEL33: + if yyr2 || yy2arr2 { + if yyn33 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[10] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fc")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn33 { + r.EncodeNil() + } else { + if x.FC == nil { + r.EncodeNil() + } else { + x.FC.CodecEncodeSelf(e) + } + } + } + } + var yyn36 bool + if x.PersistentVolumeSource.Flocker == nil { + yyn36 = true + goto LABEL36 + } + LABEL36: + if yyr2 || yy2arr2 { + if yyn36 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flocker")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn36 { + r.EncodeNil() + } else { + if x.Flocker == nil { + r.EncodeNil() + } else { + x.Flocker.CodecEncodeSelf(e) + } + } + } + } + var yyn39 bool + if x.PersistentVolumeSource.FlexVolume == nil { + yyn39 = true + goto LABEL39 + } + LABEL39: + if yyr2 || yy2arr2 { + if yyn39 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn39 { + r.EncodeNil() + } else { + if x.FlexVolume == nil { + r.EncodeNil() + } else { + x.FlexVolume.CodecEncodeSelf(e) + } + } + } + } + var yyn42 bool + if x.PersistentVolumeSource.AzureFile == nil { + yyn42 = true + goto LABEL42 + } + LABEL42: + if yyr2 || yy2arr2 { + if yyn42 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureFile")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn42 { + r.EncodeNil() + } else { + if x.AzureFile == nil { + r.EncodeNil() + } else { + x.AzureFile.CodecEncodeSelf(e) + } + } + } + } + var yyn45 bool + if x.PersistentVolumeSource.VsphereVolume == nil { + yyn45 = true + goto LABEL45 + } + LABEL45: + if yyr2 || yy2arr2 { + if yyn45 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[14] { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[14] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn45 { + r.EncodeNil() + } else { + if x.VsphereVolume == nil { + r.EncodeNil() + } else { + x.VsphereVolume.CodecEncodeSelf(e) + } + } + } + } + var yyn48 bool + if x.PersistentVolumeSource.Quobyte == nil { + yyn48 = true + goto LABEL48 + } + LABEL48: + if yyr2 || yy2arr2 { + if yyn48 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[15] { + if x.Quobyte == nil { + r.EncodeNil() + } else { + 
x.Quobyte.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[15] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("quobyte")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn48 { + r.EncodeNil() + } else { + if x.Quobyte == nil { + r.EncodeNil() + } else { + x.Quobyte.CodecEncodeSelf(e) + } + } + } + } + var yyn51 bool + if x.PersistentVolumeSource.AzureDisk == nil { + yyn51 = true + goto LABEL51 + } + LABEL51: + if yyr2 || yy2arr2 { + if yyn51 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[16] { + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[16] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("azureDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn51 { + r.EncodeNil() + } else { + if x.AzureDisk == nil { + r.EncodeNil() + } else { + x.AzureDisk.CodecEncodeSelf(e) + } + } + } + } + var yyn54 bool + if x.PersistentVolumeSource.PhotonPersistentDisk == nil { + yyn54 = true + goto LABEL54 + } + LABEL54: + if yyr2 || yy2arr2 { + if yyn54 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[17] { + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[17] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("photonPersistentDisk")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn54 { + r.EncodeNil() + } else { + if x.PhotonPersistentDisk == nil { + r.EncodeNil() + } else { + x.PhotonPersistentDisk.CodecEncodeSelf(e) + } + } + } + } + var yyn57 bool + if x.PersistentVolumeSource.PortworxVolume == nil { + yyn57 = true + goto LABEL57 + } + LABEL57: + if yyr2 || yy2arr2 { + if yyn57 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[18] { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[18] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("portworxVolume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn57 { + r.EncodeNil() + } else { + if x.PortworxVolume == nil { + r.EncodeNil() + } else { + x.PortworxVolume.CodecEncodeSelf(e) + } + } + } + } + var yyn60 bool + if x.PersistentVolumeSource.ScaleIO == nil { + yyn60 = true + goto LABEL60 + } + LABEL60: + if yyr2 || yy2arr2 { + if yyn60 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[19] { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[19] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleIO")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn60 { + r.EncodeNil() + } else { + if x.ScaleIO == nil { + r.EncodeNil() + } else { + x.ScaleIO.CodecEncodeSelf(e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[20] { + if x.AccessModes == nil { + r.EncodeNil() + } 
else { + yym64 := z.EncBinary() + _ = yym64 + if false { + } else { + h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[20] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("accessModes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AccessModes == nil { + r.EncodeNil() + } else { + yym65 := z.EncBinary() + _ = yym65 + if false { + } else { + h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[21] { + if x.ClaimRef == nil { + r.EncodeNil() + } else { + x.ClaimRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[21] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("claimRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ClaimRef == nil { + r.EncodeNil() + } else { + x.ClaimRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[22] { + x.PersistentVolumeReclaimPolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[22] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeReclaimPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.PersistentVolumeReclaimPolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[23] { + yym73 := z.EncBinary() + _ = yym73 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StorageClassName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[23] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("storageClassName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym74 := z.EncBinary() + _ = yym74 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StorageClassName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; 
yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "capacity": + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv4 := &x.Capacity + yyv4.CodecDecodeSelf(d) + } + case "gcePersistentDisk": + if x.PersistentVolumeSource.GCEPersistentDisk == nil { + x.PersistentVolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + case "awsElasticBlockStore": + if x.PersistentVolumeSource.AWSElasticBlockStore == nil { + x.PersistentVolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + case "hostPath": + if x.PersistentVolumeSource.HostPath == nil { + x.PersistentVolumeSource.HostPath = new(HostPathVolumeSource) + } + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + case "glusterfs": + if x.PersistentVolumeSource.Glusterfs == nil { + x.PersistentVolumeSource.Glusterfs = new(GlusterfsVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + case "nfs": + if x.PersistentVolumeSource.NFS == nil { + x.PersistentVolumeSource.NFS = new(NFSVolumeSource) + } + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + case "rbd": + if x.PersistentVolumeSource.RBD == nil { + x.PersistentVolumeSource.RBD = new(RBDVolumeSource) + } + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + case "iscsi": + if x.PersistentVolumeSource.ISCSI == nil { + x.PersistentVolumeSource.ISCSI = new(ISCSIVolumeSource) + } + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + case "cinder": + if x.PersistentVolumeSource.Cinder == nil { + x.PersistentVolumeSource.Cinder = new(CinderVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + case "cephfs": + if x.PersistentVolumeSource.CephFS == nil { + x.PersistentVolumeSource.CephFS = new(CephFSVolumeSource) + } + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + case "fc": + if x.PersistentVolumeSource.FC == nil { + x.PersistentVolumeSource.FC = new(FCVolumeSource) + } + if r.TryDecodeAsNil() 
{ + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + case "flocker": + if x.PersistentVolumeSource.Flocker == nil { + x.PersistentVolumeSource.Flocker = new(FlockerVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + case "flexVolume": + if x.PersistentVolumeSource.FlexVolume == nil { + x.PersistentVolumeSource.FlexVolume = new(FlexVolumeSource) + } + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + case "azureFile": + if x.PersistentVolumeSource.AzureFile == nil { + x.PersistentVolumeSource.AzureFile = new(AzureFileVolumeSource) + } + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + case "vsphereVolume": + if x.PersistentVolumeSource.VsphereVolume == nil { + x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + case "quobyte": + if x.PersistentVolumeSource.Quobyte == nil { + x.PersistentVolumeSource.Quobyte = new(QuobyteVolumeSource) + } + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + case "azureDisk": + if x.PersistentVolumeSource.AzureDisk == nil { + x.PersistentVolumeSource.AzureDisk = new(AzureDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + case "photonPersistentDisk": + if x.PersistentVolumeSource.PhotonPersistentDisk == nil { + x.PersistentVolumeSource.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + case "portworxVolume": + if x.PersistentVolumeSource.PortworxVolume == nil { + x.PersistentVolumeSource.PortworxVolume = new(PortworxVolumeSource) + } + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + case "scaleIO": + if x.PersistentVolumeSource.ScaleIO == nil { + x.PersistentVolumeSource.ScaleIO = new(ScaleIOVolumeSource) + } + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + case "accessModes": + if r.TryDecodeAsNil() { + x.AccessModes = nil + } else { + yyv24 := &x.AccessModes + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv24), d) + } + } + 
case "claimRef": + if r.TryDecodeAsNil() { + if x.ClaimRef != nil { + x.ClaimRef = nil + } + } else { + if x.ClaimRef == nil { + x.ClaimRef = new(ObjectReference) + } + x.ClaimRef.CodecDecodeSelf(d) + } + case "persistentVolumeReclaimPolicy": + if r.TryDecodeAsNil() { + x.PersistentVolumeReclaimPolicy = "" + } else { + yyv27 := &x.PersistentVolumeReclaimPolicy + yyv27.CodecDecodeSelf(d) + } + case "storageClassName": + if r.TryDecodeAsNil() { + x.StorageClassName = "" + } else { + yyv28 := &x.StorageClassName + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*string)(yyv28)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj30 int + var yyb30 bool + var yyhl30 bool = l >= 0 + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv31 := &x.Capacity + yyv31.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.GCEPersistentDisk == nil { + x.PersistentVolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.GCEPersistentDisk != nil { + x.GCEPersistentDisk = nil + } + } else { + if x.GCEPersistentDisk == nil { + x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) + } + x.GCEPersistentDisk.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.AWSElasticBlockStore == nil { + x.PersistentVolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AWSElasticBlockStore != nil { + x.AWSElasticBlockStore = nil + } + } else { + if x.AWSElasticBlockStore == nil { + x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) + } + x.AWSElasticBlockStore.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.HostPath == nil { + x.PersistentVolumeSource.HostPath = new(HostPathVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HostPath != nil { + x.HostPath = nil + } + } else { + if x.HostPath == nil { + x.HostPath = new(HostPathVolumeSource) + } + x.HostPath.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.Glusterfs == nil { + x.PersistentVolumeSource.Glusterfs = new(GlusterfsVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if 
x.Glusterfs != nil { + x.Glusterfs = nil + } + } else { + if x.Glusterfs == nil { + x.Glusterfs = new(GlusterfsVolumeSource) + } + x.Glusterfs.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.NFS == nil { + x.PersistentVolumeSource.NFS = new(NFSVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NFS != nil { + x.NFS = nil + } + } else { + if x.NFS == nil { + x.NFS = new(NFSVolumeSource) + } + x.NFS.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.RBD == nil { + x.PersistentVolumeSource.RBD = new(RBDVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RBD != nil { + x.RBD = nil + } + } else { + if x.RBD == nil { + x.RBD = new(RBDVolumeSource) + } + x.RBD.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.ISCSI == nil { + x.PersistentVolumeSource.ISCSI = new(ISCSIVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ISCSI != nil { + x.ISCSI = nil + } + } else { + if x.ISCSI == nil { + x.ISCSI = new(ISCSIVolumeSource) + } + x.ISCSI.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.Cinder == nil { + x.PersistentVolumeSource.Cinder = new(CinderVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Cinder != nil { + x.Cinder = nil + } + } else { + if x.Cinder == nil { + x.Cinder = new(CinderVolumeSource) + } + x.Cinder.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.CephFS == nil { + x.PersistentVolumeSource.CephFS = new(CephFSVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CephFS != nil { + x.CephFS = nil + } + } else { + if x.CephFS == nil { + x.CephFS = new(CephFSVolumeSource) + } + x.CephFS.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.FC == nil { + x.PersistentVolumeSource.FC = new(FCVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FC != nil { + x.FC = nil + } + } else { + if x.FC == nil { + x.FC = new(FCVolumeSource) + } + x.FC.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.Flocker == nil { + x.PersistentVolumeSource.Flocker = new(FlockerVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + if x.Flocker != nil { + x.Flocker = nil + } + } else { + if x.Flocker == nil { + x.Flocker = new(FlockerVolumeSource) + } + x.Flocker.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.FlexVolume == nil { + x.PersistentVolumeSource.FlexVolume = new(FlexVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FlexVolume != nil { + x.FlexVolume = nil + } + } else { + if x.FlexVolume == nil { + x.FlexVolume = new(FlexVolumeSource) + } + x.FlexVolume.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.AzureFile == nil { + x.PersistentVolumeSource.AzureFile = new(AzureFileVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureFile != nil { + x.AzureFile = nil + } + } else { + if x.AzureFile == nil { + x.AzureFile = new(AzureFileVolumeSource) + } + x.AzureFile.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.VsphereVolume == nil { + x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.VsphereVolume != nil { + x.VsphereVolume = nil + } + } else { + if x.VsphereVolume == nil { + x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) + } + x.VsphereVolume.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.Quobyte == nil { + x.PersistentVolumeSource.Quobyte = new(QuobyteVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Quobyte != nil { + x.Quobyte = nil + } + } else { + if x.Quobyte == nil { + x.Quobyte = new(QuobyteVolumeSource) + } + x.Quobyte.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.AzureDisk == nil { + x.PersistentVolumeSource.AzureDisk = new(AzureDiskVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AzureDisk != nil { + x.AzureDisk = nil + } + } else { + if x.AzureDisk == nil { + x.AzureDisk = new(AzureDiskVolumeSource) + } + x.AzureDisk.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.PhotonPersistentDisk == nil { + x.PersistentVolumeSource.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PhotonPersistentDisk != nil { + x.PhotonPersistentDisk = nil + } + } else { + if x.PhotonPersistentDisk == nil { + x.PhotonPersistentDisk = new(PhotonPersistentDiskVolumeSource) + } + 
x.PhotonPersistentDisk.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.PortworxVolume == nil { + x.PersistentVolumeSource.PortworxVolume = new(PortworxVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PortworxVolume != nil { + x.PortworxVolume = nil + } + } else { + if x.PortworxVolume == nil { + x.PortworxVolume = new(PortworxVolumeSource) + } + x.PortworxVolume.CodecDecodeSelf(d) + } + if x.PersistentVolumeSource.ScaleIO == nil { + x.PersistentVolumeSource.ScaleIO = new(ScaleIOVolumeSource) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ScaleIO != nil { + x.ScaleIO = nil + } + } else { + if x.ScaleIO == nil { + x.ScaleIO = new(ScaleIOVolumeSource) + } + x.ScaleIO.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AccessModes = nil + } else { + yyv51 := &x.AccessModes + yym52 := z.DecBinary() + _ = yym52 + if false { + } else { + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv51), d) + } + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ClaimRef != nil { + x.ClaimRef = nil + } + } else { + if x.ClaimRef == nil { + x.ClaimRef = new(ObjectReference) + } + x.ClaimRef.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PersistentVolumeReclaimPolicy = "" + } else { + yyv54 := &x.PersistentVolumeReclaimPolicy + yyv54.CodecDecodeSelf(d) + } + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StorageClassName = "" + } else { + yyv55 := &x.StorageClassName + yym56 := z.DecBinary() + _ = yym56 + if false { + } else { + *((*string)(yyv55)) = r.DecodeString() + } + } + for { + yyj30++ + if yyhl30 { + yyb30 = yyj30 > l + } else { + yyb30 = r.CheckBreak() + } + if yyb30 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj30-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x PersistentVolumeReclaimPolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PersistentVolumeReclaimPolicy) CodecDecodeSelf(d 
*codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *PersistentVolumeStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Phase != "" + yyq2[1] = x.Message != "" + yyq2[2] = x.Reason != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Phase.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("phase")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Phase.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "phase": + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv4 := &x.Phase + yyv4.CodecDecodeSelf(d) + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv5 := &x.Message + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv7 := &x.Reason + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv10 := &x.Phase + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv11 := &x.Message + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv13 := &x.Reason + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 
int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePersistentVolume(([]PersistentVolume)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePersistentVolume(([]PersistentVolume)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + 
x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePersistentVolume((*[]PersistentVolume)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePersistentVolume((*[]PersistentVolume)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeClaim) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) 
+ } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeClaim) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeClaim) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = PersistentVolumeClaimSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = PersistentVolumeClaimStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeClaim) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = PersistentVolumeClaimSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = PersistentVolumeClaimStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeClaimList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePersistentVolumeClaim(([]PersistentVolumeClaim)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePersistentVolumeClaim(([]PersistentVolumeClaim)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeClaimList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeClaimList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } 
else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeClaimList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeClaimSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() 
&& z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.AccessModes) != 0 + yyq2[1] = x.Selector != nil + yyq2[2] = true + yyq2[3] = x.VolumeName != "" + yyq2[4] = x.StorageClassName != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.AccessModes == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("accessModes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AccessModes == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.Resources + yy10.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resources")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.Resources + yy12.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.StorageClassName == nil { + r.EncodeNil() + } else { + yy18 := *x.StorageClassName + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy18)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("storageClassName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.StorageClassName == nil { + r.EncodeNil() + } else { + yy20 := *x.StorageClassName + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy20)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeClaimSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "accessModes": + if r.TryDecodeAsNil() { + x.AccessModes = nil + } else { + yyv4 := &x.AccessModes + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv4), d) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg2_v1.LabelSelector) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + case "resources": + if r.TryDecodeAsNil() { + x.Resources = ResourceRequirements{} + } else { + yyv8 := &x.Resources + yyv8.CodecDecodeSelf(d) + } + case "volumeName": + if r.TryDecodeAsNil() { + x.VolumeName = "" + } else { + yyv9 := &x.VolumeName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + case "storageClassName": + if r.TryDecodeAsNil() { + if x.StorageClassName != nil { + x.StorageClassName = nil + } + } else { + if x.StorageClassName == nil { + x.StorageClassName = new(string) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(x.StorageClassName)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + 
var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AccessModes = nil + } else { + yyv14 := &x.AccessModes + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv14), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg2_v1.LabelSelector) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Resources = ResourceRequirements{} + } else { + yyv18 := &x.Resources + yyv18.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeName = "" + } else { + yyv19 := &x.VolumeName + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.StorageClassName != nil { + x.StorageClassName = nil + } + } else { + if x.StorageClassName == nil { + x.StorageClassName = new(string) + } + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(x.StorageClassName)) = r.DecodeString() + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PersistentVolumeClaimStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Phase != "" + yyq2[1] = len(x.AccessModes) != 0 + yyq2[2] = len(x.Capacity) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Phase.CodecEncodeSelf(e) + } else { + 
r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("phase")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Phase.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.AccessModes == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("accessModes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AccessModes == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Capacity == nil { + r.EncodeNil() + } else { + x.Capacity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("capacity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Capacity == nil { + r.EncodeNil() + } else { + x.Capacity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PersistentVolumeClaimStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "phase": + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv4 := &x.Phase + yyv4.CodecDecodeSelf(d) + } + case "accessModes": + if r.TryDecodeAsNil() { + x.AccessModes = nil + } else { + yyv5 := &x.AccessModes + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv5), d) + } + } + case "capacity": + if 
r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv7 := &x.Capacity + yyv7.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv9 := &x.Phase + yyv9.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AccessModes = nil + } else { + yyv10 := &x.AccessModes + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv10), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv12 := &x.Capacity + yyv12.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x PersistentVolumeAccessMode) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PersistentVolumeAccessMode) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x PersistentVolumePhase) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PersistentVolumePhase) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x PersistentVolumeClaimPhase) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PersistentVolumeClaimPhase) 
CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *HostPathVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HostPathVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HostPathVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HostPathVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool 
= l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv7 := &x.Path + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EmptyDirVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Medium != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Medium.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("medium")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Medium.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EmptyDirVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EmptyDirVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "medium": + if r.TryDecodeAsNil() { + x.Medium = "" + } else { + yyv4 := &x.Medium + yyv4.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end 
for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EmptyDirVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Medium = "" + } else { + yyv6 := &x.Medium + yyv6.CodecDecodeSelf(d) + } + for { + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj5-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *GlusterfsVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.EndpointsName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("endpoints")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.EndpointsName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *GlusterfsVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if 
false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *GlusterfsVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "endpoints": + if r.TryDecodeAsNil() { + x.EndpointsName = "" + } else { + yyv4 := &x.EndpointsName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv6 := &x.Path + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *GlusterfsVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.EndpointsName = "" + } else { + yyv11 := &x.EndpointsName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv13 := &x.Path + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if 
yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RBDVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.FSType != "" + yyq2[3] = x.RBDPool != "" + yyq2[4] = x.RadosUser != "" + yyq2[5] = x.Keyring != "" + yyq2[6] = x.SecretRef != nil + yyq2[7] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(8) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.CephMonitors == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.CephMonitors, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("monitors")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CephMonitors == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.CephMonitors, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.RBDImage)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("image")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.RBDImage)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.RBDPool)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("pool")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.RBDPool)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.RadosUser)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("user")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.RadosUser)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Keyring)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("keyring")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Keyring)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RBDVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RBDVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + 
yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "monitors": + if r.TryDecodeAsNil() { + x.CephMonitors = nil + } else { + yyv4 := &x.CephMonitors + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + case "image": + if r.TryDecodeAsNil() { + x.RBDImage = "" + } else { + yyv6 := &x.RBDImage + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv8 := &x.FSType + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "pool": + if r.TryDecodeAsNil() { + x.RBDPool = "" + } else { + yyv10 := &x.RBDPool + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "user": + if r.TryDecodeAsNil() { + x.RadosUser = "" + } else { + yyv12 := &x.RadosUser + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "keyring": + if r.TryDecodeAsNil() { + x.Keyring = "" + } else { + yyv14 := &x.Keyring + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "secretRef": + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv17 := &x.ReadOnly + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*bool)(yyv17)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RBDVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj19 int + var yyb19 bool + var yyhl19 bool = l >= 0 + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CephMonitors = nil + } else { + yyv20 := &x.CephMonitors + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + z.F.DecSliceStringX(yyv20, false, d) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RBDImage = "" + } else { + yyv22 := &x.RBDImage + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*string)(yyv22)) = r.DecodeString() + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv24 := &x.FSType + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + *((*string)(yyv24)) = r.DecodeString() + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RBDPool = "" + } else { + yyv26 := &x.RBDPool + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*string)(yyv26)) = r.DecodeString() + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RadosUser = "" + } else { + yyv28 := &x.RadosUser + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*string)(yyv28)) = r.DecodeString() + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Keyring = "" + } else { + yyv30 := &x.Keyring + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*string)(yyv30)) = r.DecodeString() + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv33 := &x.ReadOnly + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*bool)(yyv33)) = r.DecodeBool() + } + } + for { + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj19-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CinderVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + yyq2[2] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CinderVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CinderVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "volumeID": + if r.TryDecodeAsNil() { + x.VolumeID = "" + } else { + yyv4 := &x.VolumeID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CinderVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeID = "" + } else { + yyv11 := &x.VolumeID + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv13 := &x.FSType + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CephFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Path != "" + yyq2[2] = x.User != "" + yyq2[3] = x.SecretFile != "" + yyq2[4] = x.SecretRef != nil + yyq2[5] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Monitors == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.Monitors, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("monitors")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Monitors == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.Monitors, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + 
yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("user")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretFile)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretFile")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretFile)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CephFSVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CephFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, 
r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "monitors": + if r.TryDecodeAsNil() { + x.Monitors = nil + } else { + yyv4 := &x.Monitors + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv6 := &x.Path + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "user": + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv8 := &x.User + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "secretFile": + if r.TryDecodeAsNil() { + x.SecretFile = "" + } else { + yyv10 := &x.SecretFile + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "secretRef": + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv13 := &x.ReadOnly + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*bool)(yyv13)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CephFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj15 int + var yyb15 bool + var yyhl15 bool = l >= 0 + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Monitors = nil + } else { + yyv16 := &x.Monitors + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + z.F.DecSliceStringX(yyv16, false, d) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv18 := &x.Path + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv20 := &x.User + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(yyv20)) = r.DecodeString() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + 
x.SecretFile = "" + } else { + yyv22 := &x.SecretFile + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*string)(yyv22)) = r.DecodeString() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv25 := &x.ReadOnly + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*bool)(yyv25)) = r.DecodeBool() + } + } + for { + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj15-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *FlockerVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.DatasetName != "" + yyq2[1] = x.DatasetUUID != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DatasetName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("datasetName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DatasetName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DatasetUUID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("datasetUUID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DatasetUUID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *FlockerVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *FlockerVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "datasetName": + if r.TryDecodeAsNil() { + x.DatasetName = "" + } else { + yyv4 := &x.DatasetName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "datasetUUID": + if r.TryDecodeAsNil() { + x.DatasetUUID = "" + } else { + yyv6 := &x.DatasetUUID + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *FlockerVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DatasetName = "" + } else { + yyv9 := &x.DatasetName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DatasetUUID = "" + } else { + yyv11 := &x.DatasetUUID + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x StorageMedium) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *StorageMedium) 
CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x Protocol) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *Protocol) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *GCEPersistentDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + yyq2[2] = x.Partition != 0 + yyq2[3] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PDName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("pdName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PDName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.Partition)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("partition")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.Partition)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] 
{ + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *GCEPersistentDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "pdName": + if r.TryDecodeAsNil() { + x.PDName = "" + } else { + yyv4 := &x.PDName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "partition": + if r.TryDecodeAsNil() { + x.Partition = 0 + } else { + yyv8 := &x.Partition + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv10 := &x.ReadOnly + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PDName = "" + } else { + yyv13 := &x.PDName + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = 
r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv15 := &x.FSType + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Partition = 0 + } else { + yyv17 := &x.Partition + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv19 := &x.ReadOnly + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *QuobyteVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.ReadOnly != false + yyq2[3] = x.User != "" + yyq2[4] = x.Group != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Registry)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("registry")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Registry)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Volume)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volume")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Volume)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("user")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Group)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("group")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Group)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *QuobyteVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *QuobyteVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "registry": + if r.TryDecodeAsNil() { + x.Registry = "" + } else { + yyv4 := &x.Registry + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "volume": + if r.TryDecodeAsNil() { + x.Volume = "" + } else { + yyv6 := &x.Volume + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv8 := 
&x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + case "user": + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv10 := &x.User + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "group": + if r.TryDecodeAsNil() { + x.Group = "" + } else { + yyv12 := &x.Group + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *QuobyteVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Registry = "" + } else { + yyv15 := &x.Registry + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Volume = "" + } else { + yyv17 := &x.Volume + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv19 := &x.ReadOnly + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv21 := &x.User + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Group = "" + } else { + yyv23 := &x.Group + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *FlexVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && 
z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + yyq2[2] = x.SecretRef != nil + yyq2[3] = x.ReadOnly != false + yyq2[4] = len(x.Options) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Driver)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("driver")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Driver)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Options == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncMapStringStringV(x.Options, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("options")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Options == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + z.F.EncMapStringStringV(x.Options, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x 
*FlexVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *FlexVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "driver": + if r.TryDecodeAsNil() { + x.Driver = "" + } else { + yyv4 := &x.Driver + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "secretRef": + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv9 := &x.ReadOnly + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*bool)(yyv9)) = r.DecodeBool() + } + } + case "options": + if r.TryDecodeAsNil() { + x.Options = nil + } else { + yyv11 := &x.Options + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + z.F.DecMapStringStringX(yyv11, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *FlexVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Driver = "" + } else { + yyv14 := &x.Driver + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv16 := &x.FSType + yym17 := z.DecBinary() + _ = yym17 + if false { + } 
else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv19 := &x.ReadOnly + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Options = nil + } else { + yyv21 := &x.Options + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + z.F.DecMapStringStringX(yyv21, false, d) + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *AWSElasticBlockStoreVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + yyq2[2] = x.Partition != 0 + yyq2[3] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.Partition)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("partition")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.Partition)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *AWSElasticBlockStoreVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "volumeID": + if r.TryDecodeAsNil() { + x.VolumeID = "" + } else { + yyv4 := &x.VolumeID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "partition": + if r.TryDecodeAsNil() { + x.Partition = 0 + } else { + yyv8 := &x.Partition + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv10 := &x.ReadOnly + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for 
yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeID = "" + } else { + yyv13 := &x.VolumeID + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv15 := &x.FSType + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Partition = 0 + } else { + yyv17 := &x.Partition + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv19 := &x.ReadOnly + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *GitRepoVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Revision != "" + yyq2[2] = x.Directory != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Repository)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("repository")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Repository)) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Revision)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("revision")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Revision)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Directory)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("directory")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Directory)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *GitRepoVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *GitRepoVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "repository": + if r.TryDecodeAsNil() { + x.Repository = "" + } else { + yyv4 := &x.Repository + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "revision": + if r.TryDecodeAsNil() { + x.Revision = "" + } else { + yyv6 := &x.Revision + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "directory": + if r.TryDecodeAsNil() { + x.Directory = "" + } else { + yyv8 := &x.Directory + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + 
+func (x *GitRepoVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Repository = "" + } else { + yyv11 := &x.Repository + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Revision = "" + } else { + yyv13 := &x.Revision + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Directory = "" + } else { + yyv15 := &x.Directory + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SecretVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.SecretName != "" + yyq2[1] = len(x.Items) != 0 + yyq2[2] = x.DefaultMode != nil + yyq2[3] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Items == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy10 := *x.DefaultMode + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("defaultMode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy12 := *x.DefaultMode + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy15 := *x.Optional + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(yy15)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy17 := *x.Optional + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeBool(bool(yy17)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SecretVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SecretVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "secretName": + if r.TryDecodeAsNil() { + x.SecretName = "" + } else { + yyv4 := &x.SecretName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = 
nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv6), d) + } + } + case "defaultMode": + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SecretVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SecretName = "" + } else { + yyv13 := &x.SecretName + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv15 := &x.Items + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv15), d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SecretProjection) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + 
yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = len(x.Items) != 0 + yyq2[2] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Items == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy10 := *x.Optional + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy12 := *x.Optional + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SecretProjection) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SecretProjection) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + 
var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv6), d) + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SecretProjection) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv11 := &x.Name + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } 
else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Server)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("server")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Server)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NFSVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "server": + if r.TryDecodeAsNil() { + x.Server = "" + } else { + yyv4 := &x.Server + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv6 := &x.Path + yym7 := z.DecBinary() + _ 
= yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Server = "" + } else { + yyv11 := &x.Server + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv13 := &x.Path + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ISCSIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[3] = x.ISCSIInterface != "" + yyq2[4] = x.FSType != "" + yyq2[5] = x.ReadOnly != false + yyq2[6] = len(x.Portals) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TargetPortal)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetPortal")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TargetPortal)) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.IQN)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("iqn")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.IQN)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.Lun)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lun")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.Lun)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ISCSIInterface)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("iscsiInterface")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ISCSIInterface)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.Portals == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + z.F.EncSliceStringV(x.Portals, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("portals")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Portals == nil { + r.EncodeNil() + } else { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + z.F.EncSliceStringV(x.Portals, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ISCSIVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ISCSIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "targetPortal": + if r.TryDecodeAsNil() { + x.TargetPortal = "" + } else { + yyv4 := &x.TargetPortal + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "iqn": + if r.TryDecodeAsNil() { + x.IQN = "" + } else { + yyv6 := &x.IQN + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "lun": + if r.TryDecodeAsNil() { + x.Lun = 0 + } else { + yyv8 := &x.Lun + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "iscsiInterface": + if r.TryDecodeAsNil() { + x.ISCSIInterface = "" + } else { + yyv10 := &x.ISCSIInterface + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv12 := &x.FSType + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv14 := &x.ReadOnly + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + case "portals": + if r.TryDecodeAsNil() { + x.Portals = nil + } else { + yyv16 := &x.Portals + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + z.F.DecSliceStringX(yyv16, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetPortal = "" + 
} else { + yyv19 := &x.TargetPortal + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.IQN = "" + } else { + yyv21 := &x.IQN + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Lun = 0 + } else { + yyv23 := &x.Lun + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ISCSIInterface = "" + } else { + yyv25 := &x.ISCSIInterface + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv27 := &x.FSType + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv29 := &x.ReadOnly + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Portals = nil + } else { + yyv31 := &x.Portals + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + z.F.DecSliceStringX(yyv31, false, d) + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *FCVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.FSType != "" + yyq2[3] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, 
b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.TargetWWNs == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.TargetWWNs, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetWWNs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TargetWWNs == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.TargetWWNs, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Lun == nil { + r.EncodeNil() + } else { + yy7 := *x.Lun + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lun")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Lun == nil { + r.EncodeNil() + } else { + yy9 := *x.Lun + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *FCVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *FCVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, 
_ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "targetWWNs": + if r.TryDecodeAsNil() { + x.TargetWWNs = nil + } else { + yyv4 := &x.TargetWWNs + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + case "lun": + if r.TryDecodeAsNil() { + if x.Lun != nil { + x.Lun = nil + } + } else { + if x.Lun == nil { + x.Lun = new(int32) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(x.Lun)) = int32(r.DecodeInt(32)) + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv8 := &x.FSType + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv10 := &x.ReadOnly + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *FCVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetWWNs = nil + } else { + yyv13 := &x.TargetWWNs + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecSliceStringX(yyv13, false, d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Lun != nil { + x.Lun = nil + } + } else { + if x.Lun == nil { + x.Lun = new(int32) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(x.Lun)) = int32(r.DecodeInt(32)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv17 := &x.FSType + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv19 := &x.ReadOnly + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *AzureFileVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ShareName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("shareName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ShareName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *AzureFileVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *AzureFileVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) 
+ _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "secretName": + if r.TryDecodeAsNil() { + x.SecretName = "" + } else { + yyv4 := &x.SecretName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "shareName": + if r.TryDecodeAsNil() { + x.ShareName = "" + } else { + yyv6 := &x.ShareName + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *AzureFileVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SecretName = "" + } else { + yyv11 := &x.SecretName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ShareName = "" + } else { + yyv13 := &x.ShareName + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *VsphereVirtualDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + var yynn2 int + if yyr2 || yy2arr2 { + 
r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumePath)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumePath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumePath)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *VsphereVirtualDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "volumePath": + if r.TryDecodeAsNil() { + x.VolumePath = "" + } else { + yyv4 := &x.VolumePath + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, 
r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumePath = "" + } else { + yyv9 := &x.VolumePath + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv11 := &x.FSType + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PhotonPersistentDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PdID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("pdID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PdID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PhotonPersistentDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := 
r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PhotonPersistentDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "pdID": + if r.TryDecodeAsNil() { + x.PdID = "" + } else { + yyv4 := &x.PdID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PhotonPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PdID = "" + } else { + yyv9 := &x.PdID + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv11 := &x.FSType + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x AzureDataDiskCachingMode) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *AzureDataDiskCachingMode) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = 
r.DecodeString() + } +} + +func (x *AzureDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.CachingMode != nil + yyq2[3] = x.FSType != nil + yyq2[4] = x.ReadOnly != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DiskName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("diskName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DiskName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DataDiskURI)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("diskURI")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DataDiskURI)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.CachingMode == nil { + r.EncodeNil() + } else { + yy10 := *x.CachingMode + yy10.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cachingMode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CachingMode == nil { + r.EncodeNil() + } else { + yy12 := *x.CachingMode + yy12.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.FSType == nil { + r.EncodeNil() + } else { + yy15 := *x.FSType + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy15)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FSType == nil { + r.EncodeNil() + } else { + yy17 := *x.FSType + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy17)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.ReadOnly == nil { + r.EncodeNil() + } else { + yy20 := *x.ReadOnly + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeBool(bool(yy20)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ReadOnly == nil { + r.EncodeNil() + } else { + yy22 := *x.ReadOnly + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeBool(bool(yy22)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *AzureDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *AzureDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "diskName": + if r.TryDecodeAsNil() { + x.DiskName = "" + } else { + yyv4 := &x.DiskName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "diskURI": + if r.TryDecodeAsNil() { + x.DataDiskURI = "" + } else { + yyv6 := &x.DataDiskURI + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "cachingMode": + if r.TryDecodeAsNil() { + if x.CachingMode != nil { + x.CachingMode = nil + } + } else { + if x.CachingMode == nil { + x.CachingMode = new(AzureDataDiskCachingMode) + } + x.CachingMode.CodecDecodeSelf(d) + } + case "fsType": + if r.TryDecodeAsNil() { + if x.FSType != nil { + x.FSType = nil + } + } else { + if x.FSType == nil { + x.FSType = new(string) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(x.FSType)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + if x.ReadOnly != nil { + x.ReadOnly = nil + } + } else { + if x.ReadOnly == nil { + x.ReadOnly = new(bool) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(x.ReadOnly)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *AzureDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } 
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DiskName = "" + } else { + yyv14 := &x.DiskName + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DataDiskURI = "" + } else { + yyv16 := &x.DataDiskURI + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CachingMode != nil { + x.CachingMode = nil + } + } else { + if x.CachingMode == nil { + x.CachingMode = new(AzureDataDiskCachingMode) + } + x.CachingMode.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FSType != nil { + x.FSType = nil + } + } else { + if x.FSType == nil { + x.FSType = new(string) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(x.FSType)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ReadOnly != nil { + x.ReadOnly = nil + } + } else { + if x.ReadOnly == nil { + x.ReadOnly = new(bool) + } + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(x.ReadOnly)) = r.DecodeBool() + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PortworxVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FSType != "" + yyq2[2] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PortworxVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PortworxVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "volumeID": + if r.TryDecodeAsNil() { + x.VolumeID = "" + } else { + yyv4 := &x.VolumeID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv6 := &x.FSType + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv8 := &x.ReadOnly + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x 
*PortworxVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeID = "" + } else { + yyv11 := &x.VolumeID + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv13 := &x.FSType + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ScaleIOVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[3] = x.SSLEnabled != false + yyq2[4] = x.ProtectionDomain != "" + yyq2[5] = x.StoragePool != "" + yyq2[6] = x.StorageMode != "" + yyq2[7] = x.VolumeName != "" + yyq2[8] = x.FSType != "" + yyq2[9] = x.ReadOnly != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Gateway)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gateway")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Gateway)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.System)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("system")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + 
yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.System)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.SSLEnabled)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("sslEnabled")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.SSLEnabled)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ProtectionDomain)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("protectionDomain")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ProtectionDomain)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StoragePool)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("storagePool")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StoragePool)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StorageMode)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("storageMode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.StorageMode)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := 
z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsType")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScaleIOVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScaleIOVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "gateway": + if r.TryDecodeAsNil() { + x.Gateway = "" + } else { + yyv4 := &x.Gateway + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "system": + if r.TryDecodeAsNil() { + x.System = "" + } else { + yyv6 := &x.System + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "secretRef": + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + case "sslEnabled": + if r.TryDecodeAsNil() { + x.SSLEnabled = false + } else { + yyv9 := 
&x.SSLEnabled + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*bool)(yyv9)) = r.DecodeBool() + } + } + case "protectionDomain": + if r.TryDecodeAsNil() { + x.ProtectionDomain = "" + } else { + yyv11 := &x.ProtectionDomain + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + case "storagePool": + if r.TryDecodeAsNil() { + x.StoragePool = "" + } else { + yyv13 := &x.StoragePool + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + case "storageMode": + if r.TryDecodeAsNil() { + x.StorageMode = "" + } else { + yyv15 := &x.StorageMode + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + case "volumeName": + if r.TryDecodeAsNil() { + x.VolumeName = "" + } else { + yyv17 := &x.VolumeName + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + case "fsType": + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv19 := &x.FSType + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv21 := &x.ReadOnly + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(yyv21)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScaleIOVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj23 int + var yyb23 bool + var yyhl23 bool = l >= 0 + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Gateway = "" + } else { + yyv24 := &x.Gateway + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + *((*string)(yyv24)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.System = "" + } else { + yyv26 := &x.System + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*string)(yyv26)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(LocalObjectReference) + } + x.SecretRef.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SSLEnabled = false + } else { + yyv29 := &x.SSLEnabled + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = 
r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ProtectionDomain = "" + } else { + yyv31 := &x.ProtectionDomain + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StoragePool = "" + } else { + yyv33 := &x.StoragePool + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*string)(yyv33)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StorageMode = "" + } else { + yyv35 := &x.StorageMode + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*string)(yyv35)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeName = "" + } else { + yyv37 := &x.VolumeName + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*string)(yyv37)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSType = "" + } else { + yyv39 := &x.FSType + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*string)(yyv39)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv41 := &x.ReadOnly + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*bool)(yyv41)) = r.DecodeBool() + } + } + for { + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj23-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = len(x.Items) != 0 + yyq2[2] = x.DefaultMode != nil + yyq2[3] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || 
yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Items == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy10 := *x.DefaultMode + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("defaultMode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy12 := *x.DefaultMode + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy15 := *x.Optional + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(yy15)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy17 := *x.Optional + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeBool(bool(yy17)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ConfigMapVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ConfigMapVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv6), d) + } + } + case "defaultMode": + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ConfigMapVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv13 := &x.Name + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv15 := &x.Items + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv15), d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ConfigMapProjection) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = len(x.Items) != 0 + yyq2[2] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Items == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy10 := *x.Optional + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy12 := *x.Optional + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ConfigMapProjection) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + 
z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ConfigMapProjection) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv6 := &x.Items + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv6), d) + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ConfigMapProjection) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv11 := &x.Name + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv13 := &x.Items + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceKeyToPath((*[]KeyToPath)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym16 := 
z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ProjectedVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.DefaultMode != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Sources == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceVolumeProjection(([]VolumeProjection)(x.Sources), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("sources")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Sources == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceVolumeProjection(([]VolumeProjection)(x.Sources), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy7 := *x.DefaultMode + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("defaultMode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy9 := *x.DefaultMode + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ProjectedVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ProjectedVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = 
z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "sources": + if r.TryDecodeAsNil() { + x.Sources = nil + } else { + yyv4 := &x.Sources + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceVolumeProjection((*[]VolumeProjection)(yyv4), d) + } + } + case "defaultMode": + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ProjectedVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Sources = nil + } else { + yyv9 := &x.Sources + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceVolumeProjection((*[]VolumeProjection)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *VolumeProjection) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Secret != nil + yyq2[1] = x.DownwardAPI != nil + yyq2[2] = x.ConfigMap != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Secret == nil { + r.EncodeNil() + } else { + x.Secret.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secret")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Secret == nil { + r.EncodeNil() + } else { + x.Secret.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.DownwardAPI == nil { + r.EncodeNil() + } else { + x.DownwardAPI.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("downwardAPI")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DownwardAPI == nil { + r.EncodeNil() + } else { + x.DownwardAPI.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ConfigMap == nil { + r.EncodeNil() + } else { + x.ConfigMap.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("configMap")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ConfigMap == nil { + r.EncodeNil() + } else { + x.ConfigMap.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *VolumeProjection) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *VolumeProjection) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "secret": + if r.TryDecodeAsNil() { + if x.Secret != nil { + x.Secret = nil + } + } else { + if x.Secret == nil { + x.Secret = new(SecretProjection) + } + x.Secret.CodecDecodeSelf(d) + } + case "downwardAPI": + if r.TryDecodeAsNil() { + if x.DownwardAPI != nil { + x.DownwardAPI = nil + } + } else { + if x.DownwardAPI == nil { + x.DownwardAPI = new(DownwardAPIProjection) + } + x.DownwardAPI.CodecDecodeSelf(d) + } + case "configMap": + if r.TryDecodeAsNil() { + if x.ConfigMap != nil { + x.ConfigMap = nil + } + } else { + if x.ConfigMap == nil { + x.ConfigMap = new(ConfigMapProjection) + } + x.ConfigMap.CodecDecodeSelf(d) + } + default: + 
z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *VolumeProjection) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Secret != nil { + x.Secret = nil + } + } else { + if x.Secret == nil { + x.Secret = new(SecretProjection) + } + x.Secret.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DownwardAPI != nil { + x.DownwardAPI = nil + } + } else { + if x.DownwardAPI == nil { + x.DownwardAPI = new(DownwardAPIProjection) + } + x.DownwardAPI.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ConfigMap != nil { + x.ConfigMap = nil + } + } else { + if x.ConfigMap == nil { + x.ConfigMap = new(ConfigMapProjection) + } + x.ConfigMap.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *KeyToPath) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.Mode != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Mode == nil { + r.EncodeNil() + } else { + yy10 := *x.Mode + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("mode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Mode == nil { + r.EncodeNil() + } else { + yy12 := *x.Mode + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *KeyToPath) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *KeyToPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv4 := &x.Key + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv6 := &x.Path + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "mode": + if r.TryDecodeAsNil() { + if x.Mode != nil { + x.Mode = nil + } + } else { + if x.Mode == nil { + x.Mode = new(int32) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *KeyToPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else 
{ + yyv11 := &x.Key + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv13 := &x.Path + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Mode != nil { + x.Mode = nil + } + } else { + if x.Mode == nil { + x.Mode = new(int32) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ContainerPort) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = x.HostPort != 0 + yyq2[3] = x.Protocol != "" + yyq2[4] = x.HostIP != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.HostPort)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPort")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.HostPort)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.ContainerPort)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containerPort")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.ContainerPort)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + x.Protocol.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("protocol")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Protocol.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostIP")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerPort) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "hostPort": + if r.TryDecodeAsNil() { + x.HostPort = 0 + } else { + yyv6 := &x.HostPort + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "containerPort": + if r.TryDecodeAsNil() { + x.ContainerPort = 0 + } else { + yyv8 := &x.ContainerPort + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "protocol": + if r.TryDecodeAsNil() { + x.Protocol = "" + } else { + yyv10 := &x.Protocol + yyv10.CodecDecodeSelf(d) + } + case "hostIP": + if r.TryDecodeAsNil() 
{ + x.HostIP = "" + } else { + yyv11 := &x.HostIP + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ContainerPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv14 := &x.Name + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostPort = 0 + } else { + yyv16 := &x.HostPort + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*int32)(yyv16)) = int32(r.DecodeInt(32)) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContainerPort = 0 + } else { + yyv18 := &x.ContainerPort + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*int32)(yyv18)) = int32(r.DecodeInt(32)) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Protocol = "" + } else { + yyv20 := &x.Protocol + yyv20.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostIP = "" + } else { + yyv21 := &x.HostIP + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *VolumeMount) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.ReadOnly != false + yyq2[3] = x.SubPath != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + 
if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnly")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(x.ReadOnly)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MountPath)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("mountPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MountPath)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SubPath)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SubPath)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *VolumeMount) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *VolumeMount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + 
z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "readOnly": + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv6 := &x.ReadOnly + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } + } + case "mountPath": + if r.TryDecodeAsNil() { + x.MountPath = "" + } else { + yyv8 := &x.MountPath + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "subPath": + if r.TryDecodeAsNil() { + x.SubPath = "" + } else { + yyv10 := &x.SubPath + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *VolumeMount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv13 := &x.Name + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnly = false + } else { + yyv15 := &x.ReadOnly + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MountPath = "" + } else { + yyv17 := &x.MountPath + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SubPath = "" + } else { + yyv19 := &x.SubPath + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EnvVar) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := 
z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Value != "" + yyq2[2] = x.ValueFrom != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ValueFrom == nil { + r.EncodeNil() + } else { + x.ValueFrom.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("valueFrom")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ValueFrom == nil { + r.EncodeNil() + } else { + x.ValueFrom.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EnvVar) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EnvVar) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, 
true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv6 := &x.Value + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "valueFrom": + if r.TryDecodeAsNil() { + if x.ValueFrom != nil { + x.ValueFrom = nil + } + } else { + if x.ValueFrom == nil { + x.ValueFrom = new(EnvVarSource) + } + x.ValueFrom.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EnvVar) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv10 := &x.Name + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv12 := &x.Value + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ValueFrom != nil { + x.ValueFrom = nil + } + } else { + if x.ValueFrom == nil { + x.ValueFrom = new(EnvVarSource) + } + x.ValueFrom.CodecDecodeSelf(d) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EnvVarSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.FieldRef != nil + yyq2[1] = x.ResourceFieldRef != nil + yyq2[2] = x.ConfigMapKeyRef != nil + yyq2[3] = x.SecretKeyRef != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.FieldRef == nil { + r.EncodeNil() + } else { + x.FieldRef.CodecEncodeSelf(e) + } + } 
else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fieldRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FieldRef == nil { + r.EncodeNil() + } else { + x.FieldRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.ResourceFieldRef == nil { + r.EncodeNil() + } else { + x.ResourceFieldRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceFieldRef == nil { + r.EncodeNil() + } else { + x.ResourceFieldRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ConfigMapKeyRef == nil { + r.EncodeNil() + } else { + x.ConfigMapKeyRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("configMapKeyRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ConfigMapKeyRef == nil { + r.EncodeNil() + } else { + x.ConfigMapKeyRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.SecretKeyRef == nil { + r.EncodeNil() + } else { + x.SecretKeyRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretKeyRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretKeyRef == nil { + r.EncodeNil() + } else { + x.SecretKeyRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EnvVarSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EnvVarSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "fieldRef": + if 
r.TryDecodeAsNil() { + if x.FieldRef != nil { + x.FieldRef = nil + } + } else { + if x.FieldRef == nil { + x.FieldRef = new(ObjectFieldSelector) + } + x.FieldRef.CodecDecodeSelf(d) + } + case "resourceFieldRef": + if r.TryDecodeAsNil() { + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } + } else { + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) + } + case "configMapKeyRef": + if r.TryDecodeAsNil() { + if x.ConfigMapKeyRef != nil { + x.ConfigMapKeyRef = nil + } + } else { + if x.ConfigMapKeyRef == nil { + x.ConfigMapKeyRef = new(ConfigMapKeySelector) + } + x.ConfigMapKeyRef.CodecDecodeSelf(d) + } + case "secretKeyRef": + if r.TryDecodeAsNil() { + if x.SecretKeyRef != nil { + x.SecretKeyRef = nil + } + } else { + if x.SecretKeyRef == nil { + x.SecretKeyRef = new(SecretKeySelector) + } + x.SecretKeyRef.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EnvVarSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FieldRef != nil { + x.FieldRef = nil + } + } else { + if x.FieldRef == nil { + x.FieldRef = new(ObjectFieldSelector) + } + x.FieldRef.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } + } else { + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ConfigMapKeyRef != nil { + x.ConfigMapKeyRef = nil + } + } else { + if x.ConfigMapKeyRef == nil { + x.ConfigMapKeyRef = new(ConfigMapKeySelector) + } + x.ConfigMapKeyRef.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretKeyRef != nil { + x.SecretKeyRef = nil + } + } else { + if x.SecretKeyRef == nil { + x.SecretKeyRef = new(SecretKeySelector) + } + x.SecretKeyRef.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ObjectFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 
:= z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fieldPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ObjectFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ObjectFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv4 := &x.APIVersion + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fieldPath": + if r.TryDecodeAsNil() { + x.FieldPath = "" + } else { + yyv6 := &x.FieldPath + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + 
*((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ObjectFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv9 := &x.APIVersion + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FieldPath = "" + } else { + yyv11 := &x.FieldPath + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ContainerName != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containerName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.Divisor + yym11 := z.EncBinary() + _ = yym11 
+ if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("divisor")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.Divisor + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "containerName": + if r.TryDecodeAsNil() { + x.ContainerName = "" + } else { + yyv4 := &x.ContainerName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "resource": + if r.TryDecodeAsNil() { + x.Resource = "" + } else { + yyv6 := &x.Resource + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "divisor": + if r.TryDecodeAsNil() { + x.Divisor = pkg3_resource.Quantity{} + } else { + yyv8 := &x.Divisor + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContainerName = "" + } else { + yyv11 := &x.ContainerName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Resource = "" + } else { + yyv13 := &x.Resource + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Divisor = pkg3_resource.Quantity{} + } else { + yyv15 := &x.Divisor + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv15) + } else { + z.DecFallback(yyv15, false) + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[2] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy10 := *x.Optional + yym11 := z.EncBinary() + _ = yym11 + if false { 
+ } else { + r.EncodeBool(bool(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy12 := *x.Optional + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ConfigMapKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ConfigMapKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ConfigMapKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv11 := &x.Name + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + 
if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv13 := &x.Key + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SecretKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[2] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy10 := *x.Optional + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy12 := *x.Optional + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + 
r.EncodeBool(bool(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SecretKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SecretKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SecretKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv11 := &x.Name + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv13 := &x.Key + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EnvFromSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Prefix != "" + yyq2[1] = x.ConfigMapRef != nil + yyq2[2] = x.SecretRef != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Prefix)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("prefix")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Prefix)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.ConfigMapRef == nil { + r.EncodeNil() + } else { + x.ConfigMapRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("configMapRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ConfigMapRef == nil { + r.EncodeNil() + } else { + x.ConfigMapRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecretRef == nil { + r.EncodeNil() + } else { + x.SecretRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EnvFromSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == 
codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EnvFromSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "configMapRef": + if r.TryDecodeAsNil() { + if x.ConfigMapRef != nil { + x.ConfigMapRef = nil + } + } else { + if x.ConfigMapRef == nil { + x.ConfigMapRef = new(ConfigMapEnvSource) + } + x.ConfigMapRef.CodecDecodeSelf(d) + } + case "secretRef": + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(SecretEnvSource) + } + x.SecretRef.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EnvFromSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv9 := &x.Prefix + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ConfigMapRef != nil { + x.ConfigMapRef = nil + } + } else { + if x.ConfigMapRef == nil { + x.ConfigMapRef = new(ConfigMapEnvSource) + } + x.ConfigMapRef.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecretRef != nil { + x.SecretRef = nil + } + } else { + if x.SecretRef == nil { + x.SecretRef = new(SecretEnvSource) + } + x.SecretRef.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + 
z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ConfigMapEnvSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy7 := *x.Optional + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy9 := *x.Optional + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ConfigMapEnvSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ConfigMapEnvSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := 
string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ConfigMapEnvSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv9 := &x.Name + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SecretEnvSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = x.Optional != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Optional == nil { + r.EncodeNil() + } else { + yy7 := 
*x.Optional + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("optional")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Optional == nil { + r.EncodeNil() + } else { + yy9 := *x.Optional + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SecretEnvSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SecretEnvSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "optional": + if r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SecretEnvSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv9 := &x.Name + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + if x.Optional != nil { + x.Optional = nil + } + } else { + if x.Optional == nil { + x.Optional = new(bool) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(x.Optional)) = r.DecodeBool() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HTTPHeader) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HTTPHeader) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HTTPHeader) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + 
z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv6 := &x.Value + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HTTPHeader) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv9 := &x.Name + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv11 := &x.Value + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HTTPGetAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Path != "" + yyq2[2] = x.Host != "" + yyq2[3] = x.Scheme != "" + yyq2[4] = len(x.HTTPHeaders) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.Port + yym8 := z.EncBinary() + _ = yym8 + 
if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("port")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.Port + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Host)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("host")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Host)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + x.Scheme.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scheme")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Scheme.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.HTTPHeaders == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + h.encSliceHTTPHeader(([]HTTPHeader)(x.HTTPHeaders), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("httpHeaders")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.HTTPHeaders == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encSliceHTTPHeader(([]HTTPHeader)(x.HTTPHeaders), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HTTPGetAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HTTPGetAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; 
; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "port": + if r.TryDecodeAsNil() { + x.Port = pkg4_intstr.IntOrString{} + } else { + yyv6 := &x.Port + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "host": + if r.TryDecodeAsNil() { + x.Host = "" + } else { + yyv8 := &x.Host + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "scheme": + if r.TryDecodeAsNil() { + x.Scheme = "" + } else { + yyv10 := &x.Scheme + yyv10.CodecDecodeSelf(d) + } + case "httpHeaders": + if r.TryDecodeAsNil() { + x.HTTPHeaders = nil + } else { + yyv11 := &x.HTTPHeaders + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceHTTPHeader((*[]HTTPHeader)(yyv11), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HTTPGetAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv14 := &x.Path + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Port = pkg4_intstr.IntOrString{} + } else { + yyv16 := &x.Port + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else if !yym17 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv16) + } else { + z.DecFallback(yyv16, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Host = "" + } else { + yyv18 := &x.Host + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Scheme = "" + } else { + yyv20 := &x.Scheme + yyv20.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HTTPHeaders = nil + } else { + yyv21 := &x.HTTPHeaders + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + h.decSliceHTTPHeader((*[]HTTPHeader)(yyv21), d) + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x URIScheme) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *URIScheme) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *TCPSocketAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.Port + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(yy4) + } else { + z.EncFallback(yy4) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("port")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.Port + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(yy6) + } else { + z.EncFallback(yy6) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *TCPSocketAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *TCPSocketAction) 
codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "port": + if r.TryDecodeAsNil() { + x.Port = pkg4_intstr.IntOrString{} + } else { + yyv4 := &x.Port + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv4) + } else { + z.DecFallback(yyv4, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *TCPSocketAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Port = pkg4_intstr.IntOrString{} + } else { + yyv7 := &x.Port + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, false) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ExecAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Command) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Command == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.Command, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("command")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Command == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.Command, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ExecAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ExecAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "command": + if r.TryDecodeAsNil() { + x.Command = nil + } else { + yyv4 := &x.Command + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ExecAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Command = nil + } else { + yyv7 := &x.Command + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + z.F.DecSliceStringX(yyv7, false, d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Probe) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Handler.Exec != nil && x.Exec != nil + yyq2[1] = x.Handler.HTTPGet != nil && x.HTTPGet != nil + yyq2[2] = x.Handler.TCPSocket != nil && x.TCPSocket != nil + yyq2[3] = x.InitialDelaySeconds != 0 + yyq2[4] = x.TimeoutSeconds != 0 + yyq2[5] = x.PeriodSeconds != 0 + yyq2[6] = x.SuccessThreshold != 0 + yyq2[7] = x.FailureThreshold != 0 + var yynn2 int + if yyr2 || yy2arr2 { + 
r.EncodeArrayStart(8) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + var yyn3 bool + if x.Handler.Exec == nil { + yyn3 = true + goto LABEL3 + } + LABEL3: + if yyr2 || yy2arr2 { + if yyn3 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Exec == nil { + r.EncodeNil() + } else { + x.Exec.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("exec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn3 { + r.EncodeNil() + } else { + if x.Exec == nil { + r.EncodeNil() + } else { + x.Exec.CodecEncodeSelf(e) + } + } + } + } + var yyn6 bool + if x.Handler.HTTPGet == nil { + yyn6 = true + goto LABEL6 + } + LABEL6: + if yyr2 || yy2arr2 { + if yyn6 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.HTTPGet == nil { + r.EncodeNil() + } else { + x.HTTPGet.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("httpGet")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn6 { + r.EncodeNil() + } else { + if x.HTTPGet == nil { + r.EncodeNil() + } else { + x.HTTPGet.CodecEncodeSelf(e) + } + } + } + } + var yyn9 bool + if x.Handler.TCPSocket == nil { + yyn9 = true + goto LABEL9 + } + LABEL9: + if yyr2 || yy2arr2 { + if yyn9 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.TCPSocket == nil { + r.EncodeNil() + } else { + x.TCPSocket.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tcpSocket")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn9 { + r.EncodeNil() + } else { + if x.TCPSocket == nil { + r.EncodeNil() + } else { + x.TCPSocket.CodecEncodeSelf(e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.InitialDelaySeconds)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("initialDelaySeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.InitialDelaySeconds)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.TimeoutSeconds)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("timeoutSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.TimeoutSeconds)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(x.PeriodSeconds)) + } + } 
else { + r.EncodeInt(0) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("periodSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.PeriodSeconds)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeInt(int64(x.SuccessThreshold)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("successThreshold")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeInt(int64(x.SuccessThreshold)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeInt(int64(x.FailureThreshold)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("failureThreshold")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeInt(int64(x.FailureThreshold)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Probe) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Probe) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "exec": + if x.Handler.Exec == nil { + x.Handler.Exec = new(ExecAction) + } + if r.TryDecodeAsNil() { + if x.Exec != nil { + x.Exec = nil + } + } else { + if x.Exec == nil { + x.Exec = new(ExecAction) + } + x.Exec.CodecDecodeSelf(d) + } + case "httpGet": + if x.Handler.HTTPGet == nil { + x.Handler.HTTPGet = new(HTTPGetAction) + } + if r.TryDecodeAsNil() { + if x.HTTPGet != nil { + x.HTTPGet = nil + } + } else { + if x.HTTPGet == nil { + x.HTTPGet = new(HTTPGetAction) + } + x.HTTPGet.CodecDecodeSelf(d) + } + case "tcpSocket": + if 
x.Handler.TCPSocket == nil { + x.Handler.TCPSocket = new(TCPSocketAction) + } + if r.TryDecodeAsNil() { + if x.TCPSocket != nil { + x.TCPSocket = nil + } + } else { + if x.TCPSocket == nil { + x.TCPSocket = new(TCPSocketAction) + } + x.TCPSocket.CodecDecodeSelf(d) + } + case "initialDelaySeconds": + if r.TryDecodeAsNil() { + x.InitialDelaySeconds = 0 + } else { + yyv7 := &x.InitialDelaySeconds + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } + } + case "timeoutSeconds": + if r.TryDecodeAsNil() { + x.TimeoutSeconds = 0 + } else { + yyv9 := &x.TimeoutSeconds + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*int32)(yyv9)) = int32(r.DecodeInt(32)) + } + } + case "periodSeconds": + if r.TryDecodeAsNil() { + x.PeriodSeconds = 0 + } else { + yyv11 := &x.PeriodSeconds + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(yyv11)) = int32(r.DecodeInt(32)) + } + } + case "successThreshold": + if r.TryDecodeAsNil() { + x.SuccessThreshold = 0 + } else { + yyv13 := &x.SuccessThreshold + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*int32)(yyv13)) = int32(r.DecodeInt(32)) + } + } + case "failureThreshold": + if r.TryDecodeAsNil() { + x.FailureThreshold = 0 + } else { + yyv15 := &x.FailureThreshold + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(yyv15)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj17 int + var yyb17 bool + var yyhl17 bool = l >= 0 + if x.Handler.Exec == nil { + x.Handler.Exec = new(ExecAction) + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Exec != nil { + x.Exec = nil + } + } else { + if x.Exec == nil { + x.Exec = new(ExecAction) + } + x.Exec.CodecDecodeSelf(d) + } + if x.Handler.HTTPGet == nil { + x.Handler.HTTPGet = new(HTTPGetAction) + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HTTPGet != nil { + x.HTTPGet = nil + } + } else { + if x.HTTPGet == nil { + x.HTTPGet = new(HTTPGetAction) + } + x.HTTPGet.CodecDecodeSelf(d) + } + if x.Handler.TCPSocket == nil { + x.Handler.TCPSocket = new(TCPSocketAction) + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TCPSocket != nil { + x.TCPSocket = nil + } + } else { + if x.TCPSocket == nil { + x.TCPSocket = new(TCPSocketAction) + } + x.TCPSocket.CodecDecodeSelf(d) + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.InitialDelaySeconds = 0 + } 
else { + yyv21 := &x.InitialDelaySeconds + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TimeoutSeconds = 0 + } else { + yyv23 := &x.TimeoutSeconds + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PeriodSeconds = 0 + } else { + yyv25 := &x.PeriodSeconds + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SuccessThreshold = 0 + } else { + yyv27 := &x.SuccessThreshold + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(yyv27)) = int32(r.DecodeInt(32)) + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FailureThreshold = 0 + } else { + yyv29 := &x.FailureThreshold + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*int32)(yyv29)) = int32(r.DecodeInt(32)) + } + } + for { + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj17-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x PullPolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PullPolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x TerminationMessagePolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *TerminationMessagePolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x Capability) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *Capability) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *Capabilities) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Add) != 0 + yyq2[1] = len(x.Drop) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Add == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceCapability(([]Capability)(x.Add), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("add")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Add == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceCapability(([]Capability)(x.Add), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Drop == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceCapability(([]Capability)(x.Drop), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("drop")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Drop == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceCapability(([]Capability)(x.Drop), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Capabilities) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Capabilities) codecDecodeSelfFromMap(l int, d 
*codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "add": + if r.TryDecodeAsNil() { + x.Add = nil + } else { + yyv4 := &x.Add + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceCapability((*[]Capability)(yyv4), d) + } + } + case "drop": + if r.TryDecodeAsNil() { + x.Drop = nil + } else { + yyv6 := &x.Drop + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceCapability((*[]Capability)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Capabilities) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Add = nil + } else { + yyv9 := &x.Add + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceCapability((*[]Capability)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Drop = nil + } else { + yyv11 := &x.Drop + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceCapability((*[]Capability)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceRequirements) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Limits) != 0 + yyq2[1] = len(x.Requests) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Limits == nil { + r.EncodeNil() + } else { + x.Limits.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("limits")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + 
if x.Limits == nil { + r.EncodeNil() + } else { + x.Limits.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Requests == nil { + r.EncodeNil() + } else { + x.Requests.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("requests")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Requests == nil { + r.EncodeNil() + } else { + x.Requests.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceRequirements) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceRequirements) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "limits": + if r.TryDecodeAsNil() { + x.Limits = nil + } else { + yyv4 := &x.Limits + yyv4.CodecDecodeSelf(d) + } + case "requests": + if r.TryDecodeAsNil() { + x.Requests = nil + } else { + yyv5 := &x.Requests + yyv5.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceRequirements) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Limits = nil + } else { + yyv7 := &x.Limits + yyv7.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Requests = nil + } else { + yyv8 := &x.Requests + yyv8.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + 
} + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [20]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Image != "" + yyq2[2] = len(x.Command) != 0 + yyq2[3] = len(x.Args) != 0 + yyq2[4] = x.WorkingDir != "" + yyq2[5] = len(x.Ports) != 0 + yyq2[6] = len(x.EnvFrom) != 0 + yyq2[7] = len(x.Env) != 0 + yyq2[8] = true + yyq2[9] = len(x.VolumeMounts) != 0 + yyq2[10] = x.LivenessProbe != nil + yyq2[11] = x.ReadinessProbe != nil + yyq2[12] = x.Lifecycle != nil + yyq2[13] = x.TerminationMessagePath != "" + yyq2[14] = x.TerminationMessagePolicy != "" + yyq2[15] = x.ImagePullPolicy != "" + yyq2[16] = x.SecurityContext != nil + yyq2[17] = x.Stdin != false + yyq2[18] = x.StdinOnce != false + yyq2[19] = x.TTY != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(20) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Image)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("image")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Image)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Command == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + z.F.EncSliceStringV(x.Command, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("command")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Command == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + z.F.EncSliceStringV(x.Command, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Args == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + z.F.EncSliceStringV(x.Args, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if 
yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("args")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Args == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + z.F.EncSliceStringV(x.Args, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.WorkingDir)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("workingDir")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.WorkingDir)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.Ports == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encSliceContainerPort(([]ContainerPort)(x.Ports), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ports")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ports == nil { + r.EncodeNil() + } else { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + h.encSliceContainerPort(([]ContainerPort)(x.Ports), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.EnvFrom == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + h.encSliceEnvFromSource(([]EnvFromSource)(x.EnvFrom), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("envFrom")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.EnvFrom == nil { + r.EncodeNil() + } else { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + h.encSliceEnvFromSource(([]EnvFromSource)(x.EnvFrom), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.Env == nil { + r.EncodeNil() + } else { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + h.encSliceEnvVar(([]EnvVar)(x.Env), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("env")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Env == nil { + r.EncodeNil() + } else { + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + h.encSliceEnvVar(([]EnvVar)(x.Env), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + yy28 := &x.Resources + yy28.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resources")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy30 := &x.Resources + yy30.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.VolumeMounts == nil { + r.EncodeNil() + } else { + yym33 := z.EncBinary() + _ = yym33 + if false { + } else { + h.encSliceVolumeMount(([]VolumeMount)(x.VolumeMounts), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeMounts")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VolumeMounts == nil { + r.EncodeNil() + } else { + yym34 := z.EncBinary() + _ = yym34 + if false { + } else { + h.encSliceVolumeMount(([]VolumeMount)(x.VolumeMounts), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.LivenessProbe == nil { + r.EncodeNil() + } else { + x.LivenessProbe.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("livenessProbe")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LivenessProbe == nil { + r.EncodeNil() + } else { + x.LivenessProbe.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + if x.ReadinessProbe == nil { + r.EncodeNil() + } else { + x.ReadinessProbe.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readinessProbe")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ReadinessProbe == nil { + r.EncodeNil() + } else { + x.ReadinessProbe.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + if x.Lifecycle == nil { + r.EncodeNil() + } else { + x.Lifecycle.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lifecycle")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Lifecycle == nil { + r.EncodeNil() + } else { + x.Lifecycle.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + yym45 := z.EncBinary() + _ = yym45 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TerminationMessagePath)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("terminationMessagePath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym46 := z.EncBinary() + _ = yym46 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TerminationMessagePath)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[14] { + x.TerminationMessagePolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[14] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("terminationMessagePolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.TerminationMessagePolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[15] { + x.ImagePullPolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[15] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("imagePullPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.ImagePullPolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[16] { + if x.SecurityContext == nil { + r.EncodeNil() + } else { + x.SecurityContext.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[16] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("securityContext")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecurityContext == nil { + r.EncodeNil() + } else { + x.SecurityContext.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[17] { + yym57 := z.EncBinary() + _ = yym57 + if false { + } else { + r.EncodeBool(bool(x.Stdin)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[17] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stdin")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym58 := z.EncBinary() + _ = yym58 + if false { + } else { + r.EncodeBool(bool(x.Stdin)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[18] { + yym60 := z.EncBinary() + _ = yym60 + if false { + } else { + r.EncodeBool(bool(x.StdinOnce)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[18] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stdinOnce")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym61 := z.EncBinary() + _ = yym61 + if false { + } else { + r.EncodeBool(bool(x.StdinOnce)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[19] { + yym63 := z.EncBinary() + _ = yym63 + if false { + } else { + r.EncodeBool(bool(x.TTY)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[19] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tty")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym64 := z.EncBinary() + _ = yym64 + if false { + } else { + r.EncodeBool(bool(x.TTY)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Container) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + 
} +} + +func (x *Container) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "image": + if r.TryDecodeAsNil() { + x.Image = "" + } else { + yyv6 := &x.Image + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "command": + if r.TryDecodeAsNil() { + x.Command = nil + } else { + yyv8 := &x.Command + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceStringX(yyv8, false, d) + } + } + case "args": + if r.TryDecodeAsNil() { + x.Args = nil + } else { + yyv10 := &x.Args + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + z.F.DecSliceStringX(yyv10, false, d) + } + } + case "workingDir": + if r.TryDecodeAsNil() { + x.WorkingDir = "" + } else { + yyv12 := &x.WorkingDir + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "ports": + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv14 := &x.Ports + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + h.decSliceContainerPort((*[]ContainerPort)(yyv14), d) + } + } + case "envFrom": + if r.TryDecodeAsNil() { + x.EnvFrom = nil + } else { + yyv16 := &x.EnvFrom + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + h.decSliceEnvFromSource((*[]EnvFromSource)(yyv16), d) + } + } + case "env": + if r.TryDecodeAsNil() { + x.Env = nil + } else { + yyv18 := &x.Env + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + h.decSliceEnvVar((*[]EnvVar)(yyv18), d) + } + } + case "resources": + if r.TryDecodeAsNil() { + x.Resources = ResourceRequirements{} + } else { + yyv20 := &x.Resources + yyv20.CodecDecodeSelf(d) + } + case "volumeMounts": + if r.TryDecodeAsNil() { + x.VolumeMounts = nil + } else { + yyv21 := &x.VolumeMounts + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + h.decSliceVolumeMount((*[]VolumeMount)(yyv21), d) + } + } + case "livenessProbe": + if r.TryDecodeAsNil() { + if x.LivenessProbe != nil { + x.LivenessProbe = nil + } + } else { + if x.LivenessProbe == nil { + x.LivenessProbe = new(Probe) + } + x.LivenessProbe.CodecDecodeSelf(d) + } + case "readinessProbe": + if r.TryDecodeAsNil() { + if x.ReadinessProbe != nil { + x.ReadinessProbe = nil + } + } else { + if x.ReadinessProbe == nil { + x.ReadinessProbe = new(Probe) + } + x.ReadinessProbe.CodecDecodeSelf(d) + } + case "lifecycle": + if r.TryDecodeAsNil() { + if x.Lifecycle != nil { + x.Lifecycle = nil + } + } else { + if x.Lifecycle == nil { + x.Lifecycle = new(Lifecycle) + } + x.Lifecycle.CodecDecodeSelf(d) + } + case "terminationMessagePath": + if r.TryDecodeAsNil() { + x.TerminationMessagePath = "" + } else { + yyv26 := &x.TerminationMessagePath + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*string)(yyv26)) = r.DecodeString() + } + } + case "terminationMessagePolicy": + if 
r.TryDecodeAsNil() { + x.TerminationMessagePolicy = "" + } else { + yyv28 := &x.TerminationMessagePolicy + yyv28.CodecDecodeSelf(d) + } + case "imagePullPolicy": + if r.TryDecodeAsNil() { + x.ImagePullPolicy = "" + } else { + yyv29 := &x.ImagePullPolicy + yyv29.CodecDecodeSelf(d) + } + case "securityContext": + if r.TryDecodeAsNil() { + if x.SecurityContext != nil { + x.SecurityContext = nil + } + } else { + if x.SecurityContext == nil { + x.SecurityContext = new(SecurityContext) + } + x.SecurityContext.CodecDecodeSelf(d) + } + case "stdin": + if r.TryDecodeAsNil() { + x.Stdin = false + } else { + yyv31 := &x.Stdin + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*bool)(yyv31)) = r.DecodeBool() + } + } + case "stdinOnce": + if r.TryDecodeAsNil() { + x.StdinOnce = false + } else { + yyv33 := &x.StdinOnce + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*bool)(yyv33)) = r.DecodeBool() + } + } + case "tty": + if r.TryDecodeAsNil() { + x.TTY = false + } else { + yyv35 := &x.TTY + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*bool)(yyv35)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj37 int + var yyb37 bool + var yyhl37 bool = l >= 0 + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv38 := &x.Name + yym39 := z.DecBinary() + _ = yym39 + if false { + } else { + *((*string)(yyv38)) = r.DecodeString() + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Image = "" + } else { + yyv40 := &x.Image + yym41 := z.DecBinary() + _ = yym41 + if false { + } else { + *((*string)(yyv40)) = r.DecodeString() + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Command = nil + } else { + yyv42 := &x.Command + yym43 := z.DecBinary() + _ = yym43 + if false { + } else { + z.F.DecSliceStringX(yyv42, false, d) + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Args = nil + } else { + yyv44 := &x.Args + yym45 := z.DecBinary() + _ = yym45 + if false { + } else { + z.F.DecSliceStringX(yyv44, false, d) + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.WorkingDir = "" + } else { + yyv46 := &x.WorkingDir + yym47 := z.DecBinary() + _ = yym47 + if false { + } else { + 
*((*string)(yyv46)) = r.DecodeString() + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv48 := &x.Ports + yym49 := z.DecBinary() + _ = yym49 + if false { + } else { + h.decSliceContainerPort((*[]ContainerPort)(yyv48), d) + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.EnvFrom = nil + } else { + yyv50 := &x.EnvFrom + yym51 := z.DecBinary() + _ = yym51 + if false { + } else { + h.decSliceEnvFromSource((*[]EnvFromSource)(yyv50), d) + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Env = nil + } else { + yyv52 := &x.Env + yym53 := z.DecBinary() + _ = yym53 + if false { + } else { + h.decSliceEnvVar((*[]EnvVar)(yyv52), d) + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Resources = ResourceRequirements{} + } else { + yyv54 := &x.Resources + yyv54.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeMounts = nil + } else { + yyv55 := &x.VolumeMounts + yym56 := z.DecBinary() + _ = yym56 + if false { + } else { + h.decSliceVolumeMount((*[]VolumeMount)(yyv55), d) + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LivenessProbe != nil { + x.LivenessProbe = nil + } + } else { + if x.LivenessProbe == nil { + x.LivenessProbe = new(Probe) + } + x.LivenessProbe.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ReadinessProbe != nil { + x.ReadinessProbe = nil + } + } else { + if x.ReadinessProbe == nil { + x.ReadinessProbe = new(Probe) + } + x.ReadinessProbe.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Lifecycle != nil { + x.Lifecycle = nil + } + } else { + if x.Lifecycle == nil { + x.Lifecycle = new(Lifecycle) + } + x.Lifecycle.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TerminationMessagePath = "" + } else { + yyv60 := &x.TerminationMessagePath + yym61 := z.DecBinary() + _ = yym61 + if false { + } else { + *((*string)(yyv60)) = r.DecodeString() + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TerminationMessagePolicy = "" + } else { + yyv62 := &x.TerminationMessagePolicy + yyv62.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ImagePullPolicy = "" + } else { + yyv63 := &x.ImagePullPolicy + yyv63.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecurityContext != nil { + x.SecurityContext = nil + } + } else { + if x.SecurityContext == nil { + x.SecurityContext = new(SecurityContext) + } + x.SecurityContext.CodecDecodeSelf(d) + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Stdin = false + } else { + yyv65 := &x.Stdin + yym66 := z.DecBinary() + _ = yym66 + if false { + } else { + *((*bool)(yyv65)) = r.DecodeBool() + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StdinOnce = false + } else { + yyv67 := &x.StdinOnce + yym68 := z.DecBinary() + _ = yym68 + if false { + } else { + *((*bool)(yyv67)) = r.DecodeBool() + } + } + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TTY = false + } else { + yyv69 := &x.TTY + yym70 := z.DecBinary() + _ = yym70 + if false { + } else { + *((*bool)(yyv69)) = r.DecodeBool() + } + } + for { + yyj37++ + if yyhl37 { + yyb37 = yyj37 > l + } else { + yyb37 = r.CheckBreak() + } + if yyb37 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj37-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Handler) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Exec != nil + yyq2[1] = 
x.HTTPGet != nil + yyq2[2] = x.TCPSocket != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Exec == nil { + r.EncodeNil() + } else { + x.Exec.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("exec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Exec == nil { + r.EncodeNil() + } else { + x.Exec.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.HTTPGet == nil { + r.EncodeNil() + } else { + x.HTTPGet.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("httpGet")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.HTTPGet == nil { + r.EncodeNil() + } else { + x.HTTPGet.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.TCPSocket == nil { + r.EncodeNil() + } else { + x.TCPSocket.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tcpSocket")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TCPSocket == nil { + r.EncodeNil() + } else { + x.TCPSocket.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Handler) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Handler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "exec": + if r.TryDecodeAsNil() { + if x.Exec != nil { + x.Exec = nil + } + } else { + if x.Exec == nil { + x.Exec = new(ExecAction) + } + x.Exec.CodecDecodeSelf(d) + } + case "httpGet": + if r.TryDecodeAsNil() { + if x.HTTPGet != nil { + 
x.HTTPGet = nil + } + } else { + if x.HTTPGet == nil { + x.HTTPGet = new(HTTPGetAction) + } + x.HTTPGet.CodecDecodeSelf(d) + } + case "tcpSocket": + if r.TryDecodeAsNil() { + if x.TCPSocket != nil { + x.TCPSocket = nil + } + } else { + if x.TCPSocket == nil { + x.TCPSocket = new(TCPSocketAction) + } + x.TCPSocket.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Handler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Exec != nil { + x.Exec = nil + } + } else { + if x.Exec == nil { + x.Exec = new(ExecAction) + } + x.Exec.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HTTPGet != nil { + x.HTTPGet = nil + } + } else { + if x.HTTPGet == nil { + x.HTTPGet = new(HTTPGetAction) + } + x.HTTPGet.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TCPSocket != nil { + x.TCPSocket = nil + } + } else { + if x.TCPSocket == nil { + x.TCPSocket = new(TCPSocketAction) + } + x.TCPSocket.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Lifecycle) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.PostStart != nil + yyq2[1] = x.PreStop != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.PostStart == nil { + r.EncodeNil() + } else { + x.PostStart.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("postStart")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PostStart == nil { + r.EncodeNil() + } else { + x.PostStart.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PreStop == nil { + 
r.EncodeNil() + } else { + x.PreStop.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preStop")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreStop == nil { + r.EncodeNil() + } else { + x.PreStop.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Lifecycle) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Lifecycle) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "postStart": + if r.TryDecodeAsNil() { + if x.PostStart != nil { + x.PostStart = nil + } + } else { + if x.PostStart == nil { + x.PostStart = new(Handler) + } + x.PostStart.CodecDecodeSelf(d) + } + case "preStop": + if r.TryDecodeAsNil() { + if x.PreStop != nil { + x.PreStop = nil + } + } else { + if x.PreStop == nil { + x.PreStop = new(Handler) + } + x.PreStop.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Lifecycle) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PostStart != nil { + x.PostStart = nil + } + } else { + if x.PostStart == nil { + x.PostStart = new(Handler) + } + x.PostStart.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PreStop != nil { + x.PreStop = nil + } + } else { + if x.PreStop == nil { + x.PreStop = new(Handler) + } + x.PreStop.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + 
} else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ConditionStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ConditionStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *ContainerStateWaiting) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Reason != "" + yyq2[1] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerStateWaiting) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerStateWaiting) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv4 := &x.Reason + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv6 := &x.Message + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ContainerStateWaiting) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv9 := &x.Reason + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv11 := &x.Message + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ContainerStateRunning) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.StartedAt + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && 
z.EncExt(yy4) { + } else if yym5 { + z.EncBinaryMarshal(yy4) + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(yy4) + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("startedAt")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.StartedAt + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else if yym7 { + z.EncBinaryMarshal(yy6) + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(yy6) + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerStateRunning) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerStateRunning) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "startedAt": + if r.TryDecodeAsNil() { + x.StartedAt = pkg2_v1.Time{} + } else { + yyv4 := &x.StartedAt + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if yym5 { + z.DecBinaryUnmarshal(yyv4) + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv4) + } else { + z.DecFallback(yyv4, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ContainerStateRunning) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StartedAt = pkg2_v1.Time{} + } else { + yyv7 := &x.StartedAt + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if yym8 { + z.DecBinaryUnmarshal(yyv7) + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, 
false) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ContainerStateTerminated) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Signal != 0 + yyq2[2] = x.Reason != "" + yyq2[3] = x.Message != "" + yyq2[4] = true + yyq2[5] = true + yyq2[6] = x.ContainerID != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.ExitCode)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("exitCode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.ExitCode)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.Signal)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("signal")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.Signal)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy16 := &x.StartedAt + yym17 := z.EncBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.EncExt(yy16) { + } 
else if yym17 { + z.EncBinaryMarshal(yy16) + } else if !yym17 && z.IsJSONHandle() { + z.EncJSONMarshal(yy16) + } else { + z.EncFallback(yy16) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("startedAt")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy18 := &x.StartedAt + yym19 := z.EncBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.EncExt(yy18) { + } else if yym19 { + z.EncBinaryMarshal(yy18) + } else if !yym19 && z.IsJSONHandle() { + z.EncJSONMarshal(yy18) + } else { + z.EncFallback(yy18) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yy21 := &x.FinishedAt + yym22 := z.EncBinary() + _ = yym22 + if false { + } else if z.HasExtensions() && z.EncExt(yy21) { + } else if yym22 { + z.EncBinaryMarshal(yy21) + } else if !yym22 && z.IsJSONHandle() { + z.EncJSONMarshal(yy21) + } else { + z.EncFallback(yy21) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("finishedAt")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy23 := &x.FinishedAt + yym24 := z.EncBinary() + _ = yym24 + if false { + } else if z.HasExtensions() && z.EncExt(yy23) { + } else if yym24 { + z.EncBinaryMarshal(yy23) + } else if !yym24 && z.IsJSONHandle() { + z.EncJSONMarshal(yy23) + } else { + z.EncFallback(yy23) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containerID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym27 := z.EncBinary() + _ = yym27 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerStateTerminated) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerStateTerminated) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + 
z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "exitCode": + if r.TryDecodeAsNil() { + x.ExitCode = 0 + } else { + yyv4 := &x.ExitCode + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "signal": + if r.TryDecodeAsNil() { + x.Signal = 0 + } else { + yyv6 := &x.Signal + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv8 := &x.Reason + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv10 := &x.Message + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "startedAt": + if r.TryDecodeAsNil() { + x.StartedAt = pkg2_v1.Time{} + } else { + yyv12 := &x.StartedAt + yym13 := z.DecBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.DecExt(yyv12) { + } else if yym13 { + z.DecBinaryUnmarshal(yyv12) + } else if !yym13 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv12) + } else { + z.DecFallback(yyv12, false) + } + } + case "finishedAt": + if r.TryDecodeAsNil() { + x.FinishedAt = pkg2_v1.Time{} + } else { + yyv14 := &x.FinishedAt + yym15 := z.DecBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.DecExt(yyv14) { + } else if yym15 { + z.DecBinaryUnmarshal(yyv14) + } else if !yym15 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv14) + } else { + z.DecFallback(yyv14, false) + } + } + case "containerID": + if r.TryDecodeAsNil() { + x.ContainerID = "" + } else { + yyv16 := &x.ContainerID + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ContainerStateTerminated) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ExitCode = 0 + } else { + yyv19 := &x.ExitCode + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int32)(yyv19)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Signal = 0 + } else { + yyv21 := &x.Signal + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv23 := &x.Reason + yym24 := 
z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv25 := &x.Message + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StartedAt = pkg2_v1.Time{} + } else { + yyv27 := &x.StartedAt + yym28 := z.DecBinary() + _ = yym28 + if false { + } else if z.HasExtensions() && z.DecExt(yyv27) { + } else if yym28 { + z.DecBinaryUnmarshal(yyv27) + } else if !yym28 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv27) + } else { + z.DecFallback(yyv27, false) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FinishedAt = pkg2_v1.Time{} + } else { + yyv29 := &x.FinishedAt + yym30 := z.DecBinary() + _ = yym30 + if false { + } else if z.HasExtensions() && z.DecExt(yyv29) { + } else if yym30 { + z.DecBinaryUnmarshal(yyv29) + } else if !yym30 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv29) + } else { + z.DecFallback(yyv29, false) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContainerID = "" + } else { + yyv31 := &x.ContainerID + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ContainerState) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Waiting != nil + yyq2[1] = x.Running != nil + yyq2[2] = x.Terminated != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Waiting == nil { + r.EncodeNil() + } else { + x.Waiting.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("waiting")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Waiting == nil { + r.EncodeNil() + } else { + x.Waiting.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Running == nil { + r.EncodeNil() + } else { + x.Running.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("running")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Running == nil { + r.EncodeNil() + } else { + x.Running.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Terminated == nil { + r.EncodeNil() + } else { + x.Terminated.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("terminated")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Terminated == nil { + r.EncodeNil() + } else { + x.Terminated.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerState) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerState) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "waiting": + if r.TryDecodeAsNil() { + if x.Waiting != nil { + x.Waiting = nil + } + } else { + if x.Waiting == nil { + x.Waiting = new(ContainerStateWaiting) + } + x.Waiting.CodecDecodeSelf(d) + } + case "running": + if r.TryDecodeAsNil() { + if x.Running != nil { + x.Running = nil + } + } else { + if x.Running == nil { + x.Running = new(ContainerStateRunning) + } + x.Running.CodecDecodeSelf(d) + } + case "terminated": + if r.TryDecodeAsNil() { + if x.Terminated != nil { + x.Terminated = nil + } + } else { + if x.Terminated == nil { + x.Terminated = new(ContainerStateTerminated) + } + x.Terminated.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x 
*ContainerState) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Waiting != nil { + x.Waiting = nil + } + } else { + if x.Waiting == nil { + x.Waiting = new(ContainerStateWaiting) + } + x.Waiting.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Running != nil { + x.Running = nil + } + } else { + if x.Running == nil { + x.Running = new(ContainerStateRunning) + } + x.Running.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Terminated != nil { + x.Terminated = nil + } + } else { + if x.Terminated == nil { + x.Terminated = new(ContainerStateTerminated) + } + x.Terminated.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ContainerStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = true + yyq2[2] = true + yyq2[7] = x.ContainerID != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(8) + } else { + yynn2 = 5 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy7 := &x.State + yy7.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("state")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.State + yy9.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy12 := &x.LastTerminationState + 
yy12.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastState")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.LastTerminationState + yy14.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Ready)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ready")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeBool(bool(x.Ready)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.RestartCount)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("restartCount")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeInt(int64(x.RestartCount)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Image)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("image")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Image)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ImageID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("imageID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym27 := z.EncBinary() + _ = yym27 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ImageID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containerID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym30 := z.EncBinary() + _ = yym30 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "state": + if r.TryDecodeAsNil() { + x.State = ContainerState{} + } else { + yyv6 := &x.State + yyv6.CodecDecodeSelf(d) + } + case "lastState": + if r.TryDecodeAsNil() { + x.LastTerminationState = ContainerState{} + } else { + yyv7 := &x.LastTerminationState + yyv7.CodecDecodeSelf(d) + } + case "ready": + if r.TryDecodeAsNil() { + x.Ready = false + } else { + yyv8 := &x.Ready + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + case "restartCount": + if r.TryDecodeAsNil() { + x.RestartCount = 0 + } else { + yyv10 := &x.RestartCount + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "image": + if r.TryDecodeAsNil() { + x.Image = "" + } else { + yyv12 := &x.Image + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "imageID": + if r.TryDecodeAsNil() { + x.ImageID = "" + } else { + yyv14 := &x.ImageID + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "containerID": + if r.TryDecodeAsNil() { + x.ContainerID = "" + } else { + yyv16 := &x.ContainerID + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ContainerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv19 := &x.Name + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + 
if r.TryDecodeAsNil() { + x.State = ContainerState{} + } else { + yyv21 := &x.State + yyv21.CodecDecodeSelf(d) + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTerminationState = ContainerState{} + } else { + yyv22 := &x.LastTerminationState + yyv22.CodecDecodeSelf(d) + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ready = false + } else { + yyv23 := &x.Ready + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(yyv23)) = r.DecodeBool() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RestartCount = 0 + } else { + yyv25 := &x.RestartCount + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Image = "" + } else { + yyv27 := &x.Image + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ImageID = "" + } else { + yyv29 := &x.ImageID + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContainerID = "" + } else { + yyv31 := &x.ContainerID + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x PodPhase) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PodPhase) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x PodConditionType) CodecEncodeSelf(e 
*codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PodConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *PodCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Status.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Status.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastProbeTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastProbeTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.LastTransitionTime + yym18 := 
z.EncBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } else if !yym18 && z.IsJSONHandle() { + z.EncJSONMarshal(yy17) + } else { + z.EncFallback(yy17) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "lastProbeTime": + if r.TryDecodeAsNil() { + x.LastProbeTime = pkg2_v1.Time{} + } else { + yyv6 := &x.LastProbeTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && 
z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "lastTransitionTime": + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_v1.Time{} + } else { + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv10 := &x.Reason + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv12 := &x.Message + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv15 := &x.Type + yyv15.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv16 := &x.Status + yyv16.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastProbeTime = pkg2_v1.Time{} + } else { + yyv17 := &x.LastProbeTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_v1.Time{} + } else { + yyv19 := &x.LastTransitionTime + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if yym20 { + z.DecBinaryUnmarshal(yyv19) + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) + } else { + z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv21 := &x.Reason + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = 
r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv23 := &x.Message + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x RestartPolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *RestartPolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x DNSPolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *DNSPolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *NodeSelector) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.NodeSelectorTerms == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceNodeSelectorTerm(([]NodeSelectorTerm)(x.NodeSelectorTerms), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodeSelectorTerms")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NodeSelectorTerms == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceNodeSelectorTerm(([]NodeSelectorTerm)(x.NodeSelectorTerms), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeSelector) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "nodeSelectorTerms": + if r.TryDecodeAsNil() { + x.NodeSelectorTerms = nil + } else { + yyv4 := &x.NodeSelectorTerms + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NodeSelectorTerms = nil + } else { + yyv7 := &x.NodeSelectorTerms + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeSelectorTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.MatchExpressions == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + 
h.encSliceNodeSelectorRequirement(([]NodeSelectorRequirement)(x.MatchExpressions), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("matchExpressions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MatchExpressions == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceNodeSelectorRequirement(([]NodeSelectorRequirement)(x.MatchExpressions), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeSelectorTerm) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeSelectorTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "matchExpressions": + if r.TryDecodeAsNil() { + x.MatchExpressions = nil + } else { + yyv4 := &x.MatchExpressions + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeSelectorTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MatchExpressions = nil + } else { + yyv7 := &x.MatchExpressions + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeSelectorRequirement) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + 
z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = len(x.Values) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Operator.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("operator")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Operator.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Values == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + z.F.EncSliceStringV(x.Values, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("values")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Values == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + z.F.EncSliceStringV(x.Values, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeSelectorRequirement) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeSelectorRequirement) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) 
+ yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv4 := &x.Key + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "operator": + if r.TryDecodeAsNil() { + x.Operator = "" + } else { + yyv6 := &x.Operator + yyv6.CodecDecodeSelf(d) + } + case "values": + if r.TryDecodeAsNil() { + x.Values = nil + } else { + yyv7 := &x.Values + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + z.F.DecSliceStringX(yyv7, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeSelectorRequirement) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv10 := &x.Key + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Operator = "" + } else { + yyv12 := &x.Operator + yyv12.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Values = nil + } else { + yyv13 := &x.Values + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecSliceStringX(yyv13, false, d) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x NodeSelectorOperator) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *NodeSelectorOperator) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.NodeAffinity != nil + yyq2[1] = 
x.PodAffinity != nil + yyq2[2] = x.PodAntiAffinity != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.NodeAffinity == nil { + r.EncodeNil() + } else { + x.NodeAffinity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodeAffinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NodeAffinity == nil { + r.EncodeNil() + } else { + x.NodeAffinity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PodAffinity == nil { + r.EncodeNil() + } else { + x.PodAffinity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podAffinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PodAffinity == nil { + r.EncodeNil() + } else { + x.PodAffinity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.PodAntiAffinity == nil { + r.EncodeNil() + } else { + x.PodAntiAffinity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podAntiAffinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PodAntiAffinity == nil { + r.EncodeNil() + } else { + x.PodAntiAffinity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Affinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Affinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "nodeAffinity": + if r.TryDecodeAsNil() { + if x.NodeAffinity != nil { + x.NodeAffinity = nil + } + } else { + if x.NodeAffinity == nil { 
+ x.NodeAffinity = new(NodeAffinity) + } + x.NodeAffinity.CodecDecodeSelf(d) + } + case "podAffinity": + if r.TryDecodeAsNil() { + if x.PodAffinity != nil { + x.PodAffinity = nil + } + } else { + if x.PodAffinity == nil { + x.PodAffinity = new(PodAffinity) + } + x.PodAffinity.CodecDecodeSelf(d) + } + case "podAntiAffinity": + if r.TryDecodeAsNil() { + if x.PodAntiAffinity != nil { + x.PodAntiAffinity = nil + } + } else { + if x.PodAntiAffinity == nil { + x.PodAntiAffinity = new(PodAntiAffinity) + } + x.PodAntiAffinity.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Affinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NodeAffinity != nil { + x.NodeAffinity = nil + } + } else { + if x.NodeAffinity == nil { + x.NodeAffinity = new(NodeAffinity) + } + x.NodeAffinity.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PodAffinity != nil { + x.PodAffinity = nil + } + } else { + if x.PodAffinity == nil { + x.PodAffinity = new(PodAffinity) + } + x.PodAffinity.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PodAntiAffinity != nil { + x.PodAntiAffinity = nil + } + } else { + if x.PodAntiAffinity == nil { + x.PodAntiAffinity = new(PodAntiAffinity) + } + x.PodAntiAffinity.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAffinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 + yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + 
h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodAffinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "requiredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) + } + } + case "preferredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + 
x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAntiAffinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 + yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + 
h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodAntiAffinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAntiAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "requiredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) + } + } + case "preferredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAntiAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *WeightedPodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("weight")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.PodAffinityTerm + yy7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podAffinityTerm")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.PodAffinityTerm + yy9.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *WeightedPodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *WeightedPodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "weight": + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + yyv4 := &x.Weight + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "podAffinityTerm": + if r.TryDecodeAsNil() { + x.PodAffinityTerm = PodAffinityTerm{} + } else { + yyv6 := &x.PodAffinityTerm + yyv6.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *WeightedPodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + yyv8 := &x.Weight + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodAffinityTerm = PodAffinityTerm{} + } else { + yyv10 := &x.PodAffinityTerm + yyv10.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.LabelSelector != nil + yyq2[1] = len(x.Namespaces) != 0 + yyq2[2] = x.TopologyKey != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } 
+ if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.LabelSelector == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { + } else { + z.EncFallback(x.LabelSelector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("labelSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LabelSelector == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { + } else { + z.EncFallback(x.LabelSelector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Namespaces == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncSliceStringV(x.Namespaces, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespaces")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Namespaces == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncSliceStringV(x.Namespaces, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("topologyKey")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 
:= string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "labelSelector": + if r.TryDecodeAsNil() { + if x.LabelSelector != nil { + x.LabelSelector = nil + } + } else { + if x.LabelSelector == nil { + x.LabelSelector = new(pkg2_v1.LabelSelector) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { + } else { + z.DecFallback(x.LabelSelector, false) + } + } + case "namespaces": + if r.TryDecodeAsNil() { + x.Namespaces = nil + } else { + yyv6 := &x.Namespaces + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecSliceStringX(yyv6, false, d) + } + } + case "topologyKey": + if r.TryDecodeAsNil() { + x.TopologyKey = "" + } else { + yyv8 := &x.TopologyKey + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LabelSelector != nil { + x.LabelSelector = nil + } + } else { + if x.LabelSelector == nil { + x.LabelSelector = new(pkg2_v1.LabelSelector) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { + } else { + z.DecFallback(x.LabelSelector, false) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespaces = nil + } else { + yyv13 := &x.Namespaces + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecSliceStringX(yyv13, false, d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TopologyKey = "" + } else { + yyv15 := &x.TopologyKey + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.RequiredDuringSchedulingIgnoredDuringExecution != nil + yyq2[1] = 
len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeAffinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "requiredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + if 
x.RequiredDuringSchedulingIgnoredDuringExecution != nil { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + } else { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) + } + x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) + } + case "preferredDuringSchedulingIgnoredDuringExecution": + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv5 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv5), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RequiredDuringSchedulingIgnoredDuringExecution != nil { + x.RequiredDuringSchedulingIgnoredDuringExecution = nil + } + } else { + if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { + x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) + } + x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PreferredDuringSchedulingIgnoredDuringExecution = nil + } else { + yyv9 := &x.PreferredDuringSchedulingIgnoredDuringExecution + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv9), d) + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PreferredSchedulingTerm) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Weight)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("weight")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + 
r.EncodeInt(int64(x.Weight)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.Preference + yy7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preference")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.Preference + yy9.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PreferredSchedulingTerm) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PreferredSchedulingTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "weight": + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + yyv4 := &x.Weight + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "preference": + if r.TryDecodeAsNil() { + x.Preference = NodeSelectorTerm{} + } else { + yyv6 := &x.Preference + yyv6.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PreferredSchedulingTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Weight = 0 + } else { + yyv8 := &x.Weight + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Preference = NodeSelectorTerm{} + } else { + yyv10 := &x.Preference + yyv10.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = 
r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Taint) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Value != "" + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Effect.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("effect")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Effect.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy13 := &x.TimeAdded + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(yy13) { + } else if yym14 { + z.EncBinaryMarshal(yy13) + } else if !yym14 && z.IsJSONHandle() { + z.EncJSONMarshal(yy13) + } else { + z.EncFallback(yy13) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("timeAdded")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy15 := &x.TimeAdded + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Taint) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := 
z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Taint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv4 := &x.Key + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv6 := &x.Value + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "effect": + if r.TryDecodeAsNil() { + x.Effect = "" + } else { + yyv8 := &x.Effect + yyv8.CodecDecodeSelf(d) + } + case "timeAdded": + if r.TryDecodeAsNil() { + x.TimeAdded = pkg2_v1.Time{} + } else { + yyv9 := &x.TimeAdded + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if yym10 { + z.DecBinaryUnmarshal(yyv9) + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv9) + } else { + z.DecFallback(yyv9, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Taint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv12 := &x.Key + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv14 := &x.Value + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Effect 
= "" + } else { + yyv16 := &x.Effect + yyv16.CodecDecodeSelf(d) + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TimeAdded = pkg2_v1.Time{} + } else { + yyv17 := &x.TimeAdded + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x TaintEffect) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *TaintEffect) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *Toleration) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Key != "" + yyq2[1] = x.Operator != "" + yyq2[2] = x.Value != "" + yyq2[3] = x.Effect != "" + yyq2[4] = x.TolerationSeconds != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("key")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Key)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + x.Operator.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("operator")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Operator.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Value)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + x.Effect.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("effect")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Effect.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.TolerationSeconds == nil { + r.EncodeNil() + } else { + yy16 := *x.TolerationSeconds + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(yy16)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tolerationSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TolerationSeconds == nil { + r.EncodeNil() + } else { + yy18 := *x.TolerationSeconds + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(yy18)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Toleration) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Toleration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv4 := &x.Key + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "operator": + if r.TryDecodeAsNil() { + x.Operator = "" + } else { + yyv6 := &x.Operator + yyv6.CodecDecodeSelf(d) + } + case "value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv7 := &x.Value + 
yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + case "effect": + if r.TryDecodeAsNil() { + x.Effect = "" + } else { + yyv9 := &x.Effect + yyv9.CodecDecodeSelf(d) + } + case "tolerationSeconds": + if r.TryDecodeAsNil() { + if x.TolerationSeconds != nil { + x.TolerationSeconds = nil + } + } else { + if x.TolerationSeconds == nil { + x.TolerationSeconds = new(int64) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int64)(x.TolerationSeconds)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Toleration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv13 := &x.Key + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Operator = "" + } else { + yyv15 := &x.Operator + yyv15.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv16 := &x.Value + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Effect = "" + } else { + yyv18 := &x.Effect + yyv18.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TolerationSeconds != nil { + x.TolerationSeconds = nil + } + } else { + if x.TolerationSeconds == nil { + x.TolerationSeconds = new(int64) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int64)(x.TolerationSeconds)) = int64(r.DecodeInt(64)) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x TolerationOperator) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *TolerationOperator) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [22]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Volumes) != 0 + yyq2[1] = len(x.InitContainers) != 0 + yyq2[3] = x.RestartPolicy != "" + yyq2[4] = x.TerminationGracePeriodSeconds != nil + yyq2[5] = x.ActiveDeadlineSeconds != nil + yyq2[6] = x.DNSPolicy != "" + yyq2[7] = len(x.NodeSelector) != 0 + yyq2[8] = x.ServiceAccountName != "" + yyq2[9] = x.DeprecatedServiceAccount != "" + yyq2[10] = x.AutomountServiceAccountToken != nil + yyq2[11] = x.NodeName != "" + yyq2[12] = x.HostNetwork != false + yyq2[13] = x.HostPID != false + yyq2[14] = x.HostIPC != false + yyq2[15] = x.SecurityContext != nil + yyq2[16] = len(x.ImagePullSecrets) != 0 + yyq2[17] = x.Hostname != "" + yyq2[18] = x.Subdomain != "" + yyq2[19] = x.Affinity != nil + yyq2[20] = x.SchedulerName != "" + yyq2[21] = len(x.Tolerations) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(22) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Volumes == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceVolume(([]Volume)(x.Volumes), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Volumes == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceVolume(([]Volume)(x.Volumes), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.InitContainers == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceContainer(([]Container)(x.InitContainers), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("initContainers")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.InitContainers == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceContainer(([]Container)(x.InitContainers), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Containers == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceContainer(([]Container)(x.Containers), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containers")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Containers == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + h.encSliceContainer(([]Container)(x.Containers), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + x.RestartPolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("restartPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.RestartPolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.TerminationGracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy16 := *x.TerminationGracePeriodSeconds + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(yy16)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("terminationGracePeriodSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TerminationGracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy18 := *x.TerminationGracePeriodSeconds + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(yy18)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.ActiveDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy21 := *x.ActiveDeadlineSeconds + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeInt(int64(yy21)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ActiveDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy23 := *x.ActiveDeadlineSeconds + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeInt(int64(yy23)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + x.DNSPolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("dnsPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.DNSPolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.NodeSelector == nil { + r.EncodeNil() + } else { + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + z.F.EncMapStringStringV(x.NodeSelector, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodeSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NodeSelector == nil { + r.EncodeNil() + } else { + yym30 := z.EncBinary() + _ = yym30 + if false { + } else { + z.F.EncMapStringStringV(x.NodeSelector, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("serviceAccountName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym33 := z.EncBinary() + _ = yym33 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + yym35 := z.EncBinary() + _ = yym35 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DeprecatedServiceAccount)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("serviceAccount")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym36 := z.EncBinary() + _ = yym36 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DeprecatedServiceAccount)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + if x.AutomountServiceAccountToken == nil { + r.EncodeNil() + } else { + yy38 := *x.AutomountServiceAccountToken + yym39 := z.EncBinary() + _ = yym39 + if false { + } else { + r.EncodeBool(bool(yy38)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("automountServiceAccountToken")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AutomountServiceAccountToken == nil { + r.EncodeNil() + } else { + yy40 := *x.AutomountServiceAccountToken + yym41 := z.EncBinary() + _ = yym41 + if false { + } else { + r.EncodeBool(bool(yy40)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[11] { + yym43 := z.EncBinary() + _ = yym43 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.NodeName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[11] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodeName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym44 := z.EncBinary() + _ = yym44 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.NodeName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[12] { + yym46 := z.EncBinary() + _ = yym46 + if false { + } else { + r.EncodeBool(bool(x.HostNetwork)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[12] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostNetwork")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym47 := z.EncBinary() + _ = yym47 + if false { + } else { + r.EncodeBool(bool(x.HostNetwork)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + yym49 := z.EncBinary() + _ = yym49 + if false { + } else { + r.EncodeBool(bool(x.HostPID)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPID")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym50 := z.EncBinary() + _ = yym50 + if false { + } else { + r.EncodeBool(bool(x.HostPID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[14] { + yym52 := z.EncBinary() + _ = yym52 + if false { + } else { + r.EncodeBool(bool(x.HostIPC)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[14] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostIPC")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym53 := z.EncBinary() + _ = yym53 + if false { + } else { + r.EncodeBool(bool(x.HostIPC)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[15] { + if x.SecurityContext == nil { + r.EncodeNil() + } else { + x.SecurityContext.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[15] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("securityContext")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SecurityContext == nil { + r.EncodeNil() + } else { + x.SecurityContext.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[16] { + if x.ImagePullSecrets == nil { + r.EncodeNil() + } else { + yym58 := z.EncBinary() + _ = yym58 + if false { + } else { + h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[16] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("imagePullSecrets")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ImagePullSecrets == nil { + r.EncodeNil() + } else { + yym59 := z.EncBinary() + _ = yym59 + if false { + } else { + h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[17] { + yym61 := z.EncBinary() + _ = yym61 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[17] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostname")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym62 := z.EncBinary() + _ = yym62 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[18] { + yym64 := z.EncBinary() + _ = yym64 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[18] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subdomain")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym65 := z.EncBinary() + _ = yym65 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[19] { + if x.Affinity == nil { + r.EncodeNil() + } else { + x.Affinity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[19] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("affinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Affinity == nil { + r.EncodeNil() + } else { + x.Affinity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[20] { + yym70 := z.EncBinary() + _ = yym70 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[20] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("schedulerName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym71 := z.EncBinary() + _ = yym71 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[21] { + if x.Tolerations == nil { + r.EncodeNil() + } else { + yym73 := z.EncBinary() + _ = yym73 + if false { + } else { + h.encSliceToleration(([]Toleration)(x.Tolerations), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[21] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tolerations")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Tolerations == nil { + r.EncodeNil() + } else { + yym74 := z.EncBinary() + _ = yym74 + if false { + } else { + h.encSliceToleration(([]Toleration)(x.Tolerations), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "volumes": + if r.TryDecodeAsNil() { + x.Volumes = nil + } else { + yyv4 := &x.Volumes + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceVolume((*[]Volume)(yyv4), d) + } + } + case "initContainers": + if r.TryDecodeAsNil() { + x.InitContainers = nil + } else { + yyv6 := &x.InitContainers + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + 
h.decSliceContainer((*[]Container)(yyv6), d) + } + } + case "containers": + if r.TryDecodeAsNil() { + x.Containers = nil + } else { + yyv8 := &x.Containers + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + h.decSliceContainer((*[]Container)(yyv8), d) + } + } + case "restartPolicy": + if r.TryDecodeAsNil() { + x.RestartPolicy = "" + } else { + yyv10 := &x.RestartPolicy + yyv10.CodecDecodeSelf(d) + } + case "terminationGracePeriodSeconds": + if r.TryDecodeAsNil() { + if x.TerminationGracePeriodSeconds != nil { + x.TerminationGracePeriodSeconds = nil + } + } else { + if x.TerminationGracePeriodSeconds == nil { + x.TerminationGracePeriodSeconds = new(int64) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64)) + } + } + case "activeDeadlineSeconds": + if r.TryDecodeAsNil() { + if x.ActiveDeadlineSeconds != nil { + x.ActiveDeadlineSeconds = nil + } + } else { + if x.ActiveDeadlineSeconds == nil { + x.ActiveDeadlineSeconds = new(int64) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + case "dnsPolicy": + if r.TryDecodeAsNil() { + x.DNSPolicy = "" + } else { + yyv15 := &x.DNSPolicy + yyv15.CodecDecodeSelf(d) + } + case "nodeSelector": + if r.TryDecodeAsNil() { + x.NodeSelector = nil + } else { + yyv16 := &x.NodeSelector + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + z.F.DecMapStringStringX(yyv16, false, d) + } + } + case "serviceAccountName": + if r.TryDecodeAsNil() { + x.ServiceAccountName = "" + } else { + yyv18 := &x.ServiceAccountName + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + case "serviceAccount": + if r.TryDecodeAsNil() { + x.DeprecatedServiceAccount = "" + } else { + yyv20 := &x.DeprecatedServiceAccount + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(yyv20)) = r.DecodeString() + } + } + case "automountServiceAccountToken": + if r.TryDecodeAsNil() { + if x.AutomountServiceAccountToken != nil { + x.AutomountServiceAccountToken = nil + } + } else { + if x.AutomountServiceAccountToken == nil { + x.AutomountServiceAccountToken = new(bool) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*bool)(x.AutomountServiceAccountToken)) = r.DecodeBool() + } + } + case "nodeName": + if r.TryDecodeAsNil() { + x.NodeName = "" + } else { + yyv24 := &x.NodeName + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + *((*string)(yyv24)) = r.DecodeString() + } + } + case "hostNetwork": + if r.TryDecodeAsNil() { + x.HostNetwork = false + } else { + yyv26 := &x.HostNetwork + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*bool)(yyv26)) = r.DecodeBool() + } + } + case "hostPID": + if r.TryDecodeAsNil() { + x.HostPID = false + } else { + yyv28 := &x.HostPID + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*bool)(yyv28)) = r.DecodeBool() + } + } + case "hostIPC": + if r.TryDecodeAsNil() { + x.HostIPC = false + } else { + yyv30 := &x.HostIPC + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*bool)(yyv30)) = r.DecodeBool() + } + } + case "securityContext": + if r.TryDecodeAsNil() { + if x.SecurityContext != nil { + x.SecurityContext = nil + } + } else { + if x.SecurityContext == nil { + x.SecurityContext = new(PodSecurityContext) + } + x.SecurityContext.CodecDecodeSelf(d) + } + case "imagePullSecrets": + if r.TryDecodeAsNil() { + x.ImagePullSecrets = 
nil + } else { + yyv33 := &x.ImagePullSecrets + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv33), d) + } + } + case "hostname": + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + yyv35 := &x.Hostname + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*string)(yyv35)) = r.DecodeString() + } + } + case "subdomain": + if r.TryDecodeAsNil() { + x.Subdomain = "" + } else { + yyv37 := &x.Subdomain + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*string)(yyv37)) = r.DecodeString() + } + } + case "affinity": + if r.TryDecodeAsNil() { + if x.Affinity != nil { + x.Affinity = nil + } + } else { + if x.Affinity == nil { + x.Affinity = new(Affinity) + } + x.Affinity.CodecDecodeSelf(d) + } + case "schedulerName": + if r.TryDecodeAsNil() { + x.SchedulerName = "" + } else { + yyv40 := &x.SchedulerName + yym41 := z.DecBinary() + _ = yym41 + if false { + } else { + *((*string)(yyv40)) = r.DecodeString() + } + } + case "tolerations": + if r.TryDecodeAsNil() { + x.Tolerations = nil + } else { + yyv42 := &x.Tolerations + yym43 := z.DecBinary() + _ = yym43 + if false { + } else { + h.decSliceToleration((*[]Toleration)(yyv42), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj44 int + var yyb44 bool + var yyhl44 bool = l >= 0 + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Volumes = nil + } else { + yyv45 := &x.Volumes + yym46 := z.DecBinary() + _ = yym46 + if false { + } else { + h.decSliceVolume((*[]Volume)(yyv45), d) + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.InitContainers = nil + } else { + yyv47 := &x.InitContainers + yym48 := z.DecBinary() + _ = yym48 + if false { + } else { + h.decSliceContainer((*[]Container)(yyv47), d) + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Containers = nil + } else { + yyv49 := &x.Containers + yym50 := z.DecBinary() + _ = yym50 + if false { + } else { + h.decSliceContainer((*[]Container)(yyv49), d) + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RestartPolicy = "" + } else { + yyv51 := &x.RestartPolicy + yyv51.CodecDecodeSelf(d) + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if 
x.TerminationGracePeriodSeconds != nil { + x.TerminationGracePeriodSeconds = nil + } + } else { + if x.TerminationGracePeriodSeconds == nil { + x.TerminationGracePeriodSeconds = new(int64) + } + yym53 := z.DecBinary() + _ = yym53 + if false { + } else { + *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ActiveDeadlineSeconds != nil { + x.ActiveDeadlineSeconds = nil + } + } else { + if x.ActiveDeadlineSeconds == nil { + x.ActiveDeadlineSeconds = new(int64) + } + yym55 := z.DecBinary() + _ = yym55 + if false { + } else { + *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DNSPolicy = "" + } else { + yyv56 := &x.DNSPolicy + yyv56.CodecDecodeSelf(d) + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NodeSelector = nil + } else { + yyv57 := &x.NodeSelector + yym58 := z.DecBinary() + _ = yym58 + if false { + } else { + z.F.DecMapStringStringX(yyv57, false, d) + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ServiceAccountName = "" + } else { + yyv59 := &x.ServiceAccountName + yym60 := z.DecBinary() + _ = yym60 + if false { + } else { + *((*string)(yyv59)) = r.DecodeString() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DeprecatedServiceAccount = "" + } else { + yyv61 := &x.DeprecatedServiceAccount + yym62 := z.DecBinary() + _ = yym62 + if false { + } else { + *((*string)(yyv61)) = r.DecodeString() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AutomountServiceAccountToken != nil { + x.AutomountServiceAccountToken = nil + } + } else { + if x.AutomountServiceAccountToken == nil { + x.AutomountServiceAccountToken = new(bool) + } + yym64 := z.DecBinary() + _ = yym64 + if false { + } else { + *((*bool)(x.AutomountServiceAccountToken)) = r.DecodeBool() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NodeName = "" + } else { + yyv65 := &x.NodeName + yym66 := z.DecBinary() + _ = yym66 + if false { + } else { + *((*string)(yyv65)) = 
r.DecodeString() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostNetwork = false + } else { + yyv67 := &x.HostNetwork + yym68 := z.DecBinary() + _ = yym68 + if false { + } else { + *((*bool)(yyv67)) = r.DecodeBool() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostPID = false + } else { + yyv69 := &x.HostPID + yym70 := z.DecBinary() + _ = yym70 + if false { + } else { + *((*bool)(yyv69)) = r.DecodeBool() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostIPC = false + } else { + yyv71 := &x.HostIPC + yym72 := z.DecBinary() + _ = yym72 + if false { + } else { + *((*bool)(yyv71)) = r.DecodeBool() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SecurityContext != nil { + x.SecurityContext = nil + } + } else { + if x.SecurityContext == nil { + x.SecurityContext = new(PodSecurityContext) + } + x.SecurityContext.CodecDecodeSelf(d) + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ImagePullSecrets = nil + } else { + yyv74 := &x.ImagePullSecrets + yym75 := z.DecBinary() + _ = yym75 + if false { + } else { + h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv74), d) + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + yyv76 := &x.Hostname + yym77 := z.DecBinary() + _ = yym77 + if false { + } else { + *((*string)(yyv76)) = r.DecodeString() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subdomain = "" + } else { + yyv78 := &x.Subdomain + yym79 := z.DecBinary() + _ = yym79 + if false { + } else { + *((*string)(yyv78)) = r.DecodeString() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Affinity != nil { + x.Affinity = nil + } + } else { + if x.Affinity == nil { + x.Affinity = new(Affinity) + } + x.Affinity.CodecDecodeSelf(d) + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + 
if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SchedulerName = "" + } else { + yyv81 := &x.SchedulerName + yym82 := z.DecBinary() + _ = yym82 + if false { + } else { + *((*string)(yyv81)) = r.DecodeString() + } + } + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Tolerations = nil + } else { + yyv83 := &x.Tolerations + yym84 := z.DecBinary() + _ = yym84 + if false { + } else { + h.decSliceToleration((*[]Toleration)(yyv83), d) + } + } + for { + yyj44++ + if yyhl44 { + yyb44 = yyj44 > l + } else { + yyb44 = r.CheckBreak() + } + if yyb44 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj44-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodSecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.SELinuxOptions != nil + yyq2[1] = x.RunAsUser != nil + yyq2[2] = x.RunAsNonRoot != nil + yyq2[3] = len(x.SupplementalGroups) != 0 + yyq2[4] = x.FSGroup != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.SELinuxOptions == nil { + r.EncodeNil() + } else { + x.SELinuxOptions.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SELinuxOptions == nil { + r.EncodeNil() + } else { + x.SELinuxOptions.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.RunAsUser == nil { + r.EncodeNil() + } else { + yy7 := *x.RunAsUser + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RunAsUser == nil { + r.EncodeNil() + } else { + yy9 := *x.RunAsUser + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.RunAsNonRoot == nil { + r.EncodeNil() + } else { + yy12 := *x.RunAsNonRoot + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(yy12)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("runAsNonRoot")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RunAsNonRoot == nil { + r.EncodeNil() + } else { + yy14 := *x.RunAsNonRoot + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeBool(bool(yy14)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.SupplementalGroups == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + z.F.EncSliceInt64V(x.SupplementalGroups, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("supplementalGroups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SupplementalGroups == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + z.F.EncSliceInt64V(x.SupplementalGroups, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.FSGroup == nil { + r.EncodeNil() + } else { + yy20 := *x.FSGroup + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeInt(int64(yy20)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsGroup")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FSGroup == nil { + r.EncodeNil() + } else { + yy22 := *x.FSGroup + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeInt(int64(yy22)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodSecurityContext) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodSecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "seLinuxOptions": + if r.TryDecodeAsNil() { + if x.SELinuxOptions != nil { + x.SELinuxOptions = nil + } + } else { + if x.SELinuxOptions == nil { + x.SELinuxOptions = new(SELinuxOptions) + } + x.SELinuxOptions.CodecDecodeSelf(d) + } + case "runAsUser": + if r.TryDecodeAsNil() { + if x.RunAsUser != nil { + 
x.RunAsUser = nil + } + } else { + if x.RunAsUser == nil { + x.RunAsUser = new(int64) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) + } + } + case "runAsNonRoot": + if r.TryDecodeAsNil() { + if x.RunAsNonRoot != nil { + x.RunAsNonRoot = nil + } + } else { + if x.RunAsNonRoot == nil { + x.RunAsNonRoot = new(bool) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() + } + } + case "supplementalGroups": + if r.TryDecodeAsNil() { + x.SupplementalGroups = nil + } else { + yyv9 := &x.SupplementalGroups + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + z.F.DecSliceInt64X(yyv9, false, d) + } + } + case "fsGroup": + if r.TryDecodeAsNil() { + if x.FSGroup != nil { + x.FSGroup = nil + } + } else { + if x.FSGroup == nil { + x.FSGroup = new(int64) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int64)(x.FSGroup)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodSecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SELinuxOptions != nil { + x.SELinuxOptions = nil + } + } else { + if x.SELinuxOptions == nil { + x.SELinuxOptions = new(SELinuxOptions) + } + x.SELinuxOptions.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RunAsUser != nil { + x.RunAsUser = nil + } + } else { + if x.RunAsUser == nil { + x.RunAsUser = new(int64) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RunAsNonRoot != nil { + x.RunAsNonRoot = nil + } + } else { + if x.RunAsNonRoot == nil { + x.RunAsNonRoot = new(bool) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SupplementalGroups = nil + } else { + yyv19 := &x.SupplementalGroups + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + z.F.DecSliceInt64X(yyv19, false, d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FSGroup 
!= nil { + x.FSGroup = nil + } + } else { + if x.FSGroup == nil { + x.FSGroup = new(int64) + } + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int64)(x.FSGroup)) = int64(r.DecodeInt(64)) + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x PodQOSClass) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *PodQOSClass) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *PodStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Phase != "" + yyq2[1] = len(x.Conditions) != 0 + yyq2[2] = x.Message != "" + yyq2[3] = x.Reason != "" + yyq2[4] = x.HostIP != "" + yyq2[5] = x.PodIP != "" + yyq2[6] = x.StartTime != nil + yyq2[7] = len(x.InitContainerStatuses) != 0 + yyq2[8] = len(x.ContainerStatuses) != 0 + yyq2[9] = x.QOSClass != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Phase.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("phase")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Phase.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSlicePodCondition(([]PodCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSlicePodCondition(([]PodCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostIP")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PodIP)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podIP")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PodIP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.StartTime == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else if z.HasExtensions() && z.EncExt(x.StartTime) { + } else if yym22 { + z.EncBinaryMarshal(x.StartTime) + } else if !yym22 && z.IsJSONHandle() { + z.EncJSONMarshal(x.StartTime) + } else { + z.EncFallback(x.StartTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("startTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.StartTime == nil { + r.EncodeNil() + } else { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else if z.HasExtensions() && z.EncExt(x.StartTime) { + } else if yym23 { + z.EncBinaryMarshal(x.StartTime) + } else if !yym23 && z.IsJSONHandle() { + z.EncJSONMarshal(x.StartTime) + } else { + z.EncFallback(x.StartTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.InitContainerStatuses == nil { + r.EncodeNil() + } else { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + h.encSliceContainerStatus(([]ContainerStatus)(x.InitContainerStatuses), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("initContainerStatuses")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.InitContainerStatuses == nil { + r.EncodeNil() + } else { + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + h.encSliceContainerStatus(([]ContainerStatus)(x.InitContainerStatuses), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.ContainerStatuses == nil { + r.EncodeNil() + } else { + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + h.encSliceContainerStatus(([]ContainerStatus)(x.ContainerStatuses), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containerStatuses")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ContainerStatuses == nil { + r.EncodeNil() + } else { + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + h.encSliceContainerStatus(([]ContainerStatus)(x.ContainerStatuses), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + x.QOSClass.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("qosClass")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.QOSClass.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "phase": + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv4 := &x.Phase + yyv4.CodecDecodeSelf(d) + } + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv5 := &x.Conditions + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSlicePodCondition((*[]PodCondition)(yyv5), d) + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else 
{ + yyv7 := &x.Message + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv9 := &x.Reason + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + case "hostIP": + if r.TryDecodeAsNil() { + x.HostIP = "" + } else { + yyv11 := &x.HostIP + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + case "podIP": + if r.TryDecodeAsNil() { + x.PodIP = "" + } else { + yyv13 := &x.PodIP + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + case "startTime": + if r.TryDecodeAsNil() { + if x.StartTime != nil { + x.StartTime = nil + } + } else { + if x.StartTime == nil { + x.StartTime = new(pkg2_v1.Time) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(x.StartTime) { + } else if yym16 { + z.DecBinaryUnmarshal(x.StartTime) + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.StartTime) + } else { + z.DecFallback(x.StartTime, false) + } + } + case "initContainerStatuses": + if r.TryDecodeAsNil() { + x.InitContainerStatuses = nil + } else { + yyv17 := &x.InitContainerStatuses + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + h.decSliceContainerStatus((*[]ContainerStatus)(yyv17), d) + } + } + case "containerStatuses": + if r.TryDecodeAsNil() { + x.ContainerStatuses = nil + } else { + yyv19 := &x.ContainerStatuses + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceContainerStatus((*[]ContainerStatus)(yyv19), d) + } + } + case "qosClass": + if r.TryDecodeAsNil() { + x.QOSClass = "" + } else { + yyv21 := &x.QOSClass + yyv21.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj22 int + var yyb22 bool + var yyhl22 bool = l >= 0 + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv23 := &x.Phase + yyv23.CodecDecodeSelf(d) + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv24 := &x.Conditions + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + h.decSlicePodCondition((*[]PodCondition)(yyv24), d) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv26 := &x.Message + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*string)(yyv26)) = r.DecodeString() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv28 := &x.Reason + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*string)(yyv28)) = r.DecodeString() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostIP = "" + } else { + yyv30 := &x.HostIP + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*string)(yyv30)) = r.DecodeString() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodIP = "" + } else { + yyv32 := &x.PodIP + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + *((*string)(yyv32)) = r.DecodeString() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.StartTime != nil { + x.StartTime = nil + } + } else { + if x.StartTime == nil { + x.StartTime = new(pkg2_v1.Time) + } + yym35 := z.DecBinary() + _ = yym35 + if false { + } else if z.HasExtensions() && z.DecExt(x.StartTime) { + } else if yym35 { + z.DecBinaryUnmarshal(x.StartTime) + } else if !yym35 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.StartTime) + } else { + z.DecFallback(x.StartTime, false) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.InitContainerStatuses = nil + } else { + yyv36 := &x.InitContainerStatuses + yym37 := z.DecBinary() + _ = yym37 + if false { + } else { + h.decSliceContainerStatus((*[]ContainerStatus)(yyv36), d) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContainerStatuses = nil + } else { + yyv38 := &x.ContainerStatuses + yym39 := z.DecBinary() + _ = yym39 + if false { + } else { + h.decSliceContainerStatus((*[]ContainerStatus)(yyv38), d) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.QOSClass = "" + } else { + yyv40 := &x.QOSClass + yyv40.CodecDecodeSelf(d) + } + for { + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj22-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodStatusResult) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + 
r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Status + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Status + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodStatusResult) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else 
if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodStatusResult) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "status": + if r.TryDecodeAsNil() { + x.Status = PodStatus{} + } else { + yyv10 := &x.Status + yyv10.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodStatusResult) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } 
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = PodStatus{} + } else { + yyv18 := &x.Status + yyv18.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Pod) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Pod) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Pod) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = PodSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = PodStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Pod) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + 
_ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = PodSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = PodStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") 
+ } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePod(([]Pod)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePod(([]Pod)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if 
r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePod((*[]Pod)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePod((*[]Pod)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = PodSpec{} + } else { + yyv6 := &x.Spec + yyv6.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = PodSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodTemplate) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || 
yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Template + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Template + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodTemplate) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = PodTemplateSpec{} + } else { + yyv10 := &x.Template + yyv10.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = 
r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = PodTemplateSpec{} + } else { + yyv18 := &x.Template + yyv18.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodTemplateList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePodTemplate(([]PodTemplate)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePodTemplate(([]PodTemplate)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodTemplateList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodTemplateList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items 
+ yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePodTemplate((*[]PodTemplate)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodTemplateList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePodTemplate((*[]PodTemplate)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicationControllerSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != nil + yyq2[1] = x.MinReadySeconds != 0 + yyq2[2] = len(x.Selector) != 0 + yyq2[3] = x.Template != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Replicas == nil { + r.EncodeNil() + } else { + yy4 := *x.Replicas + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + 
r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Replicas == nil { + r.EncodeNil() + } else { + yy6 := *x.Replicas + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + z.F.EncMapStringStringV(x.Selector, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + z.F.EncMapStringStringV(x.Selector, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Template == nil { + r.EncodeNil() + } else { + x.Template.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Template == nil { + r.EncodeNil() + } else { + x.Template.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicationControllerSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicationControllerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + 
break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + case "minReadySeconds": + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv6 := &x.MinReadySeconds + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "selector": + if r.TryDecodeAsNil() { + x.Selector = nil + } else { + yyv8 := &x.Selector + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecMapStringStringX(yyv8, false, d) + } + } + case "template": + if r.TryDecodeAsNil() { + if x.Template != nil { + x.Template = nil + } + } else { + if x.Template == nil { + x.Template = new(PodTemplateSpec) + } + x.Template.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicationControllerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv14 := &x.MinReadySeconds + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Selector = nil + } else { + yyv16 := &x.Selector + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + z.F.DecMapStringStringX(yyv16, false, d) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Template != nil { + x.Template = nil + } + } else { + if x.Template == nil { + x.Template = new(PodTemplateSpec) + } + x.Template.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FullyLabeledReplicas != 0 + yyq2[2] = x.ReadyReplicas != 0 + yyq2[3] = x.AvailableReplicas != 0 + yyq2[4] = x.ObservedGeneration != 0 + yyq2[5] = len(x.Conditions) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.FullyLabeledReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.FullyLabeledReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.ReadyReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readyReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.ReadyReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = 
yym17 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encSliceReplicationControllerCondition(([]ReplicationControllerCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + h.encSliceReplicationControllerCondition(([]ReplicationControllerCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicationControllerStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicationControllerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "fullyLabeledReplicas": + if r.TryDecodeAsNil() { + x.FullyLabeledReplicas = 0 + } else { + yyv6 := &x.FullyLabeledReplicas + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "readyReplicas": + if r.TryDecodeAsNil() { + x.ReadyReplicas = 0 + } else { + yyv8 := &x.ReadyReplicas + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "availableReplicas": + if r.TryDecodeAsNil() { + x.AvailableReplicas = 0 + } else { + yyv10 := &x.AvailableReplicas + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "observedGeneration": + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv12 := &x.ObservedGeneration + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + 
*((*int64)(yyv12)) = int64(r.DecodeInt(64)) + } + } + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv14 := &x.Conditions + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + h.decSliceReplicationControllerCondition((*[]ReplicationControllerCondition)(yyv14), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv17 := &x.Replicas + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FullyLabeledReplicas = 0 + } else { + yyv19 := &x.FullyLabeledReplicas + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int32)(yyv19)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadyReplicas = 0 + } else { + yyv21 := &x.ReadyReplicas + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AvailableReplicas = 0 + } else { + yyv23 := &x.AvailableReplicas + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv25 := &x.ObservedGeneration + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int64)(yyv25)) = int64(r.DecodeInt(64)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv27 := &x.Conditions + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + h.decSliceReplicationControllerCondition((*[]ReplicationControllerCondition)(yyv27), d) + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj16-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ReplicationControllerConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ReplicationControllerConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *ReplicationControllerCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = x.Reason != "" + yyq2[4] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Status.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Status.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastTransitionTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastTransitionTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicationControllerCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicationControllerCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "lastTransitionTime": + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_v1.Time{} + } else { + yyv6 := &x.LastTransitionTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv8 := &x.Reason + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv10 := &x.Message + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicationControllerCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv13 := &x.Type + yyv13.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv14 := &x.Status + yyv14.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_v1.Time{} + } else { + yyv15 := &x.LastTransitionTime + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else if yym16 { + z.DecBinaryUnmarshal(yyv15) + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv15) + } else { + z.DecFallback(yyv15, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv17 := &x.Reason + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv19 := &x.Message + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicationController) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if 
yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicationController) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } 
else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicationController) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ReplicationControllerSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ReplicationControllerStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicationController) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ReplicationControllerSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ReplicationControllerStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicationControllerList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == 
nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceReplicationController(([]ReplicationController)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceReplicationController(([]ReplicationController)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicationControllerList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicationControllerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceReplicationController((*[]ReplicationController)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicationControllerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceReplicationController((*[]ReplicationController)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ServiceAffinity) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ServiceAffinity) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x ServiceType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ServiceType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *ServiceStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + 
var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.LoadBalancer + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("loadBalancer")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.LoadBalancer + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServiceStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ServiceStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "loadBalancer": + if r.TryDecodeAsNil() { + x.LoadBalancer = LoadBalancerStatus{} + } else { + yyv4 := &x.LoadBalancer + yyv4.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServiceStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LoadBalancer = LoadBalancerStatus{} + } else { + yyv6 := &x.LoadBalancer + yyv6.CodecDecodeSelf(d) + } + for { + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj5-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LoadBalancerStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Ingress) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Ingress == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceLoadBalancerIngress(([]LoadBalancerIngress)(x.Ingress), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ingress")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ingress == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceLoadBalancerIngress(([]LoadBalancerIngress)(x.Ingress), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LoadBalancerStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LoadBalancerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "ingress": + if r.TryDecodeAsNil() { + x.Ingress = nil + } else { + yyv4 := &x.Ingress + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceLoadBalancerIngress((*[]LoadBalancerIngress)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LoadBalancerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ingress = nil + } else { + yyv7 := &x.Ingress + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceLoadBalancerIngress((*[]LoadBalancerIngress)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LoadBalancerIngress) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.IP != "" + yyq2[1] = x.Hostname != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.IP)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ip")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.IP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostname")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LoadBalancerIngress) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LoadBalancerIngress) codecDecodeSelfFromMap(l int, d 
*codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "ip": + if r.TryDecodeAsNil() { + x.IP = "" + } else { + yyv4 := &x.IP + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "hostname": + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + yyv6 := &x.Hostname + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LoadBalancerIngress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.IP = "" + } else { + yyv9 := &x.IP + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + yyv11 := &x.Hostname + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Ports) != 0 + yyq2[1] = len(x.Selector) != 0 + yyq2[2] = x.ClusterIP != "" + yyq2[3] = x.Type != "" + yyq2[4] = len(x.ExternalIPs) != 0 + yyq2[5] = len(x.DeprecatedPublicIPs) != 0 + yyq2[6] = x.SessionAffinity != "" + yyq2[7] = x.LoadBalancerIP != "" + yyq2[8] = len(x.LoadBalancerSourceRanges) != 0 + yyq2[9] = x.ExternalName != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Ports == nil { + r.EncodeNil() + } else { + yym4 := 
z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceServicePort(([]ServicePort)(x.Ports), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ports")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ports == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceServicePort(([]ServicePort)(x.Ports), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncMapStringStringV(x.Selector, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncMapStringStringV(x.Selector, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterIP)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("clusterIP")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ClusterIP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + x.Type.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.ExternalIPs == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncSliceStringV(x.ExternalIPs, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("externalIPs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ExternalIPs == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + z.F.EncSliceStringV(x.ExternalIPs, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.DeprecatedPublicIPs == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + z.F.EncSliceStringV(x.DeprecatedPublicIPs, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("deprecatedPublicIPs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DeprecatedPublicIPs == nil { + r.EncodeNil() + } else { + yym20 := 
z.EncBinary() + _ = yym20 + if false { + } else { + z.F.EncSliceStringV(x.DeprecatedPublicIPs, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + x.SessionAffinity.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("sessionAffinity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.SessionAffinity.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.LoadBalancerIP)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("loadBalancerIP")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.LoadBalancerIP)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.LoadBalancerSourceRanges == nil { + r.EncodeNil() + } else { + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("loadBalancerSourceRanges")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LoadBalancerSourceRanges == nil { + r.EncodeNil() + } else { + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ExternalName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("externalName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ExternalName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServiceSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + 
+func (x *ServiceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "ports": + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv4 := &x.Ports + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceServicePort((*[]ServicePort)(yyv4), d) + } + } + case "selector": + if r.TryDecodeAsNil() { + x.Selector = nil + } else { + yyv6 := &x.Selector + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecMapStringStringX(yyv6, false, d) + } + } + case "clusterIP": + if r.TryDecodeAsNil() { + x.ClusterIP = "" + } else { + yyv8 := &x.ClusterIP + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv10 := &x.Type + yyv10.CodecDecodeSelf(d) + } + case "externalIPs": + if r.TryDecodeAsNil() { + x.ExternalIPs = nil + } else { + yyv11 := &x.ExternalIPs + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + z.F.DecSliceStringX(yyv11, false, d) + } + } + case "deprecatedPublicIPs": + if r.TryDecodeAsNil() { + x.DeprecatedPublicIPs = nil + } else { + yyv13 := &x.DeprecatedPublicIPs + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecSliceStringX(yyv13, false, d) + } + } + case "sessionAffinity": + if r.TryDecodeAsNil() { + x.SessionAffinity = "" + } else { + yyv15 := &x.SessionAffinity + yyv15.CodecDecodeSelf(d) + } + case "loadBalancerIP": + if r.TryDecodeAsNil() { + x.LoadBalancerIP = "" + } else { + yyv16 := &x.LoadBalancerIP + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + case "loadBalancerSourceRanges": + if r.TryDecodeAsNil() { + x.LoadBalancerSourceRanges = nil + } else { + yyv18 := &x.LoadBalancerSourceRanges + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + z.F.DecSliceStringX(yyv18, false, d) + } + } + case "externalName": + if r.TryDecodeAsNil() { + x.ExternalName = "" + } else { + yyv20 := &x.ExternalName + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(yyv20)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj22 int + var yyb22 bool + var yyhl22 bool = l >= 0 + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv23 := &x.Ports + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + h.decSliceServicePort((*[]ServicePort)(yyv23), d) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Selector = nil + } else { + yyv25 := &x.Selector + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + z.F.DecMapStringStringX(yyv25, false, d) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ClusterIP = "" + } else { + yyv27 := &x.ClusterIP + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv29 := &x.Type + yyv29.CodecDecodeSelf(d) + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ExternalIPs = nil + } else { + yyv30 := &x.ExternalIPs + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + z.F.DecSliceStringX(yyv30, false, d) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DeprecatedPublicIPs = nil + } else { + yyv32 := &x.DeprecatedPublicIPs + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + z.F.DecSliceStringX(yyv32, false, d) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SessionAffinity = "" + } else { + yyv34 := &x.SessionAffinity + yyv34.CodecDecodeSelf(d) + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LoadBalancerIP = "" + } else { + yyv35 := &x.LoadBalancerIP + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*string)(yyv35)) = r.DecodeString() + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LoadBalancerSourceRanges = nil + } else { + yyv37 := &x.LoadBalancerSourceRanges + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + z.F.DecSliceStringX(yyv37, false, d) + } + } + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ExternalName = "" + } else { + yyv39 := &x.ExternalName + yym40 := z.DecBinary() + _ = yym40 + if 
false { + } else { + *((*string)(yyv39)) = r.DecodeString() + } + } + for { + yyj22++ + if yyhl22 { + yyb22 = yyj22 > l + } else { + yyb22 = r.CheckBreak() + } + if yyb22 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj22-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ServicePort) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[1] = x.Protocol != "" + yyq2[3] = true + yyq2[4] = x.NodePort != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + x.Protocol.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("protocol")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Protocol.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.Port)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("port")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.Port)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy13 := &x.TargetPort + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(yy13) { + } else if !yym14 && z.IsJSONHandle() { + z.EncJSONMarshal(yy13) + } else { + z.EncFallback(yy13) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetPort")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy15 := &x.TargetPort + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else 
{ + r.EncodeInt(int64(x.NodePort)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodePort")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(x.NodePort)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServicePort) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ServicePort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "protocol": + if r.TryDecodeAsNil() { + x.Protocol = "" + } else { + yyv6 := &x.Protocol + yyv6.CodecDecodeSelf(d) + } + case "port": + if r.TryDecodeAsNil() { + x.Port = 0 + } else { + yyv7 := &x.Port + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } + } + case "targetPort": + if r.TryDecodeAsNil() { + x.TargetPort = pkg4_intstr.IntOrString{} + } else { + yyv9 := &x.TargetPort + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv9) + } else { + z.DecFallback(yyv9, false) + } + } + case "nodePort": + if r.TryDecodeAsNil() { + x.NodePort = 0 + } else { + yyv11 := &x.NodePort + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(yyv11)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServicePort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv14 := &x.Name + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Protocol = "" + } else { + yyv16 := &x.Protocol + yyv16.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Port = 0 + } else { + yyv17 := &x.Port + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetPort = pkg4_intstr.IntOrString{} + } else { + yyv19 := &x.TargetPort + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) + } else { + z.DecFallback(yyv19, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NodePort = 0 + } else { + yyv21 := &x.NodePort + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Service) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Service) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Service) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + 
z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ServiceSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ServiceStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Service) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ServiceSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ServiceStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 
{ + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ServiceList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceService(([]Service)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceService(([]Service)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { 
+ z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServiceList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ServiceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceService((*[]Service)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServiceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 
= yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceService((*[]Service)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ServiceAccount) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = len(x.Secrets) != 0 + yyq2[4] = len(x.ImagePullSecrets) != 0 + yyq2[5] = x.AutomountServiceAccountToken != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Secrets == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceObjectReference(([]ObjectReference)(x.Secrets), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secrets")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Secrets == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceObjectReference(([]ObjectReference)(x.Secrets), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.ImagePullSecrets == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("imagePullSecrets")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ImagePullSecrets == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.AutomountServiceAccountToken == nil { + r.EncodeNil() + } else { + yy21 := *x.AutomountServiceAccountToken + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeBool(bool(yy21)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("automountServiceAccountToken")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AutomountServiceAccountToken == nil { + r.EncodeNil() + } else { + yy23 := *x.AutomountServiceAccountToken + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeBool(bool(yy23)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServiceAccount) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } 
+ } +} + +func (x *ServiceAccount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "secrets": + if r.TryDecodeAsNil() { + x.Secrets = nil + } else { + yyv10 := &x.Secrets + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceObjectReference((*[]ObjectReference)(yyv10), d) + } + } + case "imagePullSecrets": + if r.TryDecodeAsNil() { + x.ImagePullSecrets = nil + } else { + yyv12 := &x.ImagePullSecrets + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv12), d) + } + } + case "automountServiceAccountToken": + if r.TryDecodeAsNil() { + if x.AutomountServiceAccountToken != nil { + x.AutomountServiceAccountToken = nil + } + } else { + if x.AutomountServiceAccountToken == nil { + x.AutomountServiceAccountToken = new(bool) + } + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(x.AutomountServiceAccountToken)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServiceAccount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv17 := &x.Kind + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv19 := &x.APIVersion + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv21 := &x.ObjectMeta + yym22 := z.DecBinary() + _ = yym22 + if false { + } else if z.HasExtensions() && z.DecExt(yyv21) { + } else { + z.DecFallback(yyv21, false) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Secrets = nil + } else { + yyv23 := &x.Secrets + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + h.decSliceObjectReference((*[]ObjectReference)(yyv23), d) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ImagePullSecrets = nil + } else { + yyv25 := &x.ImagePullSecrets + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv25), d) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.AutomountServiceAccountToken != nil { + x.AutomountServiceAccountToken = nil + } + } else { + if x.AutomountServiceAccountToken == nil { + x.AutomountServiceAccountToken = new(bool) + } + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(x.AutomountServiceAccountToken)) = r.DecodeBool() + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj16-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ServiceAccountList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + 
} else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceServiceAccount(([]ServiceAccount)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceServiceAccount(([]ServiceAccount)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServiceAccountList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ServiceAccountList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if 
r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceServiceAccount((*[]ServiceAccount)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServiceAccountList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceServiceAccount((*[]ServiceAccount)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Endpoints) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + 
yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Subsets == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceEndpointSubset(([]EndpointSubset)(x.Subsets), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subsets")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Subsets == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceEndpointSubset(([]EndpointSubset)(x.Subsets), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Endpoints) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Endpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "subsets": + if r.TryDecodeAsNil() { + x.Subsets = nil + } else { + yyv10 := &x.Subsets + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceEndpointSubset((*[]EndpointSubset)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Endpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subsets = nil + } else { + yyv19 := &x.Subsets + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceEndpointSubset((*[]EndpointSubset)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EndpointSubset) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Addresses) != 0 + yyq2[1] = len(x.NotReadyAddresses) != 0 + yyq2[2] = len(x.Ports) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Addresses == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceEndpointAddress(([]EndpointAddress)(x.Addresses), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("addresses")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Addresses == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceEndpointAddress(([]EndpointAddress)(x.Addresses), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.NotReadyAddresses == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceEndpointAddress(([]EndpointAddress)(x.NotReadyAddresses), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("notReadyAddresses")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NotReadyAddresses == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceEndpointAddress(([]EndpointAddress)(x.NotReadyAddresses), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Ports == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceEndpointPort(([]EndpointPort)(x.Ports), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ports")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ports == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + h.encSliceEndpointPort(([]EndpointPort)(x.Ports), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EndpointSubset) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EndpointSubset) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "addresses": + if r.TryDecodeAsNil() { + x.Addresses = nil + } else { + yyv4 := &x.Addresses + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceEndpointAddress((*[]EndpointAddress)(yyv4), d) + } + } + case "notReadyAddresses": + if r.TryDecodeAsNil() { + x.NotReadyAddresses = nil + } else { + yyv6 := &x.NotReadyAddresses + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceEndpointAddress((*[]EndpointAddress)(yyv6), d) + } + } + case "ports": + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv8 := &x.Ports + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + h.decSliceEndpointPort((*[]EndpointPort)(yyv8), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EndpointSubset) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Addresses = nil + } else { + yyv11 := &x.Addresses + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceEndpointAddress((*[]EndpointAddress)(yyv11), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NotReadyAddresses = nil + } else { + yyv13 := &x.NotReadyAddresses + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceEndpointAddress((*[]EndpointAddress)(yyv13), d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv15 := &x.Ports + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + h.decSliceEndpointPort((*[]EndpointPort)(yyv15), d) + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EndpointAddress) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Hostname != "" + yyq2[2] = x.NodeName != nil + yyq2[3] = x.TargetRef != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.IP)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ip")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.IP)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostname")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.NodeName == nil { + r.EncodeNil() + } else { + yy10 := *x.NodeName + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodeName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NodeName == nil { + r.EncodeNil() + } else { + yy12 := *x.NodeName + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.TargetRef == nil { + r.EncodeNil() + } else { + x.TargetRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("targetRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TargetRef == nil { + r.EncodeNil() + } else { + x.TargetRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EndpointAddress) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EndpointAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "ip": + if r.TryDecodeAsNil() { + x.IP = "" + } else { + yyv4 := &x.IP + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "hostname": + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + yyv6 := &x.Hostname + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "nodeName": + if r.TryDecodeAsNil() { + if x.NodeName != nil { + x.NodeName = nil + } + } else { + if x.NodeName == nil { + x.NodeName = new(string) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(x.NodeName)) = r.DecodeString() + } + } + case "targetRef": + if r.TryDecodeAsNil() { + if x.TargetRef != nil { + x.TargetRef = nil + } + } else { + if x.TargetRef == nil { + x.TargetRef = new(ObjectReference) + } + x.TargetRef.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EndpointAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.IP = "" + } else { + yyv12 := &x.IP + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + 
return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hostname = "" + } else { + yyv14 := &x.Hostname + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NodeName != nil { + x.NodeName = nil + } + } else { + if x.NodeName == nil { + x.NodeName = new(string) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(x.NodeName)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TargetRef != nil { + x.TargetRef = nil + } + } else { + if x.TargetRef == nil { + x.TargetRef = new(ObjectReference) + } + x.TargetRef.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EndpointPort) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + yyq2[2] = x.Protocol != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.Port)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("port")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.Port)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + x.Protocol.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("protocol")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Protocol.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EndpointPort) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EndpointPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "port": + if r.TryDecodeAsNil() { + x.Port = 0 + } else { + yyv6 := &x.Port + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "protocol": + if r.TryDecodeAsNil() { + x.Protocol = "" + } else { + yyv8 := &x.Protocol + yyv8.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EndpointPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv10 := &x.Name + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Port = 0 + } else { + yyv12 := &x.Port + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(yyv12)) = int32(r.DecodeInt(32)) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Protocol = "" + } else { + yyv14 := &x.Protocol + yyv14.CodecDecodeSelf(d) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EndpointsList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceEndpoints(([]Endpoints)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := 
z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceEndpoints(([]Endpoints)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EndpointsList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EndpointsList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceEndpoints((*[]Endpoints)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EndpointsList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceEndpoints((*[]Endpoints)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.PodCIDR != "" + yyq2[1] = x.ExternalID != "" + yyq2[2] = x.ProviderID != "" + yyq2[3] = x.Unschedulable != false + yyq2[4] = len(x.Taints) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podCIDR")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ExternalID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("externalID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ExternalID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + 
} else { + r.EncodeString(codecSelferC_UTF81234, string(x.ProviderID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("providerID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ProviderID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Unschedulable)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("unschedulable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Unschedulable)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Taints == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceTaint(([]Taint)(x.Taints), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("taints")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Taints == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encSliceTaint(([]Taint)(x.Taints), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "podCIDR": + if r.TryDecodeAsNil() { + x.PodCIDR = "" + } else { + yyv4 := &x.PodCIDR + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "externalID": + if r.TryDecodeAsNil() { + x.ExternalID = "" + } else { + yyv6 := &x.ExternalID + yym7 := 
z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "providerID": + if r.TryDecodeAsNil() { + x.ProviderID = "" + } else { + yyv8 := &x.ProviderID + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "unschedulable": + if r.TryDecodeAsNil() { + x.Unschedulable = false + } else { + yyv10 := &x.Unschedulable + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + case "taints": + if r.TryDecodeAsNil() { + x.Taints = nil + } else { + yyv12 := &x.Taints + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + h.decSliceTaint((*[]Taint)(yyv12), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodCIDR = "" + } else { + yyv15 := &x.PodCIDR + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ExternalID = "" + } else { + yyv17 := &x.ExternalID + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ProviderID = "" + } else { + yyv19 := &x.ProviderID + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Unschedulable = false + } else { + yyv21 := &x.Unschedulable + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(yyv21)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Taints = nil + } else { + yyv23 := &x.Taints + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + h.decSliceTaint((*[]Taint)(yyv23), d) + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DaemonEndpoint) CodecEncodeSelf(e 
*codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Port)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("Port")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Port)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DaemonEndpoint) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DaemonEndpoint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "Port": + if r.TryDecodeAsNil() { + x.Port = 0 + } else { + yyv4 := &x.Port + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DaemonEndpoint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Port = 0 + } else { + yyv7 := &x.Port + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = 
int32(r.DecodeInt(32)) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeDaemonEndpoints) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.KubeletEndpoint + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kubeletEndpoint")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.KubeletEndpoint + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeDaemonEndpoints) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeDaemonEndpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kubeletEndpoint": + if r.TryDecodeAsNil() { + x.KubeletEndpoint = DaemonEndpoint{} + } else { + yyv4 := &x.KubeletEndpoint + yyv4.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeDaemonEndpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + 
yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.KubeletEndpoint = DaemonEndpoint{} + } else { + yyv6 := &x.KubeletEndpoint + yyv6.CodecDecodeSelf(d) + } + for { + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj5-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) + } else { + yynn2 = 10 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MachineID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("machineID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MachineID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SystemUUID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("systemUUID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SystemUUID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.BootID)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("bootID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.BootID)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.KernelVersion)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kernelVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.KernelVersion)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.OSImage)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("osImage")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.OSImage)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntimeVersion)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("containerRuntimeVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntimeVersion)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.KubeletVersion)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kubeletVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.KubeletVersion)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.KubeProxyVersion)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kubeProxyVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.KubeProxyVersion)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("operatingSystem")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Architecture)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("architecture")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Architecture)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeSystemInfo) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && 
z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeSystemInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "machineID": + if r.TryDecodeAsNil() { + x.MachineID = "" + } else { + yyv4 := &x.MachineID + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "systemUUID": + if r.TryDecodeAsNil() { + x.SystemUUID = "" + } else { + yyv6 := &x.SystemUUID + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "bootID": + if r.TryDecodeAsNil() { + x.BootID = "" + } else { + yyv8 := &x.BootID + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "kernelVersion": + if r.TryDecodeAsNil() { + x.KernelVersion = "" + } else { + yyv10 := &x.KernelVersion + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "osImage": + if r.TryDecodeAsNil() { + x.OSImage = "" + } else { + yyv12 := &x.OSImage + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "containerRuntimeVersion": + if r.TryDecodeAsNil() { + x.ContainerRuntimeVersion = "" + } else { + yyv14 := &x.ContainerRuntimeVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "kubeletVersion": + if r.TryDecodeAsNil() { + x.KubeletVersion = "" + } else { + yyv16 := &x.KubeletVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + case "kubeProxyVersion": + if r.TryDecodeAsNil() { + x.KubeProxyVersion = "" + } else { + yyv18 := &x.KubeProxyVersion + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + case "operatingSystem": + if r.TryDecodeAsNil() { + x.OperatingSystem = "" + } else { + yyv20 := &x.OperatingSystem + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*string)(yyv20)) = r.DecodeString() + } + } + case "architecture": + if r.TryDecodeAsNil() { + x.Architecture = "" + } else { + yyv22 := &x.Architecture + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*string)(yyv22)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d 
*codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj24 int + var yyb24 bool + var yyhl24 bool = l >= 0 + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MachineID = "" + } else { + yyv25 := &x.MachineID + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SystemUUID = "" + } else { + yyv27 := &x.SystemUUID + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.BootID = "" + } else { + yyv29 := &x.BootID + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.KernelVersion = "" + } else { + yyv31 := &x.KernelVersion + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.OSImage = "" + } else { + yyv33 := &x.OSImage + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*string)(yyv33)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ContainerRuntimeVersion = "" + } else { + yyv35 := &x.ContainerRuntimeVersion + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*string)(yyv35)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.KubeletVersion = "" + } else { + yyv37 := &x.KubeletVersion + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*string)(yyv37)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.KubeProxyVersion = "" + } else { + yyv39 := &x.KubeProxyVersion + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + 
*((*string)(yyv39)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.OperatingSystem = "" + } else { + yyv41 := &x.OperatingSystem + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*string)(yyv41)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Architecture = "" + } else { + yyv43 := &x.Architecture + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*string)(yyv43)) = r.DecodeString() + } + } + for { + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj24-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Capacity) != 0 + yyq2[1] = len(x.Allocatable) != 0 + yyq2[2] = x.Phase != "" + yyq2[3] = len(x.Conditions) != 0 + yyq2[4] = len(x.Addresses) != 0 + yyq2[5] = true + yyq2[6] = true + yyq2[7] = len(x.Images) != 0 + yyq2[8] = len(x.VolumesInUse) != 0 + yyq2[9] = len(x.VolumesAttached) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Capacity == nil { + r.EncodeNil() + } else { + x.Capacity.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("capacity")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Capacity == nil { + r.EncodeNil() + } else { + x.Capacity.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Allocatable == nil { + r.EncodeNil() + } else { + x.Allocatable.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("allocatable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Allocatable == nil { + r.EncodeNil() + } else { + x.Allocatable.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + x.Phase.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("phase")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Phase.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + h.encSliceNodeCondition(([]NodeCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + h.encSliceNodeCondition(([]NodeCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Addresses == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceNodeAddress(([]NodeAddress)(x.Addresses), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("addresses")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Addresses == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encSliceNodeAddress(([]NodeAddress)(x.Addresses), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yy19 := &x.DaemonEndpoints + yy19.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("daemonEndpoints")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy21 := &x.DaemonEndpoints + yy21.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yy24 := &x.NodeInfo + yy24.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nodeInfo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy26 := &x.NodeInfo + yy26.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.Images == nil { + r.EncodeNil() + } else { + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + h.encSliceContainerImage(([]ContainerImage)(x.Images), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("images")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Images == nil { + r.EncodeNil() + } else { + yym30 := z.EncBinary() + _ = yym30 + if false { + } else { + h.encSliceContainerImage(([]ContainerImage)(x.Images), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.VolumesInUse == nil { + r.EncodeNil() + } else { + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("volumesInUse")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VolumesInUse == nil { + r.EncodeNil() + } else { + yym33 := z.EncBinary() + _ = yym33 + if false { + } else { + h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.VolumesAttached == nil { + r.EncodeNil() + } else { + yym35 := z.EncBinary() + _ = yym35 + if false { + } else { + h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumesAttached")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VolumesAttached == nil { + r.EncodeNil() + } else { + yym36 := z.EncBinary() + _ = yym36 + if false { + } else { + h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "capacity": + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv4 := &x.Capacity + yyv4.CodecDecodeSelf(d) + } + case "allocatable": + if r.TryDecodeAsNil() { + x.Allocatable = nil + } else { + yyv5 := &x.Allocatable + yyv5.CodecDecodeSelf(d) + } + case "phase": + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv6 := &x.Phase + yyv6.CodecDecodeSelf(d) + } + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv7 := &x.Conditions + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceNodeCondition((*[]NodeCondition)(yyv7), d) + } + } + case "addresses": + if r.TryDecodeAsNil() { + x.Addresses = nil + } else { + yyv9 := &x.Addresses + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceNodeAddress((*[]NodeAddress)(yyv9), d) + } + } + case "daemonEndpoints": + if r.TryDecodeAsNil() { + x.DaemonEndpoints = NodeDaemonEndpoints{} + } else { + yyv11 := 
&x.DaemonEndpoints + yyv11.CodecDecodeSelf(d) + } + case "nodeInfo": + if r.TryDecodeAsNil() { + x.NodeInfo = NodeSystemInfo{} + } else { + yyv12 := &x.NodeInfo + yyv12.CodecDecodeSelf(d) + } + case "images": + if r.TryDecodeAsNil() { + x.Images = nil + } else { + yyv13 := &x.Images + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceContainerImage((*[]ContainerImage)(yyv13), d) + } + } + case "volumesInUse": + if r.TryDecodeAsNil() { + x.VolumesInUse = nil + } else { + yyv15 := &x.VolumesInUse + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv15), d) + } + } + case "volumesAttached": + if r.TryDecodeAsNil() { + x.VolumesAttached = nil + } else { + yyv17 := &x.VolumesAttached + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + h.decSliceAttachedVolume((*[]AttachedVolume)(yyv17), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj19 int + var yyb19 bool + var yyhl19 bool = l >= 0 + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Capacity = nil + } else { + yyv20 := &x.Capacity + yyv20.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Allocatable = nil + } else { + yyv21 := &x.Allocatable + yyv21.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv22 := &x.Phase + yyv22.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv23 := &x.Conditions + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + h.decSliceNodeCondition((*[]NodeCondition)(yyv23), d) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Addresses = nil + } else { + yyv25 := &x.Addresses + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + h.decSliceNodeAddress((*[]NodeAddress)(yyv25), d) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DaemonEndpoints = NodeDaemonEndpoints{} + } else { + yyv27 := &x.DaemonEndpoints + yyv27.CodecDecodeSelf(d) + } 
+ yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NodeInfo = NodeSystemInfo{} + } else { + yyv28 := &x.NodeInfo + yyv28.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Images = nil + } else { + yyv29 := &x.Images + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + h.decSliceContainerImage((*[]ContainerImage)(yyv29), d) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumesInUse = nil + } else { + yyv31 := &x.VolumesInUse + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv31), d) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumesAttached = nil + } else { + yyv33 := &x.VolumesAttached + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + h.decSliceAttachedVolume((*[]AttachedVolume)(yyv33), d) + } + } + for { + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj19-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x UniqueVolumeName) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *UniqueVolumeName) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *AttachedVolume) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Name.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + 
x.Name.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("devicePath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *AttachedVolume) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *AttachedVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yyv4.CodecDecodeSelf(d) + } + case "devicePath": + if r.TryDecodeAsNil() { + x.DevicePath = "" + } else { + yyv5 := &x.DevicePath + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *AttachedVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv8 := &x.Name + yyv8.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DevicePath = "" + } else { + yyv9 := &x.DevicePath + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } 
+ for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *AvoidPods) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.PreferAvoidPods) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.PreferAvoidPods == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicePreferAvoidPodsEntry(([]PreferAvoidPodsEntry)(x.PreferAvoidPods), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preferAvoidPods")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PreferAvoidPods == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicePreferAvoidPodsEntry(([]PreferAvoidPodsEntry)(x.PreferAvoidPods), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *AvoidPods) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *AvoidPods) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "preferAvoidPods": + if r.TryDecodeAsNil() { + x.PreferAvoidPods = nil + } else { + yyv4 := &x.PreferAvoidPods + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicePreferAvoidPodsEntry((*[]PreferAvoidPodsEntry)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, 
yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *AvoidPods) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PreferAvoidPods = nil + } else { + yyv7 := &x.PreferAvoidPods + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSlicePreferAvoidPodsEntry((*[]PreferAvoidPodsEntry)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PreferAvoidPodsEntry) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = true + yyq2[2] = x.Reason != "" + yyq2[3] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.PodSignature + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podSignature")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.PodSignature + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.EvictionTime + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if yym10 { + z.EncBinaryMarshal(yy9) + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("evictionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.EvictionTime + yym12 := z.EncBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.EncExt(yy11) { + } else if yym12 { + z.EncBinaryMarshal(yy11) + } else if !yym12 && z.IsJSONHandle() { + z.EncJSONMarshal(yy11) + } else { + z.EncFallback(yy11) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym15 
:= z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PreferAvoidPodsEntry) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PreferAvoidPodsEntry) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "podSignature": + if r.TryDecodeAsNil() { + x.PodSignature = PodSignature{} + } else { + yyv4 := &x.PodSignature + yyv4.CodecDecodeSelf(d) + } + case "evictionTime": + if r.TryDecodeAsNil() { + x.EvictionTime = pkg2_v1.Time{} + } else { + yyv5 := &x.EvictionTime + yym6 := z.DecBinary() + _ = yym6 + if false { + } else if z.HasExtensions() && z.DecExt(yyv5) { + } else if yym6 { + z.DecBinaryUnmarshal(yyv5) + } else if !yym6 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv5) + } else { + z.DecFallback(yyv5, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv7 := &x.Reason + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv9 := &x.Message + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PreferAvoidPodsEntry) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, 
z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodSignature = PodSignature{} + } else { + yyv12 := &x.PodSignature + yyv12.CodecDecodeSelf(d) + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.EvictionTime = pkg2_v1.Time{} + } else { + yyv13 := &x.EvictionTime + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(yyv13) { + } else if yym14 { + z.DecBinaryUnmarshal(yyv13) + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) + } else { + z.DecFallback(yyv13, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv15 := &x.Reason + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv17 := &x.Message + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodSignature) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.PodController != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.PodController == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.PodController) { + } else { + z.EncFallback(x.PodController) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podController")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PodController == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.PodController) { + } else { 
+ z.EncFallback(x.PodController) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodSignature) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodSignature) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "podController": + if r.TryDecodeAsNil() { + if x.PodController != nil { + x.PodController = nil + } + } else { + if x.PodController == nil { + x.PodController = new(pkg2_v1.OwnerReference) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.PodController) { + } else { + z.DecFallback(x.PodController, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodSignature) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PodController != nil { + x.PodController = nil + } + } else { + if x.PodController == nil { + x.PodController = new(pkg2_v1.OwnerReference) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(x.PodController) { + } else { + z.DecFallback(x.PodController, false) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ContainerImage) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := 
z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.SizeBytes != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Names == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.Names, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("names")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Names == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.Names, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.SizeBytes)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("sizeBytes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.SizeBytes)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ContainerImage) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ContainerImage) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "names": + if r.TryDecodeAsNil() { + x.Names = nil + } else { + yyv4 := &x.Names + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + case "sizeBytes": + if r.TryDecodeAsNil() { + x.SizeBytes = 0 + } else { + yyv6 := &x.SizeBytes + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int64)(yyv6)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ContainerImage) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Names = nil + } else { + yyv9 := &x.Names + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + z.F.DecSliceStringX(yyv9, false, d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SizeBytes = 0 + } else { + yyv11 := &x.SizeBytes + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int64)(yyv11)) = int64(r.DecodeInt(64)) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x NodePhase) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *NodePhase) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x NodeConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *NodeConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *NodeCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Status.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Status.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastHeartbeatTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastHeartbeatTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastHeartbeatTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.LastTransitionTime + yym18 := z.EncBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } else if !yym18 && z.IsJSONHandle() { + z.EncJSONMarshal(yy17) + } else { + z.EncFallback(yy17) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + 
} + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "lastHeartbeatTime": + if r.TryDecodeAsNil() { + x.LastHeartbeatTime = pkg2_v1.Time{} + } else { + yyv6 := &x.LastHeartbeatTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "lastTransitionTime": + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_v1.Time{} + } else { + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv10 := &x.Reason + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv12 := &x.Message + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv15 := &x.Type + yyv15.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv16 := &x.Status + yyv16.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastHeartbeatTime = pkg2_v1.Time{} + } else { + yyv17 := &x.LastHeartbeatTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg2_v1.Time{} + } else { + yyv19 := &x.LastTransitionTime + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if yym20 { + z.DecBinaryUnmarshal(yyv19) + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) + } else { + z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv21 := &x.Reason + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv23 := &x.Message + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x NodeAddressType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *NodeAddressType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *NodeAddress) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + 
z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Address)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("address")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Address)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeAddress) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "address": + if r.TryDecodeAsNil() { + x.Address = "" + } else { + yyv5 := &x.Address + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, 
_, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv8 := &x.Type + yyv8.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Address = "" + } else { + yyv9 := &x.Address + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ResourceName) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ResourceName) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x ResourceList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + h.encResourceList((ResourceList)(x), e) + } + } +} + +func (x *ResourceList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decResourceList((*ResourceList)(x), d) + } +} + +func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 
:= 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = NodeSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = NodeStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = NodeSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = 
NodeStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceNode(([]Node)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceNode(([]Node)(x.Items), e) + } + } + } + 
if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceNode((*[]Node)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + 
*((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceNode((*[]Node)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x FinalizerName) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *FinalizerName) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *NamespaceSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Finalizers) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Finalizers == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceFinalizerName(([]FinalizerName)(x.Finalizers), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("finalizers")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Finalizers == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceFinalizerName(([]FinalizerName)(x.Finalizers), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NamespaceSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := 
z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NamespaceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "finalizers": + if r.TryDecodeAsNil() { + x.Finalizers = nil + } else { + yyv4 := &x.Finalizers + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceFinalizerName((*[]FinalizerName)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NamespaceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Finalizers = nil + } else { + yyv7 := &x.Finalizers + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceFinalizerName((*[]FinalizerName)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NamespaceStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Phase != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Phase.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("phase")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Phase.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NamespaceStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NamespaceStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "phase": + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv4 := &x.Phase + yyv4.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NamespaceStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Phase = "" + } else { + yyv6 := &x.Phase + yyv6.CodecDecodeSelf(d) + } + for { + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj5-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x NamespacePhase) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *NamespacePhase) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *Namespace) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else 
{ + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Namespace) CodecDecodeSelf(d *codec1978.Decoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Namespace) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = NamespaceSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = NamespaceStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Namespace) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = NamespaceSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = NamespaceStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NamespaceList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + 
z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceNamespace(([]Namespace)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceNamespace(([]Namespace)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NamespaceList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NamespaceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceNamespace((*[]Namespace)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NamespaceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceNamespace((*[]Namespace)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Binding) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.Target + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("target")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Target + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Binding) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Binding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion 
+ yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "target": + if r.TryDecodeAsNil() { + x.Target = ObjectReference{} + } else { + yyv10 := &x.Target + yyv10.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Binding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Target = ObjectReference{} + } else { + yyv18 := &x.Target + yyv18.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Preconditions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.UID != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.UID == nil { + r.EncodeNil() + } else { + yy4 := *x.UID + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("uid")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.UID == nil { + r.EncodeNil() + } else { + yy6 := *x.UID + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Preconditions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Preconditions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "uid": + if r.TryDecodeAsNil() { + if x.UID != nil { + x.UID = nil + } + } else { + if x.UID == nil { + x.UID = new(pkg1_types.UID) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.UID) { + } else { + *((*string)(x.UID)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Preconditions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.UID != nil { + x.UID = nil + } + } else { + if x.UID == nil { + x.UID = new(pkg1_types.UID) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(x.UID) { + } else { + *((*string)(x.UID)) = r.DecodeString() + } + } + for { + yyj6++ + if yyhl6 { + 
yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x DeletionPropagation) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *DeletionPropagation) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.GracePeriodSeconds != nil + yyq2[3] = x.Preconditions != nil + yyq2[4] = x.OrphanDependents != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.GracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy10 := *x.GracePeriodSeconds + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(yy10)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("gracePeriodSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.GracePeriodSeconds == nil { + r.EncodeNil() + } else { + yy12 := *x.GracePeriodSeconds + yym13 := 
z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(yy12)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Preconditions == nil { + r.EncodeNil() + } else { + x.Preconditions.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("preconditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Preconditions == nil { + r.EncodeNil() + } else { + x.Preconditions.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.OrphanDependents == nil { + r.EncodeNil() + } else { + yy18 := *x.OrphanDependents + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(yy18)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("orphanDependents")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.OrphanDependents == nil { + r.EncodeNil() + } else { + yy20 := *x.OrphanDependents + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeBool(bool(yy20)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.PropagationPolicy == nil { + r.EncodeNil() + } else { + yy23 := *x.PropagationPolicy + yy23.CodecEncodeSelf(e) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("PropagationPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PropagationPolicy == nil { + r.EncodeNil() + } else { + yy25 := *x.PropagationPolicy + yy25.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeleteOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ 
= yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "gracePeriodSeconds": + if r.TryDecodeAsNil() { + if x.GracePeriodSeconds != nil { + x.GracePeriodSeconds = nil + } + } else { + if x.GracePeriodSeconds == nil { + x.GracePeriodSeconds = new(int64) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64)) + } + } + case "preconditions": + if r.TryDecodeAsNil() { + if x.Preconditions != nil { + x.Preconditions = nil + } + } else { + if x.Preconditions == nil { + x.Preconditions = new(Preconditions) + } + x.Preconditions.CodecDecodeSelf(d) + } + case "orphanDependents": + if r.TryDecodeAsNil() { + if x.OrphanDependents != nil { + x.OrphanDependents = nil + } + } else { + if x.OrphanDependents == nil { + x.OrphanDependents = new(bool) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(x.OrphanDependents)) = r.DecodeBool() + } + } + case "PropagationPolicy": + if r.TryDecodeAsNil() { + if x.PropagationPolicy != nil { + x.PropagationPolicy = nil + } + } else { + if x.PropagationPolicy == nil { + x.PropagationPolicy = new(DeletionPropagation) + } + x.PropagationPolicy.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv15 := &x.Kind + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv17 := &x.APIVersion + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.GracePeriodSeconds != nil { + x.GracePeriodSeconds = nil + } + } else { + if x.GracePeriodSeconds == nil { + x.GracePeriodSeconds = new(int64) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Preconditions != nil { + x.Preconditions = nil + } + } else { + if x.Preconditions == nil 
{ + x.Preconditions = new(Preconditions) + } + x.Preconditions.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.OrphanDependents != nil { + x.OrphanDependents = nil + } + } else { + if x.OrphanDependents == nil { + x.OrphanDependents = new(bool) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*bool)(x.OrphanDependents)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PropagationPolicy != nil { + x.PropagationPolicy = nil + } + } else { + if x.PropagationPolicy == nil { + x.PropagationPolicy = new(DeletionPropagation) + } + x.PropagationPolicy.CodecDecodeSelf(d) + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ListOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.LabelSelector != "" + yyq2[3] = x.FieldSelector != "" + yyq2[4] = x.Watch != false + yyq2[5] = x.ResourceVersion != "" + yyq2[6] = x.TimeoutSeconds != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + 
yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.LabelSelector)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("labelSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.LabelSelector)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FieldSelector)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fieldSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FieldSelector)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Watch)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("watch")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Watch)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.TimeoutSeconds == nil { + r.EncodeNil() + } else { + yy22 := *x.TimeoutSeconds + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeInt(int64(yy22)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("timeoutSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TimeoutSeconds == nil { + r.EncodeNil() + } else { + yy24 := *x.TimeoutSeconds + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeInt(int64(yy24)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ListOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == 
codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ListOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "labelSelector": + if r.TryDecodeAsNil() { + x.LabelSelector = "" + } else { + yyv8 := &x.LabelSelector + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "fieldSelector": + if r.TryDecodeAsNil() { + x.FieldSelector = "" + } else { + yyv10 := &x.FieldSelector + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "watch": + if r.TryDecodeAsNil() { + x.Watch = false + } else { + yyv12 := &x.Watch + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + case "resourceVersion": + if r.TryDecodeAsNil() { + x.ResourceVersion = "" + } else { + yyv14 := &x.ResourceVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "timeoutSeconds": + if r.TryDecodeAsNil() { + if x.TimeoutSeconds != nil { + x.TimeoutSeconds = nil + } + } else { + if x.TimeoutSeconds == nil { + x.TimeoutSeconds = new(int64) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ListOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv19 := &x.Kind + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv21 := &x.APIVersion + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LabelSelector = "" + } else { + yyv23 := &x.LabelSelector + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FieldSelector = "" + } else { + yyv25 := &x.FieldSelector + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Watch = false + } else { + yyv27 := &x.Watch + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(yyv27)) = r.DecodeBool() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ResourceVersion = "" + } else { + yyv29 := &x.ResourceVersion + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TimeoutSeconds != nil { + x.TimeoutSeconds = nil + } + } else { + if x.TimeoutSeconds == nil { + x.TimeoutSeconds = new(int64) + } + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64)) + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodLogOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [10]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Container != "" + yyq2[3] = x.Follow != false + yyq2[4] = x.Previous != false + yyq2[5] = x.SinceSeconds != nil + yyq2[6] = x.SinceTime != nil + yyq2[7] = x.Timestamps != false + 
yyq2[8] = x.TailLines != nil + yyq2[9] = x.LimitBytes != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(10) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Container)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("container")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Container)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Follow)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("follow")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Follow)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Previous)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("previous")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Previous)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.SinceSeconds == nil { + r.EncodeNil() + } else { + yy19 := *x.SinceSeconds + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(yy19)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("sinceSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SinceSeconds == nil { + r.EncodeNil() + } else { + yy21 := *x.SinceSeconds + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeInt(int64(yy21)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.SinceTime == nil { + r.EncodeNil() + } else { + yym24 := z.EncBinary() + _ = yym24 + if false { + } else if z.HasExtensions() && z.EncExt(x.SinceTime) { + } else if yym24 { + z.EncBinaryMarshal(x.SinceTime) + } else if !yym24 && z.IsJSONHandle() { + z.EncJSONMarshal(x.SinceTime) + } else { + z.EncFallback(x.SinceTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("sinceTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SinceTime == nil { + r.EncodeNil() + } else { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else if z.HasExtensions() && z.EncExt(x.SinceTime) { + } else if yym25 { + z.EncBinaryMarshal(x.SinceTime) + } else if !yym25 && z.IsJSONHandle() { + z.EncJSONMarshal(x.SinceTime) + } else { + z.EncFallback(x.SinceTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym27 := z.EncBinary() + _ = yym27 + if false { + } else { + r.EncodeBool(bool(x.Timestamps)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("timestamps")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeBool(bool(x.Timestamps)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.TailLines == nil { + r.EncodeNil() + } else { + yy30 := *x.TailLines + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeInt(int64(yy30)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tailLines")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TailLines == nil { + r.EncodeNil() + } else { + yy32 := *x.TailLines + yym33 := z.EncBinary() + _ = yym33 + if false { + } else { + r.EncodeInt(int64(yy32)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + if x.LimitBytes == nil { + r.EncodeNil() + } else { + yy35 := *x.LimitBytes + yym36 := z.EncBinary() + _ = yym36 + if false { + } else { + r.EncodeInt(int64(yy35)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("limitBytes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LimitBytes == nil { + r.EncodeNil() + } else { + yy37 := *x.LimitBytes + yym38 := z.EncBinary() + _ = yym38 + if false { + } else { + r.EncodeInt(int64(yy37)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodLogOptions) CodecDecodeSelf(d 
*codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodLogOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "container": + if r.TryDecodeAsNil() { + x.Container = "" + } else { + yyv8 := &x.Container + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "follow": + if r.TryDecodeAsNil() { + x.Follow = false + } else { + yyv10 := &x.Follow + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + case "previous": + if r.TryDecodeAsNil() { + x.Previous = false + } else { + yyv12 := &x.Previous + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + case "sinceSeconds": + if r.TryDecodeAsNil() { + if x.SinceSeconds != nil { + x.SinceSeconds = nil + } + } else { + if x.SinceSeconds == nil { + x.SinceSeconds = new(int64) + } + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int64)(x.SinceSeconds)) = int64(r.DecodeInt(64)) + } + } + case "sinceTime": + if r.TryDecodeAsNil() { + if x.SinceTime != nil { + x.SinceTime = nil + } + } else { + if x.SinceTime == nil { + x.SinceTime = new(pkg2_v1.Time) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(x.SinceTime) { + } else if yym17 { + z.DecBinaryUnmarshal(x.SinceTime) + } else if !yym17 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.SinceTime) + } else { + z.DecFallback(x.SinceTime, false) + } + } + case "timestamps": + if r.TryDecodeAsNil() { + x.Timestamps = false + } else { + yyv18 := &x.Timestamps + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*bool)(yyv18)) = r.DecodeBool() + } + } + case "tailLines": + if r.TryDecodeAsNil() { + if x.TailLines != nil { + x.TailLines = nil + } + } else { + if x.TailLines == nil { + x.TailLines = new(int64) + } + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + 
*((*int64)(x.TailLines)) = int64(r.DecodeInt(64)) + } + } + case "limitBytes": + if r.TryDecodeAsNil() { + if x.LimitBytes != nil { + x.LimitBytes = nil + } + } else { + if x.LimitBytes == nil { + x.LimitBytes = new(int64) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*int64)(x.LimitBytes)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodLogOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj24 int + var yyb24 bool + var yyhl24 bool = l >= 0 + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv25 := &x.Kind + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv27 := &x.APIVersion + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Container = "" + } else { + yyv29 := &x.Container + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Follow = false + } else { + yyv31 := &x.Follow + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*bool)(yyv31)) = r.DecodeBool() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Previous = false + } else { + yyv33 := &x.Previous + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*bool)(yyv33)) = r.DecodeBool() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SinceSeconds != nil { + x.SinceSeconds = nil + } + } else { + if x.SinceSeconds == nil { + x.SinceSeconds = new(int64) + } + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*int64)(x.SinceSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SinceTime != nil { + x.SinceTime = nil + } + } else { + if x.SinceTime == nil { + x.SinceTime = new(pkg2_v1.Time) + } + yym38 := z.DecBinary() + _ = yym38 + if false { + } else if z.HasExtensions() && z.DecExt(x.SinceTime) { + } else if yym38 { + z.DecBinaryUnmarshal(x.SinceTime) + } else if !yym38 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.SinceTime) + } else { + z.DecFallback(x.SinceTime, false) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Timestamps = false + } else { + yyv39 := &x.Timestamps + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*bool)(yyv39)) = r.DecodeBool() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TailLines != nil { + x.TailLines = nil + } + } else { + if x.TailLines == nil { + x.TailLines = new(int64) + } + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*int64)(x.TailLines)) = int64(r.DecodeInt(64)) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LimitBytes != nil { + x.LimitBytes = nil + } + } else { + if x.LimitBytes == nil { + x.LimitBytes = new(int64) + } + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*int64)(x.LimitBytes)) = int64(r.DecodeInt(64)) + } + } + for { + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj24-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodAttachOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Stdin != false + yyq2[3] = x.Stdout != false + yyq2[4] = x.Stderr != false + yyq2[5] = x.TTY != false + yyq2[6] = x.Container != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := 
z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.Stdin)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stdin")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.Stdin)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Stdout)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stdout")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Stdout)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Stderr)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stderr")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Stderr)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.TTY)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tty")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.TTY)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Container)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("container")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Container)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodAttachOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodAttachOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "stdin": + if r.TryDecodeAsNil() { + x.Stdin = false + } else { + yyv8 := &x.Stdin + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + case "stdout": + if r.TryDecodeAsNil() { + x.Stdout = false + } else { + yyv10 := &x.Stdout + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + case "stderr": + if r.TryDecodeAsNil() { + x.Stderr = false + } else { + yyv12 := &x.Stderr + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + case "tty": + if r.TryDecodeAsNil() { + x.TTY = false + } else { + yyv14 := &x.TTY + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + case "container": + if r.TryDecodeAsNil() { + x.Container = "" + } else { + yyv16 := &x.Container + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodAttachOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv19 := &x.Kind + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv21 := &x.APIVersion + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Stdin = false + } else { + yyv23 := &x.Stdin + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(yyv23)) = r.DecodeBool() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Stdout = false + } else { + yyv25 := &x.Stdout + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*bool)(yyv25)) = r.DecodeBool() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Stderr = false + } else { + yyv27 := &x.Stderr + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(yyv27)) = r.DecodeBool() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TTY = false + } else { + yyv29 := &x.TTY + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Container = "" + } else { + yyv31 := &x.Container + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodExecOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Stdin != false + yyq2[3] = x.Stdout != false + yyq2[4] = x.Stderr 
!= false + yyq2[5] = x.TTY != false + yyq2[6] = x.Container != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(8) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.Stdin)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stdin")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.Stdin)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Stdout)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stdout")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Stdout)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Stderr)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stderr")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Stderr)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.TTY)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tty")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := 
z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.TTY)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Container)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("container")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Container)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Command == nil { + r.EncodeNil() + } else { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + z.F.EncSliceStringV(x.Command, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("command")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Command == nil { + r.EncodeNil() + } else { + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + z.F.EncSliceStringV(x.Command, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodExecOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodExecOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "stdin": + if r.TryDecodeAsNil() { + x.Stdin = false + } else { + yyv8 := &x.Stdin + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + case "stdout": + if r.TryDecodeAsNil() { + x.Stdout = false + } else { + yyv10 := 
&x.Stdout + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + case "stderr": + if r.TryDecodeAsNil() { + x.Stderr = false + } else { + yyv12 := &x.Stderr + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + case "tty": + if r.TryDecodeAsNil() { + x.TTY = false + } else { + yyv14 := &x.TTY + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + case "container": + if r.TryDecodeAsNil() { + x.Container = "" + } else { + yyv16 := &x.Container + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + case "command": + if r.TryDecodeAsNil() { + x.Command = nil + } else { + yyv18 := &x.Command + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + z.F.DecSliceStringX(yyv18, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodExecOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj20 int + var yyb20 bool + var yyhl20 bool = l >= 0 + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv21 := &x.Kind + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv23 := &x.APIVersion + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Stdin = false + } else { + yyv25 := &x.Stdin + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*bool)(yyv25)) = r.DecodeBool() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Stdout = false + } else { + yyv27 := &x.Stdout + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(yyv27)) = r.DecodeBool() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Stderr = false + } else { + yyv29 := &x.Stderr + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + 
} + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TTY = false + } else { + yyv31 := &x.TTY + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*bool)(yyv31)) = r.DecodeBool() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Container = "" + } else { + yyv33 := &x.Container + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*string)(yyv33)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Command = nil + } else { + yyv35 := &x.Command + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + z.F.DecSliceStringX(yyv35, false, d) + } + } + for { + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj20-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodPortForwardOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = len(x.Ports) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Ports == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + 
z.F.EncSliceInt32V(x.Ports, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ports")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ports == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + z.F.EncSliceInt32V(x.Ports, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodPortForwardOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodPortForwardOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "ports": + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv8 := &x.Ports + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceInt32X(yyv8, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodPortForwardOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv13 := &x.APIVersion + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv15 := &x.Ports + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + z.F.DecSliceInt32X(yyv15, false, d) + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Path != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv8 := &x.Path + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv13 := &x.APIVersion + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv15 := &x.Path + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NodeProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Path != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NodeProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if 
z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NodeProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv8 := &x.Path + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NodeProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv13 := &x.APIVersion + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv15 := &x.Path + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = x.Path != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ServiceProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } 
else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv8 := &x.Path + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ServiceProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv13 := &x.APIVersion + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv15 := &x.Path + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + 
yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.Namespace != "" + yyq2[2] = x.Name != "" + yyq2[3] = x.UID != "" + yyq2[4] = x.APIVersion != "" + yyq2[5] = x.ResourceVersion != "" + yyq2[6] = x.FieldPath != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(x.UID) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("uid")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(x.UID) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fieldPath")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv6 := &x.Namespace + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { 
+ *((*string)(yyv8)) = r.DecodeString() + } + } + case "uid": + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv10 := &x.UID + yym11 := z.DecBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.DecExt(yyv10) { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv12 := &x.APIVersion + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "resourceVersion": + if r.TryDecodeAsNil() { + x.ResourceVersion = "" + } else { + yyv14 := &x.ResourceVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "fieldPath": + if r.TryDecodeAsNil() { + x.FieldPath = "" + } else { + yyv16 := &x.FieldPath + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv19 := &x.Kind + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv21 := &x.Namespace + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv23 := &x.Name + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv25 := &x.UID + yym26 := z.DecBinary() + _ = yym26 + if false { + } else if z.HasExtensions() && z.DecExt(yyv25) { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv27 := &x.APIVersion + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 
{ + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ResourceVersion = "" + } else { + yyv29 := &x.ResourceVersion + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FieldPath = "" + } else { + yyv31 := &x.FieldPath + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LocalObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LocalObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LocalObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 
+ for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LocalObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv7 := &x.Name + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SerializedReference) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { 
+ z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.Reference + yy10.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reference")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.Reference + yy12.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SerializedReference) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SerializedReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "reference": + if r.TryDecodeAsNil() { + x.Reference = ObjectReference{} + } else { + yyv8 := &x.Reference + yyv8.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SerializedReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv10 := &x.Kind + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv12 := &x.APIVersion + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reference = ObjectReference{} + } else { + yyv14 := &x.Reference + yyv14.CodecDecodeSelf(d) + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EventSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Component != "" + yyq2[1] = x.Host != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Component)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("component")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Component)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Host)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("host")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Host)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EventSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } 
else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EventSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "component": + if r.TryDecodeAsNil() { + x.Component = "" + } else { + yyv4 := &x.Component + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "host": + if r.TryDecodeAsNil() { + x.Host = "" + } else { + yyv6 := &x.Host + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EventSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Component = "" + } else { + yyv9 := &x.Component + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Host = "" + } else { + yyv11 := &x.Host + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Event) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [11]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + yyq2[6] = true + yyq2[7] = true + yyq2[8] = true + yyq2[9] = x.Count != 0 + yyq2[10] = x.Type != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(11) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := 
z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.InvolvedObject + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("involvedObject")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.InvolvedObject + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yy26 := &x.Source + yy26.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if 
yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("source")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy28 := &x.Source + yy28.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yy31 := &x.FirstTimestamp + yym32 := z.EncBinary() + _ = yym32 + if false { + } else if z.HasExtensions() && z.EncExt(yy31) { + } else if yym32 { + z.EncBinaryMarshal(yy31) + } else if !yym32 && z.IsJSONHandle() { + z.EncJSONMarshal(yy31) + } else { + z.EncFallback(yy31) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("firstTimestamp")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy33 := &x.FirstTimestamp + yym34 := z.EncBinary() + _ = yym34 + if false { + } else if z.HasExtensions() && z.EncExt(yy33) { + } else if yym34 { + z.EncBinaryMarshal(yy33) + } else if !yym34 && z.IsJSONHandle() { + z.EncJSONMarshal(yy33) + } else { + z.EncFallback(yy33) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + yy36 := &x.LastTimestamp + yym37 := z.EncBinary() + _ = yym37 + if false { + } else if z.HasExtensions() && z.EncExt(yy36) { + } else if yym37 { + z.EncBinaryMarshal(yy36) + } else if !yym37 && z.IsJSONHandle() { + z.EncJSONMarshal(yy36) + } else { + z.EncFallback(yy36) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTimestamp")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy38 := &x.LastTimestamp + yym39 := z.EncBinary() + _ = yym39 + if false { + } else if z.HasExtensions() && z.EncExt(yy38) { + } else if yym39 { + z.EncBinaryMarshal(yy38) + } else if !yym39 && z.IsJSONHandle() { + z.EncJSONMarshal(yy38) + } else { + z.EncFallback(yy38) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[9] { + yym41 := z.EncBinary() + _ = yym41 + if false { + } else { + r.EncodeInt(int64(x.Count)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[9] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("count")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym42 := z.EncBinary() + _ = yym42 + if false { + } else { + r.EncodeInt(int64(x.Count)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[10] { + yym44 := z.EncBinary() + _ = yym44 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Type)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[10] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym45 := z.EncBinary() + _ = yym45 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Type)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Event) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if 
false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Event) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "involvedObject": + if r.TryDecodeAsNil() { + x.InvolvedObject = ObjectReference{} + } else { + yyv10 := &x.InvolvedObject + yyv10.CodecDecodeSelf(d) + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv11 := &x.Reason + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv13 := &x.Message + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + case "source": + if r.TryDecodeAsNil() { + x.Source = EventSource{} + } else { + yyv15 := &x.Source + yyv15.CodecDecodeSelf(d) + } + case "firstTimestamp": + if r.TryDecodeAsNil() { + x.FirstTimestamp = pkg2_v1.Time{} + } else { + yyv16 := &x.FirstTimestamp + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else if yym17 { + z.DecBinaryUnmarshal(yyv16) + } else if !yym17 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv16) + } else { + z.DecFallback(yyv16, false) + } + } + case "lastTimestamp": + if r.TryDecodeAsNil() { + x.LastTimestamp = pkg2_v1.Time{} + } else { + yyv18 := &x.LastTimestamp + yym19 := z.DecBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.DecExt(yyv18) { + } else if yym19 { + z.DecBinaryUnmarshal(yyv18) + } else if !yym19 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv18) + } else { + z.DecFallback(yyv18, false) + } + } + case "count": + if r.TryDecodeAsNil() { + x.Count = 0 + } else { + yyv20 := &x.Count + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*int32)(yyv20)) = int32(r.DecodeInt(32)) + } + } + case "type": + if 
r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv22 := &x.Type + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*string)(yyv22)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Event) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj24 int + var yyb24 bool + var yyhl24 bool = l >= 0 + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv25 := &x.Kind + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv27 := &x.APIVersion + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv29 := &x.ObjectMeta + yym30 := z.DecBinary() + _ = yym30 + if false { + } else if z.HasExtensions() && z.DecExt(yyv29) { + } else { + z.DecFallback(yyv29, false) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.InvolvedObject = ObjectReference{} + } else { + yyv31 := &x.InvolvedObject + yyv31.CodecDecodeSelf(d) + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv32 := &x.Reason + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + *((*string)(yyv32)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv34 := &x.Message + yym35 := z.DecBinary() + _ = yym35 + if false { + } else { + *((*string)(yyv34)) = r.DecodeString() + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Source = EventSource{} + } else { + yyv36 := &x.Source + yyv36.CodecDecodeSelf(d) + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FirstTimestamp = pkg2_v1.Time{} + } else { + yyv37 := &x.FirstTimestamp + yym38 := z.DecBinary() + _ = yym38 + if false { + } else if z.HasExtensions() && z.DecExt(yyv37) { + } else if yym38 { + z.DecBinaryUnmarshal(yyv37) + } else if !yym38 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv37) + } else { + z.DecFallback(yyv37, false) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTimestamp = pkg2_v1.Time{} + } else { + yyv39 := &x.LastTimestamp + yym40 := z.DecBinary() + _ = yym40 + if false { + } else if z.HasExtensions() && z.DecExt(yyv39) { + } else if yym40 { + z.DecBinaryUnmarshal(yyv39) + } else if !yym40 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv39) + } else { + z.DecFallback(yyv39, false) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Count = 0 + } else { + yyv41 := &x.Count + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*int32)(yyv41)) = int32(r.DecodeInt(32)) + } + } + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv43 := &x.Type + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*string)(yyv43)) = r.DecodeString() + } + } + for { + yyj24++ + if yyhl24 { + yyb24 = yyj24 > l + } else { + yyb24 = r.CheckBreak() + } + if yyb24 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj24-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *EventList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || 
yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceEvent(([]Event)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceEvent(([]Event)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *EventList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *EventList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + 
*((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceEvent((*[]Event)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *EventList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceEvent((*[]Event)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *List) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + 
yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceruntime_RawExtension(([]pkg5_runtime.RawExtension)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceruntime_RawExtension(([]pkg5_runtime.RawExtension)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *List) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 
{ + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *List) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceruntime_RawExtension((*[]pkg5_runtime.RawExtension)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *List) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceruntime_RawExtension((*[]pkg5_runtime.RawExtension)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x LimitType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *LimitType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *LimitRangeItem) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Type != "" + yyq2[1] = len(x.Max) != 0 + yyq2[2] = len(x.Min) != 0 + yyq2[3] = len(x.Default) != 0 + yyq2[4] = len(x.DefaultRequest) != 0 + yyq2[5] = len(x.MaxLimitRequestRatio) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Type.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Max == nil { + r.EncodeNil() + } else { + x.Max.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("max")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Max == nil { + r.EncodeNil() + } else { + x.Max.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Min == nil { + r.EncodeNil() + } else { + x.Min.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("min")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Min == nil { + r.EncodeNil() + } else { + x.Min.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Default == nil { + 
r.EncodeNil() + } else { + x.Default.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("default")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Default == nil { + r.EncodeNil() + } else { + x.Default.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.DefaultRequest == nil { + r.EncodeNil() + } else { + x.DefaultRequest.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("defaultRequest")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DefaultRequest == nil { + r.EncodeNil() + } else { + x.DefaultRequest.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.MaxLimitRequestRatio == nil { + r.EncodeNil() + } else { + x.MaxLimitRequestRatio.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("maxLimitRequestRatio")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MaxLimitRequestRatio == nil { + r.EncodeNil() + } else { + x.MaxLimitRequestRatio.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LimitRangeItem) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LimitRangeItem) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "max": + if r.TryDecodeAsNil() { + x.Max = nil + } else { + yyv5 := &x.Max + yyv5.CodecDecodeSelf(d) + } + case "min": + if r.TryDecodeAsNil() { + x.Min = nil + } else { + yyv6 := &x.Min + yyv6.CodecDecodeSelf(d) + } + case "default": + if r.TryDecodeAsNil() { + x.Default = nil + } else { + yyv7 := &x.Default + yyv7.CodecDecodeSelf(d) + } + case 
"defaultRequest": + if r.TryDecodeAsNil() { + x.DefaultRequest = nil + } else { + yyv8 := &x.DefaultRequest + yyv8.CodecDecodeSelf(d) + } + case "maxLimitRequestRatio": + if r.TryDecodeAsNil() { + x.MaxLimitRequestRatio = nil + } else { + yyv9 := &x.MaxLimitRequestRatio + yyv9.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LimitRangeItem) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv11 := &x.Type + yyv11.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Max = nil + } else { + yyv12 := &x.Max + yyv12.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Min = nil + } else { + yyv13 := &x.Min + yyv13.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Default = nil + } else { + yyv14 := &x.Default + yyv14.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DefaultRequest = nil + } else { + yyv15 := &x.DefaultRequest + yyv15.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MaxLimitRequestRatio = nil + } else { + yyv16 := &x.MaxLimitRequestRatio + yyv16.CodecDecodeSelf(d) + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LimitRangeSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } 
else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Limits == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceLimitRangeItem(([]LimitRangeItem)(x.Limits), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("limits")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Limits == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceLimitRangeItem(([]LimitRangeItem)(x.Limits), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LimitRangeSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LimitRangeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "limits": + if r.TryDecodeAsNil() { + x.Limits = nil + } else { + yyv4 := &x.Limits + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LimitRangeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Limits = nil + } else { + yyv7 := &x.Limits + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LimitRange) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LimitRange) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := 
r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LimitRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = LimitRangeSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LimitRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } 
else { + z.DecFallback(yyv16, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = LimitRangeSpec{} + } else { + yyv18 := &x.Spec + yyv18.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LimitRangeList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceLimitRange(([]LimitRange)(x.Items), e) + } + } + } else { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceLimitRange(([]LimitRange)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LimitRangeList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LimitRangeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceLimitRange((*[]LimitRange)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LimitRangeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + 
} + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceLimitRange((*[]LimitRange)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ResourceQuotaScope) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ResourceQuotaScope) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *ResourceQuotaSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Hard) != 0 + yyq2[1] = len(x.Scopes) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Hard == nil { + r.EncodeNil() + } else { + x.Hard.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hard")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Hard == nil { + r.EncodeNil() + } else { + x.Hard.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Scopes 
== nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scopes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Scopes == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceQuotaSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceQuotaSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "hard": + if r.TryDecodeAsNil() { + x.Hard = nil + } else { + yyv4 := &x.Hard + yyv4.CodecDecodeSelf(d) + } + case "scopes": + if r.TryDecodeAsNil() { + x.Scopes = nil + } else { + yyv5 := &x.Scopes + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv5), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceQuotaSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hard = nil + } else { + yyv8 := &x.Hard + yyv8.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Scopes = nil + } else { + yyv9 := &x.Scopes + yym10 := z.DecBinary() + _ = yym10 + if false { + } 
else { + h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv9), d) + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceQuotaStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Hard) != 0 + yyq2[1] = len(x.Used) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Hard == nil { + r.EncodeNil() + } else { + x.Hard.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hard")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Hard == nil { + r.EncodeNil() + } else { + x.Hard.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Used == nil { + r.EncodeNil() + } else { + x.Used.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("used")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Used == nil { + r.EncodeNil() + } else { + x.Used.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceQuotaStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceQuotaStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + 
z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "hard": + if r.TryDecodeAsNil() { + x.Hard = nil + } else { + yyv4 := &x.Hard + yyv4.CodecDecodeSelf(d) + } + case "used": + if r.TryDecodeAsNil() { + x.Used = nil + } else { + yyv5 := &x.Used + yyv5.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceQuotaStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hard = nil + } else { + yyv7 := &x.Hard + yyv7.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Used = nil + } else { + yyv8 := &x.Used + yyv8.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceQuota) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceQuota) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceQuota) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + 
case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ResourceQuotaSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ResourceQuotaStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceQuota) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ResourceQuotaSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ResourceQuotaStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceQuotaList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := 
!z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceResourceQuota(([]ResourceQuota)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceResourceQuota(([]ResourceQuota)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceQuotaList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + 
x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceQuotaList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceResourceQuota((*[]ResourceQuota)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceQuotaList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } 
else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceResourceQuota((*[]ResourceQuota)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Secret) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = len(x.Data) != 0 + yyq2[4] = len(x.StringData) != 0 + yyq2[5] = x.Type != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Data == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false 
{ + } else { + h.encMapstringSliceuint8((map[string][]uint8)(x.Data), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("data")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Data == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encMapstringSliceuint8((map[string][]uint8)(x.Data), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.StringData == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + z.F.EncMapStringStringV(x.StringData, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("stringData")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.StringData == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + z.F.EncMapStringStringV(x.StringData, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + x.Type.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Secret) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Secret) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + 
case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "data": + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv10 := &x.Data + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decMapstringSliceuint8((*map[string][]uint8)(yyv10), d) + } + } + case "stringData": + if r.TryDecodeAsNil() { + x.StringData = nil + } else { + yyv12 := &x.StringData + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + z.F.DecMapStringStringX(yyv12, false, d) + } + } + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv14 := &x.Type + yyv14.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Secret) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj15 int + var yyb15 bool + var yyhl15 bool = l >= 0 + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv16 := &x.Kind + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv18 := &x.APIVersion + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv20 := &x.ObjectMeta + yym21 := z.DecBinary() + _ = yym21 + if false { + } else if z.HasExtensions() && z.DecExt(yyv20) { + } else { + z.DecFallback(yyv20, false) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv22 := &x.Data + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + h.decMapstringSliceuint8((*map[string][]uint8)(yyv22), d) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.StringData = nil + } else { + yyv24 := &x.StringData + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + z.F.DecMapStringStringX(yyv24, false, d) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv26 := &x.Type + yyv26.CodecDecodeSelf(d) + } + for { + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj15-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x SecretType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *SecretType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *SecretList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if 
false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceSecret(([]Secret)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceSecret(([]Secret)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SecretList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SecretList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceSecret((*[]Secret)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SecretList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = 
r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceSecret((*[]Secret)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ConfigMap) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = len(x.Data) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + 
r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Data == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + z.F.EncMapStringStringV(x.Data, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("data")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Data == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncMapStringStringV(x.Data, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ConfigMap) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ConfigMap) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 
:= z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "data": + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv10 := &x.Data + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + z.F.DecMapStringStringX(yyv10, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ConfigMap) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv19 := &x.Data + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + z.F.DecMapStringStringX(yyv19, false, d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ConfigMapList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } 
else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceConfigMap(([]ConfigMap)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceConfigMap(([]ConfigMap)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ConfigMapList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) 
+ } + } +} + +func (x *ConfigMapList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceConfigMap((*[]ConfigMap)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ConfigMapList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + 
h.decSliceConfigMap((*[]ConfigMap)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ComponentConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ComponentConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *ComponentCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.Message != "" + yyq2[3] = x.Error != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Status.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Status.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Error)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("error")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, 
string(x.Error)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ComponentCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ComponentCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv6 := &x.Message + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "error": + if r.TryDecodeAsNil() { + x.Error = "" + } else { + yyv8 := &x.Error + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ComponentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv11 := &x.Type + yyv11.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv12 := &x.Status + yyv12.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else 
{ + yyv13 := &x.Message + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Error = "" + } else { + yyv15 := &x.Error + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ComponentStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = len(x.Conditions) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceComponentCondition(([]ComponentCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceComponentCondition(([]ComponentCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ComponentStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ComponentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv10 := &x.Conditions + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceComponentCondition((*[]ComponentCondition)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ComponentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + 
yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv19 := &x.Conditions + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceComponentCondition((*[]ComponentCondition)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ComponentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceComponentStatus(([]ComponentStatus)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceComponentStatus(([]ComponentStatus)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ComponentStatusList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ComponentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if 
r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceComponentStatus((*[]ComponentStatus)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ComponentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceComponentStatus((*[]ComponentStatus)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DownwardAPIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] 
= len(x.Items) != 0 + yyq2[1] = x.DefaultMode != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Items == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy7 := *x.DefaultMode + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("defaultMode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DefaultMode == nil { + r.EncodeNil() + } else { + yy9 := *x.DefaultMode + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DownwardAPIVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DownwardAPIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv4 := &x.Items + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv4), d) + } + } + case "defaultMode": + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { 
+ if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DownwardAPIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv9 := &x.Items + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DefaultMode != nil { + x.DefaultMode = nil + } + } else { + if x.DefaultMode == nil { + x.DefaultMode = new(int32) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(x.DefaultMode)) = int32(r.DecodeInt(32)) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DownwardAPIVolumeFile) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FieldRef != nil + yyq2[2] = x.ResourceFieldRef != nil + yyq2[3] = x.Mode != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.FieldRef == nil { + r.EncodeNil() + } else { + x.FieldRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fieldRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FieldRef == nil { + r.EncodeNil() + } else { + x.FieldRef.CodecEncodeSelf(e) + } + 
} + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ResourceFieldRef == nil { + r.EncodeNil() + } else { + x.ResourceFieldRef.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceFieldRef == nil { + r.EncodeNil() + } else { + x.ResourceFieldRef.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Mode == nil { + r.EncodeNil() + } else { + yy13 := *x.Mode + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(yy13)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("mode")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Mode == nil { + r.EncodeNil() + } else { + yy15 := *x.Mode + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(yy15)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DownwardAPIVolumeFile) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DownwardAPIVolumeFile) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "fieldRef": + if r.TryDecodeAsNil() { + if x.FieldRef != nil { + x.FieldRef = nil + } + } else { + if x.FieldRef == nil { + x.FieldRef = new(ObjectFieldSelector) + } + x.FieldRef.CodecDecodeSelf(d) + } + case "resourceFieldRef": + if r.TryDecodeAsNil() { + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } + } else { + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) + } + case "mode": + if r.TryDecodeAsNil() { + if x.Mode != nil { + x.Mode = nil + } + } else { + if 
x.Mode == nil { + x.Mode = new(int32) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DownwardAPIVolumeFile) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv11 := &x.Path + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FieldRef != nil { + x.FieldRef = nil + } + } else { + if x.FieldRef == nil { + x.FieldRef = new(ObjectFieldSelector) + } + x.FieldRef.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ResourceFieldRef != nil { + x.ResourceFieldRef = nil + } + } else { + if x.ResourceFieldRef == nil { + x.ResourceFieldRef = new(ResourceFieldSelector) + } + x.ResourceFieldRef.CodecDecodeSelf(d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Mode != nil { + x.Mode = nil + } + } else { + if x.Mode == nil { + x.Mode = new(int32) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(x.Mode)) = int32(r.DecodeInt(32)) + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DownwardAPIProjection) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Items) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Items == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), 
e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DownwardAPIProjection) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DownwardAPIProjection) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv4 := &x.Items + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DownwardAPIProjection) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv7 := &x.Items + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + 
_ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Capabilities != nil + yyq2[1] = x.Privileged != nil + yyq2[2] = x.SELinuxOptions != nil + yyq2[3] = x.RunAsUser != nil + yyq2[4] = x.RunAsNonRoot != nil + yyq2[5] = x.ReadOnlyRootFilesystem != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Capabilities == nil { + r.EncodeNil() + } else { + x.Capabilities.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("capabilities")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Capabilities == nil { + r.EncodeNil() + } else { + x.Capabilities.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Privileged == nil { + r.EncodeNil() + } else { + yy7 := *x.Privileged + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("privileged")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Privileged == nil { + r.EncodeNil() + } else { + yy9 := *x.Privileged + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.SELinuxOptions == nil { + r.EncodeNil() + } else { + x.SELinuxOptions.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SELinuxOptions == nil { + r.EncodeNil() + } else { + x.SELinuxOptions.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.RunAsUser == nil { + r.EncodeNil() + } else { + yy15 := *x.RunAsUser + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(yy15)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RunAsUser == nil { + r.EncodeNil() + } else { + yy17 := *x.RunAsUser + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeInt(int64(yy17)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.RunAsNonRoot == nil { + r.EncodeNil() + } else { + yy20 := *x.RunAsNonRoot + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeBool(bool(yy20)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("runAsNonRoot")) 
+ z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RunAsNonRoot == nil { + r.EncodeNil() + } else { + yy22 := *x.RunAsNonRoot + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeBool(bool(yy22)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.ReadOnlyRootFilesystem == nil { + r.EncodeNil() + } else { + yy25 := *x.ReadOnlyRootFilesystem + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeBool(bool(yy25)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnlyRootFilesystem")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ReadOnlyRootFilesystem == nil { + r.EncodeNil() + } else { + yy27 := *x.ReadOnlyRootFilesystem + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeBool(bool(yy27)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SecurityContext) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "capabilities": + if r.TryDecodeAsNil() { + if x.Capabilities != nil { + x.Capabilities = nil + } + } else { + if x.Capabilities == nil { + x.Capabilities = new(Capabilities) + } + x.Capabilities.CodecDecodeSelf(d) + } + case "privileged": + if r.TryDecodeAsNil() { + if x.Privileged != nil { + x.Privileged = nil + } + } else { + if x.Privileged == nil { + x.Privileged = new(bool) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*bool)(x.Privileged)) = r.DecodeBool() + } + } + case "seLinuxOptions": + if r.TryDecodeAsNil() { + if x.SELinuxOptions != nil { + x.SELinuxOptions = nil + } + } else { + if x.SELinuxOptions == nil { + x.SELinuxOptions = new(SELinuxOptions) + } + x.SELinuxOptions.CodecDecodeSelf(d) + } + case "runAsUser": + if r.TryDecodeAsNil() { + if x.RunAsUser != nil { + x.RunAsUser = nil + } + } else { + if x.RunAsUser == nil { + x.RunAsUser = new(int64) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int64)(x.RunAsUser)) = 
int64(r.DecodeInt(64)) + } + } + case "runAsNonRoot": + if r.TryDecodeAsNil() { + if x.RunAsNonRoot != nil { + x.RunAsNonRoot = nil + } + } else { + if x.RunAsNonRoot == nil { + x.RunAsNonRoot = new(bool) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() + } + } + case "readOnlyRootFilesystem": + if r.TryDecodeAsNil() { + if x.ReadOnlyRootFilesystem != nil { + x.ReadOnlyRootFilesystem = nil + } + } else { + if x.ReadOnlyRootFilesystem == nil { + x.ReadOnlyRootFilesystem = new(bool) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(x.ReadOnlyRootFilesystem)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Capabilities != nil { + x.Capabilities = nil + } + } else { + if x.Capabilities == nil { + x.Capabilities = new(Capabilities) + } + x.Capabilities.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Privileged != nil { + x.Privileged = nil + } + } else { + if x.Privileged == nil { + x.Privileged = new(bool) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*bool)(x.Privileged)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SELinuxOptions != nil { + x.SELinuxOptions = nil + } + } else { + if x.SELinuxOptions == nil { + x.SELinuxOptions = new(SELinuxOptions) + } + x.SELinuxOptions.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RunAsUser != nil { + x.RunAsUser = nil + } + } else { + if x.RunAsUser == nil { + x.RunAsUser = new(int64) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RunAsNonRoot != nil { + x.RunAsNonRoot = nil + } + } else { + if x.RunAsNonRoot == nil { + x.RunAsNonRoot = new(bool) + } + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ReadOnlyRootFilesystem != nil { + x.ReadOnlyRootFilesystem = nil + } + } else { + if x.ReadOnlyRootFilesystem == nil { + x.ReadOnlyRootFilesystem = new(bool) + } + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(x.ReadOnlyRootFilesystem)) = r.DecodeBool() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SELinuxOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.User != "" + yyq2[1] = x.Role != "" + yyq2[2] = x.Type != "" + yyq2[3] = x.Level != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("user")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Role)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("role")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Role)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Type)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Type)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Level)) + } + } else { + 
r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("level")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Level)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SELinuxOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SELinuxOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "user": + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv4 := &x.User + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "role": + if r.TryDecodeAsNil() { + x.Role = "" + } else { + yyv6 := &x.Role + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv8 := &x.Type + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "level": + if r.TryDecodeAsNil() { + x.Level = "" + } else { + yyv10 := &x.Level + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SELinuxOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv13 := &x.User + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 
= r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Role = "" + } else { + yyv15 := &x.Role + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv17 := &x.Type + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Level = "" + } else { + yyv19 := &x.Level + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RangeAllocation) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() 
+ _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Range)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("range")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Range)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Data == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("data")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Data == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RangeAllocation) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RangeAllocation) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if 
false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "range": + if r.TryDecodeAsNil() { + x.Range = "" + } else { + yyv10 := &x.Range + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "data": + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv12 := &x.Data + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *yyv12 = r.DecodeBytes(*(*[]byte)(yyv12), false, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RangeAllocation) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv15 := &x.Kind + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv17 := &x.APIVersion + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv19 := &x.ObjectMeta + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else { + z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Range = "" + } else { + yyv21 := &x.Range + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv23 := &x.Data + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *yyv23 = r.DecodeBytes(*(*[]byte)(yyv23), false, false) + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSlicev1_OwnerReference(v []pkg2_v1.OwnerReference, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yym3 := z.EncBinary() + _ = yym3 + if false { + } else if z.HasExtensions() && z.EncExt(yy2) { + } else { + z.EncFallback(yy2) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicev1_OwnerReference(v *[]pkg2_v1.OwnerReference, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg2_v1.OwnerReference{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 80) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]pkg2_v1.OwnerReference, yyrl1) + } + } else { + yyv1 = make([]pkg2_v1.OwnerReference, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg2_v1.OwnerReference{} + } else { + yyv2 := &yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else if z.HasExtensions() && z.DecExt(yyv2) { + } else { + z.DecFallback(yyv2, false) + } + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, pkg2_v1.OwnerReference{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg2_v1.OwnerReference{} + } else { + yyv4 := &yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, pkg2_v1.OwnerReference{}) // var yyz1 pkg2_v1.OwnerReference + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg2_v1.OwnerReference{} + } else { + yyv6 := &yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else { + z.DecFallback(yyv6, false) + } + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg2_v1.OwnerReference{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePersistentVolumeAccessMode(v []PersistentVolumeAccessMode, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yyv1.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePersistentVolumeAccessMode(v *[]PersistentVolumeAccessMode, d *codec1978.Decoder) 
{ + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PersistentVolumeAccessMode{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PersistentVolumeAccessMode, yyrl1) + } + } else { + yyv1 = make([]PersistentVolumeAccessMode, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 PersistentVolumeAccessMode + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PersistentVolumeAccessMode{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePersistentVolume(v []PersistentVolume, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePersistentVolume(v *[]PersistentVolume, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PersistentVolume{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 528) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PersistentVolume, yyrl1) + } + } else { + yyv1 = make([]PersistentVolume, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PersistentVolume{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PersistentVolume{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + 
yyv1[yyj1] = PersistentVolume{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PersistentVolume{}) // var yyz1 PersistentVolume + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PersistentVolume{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PersistentVolume{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePersistentVolumeClaim(v []PersistentVolumeClaim, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePersistentVolumeClaim(v *[]PersistentVolumeClaim, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PersistentVolumeClaim{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 376) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PersistentVolumeClaim, yyrl1) + } + } else { + yyv1 = make([]PersistentVolumeClaim, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PersistentVolumeClaim{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PersistentVolumeClaim{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PersistentVolumeClaim{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PersistentVolumeClaim{}) // var yyz1 PersistentVolumeClaim + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PersistentVolumeClaim{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PersistentVolumeClaim{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceKeyToPath(v []KeyToPath, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceKeyToPath(v *[]KeyToPath, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []KeyToPath{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]KeyToPath, yyrl1) + } + } else { + yyv1 = make([]KeyToPath, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = KeyToPath{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, KeyToPath{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = KeyToPath{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, KeyToPath{}) // var yyz1 KeyToPath + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = KeyToPath{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []KeyToPath{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceVolumeProjection(v []VolumeProjection, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceVolumeProjection(v *[]VolumeProjection, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []VolumeProjection{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]VolumeProjection, yyrl1) + } + } else { + yyv1 = make([]VolumeProjection, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = VolumeProjection{} + } else { + yyv2 := &yyv1[yyj1] + 
yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, VolumeProjection{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = VolumeProjection{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, VolumeProjection{}) // var yyz1 VolumeProjection + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = VolumeProjection{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []VolumeProjection{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceHTTPHeader(v []HTTPHeader, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceHTTPHeader(v *[]HTTPHeader, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []HTTPHeader{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]HTTPHeader, yyrl1) + } + } else { + yyv1 = make([]HTTPHeader, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HTTPHeader{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, HTTPHeader{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HTTPHeader{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, HTTPHeader{}) // var yyz1 HTTPHeader + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = HTTPHeader{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []HTTPHeader{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceCapability(v []Capability, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yyv1.CodecEncodeSelf(e) + } + 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCapability(v *[]Capability, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Capability{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Capability, yyrl1) + } + } else { + yyv1 = make([]Capability, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 Capability + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Capability{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceContainerPort(v []ContainerPort, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceContainerPort(v *[]ContainerPort, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ContainerPort{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ContainerPort, yyrl1) + } + } else { + yyv1 = make([]ContainerPort, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerPort{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ContainerPort{}) + 
yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerPort{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ContainerPort{}) // var yyz1 ContainerPort + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerPort{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ContainerPort{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEnvFromSource(v []EnvFromSource, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceEnvFromSource(v *[]EnvFromSource, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []EnvFromSource{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]EnvFromSource, yyrl1) + } + } else { + yyv1 = make([]EnvFromSource, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EnvFromSource{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, EnvFromSource{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EnvFromSource{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, EnvFromSource{}) // var yyz1 EnvFromSource + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = EnvFromSource{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []EnvFromSource{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEnvVar(v []EnvVar, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) 
decSliceEnvVar(v *[]EnvVar, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []EnvVar{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]EnvVar, yyrl1) + } + } else { + yyv1 = make([]EnvVar, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EnvVar{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, EnvVar{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EnvVar{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, EnvVar{}) // var yyz1 EnvVar + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = EnvVar{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []EnvVar{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceVolumeMount(v []VolumeMount, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceVolumeMount(v *[]VolumeMount, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []VolumeMount{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]VolumeMount, yyrl1) + } + } else { + yyv1 = make([]VolumeMount, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = VolumeMount{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, VolumeMount{}) + yyh1.ElemContainerState(yyj1) + if 
r.TryDecodeAsNil() { + yyv1[yyj1] = VolumeMount{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, VolumeMount{}) // var yyz1 VolumeMount + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = VolumeMount{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []VolumeMount{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNodeSelectorTerm(v []NodeSelectorTerm, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNodeSelectorTerm(v *[]NodeSelectorTerm, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NodeSelectorTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]NodeSelectorTerm, yyrl1) + } + } else { + yyv1 = make([]NodeSelectorTerm, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeSelectorTerm{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NodeSelectorTerm{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeSelectorTerm{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NodeSelectorTerm{}) // var yyz1 NodeSelectorTerm + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeSelectorTerm{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NodeSelectorTerm{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNodeSelectorRequirement(v []NodeSelectorRequirement, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x 
codecSelfer1234) decSliceNodeSelectorRequirement(v *[]NodeSelectorRequirement, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NodeSelectorRequirement{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]NodeSelectorRequirement, yyrl1) + } + } else { + yyv1 = make([]NodeSelectorRequirement, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeSelectorRequirement{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NodeSelectorRequirement{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeSelectorRequirement{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NodeSelectorRequirement{}) // var yyz1 NodeSelectorRequirement + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeSelectorRequirement{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NodeSelectorRequirement{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePodAffinityTerm(v []PodAffinityTerm, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePodAffinityTerm(v *[]PodAffinityTerm, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodAffinityTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PodAffinityTerm, yyrl1) + } + } else { + yyv1 = make([]PodAffinityTerm, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if 
r.TryDecodeAsNil() { + yyv1[yyj1] = PodAffinityTerm{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodAffinityTerm{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodAffinityTerm{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodAffinityTerm{}) // var yyz1 PodAffinityTerm + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodAffinityTerm{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodAffinityTerm{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceWeightedPodAffinityTerm(v []WeightedPodAffinityTerm, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceWeightedPodAffinityTerm(v *[]WeightedPodAffinityTerm, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []WeightedPodAffinityTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]WeightedPodAffinityTerm, yyrl1) + } + } else { + yyv1 = make([]WeightedPodAffinityTerm, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = WeightedPodAffinityTerm{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, WeightedPodAffinityTerm{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = WeightedPodAffinityTerm{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, WeightedPodAffinityTerm{}) // var yyz1 WeightedPodAffinityTerm + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = WeightedPodAffinityTerm{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []WeightedPodAffinityTerm{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePreferredSchedulingTerm(v 
[]PreferredSchedulingTerm, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePreferredSchedulingTerm(v *[]PreferredSchedulingTerm, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PreferredSchedulingTerm{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PreferredSchedulingTerm, yyrl1) + } + } else { + yyv1 = make([]PreferredSchedulingTerm, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PreferredSchedulingTerm{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PreferredSchedulingTerm{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PreferredSchedulingTerm{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PreferredSchedulingTerm{}) // var yyz1 PreferredSchedulingTerm + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PreferredSchedulingTerm{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PreferredSchedulingTerm{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceVolume(v []Volume, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceVolume(v *[]Volume, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Volume{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 224) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } 
else { + yyv1 = make([]Volume, yyrl1) + } + } else { + yyv1 = make([]Volume, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Volume{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Volume{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Volume{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Volume{}) // var yyz1 Volume + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Volume{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Volume{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceContainer(v []Container, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceContainer(v *[]Container, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Container{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Container, yyrl1) + } + } else { + yyv1 = make([]Container, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Container{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Container{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Container{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Container{}) // var yyz1 Container + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Container{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Container{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x 
codecSelfer1234) encSliceLocalObjectReference(v []LocalObjectReference, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceLocalObjectReference(v *[]LocalObjectReference, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []LocalObjectReference{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]LocalObjectReference, yyrl1) + } + } else { + yyv1 = make([]LocalObjectReference, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LocalObjectReference{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, LocalObjectReference{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LocalObjectReference{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, LocalObjectReference{}) // var yyz1 LocalObjectReference + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = LocalObjectReference{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []LocalObjectReference{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceToleration(v []Toleration, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceToleration(v *[]Toleration, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Toleration{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { 
+ yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Toleration, yyrl1) + } + } else { + yyv1 = make([]Toleration, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Toleration{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Toleration{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Toleration{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Toleration{}) // var yyz1 Toleration + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Toleration{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Toleration{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePodCondition(v []PodCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePodCondition(v *[]PodCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PodCondition, yyrl1) + } + } else { + yyv1 = make([]PodCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodCondition{}) // var yyz1 PodCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = 
[]PodCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceContainerStatus(v []ContainerStatus, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceContainerStatus(v *[]ContainerStatus, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ContainerStatus{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 120) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ContainerStatus, yyrl1) + } + } else { + yyv1 = make([]ContainerStatus, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerStatus{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ContainerStatus{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerStatus{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ContainerStatus{}) // var yyz1 ContainerStatus + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerStatus{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ContainerStatus{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePod(v []Pod, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePod(v *[]Pod, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Pod{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 736) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = 
yyv1[:yyrl1] + } else { + yyv1 = make([]Pod, yyrl1) + } + } else { + yyv1 = make([]Pod, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Pod{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Pod{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Pod{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Pod{}) // var yyz1 Pod + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Pod{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Pod{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePodTemplate(v []PodTemplate, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePodTemplate(v *[]PodTemplate, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodTemplate{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 784) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PodTemplate, yyrl1) + } + } else { + yyv1 = make([]PodTemplate, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodTemplate{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodTemplate{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodTemplate{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodTemplate{}) // var yyz1 PodTemplate + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodTemplate{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodTemplate{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + 
+func (x codecSelfer1234) encSliceReplicationControllerCondition(v []ReplicationControllerCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceReplicationControllerCondition(v *[]ReplicationControllerCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ReplicationControllerCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 88) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ReplicationControllerCondition, yyrl1) + } + } else { + yyv1 = make([]ReplicationControllerCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicationControllerCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ReplicationControllerCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicationControllerCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ReplicationControllerCondition{}) // var yyz1 ReplicationControllerCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicationControllerCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ReplicationControllerCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceReplicationController(v []ReplicationController, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceReplicationController(v *[]ReplicationController, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ReplicationController{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + 
yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 336) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ReplicationController, yyrl1) + } + } else { + yyv1 = make([]ReplicationController, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicationController{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ReplicationController{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicationController{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ReplicationController{}) // var yyz1 ReplicationController + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicationController{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ReplicationController{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceLoadBalancerIngress(v []LoadBalancerIngress, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceLoadBalancerIngress(v *[]LoadBalancerIngress, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []LoadBalancerIngress{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]LoadBalancerIngress, yyrl1) + } + } else { + yyv1 = make([]LoadBalancerIngress, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LoadBalancerIngress{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, LoadBalancerIngress{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LoadBalancerIngress{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, 
LoadBalancerIngress{}) // var yyz1 LoadBalancerIngress + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = LoadBalancerIngress{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []LoadBalancerIngress{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceServicePort(v []ServicePort, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceServicePort(v *[]ServicePort, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ServicePort{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 80) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ServicePort, yyrl1) + } + } else { + yyv1 = make([]ServicePort, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServicePort{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ServicePort{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServicePort{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ServicePort{}) // var yyz1 ServicePort + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServicePort{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ServicePort{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceService(v []Service, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceService(v *[]Service, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Service{} + yyc1 
= true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 464) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Service, yyrl1) + } + } else { + yyv1 = make([]Service, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Service{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Service{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Service{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Service{}) // var yyz1 Service + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Service{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Service{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceObjectReference(v []ObjectReference, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceObjectReference(v *[]ObjectReference, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ObjectReference{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ObjectReference, yyrl1) + } + } else { + yyv1 = make([]ObjectReference, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ObjectReference{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ObjectReference{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ObjectReference{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, 
ObjectReference{}) // var yyz1 ObjectReference + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ObjectReference{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ObjectReference{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceServiceAccount(v []ServiceAccount, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceServiceAccount(v *[]ServiceAccount, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ServiceAccount{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 312) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ServiceAccount, yyrl1) + } + } else { + yyv1 = make([]ServiceAccount, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServiceAccount{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ServiceAccount{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServiceAccount{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ServiceAccount{}) // var yyz1 ServiceAccount + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ServiceAccount{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ServiceAccount{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEndpointSubset(v []EndpointSubset, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceEndpointSubset(v *[]EndpointSubset, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 
== 0 { + if yyv1 == nil { + yyv1 = []EndpointSubset{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]EndpointSubset, yyrl1) + } + } else { + yyv1 = make([]EndpointSubset, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointSubset{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, EndpointSubset{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointSubset{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, EndpointSubset{}) // var yyz1 EndpointSubset + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointSubset{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []EndpointSubset{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEndpointAddress(v []EndpointAddress, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceEndpointAddress(v *[]EndpointAddress, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []EndpointAddress{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]EndpointAddress, yyrl1) + } + } else { + yyv1 = make([]EndpointAddress, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointAddress{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, EndpointAddress{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointAddress{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } 
+ + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, EndpointAddress{}) // var yyz1 EndpointAddress + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointAddress{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []EndpointAddress{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEndpointPort(v []EndpointPort, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceEndpointPort(v *[]EndpointPort, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []EndpointPort{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]EndpointPort, yyrl1) + } + } else { + yyv1 = make([]EndpointPort, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointPort{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, EndpointPort{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointPort{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, EndpointPort{}) // var yyz1 EndpointPort + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = EndpointPort{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []EndpointPort{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEndpoints(v []Endpoints, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceEndpoints(v *[]Endpoints, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := 
z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Endpoints{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Endpoints, yyrl1) + } + } else { + yyv1 = make([]Endpoints, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Endpoints{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Endpoints{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Endpoints{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Endpoints{}) // var yyz1 Endpoints + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Endpoints{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Endpoints{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceTaint(v []Taint, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceTaint(v *[]Taint, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Taint{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Taint, yyrl1) + } + } else { + yyv1 = make([]Taint, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Taint{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Taint{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Taint{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + 
yyv1 = append(yyv1, Taint{}) // var yyz1 Taint + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Taint{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Taint{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNodeCondition(v []NodeCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNodeCondition(v *[]NodeCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NodeCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]NodeCondition, yyrl1) + } + } else { + yyv1 = make([]NodeCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NodeCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NodeCondition{}) // var yyz1 NodeCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NodeCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNodeAddress(v []NodeAddress, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNodeAddress(v *[]NodeAddress, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = 
[]NodeAddress{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]NodeAddress, yyrl1) + } + } else { + yyv1 = make([]NodeAddress, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeAddress{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NodeAddress{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeAddress{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NodeAddress{}) // var yyz1 NodeAddress + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = NodeAddress{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NodeAddress{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceContainerImage(v []ContainerImage, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceContainerImage(v *[]ContainerImage, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ContainerImage{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ContainerImage, yyrl1) + } + } else { + yyv1 = make([]ContainerImage, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerImage{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ContainerImage{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerImage{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= 
len(yyv1) { + yyv1 = append(yyv1, ContainerImage{}) // var yyz1 ContainerImage + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ContainerImage{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ContainerImage{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceUniqueVolumeName(v []UniqueVolumeName, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yyv1.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceUniqueVolumeName(v *[]UniqueVolumeName, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []UniqueVolumeName{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]UniqueVolumeName, yyrl1) + } + } else { + yyv1 = make([]UniqueVolumeName, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 UniqueVolumeName + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []UniqueVolumeName{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceAttachedVolume(v []AttachedVolume, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceAttachedVolume(v *[]AttachedVolume, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []AttachedVolume{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] 
+ yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]AttachedVolume, yyrl1) + } + } else { + yyv1 = make([]AttachedVolume, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = AttachedVolume{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, AttachedVolume{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = AttachedVolume{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, AttachedVolume{}) // var yyz1 AttachedVolume + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = AttachedVolume{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []AttachedVolume{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePreferAvoidPodsEntry(v []PreferAvoidPodsEntry, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePreferAvoidPodsEntry(v *[]PreferAvoidPodsEntry, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PreferAvoidPodsEntry{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PreferAvoidPodsEntry, yyrl1) + } + } else { + yyv1 = make([]PreferAvoidPodsEntry, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PreferAvoidPodsEntry{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PreferAvoidPodsEntry{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PreferAvoidPodsEntry{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + 
if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PreferAvoidPodsEntry{}) // var yyz1 PreferAvoidPodsEntry + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PreferAvoidPodsEntry{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PreferAvoidPodsEntry{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encResourceList(v ResourceList, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yyk1.CodecEncodeSelf(e) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy3 := &yyv1 + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(yy3) { + } else if !yym4 && z.IsJSONHandle() { + z.EncJSONMarshal(yy3) + } else { + z.EncFallback(yy3) + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decResourceList(v *ResourceList, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 72) + yyv1 = make(map[ResourceName]pkg3_resource.Quantity, yyrl1) + *v = yyv1 + } + var yymk1 ResourceName + var yymv1 pkg3_resource.Quantity + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yyv2.CodecDecodeSelf(d) + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = pkg3_resource.Quantity{} + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = pkg3_resource.Quantity{} + } else { + yyv3 := &yymv1 + yym4 := z.DecBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.DecExt(yyv3) { + } else if !yym4 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv3) + } else { + z.DecFallback(yyv3, false) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv5 := &yymk1 + yyv5.CodecDecodeSelf(d) + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = pkg3_resource.Quantity{} + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = pkg3_resource.Quantity{} + } else { + yyv6 := &yymv1 + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? 
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encSliceNode(v []Node, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNode(v *[]Node, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Node{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 656) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Node, yyrl1) + } + } else { + yyv1 = make([]Node, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Node{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Node{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Node{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Node{}) // var yyz1 Node + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Node{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Node{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceFinalizerName(v []FinalizerName, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yyv1.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceFinalizerName(v *[]FinalizerName, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []FinalizerName{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]FinalizerName, yyrl1) + } + } else { + yyv1 = make([]FinalizerName, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != 
len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 FinalizerName + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []FinalizerName{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNamespace(v []Namespace, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNamespace(v *[]Namespace, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Namespace{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Namespace, yyrl1) + } + } else { + yyv1 = make([]Namespace, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Namespace{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Namespace{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Namespace{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Namespace{}) // var yyz1 Namespace + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Namespace{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Namespace{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceEvent(v []Event, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v 
{ + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceEvent(v *[]Event, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Event{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 504) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Event, yyrl1) + } + } else { + yyv1 = make([]Event, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Event{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Event{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Event{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Event{}) // var yyz1 Event + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Event{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Event{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceruntime_RawExtension(v []pkg5_runtime.RawExtension, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yym3 := z.EncBinary() + _ = yym3 + if false { + } else if z.HasExtensions() && z.EncExt(yy2) { + } else if !yym3 && z.IsJSONHandle() { + z.EncJSONMarshal(yy2) + } else { + z.EncFallback(yy2) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceruntime_RawExtension(v *[]pkg5_runtime.RawExtension, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg5_runtime.RawExtension{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]pkg5_runtime.RawExtension, yyrl1) + } + } else { + yyv1 = make([]pkg5_runtime.RawExtension, yyrl1) + } + yyc1 = true + yyrr1 = 
len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg5_runtime.RawExtension{} + } else { + yyv2 := &yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else if z.HasExtensions() && z.DecExt(yyv2) { + } else if !yym3 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv2) + } else { + z.DecFallback(yyv2, false) + } + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, pkg5_runtime.RawExtension{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg5_runtime.RawExtension{} + } else { + yyv4 := &yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv4) + } else { + z.DecFallback(yyv4, false) + } + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, pkg5_runtime.RawExtension{}) // var yyz1 pkg5_runtime.RawExtension + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg5_runtime.RawExtension{} + } else { + yyv6 := &yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg5_runtime.RawExtension{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceLimitRangeItem(v []LimitRangeItem, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceLimitRangeItem(v *[]LimitRangeItem, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []LimitRangeItem{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]LimitRangeItem, yyrl1) + } + } else { + yyv1 = make([]LimitRangeItem, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LimitRangeItem{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, LimitRangeItem{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LimitRangeItem{} + } else { + yyv3 := &yyv1[yyj1] + 
yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, LimitRangeItem{}) // var yyz1 LimitRangeItem + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = LimitRangeItem{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []LimitRangeItem{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceLimitRange(v []LimitRange, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceLimitRange(v *[]LimitRange, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []LimitRange{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]LimitRange, yyrl1) + } + } else { + yyv1 = make([]LimitRange, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LimitRange{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, LimitRange{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = LimitRange{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, LimitRange{}) // var yyz1 LimitRange + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = LimitRange{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []LimitRange{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceResourceQuotaScope(v []ResourceQuotaScope, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yyv1.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceResourceQuotaScope(v *[]ResourceQuotaScope, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + 
yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ResourceQuotaScope{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ResourceQuotaScope, yyrl1) + } + } else { + yyv1 = make([]ResourceQuotaScope, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 ResourceQuotaScope + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ResourceQuotaScope{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceResourceQuota(v []ResourceQuota, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceResourceQuota(v *[]ResourceQuota, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ResourceQuota{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 304) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ResourceQuota, yyrl1) + } + } else { + yyv1 = make([]ResourceQuota, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ResourceQuota{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ResourceQuota{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ResourceQuota{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { 
+ + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ResourceQuota{}) // var yyz1 ResourceQuota + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ResourceQuota{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ResourceQuota{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encMapstringSliceuint8(v map[string][]uint8, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyv1 == nil { + r.EncodeNil() + } else { + yym3 := z.EncBinary() + _ = yym3 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(yyv1)) + } + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decMapstringSliceuint8(v *map[string][]uint8, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string][]uint8, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 []uint8 + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv4 := &yymv1 + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *yyv4 = r.DecodeBytes(*(*[]byte)(yyv4), false, false) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv6 := &yymk1 + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv8 := &yymv1 + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *yyv8 = r.DecodeBytes(*(*[]byte)(yyv8), false, false) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? 
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encSliceuint8(v []uint8, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(v)) +} + +func (x codecSelfer1234) decSliceuint8(v *[]uint8, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + *v = r.DecodeBytes(*((*[]byte)(v)), false, false) +} + +func (x codecSelfer1234) encSliceSecret(v []Secret, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceSecret(v *[]Secret, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Secret{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 288) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Secret, yyrl1) + } + } else { + yyv1 = make([]Secret, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Secret{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Secret{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Secret{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Secret{}) // var yyz1 Secret + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Secret{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Secret{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceConfigMap(v []ConfigMap, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceConfigMap(v *[]ConfigMap, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ConfigMap{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] 
+ yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ConfigMap, yyrl1) + } + } else { + yyv1 = make([]ConfigMap, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ConfigMap{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ConfigMap{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ConfigMap{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ConfigMap{}) // var yyz1 ConfigMap + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ConfigMap{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ConfigMap{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceComponentCondition(v []ComponentCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceComponentCondition(v *[]ComponentCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ComponentCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ComponentCondition, yyrl1) + } + } else { + yyv1 = make([]ComponentCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ComponentCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ComponentCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ComponentCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, 
ComponentCondition{}) // var yyz1 ComponentCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ComponentCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ComponentCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceComponentStatus(v []ComponentStatus, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceComponentStatus(v *[]ComponentStatus, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ComponentStatus{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ComponentStatus, yyrl1) + } + } else { + yyv1 = make([]ComponentStatus, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ComponentStatus{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ComponentStatus{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ComponentStatus{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ComponentStatus{}) // var yyz1 ComponentStatus + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ComponentStatus{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ComponentStatus{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceDownwardAPIVolumeFile(v []DownwardAPIVolumeFile, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceDownwardAPIVolumeFile(v *[]DownwardAPIVolumeFile, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := 
z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []DownwardAPIVolumeFile{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]DownwardAPIVolumeFile, yyrl1) + } + } else { + yyv1 = make([]DownwardAPIVolumeFile, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = DownwardAPIVolumeFile{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, DownwardAPIVolumeFile{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = DownwardAPIVolumeFile{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, DownwardAPIVolumeFile{}) // var yyz1 DownwardAPIVolumeFile + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = DownwardAPIVolumeFile{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []DownwardAPIVolumeFile{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/types.go b/vendor/k8s.io/client-go/pkg/api/v1/types.go new file mode 100644 index 000000000..a75a1d0f0 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/types.go @@ -0,0 +1,4380 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// The comments for the structs and fields can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored and not exported to the SwaggerAPI. 
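[Editorial aside — not part of the vendored diff.] The generated decSlice* helpers earlier in this file all follow one slice-reuse pattern: keep the existing backing array when it is large enough, otherwise allocate (with the first allocation bounded via DecInferLen) and copy over what was already decoded. A minimal, hand-written sketch of that pattern, using a plain []string and a hypothetical ensureLen helper that is not part of the generated code:

package main

import "fmt"

// ensureLen mirrors the slice handling in the generated decoders: reuse the
// existing backing array when its capacity suffices, otherwise allocate a new
// slice and copy over the elements decoded so far. (Illustrative only; the
// real decoders additionally bound the initial allocation via DecInferLen so
// a hostile length prefix cannot force a huge up-front allocation.)
func ensureLen(dst []string, n int) []string {
	if n <= cap(dst) {
		return dst[:n]
	}
	grown := make([]string, n)
	copy(grown, dst)
	return grown
}

func main() {
	s := make([]string, 0, 2)
	s = ensureLen(s, 2)         // fits in the existing capacity, no allocation
	s = ensureLen(s, 5)         // grows: allocate and copy
	fmt.Println(len(s), cap(s)) // 5 5
}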
+// +// The aforementioned methods can be generated by hack/update-generated-swagger-docs.sh + +// Common string formats +// --------------------- +// Many fields in this API have formatting requirements. The commonly used +// formats are defined here. +// +// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier" +// in the C language. This is captured by the following regex: +// [A-Za-z_][A-Za-z0-9_]* +// This defines the format, but not the length restriction, which should be +// specified at the definition of any field of this type. +// +// DNS_LABEL: This is a string, no more than 63 characters long, that conforms +// to the definition of a "label" in RFCs 1035 and 1123. This is captured +// by the following regex: +// [a-z0-9]([-a-z0-9]*[a-z0-9])? +// +// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms +// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured +// by the following regex: +// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* +// or more simply: +// DNS_LABEL(\.DNS_LABEL)* +// +// IANA_SVC_NAME: This is a string, no more than 15 characters long, that +// conforms to the definition of IANA service name in RFC 6335. +// It must contains at least one letter [a-z] and it must contains only [a-z0-9-]. +// Hypens ('-') cannot be leading or trailing character of the string +// and cannot be adjacent to other hyphens. + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. +// +k8s:openapi-gen=false +type ObjectMeta struct { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // than the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. + // + // If this field is specified and the generated name exists, the server will + // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // + // Applied only if Name is not specified. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency + // +optional + GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` + + // Namespace defines the space within each name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. 
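[Editorial aside — not part of the vendored diff.] The string formats documented above are just regular expressions plus length caps, so they are easy to check in isolation. A self-contained sketch using the regexes quoted above; the helper names isDNSLabel and isDNSSubdomain are made up for illustration, and real API validation is done by Kubernetes' own validation helpers rather than code like this:

package main

import (
	"fmt"
	"regexp"
)

// DNS_LABEL: the documented regex plus the 63-character limit.
var dnsLabelRE = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`)

func isDNSLabel(s string) bool {
	return len(s) <= 63 && dnsLabelRE.MatchString(s)
}

// DNS_SUBDOMAIN: one or more DNS_LABELs joined by dots, at most 253 characters.
var dnsSubdomainRE = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)

func isDNSSubdomain(s string) bool {
	return len(s) <= 253 && dnsSubdomainRE.MatchString(s)
}

func main() {
	fmt.Println(isDNSLabel("my-pod"))      // true
	fmt.Println(isDNSLabel("My_Pod"))      // false: uppercase and underscore
	fmt.Println(isDNSSubdomain("a.b-c.d")) // true
}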
+ // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // + // Must be a DNS_LABEL. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` + + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // +optional + SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"` + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // + // Populated by the system. + // Read-only. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + + // An opaque value that represents the internal version of this object that can + // be used by clients to determine when objects have changed. May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and passed unmodified back to the server. + // They may only be valid for a particular resource or set of resources. + // + // Populated by the system. + // Read-only. + // Value must be treated as opaque by clients and . + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. Read-only. + // +optional + Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"` + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. + // + // Populated by the system. + // Read-only. + // Null for lists. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + CreationTimestamp metav1.Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"` + + // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This + // field is set by the server when a graceful deletion is requested by the user, and is not + // directly settable by a client. The resource is expected to be deleted (no longer visible + // from resource lists, and not reachable by name) after the time in this field. Once set, + // this value may not be unset or be set further into the future, although it may be shortened + // or the resource may be deleted prior to this time. For example, a user may request that + // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination + // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard + // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the + // API. 
In the presence of network partitions, this object may still exist after this + // timestamp, until an administrator or automated process can determine the resource is + // fully terminated. + // If not set, graceful deletion of the object has not been requested. + // + // Populated by the system when a graceful deletion is requested. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + DeletionTimestamp *metav1.Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` + + // Number of seconds allowed for this object to gracefully terminate before + // it will be removed from the system. Only set when deletionTimestamp is also set. + // May only be shortened. + // Read-only. + // +optional + DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: http://kubernetes.io/docs/user-guide/labels + // +optional + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + OwnerReferences []metav1.OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + // +optional + Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"` + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + // +optional + ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"` +} + +const ( + // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients + NamespaceDefault string = "default" + // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces + NamespaceAll string = "" +) + +// Volume represents a named volume in a pod that may be accessed by any container in the pod. +type Volume struct { + // Volume's name. + // Must be a DNS_LABEL and unique within the pod. 
+ // More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // VolumeSource represents the location and type of the mounted volume. + // If not specified, the Volume is implied to be an EmptyDir. + // This implied behavior is deprecated and will be removed in a future version. + VolumeSource `json:",inline" protobuf:"bytes,2,opt,name=volumeSource"` +} + +// Represents the source of a volume to mount. +// Only one of its members may be specified. +type VolumeSource struct { + // HostPath represents a pre-existing file or directory on the host + // machine that is directly exposed to the container. This is generally + // used for system agents or other privileged things that are allowed + // to see the host machine. Most containers will NOT need this. + // More info: http://kubernetes.io/docs/user-guide/volumes#hostpath + // --- + // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + // mount host directories as read/write. + // +optional + HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"` + // EmptyDir represents a temporary directory that shares a pod's lifetime. + // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // +optional + EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"` + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"` + // AWSElasticBlockStore represents an AWS Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"` + // GitRepo represents a git repository at a particular revision. + // +optional + GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"` + // Secret represents a secret that should populate this volume. + // More info: http://kubernetes.io/docs/user-guide/volumes#secrets + // +optional + Secret *SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"` + // NFS represents an NFS mount on the host that shares a pod's lifetime + // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // +optional + NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"` + // ISCSI represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: http://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md + // +optional + ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"` + // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // +optional + Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"` + // PersistentVolumeClaimVolumeSource represents a reference to a + // PersistentVolumeClaim in the same namespace. 
+ // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // +optional + PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"` + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // +optional + RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"` + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. This is an + // alpha feature and may change in future. + // +optional + FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"` + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"` + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"` + // DownwardAPI represents downward API about the pod that should populate this volume + // +optional + DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty" protobuf:"bytes,16,opt,name=downwardAPI"` + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"` + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"` + // ConfigMap represents a configMap that should populate this volume + // +optional + ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"` + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"` + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"` + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. 
+ // +optional + AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"` + // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"` + // Items for all in one resources secrets, configmaps, and downward API + Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"` + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"` + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"` +} + +// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. +// This volume finds the bound PV and mounts that volume for the pod. A +// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another +// type of volume that is owned by someone else (the system). +type PersistentVolumeClaimVolumeSource struct { + // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + ClaimName string `json:"claimName" protobuf:"bytes,1,opt,name=claimName"` + // Will force the ReadOnly setting in VolumeMounts. + // Default false. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` +} + +// PersistentVolumeSource is similar to VolumeSource but meant for the +// administrator who creates PVs. Exactly one of its members must be set. +type PersistentVolumeSource struct { + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. Provisioned by an admin. + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"` + // AWSElasticBlockStore represents an AWS Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"` + // HostPath represents a directory on the host. + // Provisioned by a developer or tester. + // This is useful for single-node development and testing only! + // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. + // More info: http://kubernetes.io/docs/user-guide/volumes#hostpath + // +optional + HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"` + // Glusterfs represents a Glusterfs volume that is attached to a host and + // exposed to the pod. Provisioned by an admin. 
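[Editorial aside — not part of the vendored diff.] The Volume/VolumeSource types defined above boil down to a name plus exactly one populated VolumeSource member. A minimal sketch, assuming the vendored import path from this diff (k8s.io/client-go/pkg/api/v1) resolves; "scratch" is a placeholder name:

package main

import (
	"fmt"

	v1 "k8s.io/client-go/pkg/api/v1"
)

func main() {
	// A Volume names the mount and sets exactly one VolumeSource member;
	// here the pod-scoped EmptyDir, which needs no further configuration.
	vol := v1.Volume{
		Name: "scratch", // must be a DNS_LABEL, unique within the pod
		VolumeSource: v1.VolumeSource{
			EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumDefault},
		},
	}
	fmt.Printf("%+v\n", vol)
}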
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // +optional + Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` + // NFS represents an NFS mount on the host. Provisioned by an admin. + // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // +optional + NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"` + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // +optional + RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"` + // ISCSI represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. Provisioned by an admin. + // +optional + ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"` + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"` + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"` + // Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"` + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. This is an + // alpha feature and may change in future. + // +optional + FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"` + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"` + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"` + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. 
+ // +optional + AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"` + // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"` + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"` + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"` +} + +const ( + // AlphaStorageClassAnnotation represents the previous alpha storage class + // annotation. It's currently still used and will be held for backwards + // compatibility + AlphaStorageClassAnnotation = "volume.alpha.kubernetes.io/storage-class" + + // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. + // It's currently still used and will be held for backwards compatibility + BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" +) + +// +genclient=true +// +nonNamespaced=true + +// PersistentVolume (PV) is a storage resource provisioned by an administrator. +// It is analogous to a node. +// More info: http://kubernetes.io/docs/user-guide/persistent-volumes +type PersistentVolume struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines a specification of a persistent volume owned by the cluster. + // Provisioned by an administrator. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes + // +optional + Spec PersistentVolumeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status represents the current information/status for the persistent volume. + // Populated by the system. + // Read-only. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes + // +optional + Status PersistentVolumeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PersistentVolumeSpec is the specification of a persistent volume. +type PersistentVolumeSpec struct { + // A description of the persistent volume's resources and capacity. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity + // +optional + Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` + // The actual volume backing the persistent volume. + PersistentVolumeSource `json:",inline" protobuf:"bytes,2,opt,name=persistentVolumeSource"` + // AccessModes contains all ways the volume can be mounted. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes + // +optional + AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` + // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. + // Expected to be non-nil when bound. + // claim.VolumeName is the authoritative bind between PV and PVC. 
+ // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#binding + // +optional + ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"` + // What happens to a persistent volume when released from its claim. + // Valid options are Retain (default) and Recycle. + // Recycling must be supported by the volume plugin underlying this persistent volume. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy + // +optional + PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"` + // Name of StorageClass to which this persistent volume belongs. Empty value + // means that this volume does not belong to any StorageClass. + // +optional + StorageClassName string `json:"storageClassName,omitempty" protobuf:"bytes,6,opt,name=storageClassName"` +} + +// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes. +type PersistentVolumeReclaimPolicy string + +const ( + // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim. + // The volume plugin must support Recycling. + PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle" + // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. + // The volume plugin must support Deletion. + PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" + // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. + // The default policy is Retain. + PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" +) + +// PersistentVolumeStatus is the current status of a persistent volume. +type PersistentVolumeStatus struct { + // Phase indicates if a volume is available, bound to a claim, or released by a claim. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#phase + // +optional + Phase PersistentVolumePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumePhase"` + // A human-readable message indicating details about why the volume is in this state. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // Reason is a brief CamelCase string that describes any failure and is meant + // for machine parsing and tidy display in the CLI. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` +} + +// PersistentVolumeList is a list of PersistentVolume items. +type PersistentVolumeList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of persistent volumes. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes + Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true + +// PersistentVolumeClaim is a user's request for and claim to a persistent volume +type PersistentVolumeClaim struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired characteristics of a volume requested by a pod author. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // +optional + Spec PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status represents the current information/status of a persistent volume claim. + // Read-only. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + // +optional + Status PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PersistentVolumeClaimList is a list of PersistentVolumeClaim items. +type PersistentVolumeClaimList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // A list of persistent volume claims. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims + Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// PersistentVolumeClaimSpec describes the common attributes of storage devices +// and allows a Source for provider-specific attributes +type PersistentVolumeClaimSpec struct { + // AccessModes contains the desired access modes the volume should have. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 + // +optional + AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` + // A label query over volumes to consider for binding. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` + // Resources represents the minimum resources the volume should have. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources + // +optional + Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"` + // VolumeName is the binding reference to the PersistentVolume backing this claim. + // +optional + VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"` + // Name of the StorageClass required by the claim. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1 + // +optional + StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"` +} + +// PersistentVolumeClaimStatus is the current status of a persistent volume claim. +type PersistentVolumeClaimStatus struct { + // Phase represents the current phase of PersistentVolumeClaim. + // +optional + Phase PersistentVolumeClaimPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumeClaimPhase"` + // AccessModes contains the actual access modes the volume backing the PVC has. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1 + // +optional + AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` + // Represents the actual resources of the underlying volume. 
+ // +optional + Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` +} + +type PersistentVolumeAccessMode string + +const ( + // can be mounted read/write mode to exactly 1 host + ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" + // can be mounted in read-only mode to many hosts + ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" + // can be mounted in read/write mode to many hosts + ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany" +) + +type PersistentVolumePhase string + +const ( + // used for PersistentVolumes that are not available + VolumePending PersistentVolumePhase = "Pending" + // used for PersistentVolumes that are not yet bound + // Available volumes are held by the binder and matched to PersistentVolumeClaims + VolumeAvailable PersistentVolumePhase = "Available" + // used for PersistentVolumes that are bound + VolumeBound PersistentVolumePhase = "Bound" + // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted + // released volumes must be recycled before becoming available again + // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource + VolumeReleased PersistentVolumePhase = "Released" + // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim + VolumeFailed PersistentVolumePhase = "Failed" +) + +type PersistentVolumeClaimPhase string + +const ( + // used for PersistentVolumeClaims that are not yet bound + ClaimPending PersistentVolumeClaimPhase = "Pending" + // used for PersistentVolumeClaims that are bound + ClaimBound PersistentVolumeClaimPhase = "Bound" + // used for PersistentVolumeClaims that lost their underlying + // PersistentVolume. The claim was bound to a PersistentVolume and this + // volume does not exist any longer and all data on it was lost. + ClaimLost PersistentVolumeClaimPhase = "Lost" +) + +// Represents a host path mapped into a pod. +// Host path volumes do not support ownership management or SELinux relabeling. +type HostPathVolumeSource struct { + // Path of the directory on the host. + // More info: http://kubernetes.io/docs/user-guide/volumes#hostpath + Path string `json:"path" protobuf:"bytes,1,opt,name=path"` +} + +// Represents an empty directory for a pod. +// Empty directory volumes support ownership management and SELinux relabeling. +type EmptyDirVolumeSource struct { + // What type of storage medium should back this directory. + // The default is "" which means to use the node's default medium. + // Must be an empty string (default) or Memory. + // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // +optional + Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"` +} + +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +type GlusterfsVolumeSource struct { + // EndpointsName is the endpoint name that details Glusterfs topology. + // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` + + // Path is the Glusterfs volume path. 
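[Editorial aside — not part of the vendored diff.] A PersistentVolumeClaim, by contrast, only states what the pod author wants — access modes and, optionally, a StorageClass — and leaves binding to the control plane. A minimal sketch under the same vendored-import assumption; "data" and "standard" are placeholder names:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1 "k8s.io/client-go/pkg/api/v1"
)

func main() {
	class := "standard" // placeholder StorageClass name
	pvc := v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "data", Namespace: "default"},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			StorageClassName: &class, // pointer: nil means "no class requested"
		},
	}
	fmt.Printf("%+v\n", pvc)
}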
+ // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + Path string `json:"path" protobuf:"bytes,2,opt,name=path"` + + // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // Defaults to false. + // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +type RBDVolumeSource struct { + // A collection of Ceph monitors. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` + // The rados image name. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: http://kubernetes.io/docs/user-guide/volumes#rbd + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` + // The rados pool name. + // Default is rbd. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it. + // +optional + RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` + // The rados user name. + // Default is admin. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` + // Keyring is the path to key ring for RBDUser. + // Default is /etc/ceph/keyring. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` + // SecretRef is name of the authentication secret for RBDUser. If provided + // overrides keyring. + // Default is nil. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` +} + +// Represents a cinder volume resource in Openstack. +// A Cinder volume must exist before mounting to a container. +// The volume must also be in the same region as the kubelet. +// Cinder volumes support ownership management and SELinux relabeling. +type CinderVolumeSource struct { + // volume id used to identify the volume in cinder + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + +// Represents a Ceph Filesystem mount that lasts the lifetime of a pod +// Cephfs volumes do not support ownership management or SELinux relabeling. +type CephFSVolumeSource struct { + // Required: Monitors is a collection of Ceph monitors + // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` + // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` + // Optional: User is the rados user name, default is admin + // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` + // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` +} + +// Represents a Flocker volume mounted by the Flocker agent. +// One and only one of datasetName and datasetUUID should be set. +// Flocker volumes do not support ownership management or SELinux relabeling. +type FlockerVolumeSource struct { + // Name of the dataset stored as metadata -> name on the dataset for Flocker + // should be considered as deprecated + // +optional + DatasetName string `json:"datasetName,omitempty" protobuf:"bytes,1,opt,name=datasetName"` + // UUID of the dataset. This is unique identifier of a Flocker dataset + // +optional + DatasetUUID string `json:"datasetUUID,omitempty" protobuf:"bytes,2,opt,name=datasetUUID"` +} + +// StorageMedium defines ways that storage can be allocated to a volume. +type StorageMedium string + +const ( + StorageMediumDefault StorageMedium = "" // use whatever the default is for the node + StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) +) + +// Protocol defines network protocols supported for things like container ports. +type Protocol string + +const ( + // ProtocolTCP is the TCP protocol. + ProtocolTCP Protocol = "TCP" + // ProtocolUDP is the UDP protocol. + ProtocolUDP Protocol = "UDP" +) + +// Represents a Persistent Disk resource in Google Compute Engine. +// +// A GCE PD must exist before mounting to a container. 
The disk must +// also be in the same GCE project and zone as the kubelet. A GCE PD +// can only be mounted as read/write once or read-only many times. GCE +// PDs support ownership management and SELinux relabeling. +type GCEPersistentDiskVolumeSource struct { + // Unique name of the PD resource in GCE. Used to identify the disk in GCE. + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + PDName string `json:"pdName" protobuf:"bytes,1,opt,name=pdName"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // The partition in the volume that you want to mount. + // If omitted, the default is to mount by volume name. + // Examples: For volume /dev/sda1, you specify the partition as "1". + // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // +optional + Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"` + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` +} + +// Represents a Quobyte mount that lasts the lifetime of a pod. +// Quobyte volumes do not support ownership management or SELinux relabeling. +type QuobyteVolumeSource struct { + // Registry represents a single or multiple Quobyte Registry services + // specified as a string as host:port pair (multiple entries are separated with commas) + // which acts as the central registry for volumes + Registry string `json:"registry" protobuf:"bytes,1,opt,name=registry"` + + // Volume is a string that references an already created Quobyte volume by name. + Volume string `json:"volume" protobuf:"bytes,2,opt,name=volume"` + + // ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. + // Defaults to false. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` + + // User to map volume access to + // Defaults to serivceaccount user + // +optional + User string `json:"user,omitempty" protobuf:"bytes,4,opt,name=user"` + + // Group to map volume access to + // Default is no group + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"` +} + +// FlexVolume represents a generic volume resource that is +// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. +type FlexVolumeSource struct { + // Driver is the name of the driver to use for this volume. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. 
+ // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // Optional: SecretRef is reference to the secret object containing + // sensitive information to pass to the plugin scripts. This may be + // empty if no secret object is specified. If the secret object + // contains more than one secret, all secrets are passed to the plugin + // scripts. + // +optional + SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` + // Optional: Extra command options if any. + // +optional + Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"` +} + +// Represents a Persistent Disk resource in AWS. +// +// An AWS EBS disk must exist before mounting to a container. The disk +// must also be in the same AWS zone as the kubelet. An AWS EBS disk +// can only be mounted as read/write once. AWS EBS volumes support +// ownership management and SELinux relabeling. +type AWSElasticBlockStoreVolumeSource struct { + // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). + // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // The partition in the volume that you want to mount. + // If omitted, the default is to mount by volume name. + // Examples: For volume /dev/sda1, you specify the partition as "1". + // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + // +optional + Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"` + // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". + // If omitted, the default is "false". + // More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` +} + +// Represents a volume that is populated with the contents of a git repository. +// Git repo volumes do not support ownership management. +// Git repo volumes support SELinux relabeling. +type GitRepoVolumeSource struct { + // Repository URL + Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"` + // Commit hash for the specified revision. + // +optional + Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"` + // Target directory name. + // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + // git repository. Otherwise, if specified, the volume will contain the git repository in + // the subdirectory with the given name. + // +optional + Directory string `json:"directory,omitempty" protobuf:"bytes,3,opt,name=directory"` +} + +// Adapts a Secret into a volume. 
+// +// The contents of the target Secret's Data field will be presented in a volume +// as files using the keys in the Data field as the file names. +// Secret volumes support ownership management and SELinux relabeling. +type SecretVolumeSource struct { + // Name of the secret in the pod's namespace to use. + // More info: http://kubernetes.io/docs/user-guide/volumes#secrets + // +optional + SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"` + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"` + // Specify whether the Secret or it's keys must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` +} + +const ( + SecretVolumeSourceDefaultMode int32 = 0644 +) + +// Adapts a secret into a projected volume. +// +// The contents of the target Secret's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names. +// Note that this is identical to a secret volume source without the default +// mode. +type SecretProjection struct { + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` +} + +// Represents an NFS mount that lasts the lifetime of a pod. +// NFS volumes do not support ownership management or SELinux relabeling. +type NFSVolumeSource struct { + // Server is the hostname or IP address of the NFS server. + // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + Server string `json:"server" protobuf:"bytes,1,opt,name=server"` + + // Path that is exported by the NFS server. 
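// ----------------------------------------------------------------------------
// Editor's example (illustrative sketch, not part of the vendored diff): the
// pointer-valued optional fields (DefaultMode, Optional) of SecretVolumeSource
// and a KeyToPath remapping. The secret name and paths are placeholders, and
// the sketch assumes the package scope of the types above.
func exampleSecretVolume() SecretVolumeSource {
	mode := int32(0400) // projected files become r-------- instead of the 0644 default
	optional := false   // volume setup fails if the secret is missing
	return SecretVolumeSource{
		SecretName: "tls-secret",
		Items: []KeyToPath{
			{Key: "tls.crt", Path: "certs/server.crt"}, // project only this key, under a relative path
		},
		DefaultMode: &mode,
		Optional:    &optional,
	}
}
// ----------------------------------------------------------------------------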
+ // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + Path string `json:"path" protobuf:"bytes,2,opt,name=path"` + + // ReadOnly here will force + // the NFS export to be mounted with read-only permissions. + // Defaults to false. + // More info: http://kubernetes.io/docs/user-guide/volumes#nfs + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + +// Represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +type ISCSIVolumeSource struct { + // iSCSI target portal. The portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"` + // Target iSCSI Qualified Name. + IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"` + // iSCSI target lun number. + Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"` + // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // +optional + ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: http://kubernetes.io/docs/user-guide/volumes#iscsi + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"` + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` + // iSCSI target portal List. The portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + // +optional + Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"` +} + +// Represents a Fibre Channel volume. +// Fibre Channel volumes can only be mounted as read/write once. +// Fibre Channel volumes support ownership management and SELinux relabeling. +type FCVolumeSource struct { + // Required: FC target worldwide names (WWNs) + TargetWWNs []string `json:"targetWWNs" protobuf:"bytes,1,rep,name=targetWWNs"` + // Required: FC target lun number + Lun *int32 `json:"lun" protobuf:"varint,2,opt,name=lun"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` +} + +// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. 
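// ----------------------------------------------------------------------------
// Editor's example (illustrative sketch, not part of the vendored diff): a
// minimal read-only NFS mount. The server address and export path are
// placeholders, and the sketch assumes the package scope of the types above.
func exampleNFSVolume() NFSVolumeSource {
	return NFSVolumeSource{
		Server:   "nfs.example.com", // hostname or IP address of the NFS server
		Path:     "/exports/data",   // path exported by that server
		ReadOnly: true,              // forces a read-only mount regardless of the VolumeMount
	}
}
// ----------------------------------------------------------------------------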
+type AzureFileVolumeSource struct { + // the name of secret that contains Azure Storage Account Name and Key + SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"` + // Share Name + ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + +// Represents a vSphere volume resource. +type VsphereVirtualDiskVolumeSource struct { + // Path that identifies vSphere volume vmdk + VolumePath string `json:"volumePath" protobuf:"bytes,1,opt,name=volumePath"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` +} + +// Represents a Photon Controller persistent disk resource. +type PhotonPersistentDiskVolumeSource struct { + // ID that identifies Photon Controller persistent disk + PdID string `json:"pdID" protobuf:"bytes,1,opt,name=pdID"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` +} + +type AzureDataDiskCachingMode string + +const ( + AzureDataDiskCachingNone AzureDataDiskCachingMode = "None" + AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly" + AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite" +) + +// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. +type AzureDiskVolumeSource struct { + // The Name of the data disk in the blob storage + DiskName string `json:"diskName" protobuf:"bytes,1,opt,name=diskName"` + // The URI the data disk in the blob storage + DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"` + // Host Caching mode: None, Read Only, Read Write. + // +optional + CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"` +} + +// PortworxVolumeSource represents a Portworx volume resource. +type PortworxVolumeSource struct { + // VolumeID uniquely identifies a Portworx volume + VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` + // FSType represents the filesystem type to mount + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. 
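// ----------------------------------------------------------------------------
// Editor's example (illustrative sketch, not part of the vendored diff):
// unlike most volume sources above, AzureDiskVolumeSource uses pointers for
// its optional fields, so a literal needs addressable locals. The disk name
// and URI are placeholders, and the sketch assumes the package scope of the
// types above.
func exampleAzureDisk() AzureDiskVolumeSource {
	caching := AzureDataDiskCachingReadOnly
	fsType := "ext4"
	readOnly := true
	return AzureDiskVolumeSource{
		DiskName:    "kubewatch-data",
		DataDiskURI: "https://example.blob.core.windows.net/vhds/kubewatch-data.vhd",
		CachingMode: &caching,
		FSType:      &fsType,
		ReadOnly:    &readOnly,
	}
}
// ----------------------------------------------------------------------------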
+ // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + +// ScaleIOVolumeSource represents a persistent ScaleIO volume +type ScaleIOVolumeSource struct { + // The host address of the ScaleIO API Gateway. + Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"` + // The name of the storage system as configured in ScaleIO. + System string `json:"system" protobuf:"bytes,2,opt,name=system"` + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + SecretRef *LocalObjectReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"` + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"` + // The name of the Protection Domain for the configured storage (defaults to "default"). + // +optional + ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"` + // The Storage Pool associated with the protection domain (defaults to "default"). + // +optional + StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"` + // Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). + // +optional + StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. + VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"` +} + +// Adapts a ConfigMap into a volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// volume as files using the keys in the Data field as the file names, unless +// the items element is populated with specific mappings of keys to paths. +// ConfigMap volumes support ownership management and SELinux relabeling. +type ConfigMapVolumeSource struct { + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. 
+ // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"` + // Specify whether the ConfigMap or it's keys must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` +} + +const ( + ConfigMapVolumeSourceDefaultMode int32 = 0644 +) + +// Adapts a ConfigMap into a projected volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names, +// unless the items element is populated with specific mappings of keys to paths. +// Note that this is identical to a configmap volume source without the default +// mode. +type ConfigMapProjection struct { + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + // Specify whether the ConfigMap or it's keys must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` +} + +// Represents a projected volume source +type ProjectedVolumeSource struct { + // list of volume projections + Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"` + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"` +} + +// Projection that may be projected along with other supported volume types +type VolumeProjection struct { + // all types below are the supported types for projection into the same volume + + // information about the secret data to project + Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"` + // information about the downwardAPI data to project + DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" protobuf:"bytes,2,opt,name=downwardAPI"` + // information about the configMap data to project + ConfigMap *ConfigMapProjection `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"` +} + +const ( + ProjectedVolumeSourceDefaultMode int32 = 0644 +) + +// Maps a string key to a path within a volume. +type KeyToPath struct { + // The key to project. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + + // The relative path of the file to map the key to. + // May not be an absolute path. + // May not contain the path element '..'. + // May not start with the string '..'. 
+ Path string `json:"path" protobuf:"bytes,2,opt,name=path"` + // Optional: mode bits to use on this file, must be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"` +} + +// ContainerPort represents a network port in a single container. +type ContainerPort struct { + // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + // named port in a pod must have a unique name. Name for the port that can be + // referred to by services. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // Number of port to expose on the host. + // If specified, this must be a valid port number, 0 < x < 65536. + // If HostNetwork is specified, this must match ContainerPort. + // Most containers do not need this. + // +optional + HostPort int32 `json:"hostPort,omitempty" protobuf:"varint,2,opt,name=hostPort"` + // Number of port to expose on the pod's IP address. + // This must be a valid port number, 0 < x < 65536. + ContainerPort int32 `json:"containerPort" protobuf:"varint,3,opt,name=containerPort"` + // Protocol for port. Must be UDP or TCP. + // Defaults to "TCP". + // +optional + Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"` + // What host IP to bind the external port to. + // +optional + HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"` +} + +// VolumeMount describes a mounting of a Volume within a container. +type VolumeMount struct { + // This must match the Name of a Volume. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Mounted read-only if true, read-write otherwise (false or unspecified). + // Defaults to false. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` + // Path within the container at which the volume should be mounted. Must + // not contain ':'. + MountPath string `json:"mountPath" protobuf:"bytes,3,opt,name=mountPath"` + // Path within the volume from which the container's volume should be mounted. + // Defaults to "" (volume's root). + // +optional + SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"` +} + +// EnvVar represents an environment variable present in a Container. +type EnvVar struct { + // Name of the environment variable. Must be a C_IDENTIFIER. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Optional: no more than one of the following may be specified. + + // Variable references $(VAR_NAME) are expanded + // using the previous defined environment variables in the container and + // any service environment variables. If a variable cannot be resolved, + // the reference in the input string will be unchanged. The $(VAR_NAME) + // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped + // references will never be expanded, regardless of whether the variable + // exists or not. + // Defaults to "". + // +optional + Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // Source for the environment variable's value. Cannot be used if value is not empty. + // +optional + ValueFrom *EnvVarSource `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"` +} + +// EnvVarSource represents a source for the value of an EnvVar. 
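// ----------------------------------------------------------------------------
// Editor's example (illustrative sketch, not part of the vendored diff): the
// ContainerPort, VolumeMount and EnvVar shapes defined above, filled with
// placeholder names and paths; the sketch assumes the package scope of these
// types.
var (
	examplePort = ContainerPort{
		Name:          "http", // must be an IANA_SVC_NAME, unique within the pod
		ContainerPort: 8080,   // port the process listens on inside the container
		Protocol:      ProtocolTCP,
	}
	exampleMount = VolumeMount{
		Name:      "config",         // must match the Name of a Volume in the pod spec
		MountPath: "/etc/kubewatch", // may not contain ':'
		ReadOnly:  true,
	}
	exampleEnv = EnvVar{
		Name:  "LOG_LEVEL", // must be a C_IDENTIFIER
		Value: "debug",
	}
)
// ----------------------------------------------------------------------------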
+type EnvVarSource struct { + // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, + // spec.nodeName, spec.serviceAccountName, status.podIP. + // +optional + FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"` + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // +optional + ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,2,opt,name=resourceFieldRef"` + // Selects a key of a ConfigMap. + // +optional + ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,3,opt,name=configMapKeyRef"` + // Selects a key of a secret in the pod's namespace + // +optional + SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"` +} + +// ObjectFieldSelector selects an APIVersioned field of an object. +type ObjectFieldSelector struct { + // Version of the schema the FieldPath is written in terms of, defaults to "v1". + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"` + // Path of the field to select in the specified API version. + FieldPath string `json:"fieldPath" protobuf:"bytes,2,opt,name=fieldPath"` +} + +// ResourceFieldSelector represents container resources (cpu, memory) and their output format +type ResourceFieldSelector struct { + // Container name: required for volumes, optional for env vars + // +optional + ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"` + // Required: resource to select + Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"` + // Specifies the output format of the exposed resources, defaults to "1" + // +optional + Divisor resource.Quantity `json:"divisor,omitempty" protobuf:"bytes,3,opt,name=divisor"` +} + +// Selects a key from a ConfigMap. +type ConfigMapKeySelector struct { + // The ConfigMap to select from. + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // The key to select. + Key string `json:"key" protobuf:"bytes,2,opt,name=key"` + // Specify whether the ConfigMap or its key must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` +} + +// SecretKeySelector selects a key of a Secret. +type SecretKeySelector struct { + // The name of the secret in the pod's namespace to select from. + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // The key of the secret to select from. Must be a valid secret key. + Key string `json:"key" protobuf:"bytes,2,opt,name=key"` + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` +} + +// EnvFromSource represents the source of a set of ConfigMaps +type EnvFromSource struct { + // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
+ // +optional + Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"` + // The ConfigMap to select from + // +optional + ConfigMapRef *ConfigMapEnvSource `json:"configMapRef,omitempty" protobuf:"bytes,2,opt,name=configMapRef"` + // The Secret to select from + // +optional + SecretRef *SecretEnvSource `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"` +} + +// ConfigMapEnvSource selects a ConfigMap to populate the environment +// variables with. +// +// The contents of the target ConfigMap's Data field will represent the +// key-value pairs as environment variables. +type ConfigMapEnvSource struct { + // The ConfigMap to select from. + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // Specify whether the ConfigMap must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"` +} + +// SecretEnvSource selects a Secret to populate the environment +// variables with. +// +// The contents of the target Secret's Data field will represent the +// key-value pairs as environment variables. +type SecretEnvSource struct { + // The Secret to select from. + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // Specify whether the Secret must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"` +} + +// HTTPHeader describes a custom header to be used in HTTP probes +type HTTPHeader struct { + // The header field name + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // The header field value + Value string `json:"value" protobuf:"bytes,2,opt,name=value"` +} + +// HTTPGetAction describes an action based on HTTP Get requests. +type HTTPGetAction struct { + // Path to access on the HTTP server. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` + // Name or number of the port to access on the container. + // Number must be in the range 1 to 65535. + // Name must be an IANA_SVC_NAME. + Port intstr.IntOrString `json:"port" protobuf:"bytes,2,opt,name=port"` + // Host name to connect to, defaults to the pod IP. You probably want to set + // "Host" in httpHeaders instead. + // +optional + Host string `json:"host,omitempty" protobuf:"bytes,3,opt,name=host"` + // Scheme to use for connecting to the host. + // Defaults to HTTP. + // +optional + Scheme URIScheme `json:"scheme,omitempty" protobuf:"bytes,4,opt,name=scheme,casttype=URIScheme"` + // Custom headers to set in the request. HTTP allows repeated headers. + // +optional + HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty" protobuf:"bytes,5,rep,name=httpHeaders"` +} + +// URIScheme identifies the scheme used for connection to a host for Get actions +type URIScheme string + +const ( + // URISchemeHTTP means that the scheme used will be http:// + URISchemeHTTP URIScheme = "HTTP" + // URISchemeHTTPS means that the scheme used will be https:// + URISchemeHTTPS URIScheme = "HTTPS" +) + +// TCPSocketAction describes an action based on opening a socket +type TCPSocketAction struct { + // Number or name of the port to access on the container. + // Number must be in the range 1 to 65535. + // Name must be an IANA_SVC_NAME. + Port intstr.IntOrString `json:"port" protobuf:"bytes,1,opt,name=port"` +} + +// ExecAction describes a "run in container" action. 
+type ExecAction struct { + // Command is the command line to execute inside the container, the working directory for the + // command is root ('/') in the container's filesystem. The command is simply exec'd, it is + // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + // a shell, you need to explicitly call out to that shell. + // Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + // +optional + Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"` +} + +// Probe describes a health check to be performed against a container to determine whether it is +// alive or ready to receive traffic. +type Probe struct { + // The action taken to determine the health of a container + Handler `json:",inline" protobuf:"bytes,1,opt,name=handler"` + // Number of seconds after the container has started before liveness probes are initiated. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // +optional + InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty" protobuf:"varint,2,opt,name=initialDelaySeconds"` + // Number of seconds after which the probe times out. + // Defaults to 1 second. Minimum value is 1. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // +optional + TimeoutSeconds int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"` + // How often (in seconds) to perform the probe. + // Default to 10 seconds. Minimum value is 1. + // +optional + PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"` + // Minimum consecutive successes for the probe to be considered successful after having failed. + // Defaults to 1. Must be 1 for liveness. Minimum value is 1. + // +optional + SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"` + // Minimum consecutive failures for the probe to be considered failed after having succeeded. + // Defaults to 3. Minimum value is 1. + // +optional + FailureThreshold int32 `json:"failureThreshold,omitempty" protobuf:"varint,6,opt,name=failureThreshold"` +} + +// PullPolicy describes a policy for if/when to pull a container image +type PullPolicy string + +const ( + // PullAlways means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. + PullAlways PullPolicy = "Always" + // PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present + PullNever PullPolicy = "Never" + // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. + PullIfNotPresent PullPolicy = "IfNotPresent" +) + +// TerminationMessagePolicy describes how termination messages are retrieved from a container. +type TerminationMessagePolicy string + +const ( + // TerminationMessageReadFile is the default behavior and will set the container status message to + // the contents of the container's terminationMessagePath when the container exits. + TerminationMessageReadFile TerminationMessagePolicy = "File" + // TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs + // for the container status message when the container exits with an error and the + // terminationMessagePath has no contents. 
+ TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError" +) + +// Capability represent POSIX capabilities type +type Capability string + +// Adds and removes POSIX capabilities from running containers. +type Capabilities struct { + // Added capabilities + // +optional + Add []Capability `json:"add,omitempty" protobuf:"bytes,1,rep,name=add,casttype=Capability"` + // Removed capabilities + // +optional + Drop []Capability `json:"drop,omitempty" protobuf:"bytes,2,rep,name=drop,casttype=Capability"` +} + +// ResourceRequirements describes the compute resource requirements. +type ResourceRequirements struct { + // Limits describes the maximum amount of compute resources allowed. + // More info: http://kubernetes.io/docs/user-guide/compute-resources/ + // +optional + Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"` + // Requests describes the minimum amount of compute resources required. + // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to an implementation-defined value. + // More info: http://kubernetes.io/docs/user-guide/compute-resources/ + // +optional + Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"` +} + +const ( + // TerminationMessagePathDefault means the default path to capture the application termination message running in a container + TerminationMessagePathDefault string = "/dev/termination-log" +) + +// A single application container that you want to run within a pod. +type Container struct { + // Name of the container specified as a DNS_LABEL. + // Each container in a pod must have a unique name (DNS_LABEL). + // Cannot be updated. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Docker image name. + // More info: http://kubernetes.io/docs/user-guide/images + // +optional + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands + // +optional + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands + // +optional + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. 
+ // Cannot be updated. + // +optional + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` + // List of ports to expose from the container. Exposing a port here gives + // the system additional information about the network connections a + // container uses, but is primarily informational. Not specifying a port here + // DOES NOT prevent that port from being exposed. Any port which is + // listening on the default "0.0.0.0" address inside a container will be + // accessible from the network. + // Cannot be updated. + // +optional + Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Compute Resources required by this container. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources + // +optional + Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // Periodic probe of container liveness. + // Container will be restarted if the probe fails. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // +optional + LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` + // Periodic probe of container service readiness. + // Container will be removed from service endpoints if the probe fails. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes + // +optional + ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` + // Actions that the management system should take in response to container lifecycle events. + // Cannot be updated. + // +optional + Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` + // Indicate how the termination message should be populated. 
File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/images#updating-images + // +optional + ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // Security options the pod should run with. + // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md + // +optional + SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` + + // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) + // and shouldn't be used for general purpose containers. + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container processes that reads from stdin will never receive an EOF. + // Default is false + // +optional + StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. + // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` +} + +// Handler defines a specific action that should be taken +// TODO: pass structured data to these actions, and document that data here. +type Handler struct { + // One and only one of the following should be specified. + // Exec specifies the action to take. + // +optional + Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"` + // HTTPGet specifies the http request to perform. + // +optional + HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"` + // TCPSocket specifies an action involving a TCP port. + // TCP hooks not yet supported + // TODO: implement a realistic TCP lifecycle hook + // +optional + TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` +} + +// Lifecycle describes actions that the management system should take in response to container lifecycle +// events. 
For the PostStart and PreStop lifecycle handlers, management of the container blocks +// until the action is complete, unless the container process fails, in which case the handler is aborted. +type Lifecycle struct { + // PostStart is called immediately after a container is created. If the handler fails, + // the container is terminated and restarted according to its restart policy. + // Other management of the container blocks until the hook completes. + // More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details + // +optional + PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"` + // PreStop is called immediately before a container is terminated. + // The container is terminated after the handler completes. + // The reason for termination is passed to the handler. + // Regardless of the outcome of the handler, the container is eventually terminated. + // Other management of the container blocks until the hook completes. + // More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details + // +optional + PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"` +} + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// ContainerStateWaiting is a waiting state of a container. +type ContainerStateWaiting struct { + // (brief) reason the container is not yet running. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason"` + // Message regarding why the container is not yet running. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` +} + +// ContainerStateRunning is a running state of a container. +type ContainerStateRunning struct { + // Time at which the container was last (re-)started + // +optional + StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,1,opt,name=startedAt"` +} + +// ContainerStateTerminated is a terminated state of a container. 
+type ContainerStateTerminated struct { + // Exit status from the last termination of the container + ExitCode int32 `json:"exitCode" protobuf:"varint,1,opt,name=exitCode"` + // Signal from the last termination of the container + // +optional + Signal int32 `json:"signal,omitempty" protobuf:"varint,2,opt,name=signal"` + // (brief) reason from the last termination of the container + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // Message regarding the last termination of the container + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` + // Time at which previous execution of the container started + // +optional + StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,5,opt,name=startedAt"` + // Time at which the container last terminated + // +optional + FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,6,opt,name=finishedAt"` + // Container's ID in the format 'docker://<container_id>' + // +optional + ContainerID string `json:"containerID,omitempty" protobuf:"bytes,7,opt,name=containerID"` +} + +// ContainerState holds a possible state of container. +// Only one of its members may be specified. +// If none of them is specified, the default one is ContainerStateWaiting. +type ContainerState struct { + // Details about a waiting container + // +optional + Waiting *ContainerStateWaiting `json:"waiting,omitempty" protobuf:"bytes,1,opt,name=waiting"` + // Details about a running container + // +optional + Running *ContainerStateRunning `json:"running,omitempty" protobuf:"bytes,2,opt,name=running"` + // Details about a terminated container + // +optional + Terminated *ContainerStateTerminated `json:"terminated,omitempty" protobuf:"bytes,3,opt,name=terminated"` +} + +// ContainerStatus contains details for the current status of this container. +type ContainerStatus struct { + // This must be a DNS_LABEL. Each container in a pod must have a unique name. + // Cannot be updated. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Details about the container's current condition. + // +optional + State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"` + // Details about the container's last termination condition. + // +optional + LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"` + // Specifies whether the container has passed its readiness probe. + Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"` + // The number of times the container has been restarted, currently based on + // the number of dead containers that have not yet been removed. + // Note that this is calculated from dead containers. But those containers are subject to + // garbage collection. This value will get capped at 5 by GC. + RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"` + // The image the container is running. + // More info: http://kubernetes.io/docs/user-guide/images + // TODO(dchen1107): Which image the container is running with? + Image string `json:"image" protobuf:"bytes,6,opt,name=image"` + // ImageID of the container's image. + ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"` + // Container's ID in the format 'docker://<container_id>'. 
+ // More info: http://kubernetes.io/docs/user-guide/container-environment#container-information + // +optional + ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"` +} + +// PodPhase is a label for the condition of a pod at the current time. +type PodPhase string + +// These are the valid statuses of pods. +const ( + // PodPending means the pod has been accepted by the system, but one or more of the containers + // has not been started. This includes time before being bound to a node, as well as time spent + // pulling images onto the host. + PodPending PodPhase = "Pending" + // PodRunning means the pod has been bound to a node and all of the containers have been started. + // At least one container is still running or is in the process of being restarted. + PodRunning PodPhase = "Running" + // PodSucceeded means that all containers in the pod have voluntarily terminated + // with a container exit code of 0, and the system is not going to restart any of these containers. + PodSucceeded PodPhase = "Succeeded" + // PodFailed means that all containers in the pod have terminated, and at least one container has + // terminated in a failure (exited with a non-zero exit code or was stopped by the system). + PodFailed PodPhase = "Failed" + // PodUnknown means that for some reason the state of the pod could not be obtained, typically due + // to an error in communicating with the host of the pod. + PodUnknown PodPhase = "Unknown" +) + +// PodConditionType is a valid value for PodCondition.Type +type PodConditionType string + +// These are valid conditions of pod. +const ( + // PodScheduled represents status of the scheduling process for this pod. + PodScheduled PodConditionType = "PodScheduled" + // PodReady means the pod is able to service requests and should be added to the + // load balancing pools of all matching services. + PodReady PodConditionType = "Ready" + // PodInitialized means that all init containers in the pod have started successfully. + PodInitialized PodConditionType = "Initialized" + // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler + // can't schedule the pod right now, for example due to insufficient resources in the cluster. + PodReasonUnschedulable = "Unschedulable" +) + +// PodCondition contains details for the current condition of this pod. +type PodCondition struct { + // Type is the type of the condition. + // Currently only Ready. + // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions + Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"` + // Status is the status of the condition. + // Can be True, False, Unknown. + // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // Last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // Unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human-readable message indicating details about last transition. 
+ // +optional + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + +// RestartPolicy describes how the container should be restarted. +// Only one of the following restart policies may be specified. +// If none of the following policies is specified, the default one +// is RestartPolicyAlways. +type RestartPolicy string + +const ( + RestartPolicyAlways RestartPolicy = "Always" + RestartPolicyOnFailure RestartPolicy = "OnFailure" + RestartPolicyNever RestartPolicy = "Never" +) + +// DNSPolicy defines how a pod's DNS will be configured. +type DNSPolicy string + +const ( + // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS + // first, if it is available, then fall back on the default + // (as determined by kubelet) DNS settings. + DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet" + + // DNSClusterFirst indicates that the pod should use cluster DNS + // first unless hostNetwork is true, if it is available, then + // fall back on the default (as determined by kubelet) DNS settings. + DNSClusterFirst DNSPolicy = "ClusterFirst" + + // DNSDefault indicates that the pod should use the default (as + // determined by kubelet) DNS settings. + DNSDefault DNSPolicy = "Default" + + DefaultTerminationGracePeriodSeconds = 30 +) + +// A node selector represents the union of the results of one or more label queries +// over a set of nodes; that is, it represents the OR of the selectors represented +// by the node selector terms. +type NodeSelector struct { + // Required. A list of node selector terms. The terms are ORed. + NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"` +} + +// A null or empty node selector term matches no objects. +type NodeSelectorTerm struct { + // Required. A list of node selector requirements. The requirements are ANDed. + MatchExpressions []NodeSelectorRequirement `json:"matchExpressions" protobuf:"bytes,1,rep,name=matchExpressions"` +} + +// A node selector requirement is a selector that contains values, a key, and an operator +// that relates the key and values. +type NodeSelectorRequirement struct { + // The label key that the selector applies to. + Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + // Represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. + Operator NodeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=NodeSelectorOperator"` + // An array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. If the operator is Gt or Lt, the values + // array must have a single element, which will be interpreted as an integer. + // This array is replaced during a strategic merge patch. + // +optional + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` +} + +// A node selector operator is the set of operators that can be used in +// a node selector requirement.
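// ----------------------------------------------------------------------------
// Editor's example (illustrative sketch, not part of the vendored diff): the
// OR-of-ANDs structure described above, with one term whose requirements are
// ANDed. The label key and values are placeholders; the sketch assumes the
// package scope of these types.
func exampleNodeSelector() *NodeSelector {
	return &NodeSelector{
		NodeSelectorTerms: []NodeSelectorTerm{ // terms are ORed
			{
				MatchExpressions: []NodeSelectorRequirement{ // requirements are ANDed
					{Key: "disktype", Operator: NodeSelectorOpIn, Values: []string{"ssd"}},
				},
			},
		},
	}
}
// ----------------------------------------------------------------------------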
+type NodeSelectorOperator string + +const ( + NodeSelectorOpIn NodeSelectorOperator = "In" + NodeSelectorOpNotIn NodeSelectorOperator = "NotIn" + NodeSelectorOpExists NodeSelectorOperator = "Exists" + NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist" + NodeSelectorOpGt NodeSelectorOperator = "Gt" + NodeSelectorOpLt NodeSelectorOperator = "Lt" +) + +// Affinity is a group of affinity scheduling rules. +type Affinity struct { + // Describes node affinity scheduling rules for the pod. + // +optional + NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAffinity"` + // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + // +optional + PodAffinity *PodAffinity `json:"podAffinity,omitempty" protobuf:"bytes,2,opt,name=podAffinity"` + // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + // +optional + PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty" protobuf:"bytes,3,opt,name=podAntiAffinity"` +} + +// Pod affinity is a group of inter pod affinity scheduling rules. +type PodAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"` + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. 
+ // +optional + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` +} + +// Pod anti affinity is a group of inter pod anti affinity scheduling rules. +type PodAntiAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"` + // The scheduler will prefer to schedule pods to nodes that satisfy + // the anti-affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling anti-affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` +} + +// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +type WeightedPodAffinityTerm struct { + // weight associated with matching the corresponding podAffinityTerm, + // in the range 1-100. + Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"` + // Required. A pod affinity term, associated with the corresponding weight. 
+ PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm" protobuf:"bytes,2,opt,name=podAffinityTerm"` +} + +// Defines a set of pods (namely those matching the labelSelector +// relative to the given namespace(s)) that this pod should be +// co-located (affinity) or not co-located (anti-affinity) with, +// where co-located is defined as running on a node whose value of +// the label with key <topologyKey> matches that of any node on which +// a pod of the set of pods is running +type PodAffinityTerm struct { + // A label query over a set of resources, in this case pods. + // +optional + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` + // namespaces specifies which namespaces the labelSelector applies to (matches against); + // null or empty list means "this pod's namespace" + Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,2,rep,name=namespaces"` + // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + // the labelSelector in the specified namespaces, where co-located is defined as running on a node + // whose value of the label with key topologyKey matches that of any node on which any of the + // selected pods is running. + // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" + // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); + // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. + // +optional + TopologyKey string `json:"topologyKey,omitempty" protobuf:"bytes,3,opt,name=topologyKey"` +} + +// Node affinity is a group of node affinity scheduling rules. +type NodeAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // will try to eventually evict the pod from its node. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // may or may not try to eventually evict the pod from its node. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,opt,name=requiredDuringSchedulingIgnoredDuringExecution"` + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node matches the corresponding matchExpressions; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` +} + +// An empty preferred scheduling term matches all objects with implicit weight 0 +// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). +type PreferredSchedulingTerm struct { + // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"` + // A node selector term, associated with the corresponding weight. + Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"` +} + +// The node this Taint is attached to has the effect "effect" on +// any pod that does not tolerate the Taint. +type Taint struct { + // Required. The taint key to be applied to a node. + Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + // Required. The taint value corresponding to the taint key. + // +optional + Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // Required. The effect of the taint on pods + // that do not tolerate the taint. + // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"` + // TimeAdded represents the time at which the taint was added. + // It is only written for NoExecute taints. + // +optional + TimeAdded metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"` +} + +type TaintEffect string + +const ( + // Do not allow new pods to schedule onto the node unless they tolerate the taint, + // but allow all pods submitted to Kubelet without going through the scheduler + // to start, and allow all already-running pods to continue running. + // Enforced by the scheduler. + TaintEffectNoSchedule TaintEffect = "NoSchedule" + // Like TaintEffectNoSchedule, but the scheduler tries not to schedule + // new pods onto the node, rather than prohibiting new pods from scheduling + // onto the node entirely. Enforced by the scheduler. + TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule" + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // Like TaintEffectNoSchedule, but additionally do not allow pods submitted to + // Kubelet without going through the scheduler to start. + // Enforced by Kubelet and the scheduler. + // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit" + // Evict any already-running pods that do not tolerate the taint. + // Currently enforced by NodeController. + TaintEffectNoExecute TaintEffect = "NoExecute" +) + +// The pod this Toleration is attached to tolerates any taint that matches +// the triple <key,value,effect> using the matching operator <operator>. +type Toleration struct { + // Key is the taint key that the toleration applies to. Empty means match all taint keys. + // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
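// Illustrative sketch (not part of the vendored file): how the node-affinity types above
// compose. It assumes it lives alongside these types in the same package; the label keys
// and values ("kubernetes.io/e2e-az-name", "disktype", etc.) are purely hypothetical.
func exampleNodeAffinity() *Affinity {
	return &Affinity{
		NodeAffinity: &NodeAffinity{
			// Hard requirement: only schedule onto nodes whose label matches one of the values.
			RequiredDuringSchedulingIgnoredDuringExecution: &NodeSelector{
				NodeSelectorTerms: []NodeSelectorTerm{{
					MatchExpressions: []NodeSelectorRequirement{{
						Key:      "kubernetes.io/e2e-az-name",
						Operator: NodeSelectorOpIn,
						Values:   []string{"e2e-az1", "e2e-az2"},
					}},
				}},
			},
			// Soft preference: the weight (1-100) is added for every node that matches.
			PreferredDuringSchedulingIgnoredDuringExecution: []PreferredSchedulingTerm{{
				Weight: 50,
				Preference: NodeSelectorTerm{
					MatchExpressions: []NodeSelectorRequirement{{
						Key:      "disktype",
						Operator: NodeSelectorOpExists,
					}},
				},
			}},
		},
	}
}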
+ // +optional + Key string `json:"key,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + // Operator represents a key's relationship to the value. + // Valid operators are Exists and Equal. Defaults to Equal. + // Exists is equivalent to wildcard for value, so that a pod can + // tolerate all taints of a particular category. + // +optional + Operator TolerationOperator `json:"operator,omitempty" protobuf:"bytes,2,opt,name=operator,casttype=TolerationOperator"` + // Value is the taint value the toleration matches to. + // If the operator is Exists, the value should be empty, otherwise just a regular string. + // +optional + Value string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"` + // Effect indicates the taint effect to match. Empty means match all taint effects. + // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + // +optional + Effect TaintEffect `json:"effect,omitempty" protobuf:"bytes,4,opt,name=effect,casttype=TaintEffect"` + // TolerationSeconds represents the period of time the toleration (which must be + // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + // it is not set, which means tolerate the taint forever (do not evict). Zero and + // negative values will be treated as 0 (evict immediately) by the system. + // +optional + TolerationSeconds *int64 `json:"tolerationSeconds,omitempty" protobuf:"varint,5,opt,name=tolerationSeconds"` +} + +// A toleration operator is the set of operators that can be used in a toleration. +type TolerationOperator string + +const ( + TolerationOpExists TolerationOperator = "Exists" + TolerationOpEqual TolerationOperator = "Equal" +) + +const ( + // This annotation key will be used to contain an array of v1 JSON encoded Containers + // for init containers. The annotation will be placed into the internal type and cleared. + // This key is only recognized by version >= 1.4. + PodInitContainersBetaAnnotationKey = "pod.beta.kubernetes.io/init-containers" + // This annotation key will be used to contain an array of v1 JSON encoded Containers + // for init containers. The annotation will be placed into the internal type and cleared. + // This key is recognized by version >= 1.3. For version 1.4 code, this key + // will have its value copied to the beta key. + PodInitContainersAnnotationKey = "pod.alpha.kubernetes.io/init-containers" + // This annotation key will be used to contain an array of v1 JSON encoded + // ContainerStatuses for init containers. The annotation will be placed into the internal + // type and cleared. This key is only recognized by version >= 1.4. + PodInitContainerStatusesBetaAnnotationKey = "pod.beta.kubernetes.io/init-container-statuses" + // This annotation key will be used to contain an array of v1 JSON encoded + // ContainerStatuses for init containers. The annotation will be placed into the internal + // type and cleared. This key is recognized by version >= 1.3. For version 1.4 code, + // this key will have its value copied to the beta key. + PodInitContainerStatusesAnnotationKey = "pod.alpha.kubernetes.io/init-container-statuses" +) + +// PodSpec is a description of a pod. +type PodSpec struct { + // List of volumes that can be mounted by containers belonging to the pod. 
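// Illustrative sketch (not part of the vendored file, assumed to sit in this package):
// a NoExecute taint a node might carry and a Toleration that matches it. The taint
// key/value and the 300-second grace period are assumptions for illustration only.
func exampleTaintAndToleration() (Taint, Toleration) {
	taint := Taint{
		Key:    "example.com/maintenance",
		Value:  "planned",
		Effect: TaintEffectNoExecute,
	}
	// A pod carrying this toleration may stay on the tainted node for up to
	// 300 seconds after the taint is added, after which it is evicted.
	gracePeriod := int64(300)
	toleration := Toleration{
		Key:               "example.com/maintenance",
		Operator:          TolerationOpEqual,
		Value:             "planned",
		Effect:            TaintEffectNoExecute,
		TolerationSeconds: &gracePeriod,
	}
	return taint, toleration
}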
+ // More info: http://kubernetes.io/docs/user-guide/volumes + // +optional + Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"` + // List of initialization containers belonging to the pod. + // Init containers are executed in order prior to containers being started. If any + // init container fails, the pod is considered to have failed and is handled according + // to its restartPolicy. The name for an init container or normal container must be + // unique among all containers. + // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. + // The resourceRequirements of an init container are taken into account during scheduling + // by finding the highest request/limit for each resource type, and then using the max of + // of that value or the sum of the normal containers. Limits are applied to init containers + // in a similar fashion. + // Init containers cannot currently be added or removed. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/containers + InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"` + // List of containers belonging to the pod. + // Containers cannot currently be added or removed. + // There must be at least one container in a Pod. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/containers + Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"` + // Restart policy for all containers within the pod. + // One of Always, OnFailure, Never. + // Default to Always. + // More info: http://kubernetes.io/docs/user-guide/pod-states#restartpolicy + // +optional + RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"` + // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + // Value must be non-negative integer. The value zero indicates delete immediately. + // If this value is nil, the default grace period will be used instead. + // The grace period is the duration in seconds after the processes running in the pod are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // Defaults to 30 seconds. + // +optional + TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"` + // Optional duration in seconds the pod may be active on the node relative to + // StartTime before the system will actively try to mark it failed and kill associated containers. + // Value must be a positive integer. + // +optional + ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"` + // Set DNS policy for containers within the pod. + // One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. + // Defaults to "ClusterFirst". + // To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + // +optional + DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"` + // NodeSelector is a selector which must be true for the pod to fit on a node. 
+ // Selector which must match a node's labels for the pod to be scheduled on that node. + // More info: http://kubernetes.io/docs/user-guide/node-selection/README + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"` + + // ServiceAccountName is the name of the ServiceAccount to use to run this pod. + // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"` + // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. + // Deprecated: Use serviceAccountName instead. + // +k8s:conversion-gen=false + // +optional + DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"` + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + // +optional + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"` + + // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + // the scheduler simply schedules this pod onto that node, assuming that it fits resource + // requirements. + // +optional + NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"` + // Host networking requested for this pod. Use the host's network namespace. + // If this option is set, the ports that will be used must be specified. + // Default to false. + // +k8s:conversion-gen=false + // +optional + HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"` + // Use the host's pid namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"` + // Use the host's ipc namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"` + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. See type description for default values of each field. + // +optional + SecurityContext *PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"` + // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + // If specified, these secrets will be passed to individual puller implementations for them to use. For example, + // in the case of docker, only DockerConfig type secrets are honored. + // More info: http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + // +optional + ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"` + // Specifies the hostname of the Pod + // If not specified, the pod's hostname will be set to a system-defined value. + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"` + // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". + // If not specified, the pod will not have a domainname at all. 
+ // +optional + Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"` + // If specified, the pod's scheduling constraints + // +optional + Affinity *Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"` + // If specified, the pod will be dispatched by specified scheduler. + // If not specified, the pod will be dispatched by default scheduler. + // +optional + SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"` + // If specified, the pod's tolerations. + // +optional + Tolerations []Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"` +} + +// PodSecurityContext holds pod-level security attributes and common container settings. +// Some fields are also present in container.securityContext. Field values of +// container.securityContext take precedence over field values of PodSecurityContext. +type PodSecurityContext struct { + // The SELinux context to be applied to all containers. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in SecurityContext. If set in + // both SecurityContext and PodSecurityContext, the value specified in SecurityContext + // takes precedence for that container. + // +optional + SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"` + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence + // for that container. + // +optional + RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,2,opt,name=runAsUser"` + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"` + // A list of groups applied to the first process run in each container, in addition + // to the container's primary GID. If unspecified, no groups will be added to + // any container. + // +optional + SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"` + // A special supplemental group that applies to all containers in a pod. + // Some volume types allow the Kubelet to change the ownership of that volume + // to be owned by the pod: + // + // 1. The owning GID will be the FSGroup + // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + // 3. The permission bits are OR'd with rw-rw---- + // + // If unset, the Kubelet will not modify the ownership and permissions of any volume. + // +optional + FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"` +} + +// PodQOSClass defines the supported qos classes of Pods. +type PodQOSClass string + +const ( + // PodQOSGuaranteed is the Guaranteed qos class. + PodQOSGuaranteed PodQOSClass = "Guaranteed" + // PodQOSBurstable is the Burstable qos class. 
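// Illustrative sketch (not part of the vendored file, assumed to sit in this package):
// a minimal PodSpec combining the fields above with a PodSecurityContext. Container is
// defined earlier in this file; the image name, node-selector label, and numeric GID
// are hypothetical values chosen only for illustration.
func examplePodSpec() PodSpec {
	gracePeriod := int64(DefaultTerminationGracePeriodSeconds)
	runAsNonRoot := true
	fsGroup := int64(2000)
	return PodSpec{
		Containers:                    []Container{{Name: "app", Image: "nginx:1.11"}},
		RestartPolicy:                 RestartPolicyAlways,
		DNSPolicy:                     DNSClusterFirst,
		TerminationGracePeriodSeconds: &gracePeriod,
		NodeSelector:                  map[string]string{"disktype": "ssd"},
		SecurityContext: &PodSecurityContext{
			RunAsNonRoot: &runAsNonRoot,
			FSGroup:      &fsGroup,
		},
	}
}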
+ PodQOSBurstable PodQOSClass = "Burstable" + // PodQOSBestEffort is the BestEffort qos class. + PodQOSBestEffort PodQOSClass = "BestEffort" +) + +// PodStatus represents information about the status of a pod. Status may trail the actual +// state of a system. +type PodStatus struct { + // Current condition of the pod. + // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-phase + // +optional + Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"` + // Current service state of pod. + // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions + // +optional + Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` + // A human readable message indicating details about why the pod is in this condition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` + // A brief CamelCase message indicating details about why the pod is in this state. + // e.g. 'OutOfDisk' + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + + // IP address of the host to which the pod is assigned. Empty if not yet scheduled. + // +optional + HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"` + // IP address allocated to the pod. Routable at least within the cluster. + // Empty if not yet allocated. + // +optional + PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"` + + // RFC 3339 date and time at which the object was acknowledged by the Kubelet. + // This is before the Kubelet pulled the container image(s) for the pod. + // +optional + StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"` + + // The list has one entry per init container in the manifest. The most recent successful + // init container will have ready = true, the most recently started container will have + // startTime set. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses + InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"` + + // The list has one entry per container in the manifest. Each entry is currently the output + // of `docker inspect`. + // More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses + // +optional + ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"` + // The Quality of Service (QOS) classification assigned to the pod based on resource requirements + // See PodQOSClass type for available QOS classes + // More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md + // +optional + QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` +} + +// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded +type PodStatusResult struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Most recently observed status of the pod. + // This data may not be up to date. + // Populated by the system. + // Read-only. 
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// +genclient=true + +// Pod is a collection of containers that can run on a host. This resource is created +// by clients and scheduled onto hosts. +type Pod struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the pod. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the pod. + // This data may not be up to date. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PodList is a list of Pods. +type PodList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of pods. + // More info: http://kubernetes.io/docs/user-guide/pods + Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// PodTemplateSpec describes the data a pod should have when created from a template +type PodTemplateSpec struct { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the pod. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +genclient=true + +// PodTemplate describes a template for creating copies of a predefined pod. +type PodTemplate struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Template defines the pods that will be created from this pod template. + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` +} + +// PodTemplateList is a list of PodTemplates. +type PodTemplateList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of pod templates + Items []PodTemplate `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ReplicationControllerSpec is the specification of a replication controller. +type ReplicationControllerSpec struct { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. 
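// Illustrative sketch (not part of the vendored file): a minimal Pod object built from
// the PodSpec sketch above. Metadata values are assumptions; examplePodSpec refers to
// the hypothetical helper shown earlier and is assumed to be in the same package.
func examplePod() *Pod {
	return &Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "demo",
			Namespace: "default",
			Labels:    map[string]string{"app": "demo"},
		},
		Spec: examplePodSpec(),
	}
}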
+ // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + + // Selector is a label query over pods that should match the Replicas count. + // If Selector is empty, it is defaulted to the labels present on the Pod template. + // Label keys and values that must match in order to be controlled by this replication + // controller, if empty defaulted to labels on Pod template. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` + + // TemplateRef is a reference to an object that describes the pod that will be created if + // insufficient replicas are detected. + // Reference to an object that describes the pod that will be created if insufficient replicas are detected. + // +optional + // TemplateRef *ObjectReference `json:"templateRef,omitempty"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. This takes precedence over a TemplateRef. + // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template + // +optional + Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` +} + +// ReplicationControllerStatus represents the current status of a replication +// controller. +type ReplicationControllerStatus struct { + // Replicas is the most recently observed number of replicas. + // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // The number of pods that have labels matching the labels of the pod template of the replication controller. + // +optional + FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` + + // The number of ready replicas for this replication controller. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"` + + // The number of available replicas (ready for at least minReadySeconds) for this replication controller. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"` + + // ObservedGeneration reflects the generation of the most recently observed replication controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` + + // Represents the latest available observations of a replication controller's current state. + // +optional + Conditions []ReplicationControllerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` +} + +type ReplicationControllerConditionType string + +// These are valid conditions of a replication controller.
+const ( + // ReplicationControllerReplicaFailure is added in a replication controller when one of its pods + // fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors, + // etc. or deleted due to kubelet being down or finalizers are failing. + ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure" +) + +// ReplicationControllerCondition describes the state of a replication controller at a certain point. +type ReplicationControllerCondition struct { + // Type of replication controller condition. + Type ReplicationControllerConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicationControllerConditionType"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient=true + +// ReplicationController represents the configuration of a replication controller. +type ReplicationController struct { + metav1.TypeMeta `json:",inline"` + + // If the Labels of a ReplicationController are empty, they are defaulted to + // be the same as the Pod(s) that the replication controller manages. + // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the specification of the desired behavior of the replication controller. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the most recently observed status of the replication controller. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ReplicationControllerList is a collection of replication controllers. +type ReplicationControllerList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of replication controllers. + // More info: http://kubernetes.io/docs/user-guide/replication-controller + Items []ReplicationController `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Session Affinity Type string +type ServiceAffinity string + +const ( + // ServiceAffinityClientIP is the Client IP based. + ServiceAffinityClientIP ServiceAffinity = "ClientIP" + + // ServiceAffinityNone - no session affinity. 
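// Illustrative sketch (not part of the vendored file): a ReplicationControllerSpec built
// from the fields above. The selector must be satisfied by the labels on the pod template;
// the label values and replica count are assumptions, and examplePodSpec refers to the
// hypothetical helper shown earlier.
func exampleReplicationControllerSpec() ReplicationControllerSpec {
	replicas := int32(3)
	labels := map[string]string{"app": "demo"}
	return ReplicationControllerSpec{
		Replicas: &replicas,
		Selector: labels,
		Template: &PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{Labels: labels},
			Spec:       examplePodSpec(),
		},
	}
}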
+ ServiceAffinityNone ServiceAffinity = "None" +) + +// Service Type string describes ingress methods for a service +type ServiceType string + +const ( + // ServiceTypeClusterIP means a service will only be accessible inside the + // cluster, via the cluster IP. + ServiceTypeClusterIP ServiceType = "ClusterIP" + + // ServiceTypeNodePort means a service will be exposed on one port of + // every node, in addition to 'ClusterIP' type. + ServiceTypeNodePort ServiceType = "NodePort" + + // ServiceTypeLoadBalancer means a service will be exposed via an + // external load balancer (if the cloud provider supports it), in addition + // to 'NodePort' type. + ServiceTypeLoadBalancer ServiceType = "LoadBalancer" + + // ServiceTypeExternalName means a service consists of only a reference to + // an external name that kubedns or equivalent will return as a CNAME + // record, with no exposing or proxying of any pods involved. + ServiceTypeExternalName ServiceType = "ExternalName" +) + +// ServiceStatus represents the current status of a service. +type ServiceStatus struct { + // LoadBalancer contains the current status of the load-balancer, + // if one is present. + // +optional + LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` +} + +// LoadBalancerStatus represents the status of a load-balancer. +type LoadBalancerStatus struct { + // Ingress is a list containing ingress points for the load-balancer. + // Traffic intended for the service should be sent to these ingress points. + // +optional + Ingress []LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` +} + +// LoadBalancerIngress represents the status of a load-balancer ingress point: +// traffic intended for the service should be sent to an ingress point. +type LoadBalancerIngress struct { + // IP is set for load-balancer ingress points that are IP based + // (typically GCE or OpenStack load-balancers) + // +optional + IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + + // Hostname is set for load-balancer ingress points that are DNS based + // (typically AWS load-balancers) + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` +} + +// ServiceSpec describes the attributes that a user creates on a service. +type ServiceSpec struct { + // The list of ports that are exposed by this service. + // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"` + + // Route service traffic to pods with label keys and values matching this + // selector. If empty or not present, the service is assumed to have an + // external process managing its endpoints, which Kubernetes will not + // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + // Ignored if type is ExternalName. + // More info: http://kubernetes.io/docs/user-guide/services#overview + // +optional + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` + + // clusterIP is the IP address of the service and is usually assigned + // randomly by the master. If an address is specified manually and is not in + // use by others, it will be allocated to the service; otherwise, creation + // of the service will fail. This field can not be changed through updates. + // Valid values are "None", empty string (""), or a valid IP address. 
"None" + // can be specified for headless services when proxying is not required. + // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if + // type is ExternalName. + // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + // +optional + ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"` + + // type determines how the Service is exposed. Defaults to ClusterIP. Valid + // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + // "ExternalName" maps to the specified externalName. + // "ClusterIP" allocates a cluster-internal IP address for load-balancing to + // endpoints. Endpoints are determined by the selector or if that is not + // specified, by manual construction of an Endpoints object. If clusterIP is + // "None", no virtual IP is allocated and the endpoints are published as a + // set of endpoints rather than a stable IP. + // "NodePort" builds on ClusterIP and allocates a port on every node which + // routes to the clusterIP. + // "LoadBalancer" builds on NodePort and creates an + // external load-balancer (if supported in the current cloud) which routes + // to the clusterIP. + // More info: http://kubernetes.io/docs/user-guide/services#overview + // +optional + Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"` + + // externalIPs is a list of IP addresses for which nodes in the cluster + // will also accept traffic for this service. These IPs are not managed by + // Kubernetes. The user is responsible for ensuring that traffic arrives + // at a node with this IP. A common example is external load-balancers + // that are not part of the Kubernetes system. A previous form of this + // functionality exists as the deprecatedPublicIPs field. When using this + // field, callers should also clear the deprecatedPublicIPs field. + // +optional + ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"` + + // deprecatedPublicIPs is deprecated and replaced by the externalIPs field + // with almost the exact same semantics. This field is retained in the v1 + // API for compatibility until at least 8/20/2016. It will be removed from + // any new API revisions. If both deprecatedPublicIPs *and* externalIPs are + // set, deprecatedPublicIPs is used. + // +k8s:conversion-gen=false + // +optional + DeprecatedPublicIPs []string `json:"deprecatedPublicIPs,omitempty" protobuf:"bytes,6,rep,name=deprecatedPublicIPs"` + + // Supports "ClientIP" and "None". Used to maintain session affinity. + // Enable client IP based session affinity. + // Must be ClientIP or None. + // Defaults to None. + // More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies + // +optional + SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" protobuf:"bytes,7,opt,name=sessionAffinity,casttype=ServiceAffinity"` + + // Only applies to Service Type: LoadBalancer + // LoadBalancer will get created with the IP specified in this field. + // This feature depends on whether the underlying cloud-provider supports specifying + // the loadBalancerIP when a load balancer is created. + // This field will be ignored if the cloud-provider does not support the feature. 
+ // +optional + LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"` + + // If specified and supported by the platform, traffic through the cloud-provider + // load-balancer will be restricted to the specified client IPs. This field will be ignored if the + // cloud-provider does not support the feature. + // More info: http://kubernetes.io/docs/user-guide/services-firewalls + // +optional + LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"` + + // externalName is the external reference that kubedns or equivalent will + // return as a CNAME record for this service. No proxying will be involved. + // Must be a valid DNS name and requires Type to be ExternalName. + // +optional + ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"` +} + +// ServicePort contains information on service's port. +type ServicePort struct { + // The name of this port within the service. This must be a DNS_LABEL. + // All ports within a ServiceSpec must have unique names. This maps to + // the 'Name' field in EndpointPort objects. + // Optional if only one ServicePort is defined on this service. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // The IP protocol for this port. Supports "TCP" and "UDP". + // Default is TCP. + // +optional + Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` + + // The port that will be exposed by this service. + Port int32 `json:"port" protobuf:"varint,3,opt,name=port"` + + // Number or name of the port to access on the pods targeted by the service. + // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + // If this is a string, it will be looked up as a named port in the + // target Pod's container ports. If this is not specified, the value + // of the 'port' field is used (an identity map). + // This field is ignored for services with clusterIP=None, and should be + // omitted or set equal to the 'port' field. + // More info: http://kubernetes.io/docs/user-guide/services#defining-a-service + // +optional + TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"` + + // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. + // Usually assigned by the system. If specified, it will be allocated to the service + // if unused or else creation of the service will fail. + // Default is to auto-allocate a port if the ServiceType of this Service requires one. + // More info: http://kubernetes.io/docs/user-guide/services#type--nodeport + // +optional + NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"` +} + +// +genclient=true + +// Service is a named abstraction of software service (for example, mysql) consisting of local port +// (for example 3306) that the proxy listens on, and the selector that determines which pods +// will answer requests sent through the proxy. +type Service struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the behavior of a service.
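// Illustrative sketch (not part of the vendored file): a NodePort ServiceSpec combining
// the ServiceSpec and ServicePort fields above. The Protocol constants and the intstr
// helpers are defined elsewhere in this package; the selector and port names are assumptions.
func exampleServiceSpec() ServiceSpec {
	return ServiceSpec{
		Type:     ServiceTypeNodePort,
		Selector: map[string]string{"app": "demo"},
		Ports: []ServicePort{{
			Name:       "http",
			Protocol:   ProtocolTCP,
			Port:       80,
			TargetPort: intstr.FromString("http"), // looked up as a named container port on the selected pods
		}},
		SessionAffinity: ServiceAffinityNone,
	}
}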
+ // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the service. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +const ( + // ClusterIPNone - do not assign a cluster IP + // no proxying required and no environment variables should be created for pods + ClusterIPNone = "None" +) + +// ServiceList holds a list of services. +type ServiceList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of services + Items []Service `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true + +// ServiceAccount binds together: +// * a name, understood by users, and perhaps by peripheral systems, for an identity +// * a principal that can be authenticated and authorized +// * a set of secrets +type ServiceAccount struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. + // More info: http://kubernetes.io/docs/user-guide/secrets + // +optional + Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"` + + // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images + // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets + // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. + // More info: http://kubernetes.io/docs/user-guide/secrets#manually-specifying-an-imagepullsecret + // +optional + ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"` + + // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. + // Can be overridden at the pod level. + // +optional + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,4,opt,name=automountServiceAccountToken"` +} + +// ServiceAccountList is a list of ServiceAccount objects +type ServiceAccountList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of ServiceAccounts. + // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts + Items []ServiceAccount `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true + +// Endpoints is a collection of endpoints that implement the actual service. 
Example: +// Name: "mysvc", +// Subsets: [ +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// }, +// { +// Addresses: [{"ip": "10.10.3.3"}], +// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] +// }, +// ] +type Endpoints struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The set of all endpoints is the union of all subsets. Addresses are placed into + // subsets according to the IPs they share. A single address with multiple ports, + // some of which are ready and some of which are not (because they come from + // different containers) will result in the address being displayed in different + // subsets for the different ports. No address will appear in both Addresses and + // NotReadyAddresses in the same subset. + // Sets of addresses and ports that comprise a service. + Subsets []EndpointSubset `json:"subsets" protobuf:"bytes,2,rep,name=subsets"` +} + +// EndpointSubset is a group of addresses with a common set of ports. The +// expanded set of endpoints is the Cartesian product of Addresses x Ports. +// For example, given: +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// } +// The resulting set of endpoints can be viewed as: +// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], +// b: [ 10.10.1.1:309, 10.10.2.2:309 ] +type EndpointSubset struct { + // IP addresses which offer the related ports that are marked as ready. These endpoints + // should be considered safe for load balancers and clients to utilize. + // +optional + Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"` + // IP addresses which offer the related ports but are not currently marked as ready + // because they have not yet finished starting, have recently failed a readiness check, + // or have recently failed a liveness check. + // +optional + NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty" protobuf:"bytes,2,rep,name=notReadyAddresses"` + // Port numbers available on the related IP addresses. + // +optional + Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"` +} + +// EndpointAddress is a tuple that describes single IP address. +type EndpointAddress struct { + // The IP of this endpoint. + // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), + // or link-local multicast ((224.0.0.0/24). + // IPv6 is also accepted but not fully supported on all platforms. Also, certain + // kubernetes components, like kube-proxy, are not IPv6 ready. + // TODO: This should allow hostname or IP, See #4447. + IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` + // The Hostname of this endpoint + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. + // +optional + NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,4,opt,name=nodeName"` + // Reference to object providing the endpoint. + // +optional + TargetRef *ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,2,opt,name=targetRef"` +} + +// EndpointPort is a tuple that describes a single port. 
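// Illustrative sketch (not part of the vendored file, assumed to sit in this package):
// the Cartesian-product behaviour described above, reproducing the "mysvc" example from
// the doc comment as a Go literal.
func exampleEndpoints() Endpoints {
	return Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: "mysvc"},
		Subsets: []EndpointSubset{{
			Addresses: []EndpointAddress{{IP: "10.10.1.1"}, {IP: "10.10.2.2"}},
			Ports:     []EndpointPort{{Name: "a", Port: 8675}, {Name: "b", Port: 309}},
		}},
	}
}

// Expanded, the single subset above yields:
//   a: [10.10.1.1:8675, 10.10.2.2:8675]
//   b: [10.10.1.1:309,  10.10.2.2:309]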
+type EndpointPort struct { + // The name of this port (corresponds to ServicePort.Name). + // Must be a DNS_LABEL. + // Optional only if one port is defined. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // The port number of the endpoint. + Port int32 `json:"port" protobuf:"varint,2,opt,name=port"` + + // The IP protocol for this port. + // Must be UDP or TCP. + // Default is TCP. + // +optional + Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"` +} + +// EndpointsList is a list of endpoints. +type EndpointsList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of endpoints. + Items []Endpoints `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// NodeSpec describes the attributes that a node is created with. +type NodeSpec struct { + // PodCIDR represents the pod IP range assigned to the node. + // +optional + PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"` + // External ID of the node assigned by some machine database (e.g. a cloud provider). + // Deprecated. + // +optional + ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"` + // ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID> + // +optional + ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"` + // Unschedulable controls node schedulability of new pods. By default, node is schedulable. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration + // +optional + Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"` + // If specified, the node's taints. + // +optional + Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"` +} + +// DaemonEndpoint contains information about a single Daemon endpoint. +type DaemonEndpoint struct { + /* + The port tag was not properly in quotes in earlier releases, so it must be + uppercased for backwards compat (since it was falling back to var name of + 'Port'). + */ + + // Port number of the given endpoint. + Port int32 `json:"Port" protobuf:"varint,1,opt,name=Port"` +} + +// NodeDaemonEndpoints lists ports opened by daemons running on the Node. +type NodeDaemonEndpoints struct { + // Endpoint on which Kubelet is listening. + // +optional + KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"` +} + +// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. +type NodeSystemInfo struct { + // MachineID reported by the node. For unique machine identification + // in the cluster this field is preferred. Learn more from man(5) + // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html + MachineID string `json:"machineID" protobuf:"bytes,1,opt,name=machineID"` + // SystemUUID reported by the node. For unique machine identification + // MachineID is preferred. This field is specific to Red Hat hosts + // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html + SystemUUID string `json:"systemUUID" protobuf:"bytes,2,opt,name=systemUUID"` + // Boot ID reported by the node.
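// Illustrative sketch (not part of the vendored file): the DaemonEndpoint comment above
// notes that the JSON key keeps its uppercase "Port" spelling for backward compatibility.
// This assumes encoding/json and fmt are imported; 10250 is only an illustrative kubelet port.
func exampleDaemonEndpointJSON() {
	endpoints := NodeDaemonEndpoints{KubeletEndpoint: DaemonEndpoint{Port: 10250}}
	b, _ := json.Marshal(endpoints)
	fmt.Println(string(b)) // prints {"kubeletEndpoint":{"Port":10250}}; note the uppercase "Port" key
}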
+ BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"` + // Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64). + KernelVersion string `json:"kernelVersion" protobuf:"bytes,4,opt,name=kernelVersion"` + // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). + OSImage string `json:"osImage" protobuf:"bytes,5,opt,name=osImage"` + // ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0). + ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"` + // Kubelet Version reported by the node. + KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"` + // KubeProxy Version reported by the node. + KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"` + // The Operating System reported by the node + OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"` + // The Architecture reported by the node + Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"` +} + +// NodeStatus is information about the current status of a node. +type NodeStatus struct { + // Capacity represents the total resources of a node. + // More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity for more details. + // +optional + Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` + // Allocatable represents the resources of a node that are available for scheduling. + // Defaults to Capacity. + // +optional + Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,2,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"` + // NodePhase is the recently observed lifecycle phase of the node. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-phase + // The field is never populated, and now is deprecated. + // +optional + Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"` + // Conditions is an array of current observed node conditions. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-condition + // +optional + Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` + // List of addresses reachable to the node. + // Queried from cloud provider, if available. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-addresses + // +optional + Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"` + // Endpoints of daemons running on the Node. + // +optional + DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"` + // Set of ids/uuids to uniquely identify the node. + // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info + // +optional + NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"` + // List of container images on this node + // +optional + Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"` + // List of attachable volumes in use (mounted) by the node. 
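// Illustrative sketch, not part of the vendored upstream file: the legacy uppercase "Port"
// JSON key that the DaemonEndpoint comment above explains. Assumes v1
// "k8s.io/client-go/pkg/api/v1" and the standard library's encoding/json; 10250 is just an
// example port value.
func exampleDaemonEndpointJSON() ([]byte, error) {
	de := v1.NodeDaemonEndpoints{KubeletEndpoint: v1.DaemonEndpoint{Port: 10250}}
	// Serializes as {"kubeletEndpoint":{"Port":10250}} because of the uppercase tag.
	return json.Marshal(de)
}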
+ // +optional + VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"` + // List of volumes that are attached to the node. + // +optional + VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"` +} + +type UniqueVolumeName string + +// AttachedVolume describes a volume attached to a node +type AttachedVolume struct { + // Name of the attached volume + Name UniqueVolumeName `json:"name" protobuf:"bytes,1,rep,name=name"` + + // DevicePath represents the device path where the volume should be available + DevicePath string `json:"devicePath" protobuf:"bytes,2,rep,name=devicePath"` +} + +// AvoidPods describes pods that should avoid this node. This is the value for a +// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and +// will eventually become a field of NodeStatus. +type AvoidPods struct { + // Bounded-sized list of signatures of pods that should avoid this node, sorted + // in timestamp order from oldest to newest. Size of the slice is unspecified. + // +optional + PreferAvoidPods []PreferAvoidPodsEntry `json:"preferAvoidPods,omitempty" protobuf:"bytes,1,rep,name=preferAvoidPods"` +} + +// Describes a class of pods that should avoid this node. +type PreferAvoidPodsEntry struct { + // The class of pods. + PodSignature PodSignature `json:"podSignature" protobuf:"bytes,1,opt,name=podSignature"` + // Time at which this entry was added to the list. + // +optional + EvictionTime metav1.Time `json:"evictionTime,omitempty" protobuf:"bytes,2,opt,name=evictionTime"` + // (brief) reason why this entry was added to the list. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // Human readable message indicating why this entry was added to the list. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` +} + +// Describes the class of pods that should avoid this node. +// Exactly one field should be set. +type PodSignature struct { + // Reference to controller whose pods should avoid this node. + // +optional + PodController *metav1.OwnerReference `json:"podController,omitempty" protobuf:"bytes,1,opt,name=podController"` +} + +// Describe a container image +type ContainerImage struct { + // Names by which this image is known. + // e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] + Names []string `json:"names" protobuf:"bytes,1,rep,name=names"` + // The size of the image in bytes. + // +optional + SizeBytes int64 `json:"sizeBytes,omitempty" protobuf:"varint,2,opt,name=sizeBytes"` +} + +type NodePhase string + +// These are the valid phases of node. +const ( + // NodePending means the node has been created/added by the system, but not configured. + NodePending NodePhase = "Pending" + // NodeRunning means the node has been configured and has Kubernetes components running. + NodeRunning NodePhase = "Running" + // NodeTerminated means the node has been removed from the cluster. + NodeTerminated NodePhase = "Terminated" +) + +type NodeConditionType string + +// These are valid conditions of node. Currently, we don't have enough information to decide +// node condition. In the future, we will add more. The proposed set of conditions are: +// NodeReachable, NodeLive, NodeReady, NodeSchedulable, NodeRunnable. +const ( + // NodeReady means kubelet is healthy and ready to accept pods. 
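// Illustrative sketch, not part of the vendored upstream file: reading a node's Ready
// condition out of the NodeStatus.Conditions slice defined above. Assumes v1
// "k8s.io/client-go/pkg/api/v1" and that v1.ConditionTrue ("True") is the ConditionStatus
// constant defined elsewhere in this package.
func nodeIsReady(node v1.Node) bool {
	for _, cond := range node.Status.Conditions {
		if cond.Type == v1.NodeReady {
			return cond.Status == v1.ConditionTrue
		}
	}
	return false
}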
+ NodeReady NodeConditionType = "Ready" + // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk + // space on the node. + NodeOutOfDisk NodeConditionType = "OutOfDisk" + // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. + NodeMemoryPressure NodeConditionType = "MemoryPressure" + // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. + NodeDiskPressure NodeConditionType = "DiskPressure" + // NodeNetworkUnavailable means that network for the node is not correctly configured. + NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" + // NodeInodePressure means the kubelet is under pressure due to insufficient available inodes. + NodeInodePressure NodeConditionType = "InodePressure" +) + +// NodeCondition contains condition information for a node. +type NodeCondition struct { + // Type of node condition. + Type NodeConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeConditionType"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // Last time we got an update on a given condition. + // +optional + LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty" protobuf:"bytes,3,opt,name=lastHeartbeatTime"` + // Last time the condition transit from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // (brief) reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + +type NodeAddressType string + +// These are valid address type of node. +const ( + // Deprecated: NodeLegacyHostIP will be removed in 1.7. + NodeLegacyHostIP NodeAddressType = "LegacyHostIP" + NodeHostName NodeAddressType = "Hostname" + NodeExternalIP NodeAddressType = "ExternalIP" + NodeInternalIP NodeAddressType = "InternalIP" + NodeExternalDNS NodeAddressType = "ExternalDNS" + NodeInternalDNS NodeAddressType = "InternalDNS" +) + +// NodeAddress contains information for the node's address. +type NodeAddress struct { + // Node address type, one of Hostname, ExternalIP or InternalIP. + Type NodeAddressType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeAddressType"` + // The node address. + Address string `json:"address" protobuf:"bytes,2,opt,name=address"` +} + +// ResourceName is the name identifying various resources in a ResourceList. +type ResourceName string + +// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters, +// with the -, _, and . characters allowed anywhere, except the first or last character. +// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than +// camel case, separating compound words. +// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name. +const ( + // CPU, in cores. (500m = .5 cores) + ResourceCPU ResourceName = "cpu" + // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceMemory ResourceName = "memory" + // Volume size, in bytes (e,g. 
5Gi = 5GiB = 5 * 1024 * 1024 * 1024) + ResourceStorage ResourceName = "storage" + // NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned. + ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu" + // Number of Pods that may be running on this Node: see ResourcePods +) + +const ( + // Namespace prefix for opaque counted resources (alpha). + ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-" +) + +// ResourceList is a set of (resource name, quantity) pairs. +type ResourceList map[ResourceName]resource.Quantity + +// +genclient=true +// +nonNamespaced=true + +// Node is a worker node in Kubernetes. +// Each node will have a unique identifier in the cache (i.e. in etcd). +type Node struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the behavior of a node. + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the node. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// NodeList is the whole list of all Nodes which have been registered with master. +type NodeList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of nodes + Items []Node `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// FinalizerName is the name identifying a finalizer during namespace lifecycle. +type FinalizerName string + +// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or +// in metav1. +const ( + FinalizerKubernetes FinalizerName = "kubernetes" +) + +// NamespaceSpec describes the attributes on a Namespace. +type NamespaceSpec struct { + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. + // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers + // +optional + Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"` +} + +// NamespaceStatus is information about the current status of a Namespace. +type NamespaceStatus struct { + // Phase is the current lifecycle phase of the namespace. + // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#phases + // +optional + Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"` +} + +type NamespacePhase string + +// These are the valid phases of a namespace. +const ( + // NamespaceActive means the namespace is available for use in the system + NamespaceActive NamespacePhase = "Active" + // NamespaceTerminating means the namespace is undergoing graceful termination + NamespaceTerminating NamespacePhase = "Terminating" +) + +// +genclient=true +// +nonNamespaced=true + +// Namespace provides a scope for Names. 
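// Illustrative sketch, not part of the vendored upstream file: a ResourceList (as used for
// Node capacity and allocatable above) maps resource names to parsed quantities. Assumes
// v1 "k8s.io/client-go/pkg/api/v1" and resource "k8s.io/apimachinery/pkg/api/resource";
// the quantity values are arbitrary examples.
func exampleNodeCapacity() v1.ResourceList {
	return v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("4"),
		v1.ResourceMemory: resource.MustParse("8Gi"),
		v1.ResourcePods:   resource.MustParse("110"),
	}
}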
+// Use of multiple namespaces is optional. +type Namespace struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the behavior of the Namespace. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status describes the current status of a Namespace. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// NamespaceList is a list of Namespaces. +type NamespaceList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Namespace objects in the list. + // More info: http://kubernetes.io/docs/user-guide/namespaces + Items []Namespace `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Binding ties one object to another. +// For example, a pod is bound to a node by a scheduler. +type Binding struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The target object that you want to bind to the standard object. + Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"` +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +// +k8s:openapi-gen=false +type Preconditions struct { + // Specifies the target UID. + // +optional + UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` +} + +// DeletionPropagation decides if a deletion will propagate to the dependents of the object, and how the garbage collector will handle the propagation. +type DeletionPropagation string + +const ( + // Orphans the dependents. + DeletePropagationOrphan DeletionPropagation = "Orphan" + // Deletes the object from the key-value store, the garbage collector will delete the dependents in the background. + DeletePropagationBackground DeletionPropagation = "Background" + // The object exists in the key-value store until the garbage collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the key-value store. + // API sever will put the "DeletingDependents" finalizer on the object, and sets its deletionTimestamp. + // This policy is cascading, i.e., the dependents will be deleted with Foreground. + DeletePropagationForeground DeletionPropagation = "Foreground" +) + +// DeleteOptions may be provided when deleting an API object +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +// +k8s:openapi-gen=false +type DeleteOptions struct { + metav1.TypeMeta `json:",inline"` + + // The duration in seconds before the object should be deleted. Value must be non-negative integer. + // The value zero indicates delete immediately. If this value is nil, the default grace period for the + // specified type will be used. 
+ // Defaults to a per object value if not specified. zero means delete immediately. + // +optional + GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=gracePeriodSeconds"` + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. + // +optional + Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"` + + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. + // +optional + OrphanDependents *bool `json:"orphanDependents,omitempty" protobuf:"varint,3,opt,name=orphanDependents"` + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // +optional + PropagationPolicy *DeletionPropagation `protobuf:"bytes,4,opt,name=propagationPolicy,casttype=DeletionPropagation"` +} + +// ListOptions is the query options to a standard REST list call. +// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. +// +k8s:openapi-gen=false +type ListOptions struct { + metav1.TypeMeta `json:",inline"` + + // A selector to restrict the list of returned objects by their labels. + // Defaults to everything. + // +optional + LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` + // A selector to restrict the list of returned objects by their fields. + // Defaults to everything. + // +optional + FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"` + // Watch for changes to the described resources and return them as a stream of + // add, update, and remove notifications. Specify resourceVersion. + // +optional + Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"` + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"` + // Timeout for the list/watch call. + // +optional + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"` +} + +// PodLogOptions is the query options for a Pod's logs REST call. +type PodLogOptions struct { + metav1.TypeMeta `json:",inline"` + + // The container for which to stream logs. Defaults to only container if there is one container in the pod. + // +optional + Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"` + // Follow the log stream of the pod. Defaults to false. + // +optional + Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"` + // Return previous terminated container logs. Defaults to false. 
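// Illustrative sketch, not part of the vendored upstream file: filling the (deprecated,
// pre-meta/v1) DeleteOptions above with a zero grace period and foreground cascading
// deletion. Assumes v1 "k8s.io/client-go/pkg/api/v1".
func exampleDeleteOptions() v1.DeleteOptions {
	zero := int64(0)
	policy := v1.DeletePropagationForeground
	return v1.DeleteOptions{
		GracePeriodSeconds: &zero,
		PropagationPolicy:  &policy,
	}
}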
+ // +optional + Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"` + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + // +optional + SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"` + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + // +optional + SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"` + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. Defaults to false. + // +optional + Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"` + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + // +optional + TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"` + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + // +optional + LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"` +} + +// PodAttachOptions is the query options to a Pod's remote attach call. +// --- +// TODO: merge w/ PodExecOptions below for stdin, stdout, etc +// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY +type PodAttachOptions struct { + metav1.TypeMeta `json:",inline"` + + // Stdin if true, redirects the standard input stream of the pod for this call. + // Defaults to false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"` + + // Stdout if true indicates that stdout is to be redirected for the attach call. + // Defaults to true. + // +optional + Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"` + + // Stderr if true indicates that stderr is to be redirected for the attach call. + // Defaults to true. + // +optional + Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"` + + // TTY if true indicates that a tty will be allocated for the attach call. + // This is passed through the container runtime so the tty + // is allocated on the worker node by the container runtime. + // Defaults to false. + // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"` + + // The container in which to execute the command. + // Defaults to only container if there is only one container in the pod. + // +optional + Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"` +} + +// PodExecOptions is the query options to a Pod's remote exec call. 
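// Illustrative sketch, not part of the vendored upstream file: PodLogOptions for tailing
// the last 100 lines of a container's logs from the past hour, with timestamps. Assumes
// v1 "k8s.io/client-go/pkg/api/v1"; the container name "app" is a made-up example.
func exampleLogOptions() v1.PodLogOptions {
	since := int64(3600)
	tail := int64(100)
	return v1.PodLogOptions{
		Container:    "app",
		Follow:       true,
		SinceSeconds: &since,
		TailLines:    &tail,
		Timestamps:   true,
	}
}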
+// --- +// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging +// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY +type PodExecOptions struct { + metav1.TypeMeta `json:",inline"` + + // Redirect the standard input stream of the pod for this call. + // Defaults to false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"` + + // Redirect the standard output stream of the pod for this call. + // Defaults to true. + // +optional + Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"` + + // Redirect the standard error stream of the pod for this call. + // Defaults to true. + // +optional + Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"` + + // TTY if true indicates that a tty will be allocated for the exec call. + // Defaults to false. + // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"` + + // Container in which to execute the command. + // Defaults to only container if there is only one container in the pod. + // +optional + Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"` + + // Command is the remote command to execute. argv array. Not executed within a shell. + Command []string `json:"command" protobuf:"bytes,6,rep,name=command"` +} + +// PodPortForwardOptions is the query options to a Pod's port forward call +// when using WebSockets. +// The `port` query parameter must specify the port or +// ports (comma separated) to forward over. +// Port forwarding over SPDY does not use these options. It requires the port +// to be passed in the `port` header as part of request. +type PodPortForwardOptions struct { + metav1.TypeMeta `json:",inline"` + + // List of ports to forward + // Required when using WebSockets + // +optional + Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"` +} + +// PodProxyOptions is the query options to a Pod's proxy call. +type PodProxyOptions struct { + metav1.TypeMeta `json:",inline"` + + // Path is the URL path to use for the current proxy request to pod. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` +} + +// NodeProxyOptions is the query options to a Node's proxy call. +type NodeProxyOptions struct { + metav1.TypeMeta `json:",inline"` + + // Path is the URL path to use for the current proxy request to node. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` +} + +// ServiceProxyOptions is the query options to a Service's proxy call. +type ServiceProxyOptions struct { + metav1.TypeMeta `json:",inline"` + + // Path is the part of URLs that include service endpoints, suffixes, + // and parameters to use for the current proxy request to service. + // For example, the whole request URL is + // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. + // Path is _search?q=user:kimchy. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +type ObjectReference struct { + // Kind of the referent. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` + // Namespace of the referent. 
+ // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` + // Name of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"` + // UID of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + // API version of the referent. + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"` + // Specific resourceVersion to which this reference is made, if any. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` + + // If referring to a piece of an object instead of an entire object, this string + // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + // For example, if the object reference is to a container within a pod, this would take on a value like: + // "spec.containers{name}" (where "name" refers to the name of the container that triggered + // the event) or if no container name is specified "spec.containers[2]" (container with + // index 2 in this pod). This syntax is chosen only to have some well-defined way of + // referencing a part of an object. + // TODO: this design is not final and this field is subject to change in the future. + // +optional + FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,7,opt,name=fieldPath"` +} + +// LocalObjectReference contains enough information to let you locate the +// referenced object inside the same namespace. +type LocalObjectReference struct { + // Name of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // TODO: Add other useful fields. apiVersion, kind, uid? + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` +} + +// SerializedReference is a reference to serialized object. +type SerializedReference struct { + metav1.TypeMeta `json:",inline"` + // The reference to an object in the system. + // +optional + Reference ObjectReference `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"` +} + +// EventSource contains information for an event. +type EventSource struct { + // Component from which the event is generated. + // +optional + Component string `json:"component,omitempty" protobuf:"bytes,1,opt,name=component"` + // Node name on which the event is generated. + // +optional + Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"` +} + +// Valid values for event types (new types could be added in future) +const ( + // Information only and will not cause any problems + EventTypeNormal string = "Normal" + // These events are to warn that something might go wrong + EventTypeWarning string = "Warning" +) + +// +genclient=true + +// Event is a report of an event somewhere in the cluster. +// TODO: Decide whether to store these separately or with the object they apply to. +type Event struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + + // The object that this event is about. + InvolvedObject ObjectReference `json:"involvedObject" protobuf:"bytes,2,opt,name=involvedObject"` + + // This should be a short, machine understandable string that gives the reason + // for the transition into the object's current status. + // TODO: provide exact specification for format. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + + // A human-readable description of the status of this operation. + // TODO: decide on maximum length. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` + + // The component reporting this event. Should be a short machine understandable string. + // +optional + Source EventSource `json:"source,omitempty" protobuf:"bytes,5,opt,name=source"` + + // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) + // +optional + FirstTimestamp metav1.Time `json:"firstTimestamp,omitempty" protobuf:"bytes,6,opt,name=firstTimestamp"` + + // The time at which the most recent occurrence of this event was recorded. + // +optional + LastTimestamp metav1.Time `json:"lastTimestamp,omitempty" protobuf:"bytes,7,opt,name=lastTimestamp"` + + // The number of times this event has occurred. + // +optional + Count int32 `json:"count,omitempty" protobuf:"varint,8,opt,name=count"` + + // Type of this event (Normal, Warning), new types could be added in the future + // +optional + Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"` +} + +// EventList is a list of events. +type EventList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of events + Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// List holds a list of objects, which may not be known by the server. +type List struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of objects + Items []runtime.RawExtension `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// LimitType is a type of object that is limited +type LimitType string + +const ( + // Limit that applies to all pods in a namespace + LimitTypePod LimitType = "Pod" + // Limit that applies to all containers in a namespace + LimitTypeContainer LimitType = "Container" + // Limit that applies to all persistent volume claims in a namespace + LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" +) + +// LimitRangeItem defines a min/max usage limit for any resource that matches on kind. +type LimitRangeItem struct { + // Type of resource that this limit applies to. + // +optional + Type LimitType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=LimitType"` + // Max usage constraints on this kind by resource name. + // +optional + Max ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max,casttype=ResourceList,castkey=ResourceName"` + // Min usage constraints on this kind by resource name. 
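// Illustrative sketch, not part of the vendored upstream file: an Event of type Normal
// pointing at a Pod through the ObjectReference type above. Assumes v1
// "k8s.io/client-go/pkg/api/v1" and metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"; the
// pod name, event name, and reason are made-up values.
func exampleEvent() v1.Event {
	now := metav1.Now()
	return v1.Event{
		ObjectMeta:     metav1.ObjectMeta{Name: "mypod.event1", Namespace: "default"},
		InvolvedObject: v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "mypod"},
		Reason:         "Started",
		Message:        "Started container",
		Type:           v1.EventTypeNormal,
		Count:          1,
		FirstTimestamp: now,
		LastTimestamp:  now,
	}
}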
+ // +optional + Min ResourceList `json:"min,omitempty" protobuf:"bytes,3,rep,name=min,casttype=ResourceList,castkey=ResourceName"` + // Default resource requirement limit value by resource name if resource limit is omitted. + // +optional + Default ResourceList `json:"default,omitempty" protobuf:"bytes,4,rep,name=default,casttype=ResourceList,castkey=ResourceName"` + // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. + // +optional + DefaultRequest ResourceList `json:"defaultRequest,omitempty" protobuf:"bytes,5,rep,name=defaultRequest,casttype=ResourceList,castkey=ResourceName"` + // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource. + // +optional + MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty" protobuf:"bytes,6,rep,name=maxLimitRequestRatio,casttype=ResourceList,castkey=ResourceName"` +} + +// LimitRangeSpec defines a min/max usage limit for resources that match on kind. +type LimitRangeSpec struct { + // Limits is the list of LimitRangeItem objects that are enforced. + Limits []LimitRangeItem `json:"limits" protobuf:"bytes,1,rep,name=limits"` +} + +// +genclient=true + +// LimitRange sets resource usage limits for each kind of resource in a Namespace. +type LimitRange struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the limits enforced. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// LimitRangeList is a list of LimitRange items. +type LimitRangeList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of LimitRange objects. + // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_limit_range.md + Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// The following identify resource constants for Kubernetes object types +const ( + // Pods, number + ResourcePods ResourceName = "pods" + // Services, number + ResourceServices ResourceName = "services" + // ReplicationControllers, number + ResourceReplicationControllers ResourceName = "replicationcontrollers" + // ResourceQuotas, number + ResourceQuotas ResourceName = "resourcequotas" + // ResourceSecrets, number + ResourceSecrets ResourceName = "secrets" + // ResourceConfigMaps, number + ResourceConfigMaps ResourceName = "configmaps" + // ResourcePersistentVolumeClaims, number + ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" + // ResourceServicesNodePorts, number + ResourceServicesNodePorts ResourceName = "services.nodeports" + // ResourceServicesLoadBalancers, number + ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" + // CPU request, in cores. (500m = .5 cores) + ResourceRequestsCPU ResourceName = "requests.cpu" + // Memory request, in bytes. 
(500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceRequestsMemory ResourceName = "requests.memory" + // Storage request, in bytes + ResourceRequestsStorage ResourceName = "requests.storage" + // CPU limit, in cores. (500m = .5 cores) + ResourceLimitsCPU ResourceName = "limits.cpu" + // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceLimitsMemory ResourceName = "limits.memory" +) + +// A ResourceQuotaScope defines a filter that must match each object tracked by a quota +type ResourceQuotaScope string + +const ( + // Match all pod objects where spec.activeDeadlineSeconds + ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" + // Match all pod objects where !spec.activeDeadlineSeconds + ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating" + // Match all pod objects that have best effort quality of service + ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" + // Match all pod objects that do not have best effort quality of service + ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" +) + +// ResourceQuotaSpec defines the desired hard limits to enforce for Quota. +type ResourceQuotaSpec struct { + // Hard is the set of desired hard limits for each named resource. + // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // +optional + Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` + // A collection of filters that must match each object tracked by a quota. + // If not specified, the quota matches all objects. + // +optional + Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"` +} + +// ResourceQuotaStatus defines the enforced hard limits and observed use. +type ResourceQuotaStatus struct { + // Hard is the set of enforced hard limits for each named resource. + // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // +optional + Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` + // Used is the current observed total usage of the resource in the namespace. + // +optional + Used ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"` +} + +// +genclient=true + +// ResourceQuota sets aggregate quota restrictions enforced per namespace +type ResourceQuota struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired quota. + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status defines the actual enforced quota and its current usage. + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ResourceQuotaList is a list of ResourceQuota items. +type ResourceQuotaList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
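// Illustrative sketch, not part of the vendored upstream file: a ResourceQuotaSpec capping
// pod count and aggregate requests/limits in a namespace, scoped to non-best-effort pods.
// Assumes v1 "k8s.io/client-go/pkg/api/v1" and resource "k8s.io/apimachinery/pkg/api/resource";
// the limits chosen are arbitrary examples.
func exampleQuotaSpec() v1.ResourceQuotaSpec {
	return v1.ResourceQuotaSpec{
		Hard: v1.ResourceList{
			v1.ResourcePods:         resource.MustParse("20"),
			v1.ResourceRequestsCPU:  resource.MustParse("4"),
			v1.ResourceLimitsMemory: resource.MustParse("16Gi"),
		},
		Scopes: []v1.ResourceQuotaScope{v1.ResourceQuotaScopeNotBestEffort},
	}
}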
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ResourceQuota objects. + // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true + +// Secret holds secret data of a certain type. The total bytes of the values in +// the Data field must be less than MaxSecretSize bytes. +type Secret struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN + // or leading dot followed by valid DNS_SUBDOMAIN. + // The serialized form of the secret data is a base64 encoded string, + // representing the arbitrary (possibly non-string) data value here. + // Described in https://tools.ietf.org/html/rfc4648#section-4 + // +optional + Data map[string][]byte `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"` + + // stringData allows specifying non-binary secret data in string form. + // It is provided as a write-only convenience method. + // All keys and values are merged into the data field on write, overwriting any existing values. + // It is never output when reading from the API. + // +k8s:conversion-gen=false + // +optional + StringData map[string]string `json:"stringData,omitempty" protobuf:"bytes,4,rep,name=stringData"` + + // Used to facilitate programmatic handling of secret data. + // +optional + Type SecretType `json:"type,omitempty" protobuf:"bytes,3,opt,name=type,casttype=SecretType"` +} + +const MaxSecretSize = 1 * 1024 * 1024 + +type SecretType string + +const ( + // SecretTypeOpaque is the default. 
Arbitrary user-defined data + SecretTypeOpaque SecretType = "Opaque" + + // SecretTypeServiceAccountToken contains a token that identifies a service account to the API + // + // Required fields: + // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies + // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies + // - Secret.Data["token"] - a token that identifies the service account to the API + SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token" + + // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountNameKey = "kubernetes.io/service-account.name" + // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountUIDKey = "kubernetes.io/service-account.uid" + // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets + ServiceAccountTokenKey = "token" + // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets + ServiceAccountKubeconfigKey = "kubernetes.kubeconfig" + // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets + ServiceAccountRootCAKey = "ca.crt" + // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls + ServiceAccountNamespaceKey = "namespace" + + // SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg + // + // Required fields: + // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file + SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg" + + // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets + DockerConfigKey = ".dockercfg" + + // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json + // + // Required fields: + // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file + SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson" + + // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets + DockerConfigJsonKey = ".dockerconfigjson" + + // SecretTypeBasicAuth contains data needed for basic authentication. + // + // Required at least one of fields: + // - Secret.Data["username"] - username used for authentication + // - Secret.Data["password"] - password or token needed for authentication + SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth" + + // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets + BasicAuthUsernameKey = "username" + // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets + BasicAuthPasswordKey = "password" + + // SecretTypeSSHAuth contains data needed for SSH authetication. + // + // Required field: + // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication + SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" + + // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets + SSHAuthPrivateKey = "ssh-privatekey" + // SecretTypeTLS contains information about a TLS client or server secret. It + // is primarily used with TLS termination of the Ingress resource, but may be + // used in other types. 
+ // + // Required fields: + // - Secret.Data["tls.key"] - TLS private key. + // Secret.Data["tls.crt"] - TLS certificate. + // TODO: Consider supporting different formats, specifying CA/destinationCA. + SecretTypeTLS SecretType = "kubernetes.io/tls" + + // TLSCertKey is the key for tls certificates in a TLS secert. + TLSCertKey = "tls.crt" + // TLSPrivateKeyKey is the key for the private key field in a TLS secret. + TLSPrivateKeyKey = "tls.key" +) + +// SecretList is a list of Secret. +type SecretList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of secret objects. + // More info: http://kubernetes.io/docs/user-guide/secrets + Items []Secret `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true + +// ConfigMap holds configuration data for pods to consume. +type ConfigMap struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Data contains the configuration data. + // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot. + // +optional + Data map[string]string `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"` +} + +// ConfigMapList is a resource containing a list of ConfigMap objects. +type ConfigMapList struct { + metav1.TypeMeta `json:",inline"` + + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of ConfigMaps. + Items []ConfigMap `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Type and constants for component health validation. +type ComponentConditionType string + +// These are the valid conditions for the component. +const ( + ComponentHealthy ComponentConditionType = "Healthy" +) + +// Information about the condition of a component. +type ComponentCondition struct { + // Type of condition for a component. + // Valid value: "Healthy" + Type ComponentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ComponentConditionType"` + // Status of the condition for a component. + // Valid values for "Healthy": "True", "False", or "Unknown". + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // Message about the condition for a component. + // For example, information about a health check. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` + // Condition error code for a component. + // For example, a health check error code. + // +optional + Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"` +} + +// +genclient=true +// +nonNamespaced=true + +// ComponentStatus (and ComponentStatusList) holds the cluster validation info. +type ComponentStatus struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
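// Illustrative sketch, not part of the vendored upstream file: an Opaque Secret filled via
// the write-only stringData convenience field described above, plus a ConfigMap. Assumes
// v1 "k8s.io/client-go/pkg/api/v1" and metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"; the
// object names, keys, and values are made up.
func exampleSecretAndConfigMap() (v1.Secret, v1.ConfigMap) {
	sec := v1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "db-credentials"},
		Type:       v1.SecretTypeOpaque,
		StringData: map[string]string{"username": "admin", "password": "s3cr3t"},
	}
	cm := v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "app-config"},
		Data:       map[string]string{"log-level": "debug"},
	}
	return sec, cm
}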
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of component conditions observed + // +optional + Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` +} + +// Status of all the conditions for the component as a list of ComponentStatus objects. +type ComponentStatusList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of ComponentStatus objects. + Items []ComponentStatus `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// DownwardAPIVolumeSource represents a volume containing downward API info. +// Downward API volumes support ownership management and SELinux relabeling. +type DownwardAPIVolumeSource struct { + // Items is a list of downward API volume file + // +optional + Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"` + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"` +} + +const ( + DownwardAPIVolumeSourceDefaultMode int32 = 0644 +) + +// DownwardAPIVolumeFile represents information to create the file containing the pod field +type DownwardAPIVolumeFile struct { + // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' + Path string `json:"path" protobuf:"bytes,1,opt,name=path"` + // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. + // +optional + FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"` + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // +optional + ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,3,opt,name=resourceFieldRef"` + // Optional: mode bits to use on this file, must be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + Mode *int32 `json:"mode,omitempty" protobuf:"varint,4,opt,name=mode"` +} + +// Represents downward API info for projecting into a projected volume. +// Note that this is identical to a downwardAPI volume source without the default +// mode. +type DownwardAPIProjection struct { + // Items is a list of DownwardAPIVolume file + // +optional + Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"` +} + +// SecurityContext holds security configuration that will be applied to a container. +// Some fields are present in both SecurityContext and PodSecurityContext. 
When both +// are set, the values in SecurityContext take precedence. +type SecurityContext struct { + // The capabilities to add/drop when running containers. + // Defaults to the default set of capabilities granted by the container runtime. + // +optional + Capabilities *Capabilities `json:"capabilities,omitempty" protobuf:"bytes,1,opt,name=capabilities"` + // Run container in privileged mode. + // Processes in privileged containers are essentially equivalent to root on the host. + // Defaults to false. + // +optional + Privileged *bool `json:"privileged,omitempty" protobuf:"varint,2,opt,name=privileged"` + // The SELinux context to be applied to the container. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"` + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,4,opt,name=runAsUser"` + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,5,opt,name=runAsNonRoot"` + // Whether this container has a read-only root filesystem. + // Default is false. + // +optional + ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"` +} + +// SELinuxOptions are the labels to be applied to the container +type SELinuxOptions struct { + // User is a SELinux user label that applies to the container. + // +optional + User string `json:"user,omitempty" protobuf:"bytes,1,opt,name=user"` + // Role is a SELinux role label that applies to the container. + // +optional + Role string `json:"role,omitempty" protobuf:"bytes,2,opt,name=role"` + // Type is a SELinux type label that applies to the container. + // +optional + Type string `json:"type,omitempty" protobuf:"bytes,3,opt,name=type"` + // Level is SELinux level label that applies to the container. + // +optional + Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"` +} + +// RangeAllocation is not a public type. +type RangeAllocation struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Range is string that identifies the range represented by 'data'. + Range string `json:"range" protobuf:"bytes,2,opt,name=range"` + // Data is a bit array containing all allocated addresses in the previous segment. 
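// Illustrative sketch, not part of the vendored upstream file: a restrictive per-container
// SecurityContext; as the doc comment above notes, values set here take precedence over the
// pod-level PodSecurityContext. Assumes v1 "k8s.io/client-go/pkg/api/v1"; the UID is an
// arbitrary example.
func exampleSecurityContext() v1.SecurityContext {
	uid := int64(1000)
	nonRoot := true
	readOnly := true
	return v1.SecurityContext{
		RunAsUser:              &uid,
		RunAsNonRoot:           &nonRoot,
		ReadOnlyRootFilesystem: &readOnly,
	}
}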
+ Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"` +} + +const ( + // "default-scheduler" is the name of default scheduler. + DefaultSchedulerName = "default-scheduler" + + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // When the --hard-pod-affinity-weight scheduler flag is not specified, + // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. + DefaultHardPodAffinitySymmetricWeight int = 1 + + // When the --failure-domains scheduler flag is not specified, + // DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity. + DefaultFailureDomains string = metav1.LabelHostname + "," + metav1.LabelZoneFailureDomain + "," + metav1.LabelZoneRegion +) diff --git a/vendor/k8s.io/client-go/pkg/api/v1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/api/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000..75416d59a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/types_swagger_doc_generated.go @@ -0,0 +1,1960 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_AWSElasticBlockStoreVolumeSource = map[string]string{ + "": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.", + "volumeID": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", + "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". 
Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", + "readOnly": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", +} + +func (AWSElasticBlockStoreVolumeSource) SwaggerDoc() map[string]string { + return map_AWSElasticBlockStoreVolumeSource +} + +var map_Affinity = map[string]string{ + "": "Affinity is a group of affinity scheduling rules.", + "nodeAffinity": "Describes node affinity scheduling rules for the pod.", + "podAffinity": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).", + "podAntiAffinity": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).", +} + +func (Affinity) SwaggerDoc() map[string]string { + return map_Affinity +} + +var map_AttachedVolume = map[string]string{ + "": "AttachedVolume describes a volume attached to a node", + "name": "Name of the attached volume", + "devicePath": "DevicePath represents the device path where the volume should be available", +} + +func (AttachedVolume) SwaggerDoc() map[string]string { + return map_AttachedVolume +} + +var map_AvoidPods = map[string]string{ + "": "AvoidPods describes pods that should avoid this node. This is the value for a Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and will eventually become a field of NodeStatus.", + "preferAvoidPods": "Bounded-sized list of signatures of pods that should avoid this node, sorted in timestamp order from oldest to newest. Size of the slice is unspecified.", +} + +func (AvoidPods) SwaggerDoc() map[string]string { + return map_AvoidPods +} + +var map_AzureDiskVolumeSource = map[string]string{ + "": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "diskName": "The Name of the data disk in the blob storage", + "diskURI": "The URI the data disk in the blob storage", + "cachingMode": "Host Caching mode: None, Read Only, Read Write.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", +} + +func (AzureDiskVolumeSource) SwaggerDoc() map[string]string { + return map_AzureDiskVolumeSource +} + +var map_AzureFileVolumeSource = map[string]string{ + "": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "secretName": "the name of secret that contains Azure Storage Account Name and Key", + "shareName": "Share Name", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", +} + +func (AzureFileVolumeSource) SwaggerDoc() map[string]string { + return map_AzureFileVolumeSource +} + +var map_Binding = map[string]string{ + "": "Binding ties one object to another. For example, a pod is bound to a node by a scheduler.", + "metadata": "Standard object's metadata. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "target": "The target object that you want to bind to the standard object.", +} + +func (Binding) SwaggerDoc() map[string]string { + return map_Binding +} + +var map_Capabilities = map[string]string{ + "": "Adds and removes POSIX capabilities from running containers.", + "add": "Added capabilities", + "drop": "Removed capabilities", +} + +func (Capabilities) SwaggerDoc() map[string]string { + return map_Capabilities +} + +var map_CephFSVolumeSource = map[string]string{ + "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", + "monitors": "Required: Monitors is a collection of Ceph monitors More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", + "user": "Optional: User is the rados user name, default is admin More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", +} + +func (CephFSVolumeSource) SwaggerDoc() map[string]string { + return map_CephFSVolumeSource +} + +var map_CinderVolumeSource = map[string]string{ + "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", + "volumeID": "volume id used to identify the volume in cinder More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", +} + +func (CinderVolumeSource) SwaggerDoc() map[string]string { + return map_CinderVolumeSource +} + +var map_ComponentCondition = map[string]string{ + "": "Information about the condition of a component.", + "type": "Type of condition for a component. Valid value: \"Healthy\"", + "status": "Status of the condition for a component. Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\".", + "message": "Message about the condition for a component. For example, information about a health check.", + "error": "Condition error code for a component. 
For example, a health check error code.", +} + +func (ComponentCondition) SwaggerDoc() map[string]string { + return map_ComponentCondition +} + +var map_ComponentStatus = map[string]string{ + "": "ComponentStatus (and ComponentStatusList) holds the cluster validation info.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "conditions": "List of component conditions observed", +} + +func (ComponentStatus) SwaggerDoc() map[string]string { + return map_ComponentStatus +} + +var map_ComponentStatusList = map[string]string{ + "": "Status of all the conditions for the component as a list of ComponentStatus objects.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of ComponentStatus objects.", +} + +func (ComponentStatusList) SwaggerDoc() map[string]string { + return map_ComponentStatusList +} + +var map_ConfigMap = map[string]string{ + "": "ConfigMap holds configuration data for pods to consume.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "data": "Data contains the configuration data. Each key must be a valid DNS_SUBDOMAIN with an optional leading dot.", +} + +func (ConfigMap) SwaggerDoc() map[string]string { + return map_ConfigMap +} + +var map_ConfigMapEnvSource = map[string]string{ + "": "ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.", + "optional": "Specify whether the ConfigMap must be defined", +} + +func (ConfigMapEnvSource) SwaggerDoc() map[string]string { + return map_ConfigMapEnvSource +} + +var map_ConfigMapKeySelector = map[string]string{ + "": "Selects a key from a ConfigMap.", + "key": "The key to select.", + "optional": "Specify whether the ConfigMap or it's key must be defined", +} + +func (ConfigMapKeySelector) SwaggerDoc() map[string]string { + return map_ConfigMapKeySelector +} + +var map_ConfigMapList = map[string]string{ + "": "ConfigMapList is a resource containing a list of ConfigMap objects.", + "metadata": "More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is the list of ConfigMaps.", +} + +func (ConfigMapList) SwaggerDoc() map[string]string { + return map_ConfigMapList +} + +var map_ConfigMapProjection = map[string]string{ + "": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.", + "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.", + "optional": "Specify whether the ConfigMap or it's keys must be defined", +} + +func (ConfigMapProjection) SwaggerDoc() map[string]string { + return map_ConfigMapProjection +} + +var map_ConfigMapVolumeSource = map[string]string{ + "": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.", + "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "optional": "Specify whether the ConfigMap or it's keys must be defined", +} + +func (ConfigMapVolumeSource) SwaggerDoc() map[string]string { + return map_ConfigMapVolumeSource +} + +var map_Container = map[string]string{ + "": "A single application container that you want to run within a pod.", + "name": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + "image": "Docker image name. More info: http://kubernetes.io/docs/user-guide/images", + "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands", + "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands", + "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "ports": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. 
Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "env": "List of environment variables to set in the container. Cannot be updated.", + "resources": "Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources", + "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes", + "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes", + "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", + "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/images#updating-images", + "securityContext": "Security options the pod should run with. More info: http://releases.k8s.io/HEAD/docs/design/security_context.md", + "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. 
Default is false", + "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", +} + +func (Container) SwaggerDoc() map[string]string { + return map_Container +} + +var map_ContainerImage = map[string]string{ + "": "Describe a container image", + "names": "Names by which this image is known. e.g. [\"gcr.io/google_containers/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]", + "sizeBytes": "The size of the image in bytes.", +} + +func (ContainerImage) SwaggerDoc() map[string]string { + return map_ContainerImage +} + +var map_ContainerPort = map[string]string{ + "": "ContainerPort represents a network port in a single container.", + "name": "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.", + "hostPort": "Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.", + "containerPort": "Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.", + "protocol": "Protocol for port. Must be UDP or TCP. Defaults to \"TCP\".", + "hostIP": "What host IP to bind the external port to.", +} + +func (ContainerPort) SwaggerDoc() map[string]string { + return map_ContainerPort +} + +var map_ContainerState = map[string]string{ + "": "ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.", + "waiting": "Details about a waiting container", + "running": "Details about a running container", + "terminated": "Details about a terminated container", +} + +func (ContainerState) SwaggerDoc() map[string]string { + return map_ContainerState +} + +var map_ContainerStateRunning = map[string]string{ + "": "ContainerStateRunning is a running state of a container.", + "startedAt": "Time at which the container was last (re-)started", +} + +func (ContainerStateRunning) SwaggerDoc() map[string]string { + return map_ContainerStateRunning +} + +var map_ContainerStateTerminated = map[string]string{ + "": "ContainerStateTerminated is a terminated state of a container.", + "exitCode": "Exit status from the last termination of the container", + "signal": "Signal from the last termination of the container", + "reason": "(brief) reason from the last termination of the container", + "message": "Message regarding the last termination of the container", + "startedAt": "Time at which previous execution of the container started", + "finishedAt": "Time at which the container last terminated", + "containerID": "Container's ID in the format 'docker://<container_id>'", +} + +func (ContainerStateTerminated) SwaggerDoc() map[string]string { + return map_ContainerStateTerminated +} + +var map_ContainerStateWaiting = map[string]string{ + "": "ContainerStateWaiting is a waiting state of a container.", + "reason": "(brief) reason the container is not yet running.", + "message": "Message regarding why the container is not yet running.", +} + +func (ContainerStateWaiting) SwaggerDoc() map[string]string { + return map_ContainerStateWaiting +} + +var map_ContainerStatus = map[string]string{ + "": "ContainerStatus contains details for the current status of this container.", + "name": "This must be a DNS_LABEL. Each container in a pod must have a unique name. 
Cannot be updated.", + "state": "Details about the container's current condition.", + "lastState": "Details about the container's last termination condition.", + "ready": "Specifies whether the container has passed its readiness probe.", + "restartCount": "The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.", + "image": "The image the container is running. More info: http://kubernetes.io/docs/user-guide/images", + "imageID": "ImageID of the container's image.", + "containerID": "Container's ID in the format 'docker://<container_id>'. More info: http://kubernetes.io/docs/user-guide/container-environment#container-information", +} + +func (ContainerStatus) SwaggerDoc() map[string]string { + return map_ContainerStatus +} + +var map_DaemonEndpoint = map[string]string{ + "": "DaemonEndpoint contains information about a single Daemon endpoint.", + "Port": "Port number of the given endpoint.", +} + +func (DaemonEndpoint) SwaggerDoc() map[string]string { + return map_DaemonEndpoint +} + +var map_DeleteOptions = map[string]string{ + "": "DeleteOptions may be provided when deleting an API object DEPRECATED: This type has been moved to meta/v1 and will be removed soon.", + "gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", + "preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", + "orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + "PropagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.", +} + +func (DeleteOptions) SwaggerDoc() map[string]string { + return map_DeleteOptions +} + +var map_DownwardAPIProjection = map[string]string{ + "": "Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.", + "items": "Items is a list of DownwardAPIVolume file", +} + +func (DownwardAPIProjection) SwaggerDoc() map[string]string { + return map_DownwardAPIProjection +} + +var map_DownwardAPIVolumeFile = map[string]string{ + "": "DownwardAPIVolumeFile represents information to create the file containing the pod field", + "path": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. 
The first item of the relative path must not start with '..'", + "fieldRef": "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.", + "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", + "mode": "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", +} + +func (DownwardAPIVolumeFile) SwaggerDoc() map[string]string { + return map_DownwardAPIVolumeFile +} + +var map_DownwardAPIVolumeSource = map[string]string{ + "": "DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.", + "items": "Items is a list of downward API volume file", + "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", +} + +func (DownwardAPIVolumeSource) SwaggerDoc() map[string]string { + return map_DownwardAPIVolumeSource +} + +var map_EmptyDirVolumeSource = map[string]string{ + "": "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.", + "medium": "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir", +} + +func (EmptyDirVolumeSource) SwaggerDoc() map[string]string { + return map_EmptyDirVolumeSource +} + +var map_EndpointAddress = map[string]string{ + "": "EndpointAddress is a tuple that describes single IP address.", + "ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready.", + "hostname": "The Hostname of this endpoint", + "nodeName": "Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.", + "targetRef": "Reference to object providing the endpoint.", +} + +func (EndpointAddress) SwaggerDoc() map[string]string { + return map_EndpointAddress +} + +var map_EndpointPort = map[string]string{ + "": "EndpointPort is a tuple that describes a single port.", + "name": "The name of this port (corresponds to ServicePort.Name). Must be a DNS_LABEL. Optional only if one port is defined.", + "port": "The port number of the endpoint.", + "protocol": "The IP protocol for this port. Must be UDP or TCP. Default is TCP.", +} + +func (EndpointPort) SwaggerDoc() map[string]string { + return map_EndpointPort +} + +var map_EndpointSubset = map[string]string{ + "": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. 
For example, given:\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n }\nThe resulting set of endpoints can be viewed as:\n a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n b: [ 10.10.1.1:309, 10.10.2.2:309 ]", + "addresses": "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.", + "notReadyAddresses": "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.", + "ports": "Port numbers available on the related IP addresses.", +} + +func (EndpointSubset) SwaggerDoc() map[string]string { + return map_EndpointSubset +} + +var map_Endpoints = map[string]string{ + "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "subsets": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.", +} + +func (Endpoints) SwaggerDoc() map[string]string { + return map_Endpoints +} + +var map_EndpointsList = map[string]string{ + "": "EndpointsList is a list of endpoints.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of endpoints.", +} + +func (EndpointsList) SwaggerDoc() map[string]string { + return map_EndpointsList +} + +var map_EnvFromSource = map[string]string{ + "": "EnvFromSource represents the source of a set of ConfigMaps", + "prefix": "An optional identifer to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.", + "configMapRef": "The ConfigMap to select from", + "secretRef": "The Secret to select from", +} + +func (EnvFromSource) SwaggerDoc() map[string]string { + return map_EnvFromSource +} + +var map_EnvVar = map[string]string{ + "": "EnvVar represents an environment variable present in a Container.", + "name": "Name of the environment variable. Must be a C_IDENTIFIER.", + "value": "Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".", + "valueFrom": "Source for the environment variable's value. 
Cannot be used if value is not empty.", +} + +func (EnvVar) SwaggerDoc() map[string]string { + return map_EnvVar +} + +var map_EnvVarSource = map[string]string{ + "": "EnvVarSource represents a source for the value of an EnvVar.", + "fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.podIP.", + "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", + "configMapKeyRef": "Selects a key of a ConfigMap.", + "secretKeyRef": "Selects a key of a secret in the pod's namespace", +} + +func (EnvVarSource) SwaggerDoc() map[string]string { + return map_EnvVarSource +} + +var map_Event = map[string]string{ + "": "Event is a report of an event somewhere in the cluster.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "involvedObject": "The object that this event is about.", + "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", + "message": "A human-readable description of the status of this operation.", + "source": "The component reporting this event. Should be a short machine understandable string.", + "firstTimestamp": "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)", + "lastTimestamp": "The time at which the most recent occurrence of this event was recorded.", + "count": "The number of times this event has occurred.", + "type": "Type of this event (Normal, Warning), new types could be added in the future", +} + +func (Event) SwaggerDoc() map[string]string { + return map_Event +} + +var map_EventList = map[string]string{ + "": "EventList is a list of events.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of events", +} + +func (EventList) SwaggerDoc() map[string]string { + return map_EventList +} + +var map_EventSource = map[string]string{ + "": "EventSource contains information for an event.", + "component": "Component from which the event is generated.", + "host": "Node name on which the event is generated.", +} + +func (EventSource) SwaggerDoc() map[string]string { + return map_EventSource +} + +var map_ExecAction = map[string]string{ + "": "ExecAction describes a \"run in container\" action.", + "command": "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.", +} + +func (ExecAction) SwaggerDoc() map[string]string { + return map_ExecAction +} + +var map_FCVolumeSource = map[string]string{ + "": "Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.", + "targetWWNs": "Required: FC target worldwide names (WWNs)", + "lun": "Required: FC target lun number", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", +} + +func (FCVolumeSource) SwaggerDoc() map[string]string { + return map_FCVolumeSource +} + +var map_FlexVolumeSource = map[string]string{ + "": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + "driver": "Driver is the name of the driver to use for this volume.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", + "secretRef": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "options": "Optional: Extra command options if any.", +} + +func (FlexVolumeSource) SwaggerDoc() map[string]string { + return map_FlexVolumeSource +} + +var map_FlockerVolumeSource = map[string]string{ + "": "Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.", + "datasetName": "Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated", + "datasetUUID": "UUID of the dataset. This is unique identifier of a Flocker dataset", +} + +func (FlockerVolumeSource) SwaggerDoc() map[string]string { + return map_FlockerVolumeSource +} + +var map_GCEPersistentDiskVolumeSource = map[string]string{ + "": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.", + "pdName": "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", + "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", +} + +func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string { + return map_GCEPersistentDiskVolumeSource +} + +var map_GitRepoVolumeSource = map[string]string{ + "": "Represents a volume that is populated with the contents of a git repository. 
Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.", + "repository": "Repository URL", + "revision": "Commit hash for the specified revision.", + "directory": "Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.", +} + +func (GitRepoVolumeSource) SwaggerDoc() map[string]string { + return map_GitRepoVolumeSource +} + +var map_GlusterfsVolumeSource = map[string]string{ + "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", + "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "path": "Path is the Glusterfs volume path. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", +} + +func (GlusterfsVolumeSource) SwaggerDoc() map[string]string { + return map_GlusterfsVolumeSource +} + +var map_HTTPGetAction = map[string]string{ + "": "HTTPGetAction describes an action based on HTTP Get requests.", + "path": "Path to access on the HTTP server.", + "port": "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.", + "host": "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.", + "scheme": "Scheme to use for connecting to the host. Defaults to HTTP.", + "httpHeaders": "Custom headers to set in the request. HTTP allows repeated headers.", +} + +func (HTTPGetAction) SwaggerDoc() map[string]string { + return map_HTTPGetAction +} + +var map_HTTPHeader = map[string]string{ + "": "HTTPHeader describes a custom header to be used in HTTP probes", + "name": "The header field name", + "value": "The header field value", +} + +func (HTTPHeader) SwaggerDoc() map[string]string { + return map_HTTPHeader +} + +var map_Handler = map[string]string{ + "": "Handler defines a specific action that should be taken", + "exec": "One and only one of the following should be specified. Exec specifies the action to take.", + "httpGet": "HTTPGet specifies the http request to perform.", + "tcpSocket": "TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported", +} + +func (Handler) SwaggerDoc() map[string]string { + return map_Handler +} + +var map_HostPathVolumeSource = map[string]string{ + "": "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.", + "path": "Path of the directory on the host. More info: http://kubernetes.io/docs/user-guide/volumes#hostpath", +} + +func (HostPathVolumeSource) SwaggerDoc() map[string]string { + return map_HostPathVolumeSource +} + +var map_ISCSIVolumeSource = map[string]string{ + "": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", + "targetPortal": "iSCSI target portal. 
The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "iqn": "Target iSCSI Qualified Name.", + "lun": "iSCSI target lun number.", + "iscsiInterface": "Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#iscsi", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", + "portals": "iSCSI target portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", +} + +func (ISCSIVolumeSource) SwaggerDoc() map[string]string { + return map_ISCSIVolumeSource +} + +var map_KeyToPath = map[string]string{ + "": "Maps a string key to a path within a volume.", + "key": "The key to project.", + "path": "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.", + "mode": "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", +} + +func (KeyToPath) SwaggerDoc() map[string]string { + return map_KeyToPath +} + +var map_Lifecycle = map[string]string{ + "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", + "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details", + "preStop": "PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details", +} + +func (Lifecycle) SwaggerDoc() map[string]string { + return map_Lifecycle +} + +var map_LimitRange = map[string]string{ + "": "LimitRange sets resource usage limits for each kind of resource in a Namespace.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the limits enforced. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (LimitRange) SwaggerDoc() map[string]string { + return map_LimitRange +} + +var map_LimitRangeItem = map[string]string{ + "": "LimitRangeItem defines a min/max usage limit for any resource that matches on kind.", + "type": "Type of resource that this limit applies to.", + "max": "Max usage constraints on this kind by resource name.", + "min": "Min usage constraints on this kind by resource name.", + "default": "Default resource requirement limit value by resource name if resource limit is omitted.", + "defaultRequest": "DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.", + "maxLimitRequestRatio": "MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.", +} + +func (LimitRangeItem) SwaggerDoc() map[string]string { + return map_LimitRangeItem +} + +var map_LimitRangeList = map[string]string{ + "": "LimitRangeList is a list of LimitRange items.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "Items is a list of LimitRange objects. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_limit_range.md", +} + +func (LimitRangeList) SwaggerDoc() map[string]string { + return map_LimitRangeList +} + +var map_LimitRangeSpec = map[string]string{ + "": "LimitRangeSpec defines a min/max usage limit for resources that match on kind.", + "limits": "Limits is the list of LimitRangeItem objects that are enforced.", +} + +func (LimitRangeSpec) SwaggerDoc() map[string]string { + return map_LimitRangeSpec +} + +var map_List = map[string]string{ + "": "List holds a list of objects, which may not be known by the server.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of objects", +} + +func (List) SwaggerDoc() map[string]string { + return map_List +} + +var map_ListOptions = map[string]string{ + "": "ListOptions is the query options to a standard REST list call. DEPRECATED: This type has been moved to meta/v1 and will be removed soon.", + "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "timeoutSeconds": "Timeout for the list/watch call.", +} + +func (ListOptions) SwaggerDoc() map[string]string { + return map_ListOptions +} + +var map_LoadBalancerIngress = map[string]string{ + "": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", + "ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)", + "hostname": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)", +} + +func (LoadBalancerIngress) SwaggerDoc() map[string]string { + return map_LoadBalancerIngress +} + +var map_LoadBalancerStatus = map[string]string{ + "": "LoadBalancerStatus represents the status of a load-balancer.", + "ingress": "Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points.", +} + +func (LoadBalancerStatus) SwaggerDoc() map[string]string { + return map_LoadBalancerStatus +} + +var map_LocalObjectReference = map[string]string{ + "": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", + "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", +} + +func (LocalObjectReference) SwaggerDoc() map[string]string { + return map_LocalObjectReference +} + +var map_NFSVolumeSource = map[string]string{ + "": "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.", + "server": "Server is the hostname or IP address of the NFS server. More info: http://kubernetes.io/docs/user-guide/volumes#nfs", + "path": "Path that is exported by the NFS server. More info: http://kubernetes.io/docs/user-guide/volumes#nfs", + "readOnly": "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: http://kubernetes.io/docs/user-guide/volumes#nfs", +} + +func (NFSVolumeSource) SwaggerDoc() map[string]string { + return map_NFSVolumeSource +} + +var map_Namespace = map[string]string{ + "": "Namespace provides a scope for Names. Use of multiple namespaces is optional.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the behavior of the Namespace. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status describes the current status of a Namespace. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (Namespace) SwaggerDoc() map[string]string { + return map_Namespace +} + +var map_NamespaceList = map[string]string{ + "": "NamespaceList is a list of Namespaces.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "Items is the list of Namespace objects in the list. 
More info: http://kubernetes.io/docs/user-guide/namespaces", +} + +func (NamespaceList) SwaggerDoc() map[string]string { + return map_NamespaceList +} + +var map_NamespaceSpec = map[string]string{ + "": "NamespaceSpec describes the attributes on a Namespace.", + "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers", +} + +func (NamespaceSpec) SwaggerDoc() map[string]string { + return map_NamespaceSpec +} + +var map_NamespaceStatus = map[string]string{ + "": "NamespaceStatus is information about the current status of a Namespace.", + "phase": "Phase is the current lifecycle phase of the namespace. More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#phases", +} + +func (NamespaceStatus) SwaggerDoc() map[string]string { + return map_NamespaceStatus +} + +var map_Node = map[string]string{ + "": "Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd).", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a node. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the node. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (Node) SwaggerDoc() map[string]string { + return map_Node +} + +var map_NodeAddress = map[string]string{ + "": "NodeAddress contains information for the node's address.", + "type": "Node address type, one of Hostname, ExternalIP or InternalIP.", + "address": "The node address.", +} + +func (NodeAddress) SwaggerDoc() map[string]string { + return map_NodeAddress +} + +var map_NodeAffinity = map[string]string{ + "": "Node affinity is a group of node affinity scheduling rules.", + "requiredDuringSchedulingIgnoredDuringExecution": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.", + "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.", +} + +func (NodeAffinity) SwaggerDoc() map[string]string { + return map_NodeAffinity +} + +var map_NodeCondition = map[string]string{ + "": "NodeCondition contains condition information for a node.", + "type": "Type of node condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastHeartbeatTime": "Last time we got an update on a given condition.", + "lastTransitionTime": "Last time the condition transit from one status to another.", + "reason": "(brief) reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (NodeCondition) SwaggerDoc() map[string]string { + return map_NodeCondition +} + +var map_NodeDaemonEndpoints = map[string]string{ + "": "NodeDaemonEndpoints lists ports opened by daemons running on the Node.", + "kubeletEndpoint": "Endpoint on which Kubelet is listening.", +} + +func (NodeDaemonEndpoints) SwaggerDoc() map[string]string { + return map_NodeDaemonEndpoints +} + +var map_NodeList = map[string]string{ + "": "NodeList is the whole list of all Nodes which have been registered with master.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of nodes", +} + +func (NodeList) SwaggerDoc() map[string]string { + return map_NodeList +} + +var map_NodeProxyOptions = map[string]string{ + "": "NodeProxyOptions is the query options to a Node's proxy call.", + "path": "Path is the URL path to use for the current proxy request to node.", +} + +func (NodeProxyOptions) SwaggerDoc() map[string]string { + return map_NodeProxyOptions +} + +var map_NodeSelector = map[string]string{ + "": "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.", + "nodeSelectorTerms": "Required. A list of node selector terms. The terms are ORed.", +} + +func (NodeSelector) SwaggerDoc() map[string]string { + return map_NodeSelector +} + +var map_NodeSelectorRequirement = map[string]string{ + "": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", + "key": "The label key that the selector applies to.", + "operator": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.", + "values": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.", +} + +func (NodeSelectorRequirement) SwaggerDoc() map[string]string { + return map_NodeSelectorRequirement +} + +var map_NodeSelectorTerm = map[string]string{ + "": "A null or empty node selector term matches no objects.", + "matchExpressions": "Required. A list of node selector requirements. 
The requirements are ANDed.", +} + +func (NodeSelectorTerm) SwaggerDoc() map[string]string { + return map_NodeSelectorTerm +} + +var map_NodeSpec = map[string]string{ + "": "NodeSpec describes the attributes that a node is created with.", + "podCIDR": "PodCIDR represents the pod IP range assigned to the node.", + "externalID": "External ID of the node assigned by some machine database (e.g. a cloud provider). Deprecated.", + "providerID": "ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>", + "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration", + "taints": "If specified, the node's taints.", +} + +func (NodeSpec) SwaggerDoc() map[string]string { + return map_NodeSpec +} + +var map_NodeStatus = map[string]string{ + "": "NodeStatus is information about the current status of a node.", + "capacity": "Capacity represents the total resources of a node. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity for more details.", + "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", + "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-phase The field is never populated, and now is deprecated.", + "conditions": "Conditions is an array of current observed node conditions. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-condition", + "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-addresses", + "daemonEndpoints": "Endpoints of daemons running on the Node.", + "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info", + "images": "List of container images on this node", + "volumesInUse": "List of attachable volumes in use (mounted) by the node.", + "volumesAttached": "List of volumes that are attached to the node.", +} + +func (NodeStatus) SwaggerDoc() map[string]string { + return map_NodeStatus +} + +var map_NodeSystemInfo = map[string]string{ + "": "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.", + "machineID": "MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html", + "systemUUID": "SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html", + "bootID": "Boot ID reported by the node.", + "kernelVersion": "Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).", + "osImage": "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).", + "containerRuntimeVersion": "ContainerRuntime Version reported by the node through runtime remote API (e.g. 
docker://1.5.0).", + "kubeletVersion": "Kubelet Version reported by the node.", + "kubeProxyVersion": "KubeProxy Version reported by the node.", + "operatingSystem": "The Operating System reported by the node", + "architecture": "The Architecture reported by the node", +} + +func (NodeSystemInfo) SwaggerDoc() map[string]string { + return map_NodeSystemInfo +} + +var map_ObjectFieldSelector = map[string]string{ + "": "ObjectFieldSelector selects an APIVersioned field of an object.", + "apiVersion": "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".", + "fieldPath": "Path of the field to select in the specified API version.", +} + +func (ObjectFieldSelector) SwaggerDoc() map[string]string { + return map_ObjectFieldSelector +} + +var map_ObjectMeta = map[string]string{ + "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon.", + "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency", + "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", + "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", + "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. 
They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency", + "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", + "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", + "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", + "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", + "ownerReferences": "List of objects depended on by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", + "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", + "clusterName": "The name of the cluster which the object belongs to. 
This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", +} + +func (ObjectMeta) SwaggerDoc() map[string]string { + return map_ObjectMeta +} + +var map_ObjectReference = map[string]string{ + "": "ObjectReference contains enough information to let you inspect or modify the referred object.", + "kind": "Kind of the referent. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "namespace": "Namespace of the referent. More info: http://kubernetes.io/docs/user-guide/namespaces", + "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "uid": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "apiVersion": "API version of the referent.", + "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency", + "fieldPath": "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.", +} + +func (ObjectReference) SwaggerDoc() map[string]string { + return map_ObjectReference +} + +var map_PersistentVolume = map[string]string{ + "": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: http://kubernetes.io/docs/user-guide/persistent-volumes", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes", + "status": "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistent-volumes", +} + +func (PersistentVolume) SwaggerDoc() map[string]string { + return map_PersistentVolume +} + +var map_PersistentVolumeClaim = map[string]string{ + "": "PersistentVolumeClaim is a user's request for and claim to a persistent volume", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the desired characteristics of a volume requested by a pod author. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims", + "status": "Status represents the current information/status of a persistent volume claim. Read-only. 
More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims", +} + +func (PersistentVolumeClaim) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaim +} + +var map_PersistentVolumeClaimList = map[string]string{ + "": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "A list of persistent volume claims. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims", +} + +func (PersistentVolumeClaimList) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimList +} + +var map_PersistentVolumeClaimSpec = map[string]string{ + "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", + "accessModes": "AccessModes contains the desired access modes the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1", + "selector": "A label query over volumes to consider for binding.", + "resources": "Resources represents the minimum resources the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources", + "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", + "storageClassName": "Name of the StorageClass required by the claim. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#class-1", +} + +func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimSpec +} + +var map_PersistentVolumeClaimStatus = map[string]string{ + "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", + "phase": "Phase represents the current phase of PersistentVolumeClaim.", + "accessModes": "AccessModes contains the actual access modes the volume backing the PVC has. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1", + "capacity": "Represents the actual resources of the underlying volume.", +} + +func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimStatus +} + +var map_PersistentVolumeClaimVolumeSource = map[string]string{ + "": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).", + "claimName": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims", + "readOnly": "Will force the ReadOnly setting in VolumeMounts. Default false.", +} + +func (PersistentVolumeClaimVolumeSource) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimVolumeSource +} + +var map_PersistentVolumeList = map[string]string{ + "": "PersistentVolumeList is a list of PersistentVolume items.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of persistent volumes. 
More info: http://kubernetes.io/docs/user-guide/persistent-volumes", +} + +func (PersistentVolumeList) SwaggerDoc() map[string]string { + return map_PersistentVolumeList +} + +var map_PersistentVolumeSource = map[string]string{ + "": "PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. Exactly one of its members must be set.", + "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", + "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", + "hostPath": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: http://kubernetes.io/docs/user-guide/volumes#hostpath", + "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", + "nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: http://kubernetes.io/docs/user-guide/volumes#nfs", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", + "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running", + "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. 
This is an alpha feature and may change in future.", + "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", +} + +func (PersistentVolumeSource) SwaggerDoc() map[string]string { + return map_PersistentVolumeSource +} + +var map_PersistentVolumeSpec = map[string]string{ + "": "PersistentVolumeSpec is the specification of a persistent volume.", + "capacity": "A description of the persistent volume's resources and capacity. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity", + "accessModes": "AccessModes contains all ways the volume can be mounted. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes", + "claimRef": "ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#binding", + "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy", + "storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.", +} + +func (PersistentVolumeSpec) SwaggerDoc() map[string]string { + return map_PersistentVolumeSpec +} + +var map_PersistentVolumeStatus = map[string]string{ + "": "PersistentVolumeStatus is the current status of a persistent volume.", + "phase": "Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#phase", + "message": "A human-readable message indicating details about why the volume is in this state.", + "reason": "Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", +} + +func (PersistentVolumeStatus) SwaggerDoc() map[string]string { + return map_PersistentVolumeStatus +} + +var map_PhotonPersistentDiskVolumeSource = map[string]string{ + "": "Represents a Photon Controller persistent disk resource.", + "pdID": "ID that identifies Photon Controller persistent disk", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", +} + +func (PhotonPersistentDiskVolumeSource) SwaggerDoc() map[string]string { + return map_PhotonPersistentDiskVolumeSource +} + +var map_Pod = map[string]string{ + "": "Pod is a collection of containers that can run on a host. 
This resource is created by clients and scheduled onto hosts.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (Pod) SwaggerDoc() map[string]string { + return map_Pod +} + +var map_PodAffinity = map[string]string{ + "": "Pod affinity is a group of inter pod affinity scheduling rules.", + "requiredDuringSchedulingIgnoredDuringExecution": "NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system will try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:\"requiredDuringSchedulingRequiredDuringExecution,omitempty\"` If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", +} + +func (PodAffinity) SwaggerDoc() map[string]string { + return map_PodAffinity +} + +var map_PodAffinityTerm = map[string]string{ + "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running", + "labelSelector": "A label query over a set of resources, in this case pods.", + "namespaces": "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"", + "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as \"all topologies\" (\"all topologies\" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.", +} + +func (PodAffinityTerm) SwaggerDoc() map[string]string { + return map_PodAffinityTerm +} + +var map_PodAntiAffinity = map[string]string{ + "": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.", + "requiredDuringSchedulingIgnoredDuringExecution": "NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system will try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:\"requiredDuringSchedulingRequiredDuringExecution,omitempty\"` If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", +} + +func (PodAntiAffinity) SwaggerDoc() map[string]string { + return map_PodAntiAffinity +} + +var map_PodAttachOptions = map[string]string{ + "": "PodAttachOptions is the query options to a Pod's remote attach call.", + "stdin": "Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.", + "stdout": "Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true.", + "stderr": "Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true.", + "tty": "TTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false.", + "container": "The container in which to execute the command. Defaults to only container if there is only one container in the pod.", +} + +func (PodAttachOptions) SwaggerDoc() map[string]string { + return map_PodAttachOptions +} + +var map_PodCondition = map[string]string{ + "": "PodCondition contains details for the current condition of this pod.", + "type": "Type is the type of the condition. Currently only Ready. More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions", + "status": "Status is the status of the condition. Can be True, False, Unknown. More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions", + "lastProbeTime": "Last time we probed the condition.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "Unique, one-word, CamelCase reason for the condition's last transition.", + "message": "Human-readable message indicating details about last transition.", +} + +func (PodCondition) SwaggerDoc() map[string]string { + return map_PodCondition +} + +var map_PodExecOptions = map[string]string{ + "": "PodExecOptions is the query options to a Pod's remote exec call.", + "stdin": "Redirect the standard input stream of the pod for this call. Defaults to false.", + "stdout": "Redirect the standard output stream of the pod for this call. Defaults to true.", + "stderr": "Redirect the standard error stream of the pod for this call. Defaults to true.", + "tty": "TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.", + "container": "Container in which to execute the command. Defaults to only container if there is only one container in the pod.", + "command": "Command is the remote command to execute. argv array. Not executed within a shell.", +} + +func (PodExecOptions) SwaggerDoc() map[string]string { + return map_PodExecOptions +} + +var map_PodList = map[string]string{ + "": "PodList is a list of Pods.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of pods. More info: http://kubernetes.io/docs/user-guide/pods", +} + +func (PodList) SwaggerDoc() map[string]string { + return map_PodList +} + +var map_PodLogOptions = map[string]string{ + "": "PodLogOptions is the query options for a Pod's logs REST call.", + "container": "The container for which to stream logs. 
Defaults to only container if there is one container in the pod.", + "follow": "Follow the log stream of the pod. Defaults to false.", + "previous": "Return previous terminated container logs. Defaults to false.", + "sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", + "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", + "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", +} + +func (PodLogOptions) SwaggerDoc() map[string]string { + return map_PodLogOptions +} + +var map_PodPortForwardOptions = map[string]string{ + "": "PodPortForwardOptions is the query options to a Pod's port forward call when using WebSockets. The `port` query parameter must specify the port or ports (comma separated) to forward over. Port forwarding over SPDY does not use these options. It requires the port to be passed in the `port` header as part of request.", + "ports": "List of ports to forward Required when using WebSockets", +} + +func (PodPortForwardOptions) SwaggerDoc() map[string]string { + return map_PodPortForwardOptions +} + +var map_PodProxyOptions = map[string]string{ + "": "PodProxyOptions is the query options to a Pod's proxy call.", + "path": "Path is the URL path to use for the current proxy request to pod.", +} + +func (PodProxyOptions) SwaggerDoc() map[string]string { + return map_PodProxyOptions +} + +var map_PodSecurityContext = map[string]string{ + "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", + "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.", + "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", +} + +func (PodSecurityContext) SwaggerDoc() map[string]string { + return map_PodSecurityContext +} + +var map_PodSignature = map[string]string{ + "": "Describes the class of pods that should avoid this node. Exactly one field should be set.", + "podController": "Reference to controller whose pods should avoid this node.", +} + +func (PodSignature) SwaggerDoc() map[string]string { + return map_PodSignature +} + +var map_PodSpec = map[string]string{ + "": "PodSpec is a description of a pod.", + "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: http://kubernetes.io/docs/user-guide/volumes", + "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers", + "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers", + "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: http://kubernetes.io/docs/user-guide/pod-states#restartpolicy", + "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", + "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", + "dnsPolicy": "Set DNS policy for containers within the pod. One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. 
Defaults to \"ClusterFirst\". To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", + "nodeSelector": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: http://kubernetes.io/docs/user-guide/node-selection/README", + "serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md", + "serviceAccount": "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", + "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.", + "nodeName": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.", + "hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", + "hostPID": "Use the host's pid namespace. Optional: Default to false.", + "hostIPC": "Use the host's ipc namespace. Optional: Default to false.", + "securityContext": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", + "imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod", + "hostname": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.", + "subdomain": "If specified, the fully qualified Pod hostname will be \"<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>\". If not specified, the pod will not have a domainname at all.", + "affinity": "If specified, the pod's scheduling constraints", + "schedulerName": "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", + "tolerations": "If specified, the pod's tolerations.", +} + +func (PodSpec) SwaggerDoc() map[string]string { + return map_PodSpec +} + +var map_PodStatus = map[string]string{ + "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system.", + "phase": "Current condition of the pod. More info: http://kubernetes.io/docs/user-guide/pod-states#pod-phase", + "conditions": "Current service state of pod. More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions", + "message": "A human readable message indicating details about why the pod is in this condition.", + "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'", + "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", + "podIP": "IP address allocated to the pod. Routable at least within the cluster. 
Empty if not yet allocated.", + "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", + "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses", + "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: http://kubernetes.io/docs/user-guide/pod-states#container-statuses", + "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md", +} + +func (PodStatus) SwaggerDoc() map[string]string { + return map_PodStatus +} + +var map_PodStatusResult = map[string]string{ + "": "PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (PodStatusResult) SwaggerDoc() map[string]string { + return map_PodStatusResult +} + +var map_PodTemplate = map[string]string{ + "": "PodTemplate describes a template for creating copies of a predefined pod.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "template": "Template defines the pods that will be created from this pod template. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (PodTemplate) SwaggerDoc() map[string]string { + return map_PodTemplate +} + +var map_PodTemplateList = map[string]string{ + "": "PodTemplateList is a list of PodTemplates.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of pod templates", +} + +func (PodTemplateList) SwaggerDoc() map[string]string { + return map_PodTemplateList +} + +var map_PodTemplateSpec = map[string]string{ + "": "PodTemplateSpec describes the data a pod should have when created from a template", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (PodTemplateSpec) SwaggerDoc() map[string]string { + return map_PodTemplateSpec +} + +var map_PortworxVolumeSource = map[string]string{ + "": "PortworxVolumeSource represents a Portworx volume resource.", + "volumeID": "VolumeID uniquely identifies a Portworx volume", + "fsType": "FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", +} + +func (PortworxVolumeSource) SwaggerDoc() map[string]string { + return map_PortworxVolumeSource +} + +var map_Preconditions = map[string]string{ + "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", + "uid": "Specifies the target UID.", +} + +func (Preconditions) SwaggerDoc() map[string]string { + return map_Preconditions +} + +var map_PreferAvoidPodsEntry = map[string]string{ + "": "Describes a class of pods that should avoid this node.", + "podSignature": "The class of pods.", + "evictionTime": "Time at which this entry was added to the list.", + "reason": "(brief) reason why this entry was added to the list.", + "message": "Human readable message indicating why this entry was added to the list.", +} + +func (PreferAvoidPodsEntry) SwaggerDoc() map[string]string { + return map_PreferAvoidPodsEntry +} + +var map_PreferredSchedulingTerm = map[string]string{ + "": "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).", + "weight": "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.", + "preference": "A node selector term, associated with the corresponding weight.", +} + +func (PreferredSchedulingTerm) SwaggerDoc() map[string]string { + return map_PreferredSchedulingTerm +} + +var map_Probe = map[string]string{ + "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", + "initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes", + "timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes", + "periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", + "successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.", + "failureThreshold": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", +} + +func (Probe) SwaggerDoc() map[string]string { + return map_Probe +} + +var map_ProjectedVolumeSource = map[string]string{ + "": "Represents a projected volume source", + "sources": "list of volume projections", + "defaultMode": "Mode bits to use on created files by default. Must be a value between 0 and 0777. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", +} + +func (ProjectedVolumeSource) SwaggerDoc() map[string]string { + return map_ProjectedVolumeSource +} + +var map_QuobyteVolumeSource = map[string]string{ + "": "Represents a Quobyte mount that lasts the lifetime of a pod. 
Quobyte volumes do not support ownership management or SELinux relabeling.", + "registry": "Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes", + "volume": "Volume is a string that references an already created Quobyte volume by name.", + "readOnly": "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.", + "user": "User to map volume access to. Defaults to serviceaccount user", + "group": "Group to map volume access to. Default is no group", +} + +func (QuobyteVolumeSource) SwaggerDoc() map[string]string { + return map_QuobyteVolumeSource +} + +var map_RBDVolumeSource = map[string]string{ + "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", + "monitors": "A collection of Ceph monitors. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "image": "The rados image name. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#rbd", + "pool": "The rados pool name. Default is rbd. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it.", + "user": "The rados user name. Default is admin. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", +} + +func (RBDVolumeSource) SwaggerDoc() map[string]string { + return map_RBDVolumeSource +} + +var map_RangeAllocation = map[string]string{ + "": "RangeAllocation is not a public type.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "range": "Range is a string that identifies the range represented by 'data'.", + "data": "Data is a bit array containing all allocated addresses in the previous segment.", +} + +func (RangeAllocation) SwaggerDoc() map[string]string { + return map_RangeAllocation +} + +var map_ReplicationController = map[string]string{ + "": "ReplicationController represents the configuration of a replication controller.", + "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the replication controller. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (ReplicationController) SwaggerDoc() map[string]string { + return map_ReplicationController +} + +var map_ReplicationControllerCondition = map[string]string{ + "": "ReplicationControllerCondition describes the state of a replication controller at a certain point.", + "type": "Type of replication controller condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "The last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (ReplicationControllerCondition) SwaggerDoc() map[string]string { + return map_ReplicationControllerCondition +} + +var map_ReplicationControllerList = map[string]string{ + "": "ReplicationControllerList is a collection of replication controllers.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of replication controllers. More info: http://kubernetes.io/docs/user-guide/replication-controller", +} + +func (ReplicationControllerList) SwaggerDoc() map[string]string { + return map_ReplicationControllerList +} + +var map_ReplicationControllerSpec = map[string]string{ + "": "ReplicationControllerSpec is the specification of a replication controller.", + "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its containers crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "selector": "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template", +} + +func (ReplicationControllerSpec) SwaggerDoc() map[string]string { + return map_ReplicationControllerSpec +} + +var map_ReplicationControllerStatus = map[string]string{ + "": "ReplicationControllerStatus represents the current status of a replication controller.", + "replicas": "Replicas is the most recently observed number of replicas. 
More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller", + "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replication controller.", + "readyReplicas": "The number of ready replicas for this replication controller.", + "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replication controller.", + "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed replication controller.", + "conditions": "Represents the latest available observations of a replication controller's current state.", +} + +func (ReplicationControllerStatus) SwaggerDoc() map[string]string { + return map_ReplicationControllerStatus +} + +var map_ResourceFieldSelector = map[string]string{ + "": "ResourceFieldSelector represents container resources (cpu, memory) and their output format", + "containerName": "Container name: required for volumes, optional for env vars", + "resource": "Required: resource to select", + "divisor": "Specifies the output format of the exposed resources, defaults to \"1\"", +} + +func (ResourceFieldSelector) SwaggerDoc() map[string]string { + return map_ResourceFieldSelector +} + +var map_ResourceQuota = map[string]string{ + "": "ResourceQuota sets aggregate quota restrictions enforced per namespace", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the desired quota. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status defines the actual enforced quota and its current usage. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (ResourceQuota) SwaggerDoc() map[string]string { + return map_ResourceQuota +} + +var map_ResourceQuotaList = map[string]string{ + "": "ResourceQuotaList is a list of ResourceQuota items.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "Items is a list of ResourceQuota objects. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota", +} + +func (ResourceQuotaList) SwaggerDoc() map[string]string { + return map_ResourceQuotaList +} + +var map_ResourceQuotaSpec = map[string]string{ + "": "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.", + "hard": "Hard is the set of desired hard limits for each named resource. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota", + "scopes": "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.", +} + +func (ResourceQuotaSpec) SwaggerDoc() map[string]string { + return map_ResourceQuotaSpec +} + +var map_ResourceQuotaStatus = map[string]string{ + "": "ResourceQuotaStatus defines the enforced hard limits and observed use.", + "hard": "Hard is the set of enforced hard limits for each named resource. 
More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota", + "used": "Used is the current observed total usage of the resource in the namespace.", +} + +func (ResourceQuotaStatus) SwaggerDoc() map[string]string { + return map_ResourceQuotaStatus +} + +var map_ResourceRequirements = map[string]string{ + "": "ResourceRequirements describes the compute resource requirements.", + "limits": "Limits describes the maximum amount of compute resources allowed. More info: http://kubernetes.io/docs/user-guide/compute-resources/", + "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: http://kubernetes.io/docs/user-guide/compute-resources/", +} + +func (ResourceRequirements) SwaggerDoc() map[string]string { + return map_ResourceRequirements +} + +var map_SELinuxOptions = map[string]string{ + "": "SELinuxOptions are the labels to be applied to the container", + "user": "User is a SELinux user label that applies to the container.", + "role": "Role is a SELinux role label that applies to the container.", + "type": "Type is a SELinux type label that applies to the container.", + "level": "Level is SELinux level label that applies to the container.", +} + +func (SELinuxOptions) SwaggerDoc() map[string]string { + return map_SELinuxOptions +} + +var map_ScaleIOVolumeSource = map[string]string{ + "": "ScaleIOVolumeSource represents a persistent ScaleIO volume", + "gateway": "The host address of the ScaleIO API Gateway.", + "system": "The name of the storage system as configured in ScaleIO.", + "secretRef": "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.", + "sslEnabled": "Flag to enable/disable SSL communication with Gateway, default false", + "protectionDomain": "The name of the Protection Domain for the configured storage (defaults to \"default\").", + "storagePool": "The Storage Pool associated with the protection domain (defaults to \"default\").", + "storageMode": "Indicates whether the storage for a volume should be thick or thin (defaults to \"thin\").", + "volumeName": "The name of a volume already created in the ScaleIO system that is associated with this volume source.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", +} + +func (ScaleIOVolumeSource) SwaggerDoc() map[string]string { + return map_ScaleIOVolumeSource +} + +var map_Secret = map[string]string{ + "": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "data": "Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN or leading dot followed by valid DNS_SUBDOMAIN. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4", + "stringData": "stringData allows specifying non-binary secret data in string form. 
It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. It is never output when reading from the API.", + "type": "Used to facilitate programmatic handling of secret data.", +} + +func (Secret) SwaggerDoc() map[string]string { + return map_Secret +} + +var map_SecretEnvSource = map[string]string{ + "": "SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables.", + "optional": "Specify whether the Secret must be defined", +} + +func (SecretEnvSource) SwaggerDoc() map[string]string { + return map_SecretEnvSource +} + +var map_SecretKeySelector = map[string]string{ + "": "SecretKeySelector selects a key of a Secret.", + "key": "The key of the secret to select from. Must be a valid secret key.", + "optional": "Specify whether the Secret or its key must be defined", +} + +func (SecretKeySelector) SwaggerDoc() map[string]string { + return map_SecretKeySelector +} + +var map_SecretList = map[string]string{ + "": "SecretList is a list of Secret.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "Items is a list of secret objects. More info: http://kubernetes.io/docs/user-guide/secrets", +} + +func (SecretList) SwaggerDoc() map[string]string { + return map_SecretList +} + +var map_SecretProjection = map[string]string{ + "": "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.", + "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "optional": "Specify whether the Secret or its key must be defined", +} + +func (SecretProjection) SwaggerDoc() map[string]string { + return map_SecretProjection +} + +var map_SecretVolumeSource = map[string]string{ + "": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.", + "secretName": "Name of the secret in the pod's namespace to use. More info: http://kubernetes.io/docs/user-guide/volumes#secrets", + "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. 
Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "optional": "Specify whether the Secret or its keys must be defined", +} + +func (SecretVolumeSource) SwaggerDoc() map[string]string { + return map_SecretVolumeSource +} + +var map_SecurityContext = map[string]string{ + "": "SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.", + "capabilities": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.", + "privileged": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.", + "seLinuxOptions": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. Default is false.", +} + +func (SecurityContext) SwaggerDoc() map[string]string { + return map_SecurityContext +} + +var map_SerializedReference = map[string]string{ + "": "SerializedReference is a reference to a serialized object.", + "reference": "The reference to an object in the system.", +} + +func (SerializedReference) SwaggerDoc() map[string]string { + return map_SerializedReference +} + +var map_Service = map[string]string{ + "": "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a service. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (Service) SwaggerDoc() map[string]string { + return map_Service +} + +var map_ServiceAccount = map[string]string{ + "": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", + "metadata": "Standard object's metadata. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: http://kubernetes.io/docs/user-guide/secrets", + "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: http://kubernetes.io/docs/user-guide/secrets#manually-specifying-an-imagepullsecret", + "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.", +} + +func (ServiceAccount) SwaggerDoc() map[string]string { + return map_ServiceAccount +} + +var map_ServiceAccountList = map[string]string{ + "": "ServiceAccountList is a list of ServiceAccount objects", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of ServiceAccounts. More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts", +} + +func (ServiceAccountList) SwaggerDoc() map[string]string { + return map_ServiceAccountList +} + +var map_ServiceList = map[string]string{ + "": "ServiceList holds a list of services.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of services", +} + +func (ServiceList) SwaggerDoc() map[string]string { + return map_ServiceList +} + +var map_ServicePort = map[string]string{ + "": "ServicePort contains information on service's port.", + "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.", + "protocol": "The IP protocol for this port. Supports \"TCP\" and \"UDP\". Default is TCP.", + "port": "The port that will be exposed by this service.", + "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: http://kubernetes.io/docs/user-guide/services#defining-a-service", + "nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: http://kubernetes.io/docs/user-guide/services#type--nodeport", +} + +func (ServicePort) SwaggerDoc() map[string]string { + return map_ServicePort +} + +var map_ServiceProxyOptions = map[string]string{ + "": "ServiceProxyOptions is the query options to a Service's proxy call.", + "path": "Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. 
For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.", +} + +func (ServiceProxyOptions) SwaggerDoc() map[string]string { + return map_ServiceProxyOptions +} + +var map_ServiceSpec = map[string]string{ + "": "ServiceSpec describes the attributes that a user creates on a service.", + "ports": "The list of ports that are exposed by this service. More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies", + "selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: http://kubernetes.io/docs/user-guide/services#overview", + "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies", + "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: http://kubernetes.io/docs/user-guide/services#overview", + "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system. A previous form of this functionality exists as the deprecatedPublicIPs field. When using this field, callers should also clear the deprecatedPublicIPs field.", + "deprecatedPublicIPs": "deprecatedPublicIPs is deprecated and replaced by the externalIPs field with almost the exact same semantics. This field is retained in the v1 API for compatibility until at least 8/20/2016. It will be removed from any new API revisions. If both deprecatedPublicIPs *and* externalIPs are set, deprecatedPublicIPs is used.", + "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. 
More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies", + "loadBalancerIP": "Only applies to Service Type: LoadBalancer. LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", + "loadBalancerSourceRanges": "If specified and supported by the platform, traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature. More info: http://kubernetes.io/docs/user-guide/services-firewalls", + "externalName": "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid DNS name and requires Type to be ExternalName.", +} + +func (ServiceSpec) SwaggerDoc() map[string]string { + return map_ServiceSpec +} + +var map_ServiceStatus = map[string]string{ + "": "ServiceStatus represents the current status of a service.", + "loadBalancer": "LoadBalancer contains the current status of the load-balancer, if one is present.", +} + +func (ServiceStatus) SwaggerDoc() map[string]string { + return map_ServiceStatus +} + +var map_TCPSocketAction = map[string]string{ + "": "TCPSocketAction describes an action based on opening a socket", + "port": "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.", +} + +func (TCPSocketAction) SwaggerDoc() map[string]string { + return map_TCPSocketAction +} + +var map_Taint = map[string]string{ + "": "The node this Taint is attached to has the effect \"effect\" on any pod that does not tolerate the Taint.", + "key": "Required. The taint key to be applied to a node.", + "value": "Required. The taint value corresponding to the taint key.", + "effect": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.", + "timeAdded": "TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints.", +} + +func (Taint) SwaggerDoc() map[string]string { + return map_Taint +} + +var map_Toleration = map[string]string{ + "": "The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.", + "key": "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.", + "operator": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", + "value": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.", + "effect": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.", + "tolerationSeconds": "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. 
By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.", +} + +func (Toleration) SwaggerDoc() map[string]string { + return map_Toleration +} + +var map_Volume = map[string]string{ + "": "Volume represents a named volume in a pod that may be accessed by any container in the pod.", + "name": "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: http://kubernetes.io/docs/user-guide/identifiers#names", +} + +func (Volume) SwaggerDoc() map[string]string { + return map_Volume +} + +var map_VolumeMount = map[string]string{ + "": "VolumeMount describes a mounting of a Volume within a container.", + "name": "This must match the Name of a Volume.", + "readOnly": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", + "mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.", + "subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", +} + +func (VolumeMount) SwaggerDoc() map[string]string { + return map_VolumeMount +} + +var map_VolumeProjection = map[string]string{ + "": "Projection that may be projected along with other supported volume types", + "secret": "information about the secret data to project", + "downwardAPI": "information about the downwardAPI data to project", + "configMap": "information about the configMap data to project", +} + +func (VolumeProjection) SwaggerDoc() map[string]string { + return map_VolumeProjection +} + +var map_VolumeSource = map[string]string{ + "": "Represents the source of a volume to mount. Only one of its members may be specified.", + "hostPath": "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: http://kubernetes.io/docs/user-guide/volumes#hostpath", + "emptyDir": "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir", + "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk", + "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore", + "gitRepo": "GitRepo represents a git repository at a particular revision.", + "secret": "Secret represents a secret that should populate this volume. More info: http://kubernetes.io/docs/user-guide/volumes#secrets", + "nfs": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: http://kubernetes.io/docs/user-guide/volumes#nfs", + "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md", + "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. 
More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", + "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", + "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", + "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", + "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "configMap": "ConfigMap represents a configMap that should populate this volume", + "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "projected": "Items for all in one resources secrets, configmaps, and downward API", + "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", +} + +func (VolumeSource) SwaggerDoc() map[string]string { + return map_VolumeSource +} + +var map_VsphereVirtualDiskVolumeSource = map[string]string{ + "": "Represents a vSphere volume resource.", + "volumePath": "Path that identifies vSphere volume vmdk", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", +} + +func (VsphereVirtualDiskVolumeSource) SwaggerDoc() map[string]string { + return map_VsphereVirtualDiskVolumeSource +} + +var map_WeightedPodAffinityTerm = map[string]string{ + "": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", + "weight": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", + "podAffinityTerm": "Required. 
A pod affinity term, associated with the corresponding weight.", +} + +func (WeightedPodAffinityTerm) SwaggerDoc() map[string]string { + return map_WeightedPodAffinityTerm +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.conversion.go new file mode 100644 index 000000000..7d534c8c7 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.conversion.go @@ -0,0 +1,4702 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + api "k8s.io/client-go/pkg/api" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource, + Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource, + Convert_v1_Affinity_To_api_Affinity, + Convert_api_Affinity_To_v1_Affinity, + Convert_v1_AttachedVolume_To_api_AttachedVolume, + Convert_api_AttachedVolume_To_v1_AttachedVolume, + Convert_v1_AvoidPods_To_api_AvoidPods, + Convert_api_AvoidPods_To_v1_AvoidPods, + Convert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource, + Convert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource, + Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource, + Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource, + Convert_v1_Binding_To_api_Binding, + Convert_api_Binding_To_v1_Binding, + Convert_v1_Capabilities_To_api_Capabilities, + Convert_api_Capabilities_To_v1_Capabilities, + Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource, + Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource, + Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource, + Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource, + Convert_v1_ComponentCondition_To_api_ComponentCondition, + Convert_api_ComponentCondition_To_v1_ComponentCondition, + Convert_v1_ComponentStatus_To_api_ComponentStatus, + Convert_api_ComponentStatus_To_v1_ComponentStatus, + Convert_v1_ComponentStatusList_To_api_ComponentStatusList, + Convert_api_ComponentStatusList_To_v1_ComponentStatusList, + Convert_v1_ConfigMap_To_api_ConfigMap, + Convert_api_ConfigMap_To_v1_ConfigMap, + Convert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource, + Convert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource, + Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector, + Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector, + 
Convert_v1_ConfigMapList_To_api_ConfigMapList, + Convert_api_ConfigMapList_To_v1_ConfigMapList, + Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection, + Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection, + Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource, + Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource, + Convert_v1_Container_To_api_Container, + Convert_api_Container_To_v1_Container, + Convert_v1_ContainerImage_To_api_ContainerImage, + Convert_api_ContainerImage_To_v1_ContainerImage, + Convert_v1_ContainerPort_To_api_ContainerPort, + Convert_api_ContainerPort_To_v1_ContainerPort, + Convert_v1_ContainerState_To_api_ContainerState, + Convert_api_ContainerState_To_v1_ContainerState, + Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning, + Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning, + Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated, + Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated, + Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting, + Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting, + Convert_v1_ContainerStatus_To_api_ContainerStatus, + Convert_api_ContainerStatus_To_v1_ContainerStatus, + Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint, + Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint, + Convert_v1_DeleteOptions_To_api_DeleteOptions, + Convert_api_DeleteOptions_To_v1_DeleteOptions, + Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection, + Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection, + Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile, + Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile, + Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource, + Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource, + Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource, + Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource, + Convert_v1_EndpointAddress_To_api_EndpointAddress, + Convert_api_EndpointAddress_To_v1_EndpointAddress, + Convert_v1_EndpointPort_To_api_EndpointPort, + Convert_api_EndpointPort_To_v1_EndpointPort, + Convert_v1_EndpointSubset_To_api_EndpointSubset, + Convert_api_EndpointSubset_To_v1_EndpointSubset, + Convert_v1_Endpoints_To_api_Endpoints, + Convert_api_Endpoints_To_v1_Endpoints, + Convert_v1_EndpointsList_To_api_EndpointsList, + Convert_api_EndpointsList_To_v1_EndpointsList, + Convert_v1_EnvFromSource_To_api_EnvFromSource, + Convert_api_EnvFromSource_To_v1_EnvFromSource, + Convert_v1_EnvVar_To_api_EnvVar, + Convert_api_EnvVar_To_v1_EnvVar, + Convert_v1_EnvVarSource_To_api_EnvVarSource, + Convert_api_EnvVarSource_To_v1_EnvVarSource, + Convert_v1_Event_To_api_Event, + Convert_api_Event_To_v1_Event, + Convert_v1_EventList_To_api_EventList, + Convert_api_EventList_To_v1_EventList, + Convert_v1_EventSource_To_api_EventSource, + Convert_api_EventSource_To_v1_EventSource, + Convert_v1_ExecAction_To_api_ExecAction, + Convert_api_ExecAction_To_v1_ExecAction, + Convert_v1_FCVolumeSource_To_api_FCVolumeSource, + Convert_api_FCVolumeSource_To_v1_FCVolumeSource, + Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource, + Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource, + Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource, + Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource, + Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource, + Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource, + 
Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource, + Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource, + Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource, + Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource, + Convert_v1_HTTPGetAction_To_api_HTTPGetAction, + Convert_api_HTTPGetAction_To_v1_HTTPGetAction, + Convert_v1_HTTPHeader_To_api_HTTPHeader, + Convert_api_HTTPHeader_To_v1_HTTPHeader, + Convert_v1_Handler_To_api_Handler, + Convert_api_Handler_To_v1_Handler, + Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource, + Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource, + Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource, + Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource, + Convert_v1_KeyToPath_To_api_KeyToPath, + Convert_api_KeyToPath_To_v1_KeyToPath, + Convert_v1_Lifecycle_To_api_Lifecycle, + Convert_api_Lifecycle_To_v1_Lifecycle, + Convert_v1_LimitRange_To_api_LimitRange, + Convert_api_LimitRange_To_v1_LimitRange, + Convert_v1_LimitRangeItem_To_api_LimitRangeItem, + Convert_api_LimitRangeItem_To_v1_LimitRangeItem, + Convert_v1_LimitRangeList_To_api_LimitRangeList, + Convert_api_LimitRangeList_To_v1_LimitRangeList, + Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec, + Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec, + Convert_v1_List_To_api_List, + Convert_api_List_To_v1_List, + Convert_v1_ListOptions_To_api_ListOptions, + Convert_api_ListOptions_To_v1_ListOptions, + Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress, + Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress, + Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus, + Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus, + Convert_v1_LocalObjectReference_To_api_LocalObjectReference, + Convert_api_LocalObjectReference_To_v1_LocalObjectReference, + Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource, + Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource, + Convert_v1_Namespace_To_api_Namespace, + Convert_api_Namespace_To_v1_Namespace, + Convert_v1_NamespaceList_To_api_NamespaceList, + Convert_api_NamespaceList_To_v1_NamespaceList, + Convert_v1_NamespaceSpec_To_api_NamespaceSpec, + Convert_api_NamespaceSpec_To_v1_NamespaceSpec, + Convert_v1_NamespaceStatus_To_api_NamespaceStatus, + Convert_api_NamespaceStatus_To_v1_NamespaceStatus, + Convert_v1_Node_To_api_Node, + Convert_api_Node_To_v1_Node, + Convert_v1_NodeAddress_To_api_NodeAddress, + Convert_api_NodeAddress_To_v1_NodeAddress, + Convert_v1_NodeAffinity_To_api_NodeAffinity, + Convert_api_NodeAffinity_To_v1_NodeAffinity, + Convert_v1_NodeCondition_To_api_NodeCondition, + Convert_api_NodeCondition_To_v1_NodeCondition, + Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints, + Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints, + Convert_v1_NodeList_To_api_NodeList, + Convert_api_NodeList_To_v1_NodeList, + Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions, + Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions, + Convert_v1_NodeResources_To_api_NodeResources, + Convert_api_NodeResources_To_v1_NodeResources, + Convert_v1_NodeSelector_To_api_NodeSelector, + Convert_api_NodeSelector_To_v1_NodeSelector, + Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement, + Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement, + Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm, + Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm, + Convert_v1_NodeSpec_To_api_NodeSpec, + Convert_api_NodeSpec_To_v1_NodeSpec, + Convert_v1_NodeStatus_To_api_NodeStatus, + 
Convert_api_NodeStatus_To_v1_NodeStatus, + Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo, + Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo, + Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector, + Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector, + Convert_v1_ObjectMeta_To_api_ObjectMeta, + Convert_api_ObjectMeta_To_v1_ObjectMeta, + Convert_v1_ObjectReference_To_api_ObjectReference, + Convert_api_ObjectReference_To_v1_ObjectReference, + Convert_v1_PersistentVolume_To_api_PersistentVolume, + Convert_api_PersistentVolume_To_v1_PersistentVolume, + Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim, + Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim, + Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList, + Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList, + Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec, + Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec, + Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus, + Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus, + Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource, + Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource, + Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList, + Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList, + Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource, + Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource, + Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec, + Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec, + Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus, + Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus, + Convert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource, + Convert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource, + Convert_v1_Pod_To_api_Pod, + Convert_api_Pod_To_v1_Pod, + Convert_v1_PodAffinity_To_api_PodAffinity, + Convert_api_PodAffinity_To_v1_PodAffinity, + Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm, + Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm, + Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity, + Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity, + Convert_v1_PodAttachOptions_To_api_PodAttachOptions, + Convert_api_PodAttachOptions_To_v1_PodAttachOptions, + Convert_v1_PodCondition_To_api_PodCondition, + Convert_api_PodCondition_To_v1_PodCondition, + Convert_v1_PodExecOptions_To_api_PodExecOptions, + Convert_api_PodExecOptions_To_v1_PodExecOptions, + Convert_v1_PodList_To_api_PodList, + Convert_api_PodList_To_v1_PodList, + Convert_v1_PodLogOptions_To_api_PodLogOptions, + Convert_api_PodLogOptions_To_v1_PodLogOptions, + Convert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions, + Convert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions, + Convert_v1_PodProxyOptions_To_api_PodProxyOptions, + Convert_api_PodProxyOptions_To_v1_PodProxyOptions, + Convert_v1_PodSecurityContext_To_api_PodSecurityContext, + Convert_api_PodSecurityContext_To_v1_PodSecurityContext, + Convert_v1_PodSignature_To_api_PodSignature, + Convert_api_PodSignature_To_v1_PodSignature, + Convert_v1_PodSpec_To_api_PodSpec, + Convert_api_PodSpec_To_v1_PodSpec, + Convert_v1_PodStatus_To_api_PodStatus, + Convert_api_PodStatus_To_v1_PodStatus, + Convert_v1_PodStatusResult_To_api_PodStatusResult, + 
Convert_api_PodStatusResult_To_v1_PodStatusResult, + Convert_v1_PodTemplate_To_api_PodTemplate, + Convert_api_PodTemplate_To_v1_PodTemplate, + Convert_v1_PodTemplateList_To_api_PodTemplateList, + Convert_api_PodTemplateList_To_v1_PodTemplateList, + Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec, + Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec, + Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource, + Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource, + Convert_v1_Preconditions_To_api_Preconditions, + Convert_api_Preconditions_To_v1_Preconditions, + Convert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry, + Convert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry, + Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm, + Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm, + Convert_v1_Probe_To_api_Probe, + Convert_api_Probe_To_v1_Probe, + Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource, + Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource, + Convert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource, + Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource, + Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource, + Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource, + Convert_v1_RangeAllocation_To_api_RangeAllocation, + Convert_api_RangeAllocation_To_v1_RangeAllocation, + Convert_v1_ReplicationController_To_api_ReplicationController, + Convert_api_ReplicationController_To_v1_ReplicationController, + Convert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition, + Convert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition, + Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList, + Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList, + Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec, + Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, + Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus, + Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus, + Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector, + Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector, + Convert_v1_ResourceQuota_To_api_ResourceQuota, + Convert_api_ResourceQuota_To_v1_ResourceQuota, + Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList, + Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList, + Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec, + Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec, + Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus, + Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus, + Convert_v1_ResourceRequirements_To_api_ResourceRequirements, + Convert_api_ResourceRequirements_To_v1_ResourceRequirements, + Convert_v1_SELinuxOptions_To_api_SELinuxOptions, + Convert_api_SELinuxOptions_To_v1_SELinuxOptions, + Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource, + Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource, + Convert_v1_Secret_To_api_Secret, + Convert_api_Secret_To_v1_Secret, + Convert_v1_SecretEnvSource_To_api_SecretEnvSource, + Convert_api_SecretEnvSource_To_v1_SecretEnvSource, + Convert_v1_SecretKeySelector_To_api_SecretKeySelector, + Convert_api_SecretKeySelector_To_v1_SecretKeySelector, + Convert_v1_SecretList_To_api_SecretList, + Convert_api_SecretList_To_v1_SecretList, + Convert_v1_SecretProjection_To_api_SecretProjection, + Convert_api_SecretProjection_To_v1_SecretProjection, + 
Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource, + Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource, + Convert_v1_SecurityContext_To_api_SecurityContext, + Convert_api_SecurityContext_To_v1_SecurityContext, + Convert_v1_SerializedReference_To_api_SerializedReference, + Convert_api_SerializedReference_To_v1_SerializedReference, + Convert_v1_Service_To_api_Service, + Convert_api_Service_To_v1_Service, + Convert_v1_ServiceAccount_To_api_ServiceAccount, + Convert_api_ServiceAccount_To_v1_ServiceAccount, + Convert_v1_ServiceAccountList_To_api_ServiceAccountList, + Convert_api_ServiceAccountList_To_v1_ServiceAccountList, + Convert_v1_ServiceList_To_api_ServiceList, + Convert_api_ServiceList_To_v1_ServiceList, + Convert_v1_ServicePort_To_api_ServicePort, + Convert_api_ServicePort_To_v1_ServicePort, + Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions, + Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions, + Convert_v1_ServiceSpec_To_api_ServiceSpec, + Convert_api_ServiceSpec_To_v1_ServiceSpec, + Convert_v1_ServiceStatus_To_api_ServiceStatus, + Convert_api_ServiceStatus_To_v1_ServiceStatus, + Convert_v1_Sysctl_To_api_Sysctl, + Convert_api_Sysctl_To_v1_Sysctl, + Convert_v1_TCPSocketAction_To_api_TCPSocketAction, + Convert_api_TCPSocketAction_To_v1_TCPSocketAction, + Convert_v1_Taint_To_api_Taint, + Convert_api_Taint_To_v1_Taint, + Convert_v1_Toleration_To_api_Toleration, + Convert_api_Toleration_To_v1_Toleration, + Convert_v1_Volume_To_api_Volume, + Convert_api_Volume_To_v1_Volume, + Convert_v1_VolumeMount_To_api_VolumeMount, + Convert_api_VolumeMount_To_v1_VolumeMount, + Convert_v1_VolumeProjection_To_api_VolumeProjection, + Convert_api_VolumeProjection_To_v1_VolumeProjection, + Convert_v1_VolumeSource_To_api_VolumeSource, + Convert_api_VolumeSource_To_v1_VolumeSource, + Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource, + Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource, + Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm, + Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm, + ) +} + +func autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in, out, s) +} + +func autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + return autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in, out, s) +} + +func autoConvert_v1_Affinity_To_api_Affinity(in *Affinity, out *api.Affinity, s conversion.Scope) error { + out.NodeAffinity 
= (*api.NodeAffinity)(unsafe.Pointer(in.NodeAffinity)) + out.PodAffinity = (*api.PodAffinity)(unsafe.Pointer(in.PodAffinity)) + out.PodAntiAffinity = (*api.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity)) + return nil +} + +func Convert_v1_Affinity_To_api_Affinity(in *Affinity, out *api.Affinity, s conversion.Scope) error { + return autoConvert_v1_Affinity_To_api_Affinity(in, out, s) +} + +func autoConvert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *Affinity, s conversion.Scope) error { + out.NodeAffinity = (*NodeAffinity)(unsafe.Pointer(in.NodeAffinity)) + out.PodAffinity = (*PodAffinity)(unsafe.Pointer(in.PodAffinity)) + out.PodAntiAffinity = (*PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity)) + return nil +} + +func Convert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *Affinity, s conversion.Scope) error { + return autoConvert_api_Affinity_To_v1_Affinity(in, out, s) +} + +func autoConvert_v1_AttachedVolume_To_api_AttachedVolume(in *AttachedVolume, out *api.AttachedVolume, s conversion.Scope) error { + out.Name = api.UniqueVolumeName(in.Name) + out.DevicePath = in.DevicePath + return nil +} + +func Convert_v1_AttachedVolume_To_api_AttachedVolume(in *AttachedVolume, out *api.AttachedVolume, s conversion.Scope) error { + return autoConvert_v1_AttachedVolume_To_api_AttachedVolume(in, out, s) +} + +func autoConvert_api_AttachedVolume_To_v1_AttachedVolume(in *api.AttachedVolume, out *AttachedVolume, s conversion.Scope) error { + out.Name = UniqueVolumeName(in.Name) + out.DevicePath = in.DevicePath + return nil +} + +func Convert_api_AttachedVolume_To_v1_AttachedVolume(in *api.AttachedVolume, out *AttachedVolume, s conversion.Scope) error { + return autoConvert_api_AttachedVolume_To_v1_AttachedVolume(in, out, s) +} + +func autoConvert_v1_AvoidPods_To_api_AvoidPods(in *AvoidPods, out *api.AvoidPods, s conversion.Scope) error { + out.PreferAvoidPods = *(*[]api.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods)) + return nil +} + +func Convert_v1_AvoidPods_To_api_AvoidPods(in *AvoidPods, out *api.AvoidPods, s conversion.Scope) error { + return autoConvert_v1_AvoidPods_To_api_AvoidPods(in, out, s) +} + +func autoConvert_api_AvoidPods_To_v1_AvoidPods(in *api.AvoidPods, out *AvoidPods, s conversion.Scope) error { + out.PreferAvoidPods = *(*[]PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods)) + return nil +} + +func Convert_api_AvoidPods_To_v1_AvoidPods(in *api.AvoidPods, out *AvoidPods, s conversion.Scope) error { + return autoConvert_api_AvoidPods_To_v1_AvoidPods(in, out, s) +} + +func autoConvert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in *AzureDiskVolumeSource, out *api.AzureDiskVolumeSource, s conversion.Scope) error { + out.DiskName = in.DiskName + out.DataDiskURI = in.DataDiskURI + out.CachingMode = (*api.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode)) + out.FSType = (*string)(unsafe.Pointer(in.FSType)) + out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly)) + return nil +} + +func Convert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in *AzureDiskVolumeSource, out *api.AzureDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in, out, s) +} + +func autoConvert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *api.AzureDiskVolumeSource, out *AzureDiskVolumeSource, s conversion.Scope) error { + out.DiskName = in.DiskName + out.DataDiskURI = in.DataDiskURI + out.CachingMode = (*AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode)) + out.FSType = 
(*string)(unsafe.Pointer(in.FSType)) + out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly)) + return nil +} + +func Convert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *api.AzureDiskVolumeSource, out *AzureDiskVolumeSource, s conversion.Scope) error { + return autoConvert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.ShareName = in.ShareName + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in, out, s) +} + +func autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *AzureFileVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.ShareName = in.ShareName + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *AzureFileVolumeSource, s conversion.Scope) error { + return autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in, out, s) +} + +func autoConvert_v1_Binding_To_api_Binding(in *Binding, out *api.Binding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + return nil +} + +func Convert_v1_Binding_To_api_Binding(in *Binding, out *api.Binding, s conversion.Scope) error { + return autoConvert_v1_Binding_To_api_Binding(in, out, s) +} + +func autoConvert_api_Binding_To_v1_Binding(in *api.Binding, out *Binding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + return nil +} + +func Convert_api_Binding_To_v1_Binding(in *api.Binding, out *Binding, s conversion.Scope) error { + return autoConvert_api_Binding_To_v1_Binding(in, out, s) +} + +func autoConvert_v1_Capabilities_To_api_Capabilities(in *Capabilities, out *api.Capabilities, s conversion.Scope) error { + out.Add = *(*[]api.Capability)(unsafe.Pointer(&in.Add)) + out.Drop = *(*[]api.Capability)(unsafe.Pointer(&in.Drop)) + return nil +} + +func Convert_v1_Capabilities_To_api_Capabilities(in *Capabilities, out *api.Capabilities, s conversion.Scope) error { + return autoConvert_v1_Capabilities_To_api_Capabilities(in, out, s) +} + +func autoConvert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *Capabilities, s conversion.Scope) error { + out.Add = *(*[]Capability)(unsafe.Pointer(&in.Add)) + out.Drop = *(*[]Capability)(unsafe.Pointer(&in.Drop)) + return nil +} + +func Convert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *Capabilities, s conversion.Scope) error { + return autoConvert_api_Capabilities_To_v1_Capabilities(in, out, s) +} + +func autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error { + out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) + out.Path = in.Path + out.User = in.User + out.SecretFile = in.SecretFile + out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + 
return nil +} + +func Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error { + return autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in, out, s) +} + +func autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *CephFSVolumeSource, s conversion.Scope) error { + if in.Monitors == nil { + out.Monitors = make([]string, 0) + } else { + out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) + } + out.Path = in.Path + out.User = in.User + out.SecretFile = in.SecretFile + out.SecretRef = (*LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *CephFSVolumeSource, s conversion.Scope) error { + return autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in, out, s) +} + +func autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error { + return autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in, out, s) +} + +func autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *CinderVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *CinderVolumeSource, s conversion.Scope) error { + return autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s) +} + +func autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in *ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error { + out.Type = api.ComponentConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + out.Message = in.Message + out.Error = in.Error + return nil +} + +func Convert_v1_ComponentCondition_To_api_ComponentCondition(in *ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error { + return autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in, out, s) +} + +func autoConvert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *ComponentCondition, s conversion.Scope) error { + out.Type = ComponentConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + out.Message = in.Message + out.Error = in.Error + return nil +} + +func Convert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *ComponentCondition, s conversion.Scope) error { + return autoConvert_api_ComponentCondition_To_v1_ComponentCondition(in, out, s) +} + +func autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in *ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Conditions = *(*[]api.ComponentCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +func Convert_v1_ComponentStatus_To_api_ComponentStatus(in *ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error { + return autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in, out, s) +} + +func autoConvert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, out 
*ComponentStatus, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Conditions = *(*[]ComponentCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +func Convert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, out *ComponentStatus, s conversion.Scope) error { + return autoConvert_api_ComponentStatus_To_v1_ComponentStatus(in, out, s) +} + +func autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in *ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.ComponentStatus)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_ComponentStatusList_To_api_ComponentStatusList(in *ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error { + return autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in, out, s) +} + +func autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *ComponentStatusList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]ComponentStatus, 0) + } else { + out.Items = *(*[]ComponentStatus)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *ComponentStatusList, s conversion.Scope) error { + return autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in, out, s) +} + +func autoConvert_v1_ConfigMap_To_api_ConfigMap(in *ConfigMap, out *api.ConfigMap, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) + return nil +} + +func Convert_v1_ConfigMap_To_api_ConfigMap(in *ConfigMap, out *api.ConfigMap, s conversion.Scope) error { + return autoConvert_v1_ConfigMap_To_api_ConfigMap(in, out, s) +} + +func autoConvert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *ConfigMap, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) + return nil +} + +func Convert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *ConfigMap, s conversion.Scope) error { + return autoConvert_api_ConfigMap_To_v1_ConfigMap(in, out, s) +} + +func autoConvert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in *ConfigMapEnvSource, out *api.ConfigMapEnvSource, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in *ConfigMapEnvSource, out *api.ConfigMapEnvSource, s conversion.Scope) error { + return autoConvert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in, out, s) +} + +func autoConvert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *api.ConfigMapEnvSource, out *ConfigMapEnvSource, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *api.ConfigMapEnvSource, out *ConfigMapEnvSource, s conversion.Scope) error { + return autoConvert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in, out, s) +} + +func autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in 
*ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error { + return autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in, out, s) +} + +func autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *ConfigMapKeySelector, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *ConfigMapKeySelector, s conversion.Scope) error { + return autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in, out, s) +} + +func autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in *ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.ConfigMap)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_ConfigMapList_To_api_ConfigMapList(in *ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error { + return autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in, out, s) +} + +func autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *ConfigMapList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]ConfigMap, 0) + } else { + out.Items = *(*[]ConfigMap)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *ConfigMapList, s conversion.Scope) error { + return autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in, out, s) +} + +func autoConvert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in *ConfigMapProjection, out *api.ConfigMapProjection, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in *ConfigMapProjection, out *api.ConfigMapProjection, s conversion.Scope) error { + return autoConvert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in, out, s) +} + +func autoConvert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in *api.ConfigMapProjection, out *ConfigMapProjection, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in *api.ConfigMapProjection, out *ConfigMapProjection, s conversion.Scope) error { + return autoConvert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in, out, s) +} + +func 
autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in, out, s) +} + +func autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *ConfigMapVolumeSource, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *ConfigMapVolumeSource, s conversion.Scope) error { + return autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in, out, s) +} + +func autoConvert_v1_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error { + out.Name = in.Name + out.Image = in.Image + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) + out.WorkingDir = in.WorkingDir + out.Ports = *(*[]api.ContainerPort)(unsafe.Pointer(&in.Ports)) + out.EnvFrom = *(*[]api.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + out.Env = *(*[]api.EnvVar)(unsafe.Pointer(&in.Env)) + if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeMounts = *(*[]api.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + out.LivenessProbe = (*api.Probe)(unsafe.Pointer(in.LivenessProbe)) + out.ReadinessProbe = (*api.Probe)(unsafe.Pointer(in.ReadinessProbe)) + out.Lifecycle = (*api.Lifecycle)(unsafe.Pointer(in.Lifecycle)) + out.TerminationMessagePath = in.TerminationMessagePath + out.TerminationMessagePolicy = api.TerminationMessagePolicy(in.TerminationMessagePolicy) + out.ImagePullPolicy = api.PullPolicy(in.ImagePullPolicy) + out.SecurityContext = (*api.SecurityContext)(unsafe.Pointer(in.SecurityContext)) + out.Stdin = in.Stdin + out.StdinOnce = in.StdinOnce + out.TTY = in.TTY + return nil +} + +func Convert_v1_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error { + return autoConvert_v1_Container_To_api_Container(in, out, s) +} + +func autoConvert_api_Container_To_v1_Container(in *api.Container, out *Container, s conversion.Scope) error { + out.Name = in.Name + out.Image = in.Image + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) + out.WorkingDir = in.WorkingDir + out.Ports = *(*[]ContainerPort)(unsafe.Pointer(&in.Ports)) + out.EnvFrom = *(*[]EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + out.Env = *(*[]EnvVar)(unsafe.Pointer(&in.Env)) + if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err 
!= nil { + return err + } + out.VolumeMounts = *(*[]VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + out.LivenessProbe = (*Probe)(unsafe.Pointer(in.LivenessProbe)) + out.ReadinessProbe = (*Probe)(unsafe.Pointer(in.ReadinessProbe)) + out.Lifecycle = (*Lifecycle)(unsafe.Pointer(in.Lifecycle)) + out.TerminationMessagePath = in.TerminationMessagePath + out.TerminationMessagePolicy = TerminationMessagePolicy(in.TerminationMessagePolicy) + out.ImagePullPolicy = PullPolicy(in.ImagePullPolicy) + out.SecurityContext = (*SecurityContext)(unsafe.Pointer(in.SecurityContext)) + out.Stdin = in.Stdin + out.StdinOnce = in.StdinOnce + out.TTY = in.TTY + return nil +} + +func Convert_api_Container_To_v1_Container(in *api.Container, out *Container, s conversion.Scope) error { + return autoConvert_api_Container_To_v1_Container(in, out, s) +} + +func autoConvert_v1_ContainerImage_To_api_ContainerImage(in *ContainerImage, out *api.ContainerImage, s conversion.Scope) error { + out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) + out.SizeBytes = in.SizeBytes + return nil +} + +func Convert_v1_ContainerImage_To_api_ContainerImage(in *ContainerImage, out *api.ContainerImage, s conversion.Scope) error { + return autoConvert_v1_ContainerImage_To_api_ContainerImage(in, out, s) +} + +func autoConvert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out *ContainerImage, s conversion.Scope) error { + if in.Names == nil { + out.Names = make([]string, 0) + } else { + out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) + } + out.SizeBytes = in.SizeBytes + return nil +} + +func Convert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out *ContainerImage, s conversion.Scope) error { + return autoConvert_api_ContainerImage_To_v1_ContainerImage(in, out, s) +} + +func autoConvert_v1_ContainerPort_To_api_ContainerPort(in *ContainerPort, out *api.ContainerPort, s conversion.Scope) error { + out.Name = in.Name + out.HostPort = in.HostPort + out.ContainerPort = in.ContainerPort + out.Protocol = api.Protocol(in.Protocol) + out.HostIP = in.HostIP + return nil +} + +func Convert_v1_ContainerPort_To_api_ContainerPort(in *ContainerPort, out *api.ContainerPort, s conversion.Scope) error { + return autoConvert_v1_ContainerPort_To_api_ContainerPort(in, out, s) +} + +func autoConvert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *ContainerPort, s conversion.Scope) error { + out.Name = in.Name + out.HostPort = in.HostPort + out.ContainerPort = in.ContainerPort + out.Protocol = Protocol(in.Protocol) + out.HostIP = in.HostIP + return nil +} + +func Convert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *ContainerPort, s conversion.Scope) error { + return autoConvert_api_ContainerPort_To_v1_ContainerPort(in, out, s) +} + +func autoConvert_v1_ContainerState_To_api_ContainerState(in *ContainerState, out *api.ContainerState, s conversion.Scope) error { + out.Waiting = (*api.ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) + out.Running = (*api.ContainerStateRunning)(unsafe.Pointer(in.Running)) + out.Terminated = (*api.ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) + return nil +} + +func Convert_v1_ContainerState_To_api_ContainerState(in *ContainerState, out *api.ContainerState, s conversion.Scope) error { + return autoConvert_v1_ContainerState_To_api_ContainerState(in, out, s) +} + +func autoConvert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out *ContainerState, s conversion.Scope) error { + out.Waiting = 
(*ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) + out.Running = (*ContainerStateRunning)(unsafe.Pointer(in.Running)) + out.Terminated = (*ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) + return nil +} + +func Convert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out *ContainerState, s conversion.Scope) error { + return autoConvert_api_ContainerState_To_v1_ContainerState(in, out, s) +} + +func autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error { + out.StartedAt = in.StartedAt + return nil +} + +func Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error { + return autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in, out, s) +} + +func autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.ContainerStateRunning, out *ContainerStateRunning, s conversion.Scope) error { + out.StartedAt = in.StartedAt + return nil +} + +func Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.ContainerStateRunning, out *ContainerStateRunning, s conversion.Scope) error { + return autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in, out, s) +} + +func autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error { + out.ExitCode = in.ExitCode + out.Signal = in.Signal + out.Reason = in.Reason + out.Message = in.Message + out.StartedAt = in.StartedAt + out.FinishedAt = in.FinishedAt + out.ContainerID = in.ContainerID + return nil +} + +func Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error { + return autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in, out, s) +} + +func autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api.ContainerStateTerminated, out *ContainerStateTerminated, s conversion.Scope) error { + out.ExitCode = in.ExitCode + out.Signal = in.Signal + out.Reason = in.Reason + out.Message = in.Message + out.StartedAt = in.StartedAt + out.FinishedAt = in.FinishedAt + out.ContainerID = in.ContainerID + return nil +} + +func Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api.ContainerStateTerminated, out *ContainerStateTerminated, s conversion.Scope) error { + return autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in, out, s) +} + +func autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error { + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error { + return autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in, out, s) +} + +func autoConvert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.ContainerStateWaiting, out *ContainerStateWaiting, s conversion.Scope) error { + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.ContainerStateWaiting, out *ContainerStateWaiting, s conversion.Scope) error { + return 
autoConvert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in, out, s) +} + +func autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in *ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_v1_ContainerState_To_api_ContainerState(&in.State, &out.State, s); err != nil { + return err + } + if err := Convert_v1_ContainerState_To_api_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { + return err + } + out.Ready = in.Ready + out.RestartCount = in.RestartCount + out.Image = in.Image + out.ImageID = in.ImageID + out.ContainerID = in.ContainerID + return nil +} + +func Convert_v1_ContainerStatus_To_api_ContainerStatus(in *ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error { + return autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in, out, s) +} + +func autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, out *ContainerStatus, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_api_ContainerState_To_v1_ContainerState(&in.State, &out.State, s); err != nil { + return err + } + if err := Convert_api_ContainerState_To_v1_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { + return err + } + out.Ready = in.Ready + out.RestartCount = in.RestartCount + out.Image = in.Image + out.ImageID = in.ImageID + out.ContainerID = in.ContainerID + return nil +} + +func Convert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, out *ContainerStatus, s conversion.Scope) error { + return autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in, out, s) +} + +func autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error { + out.Port = in.Port + return nil +} + +func Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error { + return autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in, out, s) +} + +func autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *DaemonEndpoint, s conversion.Scope) error { + out.Port = in.Port + return nil +} + +func Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *DaemonEndpoint, s conversion.Scope) error { + return autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in, out, s) +} + +func autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in *DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error { + out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) + out.Preconditions = (*api.Preconditions)(unsafe.Pointer(in.Preconditions)) + out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) + out.PropagationPolicy = (*api.DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) + return nil +} + +func Convert_v1_DeleteOptions_To_api_DeleteOptions(in *DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error { + return autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in, out, s) +} + +func autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *DeleteOptions, s conversion.Scope) error { + out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) + out.Preconditions = (*Preconditions)(unsafe.Pointer(in.Preconditions)) + out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) + out.PropagationPolicy = (*DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) + return nil +} + +func 
Convert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *DeleteOptions, s conversion.Scope) error { + return autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in, out, s) +} + +func autoConvert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in *DownwardAPIProjection, out *api.DownwardAPIProjection, s conversion.Scope) error { + out.Items = *(*[]api.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in *DownwardAPIProjection, out *api.DownwardAPIProjection, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in, out, s) +} + +func autoConvert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *api.DownwardAPIProjection, out *DownwardAPIProjection, s conversion.Scope) error { + out.Items = *(*[]DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *api.DownwardAPIProjection, out *DownwardAPIProjection, s conversion.Scope) error { + return autoConvert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in, out, s) +} + +func autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error { + out.Path = in.Path + out.FieldRef = (*api.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*api.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +func Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in, out, s) +} + +func autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, s conversion.Scope) error { + out.Path = in.Path + out.FieldRef = (*ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +func Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, s conversion.Scope) error { + return autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in, out, s) +} + +func autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error { + out.Items = *(*[]api.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +func Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in, out, s) +} + +func autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *DownwardAPIVolumeSource, s conversion.Scope) error { + out.Items = *(*[]DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +func Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *DownwardAPIVolumeSource, s conversion.Scope) error { + return 
autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in, out, s) +} + +func autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error { + out.Medium = api.StorageMedium(in.Medium) + return nil +} + +func Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error { + return autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in, out, s) +} + +func autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *EmptyDirVolumeSource, s conversion.Scope) error { + out.Medium = StorageMedium(in.Medium) + return nil +} + +func Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *EmptyDirVolumeSource, s conversion.Scope) error { + return autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in, out, s) +} + +func autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in *EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) + out.TargetRef = (*api.ObjectReference)(unsafe.Pointer(in.TargetRef)) + return nil +} + +func Convert_v1_EndpointAddress_To_api_EndpointAddress(in *EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error { + return autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in, out, s) +} + +func autoConvert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, out *EndpointAddress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) + out.TargetRef = (*ObjectReference)(unsafe.Pointer(in.TargetRef)) + return nil +} + +func Convert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, out *EndpointAddress, s conversion.Scope) error { + return autoConvert_api_EndpointAddress_To_v1_EndpointAddress(in, out, s) +} + +func autoConvert_v1_EndpointPort_To_api_EndpointPort(in *EndpointPort, out *api.EndpointPort, s conversion.Scope) error { + out.Name = in.Name + out.Port = in.Port + out.Protocol = api.Protocol(in.Protocol) + return nil +} + +func Convert_v1_EndpointPort_To_api_EndpointPort(in *EndpointPort, out *api.EndpointPort, s conversion.Scope) error { + return autoConvert_v1_EndpointPort_To_api_EndpointPort(in, out, s) +} + +func autoConvert_api_EndpointPort_To_v1_EndpointPort(in *api.EndpointPort, out *EndpointPort, s conversion.Scope) error { + out.Name = in.Name + out.Port = in.Port + out.Protocol = Protocol(in.Protocol) + return nil +} + +func Convert_api_EndpointPort_To_v1_EndpointPort(in *api.EndpointPort, out *EndpointPort, s conversion.Scope) error { + return autoConvert_api_EndpointPort_To_v1_EndpointPort(in, out, s) +} + +func autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in *EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error { + out.Addresses = *(*[]api.EndpointAddress)(unsafe.Pointer(&in.Addresses)) + out.NotReadyAddresses = *(*[]api.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) + out.Ports = *(*[]api.EndpointPort)(unsafe.Pointer(&in.Ports)) + return nil +} + +func Convert_v1_EndpointSubset_To_api_EndpointSubset(in *EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error { + return autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in, out, s) +} + +func autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in 
*api.EndpointSubset, out *EndpointSubset, s conversion.Scope) error { + out.Addresses = *(*[]EndpointAddress)(unsafe.Pointer(&in.Addresses)) + out.NotReadyAddresses = *(*[]EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) + out.Ports = *(*[]EndpointPort)(unsafe.Pointer(&in.Ports)) + return nil +} + +func Convert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out *EndpointSubset, s conversion.Scope) error { + return autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in, out, s) +} + +func autoConvert_v1_Endpoints_To_api_Endpoints(in *Endpoints, out *api.Endpoints, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Subsets = *(*[]api.EndpointSubset)(unsafe.Pointer(&in.Subsets)) + return nil +} + +func Convert_v1_Endpoints_To_api_Endpoints(in *Endpoints, out *api.Endpoints, s conversion.Scope) error { + return autoConvert_v1_Endpoints_To_api_Endpoints(in, out, s) +} + +func autoConvert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *Endpoints, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if in.Subsets == nil { + out.Subsets = make([]EndpointSubset, 0) + } else { + out.Subsets = *(*[]EndpointSubset)(unsafe.Pointer(&in.Subsets)) + } + return nil +} + +func Convert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *Endpoints, s conversion.Scope) error { + return autoConvert_api_Endpoints_To_v1_Endpoints(in, out, s) +} + +func autoConvert_v1_EndpointsList_To_api_EndpointsList(in *EndpointsList, out *api.EndpointsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.Endpoints)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_EndpointsList_To_api_EndpointsList(in *EndpointsList, out *api.EndpointsList, s conversion.Scope) error { + return autoConvert_v1_EndpointsList_To_api_EndpointsList(in, out, s) +} + +func autoConvert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *EndpointsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]Endpoints, 0) + } else { + out.Items = *(*[]Endpoints)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *EndpointsList, s conversion.Scope) error { + return autoConvert_api_EndpointsList_To_v1_EndpointsList(in, out, s) +} + +func autoConvert_v1_EnvFromSource_To_api_EnvFromSource(in *EnvFromSource, out *api.EnvFromSource, s conversion.Scope) error { + out.Prefix = in.Prefix + out.ConfigMapRef = (*api.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) + out.SecretRef = (*api.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) + return nil +} + +func Convert_v1_EnvFromSource_To_api_EnvFromSource(in *EnvFromSource, out *api.EnvFromSource, s conversion.Scope) error { + return autoConvert_v1_EnvFromSource_To_api_EnvFromSource(in, out, s) +} + +func autoConvert_api_EnvFromSource_To_v1_EnvFromSource(in *api.EnvFromSource, out *EnvFromSource, s conversion.Scope) error { + out.Prefix = in.Prefix + out.ConfigMapRef = (*ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) + out.SecretRef = (*SecretEnvSource)(unsafe.Pointer(in.SecretRef)) + return nil +} + +func Convert_api_EnvFromSource_To_v1_EnvFromSource(in *api.EnvFromSource, out *EnvFromSource, s conversion.Scope) error { + return autoConvert_api_EnvFromSource_To_v1_EnvFromSource(in, out, s) +} + +func autoConvert_v1_EnvVar_To_api_EnvVar(in *EnvVar, out *api.EnvVar, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + out.ValueFrom = 
(*api.EnvVarSource)(unsafe.Pointer(in.ValueFrom)) + return nil +} + +func Convert_v1_EnvVar_To_api_EnvVar(in *EnvVar, out *api.EnvVar, s conversion.Scope) error { + return autoConvert_v1_EnvVar_To_api_EnvVar(in, out, s) +} + +func autoConvert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *EnvVar, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + out.ValueFrom = (*EnvVarSource)(unsafe.Pointer(in.ValueFrom)) + return nil +} + +func Convert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *EnvVar, s conversion.Scope) error { + return autoConvert_api_EnvVar_To_v1_EnvVar(in, out, s) +} + +func autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in *EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error { + out.FieldRef = (*api.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*api.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.ConfigMapKeyRef = (*api.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) + out.SecretKeyRef = (*api.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) + return nil +} + +func Convert_v1_EnvVarSource_To_api_EnvVarSource(in *EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error { + return autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in, out, s) +} + +func autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *EnvVarSource, s conversion.Scope) error { + out.FieldRef = (*ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.ConfigMapKeyRef = (*ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) + out.SecretKeyRef = (*SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) + return nil +} + +func Convert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *EnvVarSource, s conversion.Scope) error { + return autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in, out, s) +} + +func autoConvert_v1_Event_To_api_Event(in *Event, out *api.Event, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + if err := Convert_v1_EventSource_To_api_EventSource(&in.Source, &out.Source, s); err != nil { + return err + } + out.FirstTimestamp = in.FirstTimestamp + out.LastTimestamp = in.LastTimestamp + out.Count = in.Count + out.Type = in.Type + return nil +} + +func Convert_v1_Event_To_api_Event(in *Event, out *api.Event, s conversion.Scope) error { + return autoConvert_v1_Event_To_api_Event(in, out, s) +} + +func autoConvert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + if err := Convert_api_EventSource_To_v1_EventSource(&in.Source, &out.Source, s); err != nil { + return err + } + out.FirstTimestamp = in.FirstTimestamp + out.LastTimestamp = in.LastTimestamp + out.Count = in.Count + out.Type = in.Type + return nil +} + +func Convert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.Scope) error { + return autoConvert_api_Event_To_v1_Event(in, out, s) +} + +func autoConvert_v1_EventList_To_api_EventList(in *EventList, out *api.EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = 
*(*[]api.Event)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_EventList_To_api_EventList(in *EventList, out *api.EventList, s conversion.Scope) error { + return autoConvert_v1_EventList_To_api_EventList(in, out, s) +} + +func autoConvert_api_EventList_To_v1_EventList(in *api.EventList, out *EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]Event, 0) + } else { + out.Items = *(*[]Event)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_api_EventList_To_v1_EventList(in *api.EventList, out *EventList, s conversion.Scope) error { + return autoConvert_api_EventList_To_v1_EventList(in, out, s) +} + +func autoConvert_v1_EventSource_To_api_EventSource(in *EventSource, out *api.EventSource, s conversion.Scope) error { + out.Component = in.Component + out.Host = in.Host + return nil +} + +func Convert_v1_EventSource_To_api_EventSource(in *EventSource, out *api.EventSource, s conversion.Scope) error { + return autoConvert_v1_EventSource_To_api_EventSource(in, out, s) +} + +func autoConvert_api_EventSource_To_v1_EventSource(in *api.EventSource, out *EventSource, s conversion.Scope) error { + out.Component = in.Component + out.Host = in.Host + return nil +} + +func Convert_api_EventSource_To_v1_EventSource(in *api.EventSource, out *EventSource, s conversion.Scope) error { + return autoConvert_api_EventSource_To_v1_EventSource(in, out, s) +} + +func autoConvert_v1_ExecAction_To_api_ExecAction(in *ExecAction, out *api.ExecAction, s conversion.Scope) error { + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +func Convert_v1_ExecAction_To_api_ExecAction(in *ExecAction, out *api.ExecAction, s conversion.Scope) error { + return autoConvert_v1_ExecAction_To_api_ExecAction(in, out, s) +} + +func autoConvert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *ExecAction, s conversion.Scope) error { + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +func Convert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *ExecAction, s conversion.Scope) error { + return autoConvert_api_ExecAction_To_v1_ExecAction(in, out, s) +} + +func autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in *FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error { + out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) + out.Lun = (*int32)(unsafe.Pointer(in.Lun)) + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_FCVolumeSource_To_api_FCVolumeSource(in *FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in, out, s) +} + +func autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *FCVolumeSource, s conversion.Scope) error { + if in.TargetWWNs == nil { + out.TargetWWNs = make([]string, 0) + } else { + out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) + } + out.Lun = (*int32)(unsafe.Pointer(in.Lun)) + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *FCVolumeSource, s conversion.Scope) error { + return autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in, out, s) +} + +func autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.FSType = in.FSType + out.SecretRef = 
(*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) + return nil +} + +func Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in, out, s) +} + +func autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *FlexVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.FSType = in.FSType + out.SecretRef = (*LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) + return nil +} + +func Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *FlexVolumeSource, s conversion.Scope) error { + return autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in, out, s) +} + +func autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error { + out.DatasetName = in.DatasetName + out.DatasetUUID = in.DatasetUUID + return nil +} + +func Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in, out, s) +} + +func autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *FlockerVolumeSource, s conversion.Scope) error { + out.DatasetName = in.DatasetName + out.DatasetUUID = in.DatasetUUID + return nil +} + +func Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *FlockerVolumeSource, s conversion.Scope) error { + return autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in, out, s) +} + +func autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + out.PDName = in.PDName + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, s conversion.Scope) error { + out.PDName = in.PDName + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error { + out.Repository = in.Repository + out.Revision = in.Revision + out.Directory = in.Directory + return nil +} + +func Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *GitRepoVolumeSource, out *api.GitRepoVolumeSource, s 
conversion.Scope) error { + return autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in, out, s) +} + +func autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *GitRepoVolumeSource, s conversion.Scope) error { + out.Repository = in.Repository + out.Revision = in.Revision + out.Directory = in.Directory + return nil +} + +func Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *GitRepoVolumeSource, s conversion.Scope) error { + return autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in, out, s) +} + +func autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error { + out.EndpointsName = in.EndpointsName + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in, out, s) +} + +func autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *GlusterfsVolumeSource, s conversion.Scope) error { + out.EndpointsName = in.EndpointsName + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *GlusterfsVolumeSource, s conversion.Scope) error { + return autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in, out, s) +} + +func autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in *HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error { + out.Path = in.Path + out.Port = in.Port + out.Host = in.Host + out.Scheme = api.URIScheme(in.Scheme) + out.HTTPHeaders = *(*[]api.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) + return nil +} + +func Convert_v1_HTTPGetAction_To_api_HTTPGetAction(in *HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error { + return autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in, out, s) +} + +func autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *HTTPGetAction, s conversion.Scope) error { + out.Path = in.Path + out.Port = in.Port + out.Host = in.Host + out.Scheme = URIScheme(in.Scheme) + out.HTTPHeaders = *(*[]HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) + return nil +} + +func Convert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *HTTPGetAction, s conversion.Scope) error { + return autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in, out, s) +} + +func autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in *HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +func Convert_v1_HTTPHeader_To_api_HTTPHeader(in *HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error { + return autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in, out, s) +} + +func autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *HTTPHeader, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +func Convert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *HTTPHeader, s conversion.Scope) error { + return autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in, out, s) +} + +func autoConvert_v1_Handler_To_api_Handler(in *Handler, out *api.Handler, s conversion.Scope) error { + out.Exec = (*api.ExecAction)(unsafe.Pointer(in.Exec)) + 
out.HTTPGet = (*api.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) + out.TCPSocket = (*api.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) + return nil +} + +func Convert_v1_Handler_To_api_Handler(in *Handler, out *api.Handler, s conversion.Scope) error { + return autoConvert_v1_Handler_To_api_Handler(in, out, s) +} + +func autoConvert_api_Handler_To_v1_Handler(in *api.Handler, out *Handler, s conversion.Scope) error { + out.Exec = (*ExecAction)(unsafe.Pointer(in.Exec)) + out.HTTPGet = (*HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) + out.TCPSocket = (*TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) + return nil +} + +func Convert_api_Handler_To_v1_Handler(in *api.Handler, out *Handler, s conversion.Scope) error { + return autoConvert_api_Handler_To_v1_Handler(in, out, s) +} + +func autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +func Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error { + return autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in, out, s) +} + +func autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *HostPathVolumeSource, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +func Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *HostPathVolumeSource, s conversion.Scope) error { + return autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in, out, s) +} + +func autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + return nil +} + +func Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in, out, s) +} + +func autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *ISCSIVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + return nil +} + +func Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *ISCSIVolumeSource, s conversion.Scope) error { + return autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in, out, s) +} + +func autoConvert_v1_KeyToPath_To_api_KeyToPath(in *KeyToPath, out *api.KeyToPath, s conversion.Scope) error { + out.Key = in.Key + out.Path = in.Path + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +func Convert_v1_KeyToPath_To_api_KeyToPath(in *KeyToPath, out *api.KeyToPath, s conversion.Scope) error { + return autoConvert_v1_KeyToPath_To_api_KeyToPath(in, out, s) +} + +func autoConvert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *KeyToPath, s conversion.Scope) error { + out.Key = in.Key + out.Path = in.Path + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +func Convert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out 
*KeyToPath, s conversion.Scope) error { + return autoConvert_api_KeyToPath_To_v1_KeyToPath(in, out, s) +} + +func autoConvert_v1_Lifecycle_To_api_Lifecycle(in *Lifecycle, out *api.Lifecycle, s conversion.Scope) error { + out.PostStart = (*api.Handler)(unsafe.Pointer(in.PostStart)) + out.PreStop = (*api.Handler)(unsafe.Pointer(in.PreStop)) + return nil +} + +func Convert_v1_Lifecycle_To_api_Lifecycle(in *Lifecycle, out *api.Lifecycle, s conversion.Scope) error { + return autoConvert_v1_Lifecycle_To_api_Lifecycle(in, out, s) +} + +func autoConvert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *Lifecycle, s conversion.Scope) error { + out.PostStart = (*Handler)(unsafe.Pointer(in.PostStart)) + out.PreStop = (*Handler)(unsafe.Pointer(in.PreStop)) + return nil +} + +func Convert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *Lifecycle, s conversion.Scope) error { + return autoConvert_api_Lifecycle_To_v1_Lifecycle(in, out, s) +} + +func autoConvert_v1_LimitRange_To_api_LimitRange(in *LimitRange, out *api.LimitRange, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_v1_LimitRange_To_api_LimitRange(in *LimitRange, out *api.LimitRange, s conversion.Scope) error { + return autoConvert_v1_LimitRange_To_api_LimitRange(in, out, s) +} + +func autoConvert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *LimitRange, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *LimitRange, s conversion.Scope) error { + return autoConvert_api_LimitRange_To_v1_LimitRange(in, out, s) +} + +func autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error { + out.Type = api.LimitType(in.Type) + out.Max = *(*api.ResourceList)(unsafe.Pointer(&in.Max)) + out.Min = *(*api.ResourceList)(unsafe.Pointer(&in.Min)) + out.Default = *(*api.ResourceList)(unsafe.Pointer(&in.Default)) + out.DefaultRequest = *(*api.ResourceList)(unsafe.Pointer(&in.DefaultRequest)) + out.MaxLimitRequestRatio = *(*api.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) + return nil +} + +func Convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error { + return autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in, out, s) +} + +func autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out *LimitRangeItem, s conversion.Scope) error { + out.Type = LimitType(in.Type) + out.Max = *(*ResourceList)(unsafe.Pointer(&in.Max)) + out.Min = *(*ResourceList)(unsafe.Pointer(&in.Min)) + out.Default = *(*ResourceList)(unsafe.Pointer(&in.Default)) + out.DefaultRequest = *(*ResourceList)(unsafe.Pointer(&in.DefaultRequest)) + out.MaxLimitRequestRatio = *(*ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) + return nil +} + +func Convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out *LimitRangeItem, s conversion.Scope) error { + return autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem(in, out, s) +} + +func autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in *LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = 
*(*[]api.LimitRange)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_LimitRangeList_To_api_LimitRangeList(in *LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error { + return autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in, out, s) +} + +func autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *LimitRangeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]LimitRange, 0) + } else { + out.Items = *(*[]LimitRange)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *LimitRangeList, s conversion.Scope) error { + return autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in, out, s) +} + +func autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error { + out.Limits = *(*[]api.LimitRangeItem)(unsafe.Pointer(&in.Limits)) + return nil +} + +func Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error { + return autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in, out, s) +} + +func autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *LimitRangeSpec, s conversion.Scope) error { + if in.Limits == nil { + out.Limits = make([]LimitRangeItem, 0) + } else { + out.Limits = *(*[]LimitRangeItem)(unsafe.Pointer(&in.Limits)) + } + return nil +} + +func Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *LimitRangeSpec, s conversion.Scope) error { + return autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in, out, s) +} + +func autoConvert_v1_List_To_api_List(in *List, out *api.List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_List_To_api_List(in *List, out *api.List, s conversion.Scope) error { + return autoConvert_v1_List_To_api_List(in, out, s) +} + +func autoConvert_api_List_To_v1_List(in *api.List, out *List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]runtime.RawExtension, 0) + } + return nil +} + +func Convert_api_List_To_v1_List(in *api.List, out *List, s conversion.Scope) error { + return autoConvert_api_List_To_v1_List(in, out, s) +} + +func autoConvert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error { + if err := meta_v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + if err := meta_v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + out.Watch = in.Watch + out.ResourceVersion = in.ResourceVersion + out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) + return nil +} + +func Convert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error { + return 
autoConvert_v1_ListOptions_To_api_ListOptions(in, out, s) +} + +func autoConvert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *ListOptions, s conversion.Scope) error { + if err := meta_v1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + if err := meta_v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + out.Watch = in.Watch + out.ResourceVersion = in.ResourceVersion + out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) + return nil +} + +func Convert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *ListOptions, s conversion.Scope) error { + return autoConvert_api_ListOptions_To_v1_ListOptions(in, out, s) +} + +func autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + return nil +} + +func Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error { + return autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in, out, s) +} + +func autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *LoadBalancerIngress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + return nil +} + +func Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *LoadBalancerIngress, s conversion.Scope) error { + return autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in, out, s) +} + +func autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error { + out.Ingress = *(*[]api.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) + return nil +} + +func Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error { + return autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in, out, s) +} + +func autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *LoadBalancerStatus, s conversion.Scope) error { + out.Ingress = *(*[]LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) + return nil +} + +func Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *LoadBalancerStatus, s conversion.Scope) error { + return autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in, out, s) +} + +func autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in *LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +func Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in *LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error { + return autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in, out, s) +} + +func autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *LocalObjectReference, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +func Convert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *LocalObjectReference, s conversion.Scope) error { + return autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in, out, s) +} + +func autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in 
*NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error { + out.Server = in.Server + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error { + return autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in, out, s) +} + +func autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *NFSVolumeSource, s conversion.Scope) error { + out.Server = in.Server + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *NFSVolumeSource, s conversion.Scope) error { + return autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in, out, s) +} + +func autoConvert_v1_Namespace_To_api_Namespace(in *Namespace, out *api.Namespace, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_NamespaceSpec_To_api_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_NamespaceStatus_To_api_NamespaceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_Namespace_To_api_Namespace(in *Namespace, out *api.Namespace, s conversion.Scope) error { + return autoConvert_v1_Namespace_To_api_Namespace(in, out, s) +} + +func autoConvert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *Namespace, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_NamespaceSpec_To_v1_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_NamespaceStatus_To_v1_NamespaceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *Namespace, s conversion.Scope) error { + return autoConvert_api_Namespace_To_v1_Namespace(in, out, s) +} + +func autoConvert_v1_NamespaceList_To_api_NamespaceList(in *NamespaceList, out *api.NamespaceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.Namespace)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_NamespaceList_To_api_NamespaceList(in *NamespaceList, out *api.NamespaceList, s conversion.Scope) error { + return autoConvert_v1_NamespaceList_To_api_NamespaceList(in, out, s) +} + +func autoConvert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *NamespaceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]Namespace, 0) + } else { + out.Items = *(*[]Namespace)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *NamespaceList, s conversion.Scope) error { + return autoConvert_api_NamespaceList_To_v1_NamespaceList(in, out, s) +} + +func autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in *NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error { + out.Finalizers = *(*[]api.FinalizerName)(unsafe.Pointer(&in.Finalizers)) + return nil +} + +func Convert_v1_NamespaceSpec_To_api_NamespaceSpec(in *NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error { + return autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in, out, s) +} + +func autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *NamespaceSpec, s conversion.Scope) error { + out.Finalizers = *(*[]FinalizerName)(unsafe.Pointer(&in.Finalizers)) + return nil +} + +func 
Convert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *NamespaceSpec, s conversion.Scope) error { + return autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in, out, s) +} + +func autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in *NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error { + out.Phase = api.NamespacePhase(in.Phase) + return nil +} + +func Convert_v1_NamespaceStatus_To_api_NamespaceStatus(in *NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error { + return autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in, out, s) +} + +func autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, out *NamespaceStatus, s conversion.Scope) error { + out.Phase = NamespacePhase(in.Phase) + return nil +} + +func Convert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, out *NamespaceStatus, s conversion.Scope) error { + return autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus(in, out, s) +} + +func autoConvert_v1_Node_To_api_Node(in *Node, out *api.Node, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_NodeSpec_To_api_NodeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_NodeStatus_To_api_NodeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_Node_To_api_Node(in *Node, out *api.Node, s conversion.Scope) error { + return autoConvert_v1_Node_To_api_Node(in, out, s) +} + +func autoConvert_api_Node_To_v1_Node(in *api.Node, out *Node, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_NodeSpec_To_v1_NodeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_NodeStatus_To_v1_NodeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_api_Node_To_v1_Node(in *api.Node, out *Node, s conversion.Scope) error { + return autoConvert_api_Node_To_v1_Node(in, out, s) +} + +func autoConvert_v1_NodeAddress_To_api_NodeAddress(in *NodeAddress, out *api.NodeAddress, s conversion.Scope) error { + out.Type = api.NodeAddressType(in.Type) + out.Address = in.Address + return nil +} + +func Convert_v1_NodeAddress_To_api_NodeAddress(in *NodeAddress, out *api.NodeAddress, s conversion.Scope) error { + return autoConvert_v1_NodeAddress_To_api_NodeAddress(in, out, s) +} + +func autoConvert_api_NodeAddress_To_v1_NodeAddress(in *api.NodeAddress, out *NodeAddress, s conversion.Scope) error { + out.Type = NodeAddressType(in.Type) + out.Address = in.Address + return nil +} + +func Convert_api_NodeAddress_To_v1_NodeAddress(in *api.NodeAddress, out *NodeAddress, s conversion.Scope) error { + return autoConvert_api_NodeAddress_To_v1_NodeAddress(in, out, s) +} + +func autoConvert_v1_NodeAffinity_To_api_NodeAffinity(in *NodeAffinity, out *api.NodeAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = (*api.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]api.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +func Convert_v1_NodeAffinity_To_api_NodeAffinity(in *NodeAffinity, out *api.NodeAffinity, s conversion.Scope) error { + return autoConvert_v1_NodeAffinity_To_api_NodeAffinity(in, out, s) +} + +func autoConvert_api_NodeAffinity_To_v1_NodeAffinity(in *api.NodeAffinity, out *NodeAffinity, s conversion.Scope) error { + 
out.RequiredDuringSchedulingIgnoredDuringExecution = (*NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +func Convert_api_NodeAffinity_To_v1_NodeAffinity(in *api.NodeAffinity, out *NodeAffinity, s conversion.Scope) error { + return autoConvert_api_NodeAffinity_To_v1_NodeAffinity(in, out, s) +} + +func autoConvert_v1_NodeCondition_To_api_NodeCondition(in *NodeCondition, out *api.NodeCondition, s conversion.Scope) error { + out.Type = api.NodeConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + out.LastHeartbeatTime = in.LastHeartbeatTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v1_NodeCondition_To_api_NodeCondition(in *NodeCondition, out *api.NodeCondition, s conversion.Scope) error { + return autoConvert_v1_NodeCondition_To_api_NodeCondition(in, out, s) +} + +func autoConvert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *NodeCondition, s conversion.Scope) error { + out.Type = NodeConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + out.LastHeartbeatTime = in.LastHeartbeatTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *NodeCondition, s conversion.Scope) error { + return autoConvert_api_NodeCondition_To_v1_NodeCondition(in, out, s) +} + +func autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error { + if err := Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { + return err + } + return nil +} + +func Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error { + return autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in, out, s) +} + +func autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *api.NodeDaemonEndpoints, out *NodeDaemonEndpoints, s conversion.Scope) error { + if err := Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { + return err + } + return nil +} + +func Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *api.NodeDaemonEndpoints, out *NodeDaemonEndpoints, s conversion.Scope) error { + return autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in, out, s) +} + +func autoConvert_v1_NodeList_To_api_NodeList(in *NodeList, out *api.NodeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.Node)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_NodeList_To_api_NodeList(in *NodeList, out *api.NodeList, s conversion.Scope) error { + return autoConvert_v1_NodeList_To_api_NodeList(in, out, s) +} + +func autoConvert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *NodeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]Node, 0) + } else { + out.Items = *(*[]Node)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *NodeList, s conversion.Scope) error { + return autoConvert_api_NodeList_To_v1_NodeList(in, out, 
s) +} + +func autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *NodeProxyOptions, out *api.NodeProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +func Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *NodeProxyOptions, out *api.NodeProxyOptions, s conversion.Scope) error { + return autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in, out, s) +} + +func autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in *api.NodeProxyOptions, out *NodeProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +func Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in *api.NodeProxyOptions, out *NodeProxyOptions, s conversion.Scope) error { + return autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in, out, s) +} + +func autoConvert_v1_NodeResources_To_api_NodeResources(in *NodeResources, out *api.NodeResources, s conversion.Scope) error { + out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +func Convert_v1_NodeResources_To_api_NodeResources(in *NodeResources, out *api.NodeResources, s conversion.Scope) error { + return autoConvert_v1_NodeResources_To_api_NodeResources(in, out, s) +} + +func autoConvert_api_NodeResources_To_v1_NodeResources(in *api.NodeResources, out *NodeResources, s conversion.Scope) error { + out.Capacity = *(*ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +func Convert_api_NodeResources_To_v1_NodeResources(in *api.NodeResources, out *NodeResources, s conversion.Scope) error { + return autoConvert_api_NodeResources_To_v1_NodeResources(in, out, s) +} + +func autoConvert_v1_NodeSelector_To_api_NodeSelector(in *NodeSelector, out *api.NodeSelector, s conversion.Scope) error { + out.NodeSelectorTerms = *(*[]api.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) + return nil +} + +func Convert_v1_NodeSelector_To_api_NodeSelector(in *NodeSelector, out *api.NodeSelector, s conversion.Scope) error { + return autoConvert_v1_NodeSelector_To_api_NodeSelector(in, out, s) +} + +func autoConvert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *NodeSelector, s conversion.Scope) error { + if in.NodeSelectorTerms == nil { + out.NodeSelectorTerms = make([]NodeSelectorTerm, 0) + } else { + out.NodeSelectorTerms = *(*[]NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) + } + return nil +} + +func Convert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *NodeSelector, s conversion.Scope) error { + return autoConvert_api_NodeSelector_To_v1_NodeSelector(in, out, s) +} + +func autoConvert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in *NodeSelectorRequirement, out *api.NodeSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = api.NodeSelectorOperator(in.Operator) + out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) + return nil +} + +func Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in *NodeSelectorRequirement, out *api.NodeSelectorRequirement, s conversion.Scope) error { + return autoConvert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in, out, s) +} + +func autoConvert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *api.NodeSelectorRequirement, out *NodeSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = NodeSelectorOperator(in.Operator) + out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) + return nil +} + +func Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in 
*api.NodeSelectorRequirement, out *NodeSelectorRequirement, s conversion.Scope) error { + return autoConvert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in, out, s) +} + +func autoConvert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *NodeSelectorTerm, out *api.NodeSelectorTerm, s conversion.Scope) error { + out.MatchExpressions = *(*[]api.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) + return nil +} + +func Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *NodeSelectorTerm, out *api.NodeSelectorTerm, s conversion.Scope) error { + return autoConvert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in, out, s) +} + +func autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *NodeSelectorTerm, s conversion.Scope) error { + if in.MatchExpressions == nil { + out.MatchExpressions = make([]NodeSelectorRequirement, 0) + } else { + out.MatchExpressions = *(*[]NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) + } + return nil +} + +func Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *NodeSelectorTerm, s conversion.Scope) error { + return autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in, out, s) +} + +func autoConvert_v1_NodeSpec_To_api_NodeSpec(in *NodeSpec, out *api.NodeSpec, s conversion.Scope) error { + out.PodCIDR = in.PodCIDR + out.ExternalID = in.ExternalID + out.ProviderID = in.ProviderID + out.Unschedulable = in.Unschedulable + out.Taints = *(*[]api.Taint)(unsafe.Pointer(&in.Taints)) + return nil +} + +func Convert_v1_NodeSpec_To_api_NodeSpec(in *NodeSpec, out *api.NodeSpec, s conversion.Scope) error { + return autoConvert_v1_NodeSpec_To_api_NodeSpec(in, out, s) +} + +func autoConvert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *NodeSpec, s conversion.Scope) error { + out.PodCIDR = in.PodCIDR + out.ExternalID = in.ExternalID + out.ProviderID = in.ProviderID + out.Unschedulable = in.Unschedulable + out.Taints = *(*[]Taint)(unsafe.Pointer(&in.Taints)) + return nil +} + +func Convert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *NodeSpec, s conversion.Scope) error { + return autoConvert_api_NodeSpec_To_v1_NodeSpec(in, out, s) +} + +func autoConvert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeStatus, s conversion.Scope) error { + out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Allocatable = *(*api.ResourceList)(unsafe.Pointer(&in.Allocatable)) + out.Phase = api.NodePhase(in.Phase) + out.Conditions = *(*[]api.NodeCondition)(unsafe.Pointer(&in.Conditions)) + out.Addresses = *(*[]api.NodeAddress)(unsafe.Pointer(&in.Addresses)) + if err := Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { + return err + } + if err := Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { + return err + } + out.Images = *(*[]api.ContainerImage)(unsafe.Pointer(&in.Images)) + out.VolumesInUse = *(*[]api.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) + out.VolumesAttached = *(*[]api.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) + return nil +} + +func Convert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeStatus, s conversion.Scope) error { + return autoConvert_v1_NodeStatus_To_api_NodeStatus(in, out, s) +} + +func autoConvert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeStatus, s conversion.Scope) error { + out.Capacity = *(*ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Allocatable = 
*(*ResourceList)(unsafe.Pointer(&in.Allocatable)) + out.Phase = NodePhase(in.Phase) + out.Conditions = *(*[]NodeCondition)(unsafe.Pointer(&in.Conditions)) + out.Addresses = *(*[]NodeAddress)(unsafe.Pointer(&in.Addresses)) + if err := Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { + return err + } + if err := Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { + return err + } + out.Images = *(*[]ContainerImage)(unsafe.Pointer(&in.Images)) + out.VolumesInUse = *(*[]UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) + out.VolumesAttached = *(*[]AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) + return nil +} + +func Convert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeStatus, s conversion.Scope) error { + return autoConvert_api_NodeStatus_To_v1_NodeStatus(in, out, s) +} + +func autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *NodeSystemInfo, out *api.NodeSystemInfo, s conversion.Scope) error { + out.MachineID = in.MachineID + out.SystemUUID = in.SystemUUID + out.BootID = in.BootID + out.KernelVersion = in.KernelVersion + out.OSImage = in.OSImage + out.ContainerRuntimeVersion = in.ContainerRuntimeVersion + out.KubeletVersion = in.KubeletVersion + out.KubeProxyVersion = in.KubeProxyVersion + out.OperatingSystem = in.OperatingSystem + out.Architecture = in.Architecture + return nil +} + +func Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *NodeSystemInfo, out *api.NodeSystemInfo, s conversion.Scope) error { + return autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in, out, s) +} + +func autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *NodeSystemInfo, s conversion.Scope) error { + out.MachineID = in.MachineID + out.SystemUUID = in.SystemUUID + out.BootID = in.BootID + out.KernelVersion = in.KernelVersion + out.OSImage = in.OSImage + out.ContainerRuntimeVersion = in.ContainerRuntimeVersion + out.KubeletVersion = in.KubeletVersion + out.KubeProxyVersion = in.KubeProxyVersion + out.OperatingSystem = in.OperatingSystem + out.Architecture = in.Architecture + return nil +} + +func Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *NodeSystemInfo, s conversion.Scope) error { + return autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in, out, s) +} + +func autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error { + out.APIVersion = in.APIVersion + out.FieldPath = in.FieldPath + return nil +} + +func Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error { + return autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in, out, s) +} + +func autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *ObjectFieldSelector, s conversion.Scope) error { + out.APIVersion = in.APIVersion + out.FieldPath = in.FieldPath + return nil +} + +func Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *ObjectFieldSelector, s conversion.Scope) error { + return autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in, out, s) +} + +func autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in *ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { + out.Name = in.Name + out.GenerateName = in.GenerateName + out.Namespace = in.Namespace + out.SelfLink = in.SelfLink + 
out.UID = types.UID(in.UID) + out.ResourceVersion = in.ResourceVersion + out.Generation = in.Generation + out.CreationTimestamp = in.CreationTimestamp + out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) + out.DeletionGracePeriodSeconds = (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) + out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) + out.ClusterName = in.ClusterName + return nil +} + +func Convert_v1_ObjectMeta_To_api_ObjectMeta(in *ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { + return autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in, out, s) +} + +func autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *ObjectMeta, s conversion.Scope) error { + out.Name = in.Name + out.GenerateName = in.GenerateName + out.Namespace = in.Namespace + out.SelfLink = in.SelfLink + out.UID = types.UID(in.UID) + out.ResourceVersion = in.ResourceVersion + out.Generation = in.Generation + out.CreationTimestamp = in.CreationTimestamp + out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) + out.DeletionGracePeriodSeconds = (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) + out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) + out.ClusterName = in.ClusterName + return nil +} + +func Convert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *ObjectMeta, s conversion.Scope) error { + return autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in, out, s) +} + +func autoConvert_v1_ObjectReference_To_api_ObjectReference(in *ObjectReference, out *api.ObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.FieldPath = in.FieldPath + return nil +} + +func Convert_v1_ObjectReference_To_api_ObjectReference(in *ObjectReference, out *api.ObjectReference, s conversion.Scope) error { + return autoConvert_v1_ObjectReference_To_api_ObjectReference(in, out, s) +} + +func autoConvert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *ObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.FieldPath = in.FieldPath + return nil +} + +func Convert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *ObjectReference, s conversion.Scope) error { + return autoConvert_api_ObjectReference_To_v1_ObjectReference(in, out, s) +} + +func autoConvert_v1_PersistentVolume_To_api_PersistentVolume(in *PersistentVolume, out *api.PersistentVolume, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func 
Convert_v1_PersistentVolume_To_api_PersistentVolume(in *PersistentVolume, out *api.PersistentVolume, s conversion.Scope) error { + return autoConvert_v1_PersistentVolume_To_api_PersistentVolume(in, out, s) +} + +func autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *PersistentVolume, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *PersistentVolume, s conversion.Scope) error { + return autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *PersistentVolumeClaim, out *api.PersistentVolumeClaim, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *PersistentVolumeClaim, out *api.PersistentVolumeClaim, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in, out, s) +} + +func autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *PersistentVolumeClaim, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *PersistentVolumeClaim, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *PersistentVolumeClaimList, out *api.PersistentVolumeClaimList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *PersistentVolumeClaimList, out *api.PersistentVolumeClaimList, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in, out, s) +} + +func autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *PersistentVolumeClaimList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]PersistentVolumeClaim, 0) + } else { + out.Items = *(*[]PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *PersistentVolumeClaimList, s conversion.Scope) error { + return 
autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error { + out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeName = in.VolumeName + out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) + return nil +} + +func Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in, out, s) +} + +func autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, s conversion.Scope) error { + out.AccessModes = *(*[]PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeName = in.VolumeName + out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) + return nil +} + +func Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in *PersistentVolumeClaimStatus, out *api.PersistentVolumeClaimStatus, s conversion.Scope) error { + out.Phase = api.PersistentVolumeClaimPhase(in.Phase) + out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +func Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in *PersistentVolumeClaimStatus, out *api.PersistentVolumeClaimStatus, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in, out, s) +} + +func autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, s conversion.Scope) error { + out.Phase = PersistentVolumeClaimPhase(in.Phase) + out.AccessModes = *(*[]PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Capacity = *(*ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +func Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + out.ClaimName = in.ClaimName + out.ReadOnly = in.ReadOnly + return nil +} + +func 
Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in, out, s) +} + +func autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + out.ClaimName = in.ClaimName + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in, out, s) +} + +func autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.PersistentVolume, len(*in)) + for i := range *in { + if err := Convert_v1_PersistentVolume_To_api_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in, out, s) +} + +func autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *PersistentVolumeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolume, len(*in)) + for i := range *in { + if err := Convert_api_PersistentVolume_To_v1_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]PersistentVolume, 0) + } + return nil +} + +func Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *PersistentVolumeList, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in, out, s) +} + +func autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *PersistentVolumeSource, out *api.PersistentVolumeSource, s conversion.Scope) error { + out.GCEPersistentDisk = (*api.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*api.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.HostPath = (*api.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.Glusterfs = (*api.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.NFS = (*api.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.RBD = (*api.RBDVolumeSource)(unsafe.Pointer(in.RBD)) + out.ISCSI = (*api.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Cinder = (*api.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*api.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) + out.FC = (*api.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.Flocker = (*api.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.FlexVolume = (*api.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.AzureFile = (*api.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.VsphereVolume = 
(*api.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.Quobyte = (*api.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.AzureDisk = (*api.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*api.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.PortworxVolume = (*api.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*api.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) + return nil +} + +func Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *PersistentVolumeSource, out *api.PersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in, out, s) +} + +func autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *PersistentVolumeSource, s conversion.Scope) error { + out.GCEPersistentDisk = (*GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.HostPath = (*HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.Glusterfs = (*GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.NFS = (*NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.RBD = (*RBDVolumeSource)(unsafe.Pointer(in.RBD)) + out.Quobyte = (*QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.ISCSI = (*ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.FlexVolume = (*FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = (*CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) + out.FC = (*FCVolumeSource)(unsafe.Pointer(in.FC)) + out.Flocker = (*FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.AzureFile = (*AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.VsphereVolume = (*VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.AzureDisk = (*AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.PortworxVolume = (*PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) + return nil +} + +func Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *PersistentVolumeSource, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *PersistentVolumeSpec, out *api.PersistentVolumeSpec, s conversion.Scope) error { + out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) + if err := Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { + return err + } + out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.ClaimRef = (*api.ObjectReference)(unsafe.Pointer(in.ClaimRef)) + out.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) + out.StorageClassName = in.StorageClassName + return nil +} + +func Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *PersistentVolumeSpec, out *api.PersistentVolumeSpec, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in, out, s) +} + +func 
autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *PersistentVolumeSpec, s conversion.Scope) error { + out.Capacity = *(*ResourceList)(unsafe.Pointer(&in.Capacity)) + if err := Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { + return err + } + out.AccessModes = *(*[]PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.ClaimRef = (*ObjectReference)(unsafe.Pointer(in.ClaimRef)) + out.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) + out.StorageClassName = in.StorageClassName + return nil +} + +func Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *PersistentVolumeSpec, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in, out, s) +} + +func autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *PersistentVolumeStatus, out *api.PersistentVolumeStatus, s conversion.Scope) error { + out.Phase = api.PersistentVolumePhase(in.Phase) + out.Message = in.Message + out.Reason = in.Reason + return nil +} + +func Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *PersistentVolumeStatus, out *api.PersistentVolumeStatus, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in, out, s) +} + +func autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *PersistentVolumeStatus, s conversion.Scope) error { + out.Phase = PersistentVolumePhase(in.Phase) + out.Message = in.Message + out.Reason = in.Reason + return nil +} + +func Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *PersistentVolumeStatus, s conversion.Scope) error { + return autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in, out, s) +} + +func autoConvert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in *PhotonPersistentDiskVolumeSource, out *api.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + out.PdID = in.PdID + out.FSType = in.FSType + return nil +} + +func Convert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in *PhotonPersistentDiskVolumeSource, out *api.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *api.PhotonPersistentDiskVolumeSource, out *PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + out.PdID = in.PdID + out.FSType = in.FSType + return nil +} + +func Convert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *api.PhotonPersistentDiskVolumeSource, out *PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_api_Pod_To_v1_Pod(in 
*api.Pod, out *Pod, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_PodAffinity_To_api_PodAffinity(in *PodAffinity, out *api.PodAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]api.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]api.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +func Convert_v1_PodAffinity_To_api_PodAffinity(in *PodAffinity, out *api.PodAffinity, s conversion.Scope) error { + return autoConvert_v1_PodAffinity_To_api_PodAffinity(in, out, s) +} + +func autoConvert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *PodAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +func Convert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *PodAffinity, s conversion.Scope) error { + return autoConvert_api_PodAffinity_To_v1_PodAffinity(in, out, s) +} + +func autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error { + out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.TopologyKey = in.TopologyKey + return nil +} + +func Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error { + return autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in, out, s) +} + +func autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *PodAffinityTerm, s conversion.Scope) error { + out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.TopologyKey = in.TopologyKey + return nil +} + +func Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *PodAffinityTerm, s conversion.Scope) error { + return autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in, out, s) +} + +func autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]api.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]api.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +func Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error { + return autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in, out, s) +} + +func autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *PodAntiAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = 
*(*[]PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +func Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *PodAntiAffinity, s conversion.Scope) error { + return autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in, out, s) +} + +func autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in *PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + return nil +} + +func Convert_v1_PodAttachOptions_To_api_PodAttachOptions(in *PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { + return autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in, out, s) +} + +func autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *PodAttachOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + return nil +} + +func Convert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *PodAttachOptions, s conversion.Scope) error { + return autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in, out, s) +} + +func autoConvert_v1_PodCondition_To_api_PodCondition(in *PodCondition, out *api.PodCondition, s conversion.Scope) error { + out.Type = api.PodConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v1_PodCondition_To_api_PodCondition(in *PodCondition, out *api.PodCondition, s conversion.Scope) error { + return autoConvert_v1_PodCondition_To_api_PodCondition(in, out, s) +} + +func autoConvert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *PodCondition, s conversion.Scope) error { + out.Type = PodConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *PodCondition, s conversion.Scope) error { + return autoConvert_api_PodCondition_To_v1_PodCondition(in, out, s) +} + +func autoConvert_v1_PodExecOptions_To_api_PodExecOptions(in *PodExecOptions, out *api.PodExecOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +func Convert_v1_PodExecOptions_To_api_PodExecOptions(in *PodExecOptions, out *api.PodExecOptions, s conversion.Scope) error { + return autoConvert_v1_PodExecOptions_To_api_PodExecOptions(in, out, s) +} + +func autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *PodExecOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + if in.Command == nil { + out.Command = make([]string, 0) + } else { + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + } + return nil +} + +func 
Convert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *PodExecOptions, s conversion.Scope) error { + return autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in, out, s) +} + +func autoConvert_v1_PodList_To_api_PodList(in *PodList, out *api.PodList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.Pod, len(*in)) + for i := range *in { + if err := Convert_v1_Pod_To_api_Pod(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_PodList_To_api_PodList(in *PodList, out *api.PodList, s conversion.Scope) error { + return autoConvert_v1_PodList_To_api_PodList(in, out, s) +} + +func autoConvert_api_PodList_To_v1_PodList(in *api.PodList, out *PodList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Pod, len(*in)) + for i := range *in { + if err := Convert_api_Pod_To_v1_Pod(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]Pod, 0) + } + return nil +} + +func Convert_api_PodList_To_v1_PodList(in *api.PodList, out *PodList, s conversion.Scope) error { + return autoConvert_api_PodList_To_v1_PodList(in, out, s) +} + +func autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in *PodLogOptions, out *api.PodLogOptions, s conversion.Scope) error { + out.Container = in.Container + out.Follow = in.Follow + out.Previous = in.Previous + out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) + out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) + out.Timestamps = in.Timestamps + out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) + out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) + return nil +} + +func Convert_v1_PodLogOptions_To_api_PodLogOptions(in *PodLogOptions, out *api.PodLogOptions, s conversion.Scope) error { + return autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in, out, s) +} + +func autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *PodLogOptions, s conversion.Scope) error { + out.Container = in.Container + out.Follow = in.Follow + out.Previous = in.Previous + out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) + out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) + out.Timestamps = in.Timestamps + out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) + out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) + return nil +} + +func Convert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *PodLogOptions, s conversion.Scope) error { + return autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in, out, s) +} + +func autoConvert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in *PodPortForwardOptions, out *api.PodPortForwardOptions, s conversion.Scope) error { + out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) + return nil +} + +func Convert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in *PodPortForwardOptions, out *api.PodPortForwardOptions, s conversion.Scope) error { + return autoConvert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in, out, s) +} + +func autoConvert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *api.PodPortForwardOptions, out *PodPortForwardOptions, s conversion.Scope) error { + out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) + return nil +} + +func Convert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *api.PodPortForwardOptions, out 
*PodPortForwardOptions, s conversion.Scope) error { + return autoConvert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in, out, s) +} + +func autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions(in *PodProxyOptions, out *api.PodProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +func Convert_v1_PodProxyOptions_To_api_PodProxyOptions(in *PodProxyOptions, out *api.PodProxyOptions, s conversion.Scope) error { + return autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions(in, out, s) +} + +func autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *PodProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +func Convert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *PodProxyOptions, s conversion.Scope) error { + return autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in, out, s) +} + +func autoConvert_v1_PodSecurityContext_To_api_PodSecurityContext(in *PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error { + out.SELinuxOptions = (*api.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) + out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup)) + return nil +} + +func autoConvert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurityContext, out *PodSecurityContext, s conversion.Scope) error { + // INFO: in.HostNetwork opted out of conversion generation + // INFO: in.HostPID opted out of conversion generation + // INFO: in.HostIPC opted out of conversion generation + out.SELinuxOptions = (*SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) + out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup)) + return nil +} + +func autoConvert_v1_PodSignature_To_api_PodSignature(in *PodSignature, out *api.PodSignature, s conversion.Scope) error { + out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController)) + return nil +} + +func Convert_v1_PodSignature_To_api_PodSignature(in *PodSignature, out *api.PodSignature, s conversion.Scope) error { + return autoConvert_v1_PodSignature_To_api_PodSignature(in, out, s) +} + +func autoConvert_api_PodSignature_To_v1_PodSignature(in *api.PodSignature, out *PodSignature, s conversion.Scope) error { + out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController)) + return nil +} + +func Convert_api_PodSignature_To_v1_PodSignature(in *api.PodSignature, out *PodSignature, s conversion.Scope) error { + return autoConvert_api_PodSignature_To_v1_PodSignature(in, out, s) +} + +func autoConvert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversion.Scope) error { + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]api.Volume, len(*in)) + for i := range *in { + if err := Convert_v1_Volume_To_api_Volume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + out.InitContainers = *(*[]api.Container)(unsafe.Pointer(&in.InitContainers)) + out.Containers = *(*[]api.Container)(unsafe.Pointer(&in.Containers)) + out.RestartPolicy = api.RestartPolicy(in.RestartPolicy) + out.TerminationGracePeriodSeconds = 
(*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) + out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) + out.DNSPolicy = api.DNSPolicy(in.DNSPolicy) + out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) + out.ServiceAccountName = in.ServiceAccountName + // INFO: in.DeprecatedServiceAccount opted out of conversion generation + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + out.NodeName = in.NodeName + // INFO: in.HostNetwork opted out of conversion generation + // INFO: in.HostPID opted out of conversion generation + // INFO: in.HostIPC opted out of conversion generation + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(api.PodSecurityContext) + if err := Convert_v1_PodSecurityContext_To_api_PodSecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.ImagePullSecrets = *(*[]api.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.Hostname = in.Hostname + out.Subdomain = in.Subdomain + out.Affinity = (*api.Affinity)(unsafe.Pointer(in.Affinity)) + out.SchedulerName = in.SchedulerName + out.Tolerations = *(*[]api.Toleration)(unsafe.Pointer(&in.Tolerations)) + return nil +} + +func autoConvert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversion.Scope) error { + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]Volume, len(*in)) + for i := range *in { + if err := Convert_api_Volume_To_v1_Volume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + out.InitContainers = *(*[]Container)(unsafe.Pointer(&in.InitContainers)) + if in.Containers == nil { + out.Containers = make([]Container, 0) + } else { + out.Containers = *(*[]Container)(unsafe.Pointer(&in.Containers)) + } + out.RestartPolicy = RestartPolicy(in.RestartPolicy) + out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) + out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) + out.DNSPolicy = DNSPolicy(in.DNSPolicy) + out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) + out.ServiceAccountName = in.ServiceAccountName + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + out.NodeName = in.NodeName + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(PodSecurityContext) + if err := Convert_api_PodSecurityContext_To_v1_PodSecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.ImagePullSecrets = *(*[]LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.Hostname = in.Hostname + out.Subdomain = in.Subdomain + out.Affinity = (*Affinity)(unsafe.Pointer(in.Affinity)) + out.SchedulerName = in.SchedulerName + out.Tolerations = *(*[]Toleration)(unsafe.Pointer(&in.Tolerations)) + return nil +} + +func autoConvert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus, s conversion.Scope) error { + out.Phase = api.PodPhase(in.Phase) + out.Conditions = *(*[]api.PodCondition)(unsafe.Pointer(&in.Conditions)) + out.Message = in.Message + out.Reason = in.Reason + out.HostIP = in.HostIP + out.PodIP = in.PodIP + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) + out.InitContainerStatuses = *(*[]api.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) + out.ContainerStatuses = 
*(*[]api.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) + out.QOSClass = api.PodQOSClass(in.QOSClass) + return nil +} + +func Convert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus, s conversion.Scope) error { + return autoConvert_v1_PodStatus_To_api_PodStatus(in, out, s) +} + +func autoConvert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus, s conversion.Scope) error { + out.Phase = PodPhase(in.Phase) + out.Conditions = *(*[]PodCondition)(unsafe.Pointer(&in.Conditions)) + out.Message = in.Message + out.Reason = in.Reason + out.HostIP = in.HostIP + out.PodIP = in.PodIP + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) + out.QOSClass = PodQOSClass(in.QOSClass) + out.InitContainerStatuses = *(*[]ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) + out.ContainerStatuses = *(*[]ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) + return nil +} + +func Convert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus, s conversion.Scope) error { + return autoConvert_api_PodStatus_To_v1_PodStatus(in, out, s) +} + +func autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemplate, s conversion.Scope) error { + return autoConvert_v1_PodTemplate_To_api_PodTemplate(in, out, s) +} + +func autoConvert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *PodTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *PodTemplate, s conversion.Scope) error { + return autoConvert_api_PodTemplate_To_v1_PodTemplate(in, out, s) +} + +func autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in *PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.PodTemplate, len(*in)) + for i := range *in { + if err := Convert_v1_PodTemplate_To_api_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_PodTemplateList_To_api_PodTemplateList(in *PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error { + return autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in, out, s) +} + +func autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *PodTemplateList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = 
make([]PodTemplate, len(*in)) + for i := range *in { + if err := Convert_api_PodTemplate_To_v1_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]PodTemplate, 0) + } + return nil +} + +func Convert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *PodTemplateList, s conversion.Scope) error { + return autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in, out, s) +} + +func autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in *PortworxVolumeSource, out *api.PortworxVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in *PortworxVolumeSource, out *api.PortworxVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in, out, s) +} + +func autoConvert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *api.PortworxVolumeSource, out *PortworxVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *api.PortworxVolumeSource, out *PortworxVolumeSource, s conversion.Scope) error { + return autoConvert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in, out, s) +} + +func autoConvert_v1_Preconditions_To_api_Preconditions(in *Preconditions, out *api.Preconditions, s conversion.Scope) error { + out.UID = (*types.UID)(unsafe.Pointer(in.UID)) + return nil +} + +func Convert_v1_Preconditions_To_api_Preconditions(in *Preconditions, out *api.Preconditions, s conversion.Scope) error { + return autoConvert_v1_Preconditions_To_api_Preconditions(in, out, s) +} + +func autoConvert_api_Preconditions_To_v1_Preconditions(in *api.Preconditions, out *Preconditions, s conversion.Scope) error { + out.UID = (*types.UID)(unsafe.Pointer(in.UID)) + return nil +} + +func Convert_api_Preconditions_To_v1_Preconditions(in *api.Preconditions, out *Preconditions, s conversion.Scope) error { + return autoConvert_api_Preconditions_To_v1_Preconditions(in, out, s) +} + +func autoConvert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry(in *PreferAvoidPodsEntry, out *api.PreferAvoidPodsEntry, s conversion.Scope) error { + if err := Convert_v1_PodSignature_To_api_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { + return err + } + out.EvictionTime = in.EvictionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry(in *PreferAvoidPodsEntry, out *api.PreferAvoidPodsEntry, s conversion.Scope) error { + return autoConvert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry(in, out, s) +} + +func autoConvert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *api.PreferAvoidPodsEntry, out 
*PreferAvoidPodsEntry, s conversion.Scope) error { + if err := Convert_api_PodSignature_To_v1_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { + return err + } + out.EvictionTime = in.EvictionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *api.PreferAvoidPodsEntry, out *PreferAvoidPodsEntry, s conversion.Scope) error { + return autoConvert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in, out, s) +} + +func autoConvert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in *PreferredSchedulingTerm, out *api.PreferredSchedulingTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { + return err + } + return nil +} + +func Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in *PreferredSchedulingTerm, out *api.PreferredSchedulingTerm, s conversion.Scope) error { + return autoConvert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in, out, s) +} + +func autoConvert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *api.PreferredSchedulingTerm, out *PreferredSchedulingTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { + return err + } + return nil +} + +func Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *api.PreferredSchedulingTerm, out *PreferredSchedulingTerm, s conversion.Scope) error { + return autoConvert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in, out, s) +} + +func autoConvert_v1_Probe_To_api_Probe(in *Probe, out *api.Probe, s conversion.Scope) error { + if err := Convert_v1_Handler_To_api_Handler(&in.Handler, &out.Handler, s); err != nil { + return err + } + out.InitialDelaySeconds = in.InitialDelaySeconds + out.TimeoutSeconds = in.TimeoutSeconds + out.PeriodSeconds = in.PeriodSeconds + out.SuccessThreshold = in.SuccessThreshold + out.FailureThreshold = in.FailureThreshold + return nil +} + +func Convert_v1_Probe_To_api_Probe(in *Probe, out *api.Probe, s conversion.Scope) error { + return autoConvert_v1_Probe_To_api_Probe(in, out, s) +} + +func autoConvert_api_Probe_To_v1_Probe(in *api.Probe, out *Probe, s conversion.Scope) error { + if err := Convert_api_Handler_To_v1_Handler(&in.Handler, &out.Handler, s); err != nil { + return err + } + out.InitialDelaySeconds = in.InitialDelaySeconds + out.TimeoutSeconds = in.TimeoutSeconds + out.PeriodSeconds = in.PeriodSeconds + out.SuccessThreshold = in.SuccessThreshold + out.FailureThreshold = in.FailureThreshold + return nil +} + +func Convert_api_Probe_To_v1_Probe(in *api.Probe, out *Probe, s conversion.Scope) error { + return autoConvert_api_Probe_To_v1_Probe(in, out, s) +} + +func autoConvert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in *ProjectedVolumeSource, out *api.ProjectedVolumeSource, s conversion.Scope) error { + out.Sources = *(*[]api.VolumeProjection)(unsafe.Pointer(&in.Sources)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +func Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in *ProjectedVolumeSource, out *api.ProjectedVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in, out, s) +} + +func autoConvert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in 
*api.ProjectedVolumeSource, out *ProjectedVolumeSource, s conversion.Scope) error { + if in.Sources == nil { + out.Sources = make([]VolumeProjection, 0) + } else { + out.Sources = *(*[]VolumeProjection)(unsafe.Pointer(&in.Sources)) + } + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +func Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *api.ProjectedVolumeSource, out *ProjectedVolumeSource, s conversion.Scope) error { + return autoConvert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in, out, s) +} + +func autoConvert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in *QuobyteVolumeSource, out *api.QuobyteVolumeSource, s conversion.Scope) error { + out.Registry = in.Registry + out.Volume = in.Volume + out.ReadOnly = in.ReadOnly + out.User = in.User + out.Group = in.Group + return nil +} + +func Convert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in *QuobyteVolumeSource, out *api.QuobyteVolumeSource, s conversion.Scope) error { + return autoConvert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in, out, s) +} + +func autoConvert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *api.QuobyteVolumeSource, out *QuobyteVolumeSource, s conversion.Scope) error { + out.Registry = in.Registry + out.Volume = in.Volume + out.ReadOnly = in.ReadOnly + out.User = in.User + out.Group = in.Group + return nil +} + +func Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *api.QuobyteVolumeSource, out *QuobyteVolumeSource, s conversion.Scope) error { + return autoConvert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in, out, s) +} + +func autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error { + return autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in, out, s) +} + +func autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *RBDVolumeSource, s conversion.Scope) error { + if in.CephMonitors == nil { + out.CephMonitors = make([]string, 0) + } else { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + } + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *RBDVolumeSource, s conversion.Scope) error { + return autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in, out, s) +} + +func autoConvert_v1_RangeAllocation_To_api_RangeAllocation(in *RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Range = in.Range + out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) + return nil +} + +func Convert_v1_RangeAllocation_To_api_RangeAllocation(in *RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error { + return autoConvert_v1_RangeAllocation_To_api_RangeAllocation(in, out, s) +} + +func 
autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *RangeAllocation, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Range = in.Range + if in.Data == nil { + out.Data = make([]byte, 0) + } else { + out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) + } + return nil +} + +func Convert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *RangeAllocation, s conversion.Scope) error { + return autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in, out, s) +} + +func autoConvert_v1_ReplicationController_To_api_ReplicationController(in *ReplicationController, out *api.ReplicationController, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_ReplicationController_To_api_ReplicationController(in *ReplicationController, out *api.ReplicationController, s conversion.Scope) error { + return autoConvert_v1_ReplicationController_To_api_ReplicationController(in, out, s) +} + +func autoConvert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *ReplicationController, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *ReplicationController, s conversion.Scope) error { + return autoConvert_api_ReplicationController_To_v1_ReplicationController(in, out, s) +} + +func autoConvert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition(in *ReplicationControllerCondition, out *api.ReplicationControllerCondition, s conversion.Scope) error { + out.Type = api.ReplicationControllerConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition(in *ReplicationControllerCondition, out *api.ReplicationControllerCondition, s conversion.Scope) error { + return autoConvert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition(in, out, s) +} + +func autoConvert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *api.ReplicationControllerCondition, out *ReplicationControllerCondition, s conversion.Scope) error { + out.Type = ReplicationControllerConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *api.ReplicationControllerCondition, out *ReplicationControllerCondition, s conversion.Scope) error { + return autoConvert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in, out, s) +} + +func autoConvert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in *ReplicationControllerList, 
out *api.ReplicationControllerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.ReplicationController, len(*in)) + for i := range *in { + if err := Convert_v1_ReplicationController_To_api_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in *ReplicationControllerList, out *api.ReplicationControllerList, s conversion.Scope) error { + return autoConvert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in, out, s) +} + +func autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *ReplicationControllerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicationController, len(*in)) + for i := range *in { + if err := Convert_api_ReplicationController_To_v1_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]ReplicationController, 0) + } + return nil +} + +func Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *ReplicationControllerList, s conversion.Scope) error { + return autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in, out, s) +} + +func autoConvert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error { + if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(api.PodTemplateSpec) + if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(*in, *out, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func autoConvert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *ReplicationControllerSpec, s conversion.Scope) error { + if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(PodTemplateSpec) + if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(*in, *out, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in *ReplicationControllerStatus, out *api.ReplicationControllerStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + out.Conditions = *(*[]api.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +func Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in *ReplicationControllerStatus, out *api.ReplicationControllerStatus, s conversion.Scope) error { + return 
autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in, out, s) +} + +func autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *ReplicationControllerStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + out.Conditions = *(*[]ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +func Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *ReplicationControllerStatus, s conversion.Scope) error { + return autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in, out, s) +} + +func autoConvert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in *ResourceFieldSelector, out *api.ResourceFieldSelector, s conversion.Scope) error { + out.ContainerName = in.ContainerName + out.Resource = in.Resource + out.Divisor = in.Divisor + return nil +} + +func Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in *ResourceFieldSelector, out *api.ResourceFieldSelector, s conversion.Scope) error { + return autoConvert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in, out, s) +} + +func autoConvert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *api.ResourceFieldSelector, out *ResourceFieldSelector, s conversion.Scope) error { + out.ContainerName = in.ContainerName + out.Resource = in.Resource + out.Divisor = in.Divisor + return nil +} + +func Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *api.ResourceFieldSelector, out *ResourceFieldSelector, s conversion.Scope) error { + return autoConvert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in, out, s) +} + +func autoConvert_v1_ResourceQuota_To_api_ResourceQuota(in *ResourceQuota, out *api.ResourceQuota, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_ResourceQuota_To_api_ResourceQuota(in *ResourceQuota, out *api.ResourceQuota, s conversion.Scope) error { + return autoConvert_v1_ResourceQuota_To_api_ResourceQuota(in, out, s) +} + +func autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *ResourceQuota, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *ResourceQuota, s conversion.Scope) error { + return autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in, out, s) +} + +func autoConvert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *ResourceQuotaList, out *api.ResourceQuotaList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.ResourceQuota)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *ResourceQuotaList, out *api.ResourceQuotaList, s 
conversion.Scope) error { + return autoConvert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in, out, s) +} + +func autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *ResourceQuotaList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]ResourceQuota, 0) + } else { + out.Items = *(*[]ResourceQuota)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *ResourceQuotaList, s conversion.Scope) error { + return autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in, out, s) +} + +func autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *ResourceQuotaSpec, out *api.ResourceQuotaSpec, s conversion.Scope) error { + out.Hard = *(*api.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Scopes = *(*[]api.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) + return nil +} + +func Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *ResourceQuotaSpec, out *api.ResourceQuotaSpec, s conversion.Scope) error { + return autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in, out, s) +} + +func autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *ResourceQuotaSpec, s conversion.Scope) error { + out.Hard = *(*ResourceList)(unsafe.Pointer(&in.Hard)) + out.Scopes = *(*[]ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) + return nil +} + +func Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *ResourceQuotaSpec, s conversion.Scope) error { + return autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in, out, s) +} + +func autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuotaStatus, out *api.ResourceQuotaStatus, s conversion.Scope) error { + out.Hard = *(*api.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Used = *(*api.ResourceList)(unsafe.Pointer(&in.Used)) + return nil +} + +func Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuotaStatus, out *api.ResourceQuotaStatus, s conversion.Scope) error { + return autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in, out, s) +} + +func autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *ResourceQuotaStatus, s conversion.Scope) error { + out.Hard = *(*ResourceList)(unsafe.Pointer(&in.Hard)) + out.Used = *(*ResourceList)(unsafe.Pointer(&in.Used)) + return nil +} + +func Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *ResourceQuotaStatus, s conversion.Scope) error { + return autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in, out, s) +} + +func autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error { + out.Limits = *(*api.ResourceList)(unsafe.Pointer(&in.Limits)) + out.Requests = *(*api.ResourceList)(unsafe.Pointer(&in.Requests)) + return nil +} + +func Convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error { + return autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in, out, s) +} + +func autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error { + out.Limits = *(*ResourceList)(unsafe.Pointer(&in.Limits)) + out.Requests = *(*ResourceList)(unsafe.Pointer(&in.Requests)) + 
return nil +} + +func Convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error { + return autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in, out, s) +} + +func autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in *SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error { + out.User = in.User + out.Role = in.Role + out.Type = in.Type + out.Level = in.Level + return nil +} + +func Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in *SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error { + return autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in, out, s) +} + +func autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *SELinuxOptions, s conversion.Scope) error { + out.User = in.User + out.Role = in.Role + out.Type = in.Type + out.Level = in.Level + return nil +} + +func Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *SELinuxOptions, s conversion.Scope) error { + return autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in, out, s) +} + +func autoConvert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in *ScaleIOVolumeSource, out *api.ScaleIOVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in *ScaleIOVolumeSource, out *api.ScaleIOVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in, out, s) +} + +func autoConvert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *api.ScaleIOVolumeSource, out *ScaleIOVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +func Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *api.ScaleIOVolumeSource, out *ScaleIOVolumeSource, s conversion.Scope) error { + return autoConvert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in, out, s) +} + +func autoConvert_v1_Secret_To_api_Secret(in *Secret, out *api.Secret, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) + // INFO: in.StringData opted out of conversion generation + out.Type = api.SecretType(in.Type) + return nil +} + +func autoConvert_api_Secret_To_v1_Secret(in *api.Secret, out *Secret, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) + out.Type = SecretType(in.Type) + return nil +} + +func Convert_api_Secret_To_v1_Secret(in *api.Secret, out *Secret, s conversion.Scope) error { + return autoConvert_api_Secret_To_v1_Secret(in, out, s) +} + +func autoConvert_v1_SecretEnvSource_To_api_SecretEnvSource(in *SecretEnvSource, out *api.SecretEnvSource, s conversion.Scope) error { + if err := 
Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_v1_SecretEnvSource_To_api_SecretEnvSource(in *SecretEnvSource, out *api.SecretEnvSource, s conversion.Scope) error { + return autoConvert_v1_SecretEnvSource_To_api_SecretEnvSource(in, out, s) +} + +func autoConvert_api_SecretEnvSource_To_v1_SecretEnvSource(in *api.SecretEnvSource, out *SecretEnvSource, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_api_SecretEnvSource_To_v1_SecretEnvSource(in *api.SecretEnvSource, out *SecretEnvSource, s conversion.Scope) error { + return autoConvert_api_SecretEnvSource_To_v1_SecretEnvSource(in, out, s) +} + +func autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in *SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_v1_SecretKeySelector_To_api_SecretKeySelector(in *SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error { + return autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in, out, s) +} + +func autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *SecretKeySelector, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *SecretKeySelector, s conversion.Scope) error { + return autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in, out, s) +} + +func autoConvert_v1_SecretList_To_api_SecretList(in *SecretList, out *api.SecretList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.Secret, len(*in)) + for i := range *in { + if err := Convert_v1_Secret_To_api_Secret(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_SecretList_To_api_SecretList(in *SecretList, out *api.SecretList, s conversion.Scope) error { + return autoConvert_v1_SecretList_To_api_SecretList(in, out, s) +} + +func autoConvert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *SecretList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Secret, len(*in)) + for i := range *in { + if err := Convert_api_Secret_To_v1_Secret(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]Secret, 0) + } + return nil +} + +func Convert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *SecretList, s conversion.Scope) error { + return autoConvert_api_SecretList_To_v1_SecretList(in, out, s) +} + +func autoConvert_v1_SecretProjection_To_api_SecretProjection(in *SecretProjection, out 
*api.SecretProjection, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_v1_SecretProjection_To_api_SecretProjection(in *SecretProjection, out *api.SecretProjection, s conversion.Scope) error { + return autoConvert_v1_SecretProjection_To_api_SecretProjection(in, out, s) +} + +func autoConvert_api_SecretProjection_To_v1_SecretProjection(in *api.SecretProjection, out *SecretProjection, s conversion.Scope) error { + if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_api_SecretProjection_To_v1_SecretProjection(in *api.SecretProjection, out *SecretProjection, s conversion.Scope) error { + return autoConvert_api_SecretProjection_To_v1_SecretProjection(in, out, s) +} + +func autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error { + return autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in, out, s) +} + +func autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *SecretVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.Items = *(*[]KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +func Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *SecretVolumeSource, s conversion.Scope) error { + return autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in, out, s) +} + +func autoConvert_v1_SecurityContext_To_api_SecurityContext(in *SecurityContext, out *api.SecurityContext, s conversion.Scope) error { + out.Capabilities = (*api.Capabilities)(unsafe.Pointer(in.Capabilities)) + out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) + out.SELinuxOptions = (*api.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) + return nil +} + +func Convert_v1_SecurityContext_To_api_SecurityContext(in *SecurityContext, out *api.SecurityContext, s conversion.Scope) error { + return autoConvert_v1_SecurityContext_To_api_SecurityContext(in, out, s) +} + +func autoConvert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *SecurityContext, s conversion.Scope) error { + out.Capabilities = (*Capabilities)(unsafe.Pointer(in.Capabilities)) + out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) + out.SELinuxOptions = (*SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = 
(*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) + return nil +} + +func Convert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *SecurityContext, s conversion.Scope) error { + return autoConvert_api_SecurityContext_To_v1_SecurityContext(in, out, s) +} + +func autoConvert_v1_SerializedReference_To_api_SerializedReference(in *SerializedReference, out *api.SerializedReference, s conversion.Scope) error { + if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Reference, &out.Reference, s); err != nil { + return err + } + return nil +} + +func Convert_v1_SerializedReference_To_api_SerializedReference(in *SerializedReference, out *api.SerializedReference, s conversion.Scope) error { + return autoConvert_v1_SerializedReference_To_api_SerializedReference(in, out, s) +} + +func autoConvert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *SerializedReference, s conversion.Scope) error { + if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Reference, &out.Reference, s); err != nil { + return err + } + return nil +} + +func Convert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *SerializedReference, s conversion.Scope) error { + return autoConvert_api_SerializedReference_To_v1_SerializedReference(in, out, s) +} + +func autoConvert_v1_Service_To_api_Service(in *Service, out *api.Service, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ServiceSpec_To_api_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ServiceStatus_To_api_ServiceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_Service_To_api_Service(in *Service, out *api.Service, s conversion.Scope) error { + return autoConvert_v1_Service_To_api_Service(in, out, s) +} + +func autoConvert_api_Service_To_v1_Service(in *api.Service, out *Service, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_api_ServiceSpec_To_v1_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_api_ServiceStatus_To_v1_ServiceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_api_Service_To_v1_Service(in *api.Service, out *Service, s conversion.Scope) error { + return autoConvert_api_Service_To_v1_Service(in, out, s) +} + +func autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in *ServiceAccount, out *api.ServiceAccount, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Secrets = *(*[]api.ObjectReference)(unsafe.Pointer(&in.Secrets)) + out.ImagePullSecrets = *(*[]api.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + return nil +} + +func Convert_v1_ServiceAccount_To_api_ServiceAccount(in *ServiceAccount, out *api.ServiceAccount, s conversion.Scope) error { + return autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in, out, s) +} + +func autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *ServiceAccount, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Secrets = *(*[]ObjectReference)(unsafe.Pointer(&in.Secrets)) + out.ImagePullSecrets = *(*[]LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + 
out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + return nil +} + +func Convert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *ServiceAccount, s conversion.Scope) error { + return autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in, out, s) +} + +func autoConvert_v1_ServiceAccountList_To_api_ServiceAccountList(in *ServiceAccountList, out *api.ServiceAccountList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]api.ServiceAccount)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_ServiceAccountList_To_api_ServiceAccountList(in *ServiceAccountList, out *api.ServiceAccountList, s conversion.Scope) error { + return autoConvert_v1_ServiceAccountList_To_api_ServiceAccountList(in, out, s) +} + +func autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *ServiceAccountList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]ServiceAccount, 0) + } else { + out.Items = *(*[]ServiceAccount)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *ServiceAccountList, s conversion.Scope) error { + return autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in, out, s) +} + +func autoConvert_v1_ServiceList_To_api_ServiceList(in *ServiceList, out *api.ServiceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]api.Service, len(*in)) + for i := range *in { + if err := Convert_v1_Service_To_api_Service(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_ServiceList_To_api_ServiceList(in *ServiceList, out *api.ServiceList, s conversion.Scope) error { + return autoConvert_v1_ServiceList_To_api_ServiceList(in, out, s) +} + +func autoConvert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *ServiceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + if err := Convert_api_Service_To_v1_Service(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]Service, 0) + } + return nil +} + +func Convert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *ServiceList, s conversion.Scope) error { + return autoConvert_api_ServiceList_To_v1_ServiceList(in, out, s) +} + +func autoConvert_v1_ServicePort_To_api_ServicePort(in *ServicePort, out *api.ServicePort, s conversion.Scope) error { + out.Name = in.Name + out.Protocol = api.Protocol(in.Protocol) + out.Port = in.Port + out.TargetPort = in.TargetPort + out.NodePort = in.NodePort + return nil +} + +func Convert_v1_ServicePort_To_api_ServicePort(in *ServicePort, out *api.ServicePort, s conversion.Scope) error { + return autoConvert_v1_ServicePort_To_api_ServicePort(in, out, s) +} + +func autoConvert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *ServicePort, s conversion.Scope) error { + out.Name = in.Name + out.Protocol = Protocol(in.Protocol) + out.Port = in.Port + out.TargetPort = in.TargetPort + out.NodePort = in.NodePort + return nil +} + +func Convert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *ServicePort, s conversion.Scope) error { + return autoConvert_api_ServicePort_To_v1_ServicePort(in, out, s) +} + +func 
autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in *ServiceProxyOptions, out *api.ServiceProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +func Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in *ServiceProxyOptions, out *api.ServiceProxyOptions, s conversion.Scope) error { + return autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in, out, s) +} + +func autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *ServiceProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +func Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *ServiceProxyOptions, s conversion.Scope) error { + return autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in, out, s) +} + +func autoConvert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error { + out.Ports = *(*[]api.ServicePort)(unsafe.Pointer(&in.Ports)) + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + out.ClusterIP = in.ClusterIP + out.Type = api.ServiceType(in.Type) + out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) + // INFO: in.DeprecatedPublicIPs opted out of conversion generation + out.SessionAffinity = api.ServiceAffinity(in.SessionAffinity) + out.LoadBalancerIP = in.LoadBalancerIP + out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) + out.ExternalName = in.ExternalName + return nil +} + +func autoConvert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *ServiceSpec, s conversion.Scope) error { + out.Type = ServiceType(in.Type) + out.Ports = *(*[]ServicePort)(unsafe.Pointer(&in.Ports)) + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + out.ClusterIP = in.ClusterIP + out.ExternalName = in.ExternalName + out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) + out.LoadBalancerIP = in.LoadBalancerIP + out.SessionAffinity = ServiceAffinity(in.SessionAffinity) + out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) + return nil +} + +func autoConvert_v1_ServiceStatus_To_api_ServiceStatus(in *ServiceStatus, out *api.ServiceStatus, s conversion.Scope) error { + if err := Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { + return err + } + return nil +} + +func Convert_v1_ServiceStatus_To_api_ServiceStatus(in *ServiceStatus, out *api.ServiceStatus, s conversion.Scope) error { + return autoConvert_v1_ServiceStatus_To_api_ServiceStatus(in, out, s) +} + +func autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *ServiceStatus, s conversion.Scope) error { + if err := Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { + return err + } + return nil +} + +func Convert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *ServiceStatus, s conversion.Scope) error { + return autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in, out, s) +} + +func autoConvert_v1_Sysctl_To_api_Sysctl(in *Sysctl, out *api.Sysctl, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +func Convert_v1_Sysctl_To_api_Sysctl(in *Sysctl, out *api.Sysctl, s conversion.Scope) error { + return autoConvert_v1_Sysctl_To_api_Sysctl(in, out, s) +} + +func autoConvert_api_Sysctl_To_v1_Sysctl(in *api.Sysctl, out *Sysctl, s conversion.Scope) 
error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +func Convert_api_Sysctl_To_v1_Sysctl(in *api.Sysctl, out *Sysctl, s conversion.Scope) error { + return autoConvert_api_Sysctl_To_v1_Sysctl(in, out, s) +} + +func autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in *TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error { + out.Port = in.Port + return nil +} + +func Convert_v1_TCPSocketAction_To_api_TCPSocketAction(in *TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error { + return autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in, out, s) +} + +func autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *TCPSocketAction, s conversion.Scope) error { + out.Port = in.Port + return nil +} + +func Convert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *TCPSocketAction, s conversion.Scope) error { + return autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in, out, s) +} + +func autoConvert_v1_Taint_To_api_Taint(in *Taint, out *api.Taint, s conversion.Scope) error { + out.Key = in.Key + out.Value = in.Value + out.Effect = api.TaintEffect(in.Effect) + out.TimeAdded = in.TimeAdded + return nil +} + +func Convert_v1_Taint_To_api_Taint(in *Taint, out *api.Taint, s conversion.Scope) error { + return autoConvert_v1_Taint_To_api_Taint(in, out, s) +} + +func autoConvert_api_Taint_To_v1_Taint(in *api.Taint, out *Taint, s conversion.Scope) error { + out.Key = in.Key + out.Value = in.Value + out.Effect = TaintEffect(in.Effect) + out.TimeAdded = in.TimeAdded + return nil +} + +func Convert_api_Taint_To_v1_Taint(in *api.Taint, out *Taint, s conversion.Scope) error { + return autoConvert_api_Taint_To_v1_Taint(in, out, s) +} + +func autoConvert_v1_Toleration_To_api_Toleration(in *Toleration, out *api.Toleration, s conversion.Scope) error { + out.Key = in.Key + out.Operator = api.TolerationOperator(in.Operator) + out.Value = in.Value + out.Effect = api.TaintEffect(in.Effect) + out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) + return nil +} + +func Convert_v1_Toleration_To_api_Toleration(in *Toleration, out *api.Toleration, s conversion.Scope) error { + return autoConvert_v1_Toleration_To_api_Toleration(in, out, s) +} + +func autoConvert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *Toleration, s conversion.Scope) error { + out.Key = in.Key + out.Operator = TolerationOperator(in.Operator) + out.Value = in.Value + out.Effect = TaintEffect(in.Effect) + out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) + return nil +} + +func Convert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *Toleration, s conversion.Scope) error { + return autoConvert_api_Toleration_To_v1_Toleration(in, out, s) +} + +func autoConvert_v1_Volume_To_api_Volume(in *Volume, out *api.Volume, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_v1_VolumeSource_To_api_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { + return err + } + return nil +} + +func Convert_v1_Volume_To_api_Volume(in *Volume, out *api.Volume, s conversion.Scope) error { + return autoConvert_v1_Volume_To_api_Volume(in, out, s) +} + +func autoConvert_api_Volume_To_v1_Volume(in *api.Volume, out *Volume, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_api_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { + return err + } + return nil +} + +func Convert_api_Volume_To_v1_Volume(in *api.Volume, out *Volume, s 
conversion.Scope) error { + return autoConvert_api_Volume_To_v1_Volume(in, out, s) +} + +func autoConvert_v1_VolumeMount_To_api_VolumeMount(in *VolumeMount, out *api.VolumeMount, s conversion.Scope) error { + out.Name = in.Name + out.ReadOnly = in.ReadOnly + out.MountPath = in.MountPath + out.SubPath = in.SubPath + return nil +} + +func Convert_v1_VolumeMount_To_api_VolumeMount(in *VolumeMount, out *api.VolumeMount, s conversion.Scope) error { + return autoConvert_v1_VolumeMount_To_api_VolumeMount(in, out, s) +} + +func autoConvert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *VolumeMount, s conversion.Scope) error { + out.Name = in.Name + out.ReadOnly = in.ReadOnly + out.MountPath = in.MountPath + out.SubPath = in.SubPath + return nil +} + +func Convert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *VolumeMount, s conversion.Scope) error { + return autoConvert_api_VolumeMount_To_v1_VolumeMount(in, out, s) +} + +func autoConvert_v1_VolumeProjection_To_api_VolumeProjection(in *VolumeProjection, out *api.VolumeProjection, s conversion.Scope) error { + out.Secret = (*api.SecretProjection)(unsafe.Pointer(in.Secret)) + out.DownwardAPI = (*api.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) + out.ConfigMap = (*api.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) + return nil +} + +func Convert_v1_VolumeProjection_To_api_VolumeProjection(in *VolumeProjection, out *api.VolumeProjection, s conversion.Scope) error { + return autoConvert_v1_VolumeProjection_To_api_VolumeProjection(in, out, s) +} + +func autoConvert_api_VolumeProjection_To_v1_VolumeProjection(in *api.VolumeProjection, out *VolumeProjection, s conversion.Scope) error { + out.Secret = (*SecretProjection)(unsafe.Pointer(in.Secret)) + out.DownwardAPI = (*DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) + out.ConfigMap = (*ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) + return nil +} + +func Convert_api_VolumeProjection_To_v1_VolumeProjection(in *api.VolumeProjection, out *VolumeProjection, s conversion.Scope) error { + return autoConvert_api_VolumeProjection_To_v1_VolumeProjection(in, out, s) +} + +func autoConvert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.VolumeSource, s conversion.Scope) error { + out.HostPath = (*api.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.EmptyDir = (*api.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) + out.GCEPersistentDisk = (*api.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*api.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.GitRepo = (*api.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) + out.Secret = (*api.SecretVolumeSource)(unsafe.Pointer(in.Secret)) + out.NFS = (*api.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.ISCSI = (*api.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Glusterfs = (*api.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.PersistentVolumeClaim = (*api.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) + out.RBD = (*api.RBDVolumeSource)(unsafe.Pointer(in.RBD)) + out.FlexVolume = (*api.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = (*api.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*api.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) + out.Flocker = (*api.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.DownwardAPI = (*api.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) + out.FC = (*api.FCVolumeSource)(unsafe.Pointer(in.FC)) + 
out.AzureFile = (*api.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.ConfigMap = (*api.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) + out.VsphereVolume = (*api.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.Quobyte = (*api.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.AzureDisk = (*api.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*api.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.Projected = (*api.ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) + out.PortworxVolume = (*api.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*api.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) + return nil +} + +func Convert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.VolumeSource, s conversion.Scope) error { + return autoConvert_v1_VolumeSource_To_api_VolumeSource(in, out, s) +} + +func autoConvert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *VolumeSource, s conversion.Scope) error { + out.HostPath = (*HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.EmptyDir = (*EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) + out.GCEPersistentDisk = (*GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.GitRepo = (*GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) + out.Secret = (*SecretVolumeSource)(unsafe.Pointer(in.Secret)) + out.NFS = (*NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.ISCSI = (*ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Glusterfs = (*GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.PersistentVolumeClaim = (*PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) + out.RBD = (*RBDVolumeSource)(unsafe.Pointer(in.RBD)) + out.Quobyte = (*QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.FlexVolume = (*FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = (*CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) + out.Flocker = (*FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.DownwardAPI = (*DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) + out.FC = (*FCVolumeSource)(unsafe.Pointer(in.FC)) + out.AzureFile = (*AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.ConfigMap = (*ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) + out.VsphereVolume = (*VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.AzureDisk = (*AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.Projected = (*ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) + out.PortworxVolume = (*PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) + return nil +} + +func Convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *VolumeSource, s conversion.Scope) error { + return autoConvert_api_VolumeSource_To_v1_VolumeSource(in, out, s) +} + +func autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + out.VolumePath = in.VolumePath + out.FSType = in.FSType + return nil +} + +func 
Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in, out, s) +} + +func autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + out.VolumePath = in.VolumePath + out.FSType = in.FSType + return nil +} + +func Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + return autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { + return err + } + return nil +} + +func Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error { + return autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in, out, s) +} + +func autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { + return err + } + return nil +} + +func Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, s conversion.Scope) error { + return autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..463e94680 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.deepcopy.go @@ -0,0 +1,3500 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_AWSElasticBlockStoreVolumeSource, InType: reflect.TypeOf(&AWSElasticBlockStoreVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Affinity, InType: reflect.TypeOf(&Affinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_AttachedVolume, InType: reflect.TypeOf(&AttachedVolume{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_AvoidPods, InType: reflect.TypeOf(&AvoidPods{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_AzureDiskVolumeSource, InType: reflect.TypeOf(&AzureDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_AzureFileVolumeSource, InType: reflect.TypeOf(&AzureFileVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Binding, InType: reflect.TypeOf(&Binding{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Capabilities, InType: reflect.TypeOf(&Capabilities{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_CephFSVolumeSource, InType: reflect.TypeOf(&CephFSVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_CinderVolumeSource, InType: reflect.TypeOf(&CinderVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ComponentCondition, InType: reflect.TypeOf(&ComponentCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ComponentStatus, InType: reflect.TypeOf(&ComponentStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ComponentStatusList, InType: reflect.TypeOf(&ComponentStatusList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMap, InType: reflect.TypeOf(&ConfigMap{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapEnvSource, InType: reflect.TypeOf(&ConfigMapEnvSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapKeySelector, InType: reflect.TypeOf(&ConfigMapKeySelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapList, InType: reflect.TypeOf(&ConfigMapList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapProjection, InType: reflect.TypeOf(&ConfigMapProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ConfigMapVolumeSource, InType: reflect.TypeOf(&ConfigMapVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Container, InType: reflect.TypeOf(&Container{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerImage, InType: reflect.TypeOf(&ContainerImage{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerPort, InType: reflect.TypeOf(&ContainerPort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerState, InType: reflect.TypeOf(&ContainerState{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerStateRunning, InType: reflect.TypeOf(&ContainerStateRunning{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerStateTerminated, InType: reflect.TypeOf(&ContainerStateTerminated{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerStateWaiting, InType: reflect.TypeOf(&ContainerStateWaiting{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ContainerStatus, InType: reflect.TypeOf(&ContainerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DaemonEndpoint, InType: reflect.TypeOf(&DaemonEndpoint{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DeleteOptions, InType: reflect.TypeOf(&DeleteOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DownwardAPIProjection, InType: 
reflect.TypeOf(&DownwardAPIProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DownwardAPIVolumeFile, InType: reflect.TypeOf(&DownwardAPIVolumeFile{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_DownwardAPIVolumeSource, InType: reflect.TypeOf(&DownwardAPIVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EmptyDirVolumeSource, InType: reflect.TypeOf(&EmptyDirVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EndpointAddress, InType: reflect.TypeOf(&EndpointAddress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EndpointPort, InType: reflect.TypeOf(&EndpointPort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EndpointSubset, InType: reflect.TypeOf(&EndpointSubset{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Endpoints, InType: reflect.TypeOf(&Endpoints{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EndpointsList, InType: reflect.TypeOf(&EndpointsList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EnvFromSource, InType: reflect.TypeOf(&EnvFromSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EnvVar, InType: reflect.TypeOf(&EnvVar{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EnvVarSource, InType: reflect.TypeOf(&EnvVarSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Event, InType: reflect.TypeOf(&Event{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EventList, InType: reflect.TypeOf(&EventList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_EventSource, InType: reflect.TypeOf(&EventSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ExecAction, InType: reflect.TypeOf(&ExecAction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_FCVolumeSource, InType: reflect.TypeOf(&FCVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_FlexVolumeSource, InType: reflect.TypeOf(&FlexVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_FlockerVolumeSource, InType: reflect.TypeOf(&FlockerVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_GCEPersistentDiskVolumeSource, InType: reflect.TypeOf(&GCEPersistentDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_GitRepoVolumeSource, InType: reflect.TypeOf(&GitRepoVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_GlusterfsVolumeSource, InType: reflect.TypeOf(&GlusterfsVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HTTPGetAction, InType: reflect.TypeOf(&HTTPGetAction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HTTPHeader, InType: reflect.TypeOf(&HTTPHeader{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Handler, InType: reflect.TypeOf(&Handler{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HostPathVolumeSource, InType: reflect.TypeOf(&HostPathVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ISCSIVolumeSource, InType: reflect.TypeOf(&ISCSIVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_KeyToPath, InType: reflect.TypeOf(&KeyToPath{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Lifecycle, InType: reflect.TypeOf(&Lifecycle{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LimitRange, InType: reflect.TypeOf(&LimitRange{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LimitRangeItem, InType: reflect.TypeOf(&LimitRangeItem{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LimitRangeList, InType: reflect.TypeOf(&LimitRangeList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LimitRangeSpec, 
InType: reflect.TypeOf(&LimitRangeSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_List, InType: reflect.TypeOf(&List{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ListOptions, InType: reflect.TypeOf(&ListOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LoadBalancerIngress, InType: reflect.TypeOf(&LoadBalancerIngress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LoadBalancerStatus, InType: reflect.TypeOf(&LoadBalancerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LocalObjectReference, InType: reflect.TypeOf(&LocalObjectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NFSVolumeSource, InType: reflect.TypeOf(&NFSVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Namespace, InType: reflect.TypeOf(&Namespace{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NamespaceList, InType: reflect.TypeOf(&NamespaceList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NamespaceSpec, InType: reflect.TypeOf(&NamespaceSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NamespaceStatus, InType: reflect.TypeOf(&NamespaceStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Node, InType: reflect.TypeOf(&Node{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeAddress, InType: reflect.TypeOf(&NodeAddress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeAffinity, InType: reflect.TypeOf(&NodeAffinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeCondition, InType: reflect.TypeOf(&NodeCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeDaemonEndpoints, InType: reflect.TypeOf(&NodeDaemonEndpoints{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeList, InType: reflect.TypeOf(&NodeList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeProxyOptions, InType: reflect.TypeOf(&NodeProxyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeResources, InType: reflect.TypeOf(&NodeResources{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSelector, InType: reflect.TypeOf(&NodeSelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSelectorRequirement, InType: reflect.TypeOf(&NodeSelectorRequirement{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSelectorTerm, InType: reflect.TypeOf(&NodeSelectorTerm{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSpec, InType: reflect.TypeOf(&NodeSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeStatus, InType: reflect.TypeOf(&NodeStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NodeSystemInfo, InType: reflect.TypeOf(&NodeSystemInfo{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectFieldSelector, InType: reflect.TypeOf(&ObjectFieldSelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectMeta, InType: reflect.TypeOf(&ObjectMeta{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectReference, InType: reflect.TypeOf(&ObjectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolume, InType: reflect.TypeOf(&PersistentVolume{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeClaim, InType: reflect.TypeOf(&PersistentVolumeClaim{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeClaimList, InType: reflect.TypeOf(&PersistentVolumeClaimList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeClaimSpec, InType: reflect.TypeOf(&PersistentVolumeClaimSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: 
DeepCopy_v1_PersistentVolumeClaimStatus, InType: reflect.TypeOf(&PersistentVolumeClaimStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeClaimVolumeSource, InType: reflect.TypeOf(&PersistentVolumeClaimVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeList, InType: reflect.TypeOf(&PersistentVolumeList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeSource, InType: reflect.TypeOf(&PersistentVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeSpec, InType: reflect.TypeOf(&PersistentVolumeSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PersistentVolumeStatus, InType: reflect.TypeOf(&PersistentVolumeStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PhotonPersistentDiskVolumeSource, InType: reflect.TypeOf(&PhotonPersistentDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Pod, InType: reflect.TypeOf(&Pod{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodAffinity, InType: reflect.TypeOf(&PodAffinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodAffinityTerm, InType: reflect.TypeOf(&PodAffinityTerm{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodAntiAffinity, InType: reflect.TypeOf(&PodAntiAffinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodAttachOptions, InType: reflect.TypeOf(&PodAttachOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodCondition, InType: reflect.TypeOf(&PodCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodExecOptions, InType: reflect.TypeOf(&PodExecOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodList, InType: reflect.TypeOf(&PodList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodLogOptions, InType: reflect.TypeOf(&PodLogOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodPortForwardOptions, InType: reflect.TypeOf(&PodPortForwardOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodProxyOptions, InType: reflect.TypeOf(&PodProxyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodSecurityContext, InType: reflect.TypeOf(&PodSecurityContext{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodSignature, InType: reflect.TypeOf(&PodSignature{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodSpec, InType: reflect.TypeOf(&PodSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodStatus, InType: reflect.TypeOf(&PodStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodStatusResult, InType: reflect.TypeOf(&PodStatusResult{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodTemplate, InType: reflect.TypeOf(&PodTemplate{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodTemplateList, InType: reflect.TypeOf(&PodTemplateList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodTemplateSpec, InType: reflect.TypeOf(&PodTemplateSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PortworxVolumeSource, InType: reflect.TypeOf(&PortworxVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Preconditions, InType: reflect.TypeOf(&Preconditions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PreferAvoidPodsEntry, InType: reflect.TypeOf(&PreferAvoidPodsEntry{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PreferredSchedulingTerm, InType: reflect.TypeOf(&PreferredSchedulingTerm{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Probe, InType: reflect.TypeOf(&Probe{})}, + 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ProjectedVolumeSource, InType: reflect.TypeOf(&ProjectedVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_QuobyteVolumeSource, InType: reflect.TypeOf(&QuobyteVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_RBDVolumeSource, InType: reflect.TypeOf(&RBDVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_RangeAllocation, InType: reflect.TypeOf(&RangeAllocation{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ReplicationController, InType: reflect.TypeOf(&ReplicationController{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ReplicationControllerCondition, InType: reflect.TypeOf(&ReplicationControllerCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ReplicationControllerList, InType: reflect.TypeOf(&ReplicationControllerList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ReplicationControllerSpec, InType: reflect.TypeOf(&ReplicationControllerSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ReplicationControllerStatus, InType: reflect.TypeOf(&ReplicationControllerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceFieldSelector, InType: reflect.TypeOf(&ResourceFieldSelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceQuota, InType: reflect.TypeOf(&ResourceQuota{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceQuotaList, InType: reflect.TypeOf(&ResourceQuotaList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceQuotaSpec, InType: reflect.TypeOf(&ResourceQuotaSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceQuotaStatus, InType: reflect.TypeOf(&ResourceQuotaStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceRequirements, InType: reflect.TypeOf(&ResourceRequirements{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SELinuxOptions, InType: reflect.TypeOf(&SELinuxOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ScaleIOVolumeSource, InType: reflect.TypeOf(&ScaleIOVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Secret, InType: reflect.TypeOf(&Secret{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretEnvSource, InType: reflect.TypeOf(&SecretEnvSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretKeySelector, InType: reflect.TypeOf(&SecretKeySelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretList, InType: reflect.TypeOf(&SecretList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretProjection, InType: reflect.TypeOf(&SecretProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecretVolumeSource, InType: reflect.TypeOf(&SecretVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SecurityContext, InType: reflect.TypeOf(&SecurityContext{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SerializedReference, InType: reflect.TypeOf(&SerializedReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Service, InType: reflect.TypeOf(&Service{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceAccount, InType: reflect.TypeOf(&ServiceAccount{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceAccountList, InType: reflect.TypeOf(&ServiceAccountList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceList, InType: reflect.TypeOf(&ServiceList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServicePort, InType: reflect.TypeOf(&ServicePort{})}, + 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceProxyOptions, InType: reflect.TypeOf(&ServiceProxyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceSpec, InType: reflect.TypeOf(&ServiceSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ServiceStatus, InType: reflect.TypeOf(&ServiceStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Sysctl, InType: reflect.TypeOf(&Sysctl{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_TCPSocketAction, InType: reflect.TypeOf(&TCPSocketAction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Taint, InType: reflect.TypeOf(&Taint{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Toleration, InType: reflect.TypeOf(&Toleration{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Volume, InType: reflect.TypeOf(&Volume{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VolumeMount, InType: reflect.TypeOf(&VolumeMount{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VolumeProjection, InType: reflect.TypeOf(&VolumeProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VolumeSource, InType: reflect.TypeOf(&VolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_VsphereVirtualDiskVolumeSource, InType: reflect.TypeOf(&VsphereVirtualDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_WeightedPodAffinityTerm, InType: reflect.TypeOf(&WeightedPodAffinityTerm{})}, + ) +} + +func DeepCopy_v1_AWSElasticBlockStoreVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AWSElasticBlockStoreVolumeSource) + out := out.(*AWSElasticBlockStoreVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_Affinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Affinity) + out := out.(*Affinity) + *out = *in + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + *out = new(NodeAffinity) + if err := DeepCopy_v1_NodeAffinity(*in, *out, c); err != nil { + return err + } + } + if in.PodAffinity != nil { + in, out := &in.PodAffinity, &out.PodAffinity + *out = new(PodAffinity) + if err := DeepCopy_v1_PodAffinity(*in, *out, c); err != nil { + return err + } + } + if in.PodAntiAffinity != nil { + in, out := &in.PodAntiAffinity, &out.PodAntiAffinity + *out = new(PodAntiAffinity) + if err := DeepCopy_v1_PodAntiAffinity(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_AttachedVolume(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AttachedVolume) + out := out.(*AttachedVolume) + *out = *in + return nil + } +} + +func DeepCopy_v1_AvoidPods(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AvoidPods) + out := out.(*AvoidPods) + *out = *in + if in.PreferAvoidPods != nil { + in, out := &in.PreferAvoidPods, &out.PreferAvoidPods + *out = make([]PreferAvoidPodsEntry, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PreferAvoidPodsEntry(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_AzureDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AzureDiskVolumeSource) + out := out.(*AzureDiskVolumeSource) + *out = *in + if in.CachingMode != nil { + in, out := &in.CachingMode, &out.CachingMode + *out = new(AzureDataDiskCachingMode) + **out = **in + } + if in.FSType != nil { + in, out := &in.FSType, &out.FSType + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + 
in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_AzureFileVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AzureFileVolumeSource) + out := out.(*AzureFileVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_Binding(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Binding) + out := out.(*Binding) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + return nil + } +} + +func DeepCopy_v1_Capabilities(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Capabilities) + out := out.(*Capabilities) + *out = *in + if in.Add != nil { + in, out := &in.Add, &out.Add + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + if in.Drop != nil { + in, out := &in.Drop, &out.Drop + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_CephFSVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CephFSVolumeSource) + out := out.(*CephFSVolumeSource) + *out = *in + if in.Monitors != nil { + in, out := &in.Monitors, &out.Monitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_CinderVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CinderVolumeSource) + out := out.(*CinderVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_ComponentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ComponentCondition) + out := out.(*ComponentCondition) + *out = *in + return nil + } +} + +func DeepCopy_v1_ComponentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ComponentStatus) + out := out.(*ComponentStatus) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ComponentCondition, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_ComponentStatusList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ComponentStatusList) + out := out.(*ComponentStatusList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ComponentStatus, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ComponentStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_ConfigMap(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMap) + out := out.(*ConfigMap) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_v1_ConfigMapEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapEnvSource) + out := out.(*ConfigMapEnvSource) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = 
new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_ConfigMapKeySelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapKeySelector) + out := out.(*ConfigMapKeySelector) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_ConfigMapList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapList) + out := out.(*ConfigMapList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConfigMap, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ConfigMap(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_ConfigMapProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapProjection) + out := out.(*ConfigMapProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_v1_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_ConfigMapVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapVolumeSource) + out := out.(*ConfigMapVolumeSource) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_v1_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_Container(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Container) + out := out.(*Container) + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]EnvFromSource, len(*in)) + for i := range *in { + if err := DeepCopy_v1_EnvFromSource(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + for i := range *in { + if err := DeepCopy_v1_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if err := DeepCopy_v1_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { + return err + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMount, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(Probe) + if err := DeepCopy_v1_Probe(*in, *out, c); err != nil { + return err + } + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(Probe) + if err := DeepCopy_v1_Probe(*in, *out, c); err != nil { + return err + } + } + if in.Lifecycle != nil { + 
in, out := &in.Lifecycle, &out.Lifecycle + *out = new(Lifecycle) + if err := DeepCopy_v1_Lifecycle(*in, *out, c); err != nil { + return err + } + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(SecurityContext) + if err := DeepCopy_v1_SecurityContext(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_ContainerImage(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerImage) + out := out.(*ContainerImage) + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_ContainerPort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerPort) + out := out.(*ContainerPort) + *out = *in + return nil + } +} + +func DeepCopy_v1_ContainerState(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerState) + out := out.(*ContainerState) + *out = *in + if in.Waiting != nil { + in, out := &in.Waiting, &out.Waiting + *out = new(ContainerStateWaiting) + **out = **in + } + if in.Running != nil { + in, out := &in.Running, &out.Running + *out = new(ContainerStateRunning) + if err := DeepCopy_v1_ContainerStateRunning(*in, *out, c); err != nil { + return err + } + } + if in.Terminated != nil { + in, out := &in.Terminated, &out.Terminated + *out = new(ContainerStateTerminated) + if err := DeepCopy_v1_ContainerStateTerminated(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_ContainerStateRunning(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStateRunning) + out := out.(*ContainerStateRunning) + *out = *in + out.StartedAt = in.StartedAt.DeepCopy() + return nil + } +} + +func DeepCopy_v1_ContainerStateTerminated(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStateTerminated) + out := out.(*ContainerStateTerminated) + *out = *in + out.StartedAt = in.StartedAt.DeepCopy() + out.FinishedAt = in.FinishedAt.DeepCopy() + return nil + } +} + +func DeepCopy_v1_ContainerStateWaiting(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStateWaiting) + out := out.(*ContainerStateWaiting) + *out = *in + return nil + } +} + +func DeepCopy_v1_ContainerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStatus) + out := out.(*ContainerStatus) + *out = *in + if err := DeepCopy_v1_ContainerState(&in.State, &out.State, c); err != nil { + return err + } + if err := DeepCopy_v1_ContainerState(&in.LastTerminationState, &out.LastTerminationState, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_DaemonEndpoint(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonEndpoint) + out := out.(*DaemonEndpoint) + *out = *in + return nil + } +} + +func DeepCopy_v1_DeleteOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeleteOptions) + out := out.(*DeleteOptions) + *out = *in + if in.GracePeriodSeconds != nil { + in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Preconditions != nil { + in, out := &in.Preconditions, &out.Preconditions + *out = new(Preconditions) + if err := DeepCopy_v1_Preconditions(*in, *out, c); err != nil { + return err + } + } + if in.OrphanDependents != nil { + in, out := 
&in.OrphanDependents, &out.OrphanDependents + *out = new(bool) + **out = **in + } + if in.PropagationPolicy != nil { + in, out := &in.PropagationPolicy, &out.PropagationPolicy + *out = new(DeletionPropagation) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_DownwardAPIProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIProjection) + out := out.(*DownwardAPIProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + if err := DeepCopy_v1_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_DownwardAPIVolumeFile(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIVolumeFile) + out := out.(*DownwardAPIVolumeFile) + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + **out = **in + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + if err := DeepCopy_v1_ResourceFieldSelector(*in, *out, c); err != nil { + return err + } + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_DownwardAPIVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIVolumeSource) + out := out.(*DownwardAPIVolumeSource) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + if err := DeepCopy_v1_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_EmptyDirVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EmptyDirVolumeSource) + out := out.(*EmptyDirVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_EndpointAddress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointAddress) + out := out.(*EndpointAddress) + *out = *in + if in.NodeName != nil { + in, out := &in.NodeName, &out.NodeName + *out = new(string) + **out = **in + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(ObjectReference) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_EndpointPort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointPort) + out := out.(*EndpointPort) + *out = *in + return nil + } +} + +func DeepCopy_v1_EndpointSubset(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointSubset) + out := out.(*EndpointSubset) + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + if err := DeepCopy_v1_EndpointAddress(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.NotReadyAddresses != nil { + in, out := &in.NotReadyAddresses, &out.NotReadyAddresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + if err := DeepCopy_v1_EndpointAddress(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + copy(*out, 
*in) + } + return nil + } +} + +func DeepCopy_v1_Endpoints(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Endpoints) + out := out.(*Endpoints) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if in.Subsets != nil { + in, out := &in.Subsets, &out.Subsets + *out = make([]EndpointSubset, len(*in)) + for i := range *in { + if err := DeepCopy_v1_EndpointSubset(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_EndpointsList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointsList) + out := out.(*EndpointsList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Endpoints, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Endpoints(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_EnvFromSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvFromSource) + out := out.(*EnvFromSource) + *out = *in + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + *out = new(ConfigMapEnvSource) + if err := DeepCopy_v1_ConfigMapEnvSource(*in, *out, c); err != nil { + return err + } + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretEnvSource) + if err := DeepCopy_v1_SecretEnvSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_EnvVar(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvVar) + out := out.(*EnvVar) + *out = *in + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + *out = new(EnvVarSource) + if err := DeepCopy_v1_EnvVarSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_EnvVarSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvVarSource) + out := out.(*EnvVarSource) + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + **out = **in + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + if err := DeepCopy_v1_ResourceFieldSelector(*in, *out, c); err != nil { + return err + } + } + if in.ConfigMapKeyRef != nil { + in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef + *out = new(ConfigMapKeySelector) + if err := DeepCopy_v1_ConfigMapKeySelector(*in, *out, c); err != nil { + return err + } + } + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(SecretKeySelector) + if err := DeepCopy_v1_SecretKeySelector(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_Event(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Event) + out := out.(*Event) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + out.FirstTimestamp = in.FirstTimestamp.DeepCopy() + out.LastTimestamp = in.LastTimestamp.DeepCopy() + return nil + } +} + +func DeepCopy_v1_EventList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EventList) + out := out.(*EventList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range 
*in { + if err := DeepCopy_v1_Event(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_EventSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EventSource) + out := out.(*EventSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_ExecAction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExecAction) + out := out.(*ExecAction) + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_FCVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FCVolumeSource) + out := out.(*FCVolumeSource) + *out = *in + if in.TargetWWNs != nil { + in, out := &in.TargetWWNs, &out.TargetWWNs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_FlexVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FlexVolumeSource) + out := out.(*FlexVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_v1_FlockerVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FlockerVolumeSource) + out := out.(*FlockerVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_GCEPersistentDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GCEPersistentDiskVolumeSource) + out := out.(*GCEPersistentDiskVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_GitRepoVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GitRepoVolumeSource) + out := out.(*GitRepoVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_GlusterfsVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GlusterfsVolumeSource) + out := out.(*GlusterfsVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_HTTPGetAction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPGetAction) + out := out.(*HTTPGetAction) + *out = *in + if in.HTTPHeaders != nil { + in, out := &in.HTTPHeaders, &out.HTTPHeaders + *out = make([]HTTPHeader, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_HTTPHeader(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPHeader) + out := out.(*HTTPHeader) + *out = *in + return nil + } +} + +func DeepCopy_v1_Handler(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Handler) + out := out.(*Handler) + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecAction) + if err := DeepCopy_v1_ExecAction(*in, *out, c); err != nil { + return err + } + } + if in.HTTPGet != nil { + in, out := &in.HTTPGet, &out.HTTPGet + *out = new(HTTPGetAction) + if err := DeepCopy_v1_HTTPGetAction(*in, *out, c); err != nil { + return err + } + } + if in.TCPSocket != nil { + in, out := &in.TCPSocket, &out.TCPSocket + *out = new(TCPSocketAction) + **out = **in + } + return nil + } +} + +func 
DeepCopy_v1_HostPathVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HostPathVolumeSource) + out := out.(*HostPathVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_ISCSIVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ISCSIVolumeSource) + out := out.(*ISCSIVolumeSource) + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_KeyToPath(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*KeyToPath) + out := out.(*KeyToPath) + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_Lifecycle(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Lifecycle) + out := out.(*Lifecycle) + *out = *in + if in.PostStart != nil { + in, out := &in.PostStart, &out.PostStart + *out = new(Handler) + if err := DeepCopy_v1_Handler(*in, *out, c); err != nil { + return err + } + } + if in.PreStop != nil { + in, out := &in.PreStop, &out.PreStop + *out = new(Handler) + if err := DeepCopy_v1_Handler(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_LimitRange(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRange) + out := out.(*LimitRange) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_LimitRangeSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_LimitRangeItem(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRangeItem) + out := out.(*LimitRangeItem) + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.DefaultRequest != nil { + in, out := &in.DefaultRequest, &out.DefaultRequest + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.MaxLimitRequestRatio != nil { + in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +func DeepCopy_v1_LimitRangeList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRangeList) + out := out.(*LimitRangeList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LimitRange, len(*in)) + for i := range *in { + if err := DeepCopy_v1_LimitRange(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_LimitRangeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRangeSpec) + out := out.(*LimitRangeSpec) + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make([]LimitRangeItem, len(*in)) + for i := range *in { + if err := DeepCopy_v1_LimitRangeItem(&(*in)[i], &(*out)[i], c); 
err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_List(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*List) + out := out.(*List) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*runtime.RawExtension) + } + } + } + return nil + } +} + +func DeepCopy_v1_ListOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ListOptions) + out := out.(*ListOptions) + *out = *in + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_LoadBalancerIngress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LoadBalancerIngress) + out := out.(*LoadBalancerIngress) + *out = *in + return nil + } +} + +func DeepCopy_v1_LoadBalancerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LoadBalancerStatus) + out := out.(*LoadBalancerStatus) + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]LoadBalancerIngress, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_LocalObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LocalObjectReference) + out := out.(*LocalObjectReference) + *out = *in + return nil + } +} + +func DeepCopy_v1_NFSVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NFSVolumeSource) + out := out.(*NFSVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_Namespace(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Namespace) + out := out.(*Namespace) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_NamespaceSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_NamespaceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NamespaceList) + out := out.(*NamespaceList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Namespace, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Namespace(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_NamespaceSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NamespaceSpec) + out := out.(*NamespaceSpec) + *out = *in + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]FinalizerName, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_NamespaceStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NamespaceStatus) + out := out.(*NamespaceStatus) + *out = *in + return nil + } +} + +func DeepCopy_v1_Node(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Node) + out := out.(*Node) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_NodeSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_NodeStatus(&in.Status, &out.Status, c); err != nil { 
+ return err + } + return nil + } +} + +func DeepCopy_v1_NodeAddress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeAddress) + out := out.(*NodeAddress) + *out = *in + return nil + } +} + +func DeepCopy_v1_NodeAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeAffinity) + out := out.(*NodeAffinity) + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = new(NodeSelector) + if err := DeepCopy_v1_NodeSelector(*in, *out, c); err != nil { + return err + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]PreferredSchedulingTerm, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PreferredSchedulingTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_NodeCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeCondition) + out := out.(*NodeCondition) + *out = *in + out.LastHeartbeatTime = in.LastHeartbeatTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_v1_NodeDaemonEndpoints(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeDaemonEndpoints) + out := out.(*NodeDaemonEndpoints) + *out = *in + return nil + } +} + +func DeepCopy_v1_NodeList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeList) + out := out.(*NodeList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Node, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Node(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_NodeProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeProxyOptions) + out := out.(*NodeProxyOptions) + *out = *in + return nil + } +} + +func DeepCopy_v1_NodeResources(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeResources) + out := out.(*NodeResources) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +func DeepCopy_v1_NodeSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSelector) + out := out.(*NodeSelector) + *out = *in + if in.NodeSelectorTerms != nil { + in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms + *out = make([]NodeSelectorTerm, len(*in)) + for i := range *in { + if err := DeepCopy_v1_NodeSelectorTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_NodeSelectorRequirement(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSelectorRequirement) + out := out.(*NodeSelectorRequirement) + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_NodeSelectorTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSelectorTerm) + out := out.(*NodeSelectorTerm) + *out = *in + if in.MatchExpressions != nil { + in, 
out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]NodeSelectorRequirement, len(*in)) + for i := range *in { + if err := DeepCopy_v1_NodeSelectorRequirement(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_NodeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSpec) + out := out.(*NodeSpec) + *out = *in + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]Taint, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Taint(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_NodeStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeStatus) + out := out.(*NodeStatus) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]NodeCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1_NodeCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]NodeAddress, len(*in)) + copy(*out, *in) + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ContainerImage, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ContainerImage(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.VolumesInUse != nil { + in, out := &in.VolumesInUse, &out.VolumesInUse + *out = make([]UniqueVolumeName, len(*in)) + copy(*out, *in) + } + if in.VolumesAttached != nil { + in, out := &in.VolumesAttached, &out.VolumesAttached + *out = make([]AttachedVolume, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_NodeSystemInfo(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSystemInfo) + out := out.(*NodeSystemInfo) + *out = *in + return nil + } +} + +func DeepCopy_v1_ObjectFieldSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectFieldSelector) + out := out.(*ObjectFieldSelector) + *out = *in + return nil + } +} + +func DeepCopy_v1_ObjectMeta(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMeta) + out := out.(*ObjectMeta) + *out = *in + out.CreationTimestamp = in.CreationTimestamp.DeepCopy() + if in.DeletionTimestamp != nil { + in, out := &in.DeletionTimestamp, &out.DeletionTimestamp + *out = new(meta_v1.Time) + **out = (*in).DeepCopy() + } + if in.DeletionGracePeriodSeconds != nil { + in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]meta_v1.OwnerReference, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + 
} else { + (*out)[i] = *newVal.(*meta_v1.OwnerReference) + } + } + } + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_ObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectReference) + out := out.(*ObjectReference) + *out = *in + return nil + } +} + +func DeepCopy_v1_PersistentVolume(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolume) + out := out.(*PersistentVolume) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_PersistentVolumeClaim(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaim) + out := out.(*PersistentVolumeClaim) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_PersistentVolumeClaimStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_PersistentVolumeClaimList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimList) + out := out.(*PersistentVolumeClaimList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PersistentVolumeClaim(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_PersistentVolumeClaimSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimSpec) + out := out.(*PersistentVolumeClaimSpec) + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*meta_v1.LabelSelector) + } + } + if err := DeepCopy_v1_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { + return err + } + if in.StorageClassName != nil { + in, out := &in.StorageClassName, &out.StorageClassName + *out = new(string) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_PersistentVolumeClaimStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimStatus) + out := out.(*PersistentVolumeClaimStatus) + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +func DeepCopy_v1_PersistentVolumeClaimVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimVolumeSource) + out := out.(*PersistentVolumeClaimVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_PersistentVolumeList(in 
interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeList) + out := out.(*PersistentVolumeList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolume, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PersistentVolume(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_PersistentVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeSource) + out := out.(*PersistentVolumeSource) + *out = *in + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + **out = **in + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsVolumeSource) + **out = **in + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSVolumeSource) + **out = **in + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + *out = new(RBDVolumeSource) + if err := DeepCopy_v1_RBDVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(ISCSIVolumeSource) + if err := DeepCopy_v1_ISCSIVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(CinderVolumeSource) + **out = **in + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(CephFSVolumeSource) + if err := DeepCopy_v1_CephFSVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.FC != nil { + in, out := &in.FC, &out.FC + *out = new(FCVolumeSource) + if err := DeepCopy_v1_FCVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(FlockerVolumeSource) + **out = **in + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(FlexVolumeSource) + if err := DeepCopy_v1_FlexVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(AzureFileVolumeSource) + **out = **in + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + *out = new(QuobyteVolumeSource) + **out = **in + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + *out = new(AzureDiskVolumeSource) + if err := DeepCopy_v1_AzureDiskVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOVolumeSource) + if err := DeepCopy_v1_ScaleIOVolumeSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_PersistentVolumeSpec(in 
interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeSpec) + out := out.(*PersistentVolumeSpec) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if err := DeepCopy_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, c); err != nil { + return err + } + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.ClaimRef != nil { + in, out := &in.ClaimRef, &out.ClaimRef + *out = new(ObjectReference) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_PersistentVolumeStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeStatus) + out := out.(*PersistentVolumeStatus) + *out = *in + return nil + } +} + +func DeepCopy_v1_PhotonPersistentDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PhotonPersistentDiskVolumeSource) + out := out.(*PhotonPersistentDiskVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_Pod(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Pod) + out := out.(*Pod) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_PodSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_PodStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_PodAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAffinity) + out := out.(*PodAffinity) + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_v1_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_PodAffinityTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAffinityTerm) + out := out.(*PodAffinityTerm) + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*meta_v1.LabelSelector) + } + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_PodAntiAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAntiAffinity) + out := out.(*PodAntiAffinity) + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + 
if err := DeepCopy_v1_PodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_v1_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_PodAttachOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAttachOptions) + out := out.(*PodAttachOptions) + *out = *in + return nil + } +} + +func DeepCopy_v1_PodCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodCondition) + out := out.(*PodCondition) + *out = *in + out.LastProbeTime = in.LastProbeTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_v1_PodExecOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodExecOptions) + out := out.(*PodExecOptions) + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_PodList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodList) + out := out.(*PodList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Pod, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Pod(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_PodLogOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodLogOptions) + out := out.(*PodLogOptions) + *out = *in + if in.SinceSeconds != nil { + in, out := &in.SinceSeconds, &out.SinceSeconds + *out = new(int64) + **out = **in + } + if in.SinceTime != nil { + in, out := &in.SinceTime, &out.SinceTime + *out = new(meta_v1.Time) + **out = (*in).DeepCopy() + } + if in.TailLines != nil { + in, out := &in.TailLines, &out.TailLines + *out = new(int64) + **out = **in + } + if in.LimitBytes != nil { + in, out := &in.LimitBytes, &out.LimitBytes + *out = new(int64) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_PodPortForwardOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPortForwardOptions) + out := out.(*PodPortForwardOptions) + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]int32, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_PodProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodProxyOptions) + out := out.(*PodProxyOptions) + *out = *in + return nil + } +} + +func DeepCopy_v1_PodSecurityContext(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityContext) + out := out.(*PodSecurityContext) + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + **out = **in + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(int64) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.SupplementalGroups != nil { + in, out := &in.SupplementalGroups, &out.SupplementalGroups + *out = make([]int64, len(*in)) + 
copy(*out, *in) + } + if in.FSGroup != nil { + in, out := &in.FSGroup, &out.FSGroup + *out = new(int64) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_PodSignature(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSignature) + out := out.(*PodSignature) + *out = *in + if in.PodController != nil { + in, out := &in.PodController, &out.PodController + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*meta_v1.OwnerReference) + } + } + return nil + } +} + +func DeepCopy_v1_PodSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSpec) + out := out.(*PodSpec) + *out = *in + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]Volume, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Volume(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]Container, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Container(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]Container, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Container(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(PodSecurityContext) + if err := DeepCopy_v1_PodSecurityContext(*in, *out, c); err != nil { + return err + } + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(Affinity) + if err := DeepCopy_v1_Affinity(*in, *out, c); err != nil { + return err + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]Toleration, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Toleration(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_PodStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodStatus) + out := out.(*PodStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PodCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PodCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(meta_v1.Time) + **out = (*in).DeepCopy() + } + if in.InitContainerStatuses != nil { + in, out := &in.InitContainerStatuses, &out.InitContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + if err := 
DeepCopy_v1_ContainerStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.ContainerStatuses != nil { + in, out := &in.ContainerStatuses, &out.ContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ContainerStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_PodStatusResult(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodStatusResult) + out := out.(*PodStatusResult) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_PodStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_PodTemplate(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodTemplate) + out := out.(*PodTemplate) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_PodTemplateList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodTemplateList) + out := out.(*PodTemplateList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodTemplate, len(*in)) + for i := range *in { + if err := DeepCopy_v1_PodTemplate(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_PodTemplateSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodTemplateSpec) + out := out.(*PodTemplateSpec) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_PodSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_PortworxVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PortworxVolumeSource) + out := out.(*PortworxVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_Preconditions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Preconditions) + out := out.(*Preconditions) + *out = *in + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(types.UID) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_PreferAvoidPodsEntry(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PreferAvoidPodsEntry) + out := out.(*PreferAvoidPodsEntry) + *out = *in + if err := DeepCopy_v1_PodSignature(&in.PodSignature, &out.PodSignature, c); err != nil { + return err + } + out.EvictionTime = in.EvictionTime.DeepCopy() + return nil + } +} + +func DeepCopy_v1_PreferredSchedulingTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PreferredSchedulingTerm) + out := out.(*PreferredSchedulingTerm) + *out = *in + if err := DeepCopy_v1_NodeSelectorTerm(&in.Preference, &out.Preference, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_Probe(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Probe) + out := out.(*Probe) + *out = *in + if err := DeepCopy_v1_Handler(&in.Handler, &out.Handler, c); err != nil { + return err + } + return nil + } +} + +func 
DeepCopy_v1_ProjectedVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ProjectedVolumeSource) + out := out.(*ProjectedVolumeSource) + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]VolumeProjection, len(*in)) + for i := range *in { + if err := DeepCopy_v1_VolumeProjection(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_QuobyteVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*QuobyteVolumeSource) + out := out.(*QuobyteVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_RBDVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RBDVolumeSource) + out := out.(*RBDVolumeSource) + *out = *in + if in.CephMonitors != nil { + in, out := &in.CephMonitors, &out.CephMonitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_RangeAllocation(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RangeAllocation) + out := out.(*RangeAllocation) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_ReplicationController(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationController) + out := out.(*ReplicationController) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_ReplicationControllerStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_ReplicationControllerCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerCondition) + out := out.(*ReplicationControllerCondition) + *out = *in + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_v1_ReplicationControllerList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerList) + out := out.(*ReplicationControllerList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicationController, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ReplicationController(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_ReplicationControllerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerSpec) + out := out.(*ReplicationControllerSpec) + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + 
*out = new(PodTemplateSpec) + if err := DeepCopy_v1_PodTemplateSpec(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_ReplicationControllerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerStatus) + out := out.(*ReplicationControllerStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicationControllerCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ReplicationControllerCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_ResourceFieldSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceFieldSelector) + out := out.(*ResourceFieldSelector) + *out = *in + out.Divisor = in.Divisor.DeepCopy() + return nil + } +} + +func DeepCopy_v1_ResourceQuota(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuota) + out := out.(*ResourceQuota) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_ResourceQuotaStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_ResourceQuotaList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuotaList) + out := out.(*ResourceQuotaList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceQuota, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ResourceQuota(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_ResourceQuotaSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuotaSpec) + out := out.(*ResourceQuotaSpec) + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ResourceQuotaScope, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_ResourceQuotaStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuotaStatus) + out := out.(*ResourceQuotaStatus) + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Used != nil { + in, out := &in.Used, &out.Used + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +func DeepCopy_v1_ResourceRequirements(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceRequirements) + out := out.(*ResourceRequirements) + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +func DeepCopy_v1_SELinuxOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SELinuxOptions) + out := out.(*SELinuxOptions) 
+ *out = *in + return nil + } +} + +func DeepCopy_v1_ScaleIOVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleIOVolumeSource) + out := out.(*ScaleIOVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_Secret(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Secret) + out := out.(*Secret) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string][]byte) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*[]byte) + } + } + } + if in.StringData != nil { + in, out := &in.StringData, &out.StringData + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_v1_SecretEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretEnvSource) + out := out.(*SecretEnvSource) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_SecretKeySelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretKeySelector) + out := out.(*SecretKeySelector) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_SecretList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretList) + out := out.(*SecretList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Secret, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Secret(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_SecretProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretProjection) + out := out.(*SecretProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_v1_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_SecretVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretVolumeSource) + out := out.(*SecretVolumeSource) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_v1_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_SecurityContext(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecurityContext) + out := out.(*SecurityContext) + *out = *in + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = new(Capabilities) + if err := 
DeepCopy_v1_Capabilities(*in, *out, c); err != nil { + return err + } + } + if in.Privileged != nil { + in, out := &in.Privileged, &out.Privileged + *out = new(bool) + **out = **in + } + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + **out = **in + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(int64) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.ReadOnlyRootFilesystem != nil { + in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_SerializedReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SerializedReference) + out := out.(*SerializedReference) + *out = *in + return nil + } +} + +func DeepCopy_v1_Service(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Service) + out := out.(*Service) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_ServiceSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_ServiceStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_ServiceAccount(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceAccount) + out := out.(*ServiceAccount) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_ServiceAccountList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceAccountList) + out := out.(*ServiceAccountList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceAccount, len(*in)) + for i := range *in { + if err := DeepCopy_v1_ServiceAccount(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_ServiceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceList) + out := out.(*ServiceList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Service(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_ServicePort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServicePort) + out := out.(*ServicePort) + *out = *in + return nil + } +} + +func DeepCopy_v1_ServiceProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceProxyOptions) + out := out.(*ServiceProxyOptions) + *out = *in + return nil + } +} + +func DeepCopy_v1_ServiceSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := 
in.(*ServiceSpec) + out := out.(*ServiceSpec) + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ServicePort, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExternalIPs != nil { + in, out := &in.ExternalIPs, &out.ExternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DeprecatedPublicIPs != nil { + in, out := &in.DeprecatedPublicIPs, &out.DeprecatedPublicIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LoadBalancerSourceRanges != nil { + in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1_ServiceStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceStatus) + out := out.(*ServiceStatus) + *out = *in + if err := DeepCopy_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_Sysctl(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Sysctl) + out := out.(*Sysctl) + *out = *in + return nil + } +} + +func DeepCopy_v1_TCPSocketAction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TCPSocketAction) + out := out.(*TCPSocketAction) + *out = *in + return nil + } +} + +func DeepCopy_v1_Taint(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Taint) + out := out.(*Taint) + *out = *in + out.TimeAdded = in.TimeAdded.DeepCopy() + return nil + } +} + +func DeepCopy_v1_Toleration(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Toleration) + out := out.(*Toleration) + *out = *in + if in.TolerationSeconds != nil { + in, out := &in.TolerationSeconds, &out.TolerationSeconds + *out = new(int64) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_Volume(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Volume) + out := out.(*Volume) + *out = *in + if err := DeepCopy_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_VolumeMount(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeMount) + out := out.(*VolumeMount) + *out = *in + return nil + } +} + +func DeepCopy_v1_VolumeProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeProjection) + out := out.(*VolumeProjection) + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretProjection) + if err := DeepCopy_v1_SecretProjection(*in, *out, c); err != nil { + return err + } + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIProjection) + if err := DeepCopy_v1_DownwardAPIProjection(*in, *out, c); err != nil { + return err + } + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapProjection) + if err := DeepCopy_v1_ConfigMapProjection(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_VolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeSource) + out := out.(*VolumeSource) + *out = *in + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + **out = **in 
+ } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(EmptyDirVolumeSource) + **out = **in + } + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + if in.GitRepo != nil { + in, out := &in.GitRepo, &out.GitRepo + *out = new(GitRepoVolumeSource) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretVolumeSource) + if err := DeepCopy_v1_SecretVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSVolumeSource) + **out = **in + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(ISCSIVolumeSource) + if err := DeepCopy_v1_ISCSIVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsVolumeSource) + **out = **in + } + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(PersistentVolumeClaimVolumeSource) + **out = **in + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + *out = new(RBDVolumeSource) + if err := DeepCopy_v1_RBDVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(FlexVolumeSource) + if err := DeepCopy_v1_FlexVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(CinderVolumeSource) + **out = **in + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(CephFSVolumeSource) + if err := DeepCopy_v1_CephFSVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(FlockerVolumeSource) + **out = **in + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIVolumeSource) + if err := DeepCopy_v1_DownwardAPIVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.FC != nil { + in, out := &in.FC, &out.FC + *out = new(FCVolumeSource) + if err := DeepCopy_v1_FCVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(AzureFileVolumeSource) + **out = **in + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapVolumeSource) + if err := DeepCopy_v1_ConfigMapVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + *out = new(QuobyteVolumeSource) + **out = **in + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + *out = new(AzureDiskVolumeSource) + if err := DeepCopy_v1_AzureDiskVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + *out = 
new(ProjectedVolumeSource) + if err := DeepCopy_v1_ProjectedVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOVolumeSource) + if err := DeepCopy_v1_ScaleIOVolumeSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_VsphereVirtualDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VsphereVirtualDiskVolumeSource) + out := out.(*VsphereVirtualDiskVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_v1_WeightedPodAffinityTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*WeightedPodAffinityTerm) + out := out.(*WeightedPodAffinityTerm) + *out = *in + if err := DeepCopy_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, c); err != nil { + return err + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.defaults.go new file mode 100644 index 000000000..121b39185 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/v1/zz_generated.defaults.go @@ -0,0 +1,631 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&ConfigMap{}, func(obj interface{}) { SetObjectDefaults_ConfigMap(obj.(*ConfigMap)) }) + scheme.AddTypeDefaultingFunc(&ConfigMapList{}, func(obj interface{}) { SetObjectDefaults_ConfigMapList(obj.(*ConfigMapList)) }) + scheme.AddTypeDefaultingFunc(&Endpoints{}, func(obj interface{}) { SetObjectDefaults_Endpoints(obj.(*Endpoints)) }) + scheme.AddTypeDefaultingFunc(&EndpointsList{}, func(obj interface{}) { SetObjectDefaults_EndpointsList(obj.(*EndpointsList)) }) + scheme.AddTypeDefaultingFunc(&LimitRange{}, func(obj interface{}) { SetObjectDefaults_LimitRange(obj.(*LimitRange)) }) + scheme.AddTypeDefaultingFunc(&LimitRangeList{}, func(obj interface{}) { SetObjectDefaults_LimitRangeList(obj.(*LimitRangeList)) }) + scheme.AddTypeDefaultingFunc(&Namespace{}, func(obj interface{}) { SetObjectDefaults_Namespace(obj.(*Namespace)) }) + scheme.AddTypeDefaultingFunc(&NamespaceList{}, func(obj interface{}) { SetObjectDefaults_NamespaceList(obj.(*NamespaceList)) }) + scheme.AddTypeDefaultingFunc(&Node{}, func(obj interface{}) { SetObjectDefaults_Node(obj.(*Node)) }) + scheme.AddTypeDefaultingFunc(&NodeList{}, func(obj interface{}) { SetObjectDefaults_NodeList(obj.(*NodeList)) }) + scheme.AddTypeDefaultingFunc(&PersistentVolume{}, func(obj interface{}) { SetObjectDefaults_PersistentVolume(obj.(*PersistentVolume)) }) + scheme.AddTypeDefaultingFunc(&PersistentVolumeClaim{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeClaim(obj.(*PersistentVolumeClaim)) }) + scheme.AddTypeDefaultingFunc(&PersistentVolumeClaimList{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeClaimList(obj.(*PersistentVolumeClaimList)) }) + scheme.AddTypeDefaultingFunc(&PersistentVolumeList{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeList(obj.(*PersistentVolumeList)) }) + scheme.AddTypeDefaultingFunc(&Pod{}, func(obj interface{}) { SetObjectDefaults_Pod(obj.(*Pod)) }) + scheme.AddTypeDefaultingFunc(&PodAttachOptions{}, func(obj interface{}) { SetObjectDefaults_PodAttachOptions(obj.(*PodAttachOptions)) }) + scheme.AddTypeDefaultingFunc(&PodExecOptions{}, func(obj interface{}) { SetObjectDefaults_PodExecOptions(obj.(*PodExecOptions)) }) + scheme.AddTypeDefaultingFunc(&PodList{}, func(obj interface{}) { SetObjectDefaults_PodList(obj.(*PodList)) }) + scheme.AddTypeDefaultingFunc(&PodTemplate{}, func(obj interface{}) { SetObjectDefaults_PodTemplate(obj.(*PodTemplate)) }) + scheme.AddTypeDefaultingFunc(&PodTemplateList{}, func(obj interface{}) { SetObjectDefaults_PodTemplateList(obj.(*PodTemplateList)) }) + scheme.AddTypeDefaultingFunc(&ReplicationController{}, func(obj interface{}) { SetObjectDefaults_ReplicationController(obj.(*ReplicationController)) }) + scheme.AddTypeDefaultingFunc(&ReplicationControllerList{}, func(obj interface{}) { SetObjectDefaults_ReplicationControllerList(obj.(*ReplicationControllerList)) }) + scheme.AddTypeDefaultingFunc(&ResourceQuota{}, func(obj interface{}) { SetObjectDefaults_ResourceQuota(obj.(*ResourceQuota)) }) + scheme.AddTypeDefaultingFunc(&ResourceQuotaList{}, func(obj interface{}) { SetObjectDefaults_ResourceQuotaList(obj.(*ResourceQuotaList)) }) + scheme.AddTypeDefaultingFunc(&Secret{}, func(obj interface{}) { SetObjectDefaults_Secret(obj.(*Secret)) }) + scheme.AddTypeDefaultingFunc(&SecretList{}, func(obj interface{}) { SetObjectDefaults_SecretList(obj.(*SecretList)) }) + scheme.AddTypeDefaultingFunc(&Service{}, func(obj interface{}) { 
SetObjectDefaults_Service(obj.(*Service)) }) + scheme.AddTypeDefaultingFunc(&ServiceList{}, func(obj interface{}) { SetObjectDefaults_ServiceList(obj.(*ServiceList)) }) + return nil +} + +func SetObjectDefaults_ConfigMap(in *ConfigMap) { + SetDefaults_ConfigMap(in) +} + +func SetObjectDefaults_ConfigMapList(in *ConfigMapList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ConfigMap(a) + } +} + +func SetObjectDefaults_Endpoints(in *Endpoints) { + SetDefaults_Endpoints(in) +} + +func SetObjectDefaults_EndpointsList(in *EndpointsList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Endpoints(a) + } +} + +func SetObjectDefaults_LimitRange(in *LimitRange) { + for i := range in.Spec.Limits { + a := &in.Spec.Limits[i] + SetDefaults_LimitRangeItem(a) + SetDefaults_ResourceList(&a.Max) + SetDefaults_ResourceList(&a.Min) + SetDefaults_ResourceList(&a.Default) + SetDefaults_ResourceList(&a.DefaultRequest) + SetDefaults_ResourceList(&a.MaxLimitRequestRatio) + } +} + +func SetObjectDefaults_LimitRangeList(in *LimitRangeList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_LimitRange(a) + } +} + +func SetObjectDefaults_Namespace(in *Namespace) { + SetDefaults_NamespaceStatus(&in.Status) +} + +func SetObjectDefaults_NamespaceList(in *NamespaceList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Namespace(a) + } +} + +func SetObjectDefaults_Node(in *Node) { + SetDefaults_Node(in) + SetDefaults_NodeStatus(&in.Status) + SetDefaults_ResourceList(&in.Status.Capacity) + SetDefaults_ResourceList(&in.Status.Allocatable) +} + +func SetObjectDefaults_NodeList(in *NodeList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Node(a) + } +} + +func SetObjectDefaults_PersistentVolume(in *PersistentVolume) { + SetDefaults_PersistentVolume(in) + SetDefaults_ResourceList(&in.Spec.Capacity) + if in.Spec.PersistentVolumeSource.RBD != nil { + SetDefaults_RBDVolumeSource(in.Spec.PersistentVolumeSource.RBD) + } + if in.Spec.PersistentVolumeSource.ISCSI != nil { + SetDefaults_ISCSIVolumeSource(in.Spec.PersistentVolumeSource.ISCSI) + } + if in.Spec.PersistentVolumeSource.AzureDisk != nil { + SetDefaults_AzureDiskVolumeSource(in.Spec.PersistentVolumeSource.AzureDisk) + } + if in.Spec.PersistentVolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(in.Spec.PersistentVolumeSource.ScaleIO) + } +} + +func SetObjectDefaults_PersistentVolumeClaim(in *PersistentVolumeClaim) { + SetDefaults_PersistentVolumeClaim(in) + SetDefaults_ResourceList(&in.Spec.Resources.Limits) + SetDefaults_ResourceList(&in.Spec.Resources.Requests) + SetDefaults_ResourceList(&in.Status.Capacity) +} + +func SetObjectDefaults_PersistentVolumeClaimList(in *PersistentVolumeClaimList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_PersistentVolumeClaim(a) + } +} + +func SetObjectDefaults_PersistentVolumeList(in *PersistentVolumeList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_PersistentVolume(a) + } +} + +func SetObjectDefaults_Pod(in *Pod) { + SetDefaults_Pod(in) + SetDefaults_PodSpec(&in.Spec) + for i := range in.Spec.Volumes { + a := &in.Spec.Volumes[i] + SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + 
SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.InitContainers { + a := &in.Spec.InitContainers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Containers { + a := &in.Spec.Containers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_PodAttachOptions(in *PodAttachOptions) { + SetDefaults_PodAttachOptions(in) +} + +func SetObjectDefaults_PodExecOptions(in *PodExecOptions) { + SetDefaults_PodExecOptions(in) +} + +func SetObjectDefaults_PodList(in *PodList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Pod(a) + } 
+} + +func SetObjectDefaults_PodTemplate(in *PodTemplate) { + SetDefaults_PodSpec(&in.Template.Spec) + for i := range in.Template.Spec.Volumes { + a := &in.Template.Spec.Volumes[i] + SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Template.Spec.InitContainers { + a := &in.Template.Spec.InitContainers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Template.Spec.Containers { + a := &in.Template.Spec.Containers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet 
!= nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_PodTemplateList(in *PodTemplateList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_PodTemplate(a) + } +} + +func SetObjectDefaults_ReplicationController(in *ReplicationController) { + SetDefaults_ReplicationController(in) + if in.Spec.Template != nil { + SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + 
SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + } +} + +func SetObjectDefaults_ReplicationControllerList(in *ReplicationControllerList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ReplicationController(a) + } +} + +func SetObjectDefaults_ResourceQuota(in *ResourceQuota) { + SetDefaults_ResourceList(&in.Spec.Hard) + SetDefaults_ResourceList(&in.Status.Hard) + SetDefaults_ResourceList(&in.Status.Used) +} + +func SetObjectDefaults_ResourceQuotaList(in *ResourceQuotaList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ResourceQuota(a) + } +} + +func SetObjectDefaults_Secret(in *Secret) { + SetDefaults_Secret(in) +} + +func SetObjectDefaults_SecretList(in *SecretList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Secret(a) + } +} + +func SetObjectDefaults_Service(in *Service) { + SetDefaults_ServiceSpec(&in.Spec) +} + +func SetObjectDefaults_ServiceList(in *ServiceList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Service(a) + } +} diff --git a/vendor/k8s.io/client-go/pkg/api/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/api/zz_generated.deepcopy.go new file mode 100644 index 000000000..c018bcc4e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/api/zz_generated.deepcopy.go @@ -0,0 +1,3527 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package api + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + fields "k8s.io/apimachinery/pkg/fields" + labels "k8s.io/apimachinery/pkg/labels" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AWSElasticBlockStoreVolumeSource, InType: reflect.TypeOf(&AWSElasticBlockStoreVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Affinity, InType: reflect.TypeOf(&Affinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AttachedVolume, InType: reflect.TypeOf(&AttachedVolume{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AvoidPods, InType: reflect.TypeOf(&AvoidPods{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AzureDiskVolumeSource, InType: reflect.TypeOf(&AzureDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_AzureFileVolumeSource, InType: reflect.TypeOf(&AzureFileVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Binding, InType: reflect.TypeOf(&Binding{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Capabilities, InType: reflect.TypeOf(&Capabilities{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_CephFSVolumeSource, InType: reflect.TypeOf(&CephFSVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_CinderVolumeSource, InType: reflect.TypeOf(&CinderVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ComponentCondition, InType: reflect.TypeOf(&ComponentCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ComponentStatus, InType: reflect.TypeOf(&ComponentStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ComponentStatusList, InType: reflect.TypeOf(&ComponentStatusList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMap, InType: reflect.TypeOf(&ConfigMap{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapEnvSource, InType: reflect.TypeOf(&ConfigMapEnvSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapKeySelector, InType: reflect.TypeOf(&ConfigMapKeySelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapList, InType: reflect.TypeOf(&ConfigMapList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapProjection, InType: reflect.TypeOf(&ConfigMapProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConfigMapVolumeSource, InType: reflect.TypeOf(&ConfigMapVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Container, InType: reflect.TypeOf(&Container{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerImage, InType: reflect.TypeOf(&ContainerImage{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerPort, InType: reflect.TypeOf(&ContainerPort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerState, InType: reflect.TypeOf(&ContainerState{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerStateRunning, InType: reflect.TypeOf(&ContainerStateRunning{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerStateTerminated, InType: reflect.TypeOf(&ContainerStateTerminated{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerStateWaiting, InType: reflect.TypeOf(&ContainerStateWaiting{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ContainerStatus, InType: reflect.TypeOf(&ContainerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ConversionError, InType: reflect.TypeOf(&ConversionError{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DaemonEndpoint, InType: reflect.TypeOf(&DaemonEndpoint{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DeleteOptions, InType: 
reflect.TypeOf(&DeleteOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DownwardAPIProjection, InType: reflect.TypeOf(&DownwardAPIProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DownwardAPIVolumeFile, InType: reflect.TypeOf(&DownwardAPIVolumeFile{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_DownwardAPIVolumeSource, InType: reflect.TypeOf(&DownwardAPIVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EmptyDirVolumeSource, InType: reflect.TypeOf(&EmptyDirVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointAddress, InType: reflect.TypeOf(&EndpointAddress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointPort, InType: reflect.TypeOf(&EndpointPort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointSubset, InType: reflect.TypeOf(&EndpointSubset{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Endpoints, InType: reflect.TypeOf(&Endpoints{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EndpointsList, InType: reflect.TypeOf(&EndpointsList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EnvFromSource, InType: reflect.TypeOf(&EnvFromSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EnvVar, InType: reflect.TypeOf(&EnvVar{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EnvVarSource, InType: reflect.TypeOf(&EnvVarSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Event, InType: reflect.TypeOf(&Event{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EventList, InType: reflect.TypeOf(&EventList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_EventSource, InType: reflect.TypeOf(&EventSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ExecAction, InType: reflect.TypeOf(&ExecAction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_FCVolumeSource, InType: reflect.TypeOf(&FCVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_FlexVolumeSource, InType: reflect.TypeOf(&FlexVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_FlockerVolumeSource, InType: reflect.TypeOf(&FlockerVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_GCEPersistentDiskVolumeSource, InType: reflect.TypeOf(&GCEPersistentDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_GitRepoVolumeSource, InType: reflect.TypeOf(&GitRepoVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_GlusterfsVolumeSource, InType: reflect.TypeOf(&GlusterfsVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_HTTPGetAction, InType: reflect.TypeOf(&HTTPGetAction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_HTTPHeader, InType: reflect.TypeOf(&HTTPHeader{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Handler, InType: reflect.TypeOf(&Handler{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_HostPathVolumeSource, InType: reflect.TypeOf(&HostPathVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ISCSIVolumeSource, InType: reflect.TypeOf(&ISCSIVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_KeyToPath, InType: reflect.TypeOf(&KeyToPath{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Lifecycle, InType: reflect.TypeOf(&Lifecycle{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LimitRange, InType: reflect.TypeOf(&LimitRange{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LimitRangeItem, InType: reflect.TypeOf(&LimitRangeItem{})}, + 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LimitRangeList, InType: reflect.TypeOf(&LimitRangeList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LimitRangeSpec, InType: reflect.TypeOf(&LimitRangeSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_List, InType: reflect.TypeOf(&List{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ListOptions, InType: reflect.TypeOf(&ListOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LoadBalancerIngress, InType: reflect.TypeOf(&LoadBalancerIngress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LoadBalancerStatus, InType: reflect.TypeOf(&LoadBalancerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_LocalObjectReference, InType: reflect.TypeOf(&LocalObjectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NFSVolumeSource, InType: reflect.TypeOf(&NFSVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Namespace, InType: reflect.TypeOf(&Namespace{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NamespaceList, InType: reflect.TypeOf(&NamespaceList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NamespaceSpec, InType: reflect.TypeOf(&NamespaceSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NamespaceStatus, InType: reflect.TypeOf(&NamespaceStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Node, InType: reflect.TypeOf(&Node{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeAddress, InType: reflect.TypeOf(&NodeAddress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeAffinity, InType: reflect.TypeOf(&NodeAffinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeCondition, InType: reflect.TypeOf(&NodeCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeDaemonEndpoints, InType: reflect.TypeOf(&NodeDaemonEndpoints{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeList, InType: reflect.TypeOf(&NodeList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeProxyOptions, InType: reflect.TypeOf(&NodeProxyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeResources, InType: reflect.TypeOf(&NodeResources{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSelector, InType: reflect.TypeOf(&NodeSelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSelectorRequirement, InType: reflect.TypeOf(&NodeSelectorRequirement{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSelectorTerm, InType: reflect.TypeOf(&NodeSelectorTerm{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSpec, InType: reflect.TypeOf(&NodeSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeStatus, InType: reflect.TypeOf(&NodeStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_NodeSystemInfo, InType: reflect.TypeOf(&NodeSystemInfo{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ObjectFieldSelector, InType: reflect.TypeOf(&ObjectFieldSelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ObjectMeta, InType: reflect.TypeOf(&ObjectMeta{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ObjectReference, InType: reflect.TypeOf(&ObjectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolume, InType: reflect.TypeOf(&PersistentVolume{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaim, InType: reflect.TypeOf(&PersistentVolumeClaim{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaimList, InType: 
reflect.TypeOf(&PersistentVolumeClaimList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaimSpec, InType: reflect.TypeOf(&PersistentVolumeClaimSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaimStatus, InType: reflect.TypeOf(&PersistentVolumeClaimStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeClaimVolumeSource, InType: reflect.TypeOf(&PersistentVolumeClaimVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeList, InType: reflect.TypeOf(&PersistentVolumeList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeSource, InType: reflect.TypeOf(&PersistentVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeSpec, InType: reflect.TypeOf(&PersistentVolumeSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PersistentVolumeStatus, InType: reflect.TypeOf(&PersistentVolumeStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PhotonPersistentDiskVolumeSource, InType: reflect.TypeOf(&PhotonPersistentDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Pod, InType: reflect.TypeOf(&Pod{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAffinity, InType: reflect.TypeOf(&PodAffinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAffinityTerm, InType: reflect.TypeOf(&PodAffinityTerm{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAntiAffinity, InType: reflect.TypeOf(&PodAntiAffinity{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodAttachOptions, InType: reflect.TypeOf(&PodAttachOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodCondition, InType: reflect.TypeOf(&PodCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodExecOptions, InType: reflect.TypeOf(&PodExecOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodList, InType: reflect.TypeOf(&PodList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodLogOptions, InType: reflect.TypeOf(&PodLogOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodPortForwardOptions, InType: reflect.TypeOf(&PodPortForwardOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodProxyOptions, InType: reflect.TypeOf(&PodProxyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodSecurityContext, InType: reflect.TypeOf(&PodSecurityContext{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodSignature, InType: reflect.TypeOf(&PodSignature{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodSpec, InType: reflect.TypeOf(&PodSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodStatus, InType: reflect.TypeOf(&PodStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodStatusResult, InType: reflect.TypeOf(&PodStatusResult{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodTemplate, InType: reflect.TypeOf(&PodTemplate{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodTemplateList, InType: reflect.TypeOf(&PodTemplateList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PodTemplateSpec, InType: reflect.TypeOf(&PodTemplateSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PortworxVolumeSource, InType: reflect.TypeOf(&PortworxVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Preconditions, InType: reflect.TypeOf(&Preconditions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PreferAvoidPodsEntry, InType: 
reflect.TypeOf(&PreferAvoidPodsEntry{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_PreferredSchedulingTerm, InType: reflect.TypeOf(&PreferredSchedulingTerm{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Probe, InType: reflect.TypeOf(&Probe{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ProjectedVolumeSource, InType: reflect.TypeOf(&ProjectedVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_QuobyteVolumeSource, InType: reflect.TypeOf(&QuobyteVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_RBDVolumeSource, InType: reflect.TypeOf(&RBDVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_RangeAllocation, InType: reflect.TypeOf(&RangeAllocation{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationController, InType: reflect.TypeOf(&ReplicationController{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationControllerCondition, InType: reflect.TypeOf(&ReplicationControllerCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationControllerList, InType: reflect.TypeOf(&ReplicationControllerList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationControllerSpec, InType: reflect.TypeOf(&ReplicationControllerSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ReplicationControllerStatus, InType: reflect.TypeOf(&ReplicationControllerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceFieldSelector, InType: reflect.TypeOf(&ResourceFieldSelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceQuota, InType: reflect.TypeOf(&ResourceQuota{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceQuotaList, InType: reflect.TypeOf(&ResourceQuotaList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceQuotaSpec, InType: reflect.TypeOf(&ResourceQuotaSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceQuotaStatus, InType: reflect.TypeOf(&ResourceQuotaStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ResourceRequirements, InType: reflect.TypeOf(&ResourceRequirements{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SELinuxOptions, InType: reflect.TypeOf(&SELinuxOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ScaleIOVolumeSource, InType: reflect.TypeOf(&ScaleIOVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Secret, InType: reflect.TypeOf(&Secret{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretEnvSource, InType: reflect.TypeOf(&SecretEnvSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretKeySelector, InType: reflect.TypeOf(&SecretKeySelector{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretList, InType: reflect.TypeOf(&SecretList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretProjection, InType: reflect.TypeOf(&SecretProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecretVolumeSource, InType: reflect.TypeOf(&SecretVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SecurityContext, InType: reflect.TypeOf(&SecurityContext{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_SerializedReference, InType: reflect.TypeOf(&SerializedReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Service, InType: reflect.TypeOf(&Service{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceAccount, InType: reflect.TypeOf(&ServiceAccount{})}, + conversion.GeneratedDeepCopyFunc{Fn: 
DeepCopy_api_ServiceAccountList, InType: reflect.TypeOf(&ServiceAccountList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceList, InType: reflect.TypeOf(&ServiceList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServicePort, InType: reflect.TypeOf(&ServicePort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceProxyOptions, InType: reflect.TypeOf(&ServiceProxyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceSpec, InType: reflect.TypeOf(&ServiceSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_ServiceStatus, InType: reflect.TypeOf(&ServiceStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Sysctl, InType: reflect.TypeOf(&Sysctl{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_TCPSocketAction, InType: reflect.TypeOf(&TCPSocketAction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Taint, InType: reflect.TypeOf(&Taint{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Toleration, InType: reflect.TypeOf(&Toleration{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_Volume, InType: reflect.TypeOf(&Volume{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VolumeMount, InType: reflect.TypeOf(&VolumeMount{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VolumeProjection, InType: reflect.TypeOf(&VolumeProjection{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VolumeSource, InType: reflect.TypeOf(&VolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_VsphereVirtualDiskVolumeSource, InType: reflect.TypeOf(&VsphereVirtualDiskVolumeSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_api_WeightedPodAffinityTerm, InType: reflect.TypeOf(&WeightedPodAffinityTerm{})}, + ) +} + +func DeepCopy_api_AWSElasticBlockStoreVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AWSElasticBlockStoreVolumeSource) + out := out.(*AWSElasticBlockStoreVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_Affinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Affinity) + out := out.(*Affinity) + *out = *in + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + *out = new(NodeAffinity) + if err := DeepCopy_api_NodeAffinity(*in, *out, c); err != nil { + return err + } + } + if in.PodAffinity != nil { + in, out := &in.PodAffinity, &out.PodAffinity + *out = new(PodAffinity) + if err := DeepCopy_api_PodAffinity(*in, *out, c); err != nil { + return err + } + } + if in.PodAntiAffinity != nil { + in, out := &in.PodAntiAffinity, &out.PodAntiAffinity + *out = new(PodAntiAffinity) + if err := DeepCopy_api_PodAntiAffinity(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_AttachedVolume(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AttachedVolume) + out := out.(*AttachedVolume) + *out = *in + return nil + } +} + +func DeepCopy_api_AvoidPods(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AvoidPods) + out := out.(*AvoidPods) + *out = *in + if in.PreferAvoidPods != nil { + in, out := &in.PreferAvoidPods, &out.PreferAvoidPods + *out = make([]PreferAvoidPodsEntry, len(*in)) + for i := range *in { + if err := DeepCopy_api_PreferAvoidPodsEntry(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_AzureDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := 
in.(*AzureDiskVolumeSource) + out := out.(*AzureDiskVolumeSource) + *out = *in + if in.CachingMode != nil { + in, out := &in.CachingMode, &out.CachingMode + *out = new(AzureDataDiskCachingMode) + **out = **in + } + if in.FSType != nil { + in, out := &in.FSType, &out.FSType + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_AzureFileVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*AzureFileVolumeSource) + out := out.(*AzureFileVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_Binding(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Binding) + out := out.(*Binding) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + return nil + } +} + +func DeepCopy_api_Capabilities(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Capabilities) + out := out.(*Capabilities) + *out = *in + if in.Add != nil { + in, out := &in.Add, &out.Add + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + if in.Drop != nil { + in, out := &in.Drop, &out.Drop + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_CephFSVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CephFSVolumeSource) + out := out.(*CephFSVolumeSource) + *out = *in + if in.Monitors != nil { + in, out := &in.Monitors, &out.Monitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +func DeepCopy_api_CinderVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CinderVolumeSource) + out := out.(*CinderVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_ComponentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ComponentCondition) + out := out.(*ComponentCondition) + *out = *in + return nil + } +} + +func DeepCopy_api_ComponentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ComponentStatus) + out := out.(*ComponentStatus) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ComponentCondition, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_ComponentStatusList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ComponentStatusList) + out := out.(*ComponentStatusList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ComponentStatus, len(*in)) + for i := range *in { + if err := DeepCopy_api_ComponentStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_ConfigMap(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMap) + out := out.(*ConfigMap) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string]string) + for 
key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_api_ConfigMapEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapEnvSource) + out := out.(*ConfigMapEnvSource) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_ConfigMapKeySelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapKeySelector) + out := out.(*ConfigMapKeySelector) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_ConfigMapList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapList) + out := out.(*ConfigMapList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConfigMap, len(*in)) + for i := range *in { + if err := DeepCopy_api_ConfigMap(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_ConfigMapProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapProjection) + out := out.(*ConfigMapProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_ConfigMapVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConfigMapVolumeSource) + out := out.(*ConfigMapVolumeSource) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_Container(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Container) + out := out.(*Container) + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]EnvFromSource, len(*in)) + for i := range *in { + if err := DeepCopy_api_EnvFromSource(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + for i := range *in { + if err := DeepCopy_api_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if err := DeepCopy_api_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { + return err + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMount, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := 
&in.LivenessProbe, &out.LivenessProbe + *out = new(Probe) + if err := DeepCopy_api_Probe(*in, *out, c); err != nil { + return err + } + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(Probe) + if err := DeepCopy_api_Probe(*in, *out, c); err != nil { + return err + } + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(Lifecycle) + if err := DeepCopy_api_Lifecycle(*in, *out, c); err != nil { + return err + } + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(SecurityContext) + if err := DeepCopy_api_SecurityContext(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_ContainerImage(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerImage) + out := out.(*ContainerImage) + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_ContainerPort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerPort) + out := out.(*ContainerPort) + *out = *in + return nil + } +} + +func DeepCopy_api_ContainerState(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerState) + out := out.(*ContainerState) + *out = *in + if in.Waiting != nil { + in, out := &in.Waiting, &out.Waiting + *out = new(ContainerStateWaiting) + **out = **in + } + if in.Running != nil { + in, out := &in.Running, &out.Running + *out = new(ContainerStateRunning) + if err := DeepCopy_api_ContainerStateRunning(*in, *out, c); err != nil { + return err + } + } + if in.Terminated != nil { + in, out := &in.Terminated, &out.Terminated + *out = new(ContainerStateTerminated) + if err := DeepCopy_api_ContainerStateTerminated(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_ContainerStateRunning(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStateRunning) + out := out.(*ContainerStateRunning) + *out = *in + out.StartedAt = in.StartedAt.DeepCopy() + return nil + } +} + +func DeepCopy_api_ContainerStateTerminated(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStateTerminated) + out := out.(*ContainerStateTerminated) + *out = *in + out.StartedAt = in.StartedAt.DeepCopy() + out.FinishedAt = in.FinishedAt.DeepCopy() + return nil + } +} + +func DeepCopy_api_ContainerStateWaiting(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStateWaiting) + out := out.(*ContainerStateWaiting) + *out = *in + return nil + } +} + +func DeepCopy_api_ContainerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ContainerStatus) + out := out.(*ContainerStatus) + *out = *in + if err := DeepCopy_api_ContainerState(&in.State, &out.State, c); err != nil { + return err + } + if err := DeepCopy_api_ContainerState(&in.LastTerminationState, &out.LastTerminationState, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_ConversionError(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ConversionError) + out := out.(*ConversionError) + *out = *in + // in.In is kind 'Interface' + if in.In != nil { + if newVal, err := c.DeepCopy(&in.In); err != nil { + return err + } else { + out.In = *newVal.(*interface{}) + } + } + // in.Out is kind 'Interface' + if 
in.Out != nil { + if newVal, err := c.DeepCopy(&in.Out); err != nil { + return err + } else { + out.Out = *newVal.(*interface{}) + } + } + return nil + } +} + +func DeepCopy_api_DaemonEndpoint(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonEndpoint) + out := out.(*DaemonEndpoint) + *out = *in + return nil + } +} + +func DeepCopy_api_DeleteOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeleteOptions) + out := out.(*DeleteOptions) + *out = *in + if in.GracePeriodSeconds != nil { + in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Preconditions != nil { + in, out := &in.Preconditions, &out.Preconditions + *out = new(Preconditions) + if err := DeepCopy_api_Preconditions(*in, *out, c); err != nil { + return err + } + } + if in.OrphanDependents != nil { + in, out := &in.OrphanDependents, &out.OrphanDependents + *out = new(bool) + **out = **in + } + if in.PropagationPolicy != nil { + in, out := &in.PropagationPolicy, &out.PropagationPolicy + *out = new(DeletionPropagation) + **out = **in + } + return nil + } +} + +func DeepCopy_api_DownwardAPIProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIProjection) + out := out.(*DownwardAPIProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + if err := DeepCopy_api_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_DownwardAPIVolumeFile(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIVolumeFile) + out := out.(*DownwardAPIVolumeFile) + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + **out = **in + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + if err := DeepCopy_api_ResourceFieldSelector(*in, *out, c); err != nil { + return err + } + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_api_DownwardAPIVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DownwardAPIVolumeSource) + out := out.(*DownwardAPIVolumeSource) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + if err := DeepCopy_api_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_api_EmptyDirVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EmptyDirVolumeSource) + out := out.(*EmptyDirVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_EndpointAddress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointAddress) + out := out.(*EndpointAddress) + *out = *in + if in.NodeName != nil { + in, out := &in.NodeName, &out.NodeName + *out = new(string) + **out = **in + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(ObjectReference) + **out = **in + } + return nil + } +} + +func DeepCopy_api_EndpointPort(in interface{}, out interface{}, 
c *conversion.Cloner) error { + { + in := in.(*EndpointPort) + out := out.(*EndpointPort) + *out = *in + return nil + } +} + +func DeepCopy_api_EndpointSubset(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointSubset) + out := out.(*EndpointSubset) + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + if err := DeepCopy_api_EndpointAddress(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.NotReadyAddresses != nil { + in, out := &in.NotReadyAddresses, &out.NotReadyAddresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + if err := DeepCopy_api_EndpointAddress(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_Endpoints(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Endpoints) + out := out.(*Endpoints) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Subsets != nil { + in, out := &in.Subsets, &out.Subsets + *out = make([]EndpointSubset, len(*in)) + for i := range *in { + if err := DeepCopy_api_EndpointSubset(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_EndpointsList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EndpointsList) + out := out.(*EndpointsList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Endpoints, len(*in)) + for i := range *in { + if err := DeepCopy_api_Endpoints(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_EnvFromSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvFromSource) + out := out.(*EnvFromSource) + *out = *in + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + *out = new(ConfigMapEnvSource) + if err := DeepCopy_api_ConfigMapEnvSource(*in, *out, c); err != nil { + return err + } + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretEnvSource) + if err := DeepCopy_api_SecretEnvSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_EnvVar(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvVar) + out := out.(*EnvVar) + *out = *in + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + *out = new(EnvVarSource) + if err := DeepCopy_api_EnvVarSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_EnvVarSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EnvVarSource) + out := out.(*EnvVarSource) + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + **out = **in + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + if err := DeepCopy_api_ResourceFieldSelector(*in, *out, c); err != nil { + return err + } + } + if in.ConfigMapKeyRef != nil { + in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef + *out = new(ConfigMapKeySelector) + if err := DeepCopy_api_ConfigMapKeySelector(*in, *out, c); err != 
nil { + return err + } + } + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(SecretKeySelector) + if err := DeepCopy_api_SecretKeySelector(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_Event(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Event) + out := out.(*Event) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + out.FirstTimestamp = in.FirstTimestamp.DeepCopy() + out.LastTimestamp = in.LastTimestamp.DeepCopy() + return nil + } +} + +func DeepCopy_api_EventList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EventList) + out := out.(*EventList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + if err := DeepCopy_api_Event(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_EventSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*EventSource) + out := out.(*EventSource) + *out = *in + return nil + } +} + +func DeepCopy_api_ExecAction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ExecAction) + out := out.(*ExecAction) + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_FCVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FCVolumeSource) + out := out.(*FCVolumeSource) + *out = *in + if in.TargetWWNs != nil { + in, out := &in.TargetWWNs, &out.TargetWWNs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_api_FlexVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FlexVolumeSource) + out := out.(*FlexVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_api_FlockerVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FlockerVolumeSource) + out := out.(*FlockerVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_GCEPersistentDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GCEPersistentDiskVolumeSource) + out := out.(*GCEPersistentDiskVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_GitRepoVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GitRepoVolumeSource) + out := out.(*GitRepoVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_GlusterfsVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*GlusterfsVolumeSource) + out := out.(*GlusterfsVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_HTTPGetAction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPGetAction) + out := out.(*HTTPGetAction) + *out = *in + if in.HTTPHeaders != nil { + in, out := &in.HTTPHeaders, 
&out.HTTPHeaders + *out = make([]HTTPHeader, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_HTTPHeader(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPHeader) + out := out.(*HTTPHeader) + *out = *in + return nil + } +} + +func DeepCopy_api_Handler(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Handler) + out := out.(*Handler) + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecAction) + if err := DeepCopy_api_ExecAction(*in, *out, c); err != nil { + return err + } + } + if in.HTTPGet != nil { + in, out := &in.HTTPGet, &out.HTTPGet + *out = new(HTTPGetAction) + if err := DeepCopy_api_HTTPGetAction(*in, *out, c); err != nil { + return err + } + } + if in.TCPSocket != nil { + in, out := &in.TCPSocket, &out.TCPSocket + *out = new(TCPSocketAction) + **out = **in + } + return nil + } +} + +func DeepCopy_api_HostPathVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HostPathVolumeSource) + out := out.(*HostPathVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_ISCSIVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ISCSIVolumeSource) + out := out.(*ISCSIVolumeSource) + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_KeyToPath(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*KeyToPath) + out := out.(*KeyToPath) + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_api_Lifecycle(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Lifecycle) + out := out.(*Lifecycle) + *out = *in + if in.PostStart != nil { + in, out := &in.PostStart, &out.PostStart + *out = new(Handler) + if err := DeepCopy_api_Handler(*in, *out, c); err != nil { + return err + } + } + if in.PreStop != nil { + in, out := &in.PreStop, &out.PreStop + *out = new(Handler) + if err := DeepCopy_api_Handler(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_LimitRange(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRange) + out := out.(*LimitRange) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_LimitRangeSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_LimitRangeItem(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRangeItem) + out := out.(*LimitRangeItem) + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.DefaultRequest != nil { + in, out := &in.DefaultRequest, &out.DefaultRequest + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.MaxLimitRequestRatio != nil { + in, out := 
&in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +func DeepCopy_api_LimitRangeList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRangeList) + out := out.(*LimitRangeList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LimitRange, len(*in)) + for i := range *in { + if err := DeepCopy_api_LimitRange(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_LimitRangeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LimitRangeSpec) + out := out.(*LimitRangeSpec) + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make([]LimitRangeItem, len(*in)) + for i := range *in { + if err := DeepCopy_api_LimitRangeItem(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_List(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*List) + out := out.(*List) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*runtime.Object) + } + } + } + return nil + } +} + +func DeepCopy_api_ListOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ListOptions) + out := out.(*ListOptions) + *out = *in + // in.LabelSelector is kind 'Interface' + if in.LabelSelector != nil { + if newVal, err := c.DeepCopy(&in.LabelSelector); err != nil { + return err + } else { + out.LabelSelector = *newVal.(*labels.Selector) + } + } + // in.FieldSelector is kind 'Interface' + if in.FieldSelector != nil { + if newVal, err := c.DeepCopy(&in.FieldSelector); err != nil { + return err + } else { + out.FieldSelector = *newVal.(*fields.Selector) + } + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + return nil + } +} + +func DeepCopy_api_LoadBalancerIngress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LoadBalancerIngress) + out := out.(*LoadBalancerIngress) + *out = *in + return nil + } +} + +func DeepCopy_api_LoadBalancerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LoadBalancerStatus) + out := out.(*LoadBalancerStatus) + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]LoadBalancerIngress, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_LocalObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LocalObjectReference) + out := out.(*LocalObjectReference) + *out = *in + return nil + } +} + +func DeepCopy_api_NFSVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NFSVolumeSource) + out := out.(*NFSVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_Namespace(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Namespace) + out := out.(*Namespace) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_NamespaceSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + 
+func DeepCopy_api_NamespaceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NamespaceList) + out := out.(*NamespaceList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Namespace, len(*in)) + for i := range *in { + if err := DeepCopy_api_Namespace(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_NamespaceSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NamespaceSpec) + out := out.(*NamespaceSpec) + *out = *in + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]FinalizerName, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_NamespaceStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NamespaceStatus) + out := out.(*NamespaceStatus) + *out = *in + return nil + } +} + +func DeepCopy_api_Node(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Node) + out := out.(*Node) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_NodeSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_NodeStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_NodeAddress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeAddress) + out := out.(*NodeAddress) + *out = *in + return nil + } +} + +func DeepCopy_api_NodeAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeAffinity) + out := out.(*NodeAffinity) + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = new(NodeSelector) + if err := DeepCopy_api_NodeSelector(*in, *out, c); err != nil { + return err + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]PreferredSchedulingTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_PreferredSchedulingTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_NodeCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeCondition) + out := out.(*NodeCondition) + *out = *in + out.LastHeartbeatTime = in.LastHeartbeatTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_api_NodeDaemonEndpoints(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeDaemonEndpoints) + out := out.(*NodeDaemonEndpoints) + *out = *in + return nil + } +} + +func DeepCopy_api_NodeList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeList) + out := out.(*NodeList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Node, len(*in)) + for i := range *in { + if err := DeepCopy_api_Node(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_NodeProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeProxyOptions) + out := out.(*NodeProxyOptions) + *out = *in 
+ return nil + } +} + +func DeepCopy_api_NodeResources(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeResources) + out := out.(*NodeResources) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +func DeepCopy_api_NodeSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSelector) + out := out.(*NodeSelector) + *out = *in + if in.NodeSelectorTerms != nil { + in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms + *out = make([]NodeSelectorTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_NodeSelectorTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_NodeSelectorRequirement(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSelectorRequirement) + out := out.(*NodeSelectorRequirement) + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_NodeSelectorTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSelectorTerm) + out := out.(*NodeSelectorTerm) + *out = *in + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]NodeSelectorRequirement, len(*in)) + for i := range *in { + if err := DeepCopy_api_NodeSelectorRequirement(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_NodeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeSpec) + out := out.(*NodeSpec) + *out = *in + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]Taint, len(*in)) + for i := range *in { + if err := DeepCopy_api_Taint(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_NodeStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NodeStatus) + out := out.(*NodeStatus) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]NodeCondition, len(*in)) + for i := range *in { + if err := DeepCopy_api_NodeCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]NodeAddress, len(*in)) + copy(*out, *in) + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ContainerImage, len(*in)) + for i := range *in { + if err := DeepCopy_api_ContainerImage(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.VolumesInUse != nil { + in, out := &in.VolumesInUse, &out.VolumesInUse + *out = make([]UniqueVolumeName, len(*in)) + copy(*out, *in) + } + if in.VolumesAttached != nil { + in, out := &in.VolumesAttached, &out.VolumesAttached + *out = make([]AttachedVolume, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_NodeSystemInfo(in interface{}, out interface{}, c 
*conversion.Cloner) error { + { + in := in.(*NodeSystemInfo) + out := out.(*NodeSystemInfo) + *out = *in + return nil + } +} + +func DeepCopy_api_ObjectFieldSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectFieldSelector) + out := out.(*ObjectFieldSelector) + *out = *in + return nil + } +} + +func DeepCopy_api_ObjectMeta(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMeta) + out := out.(*ObjectMeta) + *out = *in + out.CreationTimestamp = in.CreationTimestamp.DeepCopy() + if in.DeletionTimestamp != nil { + in, out := &in.DeletionTimestamp, &out.DeletionTimestamp + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + if in.DeletionGracePeriodSeconds != nil { + in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]v1.OwnerReference, len(*in)) + for i := range *in { + if newVal, err := c.DeepCopy(&(*in)[i]); err != nil { + return err + } else { + (*out)[i] = *newVal.(*v1.OwnerReference) + } + } + } + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_ObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectReference) + out := out.(*ObjectReference) + *out = *in + return nil + } +} + +func DeepCopy_api_PersistentVolume(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolume) + out := out.(*PersistentVolume) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PersistentVolumeSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_PersistentVolumeClaim(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaim) + out := out.(*PersistentVolumeClaim) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_PersistentVolumeClaimStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_PersistentVolumeClaimList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimList) + out := out.(*PersistentVolumeClaimList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := DeepCopy_api_PersistentVolumeClaim(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_PersistentVolumeClaimSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimSpec) + out := out.(*PersistentVolumeClaimSpec) + *out = *in + if in.AccessModes != nil { + in, out := 
&in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := DeepCopy_api_ResourceRequirements(&in.Resources, &out.Resources, c); err != nil { + return err + } + if in.StorageClassName != nil { + in, out := &in.StorageClassName, &out.StorageClassName + *out = new(string) + **out = **in + } + return nil + } +} + +func DeepCopy_api_PersistentVolumeClaimStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimStatus) + out := out.(*PersistentVolumeClaimStatus) + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +func DeepCopy_api_PersistentVolumeClaimVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeClaimVolumeSource) + out := out.(*PersistentVolumeClaimVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_PersistentVolumeList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeList) + out := out.(*PersistentVolumeList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolume, len(*in)) + for i := range *in { + if err := DeepCopy_api_PersistentVolume(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_PersistentVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeSource) + out := out.(*PersistentVolumeSource) + *out = *in + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + **out = **in + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsVolumeSource) + **out = **in + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSVolumeSource) + **out = **in + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + *out = new(RBDVolumeSource) + if err := DeepCopy_api_RBDVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + *out = new(QuobyteVolumeSource) + **out = **in + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(ISCSIVolumeSource) + if err := DeepCopy_api_ISCSIVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(FlexVolumeSource) + if err := DeepCopy_api_FlexVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(CinderVolumeSource) + **out = **in + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(CephFSVolumeSource) + 
if err := DeepCopy_api_CephFSVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.FC != nil { + in, out := &in.FC, &out.FC + *out = new(FCVolumeSource) + if err := DeepCopy_api_FCVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(FlockerVolumeSource) + **out = **in + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(AzureFileVolumeSource) + **out = **in + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + *out = new(AzureDiskVolumeSource) + if err := DeepCopy_api_AzureDiskVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOVolumeSource) + if err := DeepCopy_api_ScaleIOVolumeSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_PersistentVolumeSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeSpec) + out := out.(*PersistentVolumeSpec) + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if err := DeepCopy_api_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, c); err != nil { + return err + } + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.ClaimRef != nil { + in, out := &in.ClaimRef, &out.ClaimRef + *out = new(ObjectReference) + **out = **in + } + return nil + } +} + +func DeepCopy_api_PersistentVolumeStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PersistentVolumeStatus) + out := out.(*PersistentVolumeStatus) + *out = *in + return nil + } +} + +func DeepCopy_api_PhotonPersistentDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PhotonPersistentDiskVolumeSource) + out := out.(*PhotonPersistentDiskVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_Pod(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Pod) + out := out.(*Pod) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PodSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_PodStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_PodAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAffinity) + out := out.(*PodAffinity) + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + if err := 
DeepCopy_api_PodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_PodAffinityTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAffinityTerm) + out := out.(*PodAffinityTerm) + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_PodAntiAffinity(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAntiAffinity) + out := out.(*PodAntiAffinity) + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_PodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + if err := DeepCopy_api_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_PodAttachOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodAttachOptions) + out := out.(*PodAttachOptions) + *out = *in + return nil + } +} + +func DeepCopy_api_PodCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodCondition) + out := out.(*PodCondition) + *out = *in + out.LastProbeTime = in.LastProbeTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_api_PodExecOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodExecOptions) + out := out.(*PodExecOptions) + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_PodList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodList) + out := out.(*PodList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Pod, len(*in)) + for i := range *in { + if err := DeepCopy_api_Pod(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_PodLogOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodLogOptions) + out := out.(*PodLogOptions) + *out = *in + if in.SinceSeconds != nil { + in, out := &in.SinceSeconds, &out.SinceSeconds + *out = new(int64) + **out = **in + } + if in.SinceTime != nil { + in, out := &in.SinceTime, &out.SinceTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + 
if in.TailLines != nil { + in, out := &in.TailLines, &out.TailLines + *out = new(int64) + **out = **in + } + if in.LimitBytes != nil { + in, out := &in.LimitBytes, &out.LimitBytes + *out = new(int64) + **out = **in + } + return nil + } +} + +func DeepCopy_api_PodPortForwardOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPortForwardOptions) + out := out.(*PodPortForwardOptions) + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]int32, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_PodProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodProxyOptions) + out := out.(*PodProxyOptions) + *out = *in + return nil + } +} + +func DeepCopy_api_PodSecurityContext(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityContext) + out := out.(*PodSecurityContext) + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + **out = **in + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(int64) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.SupplementalGroups != nil { + in, out := &in.SupplementalGroups, &out.SupplementalGroups + *out = make([]int64, len(*in)) + copy(*out, *in) + } + if in.FSGroup != nil { + in, out := &in.FSGroup, &out.FSGroup + *out = new(int64) + **out = **in + } + return nil + } +} + +func DeepCopy_api_PodSignature(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSignature) + out := out.(*PodSignature) + *out = *in + if in.PodController != nil { + in, out := &in.PodController, &out.PodController + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.OwnerReference) + } + } + return nil + } +} + +func DeepCopy_api_PodSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSpec) + out := out.(*PodSpec) + *out = *in + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]Volume, len(*in)) + for i := range *in { + if err := DeepCopy_api_Volume(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]Container, len(*in)) + for i := range *in { + if err := DeepCopy_api_Container(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]Container, len(*in)) + for i := range *in { + if err := DeepCopy_api_Container(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, 
&out.SecurityContext + *out = new(PodSecurityContext) + if err := DeepCopy_api_PodSecurityContext(*in, *out, c); err != nil { + return err + } + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(Affinity) + if err := DeepCopy_api_Affinity(*in, *out, c); err != nil { + return err + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]Toleration, len(*in)) + for i := range *in { + if err := DeepCopy_api_Toleration(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_PodStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodStatus) + out := out.(*PodStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PodCondition, len(*in)) + for i := range *in { + if err := DeepCopy_api_PodCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + if in.InitContainerStatuses != nil { + in, out := &in.InitContainerStatuses, &out.InitContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + if err := DeepCopy_api_ContainerStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.ContainerStatuses != nil { + in, out := &in.ContainerStatuses, &out.ContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + if err := DeepCopy_api_ContainerStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_PodStatusResult(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodStatusResult) + out := out.(*PodStatusResult) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PodStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_PodTemplate(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodTemplate) + out := out.(*PodTemplate) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_PodTemplateList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodTemplateList) + out := out.(*PodTemplateList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodTemplate, len(*in)) + for i := range *in { + if err := DeepCopy_api_PodTemplate(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_PodTemplateSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodTemplateSpec) + out := out.(*PodTemplateSpec) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_PodSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func 
DeepCopy_api_PortworxVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PortworxVolumeSource) + out := out.(*PortworxVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_Preconditions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Preconditions) + out := out.(*Preconditions) + *out = *in + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(types.UID) + **out = **in + } + return nil + } +} + +func DeepCopy_api_PreferAvoidPodsEntry(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PreferAvoidPodsEntry) + out := out.(*PreferAvoidPodsEntry) + *out = *in + if err := DeepCopy_api_PodSignature(&in.PodSignature, &out.PodSignature, c); err != nil { + return err + } + out.EvictionTime = in.EvictionTime.DeepCopy() + return nil + } +} + +func DeepCopy_api_PreferredSchedulingTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PreferredSchedulingTerm) + out := out.(*PreferredSchedulingTerm) + *out = *in + if err := DeepCopy_api_NodeSelectorTerm(&in.Preference, &out.Preference, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_Probe(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Probe) + out := out.(*Probe) + *out = *in + if err := DeepCopy_api_Handler(&in.Handler, &out.Handler, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_ProjectedVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ProjectedVolumeSource) + out := out.(*ProjectedVolumeSource) + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]VolumeProjection, len(*in)) + for i := range *in { + if err := DeepCopy_api_VolumeProjection(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_api_QuobyteVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*QuobyteVolumeSource) + out := out.(*QuobyteVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_RBDVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RBDVolumeSource) + out := out.(*RBDVolumeSource) + *out = *in + if in.CephMonitors != nil { + in, out := &in.CephMonitors, &out.CephMonitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +func DeepCopy_api_RangeAllocation(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RangeAllocation) + out := out.(*RangeAllocation) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_ReplicationController(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationController) + out := out.(*ReplicationController) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_ReplicationControllerSpec(&in.Spec, &out.Spec, c); err != 
nil { + return err + } + if err := DeepCopy_api_ReplicationControllerStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_ReplicationControllerCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerCondition) + out := out.(*ReplicationControllerCondition) + *out = *in + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_api_ReplicationControllerList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerList) + out := out.(*ReplicationControllerList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicationController, len(*in)) + for i := range *in { + if err := DeepCopy_api_ReplicationController(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_ReplicationControllerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerSpec) + out := out.(*ReplicationControllerSpec) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(PodTemplateSpec) + if err := DeepCopy_api_PodTemplateSpec(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_ReplicationControllerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerStatus) + out := out.(*ReplicationControllerStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicationControllerCondition, len(*in)) + for i := range *in { + if err := DeepCopy_api_ReplicationControllerCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_ResourceFieldSelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceFieldSelector) + out := out.(*ResourceFieldSelector) + *out = *in + out.Divisor = in.Divisor.DeepCopy() + return nil + } +} + +func DeepCopy_api_ResourceQuota(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuota) + out := out.(*ResourceQuota) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_ResourceQuotaSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_ResourceQuotaStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_ResourceQuotaList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuotaList) + out := out.(*ResourceQuotaList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceQuota, len(*in)) + for i := range *in { + if err := DeepCopy_api_ResourceQuota(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_ResourceQuotaSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuotaSpec) + out := out.(*ResourceQuotaSpec) + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = 
val.DeepCopy() + } + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ResourceQuotaScope, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_ResourceQuotaStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceQuotaStatus) + out := out.(*ResourceQuotaStatus) + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Used != nil { + in, out := &in.Used, &out.Used + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +func DeepCopy_api_ResourceRequirements(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceRequirements) + out := out.(*ResourceRequirements) + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(ResourceList) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} + +func DeepCopy_api_SELinuxOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SELinuxOptions) + out := out.(*SELinuxOptions) + *out = *in + return nil + } +} + +func DeepCopy_api_ScaleIOVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleIOVolumeSource) + out := out.(*ScaleIOVolumeSource) + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return nil + } +} + +func DeepCopy_api_Secret(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Secret) + out := out.(*Secret) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string][]byte) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*[]byte) + } + } + } + return nil + } +} + +func DeepCopy_api_SecretEnvSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretEnvSource) + out := out.(*SecretEnvSource) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_SecretKeySelector(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretKeySelector) + out := out.(*SecretKeySelector) + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_SecretList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretList) + out := out.(*SecretList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Secret, len(*in)) + for i := range *in { + if err := DeepCopy_api_Secret(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_SecretProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretProjection) + out := out.(*SecretProjection) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out 
= make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_SecretVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecretVolumeSource) + out := out.(*SecretVolumeSource) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + if err := DeepCopy_api_KeyToPath(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_SecurityContext(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SecurityContext) + out := out.(*SecurityContext) + *out = *in + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = new(Capabilities) + if err := DeepCopy_api_Capabilities(*in, *out, c); err != nil { + return err + } + } + if in.Privileged != nil { + in, out := &in.Privileged, &out.Privileged + *out = new(bool) + **out = **in + } + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + **out = **in + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(int64) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.ReadOnlyRootFilesystem != nil { + in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_SerializedReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SerializedReference) + out := out.(*SerializedReference) + *out = *in + return nil + } +} + +func DeepCopy_api_Service(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Service) + out := out.(*Service) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_api_ServiceSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_api_ServiceStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_ServiceAccount(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceAccount) + out := out.(*ServiceAccount) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + return nil + } +} + +func DeepCopy_api_ServiceAccountList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := 
in.(*ServiceAccountList) + out := out.(*ServiceAccountList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceAccount, len(*in)) + for i := range *in { + if err := DeepCopy_api_ServiceAccount(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_ServiceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceList) + out := out.(*ServiceList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + if err := DeepCopy_api_Service(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_api_ServicePort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServicePort) + out := out.(*ServicePort) + *out = *in + return nil + } +} + +func DeepCopy_api_ServiceProxyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceProxyOptions) + out := out.(*ServiceProxyOptions) + *out = *in + return nil + } +} + +func DeepCopy_api_ServiceSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceSpec) + out := out.(*ServiceSpec) + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ServicePort, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExternalIPs != nil { + in, out := &in.ExternalIPs, &out.ExternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LoadBalancerSourceRanges != nil { + in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_api_ServiceStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ServiceStatus) + out := out.(*ServiceStatus) + *out = *in + if err := DeepCopy_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_Sysctl(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Sysctl) + out := out.(*Sysctl) + *out = *in + return nil + } +} + +func DeepCopy_api_TCPSocketAction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TCPSocketAction) + out := out.(*TCPSocketAction) + *out = *in + return nil + } +} + +func DeepCopy_api_Taint(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Taint) + out := out.(*Taint) + *out = *in + out.TimeAdded = in.TimeAdded.DeepCopy() + return nil + } +} + +func DeepCopy_api_Toleration(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Toleration) + out := out.(*Toleration) + *out = *in + if in.TolerationSeconds != nil { + in, out := &in.TolerationSeconds, &out.TolerationSeconds + *out = new(int64) + **out = **in + } + return nil + } +} + +func DeepCopy_api_Volume(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Volume) + out := out.(*Volume) + *out = *in + if err := DeepCopy_api_VolumeSource(&in.VolumeSource, &out.VolumeSource, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_api_VolumeMount(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeMount) + out := out.(*VolumeMount) + *out = *in + return nil 
+ } +} + +func DeepCopy_api_VolumeProjection(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeProjection) + out := out.(*VolumeProjection) + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretProjection) + if err := DeepCopy_api_SecretProjection(*in, *out, c); err != nil { + return err + } + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIProjection) + if err := DeepCopy_api_DownwardAPIProjection(*in, *out, c); err != nil { + return err + } + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapProjection) + if err := DeepCopy_api_ConfigMapProjection(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_VolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VolumeSource) + out := out.(*VolumeSource) + *out = *in + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + **out = **in + } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(EmptyDirVolumeSource) + **out = **in + } + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + if in.GitRepo != nil { + in, out := &in.GitRepo, &out.GitRepo + *out = new(GitRepoVolumeSource) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretVolumeSource) + if err := DeepCopy_api_SecretVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSVolumeSource) + **out = **in + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(ISCSIVolumeSource) + if err := DeepCopy_api_ISCSIVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsVolumeSource) + **out = **in + } + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(PersistentVolumeClaimVolumeSource) + **out = **in + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + *out = new(RBDVolumeSource) + if err := DeepCopy_api_RBDVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + *out = new(QuobyteVolumeSource) + **out = **in + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(FlexVolumeSource) + if err := DeepCopy_api_FlexVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(CinderVolumeSource) + **out = **in + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(CephFSVolumeSource) + if err := DeepCopy_api_CephFSVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(FlockerVolumeSource) + **out = **in + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIVolumeSource) + if err := DeepCopy_api_DownwardAPIVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.FC != nil { 
+ in, out := &in.FC, &out.FC + *out = new(FCVolumeSource) + if err := DeepCopy_api_FCVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(AzureFileVolumeSource) + **out = **in + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapVolumeSource) + if err := DeepCopy_api_ConfigMapVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + *out = new(AzureDiskVolumeSource) + if err := DeepCopy_api_AzureDiskVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + *out = new(ProjectedVolumeSource) + if err := DeepCopy_api_ProjectedVolumeSource(*in, *out, c); err != nil { + return err + } + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOVolumeSource) + if err := DeepCopy_api_ScaleIOVolumeSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_api_VsphereVirtualDiskVolumeSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*VsphereVirtualDiskVolumeSource) + out := out.(*VsphereVirtualDiskVolumeSource) + *out = *in + return nil + } +} + +func DeepCopy_api_WeightedPodAffinityTerm(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*WeightedPodAffinityTerm) + out := out.(*WeightedPodAffinityTerm) + *out = *in + if err := DeepCopy_api_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, c); err != nil { + return err + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/OWNERS b/vendor/k8s.io/client-go/pkg/apis/apps/OWNERS new file mode 100755 index 000000000..1cdc56eec --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/OWNERS @@ -0,0 +1,21 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- deads2k +- caesarxuchao +- bprashanth +- pmorie +- sttts +- saad-ali +- ncdc +- timstclair +- timothysc +- dims +- errordeveloper +- mml +- m1093782566 +- mbohlool +- david-mcmahon +- kevin-wangzefeng +- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/doc.go b/vendor/k8s.io/client-go/pkg/apis/apps/doc.go new file mode 100644 index 000000000..d27cee51c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apps diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/install/install.go b/vendor/k8s.io/client-go/pkg/apis/apps/install/install.go new file mode 100644 index 000000000..ca50f3ea4 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/install/install.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the apps API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/apps" + "k8s.io/client-go/pkg/apis/apps/v1beta1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: apps.GroupName, + VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/apps", + AddInternalObjectsToScheme: apps.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/register.go b/vendor/k8s.io/client-go/pkg/apis/apps/register.go new file mode 100644 index 000000000..d1d3bab26 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apps + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/pkg/apis/extensions" +) + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// GroupName is the group name use in this package +const GroupName = "apps" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + // TODO this will get cleaned up with the scheme types are fixed + scheme.AddKnownTypes(SchemeGroupVersion, + &extensions.Deployment{}, + &extensions.DeploymentList{}, + &extensions.DeploymentRollback{}, + &extensions.Scale{}, + &StatefulSet{}, + &StatefulSetList{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/types.go b/vendor/k8s.io/client-go/pkg/apis/apps/types.go new file mode 100644 index 000000000..cd5ce8284 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/types.go @@ -0,0 +1,103 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apps + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api" +) + +// +genclient=true + +// StatefulSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. +type StatefulSet struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired identities of pods in this set. + // +optional + Spec StatefulSetSpec + + // Status is the current status of Pods in this StatefulSet. This data + // may be out of date by some window of time. + // +optional + Status StatefulSetStatus +} + +// A StatefulSetSpec is the specification of a StatefulSet. +type StatefulSetSpec struct { + // Replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + // +optional + Replicas int32 + + // Selector is a label query over pods that should match the replica count. + // If empty, defaulted to labels on the pod template. 
+ // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector *metav1.LabelSelector + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the StatefulSet + // will fulfill this Template, but have a unique identity from the rest + // of the StatefulSet. + Template api.PodTemplateSpec + + // VolumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + // +optional + VolumeClaimTemplates []api.PersistentVolumeClaim + + // ServiceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. + ServiceName string +} + +// StatefulSetStatus represents the current state of a StatefulSet. +type StatefulSetStatus struct { + // most recent generation observed by this StatefulSet. + // +optional + ObservedGeneration *int64 + + // Replicas is the number of actual replicas. + Replicas int32 +} + +// StatefulSetList is a collection of StatefulSets. +type StatefulSetList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + Items []StatefulSet +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/conversion.go new file mode 100644 index 000000000..96d3330f8 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/conversion.go @@ -0,0 +1,297 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/pkg/apis/apps" + "k8s.io/client-go/pkg/apis/extensions" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions to handle the *int32 -> int32 + // conversion. A pointer is useful in the versioned type so we can default + // it, but a plain int32 is more convenient in the internal type. These + // functions are the same as the autogenerated ones in every other way. 
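+ //
+ // As a rough sketch of that pattern (using the Replicas field handled by the
+ // StatefulSetSpec conversions below):
+ //
+ //	// versioned -> internal: a nil pointer simply leaves the internal zero value
+ //	if in.Replicas != nil {
+ //		out.Replicas = *in.Replicas
+ //	}
+ //	// internal -> versioned: always allocate a pointer so the value round-trips
+ //	out.Replicas = new(int32)
+ //	*out.Replicas = in.Replicas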
+ err := scheme.AddConversionFuncs( + Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec, + Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec, + // extensions + // TODO: below conversions should be dropped in favor of auto-generated + // ones, see https://github.com/kubernetes/kubernetes/issues/39865 + Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, + Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, + Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, + Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, + Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy, + Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy, + Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, + Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, + ) + if err != nil { + return err + } + + // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. + err = scheme.AddFieldLabelConversionFunc("apps/v1beta1", "StatefulSet", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", "metadata.namespace", "status.successful": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported for StatefulSet: %s", label) + } + }) + if err != nil { + return err + } + err = api.Scheme.AddFieldLabelConversionFunc("apps/v1beta1", "Deployment", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", "metadata.namespace": + return label, value, nil + default: + return "", "", fmt.Errorf("field label %q not supported for Deployment", label) + } + }) + if err != nil { + return err + } + + return nil +} + +func Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error { + if in.Replicas != nil { + out.Replicas = *in.Replicas + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + if err := s.Convert(*in, *out, 0); err != nil { + return err + } + } else { + out.Selector = nil + } + if err := s.Convert(&in.Template, &out.Template, 0); err != nil { + return err + } + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]api.PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { + return err + } + } + } else { + out.VolumeClaimTemplates = nil + } + out.ServiceName = in.ServiceName + return nil +} + +func Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.StatefulSetSpec, out *StatefulSetSpec, s conversion.Scope) error { + out.Replicas = new(int32) + *out.Replicas = in.Replicas + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + if err := s.Convert(*in, *out, 0); err != nil { + return err + } + } else { + out.Selector = nil + } + if err := s.Convert(&in.Template, &out.Template, 0); err != nil { + return err + } + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]v1.PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { + return err + } + } + } else { + out.VolumeClaimTemplates = nil + } + out.ServiceName = in.ServiceName + return nil +} + +func Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out 
*ScaleStatus, s conversion.Scope) error { + out.Replicas = int32(in.Replicas) + + out.Selector = nil + out.TargetSelector = "" + if in.Selector != nil { + if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { + out.Selector = in.Selector.MatchLabels + } + + selector, err := metav1.LabelSelectorAsSelector(in.Selector) + if err != nil { + return fmt.Errorf("invalid label selector: %v", err) + } + out.TargetSelector = selector.String() + } + return nil +} + +func Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + + // Normally when 2 fields map to the same internal value we favor the old field, since + // old clients can't be expected to know about new fields but clients that know about the + // new field can be expected to know about the old field (though that's not quite true, due + // to kubectl apply). However, these fields are readonly, so any non-nil value should work. + if in.TargetSelector != "" { + labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) + if err != nil { + out.Selector = nil + return fmt.Errorf("failed to parse target selector: %v", err) + } + out.Selector = labelSelector + } else if in.Selector != nil { + out.Selector = new(metav1.LabelSelector) + selector := make(map[string]string) + for key, val := range in.Selector { + selector[key] = val + } + out.Selector.MatchLabels = selector + } else { + out.Selector = nil + } + return nil +} + +func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { + if in.Replicas != nil { + out.Replicas = *in.Replicas + } + out.Selector = in.Selector + if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + return err + } + out.RevisionHistoryLimit = in.RevisionHistoryLimit + out.MinReadySeconds = in.MinReadySeconds + out.Paused = in.Paused + if in.RollbackTo != nil { + out.RollbackTo = new(extensions.RollbackConfig) + out.RollbackTo.Revision = in.RollbackTo.Revision + } else { + out.RollbackTo = nil + } + if in.ProgressDeadlineSeconds != nil { + out.ProgressDeadlineSeconds = new(int32) + *out.ProgressDeadlineSeconds = *in.ProgressDeadlineSeconds + } + return nil +} + +func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error { + out.Replicas = &in.Replicas + out.Selector = in.Selector + if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + return err + } + if in.RevisionHistoryLimit != nil { + out.RevisionHistoryLimit = new(int32) + *out.RevisionHistoryLimit = int32(*in.RevisionHistoryLimit) + } + out.MinReadySeconds = int32(in.MinReadySeconds) + out.Paused = in.Paused + if in.RollbackTo != nil { + out.RollbackTo = new(RollbackConfig) + out.RollbackTo.Revision = int64(in.RollbackTo.Revision) + } else { + out.RollbackTo = nil + } + if in.ProgressDeadlineSeconds != nil { + out.ProgressDeadlineSeconds = new(int32) + *out.ProgressDeadlineSeconds = *in.ProgressDeadlineSeconds + } + return nil +} + +func 
Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error { + out.Type = DeploymentStrategyType(in.Type) + if in.RollingUpdate != nil { + out.RollingUpdate = new(RollingUpdateDeployment) + if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { + out.Type = extensions.DeploymentStrategyType(in.Type) + if in.RollingUpdate != nil { + out.RollingUpdate = new(extensions.RollingUpdateDeployment) + if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { + if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { + return err + } + if err := s.Convert(in.MaxSurge, &out.MaxSurge, 0); err != nil { + return err + } + return nil +} + +func Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error { + if out.MaxUnavailable == nil { + out.MaxUnavailable = &intstr.IntOrString{} + } + if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil { + return err + } + if out.MaxSurge == nil { + out.MaxSurge = &intstr.IntOrString{} + } + if err := s.Convert(&in.MaxSurge, out.MaxSurge, 0); err != nil { + return err + } + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/defaults.go new file mode 100644 index 000000000..004cecd3f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/defaults.go @@ -0,0 +1,103 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + RegisterDefaults(scheme) + return scheme.AddDefaultingFuncs( + SetDefaults_StatefulSet, + SetDefaults_Deployment, + ) +} + +func SetDefaults_StatefulSet(obj *StatefulSet) { + labels := obj.Spec.Template.Labels + if labels != nil { + if obj.Spec.Selector == nil { + obj.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: labels, + } + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } +} + +// SetDefaults_Deployment sets additional defaults compared to its counterpart +// in extensions. These addons are: +// - MaxUnavailable during rolling update set to 25% (1 in extensions) +// - MaxSurge value during rolling update set to 25% (1 in extensions) +// - RevisionHistoryLimit set to 2 (not set in extensions) +// - ProgressDeadlineSeconds set to 600s (not set in extensions) +func SetDefaults_Deployment(obj *Deployment) { + // Default labels and selector to labels from pod template spec. + labels := obj.Spec.Template.Labels + + if labels != nil { + if obj.Spec.Selector == nil { + obj.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels} + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } + // Set DeploymentSpec.Replicas to 1 if it is not set. + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } + strategy := &obj.Spec.Strategy + // Set default DeploymentStrategyType as RollingUpdate. + if strategy.Type == "" { + strategy.Type = RollingUpdateDeploymentStrategyType + } + if strategy.Type == RollingUpdateDeploymentStrategyType { + if strategy.RollingUpdate == nil { + rollingUpdate := RollingUpdateDeployment{} + strategy.RollingUpdate = &rollingUpdate + } + if strategy.RollingUpdate.MaxUnavailable == nil { + // Set default MaxUnavailable as 25% by default. + maxUnavailable := intstr.FromString("25%") + strategy.RollingUpdate.MaxUnavailable = &maxUnavailable + } + if strategy.RollingUpdate.MaxSurge == nil { + // Set default MaxSurge as 25% by default. + maxSurge := intstr.FromString("25%") + strategy.RollingUpdate.MaxSurge = &maxSurge + } + } + if obj.Spec.RevisionHistoryLimit == nil { + obj.Spec.RevisionHistoryLimit = new(int32) + *obj.Spec.RevisionHistoryLimit = 2 + } + if obj.Spec.ProgressDeadlineSeconds == nil { + obj.Spec.ProgressDeadlineSeconds = new(int32) + *obj.Spec.ProgressDeadlineSeconds = 600 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/doc.go new file mode 100644 index 000000000..a397b30e9 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/generated.pb.go new file mode 100644 index 000000000..3e215241b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/generated.pb.go @@ -0,0 +1,3939 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/apps/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/apps/v1beta1/generated.proto + + It has these top-level messages: + Deployment + DeploymentCondition + DeploymentList + DeploymentRollback + DeploymentSpec + DeploymentStatus + DeploymentStrategy + RollbackConfig + RollingUpdateDeployment + Scale + ScaleSpec + ScaleStatus + StatefulSet + StatefulSetList + StatefulSetSpec + StatefulSetStatus +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/pkg/api/v1" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } +func (*DeploymentRollback) ProtoMessage() {} +func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } +func (*RollbackConfig) ProtoMessage() {} +func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } +func (*RollingUpdateDeployment) ProtoMessage() {} +func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + +func init() { + proto.RegisterType((*Deployment)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.Deployment") + proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.DeploymentCondition") + 
proto.RegisterType((*DeploymentList)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.DeploymentList") + proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.DeploymentRollback") + proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.DeploymentSpec") + proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.DeploymentStatus") + proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.DeploymentStrategy") + proto.RegisterType((*RollbackConfig)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.RollbackConfig") + proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.RollingUpdateDeployment") + proto.RegisterType((*Scale)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.ScaleStatus") + proto.RegisterType((*StatefulSet)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.StatefulSet") + proto.RegisterType((*StatefulSetList)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.StatefulSetList") + proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.StatefulSetSpec") + proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.client-go.pkg.apis.apps.v1beta1.StatefulSetStatus") +} +func (m *Deployment) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Deployment) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *DeploymentCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastUpdateTime.Size())) + n4, err := m.LastUpdateTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n5, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *DeploymentList) Marshal() (data []byte, err error) { + size := 
m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DeploymentRollback) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentRollback) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + if len(m.UpdatedAnnotations) > 0 { + for k := range m.UpdatedAnnotations { + data[i] = 0x12 + i++ + v := m.UpdatedAnnotations[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size())) + n7, err := m.RollbackTo.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + return i, nil +} + +func (m *DeploymentSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n8, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n9, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Strategy.Size())) + n10, err := m.Strategy.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.RevisionHistoryLimit)) + } + data[i] = 0x38 + i++ + if m.Paused { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if m.RollbackTo != nil { + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size())) + n11, err := m.RollbackTo.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.ProgressDeadlineSeconds != nil { + data[i] = 0x48 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ProgressDeadlineSeconds)) + } + return i, nil +} + +func (m *DeploymentStatus) Marshal() (data []byte, err error) { + size := m.Size() + 
data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.UpdatedReplicas)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AvailableReplicas)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x38 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ReadyReplicas)) + return i, nil +} + +func (m *DeploymentStrategy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentStrategy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.RollingUpdate != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RollingUpdate.Size())) + n12, err := m.RollingUpdate.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} + +func (m *RollbackConfig) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RollbackConfig) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Revision)) + return i, nil +} + +func (m *RollingUpdateDeployment) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RollingUpdateDeployment) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MaxUnavailable != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxUnavailable.Size())) + n13, err := m.MaxUnavailable.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + } + if m.MaxSurge != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxSurge.Size())) + n14, err := m.MaxSurge.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n14 + } + return i, nil +} + +func (m *Scale) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Scale) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n15, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n15 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n16, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n16 + data[i] = 0x1a + 
i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n17, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n17 + return i, nil +} + +func (m *ScaleSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScaleSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + return i, nil +} + +func (m *ScaleStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScaleStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k := range m.Selector { + data[i] = 0x12 + i++ + v := m.Selector[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.TargetSelector))) + i += copy(data[i:], m.TargetSelector) + return i, nil +} + +func (m *StatefulSet) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StatefulSet) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n18, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n18 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n19, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n19 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n20, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n20 + return i, nil +} + +func (m *StatefulSetList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StatefulSetList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n21, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n21 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StatefulSetSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StatefulSetSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + data[i] = 0x8 + i++ + i = 
encodeVarintGenerated(data, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n22, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n22 + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n23, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n23 + if len(m.VolumeClaimTemplates) > 0 { + for _, msg := range m.VolumeClaimTemplates { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ServiceName))) + i += copy(data[i:], m.ServiceName) + return i, nil +} + +func (m *StatefulSetStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StatefulSetStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ObservedGeneration != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration)) + } + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *Deployment) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentRollback) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UpdatedAnnotations) > 0 { + for k, v := range m.UpdatedAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = 
m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + if m.RollbackTo != nil { + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ProgressDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + } + return n +} + +func (m *DeploymentStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + return n +} + +func (m *DeploymentStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollbackConfig) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Revision)) + return n +} + +func (m *RollingUpdateDeployment) Size() (n int) { + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Scale) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleSpec) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + return n +} + +func (m *ScaleStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.TargetSelector) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StatefulSetSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n 
+= 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeClaimTemplates) > 0 { + for _, e := range m.VolumeClaimTemplates { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetStatus) Size() (n int) { + var l int + _ = l + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + n += 1 + sovGenerated(uint64(m.Replicas)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Deployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Deployment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentRollback) String() string { + if this == nil { + return "nil" + } + keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) + for k := range this.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + mapStringForUpdatedAnnotations := "map[string]string{" + for _, k := range keysForUpdatedAnnotations { + mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) + } + mapStringForUpdatedAnnotations += "}" + s := strings.Join([]string{`&DeploymentRollback{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, + `RollbackTo:` + strings.Replace(strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + 
`Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `RollbackTo:` + strings.Replace(fmt.Sprintf("%v", this.RollbackTo), "RollbackConfig", "RollbackConfig", 1) + `,`, + `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollbackConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollbackConfig{`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDeployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDeployment{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Scale) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Scale{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleSpec{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleStatus) String() string { + if this == nil { + return "nil" + } + keysForSelector := 
make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_kubernetes_pkg_api_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetStatus{`, + `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Deployment) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
Deployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = 
DeploymentConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx 
> l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Deployment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentRollback) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.UpdatedAnnotations == nil { + m.UpdatedAnnotations = make(map[string]string) + } + m.UpdatedAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*DeploymentSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Strategy.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", 
wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollbackTo == nil { + m.RollbackTo = &RollbackConfig{} + } + if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressDeadlineSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= 
(int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStrategy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentStrategyType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDeployment{} + } + if err := m.RollingUpdate.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollbackConfig) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxSurge == nil { + m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scale) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scale: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } 
+ } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ 
+ m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Selector == nil { + m.Selector = make(map[string]string) + } + m.Selector[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetSelector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSet) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { 
+ preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StatefulSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_kubernetes_pkg_api_v1.PersistentVolumeClaim{}) + if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ObservedGeneration = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + 
iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 1525 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe4, 0x58, 0xcb, 0x6f, 0x5b, 0xc5, + 0x17, 0xce, 0x4d, 0xec, 0xc4, 0x99, 0x34, 0x4e, 0x33, 0xc9, 0xaf, 0xf1, 0x2f, 0x45, 0x4e, 0xe5, + 0x45, 0x1f, 0xa8, 0xbd, 0xa6, 0x69, 0xa1, 0x8f, 0x40, 0x45, 0xdc, 0x96, 0x52, 0x94, 0xd0, 0x6a, + 0xec, 0x54, 0xb4, 0x14, 0x89, 0xb1, 0x3d, 0xbd, 0x9d, 0xfa, 0xbe, 0x74, 0x67, 0x6c, 0xc5, 0x3b, + 0x36, 0x2c, 0x90, 0x58, 0xb0, 0x62, 0x87, 0xd8, 0x23, 0x24, 0x76, 0xfc, 0x0d, 0x11, 0x6c, 0xba, + 0x44, 0x2c, 0x22, 0xe2, 0xfe, 0x17, 0x5d, 0xa1, 0x99, 0x3b, 0xf7, 0xe5, 0x6b, 0x27, 0x8e, 0x11, + 0xdd, 0xb0, 0xf3, 0x9d, 0x39, 0xdf, 0x77, 0xce, 0xcc, 0x7c, 0xe7, 0xcc, 0x19, 0x83, 0x6b, 0xad, + 0xeb, 0x4c, 0xa7, 0x4e, 0xb9, 0xd5, 0xae, 0x13, 0xcf, 0x26, 0x9c, 0xb0, 0xb2, 0xdb, 0x32, 0xca, + 0xd8, 0xa5, 0xac, 0x8c, 0x5d, 0x97, 0x95, 0x3b, 0x97, 0xeb, 0x84, 0xe3, 0xcb, 0x65, 0x83, 0xd8, + 0xc4, 0xc3, 0x9c, 0x34, 0x75, 0xd7, 0x73, 0xb8, 0x03, 0xcf, 0xf9, 0x40, 0x3d, 0x02, 0xea, 0x6e, + 0xcb, 0xd0, 0x05, 0x50, 0x17, 0x40, 0x5d, 0x01, 0x57, 0x2f, 0x19, 0x94, 0x3f, 0x6f, 0xd7, 0xf5, + 0x86, 0x63, 0x95, 0x0d, 
0xc7, 0x70, 0xca, 0x12, 0x5f, 0x6f, 0x3f, 0x93, 0x5f, 0xf2, 0x43, 0xfe, + 0xf2, 0x79, 0x57, 0xaf, 0xaa, 0x80, 0xb0, 0x4b, 0x2d, 0xdc, 0x78, 0x4e, 0x6d, 0xe2, 0x75, 0xa3, + 0x90, 0x2c, 0xc2, 0x71, 0xb9, 0x93, 0x8a, 0x66, 0xb5, 0x3c, 0x0c, 0xe5, 0xb5, 0x6d, 0x4e, 0x2d, + 0x92, 0x02, 0xbc, 0x77, 0x14, 0x80, 0x35, 0x9e, 0x13, 0x0b, 0xa7, 0x70, 0x57, 0x86, 0xe1, 0xda, + 0x9c, 0x9a, 0x65, 0x6a, 0x73, 0xc6, 0xbd, 0x14, 0x28, 0xb6, 0x26, 0x46, 0xbc, 0x0e, 0xf1, 0xa2, + 0x05, 0x91, 0x5d, 0x6c, 0xb9, 0x26, 0x19, 0xb4, 0xa6, 0x8b, 0x43, 0x8f, 0x66, 0x90, 0xf5, 0x07, + 0x87, 0x1c, 0x24, 0xd9, 0xe5, 0xc4, 0x66, 0xd4, 0xb1, 0x87, 0x1e, 0x67, 0xe9, 0xe7, 0x49, 0x00, + 0xee, 0x10, 0xd7, 0x74, 0xba, 0x16, 0xb1, 0x39, 0xfc, 0x12, 0xe4, 0xc4, 0x56, 0x37, 0x31, 0xc7, + 0x05, 0xed, 0x8c, 0x76, 0x7e, 0x6e, 0xfd, 0x1d, 0x5d, 0x1d, 0x78, 0x7c, 0xe5, 0xd1, 0x91, 0x0b, + 0x6b, 0xbd, 0x73, 0x59, 0x7f, 0x50, 0x7f, 0x41, 0x1a, 0x7c, 0x9b, 0x70, 0x5c, 0x81, 0x7b, 0xfb, + 0x6b, 0x13, 0xbd, 0xfd, 0x35, 0x10, 0x8d, 0xa1, 0x90, 0x15, 0x3e, 0x06, 0x19, 0xe6, 0x92, 0x46, + 0x61, 0x52, 0xb2, 0x5f, 0xd3, 0x47, 0x94, 0x93, 0x1e, 0x05, 0x59, 0x75, 0x49, 0xa3, 0x72, 0x42, + 0x39, 0xc9, 0x88, 0x2f, 0x24, 0x29, 0x21, 0x06, 0xd3, 0x8c, 0x63, 0xde, 0x66, 0x85, 0x29, 0x49, + 0x7e, 0x63, 0x1c, 0x72, 0x49, 0x50, 0xc9, 0x2b, 0xfa, 0x69, 0xff, 0x1b, 0x29, 0xe2, 0xd2, 0xc1, + 0x14, 0x58, 0x8a, 0x8c, 0x6f, 0x3b, 0x76, 0x93, 0x72, 0xea, 0xd8, 0x70, 0x03, 0x64, 0x78, 0xd7, + 0x25, 0x72, 0xcf, 0x66, 0x2b, 0xe7, 0x82, 0xe0, 0x6a, 0x5d, 0x97, 0xbc, 0xde, 0x5f, 0x5b, 0x19, + 0x00, 0x11, 0x53, 0x48, 0x82, 0xe0, 0xa3, 0x30, 0xee, 0x49, 0x09, 0xbf, 0x95, 0x74, 0xfe, 0x7a, + 0x7f, 0xed, 0x50, 0x49, 0xe8, 0x21, 0x67, 0x32, 0x58, 0x78, 0x16, 0x4c, 0x7b, 0x04, 0x33, 0xc7, + 0x2e, 0x64, 0x24, 0x6f, 0xb8, 0x28, 0x24, 0x47, 0x91, 0x9a, 0x85, 0x17, 0xc0, 0x8c, 0x45, 0x18, + 0xc3, 0x06, 0x29, 0x64, 0xa5, 0xe1, 0x82, 0x32, 0x9c, 0xd9, 0xf6, 0x87, 0x51, 0x30, 0x0f, 0x5f, + 0x80, 0xbc, 0x89, 0x19, 0xdf, 0x71, 0x9b, 0x98, 0x93, 0x1a, 0xb5, 0x48, 0x61, 0x5a, 0x6e, 0xf5, + 0xdb, 0xa3, 0xa9, 0x44, 0x20, 0x2a, 0xa7, 0x14, 0x7b, 0x7e, 0x2b, 0xc1, 0x84, 0xfa, 0x98, 0x61, + 0x07, 0x40, 0x31, 0x52, 0xf3, 0xb0, 0xcd, 0xfc, 0x2d, 0x13, 0xfe, 0x66, 0x8e, 0xed, 0x6f, 0x55, + 0xf9, 0x83, 0x5b, 0x29, 0x36, 0x34, 0xc0, 0x43, 0x69, 0x4f, 0x03, 0xf9, 0xe8, 0xc0, 0xb6, 0x28, + 0xe3, 0xf0, 0x69, 0x2a, 0x2d, 0xf4, 0xd1, 0x02, 0x10, 0x68, 0x99, 0x14, 0x27, 0x55, 0x10, 0xb9, + 0x60, 0x24, 0x96, 0x12, 0x9f, 0x81, 0x2c, 0xe5, 0xc4, 0x12, 0xc7, 0x3f, 0x75, 0x7e, 0x6e, 0xfd, + 0xca, 0x18, 0xb2, 0xad, 0xcc, 0x2b, 0xfe, 0xec, 0x7d, 0xc1, 0x84, 0x7c, 0xc2, 0xd2, 0xb7, 0x53, + 0x00, 0x46, 0x46, 0xc8, 0x31, 0xcd, 0x3a, 0x6e, 0xb4, 0xe0, 0x19, 0x90, 0xb1, 0xb1, 0x15, 0xa8, + 0x35, 0x4c, 0xa5, 0x4f, 0xb1, 0x45, 0x90, 0x9c, 0x81, 0x3f, 0x6a, 0x00, 0xb6, 0xe5, 0x51, 0x34, + 0x37, 0x6d, 0xdb, 0xe1, 0x58, 0xec, 0x4e, 0x10, 0x60, 0x75, 0x8c, 0x00, 0x03, 0xdf, 0xfa, 0x4e, + 0x8a, 0xf5, 0xae, 0xcd, 0xbd, 0x6e, 0x74, 0x4a, 0x69, 0x03, 0x34, 0x20, 0x14, 0xd8, 0x02, 0xc0, + 0x53, 0x9c, 0x35, 0x47, 0x25, 0xfc, 0xe8, 0xd5, 0x24, 0x08, 0xe7, 0xb6, 0x63, 0x3f, 0xa3, 0x46, + 0x54, 0xb2, 0x50, 0x48, 0x89, 0x62, 0xf4, 0xab, 0x77, 0xc1, 0xca, 0x90, 0xb8, 0xe1, 0x49, 0x30, + 0xd5, 0x22, 0x5d, 0x7f, 0x2b, 0x91, 0xf8, 0x09, 0x97, 0x41, 0xb6, 0x83, 0xcd, 0x36, 0xf1, 0xb3, + 0x19, 0xf9, 0x1f, 0x37, 0x27, 0xaf, 0x6b, 0xa5, 0x3f, 0xb3, 0x71, 0x65, 0x89, 0xca, 0x05, 0xcf, + 0x83, 0x9c, 0x47, 0x5c, 0x93, 0x36, 0x30, 0x93, 0x1c, 0xd9, 0xca, 0x09, 0xa1, 0x12, 0xa4, 0xc6, + 0x50, 0x38, 0x0b, 0xbf, 0x00, 0x39, 0x46, 0x4c, 
0xd2, 0xe0, 0x8e, 0xa7, 0x8a, 0xe7, 0x95, 0x11, + 0x35, 0x88, 0xeb, 0xc4, 0xac, 0x2a, 0xa8, 0x4f, 0x1f, 0x7c, 0xa1, 0x90, 0x12, 0x7e, 0x0e, 0x72, + 0x9c, 0x58, 0xae, 0x89, 0x39, 0x51, 0xbb, 0x79, 0x69, 0xf8, 0x6e, 0x0a, 0xda, 0x87, 0x4e, 0xb3, + 0xa6, 0x00, 0xb2, 0x22, 0x87, 0x0a, 0x0f, 0x46, 0x51, 0x48, 0x08, 0x29, 0xc8, 0x31, 0x2e, 0xae, + 0x1d, 0xa3, 0x2b, 0x6b, 0xd1, 0xdc, 0xfa, 0xc6, 0x58, 0xb5, 0xd9, 0xa7, 0x88, 0x5c, 0x05, 0x23, + 0x28, 0xa4, 0x87, 0x9b, 0x60, 0xc1, 0xa2, 0x36, 0x22, 0xb8, 0xd9, 0xad, 0x92, 0x86, 0x63, 0x37, + 0x99, 0x2c, 0x6a, 0xd9, 0xca, 0x8a, 0x02, 0x2d, 0x6c, 0x27, 0xa7, 0x51, 0xbf, 0x3d, 0xdc, 0x02, + 0xcb, 0x1e, 0xe9, 0x50, 0x71, 0x71, 0x7e, 0x4c, 0x19, 0x77, 0xbc, 0xee, 0x16, 0xb5, 0x28, 0x97, + 0xa5, 0x2e, 0x5b, 0x29, 0xf4, 0xf6, 0xd7, 0x96, 0xd1, 0x80, 0x79, 0x34, 0x10, 0x25, 0xaa, 0xb0, + 0x8b, 0xdb, 0x8c, 0x34, 0x65, 0xe9, 0xca, 0x45, 0x55, 0xf8, 0xa1, 0x1c, 0x45, 0x6a, 0x16, 0x1a, + 0x09, 0x41, 0xe7, 0xfe, 0x99, 0xa0, 0xf3, 0xc3, 0xc5, 0x0c, 0x77, 0xc0, 0x8a, 0xeb, 0x39, 0x86, + 0x47, 0x18, 0xbb, 0x43, 0x70, 0xd3, 0xa4, 0x36, 0x09, 0x76, 0x6a, 0x56, 0xae, 0xf0, 0x74, 0x6f, + 0x7f, 0x6d, 0xe5, 0xe1, 0x60, 0x13, 0x34, 0x0c, 0x5b, 0xfa, 0x3e, 0x03, 0x4e, 0xf6, 0xdf, 0xa3, + 0xf0, 0x13, 0x00, 0x9d, 0xba, 0xec, 0x7d, 0x9a, 0xf7, 0xfc, 0xce, 0x83, 0x3a, 0xb6, 0x14, 0xfa, + 0x54, 0x94, 0xf1, 0x0f, 0x52, 0x16, 0x68, 0x00, 0x0a, 0x5e, 0x8c, 0xa5, 0xca, 0xa4, 0x0c, 0x34, + 0xd4, 0xc1, 0x80, 0x74, 0xd9, 0x04, 0x0b, 0xaa, 0x6a, 0x04, 0x93, 0x52, 0xd6, 0x31, 0x1d, 0xec, + 0x24, 0xa7, 0x51, 0xbf, 0x3d, 0xbc, 0x07, 0x16, 0x71, 0x07, 0x53, 0x13, 0xd7, 0x4d, 0x12, 0x92, + 0x64, 0x24, 0xc9, 0xff, 0x15, 0xc9, 0xe2, 0x66, 0xbf, 0x01, 0x4a, 0x63, 0xe0, 0x36, 0x58, 0x6a, + 0xdb, 0x69, 0x2a, 0x5f, 0x97, 0xa7, 0x15, 0xd5, 0xd2, 0x4e, 0xda, 0x04, 0x0d, 0xc2, 0x41, 0x17, + 0x80, 0x46, 0x70, 0xe5, 0xb3, 0xc2, 0xb4, 0xac, 0xc9, 0xef, 0x8f, 0x91, 0x4f, 0x61, 0xdf, 0x10, + 0xd5, 0xbf, 0x70, 0x88, 0xa1, 0x98, 0x0f, 0xb8, 0x01, 0xe6, 0x3d, 0x91, 0x21, 0x61, 0xe8, 0x33, + 0x32, 0xf4, 0xff, 0x29, 0xd8, 0x3c, 0x8a, 0x4f, 0xa2, 0xa4, 0x6d, 0xe9, 0x77, 0x2d, 0x7e, 0x09, + 0x05, 0x29, 0x0b, 0x6f, 0x26, 0x5a, 0xa6, 0xb3, 0x7d, 0x2d, 0xd3, 0xa9, 0x34, 0x22, 0xd6, 0x31, + 0x75, 0xc1, 0xbc, 0x10, 0x34, 0xb5, 0x0d, 0xff, 0x10, 0x55, 0x41, 0xfc, 0xf0, 0x58, 0xe9, 0x12, + 0xa2, 0x63, 0xd7, 0xe8, 0xa2, 0x5c, 0x4d, 0x7c, 0x12, 0x25, 0x3d, 0x95, 0x6e, 0x81, 0x7c, 0x32, + 0xd7, 0x7c, 0x5d, 0xfa, 0x89, 0xaf, 0x94, 0x1d, 0xd3, 0xa5, 0x3f, 0x8e, 0x42, 0x8b, 0xd2, 0x2b, + 0x0d, 0xac, 0x0c, 0xf1, 0x0e, 0x4d, 0x90, 0xb7, 0xf0, 0x6e, 0x4c, 0x07, 0x47, 0xf6, 0xe0, 0xe2, + 0xf5, 0xa1, 0xfb, 0xaf, 0x0f, 0xfd, 0xbe, 0xcd, 0x1f, 0x78, 0x55, 0xee, 0x51, 0xdb, 0xa8, 0x40, + 0xd1, 0x5f, 0x6d, 0x27, 0xb8, 0x50, 0x1f, 0x37, 0x7c, 0x02, 0x72, 0x16, 0xde, 0xad, 0xb6, 0x3d, + 0x23, 0xd8, 0xbf, 0xe3, 0xfb, 0x91, 0xb7, 0xc9, 0xb6, 0x62, 0x41, 0x21, 0x5f, 0xe9, 0x87, 0x49, + 0x90, 0xad, 0x36, 0xb0, 0x49, 0xde, 0xc0, 0x8b, 0xa2, 0x96, 0x78, 0x51, 0xac, 0x8f, 0xac, 0x01, + 0x19, 0xdf, 0xd0, 0xc7, 0xc4, 0xd3, 0xbe, 0xc7, 0xc4, 0xd5, 0x63, 0xf2, 0x1e, 0xfe, 0x8e, 0xb8, + 0x01, 0x66, 0x43, 0xf7, 0x89, 0xc2, 0xa6, 0x1d, 0x55, 0xd8, 0x4a, 0x3f, 0x4d, 0x82, 0xb9, 0x98, + 0x8b, 0xe3, 0xa1, 0xa1, 0x9b, 0xe8, 0x22, 0x44, 0xe5, 0xa8, 0x8c, 0xb3, 0x30, 0x3d, 0xe8, 0x20, + 0xfc, 0xe6, 0x2d, 0xba, 0x90, 0xd3, 0x8d, 0xc5, 0x2d, 0x90, 0xe7, 0xd8, 0x33, 0x08, 0x0f, 0xe6, + 0xe4, 0x86, 0xce, 0x46, 0xcf, 0x80, 0x5a, 0x62, 0x16, 0xf5, 0x59, 0xaf, 0x6e, 0x80, 0xf9, 0x84, + 0xb3, 0x63, 0x75, 0x5c, 0xbf, 0x88, 0xcd, 0xe2, 0x98, 0x93, 0x67, 0x6d, 
0xb3, 0x4a, 0xde, 0xc4, + 0xfb, 0xf6, 0x49, 0x42, 0x8d, 0xd7, 0x47, 0xdf, 0xdc, 0x28, 0xca, 0xa1, 0x9a, 0xac, 0xf7, 0x69, + 0xf2, 0xe6, 0x58, 0xec, 0x87, 0x2b, 0xf3, 0x37, 0x0d, 0x2c, 0xc4, 0xac, 0xdf, 0xc0, 0xf3, 0xe7, + 0x71, 0xf2, 0xf9, 0x73, 0x75, 0x9c, 0x45, 0x0d, 0x79, 0xff, 0xfc, 0x3a, 0x95, 0x58, 0xcc, 0x7f, + 0xa8, 0xe3, 0xfe, 0x5a, 0x03, 0xcb, 0x1d, 0xc7, 0x6c, 0x5b, 0xe4, 0xb6, 0x89, 0xa9, 0x15, 0x58, + 0x88, 0xfe, 0xe5, 0x88, 0x37, 0xa6, 0xf4, 0x44, 0x3c, 0x46, 0x19, 0x27, 0x36, 0x7f, 0x14, 0x71, + 0x54, 0xde, 0x52, 0xfe, 0x96, 0x1f, 0x0d, 0x20, 0x46, 0x03, 0xdd, 0xc1, 0x77, 0xc1, 0x9c, 0x68, + 0xe4, 0x68, 0x83, 0x88, 0xd7, 0xa5, 0xfa, 0x7f, 0x61, 0x49, 0x11, 0xcd, 0x55, 0xa3, 0x29, 0x14, + 0xb7, 0x2b, 0x7d, 0xa3, 0x81, 0xc5, 0x94, 0x66, 0xe1, 0x47, 0x87, 0x74, 0x93, 0xa7, 0xfe, 0xad, + 0x4e, 0xb2, 0x72, 0x61, 0xef, 0xa0, 0x38, 0xf1, 0xf2, 0xa0, 0x38, 0xf1, 0xc7, 0x41, 0x71, 0xe2, + 0xab, 0x5e, 0x51, 0xdb, 0xeb, 0x15, 0xb5, 0x97, 0xbd, 0xa2, 0xf6, 0x57, 0xaf, 0xa8, 0x7d, 0xf7, + 0xaa, 0x38, 0xf1, 0x64, 0x46, 0x29, 0xf2, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd6, 0xb9, 0xde, + 0x1a, 0x56, 0x15, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/generated.proto new file mode 100644 index 000000000..8ca77b2e6 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/generated.proto @@ -0,0 +1,342 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.apps.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// Deployment enables declarative updates for Pods and ReplicaSets. +message Deployment { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the Deployment. + // +optional + optional DeploymentSpec spec = 2; + + // Most recently observed status of the Deployment. + // +optional + optional DeploymentStatus status = 3; +} + +// DeploymentCondition describes the state of a deployment at a certain point. +message DeploymentCondition { + // Type of deployment condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. 
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + + // Last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + + // The reason for the condition's last transition. + optional string reason = 4; + + // A human readable message indicating details about the transition. + optional string message = 5; +} + +// DeploymentList is a list of Deployments. +message DeploymentList { + // Standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Deployments. + repeated Deployment items = 2; +} + +// DeploymentRollback stores the information required to rollback a deployment. +message DeploymentRollback { + // Required: This must match the Name of a deployment. + optional string name = 1; + + // The annotations to be updated to a deployment + // +optional + map<string, string> updatedAnnotations = 2; + + // The config of this deployment rollback. + optional RollbackConfig rollbackTo = 3; +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +message DeploymentSpec { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + optional int32 replicas = 1; + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template describes the pods that will be created. + optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + optional DeploymentStrategy strategy = 4; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 5; + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 2. + // +optional + optional int32 revisionHistoryLimit = 6; + + // Indicates that the deployment is paused. + // +optional + optional bool paused = 7; + + // The config this deployment is rolling back to. Will be cleared after rollback is done. + // +optional + optional RollbackConfig rollbackTo = 8; + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Once autoRollback is + // implemented, the deployment controller will automatically rollback failed + // deployments. Note that progress will not be estimated during the time a + // deployment is paused. Defaults to 600s. + optional int32 progressDeadlineSeconds = 9; +} + +// DeploymentStatus is the most recently observed status of the Deployment. +message DeploymentStatus { + // The generation observed by the deployment controller. + // +optional + optional int64 observedGeneration = 1; + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). 
+ // +optional + optional int32 replicas = 2; + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + optional int32 updatedReplicas = 3; + + // Total number of ready pods targeted by this deployment. + // +optional + optional int32 readyReplicas = 7; + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + optional int32 availableReplicas = 4; + + // Total number of unavailable pods targeted by this deployment. + // +optional + optional int32 unavailableReplicas = 5; + + // Represents the latest available observations of a deployment's current state. + repeated DeploymentCondition conditions = 6; +} + +// DeploymentStrategy describes how to replace existing pods with new ones. +message DeploymentStrategy { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + optional string type = 1; + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + optional RollingUpdateDeployment rollingUpdate = 2; +} + +message RollbackConfig { + // The revision to rollback to. If set to 0, rollback to the last revision. + // +optional + optional int64 revision = 1; +} + +// Spec to control the desired behavior of rolling update. +message RollingUpdateDeployment { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 25%. + // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // Defaults to 25%. + // Example: when this is set to 30%, the new RC can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is at most 130% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; +} + +// Scale represents a scaling request for a resource. +message Scale { + // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + optional ScaleSpec spec = 2; + + // current status of the scale.
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // +optional + optional ScaleStatus status = 3; +} + +// ScaleSpec describes the attributes of a scale subresource +message ScaleSpec { + // desired number of instances for the scaled object. + // +optional + optional int32 replicas = 1; +} + +// ScaleStatus represents the current status of a scale subresource. +message ScaleStatus { + // actual number of observed instances of the scaled object. + optional int32 replicas = 1; + + // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + map<string, string> selector = 2; + + // label selector for pods that should match the replicas count. This is a serialized + // version of both map-based and more expressive set-based selectors. This is done to + // avoid introspection in the clients. The string will be in the same format as the + // query-param syntax. If the target type only supports map-based selectors, both this + // field and map-based selector field are populated. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + optional string targetSelector = 3; +} + +// StatefulSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. +message StatefulSet { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the desired identities of pods in this set. + // +optional + optional StatefulSetSpec spec = 2; + + // Status is the current status of Pods in this StatefulSet. This data + // may be out of date by some window of time. + // +optional + optional StatefulSetStatus status = 3; +} + +// StatefulSetList is a collection of StatefulSets. +message StatefulSetList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated StatefulSet items = 2; +} + +// A StatefulSetSpec is the specification of a StatefulSet. +message StatefulSetSpec { + // Replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + // +optional + optional int32 replicas = 1; + + // Selector is a label query over pods that should match the replica count. + // If empty, defaulted to labels on the pod template. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the StatefulSet + // will fulfill this Template, but have a unique identity from the rest + // of the StatefulSet. + optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; + + // VolumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod.
Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + // +optional + repeated k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + + // ServiceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. + optional string serviceName = 5; +} + +// StatefulSetStatus represents the current state of a StatefulSet. +message StatefulSetStatus { + // most recent generation observed by this StatefulSet. + // +optional + optional int64 observedGeneration = 1; + + // Replicas is the number of actual replicas. + optional int32 replicas = 2; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/register.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/register.go new file mode 100644 index 000000000..6e618e1d8 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "apps" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Deployment{}, + &DeploymentList{}, + &DeploymentRollback{}, + &Scale{}, + &StatefulSet{}, + &StatefulSetList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types.generated.go new file mode 100644 index 000000000..409504120 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types.generated.go @@ -0,0 +1,6485 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package v1beta1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg4_resource "k8s.io/apimachinery/pkg/api/resource" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + pkg5_intstr "k8s.io/apimachinery/pkg/util/intstr" + pkg3_v1 "k8s.io/client-go/pkg/api/v1" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg4_resource.Quantity + var v1 pkg1_v1.TypeMeta + var v2 pkg2_types.UID + var v3 pkg5_intstr.IntOrString + var v4 pkg3_v1.PodTemplateSpec + var v5 time.Time + _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 + } +} + +func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv7 := &x.Replicas + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = len(x.Selector) != 0 + yyq2[2] = x.TargetSelector != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncMapStringStringV(x.Selector, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncMapStringStringV(x.Selector, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TargetSelector)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TargetSelector)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) { 
+ var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "selector": + if r.TryDecodeAsNil() { + x.Selector = nil + } else { + yyv6 := &x.Selector + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecMapStringStringX(yyv6, false, d) + } + } + case "targetSelector": + if r.TryDecodeAsNil() { + x.TargetSelector = "" + } else { + yyv8 := &x.TargetSelector + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv11 := &x.Replicas + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(yyv11)) = int32(r.DecodeInt(32)) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Selector = nil + } else { + yyv13 := &x.Selector + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecMapStringStringX(yyv13, false, d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetSelector = "" + } else { + yyv15 := &x.TargetSelector + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + 
*((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + 
r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ScaleSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ScaleStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > 
l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ScaleSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ScaleStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *StatefulSet) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StatefulSet) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StatefulSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = 
r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = StatefulSetSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = StatefulSetStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StatefulSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = StatefulSetSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = StatefulSetStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *StatefulSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + 
z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != nil + yyq2[1] = x.Selector != nil + yyq2[3] = len(x.VolumeClaimTemplates) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Replicas == nil { + r.EncodeNil() + } else { + yy4 := *x.Replicas + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Replicas == nil { + r.EncodeNil() + } else { + yy6 := *x.Replicas + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.Template + yy12.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.Template + yy14.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.VolumeClaimTemplates == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encSlicev1_PersistentVolumeClaim(([]pkg3_v1.PersistentVolumeClaim)(x.VolumeClaimTemplates), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumeClaimTemplates")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.VolumeClaimTemplates == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + h.encSlicev1_PersistentVolumeClaim(([]pkg3_v1.PersistentVolumeClaim)(x.VolumeClaimTemplates), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) + } + } else { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("serviceName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StatefulSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StatefulSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = pkg3_v1.PodTemplateSpec{} + } else { + yyv8 := &x.Template + yyv8.CodecDecodeSelf(d) + } + case "volumeClaimTemplates": + if r.TryDecodeAsNil() { + x.VolumeClaimTemplates = nil + } else { + yyv9 := &x.VolumeClaimTemplates + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicev1_PersistentVolumeClaim((*[]pkg3_v1.PersistentVolumeClaim)(yyv9), d) + } + } + case "serviceName": + if r.TryDecodeAsNil() { + x.ServiceName = "" + } else { + yyv11 := &x.ServiceName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StatefulSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } 
else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = pkg3_v1.PodTemplateSpec{} + } else { + yyv18 := &x.Template + yyv18.CodecDecodeSelf(d) + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.VolumeClaimTemplates = nil + } else { + yyv19 := &x.VolumeClaimTemplates + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicev1_PersistentVolumeClaim((*[]pkg3_v1.PersistentVolumeClaim)(yyv19), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ServiceName = "" + } else { + yyv21 := &x.ServiceName + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *StatefulSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ObservedGeneration != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy4 := *x.ObservedGeneration + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + 
} + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy6 := *x.ObservedGeneration + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StatefulSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StatefulSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "observedGeneration": + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv6 := &x.Replicas + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StatefulSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv11 := &x.Replicas + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(yyv11)) = int32(r.DecodeInt(32)) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *StatefulSetList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + 
_ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceStatefulSet(([]StatefulSet)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceStatefulSet(([]StatefulSet)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StatefulSetList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StatefulSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceStatefulSet((*[]StatefulSet)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StatefulSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if 
yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceStatefulSet((*[]StatefulSet)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Deployment) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Deployment) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Deployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if 
r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = DeploymentSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = DeploymentStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = DeploymentSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = DeploymentStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [9]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != nil + yyq2[1] = x.Selector != nil + yyq2[3] = true + yyq2[4] = x.MinReadySeconds != 0 + yyq2[5] = x.RevisionHistoryLimit != nil + yyq2[6] = x.Paused != false + yyq2[7] = x.RollbackTo != nil + yyq2[8] = x.ProgressDeadlineSeconds != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(9) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Replicas == nil { + r.EncodeNil() + } else { + yy4 := *x.Replicas + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Replicas == nil { + r.EncodeNil() + } else { + yy6 := *x.Replicas + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.Template + yy12.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.Template + yy14.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy17 := &x.Strategy + yy17.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("strategy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy19 := &x.Strategy + yy19.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.RevisionHistoryLimit == nil { + r.EncodeNil() + } else { + yy25 := *x.RevisionHistoryLimit + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeInt(int64(yy25)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("revisionHistoryLimit")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RevisionHistoryLimit == nil { + r.EncodeNil() + } else { + yy27 := *x.RevisionHistoryLimit + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeInt(int64(yy27)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym30 := z.EncBinary() + _ = yym30 + if false { + } else { + r.EncodeBool(bool(x.Paused)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("paused")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeBool(bool(x.Paused)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.RollbackTo == nil { + r.EncodeNil() + } else { + x.RollbackTo.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RollbackTo == nil { + r.EncodeNil() + } else { + x.RollbackTo.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.ProgressDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy36 := *x.ProgressDeadlineSeconds + yym37 := z.EncBinary() + _ = yym37 + if false { + } else { + r.EncodeInt(int64(yy36)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("progressDeadlineSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ProgressDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy38 := *x.ProgressDeadlineSeconds + yym39 := z.EncBinary() + _ = yym39 + if false { + } else { + r.EncodeInt(int64(yy38)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + 
if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = pkg3_v1.PodTemplateSpec{} + } else { + yyv8 := &x.Template + yyv8.CodecDecodeSelf(d) + } + case "strategy": + if r.TryDecodeAsNil() { + x.Strategy = DeploymentStrategy{} + } else { + yyv9 := &x.Strategy + yyv9.CodecDecodeSelf(d) + } + case "minReadySeconds": + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv10 := &x.MinReadySeconds + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "revisionHistoryLimit": + if r.TryDecodeAsNil() { + if x.RevisionHistoryLimit != nil { + x.RevisionHistoryLimit = nil + } + } else { + if x.RevisionHistoryLimit == nil { + x.RevisionHistoryLimit = new(int32) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + case "paused": + if r.TryDecodeAsNil() { + x.Paused = false + } else { + yyv14 := &x.Paused + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + case "rollbackTo": + if r.TryDecodeAsNil() { + if x.RollbackTo != nil { + x.RollbackTo = nil + } + } else { + if x.RollbackTo == nil { + x.RollbackTo = new(RollbackConfig) + } + x.RollbackTo.CodecDecodeSelf(d) + } + case "progressDeadlineSeconds": + if r.TryDecodeAsNil() { + if x.ProgressDeadlineSeconds != nil { + x.ProgressDeadlineSeconds = nil + } + } else { + if x.ProgressDeadlineSeconds == nil { + x.ProgressDeadlineSeconds = new(int32) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(x.ProgressDeadlineSeconds)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj19 int + var yyb19 bool + var yyhl19 bool = l >= 0 + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if 
yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = pkg3_v1.PodTemplateSpec{} + } else { + yyv24 := &x.Template + yyv24.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Strategy = DeploymentStrategy{} + } else { + yyv25 := &x.Strategy + yyv25.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv26 := &x.MinReadySeconds + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*int32)(yyv26)) = int32(r.DecodeInt(32)) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RevisionHistoryLimit != nil { + x.RevisionHistoryLimit = nil + } + } else { + if x.RevisionHistoryLimit == nil { + x.RevisionHistoryLimit = new(int32) + } + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Paused = false + } else { + yyv30 := &x.Paused + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*bool)(yyv30)) = r.DecodeBool() + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RollbackTo != nil { + x.RollbackTo = nil + } + } else { + if x.RollbackTo == nil { + x.RollbackTo = new(RollbackConfig) + } + x.RollbackTo.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 
> l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ProgressDeadlineSeconds != nil { + x.ProgressDeadlineSeconds = nil + } + } else { + if x.ProgressDeadlineSeconds == nil { + x.ProgressDeadlineSeconds = new(int32) + } + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*int32)(x.ProgressDeadlineSeconds)) = int32(r.DecodeInt(32)) + } + } + for { + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj19-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentRollback) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[3] = len(x.UpdatedAnnotations) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.UpdatedAnnotations == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + 
z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("updatedAnnotations")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.UpdatedAnnotations == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy16 := &x.RollbackTo + yy16.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy18 := &x.RollbackTo + yy18.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentRollback) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentRollback) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "updatedAnnotations": + if r.TryDecodeAsNil() { + x.UpdatedAnnotations = nil + } else { + yyv10 := &x.UpdatedAnnotations + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + z.F.DecMapStringStringX(yyv10, false, d) + } + } + case "rollbackTo": + if r.TryDecodeAsNil() { + x.RollbackTo = RollbackConfig{} + } else { + yyv12 := &x.RollbackTo + yyv12.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentRollback) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv14 := &x.Kind + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv16 := &x.APIVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv18 := &x.Name + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UpdatedAnnotations = nil + } else { + yyv20 := &x.UpdatedAnnotations + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + z.F.DecMapStringStringX(yyv20, false, d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RollbackTo = RollbackConfig{} + } else { + yyv22 := &x.RollbackTo + yyv22.CodecDecodeSelf(d) + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RollbackConfig) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Revision != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Revision)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[0] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("revision")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Revision)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RollbackConfig) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RollbackConfig) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "revision": + if r.TryDecodeAsNil() { + x.Revision = 0 + } else { + yyv4 := &x.Revision + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RollbackConfig) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Revision = 0 + } else { + yyv7 := &x.Revision + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int64)(yyv7)) = int64(r.DecodeInt(64)) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentStrategy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 
bool = false + yyq2[0] = x.Type != "" + yyq2[1] = x.RollingUpdate != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Type.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.RollingUpdate == nil { + r.EncodeNil() + } else { + x.RollingUpdate.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rollingUpdate")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RollingUpdate == nil { + r.EncodeNil() + } else { + x.RollingUpdate.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentStrategy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "rollingUpdate": + if r.TryDecodeAsNil() { + if x.RollingUpdate != nil { + x.RollingUpdate = nil + } + } else { + if x.RollingUpdate == nil { + x.RollingUpdate = new(RollingUpdateDeployment) + } + x.RollingUpdate.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentStrategy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = 
yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv7 := &x.Type + yyv7.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RollingUpdate != nil { + x.RollingUpdate = nil + } + } else { + if x.RollingUpdate == nil { + x.RollingUpdate = new(RollingUpdateDeployment) + } + x.RollingUpdate.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x DeploymentStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *DeploymentStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *RollingUpdateDeployment) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.MaxUnavailable != nil + yyq2[1] = x.MaxSurge != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.MaxUnavailable == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { + } else if !yym4 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxUnavailable) + } else { + z.EncFallback(x.MaxUnavailable) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("maxUnavailable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MaxUnavailable == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxUnavailable) + } else { + z.EncFallback(x.MaxUnavailable) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.MaxSurge == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && 
z.EncExt(x.MaxSurge) { + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxSurge) + } else { + z.EncFallback(x.MaxSurge) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("maxSurge")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MaxSurge == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxSurge) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxSurge) + } else { + z.EncFallback(x.MaxSurge) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RollingUpdateDeployment) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RollingUpdateDeployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "maxUnavailable": + if r.TryDecodeAsNil() { + if x.MaxUnavailable != nil { + x.MaxUnavailable = nil + } + } else { + if x.MaxUnavailable == nil { + x.MaxUnavailable = new(pkg5_intstr.IntOrString) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxUnavailable) + } else { + z.DecFallback(x.MaxUnavailable, false) + } + } + case "maxSurge": + if r.TryDecodeAsNil() { + if x.MaxSurge != nil { + x.MaxSurge = nil + } + } else { + if x.MaxSurge == nil { + x.MaxSurge = new(pkg5_intstr.IntOrString) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxSurge) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxSurge) + } else { + z.DecFallback(x.MaxSurge, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RollingUpdateDeployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + 
} + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.MaxUnavailable != nil { + x.MaxUnavailable = nil + } + } else { + if x.MaxUnavailable == nil { + x.MaxUnavailable = new(pkg5_intstr.IntOrString) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxUnavailable) + } else { + z.DecFallback(x.MaxUnavailable, false) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.MaxSurge != nil { + x.MaxSurge = nil + } + } else { + if x.MaxSurge == nil { + x.MaxSurge = new(pkg5_intstr.IntOrString) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxSurge) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxSurge) + } else { + z.DecFallback(x.MaxSurge, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ObservedGeneration != 0 + yyq2[1] = x.Replicas != 0 + yyq2[2] = x.UpdatedReplicas != 0 + yyq2[3] = x.ReadyReplicas != 0 + yyq2[4] = x.AvailableReplicas != 0 + yyq2[5] = x.UnavailableReplicas != 0 + yyq2[6] = len(x.Conditions) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.UpdatedReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("updatedReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.UpdatedReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.ReadyReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readyReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.ReadyReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(x.UnavailableReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("unavailableReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.UnavailableReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + h.encSliceDeploymentCondition(([]DeploymentCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + h.encSliceDeploymentCondition(([]DeploymentCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "observedGeneration": + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv4 := &x.ObservedGeneration + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } + } + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv6 := &x.Replicas + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "updatedReplicas": + if r.TryDecodeAsNil() { + x.UpdatedReplicas = 0 + } else { + yyv8 := &x.UpdatedReplicas + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "readyReplicas": + if r.TryDecodeAsNil() { + x.ReadyReplicas = 0 + } else { + yyv10 := &x.ReadyReplicas + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "availableReplicas": + if r.TryDecodeAsNil() { + x.AvailableReplicas = 0 + } else { + yyv12 := &x.AvailableReplicas + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(yyv12)) = int32(r.DecodeInt(32)) + } + } + case "unavailableReplicas": + if r.TryDecodeAsNil() { + x.UnavailableReplicas = 0 + } else { + yyv14 := &x.UnavailableReplicas + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } + } + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv16 := &x.Conditions + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + h.decSliceDeploymentCondition((*[]DeploymentCondition)(yyv16), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv19 := &x.ObservedGeneration + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int64)(yyv19)) = int64(r.DecodeInt(64)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv21 := &x.Replicas + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UpdatedReplicas = 0 + } else { + yyv23 := &x.UpdatedReplicas + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadyReplicas = 0 + } else { + yyv25 := &x.ReadyReplicas + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AvailableReplicas = 0 + } else { + yyv27 := &x.AvailableReplicas + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(yyv27)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UnavailableReplicas = 0 + } else { + yyv29 := &x.UnavailableReplicas + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*int32)(yyv29)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv31 := &x.Conditions + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + h.decSliceDeploymentCondition((*[]DeploymentCondition)(yyv31), d) + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x DeploymentConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *DeploymentConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *DeploymentCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf7 := &x.Status + yysf7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf8 := &x.Status + yysf8.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastUpdateTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastUpdateTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastUpdateTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.LastTransitionTime + yym18 := z.EncBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } else if !yym18 && z.IsJSONHandle() { + z.EncJSONMarshal(yy17) + } else { + z.EncFallback(yy17) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "lastUpdateTime": + if r.TryDecodeAsNil() { + x.LastUpdateTime = pkg1_v1.Time{} + } else { + yyv6 := &x.LastUpdateTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "lastTransitionTime": + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg1_v1.Time{} + } else { + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv10 := &x.Reason + yym11 := 
z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv12 := &x.Message + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv15 := &x.Type + yyv15.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv16 := &x.Status + yyv16.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastUpdateTime = pkg1_v1.Time{} + } else { + yyv17 := &x.LastUpdateTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg1_v1.Time{} + } else { + yyv19 := &x.LastTransitionTime + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if yym20 { + z.DecBinaryUnmarshal(yyv19) + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) + } else { + z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv21 := &x.Reason + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv23 := &x.Message + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceDeployment(([]Deployment)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceDeployment(([]Deployment)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} 
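(Reviewer note, not part of the vendored files: the generated CodecEncodeSelf/CodecDecodeSelf methods above implement the ugorji codec Selfer interface, so they are invoked automatically whenever a codec Handle encodes or decodes one of these types. A minimal usage sketch follows; it assumes the vendored client-go layout added in this PR (import path k8s.io/client-go/pkg/apis/apps/v1beta1) and that github.com/ugorji/go/codec is available, and it uses a JSON handle purely for readability.)

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
	appsv1beta1 "k8s.io/client-go/pkg/apis/apps/v1beta1"
)

func main() {
	in := appsv1beta1.DeploymentStatus{Replicas: 3, ReadyReplicas: 2}

	var jh codec.JsonHandle // any codec.Handle works; JSON shown for readability
	var buf []byte

	// Encode dispatches to the generated CodecEncodeSelf because the type
	// implements codec.Selfer.
	if err := codec.NewEncoderBytes(&buf, &jh).Encode(&in); err != nil {
		panic(err)
	}

	var out appsv1beta1.DeploymentStatus
	// Decode likewise dispatches to the generated CodecDecodeSelf.
	if err := codec.NewDecoderBytes(buf, &jh).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%s -> %+v\n", buf, out)
}

(The same round trip works with binary handles such as codec.MsgpackHandle; the generated code branches on z.EncBinary()/z.DecBinary() to pick the binary or JSON path accordingly.)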
+ +func (x *DeploymentList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceDeployment((*[]Deployment)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if 
yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceDeployment((*[]Deployment)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSlicev1_PersistentVolumeClaim(v []pkg3_v1.PersistentVolumeClaim, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicev1_PersistentVolumeClaim(v *[]pkg3_v1.PersistentVolumeClaim, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg3_v1.PersistentVolumeClaim{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 376) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]pkg3_v1.PersistentVolumeClaim, yyrl1) + } + } else { + yyv1 = make([]pkg3_v1.PersistentVolumeClaim, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg3_v1.PersistentVolumeClaim{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, pkg3_v1.PersistentVolumeClaim{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg3_v1.PersistentVolumeClaim{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, pkg3_v1.PersistentVolumeClaim{}) // var yyz1 pkg3_v1.PersistentVolumeClaim + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg3_v1.PersistentVolumeClaim{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true 
+ } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg3_v1.PersistentVolumeClaim{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceStatefulSet(v []StatefulSet, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceStatefulSet(v *[]StatefulSet, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []StatefulSet{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 856) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]StatefulSet, yyrl1) + } + } else { + yyv1 = make([]StatefulSet, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = StatefulSet{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, StatefulSet{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = StatefulSet{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, StatefulSet{}) // var yyz1 StatefulSet + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = StatefulSet{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []StatefulSet{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceDeploymentCondition(v []DeploymentCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceDeploymentCondition(v *[]DeploymentCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []DeploymentCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = 
z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]DeploymentCondition, yyrl1) + } + } else { + yyv1 = make([]DeploymentCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = DeploymentCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, DeploymentCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = DeploymentCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, DeploymentCondition{}) // var yyz1 DeploymentCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = DeploymentCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []DeploymentCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceDeployment(v []Deployment, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Deployment{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 920) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Deployment, yyrl1) + } + } else { + yyv1 = make([]Deployment, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Deployment{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Deployment{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Deployment{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Deployment{}) // var yyz1 Deployment + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Deployment{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) 
+ } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Deployment{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types.go new file mode 100644 index 000000000..a5675d5d3 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types.go @@ -0,0 +1,375 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/pkg/api/v1" +) + +const ( + // StatefulSetInitAnnotation if present, and set to false, indicates that a Pod's readiness should be ignored. + StatefulSetInitAnnotation = "pod.alpha.kubernetes.io/initialized" +) + +// ScaleSpec describes the attributes of a scale subresource +type ScaleSpec struct { + // desired number of instances for the scaled object. + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` +} + +// ScaleStatus represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` + + // label selector for pods that should match the replicas count. This is a serializated + // version of both map-based and more expressive set-based selectors. This is done to + // avoid introspection in the clients. The string will be in the same format as the + // query-param syntax. If the target type only supports map-based selectors, both this + // field and map-based selector field are populated. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` +} + +// +genclient=true +// +noMethods=true + +// Scale represents a scaling request for a resource. +type Scale struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. 
+ // +optional + Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +genclient=true + +// StatefulSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. +type StatefulSet struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired identities of pods in this set. + // +optional + Spec StatefulSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the current status of Pods in this StatefulSet. This data + // may be out of date by some window of time. + // +optional + Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// A StatefulSetSpec is the specification of a StatefulSet. +type StatefulSetSpec struct { + // Replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Selector is a label query over pods that should match the replica count. + // If empty, defaulted to labels on the pod template. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the StatefulSet + // will fulfill this Template, but have a unique identity from the rest + // of the StatefulSet. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // VolumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + // +optional + VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` + + // ServiceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. + ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"` +} + +// StatefulSetStatus represents the current state of a StatefulSet. +type StatefulSetStatus struct { + // most recent generation observed by this StatefulSet. 
+ // +optional + ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // Replicas is the number of actual replicas. + Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` +} + +// StatefulSetList is a collection of StatefulSets. +type StatefulSetList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true + +// Deployment enables declarative updates for Pods and ReplicaSets. +type Deployment struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the Deployment. + // +optional + Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the Deployment. + // +optional + Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +type DeploymentSpec struct { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + + // Template describes the pods that will be created. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 2. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` + + // Indicates that the deployment is paused. + // +optional + Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` + + // The config this deployment is rolling back to. Will be cleared after rollback is done. + // +optional + RollbackTo *RollbackConfig `json:"rollbackTo,omitempty" protobuf:"bytes,8,opt,name=rollbackTo"` + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Once autoRollback is + // implemented, the deployment controller will automatically rollback failed + // deployments. 
Note that progress will not be estimated during the time a + // deployment is paused. Defaults to 600s. + ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` +} + +// DeploymentRollback stores the information required to rollback a deployment. +type DeploymentRollback struct { + metav1.TypeMeta `json:",inline"` + // Required: This must match the Name of a deployment. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // The annotations to be updated to a deployment + // +optional + UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"` + // The config of this deployment rollback. + RollbackTo RollbackConfig `json:"rollbackTo" protobuf:"bytes,3,opt,name=rollbackTo"` +} + +type RollbackConfig struct { + // The revision to rollback to. If set to 0, rollbck to the last revision. + // +optional + Revision int64 `json:"revision,omitempty" protobuf:"varint,1,opt,name=revision"` +} + +const ( + // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added + // to existing RCs (and label key that is added to its pods) to prevent the existing RCs + // to select new pods (and old pods being select by new RC). + DefaultDeploymentUniqueLabelKey string = "pod-template-hash" +) + +// DeploymentStrategy describes how to replace existing pods with new ones. +type DeploymentStrategy struct { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +type DeploymentStrategyType string + +const ( + // Kill all existing pods before creating new ones. + RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" + + // Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one. + RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" +) + +// Spec to control the desired behavior of rolling update. +type RollingUpdateDeployment struct { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 25%. + // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. 
+ // Absolute number is calculated from percentage by rounding up. + // Defaults to 25%. + // Example: when this is set to 30%, the new RC can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is atmost 130% of desired pods. + // +optional + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` +} + +// DeploymentStatus is the most recently observed status of the Deployment. +type DeploymentStatus struct { + // The generation observed by the deployment controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` + + // Total number of ready pods targeted by this deployment. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` + + // Total number of unavailable pods targeted by this deployment. + // +optional + UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` + + // Represents the latest available observations of a deployment's current state. + Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` +} + +type DeploymentConditionType string + +// These are valid conditions of a deployment. +const ( + // Available means the deployment is available, ie. at least the minimum available + // replicas required are up and running for at least minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // Progressing means the deployment is progressing. Progress for a deployment is + // considered when a new replica set is created or adopted, and when new pods scale + // up or old pods scale down. Progress is not estimated for paused deployments or + // when progressDeadlineSeconds is not specified. + DeploymentProgressing DeploymentConditionType = "Progressing" + // ReplicaFailure is added in a deployment when one of its pods fails to be created + // or deleted. + DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + // The last time this condition was updated. 
+ LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// DeploymentList is a list of Deployments. +type DeploymentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Deployments. + Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..44e9f3e45 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,208 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Deployment = map[string]string{ + "": "Deployment enables declarative updates for Pods and ReplicaSets.", + "metadata": "Standard object metadata.", + "spec": "Specification of the desired behavior of the Deployment.", + "status": "Most recently observed status of the Deployment.", +} + +func (Deployment) SwaggerDoc() map[string]string { + return map_Deployment +} + +var map_DeploymentCondition = map[string]string{ + "": "DeploymentCondition describes the state of a deployment at a certain point.", + "type": "Type of deployment condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DeploymentCondition) SwaggerDoc() map[string]string { + return map_DeploymentCondition +} + +var map_DeploymentList = map[string]string{ + "": "DeploymentList is a list of Deployments.", + "metadata": "Standard list metadata.", + "items": "Items is the list of Deployments.", +} + +func (DeploymentList) SwaggerDoc() map[string]string { + return map_DeploymentList +} + +var map_DeploymentRollback = map[string]string{ + "": "DeploymentRollback stores the information required to rollback a deployment.", + "name": "Required: This must match the Name of a deployment.", + "updatedAnnotations": "The annotations to be updated to a deployment", + "rollbackTo": "The config of this deployment rollback.", +} + +func (DeploymentRollback) SwaggerDoc() map[string]string { + return map_DeploymentRollback +} + +var map_DeploymentSpec = map[string]string{ + "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", + "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", + "template": "Template describes the pods that will be created.", + "strategy": "The deployment strategy to use to replace existing pods with new ones.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 2.", + "paused": "Indicates that the deployment is paused.", + "rollbackTo": "The config this deployment is rolling back to. Will be cleared after rollback is done.", + "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Once autoRollback is implemented, the deployment controller will automatically rollback failed deployments. Note that progress will not be estimated during the time a deployment is paused. 
Defaults to 600s.", +} + +func (DeploymentSpec) SwaggerDoc() map[string]string { + return map_DeploymentSpec +} + +var map_DeploymentStatus = map[string]string{ + "": "DeploymentStatus is the most recently observed status of the Deployment.", + "observedGeneration": "The generation observed by the deployment controller.", + "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", + "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "readyReplicas": "Total number of ready pods targeted by this deployment.", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", + "unavailableReplicas": "Total number of unavailable pods targeted by this deployment.", + "conditions": "Represents the latest available observations of a deployment's current state.", +} + +func (DeploymentStatus) SwaggerDoc() map[string]string { + return map_DeploymentStatus +} + +var map_DeploymentStrategy = map[string]string{ + "": "DeploymentStrategy describes how to replace existing pods with new ones.", + "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", + "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", +} + +func (DeploymentStrategy) SwaggerDoc() map[string]string { + return map_DeploymentStrategy +} + +var map_RollbackConfig = map[string]string{ + "revision": "The revision to rollback to. If set to 0, rollback to the last revision.", +} + +func (RollbackConfig) SwaggerDoc() map[string]string { + return map_RollbackConfig +} + +var map_RollingUpdateDeployment = map[string]string{ + "": "Spec to control the desired behavior of rolling update.", + "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", +} + +func (RollingUpdateDeployment) SwaggerDoc() map[string]string { + return map_RollingUpdateDeployment +} + +var map_Scale = map[string]string{ + "": "Scale represents a scaling request for a resource.", + "metadata": "Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale.
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.", +} + +func (Scale) SwaggerDoc() map[string]string { + return map_Scale +} + +var map_ScaleSpec = map[string]string{ + "": "ScaleSpec describes the attributes of a scale subresource", + "replicas": "desired number of instances for the scaled object.", +} + +func (ScaleSpec) SwaggerDoc() map[string]string { + return map_ScaleSpec +} + +var map_ScaleStatus = map[string]string{ + "": "ScaleStatus represents the current status of a scale subresource.", + "replicas": "actual number of observed instances of the scaled object.", + "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "targetSelector": "label selector for pods that should match the replicas count. This is a serialized version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", +} + +func (ScaleStatus) SwaggerDoc() map[string]string { + return map_ScaleStatus +} + +var map_StatefulSet = map[string]string{ + "": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "spec": "Spec defines the desired identities of pods in this set.", + "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", +} + +func (StatefulSet) SwaggerDoc() map[string]string { + return map_StatefulSet +} + +var map_StatefulSetList = map[string]string{ + "": "StatefulSetList is a collection of StatefulSets.", +} + +func (StatefulSetList) SwaggerDoc() map[string]string { + return map_StatefulSetList +} + +var map_StatefulSetSpec = map[string]string{ + "": "A StatefulSetSpec is the specification of a StatefulSet.", + "replicas": "Replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + "selector": "Selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "volumeClaimTemplates": "VolumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
A claim in this list takes precedence over any volumes in the template, with the same name.", + "serviceName": "ServiceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", +} + +func (StatefulSetSpec) SwaggerDoc() map[string]string { + return map_StatefulSetSpec +} + +var map_StatefulSetStatus = map[string]string{ + "": "StatefulSetStatus represents the current state of a StatefulSet.", + "observedGeneration": "most recent generation observed by this StatefulSet.", + "replicas": "Replicas is the number of actual replicas.", +} + +func (StatefulSetStatus) SwaggerDoc() map[string]string { + return map_StatefulSetStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.conversion.go new file mode 100644 index 000000000..9226c82d9 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.conversion.go @@ -0,0 +1,166 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/client-go/pkg/api" + api_v1 "k8s.io/client-go/pkg/api/v1" + apps "k8s.io/client-go/pkg/apis/apps" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_StatefulSet_To_apps_StatefulSet, + Convert_apps_StatefulSet_To_v1beta1_StatefulSet, + Convert_v1beta1_StatefulSetList_To_apps_StatefulSetList, + Convert_apps_StatefulSetList_To_v1beta1_StatefulSetList, + Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec, + Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec, + Convert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus, + Convert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus, + ) +} + +func autoConvert_v1beta1_StatefulSet_To_apps_StatefulSet(in *StatefulSet, out *apps.StatefulSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_StatefulSet_To_apps_StatefulSet(in *StatefulSet, out *apps.StatefulSet, s conversion.Scope) error { + return autoConvert_v1beta1_StatefulSet_To_apps_StatefulSet(in, out, s) +} + +func autoConvert_apps_StatefulSet_To_v1beta1_StatefulSet(in *apps.StatefulSet, out *StatefulSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_apps_StatefulSet_To_v1beta1_StatefulSet(in *apps.StatefulSet, out *StatefulSet, s conversion.Scope) error { + return autoConvert_apps_StatefulSet_To_v1beta1_StatefulSet(in, out, s) +} + +func autoConvert_v1beta1_StatefulSetList_To_apps_StatefulSetList(in *StatefulSetList, out *apps.StatefulSetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]apps.StatefulSet, len(*in)) + for i := range *in { + if err := Convert_v1beta1_StatefulSet_To_apps_StatefulSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1beta1_StatefulSetList_To_apps_StatefulSetList(in *StatefulSetList, out *apps.StatefulSetList, s conversion.Scope) error { + return autoConvert_v1beta1_StatefulSetList_To_apps_StatefulSetList(in, out, s) +} + +func autoConvert_apps_StatefulSetList_To_v1beta1_StatefulSetList(in *apps.StatefulSetList, out *StatefulSetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StatefulSet, len(*in)) + for i := range *in { + if err := Convert_apps_StatefulSet_To_v1beta1_StatefulSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]StatefulSet, 0) + } + return nil +} + +func Convert_apps_StatefulSetList_To_v1beta1_StatefulSetList(in *apps.StatefulSetList, out *StatefulSetList, s conversion.Scope) error { + return autoConvert_apps_StatefulSetList_To_v1beta1_StatefulSetList(in, out, s) +} + +func autoConvert_v1beta1_StatefulSetSpec_To_apps_StatefulSetSpec(in *StatefulSetSpec, out *apps.StatefulSetSpec, s conversion.Scope) error { + if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + 
if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + out.VolumeClaimTemplates = *(*[]api.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates)) + out.ServiceName = in.ServiceName + return nil +} + +func autoConvert_apps_StatefulSetSpec_To_v1beta1_StatefulSetSpec(in *apps.StatefulSetSpec, out *StatefulSetSpec, s conversion.Scope) error { + if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + out.VolumeClaimTemplates = *(*[]api_v1.PersistentVolumeClaim)(unsafe.Pointer(&in.VolumeClaimTemplates)) + out.ServiceName = in.ServiceName + return nil +} + +func autoConvert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus(in *StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error { + out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) + out.Replicas = in.Replicas + return nil +} + +func Convert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus(in *StatefulSetStatus, out *apps.StatefulSetStatus, s conversion.Scope) error { + return autoConvert_v1beta1_StatefulSetStatus_To_apps_StatefulSetStatus(in, out, s) +} + +func autoConvert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus(in *apps.StatefulSetStatus, out *StatefulSetStatus, s conversion.Scope) error { + out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) + out.Replicas = in.Replicas + return nil +} + +func Convert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus(in *apps.StatefulSetStatus, out *StatefulSetStatus, s conversion.Scope) error { + return autoConvert_apps_StatefulSetStatus_To_v1beta1_StatefulSetStatus(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..d9b9d3d3c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,355 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" + api_v1 "k8s.io/client-go/pkg/api/v1" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Deployment, InType: reflect.TypeOf(&Deployment{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentCondition, InType: reflect.TypeOf(&DeploymentCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentList, InType: reflect.TypeOf(&DeploymentList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentRollback, InType: reflect.TypeOf(&DeploymentRollback{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentSpec, InType: reflect.TypeOf(&DeploymentSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentStatus, InType: reflect.TypeOf(&DeploymentStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentStrategy, InType: reflect.TypeOf(&DeploymentStrategy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollbackConfig, InType: reflect.TypeOf(&RollbackConfig{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollingUpdateDeployment, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Scale, InType: reflect.TypeOf(&Scale{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StatefulSet, InType: reflect.TypeOf(&StatefulSet{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StatefulSetList, InType: reflect.TypeOf(&StatefulSetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StatefulSetSpec, InType: reflect.TypeOf(&StatefulSetSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StatefulSetStatus, InType: reflect.TypeOf(&StatefulSetStatus{})}, + ) +} + +func DeepCopy_v1beta1_Deployment(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Deployment) + out := out.(*Deployment) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_DeploymentStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentCondition) + out := out.(*DeploymentCondition) + *out = *in + out.LastUpdateTime = in.LastUpdateTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_v1beta1_DeploymentList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentList) + out := out.(*DeploymentList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_Deployment(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentRollback(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentRollback) + out := out.(*DeploymentRollback) + *out = *in + if in.UpdatedAnnotations != nil { + in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations + *out = 
make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentSpec) + out := out.(*DeploymentSpec) + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, c); err != nil { + return err + } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.RollbackTo != nil { + in, out := &in.RollbackTo, &out.RollbackTo + *out = new(RollbackConfig) + **out = **in + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentStatus) + out := out.(*DeploymentStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_DeploymentCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentStrategy) + out := out.(*DeploymentStrategy) + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDeployment) + if err := DeepCopy_v1beta1_RollingUpdateDeployment(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1beta1_RollbackConfig(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollbackConfig) + out := out.(*RollbackConfig) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_RollingUpdateDeployment(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollingUpdateDeployment) + out := out.(*RollingUpdateDeployment) + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in + } + return nil + } +} + +func DeepCopy_v1beta1_Scale(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Scale) + out := out.(*Scale) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_ScaleStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_ScaleSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleSpec) + out := out.(*ScaleSpec) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_ScaleStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleStatus) + out := 
out.(*ScaleStatus) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_v1beta1_StatefulSet(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatefulSet) + out := out.(*StatefulSet) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_StatefulSetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_StatefulSetStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_StatefulSetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatefulSetList) + out := out.(*StatefulSetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StatefulSet, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_StatefulSet(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_StatefulSetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatefulSetSpec) + out := out.(*StatefulSetSpec) + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]api_v1.PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := api_v1.DeepCopy_v1_PersistentVolumeClaim(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_StatefulSetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatefulSetStatus) + out := out.(*StatefulSetStatus) + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.defaults.go new file mode 100644 index 000000000..9b822c84c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/v1beta1/zz_generated.defaults.go @@ -0,0 +1,326 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! 
+ +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/client-go/pkg/api/v1" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&Deployment{}, func(obj interface{}) { SetObjectDefaults_Deployment(obj.(*Deployment)) }) + scheme.AddTypeDefaultingFunc(&DeploymentList{}, func(obj interface{}) { SetObjectDefaults_DeploymentList(obj.(*DeploymentList)) }) + scheme.AddTypeDefaultingFunc(&StatefulSet{}, func(obj interface{}) { SetObjectDefaults_StatefulSet(obj.(*StatefulSet)) }) + scheme.AddTypeDefaultingFunc(&StatefulSetList{}, func(obj interface{}) { SetObjectDefaults_StatefulSetList(obj.(*StatefulSetList)) }) + return nil +} + +func SetObjectDefaults_Deployment(in *Deployment) { + SetDefaults_Deployment(in) + v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + v1.SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet 
!= nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_DeploymentList(in *DeploymentList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Deployment(a) + } +} + +func SetObjectDefaults_StatefulSet(in *StatefulSet) { + SetDefaults_StatefulSet(in) + v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + v1.SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if 
a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.VolumeClaimTemplates { + a := &in.Spec.VolumeClaimTemplates[i] + v1.SetDefaults_PersistentVolumeClaim(a) + v1.SetDefaults_ResourceList(&a.Spec.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Spec.Resources.Requests) + v1.SetDefaults_ResourceList(&a.Status.Capacity) + } +} + +func SetObjectDefaults_StatefulSetList(in *StatefulSetList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_StatefulSet(a) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/apps/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/apps/zz_generated.deepcopy.go new file mode 100644 index 000000000..5048531ca --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/apps/zz_generated.deepcopy.go @@ -0,0 +1,125 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package apps + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/client-go/pkg/api" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_apps_StatefulSet, InType: reflect.TypeOf(&StatefulSet{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_apps_StatefulSetList, InType: reflect.TypeOf(&StatefulSetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_apps_StatefulSetSpec, InType: reflect.TypeOf(&StatefulSetSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_apps_StatefulSetStatus, InType: reflect.TypeOf(&StatefulSetStatus{})}, + ) +} + +func DeepCopy_apps_StatefulSet(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatefulSet) + out := out.(*StatefulSet) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_apps_StatefulSetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_apps_StatefulSetStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_apps_StatefulSetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatefulSetList) + out := out.(*StatefulSetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StatefulSet, len(*in)) + for i := range *in { + if err := DeepCopy_apps_StatefulSet(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_apps_StatefulSetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatefulSetSpec) + out := out.(*StatefulSetSpec) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]api.PersistentVolumeClaim, len(*in)) + for i := range *in { + if err := api.DeepCopy_api_PersistentVolumeClaim(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_apps_StatefulSetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StatefulSetStatus) + out := out.(*StatefulSetStatus) + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/OWNERS b/vendor/k8s.io/client-go/pkg/apis/authentication/OWNERS new file mode 100755 index 000000000..4135522b2 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/OWNERS @@ -0,0 +1,9 @@ +reviewers: +- liggitt +- lavalamp +- wojtek-t +- deads2k +- sttts +- timothysc +- mbohlool +- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/doc.go b/vendor/k8s.io/client-go/pkg/apis/authentication/doc.go new file mode 
100644 index 000000000..194de434d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=authentication.k8s.io +package authentication diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/install/install.go b/vendor/k8s.io/client-go/pkg/apis/authentication/install/install.go new file mode 100644 index 000000000..b8a9521a6 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/install/install.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/authentication" + "k8s.io/client-go/pkg/apis/authentication/v1" + "k8s.io/client-go/pkg/apis/authentication/v1beta1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: authentication.GroupName, + VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version, v1beta1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/authentication", + RootScopedKinds: sets.NewString("TokenReview"), + AddInternalObjectsToScheme: authentication.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, + v1.SchemeGroupVersion.Version: v1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/register.go b/vendor/k8s.io/client-go/pkg/apis/authentication/register.go new file mode 100644 index 000000000..b0ac3c28b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "authentication.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &TokenReview{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/types.go b/vendor/k8s.io/client-go/pkg/apis/authentication/types.go new file mode 100644 index 000000000..9c1e66b7b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/types.go @@ -0,0 +1,89 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // ImpersonateUserHeader is used to impersonate a particular user during an API server request + ImpersonateUserHeader = "Impersonate-User" + + // ImpersonateGroupHeader is used to impersonate a particular group during an API server request. + // It can be repeated multiple times for multiple groups. + ImpersonateGroupHeader = "Impersonate-Group" + + // ImpersonateUserExtraHeaderPrefix is a prefix for any header used to impersonate an entry in the + // extra map[string][]string for user.Info. The key will be everything after the prefix. + // It can be repeated multiple times for multiple map keys and the same key can be repeated multiple + // times to have multiple elements in the slice under a single key + ImpersonateUserExtraHeaderPrefix = "Impersonate-Extra-" +) + +// +genclient=true +// +nonNamespaced=true +// +noMethods=true + +// TokenReview attempts to authenticate a token to a known user.
+type TokenReview struct { + metav1.TypeMeta + // ObjectMeta fulfills the metav1.ObjectMetaAccessor interface so that the stock + // REST handler paths work + metav1.ObjectMeta + + // Spec holds information about the request being evaluated + Spec TokenReviewSpec + + // Status is filled in by the server and indicates whether the request can be authenticated. + Status TokenReviewStatus +} + +// TokenReviewSpec is a description of the token authentication request. +type TokenReviewSpec struct { + // Token is the opaque bearer token. + Token string +} + +// TokenReviewStatus is the result of the token authentication request. +// This type mirrors the authentication.Token interface +type TokenReviewStatus struct { + // Authenticated indicates that the token was associated with a known user. + Authenticated bool + // User is the UserInfo associated with the provided token. + User UserInfo + // Error indicates that the token couldn't be checked + Error string +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +type UserInfo struct { + // The name that uniquely identifies this user among all active users. + Username string + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + UID string + // The names of groups this user is a part of. + Groups []string + // Any additional information provided by the authenticator. + Extra map[string]ExtraValue +} + +// ExtraValue masks the value so protobuf can generate +type ExtraValue []string diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/conversion.go new file mode 100644 index 000000000..2ff5732d6 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/conversion.go @@ -0,0 +1,26 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + return scheme.AddConversionFuncs() +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/defaults.go new file mode 100644 index 000000000..d63d91754 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/defaults.go @@ -0,0 +1,25 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return scheme.AddDefaultingFuncs() +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/doc.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/doc.go new file mode 100644 index 000000000..8140e47c5 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=authentication.k8s.io +package v1 diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.pb.go new file mode 100644 index 000000000..e3dfb9d3a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.pb.go @@ -0,0 +1,1281 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/authentication/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/authentication/v1/generated.proto + + It has these top-level messages: + ExtraValue + TokenReview + TokenReviewSpec + TokenReviewStatus + UserInfo +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *TokenReview) Reset() { *m = TokenReview{} } +func (*TokenReview) ProtoMessage() {} +func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } +func (*TokenReviewSpec) ProtoMessage() {} +func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } +func (*TokenReviewStatus) ProtoMessage() {} +func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *UserInfo) Reset() { *m = UserInfo{} } +func (*UserInfo) ProtoMessage() {} +func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func init() { + proto.RegisterType((*ExtraValue)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.ExtraValue") + proto.RegisterType((*TokenReview)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.TokenReview") + proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.TokenReviewSpec") + proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.TokenReviewStatus") + proto.RegisterType((*UserInfo)(nil), "k8s.io.client-go.pkg.apis.authentication.v1.UserInfo") +} +func (m ExtraValue) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m ExtraValue) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *TokenReview) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *TokenReview) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *TokenReviewSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *TokenReviewSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Token))) + i += copy(data[i:], m.Token) + return i, nil +} + +func (m *TokenReviewStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil 
{ + return nil, err + } + return data[:n], nil +} + +func (m *TokenReviewStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Authenticated { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.User.Size())) + n4, err := m.User.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Error))) + i += copy(data[i:], m.Error) + return i, nil +} + +func (m *UserInfo) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *UserInfo) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Username))) + i += copy(data[i:], m.Username) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.UID))) + i += copy(data[i:], m.UID) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Extra) > 0 { + for k := range m.Extra { + data[i] = 0x22 + i++ + v := m.Extra[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n5, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + } + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m ExtraValue) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TokenReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TokenReviewSpec) Size() (n int) { + var l int + _ = l + l = len(m.Token) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TokenReviewStatus) Size() (n int) { + var l int + _ = l + n += 2 + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *UserInfo) Size() (n int) { + var l int + _ = l + l = len(m.Username) + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *TokenReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TokenReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReviewSpec{`, + `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `}`, + }, "") + return s +} +func (this *TokenReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReviewStatus{`, + `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, + `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *UserInfo) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&UserInfo{`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ExtraValue) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) 
+ } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReview) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReviewSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReviewStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Authenticated", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Authenticated = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.User.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserInfo) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + m.Extra[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 655 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x53, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0xb5, 0xf3, 0x53, 0x25, 0x93, 0xaf, 0x1f, 0x65, 0x24, 0xa4, 0x28, 0x12, 0x4e, 0x14, 0x58, + 0x74, 0x51, 0xc6, 0xa4, 0xa0, 0x52, 0x15, 0x10, 0xaa, 0x45, 0x85, 0xba, 0x00, 0xa4, 0x81, 0x22, + 0xc4, 0x06, 0x26, 0xce, 0xad, 0x63, 0x52, 0xff, 0x68, 0x3c, 0x36, 0xed, 0xae, 0x8f, 0xc0, 0x92, + 0x25, 0xaf, 0xc1, 0x1b, 0x74, 0x47, 0x77, 0xb0, 0x40, 0x15, 0x0d, 0x2f, 0x82, 0x66, 0x3c, 0xd4, + 0x2e, 0x69, 0x85, 0xda, 0xdd, 0xcc, 0x99, 0x7b, 0xce, 0xbd, 0xe7, 0xde, 0xb9, 0xe8, 0xc1, 0x64, + 0x35, 0x21, 0x7e, 0x64, 0x4f, 0xd2, 0x21, 0xf0, 0x10, 0x04, 0x24, 0x76, 0x3c, 0xf1, 0x6c, 0x16, + 0xfb, 0x89, 0xcd, 0x52, 0x31, 0x86, 0x50, 0xf8, 0x2e, 0x13, 0x7e, 0x14, 0xda, 0xd9, 0xc0, 0xf6, + 0x20, 0x04, 0xce, 0x04, 0x8c, 0x48, 0xcc, 0x23, 0x11, 0xe1, 0xa5, 0x9c, 0x4d, 0x0a, 0x36, 0x89, + 0x27, 0x1e, 0x91, 0x6c, 0x72, 0x9a, 0x4d, 0xb2, 0x41, 0xe7, 0x96, 0xe7, 0x8b, 0x71, 0x3a, 0x24, + 0x6e, 0x14, 0xd8, 0x5e, 0xe4, 0x45, 0xb6, 0x12, 0x19, 0xa6, 0xdb, 0xea, 0xa6, 0x2e, 0xea, 0x94, + 0x8b, 0x77, 0xee, 0xea, 0xd2, 0x58, 0xec, 0x07, 0xcc, 0x1d, 0xfb, 0x21, 0xf0, 0xbd, 0xa2, 0xb8, + 0x00, 0x04, 0x3b, 0xa3, 0xa4, 0x8e, 0x7d, 0x1e, 0x8b, 0xa7, 0xa1, 0xf0, 0x03, 0x98, 0x21, 0xac, + 0xfc, 0x8b, 0x90, 0xb8, 0x63, 0x08, 0xd8, 0x0c, 0xef, 0xce, 0x79, 0xbc, 0x54, 0xf8, 0x3b, 0xb6, + 0x1f, 0x8a, 0x44, 0xf0, 0x19, 0x52, 0xc9, 0x53, 0x02, 0x3c, 0x03, 0x5e, 0x18, 0x82, 0x5d, 0x16, + 0xc4, 0x3b, 0x70, 0x86, 0xa7, 0xfe, 0x3d, 0x84, 0x36, 0x76, 0x05, 0x67, 0xaf, 0xd8, 0x4e, 0x0a, + 0xb8, 0x8b, 0xea, 0xbe, 0x80, 0x20, 0x69, 0x9b, 0xbd, 0xea, 0x62, 0xd3, 0x69, 0x4e, 0x8f, 0xba, + 0xf5, 0x4d, 0x09, 0xd0, 0x1c, 0x5f, 0x6b, 0x7c, 0xfa, 0xdc, 0x35, 0xf6, 0x7f, 0xf4, 0x8c, 0xfe, + 0x97, 0x0a, 0x6a, 0xbd, 0x8c, 0x26, 0x10, 0x52, 0xc8, 0x7c, 0xf8, 0x80, 0xdf, 0xa1, 0x86, 0xec, + 0xdb, 0x88, 0x09, 0xd6, 0x36, 0x7b, 0xe6, 0x62, 0x6b, 0xf9, 0x36, 0xd1, 0x23, 0x2c, 0xdb, 0x28, + 0x86, 0x28, 0xa3, 0x49, 0x36, 0x20, 0xcf, 0x87, 0xef, 0xc1, 0x15, 0x4f, 0x41, 0x30, 0x07, 0x1f, + 0x1c, 0x75, 0x8d, 0xe9, 0x51, 0x17, 0x15, 0x18, 0x3d, 0x51, 0xc5, 0x6f, 0x51, 0x2d, 0x89, 0xc1, + 0x6d, 0x57, 0x94, 0xfa, 0x43, 0x72, 0x91, 0x0f, 0x42, 0x4a, 0xa5, 0xbe, 0x88, 0xc1, 0x75, 0xfe, + 0xd3, 0xa9, 0x6a, 0xf2, 0x46, 0x95, 0x30, 0xf6, 0xd0, 0x5c, 0x22, 0x98, 0x48, 0x93, 0x76, 0x55, + 0xa5, 
0x78, 0x74, 0xf9, 0x14, 0x4a, 0xc6, 0xf9, 0x5f, 0x27, 0x99, 0xcb, 0xef, 0x54, 0xcb, 0xf7, + 0x57, 0xd0, 0x95, 0xbf, 0xea, 0xc1, 0x37, 0x50, 0x5d, 0x48, 0x48, 0xf5, 0xae, 0xe9, 0xcc, 0x6b, + 0x66, 0x3d, 0x8f, 0xcb, 0xdf, 0xfa, 0x5f, 0x4d, 0x74, 0x75, 0x26, 0x0b, 0xbe, 0x8f, 0xe6, 0x4b, + 0xc5, 0xc0, 0x48, 0x49, 0x34, 0x9c, 0x6b, 0x5a, 0x62, 0x7e, 0xbd, 0xfc, 0x48, 0x4f, 0xc7, 0xe2, + 0xd7, 0xa8, 0x96, 0x26, 0xc0, 0x75, 0x53, 0x57, 0x2e, 0xe6, 0x78, 0x2b, 0x01, 0xbe, 0x19, 0x6e, + 0x47, 0x45, 0x37, 0x25, 0x42, 0x95, 0xa2, 0x74, 0x04, 0x9c, 0x47, 0x5c, 0x35, 0xb3, 0xe4, 0x68, + 0x43, 0x82, 0x34, 0x7f, 0xeb, 0x7f, 0xab, 0xa0, 0xc6, 0x1f, 0x15, 0xbc, 0x84, 0x1a, 0x92, 0x19, + 0xb2, 0x00, 0x74, 0x1b, 0x16, 0x34, 0x49, 0xc5, 0x48, 0x9c, 0x9e, 0x44, 0xe0, 0xeb, 0xa8, 0x9a, + 0xfa, 0x23, 0x55, 0x78, 0xd3, 0x69, 0xe9, 0xc0, 0xea, 0xd6, 0xe6, 0x63, 0x2a, 0x71, 0xdc, 0x47, + 0x73, 0x1e, 0x8f, 0xd2, 0x58, 0x0e, 0x53, 0xfe, 0x65, 0x24, 0xe7, 0xf0, 0x44, 0x21, 0x54, 0xbf, + 0xe0, 0x6d, 0x54, 0x07, 0xf9, 0xf9, 0xdb, 0xb5, 0x5e, 0x75, 0xb1, 0xb5, 0xbc, 0x7e, 0x39, 0xf7, + 0x44, 0x2d, 0xd0, 0x46, 0x28, 0xf8, 0x5e, 0xc9, 0xa5, 0xc4, 0x68, 0x2e, 0xdf, 0xe1, 0x7a, 0xc9, + 0x54, 0x0c, 0x5e, 0x40, 0xd5, 0x09, 0xec, 0xe5, 0x0e, 0xa9, 0x3c, 0xe2, 0x67, 0xa8, 0x9e, 0xc9, + 0xfd, 0xd3, 0x53, 0x58, 0xbd, 0x58, 0x1d, 0xc5, 0xfe, 0xd2, 0x5c, 0x66, 0xad, 0xb2, 0x6a, 0x3a, + 0x37, 0x0f, 0x8e, 0x2d, 0xe3, 0xf0, 0xd8, 0x32, 0xbe, 0x1f, 0x5b, 0xc6, 0xfe, 0xd4, 0x32, 0x0f, + 0xa6, 0x96, 0x79, 0x38, 0xb5, 0xcc, 0x9f, 0x53, 0xcb, 0xfc, 0xf8, 0xcb, 0x32, 0xde, 0x54, 0xb2, + 0xc1, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbb, 0x6b, 0x11, 0x20, 0xa4, 0x05, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.proto new file mode 100644 index 000000000..ea5203d37 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/generated.proto @@ -0,0 +1,100 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.authentication.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message ExtraValue { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// TokenReview attempts to authenticate a token to a known user. +// Note: TokenReview requests may be cached by the webhook token authenticator +// plugin in the kube-apiserver. 
+message TokenReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated + optional TokenReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request can be authenticated. + // +optional + optional TokenReviewStatus status = 3; +} + +// TokenReviewSpec is a description of the token authentication request. +message TokenReviewSpec { + // Token is the opaque bearer token. + // +optional + optional string token = 1; +} + +// TokenReviewStatus is the result of the token authentication request. +message TokenReviewStatus { + // Authenticated indicates that the token was associated with a known user. + // +optional + optional bool authenticated = 1; + + // User is the UserInfo associated with the provided token. + // +optional + optional UserInfo user = 2; + + // Error indicates that the token couldn't be checked + // +optional + optional string error = 3; +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +message UserInfo { + // The name that uniquely identifies this user among all active users. + // +optional + optional string username = 1; + + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + // +optional + optional string uid = 2; + + // The names of groups this user is a part of. + // +optional + repeated string groups = 3; + + // Any additional information provided by the authenticator. + // +optional + map<string, ExtraValue> extra = 4; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/register.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/register.go new file mode 100644 index 000000000..8661169af --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/register.go @@ -0,0 +1,48 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "authentication.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &TokenReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types.go new file mode 100644 index 000000000..e6ff58705 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types.go @@ -0,0 +1,91 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient=true +// +nonNamespaced=true +// +noMethods=true + +// TokenReview attempts to authenticate a token to a known user. +// Note: TokenReview requests may be cached by the webhook token authenticator +// plugin in the kube-apiserver. +type TokenReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated + Spec TokenReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request can be authenticated. + // +optional + Status TokenReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// TokenReviewSpec is a description of the token authentication request. +type TokenReviewSpec struct { + // Token is the opaque bearer token. + // +optional + Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` +} + +// TokenReviewStatus is the result of the token authentication request. +type TokenReviewStatus struct { + // Authenticated indicates that the token was associated with a known user. + // +optional + Authenticated bool `json:"authenticated,omitempty" protobuf:"varint,1,opt,name=authenticated"` + // User is the UserInfo associated with the provided token. + // +optional + User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // Error indicates that the token couldn't be checked + // +optional + Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +type UserInfo struct { + // The name that uniquely identifies this user among all active users. + // +optional + Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"` + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + // +optional + UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"` + // The names of groups this user is a part of. + // +optional + Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` + // Any additional information provided by the authenticator. 
+ // +optional + Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,4,rep,name=extra"` +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type ExtraValue []string + +func (t ExtraValue) String() string { + return fmt.Sprintf("%v", []string(t)) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000..bb235e4ea --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/types_swagger_doc_generated.go @@ -0,0 +1,72 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_TokenReview = map[string]string{ + "": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", + "spec": "Spec holds information about the request being evaluated", + "status": "Status is filled in by the server and indicates whether the request can be authenticated.", +} + +func (TokenReview) SwaggerDoc() map[string]string { + return map_TokenReview +} + +var map_TokenReviewSpec = map[string]string{ + "": "TokenReviewSpec is a description of the token authentication request.", + "token": "Token is the opaque bearer token.", +} + +func (TokenReviewSpec) SwaggerDoc() map[string]string { + return map_TokenReviewSpec +} + +var map_TokenReviewStatus = map[string]string{ + "": "TokenReviewStatus is the result of the token authentication request.", + "authenticated": "Authenticated indicates that the token was associated with a known user.", + "user": "User is the UserInfo associated with the provided token.", + "error": "Error indicates that the token couldn't be checked", +} + +func (TokenReviewStatus) SwaggerDoc() map[string]string { + return map_TokenReviewStatus +} + +var map_UserInfo = map[string]string{ + "": "UserInfo holds the information about the user needed to implement the user.Info interface.", + "username": "The name that uniquely identifies this user among all active users.", + "uid": "A unique value that identifies this user across time. 
If this user is deleted and another user by the same name is added, they will have different UIDs.", + "groups": "The names of groups this user is a part of.", + "extra": "Any additional information provided by the authenticator.", +} + +func (UserInfo) SwaggerDoc() map[string]string { + return map_UserInfo +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.conversion.go new file mode 100644 index 000000000..9c1335e91 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.conversion.go @@ -0,0 +1,145 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + authentication "k8s.io/client-go/pkg/apis/authentication" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_TokenReview_To_authentication_TokenReview, + Convert_authentication_TokenReview_To_v1_TokenReview, + Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec, + Convert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec, + Convert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus, + Convert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus, + Convert_v1_UserInfo_To_authentication_UserInfo, + Convert_authentication_UserInfo_To_v1_UserInfo, + ) +} + +func autoConvert_v1_TokenReview_To_authentication_TokenReview(in *TokenReview, out *authentication.TokenReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_TokenReview_To_authentication_TokenReview(in *TokenReview, out *authentication.TokenReview, s conversion.Scope) error { + return autoConvert_v1_TokenReview_To_authentication_TokenReview(in, out, s) +} + +func autoConvert_authentication_TokenReview_To_v1_TokenReview(in *authentication.TokenReview, out *TokenReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authentication_TokenReview_To_v1_TokenReview(in 
*authentication.TokenReview, out *TokenReview, s conversion.Scope) error { + return autoConvert_authentication_TokenReview_To_v1_TokenReview(in, out, s) +} + +func autoConvert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error { + out.Token = in.Token + return nil +} + +func Convert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error { + return autoConvert_v1_TokenReviewSpec_To_authentication_TokenReviewSpec(in, out, s) +} + +func autoConvert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { + out.Token = in.Token + return nil +} + +func Convert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { + return autoConvert_authentication_TokenReviewSpec_To_v1_TokenReviewSpec(in, out, s) +} + +func autoConvert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *TokenReviewStatus, out *authentication.TokenReviewStatus, s conversion.Scope) error { + out.Authenticated = in.Authenticated + if err := Convert_v1_UserInfo_To_authentication_UserInfo(&in.User, &out.User, s); err != nil { + return err + } + out.Error = in.Error + return nil +} + +func Convert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *TokenReviewStatus, out *authentication.TokenReviewStatus, s conversion.Scope) error { + return autoConvert_v1_TokenReviewStatus_To_authentication_TokenReviewStatus(in, out, s) +} + +func autoConvert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(in *authentication.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { + out.Authenticated = in.Authenticated + if err := Convert_authentication_UserInfo_To_v1_UserInfo(&in.User, &out.User, s); err != nil { + return err + } + out.Error = in.Error + return nil +} + +func Convert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(in *authentication.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { + return autoConvert_authentication_TokenReviewStatus_To_v1_TokenReviewStatus(in, out, s) +} + +func autoConvert_v1_UserInfo_To_authentication_UserInfo(in *UserInfo, out *authentication.UserInfo, s conversion.Scope) error { + out.Username = in.Username + out.UID = in.UID + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]authentication.ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_v1_UserInfo_To_authentication_UserInfo(in *UserInfo, out *authentication.UserInfo, s conversion.Scope) error { + return autoConvert_v1_UserInfo_To_authentication_UserInfo(in, out, s) +} + +func autoConvert_authentication_UserInfo_To_v1_UserInfo(in *authentication.UserInfo, out *UserInfo, s conversion.Scope) error { + out.Username = in.Username + out.UID = in.UID + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_authentication_UserInfo_To_v1_UserInfo(in *authentication.UserInfo, out *UserInfo, s conversion.Scope) error { + return autoConvert_authentication_UserInfo_To_v1_UserInfo(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..0bc564067 --- /dev/null 
+++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.deepcopy.go @@ -0,0 +1,106 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_TokenReview, InType: reflect.TypeOf(&TokenReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_TokenReviewSpec, InType: reflect.TypeOf(&TokenReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_TokenReviewStatus, InType: reflect.TypeOf(&TokenReviewStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_UserInfo, InType: reflect.TypeOf(&UserInfo{})}, + ) +} + +func DeepCopy_v1_TokenReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TokenReview) + out := out.(*TokenReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_TokenReviewStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_TokenReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TokenReviewSpec) + out := out.(*TokenReviewSpec) + *out = *in + return nil + } +} + +func DeepCopy_v1_TokenReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TokenReviewStatus) + out := out.(*TokenReviewStatus) + *out = *in + if err := DeepCopy_v1_UserInfo(&in.User, &out.User, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_UserInfo(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*UserInfo) + out := out.(*UserInfo) + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*ExtraValue) + } + } + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.defaults.go new file mode 100644 index 000000000..6df448eb9 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 
The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/conversion.go new file mode 100644 index 000000000..51f3adfc7 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/conversion.go @@ -0,0 +1,26 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + return scheme.AddConversionFuncs() +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/defaults.go new file mode 100644 index 000000000..1a4566479 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/defaults.go @@ -0,0 +1,25 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return scheme.AddDefaultingFuncs() +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/doc.go new file mode 100644 index 000000000..342f20126 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=authentication.k8s.io +package v1beta1 diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/generated.pb.go new file mode 100644 index 000000000..760416e63 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/generated.pb.go @@ -0,0 +1,1282 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/authentication/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/authentication/v1beta1/generated.proto + + It has these top-level messages: + ExtraValue + TokenReview + TokenReviewSpec + TokenReviewStatus + UserInfo +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *TokenReview) Reset() { *m = TokenReview{} } +func (*TokenReview) ProtoMessage() {} +func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } +func (*TokenReviewSpec) ProtoMessage() {} +func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } +func (*TokenReviewStatus) ProtoMessage() {} +func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *UserInfo) Reset() { *m = UserInfo{} } +func (*UserInfo) ProtoMessage() {} +func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func init() { + proto.RegisterType((*ExtraValue)(nil), "k8s.io.client-go.pkg.apis.authentication.v1beta1.ExtraValue") + proto.RegisterType((*TokenReview)(nil), "k8s.io.client-go.pkg.apis.authentication.v1beta1.TokenReview") + proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.client-go.pkg.apis.authentication.v1beta1.TokenReviewSpec") + proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.client-go.pkg.apis.authentication.v1beta1.TokenReviewStatus") + proto.RegisterType((*UserInfo)(nil), "k8s.io.client-go.pkg.apis.authentication.v1beta1.UserInfo") +} +func (m ExtraValue) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m ExtraValue) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *TokenReview) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *TokenReview) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *TokenReviewSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *TokenReviewSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Token))) + i += copy(data[i:], m.Token) + return i, nil +} + +func (m *TokenReviewStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := 
m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *TokenReviewStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Authenticated { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.User.Size())) + n4, err := m.User.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Error))) + i += copy(data[i:], m.Error) + return i, nil +} + +func (m *UserInfo) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *UserInfo) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Username))) + i += copy(data[i:], m.Username) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.UID))) + i += copy(data[i:], m.UID) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Extra) > 0 { + for k := range m.Extra { + data[i] = 0x22 + i++ + v := m.Extra[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n5, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + } + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m ExtraValue) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TokenReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TokenReviewSpec) Size() (n int) { + var l int + _ = l + l = len(m.Token) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TokenReviewStatus) Size() (n int) { + var l int + _ = l + n += 2 + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *UserInfo) Size() (n int) { + var l int + _ = l + l = len(m.Username) 
+ n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *TokenReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TokenReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReviewSpec{`, + `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `}`, + }, "") + return s +} +func (this *TokenReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReviewStatus{`, + `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, + `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *UserInfo) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&UserInfo{`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ExtraValue) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReview) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReviewSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReviewStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Authenticated", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Authenticated = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.User.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserInfo) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + m.Extra[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 668 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x53, 0x4d, 0x6f, 0xd3, 0x4a, + 0x14, 0x8d, 0xf3, 0xd1, 0x97, 0x4c, 0x5e, 0xdf, 0xeb, 0x1b, 0xe9, 0x49, 0x51, 0x24, 0x9c, 0x28, + 0x6c, 0x8a, 0x54, 0xc6, 0xa4, 0xa0, 0x52, 0xb5, 0x62, 0x51, 0xab, 0x05, 0x75, 0x81, 0x90, 0xa6, + 0x94, 0x05, 0x12, 0x12, 0x13, 0xe7, 0xd6, 0x31, 0x8e, 0x3f, 0x34, 0x1e, 0xa7, 0xed, 0xae, 0x3f, + 0x81, 0x25, 0x4b, 0xfe, 0x0b, 0x9b, 0x2e, 0xbb, 0x60, 0xc1, 0x02, 0x55, 0x24, 0xfc, 0x11, 0x34, + 0xe3, 0xa1, 0x76, 0x49, 0x2b, 0x44, 0xbb, 0xf3, 0x9c, 0x7b, 0xcf, 0xb9, 0xf7, 0xdc, 0xeb, 0x8b, + 0xb6, 0xfc, 0xf5, 0x84, 0x78, 0x91, 0xe5, 0xa7, 0x03, 0xe0, 0x21, 0x08, 0x48, 0xac, 0xd8, 0x77, + 0x2d, 0x16, 0x7b, 0x89, 0xc5, 0x52, 0x31, 0x82, 0x50, 0x78, 0x0e, 0x13, 0x5e, 0x14, 0x5a, 0x93, + 0xfe, 0x00, 0x04, 0xeb, 0x5b, 0x2e, 0x84, 0xc0, 0x99, 0x80, 0x21, 0x89, 0x79, 0x24, 0x22, 0xdc, + 0xcf, 0x24, 0x48, 0x2e, 0x41, 0x62, 0xdf, 0x25, 0x52, 0x82, 0x5c, 0x96, 0x20, 0x5a, 0xa2, 0x7d, + 0xdf, 0xf5, 0xc4, 0x28, 0x1d, 0x10, 0x27, 0x0a, 0x2c, 0x37, 0x72, 0x23, 0x4b, 0x29, 0x0d, 0xd2, + 0x03, 0xf5, 0x52, 0x0f, 0xf5, 0x95, 0x55, 0x68, 0x3f, 0xd2, 0x4d, 0xb2, 0xd8, 0x0b, 0x98, 0x33, + 0xf2, 0x42, 0xe0, 0xc7, 0x79, 0x9b, 0x01, 0x08, 0x66, 0x4d, 0xe6, 0xfa, 0x6a, 0x5b, 0xd7, 0xb1, + 0x78, 0x1a, 0x0a, 0x2f, 0x80, 0x39, 0xc2, 0xda, 0xef, 0x08, 0x89, 0x33, 0x82, 0x80, 0xcd, 0xf1, + 0x1e, 0x5e, 0xc7, 0x4b, 0x85, 0x37, 0xb6, 0xbc, 0x50, 0x24, 0x82, 0xcf, 0x91, 0x0a, 0x9e, 0x12, + 0xe0, 0x13, 0xe0, 0xb9, 0x21, 0x38, 0x62, 0x41, 0x3c, 0x86, 0xab, 0x3c, 0xad, 0x5c, 0xbb, 0xae, + 0x2b, 0xb2, 0x7b, 0x8f, 0x11, 0xda, 0x39, 0x12, 0x9c, 0xbd, 0x62, 0xe3, 0x14, 0x70, 0x07, 0xd5, + 0x3c, 0x01, 0x41, 0xd2, 0x32, 0xba, 0x95, 0xe5, 0x86, 0xdd, 0x98, 0x9d, 0x77, 0x6a, 0xbb, 0x12, + 0xa0, 0x19, 0xbe, 0x51, 0xff, 0xf0, 0xb1, 0x53, 0x3a, 0xf9, 0xda, 0x2d, 0xf5, 0x3e, 0x95, 0x51, + 0xf3, 0x65, 0xe4, 0x43, 0x48, 0x61, 0xe2, 0xc1, 0x21, 0x7e, 0x8b, 0xea, 0x72, 0xca, 0x43, 0x26, + 0x58, 0xcb, 0xe8, 0x1a, 0xcb, 0xcd, 0xd5, 0x07, 0x44, 0x6f, 0xbd, 0x68, 0x3a, 0xdf, 0xbb, 0xcc, + 0x26, 0x93, 0x3e, 0x79, 0x31, 0x78, 0x07, 0x8e, 0x78, 0x0e, 0x82, 0xd9, 0xf8, 0xf4, 0xbc, 0x53, + 0x9a, 0x9d, 0x77, 0x50, 0x8e, 0xd1, 0x0b, 0x55, 0x3c, 0x44, 0xd5, 0x24, 0x06, 0xa7, 0x55, 0x56, + 0xea, 0x36, 0xf9, 0xe3, 0x7f, 0x8a, 0x14, 0xfa, 0xdd, 0x8b, 0xc1, 0xb1, 0xff, 0xd6, 0xf5, 0xaa, + 0xf2, 
0x45, 0x95, 0x3a, 0x1e, 0xa3, 0x85, 0x44, 0x30, 0x91, 0x26, 0xad, 0x8a, 0xaa, 0xb3, 0x7d, + 0xcb, 0x3a, 0x4a, 0xcb, 0xfe, 0x47, 0x57, 0x5a, 0xc8, 0xde, 0x54, 0xd7, 0xe8, 0xad, 0xa1, 0x7f, + 0x7f, 0x69, 0x0a, 0xdf, 0x45, 0x35, 0x21, 0x21, 0x35, 0xc5, 0x86, 0xbd, 0xa8, 0x99, 0xb5, 0x2c, + 0x2f, 0x8b, 0xf5, 0x3e, 0x1b, 0xe8, 0xbf, 0xb9, 0x2a, 0x78, 0x13, 0x2d, 0x16, 0x3a, 0x82, 0xa1, + 0x92, 0xa8, 0xdb, 0xff, 0x6b, 0x89, 0xc5, 0xad, 0x62, 0x90, 0x5e, 0xce, 0xc5, 0x6f, 0x50, 0x35, + 0x4d, 0x80, 0xeb, 0xf1, 0x6e, 0xde, 0xc0, 0xf6, 0x7e, 0x02, 0x7c, 0x37, 0x3c, 0x88, 0xf2, 0xb9, + 0x4a, 0x84, 0x2a, 0x59, 0x69, 0x0b, 0x38, 0x8f, 0xb8, 0x1a, 0x6b, 0xc1, 0xd6, 0x8e, 0x04, 0x69, + 0x16, 0xeb, 0x4d, 0xcb, 0xa8, 0xfe, 0x53, 0x05, 0xaf, 0xa0, 0xba, 0x64, 0x86, 0x2c, 0x00, 0x3d, + 0x8b, 0x25, 0x4d, 0x52, 0x39, 0x12, 0xa7, 0x17, 0x19, 0xf8, 0x0e, 0xaa, 0xa4, 0xde, 0x50, 0x75, + 0xdf, 0xb0, 0x9b, 0x3a, 0xb1, 0xb2, 0xbf, 0xbb, 0x4d, 0x25, 0x8e, 0x7b, 0x68, 0xc1, 0xe5, 0x51, + 0x1a, 0xcb, 0xb5, 0xca, 0x5f, 0x1b, 0xc9, 0x65, 0x3c, 0x53, 0x08, 0xd5, 0x11, 0xec, 0xa3, 0x1a, + 0xc8, 0x5b, 0x68, 0x55, 0xbb, 0x95, 0xe5, 0xe6, 0xea, 0xd3, 0x5b, 0x8c, 0x80, 0xa8, 0xa3, 0xda, + 0x09, 0x05, 0x3f, 0x2e, 0x58, 0x95, 0x18, 0xcd, 0x6a, 0xb4, 0x0f, 0xf5, 0xe1, 0xa9, 0x1c, 0xbc, + 0x84, 0x2a, 0x3e, 0x1c, 0x67, 0x36, 0xa9, 0xfc, 0xc4, 0x7b, 0xa8, 0x36, 0x91, 0x37, 0xa9, 0xf7, + 0xf1, 0xe4, 0x06, 0xcd, 0xe4, 0x87, 0x4d, 0x33, 0xad, 0x8d, 0xf2, 0xba, 0x61, 0xdf, 0x3b, 0x9d, + 0x9a, 0xa5, 0xb3, 0xa9, 0x59, 0xfa, 0x32, 0x35, 0x4b, 0x27, 0x33, 0xd3, 0x38, 0x9d, 0x99, 0xc6, + 0xd9, 0xcc, 0x34, 0xbe, 0xcd, 0x4c, 0xe3, 0xfd, 0x77, 0xb3, 0xf4, 0xfa, 0x2f, 0x2d, 0xf0, 0x23, + 0x00, 0x00, 0xff, 0xff, 0xb9, 0x87, 0xc6, 0x94, 0xfa, 0x05, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/generated.proto new file mode 100644 index 000000000..cbc050970 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/generated.proto @@ -0,0 +1,101 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.authentication.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". 
+option go_package = "v1beta1"; + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message ExtraValue { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// TokenReview attempts to authenticate a token to a known user. +// Note: TokenReview requests may be cached by the webhook token authenticator +// plugin in the kube-apiserver. +message TokenReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated + optional TokenReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request can be authenticated. + // +optional + optional TokenReviewStatus status = 3; +} + +// TokenReviewSpec is a description of the token authentication request. +message TokenReviewSpec { + // Token is the opaque bearer token. + // +optional + optional string token = 1; +} + +// TokenReviewStatus is the result of the token authentication request. +message TokenReviewStatus { + // Authenticated indicates that the token was associated with a known user. + // +optional + optional bool authenticated = 1; + + // User is the UserInfo associated with the provided token. + // +optional + optional UserInfo user = 2; + + // Error indicates that the token couldn't be checked + // +optional + optional string error = 3; +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +message UserInfo { + // The name that uniquely identifies this user among all active users. + // +optional + optional string username = 1; + + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + // +optional + optional string uid = 2; + + // The names of groups this user is a part of. + // +optional + repeated string groups = 3; + + // Any additional information provided by the authenticator. + // +optional + map<string, ExtraValue> extra = 4; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/register.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/register.go new file mode 100644 index 000000000..ddaa19702 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/register.go @@ -0,0 +1,48 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "authentication.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &TokenReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types.generated.go new file mode 100644 index 000000000..b8990af18 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types.generated.go @@ -0,0 +1,1568 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package v1beta1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 + } +} + +func (x *TokenReview) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *TokenReview) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *TokenReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = TokenReviewSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = TokenReviewStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *TokenReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = TokenReviewSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = TokenReviewStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *TokenReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Token != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Token)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("token")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Token)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *TokenReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *TokenReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "token": + if r.TryDecodeAsNil() { + x.Token = "" + } else { + yyv4 := &x.Token + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *TokenReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Token = "" + } else { + yyv7 := &x.Token + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *TokenReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Authenticated != false + yyq2[1] = true + yyq2[2] = x.Error != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.Authenticated)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("authenticated")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeBool(bool(x.Authenticated)) + } + } + } + if yyr2 || 
yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy7 := &x.User + yy7.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("user")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.User + yy9.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Error)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("error")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Error)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *TokenReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *TokenReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "authenticated": + if r.TryDecodeAsNil() { + x.Authenticated = false + } else { + yyv4 := &x.Authenticated + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*bool)(yyv4)) = r.DecodeBool() + } + } + case "user": + if r.TryDecodeAsNil() { + x.User = UserInfo{} + } else { + yyv6 := &x.User + yyv6.CodecDecodeSelf(d) + } + case "error": + if r.TryDecodeAsNil() { + x.Error = "" + } else { + yyv7 := &x.Error + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *TokenReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = 
r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Authenticated = false + } else { + yyv10 := &x.Authenticated + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.User = UserInfo{} + } else { + yyv12 := &x.User + yyv12.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Error = "" + } else { + yyv13 := &x.Error + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *UserInfo) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Username != "" + yyq2[1] = x.UID != "" + yyq2[2] = len(x.Groups) != 0 + yyq2[3] = len(x.Extra) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Username)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("username")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Username)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("uid")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Groups == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { 
+ } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("groups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Groups == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Extra == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("extra")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Extra == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *UserInfo) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *UserInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "username": + if r.TryDecodeAsNil() { + x.Username = "" + } else { + yyv4 := &x.Username + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "uid": + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv6 := &x.UID + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "groups": + if r.TryDecodeAsNil() { + x.Groups = nil + } else { + yyv8 := &x.Groups + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceStringX(yyv8, false, d) + } + } + case "extra": + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv10 := &x.Extra + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv10), d) + } + } + default: + 
z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *UserInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Username = "" + } else { + yyv13 := &x.Username + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv15 := &x.UID + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Groups = nil + } else { + yyv17 := &x.Groups + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + z.F.DecSliceStringX(yyv17, false, d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv19 := &x.Extra + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ExtraValue) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + h.encExtraValue((ExtraValue)(x), e) + } + } +} + +func (x *ExtraValue) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decExtraValue((*ExtraValue)(x), d) + } +} + +func (x codecSelfer1234) encMapstringExtraValue(v map[string]ExtraValue, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decMapstringExtraValue(v *map[string]ExtraValue, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string]ExtraValue, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 ExtraValue + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv4 := &yymv1 + yyv4.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv5 := &yymk1 + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv7 := &yymv1 + yyv7.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encExtraValue(v ExtraValue, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyv1)) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decExtraValue(v *ExtraValue, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]string, yyrl1) + } + } else { + yyv1 = make([]string, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + 
yyv4 := &yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 string + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv6 := &yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types.go new file mode 100644 index 000000000..57c96e3bc --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types.go @@ -0,0 +1,91 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient=true +// +nonNamespaced=true +// +noMethods=true + +// TokenReview attempts to authenticate a token to a known user. +// Note: TokenReview requests may be cached by the webhook token authenticator +// plugin in the kube-apiserver. +type TokenReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated + Spec TokenReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request can be authenticated. + // +optional + Status TokenReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// TokenReviewSpec is a description of the token authentication request. +type TokenReviewSpec struct { + // Token is the opaque bearer token. + // +optional + Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` +} + +// TokenReviewStatus is the result of the token authentication request. +type TokenReviewStatus struct { + // Authenticated indicates that the token was associated with a known user. + // +optional + Authenticated bool `json:"authenticated,omitempty" protobuf:"varint,1,opt,name=authenticated"` + // User is the UserInfo associated with the provided token. + // +optional + User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // Error indicates that the token couldn't be checked + // +optional + Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +type UserInfo struct { + // The name that uniquely identifies this user among all active users. 
+ // +optional + Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"` + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + // +optional + UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"` + // The names of groups this user is a part of. + // +optional + Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` + // Any additional information provided by the authenticator. + // +optional + Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,4,rep,name=extra"` +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type ExtraValue []string + +func (t ExtraValue) String() string { + return fmt.Sprintf("%v", []string(t)) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..f910bea6f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,72 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_TokenReview = map[string]string{ + "": "TokenReview attempts to authenticate a token to a known user. 
Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", + "spec": "Spec holds information about the request being evaluated", + "status": "Status is filled in by the server and indicates whether the request can be authenticated.", +} + +func (TokenReview) SwaggerDoc() map[string]string { + return map_TokenReview +} + +var map_TokenReviewSpec = map[string]string{ + "": "TokenReviewSpec is a description of the token authentication request.", + "token": "Token is the opaque bearer token.", +} + +func (TokenReviewSpec) SwaggerDoc() map[string]string { + return map_TokenReviewSpec +} + +var map_TokenReviewStatus = map[string]string{ + "": "TokenReviewStatus is the result of the token authentication request.", + "authenticated": "Authenticated indicates that the token was associated with a known user.", + "user": "User is the UserInfo associated with the provided token.", + "error": "Error indicates that the token couldn't be checked", +} + +func (TokenReviewStatus) SwaggerDoc() map[string]string { + return map_TokenReviewStatus +} + +var map_UserInfo = map[string]string{ + "": "UserInfo holds the information about the user needed to implement the user.Info interface.", + "username": "The name that uniquely identifies this user among all active users.", + "uid": "A unique value that identifies this user across time. If this user is deleted and another user by the same name is added, they will have different UIDs.", + "groups": "The names of groups this user is a part of.", + "extra": "Any additional information provided by the authenticator.", +} + +func (UserInfo) SwaggerDoc() map[string]string { + return map_UserInfo +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.conversion.go new file mode 100644 index 000000000..5fc83362f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.conversion.go @@ -0,0 +1,145 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1beta1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + authentication "k8s.io/client-go/pkg/apis/authentication" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_TokenReview_To_authentication_TokenReview, + Convert_authentication_TokenReview_To_v1beta1_TokenReview, + Convert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec, + Convert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec, + Convert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus, + Convert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus, + Convert_v1beta1_UserInfo_To_authentication_UserInfo, + Convert_authentication_UserInfo_To_v1beta1_UserInfo, + ) +} + +func autoConvert_v1beta1_TokenReview_To_authentication_TokenReview(in *TokenReview, out *authentication.TokenReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_TokenReview_To_authentication_TokenReview(in *TokenReview, out *authentication.TokenReview, s conversion.Scope) error { + return autoConvert_v1beta1_TokenReview_To_authentication_TokenReview(in, out, s) +} + +func autoConvert_authentication_TokenReview_To_v1beta1_TokenReview(in *authentication.TokenReview, out *TokenReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authentication_TokenReview_To_v1beta1_TokenReview(in *authentication.TokenReview, out *TokenReview, s conversion.Scope) error { + return autoConvert_authentication_TokenReview_To_v1beta1_TokenReview(in, out, s) +} + +func autoConvert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error { + out.Token = in.Token + return nil +} + +func Convert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(in *TokenReviewSpec, out *authentication.TokenReviewSpec, s conversion.Scope) error { + return autoConvert_v1beta1_TokenReviewSpec_To_authentication_TokenReviewSpec(in, out, s) +} + +func autoConvert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { + out.Token = in.Token + return nil +} + +func Convert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in *authentication.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { + return autoConvert_authentication_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in, out, s) +} + +func autoConvert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *TokenReviewStatus, out *authentication.TokenReviewStatus, s conversion.Scope) error { + out.Authenticated = in.Authenticated + if err := Convert_v1beta1_UserInfo_To_authentication_UserInfo(&in.User, &out.User, s); err != nil { + return err + } + out.Error = in.Error + return nil +} + +func Convert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus(in *TokenReviewStatus, out *authentication.TokenReviewStatus, s conversion.Scope) error { + return 
autoConvert_v1beta1_TokenReviewStatus_To_authentication_TokenReviewStatus(in, out, s) +} + +func autoConvert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in *authentication.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { + out.Authenticated = in.Authenticated + if err := Convert_authentication_UserInfo_To_v1beta1_UserInfo(&in.User, &out.User, s); err != nil { + return err + } + out.Error = in.Error + return nil +} + +func Convert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in *authentication.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { + return autoConvert_authentication_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in, out, s) +} + +func autoConvert_v1beta1_UserInfo_To_authentication_UserInfo(in *UserInfo, out *authentication.UserInfo, s conversion.Scope) error { + out.Username = in.Username + out.UID = in.UID + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]authentication.ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_v1beta1_UserInfo_To_authentication_UserInfo(in *UserInfo, out *authentication.UserInfo, s conversion.Scope) error { + return autoConvert_v1beta1_UserInfo_To_authentication_UserInfo(in, out, s) +} + +func autoConvert_authentication_UserInfo_To_v1beta1_UserInfo(in *authentication.UserInfo, out *UserInfo, s conversion.Scope) error { + out.Username = in.Username + out.UID = in.UID + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_authentication_UserInfo_To_v1beta1_UserInfo(in *authentication.UserInfo, out *UserInfo, s conversion.Scope) error { + return autoConvert_authentication_UserInfo_To_v1beta1_UserInfo(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..01260cc13 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,106 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_TokenReview, InType: reflect.TypeOf(&TokenReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_TokenReviewSpec, InType: reflect.TypeOf(&TokenReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_TokenReviewStatus, InType: reflect.TypeOf(&TokenReviewStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_UserInfo, InType: reflect.TypeOf(&UserInfo{})}, + ) +} + +func DeepCopy_v1beta1_TokenReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TokenReview) + out := out.(*TokenReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_TokenReviewStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_TokenReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TokenReviewSpec) + out := out.(*TokenReviewSpec) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_TokenReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TokenReviewStatus) + out := out.(*TokenReviewStatus) + *out = *in + if err := DeepCopy_v1beta1_UserInfo(&in.User, &out.User, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_UserInfo(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*UserInfo) + out := out.(*UserInfo) + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*ExtraValue) + } + } + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.defaults.go new file mode 100644 index 000000000..e24e70be3 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/v1beta1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authentication/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/authentication/zz_generated.deepcopy.go new file mode 100644 index 000000000..ec322c5f8 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authentication/zz_generated.deepcopy.go @@ -0,0 +1,106 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package authentication + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authentication_TokenReview, InType: reflect.TypeOf(&TokenReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authentication_TokenReviewSpec, InType: reflect.TypeOf(&TokenReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authentication_TokenReviewStatus, InType: reflect.TypeOf(&TokenReviewStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authentication_UserInfo, InType: reflect.TypeOf(&UserInfo{})}, + ) +} + +func DeepCopy_authentication_TokenReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TokenReview) + out := out.(*TokenReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_authentication_TokenReviewStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_authentication_TokenReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TokenReviewSpec) + out := out.(*TokenReviewSpec) + *out = *in + return nil + } +} + +func DeepCopy_authentication_TokenReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*TokenReviewStatus) + out := out.(*TokenReviewStatus) + *out = *in + if err := DeepCopy_authentication_UserInfo(&in.User, &out.User, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_authentication_UserInfo(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*UserInfo) + out := out.(*UserInfo) + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*ExtraValue) + } 
+ } + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/OWNERS b/vendor/k8s.io/client-go/pkg/apis/authorization/OWNERS new file mode 100755 index 000000000..2fef50443 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/OWNERS @@ -0,0 +1,17 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- liggitt +- nikhiljindal +- erictune +- sttts +- ncdc +- timothysc +- dims +- mml +- mbohlool +- david-mcmahon +- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/doc.go b/vendor/k8s.io/client-go/pkg/apis/authorization/doc.go new file mode 100644 index 000000000..91344f674 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=authorization.k8s.io +package authorization diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/install/install.go b/vendor/k8s.io/client-go/pkg/apis/authorization/install/install.go new file mode 100644 index 000000000..33eee6618 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/install/install.go @@ -0,0 +1,53 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. 
+package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/authorization" + "k8s.io/client-go/pkg/apis/authorization/v1" + "k8s.io/client-go/pkg/apis/authorization/v1beta1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: authorization.GroupName, + VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version, v1beta1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/authorization", + RootScopedKinds: sets.NewString("SubjectAccessReview", "SelfSubjectAccessReview"), + AddInternalObjectsToScheme: authorization.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, + v1.SchemeGroupVersion.Version: v1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/register.go b/vendor/k8s.io/client-go/pkg/apis/authorization/register.go new file mode 100644 index 000000000..5693885e4 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package authorization + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &SelfSubjectAccessReview{}, + &SubjectAccessReview{}, + &LocalSubjectAccessReview{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/types.go b/vendor/k8s.io/client-go/pkg/apis/authorization/types.go new file mode 100644 index 000000000..d8ccfaf35 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/types.go @@ -0,0 +1,146 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authorization + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient=true +// +nonNamespaced=true +// +noMethods=true + +// SubjectAccessReview checks whether or not a user or group can perform an action. Not filling in a +// spec.namespace means "in all namespaces". +type SubjectAccessReview struct { + metav1.TypeMeta + metav1.ObjectMeta + + // Spec holds information about the request being evaluated + Spec SubjectAccessReviewSpec + + // Status is filled in by the server and indicates whether the request is allowed or not + Status SubjectAccessReviewStatus +} + +// +genclient=true +// +nonNamespaced=true +// +noMethods=true + +// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a +// spec.namespace means "in all namespaces". Self is a special case, because users should always be able +// to check whether they can perform an action +type SelfSubjectAccessReview struct { + metav1.TypeMeta + metav1.ObjectMeta + + // Spec holds information about the request being evaluated. + Spec SelfSubjectAccessReviewSpec + + // Status is filled in by the server and indicates whether the request is allowed or not + Status SubjectAccessReviewStatus +} + +// +genclient=true +// +noMethods=true + +// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. +// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions +// checking. 
+type LocalSubjectAccessReview struct { + metav1.TypeMeta + metav1.ObjectMeta + + // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace + // you made the request against. If empty, it is defaulted. + Spec SubjectAccessReviewSpec + + // Status is filled in by the server and indicates whether the request is allowed or not + Status SubjectAccessReviewStatus +} + +// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface +type ResourceAttributes struct { + // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces + // "" (empty) is defaulted for LocalSubjectAccessReviews + // "" (empty) is empty for cluster-scoped resources + // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + Namespace string + // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. + Verb string + // Group is the API Group of the Resource. "*" means all. + Group string + // Version is the API Version of the Resource. "*" means all. + Version string + // Resource is one of the existing resource types. "*" means all. + Resource string + // Subresource is one of the existing resource types. "" means none. + Subresource string + // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + Name string +} + +// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface +type NonResourceAttributes struct { + // Path is the URL path of the request + Path string + // Verb is the standard HTTP verb + Verb string +} + +// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAttributes +// and NonResourceAttributes must be set +type SubjectAccessReviewSpec struct { + // ResourceAttributes describes information for a resource access request + ResourceAttributes *ResourceAttributes + // NonResourceAttributes describes information for a non-resource access request + NonResourceAttributes *NonResourceAttributes + + // User is the user you're testing for. + // If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups + User string + // Groups is the groups you're testing for. + Groups []string + // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer + // it needs a reflection here. + Extra map[string]ExtraValue +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +type ExtraValue []string + +// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAttributes +// and NonResourceAttributes must be set +type SelfSubjectAccessReviewSpec struct { + // ResourceAttributes describes information for a resource access request + ResourceAttributes *ResourceAttributes + // NonResourceAttributes describes information for a non-resource access request + NonResourceAttributes *NonResourceAttributes +} + +// SubjectAccessReviewStatus +type SubjectAccessReviewStatus struct { + // Allowed is required. True if the action would be allowed, false otherwise. + Allowed bool + // Reason is optional. It indicates why a request was allowed or denied. 
+ Reason string + // EvaluationError is an indication that some error occurred during the authorization check. + // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. + // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request. + EvaluationError string +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/conversion.go new file mode 100644 index 000000000..2ff5732d6 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/conversion.go @@ -0,0 +1,26 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + return scheme.AddConversionFuncs() +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/defaults.go new file mode 100644 index 000000000..d63d91754 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/defaults.go @@ -0,0 +1,25 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return scheme.AddDefaultingFuncs() +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/doc.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/doc.go new file mode 100644 index 000000000..41741dd53 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +groupName=authorization.k8s.io +package v1 diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/generated.pb.go new file mode 100644 index 000000000..7bacc5169 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/generated.pb.go @@ -0,0 +1,2344 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/authorization/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/authorization/v1/generated.proto + + It has these top-level messages: + ExtraValue + LocalSubjectAccessReview + NonResourceAttributes + ResourceAttributes + SelfSubjectAccessReview + SelfSubjectAccessReviewSpec + SubjectAccessReview + SubjectAccessReviewSpec + SubjectAccessReviewStatus +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } +func (*LocalSubjectAccessReview) ProtoMessage() {} +func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } +func (*NonResourceAttributes) ProtoMessage() {} +func (*NonResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } +func (*ResourceAttributes) ProtoMessage() {} +func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } +func (*SelfSubjectAccessReview) ProtoMessage() {} +func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } +func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} +func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{5} +} + +func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } +func (*SubjectAccessReview) ProtoMessage() {} +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } +func (*SubjectAccessReviewSpec) ProtoMessage() {} +func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } +func (*SubjectAccessReviewStatus) ProtoMessage() {} +func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{8} +} + +func init() { + proto.RegisterType((*ExtraValue)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.ExtraValue") + proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.LocalSubjectAccessReview") + proto.RegisterType((*NonResourceAttributes)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.NonResourceAttributes") + proto.RegisterType((*ResourceAttributes)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.ResourceAttributes") + proto.RegisterType((*SelfSubjectAccessReview)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.SelfSubjectAccessReview") + proto.RegisterType((*SelfSubjectAccessReviewSpec)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.SelfSubjectAccessReviewSpec") + proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.SubjectAccessReview") + proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.SubjectAccessReviewSpec") + proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.client-go.pkg.apis.authorization.v1.SubjectAccessReviewStatus") +} +func (m ExtraValue) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m ExtraValue) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for 
_, s := range m { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *LocalSubjectAccessReview) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LocalSubjectAccessReview) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *NonResourceAttributes) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NonResourceAttributes) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Verb))) + i += copy(data[i:], m.Verb) + return i, nil +} + +func (m *ResourceAttributes) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceAttributes) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) + i += copy(data[i:], m.Namespace) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Verb))) + i += copy(data[i:], m.Verb) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Version))) + i += copy(data[i:], m.Version) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Resource))) + i += copy(data[i:], m.Resource) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Subresource))) + i += copy(data[i:], m.Subresource) + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + return i, nil +} + +func (m *SelfSubjectAccessReview) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SelfSubjectAccessReview) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + data[i] = 0x1a + i++ + i = 
encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n6, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + return i, nil +} + +func (m *SelfSubjectAccessReviewSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SelfSubjectAccessReviewSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ResourceAttributes != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ResourceAttributes.Size())) + n7, err := m.ResourceAttributes.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.NonResourceAttributes != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NonResourceAttributes.Size())) + n8, err := m.NonResourceAttributes.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func (m *SubjectAccessReview) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SubjectAccessReview) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n10, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n11, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + return i, nil +} + +func (m *SubjectAccessReviewSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SubjectAccessReviewSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ResourceAttributes != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ResourceAttributes.Size())) + n12, err := m.ResourceAttributes.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.NonResourceAttributes != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NonResourceAttributes.Size())) + n13, err := m.NonResourceAttributes.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.User))) + i += copy(data[i:], m.User) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + data[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Extra) > 0 { + for k := range m.Extra { + data[i] = 0x2a + i++ + v := m.Extra[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n14, err := (&v).MarshalTo(data[i:]) + if err != nil { 
+ return 0, err + } + i += n14 + } + } + return i, nil +} + +func (m *SubjectAccessReviewStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SubjectAccessReviewStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Allowed { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.EvaluationError))) + i += copy(data[i:], m.EvaluationError) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m ExtraValue) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LocalSubjectAccessReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NonResourceAttributes) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceAttributes) Size() (n int) { + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subresource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectAccessReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectAccessReviewSpec) Size() (n int) { + var l int + _ = l + if m.ResourceAttributes != nil { + l = m.ResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NonResourceAttributes != nil { + l = m.NonResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SubjectAccessReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m 
*SubjectAccessReviewSpec) Size() (n int) { + var l int + _ = l + if m.ResourceAttributes != nil { + l = m.ResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NonResourceAttributes != nil { + l = m.NonResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SubjectAccessReviewStatus) Size() (n int) { + var l int + _ = l + n += 2 + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.EvaluationError) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LocalSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalSubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NonResourceAttributes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NonResourceAttributes{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceAttributes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceAttributes{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectAccessReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, + `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", 
"ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReviewSpec) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&SubjectAccessReviewSpec{`, + `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReviewStatus{`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ExtraValue) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LocalSubjectAccessReview) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourceAttributes) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourceAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAttributes) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + 
var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresource = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectAccessReview) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectAccessReviewSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceAttributes == nil { + m.ResourceAttributes = &ResourceAttributes{} + } + if err := m.ResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NonResourceAttributes == nil { + m.NonResourceAttributes = &NonResourceAttributes{} + } + if err := m.NonResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReview) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReviewSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceAttributes == nil { + m.ResourceAttributes = &ResourceAttributes{} + } + if err := m.ResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NonResourceAttributes == nil { + m.NonResourceAttributes = &NonResourceAttributes{} + } + if err := m.NonResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { 
+ return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + m.Extra[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReviewStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := 
len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 904 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x56, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0xfa, 0x5f, 0xec, 0x09, 0x90, 0x32, 0x55, 0xc9, 0x36, 0x48, 0xb6, 0x65, 0x10, 0x0a, + 0xa2, 0xec, 0x92, 0xf2, 0xa7, 0x55, 0x39, 0xa0, 0xac, 0x08, 0x7f, 0x24, 0xda, 0xa2, 0x89, 0xc8, + 0x01, 0x2e, 0x8c, 0x37, 0x2f, 0xf6, 0xd6, 0xde, 0x9d, 0x65, 0x66, 0x76, 0xdb, 0x70, 0xea, 0x8d, + 0x2b, 0x12, 0x17, 0x8e, 0x7c, 0x05, 0x3e, 0x00, 0x9c, 0x73, 0xa3, 0x07, 0x24, 0x38, 0x20, 0x8b, + 0x2c, 0x17, 0x3e, 0x06, 0x9a, 0xd9, 0x89, 0x37, 0xc6, 0x6b, 0x2a, 0x43, 0x25, 0x7a, 0xe8, 0x6d, + 0xe7, 0xbd, 0xdf, 0xef, 0xbd, 0xdf, 0xbc, 0x79, 0xb3, 0x6f, 0xd0, 0xdb, 0xe3, 0xeb, 0xc2, 0x09, + 0x98, 0x3b, 0x4e, 0x06, 0xc0, 0x23, 0x90, 0x20, 0xdc, 0x78, 0x3c, 0x74, 0x69, 0x1c, 0x08, 0x97, + 0x26, 0x72, 0xc4, 0x78, 0xf0, 0x25, 0x95, 0x01, 0x8b, 0xdc, 0x74, 0xc7, 0x1d, 0x42, 0x04, 0x9c, + 0x4a, 0x38, 0x74, 0x62, 0xce, 0x24, 0xc3, 0xaf, 0xe4, 0x64, 0xa7, 0x20, 0x3b, 0xf1, 0x78, 0xe8, + 0x28, 0xb2, 0x33, 0x47, 0x76, 0xd2, 0x9d, 0xad, 0x57, 0x87, 0x81, 0x1c, 0x25, 0x03, 0xc7, 0x67, + 0xa1, 0x3b, 0x64, 0x43, 0xe6, 0xea, 0x18, 0x83, 0xe4, 0x48, 0xaf, 0xf4, 0x42, 0x7f, 0xe5, 0xb1, + 0xb7, 0xde, 0x30, 0xc2, 0x68, 0x1c, 0x84, 0xd4, 0x1f, 0x05, 0x11, 0xf0, 0xe3, 0x42, 0x5a, 0x08, + 0x92, 0x96, 0x28, 0xda, 0x72, 0x97, 0xb1, 0x78, 0x12, 0xc9, 0x20, 0x84, 0x05, 0xc2, 0x5b, 0x0f, + 0x23, 0x08, 0x7f, 0x04, 0x21, 0x5d, 0xe0, 0xbd, 0xbe, 0x8c, 0x97, 0xc8, 0x60, 0xe2, 0x06, 0x91, + 0x14, 0x92, 0x2f, 0x90, 0xce, 0xed, 0x49, 0x00, 0x4f, 0x81, 0x17, 0x1b, 0x82, 0x7b, 0x34, 0x8c, + 
0x27, 0x50, 0xb6, 0xa7, 0x2b, 0x4b, 0x8f, 0xa8, 0x04, 0xdd, 0xbf, 0x86, 0xd0, 0xde, 0x3d, 0xc9, + 0xe9, 0x01, 0x9d, 0x24, 0x80, 0xbb, 0xa8, 0x11, 0x48, 0x08, 0x85, 0x6d, 0xf5, 0x6a, 0xdb, 0x6d, + 0xaf, 0x9d, 0x4d, 0xbb, 0x8d, 0x0f, 0x95, 0x81, 0xe4, 0xf6, 0x1b, 0xad, 0x6f, 0xbf, 0xeb, 0x56, + 0xee, 0xff, 0xd6, 0xab, 0xf4, 0x7f, 0xae, 0x22, 0xfb, 0x23, 0xe6, 0xd3, 0xc9, 0x7e, 0x32, 0xb8, + 0x03, 0xbe, 0xdc, 0xf5, 0x7d, 0x10, 0x82, 0x40, 0x1a, 0xc0, 0x5d, 0xfc, 0x39, 0x6a, 0xa9, 0x92, + 0x1f, 0x52, 0x49, 0x6d, 0xab, 0x67, 0x6d, 0xaf, 0x5f, 0x7d, 0xcd, 0x31, 0x87, 0x7f, 0xbe, 0x02, + 0xc5, 0xf1, 0x2b, 0xb4, 0x93, 0xee, 0x38, 0xb7, 0x75, 0xac, 0x9b, 0x20, 0xa9, 0x87, 0x4f, 0xa6, + 0xdd, 0x4a, 0x36, 0xed, 0xa2, 0xc2, 0x46, 0x66, 0x51, 0xf1, 0x11, 0xaa, 0x8b, 0x18, 0x7c, 0xbb, + 0xaa, 0xa3, 0xbf, 0xeb, 0xac, 0xd0, 0x5a, 0x4e, 0x89, 0xe2, 0xfd, 0x18, 0x7c, 0xef, 0x29, 0x93, + 0xb1, 0xae, 0x56, 0x44, 0xc7, 0xc7, 0x11, 0x6a, 0x0a, 0x49, 0x65, 0x22, 0xec, 0x9a, 0xce, 0xf4, + 0xde, 0x7f, 0xce, 0xa4, 0xa3, 0x79, 0xcf, 0x98, 0x5c, 0xcd, 0x7c, 0x4d, 0x4c, 0x96, 0xfe, 0x67, + 0xe8, 0xd2, 0x2d, 0x16, 0x11, 0x10, 0x2c, 0xe1, 0x3e, 0xec, 0x4a, 0xc9, 0x83, 0x41, 0x22, 0x41, + 0xe0, 0x1e, 0xaa, 0xc7, 0x54, 0x8e, 0x74, 0x39, 0xdb, 0x85, 0xd4, 0x8f, 0xa9, 0x1c, 0x11, 0xed, + 0x51, 0x88, 0x14, 0xf8, 0x40, 0x97, 0xe4, 0x1c, 0xe2, 0x00, 0xf8, 0x80, 0x68, 0x4f, 0xff, 0xc7, + 0x2a, 0xc2, 0x25, 0xa1, 0x5d, 0xd4, 0x8e, 0x68, 0x08, 0x22, 0xa6, 0x3e, 0x98, 0xf8, 0xcf, 0x1a, + 0x76, 0xfb, 0xd6, 0x99, 0x83, 0x14, 0x98, 0x87, 0x67, 0xc2, 0x2f, 0xa0, 0xc6, 0x90, 0xb3, 0x24, + 0xd6, 0x55, 0x6b, 0x7b, 0x4f, 0x1b, 0x48, 0xe3, 0x7d, 0x65, 0x24, 0xb9, 0x0f, 0xbf, 0x8c, 0xd6, + 0x52, 0xe0, 0x22, 0x60, 0x91, 0x5d, 0xd7, 0xb0, 0x0d, 0x03, 0x5b, 0x3b, 0xc8, 0xcd, 0xe4, 0xcc, + 0x8f, 0xaf, 0xa0, 0x16, 0x37, 0xc2, 0xed, 0x86, 0xc6, 0x5e, 0x30, 0xd8, 0xd6, 0xd9, 0x86, 0xc8, + 0x0c, 0x81, 0xdf, 0x44, 0xeb, 0x22, 0x19, 0xcc, 0x08, 0x4d, 0x4d, 0xb8, 0x68, 0x08, 0xeb, 0xfb, + 0x85, 0x8b, 0x9c, 0xc7, 0xa9, 0x6d, 0xa9, 0x3d, 0xda, 0x6b, 0xf3, 0xdb, 0x52, 0x25, 0x20, 0xda, + 0xd3, 0xff, 0xa5, 0x8a, 0x36, 0xf7, 0x61, 0x72, 0xf4, 0xff, 0xf4, 0xfc, 0x9d, 0xb9, 0x9e, 0xff, + 0x60, 0xb5, 0x4e, 0x2c, 0x57, 0xfd, 0xd8, 0xf4, 0xfd, 0x0f, 0x55, 0xf4, 0xfc, 0x3f, 0x68, 0xc4, + 0x5f, 0x59, 0x08, 0xf3, 0x85, 0xd6, 0x35, 0x85, 0x7e, 0x67, 0x25, 0x71, 0x8b, 0x37, 0xc0, 0x7b, + 0x2e, 0x9b, 0x76, 0x4b, 0x6e, 0x06, 0x29, 0x49, 0x89, 0xbf, 0xb1, 0xd0, 0xa5, 0xa8, 0xec, 0x8a, + 0x9a, 0x73, 0xf1, 0x56, 0x12, 0x53, 0x7a, 0xd9, 0xbd, 0xcb, 0xd9, 0xb4, 0x5b, 0xfe, 0x1f, 0x20, + 0xe5, 0xb9, 0xfb, 0x3f, 0x55, 0xd1, 0xc5, 0x27, 0x7f, 0xe2, 0x47, 0xd9, 0x91, 0x7f, 0xd6, 0xd1, + 0xe6, 0x93, 0x6e, 0xfc, 0x57, 0xdd, 0x38, 0x1b, 0x10, 0xb5, 0xf9, 0x3f, 0xe9, 0x27, 0x02, 0xb8, + 0x19, 0x10, 0x7d, 0xd4, 0xd4, 0x43, 0x40, 0xd8, 0x75, 0xfd, 0xd4, 0x40, 0xea, 0x04, 0xf4, 0x74, + 0x10, 0xc4, 0x78, 0xb0, 0x44, 0x0d, 0x50, 0x6f, 0x13, 0xbb, 0xd1, 0xab, 0x6d, 0xaf, 0x5f, 0xbd, + 0xfd, 0x28, 0x5a, 0xcb, 0xd1, 0xaf, 0x9d, 0xbd, 0x48, 0xf2, 0xe3, 0x62, 0x2a, 0x69, 0x1b, 0xc9, + 0x93, 0x6d, 0x7d, 0x61, 0x5e, 0x44, 0x1a, 0x83, 0x2f, 0xa0, 0xda, 0x18, 0x8e, 0xf3, 0xa9, 0x48, + 0xd4, 0x27, 0xbe, 0x89, 0x1a, 0xa9, 0x7a, 0x2c, 0x99, 0x02, 0x5f, 0x5b, 0x49, 0x55, 0xf1, 0xd6, + 0x22, 0x79, 0x94, 0x1b, 0xd5, 0xeb, 0x56, 0xff, 0x7b, 0x0b, 0x5d, 0x5e, 0xda, 0xa0, 0x6a, 0x4c, + 0xd2, 0xc9, 0x84, 0xdd, 0x85, 0x43, 0x2d, 0xa3, 0x55, 0x8c, 0xc9, 0xdd, 0xdc, 0x4c, 0xce, 0xfc, + 0xf8, 0x25, 0xd4, 0xe4, 0x40, 0x05, 0x8b, 0xcc, 0x68, 0x9e, 0xf5, 0x36, 0xd1, 0x56, 0x62, 0xbc, + 0x78, 0x17, 0x6d, 0x80, 
0x4a, 0xaf, 0x75, 0xed, 0x71, 0xce, 0xb8, 0x39, 0xaa, 0x4d, 0x43, 0xd8, + 0xd8, 0x9b, 0x77, 0x93, 0xbf, 0xe3, 0xbd, 0x17, 0x4f, 0x4e, 0x3b, 0x95, 0x07, 0xa7, 0x9d, 0xca, + 0xaf, 0xa7, 0x9d, 0xca, 0xfd, 0xac, 0x63, 0x9d, 0x64, 0x1d, 0xeb, 0x41, 0xd6, 0xb1, 0x7e, 0xcf, + 0x3a, 0xd6, 0xd7, 0x7f, 0x74, 0x2a, 0x9f, 0x56, 0xd3, 0x9d, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, + 0x8a, 0xc4, 0x1f, 0xd5, 0x30, 0x0c, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/generated.proto new file mode 100644 index 000000000..7036b8eb3 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/generated.proto @@ -0,0 +1,185 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.authorization.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message ExtraValue { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. +// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions +// checking. +message LocalSubjectAccessReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace + // you made the request against. If empty, it is defaulted. + optional SubjectAccessReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + optional SubjectAccessReviewStatus status = 3; +} + +// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface +message NonResourceAttributes { + // Path is the URL path of the request + // +optional + optional string path = 1; + + // Verb is the standard HTTP verb + // +optional + optional string verb = 2; +} + +// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface +message ResourceAttributes { + // Namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces + // "" (empty) is defaulted for LocalSubjectAccessReviews + // "" (empty) is empty for cluster-scoped resources + // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + // +optional + optional string namespace = 1; + + // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. + // +optional + optional string verb = 2; + + // Group is the API Group of the Resource. "*" means all. + // +optional + optional string group = 3; + + // Version is the API Version of the Resource. "*" means all. + // +optional + optional string version = 4; + + // Resource is one of the existing resource types. "*" means all. + // +optional + optional string resource = 5; + + // Subresource is one of the existing resource types. "" means none. + // +optional + optional string subresource = 6; + + // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + // +optional + optional string name = 7; +} + +// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a +// spec.namespace means "in all namespaces". Self is a special case, because users should always be able +// to check whether they can perform an action +message SelfSubjectAccessReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated. user and groups must be empty + optional SelfSubjectAccessReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + optional SubjectAccessReviewStatus status = 3; +} + +// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +message SelfSubjectAccessReviewSpec { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + optional ResourceAttributes resourceAttributes = 1; + + // NonResourceAttributes describes information for a non-resource access request + // +optional + optional NonResourceAttributes nonResourceAttributes = 2; +} + +// SubjectAccessReview checks whether or not a user or group can perform an action. +message SubjectAccessReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated + optional SubjectAccessReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + optional SubjectAccessReviewStatus status = 3; +} + +// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +message SubjectAccessReviewSpec { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + optional ResourceAttributes resourceAttributes = 1; + + // NonResourceAttributes describes information for a non-resource access request + // +optional + optional NonResourceAttributes nonResourceAttributes = 2; + + // User is the user you're testing for. 
+ // If you specify "User" but not "Groups", then is it interpreted as "What if User were not a member of any groups + // +optional + optional string verb = 3; + + // Groups is the groups you're testing for. + // +optional + repeated string groups = 4; + + // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer + // it needs a reflection here. + // +optional + map<string, ExtraValue> extra = 5; +} + +// SubjectAccessReviewStatus +message SubjectAccessReviewStatus { + // Allowed is required. True if the action would be allowed, false otherwise. + optional bool allowed = 1; + + // Reason is optional. It indicates why a request was allowed or denied. + // +optional + optional string reason = 2; + + // EvaluationError is an indication that some error occurred during the authorization check. + // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. + // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request. + // +optional + optional string evaluationError = 3; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/register.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/register.go new file mode 100644 index 000000000..909bc0a7d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. 
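// Editor's sketch (assumption: caller-side code, not part of this vendored
// file): the SchemeBuilder/AddToScheme pair declared above is normally
// consumed by building a runtime.Scheme and registering these types into it,
// which in turn invokes addKnownTypes below:
//
//    scheme := runtime.NewScheme()
//    if err := AddToScheme(scheme); err != nil {
//        panic(err) // registration failed; these types could not be added to the scheme
//    }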
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &SelfSubjectAccessReview{}, + &SubjectAccessReview{}, + &LocalSubjectAccessReview{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} + +func (obj *LocalSubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } +func (obj *SubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } +func (obj *SelfSubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types.generated.go new file mode 100644 index 000000000..2528afa8e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types.generated.go @@ -0,0 +1,3233 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package v1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 + } +} + +func (x *SubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = SubjectAccessReviewSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = SubjectAccessReviewSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SelfSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SelfSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SelfSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { 
+ yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = SelfSubjectAccessReviewSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SelfSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = SelfSubjectAccessReviewSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LocalSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, 
_, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LocalSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LocalSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = SubjectAccessReviewSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LocalSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 
= r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = SubjectAccessReviewSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Namespace != "" + yyq2[1] = x.Verb != "" + yyq2[2] = x.Group != "" + yyq2[3] = x.Version != "" + yyq2[4] = x.Resource != "" + yyq2[5] = x.Subresource != "" + yyq2[6] = x.Name != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("verb")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Group)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("group")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Group)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Version)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("version")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Version)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subresource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == 
codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv4 := &x.Namespace + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "verb": + if r.TryDecodeAsNil() { + x.Verb = "" + } else { + yyv6 := &x.Verb + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "group": + if r.TryDecodeAsNil() { + x.Group = "" + } else { + yyv8 := &x.Group + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "version": + if r.TryDecodeAsNil() { + x.Version = "" + } else { + yyv10 := &x.Version + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "resource": + if r.TryDecodeAsNil() { + x.Resource = "" + } else { + yyv12 := &x.Resource + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "subresource": + if r.TryDecodeAsNil() { + x.Subresource = "" + } else { + yyv14 := &x.Subresource + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv16 := &x.Name + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv19 := &x.Namespace + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.Verb = "" + } else { + yyv21 := &x.Verb + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Group = "" + } else { + yyv23 := &x.Group + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Version = "" + } else { + yyv25 := &x.Version + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Resource = "" + } else { + yyv27 := &x.Resource + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subresource = "" + } else { + yyv29 := &x.Subresource + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv31 := &x.Name + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NonResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Path != "" + yyq2[1] = x.Verb != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("verb")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NonResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NonResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "verb": + if r.TryDecodeAsNil() { + x.Verb = "" + } else { + yyv6 := &x.Verb + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NonResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv9 := &x.Path + yym10 := 
z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Verb = "" + } else { + yyv11 := &x.Verb + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ResourceAttributes != nil + yyq2[1] = x.NonResourceAttributes != nil + yyq2[2] = x.User != "" + yyq2[3] = len(x.Groups) != 0 + yyq2[4] = len(x.Extra) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.ResourceAttributes == nil { + r.EncodeNil() + } else { + x.ResourceAttributes.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceAttributes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceAttributes == nil { + r.EncodeNil() + } else { + x.ResourceAttributes.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.NonResourceAttributes == nil { + r.EncodeNil() + } else { + x.NonResourceAttributes.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nonResourceAttributes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NonResourceAttributes == nil { + r.EncodeNil() + } else { + x.NonResourceAttributes.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("user")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Groups == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + 
if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("groups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Groups == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Extra == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("extra")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Extra == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "resourceAttributes": + if r.TryDecodeAsNil() { + if x.ResourceAttributes != nil { + x.ResourceAttributes = nil + } + } else { + if x.ResourceAttributes == nil { + x.ResourceAttributes = new(ResourceAttributes) + } + x.ResourceAttributes.CodecDecodeSelf(d) + } + case "nonResourceAttributes": + if r.TryDecodeAsNil() { + if x.NonResourceAttributes != nil { + x.NonResourceAttributes = nil + } + } else { + if x.NonResourceAttributes == nil { + x.NonResourceAttributes = new(NonResourceAttributes) + } + x.NonResourceAttributes.CodecDecodeSelf(d) + } + case "user": + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv6 := &x.User + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "groups": + if r.TryDecodeAsNil() { + x.Groups = 
nil + } else { + yyv8 := &x.Groups + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceStringX(yyv8, false, d) + } + } + case "extra": + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv10 := &x.Extra + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ResourceAttributes != nil { + x.ResourceAttributes = nil + } + } else { + if x.ResourceAttributes == nil { + x.ResourceAttributes = new(ResourceAttributes) + } + x.ResourceAttributes.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NonResourceAttributes != nil { + x.NonResourceAttributes = nil + } + } else { + if x.NonResourceAttributes == nil { + x.NonResourceAttributes = new(NonResourceAttributes) + } + x.NonResourceAttributes.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv15 := &x.User + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Groups = nil + } else { + yyv17 := &x.Groups + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + z.F.DecSliceStringX(yyv17, false, d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv19 := &x.Extra + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ExtraValue) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + 
} else { + h.encExtraValue((ExtraValue)(x), e) + } + } +} + +func (x *ExtraValue) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decExtraValue((*ExtraValue)(x), d) + } +} + +func (x *SelfSubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ResourceAttributes != nil + yyq2[1] = x.NonResourceAttributes != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.ResourceAttributes == nil { + r.EncodeNil() + } else { + x.ResourceAttributes.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceAttributes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceAttributes == nil { + r.EncodeNil() + } else { + x.ResourceAttributes.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.NonResourceAttributes == nil { + r.EncodeNil() + } else { + x.NonResourceAttributes.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nonResourceAttributes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NonResourceAttributes == nil { + r.EncodeNil() + } else { + x.NonResourceAttributes.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SelfSubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + 
break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "resourceAttributes": + if r.TryDecodeAsNil() { + if x.ResourceAttributes != nil { + x.ResourceAttributes = nil + } + } else { + if x.ResourceAttributes == nil { + x.ResourceAttributes = new(ResourceAttributes) + } + x.ResourceAttributes.CodecDecodeSelf(d) + } + case "nonResourceAttributes": + if r.TryDecodeAsNil() { + if x.NonResourceAttributes != nil { + x.NonResourceAttributes = nil + } + } else { + if x.NonResourceAttributes == nil { + x.NonResourceAttributes = new(NonResourceAttributes) + } + x.NonResourceAttributes.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ResourceAttributes != nil { + x.ResourceAttributes = nil + } + } else { + if x.ResourceAttributes == nil { + x.ResourceAttributes = new(ResourceAttributes) + } + x.ResourceAttributes.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NonResourceAttributes != nil { + x.NonResourceAttributes = nil + } + } else { + if x.NonResourceAttributes == nil { + x.NonResourceAttributes = new(NonResourceAttributes) + } + x.NonResourceAttributes.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SubjectAccessReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Reason != "" + yyq2[2] = x.EvaluationError != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.Allowed)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("allowed")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = 
yym5 + if false { + } else { + r.EncodeBool(bool(x.Allowed)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.EvaluationError)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("evaluationError")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.EvaluationError)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SubjectAccessReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SubjectAccessReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "allowed": + if r.TryDecodeAsNil() { + x.Allowed = false + } else { + yyv4 := &x.Allowed + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*bool)(yyv4)) = r.DecodeBool() + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv6 := &x.Reason + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "evaluationError": + if r.TryDecodeAsNil() { + x.EvaluationError = "" + } else { + yyv8 := &x.EvaluationError + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + 
z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SubjectAccessReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Allowed = false + } else { + yyv11 := &x.Allowed + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv13 := &x.Reason + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.EvaluationError = "" + } else { + yyv15 := &x.EvaluationError + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encMapstringExtraValue(v map[string]ExtraValue, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decMapstringExtraValue(v *map[string]ExtraValue, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string]ExtraValue, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 ExtraValue + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv4 := &yymv1 + 
yyv4.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv5 := &yymk1 + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv7 := &yymv1 + yyv7.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encExtraValue(v ExtraValue, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyv1)) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decExtraValue(v *ExtraValue, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]string, yyrl1) + } + } else { + yyv1 = make([]string, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 string + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv6 := &yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types.go new file mode 100644 index 000000000..38c314ffc --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types.go @@ -0,0 +1,176 @@ +/* +Copyright 
2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient=true +// +nonNamespaced=true +// +noMethods=true + +// SubjectAccessReview checks whether or not a user or group can perform an action. +type SubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated + Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +genclient=true +// +nonNamespaced=true +// +noMethods=true + +// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a +// spec.namespace means "in all namespaces". Self is a special case, because users should always be able +// to check whether they can perform an action +type SelfSubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated. user and groups must be empty + Spec SelfSubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +genclient=true +// +noMethods=true + +// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. +// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions +// checking. +type LocalSubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace + // you made the request against. If empty, it is defaulted. + Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface +type ResourceAttributes struct { + // Namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces + // "" (empty) is defaulted for LocalSubjectAccessReviews + // "" (empty) is empty for cluster-scoped resources + // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` + // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. + // +optional + Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"` + // Group is the API Group of the Resource. "*" means all. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"` + // Version is the API Version of the Resource. "*" means all. + // +optional + Version string `json:"version,omitempty" protobuf:"bytes,4,opt,name=version"` + // Resource is one of the existing resource types. "*" means all. + // +optional + Resource string `json:"resource,omitempty" protobuf:"bytes,5,opt,name=resource"` + // Subresource is one of the existing resource types. "" means none. + // +optional + Subresource string `json:"subresource,omitempty" protobuf:"bytes,6,opt,name=subresource"` + // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"` +} + +// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface +type NonResourceAttributes struct { + // Path is the URL path of the request + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` + // Verb is the standard HTTP verb + // +optional + Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"` +} + +// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +type SubjectAccessReviewSpec struct { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"` + // NonResourceAttributes describes information for a non-resource access request + // +optional + NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"` + + // User is the user you're testing for. + // If you specify "User" but not "Groups", then is it interpreted as "What if User were not a member of any groups + // +optional + User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=verb"` + // Groups is the groups you're testing for. + // +optional + Groups []string `json:"groups,omitempty" protobuf:"bytes,4,rep,name=groups"` + // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer + // it needs a reflection here. + // +optional + Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,5,rep,name=extra"` +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type ExtraValue []string + +func (t ExtraValue) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +// SelfSubjectAccessReviewSpec is a description of the access request. 
Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +type SelfSubjectAccessReviewSpec struct { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"` + // NonResourceAttributes describes information for a non-resource access request + // +optional + NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"` +} + +// SubjectAccessReviewStatus +type SubjectAccessReviewStatus struct { + // Allowed is required. True if the action would be allowed, false otherwise. + Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` + // Reason is optional. It indicates why a request was allowed or denied. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` + // EvaluationError is an indication that some error occurred during the authorization check. + // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. + // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request. + // +optional + EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,3,opt,name=evaluationError"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000..33c0035b4 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/types_swagger_doc_generated.go @@ -0,0 +1,119 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_LocalSubjectAccessReview = map[string]string{ + "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", + "spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. 
If empty, it is defaulted.", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (LocalSubjectAccessReview) SwaggerDoc() map[string]string { + return map_LocalSubjectAccessReview +} + +var map_NonResourceAttributes = map[string]string{ + "": "NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface", + "path": "Path is the URL path of the request", + "verb": "Verb is the standard HTTP verb", +} + +func (NonResourceAttributes) SwaggerDoc() map[string]string { + return map_NonResourceAttributes +} + +var map_ResourceAttributes = map[string]string{ + "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", + "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", + "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "group": "Group is the API Group of the Resource. \"*\" means all.", + "version": "Version is the API Version of the Resource. \"*\" means all.", + "resource": "Resource is one of the existing resource types. \"*\" means all.", + "subresource": "Subresource is one of the existing resource types. \"\" means none.", + "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", +} + +func (ResourceAttributes) SwaggerDoc() map[string]string { + return map_ResourceAttributes +} + +var map_SelfSubjectAccessReview = map[string]string{ + "": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action", + "spec": "Spec holds information about the request being evaluated. user and groups must be empty", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (SelfSubjectAccessReview) SwaggerDoc() map[string]string { + return map_SelfSubjectAccessReview +} + +var map_SelfSubjectAccessReviewSpec = map[string]string{ + "": "SelfSubjectAccessReviewSpec is a description of the access request. 
Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", + "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", + "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", +} + +func (SelfSubjectAccessReviewSpec) SwaggerDoc() map[string]string { + return map_SelfSubjectAccessReviewSpec +} + +var map_SubjectAccessReview = map[string]string{ + "": "SubjectAccessReview checks whether or not a user or group can perform an action.", + "spec": "Spec holds information about the request being evaluated", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (SubjectAccessReview) SwaggerDoc() map[string]string { + return map_SubjectAccessReview +} + +var map_SubjectAccessReviewSpec = map[string]string{ + "": "SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", + "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", + "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", + "user": "User is the user you're testing for. If you specify \"User\" but not \"Groups\", then is it interpreted as \"What if User were not a member of any groups", + "groups": "Groups is the groups you're testing for.", + "extra": "Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.", +} + +func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { + return map_SubjectAccessReviewSpec +} + +var map_SubjectAccessReviewStatus = map[string]string{ + "": "SubjectAccessReviewStatus", + "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "reason": "Reason is optional. It indicates why a request was allowed or denied.", + "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.", +} + +func (SubjectAccessReviewStatus) SwaggerDoc() map[string]string { + return map_SubjectAccessReviewStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.conversion.go new file mode 100644 index 000000000..92d130844 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.conversion.go @@ -0,0 +1,263 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. 
Do not edit it manually! + +package v1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + authorization "k8s.io/client-go/pkg/apis/authorization" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview, + Convert_authorization_LocalSubjectAccessReview_To_v1_LocalSubjectAccessReview, + Convert_v1_NonResourceAttributes_To_authorization_NonResourceAttributes, + Convert_authorization_NonResourceAttributes_To_v1_NonResourceAttributes, + Convert_v1_ResourceAttributes_To_authorization_ResourceAttributes, + Convert_authorization_ResourceAttributes_To_v1_ResourceAttributes, + Convert_v1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview, + Convert_authorization_SelfSubjectAccessReview_To_v1_SelfSubjectAccessReview, + Convert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec, + Convert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec, + Convert_v1_SubjectAccessReview_To_authorization_SubjectAccessReview, + Convert_authorization_SubjectAccessReview_To_v1_SubjectAccessReview, + Convert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec, + Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec, + Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus, + Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus, + ) +} + +func autoConvert_v1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error { + return autoConvert_v1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in, out, s) +} + +func autoConvert_authorization_LocalSubjectAccessReview_To_v1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authorization_LocalSubjectAccessReview_To_v1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error { + return autoConvert_authorization_LocalSubjectAccessReview_To_v1_LocalSubjectAccessReview(in, out, s) +} + +func autoConvert_v1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out 
*authorization.NonResourceAttributes, s conversion.Scope) error { + out.Path = in.Path + out.Verb = in.Verb + return nil +} + +func Convert_v1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error { + return autoConvert_v1_NonResourceAttributes_To_authorization_NonResourceAttributes(in, out, s) +} + +func autoConvert_authorization_NonResourceAttributes_To_v1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error { + out.Path = in.Path + out.Verb = in.Verb + return nil +} + +func Convert_authorization_NonResourceAttributes_To_v1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error { + return autoConvert_authorization_NonResourceAttributes_To_v1_NonResourceAttributes(in, out, s) +} + +func autoConvert_v1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Verb = in.Verb + out.Group = in.Group + out.Version = in.Version + out.Resource = in.Resource + out.Subresource = in.Subresource + out.Name = in.Name + return nil +} + +func Convert_v1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error { + return autoConvert_v1_ResourceAttributes_To_authorization_ResourceAttributes(in, out, s) +} + +func autoConvert_authorization_ResourceAttributes_To_v1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Verb = in.Verb + out.Group = in.Group + out.Version = in.Version + out.Resource = in.Resource + out.Subresource = in.Subresource + out.Name = in.Name + return nil +} + +func Convert_authorization_ResourceAttributes_To_v1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s conversion.Scope) error { + return autoConvert_authorization_ResourceAttributes_To_v1_ResourceAttributes(in, out, s) +} + +func autoConvert_v1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error { + return autoConvert_v1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in, out, s) +} + +func autoConvert_authorization_SelfSubjectAccessReview_To_v1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *SelfSubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + 
+func Convert_authorization_SelfSubjectAccessReview_To_v1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *SelfSubjectAccessReview, s conversion.Scope) error { + return autoConvert_authorization_SelfSubjectAccessReview_To_v1_SelfSubjectAccessReview(in, out, s) +} + +func autoConvert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error { + out.ResourceAttributes = (*authorization.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) + out.NonResourceAttributes = (*authorization.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) + return nil +} + +func Convert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_v1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error { + out.ResourceAttributes = (*ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) + out.NonResourceAttributes = (*NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) + return nil +} + +func Convert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1_SelfSubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_v1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error { + return autoConvert_v1_SubjectAccessReview_To_authorization_SubjectAccessReview(in, out, s) +} + +func autoConvert_authorization_SubjectAccessReview_To_v1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authorization_SubjectAccessReview_To_v1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error { + return autoConvert_authorization_SubjectAccessReview_To_v1_SubjectAccessReview(in, out, s) +} + +func autoConvert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error { + out.ResourceAttributes = 
(*authorization.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) + out.NonResourceAttributes = (*authorization.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) + out.User = in.User + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]authorization.ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_v1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error { + out.ResourceAttributes = (*ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) + out.NonResourceAttributes = (*NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) + out.User = in.User + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_authorization_SubjectAccessReviewSpec_To_v1_SubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error { + out.Allowed = in.Allowed + out.Reason = in.Reason + out.EvaluationError = in.EvaluationError + return nil +} + +func Convert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error { + return autoConvert_v1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in, out, s) +} + +func autoConvert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error { + out.Allowed = in.Allowed + out.Reason = in.Reason + out.EvaluationError = in.EvaluationError + return nil +} + +func Convert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error { + return autoConvert_authorization_SubjectAccessReviewStatus_To_v1_SubjectAccessReviewStatus(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..1f3199a35 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.deepcopy.go @@ -0,0 +1,179 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_LocalSubjectAccessReview, InType: reflect.TypeOf(&LocalSubjectAccessReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_NonResourceAttributes, InType: reflect.TypeOf(&NonResourceAttributes{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceAttributes, InType: reflect.TypeOf(&ResourceAttributes{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SelfSubjectAccessReview, InType: reflect.TypeOf(&SelfSubjectAccessReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SelfSubjectAccessReviewSpec, InType: reflect.TypeOf(&SelfSubjectAccessReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SubjectAccessReview, InType: reflect.TypeOf(&SubjectAccessReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SubjectAccessReviewSpec, InType: reflect.TypeOf(&SubjectAccessReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_SubjectAccessReviewStatus, InType: reflect.TypeOf(&SubjectAccessReviewStatus{})}, + ) +} + +func DeepCopy_v1_LocalSubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LocalSubjectAccessReview) + out := out.(*LocalSubjectAccessReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_NonResourceAttributes(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NonResourceAttributes) + out := out.(*NonResourceAttributes) + *out = *in + return nil + } +} + +func DeepCopy_v1_ResourceAttributes(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceAttributes) + out := out.(*ResourceAttributes) + *out = *in + return nil + } +} + +func DeepCopy_v1_SelfSubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SelfSubjectAccessReview) + out := out.(*SelfSubjectAccessReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_SelfSubjectAccessReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SelfSubjectAccessReviewSpec) + out := out.(*SelfSubjectAccessReviewSpec) + *out = *in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + **out = **in + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + 
**out = **in + } + return nil + } +} + +func DeepCopy_v1_SubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectAccessReview) + out := out.(*SubjectAccessReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_SubjectAccessReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectAccessReviewSpec) + out := out.(*SubjectAccessReviewSpec) + *out = *in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + **out = **in + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + **out = **in + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*ExtraValue) + } + } + } + return nil + } +} + +func DeepCopy_v1_SubjectAccessReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectAccessReviewStatus) + out := out.(*SubjectAccessReviewStatus) + *out = *in + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.defaults.go new file mode 100644 index 000000000..6df448eb9 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/conversion.go new file mode 100644 index 000000000..c40138365 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/conversion.go @@ -0,0 +1,26 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + return scheme.AddConversionFuncs() +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/defaults.go new file mode 100644 index 000000000..cb49b06ac --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/defaults.go @@ -0,0 +1,25 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return scheme.AddDefaultingFuncs() +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/doc.go new file mode 100644 index 000000000..738b0b6d2 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=authorization.k8s.io +package v1beta1 diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/generated.pb.go new file mode 100644 index 000000000..65877a2ad --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/generated.pb.go @@ -0,0 +1,2344 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. 
+// source: k8s.io/kubernetes/pkg/apis/authorization/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/authorization/v1beta1/generated.proto + + It has these top-level messages: + ExtraValue + LocalSubjectAccessReview + NonResourceAttributes + ResourceAttributes + SelfSubjectAccessReview + SelfSubjectAccessReviewSpec + SubjectAccessReview + SubjectAccessReviewSpec + SubjectAccessReviewStatus +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } +func (*LocalSubjectAccessReview) ProtoMessage() {} +func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } +func (*NonResourceAttributes) ProtoMessage() {} +func (*NonResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } +func (*ResourceAttributes) ProtoMessage() {} +func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } +func (*SelfSubjectAccessReview) ProtoMessage() {} +func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } +func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} +func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{5} +} + +func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } +func (*SubjectAccessReview) ProtoMessage() {} +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } +func (*SubjectAccessReviewSpec) ProtoMessage() {} +func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } +func (*SubjectAccessReviewStatus) ProtoMessage() {} +func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{8} +} + +func init() { + proto.RegisterType((*ExtraValue)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.ExtraValue") + proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.LocalSubjectAccessReview") + proto.RegisterType((*NonResourceAttributes)(nil), 
"k8s.io.client-go.pkg.apis.authorization.v1beta1.NonResourceAttributes") + proto.RegisterType((*ResourceAttributes)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.ResourceAttributes") + proto.RegisterType((*SelfSubjectAccessReview)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.SelfSubjectAccessReview") + proto.RegisterType((*SelfSubjectAccessReviewSpec)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.SelfSubjectAccessReviewSpec") + proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.SubjectAccessReview") + proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.SubjectAccessReviewSpec") + proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.client-go.pkg.apis.authorization.v1beta1.SubjectAccessReviewStatus") +} +func (m ExtraValue) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m ExtraValue) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *LocalSubjectAccessReview) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *LocalSubjectAccessReview) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *NonResourceAttributes) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NonResourceAttributes) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Verb))) + i += copy(data[i:], m.Verb) + return i, nil +} + +func (m *ResourceAttributes) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceAttributes) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) + i += copy(data[i:], m.Namespace) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Verb))) + i += copy(data[i:], m.Verb) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Group))) + i += copy(data[i:], m.Group) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, 
i, uint64(len(m.Version))) + i += copy(data[i:], m.Version) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Resource))) + i += copy(data[i:], m.Resource) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Subresource))) + i += copy(data[i:], m.Subresource) + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + return i, nil +} + +func (m *SelfSubjectAccessReview) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SelfSubjectAccessReview) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n6, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + return i, nil +} + +func (m *SelfSubjectAccessReviewSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SelfSubjectAccessReviewSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ResourceAttributes != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ResourceAttributes.Size())) + n7, err := m.ResourceAttributes.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.NonResourceAttributes != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NonResourceAttributes.Size())) + n8, err := m.NonResourceAttributes.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func (m *SubjectAccessReview) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SubjectAccessReview) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n10, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n11, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + return i, nil +} + +func (m *SubjectAccessReviewSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SubjectAccessReviewSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ResourceAttributes != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ResourceAttributes.Size())) + n12, err := m.ResourceAttributes.MarshalTo(data[i:]) + if 
err != nil { + return 0, err + } + i += n12 + } + if m.NonResourceAttributes != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NonResourceAttributes.Size())) + n13, err := m.NonResourceAttributes.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.User))) + i += copy(data[i:], m.User) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + data[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Extra) > 0 { + for k := range m.Extra { + data[i] = 0x2a + i++ + v := m.Extra[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n14, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n14 + } + } + return i, nil +} + +func (m *SubjectAccessReviewStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SubjectAccessReviewStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Allowed { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.EvaluationError))) + i += copy(data[i:], m.EvaluationError) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m ExtraValue) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LocalSubjectAccessReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NonResourceAttributes) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceAttributes) Size() (n int) { + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subresource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectAccessReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectAccessReviewSpec) Size() (n int) { + var l int + _ = l + if m.ResourceAttributes != nil { + l = m.ResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NonResourceAttributes != nil { + l = m.NonResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SubjectAccessReview) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SubjectAccessReviewSpec) Size() (n int) { + var l int + _ = l + if m.ResourceAttributes != nil { + l = m.ResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NonResourceAttributes != nil { + l = m.NonResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SubjectAccessReviewStatus) Size() (n int) { + var l int + _ = l + n += 2 + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.EvaluationError) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LocalSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalSubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NonResourceAttributes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NonResourceAttributes{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceAttributes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceAttributes{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Version:` + 
fmt.Sprintf("%v", this.Version) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectAccessReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, + `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReviewSpec) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&SubjectAccessReviewSpec{`, + `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReviewStatus{`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ExtraValue) 
Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LocalSubjectAccessReview) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err 
+ } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourceAttributes) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourceAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAttributes) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = 
string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresource = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectAccessReview) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); 
err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectAccessReviewSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceAttributes == nil { + m.ResourceAttributes = &ResourceAttributes{} + } + if err := m.ResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NonResourceAttributes == nil { + m.NonResourceAttributes = &NonResourceAttributes{} + } + if err := m.NonResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReview) 
Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReviewSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } 
+ switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceAttributes == nil { + m.ResourceAttributes = &ResourceAttributes{} + } + if err := m.ResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NonResourceAttributes == nil { + m.NonResourceAttributes = &NonResourceAttributes{} + } + if err := m.NonResourceAttributes.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } 
+ var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + m.Extra[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReviewStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 904 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x56, 0x4f, 0x6f, 0xdc, 0x44, + 0x14, 0x5f, 0xef, 0x9f, 0x64, 0x77, 0x02, 0xa4, 0x4c, 0x55, 0xe2, 0x06, 0x69, 0x77, 0xb5, 0x48, + 0x28, 0x95, 0x8a, 0xdd, 0x94, 0x7f, 0x55, 0xc5, 0x81, 0x58, 0x44, 0x55, 0x05, 0x2d, 0x68, 0x02, + 0x39, 0xc0, 0x85, 0xb1, 0xf3, 0xba, 0x6b, 0x76, 0xed, 0xb1, 0x66, 0xc6, 0x6e, 0xc3, 0xa9, 0x1f, + 0x80, 0x03, 
0xc7, 0x1e, 0xf9, 0x0a, 0x7c, 0x01, 0xae, 0xe4, 0xd8, 0x23, 0x48, 0x68, 0x45, 0xcc, + 0xb7, 0xe0, 0x84, 0x66, 0x3c, 0xbb, 0xce, 0xb2, 0x0e, 0xd5, 0x42, 0x11, 0x1c, 0x72, 0xb3, 0xdf, + 0xfb, 0xbd, 0xf7, 0x7e, 0xef, 0xcd, 0x9b, 0x79, 0x0f, 0xbd, 0x3f, 0xbe, 0x25, 0x9c, 0x90, 0xb9, + 0xe3, 0xd4, 0x07, 0x1e, 0x83, 0x04, 0xe1, 0x26, 0xe3, 0xa1, 0x4b, 0x93, 0x50, 0xb8, 0x34, 0x95, + 0x23, 0xc6, 0xc3, 0xaf, 0xa9, 0x0c, 0x59, 0xec, 0x66, 0xbb, 0x3e, 0x48, 0xba, 0xeb, 0x0e, 0x21, + 0x06, 0x4e, 0x25, 0x1c, 0x39, 0x09, 0x67, 0x92, 0xe1, 0x1b, 0x85, 0x07, 0xa7, 0xf4, 0xe0, 0x24, + 0xe3, 0xa1, 0xa3, 0x3c, 0x38, 0x0b, 0x1e, 0x1c, 0xe3, 0x61, 0xfb, 0x8d, 0x61, 0x28, 0x47, 0xa9, + 0xef, 0x04, 0x2c, 0x72, 0x87, 0x6c, 0xc8, 0x5c, 0xed, 0xc8, 0x4f, 0x1f, 0xe8, 0x3f, 0xfd, 0xa3, + 0xbf, 0x8a, 0x00, 0xdb, 0x6f, 0x19, 0x8a, 0x34, 0x09, 0x23, 0x1a, 0x8c, 0xc2, 0x18, 0xf8, 0x71, + 0x49, 0x32, 0x02, 0x49, 0xdd, 0x6c, 0x89, 0xd6, 0xb6, 0x7b, 0x9e, 0x15, 0x4f, 0x63, 0x19, 0x46, + 0xb0, 0x64, 0xf0, 0xce, 0xb3, 0x0c, 0x44, 0x30, 0x82, 0x88, 0x2e, 0xd9, 0xbd, 0x79, 0x9e, 0x5d, + 0x2a, 0xc3, 0x89, 0x1b, 0xc6, 0x52, 0x48, 0xbe, 0x64, 0x74, 0x26, 0x27, 0x01, 0x3c, 0x03, 0x5e, + 0x26, 0x04, 0x8f, 0x68, 0x94, 0x4c, 0xa0, 0x2a, 0xa7, 0xeb, 0xe7, 0x1e, 0x56, 0x05, 0x7a, 0xf0, + 0x2e, 0x42, 0xfb, 0x8f, 0x24, 0xa7, 0x87, 0x74, 0x92, 0x02, 0xee, 0xa1, 0x56, 0x28, 0x21, 0x12, + 0xb6, 0xd5, 0x6f, 0xec, 0x74, 0xbc, 0x4e, 0x3e, 0xed, 0xb5, 0xee, 0x2a, 0x01, 0x29, 0xe4, 0xb7, + 0xdb, 0x4f, 0xbe, 0xeb, 0xd5, 0x1e, 0xff, 0xd2, 0xaf, 0x0d, 0xa6, 0x75, 0x64, 0x7f, 0xc4, 0x02, + 0x3a, 0x39, 0x48, 0xfd, 0xaf, 0x20, 0x90, 0x7b, 0x41, 0x00, 0x42, 0x10, 0xc8, 0x42, 0x78, 0x88, + 0xbf, 0x44, 0x6d, 0x55, 0xf2, 0x23, 0x2a, 0xa9, 0x6d, 0xf5, 0xad, 0x9d, 0x8d, 0x9b, 0x37, 0x1c, + 0xd3, 0x01, 0x67, 0x2b, 0x50, 0xf6, 0x80, 0x42, 0x3b, 0xd9, 0xae, 0xf3, 0xb1, 0xf6, 0x75, 0x0f, + 0x24, 0xf5, 0xf0, 0xc9, 0xb4, 0x57, 0xcb, 0xa7, 0x3d, 0x54, 0xca, 0xc8, 0xdc, 0x2b, 0x1e, 0xa3, + 0xa6, 0x48, 0x20, 0xb0, 0xeb, 0xda, 0xfb, 0x5d, 0x67, 0xd5, 0xfe, 0x72, 0x2a, 0x68, 0x1f, 0x24, + 0x10, 0x78, 0x2f, 0x98, 0xb0, 0x4d, 0xf5, 0x47, 0x74, 0x10, 0x2c, 0xd0, 0x9a, 0x90, 0x54, 0xa6, + 0xc2, 0x6e, 0xe8, 0x70, 0x1f, 0x3e, 0x9f, 0x70, 0xda, 0xa5, 0xf7, 0x92, 0x09, 0xb8, 0x56, 0xfc, + 0x13, 0x13, 0x6a, 0xf0, 0x05, 0xba, 0x72, 0x9f, 0xc5, 0x04, 0x04, 0x4b, 0x79, 0x00, 0x7b, 0x52, + 0xf2, 0xd0, 0x4f, 0x25, 0x08, 0xdc, 0x47, 0xcd, 0x84, 0xca, 0x91, 0x2e, 0x6c, 0xa7, 0xe4, 0xfb, + 0x09, 0x95, 0x23, 0xa2, 0x35, 0x0a, 0x91, 0x01, 0xf7, 0x75, 0x71, 0xce, 0x20, 0x0e, 0x81, 0xfb, + 0x44, 0x6b, 0x06, 0x3f, 0xd4, 0x11, 0xae, 0x70, 0xed, 0xa2, 0x4e, 0x4c, 0x23, 0x10, 0x09, 0x0d, + 0xc0, 0xf8, 0x7f, 0xd9, 0x58, 0x77, 0xee, 0xcf, 0x14, 0xa4, 0xc4, 0x3c, 0x3b, 0x12, 0x7e, 0x0d, + 0xb5, 0x86, 0x9c, 0xa5, 0x89, 0x2e, 0x5d, 0xc7, 0x7b, 0xd1, 0x40, 0x5a, 0x77, 0x94, 0x90, 0x14, + 0x3a, 0x7c, 0x0d, 0xad, 0x67, 0xc0, 0x45, 0xc8, 0x62, 0xbb, 0xa9, 0x61, 0x9b, 0x06, 0xb6, 0x7e, + 0x58, 0x88, 0xc9, 0x4c, 0x8f, 0xaf, 0xa3, 0x36, 0x37, 0xc4, 0xed, 0x96, 0xc6, 0x5e, 0x32, 0xd8, + 0xf6, 0x2c, 0x21, 0x32, 0x47, 0xe0, 0xb7, 0xd1, 0x86, 0x48, 0xfd, 0xb9, 0xc1, 0x9a, 0x36, 0xb8, + 0x6c, 0x0c, 0x36, 0x0e, 0x4a, 0x15, 0x39, 0x8b, 0x53, 0x69, 0xa9, 0x1c, 0xed, 0xf5, 0xc5, 0xb4, + 0x54, 0x09, 0x88, 0xd6, 0x0c, 0x4e, 0xeb, 0x68, 0xeb, 0x00, 0x26, 0x0f, 0xfe, 0x9b, 0xee, 0x67, + 0x0b, 0xdd, 0x7f, 0xef, 0x6f, 0xb4, 0x63, 0x35, 0xf5, 0xff, 0xd7, 0x0d, 0xf8, 0xb1, 0x8e, 0x5e, + 0xfd, 0x0b, 0xa2, 0xf8, 0x1b, 0x0b, 0x61, 0xbe, 0xd4, 0xc4, 0xa6, 0xe4, 0x1f, 0xac, 0xce, 0x70, + 0xf9, 0x42, 0x78, 0xaf, 0xe4, 0xd3, 
0x5e, 0xc5, 0x45, 0x21, 0x15, 0x71, 0xf1, 0x13, 0x0b, 0x5d, + 0x89, 0xab, 0x6e, 0xac, 0x39, 0xa6, 0x3b, 0xab, 0x33, 0xaa, 0x7c, 0x00, 0xbc, 0xab, 0xf9, 0xb4, + 0x57, 0xfd, 0x36, 0x90, 0x6a, 0x02, 0x83, 0x9f, 0xeb, 0xe8, 0xf2, 0xc5, 0x3b, 0xfd, 0xef, 0x74, + 0xe9, 0xef, 0x4d, 0xb4, 0x75, 0xd1, 0xa1, 0xff, 0xb0, 0x43, 0xe7, 0x83, 0xa4, 0xb1, 0xf8, 0xe2, + 0x7e, 0x26, 0x80, 0x9b, 0x41, 0xd2, 0x9f, 0x0d, 0x92, 0xa6, 0xde, 0x4d, 0x90, 0x3a, 0x0a, 0x3d, + 0x44, 0xc4, 0x6c, 0x8a, 0x1c, 0xa3, 0x16, 0xa8, 0x5d, 0xc6, 0x6e, 0xf5, 0x1b, 0x3b, 0x1b, 0x37, + 0x3f, 0x7d, 0x6e, 0xcd, 0xe6, 0xe8, 0x15, 0x69, 0x3f, 0x96, 0xfc, 0xb8, 0x1c, 0x60, 0x5a, 0x46, + 0x8a, 0x88, 0xdb, 0x99, 0x59, 0xa3, 0x34, 0x06, 0x5f, 0x42, 0x8d, 0x31, 0x1c, 0x17, 0x03, 0x94, + 0xa8, 0x4f, 0x4c, 0x50, 0x2b, 0x53, 0x1b, 0x96, 0x29, 0xf4, 0x7b, 0xab, 0x53, 0x2b, 0xb7, 0x34, + 0x52, 0xb8, 0xba, 0x5d, 0xbf, 0x65, 0x0d, 0xbe, 0xb7, 0xd0, 0xd5, 0x73, 0x5b, 0x56, 0x8d, 0x55, + 0x3a, 0x99, 0xb0, 0x87, 0x70, 0xa4, 0xb9, 0xb4, 0xcb, 0xb1, 0xba, 0x57, 0x88, 0xc9, 0x4c, 0x8f, + 0x5f, 0x47, 0x6b, 0x1c, 0xa8, 0x60, 0xb1, 0x19, 0xe5, 0xf3, 0x6e, 0x27, 0x5a, 0x4a, 0x8c, 0x16, + 0xef, 0xa1, 0x4d, 0x50, 0xe1, 0x35, 0xb9, 0x7d, 0xce, 0x19, 0x37, 0x47, 0xb6, 0x65, 0x0c, 0x36, + 0xf7, 0x17, 0xd5, 0xe4, 0xcf, 0x78, 0xef, 0xda, 0xc9, 0x69, 0xb7, 0xf6, 0xf4, 0xb4, 0x5b, 0xfb, + 0xe9, 0xb4, 0x5b, 0x7b, 0x9c, 0x77, 0xad, 0x93, 0xbc, 0x6b, 0x3d, 0xcd, 0xbb, 0xd6, 0xaf, 0x79, + 0xd7, 0xfa, 0xf6, 0xb7, 0x6e, 0xed, 0xf3, 0x75, 0x93, 0xf4, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x38, 0xbb, 0x77, 0xc4, 0x79, 0x0c, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/generated.proto new file mode 100644 index 000000000..6b77db462 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/generated.proto @@ -0,0 +1,185 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.authorization.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message ExtraValue { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. 
+// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions +// checking. +message LocalSubjectAccessReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace + // you made the request against. If empty, it is defaulted. + optional SubjectAccessReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + optional SubjectAccessReviewStatus status = 3; +} + +// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface +message NonResourceAttributes { + // Path is the URL path of the request + // +optional + optional string path = 1; + + // Verb is the standard HTTP verb + // +optional + optional string verb = 2; +} + +// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface +message ResourceAttributes { + // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces + // "" (empty) is defaulted for LocalSubjectAccessReviews + // "" (empty) is empty for cluster-scoped resources + // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + // +optional + optional string namespace = 1; + + // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. + // +optional + optional string verb = 2; + + // Group is the API Group of the Resource. "*" means all. + // +optional + optional string group = 3; + + // Version is the API Version of the Resource. "*" means all. + // +optional + optional string version = 4; + + // Resource is one of the existing resource types. "*" means all. + // +optional + optional string resource = 5; + + // Subresource is one of the existing resource types. "" means none. + // +optional + optional string subresource = 6; + + // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + // +optional + optional string name = 7; +} + +// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a +// spec.namespace means "in all namespaces". Self is a special case, because users should always be able +// to check whether they can perform an action +message SelfSubjectAccessReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated. user and groups must be empty + optional SelfSubjectAccessReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + optional SubjectAccessReviewStatus status = 3; +} + +// SelfSubjectAccessReviewSpec is a description of the access request. 
Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +message SelfSubjectAccessReviewSpec { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + optional ResourceAttributes resourceAttributes = 1; + + // NonResourceAttributes describes information for a non-resource access request + // +optional + optional NonResourceAttributes nonResourceAttributes = 2; +} + +// SubjectAccessReview checks whether or not a user or group can perform an action. +message SubjectAccessReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated + optional SubjectAccessReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + optional SubjectAccessReviewStatus status = 3; +} + +// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +message SubjectAccessReviewSpec { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + optional ResourceAttributes resourceAttributes = 1; + + // NonResourceAttributes describes information for a non-resource access request + // +optional + optional NonResourceAttributes nonResourceAttributes = 2; + + // User is the user you're testing for. + // If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups + // +optional + optional string verb = 3; + + // Groups is the groups you're testing for. + // +optional + repeated string group = 4; + + // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer + // it needs a reflection here. + // +optional + map<string, ExtraValue> extra = 5; +} + +// SubjectAccessReviewStatus +message SubjectAccessReviewStatus { + // Allowed is required. True if the action would be allowed, false otherwise. + optional bool allowed = 1; + + // Reason is optional. It indicates why a request was allowed or denied. + // +optional + optional string reason = 2; + + // EvaluationError is an indication that some error occurred during the authorization check. + // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. + // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request. + // +optional + optional string evaluationError = 3; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/register.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/register.go new file mode 100644 index 000000000..66549ed83 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &SelfSubjectAccessReview{}, + &SubjectAccessReview{}, + &LocalSubjectAccessReview{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} + +func (obj *LocalSubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } +func (obj *SubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } +func (obj *SelfSubjectAccessReview) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types.generated.go new file mode 100644 index 000000000..939603c58 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types.generated.go @@ -0,0 +1,3233 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package v1beta1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 + } +} + +func (x *SubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = SubjectAccessReviewSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = SubjectAccessReviewSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SelfSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SelfSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SelfSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { 
+ yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = SelfSubjectAccessReviewSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SelfSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = SelfSubjectAccessReviewSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *LocalSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, 
_, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *LocalSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *LocalSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = SubjectAccessReviewSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *LocalSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 
= r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = SubjectAccessReviewSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = SubjectAccessReviewStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Namespace != "" + yyq2[1] = x.Verb != "" + yyq2[2] = x.Group != "" + yyq2[3] = x.Version != "" + yyq2[4] = x.Resource != "" + yyq2[5] = x.Subresource != "" + yyq2[6] = x.Name != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("verb")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Group)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("group")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Group)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Version)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("version")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Version)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subresource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == 
codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv4 := &x.Namespace + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "verb": + if r.TryDecodeAsNil() { + x.Verb = "" + } else { + yyv6 := &x.Verb + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "group": + if r.TryDecodeAsNil() { + x.Group = "" + } else { + yyv8 := &x.Group + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "version": + if r.TryDecodeAsNil() { + x.Version = "" + } else { + yyv10 := &x.Version + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "resource": + if r.TryDecodeAsNil() { + x.Resource = "" + } else { + yyv12 := &x.Resource + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + case "subresource": + if r.TryDecodeAsNil() { + x.Subresource = "" + } else { + yyv14 := &x.Subresource + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv16 := &x.Name + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv19 := &x.Namespace + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if 
r.TryDecodeAsNil() { + x.Verb = "" + } else { + yyv21 := &x.Verb + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Group = "" + } else { + yyv23 := &x.Group + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Version = "" + } else { + yyv25 := &x.Version + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Resource = "" + } else { + yyv27 := &x.Resource + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*string)(yyv27)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subresource = "" + } else { + yyv29 := &x.Subresource + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*string)(yyv29)) = r.DecodeString() + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv31 := &x.Name + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*string)(yyv31)) = r.DecodeString() + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NonResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Path != "" + yyq2[1] = x.Verb != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("verb")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NonResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NonResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "verb": + if r.TryDecodeAsNil() { + x.Verb = "" + } else { + yyv6 := &x.Verb + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NonResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv9 := &x.Path + yym10 := 
z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Verb = "" + } else { + yyv11 := &x.Verb + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ResourceAttributes != nil + yyq2[1] = x.NonResourceAttributes != nil + yyq2[2] = x.User != "" + yyq2[3] = len(x.Groups) != 0 + yyq2[4] = len(x.Extra) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.ResourceAttributes == nil { + r.EncodeNil() + } else { + x.ResourceAttributes.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceAttributes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceAttributes == nil { + r.EncodeNil() + } else { + x.ResourceAttributes.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.NonResourceAttributes == nil { + r.EncodeNil() + } else { + x.NonResourceAttributes.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nonResourceAttributes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NonResourceAttributes == nil { + r.EncodeNil() + } else { + x.NonResourceAttributes.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("user")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.User)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Groups == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + 
if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("group")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Groups == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Extra == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("extra")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Extra == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "resourceAttributes": + if r.TryDecodeAsNil() { + if x.ResourceAttributes != nil { + x.ResourceAttributes = nil + } + } else { + if x.ResourceAttributes == nil { + x.ResourceAttributes = new(ResourceAttributes) + } + x.ResourceAttributes.CodecDecodeSelf(d) + } + case "nonResourceAttributes": + if r.TryDecodeAsNil() { + if x.NonResourceAttributes != nil { + x.NonResourceAttributes = nil + } + } else { + if x.NonResourceAttributes == nil { + x.NonResourceAttributes = new(NonResourceAttributes) + } + x.NonResourceAttributes.CodecDecodeSelf(d) + } + case "user": + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv6 := &x.User + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "group": + if r.TryDecodeAsNil() { + x.Groups = nil 
+ } else { + yyv8 := &x.Groups + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceStringX(yyv8, false, d) + } + } + case "extra": + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv10 := &x.Extra + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ResourceAttributes != nil { + x.ResourceAttributes = nil + } + } else { + if x.ResourceAttributes == nil { + x.ResourceAttributes = new(ResourceAttributes) + } + x.ResourceAttributes.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NonResourceAttributes != nil { + x.NonResourceAttributes = nil + } + } else { + if x.NonResourceAttributes == nil { + x.NonResourceAttributes = new(NonResourceAttributes) + } + x.NonResourceAttributes.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.User = "" + } else { + yyv15 := &x.User + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Groups = nil + } else { + yyv17 := &x.Groups + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + z.F.DecSliceStringX(yyv17, false, d) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv19 := &x.Extra + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ExtraValue) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } 
else { + h.encExtraValue((ExtraValue)(x), e) + } + } +} + +func (x *ExtraValue) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decExtraValue((*ExtraValue)(x), d) + } +} + +func (x *SelfSubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ResourceAttributes != nil + yyq2[1] = x.NonResourceAttributes != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.ResourceAttributes == nil { + r.EncodeNil() + } else { + x.ResourceAttributes.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceAttributes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceAttributes == nil { + r.EncodeNil() + } else { + x.ResourceAttributes.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.NonResourceAttributes == nil { + r.EncodeNil() + } else { + x.NonResourceAttributes.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nonResourceAttributes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NonResourceAttributes == nil { + r.EncodeNil() + } else { + x.NonResourceAttributes.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SelfSubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + 
break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "resourceAttributes": + if r.TryDecodeAsNil() { + if x.ResourceAttributes != nil { + x.ResourceAttributes = nil + } + } else { + if x.ResourceAttributes == nil { + x.ResourceAttributes = new(ResourceAttributes) + } + x.ResourceAttributes.CodecDecodeSelf(d) + } + case "nonResourceAttributes": + if r.TryDecodeAsNil() { + if x.NonResourceAttributes != nil { + x.NonResourceAttributes = nil + } + } else { + if x.NonResourceAttributes == nil { + x.NonResourceAttributes = new(NonResourceAttributes) + } + x.NonResourceAttributes.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ResourceAttributes != nil { + x.ResourceAttributes = nil + } + } else { + if x.ResourceAttributes == nil { + x.ResourceAttributes = new(ResourceAttributes) + } + x.ResourceAttributes.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NonResourceAttributes != nil { + x.NonResourceAttributes = nil + } + } else { + if x.NonResourceAttributes == nil { + x.NonResourceAttributes = new(NonResourceAttributes) + } + x.NonResourceAttributes.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SubjectAccessReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Reason != "" + yyq2[2] = x.EvaluationError != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.Allowed)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("allowed")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = 
yym5 + if false { + } else { + r.EncodeBool(bool(x.Allowed)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.EvaluationError)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("evaluationError")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.EvaluationError)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SubjectAccessReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SubjectAccessReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "allowed": + if r.TryDecodeAsNil() { + x.Allowed = false + } else { + yyv4 := &x.Allowed + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*bool)(yyv4)) = r.DecodeBool() + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv6 := &x.Reason + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "evaluationError": + if r.TryDecodeAsNil() { + x.EvaluationError = "" + } else { + yyv8 := &x.EvaluationError + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + 
z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SubjectAccessReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Allowed = false + } else { + yyv11 := &x.Allowed + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv13 := &x.Reason + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.EvaluationError = "" + } else { + yyv15 := &x.EvaluationError + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encMapstringExtraValue(v map[string]ExtraValue, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decMapstringExtraValue(v *map[string]ExtraValue, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string]ExtraValue, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 ExtraValue + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv4 := &yymv1 + 
yyv4.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv5 := &yymk1 + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv7 := &yymv1 + yyv7.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encExtraValue(v ExtraValue, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyv1)) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decExtraValue(v *ExtraValue, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]string, yyrl1) + } + } else { + yyv1 = make([]string, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 string + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv6 := &yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types.go new file mode 100644 index 000000000..8a1727423 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types.go @@ -0,0 +1,176 @@ 
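(Illustrative sketch, not part of the vendored files.) The CodecEncodeSelf/CodecDecodeSelf methods above are emitted by ugorji codecgen for the authorization v1beta1 types defined in the file that follows; they only run when an Encoder or Decoder from github.com/ugorji/go/codec meets a type implementing its Selfer interface. The round trip below shows that path; the JSON handle and the field values are assumptions chosen for demonstration.

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
	authorizationv1beta1 "k8s.io/client-go/pkg/apis/authorization/v1beta1"
)

func main() {
	spec := authorizationv1beta1.SubjectAccessReviewSpec{
		User:   "jane",
		Groups: []string{"developers"},
		ResourceAttributes: &authorizationv1beta1.ResourceAttributes{
			Namespace: "default",
			Verb:      "list",
			Resource:  "pods",
		},
	}

	var h codec.JsonHandle
	var buf []byte

	// Encode reaches SubjectAccessReviewSpec.CodecEncodeSelf via the Selfer interface.
	if err := codec.NewEncoderBytes(&buf, &h).Encode(&spec); err != nil {
		panic(err)
	}

	// Decode likewise drives codecDecodeSelfFromMap / codecDecodeSelfFromArray.
	var out authorizationv1beta1.SubjectAccessReviewSpec
	if err := codec.NewDecoderBytes(buf, &h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("round-tripped spec: %+v\n", out)
}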
+/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient=true +// +nonNamespaced=true +// +noMethods=true + +// SubjectAccessReview checks whether or not a user or group can perform an action. +type SubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated + Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +genclient=true +// +nonNamespaced=true +// +noMethods=true + +// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a +// spec.namespace means "in all namespaces". Self is a special case, because users should always be able +// to check whether they can perform an action +type SelfSubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated. user and groups must be empty + Spec SelfSubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +genclient=true +// +noMethods=true + +// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. +// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions +// checking. +type LocalSubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace + // you made the request against. If empty, it is defaulted. + Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface +type ResourceAttributes struct { + // Namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces + // "" (empty) is defaulted for LocalSubjectAccessReviews + // "" (empty) is empty for cluster-scoped resources + // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` + // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. + // +optional + Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"` + // Group is the API Group of the Resource. "*" means all. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"` + // Version is the API Version of the Resource. "*" means all. + // +optional + Version string `json:"version,omitempty" protobuf:"bytes,4,opt,name=version"` + // Resource is one of the existing resource types. "*" means all. + // +optional + Resource string `json:"resource,omitempty" protobuf:"bytes,5,opt,name=resource"` + // Subresource is one of the existing resource types. "" means none. + // +optional + Subresource string `json:"subresource,omitempty" protobuf:"bytes,6,opt,name=subresource"` + // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"` +} + +// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface +type NonResourceAttributes struct { + // Path is the URL path of the request + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` + // Verb is the standard HTTP verb + // +optional + Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"` +} + +// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +type SubjectAccessReviewSpec struct { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"` + // NonResourceAttributes describes information for a non-resource access request + // +optional + NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"` + + // User is the user you're testing for. + // If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups + // +optional + User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=verb"` + // Groups is the groups you're testing for. + // +optional + Groups []string `json:"group,omitempty" protobuf:"bytes,4,rep,name=group"` + // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer + // it needs a reflection here. + // +optional + Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,5,rep,name=extra"` +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type ExtraValue []string + +func (t ExtraValue) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +// SelfSubjectAccessReviewSpec is a description of the access request. 
Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +type SelfSubjectAccessReviewSpec struct { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"` + // NonResourceAttributes describes information for a non-resource access request + // +optional + NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"` +} + +// SubjectAccessReviewStatus +type SubjectAccessReviewStatus struct { + // Allowed is required. True if the action would be allowed, false otherwise. + Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` + // Reason is optional. It indicates why a request was allowed or denied. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` + // EvaluationError is an indication that some error occurred during the authorization check. + // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. + // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request. + // +optional + EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,3,opt,name=evaluationError"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..8e521ba16 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,119 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_LocalSubjectAccessReview = map[string]string{ + "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", + "spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. 
If empty, it is defaulted.", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (LocalSubjectAccessReview) SwaggerDoc() map[string]string { + return map_LocalSubjectAccessReview +} + +var map_NonResourceAttributes = map[string]string{ + "": "NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface", + "path": "Path is the URL path of the request", + "verb": "Verb is the standard HTTP verb", +} + +func (NonResourceAttributes) SwaggerDoc() map[string]string { + return map_NonResourceAttributes +} + +var map_ResourceAttributes = map[string]string{ + "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", + "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", + "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "group": "Group is the API Group of the Resource. \"*\" means all.", + "version": "Version is the API Version of the Resource. \"*\" means all.", + "resource": "Resource is one of the existing resource types. \"*\" means all.", + "subresource": "Subresource is one of the existing resource types. \"\" means none.", + "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", +} + +func (ResourceAttributes) SwaggerDoc() map[string]string { + return map_ResourceAttributes +} + +var map_SelfSubjectAccessReview = map[string]string{ + "": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action", + "spec": "Spec holds information about the request being evaluated. user and groups must be empty", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (SelfSubjectAccessReview) SwaggerDoc() map[string]string { + return map_SelfSubjectAccessReview +} + +var map_SelfSubjectAccessReviewSpec = map[string]string{ + "": "SelfSubjectAccessReviewSpec is a description of the access request. 
Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", + "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", + "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", +} + +func (SelfSubjectAccessReviewSpec) SwaggerDoc() map[string]string { + return map_SelfSubjectAccessReviewSpec +} + +var map_SubjectAccessReview = map[string]string{ + "": "SubjectAccessReview checks whether or not a user or group can perform an action.", + "spec": "Spec holds information about the request being evaluated", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (SubjectAccessReview) SwaggerDoc() map[string]string { + return map_SubjectAccessReview +} + +var map_SubjectAccessReviewSpec = map[string]string{ + "": "SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", + "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", + "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", + "user": "User is the user you're testing for. If you specify \"User\" but not \"Group\", then is it interpreted as \"What if User were not a member of any groups", + "group": "Groups is the groups you're testing for.", + "extra": "Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.", +} + +func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { + return map_SubjectAccessReviewSpec +} + +var map_SubjectAccessReviewStatus = map[string]string{ + "": "SubjectAccessReviewStatus", + "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "reason": "Reason is optional. It indicates why a request was allowed or denied.", + "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.", +} + +func (SubjectAccessReviewStatus) SwaggerDoc() map[string]string { + return map_SubjectAccessReviewStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.conversion.go new file mode 100644 index 000000000..7702201d5 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.conversion.go @@ -0,0 +1,263 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
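(Illustrative sketch, not part of the vendored files.) types.go and the Swagger docs above describe the request shape: exactly one of resourceAttributes or nonResourceAttributes is set, and the server fills in status.allowed. A minimal example of building a SelfSubjectAccessReview and inspecting its JSON wire form follows; the namespace/verb/resource values are assumptions.

package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	authorizationv1beta1 "k8s.io/client-go/pkg/apis/authorization/v1beta1"
)

func main() {
	// Per the spec contract above, only ResourceAttributes is filled in here;
	// NonResourceAttributes is left nil.
	review := authorizationv1beta1.SelfSubjectAccessReview{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "authorization.k8s.io/v1beta1",
			Kind:       "SelfSubjectAccessReview",
		},
		Spec: authorizationv1beta1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1beta1.ResourceAttributes{
				Namespace: "default",
				Verb:      "list",
				Resource:  "pods",
			},
		},
	}

	// The json tags on the vendored types yield the field names the Swagger docs
	// above describe (resourceAttributes, allowed, reason, evaluationError, ...).
	body, err := json.MarshalIndent(&review, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}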
+*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1beta1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + authorization "k8s.io/client-go/pkg/apis/authorization" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview, + Convert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview, + Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes, + Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes, + Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes, + Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes, + Convert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview, + Convert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview, + Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec, + Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec, + Convert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview, + Convert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview, + Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec, + Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec, + Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus, + Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus, + ) +} + +func autoConvert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error { + return autoConvert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in, out, s) +} + +func autoConvert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error { + return 
autoConvert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in, out, s) +} + +func autoConvert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error { + out.Path = in.Path + out.Verb = in.Verb + return nil +} + +func Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error { + return autoConvert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in, out, s) +} + +func autoConvert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error { + out.Path = in.Path + out.Verb = in.Verb + return nil +} + +func Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error { + return autoConvert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in, out, s) +} + +func autoConvert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Verb = in.Verb + out.Group = in.Group + out.Version = in.Version + out.Resource = in.Resource + out.Subresource = in.Subresource + out.Name = in.Name + return nil +} + +func Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error { + return autoConvert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in, out, s) +} + +func autoConvert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Verb = in.Verb + out.Group = in.Group + out.Version = in.Version + out.Resource = in.Resource + out.Subresource = in.Subresource + out.Name = in.Name + return nil +} + +func Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s conversion.Scope) error { + return autoConvert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in, out, s) +} + +func autoConvert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error { + return autoConvert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in, out, s) +} + +func autoConvert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *SelfSubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := 
Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *SelfSubjectAccessReview, s conversion.Scope) error { + return autoConvert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in, out, s) +} + +func autoConvert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error { + out.ResourceAttributes = (*authorization.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) + out.NonResourceAttributes = (*authorization.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) + return nil +} + +func Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error { + out.ResourceAttributes = (*ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) + out.NonResourceAttributes = (*NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) + return nil +} + +func Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error { + return autoConvert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in, out, s) +} + +func autoConvert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in 
*authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error { + return autoConvert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in, out, s) +} + +func autoConvert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error { + out.ResourceAttributes = (*authorization.ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) + out.NonResourceAttributes = (*authorization.NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) + out.User = in.User + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]authorization.ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error { + out.ResourceAttributes = (*ResourceAttributes)(unsafe.Pointer(in.ResourceAttributes)) + out.NonResourceAttributes = (*NonResourceAttributes)(unsafe.Pointer(in.NonResourceAttributes)) + out.User = in.User + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error { + return autoConvert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in, out, s) +} + +func autoConvert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error { + out.Allowed = in.Allowed + out.Reason = in.Reason + out.EvaluationError = in.EvaluationError + return nil +} + +func Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error { + return autoConvert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in, out, s) +} + +func autoConvert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error { + out.Allowed = in.Allowed + out.Reason = in.Reason + out.EvaluationError = in.EvaluationError + return nil +} + +func Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error { + return autoConvert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..2bbd414ad --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,179 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 
The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_LocalSubjectAccessReview, InType: reflect.TypeOf(&LocalSubjectAccessReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NonResourceAttributes, InType: reflect.TypeOf(&NonResourceAttributes{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ResourceAttributes, InType: reflect.TypeOf(&ResourceAttributes{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SelfSubjectAccessReview, InType: reflect.TypeOf(&SelfSubjectAccessReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SelfSubjectAccessReviewSpec, InType: reflect.TypeOf(&SelfSubjectAccessReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SubjectAccessReview, InType: reflect.TypeOf(&SubjectAccessReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SubjectAccessReviewSpec, InType: reflect.TypeOf(&SubjectAccessReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SubjectAccessReviewStatus, InType: reflect.TypeOf(&SubjectAccessReviewStatus{})}, + ) +} + +func DeepCopy_v1beta1_LocalSubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LocalSubjectAccessReview) + out := out.(*LocalSubjectAccessReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_NonResourceAttributes(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NonResourceAttributes) + out := out.(*NonResourceAttributes) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_ResourceAttributes(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceAttributes) + out := out.(*ResourceAttributes) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_SelfSubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SelfSubjectAccessReview) + out := out.(*SelfSubjectAccessReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil 
+ } +} + +func DeepCopy_v1beta1_SelfSubjectAccessReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SelfSubjectAccessReviewSpec) + out := out.(*SelfSubjectAccessReviewSpec) + *out = *in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + **out = **in + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + **out = **in + } + return nil + } +} + +func DeepCopy_v1beta1_SubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectAccessReview) + out := out.(*SubjectAccessReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_SubjectAccessReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectAccessReviewSpec) + out := out.(*SubjectAccessReviewSpec) + *out = *in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + **out = **in + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + **out = **in + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*ExtraValue) + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_SubjectAccessReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectAccessReviewStatus) + out := out.(*SubjectAccessReviewStatus) + *out = *in + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.defaults.go new file mode 100644 index 000000000..e24e70be3 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/v1beta1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/authorization/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/authorization/zz_generated.deepcopy.go new file mode 100644 index 000000000..19ccebdaa --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/authorization/zz_generated.deepcopy.go @@ -0,0 +1,179 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package authorization + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_LocalSubjectAccessReview, InType: reflect.TypeOf(&LocalSubjectAccessReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_NonResourceAttributes, InType: reflect.TypeOf(&NonResourceAttributes{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_ResourceAttributes, InType: reflect.TypeOf(&ResourceAttributes{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_SelfSubjectAccessReview, InType: reflect.TypeOf(&SelfSubjectAccessReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_SelfSubjectAccessReviewSpec, InType: reflect.TypeOf(&SelfSubjectAccessReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_SubjectAccessReview, InType: reflect.TypeOf(&SubjectAccessReview{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_SubjectAccessReviewSpec, InType: reflect.TypeOf(&SubjectAccessReviewSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_authorization_SubjectAccessReviewStatus, InType: reflect.TypeOf(&SubjectAccessReviewStatus{})}, + ) +} + +func DeepCopy_authorization_LocalSubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*LocalSubjectAccessReview) + out := out.(*LocalSubjectAccessReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_authorization_NonResourceAttributes(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NonResourceAttributes) + out := out.(*NonResourceAttributes) + *out = *in + return nil + } +} + +func DeepCopy_authorization_ResourceAttributes(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceAttributes) + out := 
out.(*ResourceAttributes) + *out = *in + return nil + } +} + +func DeepCopy_authorization_SelfSubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SelfSubjectAccessReview) + out := out.(*SelfSubjectAccessReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_authorization_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_authorization_SelfSubjectAccessReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SelfSubjectAccessReviewSpec) + out := out.(*SelfSubjectAccessReviewSpec) + *out = *in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + **out = **in + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + **out = **in + } + return nil + } +} + +func DeepCopy_authorization_SubjectAccessReview(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectAccessReview) + out := out.(*SubjectAccessReview) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_authorization_SubjectAccessReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectAccessReviewSpec) + out := out.(*SubjectAccessReviewSpec) + *out = *in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + **out = **in + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + **out = **in + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*ExtraValue) + } + } + } + return nil + } +} + +func DeepCopy_authorization_SubjectAccessReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SubjectAccessReviewStatus) + out := out.(*SubjectAccessReviewStatus) + *out = *in + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/OWNERS b/vendor/k8s.io/client-go/pkg/apis/autoscaling/OWNERS new file mode 100755 index 000000000..9fbc54e93 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/OWNERS @@ -0,0 +1,20 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- caesarxuchao +- erictune +- sttts +- ncdc +- timothysc +- piosz +- dims +- errordeveloper +- madhusudancs +- krousey +- mml +- mbohlool +- david-mcmahon +- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/annotations.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/annotations.go new file mode 100644 index 000000000..4c377561b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/annotations.go @@ -0,0 +1,30 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaling + +// MetricSpecsAnnotation is the annotation which holds non-CPU-utilization HPA metric +// specs when converting the `Metrics` field from autoscaling/v2alpha1 +const MetricSpecsAnnotation = "autoscaling.alpha.kubernetes.io/metrics" + +// MetricStatusesAnnotation is the annotation which holds non-CPU-utilization HPA metric +// statuses when converting the `CurrentMetrics` field from autoscaling/v2alpha1 +const MetricStatusesAnnotation = "autoscaling.alpha.kubernetes.io/current-metrics" + +// DefaultCPUUtilization is the default value for CPU utilization, provided no other +// metrics are present. This is here because it's used by both the v2alpha1 defaulting +// logic, and the pseudo-defaulting done in v1 conversion. +const DefaultCPUUtilization = 80 diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/doc.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/doc.go new file mode 100644 index 000000000..83ac82767 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaling diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/install/install.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/install/install.go new file mode 100644 index 000000000..0ecc0af9b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/install/install.go @@ -0,0 +1,51 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. 
+package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/autoscaling" + "k8s.io/client-go/pkg/apis/autoscaling/v1" + "k8s.io/client-go/pkg/apis/autoscaling/v2alpha1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: autoscaling.GroupName, + VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version, v2alpha1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/autoscaling", + AddInternalObjectsToScheme: autoscaling.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1.SchemeGroupVersion.Version: v1.AddToScheme, + v2alpha1.SchemeGroupVersion.Version: v2alpha1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/register.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/register.go new file mode 100644 index 000000000..2bcea84b9 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaling + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "autoscaling" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Scale{}, + &HorizontalPodAutoscaler{}, + &HorizontalPodAutoscalerList{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/types.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/types.go new file mode 100644 index 000000000..39f206b1f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/types.go @@ -0,0 +1,305 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaling + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api" +) + +// Scale represents a scaling request for a resource. +type Scale struct { + metav1.TypeMeta + // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta + + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + Spec ScaleSpec + + // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // +optional + Status ScaleStatus +} + +// ScaleSpec describes the attributes of a scale subresource. +type ScaleSpec struct { + // desired number of instances for the scaled object. + // +optional + Replicas int32 +} + +// ScaleStatus represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 + + // label query over pods that should match the replicas count. This is same + // as the label selector but in the string format to avoid introspection + // by clients. The string will be in the same format as the query-param syntax. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector string +} + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +type CrossVersionObjectReference struct { + // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" + Kind string + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string + // API version of the referent + // +optional + APIVersion string +} + +// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. +type HorizontalPodAutoscalerSpec struct { + // ScaleTargetRef points to the target resource to scale, and is used to the pods for which metrics + // should be collected, as well as to actually change the replica count. + ScaleTargetRef CrossVersionObjectReference + // MinReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. + // It defaults to 1 pod. + // +optional + MinReplicas *int32 + // MaxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. + // It cannot be less that minReplicas. + MaxReplicas int32 + // Metrics contains the specifications for which to use to calculate the + // desired replica count (the maximum replica count across all metrics will + // be used). The desired replica count is calculated multiplying the + // ratio between the target value and the current value by the current + // number of pods. Ergo, metrics used must decrease as the pod count is + // increased, and vice-versa. 
See the individual metric source types for + // more information about how each type of metric must respond. + // +optional + Metrics []MetricSpec +} + +// MetricSourceType indicates the type of metric. +type MetricSourceType string + +var ( + // ObjectMetricSourceType is a metric describing a kubernetes object + // (for example, hits-per-second on an Ingress object). + ObjectMetricSourceType MetricSourceType = "Object" + // PodsMetricSourceType is a metric describing each pod in the current scale + // target (for example, transactions-processed-per-second). The values + // will be averaged together before being compared to the target value. + PodsMetricSourceType MetricSourceType = "Pods" + // ResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ResourceMetricSourceType MetricSourceType = "Resource" +) + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +type MetricSpec struct { + // Type is the type of metric source. It should match one of the fields below. + Type MetricSourceType + + // Object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricSource + // Pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricSource + // Resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricSource +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricSource struct { + // Target is the described Kubernetes object. + Target CrossVersionObjectReference + + // MetricName is the name of the metric in question. + MetricName string + // TargetValue is the target value of the metric (as a quantity). + TargetValue resource.Quantity +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +type PodsMetricSource struct { + // MetricName is the name of the metric in question + MetricName string + // TargetAverageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + TargetAverageValue resource.Quantity +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. 
Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ResourceMetricSource struct { + // Name is the name of the resource in question. + Name api.ResourceName + // TargetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + TargetAverageUtilization *int32 + // TargetAverageValue is the the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + TargetAverageValue *resource.Quantity +} + +// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. +type HorizontalPodAutoscalerStatus struct { + // ObservedGeneration is the most recent generation observed by this autoscaler. + // +optional + ObservedGeneration *int64 + + // LastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, + // used by the autoscaler to control how often the number of pods is changed. + // +optional + LastScaleTime *metav1.Time + + // CurrentReplicas is current number of replicas of pods managed by this autoscaler, + // as last seen by the autoscaler. + CurrentReplicas int32 + + // DesiredReplicas is the desired number of replicas of pods managed by this autoscaler, + // as last calculated by the autoscaler. + DesiredReplicas int32 + + // CurrentMetrics is the last read state of the metrics used by this autoscaler. + CurrentMetrics []MetricStatus +} + +// MetricStatus describes the last-read state of a single metric. +type MetricStatus struct { + // Type is the type of metric source. It will match one of the fields below. + Type MetricSourceType + + // Object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricStatus + // Pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricStatus + // Resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricStatus +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricStatus struct { + // Target is the described Kubernetes object. + Target CrossVersionObjectReference + + // MetricName is the name of the metric in question. + MetricName string + // CurrentValue is the current value of the metric (as a quantity). + CurrentValue resource.Quantity +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). 
+type PodsMetricStatus struct { + // MetricName is the name of the metric in question + MetricName string + // CurrentAverageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + CurrentAverageValue resource.Quantity +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ResourceMetricStatus struct { + // Name is the name of the resource in question. + Name api.ResourceName + // CurrentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + CurrentAverageUtilization *int32 + // CurrentAverageValue is the the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + CurrentAverageValue resource.Quantity +} + +// +genclient=true + +// HorizontalPodAutoscaler is the configuration for a horizontal pod +// autoscaler, which automatically manages the replica count of any resource +// implementing the scale subresource based on the metrics specified. +type HorizontalPodAutoscaler struct { + metav1.TypeMeta + // Metadata is the standard object metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // Spec is the specification for the behaviour of the autoscaler. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + Spec HorizontalPodAutoscalerSpec + + // Status is the current information about the autoscaler. + // +optional + Status HorizontalPodAutoscalerStatus +} + +// HorizontalPodAutoscaler is a list of horizontal pod autoscaler objects. +type HorizontalPodAutoscalerList struct { + metav1.TypeMeta + // Metadata is the standard list metadata. + // +optional + metav1.ListMeta + + // Items is the list of horizontal pod autoscaler objects. + Items []HorizontalPodAutoscaler +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/conversion.go new file mode 100644 index 000000000..6b8306922 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/conversion.go @@ -0,0 +1,244 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "encoding/json" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/autoscaling" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + err := scheme.AddConversionFuncs( + Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler, + Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler, + Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec, + Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec, + Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus, + Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus, + ) + if err != nil { + return err + } + + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { + if err := autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in, out, s); err != nil { + return err + } + + otherMetrics := make([]MetricSpec, 0, len(in.Spec.Metrics)) + for _, metric := range in.Spec.Metrics { + if metric.Type == autoscaling.ResourceMetricSourceType && metric.Resource != nil && metric.Resource.Name == api.ResourceCPU && metric.Resource.TargetAverageUtilization != nil { + continue + } + + convMetric := MetricSpec{} + if err := Convert_autoscaling_MetricSpec_To_v1_MetricSpec(&metric, &convMetric, s); err != nil { + return err + } + otherMetrics = append(otherMetrics, convMetric) + } + + // NB: we need to save the status even if it maps to a CPU utilization status in order to save the raw value as well + currentMetrics := make([]MetricStatus, len(in.Status.CurrentMetrics)) + for i, currentMetric := range in.Status.CurrentMetrics { + if err := Convert_autoscaling_MetricStatus_To_v1_MetricStatus(¤tMetric, ¤tMetrics[i], s); err != nil { + return err + } + } + + if len(otherMetrics) > 0 || len(in.Status.CurrentMetrics) > 0 { + old := out.Annotations + out.Annotations = make(map[string]string, len(old)+2) + if old != nil { + for k, v := range old { + out.Annotations[k] = v + } + } + } + + if len(otherMetrics) > 0 { + otherMetricsEnc, err := json.Marshal(otherMetrics) + if err != nil { + return err + } + out.Annotations[autoscaling.MetricSpecsAnnotation] = string(otherMetricsEnc) + } + + if len(in.Status.CurrentMetrics) > 0 { + currentMetricsEnc, err := json.Marshal(currentMetrics) + if err != nil { + return err + } + out.Annotations[autoscaling.MetricStatusesAnnotation] = string(currentMetricsEnc) + } + + return nil +} + +func Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { + if err := autoConvert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s); err != nil { + return err + } + + if otherMetricsEnc, hasOtherMetrics := out.Annotations[autoscaling.MetricSpecsAnnotation]; hasOtherMetrics { + var otherMetrics []MetricSpec + if err := json.Unmarshal([]byte(otherMetricsEnc), &otherMetrics); err != nil { + return err + } + + // the normal Spec conversion could have populated out.Spec.Metrics with a single element, so deal with that + outMetrics := make([]autoscaling.MetricSpec, len(otherMetrics)+len(out.Spec.Metrics)) + for i, metric := range 
otherMetrics { + if err := Convert_v1_MetricSpec_To_autoscaling_MetricSpec(&metric, &outMetrics[i], s); err != nil { + return err + } + } + if out.Spec.Metrics != nil { + outMetrics[len(otherMetrics)] = out.Spec.Metrics[0] + } + out.Spec.Metrics = outMetrics + delete(out.Annotations, autoscaling.MetricSpecsAnnotation) + } + + if currentMetricsEnc, hasCurrentMetrics := out.Annotations[autoscaling.MetricStatusesAnnotation]; hasCurrentMetrics { + // ignore any existing status values -- the ones here have more information + var currentMetrics []MetricStatus + if err := json.Unmarshal([]byte(currentMetricsEnc), ¤tMetrics); err != nil { + return err + } + + out.Status.CurrentMetrics = make([]autoscaling.MetricStatus, len(currentMetrics)) + for i, currentMetric := range currentMetrics { + if err := Convert_v1_MetricStatus_To_autoscaling_MetricStatus(¤tMetric, &out.Status.CurrentMetrics[i], s); err != nil { + return err + } + } + delete(out.Annotations, autoscaling.MetricStatusesAnnotation) + } + + // autoscaling/v1 formerly had an implicit default applied in the controller. In v2alpha1, we apply it explicitly. + // We apply it here, explicitly, since we have access to the full set of metrics from the annotation. + if len(out.Spec.Metrics) == 0 { + // no other metrics, no explicit CPU value set + out.Spec.Metrics = []autoscaling.MetricSpec{ + { + Type: autoscaling.ResourceMetricSourceType, + Resource: &autoscaling.ResourceMetricSource{ + Name: api.ResourceCPU, + }, + }, + } + out.Spec.Metrics[0].Resource.TargetAverageUtilization = new(int32) + *out.Spec.Metrics[0].Resource.TargetAverageUtilization = autoscaling.DefaultCPUUtilization + } + + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { + return err + } + + out.MinReplicas = in.MinReplicas + out.MaxReplicas = in.MaxReplicas + + for _, metric := range in.Metrics { + if metric.Type == autoscaling.ResourceMetricSourceType && metric.Resource != nil && metric.Resource.Name == api.ResourceCPU { + if metric.Resource.TargetAverageUtilization != nil { + out.TargetCPUUtilizationPercentage = new(int32) + *out.TargetCPUUtilizationPercentage = *metric.Resource.TargetAverageUtilization + } + break + } + } + + return nil +} + +func Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { + return err + } + + out.MinReplicas = in.MinReplicas + out.MaxReplicas = in.MaxReplicas + + if in.TargetCPUUtilizationPercentage != nil { + out.Metrics = []autoscaling.MetricSpec{ + { + Type: autoscaling.ResourceMetricSourceType, + Resource: &autoscaling.ResourceMetricSource{ + Name: api.ResourceCPU, + }, + }, + } + out.Metrics[0].Resource.TargetAverageUtilization = new(int32) + *out.Metrics[0].Resource.TargetAverageUtilization = *in.TargetCPUUtilizationPercentage + } + + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { + 
out.ObservedGeneration = in.ObservedGeneration + out.LastScaleTime = in.LastScaleTime + + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + + for _, metric := range in.CurrentMetrics { + if metric.Type == autoscaling.ResourceMetricSourceType && metric.Resource != nil && metric.Resource.Name == api.ResourceCPU { + if metric.Resource.CurrentAverageUtilization != nil { + + out.CurrentCPUUtilizationPercentage = new(int32) + *out.CurrentCPUUtilizationPercentage = *metric.Resource.CurrentAverageUtilization + } + } + } + return nil +} + +func Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.LastScaleTime = in.LastScaleTime + + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + + if in.CurrentCPUUtilizationPercentage != nil { + out.CurrentMetrics = []autoscaling.MetricStatus{ + { + Type: autoscaling.ResourceMetricSourceType, + Resource: &autoscaling.ResourceMetricStatus{ + Name: api.ResourceCPU, + }, + }, + } + out.CurrentMetrics[0].Resource.CurrentAverageUtilization = new(int32) + *out.CurrentMetrics[0].Resource.CurrentAverageUtilization = *in.CurrentCPUUtilizationPercentage + } + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/defaults.go new file mode 100644 index 000000000..d423ad125 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/defaults.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + RegisterDefaults(scheme) + return scheme.AddDefaultingFuncs( + SetDefaults_HorizontalPodAutoscaler, + ) +} + +func SetDefaults_HorizontalPodAutoscaler(obj *HorizontalPodAutoscaler) { + if obj.Spec.MinReplicas == nil { + minReplicas := int32(1) + obj.Spec.MinReplicas = &minReplicas + } + + // NB: we apply a default for CPU utilization in conversion because + // we need access to the annotations to properly apply the default. +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/doc.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/doc.go new file mode 100644 index 000000000..c7be42d5a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/generated.pb.go new file mode 100644 index 000000000..96b8335f8 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/generated.pb.go @@ -0,0 +1,3498 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto + + It has these top-level messages: + CrossVersionObjectReference + HorizontalPodAutoscaler + HorizontalPodAutoscalerList + HorizontalPodAutoscalerSpec + HorizontalPodAutoscalerStatus + MetricSpec + MetricStatus + ObjectMetricSource + ObjectMetricStatus + PodsMetricSource + PodsMetricStatus + ResourceMetricSource + ResourceMetricStatus + Scale + ScaleSpec + ScaleStatus +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/pkg/api/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } +func (*CrossVersionObjectReference) ProtoMessage() {} +func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{0} +} + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } +func (*HorizontalPodAutoscalerList) ProtoMessage() {} +func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + +func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } +func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} +func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{3} +} + +func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } +func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} +func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{4} +} + +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + 
+func init() { + proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.CrossVersionObjectReference") + proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.HorizontalPodAutoscaler") + proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerList") + proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerSpec") + proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerStatus") + proto.RegisterType((*MetricSpec)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.MetricSpec") + proto.RegisterType((*MetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.MetricStatus") + proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.ObjectMetricSource") + proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.ObjectMetricStatus") + proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.PodsMetricSource") + proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.PodsMetricStatus") + proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.ResourceMetricSource") + proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.ResourceMetricStatus") + proto.RegisterType((*Scale)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v1.ScaleStatus") +} +func (m *CrossVersionObjectReference) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CrossVersionObjectReference) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + return i, nil +} + +func (m *HorizontalPodAutoscaler) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscaler) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *HorizontalPodAutoscalerList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err 
!= nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HorizontalPodAutoscalerSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ScaleTargetRef.Size())) + n5, err := m.ScaleTargetRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + if m.MinReplicas != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.MinReplicas)) + } + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxReplicas)) + if m.TargetCPUUtilizationPercentage != nil { + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TargetCPUUtilizationPercentage)) + } + return i, nil +} + +func (m *HorizontalPodAutoscalerStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ObservedGeneration != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastScaleTime.Size())) + n6, err := m.LastScaleTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + } + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentReplicas)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DesiredReplicas)) + if m.CurrentCPUUtilizationPercentage != nil { + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.CurrentCPUUtilizationPercentage)) + } + return i, nil +} + +func (m *MetricSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *MetricSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.Object != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Object.Size())) + n7, err := m.Object.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.Pods != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Pods.Size())) + n8, err := m.Pods.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.Resource != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Resource.Size())) + n9, err := m.Resource.MarshalTo(data[i:]) + if err != nil { + return 0, err + } 
+ i += n9 + } + return i, nil +} + +func (m *MetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *MetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.Object != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Object.Size())) + n10, err := m.Object.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if m.Pods != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Pods.Size())) + n11, err := m.Pods.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.Resource != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Resource.Size())) + n12, err := m.Resource.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} + +func (m *ObjectMetricSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectMetricSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) + n13, err := m.Target.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetValue.Size())) + n14, err := m.TargetValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n14 + return i, nil +} + +func (m *ObjectMetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectMetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) + n15, err := m.Target.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n15 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentValue.Size())) + n16, err := m.CurrentValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n16 + return i, nil +} + +func (m *PodsMetricSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodsMetricSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetAverageValue.Size())) + n17, err := m.TargetAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n17 + return i, nil +} + +func (m *PodsMetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err 
!= nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodsMetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentAverageValue.Size())) + n18, err := m.CurrentAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n18 + return i, nil +} + +func (m *ResourceMetricSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceMetricSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + if m.TargetAverageUtilization != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetAverageValue.Size())) + n19, err := m.TargetAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n19 + } + return i, nil +} + +func (m *ResourceMetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceMetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + if m.CurrentAverageUtilization != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.CurrentAverageUtilization)) + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentAverageValue.Size())) + n20, err := m.CurrentAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n20 + return i, nil +} + +func (m *Scale) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Scale) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n21, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n21 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n22, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n22 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n23, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n23 + return i, nil +} + +func (m *ScaleSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScaleSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + return i, nil +} + +func (m *ScaleStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil 
{ + return nil, err + } + return data[:n], nil +} + +func (m *ScaleStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Selector))) + i += copy(data[i:], m.Selector) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *CrossVersionObjectReference) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscaler) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscalerList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + var l int + _ = l + l = m.ScaleTargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.MinReplicas != nil { + n += 1 + sovGenerated(uint64(*m.MinReplicas)) + } + n += 1 + sovGenerated(uint64(m.MaxReplicas)) + if m.TargetCPUUtilizationPercentage != nil { + n += 1 + sovGenerated(uint64(*m.TargetCPUUtilizationPercentage)) + } + return n +} + +func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + var l int + _ = l + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + l = m.LastScaleTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.DesiredReplicas)) + if m.CurrentCPUUtilizationPercentage != nil { + n += 1 + sovGenerated(uint64(*m.CurrentCPUUtilizationPercentage)) + } + return n +} + +func (m *MetricSpec) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ObjectMetricSource) Size() (n int) { + var l int + _ = l + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TargetValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectMetricStatus) Size() (n int) { + var l int + _ = l + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodsMetricSource) Size() (n int) { + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodsMetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceMetricSource) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ResourceMetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.CurrentAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.CurrentAverageUtilization)) + } + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Scale) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleSpec) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + return n +} + +func (m *ScaleStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + l = len(m.Selector) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CrossVersionObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CrossVersionObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscaler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscaler{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, 
"") + return s +} +func (this *HorizontalPodAutoscalerList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, + `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, + `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, + `TargetCPUUtilizationPercentage:` + valueToStringGenerated(this.TargetCPUUtilizationPercentage) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, + `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, + `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, + `CurrentCPUUtilizationPercentage:` + valueToStringGenerated(this.CurrentCPUUtilizationPercentage) + `,`, + `}`, + }, "") + return s +} +func (this *MetricSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricSpec{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricStatus{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectMetricSource{`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricStatus) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&ObjectMetricStatus{`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricSource{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `TargetAverageValue:` + strings.Replace(strings.Replace(this.TargetAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricStatus{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Scale) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Scale{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleSpec{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + 
for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, HorizontalPodAutoscaler{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ScaleTargetRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MinReplicas = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) + } + m.MaxReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MaxReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetCPUUtilizationPercentage", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
m.TargetCPUUtilizationPercentage = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ObservedGeneration = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScaleTime == nil { + m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.LastScaleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.CurrentReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) + } + m.DesiredReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.DesiredReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentCPUUtilizationPercentage", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrentCPUUtilizationPercentage = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err 
!= nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricSource{} + } + if err := m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricSource{} + } + if err := m.Pods.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricSource{} + } + if err := m.Resource.Unmarshal(data[iNdEx:postIndex]); err 
!= nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricStatus{} + } + if err := m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricStatus{} + } + if err := m.Pods.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricStatus{} + } + if err := m.Resource.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMetricSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMetricStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex 
:= iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodsMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodsMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_kubernetes_pkg_api_v1.ResourceName(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetAverageValue == nil { + m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + } + if err := m.TargetAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_kubernetes_pkg_api_v1.ResourceName(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrentAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scale) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scale: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: ScaleStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 1279 
bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x57, 0xcd, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xda, 0x4e, 0x94, 0xce, 0xa6, 0x1f, 0x4c, 0xaa, 0xd4, 0x4d, 0xa8, 0x37, 0x5a, 0x38, + 0xb4, 0xa8, 0xec, 0x12, 0x13, 0x2a, 0x22, 0x84, 0x50, 0x6c, 0x54, 0x5a, 0x51, 0xb7, 0x61, 0xe2, + 0x46, 0x7c, 0x09, 0x31, 0x59, 0x4f, 0x9d, 0x69, 0xbc, 0x1f, 0x9a, 0x1d, 0x5b, 0x4d, 0x24, 0x24, + 0x4e, 0x9c, 0xb9, 0x70, 0x46, 0xf0, 0x4f, 0x70, 0x2e, 0x12, 0x52, 0x8e, 0xbd, 0xc1, 0xc9, 0x22, + 0x0b, 0x37, 0xc4, 0x3f, 0x50, 0x71, 0x40, 0x3b, 0x3b, 0x5e, 0xef, 0xda, 0x5e, 0x27, 0x4e, 0xd3, + 0x22, 0x6e, 0xbb, 0x33, 0xef, 0xfd, 0x7e, 0xef, 0xfd, 0xe6, 0xcd, 0x9b, 0x19, 0xb0, 0xb6, 0xfb, + 0xb6, 0x6f, 0x50, 0xd7, 0xdc, 0x6d, 0x6f, 0x13, 0xe6, 0x10, 0x4e, 0x7c, 0xd3, 0xdb, 0x6d, 0x9a, + 0xd8, 0xa3, 0xbe, 0x89, 0xdb, 0xdc, 0xf5, 0x2d, 0xdc, 0xa2, 0x4e, 0xd3, 0xec, 0xac, 0x98, 0x4d, + 0xe2, 0x10, 0x86, 0x39, 0x69, 0x18, 0x1e, 0x73, 0xb9, 0x0b, 0xaf, 0x45, 0xae, 0x46, 0xdf, 0xd5, + 0xf0, 0x76, 0x9b, 0x46, 0xe8, 0x6a, 0x24, 0x5c, 0x8d, 0xce, 0xca, 0xe2, 0xeb, 0x4d, 0xca, 0x77, + 0xda, 0xdb, 0x86, 0xe5, 0xda, 0x66, 0xd3, 0x6d, 0xba, 0xa6, 0x40, 0xd8, 0x6e, 0x3f, 0x10, 0x7f, + 0xe2, 0x47, 0x7c, 0x45, 0xc8, 0x8b, 0xab, 0x32, 0x28, 0xec, 0x51, 0x1b, 0x5b, 0x3b, 0xd4, 0x21, + 0x6c, 0xaf, 0x17, 0x96, 0xc9, 0x88, 0xef, 0xb6, 0x99, 0x45, 0x06, 0xe3, 0x19, 0xeb, 0xe5, 0x9b, + 0x36, 0xe1, 0x78, 0x44, 0x16, 0x8b, 0x66, 0x96, 0x17, 0x6b, 0x3b, 0x9c, 0xda, 0xc3, 0x34, 0x37, + 0x8e, 0x72, 0xf0, 0xad, 0x1d, 0x62, 0xe3, 0x21, 0xbf, 0x37, 0xb3, 0xfc, 0xda, 0x9c, 0xb6, 0x4c, + 0xea, 0x70, 0x9f, 0xb3, 0x71, 0x39, 0xf9, 0x84, 0x75, 0x08, 0xeb, 0x27, 0x44, 0x1e, 0x61, 0xdb, + 0x6b, 0x91, 0x51, 0x39, 0x5d, 0xcf, 0x5c, 0xd4, 0x11, 0xd6, 0xfa, 0x77, 0x0a, 0x58, 0xaa, 0x32, + 0xd7, 0xf7, 0xb7, 0x08, 0xf3, 0xa9, 0xeb, 0xdc, 0xdb, 0x7e, 0x48, 0x2c, 0x8e, 0xc8, 0x03, 0xc2, + 0x88, 0x63, 0x11, 0xb8, 0x0c, 0x0a, 0xbb, 0xd4, 0x69, 0x14, 0x95, 0x65, 0xe5, 0xea, 0x99, 0xca, + 0xdc, 0x41, 0x57, 0x9b, 0x0a, 0xba, 0x5a, 0xe1, 0x43, 0xea, 0x34, 0x90, 0x98, 0x09, 0x2d, 0x1c, + 0x6c, 0x93, 0x62, 0x2e, 0x6d, 0x71, 0x17, 0xdb, 0x04, 0x89, 0x19, 0x58, 0x06, 0x00, 0x7b, 0x54, + 0x12, 0x14, 0xf3, 0xc2, 0x0e, 0x4a, 0x3b, 0xb0, 0xbe, 0x71, 0x5b, 0xce, 0xa0, 0x84, 0x95, 0xfe, + 0x6b, 0x0e, 0x5c, 0xba, 0xe5, 0x32, 0xba, 0xef, 0x3a, 0x1c, 0xb7, 0x36, 0xdc, 0xc6, 0xba, 0x2c, + 0x2a, 0xc2, 0xe0, 0x97, 0x60, 0x36, 0x5c, 0xd0, 0x06, 0xe6, 0x58, 0xc4, 0xa5, 0x96, 0xdf, 0x30, + 0x64, 0x39, 0x26, 0xf5, 0xed, 0x17, 0x64, 0x68, 0x6d, 0x74, 0x56, 0x8c, 0x28, 0xb9, 0x1a, 0xe1, + 0xb8, 0xcf, 0xdf, 0x1f, 0x43, 0x31, 0x2a, 0xdc, 0x01, 0x05, 0xdf, 0x23, 0x96, 0xc8, 0x49, 0x2d, + 0xdf, 0x34, 0x8e, 0x5d, 0xec, 0x46, 0x46, 0xcc, 0x9b, 0x1e, 0xb1, 0xfa, 0xda, 0x84, 0x7f, 0x48, + 0x30, 0x40, 0x0f, 0xcc, 0xf8, 0x1c, 0xf3, 0xb6, 0x2f, 0x74, 0x51, 0xcb, 0xb7, 0x4e, 0x81, 0x4b, + 0xe0, 0x55, 0xce, 0x49, 0xb6, 0x99, 0xe8, 0x1f, 0x49, 0x1e, 0xfd, 0x4f, 0x05, 0x2c, 0x65, 0x78, + 0xde, 0xa1, 0x3e, 0x87, 0x9f, 0x0f, 0xa9, 0x6b, 0x1c, 0x4f, 0xdd, 0xd0, 0x5b, 0x68, 0x7b, 0x41, + 0x32, 0xcf, 0xf6, 0x46, 0x12, 0xca, 0x36, 0xc1, 0x34, 0xe5, 0xc4, 0xf6, 0x8b, 0xb9, 0xe5, 0xfc, + 0x55, 0xb5, 0x5c, 0x79, 0xf6, 0x74, 0x2b, 0x67, 0x25, 0xdd, 0xf4, 0xed, 0x10, 0x18, 0x45, 0xf8, + 0xfa, 0x3f, 0xb9, 0xcc, 0x34, 0x43, 0xf9, 0xe1, 0x37, 0x0a, 0x38, 0x27, 0x7e, 0xeb, 0x98, 0x35, + 0x49, 0x58, 0xf1, 0x32, 0xdb, 0x49, 0x56, 0x7b, 0xcc, 0xce, 0xa9, 0x2c, 0xc8, 0xb0, 0xce, 0x6d, + 0xa6, 0x58, 0xd0, 0x00, 0x2b, 0x5c, 0x01, 0xaa, 0x4d, 0x1d, 0x44, 0xbc, 0x16, 0xb5, 
0xb0, 0x2f, + 0x4a, 0x6e, 0xba, 0x72, 0x3e, 0xe8, 0x6a, 0x6a, 0xad, 0x3f, 0x8c, 0x92, 0x36, 0xf0, 0x2d, 0xa0, + 0xda, 0xf8, 0x51, 0xec, 0x92, 0x17, 0x2e, 0xf3, 0x92, 0x4f, 0xad, 0xf5, 0xa7, 0x50, 0xd2, 0x0e, + 0x3e, 0x04, 0x25, 0x2e, 0x68, 0xab, 0x1b, 0xf7, 0xef, 0x73, 0xda, 0xa2, 0xfb, 0x98, 0x53, 0xd7, + 0xd9, 0x20, 0xcc, 0x22, 0x0e, 0xc7, 0x4d, 0x52, 0x2c, 0x08, 0x24, 0x3d, 0xe8, 0x6a, 0xa5, 0xfa, + 0x58, 0x4b, 0x74, 0x04, 0x92, 0xfe, 0x38, 0x0f, 0xae, 0x8c, 0xad, 0x4f, 0x78, 0x13, 0x40, 0x77, + 0x5b, 0xf4, 0xb5, 0xc6, 0x07, 0x51, 0x53, 0x0a, 0xbb, 0x43, 0xb8, 0x06, 0xf9, 0xca, 0x42, 0xd0, + 0xd5, 0xe0, 0xbd, 0xa1, 0x59, 0x34, 0xc2, 0x03, 0x5a, 0xe0, 0x6c, 0x0b, 0xfb, 0x3c, 0x52, 0x99, + 0xca, 0x46, 0xa4, 0x96, 0x5f, 0x3b, 0x5e, 0xd1, 0x86, 0x1e, 0x95, 0x97, 0x82, 0xae, 0x76, 0xf6, + 0x4e, 0x12, 0x04, 0xa5, 0x31, 0xe1, 0x3a, 0x38, 0x6f, 0xb5, 0x19, 0x23, 0x0e, 0x1f, 0x50, 0xfd, + 0x92, 0x54, 0xfd, 0x7c, 0x35, 0x3d, 0x8d, 0x06, 0xed, 0x43, 0x88, 0x06, 0xf1, 0x29, 0x23, 0x8d, + 0x18, 0xa2, 0x90, 0x86, 0x78, 0x3f, 0x3d, 0x8d, 0x06, 0xed, 0xa1, 0x0d, 0x34, 0x89, 0x9a, 0xb9, + 0x82, 0xd3, 0x02, 0xf2, 0x95, 0xa0, 0xab, 0x69, 0xd5, 0xf1, 0xa6, 0xe8, 0x28, 0x2c, 0xfd, 0xaf, + 0x1c, 0x00, 0x35, 0xc2, 0x19, 0xb5, 0xc4, 0x8e, 0x59, 0x05, 0x05, 0xbe, 0xe7, 0x11, 0x79, 0x14, + 0x2c, 0xf7, 0x9a, 0x59, 0x7d, 0xcf, 0x23, 0x4f, 0xbb, 0xda, 0x05, 0x69, 0x29, 0x8e, 0xe7, 0x70, + 0x0c, 0x09, 0x6b, 0x88, 0xc1, 0x8c, 0x2b, 0x76, 0x86, 0x5c, 0x97, 0x77, 0x27, 0xd8, 0x5e, 0x71, + 0x6f, 0x8e, 0x81, 0x2b, 0x20, 0xec, 0x68, 0x72, 0xab, 0x49, 0x60, 0xf8, 0x09, 0x28, 0x78, 0x6e, + 0xa3, 0xd7, 0x41, 0xdf, 0x99, 0x80, 0x60, 0xc3, 0x6d, 0xf8, 0x29, 0xf8, 0xd9, 0x30, 0xa3, 0x70, + 0x14, 0x09, 0x48, 0x48, 0xc1, 0x6c, 0xef, 0xca, 0x21, 0x56, 0x4b, 0x2d, 0xbf, 0x37, 0x01, 0x3c, + 0x92, 0xae, 0x29, 0x8a, 0xb9, 0xb0, 0x33, 0xf6, 0x66, 0x50, 0x0c, 0xaf, 0xff, 0x9d, 0x03, 0x73, + 0xd2, 0x30, 0xda, 0x20, 0xff, 0xb1, 0xde, 0xd1, 0x29, 0xf2, 0xdc, 0xf4, 0x8e, 0xe0, 0x9f, 0xab, + 0xde, 0x11, 0x45, 0x96, 0xde, 0xdf, 0xe7, 0x00, 0x1c, 0x2e, 0x30, 0xe8, 0x80, 0x99, 0xa8, 0xb5, + 0x9d, 0xf2, 0x71, 0x10, 0x1f, 0xc7, 0xb2, 0xf3, 0x4b, 0x96, 0xf0, 0x72, 0x64, 0x0b, 0xfe, 0xbb, + 0xfd, 0x4b, 0x54, 0x7c, 0x39, 0xa9, 0xc5, 0x33, 0x28, 0x61, 0x05, 0x09, 0x50, 0x23, 0xef, 0x2d, + 0xdc, 0x6a, 0x13, 0xb9, 0x0e, 0x63, 0x4f, 0x69, 0xa3, 0x97, 0xb6, 0xf1, 0x51, 0x1b, 0x3b, 0x9c, + 0xf2, 0xbd, 0xfe, 0x79, 0x51, 0xef, 0x43, 0xa1, 0x24, 0xae, 0xfe, 0xe3, 0xa0, 0x42, 0x51, 0x5d, + 0xfe, 0x1f, 0x14, 0xda, 0x01, 0x73, 0xb2, 0xbb, 0x3d, 0x8b, 0x44, 0x17, 0x25, 0xcb, 0x5c, 0x35, + 0x81, 0x85, 0x52, 0xc8, 0xfa, 0xcf, 0x0a, 0xb8, 0x30, 0xd8, 0x46, 0x06, 0x42, 0x56, 0x8e, 0x15, + 0xf2, 0x3e, 0x80, 0x51, 0xc2, 0xeb, 0x1d, 0xc2, 0x70, 0x93, 0x44, 0x81, 0xe7, 0x4e, 0x14, 0xf8, + 0xa2, 0xe4, 0x82, 0xf5, 0x21, 0x44, 0x34, 0x82, 0x45, 0xff, 0x25, 0x9d, 0x44, 0xb4, 0xce, 0x27, + 0x49, 0xe2, 0x2b, 0x30, 0x2f, 0xd5, 0x39, 0x85, 0x2c, 0x96, 0x24, 0xd9, 0x7c, 0x75, 0x18, 0x12, + 0x8d, 0xe2, 0xd1, 0x7f, 0xca, 0x81, 0x8b, 0xa3, 0x9a, 0x2e, 0xac, 0xc9, 0x47, 0x4a, 0x94, 0xc5, + 0x5a, 0xf2, 0x91, 0xf2, 0xb4, 0xab, 0x5d, 0x1b, 0xf7, 0x64, 0x8a, 0xbb, 0x4a, 0xe2, 0x45, 0xf3, + 0x31, 0x28, 0xa6, 0x54, 0x4c, 0x9c, 0x9f, 0xf2, 0x02, 0xf7, 0x72, 0xd0, 0xd5, 0x8a, 0xf5, 0x0c, + 0x1b, 0x94, 0xe9, 0x0d, 0x3b, 0x23, 0xab, 0xe0, 0x64, 0xe5, 0xbb, 0x30, 0x41, 0x05, 0x3c, 0x1e, + 0x56, 0x2e, 0xaa, 0x82, 0x53, 0x56, 0xee, 0x33, 0x70, 0x39, 0xbd, 0x70, 0xc3, 0xd2, 0x5d, 0x09, + 0xba, 0xda, 0xe5, 0x6a, 0x96, 0x11, 0xca, 0xf6, 0xcf, 0xaa, 0xbe, 0xfc, 0x0b, 0xaa, 0xbe, 0x1f, + 0x72, 0x60, 
0x5a, 0x5c, 0x19, 0x5f, 0xc0, 0x0b, 0x75, 0x2b, 0xf5, 0x42, 0x5d, 0x9d, 0xa0, 0x05, + 0x8b, 0x08, 0x33, 0xdf, 0xa3, 0x5f, 0x0c, 0xbc, 0x47, 0x6f, 0x4c, 0x8c, 0x3c, 0xfe, 0xf5, 0xb9, + 0x06, 0xce, 0xc4, 0x01, 0xc0, 0xeb, 0xe1, 0x69, 0x2f, 0xef, 0xc2, 0x8a, 0x58, 0xfb, 0xf8, 0xe9, + 0x18, 0x5f, 0x82, 0x63, 0x0b, 0x9d, 0x02, 0x35, 0xc1, 0x30, 0x99, 0x73, 0x68, 0xed, 0x93, 0x16, + 0xb1, 0xb8, 0xcb, 0xe4, 0x11, 0x12, 0x5b, 0x6f, 0xca, 0x71, 0x14, 0x5b, 0x54, 0x5e, 0x3d, 0x38, + 0x2c, 0x4d, 0x3d, 0x39, 0x2c, 0x4d, 0xfd, 0x76, 0x58, 0x9a, 0xfa, 0x3a, 0x28, 0x29, 0x07, 0x41, + 0x49, 0x79, 0x12, 0x94, 0x94, 0xdf, 0x83, 0x92, 0xf2, 0xed, 0x1f, 0xa5, 0xa9, 0x4f, 0x73, 0x9d, + 0x95, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x61, 0xfc, 0xd4, 0x31, 0x3f, 0x13, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/generated.proto new file mode 100644 index 000000000..4953624af --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/generated.proto @@ -0,0 +1,298 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.autoscaling.v1; + +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +message CrossVersionObjectReference { + // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" + optional string kind = 1; + + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + optional string name = 2; + + // API version of the referent + // +optional + optional string apiVersion = 3; +} + +// configuration of a horizontal pod autoscaler. +message HorizontalPodAutoscaler { + // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + optional HorizontalPodAutoscalerSpec spec = 2; + + // current information about the autoscaler. + // +optional + optional HorizontalPodAutoscalerStatus status = 3; +} + +// list of horizontal pod autoscaler objects. +message HorizontalPodAutoscalerList { + // Standard list metadata. 
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // list of horizontal pod autoscaler objects. + repeated HorizontalPodAutoscaler items = 2; +} + +// specification of a horizontal pod autoscaler. +message HorizontalPodAutoscalerSpec { + // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption + // and will set the desired number of pods by using its Scale subresource. + optional CrossVersionObjectReference scaleTargetRef = 1; + + // lower limit for the number of pods that can be set by the autoscaler, default 1. + // +optional + optional int32 minReplicas = 2; + + // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. + optional int32 maxReplicas = 3; + + // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; + // if not specified the default autoscaling policy will be used. + // +optional + optional int32 targetCPUUtilizationPercentage = 4; +} + +// current status of a horizontal pod autoscaler +message HorizontalPodAutoscalerStatus { + // most recent generation observed by this autoscaler. + // +optional + optional int64 observedGeneration = 1; + + // last time the HorizontalPodAutoscaler scaled the number of pods; + // used by the autoscaler to control how often the number of pods is changed. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + + // current number of replicas of pods managed by this autoscaler. + optional int32 currentReplicas = 3; + + // desired number of replicas of pods managed by this autoscaler. + optional int32 desiredReplicas = 4; + + // current average CPU utilization over all pods, represented as a percentage of requested CPU, + // e.g. 70 means that an average pod is using now 70% of its requested CPU. + // +optional + optional int32 currentCPUUtilizationPercentage = 5; +} + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +message MetricSpec { + // type is the type of metric source. It should match one of the fields below. + optional string type = 1; + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + optional ObjectMetricSource object = 2; + + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + optional PodsMetricSource pods = 3; + + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ResourceMetricSource resource = 4; +} + +// MetricStatus describes the last-read state of a single metric. +message MetricStatus { + // type is the type of metric source. It will match one of the fields below. + optional string type = 1; + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). 
+ // +optional + optional ObjectMetricStatus object = 2; + + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + optional PodsMetricStatus pods = 3; + + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ResourceMetricStatus resource = 4; +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +message ObjectMetricSource { + // target is the described Kubernetes object. + optional CrossVersionObjectReference target = 1; + + // metricName is the name of the metric in question. + optional string metricName = 2; + + // targetValue is the target value of the metric (as a quantity). + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +message ObjectMetricStatus { + // target is the described Kubernetes object. + optional CrossVersionObjectReference target = 1; + + // metricName is the name of the metric in question. + optional string metricName = 2; + + // currentValue is the current value of the metric (as a quantity). + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +message PodsMetricSource { + // metricName is the name of the metric in question + optional string metricName = 1; + + // targetAverageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +message PodsMetricStatus { + // metricName is the name of the metric in question + optional string metricName = 1; + + // currentAverageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +message ResourceMetricSource { + // name is the name of the resource in question. 
+  optional string name = 1;
+
+  // targetAverageUtilization is the target value of the average of the
+  // resource metric across all relevant pods, represented as a percentage of
+  // the requested value of the resource for the pods.
+  // +optional
+  optional int32 targetAverageUtilization = 2;
+
+  // targetAverageValue is the target value of the average of the
+  // resource metric across all relevant pods, as a raw value (instead of as
+  // a percentage of the request), similar to the "pods" metric source type.
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3;
+}
+
+// ResourceMetricStatus indicates the current value of a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory). Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.
+message ResourceMetricStatus {
+  // name is the name of the resource in question.
+  optional string name = 1;
+
+  // currentAverageUtilization is the current value of the average of the
+  // resource metric across all relevant pods, represented as a percentage of
+  // the requested value of the resource for the pods. It will only be
+  // present if `targetAverageValue` was set in the corresponding metric
+  // specification.
+  // +optional
+  optional int32 currentAverageUtilization = 2;
+
+  // currentAverageValue is the current value of the average of the
+  // resource metric across all relevant pods, as a raw value (instead of as
+  // a percentage of the request), similar to the "pods" metric source type.
+  // It will always be set, regardless of the corresponding metric specification.
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3;
+}
+
+// Scale represents a scaling request for a resource.
+message Scale {
+  // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
+  // +optional
+  optional ScaleSpec spec = 2;
+
+  // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.
+  // +optional
+  optional ScaleStatus status = 3;
+}
+
+// ScaleSpec describes the attributes of a scale subresource.
+message ScaleSpec {
+  // desired number of instances for the scaled object.
+  // +optional
+  optional int32 replicas = 1;
+}
+
+// ScaleStatus represents the current status of a scale subresource.
+message ScaleStatus {
+  // actual number of observed instances of the scaled object.
+  optional int32 replicas = 1;
+
+  // label query over pods that should match the replicas count. This is the same
+  // as the label selector but in the string format to avoid introspection
+  // by clients. The string will be in the same format as the query-param syntax.
+ // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + optional string selector = 2; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/register.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/register.go new file mode 100644 index 000000000..aaa225261 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "autoscaling" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &HorizontalPodAutoscaler{}, + &HorizontalPodAutoscalerList{}, + &Scale{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types.generated.go new file mode 100644 index 000000000..aea5e3831 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types.generated.go @@ -0,0 +1,5216 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +package v1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg3_resource "k8s.io/apimachinery/pkg/api/resource" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + pkg4_v1 "k8s.io/client-go/pkg/api/v1" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg3_resource.Quantity + var v1 pkg1_v1.Time + var v2 pkg2_types.UID + var v3 pkg4_v1.ResourceName + var v4 time.Time + _, _, _, _, _ = v0, v1, v2, v3, v4 + } +} + +func (x *CrossVersionObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CrossVersionObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CrossVersionObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv6 := &x.Name + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv8 := &x.APIVersion + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CrossVersionObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name 
= "" + } else { + yyv13 := &x.Name + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.MinReplicas != nil + yyq2[3] = x.TargetCPUUtilizationPercentage != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.ScaleTargetRef + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleTargetRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ScaleTargetRef + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.MinReplicas == nil { + r.EncodeNil() + } else { + yy9 := *x.MinReplicas + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MinReplicas == nil { + r.EncodeNil() + } else { + yy11 := *x.MinReplicas + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeInt(int64(yy11)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.MaxReplicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("maxReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeInt(int64(x.MaxReplicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.TargetCPUUtilizationPercentage == nil { + r.EncodeNil() + } else { + yy17 := *x.TargetCPUUtilizationPercentage + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeInt(int64(yy17)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetCPUUtilizationPercentage")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TargetCPUUtilizationPercentage == nil { + r.EncodeNil() + } else { + yy19 := *x.TargetCPUUtilizationPercentage + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(yy19)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscalerSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "scaleTargetRef": + if r.TryDecodeAsNil() { + x.ScaleTargetRef = CrossVersionObjectReference{} + } else { + yyv4 := &x.ScaleTargetRef + yyv4.CodecDecodeSelf(d) + } + case "minReplicas": + if r.TryDecodeAsNil() { + if x.MinReplicas != nil { + x.MinReplicas = nil + } + } else { + if x.MinReplicas == nil { + x.MinReplicas = new(int32) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) + } + } + case "maxReplicas": + if r.TryDecodeAsNil() { + x.MaxReplicas = 0 + } else { + yyv7 := &x.MaxReplicas + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } + } + case "targetCPUUtilizationPercentage": + if r.TryDecodeAsNil() { + if x.TargetCPUUtilizationPercentage != nil { + x.TargetCPUUtilizationPercentage = nil + } + } else { + if x.TargetCPUUtilizationPercentage == nil { + x.TargetCPUUtilizationPercentage = new(int32) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = 
r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ScaleTargetRef = CrossVersionObjectReference{} + } else { + yyv12 := &x.ScaleTargetRef + yyv12.CodecDecodeSelf(d) + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.MinReplicas != nil { + x.MinReplicas = nil + } + } else { + if x.MinReplicas == nil { + x.MinReplicas = new(int32) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MaxReplicas = 0 + } else { + yyv15 := &x.MaxReplicas + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(yyv15)) = int32(r.DecodeInt(32)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TargetCPUUtilizationPercentage != nil { + x.TargetCPUUtilizationPercentage = nil + } + } else { + if x.TargetCPUUtilizationPercentage == nil { + x.TargetCPUUtilizationPercentage = new(int32) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) + } + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ObservedGeneration != nil + yyq2[1] = x.LastScaleTime != nil + yyq2[4] = x.CurrentCPUUtilizationPercentage != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy4 := *x.ObservedGeneration + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy6 := *x.ObservedGeneration + 
yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.LastScaleTime == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { + } else if yym9 { + z.EncBinaryMarshal(x.LastScaleTime) + } else if !yym9 && z.IsJSONHandle() { + z.EncJSONMarshal(x.LastScaleTime) + } else { + z.EncFallback(x.LastScaleTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastScaleTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LastScaleTime == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { + } else if yym10 { + z.EncBinaryMarshal(x.LastScaleTime) + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(x.LastScaleTime) + } else { + z.EncFallback(x.LastScaleTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeInt(int64(x.CurrentReplicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.CurrentReplicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeInt(int64(x.DesiredReplicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("desiredReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.DesiredReplicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.CurrentCPUUtilizationPercentage == nil { + r.EncodeNil() + } else { + yy18 := *x.CurrentCPUUtilizationPercentage + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(yy18)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentCPUUtilizationPercentage")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CurrentCPUUtilizationPercentage == nil { + r.EncodeNil() + } else { + yy20 := *x.CurrentCPUUtilizationPercentage + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeInt(int64(yy20)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscalerStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "observedGeneration": + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + case "lastScaleTime": + if r.TryDecodeAsNil() { + if x.LastScaleTime != nil { + x.LastScaleTime = nil + } + } else { + if x.LastScaleTime == nil { + x.LastScaleTime = new(pkg1_v1.Time) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { + } else if yym7 { + z.DecBinaryUnmarshal(x.LastScaleTime) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.LastScaleTime) + } else { + z.DecFallback(x.LastScaleTime, false) + } + } + case "currentReplicas": + if r.TryDecodeAsNil() { + x.CurrentReplicas = 0 + } else { + yyv8 := &x.CurrentReplicas + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "desiredReplicas": + if r.TryDecodeAsNil() { + x.DesiredReplicas = 0 + } else { + yyv10 := &x.DesiredReplicas + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "currentCPUUtilizationPercentage": + if r.TryDecodeAsNil() { + if x.CurrentCPUUtilizationPercentage != nil { + x.CurrentCPUUtilizationPercentage = nil + } + } else { + if x.CurrentCPUUtilizationPercentage == nil { + x.CurrentCPUUtilizationPercentage = new(int32) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym16 := 
z.DecBinary() + _ = yym16 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LastScaleTime != nil { + x.LastScaleTime = nil + } + } else { + if x.LastScaleTime == nil { + x.LastScaleTime = new(pkg1_v1.Time) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { + } else if yym18 { + z.DecBinaryUnmarshal(x.LastScaleTime) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.LastScaleTime) + } else { + z.DecFallback(x.LastScaleTime, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentReplicas = 0 + } else { + yyv19 := &x.CurrentReplicas + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int32)(yyv19)) = int32(r.DecodeInt(32)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DesiredReplicas = 0 + } else { + yyv21 := &x.DesiredReplicas + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CurrentCPUUtilizationPercentage != nil { + x.CurrentCPUUtilizationPercentage = nil + } + } else { + if x.CurrentCPUUtilizationPercentage == nil { + x.CurrentCPUUtilizationPercentage = new(int32) + } + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, 
string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscaler) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscaler) 
codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = HorizontalPodAutoscalerSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = HorizontalPodAutoscalerStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = HorizontalPodAutoscalerSpec{} + } else { 
+ yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = HorizontalPodAutoscalerStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + 
h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscalerList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = 
r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ScaleSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ScaleStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ScaleSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ScaleStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) 
+ _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv7 := &x.Replicas + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + 
*((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Selector != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Selector)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Selector)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + 
z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "selector": + if r.TryDecodeAsNil() { + x.Selector = "" + } else { + yyv6 := &x.Selector + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv9 := &x.Replicas + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*int32)(yyv9)) = int32(r.DecodeInt(32)) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Selector = "" + } else { + yyv11 := &x.Selector + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x MetricSourceType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *MetricSourceType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *MetricSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Object != nil + yyq2[2] = x.Pods != nil + yyq2[3] = x.Resource != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("object")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("pods")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *MetricSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *MetricSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "object": + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricSource) + } + x.Object.CodecDecodeSelf(d) + } + case "pods": + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } 
+ } else { + if x.Pods == nil { + x.Pods = new(PodsMetricSource) + } + x.Pods.CodecDecodeSelf(d) + } + case "resource": + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricSource) + } + x.Resource.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *MetricSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv9 := &x.Type + yyv9.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricSource) + } + x.Object.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricSource) + } + x.Pods.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricSource) + } + x.Resource.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ObjectMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.Target + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("target")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.Target + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.TargetValue + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.TargetValue + yym15 := z.EncBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.EncExt(yy14) { + } else if !yym15 && z.IsJSONHandle() { + z.EncJSONMarshal(yy14) + } else { + z.EncFallback(yy14) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ObjectMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ObjectMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "target": + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv4 := &x.Target + yyv4.CodecDecodeSelf(d) + } + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv5 := &x.MetricName + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "targetValue": + if r.TryDecodeAsNil() { + x.TargetValue = pkg3_resource.Quantity{} + } else { + yyv7 := &x.TargetValue + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // 
end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ObjectMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv10 := &x.Target + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv11 := &x.MetricName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetValue = pkg3_resource.Quantity{} + } else { + yyv13 := &x.TargetValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(yyv13) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) + } else { + z.DecFallback(yyv13, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodsMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.TargetAverageValue + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, 
string("targetAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.TargetAverageValue + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodsMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodsMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv4 := &x.MetricName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "targetAverageValue": + if r.TryDecodeAsNil() { + x.TargetAverageValue = pkg3_resource.Quantity{} + } else { + yyv6 := &x.TargetAverageValue + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodsMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv9 := &x.MetricName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetAverageValue = pkg3_resource.Quantity{} + } 
else { + yyv11 := &x.TargetAverageValue + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) + } else { + z.DecFallback(yyv11, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.TargetAverageUtilization != nil + yyq2[2] = x.TargetAverageValue != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf4 := &x.Name + yysf4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf5 := &x.Name + yysf5.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.TargetAverageUtilization == nil { + r.EncodeNil() + } else { + yy7 := *x.TargetAverageUtilization + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetAverageUtilization")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TargetAverageUtilization == nil { + r.EncodeNil() + } else { + yy9 := *x.TargetAverageUtilization + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.TargetAverageValue == nil { + r.EncodeNil() + } else { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.EncExt(x.TargetAverageValue) { + } else if !yym12 && z.IsJSONHandle() { + z.EncJSONMarshal(x.TargetAverageValue) + } else { + z.EncFallback(x.TargetAverageValue) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TargetAverageValue == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(x.TargetAverageValue) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(x.TargetAverageValue) + } else { + z.EncFallback(x.TargetAverageValue) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + 
+func (x *ResourceMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yyv4.CodecDecodeSelf(d) + } + case "targetAverageUtilization": + if r.TryDecodeAsNil() { + if x.TargetAverageUtilization != nil { + x.TargetAverageUtilization = nil + } + } else { + if x.TargetAverageUtilization == nil { + x.TargetAverageUtilization = new(int32) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int32)(x.TargetAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + case "targetAverageValue": + if r.TryDecodeAsNil() { + if x.TargetAverageValue != nil { + x.TargetAverageValue = nil + } + } else { + if x.TargetAverageValue == nil { + x.TargetAverageValue = new(pkg3_resource.Quantity) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(x.TargetAverageValue) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.TargetAverageValue) + } else { + z.DecFallback(x.TargetAverageValue, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv10 := &x.Name + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TargetAverageUtilization != nil { + x.TargetAverageUtilization = nil + } + } else { + if x.TargetAverageUtilization == nil { + x.TargetAverageUtilization = new(int32) + } + yym12 := z.DecBinary() + _ = yym12 + if false 
{ + } else { + *((*int32)(x.TargetAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TargetAverageValue != nil { + x.TargetAverageValue = nil + } + } else { + if x.TargetAverageValue == nil { + x.TargetAverageValue = new(pkg3_resource.Quantity) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(x.TargetAverageValue) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.TargetAverageValue) + } else { + z.DecFallback(x.TargetAverageValue, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *MetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Object != nil + yyq2[2] = x.Pods != nil + yyq2[3] = x.Resource != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("object")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("pods")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if 
x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *MetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *MetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "object": + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricStatus) + } + x.Object.CodecDecodeSelf(d) + } + case "pods": + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricStatus) + } + x.Pods.CodecDecodeSelf(d) + } + case "resource": + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricStatus) + } + x.Resource.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *MetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv9 := &x.Type + yyv9.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricStatus) + } + x.Object.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricStatus) + } + x.Pods.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricStatus) + } + x.Resource.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ObjectMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.Target + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("target")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.Target + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.CurrentValue + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.CurrentValue + yym15 := z.EncBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.EncExt(yy14) { + } else if !yym15 && z.IsJSONHandle() { + z.EncJSONMarshal(yy14) + } else { + z.EncFallback(yy14) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ObjectMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ObjectMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "target": + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv4 := &x.Target + yyv4.CodecDecodeSelf(d) + } + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv5 := &x.MetricName + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "currentValue": + if r.TryDecodeAsNil() { + x.CurrentValue = pkg3_resource.Quantity{} + } else { + yyv7 := &x.CurrentValue + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ObjectMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv10 := &x.Target + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv11 := &x.MetricName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentValue = pkg3_resource.Quantity{} + } else { + yyv13 := &x.CurrentValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() 
&& z.DecExt(yyv13) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) + } else { + z.DecFallback(yyv13, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodsMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.CurrentAverageValue + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.CurrentAverageValue + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodsMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodsMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool 
= l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv4 := &x.MetricName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "currentAverageValue": + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg3_resource.Quantity{} + } else { + yyv6 := &x.CurrentAverageValue + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodsMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv9 := &x.MetricName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg3_resource.Quantity{} + } else { + yyv11 := &x.CurrentAverageValue + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) + } else { + z.DecFallback(yyv11, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.CurrentAverageUtilization != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf4 := &x.Name + yysf4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf5 := &x.Name + yysf5.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.CurrentAverageUtilization == nil { + r.EncodeNil() + } else { + yy7 := *x.CurrentAverageUtilization + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentAverageUtilization")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CurrentAverageUtilization == nil { + r.EncodeNil() + } else { + yy9 := *x.CurrentAverageUtilization + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.CurrentAverageValue + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.CurrentAverageValue + yym15 := z.EncBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.EncExt(yy14) { + } else if !yym15 && z.IsJSONHandle() { + z.EncJSONMarshal(yy14) + } else { + z.EncFallback(yy14) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yyv4.CodecDecodeSelf(d) + } + case "currentAverageUtilization": + if r.TryDecodeAsNil() { + if x.CurrentAverageUtilization != nil { + x.CurrentAverageUtilization = nil + } + } else { + if x.CurrentAverageUtilization == nil { + x.CurrentAverageUtilization = new(int32) + } 
+ yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int32)(x.CurrentAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + case "currentAverageValue": + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg3_resource.Quantity{} + } else { + yyv7 := &x.CurrentAverageValue + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv10 := &x.Name + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CurrentAverageUtilization != nil { + x.CurrentAverageUtilization = nil + } + } else { + if x.CurrentAverageUtilization == nil { + x.CurrentAverageUtilization = new(int32) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(x.CurrentAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg3_resource.Quantity{} + } else { + yyv13 := &x.CurrentAverageValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(yyv13) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) + } else { + z.DecFallback(yyv13, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutoscaler, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutoscaler, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []HorizontalPodAutoscaler{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, 
_ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 360) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]HorizontalPodAutoscaler, yyrl1) + } + } else { + yyv1 = make([]HorizontalPodAutoscaler, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HorizontalPodAutoscaler{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, HorizontalPodAutoscaler{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HorizontalPodAutoscaler{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, HorizontalPodAutoscaler{}) // var yyz1 HorizontalPodAutoscaler + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = HorizontalPodAutoscaler{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []HorizontalPodAutoscaler{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types.go new file mode 100644 index 000000000..47fd31e27 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types.go @@ -0,0 +1,296 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api/v1" +) + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +type CrossVersionObjectReference struct { + // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + // API version of the referent + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` +} + +// specification of a horizontal pod autoscaler. +type HorizontalPodAutoscalerSpec struct { + // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption + // and will set the desired number of pods by using its Scale subresource. 
+ ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` + // lower limit for the number of pods that can be set by the autoscaler, default 1. + // +optional + MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` + // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. + MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` + // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; + // if not specified the default autoscaling policy will be used. + // +optional + TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty" protobuf:"varint,4,opt,name=targetCPUUtilizationPercentage"` +} + +// current status of a horizontal pod autoscaler +type HorizontalPodAutoscalerStatus struct { + // most recent generation observed by this autoscaler. + // +optional + ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // last time the HorizontalPodAutoscaler scaled the number of pods; + // used by the autoscaler to control how often the number of pods is changed. + // +optional + LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` + + // current number of replicas of pods managed by this autoscaler. + CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` + + // desired number of replicas of pods managed by this autoscaler. + DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` + + // current average CPU utilization over all pods, represented as a percentage of requested CPU, + // e.g. 70 means that an average pod is using now 70% of its requested CPU. + // +optional + CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty" protobuf:"varint,5,opt,name=currentCPUUtilizationPercentage"` +} + +// +genclient=true + +// configuration of a horizontal pod autoscaler. +type HorizontalPodAutoscaler struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // current information about the autoscaler. + // +optional + Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// list of horizontal pod autoscaler objects. +type HorizontalPodAutoscalerList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // list of horizontal pod autoscaler objects. + Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Scale represents a scaling request for a resource. +type Scale struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. 
+ // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // +optional + Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ScaleSpec describes the attributes of a scale subresource. +type ScaleSpec struct { + // desired number of instances for the scaled object. + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` +} + +// ScaleStatus represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // label query over pods that should match the replicas count. This is same + // as the label selector but in the string format to avoid introspection + // by clients. The string will be in the same format as the query-param syntax. + // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector string `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` +} + +// the types below are used in the alpha metrics annotation + +// MetricSourceType indicates the type of metric. +type MetricSourceType string + +var ( + // ObjectMetricSourceType is a metric describing a kubernetes object + // (for example, hits-per-second on an Ingress object). + ObjectMetricSourceType MetricSourceType = "Object" + // PodsMetricSourceType is a metric describing each pod in the current scale + // target (for example, transactions-processed-per-second). The values + // will be averaged together before being compared to the target value. + PodsMetricSourceType MetricSourceType = "Pods" + // ResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ResourceMetricSourceType MetricSourceType = "Resource" +) + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +type MetricSpec struct { + // type is the type of metric source. It should match one of the fields below. + Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). 
Such metrics are built in to + Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricSource struct { + // target is the described Kubernetes object. + Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` + + // metricName is the name of the metric in question. + MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // targetValue is the target value of the metric (as a quantity). + TargetValue resource.Quantity `json:"targetValue" protobuf:"bytes,3,name=targetValue"` +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +type PodsMetricSource struct { + // metricName is the name of the metric in question + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // targetAverageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + TargetAverageValue resource.Quantity `json:"targetAverageValue" protobuf:"bytes,2,name=targetAverageValue"` +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ResourceMetricSource struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` +} + +// MetricStatus describes the last-read state of a single metric. +type MetricStatus struct { + // type is the type of metric source. It will match one of the fields below. + Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). 
The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricStatus struct { + // target is the described Kubernetes object. + Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` + + // metricName is the name of the metric in question. + MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // currentValue is the current value of the metric (as a quantity). + CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +type PodsMetricStatus struct { + // metricName is the name of the metric in question + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // currentAverageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,2,name=currentAverageValue"` +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ResourceMetricStatus struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. 
+ CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000..01d205f87 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go @@ -0,0 +1,205 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_CrossVersionObjectReference = map[string]string{ + "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", + "kind": "Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds\"", + "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "apiVersion": "API version of the referent", +} + +func (CrossVersionObjectReference) SwaggerDoc() map[string]string { + return map_CrossVersionObjectReference +} + +var map_HorizontalPodAutoscaler = map[string]string{ + "": "configuration of a horizontal pod autoscaler.", + "metadata": "Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "behaviour of autoscaler. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", + "status": "current information about the autoscaler.", +} + +func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscaler +} + +var map_HorizontalPodAutoscalerList = map[string]string{ + "": "list of horizontal pod autoscaler objects.", + "metadata": "Standard list metadata.", + "items": "list of horizontal pod autoscaler objects.", +} + +func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerList +} + +var map_HorizontalPodAutoscalerSpec = map[string]string{ + "": "specification of a horizontal pod autoscaler.", + "scaleTargetRef": "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.", + "minReplicas": "lower limit for the number of pods that can be set by the autoscaler, default 1.", + "maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", + "targetCPUUtilizationPercentage": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", +} + +func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerSpec +} + +var map_HorizontalPodAutoscalerStatus = map[string]string{ + "": "current status of a horizontal pod autoscaler", + "observedGeneration": "most recent generation observed by this autoscaler.", + "lastScaleTime": "last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.", + "currentReplicas": "current number of replicas of pods managed by this autoscaler.", + "desiredReplicas": "desired number of replicas of pods managed by this autoscaler.", + "currentCPUUtilizationPercentage": "current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.", +} + +func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerStatus +} + +var map_MetricSpec = map[string]string{ + "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "type": "type is the type of metric source. It should match one of the fields below.", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", +} + +func (MetricSpec) SwaggerDoc() map[string]string { + return map_MetricSpec +} + +var map_MetricStatus = map[string]string{ + "": "MetricStatus describes the last-read state of a single metric.", + "type": "type is the type of metric source. 
It will match one of the fields below.", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", +} + +func (MetricStatus) SwaggerDoc() map[string]string { + return map_MetricStatus +} + +var map_ObjectMetricSource = map[string]string{ + "": "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "target": "target is the described Kubernetes object.", + "metricName": "metricName is the name of the metric in question.", + "targetValue": "targetValue is the target value of the metric (as a quantity).", +} + +func (ObjectMetricSource) SwaggerDoc() map[string]string { + return map_ObjectMetricSource +} + +var map_ObjectMetricStatus = map[string]string{ + "": "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "target": "target is the described Kubernetes object.", + "metricName": "metricName is the name of the metric in question.", + "currentValue": "currentValue is the current value of the metric (as a quantity).", +} + +func (ObjectMetricStatus) SwaggerDoc() map[string]string { + return map_ObjectMetricStatus +} + +var map_PodsMetricSource = map[string]string{ + "": "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "metricName": "metricName is the name of the metric in question", + "targetAverageValue": "targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity)", +} + +func (PodsMetricSource) SwaggerDoc() map[string]string { + return map_PodsMetricSource +} + +var map_PodsMetricStatus = map[string]string{ + "": "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).", + "metricName": "metricName is the name of the metric in question", + "currentAverageValue": "currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)", +} + +func (PodsMetricStatus) SwaggerDoc() map[string]string { + return map_PodsMetricStatus +} + +var map_ResourceMetricSource = map[string]string{ + "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. 
Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", +} + +func (ResourceMetricSource) SwaggerDoc() map[string]string { + return map_ResourceMetricSource +} + +var map_ResourceMetricStatus = map[string]string{ + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", + "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", + "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", +} + +func (ResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ResourceMetricStatus +} + +var map_Scale = map[string]string{ + "": "Scale represents a scaling request for a resource.", + "metadata": "Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.", +} + +func (Scale) SwaggerDoc() map[string]string { + return map_Scale +} + +var map_ScaleSpec = map[string]string{ + "": "ScaleSpec describes the attributes of a scale subresource.", + "replicas": "desired number of instances for the scaled object.", +} + +func (ScaleSpec) SwaggerDoc() map[string]string { + return map_ScaleSpec +} + +var map_ScaleStatus = map[string]string{ + "": "ScaleStatus represents the current status of a scale subresource.", + "replicas": "actual number of observed instances of the scaled object.", + "selector": "label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. 
More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors", +} + +func (ScaleStatus) SwaggerDoc() map[string]string { + return map_ScaleStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.conversion.go new file mode 100644 index 000000000..acd2283cd --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.conversion.go @@ -0,0 +1,449 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1 + +import ( + resource "k8s.io/apimachinery/pkg/api/resource" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/client-go/pkg/api" + api_v1 "k8s.io/client-go/pkg/api/v1" + autoscaling "k8s.io/client-go/pkg/apis/autoscaling" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference, + Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference, + Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler, + Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler, + Convert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList, + Convert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList, + Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec, + Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec, + Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus, + Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus, + Convert_v1_MetricSpec_To_autoscaling_MetricSpec, + Convert_autoscaling_MetricSpec_To_v1_MetricSpec, + Convert_v1_MetricStatus_To_autoscaling_MetricStatus, + Convert_autoscaling_MetricStatus_To_v1_MetricStatus, + Convert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource, + Convert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource, + Convert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus, + Convert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus, + Convert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource, + Convert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource, + Convert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus, + Convert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus, + Convert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource, + Convert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource, + Convert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus, + Convert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus, + Convert_v1_Scale_To_autoscaling_Scale, + Convert_autoscaling_Scale_To_v1_Scale, + Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec, + Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec, + Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus, + Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus, + ) +} + +func autoConvert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Name = in.Name + out.APIVersion = in.APIVersion + return nil +} + +func Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { + return autoConvert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in, out, s) +} + +func autoConvert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *CrossVersionObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Name = in.Name + out.APIVersion = in.APIVersion + return nil +} + +func Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *CrossVersionObjectReference, s conversion.Scope) error { + return autoConvert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in, out, s) +} + +func autoConvert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in 
*HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]autoscaling.HorizontalPodAutoscaler, len(*in)) + for i := range *in { + if err := Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { + return autoConvert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + if err := Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]HorizontalPodAutoscaler, 0) + } + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in, out, s) +} + +func autoConvert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { + return err + } + out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) + out.MaxReplicas = in.MaxReplicas + // WARNING: in.TargetCPUUtilizationPercentage requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { + 
if err := Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { + return err + } + out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) + out.MaxReplicas = in.MaxReplicas + // WARNING: in.Metrics requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { + out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) + out.LastScaleTime = (*meta_v1.Time)(unsafe.Pointer(in.LastScaleTime)) + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + // WARNING: in.CurrentCPUUtilizationPercentage requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { + out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) + out.LastScaleTime = (*meta_v1.Time)(unsafe.Pointer(in.LastScaleTime)) + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + // WARNING: in.CurrentMetrics requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_v1_MetricSpec_To_autoscaling_MetricSpec(in *MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error { + out.Type = autoscaling.MetricSourceType(in.Type) + out.Object = (*autoscaling.ObjectMetricSource)(unsafe.Pointer(in.Object)) + out.Pods = (*autoscaling.PodsMetricSource)(unsafe.Pointer(in.Pods)) + out.Resource = (*autoscaling.ResourceMetricSource)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_v1_MetricSpec_To_autoscaling_MetricSpec(in *MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error { + return autoConvert_v1_MetricSpec_To_autoscaling_MetricSpec(in, out, s) +} + +func autoConvert_autoscaling_MetricSpec_To_v1_MetricSpec(in *autoscaling.MetricSpec, out *MetricSpec, s conversion.Scope) error { + out.Type = MetricSourceType(in.Type) + out.Object = (*ObjectMetricSource)(unsafe.Pointer(in.Object)) + out.Pods = (*PodsMetricSource)(unsafe.Pointer(in.Pods)) + out.Resource = (*ResourceMetricSource)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_autoscaling_MetricSpec_To_v1_MetricSpec(in *autoscaling.MetricSpec, out *MetricSpec, s conversion.Scope) error { + return autoConvert_autoscaling_MetricSpec_To_v1_MetricSpec(in, out, s) +} + +func autoConvert_v1_MetricStatus_To_autoscaling_MetricStatus(in *MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error { + out.Type = autoscaling.MetricSourceType(in.Type) + out.Object = (*autoscaling.ObjectMetricStatus)(unsafe.Pointer(in.Object)) + out.Pods = (*autoscaling.PodsMetricStatus)(unsafe.Pointer(in.Pods)) + out.Resource = (*autoscaling.ResourceMetricStatus)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_v1_MetricStatus_To_autoscaling_MetricStatus(in *MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error { + return autoConvert_v1_MetricStatus_To_autoscaling_MetricStatus(in, out, s) +} + +func autoConvert_autoscaling_MetricStatus_To_v1_MetricStatus(in *autoscaling.MetricStatus, out *MetricStatus, s conversion.Scope) error { + out.Type = MetricSourceType(in.Type) + out.Object = 
(*ObjectMetricStatus)(unsafe.Pointer(in.Object)) + out.Pods = (*PodsMetricStatus)(unsafe.Pointer(in.Pods)) + out.Resource = (*ResourceMetricStatus)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_autoscaling_MetricStatus_To_v1_MetricStatus(in *autoscaling.MetricStatus, out *MetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_MetricStatus_To_v1_MetricStatus(in, out, s) +} + +func autoConvert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error { + if err := Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.TargetValue = in.TargetValue + return nil +} + +func Convert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error { + return autoConvert_v1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in, out, s) +} + +func autoConvert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *ObjectMetricSource, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.TargetValue = in.TargetValue + return nil +} + +func Convert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *ObjectMetricSource, s conversion.Scope) error { + return autoConvert_autoscaling_ObjectMetricSource_To_v1_ObjectMetricSource(in, out, s) +} + +func autoConvert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error { + if err := Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.CurrentValue = in.CurrentValue + return nil +} + +func Convert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error { + return autoConvert_v1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *ObjectMetricStatus, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.CurrentValue = in.CurrentValue + return nil +} + +func Convert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *ObjectMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_ObjectMetricStatus_To_v1_ObjectMetricStatus(in, out, s) +} + +func autoConvert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error { + out.MetricName = in.MetricName + out.TargetAverageValue = in.TargetAverageValue + return nil +} + +func Convert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error { + return autoConvert_v1_PodsMetricSource_To_autoscaling_PodsMetricSource(in, out, s) +} + +func 
autoConvert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *PodsMetricSource, s conversion.Scope) error { + out.MetricName = in.MetricName + out.TargetAverageValue = in.TargetAverageValue + return nil +} + +func Convert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *PodsMetricSource, s conversion.Scope) error { + return autoConvert_autoscaling_PodsMetricSource_To_v1_PodsMetricSource(in, out, s) +} + +func autoConvert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error { + out.MetricName = in.MetricName + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error { + return autoConvert_v1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *PodsMetricStatus, s conversion.Scope) error { + out.MetricName = in.MetricName + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *PodsMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_PodsMetricStatus_To_v1_PodsMetricStatus(in, out, s) +} + +func autoConvert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error { + out.Name = api.ResourceName(in.Name) + out.TargetAverageUtilization = (*int32)(unsafe.Pointer(in.TargetAverageUtilization)) + out.TargetAverageValue = (*resource.Quantity)(unsafe.Pointer(in.TargetAverageValue)) + return nil +} + +func Convert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error { + return autoConvert_v1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in, out, s) +} + +func autoConvert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *ResourceMetricSource, s conversion.Scope) error { + out.Name = api_v1.ResourceName(in.Name) + out.TargetAverageUtilization = (*int32)(unsafe.Pointer(in.TargetAverageUtilization)) + out.TargetAverageValue = (*resource.Quantity)(unsafe.Pointer(in.TargetAverageValue)) + return nil +} + +func Convert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *ResourceMetricSource, s conversion.Scope) error { + return autoConvert_autoscaling_ResourceMetricSource_To_v1_ResourceMetricSource(in, out, s) +} + +func autoConvert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error { + out.Name = api.ResourceName(in.Name) + out.CurrentAverageUtilization = (*int32)(unsafe.Pointer(in.CurrentAverageUtilization)) + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error { + return autoConvert_v1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus(in 
*autoscaling.ResourceMetricStatus, out *ResourceMetricStatus, s conversion.Scope) error { + out.Name = api_v1.ResourceName(in.Name) + out.CurrentAverageUtilization = (*int32)(unsafe.Pointer(in.CurrentAverageUtilization)) + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *ResourceMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_ResourceMetricStatus_To_v1_ResourceMetricStatus(in, out, s) +} + +func autoConvert_v1_Scale_To_autoscaling_Scale(in *Scale, out *autoscaling.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_Scale_To_autoscaling_Scale(in *Scale, out *autoscaling.Scale, s conversion.Scope) error { + return autoConvert_v1_Scale_To_autoscaling_Scale(in, out, s) +} + +func autoConvert_autoscaling_Scale_To_v1_Scale(in *autoscaling.Scale, out *Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_autoscaling_Scale_To_v1_Scale(in *autoscaling.Scale, out *Scale, s conversion.Scope) error { + return autoConvert_autoscaling_Scale_To_v1_Scale(in, out, s) +} + +func autoConvert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in *ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +func Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in *ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error { + return autoConvert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in, out, s) +} + +func autoConvert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in *autoscaling.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +func Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in *autoscaling.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { + return autoConvert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in, out, s) +} + +func autoConvert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in *ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.Selector = in.Selector + return nil +} + +func Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in *ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error { + return autoConvert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in, out, s) +} + +func autoConvert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in *autoscaling.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.Selector = in.Selector + return nil +} + +func Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in *autoscaling.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { + return autoConvert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..07047c2b0 --- /dev/null +++ 
b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.deepcopy.go @@ -0,0 +1,312 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + resource "k8s.io/apimachinery/pkg/api/resource" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_CrossVersionObjectReference, InType: reflect.TypeOf(&CrossVersionObjectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HorizontalPodAutoscaler, InType: reflect.TypeOf(&HorizontalPodAutoscaler{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HorizontalPodAutoscalerList, InType: reflect.TypeOf(&HorizontalPodAutoscalerList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HorizontalPodAutoscalerSpec, InType: reflect.TypeOf(&HorizontalPodAutoscalerSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_HorizontalPodAutoscalerStatus, InType: reflect.TypeOf(&HorizontalPodAutoscalerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_MetricSpec, InType: reflect.TypeOf(&MetricSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_MetricStatus, InType: reflect.TypeOf(&MetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectMetricSource, InType: reflect.TypeOf(&ObjectMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ObjectMetricStatus, InType: reflect.TypeOf(&ObjectMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodsMetricSource, InType: reflect.TypeOf(&PodsMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_PodsMetricStatus, InType: reflect.TypeOf(&PodsMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceMetricSource, InType: reflect.TypeOf(&ResourceMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ResourceMetricStatus, InType: reflect.TypeOf(&ResourceMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Scale, InType: reflect.TypeOf(&Scale{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, + ) +} + +func DeepCopy_v1_CrossVersionObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CrossVersionObjectReference) + out := out.(*CrossVersionObjectReference) + *out = *in + return nil + } +} + +func DeepCopy_v1_HorizontalPodAutoscaler(in interface{}, out interface{}, c 
*conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscaler) + out := out.(*HorizontalPodAutoscaler) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_HorizontalPodAutoscalerList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscalerList) + out := out.(*HorizontalPodAutoscalerList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + if err := DeepCopy_v1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_HorizontalPodAutoscalerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscalerSpec) + out := out.(*HorizontalPodAutoscalerSpec) + *out = *in + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = **in + } + if in.TargetCPUUtilizationPercentage != nil { + in, out := &in.TargetCPUUtilizationPercentage, &out.TargetCPUUtilizationPercentage + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_HorizontalPodAutoscalerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscalerStatus) + out := out.(*HorizontalPodAutoscalerStatus) + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.LastScaleTime != nil { + in, out := &in.LastScaleTime, &out.LastScaleTime + *out = new(meta_v1.Time) + **out = (*in).DeepCopy() + } + if in.CurrentCPUUtilizationPercentage != nil { + in, out := &in.CurrentCPUUtilizationPercentage, &out.CurrentCPUUtilizationPercentage + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_v1_MetricSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*MetricSpec) + out := out.(*MetricSpec) + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricSource) + if err := DeepCopy_v1_ObjectMetricSource(*in, *out, c); err != nil { + return err + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricSource) + if err := DeepCopy_v1_PodsMetricSource(*in, *out, c); err != nil { + return err + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricSource) + if err := DeepCopy_v1_ResourceMetricSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_MetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*MetricStatus) + out := out.(*MetricStatus) + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricStatus) + if err := DeepCopy_v1_ObjectMetricStatus(*in, *out, c); err != nil { + return err + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricStatus) + if err := DeepCopy_v1_PodsMetricStatus(*in, *out, c); err != nil { + return err + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = 
new(ResourceMetricStatus) + if err := DeepCopy_v1_ResourceMetricStatus(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1_ObjectMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMetricSource) + out := out.(*ObjectMetricSource) + *out = *in + out.TargetValue = in.TargetValue.DeepCopy() + return nil + } +} + +func DeepCopy_v1_ObjectMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMetricStatus) + out := out.(*ObjectMetricStatus) + *out = *in + out.CurrentValue = in.CurrentValue.DeepCopy() + return nil + } +} + +func DeepCopy_v1_PodsMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodsMetricSource) + out := out.(*PodsMetricSource) + *out = *in + out.TargetAverageValue = in.TargetAverageValue.DeepCopy() + return nil + } +} + +func DeepCopy_v1_PodsMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodsMetricStatus) + out := out.(*PodsMetricStatus) + *out = *in + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return nil + } +} + +func DeepCopy_v1_ResourceMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceMetricSource) + out := out.(*ResourceMetricSource) + *out = *in + if in.TargetAverageUtilization != nil { + in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization + *out = new(int32) + **out = **in + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + *out = new(resource.Quantity) + **out = (*in).DeepCopy() + } + return nil + } +} + +func DeepCopy_v1_ResourceMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceMetricStatus) + out := out.(*ResourceMetricStatus) + *out = *in + if in.CurrentAverageUtilization != nil { + in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization + *out = new(int32) + **out = **in + } + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return nil + } +} + +func DeepCopy_v1_Scale(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Scale) + out := out.(*Scale) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + return nil + } +} + +func DeepCopy_v1_ScaleSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleSpec) + out := out.(*ScaleSpec) + *out = *in + return nil + } +} + +func DeepCopy_v1_ScaleStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleStatus) + out := out.(*ScaleStatus) + *out = *in + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.defaults.go new file mode 100644 index 000000000..af20e9884 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v1/zz_generated.defaults.go @@ -0,0 +1,47 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&HorizontalPodAutoscaler{}, func(obj interface{}) { SetObjectDefaults_HorizontalPodAutoscaler(obj.(*HorizontalPodAutoscaler)) }) + scheme.AddTypeDefaultingFunc(&HorizontalPodAutoscalerList{}, func(obj interface{}) { + SetObjectDefaults_HorizontalPodAutoscalerList(obj.(*HorizontalPodAutoscalerList)) + }) + return nil +} + +func SetObjectDefaults_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler) { + SetDefaults_HorizontalPodAutoscaler(in) +} + +func SetObjectDefaults_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_HorizontalPodAutoscaler(a) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/defaults.go new file mode 100644 index 000000000..6cc60d298 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/defaults.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/pkg/apis/autoscaling" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return scheme.AddDefaultingFuncs( + SetDefaults_HorizontalPodAutoscaler, + ) +} + +func SetDefaults_HorizontalPodAutoscaler(obj *HorizontalPodAutoscaler) { + if obj.Spec.MinReplicas == nil { + minReplicas := int32(1) + obj.Spec.MinReplicas = &minReplicas + } + + if len(obj.Spec.Metrics) == 0 { + utilizationDefaultVal := int32(autoscaling.DefaultCPUUtilization) + obj.Spec.Metrics = []MetricSpec{ + { + Type: ResourceMetricSourceType, + Resource: &ResourceMetricSource{ + Name: v1.ResourceCPU, + TargetAverageUtilization: &utilizationDefaultVal, + }, + }, + } + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/doc.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/doc.go new file mode 100644 index 000000000..a9fe60b1c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.pb.go new file mode 100644 index 000000000..69e71edd9 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.pb.go @@ -0,0 +1,3062 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v2alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1/generated.proto + + It has these top-level messages: + CrossVersionObjectReference + HorizontalPodAutoscaler + HorizontalPodAutoscalerList + HorizontalPodAutoscalerSpec + HorizontalPodAutoscalerStatus + MetricSpec + MetricStatus + ObjectMetricSource + ObjectMetricStatus + PodsMetricSource + PodsMetricStatus + ResourceMetricSource + ResourceMetricStatus +*/ +package v2alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/pkg/api/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } +func (*CrossVersionObjectReference) ProtoMessage() {} +func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{0} +} + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } +func (*HorizontalPodAutoscalerList) ProtoMessage() {} +func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + +func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } +func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} +func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{3} +} + +func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } +func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} +func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{4} +} + +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func init() { + proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.CrossVersionObjectReference") + proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscaler") + proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerList") + proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), 
"k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerSpec") + proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.HorizontalPodAutoscalerStatus") + proto.RegisterType((*MetricSpec)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.MetricSpec") + proto.RegisterType((*MetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.MetricStatus") + proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.ObjectMetricSource") + proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.ObjectMetricStatus") + proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.PodsMetricSource") + proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.PodsMetricStatus") + proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.ResourceMetricSource") + proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.client-go.pkg.apis.autoscaling.v2alpha1.ResourceMetricStatus") +} +func (m *CrossVersionObjectReference) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CrossVersionObjectReference) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += copy(data[i:], m.APIVersion) + return i, nil +} + +func (m *HorizontalPodAutoscaler) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscaler) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *HorizontalPodAutoscalerList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HorizontalPodAutoscalerSpec) Marshal() (data []byte, err 
error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ScaleTargetRef.Size())) + n5, err := m.ScaleTargetRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + if m.MinReplicas != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.MinReplicas)) + } + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxReplicas)) + if len(m.Metrics) > 0 { + for _, msg := range m.Metrics { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HorizontalPodAutoscalerStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HorizontalPodAutoscalerStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ObservedGeneration != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastScaleTime.Size())) + n6, err := m.LastScaleTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + } + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentReplicas)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DesiredReplicas)) + if len(m.CurrentMetrics) > 0 { + for _, msg := range m.CurrentMetrics { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MetricSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *MetricSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.Object != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Object.Size())) + n7, err := m.Object.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.Pods != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Pods.Size())) + n8, err := m.Pods.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.Resource != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Resource.Size())) + n9, err := m.Resource.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} + +func (m *MetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *MetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.Object != nil { + data[i] = 0x12 + i++ + i 
= encodeVarintGenerated(data, i, uint64(m.Object.Size())) + n10, err := m.Object.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if m.Pods != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Pods.Size())) + n11, err := m.Pods.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.Resource != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Resource.Size())) + n12, err := m.Resource.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} + +func (m *ObjectMetricSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectMetricSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) + n13, err := m.Target.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetValue.Size())) + n14, err := m.TargetValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n14 + return i, nil +} + +func (m *ObjectMetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ObjectMetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) + n15, err := m.Target.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n15 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentValue.Size())) + n16, err := m.CurrentValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n16 + return i, nil +} + +func (m *PodsMetricSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodsMetricSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetAverageValue.Size())) + n17, err := m.TargetAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n17 + return i, nil +} + +func (m *PodsMetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodsMetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.MetricName))) + i += copy(data[i:], m.MetricName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentAverageValue.Size())) + n18, err := m.CurrentAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n18 + return i, 
nil +} + +func (m *ResourceMetricSource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceMetricSource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + if m.TargetAverageUtilization != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.TargetAverageValue.Size())) + n19, err := m.TargetAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n19 + } + return i, nil +} + +func (m *ResourceMetricStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ResourceMetricStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + if m.CurrentAverageUtilization != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.CurrentAverageUtilization)) + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentAverageValue.Size())) + n20, err := m.CurrentAverageValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n20 + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *CrossVersionObjectReference) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscaler) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscalerList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + var l int + _ = l + l = m.ScaleTargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.MinReplicas != nil { + n += 1 + sovGenerated(uint64(*m.MinReplicas)) + } + n += 1 + sovGenerated(uint64(m.MaxReplicas)) + if len(m.Metrics) > 0 { + for _, e := range 
m.Metrics { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + var l int + _ = l + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + l = m.LastScaleTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.DesiredReplicas)) + if len(m.CurrentMetrics) > 0 { + for _, e := range m.CurrentMetrics { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MetricSpec) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ObjectMetricSource) Size() (n int) { + var l int + _ = l + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TargetValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectMetricStatus) Size() (n int) { + var l int + _ = l + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodsMetricSource) Size() (n int) { + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodsMetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceMetricSource) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ResourceMetricStatus) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.CurrentAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.CurrentAverageUtilization)) + } + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CrossVersionObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CrossVersionObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + 
`APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscaler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscaler{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, + `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, + `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, + `Metrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Metrics), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, + `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, + `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, + `CurrentMetrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentMetrics), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricSpec{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricStatus{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + 
strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectMetricSource{`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectMetricStatus{`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricSource{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `TargetAverageValue:` + strings.Replace(strings.Replace(this.TargetAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricStatus{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, HorizontalPodAutoscaler{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ScaleTargetRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.MinReplicas = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) + } + m.MaxReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MaxReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metrics = append(m.Metrics, MetricSpec{}) + if err := m.Metrics[len(m.Metrics)-1].Unmarshal(data[iNdEx:postIndex]); err 
!= nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ObservedGeneration = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScaleTime == nil { + m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.LastScaleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.CurrentReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) + } + m.DesiredReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.DesiredReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.CurrentMetrics = append(m.CurrentMetrics, MetricStatus{}) + if err := m.CurrentMetrics[len(m.CurrentMetrics)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricSource{} + } + if err := m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricSource{} + } + if err := m.Pods.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricSource{} + } + if err := m.Resource.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricStatus{} + } + if err := m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricStatus{} + } + if err := m.Pods.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricStatus{} + } + if err := m.Resource.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMetricSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMetricStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + 
if wireType == 4 { + return fmt.Errorf("proto: PodsMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodsMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricSource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_kubernetes_pkg_api_v1.ResourceName(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetAverageValue == nil { + m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + } + if err := m.TargetAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricStatus) Unmarshal(data []byte) error { + l := len(data) 
+ iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_kubernetes_pkg_api_v1.ResourceName(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrentAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 1208 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xd4, 0x57, 0x5b, 0x6f, 0x1b, 0x45, + 0x14, 0x8e, 0x2f, 0x49, 0xc3, 0x38, 0x37, 0x26, 0x55, 0xea, 0x26, 0xd4, 0x8e, 0xf6, 0xa9, 0x54, + 0xb0, 0x4b, 0x4c, 0x41, 0x54, 0x08, 0xa1, 0xd8, 0x5c, 0x5a, 0x11, 0xa7, 0x61, 0x1a, 0x2a, 0x04, + 0x48, 0x30, 0x59, 0x4f, 0x9c, 0x21, 0xde, 0x8b, 0x76, 0x66, 0xad, 0x26, 0x52, 0x25, 0x7e, 0x00, + 0x0f, 0xbc, 0xf0, 0x13, 0x90, 0xf8, 0x07, 0x3c, 0x83, 0x84, 0x94, 0xc7, 0xf2, 0xc6, 0x93, 0x45, + 0xdc, 0x37, 0x7e, 0x42, 0x25, 0x2e, 0xda, 0x99, 0xf1, 0x5e, 0xbc, 0x5e, 0x13, 0x87, 0xb4, 0x82, + 0x37, 0xef, 0xcc, 0x39, 0xdf, 0x77, 0xce, 0xf9, 0xce, 0x9c, 0x19, 0x83, 0xb7, 0x0f, 0xdf, 0x60, + 0x3a, 0x75, 0x8c, 0x43, 0x7f, 0x8f, 0x78, 0x36, 0xe1, 0x84, 0x19, 0xee, 0x61, 0xdb, 0xc0, 0x2e, + 0x65, 0x06, 0xf6, 0xb9, 0xc3, 0x4c, 0xdc, 0xa1, 0x76, 0xdb, 0xe8, 0xd6, 0x70, 0xc7, 0x3d, 0xc0, + 0x1b, 0x46, 0x9b, 0xd8, 0xc4, 0xc3, 0x9c, 0xb4, 0x74, 0xd7, 0x73, 0xb8, 0x03, 0x0d, 0x09, 0xa0, + 0x47, 0x00, 0xba, 0x7b, 0xd8, 0xd6, 0x03, 0x00, 0x3d, 0x06, 0xa0, 0x0f, 0x00, 0x56, 0x5f, 0x6e, + 0x53, 0x7e, 0xe0, 0xef, 0xe9, 0xa6, 0x63, 0x19, 0x6d, 0xa7, 0xed, 0x18, 0x02, 0x67, 0xcf, 0xdf, + 0x17, 0x5f, 0xe2, 0x43, 0xfc, 0x92, 0xf8, 0xab, 0x37, 0x55, 0x80, 0xd8, 0xa5, 0x16, 0x36, 0x0f, + 0xa8, 0x4d, 0xbc, 0xa3, 0x41, 0x88, 0x86, 0x47, 0x98, 0xe3, 0x7b, 0x26, 0x19, 0x8e, 0x6a, 0xac, + 0x17, 0x33, 0x2c, 0xc2, 0xb1, 0xd1, 0x4d, 0xe5, 0xb2, 0x6a, 0x64, 0x79, 0x79, 0xbe, 0xcd, 0xa9, + 0x95, 0xa6, 0x79, 0xfd, 0x9f, 0x1c, 0x98, 0x79, 0x40, 0x2c, 0x9c, 0xf2, 0x7b, 0x35, 0xcb, 0xcf, + 0xe7, 0xb4, 0x63, 0x50, 0x9b, 0x33, 0xee, 0x8d, 0xcb, 0x89, 0x11, 0xaf, 0x4b, 0xbc, 0x28, 0x21, + 0xf2, 0x00, 0x5b, 0x6e, 0x87, 0x8c, 0xca, 0xe9, 0xa5, 0x4c, 0x81, 0x47, 0x59, 0xdf, 0x3a, 0x6b, + 0x3b, 0xa4, 0x5c, 0xb5, 0x6f, 0x73, 0x60, 0xad, 0xe1, 0x39, 0x8c, 0xdd, 0x27, 0x1e, 0xa3, 0x8e, + 0x7d, 0x77, 0xef, 0x4b, 0x62, 0x72, 0x44, 0xf6, 0x89, 0x47, 0x6c, 0x93, 0xc0, 0x75, 0x50, 0x3c, + 0xa4, 0x76, 0xab, 0x9c, 0x5b, 0xcf, 0x5d, 0x7f, 0xae, 0x3e, 0x77, 0xd2, 0xab, 0x4e, 0xf5, 0x7b, + 0xd5, 0xe2, 0x07, 0xd4, 0x6e, 0x21, 0xb1, 0x13, 0x58, 0xd8, 0xd8, 0x22, 0xe5, 0x7c, 0xd2, 0x62, + 0x1b, 0x5b, 0x04, 0x89, 0x1d, 0x58, 0x03, 0x00, 0xbb, 0x54, 0x11, 0x94, 0x0b, 0xc2, 0x0e, 0x2a, + 0x3b, 0xb0, 0xb9, 0x73, 0x47, 0xed, 0xa0, 0x98, 0x95, 0xf6, 0x38, 0x0f, 0xae, 0xdc, 0x76, 0x3c, + 0x7a, 0xec, 
0xd8, 0x1c, 0x77, 0x76, 0x9c, 0xd6, 0xa6, 0xca, 0x83, 0x78, 0xf0, 0x0b, 0x30, 0x1b, + 0xf4, 0x42, 0x0b, 0x73, 0x2c, 0xe2, 0x2a, 0xd5, 0x5e, 0xd1, 0x55, 0x3f, 0xc7, 0xa5, 0x89, 0x3a, + 0x3a, 0xb0, 0xd6, 0xbb, 0x1b, 0xba, 0x4c, 0xae, 0x49, 0x38, 0x8e, 0xf8, 0xa3, 0x35, 0x14, 0xa2, + 0x42, 0x1b, 0x14, 0x99, 0x4b, 0x4c, 0x91, 0x53, 0xa9, 0xb6, 0xa5, 0x4f, 0x78, 0x5a, 0xf4, 0x8c, + 0xc8, 0xef, 0xb9, 0xc4, 0x8c, 0x2a, 0x14, 0x7c, 0x21, 0xc1, 0x03, 0xbb, 0x60, 0x86, 0x71, 0xcc, + 0x7d, 0x26, 0xaa, 0x53, 0xaa, 0x6d, 0x5f, 0x18, 0xa3, 0x40, 0xad, 0x2f, 0x28, 0xce, 0x19, 0xf9, + 0x8d, 0x14, 0x9b, 0xf6, 0x7b, 0x0e, 0xac, 0x65, 0x78, 0x6e, 0x51, 0xc6, 0xe1, 0x67, 0xa9, 0x4a, + 0xeb, 0x67, 0xab, 0x74, 0xe0, 0x2d, 0xea, 0xbc, 0xa4, 0x98, 0x67, 0x07, 0x2b, 0xb1, 0x2a, 0x5b, + 0x60, 0x9a, 0x72, 0x62, 0xb1, 0x72, 0x7e, 0xbd, 0x70, 0xbd, 0x54, 0xbb, 0x7d, 0x51, 0x49, 0xd7, + 0xe7, 0x15, 0xe9, 0xf4, 0x9d, 0x00, 0x1e, 0x49, 0x16, 0xed, 0xcf, 0x7c, 0x66, 0xb2, 0x81, 0x14, + 0xf0, 0xeb, 0x1c, 0x58, 0x10, 0x9f, 0xbb, 0xd8, 0x6b, 0x93, 0xe0, 0x0c, 0xa8, 0x9c, 0x27, 0xd7, + 0x7f, 0xcc, 0x89, 0xaa, 0xaf, 0xa8, 0xe0, 0x16, 0xee, 0x25, 0xb8, 0xd0, 0x10, 0x37, 0xdc, 0x00, + 0x25, 0x8b, 0xda, 0x88, 0xb8, 0x1d, 0x6a, 0x62, 0x26, 0x5a, 0x71, 0xba, 0xbe, 0xd8, 0xef, 0x55, + 0x4b, 0xcd, 0x68, 0x19, 0xc5, 0x6d, 0xe0, 0x6b, 0xa0, 0x64, 0xe1, 0x07, 0xa1, 0x4b, 0x41, 0xb8, + 0x2c, 0x2b, 0xbe, 0x52, 0x33, 0xda, 0x42, 0x71, 0x3b, 0xb8, 0x0f, 0x2e, 0x59, 0x84, 0x7b, 0xd4, + 0x64, 0xe5, 0xa2, 0x50, 0xe2, 0xcd, 0x89, 0x13, 0x6e, 0x0a, 0x7f, 0xd1, 0xdf, 0x8b, 0x8a, 0xef, + 0x92, 0x5c, 0x63, 0x68, 0x00, 0xae, 0xfd, 0x52, 0x00, 0xd7, 0xc6, 0xf6, 0x29, 0x7c, 0x0f, 0x40, + 0x67, 0x4f, 0x8c, 0xc9, 0xd6, 0xfb, 0x72, 0x50, 0x05, 0x13, 0x23, 0x50, 0xa1, 0x50, 0x5f, 0xe9, + 0xf7, 0xaa, 0xf0, 0x6e, 0x6a, 0x17, 0x8d, 0xf0, 0x80, 0x26, 0x98, 0xef, 0x60, 0xc6, 0x65, 0x85, + 0xa9, 0x1a, 0x4e, 0xa5, 0xda, 0x8d, 0xb3, 0x35, 0x6f, 0xe0, 0x51, 0x7f, 0xbe, 0xdf, 0xab, 0xce, + 0x6f, 0xc5, 0x41, 0x50, 0x12, 0x13, 0x6e, 0x82, 0x45, 0xd3, 0xf7, 0x3c, 0x62, 0xf3, 0xa1, 0x8a, + 0x5f, 0x51, 0x15, 0x58, 0x6c, 0x24, 0xb7, 0xd1, 0xb0, 0x7d, 0x00, 0xd1, 0x22, 0x8c, 0x7a, 0xa4, + 0x15, 0x42, 0x14, 0x93, 0x10, 0xef, 0x24, 0xb7, 0xd1, 0xb0, 0x3d, 0x7c, 0x08, 0x16, 0x14, 0xaa, + 0xaa, 0x77, 0x79, 0x5a, 0x68, 0xf8, 0xd6, 0x79, 0x35, 0x94, 0x13, 0x23, 0xec, 0xd2, 0x46, 0x02, + 0x1c, 0x0d, 0x91, 0x69, 0x7f, 0xe4, 0x01, 0x88, 0xc4, 0x87, 0x37, 0x41, 0x91, 0x1f, 0xb9, 0x44, + 0x5d, 0x17, 0xeb, 0x83, 0x51, 0xb7, 0x7b, 0xe4, 0x92, 0x27, 0xbd, 0xea, 0x92, 0xb2, 0x14, 0xb7, + 0x7f, 0xb0, 0x86, 0x84, 0x35, 0x6c, 0x83, 0x19, 0x47, 0x9c, 0x12, 0xa5, 0x53, 0x63, 0xe2, 0xd8, + 0xc3, 0x29, 0x1e, 0xc2, 0xd7, 0x41, 0x30, 0xef, 0xd4, 0xe1, 0x53, 0xf0, 0xf0, 0x73, 0x50, 0x74, + 0x9d, 0xd6, 0x60, 0xca, 0x6e, 0x4e, 0x4c, 0xb3, 0xe3, 0xb4, 0x58, 0x82, 0x64, 0x36, 0xc8, 0x2e, + 0x58, 0x45, 0x02, 0x18, 0x3a, 0x60, 0x76, 0xf0, 0xba, 0x11, 0x4a, 0x96, 0x6a, 0xef, 0x4e, 0x4c, + 0x82, 0x14, 0x40, 0x82, 0x68, 0x2e, 0x98, 0xa1, 0x83, 0x1d, 0x14, 0x92, 0x68, 0x7f, 0xe5, 0xc1, + 0x5c, 0x5c, 0xb8, 0xff, 0x86, 0x02, 0xb2, 0x87, 0x9e, 0xb2, 0x02, 0x92, 0xe4, 0x19, 0x28, 0x20, + 0x89, 0xb2, 0x14, 0xf8, 0x2e, 0x0f, 0x60, 0xba, 0xfd, 0x20, 0x07, 0x33, 0x5c, 0xcc, 0xf2, 0xa7, + 0x72, 0x89, 0x84, 0x17, 0xba, 0xba, 0x2f, 0x14, 0x57, 0xf0, 0xd4, 0x92, 0xd3, 0x76, 0x3b, 0x7a, + 0x92, 0x85, 0x4f, 0x9d, 0x66, 0xb8, 0x83, 0x62, 0x56, 0x90, 0x80, 0x92, 0xf4, 0xbe, 0x8f, 0x3b, + 0x3e, 0x51, 0xca, 0x8c, 0xbd, 0xe7, 0xf5, 0x41, 0xf2, 0xfa, 0x87, 0x3e, 0xb6, 0x39, 0xe5, 0x47, + 0xd1, 0x2d, 0xb3, 0x1b, 0x41, 0xa1, 
0x38, 0xae, 0xf6, 0xfd, 0x70, 0x9d, 0x64, 0xbf, 0xfe, 0x7f, + 0xea, 0x74, 0x00, 0xe6, 0xd4, 0xf0, 0xfb, 0x37, 0x85, 0xba, 0xac, 0x58, 0xe6, 0x1a, 0x31, 0x2c, + 0x94, 0x40, 0xd6, 0x7e, 0xca, 0x81, 0xa5, 0xe1, 0x51, 0x33, 0x14, 0x72, 0xee, 0x4c, 0x21, 0x1f, + 0x03, 0x28, 0x13, 0xde, 0xec, 0x12, 0x0f, 0xb7, 0x89, 0x0c, 0x3c, 0x7f, 0xae, 0xc0, 0x57, 0x15, + 0x17, 0xdc, 0x4d, 0x21, 0xa2, 0x11, 0x2c, 0xda, 0xcf, 0xc9, 0x24, 0xa4, 0xda, 0xe7, 0x49, 0xe2, + 0x21, 0x58, 0x56, 0xd5, 0xb9, 0x80, 0x2c, 0xd6, 0x14, 0xd9, 0x72, 0x23, 0x0d, 0x89, 0x46, 0xf1, + 0x68, 0x3f, 0xe4, 0xc1, 0xe5, 0x51, 0x23, 0x19, 0x36, 0xd5, 0x1f, 0x1f, 0x99, 0xc5, 0xad, 0xf8, + 0x1f, 0x9f, 0x27, 0xbd, 0xea, 0x8b, 0xe3, 0xfe, 0xc1, 0x85, 0x13, 0x26, 0xf6, 0x2f, 0xe9, 0x63, + 0x50, 0x4e, 0x54, 0xf1, 0x23, 0x4e, 0x3b, 0xf4, 0x58, 0xbe, 0x80, 0xe4, 0xe3, 0xef, 0x85, 0x7e, + 0xaf, 0x5a, 0xde, 0xcd, 0xb0, 0x41, 0x99, 0xde, 0xb0, 0x3b, 0xb2, 0x0b, 0xce, 0xd7, 0xbe, 0x2b, + 0x13, 0x74, 0xc0, 0x8f, 0xe9, 0xca, 0xc9, 0x2e, 0xb8, 0xe0, 0xca, 0x7d, 0x0a, 0xae, 0x26, 0x85, + 0x4b, 0x97, 0xee, 0x5a, 0xbf, 0x57, 0xbd, 0xda, 0xc8, 0x32, 0x42, 0xd9, 0xfe, 0x59, 0xdd, 0x57, + 0x78, 0x36, 0xdd, 0x57, 0xbf, 0x71, 0x72, 0x5a, 0x99, 0x7a, 0x74, 0x5a, 0x99, 0xfa, 0xf5, 0xb4, + 0x32, 0xf5, 0x55, 0xbf, 0x92, 0x3b, 0xe9, 0x57, 0x72, 0x8f, 0xfa, 0x95, 0xdc, 0x6f, 0xfd, 0x4a, + 0xee, 0x9b, 0xc7, 0x95, 0xa9, 0x4f, 0x66, 0x07, 0x83, 0xf0, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x95, 0xf2, 0xec, 0x8a, 0x16, 0x12, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.proto new file mode 100644 index 000000000..27b99c224 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/generated.proto @@ -0,0 +1,275 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.autoscaling.v2alpha1; + +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v2alpha1"; + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. 
+message CrossVersionObjectReference { + // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + optional string kind = 1; + + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + optional string name = 2; + + // API version of the referent + // +optional + optional string apiVersion = 3; +} + +// HorizontalPodAutoscaler is the configuration for a horizontal pod +// autoscaler, which automatically manages the replica count of any resource +// implementing the scale subresource based on the metrics specified. +message HorizontalPodAutoscaler { + // metadata is the standard object metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the specification for the behaviour of the autoscaler. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + optional HorizontalPodAutoscalerSpec spec = 2; + + // status is the current information about the autoscaler. + // +optional + optional HorizontalPodAutoscalerStatus status = 3; +} + +// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects. +message HorizontalPodAutoscalerList { + // metadata is the standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of horizontal pod autoscaler objects. + repeated HorizontalPodAutoscaler items = 2; +} + +// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. +message HorizontalPodAutoscalerSpec { + // scaleTargetRef points to the target resource to scale, and is used to determine the pods for which metrics + // should be collected, as well as to actually change the replica count. + optional CrossVersionObjectReference scaleTargetRef = 1; + + // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. + // It defaults to 1 pod. + // +optional + optional int32 minReplicas = 2; + + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. + // It cannot be less than minReplicas. + optional int32 maxReplicas = 3; + + // metrics contains the specifications used to calculate the + // desired replica count (the maximum replica count across all metrics will + // be used). The desired replica count is calculated by multiplying the + // ratio between the current value and the target value by the current + // number of pods. Ergo, metrics used must decrease as the pod count is + // increased, and vice-versa. See the individual metric source types for + // more information about how each type of metric must respond. + // +optional + repeated MetricSpec metrics = 4; +} + +// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. +message HorizontalPodAutoscalerStatus { + // observedGeneration is the most recent generation observed by this autoscaler. + // +optional + optional int64 observedGeneration = 1; + + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, + // used by the autoscaler to control how often the number of pods is changed. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + + // currentReplicas is the current number of replicas of pods managed by this autoscaler, + // as last seen by the autoscaler. 
+ optional int32 currentReplicas = 3; + + // desiredReplicas is the desired number of replicas of pods managed by this autoscaler, + // as last calculated by the autoscaler. + optional int32 desiredReplicas = 4; + + // currentMetrics is the last read state of the metrics used by this autoscaler. + repeated MetricStatus currentMetrics = 5; +} + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +message MetricSpec { + // type is the type of metric source. It should match one of the fields below. + optional string type = 1; + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + optional ObjectMetricSource object = 2; + + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + optional PodsMetricSource pods = 3; + + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ResourceMetricSource resource = 4; +} + +// MetricStatus describes the last-read state of a single metric. +message MetricStatus { + // type is the type of metric source. It will match one of the fields below. + optional string type = 1; + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + optional ObjectMetricStatus object = 2; + + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + optional PodsMetricStatus pods = 3; + + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ResourceMetricStatus resource = 4; +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +message ObjectMetricSource { + // target is the described Kubernetes object. + optional CrossVersionObjectReference target = 1; + + // metricName is the name of the metric in question. + optional string metricName = 2; + + // targetValue is the target value of the metric (as a quantity). + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +message ObjectMetricStatus { + // target is the described Kubernetes object. + optional CrossVersionObjectReference target = 1; + + // metricName is the name of the metric in question. + optional string metricName = 2; + + // currentValue is the current value of the metric (as a quantity). 
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +message PodsMetricSource { + // metricName is the name of the metric in question. + optional string metricName = 1; + + // targetAverageValue is the target value of the average of the + // metric across all relevant pods (as a quantity). + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +message PodsMetricStatus { + // metricName is the name of the metric in question. + optional string metricName = 1; + + // currentAverageValue is the current value of the average of the + // metric across all relevant pods (as a quantity). + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +message ResourceMetricSource { + // name is the name of the resource in question. + optional string name = 1; + + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + optional int32 targetAverageUtilization = 2; + + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +message ResourceMetricStatus { + // name is the name of the resource in question. + optional string name = 1; + + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + optional int32 currentAverageUtilization = 2; + + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. 
+ optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/register.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/register.go new file mode 100644 index 000000000..2fc437f93 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/register.go @@ -0,0 +1,44 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "autoscaling" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2alpha1"} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &HorizontalPodAutoscaler{}, + &HorizontalPodAutoscalerList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.generated.go new file mode 100644 index 000000000..9eb6919a3 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.generated.go @@ -0,0 +1,4621 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +package v2alpha1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_resource "k8s.io/apimachinery/pkg/api/resource" + pkg3_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg4_types "k8s.io/apimachinery/pkg/types" + pkg2_v1 "k8s.io/client-go/pkg/api/v1" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_resource.Quantity + var v1 pkg3_v1.Time + var v2 pkg4_types.UID + var v3 pkg2_v1.ResourceName + var v4 time.Time + _, _, _, _, _ = v0, v1, v2, v3, v4 + } +} + +func (x *CrossVersionObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CrossVersionObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CrossVersionObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv6 := &x.Name + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv8 := &x.APIVersion + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CrossVersionObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv11 := &x.Kind + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name 
= "" + } else { + yyv13 := &x.Name + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.MinReplicas != nil + yyq2[3] = len(x.Metrics) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.ScaleTargetRef + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("scaleTargetRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ScaleTargetRef + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.MinReplicas == nil { + r.EncodeNil() + } else { + yy9 := *x.MinReplicas + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MinReplicas == nil { + r.EncodeNil() + } else { + yy11 := *x.MinReplicas + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeInt(int64(yy11)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.MaxReplicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("maxReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeInt(int64(x.MaxReplicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Metrics == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encSliceMetricSpec(([]MetricSpec)(x.Metrics), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("metrics")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Metrics == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + h.encSliceMetricSpec(([]MetricSpec)(x.Metrics), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscalerSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "scaleTargetRef": + if r.TryDecodeAsNil() { + x.ScaleTargetRef = CrossVersionObjectReference{} + } else { + yyv4 := &x.ScaleTargetRef + yyv4.CodecDecodeSelf(d) + } + case "minReplicas": + if r.TryDecodeAsNil() { + if x.MinReplicas != nil { + x.MinReplicas = nil + } + } else { + if x.MinReplicas == nil { + x.MinReplicas = new(int32) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) + } + } + case "maxReplicas": + if r.TryDecodeAsNil() { + x.MaxReplicas = 0 + } else { + yyv7 := &x.MaxReplicas + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } + } + case "metrics": + if r.TryDecodeAsNil() { + x.Metrics = nil + } else { + yyv9 := &x.Metrics + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceMetricSpec((*[]MetricSpec)(yyv9), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ScaleTargetRef = CrossVersionObjectReference{} + } else { + yyv12 := &x.ScaleTargetRef + yyv12.CodecDecodeSelf(d) + } + yyj11++ + 
if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.MinReplicas != nil { + x.MinReplicas = nil + } + } else { + if x.MinReplicas == nil { + x.MinReplicas = new(int32) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MaxReplicas = 0 + } else { + yyv15 := &x.MaxReplicas + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int32)(yyv15)) = int32(r.DecodeInt(32)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Metrics = nil + } else { + yyv17 := &x.Metrics + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + h.decSliceMetricSpec((*[]MetricSpec)(yyv17), d) + } + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x MetricSourceType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *MetricSourceType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *MetricSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Object != nil + yyq2[2] = x.Pods != nil + yyq2[3] = x.Resource != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("object")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("pods")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *MetricSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *MetricSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "object": + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricSource) + } + x.Object.CodecDecodeSelf(d) + } + case "pods": + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricSource) + } + x.Pods.CodecDecodeSelf(d) + } + case "resource": + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricSource) + } + x.Resource.CodecDecodeSelf(d) + } + default: + 
z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *MetricSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv9 := &x.Type + yyv9.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricSource) + } + x.Object.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricSource) + } + x.Pods.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricSource) + } + x.Resource.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ObjectMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.Target + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("target")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.Target + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.TargetValue + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.TargetValue + yym15 := z.EncBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.EncExt(yy14) { + } else if !yym15 && z.IsJSONHandle() { + z.EncJSONMarshal(yy14) + } else { + z.EncFallback(yy14) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ObjectMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ObjectMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "target": + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv4 := &x.Target + yyv4.CodecDecodeSelf(d) + } + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv5 := &x.MetricName + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "targetValue": + if r.TryDecodeAsNil() { + x.TargetValue = pkg1_resource.Quantity{} + } else { + yyv7 := &x.TargetValue + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ObjectMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool 
= l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv10 := &x.Target + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv11 := &x.MetricName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetValue = pkg1_resource.Quantity{} + } else { + yyv13 := &x.TargetValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(yyv13) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) + } else { + z.DecFallback(yyv13, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodsMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.TargetAverageValue + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.TargetAverageValue + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + if yyr2 || 
yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodsMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodsMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv4 := &x.MetricName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "targetAverageValue": + if r.TryDecodeAsNil() { + x.TargetAverageValue = pkg1_resource.Quantity{} + } else { + yyv6 := &x.TargetAverageValue + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodsMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv9 := &x.MetricName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetAverageValue = pkg1_resource.Quantity{} + } else { + yyv11 := &x.TargetAverageValue + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) + } else { + z.DecFallback(yyv11, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() 
+ } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceMetricSource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.TargetAverageUtilization != nil + yyq2[2] = x.TargetAverageValue != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf4 := &x.Name + yysf4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf5 := &x.Name + yysf5.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.TargetAverageUtilization == nil { + r.EncodeNil() + } else { + yy7 := *x.TargetAverageUtilization + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetAverageUtilization")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TargetAverageUtilization == nil { + r.EncodeNil() + } else { + yy9 := *x.TargetAverageUtilization + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.TargetAverageValue == nil { + r.EncodeNil() + } else { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.EncExt(x.TargetAverageValue) { + } else if !yym12 && z.IsJSONHandle() { + z.EncJSONMarshal(x.TargetAverageValue) + } else { + z.EncFallback(x.TargetAverageValue) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TargetAverageValue == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(x.TargetAverageValue) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(x.TargetAverageValue) + } else { + z.EncFallback(x.TargetAverageValue) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceMetricSource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 
:= r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceMetricSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yyv4.CodecDecodeSelf(d) + } + case "targetAverageUtilization": + if r.TryDecodeAsNil() { + if x.TargetAverageUtilization != nil { + x.TargetAverageUtilization = nil + } + } else { + if x.TargetAverageUtilization == nil { + x.TargetAverageUtilization = new(int32) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int32)(x.TargetAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + case "targetAverageValue": + if r.TryDecodeAsNil() { + if x.TargetAverageValue != nil { + x.TargetAverageValue = nil + } + } else { + if x.TargetAverageValue == nil { + x.TargetAverageValue = new(pkg1_resource.Quantity) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(x.TargetAverageValue) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.TargetAverageValue) + } else { + z.DecFallback(x.TargetAverageValue, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceMetricSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv10 := &x.Name + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.TargetAverageUtilization != nil { + x.TargetAverageUtilization = nil + } + } else { + if x.TargetAverageUtilization == nil { + x.TargetAverageUtilization = new(int32) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(x.TargetAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + 
if x.TargetAverageValue != nil { + x.TargetAverageValue = nil + } + } else { + if x.TargetAverageValue == nil { + x.TargetAverageValue = new(pkg1_resource.Quantity) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(x.TargetAverageValue) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.TargetAverageValue) + } else { + z.DecFallback(x.TargetAverageValue, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ObservedGeneration != nil + yyq2[1] = x.LastScaleTime != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy4 := *x.ObservedGeneration + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ObservedGeneration == nil { + r.EncodeNil() + } else { + yy6 := *x.ObservedGeneration + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.LastScaleTime == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { + } else if yym9 { + z.EncBinaryMarshal(x.LastScaleTime) + } else if !yym9 && z.IsJSONHandle() { + z.EncJSONMarshal(x.LastScaleTime) + } else { + z.EncFallback(x.LastScaleTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastScaleTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LastScaleTime == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { + } else if yym10 { + z.EncBinaryMarshal(x.LastScaleTime) + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(x.LastScaleTime) + } else { + z.EncFallback(x.LastScaleTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeInt(int64(x.CurrentReplicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentReplicas")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.CurrentReplicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeInt(int64(x.DesiredReplicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("desiredReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.DesiredReplicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.CurrentMetrics == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + h.encSliceMetricStatus(([]MetricStatus)(x.CurrentMetrics), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentMetrics")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CurrentMetrics == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encSliceMetricStatus(([]MetricStatus)(x.CurrentMetrics), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscalerStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "observedGeneration": + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + case "lastScaleTime": + if r.TryDecodeAsNil() { + if x.LastScaleTime != nil { + x.LastScaleTime = nil + } + } else { + if x.LastScaleTime == nil { + x.LastScaleTime = new(pkg3_v1.Time) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { + } 
else if yym7 { + z.DecBinaryUnmarshal(x.LastScaleTime) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.LastScaleTime) + } else { + z.DecFallback(x.LastScaleTime, false) + } + } + case "currentReplicas": + if r.TryDecodeAsNil() { + x.CurrentReplicas = 0 + } else { + yyv8 := &x.CurrentReplicas + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "desiredReplicas": + if r.TryDecodeAsNil() { + x.DesiredReplicas = 0 + } else { + yyv10 := &x.DesiredReplicas + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "currentMetrics": + if r.TryDecodeAsNil() { + x.CurrentMetrics = nil + } else { + yyv12 := &x.CurrentMetrics + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + h.decSliceMetricStatus((*[]MetricStatus)(yyv12), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ObservedGeneration != nil { + x.ObservedGeneration = nil + } + } else { + if x.ObservedGeneration == nil { + x.ObservedGeneration = new(int64) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LastScaleTime != nil { + x.LastScaleTime = nil + } + } else { + if x.LastScaleTime == nil { + x.LastScaleTime = new(pkg3_v1.Time) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { + } else if yym18 { + z.DecBinaryUnmarshal(x.LastScaleTime) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.LastScaleTime) + } else { + z.DecFallback(x.LastScaleTime, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentReplicas = 0 + } else { + yyv19 := &x.CurrentReplicas + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int32)(yyv19)) = int32(r.DecodeInt(32)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DesiredReplicas = 0 + } else { + yyv21 := &x.DesiredReplicas + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentMetrics = nil + } else { + yyv23 := &x.CurrentMetrics + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + h.decSliceMetricStatus((*[]MetricStatus)(yyv23), d) + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *MetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Object != nil + yyq2[2] = x.Pods != nil + yyq2[3] = x.Resource != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("object")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Object == nil { + r.EncodeNil() + } else { + x.Object.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("pods")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Pods == nil { + r.EncodeNil() + } else { + x.Pods.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resource")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Resource == nil { + r.EncodeNil() + } else { + x.Resource.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *MetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := 
z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *MetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "object": + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricStatus) + } + x.Object.CodecDecodeSelf(d) + } + case "pods": + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricStatus) + } + x.Pods.CodecDecodeSelf(d) + } + case "resource": + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricStatus) + } + x.Resource.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *MetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv9 := &x.Type + yyv9.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Object != nil { + x.Object = nil + } + } else { + if x.Object == nil { + x.Object = new(ObjectMetricStatus) + } + x.Object.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Pods != nil { + x.Pods = nil + } + } else { + if x.Pods == nil { + x.Pods = new(PodsMetricStatus) + } + x.Pods.CodecDecodeSelf(d) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Resource != nil { + x.Resource = nil + } + } else { + if x.Resource == nil { + x.Resource = new(ResourceMetricStatus) + } + x.Resource.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ObjectMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.Target + yy4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("target")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.Target + yy6.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.CurrentValue + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.CurrentValue + yym15 := z.EncBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.EncExt(yy14) { + } else if !yym15 && z.IsJSONHandle() { + z.EncJSONMarshal(yy14) + } else { + z.EncFallback(yy14) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ObjectMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else 
if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ObjectMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "target": + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv4 := &x.Target + yyv4.CodecDecodeSelf(d) + } + case "metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv5 := &x.MetricName + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "currentValue": + if r.TryDecodeAsNil() { + x.CurrentValue = pkg1_resource.Quantity{} + } else { + yyv7 := &x.CurrentValue + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ObjectMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Target = CrossVersionObjectReference{} + } else { + yyv10 := &x.Target + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv11 := &x.MetricName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentValue = pkg1_resource.Quantity{} + } else { + yyv13 := &x.CurrentValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(yyv13) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) + } else { + z.DecFallback(yyv13, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodsMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metricName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.MetricName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.CurrentAverageValue + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.CurrentAverageValue + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodsMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodsMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case 
"metricName": + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv4 := &x.MetricName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "currentAverageValue": + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg1_resource.Quantity{} + } else { + yyv6 := &x.CurrentAverageValue + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodsMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MetricName = "" + } else { + yyv9 := &x.MetricName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg1_resource.Quantity{} + } else { + yyv11 := &x.CurrentAverageValue + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) + } else { + z.DecFallback(yyv11, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ResourceMetricStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.CurrentAverageUtilization != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf4 := &x.Name + yysf4.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf5 := &x.Name + yysf5.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.CurrentAverageUtilization == nil { + r.EncodeNil() + } else { + yy7 := *x.CurrentAverageUtilization + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + 
r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentAverageUtilization")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CurrentAverageUtilization == nil { + r.EncodeNil() + } else { + yy9 := *x.CurrentAverageUtilization + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.CurrentAverageValue + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentAverageValue")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.CurrentAverageValue + yym15 := z.EncBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.EncExt(yy14) { + } else if !yym15 && z.IsJSONHandle() { + z.EncJSONMarshal(yy14) + } else { + z.EncFallback(yy14) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ResourceMetricStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ResourceMetricStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yyv4.CodecDecodeSelf(d) + } + case "currentAverageUtilization": + if r.TryDecodeAsNil() { + if x.CurrentAverageUtilization != nil { + x.CurrentAverageUtilization = nil + } + } else { + if x.CurrentAverageUtilization == nil { + x.CurrentAverageUtilization = new(int32) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*int32)(x.CurrentAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + case "currentAverageValue": + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg1_resource.Quantity{} + } else { + yyv7 := &x.CurrentAverageValue + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && 
z.DecExt(yyv7) { + } else if !yym8 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv7) + } else { + z.DecFallback(yyv7, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ResourceMetricStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv10 := &x.Name + yyv10.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CurrentAverageUtilization != nil { + x.CurrentAverageUtilization = nil + } + } else { + if x.CurrentAverageUtilization == nil { + x.CurrentAverageUtilization = new(int32) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(x.CurrentAverageUtilization)) = int32(r.DecodeInt(32)) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentAverageValue = pkg1_resource.Quantity{} + } else { + yyv13 := &x.CurrentAverageValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(yyv13) { + } else if !yym14 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv13) + } else { + z.DecFallback(yyv13, false) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { 
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscaler) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscaler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if 
r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg3_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = HorizontalPodAutoscalerSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = HorizontalPodAutoscalerStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg3_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = HorizontalPodAutoscalerSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = 
HorizontalPodAutoscalerStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + 
if false { + } else { + h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HorizontalPodAutoscalerList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg3_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } 
+ z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg3_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceMetricSpec(v []MetricSpec, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceMetricSpec(v *[]MetricSpec, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []MetricSpec{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]MetricSpec, yyrl1) + } + } else { + yyv1 = make([]MetricSpec, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = MetricSpec{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, MetricSpec{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = MetricSpec{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, MetricSpec{}) // var yyz1 MetricSpec + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = MetricSpec{} + } else { + yyv4 := 
&yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []MetricSpec{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceMetricStatus(v []MetricStatus, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceMetricStatus(v *[]MetricStatus, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []MetricStatus{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]MetricStatus, yyrl1) + } + } else { + yyv1 = make([]MetricStatus, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = MetricStatus{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, MetricStatus{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = MetricStatus{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, MetricStatus{}) // var yyz1 MetricStatus + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = MetricStatus{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []MetricStatus{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutoscaler, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutoscaler, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []HorizontalPodAutoscaler{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var 
yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 392) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]HorizontalPodAutoscaler, yyrl1) + } + } else { + yyv1 = make([]HorizontalPodAutoscaler, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HorizontalPodAutoscaler{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, HorizontalPodAutoscaler{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HorizontalPodAutoscaler{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, HorizontalPodAutoscaler{}) // var yyz1 HorizontalPodAutoscaler + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = HorizontalPodAutoscaler{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []HorizontalPodAutoscaler{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.go new file mode 100644 index 000000000..4df8ae414 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types.go @@ -0,0 +1,269 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api/v1" +) + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +type CrossVersionObjectReference struct { + // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + // API version of the referent + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` +} + +// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. 
+type HorizontalPodAutoscalerSpec struct { + // scaleTargetRef points to the target resource to scale, and is used to select the pods for which metrics + // should be collected, as well as to actually change the replica count. + ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` + // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. + // It defaults to 1 pod. + // +optional + MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. + // It cannot be less than minReplicas. + MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` + // metrics contains the specifications to use to calculate the + // desired replica count (the maximum replica count across all metrics will + // be used). The desired replica count is calculated by multiplying the + // ratio between the target value and the current value by the current + // number of pods. Ergo, metrics used must decrease as the pod count is + // increased, and vice-versa. See the individual metric source types for + // more information about how each type of metric must respond. + // +optional + Metrics []MetricSpec `json:"metrics,omitempty" protobuf:"bytes,4,rep,name=metrics"` +} + +// MetricSourceType indicates the type of metric. +type MetricSourceType string + +var ( + // ObjectMetricSourceType is a metric describing a kubernetes object + // (for example, hits-per-second on an Ingress object). + ObjectMetricSourceType MetricSourceType = "Object" + // PodsMetricSourceType is a metric describing each pod in the current scale + // target (for example, transactions-processed-per-second). The values + // will be averaged together before being compared to the target value. + PodsMetricSourceType MetricSourceType = "Pods" + // ResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ResourceMetricSourceType MetricSourceType = "Resource" +) + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +type MetricSpec struct { + // type is the type of metric source. It should match one of the fields below. + Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). 
Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricSource struct { + // target is the described Kubernetes object. + Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` + + // metricName is the name of the metric in question. + MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // targetValue is the target value of the metric (as a quantity). + TargetValue resource.Quantity `json:"targetValue" protobuf:"bytes,3,name=targetValue"` +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +type PodsMetricSource struct { + // metricName is the name of the metric in question + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // targetAverageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + TargetAverageValue resource.Quantity `json:"targetAverageValue" protobuf:"bytes,2,name=targetAverageValue"` +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ResourceMetricSource struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` +} + +// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. +type HorizontalPodAutoscalerStatus struct { + // observedGeneration is the most recent generation observed by this autoscaler. + // +optional + ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, + // used by the autoscaler to control how often the number of pods is changed. 
+ // +optional + LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` + + // currentReplicas is current number of replicas of pods managed by this autoscaler, + // as last seen by the autoscaler. + CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` + + // desiredReplicas is the desired number of replicas of pods managed by this autoscaler, + // as last calculated by the autoscaler. + DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` + + // currentMetrics is the last read state of the metrics used by this autoscaler. + CurrentMetrics []MetricStatus `json:"currentMetrics" protobuf:"bytes,5,rep,name=currentMetrics"` +} + +// MetricStatus describes the last-read state of a single metric. +type MetricStatus struct { + // type is the type of metric source. It will match one of the fields below. + Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricStatus struct { + // target is the described Kubernetes object. + Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` + + // metricName is the name of the metric in question. + MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // currentValue is the current value of the metric (as a quantity). + CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +type PodsMetricStatus struct { + // metricName is the name of the metric in question + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // currentAverageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,2,name=currentAverageValue"` +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). 
Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ResourceMetricStatus struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` +} + +// +genclient=true + +// HorizontalPodAutoscaler is the configuration for a horizontal pod +// autoscaler, which automatically manages the replica count of any resource +// implementing the scale subresource based on the metrics specified. +type HorizontalPodAutoscaler struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard object metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the specification for the behaviour of the autoscaler. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // status is the current information about the autoscaler. + // +optional + Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects. +type HorizontalPodAutoscalerList struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of horizontal pod autoscaler objects. + Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..6030e2dc3 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/types_swagger_doc_generated.go @@ -0,0 +1,175 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_CrossVersionObjectReference = map[string]string{ + "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", + "kind": "Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds\"", + "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "apiVersion": "API version of the referent", +} + +func (CrossVersionObjectReference) SwaggerDoc() map[string]string { + return map_CrossVersionObjectReference +} + +var map_HorizontalPodAutoscaler = map[string]string{ + "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", + "metadata": "metadata is the standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "spec is the specification for the behaviour of the autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", + "status": "status is the current information about the autoscaler.", +} + +func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscaler +} + +var map_HorizontalPodAutoscalerList = map[string]string{ + "": "HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.", + "metadata": "metadata is the standard list metadata.", + "items": "items is the list of horizontal pod autoscaler objects.", +} + +func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerList +} + +var map_HorizontalPodAutoscalerSpec = map[string]string{ + "": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", + "scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to select the pods for which metrics should be collected, as well as to actually change the replica count.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod.", + "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less than minReplicas.", + "metrics": "metrics contains the specifications to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated by multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. 
See the individual metric source types for more information about how each type of metric must respond.", +} + +func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerSpec +} + +var map_HorizontalPodAutoscalerStatus = map[string]string{ + "": "HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.", + "observedGeneration": "observedGeneration is the most recent generation observed by this autoscaler.", + "lastScaleTime": "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, used by the autoscaler to control how often the number of pods is changed.", + "currentReplicas": "currentReplicas is current number of replicas of pods managed by this autoscaler, as last seen by the autoscaler.", + "desiredReplicas": "desiredReplicas is the desired number of replicas of pods managed by this autoscaler, as last calculated by the autoscaler.", + "currentMetrics": "currentMetrics is the last read state of the metrics used by this autoscaler.", +} + +func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerStatus +} + +var map_MetricSpec = map[string]string{ + "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "type": "type is the type of metric source. It should match one of the fields below.", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", +} + +func (MetricSpec) SwaggerDoc() map[string]string { + return map_MetricSpec +} + +var map_MetricStatus = map[string]string{ + "": "MetricStatus describes the last-read state of a single metric.", + "type": "type is the type of metric source. It will match one of the fields below.", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", +} + +func (MetricStatus) SwaggerDoc() map[string]string { + return map_MetricStatus +} + +var map_ObjectMetricSource = map[string]string{ + "": "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "target": "target is the described Kubernetes object.", + "metricName": "metricName is the name of the metric in question.", + "targetValue": "targetValue is the target value of the metric (as a quantity).", +} + +func (ObjectMetricSource) SwaggerDoc() map[string]string { + return map_ObjectMetricSource +} + +var map_ObjectMetricStatus = map[string]string{ + "": "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "target": "target is the described Kubernetes object.", + "metricName": "metricName is the name of the metric in question.", + "currentValue": "currentValue is the current value of the metric (as a quantity).", +} + +func (ObjectMetricStatus) SwaggerDoc() map[string]string { + return map_ObjectMetricStatus +} + +var map_PodsMetricSource = map[string]string{ + "": "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "metricName": "metricName is the name of the metric in question", + "targetAverageValue": "targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity)", +} + +func (PodsMetricSource) SwaggerDoc() map[string]string { + return map_PodsMetricSource +} + +var map_PodsMetricStatus = map[string]string{ + "": "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).", + "metricName": "metricName is the name of the metric in question", + "currentAverageValue": "currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)", +} + +func (PodsMetricStatus) SwaggerDoc() map[string]string { + return map_PodsMetricStatus +} + +var map_ResourceMetricSource = map[string]string{ + "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. 
Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "targetAverageValue": "targetAverageValue is the the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", +} + +func (ResourceMetricSource) SwaggerDoc() map[string]string { + return map_ResourceMetricSource +} + +var map_ResourceMetricStatus = map[string]string{ + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", + "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", + "currentAverageValue": "currentAverageValue is the the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", +} + +func (ResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ResourceMetricStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.conversion.go new file mode 100644 index 000000000..70d11d5e8 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.conversion.go @@ -0,0 +1,387 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v2alpha1 + +import ( + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/client-go/pkg/api" + api_v1 "k8s.io/client-go/pkg/api/v1" + autoscaling "k8s.io/client-go/pkg/apis/autoscaling" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference, + Convert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference, + Convert_v2alpha1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler, + Convert_autoscaling_HorizontalPodAutoscaler_To_v2alpha1_HorizontalPodAutoscaler, + Convert_v2alpha1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList, + Convert_autoscaling_HorizontalPodAutoscalerList_To_v2alpha1_HorizontalPodAutoscalerList, + Convert_v2alpha1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec, + Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2alpha1_HorizontalPodAutoscalerSpec, + Convert_v2alpha1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus, + Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2alpha1_HorizontalPodAutoscalerStatus, + Convert_v2alpha1_MetricSpec_To_autoscaling_MetricSpec, + Convert_autoscaling_MetricSpec_To_v2alpha1_MetricSpec, + Convert_v2alpha1_MetricStatus_To_autoscaling_MetricStatus, + Convert_autoscaling_MetricStatus_To_v2alpha1_MetricStatus, + Convert_v2alpha1_ObjectMetricSource_To_autoscaling_ObjectMetricSource, + Convert_autoscaling_ObjectMetricSource_To_v2alpha1_ObjectMetricSource, + Convert_v2alpha1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus, + Convert_autoscaling_ObjectMetricStatus_To_v2alpha1_ObjectMetricStatus, + Convert_v2alpha1_PodsMetricSource_To_autoscaling_PodsMetricSource, + Convert_autoscaling_PodsMetricSource_To_v2alpha1_PodsMetricSource, + Convert_v2alpha1_PodsMetricStatus_To_autoscaling_PodsMetricStatus, + Convert_autoscaling_PodsMetricStatus_To_v2alpha1_PodsMetricStatus, + Convert_v2alpha1_ResourceMetricSource_To_autoscaling_ResourceMetricSource, + Convert_autoscaling_ResourceMetricSource_To_v2alpha1_ResourceMetricSource, + Convert_v2alpha1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus, + Convert_autoscaling_ResourceMetricStatus_To_v2alpha1_ResourceMetricStatus, + ) +} + +func autoConvert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Name = in.Name + out.APIVersion = in.APIVersion + return nil +} + +func Convert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { + return autoConvert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in, out, s) +} + +func autoConvert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *CrossVersionObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Name = in.Name + out.APIVersion = in.APIVersion + return nil +} + +func Convert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *CrossVersionObjectReference, s conversion.Scope) error { + return autoConvert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(in, out, s) +} + +func autoConvert_v2alpha1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { + 
out.ObjectMeta = in.ObjectMeta + if err := Convert_v2alpha1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v2alpha1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v2alpha1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { + return autoConvert_v2alpha1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2alpha1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2alpha1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2alpha1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscaler_To_v2alpha1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscaler_To_v2alpha1_HorizontalPodAutoscaler(in, out, s) +} + +func autoConvert_v2alpha1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]autoscaling.HorizontalPodAutoscaler)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v2alpha1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { + return autoConvert_v2alpha1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v2alpha1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]HorizontalPodAutoscaler, 0) + } else { + out.Items = *(*[]HorizontalPodAutoscaler)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscalerList_To_v2alpha1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v2alpha1_HorizontalPodAutoscalerList(in, out, s) +} + +func autoConvert_v2alpha1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { + return err + } + out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) + out.MaxReplicas = in.MaxReplicas + out.Metrics = *(*[]autoscaling.MetricSpec)(unsafe.Pointer(&in.Metrics)) + return nil +} + +func 
Convert_v2alpha1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { + return autoConvert_v2alpha1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v2alpha1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { + return err + } + out.MinReplicas = (*int32)(unsafe.Pointer(in.MinReplicas)) + out.MaxReplicas = in.MaxReplicas + out.Metrics = *(*[]MetricSpec)(unsafe.Pointer(&in.Metrics)) + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v2alpha1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v2alpha1_HorizontalPodAutoscalerSpec(in, out, s) +} + +func autoConvert_v2alpha1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { + out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) + out.LastScaleTime = (*v1.Time)(unsafe.Pointer(in.LastScaleTime)) + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + out.CurrentMetrics = *(*[]autoscaling.MetricStatus)(unsafe.Pointer(&in.CurrentMetrics)) + return nil +} + +func Convert_v2alpha1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { + return autoConvert_v2alpha1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in, out, s) +} + +func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v2alpha1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { + out.ObservedGeneration = (*int64)(unsafe.Pointer(in.ObservedGeneration)) + out.LastScaleTime = (*v1.Time)(unsafe.Pointer(in.LastScaleTime)) + out.CurrentReplicas = in.CurrentReplicas + out.DesiredReplicas = in.DesiredReplicas + if in.CurrentMetrics == nil { + out.CurrentMetrics = make([]MetricStatus, 0) + } else { + out.CurrentMetrics = *(*[]MetricStatus)(unsafe.Pointer(&in.CurrentMetrics)) + } + return nil +} + +func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v2alpha1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { + return autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v2alpha1_HorizontalPodAutoscalerStatus(in, out, s) +} + +func autoConvert_v2alpha1_MetricSpec_To_autoscaling_MetricSpec(in *MetricSpec, out *autoscaling.MetricSpec, s conversion.Scope) error { + out.Type = autoscaling.MetricSourceType(in.Type) + out.Object = (*autoscaling.ObjectMetricSource)(unsafe.Pointer(in.Object)) + out.Pods = (*autoscaling.PodsMetricSource)(unsafe.Pointer(in.Pods)) + out.Resource = (*autoscaling.ResourceMetricSource)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_v2alpha1_MetricSpec_To_autoscaling_MetricSpec(in *MetricSpec, out 
*autoscaling.MetricSpec, s conversion.Scope) error { + return autoConvert_v2alpha1_MetricSpec_To_autoscaling_MetricSpec(in, out, s) +} + +func autoConvert_autoscaling_MetricSpec_To_v2alpha1_MetricSpec(in *autoscaling.MetricSpec, out *MetricSpec, s conversion.Scope) error { + out.Type = MetricSourceType(in.Type) + out.Object = (*ObjectMetricSource)(unsafe.Pointer(in.Object)) + out.Pods = (*PodsMetricSource)(unsafe.Pointer(in.Pods)) + out.Resource = (*ResourceMetricSource)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_autoscaling_MetricSpec_To_v2alpha1_MetricSpec(in *autoscaling.MetricSpec, out *MetricSpec, s conversion.Scope) error { + return autoConvert_autoscaling_MetricSpec_To_v2alpha1_MetricSpec(in, out, s) +} + +func autoConvert_v2alpha1_MetricStatus_To_autoscaling_MetricStatus(in *MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error { + out.Type = autoscaling.MetricSourceType(in.Type) + out.Object = (*autoscaling.ObjectMetricStatus)(unsafe.Pointer(in.Object)) + out.Pods = (*autoscaling.PodsMetricStatus)(unsafe.Pointer(in.Pods)) + out.Resource = (*autoscaling.ResourceMetricStatus)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_v2alpha1_MetricStatus_To_autoscaling_MetricStatus(in *MetricStatus, out *autoscaling.MetricStatus, s conversion.Scope) error { + return autoConvert_v2alpha1_MetricStatus_To_autoscaling_MetricStatus(in, out, s) +} + +func autoConvert_autoscaling_MetricStatus_To_v2alpha1_MetricStatus(in *autoscaling.MetricStatus, out *MetricStatus, s conversion.Scope) error { + out.Type = MetricSourceType(in.Type) + out.Object = (*ObjectMetricStatus)(unsafe.Pointer(in.Object)) + out.Pods = (*PodsMetricStatus)(unsafe.Pointer(in.Pods)) + out.Resource = (*ResourceMetricStatus)(unsafe.Pointer(in.Resource)) + return nil +} + +func Convert_autoscaling_MetricStatus_To_v2alpha1_MetricStatus(in *autoscaling.MetricStatus, out *MetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_MetricStatus_To_v2alpha1_MetricStatus(in, out, s) +} + +func autoConvert_v2alpha1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error { + if err := Convert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.TargetValue = in.TargetValue + return nil +} + +func Convert_v2alpha1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in *ObjectMetricSource, out *autoscaling.ObjectMetricSource, s conversion.Scope) error { + return autoConvert_v2alpha1_ObjectMetricSource_To_autoscaling_ObjectMetricSource(in, out, s) +} + +func autoConvert_autoscaling_ObjectMetricSource_To_v2alpha1_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *ObjectMetricSource, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.TargetValue = in.TargetValue + return nil +} + +func Convert_autoscaling_ObjectMetricSource_To_v2alpha1_ObjectMetricSource(in *autoscaling.ObjectMetricSource, out *ObjectMetricSource, s conversion.Scope) error { + return autoConvert_autoscaling_ObjectMetricSource_To_v2alpha1_ObjectMetricSource(in, out, s) +} + +func autoConvert_v2alpha1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) 
error { + if err := Convert_v2alpha1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.CurrentValue = in.CurrentValue + return nil +} + +func Convert_v2alpha1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in *ObjectMetricStatus, out *autoscaling.ObjectMetricStatus, s conversion.Scope) error { + return autoConvert_v2alpha1_ObjectMetricStatus_To_autoscaling_ObjectMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_ObjectMetricStatus_To_v2alpha1_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *ObjectMetricStatus, s conversion.Scope) error { + if err := Convert_autoscaling_CrossVersionObjectReference_To_v2alpha1_CrossVersionObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + out.MetricName = in.MetricName + out.CurrentValue = in.CurrentValue + return nil +} + +func Convert_autoscaling_ObjectMetricStatus_To_v2alpha1_ObjectMetricStatus(in *autoscaling.ObjectMetricStatus, out *ObjectMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_ObjectMetricStatus_To_v2alpha1_ObjectMetricStatus(in, out, s) +} + +func autoConvert_v2alpha1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error { + out.MetricName = in.MetricName + out.TargetAverageValue = in.TargetAverageValue + return nil +} + +func Convert_v2alpha1_PodsMetricSource_To_autoscaling_PodsMetricSource(in *PodsMetricSource, out *autoscaling.PodsMetricSource, s conversion.Scope) error { + return autoConvert_v2alpha1_PodsMetricSource_To_autoscaling_PodsMetricSource(in, out, s) +} + +func autoConvert_autoscaling_PodsMetricSource_To_v2alpha1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *PodsMetricSource, s conversion.Scope) error { + out.MetricName = in.MetricName + out.TargetAverageValue = in.TargetAverageValue + return nil +} + +func Convert_autoscaling_PodsMetricSource_To_v2alpha1_PodsMetricSource(in *autoscaling.PodsMetricSource, out *PodsMetricSource, s conversion.Scope) error { + return autoConvert_autoscaling_PodsMetricSource_To_v2alpha1_PodsMetricSource(in, out, s) +} + +func autoConvert_v2alpha1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error { + out.MetricName = in.MetricName + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_v2alpha1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in *PodsMetricStatus, out *autoscaling.PodsMetricStatus, s conversion.Scope) error { + return autoConvert_v2alpha1_PodsMetricStatus_To_autoscaling_PodsMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_PodsMetricStatus_To_v2alpha1_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *PodsMetricStatus, s conversion.Scope) error { + out.MetricName = in.MetricName + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_autoscaling_PodsMetricStatus_To_v2alpha1_PodsMetricStatus(in *autoscaling.PodsMetricStatus, out *PodsMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_PodsMetricStatus_To_v2alpha1_PodsMetricStatus(in, out, s) +} + +func autoConvert_v2alpha1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error { + out.Name = api.ResourceName(in.Name) + out.TargetAverageUtilization = 
(*int32)(unsafe.Pointer(in.TargetAverageUtilization)) + out.TargetAverageValue = (*resource.Quantity)(unsafe.Pointer(in.TargetAverageValue)) + return nil +} + +func Convert_v2alpha1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in *ResourceMetricSource, out *autoscaling.ResourceMetricSource, s conversion.Scope) error { + return autoConvert_v2alpha1_ResourceMetricSource_To_autoscaling_ResourceMetricSource(in, out, s) +} + +func autoConvert_autoscaling_ResourceMetricSource_To_v2alpha1_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *ResourceMetricSource, s conversion.Scope) error { + out.Name = api_v1.ResourceName(in.Name) + out.TargetAverageUtilization = (*int32)(unsafe.Pointer(in.TargetAverageUtilization)) + out.TargetAverageValue = (*resource.Quantity)(unsafe.Pointer(in.TargetAverageValue)) + return nil +} + +func Convert_autoscaling_ResourceMetricSource_To_v2alpha1_ResourceMetricSource(in *autoscaling.ResourceMetricSource, out *ResourceMetricSource, s conversion.Scope) error { + return autoConvert_autoscaling_ResourceMetricSource_To_v2alpha1_ResourceMetricSource(in, out, s) +} + +func autoConvert_v2alpha1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error { + out.Name = api.ResourceName(in.Name) + out.CurrentAverageUtilization = (*int32)(unsafe.Pointer(in.CurrentAverageUtilization)) + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_v2alpha1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in *ResourceMetricStatus, out *autoscaling.ResourceMetricStatus, s conversion.Scope) error { + return autoConvert_v2alpha1_ResourceMetricStatus_To_autoscaling_ResourceMetricStatus(in, out, s) +} + +func autoConvert_autoscaling_ResourceMetricStatus_To_v2alpha1_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *ResourceMetricStatus, s conversion.Scope) error { + out.Name = api_v1.ResourceName(in.Name) + out.CurrentAverageUtilization = (*int32)(unsafe.Pointer(in.CurrentAverageUtilization)) + out.CurrentAverageValue = in.CurrentAverageValue + return nil +} + +func Convert_autoscaling_ResourceMetricStatus_To_v2alpha1_ResourceMetricStatus(in *autoscaling.ResourceMetricStatus, out *ResourceMetricStatus, s conversion.Scope) error { + return autoConvert_autoscaling_ResourceMetricStatus_To_v2alpha1_ResourceMetricStatus(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..1953eaea6 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/v2alpha1/zz_generated.deepcopy.go @@ -0,0 +1,285 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package v2alpha1 + +import ( + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_CrossVersionObjectReference, InType: reflect.TypeOf(&CrossVersionObjectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_HorizontalPodAutoscaler, InType: reflect.TypeOf(&HorizontalPodAutoscaler{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_HorizontalPodAutoscalerList, InType: reflect.TypeOf(&HorizontalPodAutoscalerList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_HorizontalPodAutoscalerSpec, InType: reflect.TypeOf(&HorizontalPodAutoscalerSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_HorizontalPodAutoscalerStatus, InType: reflect.TypeOf(&HorizontalPodAutoscalerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_MetricSpec, InType: reflect.TypeOf(&MetricSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_MetricStatus, InType: reflect.TypeOf(&MetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_ObjectMetricSource, InType: reflect.TypeOf(&ObjectMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_ObjectMetricStatus, InType: reflect.TypeOf(&ObjectMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_PodsMetricSource, InType: reflect.TypeOf(&PodsMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_PodsMetricStatus, InType: reflect.TypeOf(&PodsMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_ResourceMetricSource, InType: reflect.TypeOf(&ResourceMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_ResourceMetricStatus, InType: reflect.TypeOf(&ResourceMetricStatus{})}, + ) +} + +func DeepCopy_v2alpha1_CrossVersionObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CrossVersionObjectReference) + out := out.(*CrossVersionObjectReference) + *out = *in + return nil + } +} + +func DeepCopy_v2alpha1_HorizontalPodAutoscaler(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscaler) + out := out.(*HorizontalPodAutoscaler) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v2alpha1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v2alpha1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v2alpha1_HorizontalPodAutoscalerList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscalerList) + out := out.(*HorizontalPodAutoscalerList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + if err := DeepCopy_v2alpha1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func 
DeepCopy_v2alpha1_HorizontalPodAutoscalerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscalerSpec) + out := out.(*HorizontalPodAutoscalerSpec) + *out = *in + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = **in + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]MetricSpec, len(*in)) + for i := range *in { + if err := DeepCopy_v2alpha1_MetricSpec(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v2alpha1_HorizontalPodAutoscalerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscalerStatus) + out := out.(*HorizontalPodAutoscalerStatus) + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.LastScaleTime != nil { + in, out := &in.LastScaleTime, &out.LastScaleTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + if in.CurrentMetrics != nil { + in, out := &in.CurrentMetrics, &out.CurrentMetrics + *out = make([]MetricStatus, len(*in)) + for i := range *in { + if err := DeepCopy_v2alpha1_MetricStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v2alpha1_MetricSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*MetricSpec) + out := out.(*MetricSpec) + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricSource) + if err := DeepCopy_v2alpha1_ObjectMetricSource(*in, *out, c); err != nil { + return err + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricSource) + if err := DeepCopy_v2alpha1_PodsMetricSource(*in, *out, c); err != nil { + return err + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricSource) + if err := DeepCopy_v2alpha1_ResourceMetricSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v2alpha1_MetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*MetricStatus) + out := out.(*MetricStatus) + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricStatus) + if err := DeepCopy_v2alpha1_ObjectMetricStatus(*in, *out, c); err != nil { + return err + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricStatus) + if err := DeepCopy_v2alpha1_PodsMetricStatus(*in, *out, c); err != nil { + return err + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricStatus) + if err := DeepCopy_v2alpha1_ResourceMetricStatus(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v2alpha1_ObjectMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMetricSource) + out := out.(*ObjectMetricSource) + *out = *in + out.TargetValue = in.TargetValue.DeepCopy() + return nil + } +} + +func DeepCopy_v2alpha1_ObjectMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMetricStatus) + out := out.(*ObjectMetricStatus) + *out = *in + out.CurrentValue = in.CurrentValue.DeepCopy() + return nil + } +} + +func DeepCopy_v2alpha1_PodsMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodsMetricSource) + out 
:= out.(*PodsMetricSource) + *out = *in + out.TargetAverageValue = in.TargetAverageValue.DeepCopy() + return nil + } +} + +func DeepCopy_v2alpha1_PodsMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodsMetricStatus) + out := out.(*PodsMetricStatus) + *out = *in + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return nil + } +} + +func DeepCopy_v2alpha1_ResourceMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceMetricSource) + out := out.(*ResourceMetricSource) + *out = *in + if in.TargetAverageUtilization != nil { + in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization + *out = new(int32) + **out = **in + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + *out = new(resource.Quantity) + **out = (*in).DeepCopy() + } + return nil + } +} + +func DeepCopy_v2alpha1_ResourceMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceMetricStatus) + out := out.(*ResourceMetricStatus) + *out = *in + if in.CurrentAverageUtilization != nil { + in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization + *out = new(int32) + **out = **in + } + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/autoscaling/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/autoscaling/zz_generated.deepcopy.go new file mode 100644 index 000000000..8639502a9 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/autoscaling/zz_generated.deepcopy.go @@ -0,0 +1,320 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package autoscaling + +import ( + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
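
As a hedged sketch (not part of the vendored files), a generated DeepCopy_* function such as the ones below can also be invoked directly; the *conversion.Cloner argument is only consulted for fields like ObjectMeta that have no dedicated generated copier, and conversion.NewCloner() is assumed to exist in this apimachinery vintage:

	// Hypothetical in-package caller: deep-copy an internal autoscaler object.
	cloner := conversion.NewCloner()
	src := &HorizontalPodAutoscaler{}
	dst := &HorizontalPodAutoscaler{}
	if err := DeepCopy_autoscaling_HorizontalPodAutoscaler(src, dst, cloner); err != nil {
		panic(err)
	}
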
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_CrossVersionObjectReference, InType: reflect.TypeOf(&CrossVersionObjectReference{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_HorizontalPodAutoscaler, InType: reflect.TypeOf(&HorizontalPodAutoscaler{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_HorizontalPodAutoscalerList, InType: reflect.TypeOf(&HorizontalPodAutoscalerList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_HorizontalPodAutoscalerSpec, InType: reflect.TypeOf(&HorizontalPodAutoscalerSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_HorizontalPodAutoscalerStatus, InType: reflect.TypeOf(&HorizontalPodAutoscalerStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_MetricSpec, InType: reflect.TypeOf(&MetricSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_MetricStatus, InType: reflect.TypeOf(&MetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ObjectMetricSource, InType: reflect.TypeOf(&ObjectMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ObjectMetricStatus, InType: reflect.TypeOf(&ObjectMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_PodsMetricSource, InType: reflect.TypeOf(&PodsMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_PodsMetricStatus, InType: reflect.TypeOf(&PodsMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ResourceMetricSource, InType: reflect.TypeOf(&ResourceMetricSource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ResourceMetricStatus, InType: reflect.TypeOf(&ResourceMetricStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_Scale, InType: reflect.TypeOf(&Scale{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_autoscaling_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, + ) +} + +func DeepCopy_autoscaling_CrossVersionObjectReference(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CrossVersionObjectReference) + out := out.(*CrossVersionObjectReference) + *out = *in + return nil + } +} + +func DeepCopy_autoscaling_HorizontalPodAutoscaler(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscaler) + out := out.(*HorizontalPodAutoscaler) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_autoscaling_HorizontalPodAutoscalerList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscalerList) + out := out.(*HorizontalPodAutoscalerList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + if err := DeepCopy_autoscaling_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func 
DeepCopy_autoscaling_HorizontalPodAutoscalerSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscalerSpec) + out := out.(*HorizontalPodAutoscalerSpec) + *out = *in + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = **in + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]MetricSpec, len(*in)) + for i := range *in { + if err := DeepCopy_autoscaling_MetricSpec(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_autoscaling_HorizontalPodAutoscalerStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HorizontalPodAutoscalerStatus) + out := out.(*HorizontalPodAutoscalerStatus) + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.LastScaleTime != nil { + in, out := &in.LastScaleTime, &out.LastScaleTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + if in.CurrentMetrics != nil { + in, out := &in.CurrentMetrics, &out.CurrentMetrics + *out = make([]MetricStatus, len(*in)) + for i := range *in { + if err := DeepCopy_autoscaling_MetricStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_autoscaling_MetricSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*MetricSpec) + out := out.(*MetricSpec) + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricSource) + if err := DeepCopy_autoscaling_ObjectMetricSource(*in, *out, c); err != nil { + return err + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricSource) + if err := DeepCopy_autoscaling_PodsMetricSource(*in, *out, c); err != nil { + return err + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricSource) + if err := DeepCopy_autoscaling_ResourceMetricSource(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_autoscaling_MetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*MetricStatus) + out := out.(*MetricStatus) + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricStatus) + if err := DeepCopy_autoscaling_ObjectMetricStatus(*in, *out, c); err != nil { + return err + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricStatus) + if err := DeepCopy_autoscaling_PodsMetricStatus(*in, *out, c); err != nil { + return err + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricStatus) + if err := DeepCopy_autoscaling_ResourceMetricStatus(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_autoscaling_ObjectMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMetricSource) + out := out.(*ObjectMetricSource) + *out = *in + out.TargetValue = in.TargetValue.DeepCopy() + return nil + } +} + +func DeepCopy_autoscaling_ObjectMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ObjectMetricStatus) + out := out.(*ObjectMetricStatus) + *out = *in + out.CurrentValue = in.CurrentValue.DeepCopy() + return nil + } +} + +func DeepCopy_autoscaling_PodsMetricSource(in interface{}, out interface{}, c *conversion.Cloner) 
error { + { + in := in.(*PodsMetricSource) + out := out.(*PodsMetricSource) + *out = *in + out.TargetAverageValue = in.TargetAverageValue.DeepCopy() + return nil + } +} + +func DeepCopy_autoscaling_PodsMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodsMetricStatus) + out := out.(*PodsMetricStatus) + *out = *in + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return nil + } +} + +func DeepCopy_autoscaling_ResourceMetricSource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceMetricSource) + out := out.(*ResourceMetricSource) + *out = *in + if in.TargetAverageUtilization != nil { + in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization + *out = new(int32) + **out = **in + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + *out = new(resource.Quantity) + **out = (*in).DeepCopy() + } + return nil + } +} + +func DeepCopy_autoscaling_ResourceMetricStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ResourceMetricStatus) + out := out.(*ResourceMetricStatus) + *out = *in + if in.CurrentAverageUtilization != nil { + in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization + *out = new(int32) + **out = **in + } + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return nil + } +} + +func DeepCopy_autoscaling_Scale(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Scale) + out := out.(*Scale) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + return nil + } +} + +func DeepCopy_autoscaling_ScaleSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleSpec) + out := out.(*ScaleSpec) + *out = *in + return nil + } +} + +func DeepCopy_autoscaling_ScaleStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleStatus) + out := out.(*ScaleStatus) + *out = *in + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/OWNERS b/vendor/k8s.io/client-go/pkg/apis/batch/OWNERS new file mode 100755 index 000000000..502f90771 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/OWNERS @@ -0,0 +1,19 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- caesarxuchao +- erictune +- sttts +- saad-ali +- ncdc +- timothysc +- soltysh +- dims +- errordeveloper +- mml +- mbohlool +- david-mcmahon +- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/doc.go b/vendor/k8s.io/client-go/pkg/apis/batch/doc.go new file mode 100644 index 000000000..6fe952226 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package batch diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/install/install.go b/vendor/k8s.io/client-go/pkg/apis/batch/install/install.go new file mode 100644 index 000000000..d37f31820 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/install/install.go @@ -0,0 +1,51 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the batch API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/batch" + "k8s.io/client-go/pkg/apis/batch/v1" + "k8s.io/client-go/pkg/apis/batch/v2alpha1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: batch.GroupName, + VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version, v2alpha1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/batch", + AddInternalObjectsToScheme: batch.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1.SchemeGroupVersion.Version: v1.AddToScheme, + v2alpha1.SchemeGroupVersion.Version: v2alpha1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/register.go b/vendor/k8s.io/client-go/pkg/apis/batch/register.go new file mode 100644 index 000000000..4601ca4ec --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/register.go @@ -0,0 +1,57 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package batch + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "batch" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Job{}, + &JobList{}, + &JobTemplate{}, + &CronJob{}, + &CronJobList{}, + ) + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJob"), &CronJob{}) + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJobList"), &CronJobList{}) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/types.go b/vendor/k8s.io/client-go/pkg/apis/batch/types.go new file mode 100644 index 000000000..7a2ad011f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/types.go @@ -0,0 +1,286 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package batch + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api" +) + +// +genclient=true + +// Job represents the configuration of a single job. +type Job struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // Spec is a structure defining the expected behavior of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec JobSpec + + // Status is a structure describing current status of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status JobStatus +} + +// JobList is a collection of jobs. +type JobList struct { + metav1.TypeMeta + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta + + // Items is the list of Job. + Items []Job +} + +// JobTemplate describes a template for creating copies of a predefined pod. +type JobTemplate struct { + metav1.TypeMeta + // Standard object's metadata. 
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // Template defines jobs that will be created from this template + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Template JobTemplateSpec +} + +// JobTemplateSpec describes the data a Job should have when created from a template +type JobTemplateSpec struct { + // Standard object's metadata of the jobs created from this template. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // Specification of the desired behavior of the job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec JobSpec +} + +// JobSpec describes how the job execution will look like. +type JobSpec struct { + + // Parallelism specifies the maximum desired number of pods the job should + // run at any given time. The actual number of pods running in steady state will + // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), + // i.e. when the work left to do is less than max parallelism. + // +optional + Parallelism *int32 + + // Completions specifies the desired number of successfully finished pods the + // job should be run with. Setting to nil means that the success of any + // pod signals the success of all pods, and allows parallelism to have any positive + // value. Setting to 1 means that parallelism is limited to 1 and the success of that + // pod signals the success of the job. + // +optional + Completions *int32 + + // Optional duration in seconds relative to the startTime that the job may be active + // before the system tries to terminate it; value must be positive integer + // +optional + ActiveDeadlineSeconds *int64 + + // Selector is a label query over pods that should match the pod count. + // Normally, the system sets this field for you. + // +optional + Selector *metav1.LabelSelector + + // ManualSelector controls generation of pod labels and pod selectors. + // Leave `manualSelector` unset unless you are certain what you are doing. + // When false or unset, the system pick labels unique to this job + // and appends those labels to the pod template. When true, + // the user is responsible for picking unique labels and specifying + // the selector. Failure to pick a unique label may cause this + // and other jobs to not function correctly. However, You may see + // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` + // API. + // +optional + ManualSelector *bool + + // Template is the object that describes the pod that will be created when + // executing a job. + Template api.PodTemplateSpec +} + +// JobStatus represents the current state of a Job. +type JobStatus struct { + + // Conditions represent the latest available observations of an object's current state. + // +optional + Conditions []JobCondition + + // StartTime represents time when the job was acknowledged by the Job Manager. + // It is not guaranteed to be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + // +optional + StartTime *metav1.Time + + // CompletionTime represents time when the job was completed. It is not guaranteed to + // be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. 
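Reviewer note on JobSpec above: the optional fields are pointers so that "unset" can be told apart from an explicit zero. A hedged construction sketch; the int32Ptr and int64Ptr helpers are local conveniences, not part of the vendored API:

package main

import (
	"fmt"

	"k8s.io/client-go/pkg/api"
	"k8s.io/client-go/pkg/apis/batch"
)

func int32Ptr(i int32) *int32 { return &i }
func int64Ptr(i int64) *int64 { return &i }

func main() {
	spec := batch.JobSpec{
		Parallelism:           int32Ptr(2),   // at most 2 pods at a time
		Completions:           int32Ptr(5),   // job is done after 5 successes
		ActiveDeadlineSeconds: int64Ptr(600), // hard stop after 10 minutes
		Template: api.PodTemplateSpec{
			Spec: api.PodSpec{
				RestartPolicy: api.RestartPolicyOnFailure,
				Containers:    []api.Container{{Name: "worker", Image: "busybox"}},
			},
		},
	}
	fmt.Printf("parallelism=%d completions=%d\n", *spec.Parallelism, *spec.Completions)
}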
+ // +optional + CompletionTime *metav1.Time + + // Active is the number of actively running pods. + // +optional + Active int32 + + // Succeeded is the number of pods which reached Phase Succeeded. + // +optional + Succeeded int32 + + // Failed is the number of pods which reached Phase Failed. + // +optional + Failed int32 +} + +type JobConditionType string + +// These are valid conditions of a job. +const ( + // JobComplete means the job has completed its execution. + JobComplete JobConditionType = "Complete" + // JobFailed means the job has failed its execution. + JobFailed JobConditionType = "Failed" +) + +// JobCondition describes current state of a job. +type JobCondition struct { + // Type of job condition, Complete or Failed. + Type JobConditionType + // Status of the condition, one of True, False, Unknown. + Status api.ConditionStatus + // Last time the condition was checked. + // +optional + LastProbeTime metav1.Time + // Last time the condition transit from one status to another. + // +optional + LastTransitionTime metav1.Time + // (brief) reason for the condition's last transition. + // +optional + Reason string + // Human readable message indicating details about last transition. + // +optional + Message string +} + +// +genclient=true + +// CronJob represents the configuration of a single cron job. +type CronJob struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // Spec is a structure defining the expected behavior of a job, including the schedule. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec CronJobSpec + + // Status is a structure describing current status of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status CronJobStatus +} + +// CronJobList is a collection of cron jobs. +type CronJobList struct { + metav1.TypeMeta + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta + + // Items is the list of CronJob. + Items []CronJob +} + +// CronJobSpec describes how the job execution will look like and when it will actually run. +type CronJobSpec struct { + + // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. + Schedule string + + // Optional deadline in seconds for starting the job if it misses scheduled + // time for any reason. Missed jobs executions will be counted as failed ones. + // +optional + StartingDeadlineSeconds *int64 + + // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. + // +optional + ConcurrencyPolicy ConcurrencyPolicy + + // Suspend flag tells the controller to suspend subsequent executions, it does + // not apply to already started executions. Defaults to false. + // +optional + Suspend *bool + + // JobTemplate is the object that describes the job that will be created when + // executing a CronJob. + JobTemplate JobTemplateSpec + + // The number of successful finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + SuccessfulJobsHistoryLimit *int32 + + // The number of failed finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. 
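Reviewer note on JobCondition above: consumers typically treat a Job as finished once a Complete or Failed condition is True. The jobFinished helper below is an illustrative sketch, not part of the vendored code:

package main

import (
	"fmt"

	"k8s.io/client-go/pkg/api"
	"k8s.io/client-go/pkg/apis/batch"
)

// jobFinished reports whether the Job carries a terminal condition and which one.
func jobFinished(j *batch.Job) (bool, batch.JobConditionType) {
	for _, c := range j.Status.Conditions {
		if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == api.ConditionTrue {
			return true, c.Type
		}
	}
	return false, ""
}

func main() {
	j := &batch.Job{}
	j.Status.Conditions = []batch.JobCondition{{Type: batch.JobComplete, Status: api.ConditionTrue}}
	fmt.Println(jobFinished(j)) // true Complete
}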
+ // +optional + FailedJobsHistoryLimit *int32 +} + +// ConcurrencyPolicy describes how the job will be handled. +// Only one of the following concurrent policies may be specified. +// If none of the following policies is specified, the default one +// is AllowConcurrent. +type ConcurrencyPolicy string + +const ( + // AllowConcurrent allows CronJobs to run concurrently. + AllowConcurrent ConcurrencyPolicy = "Allow" + + // ForbidConcurrent forbids concurrent runs, skipping next run if previous + // hasn't finished yet. + ForbidConcurrent ConcurrencyPolicy = "Forbid" + + // ReplaceConcurrent cancels currently running job and replaces it with a new one. + ReplaceConcurrent ConcurrencyPolicy = "Replace" +) + +// CronJobStatus represents the current state of a cron job. +type CronJobStatus struct { + // Active holds pointers to currently running jobs. + // +optional + Active []api.ObjectReference + + // LastScheduleTime keeps information of when was the last time the job was successfully scheduled. + // +optional + LastScheduleTime *metav1.Time +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/conversion.go new file mode 100644 index 000000000..7f2bc9d15 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/conversion.go @@ -0,0 +1,84 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/pkg/apis/batch" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + err := scheme.AddConversionFuncs( + Convert_batch_JobSpec_To_v1_JobSpec, + Convert_v1_JobSpec_To_batch_JobSpec, + ) + if err != nil { + return err + } + + return scheme.AddFieldLabelConversionFunc("batch/v1", "Job", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", "metadata.namespace", "status.successful": + return label, value, nil + default: + return "", "", fmt.Errorf("field label %q not supported for Job", label) + } + }, + ) +} + +func Convert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds + out.Selector = in.Selector + if in.ManualSelector != nil { + out.ManualSelector = new(bool) + *out.ManualSelector = *in.ManualSelector + } else { + out.ManualSelector = nil + } + + if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_v1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { + out.Parallelism = in.Parallelism + out.Completions = in.Completions + out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds + out.Selector = in.Selector + if in.ManualSelector != nil { + out.ManualSelector = new(bool) + *out.ManualSelector = *in.ManualSelector + } else { + out.ManualSelector = nil + } + + if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/defaults.go new file mode 100644 index 000000000..3603247f2 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/defaults.go @@ -0,0 +1,47 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + RegisterDefaults(scheme) + return scheme.AddDefaultingFuncs( + SetDefaults_Job, + ) +} + +func SetDefaults_Job(obj *Job) { + // For a non-parallel job, you can leave both `.spec.completions` and + // `.spec.parallelism` unset. When both are unset, both are defaulted to 1. 
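Reviewer note on conversion.go above: the field label conversion func is what backs field selectors such as status.successful on batch/v1 Jobs. A hedged sketch, assuming the string-keyed ConvertFieldLabel signature this apimachinery vintage exposes:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	batchv1 "k8s.io/client-go/pkg/apis/batch/v1"
)

func main() {
	scheme := runtime.NewScheme()
	if err := batchv1.AddToScheme(scheme); err != nil { // wires addConversionFuncs above
		panic(err)
	}

	// Supported label: passes through unchanged.
	label, value, err := scheme.ConvertFieldLabel("batch/v1", "Job", "status.successful", "1")
	fmt.Println(label, value, err) // status.successful 1 <nil>

	// Unsupported label: rejected by the switch in addConversionFuncs.
	_, _, err = scheme.ConvertFieldLabel("batch/v1", "Job", "spec.parallelism", "2")
	fmt.Println(err)
}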
+ if obj.Spec.Completions == nil && obj.Spec.Parallelism == nil { + obj.Spec.Completions = new(int32) + *obj.Spec.Completions = 1 + obj.Spec.Parallelism = new(int32) + *obj.Spec.Parallelism = 1 + } + if obj.Spec.Parallelism == nil { + obj.Spec.Parallelism = new(int32) + *obj.Spec.Parallelism = 1 + } + labels := obj.Spec.Template.Labels + if labels != nil && len(obj.Labels) == 0 { + obj.Labels = labels + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/doc.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/doc.go new file mode 100644 index 000000000..c7be42d5a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/generated.pb.go new file mode 100644 index 000000000..0ebc07be7 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/generated.pb.go @@ -0,0 +1,1580 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto + + It has these top-level messages: + Job + JobCondition + JobList + JobSpec + JobStatus +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/pkg/api/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
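Reviewer note on SetDefaults_Job above: the sketch below exercises the two defaulting branches and uses only the vendored batch/v1 package and the exported defaulter:

package main

import (
	"fmt"

	batchv1 "k8s.io/client-go/pkg/apis/batch/v1"
)

func main() {
	// Neither completions nor parallelism set: both default to 1 (non-parallel job).
	job := &batchv1.Job{}
	batchv1.SetDefaults_Job(job)
	fmt.Println(*job.Spec.Completions, *job.Spec.Parallelism) // 1 1

	// Only completions set: parallelism alone is defaulted to 1.
	c := int32(5)
	job2 := &batchv1.Job{}
	job2.Spec.Completions = &c
	batchv1.SetDefaults_Job(job2)
	fmt.Println(*job2.Spec.Completions, *job2.Spec.Parallelism) // 5 1
}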
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *Job) Reset() { *m = Job{} } +func (*Job) ProtoMessage() {} +func (*Job) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *JobCondition) Reset() { *m = JobCondition{} } +func (*JobCondition) ProtoMessage() {} +func (*JobCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *JobList) Reset() { *m = JobList{} } +func (*JobList) ProtoMessage() {} +func (*JobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *JobSpec) Reset() { *m = JobSpec{} } +func (*JobSpec) ProtoMessage() {} +func (*JobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *JobStatus) Reset() { *m = JobStatus{} } +func (*JobStatus) ProtoMessage() {} +func (*JobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func init() { + proto.RegisterType((*Job)(nil), "k8s.io.client-go.pkg.apis.batch.v1.Job") + proto.RegisterType((*JobCondition)(nil), "k8s.io.client-go.pkg.apis.batch.v1.JobCondition") + proto.RegisterType((*JobList)(nil), "k8s.io.client-go.pkg.apis.batch.v1.JobList") + proto.RegisterType((*JobSpec)(nil), "k8s.io.client-go.pkg.apis.batch.v1.JobSpec") + proto.RegisterType((*JobStatus)(nil), "k8s.io.client-go.pkg.apis.batch.v1.JobStatus") +} +func (m *Job) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Job) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *JobCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) + n4, err := m.LastProbeTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n5, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *JobList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err 
+ } + return data[:n], nil +} + +func (m *JobList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *JobSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Parallelism != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Parallelism)) + } + if m.Completions != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Completions)) + } + if m.ActiveDeadlineSeconds != nil { + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.ActiveDeadlineSeconds)) + } + if m.Selector != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n7, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.ManualSelector != nil { + data[i] = 0x28 + i++ + if *m.ManualSelector { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n8, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + return i, nil +} + +func (m *JobStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.StartTime != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size())) + n9, err := m.StartTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.CompletionTime != nil { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.CompletionTime.Size())) + n10, err := m.CompletionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + } + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Active)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Succeeded)) + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Failed)) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + 
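Reviewer note on the generated MarshalTo methods above: the literal bytes 0xa, 0x12, 0x1a, and so on are protobuf field keys. The protoKey helper below is an illustrative sketch of how those constants are derived; for field numbers up to 15 the key fits in a single byte:

package main

import "fmt"

// protoKey packs a field number and wire type the way the generated code does:
// wire type 2 is length-delimited, wire type 0 is varint.
func protoKey(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

func main() {
	fmt.Printf("%#x\n", protoKey(1, 2)) // 0xa  -> Job.metadata
	fmt.Printf("%#x\n", protoKey(2, 2)) // 0x12 -> Job.spec
	fmt.Printf("%#x\n", protoKey(3, 2)) // 0x1a -> Job.status
	fmt.Printf("%#x\n", protoKey(1, 0)) // 0x8  -> JobSpec.parallelism (varint)
}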
return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *Job) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *JobSpec) Size() (n int) { + var l int + _ = l + if m.Parallelism != nil { + n += 1 + sovGenerated(uint64(*m.Parallelism)) + } + if m.Completions != nil { + n += 1 + sovGenerated(uint64(*m.Completions)) + } + if m.ActiveDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ManualSelector != nil { + n += 2 + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobStatus) Size() (n int) { + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.StartTime != nil { + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CompletionTime != nil { + l = m.CompletionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Active)) + n += 1 + sovGenerated(uint64(m.Succeeded)) + n += 1 + sovGenerated(uint64(m.Failed)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Job) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Job{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "JobSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "JobStatus", "JobStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *JobCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JobCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + 
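Reviewer note on encodeVarintGenerated and sovGenerated above: both walk the value seven bits at a time. The putUvarint sketch below mirrors that loop on its own, with no dependency on the vendored code:

package main

import "fmt"

// putUvarint writes v into buf using base-128 varints: seven payload bits per
// byte, high bit set on every byte except the last. It returns the byte count,
// which is exactly what sovGenerated computes without writing anything.
func putUvarint(buf []byte, v uint64) int {
	i := 0
	for v >= 1<<7 {
		buf[i] = byte(v&0x7f | 0x80)
		v >>= 7
		i++
	}
	buf[i] = byte(v)
	return i + 1
}

func main() {
	buf := make([]byte, 10)
	n := putUvarint(buf, 300)
	fmt.Printf("% x (len %d)\n", buf[:n], n) // ac 02 (len 2)
}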
fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *JobList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JobList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Job", "Job", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *JobSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JobSpec{`, + `Parallelism:` + valueToStringGenerated(this.Parallelism) + `,`, + `Completions:` + valueToStringGenerated(this.Completions) + `,`, + `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `ManualSelector:` + valueToStringGenerated(this.ManualSelector) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *JobStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JobStatus{`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "JobCondition", "JobCondition", 1), `&`, ``, 1) + `,`, + `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `CompletionTime:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `Active:` + fmt.Sprintf("%v", this.Active) + `,`, + `Succeeded:` + fmt.Sprintf("%v", this.Succeeded) + `,`, + `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Job) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Job: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Job: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
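Reviewer note on the generated Marshal/Unmarshal pair: a quick round trip is the usual smoke test for a file like this. The sketch below assumes only the vendored batch/v1 package from this diff:

package main

import (
	"fmt"

	batchv1 "k8s.io/client-go/pkg/apis/batch/v1"
)

func main() {
	p := int32(3)
	in := batchv1.Job{}
	in.Name = "example" // promoted from the embedded ObjectMeta
	in.Spec.Parallelism = &p

	data, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	var out batchv1.Job
	if err := out.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(out.Name, *out.Spec.Parallelism) // example 3
}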
fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = JobConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Job{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Parallelism = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Completions", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Completions = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveDeadlineSeconds = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } 
+ var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ManualSelector", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ManualSelector = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, JobCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
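Reviewer note on the Unmarshal loops above: every iteration starts by reading a varint key and splitting it into a field number and wire type. The splitKey helper below is an illustrative inverse of the key bytes used by the marshalers:

package main

import "fmt"

// splitKey undoes protobuf key packing: the low three bits carry the wire type,
// the remaining bits carry the field number.
func splitKey(key uint64) (fieldNum int32, wireType int) {
	return int32(key >> 3), int(key & 0x7)
}

func main() {
	f, w := splitKey(0x22)
	fmt.Println(f, w) // 4 2 -> JobSpec.selector (length-delimited)

	f, w = splitKey(0x28)
	fmt.Println(f, w) // 5 0 -> JobSpec.manualSelector (varint bool)
}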
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartTime == nil { + m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompletionTime == nil { + m.CompletionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.CompletionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + } + m.Active = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Active |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) + } + m.Succeeded = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Succeeded |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) + } + m.Failed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Failed |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 885 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x54, 0xdd, 0x6e, 0xe3, 0x44, + 0x14, 0xce, 0x4f, 0xd3, 0x26, 0x93, 0xb6, 0xbb, 0x8c, 0x54, 0x29, 0xf4, 0x22, 0x59, 0x05, 0x84, + 0x0a, 0xda, 0xb5, 0x49, 0xbb, 0x42, 0x88, 0x0b, 0x24, 0x5c, 0x84, 0x44, 0xd5, 0xb2, 0xd5, 0xa4, + 0x02, 0x89, 0x1f, 0x89, 0xb1, 0x7d, 0x9a, 0x0e, 0xb5, 0x3d, 0x96, 0x67, 0x12, 0xd1, 0x3b, 0xde, + 0x00, 0x1e, 0x06, 0x21, 0x1e, 0xa1, 0x97, 0xbd, 0xe4, 0x2a, 0xa2, 0xe6, 0x2d, 0xf6, 0x0a, 0xcd, + 0x78, 0xfc, 0x93, 0x4d, 0x0a, 0xd9, 0xbd, 0xb3, 0xcf, 0x7c, 0xdf, 0x37, 0x67, 0xce, 0xf9, 0xce, + 0x41, 0x47, 0xd7, 0x1f, 0x0b, 0x8b, 0x71, 0xfb, 0x7a, 0xea, 0x42, 0x12, 0x81, 0x04, 0x61, 0xc7, + 0xd7, 0x13, 0x9b, 0xc6, 0x4c, 0xd8, 0x2e, 0x95, 0xde, 0x95, 0x3d, 0x1b, 0xd9, 0x13, 0x88, 0x20, + 0xa1, 0x12, 0x7c, 0x2b, 0x4e, 0xb8, 0xe4, 0xf8, 0x9d, 0x8c, 0x64, 0x95, 0x24, 0x2b, 0xbe, 0x9e, + 0x58, 0x8a, 0x64, 0x69, 0x92, 0x35, 0x1b, 0xed, 0x3f, 0x9b, 0x30, 0x79, 0x35, 0x75, 0x2d, 0x8f, + 0x87, 0xf6, 0x84, 0x4f, 0xb8, 0xad, 0xb9, 0xee, 0xf4, 0x52, 0xff, 0xe9, 0x1f, 0xfd, 0x95, 0x69, + 0xee, 0x3f, 0x37, 0x89, 0xd0, 0x98, 0x85, 0xd4, 0xbb, 0x62, 0x11, 0x24, 0x37, 0x65, 0x2a, 0x21, + 0x48, 0xba, 0x22, 0x93, 0x7d, 0xfb, 0x21, 0x56, 0x32, 0x8d, 0x24, 0x0b, 0x61, 0x89, 0xf0, 0xd1, + 0xff, 0x11, 0x84, 0x77, 0x05, 0x21, 0x5d, 0xe2, 0x1d, 0x3d, 0xc4, 0x9b, 0x4a, 0x16, 0xd8, 0x2c, + 0x92, 0x42, 0x26, 0x4b, 0xa4, 0xca, 0x9b, 0x04, 0x24, 0x33, 0x48, 0xca, 0x07, 0xc1, 0xcf, 0x34, + 0x8c, 0x03, 0x58, 0xf5, 0xa6, 0xa7, 0x0f, 0xb6, 0x64, 0x05, 0x7a, 0xf8, 0x6b, 0x03, 0x35, 0x4f, + 0xb8, 0x8b, 0x7f, 0x44, 0x6d, 0x55, 0x24, 0x9f, 0x4a, 0xda, 0xab, 0x3f, 0xa9, 0x1f, 0x74, 0x0f, + 0x3f, 0xb4, 0x4c, 0x9b, 0xaa, 0x39, 0x97, 0x8d, 0x52, 0x68, 0x6b, 0x36, 0xb2, 0x5e, 0xb8, 0x3f, + 0x81, 0x27, 0xcf, 0x40, 0x52, 0x07, 0xdf, 0xce, 0x07, 0xb5, 0x74, 0x3e, 0x40, 0x65, 0x8c, 0x14, + 0xaa, 0xf8, 0x2b, 0xb4, 0x21, 0x62, 0xf0, 0x7a, 0x0d, 0xad, 0xfe, 0xd4, 0x5a, 0xc3, 0x04, 0xd6, + 0x09, 0x77, 0xc7, 0x31, 0x78, 0xce, 0xb6, 0x51, 0xde, 0x50, 0x7f, 0x44, 0xeb, 0xe0, 0xaf, 0xd1, + 0xa6, 0x90, 0x54, 0x4e, 0x45, 0xaf, 0xa9, 0x15, 0xad, 0xb5, 0x15, 0x35, 0xcb, 0xd9, 0x35, 0x9a, + 0x9b, 0xd9, 0x3f, 0x31, 0x6a, 0xc3, 0xbb, 0x26, 0xda, 0x3e, 0xe1, 0xee, 0x31, 0x8f, 0x7c, 0x26, + 0x19, 0x8f, 0xf0, 0x73, 0xb4, 0x21, 0x6f, 0x62, 0xd0, 0x65, 0xe9, 0x38, 0x4f, 0xf2, 0x54, 0x2e, + 
0x6e, 0x62, 0x78, 0x39, 0x1f, 0x3c, 0xae, 0x62, 0x55, 0x8c, 0x68, 0x74, 0x25, 0xbd, 0x86, 0xe6, + 0x7d, 0xba, 0x78, 0xdd, 0xcb, 0xf9, 0xe0, 0x3f, 0x1b, 0x65, 0x15, 0x9a, 0x8b, 0xe9, 0xe1, 0x09, + 0xda, 0x09, 0xa8, 0x90, 0xe7, 0x09, 0x77, 0xe1, 0x82, 0x85, 0x60, 0x5e, 0xff, 0xc1, 0x7a, 0xdd, + 0x52, 0x0c, 0x67, 0xcf, 0xa4, 0xb2, 0x73, 0x5a, 0x15, 0x22, 0x8b, 0xba, 0x78, 0x86, 0xb0, 0x0a, + 0x5c, 0x24, 0x34, 0x12, 0xd9, 0xe3, 0xd4, 0x6d, 0x1b, 0xaf, 0x7d, 0xdb, 0xbe, 0xb9, 0x0d, 0x9f, + 0x2e, 0xa9, 0x91, 0x15, 0x37, 0xe0, 0xf7, 0xd0, 0x66, 0x02, 0x54, 0xf0, 0xa8, 0xd7, 0xd2, 0x85, + 0x2b, 0xfa, 0x44, 0x74, 0x94, 0x98, 0x53, 0xfc, 0x3e, 0xda, 0x0a, 0x41, 0x08, 0x3a, 0x81, 0xde, + 0xa6, 0x06, 0x3e, 0x32, 0xc0, 0xad, 0xb3, 0x2c, 0x4c, 0xf2, 0xf3, 0xe1, 0x1f, 0x75, 0xb4, 0x75, + 0xc2, 0xdd, 0x53, 0x26, 0x24, 0xfe, 0x7e, 0xc9, 0xe8, 0xd6, 0x7a, 0x8f, 0x51, 0x6c, 0x6d, 0xf3, + 0xc7, 0xe6, 0x9e, 0x76, 0x1e, 0xa9, 0x98, 0xfc, 0x0c, 0xb5, 0x98, 0x84, 0x50, 0x35, 0xbd, 0x79, + 0xd0, 0x3d, 0x3c, 0x58, 0xd7, 0x93, 0xce, 0x8e, 0x11, 0x6d, 0x7d, 0xa9, 0xe8, 0x24, 0x53, 0x19, + 0xfe, 0xd9, 0xd4, 0x89, 0x2b, 0xd7, 0xe3, 0x11, 0xea, 0xc6, 0x34, 0xa1, 0x41, 0x00, 0x01, 0x13, + 0xa1, 0xce, 0xbd, 0xe5, 0x3c, 0x4a, 0xe7, 0x83, 0xee, 0x79, 0x19, 0x26, 0x55, 0x8c, 0xa2, 0x78, + 0x5c, 0xed, 0x09, 0x55, 0xdc, 0xcc, 0x88, 0x86, 0x72, 0x5c, 0x86, 0x49, 0x15, 0x83, 0x5f, 0xa0, + 0x3d, 0xea, 0x49, 0x36, 0x83, 0xcf, 0x81, 0xfa, 0x01, 0x8b, 0x60, 0x0c, 0x1e, 0x8f, 0xfc, 0x6c, + 0xc8, 0x9a, 0xce, 0xdb, 0xe9, 0x7c, 0xb0, 0xf7, 0xd9, 0x2a, 0x00, 0x59, 0xcd, 0xc3, 0x3f, 0xa0, + 0xb6, 0x80, 0x00, 0x3c, 0xc9, 0x13, 0x63, 0x9e, 0xa3, 0x35, 0xeb, 0x4d, 0x5d, 0x08, 0xc6, 0x86, + 0xea, 0x6c, 0xab, 0x82, 0xe7, 0x7f, 0xa4, 0x90, 0xc4, 0x9f, 0xa0, 0xdd, 0x90, 0x46, 0x53, 0x5a, + 0x20, 0xb5, 0x6b, 0xda, 0x0e, 0x4e, 0xe7, 0x83, 0xdd, 0xb3, 0x85, 0x13, 0xf2, 0x0a, 0x12, 0x7f, + 0x87, 0xda, 0x12, 0xc2, 0x38, 0xa0, 0x32, 0xb3, 0x50, 0xf7, 0xf0, 0xd9, 0xc3, 0xfd, 0x52, 0x29, + 0x9d, 0x73, 0xff, 0xc2, 0x10, 0xf4, 0x5a, 0x2a, 0x9c, 0x90, 0x47, 0x49, 0x21, 0x38, 0xfc, 0xbd, + 0x89, 0x3a, 0xc5, 0xb2, 0xc1, 0x80, 0x90, 0x97, 0x0f, 0xb4, 0xe8, 0xd5, 0xb5, 0x39, 0x46, 0xeb, + 0x9a, 0xa3, 0x58, 0x05, 0xe5, 0x86, 0x2d, 0x42, 0x82, 0x54, 0x84, 0xf1, 0x37, 0xa8, 0x23, 0x24, + 0x4d, 0xa4, 0x1e, 0xd5, 0xc6, 0x6b, 0x8f, 0xea, 0x4e, 0x3a, 0x1f, 0x74, 0xc6, 0xb9, 0x00, 0x29, + 0xb5, 0xf0, 0x25, 0xda, 0x2d, 0x5d, 0xf2, 0x86, 0x6b, 0x47, 0xb7, 0xe4, 0x78, 0x41, 0x85, 0xbc, + 0xa2, 0xaa, 0x86, 0x3f, 0xb3, 0x91, 0xf6, 0x4a, 0xab, 0x1c, 0xfe, 0xcc, 0x73, 0xc4, 0x9c, 0x62, + 0x1b, 0x75, 0xc4, 0xd4, 0xf3, 0x00, 0x7c, 0xf0, 0x75, 0xc7, 0x5b, 0xce, 0x5b, 0x06, 0xda, 0x19, + 0xe7, 0x07, 0xa4, 0xc4, 0x28, 0xe1, 0x4b, 0xca, 0x02, 0xf0, 0x75, 0xa7, 0x2b, 0xc2, 0x5f, 0xe8, + 0x28, 0x31, 0xa7, 0xce, 0xbb, 0xb7, 0xf7, 0xfd, 0xda, 0xdd, 0x7d, 0xbf, 0xf6, 0xd7, 0x7d, 0xbf, + 0xf6, 0x4b, 0xda, 0xaf, 0xdf, 0xa6, 0xfd, 0xfa, 0x5d, 0xda, 0xaf, 0xff, 0x9d, 0xf6, 0xeb, 0xbf, + 0xfd, 0xd3, 0xaf, 0x7d, 0xdb, 0x98, 0x8d, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xe7, 0x0a, + 0x8d, 0xf7, 0x08, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/batch/v1/generated.proto new file mode 100644 index 000000000..283da1c8d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/generated.proto @@ -0,0 +1,168 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.batch.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// Job represents the configuration of a single job. +message Job { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec is a structure defining the expected behavior of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional JobSpec spec = 2; + + // Status is a structure describing current status of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional JobStatus status = 3; +} + +// JobCondition describes current state of a job. +message JobCondition { + // Type of job condition, Complete or Failed. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition was checked. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + + // Last time the condition transit from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // (brief) reason for the condition's last transition. + // +optional + optional string reason = 5; + + // Human readable message indicating details about last transition. + // +optional + optional string message = 6; +} + +// JobList is a collection of jobs. +message JobList { + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Job. + repeated Job items = 2; +} + +// JobSpec describes how the job execution will look like. +message JobSpec { + // Parallelism specifies the maximum desired number of pods the job should + // run at any given time. The actual number of pods running in steady state will + // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), + // i.e. when the work left to do is less than max parallelism. + // More info: http://kubernetes.io/docs/user-guide/jobs + // +optional + optional int32 parallelism = 1; + + // Completions specifies the desired number of successfully finished pods the + // job should be run with. Setting to nil means that the success of any + // pod signals the success of all pods, and allows parallelism to have any positive + // value. 
Setting to 1 means that parallelism is limited to 1 and the success of that + // pod signals the success of the job. + // More info: http://kubernetes.io/docs/user-guide/jobs + // +optional + optional int32 completions = 2; + + // Optional duration in seconds relative to the startTime that the job may be active + // before the system tries to terminate it; value must be positive integer + // +optional + optional int64 activeDeadlineSeconds = 3; + + // Selector is a label query over pods that should match the pod count. + // Normally, the system sets this field for you. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + + // ManualSelector controls generation of pod labels and pod selectors. + // Leave `manualSelector` unset unless you are certain what you are doing. + // When false or unset, the system pick labels unique to this job + // and appends those labels to the pod template. When true, + // the user is responsible for picking unique labels and specifying + // the selector. Failure to pick a unique label may cause this + // and other jobs to not function correctly. However, You may see + // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` + // API. + // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md + // +optional + optional bool manualSelector = 5; + + // Template is the object that describes the pod that will be created when + // executing a job. + // More info: http://kubernetes.io/docs/user-guide/jobs + optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6; +} + +// JobStatus represents the current state of a Job. +message JobStatus { + // Conditions represent the latest available observations of an object's current state. + // More info: http://kubernetes.io/docs/user-guide/jobs + // +optional + repeated JobCondition conditions = 1; + + // StartTime represents time when the job was acknowledged by the Job Manager. + // It is not guaranteed to be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2; + + // CompletionTime represents time when the job was completed. It is not guaranteed to + // be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3; + + // Active is the number of actively running pods. + // +optional + optional int32 active = 4; + + // Succeeded is the number of pods which reached Phase Succeeded. + // +optional + optional int32 succeeded = 5; + + // Failed is the number of pods which reached Phase Failed. + // +optional + optional int32 failed = 6; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/register.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/register.go new file mode 100644 index 000000000..4ba570d1b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/register.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "batch" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Job{}, + &JobList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/types.generated.go new file mode 100644 index 000000000..49dab1f88 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/types.generated.go @@ -0,0 +1,2681 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +package v1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg4_resource "k8s.io/apimachinery/pkg/api/resource" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + pkg5_intstr "k8s.io/apimachinery/pkg/util/intstr" + pkg3_v1 "k8s.io/client-go/pkg/api/v1" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg4_resource.Quantity + var v1 pkg1_v1.TypeMeta + var v2 pkg2_types.UID + var v3 pkg5_intstr.IntOrString + var v4 pkg3_v1.PodTemplateSpec + var v5 time.Time + _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 + } +} + +func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, 
string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Job) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = 
pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = JobSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = JobStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = JobSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = JobStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 
bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceJob(([]Job)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceJob(([]Job)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceJob((*[]Job)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceJob((*[]Job)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Parallelism != nil + yyq2[1] = x.Completions != nil + yyq2[2] = x.ActiveDeadlineSeconds != nil + yyq2[3] = x.Selector != nil + yyq2[4] = x.ManualSelector != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Parallelism == nil { + r.EncodeNil() + } else { + yy4 := *x.Parallelism + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("parallelism")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Parallelism == nil { + r.EncodeNil() + } else { + yy6 := *x.Parallelism + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Completions == nil { + r.EncodeNil() + } else { + yy9 := *x.Completions + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("completions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Completions == nil { + r.EncodeNil() + } else { + yy11 := *x.Completions + yym12 := z.EncBinary() + _ = yym12 + if false { + } else { + r.EncodeInt(int64(yy11)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.ActiveDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy14 := *x.ActiveDeadlineSeconds + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeInt(int64(yy14)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ActiveDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy16 := *x.ActiveDeadlineSeconds + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(yy16)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + 
if yyq2[3] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.ManualSelector == nil { + r.EncodeNil() + } else { + yy22 := *x.ManualSelector + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeBool(bool(yy22)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("manualSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ManualSelector == nil { + r.EncodeNil() + } else { + yy24 := *x.ManualSelector + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeBool(bool(yy24)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy27 := &x.Template + yy27.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy29 := &x.Template + yy29.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "parallelism": + if r.TryDecodeAsNil() { + if x.Parallelism != nil { + x.Parallelism = nil + } + } else { + if x.Parallelism == nil { + x.Parallelism = new(int32) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(x.Parallelism)) 
= int32(r.DecodeInt(32)) + } + } + case "completions": + if r.TryDecodeAsNil() { + if x.Completions != nil { + x.Completions = nil + } + } else { + if x.Completions == nil { + x.Completions = new(int32) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) + } + } + case "activeDeadlineSeconds": + if r.TryDecodeAsNil() { + if x.ActiveDeadlineSeconds != nil { + x.ActiveDeadlineSeconds = nil + } + } else { + if x.ActiveDeadlineSeconds == nil { + x.ActiveDeadlineSeconds = new(int64) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + case "manualSelector": + if r.TryDecodeAsNil() { + if x.ManualSelector != nil { + x.ManualSelector = nil + } + } else { + if x.ManualSelector == nil { + x.ManualSelector = new(bool) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(x.ManualSelector)) = r.DecodeBool() + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = pkg3_v1.PodTemplateSpec{} + } else { + yyv14 := &x.Template + yyv14.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj15 int + var yyb15 bool + var yyhl15 bool = l >= 0 + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Parallelism != nil { + x.Parallelism = nil + } + } else { + if x.Parallelism == nil { + x.Parallelism = new(int32) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Completions != nil { + x.Completions = nil + } + } else { + if x.Completions == nil { + x.Completions = new(int32) + } + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ActiveDeadlineSeconds != nil { + x.ActiveDeadlineSeconds = nil + } + } else { + if x.ActiveDeadlineSeconds == nil { + x.ActiveDeadlineSeconds = new(int64) + } + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ManualSelector != nil { + x.ManualSelector = nil + } + } else { + if x.ManualSelector == nil { + x.ManualSelector = new(bool) + } + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + *((*bool)(x.ManualSelector)) = r.DecodeBool() + } + } + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = pkg3_v1.PodTemplateSpec{} + } else { + yyv26 := &x.Template + yyv26.CodecDecodeSelf(d) + } + for { + yyj15++ + if yyhl15 { + yyb15 = yyj15 > l + } else { + yyb15 = r.CheckBreak() + } + if yyb15 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj15-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Conditions) != 0 + yyq2[1] = x.StartTime != nil + yyq2[2] = x.CompletionTime != nil + yyq2[3] = x.Active != 0 + yyq2[4] = x.Succeeded != 0 + yyq2[5] = x.Failed != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.StartTime == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.StartTime) { + } else if yym7 { + z.EncBinaryMarshal(x.StartTime) + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(x.StartTime) + } else { + 
z.EncFallback(x.StartTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("startTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.StartTime == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.StartTime) { + } else if yym8 { + z.EncBinaryMarshal(x.StartTime) + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(x.StartTime) + } else { + z.EncFallback(x.StartTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.CompletionTime == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { + } else if yym10 { + z.EncBinaryMarshal(x.CompletionTime) + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(x.CompletionTime) + } else { + z.EncFallback(x.CompletionTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("completionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.CompletionTime == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { + } else if yym11 { + z.EncBinaryMarshal(x.CompletionTime) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(x.CompletionTime) + } else { + z.EncFallback(x.CompletionTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.Active)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("active")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.Active)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.Succeeded)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("succeeded")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.Succeeded)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(x.Failed)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("failed")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.Failed)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv4 := &x.Conditions + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceJobCondition((*[]JobCondition)(yyv4), d) + } + } + case "startTime": + if r.TryDecodeAsNil() { + if x.StartTime != nil { + x.StartTime = nil + } + } else { + if x.StartTime == nil { + x.StartTime = new(pkg1_v1.Time) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.StartTime) { + } else if yym7 { + z.DecBinaryUnmarshal(x.StartTime) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.StartTime) + } else { + z.DecFallback(x.StartTime, false) + } + } + case "completionTime": + if r.TryDecodeAsNil() { + if x.CompletionTime != nil { + x.CompletionTime = nil + } + } else { + if x.CompletionTime == nil { + x.CompletionTime = new(pkg1_v1.Time) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { + } else if yym9 { + z.DecBinaryUnmarshal(x.CompletionTime) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.CompletionTime) + } else { + z.DecFallback(x.CompletionTime, false) + } + } + case "active": + if r.TryDecodeAsNil() { + x.Active = 0 + } else { + yyv10 := &x.Active + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "succeeded": + if r.TryDecodeAsNil() { + x.Succeeded = 0 + } else { + yyv12 := &x.Succeeded + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(yyv12)) = int32(r.DecodeInt(32)) + } + } + case "failed": + if r.TryDecodeAsNil() { + x.Failed = 0 + } else { + yyv14 := &x.Failed + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = 
r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv17 := &x.Conditions + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + h.decSliceJobCondition((*[]JobCondition)(yyv17), d) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.StartTime != nil { + x.StartTime = nil + } + } else { + if x.StartTime == nil { + x.StartTime = new(pkg1_v1.Time) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(x.StartTime) { + } else if yym20 { + z.DecBinaryUnmarshal(x.StartTime) + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.StartTime) + } else { + z.DecFallback(x.StartTime, false) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.CompletionTime != nil { + x.CompletionTime = nil + } + } else { + if x.CompletionTime == nil { + x.CompletionTime = new(pkg1_v1.Time) + } + yym22 := z.DecBinary() + _ = yym22 + if false { + } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { + } else if yym22 { + z.DecBinaryUnmarshal(x.CompletionTime) + } else if !yym22 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.CompletionTime) + } else { + z.DecFallback(x.CompletionTime, false) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Active = 0 + } else { + yyv23 := &x.Active + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Succeeded = 0 + } else { + yyv25 := &x.Succeeded + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Failed = 0 + } else { + yyv27 := &x.Failed + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(yyv27)) = int32(r.DecodeInt(32)) + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj16-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x JobConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() 
&& z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *JobConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf7 := &x.Status + yysf7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf8 := &x.Status + yysf8.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastProbeTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastProbeTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.LastTransitionTime + yym18 := z.EncBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } else if !yym18 && 
z.IsJSONHandle() { + z.EncJSONMarshal(yy17) + } else { + z.EncFallback(yy17) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "lastProbeTime": + if r.TryDecodeAsNil() { + x.LastProbeTime = pkg1_v1.Time{} + } else { + yyv6 := &x.LastProbeTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "lastTransitionTime": + if r.TryDecodeAsNil() { + 
x.LastTransitionTime = pkg1_v1.Time{} + } else { + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv10 := &x.Reason + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv12 := &x.Message + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv15 := &x.Type + yyv15.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv16 := &x.Status + yyv16.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastProbeTime = pkg1_v1.Time{} + } else { + yyv17 := &x.LastProbeTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg1_v1.Time{} + } else { + yyv19 := &x.LastTransitionTime + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if yym20 { + z.DecBinaryUnmarshal(yyv19) + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) + } else { + z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv21 := &x.Reason + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv23 := &x.Message + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Job{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 880) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Job, yyrl1) + } + } else { + yyv1 = make([]Job, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Job{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Job{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Job{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Job{}) // var yyz1 Job + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Job{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Job{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + 
yyv1 = []JobCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]JobCondition, yyrl1) + } + } else { + yyv1 = make([]JobCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = JobCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, JobCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = JobCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, JobCondition{}) // var yyz1 JobCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = JobCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []JobCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/types.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/types.go new file mode 100644 index 000000000..734e6204d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/types.go @@ -0,0 +1,168 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api/v1" +) + +// +genclient=true + +// Job represents the configuration of a single job. +type Job struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec is a structure defining the expected behavior of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is a structure describing current status of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// JobList is a collection of jobs. 
+type JobList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Job. + Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// JobSpec describes how the job execution will look like. +type JobSpec struct { + + // Parallelism specifies the maximum desired number of pods the job should + // run at any given time. The actual number of pods running in steady state will + // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), + // i.e. when the work left to do is less than max parallelism. + // More info: http://kubernetes.io/docs/user-guide/jobs + // +optional + Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` + + // Completions specifies the desired number of successfully finished pods the + // job should be run with. Setting to nil means that the success of any + // pod signals the success of all pods, and allows parallelism to have any positive + // value. Setting to 1 means that parallelism is limited to 1 and the success of that + // pod signals the success of the job. + // More info: http://kubernetes.io/docs/user-guide/jobs + // +optional + Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"` + + // Optional duration in seconds relative to the startTime that the job may be active + // before the system tries to terminate it; value must be positive integer + // +optional + ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"` + + // Selector is a label query over pods that should match the pod count. + // Normally, the system sets this field for you. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` + + // ManualSelector controls generation of pod labels and pod selectors. + // Leave `manualSelector` unset unless you are certain what you are doing. + // When false or unset, the system pick labels unique to this job + // and appends those labels to the pod template. When true, + // the user is responsible for picking unique labels and specifying + // the selector. Failure to pick a unique label may cause this + // and other jobs to not function correctly. However, You may see + // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` + // API. + // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md + // +optional + ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` + + // Template is the object that describes the pod that will be created when + // executing a job. + // More info: http://kubernetes.io/docs/user-guide/jobs + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` +} + +// JobStatus represents the current state of a Job. +type JobStatus struct { + + // Conditions represent the latest available observations of an object's current state. 
+ // More info: http://kubernetes.io/docs/user-guide/jobs + // +optional + Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + + // StartTime represents time when the job was acknowledged by the Job Manager. + // It is not guaranteed to be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + // +optional + StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` + + // CompletionTime represents time when the job was completed. It is not guaranteed to + // be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + // +optional + CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` + + // Active is the number of actively running pods. + // +optional + Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` + + // Succeeded is the number of pods which reached Phase Succeeded. + // +optional + Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"` + + // Failed is the number of pods which reached Phase Failed. + // +optional + Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` +} + +type JobConditionType string + +// These are valid conditions of a job. +const ( + // JobComplete means the job has completed its execution. + JobComplete JobConditionType = "Complete" + // JobFailed means the job has failed its execution. + JobFailed JobConditionType = "Failed" +) + +// JobCondition describes current state of a job. +type JobCondition struct { + // Type of job condition, Complete or Failed. + Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + // Last time the condition was checked. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` + // Last time the condition transit from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // (brief) reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000..3d224e703 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/types_swagger_doc_generated.go @@ -0,0 +1,93 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Job = map[string]string{ + "": "Job represents the configuration of a single job.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (Job) SwaggerDoc() map[string]string { + return map_Job +} + +var map_JobCondition = map[string]string{ + "": "JobCondition describes current state of a job.", + "type": "Type of job condition, Complete or Failed.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastProbeTime": "Last time the condition was checked.", + "lastTransitionTime": "Last time the condition transit from one status to another.", + "reason": "(brief) reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (JobCondition) SwaggerDoc() map[string]string { + return map_JobCondition +} + +var map_JobList = map[string]string{ + "": "JobList is a collection of jobs.", + "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is the list of Job.", +} + +func (JobList) SwaggerDoc() map[string]string { + return map_JobList +} + +var map_JobSpec = map[string]string{ + "": "JobSpec describes how the job execution will look like.", + "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://kubernetes.io/docs/user-guide/jobs", + "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://kubernetes.io/docs/user-guide/jobs", + "activeDeadlineSeconds": "Optional duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer", + "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. 
More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "manualSelector": "ManualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md", + "template": "Template is the object that describes the pod that will be created when executing a job. More info: http://kubernetes.io/docs/user-guide/jobs", +} + +func (JobSpec) SwaggerDoc() map[string]string { + return map_JobSpec +} + +var map_JobStatus = map[string]string{ + "": "JobStatus represents the current state of a Job.", + "conditions": "Conditions represent the latest available observations of an object's current state. More info: http://kubernetes.io/docs/user-guide/jobs", + "startTime": "StartTime represents time when the job was acknowledged by the Job Manager. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", + "completionTime": "CompletionTime represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", + "active": "Active is the number of actively running pods.", + "succeeded": "Succeeded is the number of pods which reached Phase Succeeded.", + "failed": "Failed is the number of pods which reached Phase Failed.", +} + +func (JobStatus) SwaggerDoc() map[string]string { + return map_JobStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.conversion.go new file mode 100644 index 000000000..c7ee0a35b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.conversion.go @@ -0,0 +1,202 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/client-go/pkg/api" + api_v1 "k8s.io/client-go/pkg/api/v1" + batch "k8s.io/client-go/pkg/apis/batch" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_Job_To_batch_Job, + Convert_batch_Job_To_v1_Job, + Convert_v1_JobCondition_To_batch_JobCondition, + Convert_batch_JobCondition_To_v1_JobCondition, + Convert_v1_JobList_To_batch_JobList, + Convert_batch_JobList_To_v1_JobList, + Convert_v1_JobSpec_To_batch_JobSpec, + Convert_batch_JobSpec_To_v1_JobSpec, + Convert_v1_JobStatus_To_batch_JobStatus, + Convert_batch_JobStatus_To_v1_JobStatus, + ) +} + +func autoConvert_v1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_JobStatus_To_batch_JobStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { + return autoConvert_v1_Job_To_batch_Job(in, out, s) +} + +func autoConvert_batch_Job_To_v1_Job(in *batch.Job, out *Job, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_batch_JobSpec_To_v1_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_batch_JobStatus_To_v1_JobStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_batch_Job_To_v1_Job(in *batch.Job, out *Job, s conversion.Scope) error { + return autoConvert_batch_Job_To_v1_Job(in, out, s) +} + +func autoConvert_v1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { + out.Type = batch.JobConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { + return autoConvert_v1_JobCondition_To_batch_JobCondition(in, out, s) +} + +func autoConvert_batch_JobCondition_To_v1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { + out.Type = JobConditionType(in.Type) + out.Status = api_v1.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_batch_JobCondition_To_v1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { + return autoConvert_batch_JobCondition_To_v1_JobCondition(in, out, s) +} + +func autoConvert_v1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]batch.Job, len(*in)) + for i := range *in { + if err := Convert_v1_Job_To_batch_Job(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { + return autoConvert_v1_JobList_To_batch_JobList(in, out, s) +} + +func autoConvert_batch_JobList_To_v1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Job, len(*in)) + for i := range *in { + if err := Convert_batch_Job_To_v1_Job(&(*in)[i], &(*out)[i], s); 
err != nil { + return err + } + } + } else { + out.Items = make([]Job, 0) + } + return nil +} + +func Convert_batch_JobList_To_v1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { + return autoConvert_batch_JobList_To_v1_JobList(in, out, s) +} + +func autoConvert_v1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { + out.Parallelism = (*int32)(unsafe.Pointer(in.Parallelism)) + out.Completions = (*int32)(unsafe.Pointer(in.Completions)) + out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + out.ManualSelector = (*bool)(unsafe.Pointer(in.ManualSelector)) + if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func autoConvert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { + out.Parallelism = (*int32)(unsafe.Pointer(in.Parallelism)) + out.Completions = (*int32)(unsafe.Pointer(in.Completions)) + out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + out.ManualSelector = (*bool)(unsafe.Pointer(in.ManualSelector)) + if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { + out.Conditions = *(*[]batch.JobCondition)(unsafe.Pointer(&in.Conditions)) + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) + out.CompletionTime = (*meta_v1.Time)(unsafe.Pointer(in.CompletionTime)) + out.Active = in.Active + out.Succeeded = in.Succeeded + out.Failed = in.Failed + return nil +} + +func Convert_v1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { + return autoConvert_v1_JobStatus_To_batch_JobStatus(in, out, s) +} + +func autoConvert_batch_JobStatus_To_v1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { + out.Conditions = *(*[]JobCondition)(unsafe.Pointer(&in.Conditions)) + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) + out.CompletionTime = (*meta_v1.Time)(unsafe.Pointer(in.CompletionTime)) + out.Active = in.Active + out.Succeeded = in.Succeeded + out.Failed = in.Failed + return nil +} + +func Convert_batch_JobStatus_To_v1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { + return autoConvert_batch_JobStatus_To_v1_JobStatus(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..5e31d5964 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.deepcopy.go @@ -0,0 +1,162 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api_v1 "k8s.io/client-go/pkg/api/v1" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_Job, InType: reflect.TypeOf(&Job{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_JobCondition, InType: reflect.TypeOf(&JobCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_JobList, InType: reflect.TypeOf(&JobList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_JobSpec, InType: reflect.TypeOf(&JobSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_JobStatus, InType: reflect.TypeOf(&JobStatus{})}, + ) +} + +func DeepCopy_v1_Job(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Job) + out := out.(*Job) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if err := DeepCopy_v1_JobSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1_JobStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_JobCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobCondition) + out := out.(*JobCondition) + *out = *in + out.LastProbeTime = in.LastProbeTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_v1_JobList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobList) + out := out.(*JobList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Job, len(*in)) + for i := range *in { + if err := DeepCopy_v1_Job(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1_JobSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobSpec) + out := out.(*JobSpec) + *out = *in + if in.Parallelism != nil { + in, out := &in.Parallelism, &out.Parallelism + *out = new(int32) + **out = **in + } + if in.Completions != nil { + in, out := &in.Completions, &out.Completions + *out = new(int32) + **out = **in + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*meta_v1.LabelSelector) + } + } + if in.ManualSelector != nil { + in, out := &in.ManualSelector, &out.ManualSelector + *out = new(bool) + **out = **in + } + if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1_JobStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobStatus) + out := out.(*JobStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, 
&out.Conditions + *out = make([]JobCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1_JobCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(meta_v1.Time) + **out = (*in).DeepCopy() + } + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + *out = new(meta_v1.Time) + **out = (*in).DeepCopy() + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.defaults.go new file mode 100644 index 000000000..417024f62 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v1/zz_generated.defaults.go @@ -0,0 +1,176 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + api_v1 "k8s.io/client-go/pkg/api/v1" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&Job{}, func(obj interface{}) { SetObjectDefaults_Job(obj.(*Job)) }) + scheme.AddTypeDefaultingFunc(&JobList{}, func(obj interface{}) { SetObjectDefaults_JobList(obj.(*JobList)) }) + return nil +} + +func SetObjectDefaults_Job(in *Job) { + SetDefaults_Job(in) + api_v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + api_v1.SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + api_v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + api_v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + api_v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + api_v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + api_v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + api_v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + api_v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + api_v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + api_v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + 
api_v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + api_v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + api_v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + api_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + api_v1.SetDefaults_ResourceList(&a.Resources.Limits) + api_v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + api_v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + api_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + api_v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + api_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + api_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + api_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + api_v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + api_v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + api_v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + api_v1.SetDefaults_ResourceList(&a.Resources.Limits) + api_v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + api_v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + api_v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + api_v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + api_v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + api_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + api_v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_JobList(in *JobList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Job(a) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/conversion.go new file mode 100644 index 000000000..2393fdca9 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/conversion.go @@ -0,0 +1,44 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + var err error + // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. + for _, k := range []string{"Job", "JobTemplate", "CronJob"} { + kind := k // don't close over range variables + err = scheme.AddFieldLabelConversionFunc("batch/v2alpha1", kind, + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", "metadata.namespace", "status.successful": + return label, value, nil + default: + return "", "", fmt.Errorf("field label %q not supported for %q", label, kind) + } + }) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/defaults.go new file mode 100644 index 000000000..6da07cc7d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/defaults.go @@ -0,0 +1,37 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + RegisterDefaults(scheme) + return scheme.AddDefaultingFuncs( + SetDefaults_CronJob, + ) +} + +func SetDefaults_CronJob(obj *CronJob) { + if obj.Spec.ConcurrencyPolicy == "" { + obj.Spec.ConcurrencyPolicy = AllowConcurrent + } + if obj.Spec.Suspend == nil { + obj.Spec.Suspend = new(bool) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/doc.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/doc.go new file mode 100644 index 000000000..a9fe60b1c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.pb.go new file mode 100644 index 000000000..81dab9908 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.pb.go @@ -0,0 +1,1505 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v2alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto + + It has these top-level messages: + CronJob + CronJobList + CronJobSpec + CronJobStatus + JobTemplate + JobTemplateSpec +*/ +package v2alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/pkg/api/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *CronJob) Reset() { *m = CronJob{} } +func (*CronJob) ProtoMessage() {} +func (*CronJob) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *CronJobList) Reset() { *m = CronJobList{} } +func (*CronJobList) ProtoMessage() {} +func (*CronJobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } +func (*CronJobSpec) ProtoMessage() {} +func (*CronJobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } +func (*CronJobStatus) ProtoMessage() {} +func (*CronJobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *JobTemplate) Reset() { *m = JobTemplate{} } +func (*JobTemplate) ProtoMessage() {} +func (*JobTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } +func (*JobTemplateSpec) ProtoMessage() {} +func (*JobTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func init() { + proto.RegisterType((*CronJob)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.CronJob") + proto.RegisterType((*CronJobList)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.CronJobList") + proto.RegisterType((*CronJobSpec)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.CronJobSpec") + proto.RegisterType((*CronJobStatus)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.CronJobStatus") + proto.RegisterType((*JobTemplate)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.JobTemplate") + proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.client-go.pkg.apis.batch.v2alpha1.JobTemplateSpec") +} +func (m *CronJob) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CronJob) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa 
+ i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *CronJobList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CronJobList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CronJobSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CronJobSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Schedule))) + i += copy(data[i:], m.Schedule) + if m.StartingDeadlineSeconds != nil { + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.StartingDeadlineSeconds)) + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ConcurrencyPolicy))) + i += copy(data[i:], m.ConcurrencyPolicy) + if m.Suspend != nil { + data[i] = 0x20 + i++ + if *m.Suspend { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + } + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(m.JobTemplate.Size())) + n5, err := m.JobTemplate.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + if m.SuccessfulJobsHistoryLimit != nil { + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.SuccessfulJobsHistoryLimit)) + } + if m.FailedJobsHistoryLimit != nil { + data[i] = 0x38 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.FailedJobsHistoryLimit)) + } + return i, nil +} + +func (m *CronJobStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CronJobStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Active) > 0 { + for _, msg := range m.Active { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.LastScheduleTime != nil { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastScheduleTime.Size())) + n6, err := m.LastScheduleTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} + +func (m *JobTemplate) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return 
data[:n], nil +} + +func (m *JobTemplate) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n8, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + return i, nil +} + +func (m *JobTemplateSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *JobTemplateSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n10, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *CronJob) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CronJobList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CronJobSpec) Size() (n int) { + var l int + _ = l + l = len(m.Schedule) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartingDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.StartingDeadlineSeconds)) + } + l = len(m.ConcurrencyPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.Suspend != nil { + n += 2 + } + l = m.JobTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.SuccessfulJobsHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.SuccessfulJobsHistoryLimit)) + } + if m.FailedJobsHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.FailedJobsHistoryLimit)) + } + return n +} + +func (m *CronJobStatus) Size() (n int) { + var l int + _ = l + if len(m.Active) > 0 { + for _, e := range m.Active { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.LastScheduleTime != nil { + l = m.LastScheduleTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *JobTemplate) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = 
m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobTemplateSpec) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CronJob) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CronJob{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronJobSpec", "CronJobSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CronJobStatus", "CronJobStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CronJobList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CronJobList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CronJob", "CronJob", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CronJobSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CronJobSpec{`, + `Schedule:` + fmt.Sprintf("%v", this.Schedule) + `,`, + `StartingDeadlineSeconds:` + valueToStringGenerated(this.StartingDeadlineSeconds) + `,`, + `ConcurrencyPolicy:` + fmt.Sprintf("%v", this.ConcurrencyPolicy) + `,`, + `Suspend:` + valueToStringGenerated(this.Suspend) + `,`, + `JobTemplate:` + strings.Replace(strings.Replace(this.JobTemplate.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, + `SuccessfulJobsHistoryLimit:` + valueToStringGenerated(this.SuccessfulJobsHistoryLimit) + `,`, + `FailedJobsHistoryLimit:` + valueToStringGenerated(this.FailedJobsHistoryLimit) + `,`, + `}`, + }, "") + return s +} +func (this *CronJobStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CronJobStatus{`, + `Active:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Active), "ObjectReference", "k8s_io_kubernetes_pkg_api_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `}`, + }, "") + return s +} +func (this *JobTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JobTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *JobTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JobTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", 
"k8s_io_kubernetes_pkg_apis_batch_v1.JobSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CronJob) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJob: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJob: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronJobList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire 
>> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJobList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJobList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CronJob{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronJobSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJobSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJobSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schedule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schedule = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartingDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + m.StartingDeadlineSeconds = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConcurrencyPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConcurrencyPolicy = ConcurrencyPolicy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Suspend = &b + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobTemplate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.JobTemplate.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessfulJobsHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SuccessfulJobsHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailedJobsHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.FailedJobsHistoryLimit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronJobStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJobStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJobStatus: illegal tag 
%d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Active = append(m.Active, k8s_io_kubernetes_pkg_api_v1.ObjectReference{}) + if err := m.Active[len(m.Active)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScheduleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScheduleTime == nil { + m.LastScheduleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + } + if err := m.LastScheduleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobTemplate) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobTemplateSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 799 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x4f, 0xe3, 0x46, + 0x18, 0xc7, 0xe3, 0x90, 0x37, 0x26, 0xa5, 0x05, 0xb7, 0x82, 0x28, 0x95, 0x9c, 0x28, 0x52, 0xa5, + 0x14, 0x81, 0x5d, 0x42, 0x85, 0x68, 0x6f, 0x35, 0x55, 0xd5, 0x22, 0xfa, 0x22, 0x07, 0xd4, 0xaa, + 0x42, 0x15, 0x63, 0xe7, 0x49, 0x32, 0xc4, 0x6f, 0xf5, 0x8c, 0xa3, 0xe6, 0xd6, 0x8f, 0xd0, 0x6f, + 0xd1, 0x6f, 0xb1, 0x97, 0xdd, 0x03, 0x47, 0x0e, 0x7b, 0xd8, 0xbd, 0x44, 0x8b, 0xf7, 0x5b, 0xec, + 0x69, 0xe5, 0x89, 0x13, 0x07, 0x1c, 0x2f, 0x61, 0x57, 0xe2, 0xe6, 0x19, 0x3f, 0xff, 0xdf, 0x3c, + 0xcf, 0xf3, 0x7f, 0x66, 0xd0, 0x37, 0x83, 0x43, 0x2a, 0x13, 0x47, 0x19, 0xf8, 0x3a, 0x78, 0x36, + 0x30, 0xa0, 0x8a, 0x3b, 0xe8, 0x29, 0xd8, 0x25, 0x54, 0xd1, 0x31, 0x33, 0xfa, 0xca, 0xb0, 0x85, + 0x4d, 0xb7, 0x8f, 0xf7, 0x94, 0x1e, 0xd8, 0xe0, 0x61, 0x06, 0x1d, 0xd9, 0xf5, 0x1c, 0xe6, 0x88, + 0x5f, 0x4e, 0xa4, 0x72, 0x2c, 0x95, 0xdd, 0x41, 0x4f, 0x0e, 0xa5, 0x32, 0x97, 0xca, 0x53, 0x69, + 0x75, 0xb7, 0x47, 0x58, 0xdf, 0xd7, 0x65, 0xc3, 0xb1, 0x94, 0x9e, 0xd3, 0x73, 0x14, 0x4e, 0xd0, + 0xfd, 0x2e, 0x5f, 0xf1, 0x05, 0xff, 0x9a, 0x90, 0xab, 0x5f, 0x47, 0x49, 0x61, 0x97, 0x58, 0xd8, + 0xe8, 0x13, 0x1b, 0xbc, 0x51, 0x9c, 0x96, 0x05, 0x0c, 0x2b, 0xc3, 0x44, 0x3e, 0x55, 0x25, 0x4d, + 0xe5, 0xf9, 0x36, 0x23, 0x16, 0x24, 0x04, 0x07, 0xf7, 0x09, 0xa8, 0xd1, 0x07, 0x0b, 0x27, 0x74, + 0xfb, 0x69, 0x3a, 0x9f, 0x11, 0x53, 0x21, 0x36, 0xa3, 0xcc, 0x4b, 0x88, 0xe6, 0x6a, 0xa2, 0xe0, + 0x0d, 0xc1, 0x8b, 0x0b, 0x82, 0x7f, 0xb0, 0xe5, 0x9a, 0xb0, 0xa8, 0xa6, 0x9d, 0x54, 0x7b, 0x16, + 0x45, 0xef, 0xdf, 0x6f, 0x66, 0x42, 0xd4, 0xf8, 0x3f, 0x8b, 0x8a, 0x47, 0x9e, 0x63, 0x1f, 0x3b, + 0xba, 0x78, 0x81, 0x4a, 0x61, 0x77, 0x3b, 0x98, 0xe1, 0x8a, 0x50, 0x17, 0x9a, 0xe5, 0xd6, 0x57, + 0x72, 0xe4, 0xf2, 0x7c, 0xb1, 0xb1, 0xcf, 0x61, 0xb4, 0x3c, 0xdc, 0x93, 0x7f, 0xd5, 0x2f, 0xc1, + 0x60, 0x3f, 0x03, 0xc3, 0xaa, 0x78, 0x35, 0xae, 0x65, 0x82, 0x71, 0x0d, 0xc5, 0x7b, 0xda, 0x8c, + 0x2a, 0xfe, 0x81, 0x72, 0xd4, 0x05, 0xa3, 0x92, 0xe5, 0xf4, 0x03, 0x79, 0xe9, 0x19, 0x92, 0xa3, + 0x1c, 0xdb, 0x2e, 0x18, 0xea, 0x47, 0xd1, 0x19, 0xb9, 0x70, 0xa5, 0x71, 0xa2, 0x78, 0x81, 0x0a, + 0x94, 0x61, 0xe6, 0xd3, 0xca, 0x0a, 0x67, 0x1f, 0xbe, 0x07, 0x9b, 0xeb, 0xd5, 0x8f, 0x23, 0x7a, + 0x61, 
0xb2, 0xd6, 0x22, 0x6e, 0xe3, 0x99, 0x80, 0xca, 0x51, 0xe4, 0x09, 0xa1, 0x4c, 0x3c, 0x4f, + 0x74, 0x4b, 0x5e, 0xae, 0x5b, 0xa1, 0x9a, 0xf7, 0x6a, 0x3d, 0x3a, 0xa9, 0x34, 0xdd, 0x99, 0xeb, + 0xd4, 0xef, 0x28, 0x4f, 0x18, 0x58, 0xb4, 0x92, 0xad, 0xaf, 0x34, 0xcb, 0xad, 0xd6, 0xc3, 0xcb, + 0x51, 0xd7, 0x22, 0x7c, 0xfe, 0xa7, 0x10, 0xa4, 0x4d, 0x78, 0x8d, 0x27, 0xb9, 0x59, 0x19, 0x61, + 0xfb, 0xc4, 0x1d, 0x54, 0x0a, 0x07, 0xbd, 0xe3, 0x9b, 0xc0, 0xcb, 0x58, 0x8d, 0xd3, 0x6a, 0x47, + 0xfb, 0xda, 0x2c, 0x42, 0x3c, 0x43, 0x5b, 0x94, 0x61, 0x8f, 0x11, 0xbb, 0xf7, 0x3d, 0xe0, 0x8e, + 0x49, 0x6c, 0x68, 0x83, 0xe1, 0xd8, 0x1d, 0xca, 0x3d, 0x5d, 0x51, 0x3f, 0x0f, 0xc6, 0xb5, 0xad, + 0xf6, 0xe2, 0x10, 0x2d, 0x4d, 0x2b, 0x9e, 0xa3, 0x0d, 0xc3, 0xb1, 0x0d, 0xdf, 0xf3, 0xc0, 0x36, + 0x46, 0xbf, 0x39, 0x26, 0x31, 0x46, 0xdc, 0xc8, 0x55, 0x55, 0x8e, 0xb2, 0xd9, 0x38, 0xba, 0x1b, + 0xf0, 0x66, 0xd1, 0xa6, 0x96, 0x04, 0x89, 0x5f, 0xa0, 0x22, 0xf5, 0xa9, 0x0b, 0x76, 0xa7, 0x92, + 0xab, 0x0b, 0xcd, 0x92, 0x5a, 0x0e, 0xc6, 0xb5, 0x62, 0x7b, 0xb2, 0xa5, 0x4d, 0xff, 0x89, 0x7f, + 0xa3, 0xf2, 0xa5, 0xa3, 0x9f, 0x82, 0xe5, 0x9a, 0x98, 0x41, 0x25, 0xcf, 0x3d, 0xfd, 0xf6, 0x01, + 0x8d, 0x3f, 0x8e, 0xd5, 0x7c, 0x4e, 0x3f, 0x8d, 0x52, 0x2f, 0xcf, 0xfd, 0xd0, 0xe6, 0xcf, 0x10, + 0xff, 0x42, 0x55, 0xea, 0x1b, 0x06, 0x50, 0xda, 0xf5, 0xcd, 0x63, 0x47, 0xa7, 0x3f, 0x12, 0xca, + 0x1c, 0x6f, 0x74, 0x42, 0x2c, 0xc2, 0x2a, 0x85, 0xba, 0xd0, 0xcc, 0xab, 0x52, 0x30, 0xae, 0x55, + 0xdb, 0xa9, 0x51, 0xda, 0x3b, 0x08, 0xa2, 0x86, 0x36, 0xbb, 0x98, 0x98, 0xd0, 0x49, 0xb0, 0x8b, + 0x9c, 0x5d, 0x0d, 0xc6, 0xb5, 0xcd, 0x1f, 0x16, 0x46, 0x68, 0x29, 0xca, 0xc6, 0x73, 0x01, 0xad, + 0xdd, 0xba, 0x31, 0xe2, 0x19, 0x2a, 0x60, 0x83, 0x91, 0x61, 0x38, 0x40, 0xe1, 0xb0, 0xee, 0xa6, + 0xf7, 0x2c, 0x7e, 0x2d, 0x34, 0xe8, 0x42, 0x68, 0x12, 0xc4, 0x17, 0xee, 0x3b, 0x0e, 0xd1, 0x22, + 0x98, 0x68, 0xa2, 0x75, 0x13, 0x53, 0x36, 0x9d, 0xc2, 0x53, 0x62, 0x01, 0xf7, 0xaf, 0xdc, 0xda, + 0x5e, 0xee, 0xa2, 0x85, 0x0a, 0xf5, 0xb3, 0x60, 0x5c, 0x5b, 0x3f, 0xb9, 0xc3, 0xd1, 0x12, 0xe4, + 0xc6, 0x4b, 0x01, 0xcd, 0xfb, 0xf4, 0x08, 0x8f, 0x61, 0x1f, 0x95, 0xd8, 0x74, 0xd8, 0xb2, 0x1f, + 0x3c, 0x6c, 0xb3, 0x5b, 0x3b, 0x9b, 0xb4, 0x19, 0xbd, 0xf1, 0x54, 0x40, 0x9f, 0xdc, 0x89, 0x7f, + 0x84, 0xfa, 0x7e, 0xb9, 0xf5, 0xd8, 0xef, 0x2c, 0x51, 0x1b, 0xaf, 0x2a, 0xed, 0x89, 0x57, 0xb7, + 0xaf, 0x6e, 0xa4, 0xcc, 0xf5, 0x8d, 0x94, 0x79, 0x71, 0x23, 0x65, 0xfe, 0x0d, 0x24, 0xe1, 0x2a, + 0x90, 0x84, 0xeb, 0x40, 0x12, 0x5e, 0x05, 0x92, 0xf0, 0xdf, 0x6b, 0x29, 0xf3, 0x67, 0x69, 0xda, + 0x9d, 0xb7, 0x01, 0x00, 0x00, 0xff, 0xff, 0x32, 0x5e, 0xac, 0x56, 0xd9, 0x08, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.proto new file mode 100644 index 000000000..4f51d6161 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/generated.proto @@ -0,0 +1,134 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.batch.v2alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v2alpha1"; + +// CronJob represents the configuration of a single cron job. +message CronJob { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec is a structure defining the expected behavior of a job, including the schedule. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional CronJobSpec spec = 2; + + // Status is a structure describing current status of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional CronJobStatus status = 3; +} + +// CronJobList is a collection of cron jobs. +message CronJobList { + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of CronJob. + repeated CronJob items = 2; +} + +// CronJobSpec describes how the job execution will look like and when it will actually run. +message CronJobSpec { + // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. + optional string schedule = 1; + + // Optional deadline in seconds for starting the job if it misses scheduled + // time for any reason. Missed jobs executions will be counted as failed ones. + // +optional + optional int64 startingDeadlineSeconds = 2; + + // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. + // +optional + optional string concurrencyPolicy = 3; + + // Suspend flag tells the controller to suspend subsequent executions, it does + // not apply to already started executions. Defaults to false. + // +optional + optional bool suspend = 4; + + // JobTemplate is the object that describes the job that will be created when + // executing a CronJob. + optional JobTemplateSpec jobTemplate = 5; + + // The number of successful finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + optional int32 successfulJobsHistoryLimit = 6; + + // The number of failed finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + optional int32 failedJobsHistoryLimit = 7; +} + +// CronJobStatus represents the current state of a cron job. +message CronJobStatus { + // Active holds pointers to currently running jobs. + // +optional + repeated k8s.io.kubernetes.pkg.api.v1.ObjectReference active = 1; + + // LastScheduleTime keeps information of when was the last time the job was successfully scheduled. 
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; +} + +// JobTemplate describes a template for creating copies of a predefined pod. +message JobTemplate { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Template defines jobs that will be created from this template + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional JobTemplateSpec template = 2; +} + +// JobTemplateSpec describes the data a Job should have when created from a template +message JobTemplateSpec { + // Standard object's metadata of the jobs created from this template. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional k8s.io.kubernetes.pkg.apis.batch.v1.JobSpec spec = 2; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/register.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/register.go new file mode 100644 index 000000000..5286ca4a0 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "batch" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &JobTemplate{}, + &CronJob{}, + &CronJobList{}, + ) + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJob"), &CronJob{}) + scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJobList"), &CronJobList{}) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.generated.go new file mode 100644 index 000000000..8dd93175f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.generated.go @@ -0,0 +1,2525 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package v2alpha1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg5_resource "k8s.io/apimachinery/pkg/api/resource" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + pkg6_intstr "k8s.io/apimachinery/pkg/util/intstr" + pkg4_v1 "k8s.io/client-go/pkg/api/v1" + pkg3_v1 "k8s.io/client-go/pkg/apis/batch/v1" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg5_resource.Quantity + var v1 pkg1_v1.TypeMeta + var v2 pkg2_types.UID + var v3 pkg6_intstr.IntOrString + var v4 pkg4_v1.PodTemplateSpec + var v5 pkg3_v1.JobSpec + var v6 time.Time + _, _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5, v6 + } +} + +func (x *JobTemplate) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Template + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Template + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else 
{ + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobTemplate) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = JobTemplateSpec{} + } else { + yyv10 := &x.Template + yyv10.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + 
} + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = JobTemplateSpec{} + } else { + yyv18 := &x.Template + yyv18.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *JobTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.ObjectMeta + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.ObjectMeta + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yy9 := &x.Spec + yy9.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.Spec + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *JobTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *JobTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv4 := &x.ObjectMeta + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = pkg3_v1.JobSpec{} + } else { + yyv6 := &x.Spec + yyv6.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *JobTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = pkg3_v1.JobSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CronJob) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { 
+ r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CronJob) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x 
*CronJob) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = CronJobSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = CronJobStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CronJob) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = CronJobSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + 
yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = CronJobStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CronJobList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceCronJob(([]CronJob)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) 
+ r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceCronJob(([]CronJob)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CronJobList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CronJobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceCronJob((*[]CronJob)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CronJobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = 
r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceCronJob((*[]CronJob)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CronJobSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.StartingDeadlineSeconds != nil + yyq2[2] = x.ConcurrencyPolicy != "" + yyq2[3] = x.Suspend != nil + yyq2[5] = x.SuccessfulJobsHistoryLimit != nil + yyq2[6] = x.FailedJobsHistoryLimit != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Schedule)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("schedule")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Schedule)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.StartingDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy7 := *x.StartingDeadlineSeconds + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(yy7)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("startingDeadlineSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.StartingDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy9 := 
*x.StartingDeadlineSeconds + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(yy9)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + x.ConcurrencyPolicy.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("concurrencyPolicy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.ConcurrencyPolicy.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Suspend == nil { + r.EncodeNil() + } else { + yy15 := *x.Suspend + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(yy15)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("suspend")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Suspend == nil { + r.EncodeNil() + } else { + yy17 := *x.Suspend + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeBool(bool(yy17)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy20 := &x.JobTemplate + yy20.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("jobTemplate")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.JobTemplate + yy22.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.SuccessfulJobsHistoryLimit == nil { + r.EncodeNil() + } else { + yy25 := *x.SuccessfulJobsHistoryLimit + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeInt(int64(yy25)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("successfulJobsHistoryLimit")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SuccessfulJobsHistoryLimit == nil { + r.EncodeNil() + } else { + yy27 := *x.SuccessfulJobsHistoryLimit + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeInt(int64(yy27)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.FailedJobsHistoryLimit == nil { + r.EncodeNil() + } else { + yy30 := *x.FailedJobsHistoryLimit + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeInt(int64(yy30)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("failedJobsHistoryLimit")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.FailedJobsHistoryLimit == nil { + r.EncodeNil() + } else { + yy32 := *x.FailedJobsHistoryLimit + yym33 := z.EncBinary() + _ = yym33 + if false { + } else { + r.EncodeInt(int64(yy32)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CronJobSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { 
+ } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CronJobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "schedule": + if r.TryDecodeAsNil() { + x.Schedule = "" + } else { + yyv4 := &x.Schedule + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "startingDeadlineSeconds": + if r.TryDecodeAsNil() { + if x.StartingDeadlineSeconds != nil { + x.StartingDeadlineSeconds = nil + } + } else { + if x.StartingDeadlineSeconds == nil { + x.StartingDeadlineSeconds = new(int64) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + case "concurrencyPolicy": + if r.TryDecodeAsNil() { + x.ConcurrencyPolicy = "" + } else { + yyv8 := &x.ConcurrencyPolicy + yyv8.CodecDecodeSelf(d) + } + case "suspend": + if r.TryDecodeAsNil() { + if x.Suspend != nil { + x.Suspend = nil + } + } else { + if x.Suspend == nil { + x.Suspend = new(bool) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*bool)(x.Suspend)) = r.DecodeBool() + } + } + case "jobTemplate": + if r.TryDecodeAsNil() { + x.JobTemplate = JobTemplateSpec{} + } else { + yyv11 := &x.JobTemplate + yyv11.CodecDecodeSelf(d) + } + case "successfulJobsHistoryLimit": + if r.TryDecodeAsNil() { + if x.SuccessfulJobsHistoryLimit != nil { + x.SuccessfulJobsHistoryLimit = nil + } + } else { + if x.SuccessfulJobsHistoryLimit == nil { + x.SuccessfulJobsHistoryLimit = new(int32) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(x.SuccessfulJobsHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + case "failedJobsHistoryLimit": + if r.TryDecodeAsNil() { + if x.FailedJobsHistoryLimit != nil { + x.FailedJobsHistoryLimit = nil + } + } else { + if x.FailedJobsHistoryLimit == nil { + x.FailedJobsHistoryLimit = new(int32) + } + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(x.FailedJobsHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CronJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Schedule = "" + } else { + yyv17 := &x.Schedule + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.StartingDeadlineSeconds != nil { + x.StartingDeadlineSeconds = nil + } + } else { + if x.StartingDeadlineSeconds == nil { + x.StartingDeadlineSeconds = new(int64) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ConcurrencyPolicy = "" + } else { + yyv21 := &x.ConcurrencyPolicy + yyv21.CodecDecodeSelf(d) + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Suspend != nil { + x.Suspend = nil + } + } else { + if x.Suspend == nil { + x.Suspend = new(bool) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else { + *((*bool)(x.Suspend)) = r.DecodeBool() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.JobTemplate = JobTemplateSpec{} + } else { + yyv24 := &x.JobTemplate + yyv24.CodecDecodeSelf(d) + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SuccessfulJobsHistoryLimit != nil { + x.SuccessfulJobsHistoryLimit = nil + } + } else { + if x.SuccessfulJobsHistoryLimit == nil { + x.SuccessfulJobsHistoryLimit = new(int32) + } + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(x.SuccessfulJobsHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.FailedJobsHistoryLimit != nil { + x.FailedJobsHistoryLimit = nil + } + } else { + if x.FailedJobsHistoryLimit == nil { + x.FailedJobsHistoryLimit = new(int32) + } + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(x.FailedJobsHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj16-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ConcurrencyPolicy) 
CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ConcurrencyPolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *CronJobStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Active) != 0 + yyq2[1] = x.LastScheduleTime != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Active == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSlicev1_ObjectReference(([]pkg4_v1.ObjectReference)(x.Active), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("active")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Active == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSlicev1_ObjectReference(([]pkg4_v1.ObjectReference)(x.Active), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.LastScheduleTime == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) { + } else if yym7 { + z.EncBinaryMarshal(x.LastScheduleTime) + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(x.LastScheduleTime) + } else { + z.EncFallback(x.LastScheduleTime) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastScheduleTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.LastScheduleTime == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) { + } else if yym8 { + z.EncBinaryMarshal(x.LastScheduleTime) + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(x.LastScheduleTime) + } else { + z.EncFallback(x.LastScheduleTime) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CronJobStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := 
r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CronJobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "active": + if r.TryDecodeAsNil() { + x.Active = nil + } else { + yyv4 := &x.Active + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSlicev1_ObjectReference((*[]pkg4_v1.ObjectReference)(yyv4), d) + } + } + case "lastScheduleTime": + if r.TryDecodeAsNil() { + if x.LastScheduleTime != nil { + x.LastScheduleTime = nil + } + } else { + if x.LastScheduleTime == nil { + x.LastScheduleTime = new(pkg1_v1.Time) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) { + } else if yym7 { + z.DecBinaryUnmarshal(x.LastScheduleTime) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.LastScheduleTime) + } else { + z.DecFallback(x.LastScheduleTime, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CronJobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Active = nil + } else { + yyv9 := &x.Active + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSlicev1_ObjectReference((*[]pkg4_v1.ObjectReference)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.LastScheduleTime != nil { + x.LastScheduleTime = nil + } + } else { + if x.LastScheduleTime == nil { + x.LastScheduleTime = new(pkg1_v1.Time) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) { + } else if yym12 { + z.DecBinaryUnmarshal(x.LastScheduleTime) + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.LastScheduleTime) + } else { + z.DecFallback(x.LastScheduleTime, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceCronJob(v []CronJob, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCronJob(v *[]CronJob, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []CronJob{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 1144) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]CronJob, yyrl1) + } + } else { + yyv1 = make([]CronJob, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CronJob{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, CronJob{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CronJob{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, CronJob{}) // var yyz1 CronJob + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = CronJob{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []CronJob{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicev1_ObjectReference(v []pkg4_v1.ObjectReference, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicev1_ObjectReference(v *[]pkg4_v1.ObjectReference, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg4_v1.ObjectReference{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, 
z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]pkg4_v1.ObjectReference, yyrl1) + } + } else { + yyv1 = make([]pkg4_v1.ObjectReference, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg4_v1.ObjectReference{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, pkg4_v1.ObjectReference{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg4_v1.ObjectReference{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, pkg4_v1.ObjectReference{}) // var yyz1 pkg4_v1.ObjectReference + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = pkg4_v1.ObjectReference{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg4_v1.ObjectReference{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.go new file mode 100644 index 000000000..67f1c95e4 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.go @@ -0,0 +1,147 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api/v1" + batchv1 "k8s.io/client-go/pkg/apis/batch/v1" +) + +// JobTemplate describes a template for creating copies of a predefined pod. +type JobTemplate struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Template defines jobs that will be created from this template + // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` +} + +// JobTemplateSpec describes the data a Job should have when created from a template +type JobTemplateSpec struct { + // Standard object's metadata of the jobs created from this template. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the job. 
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +genclient=true + +// CronJob represents the configuration of a single cron job. +type CronJob struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec is a structure defining the expected behavior of a job, including the schedule. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is a structure describing current status of a job. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// CronJobList is a collection of cron jobs. +type CronJobList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of CronJob. + Items []CronJob `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// CronJobSpec describes how the job execution will look like and when it will actually run. +type CronJobSpec struct { + + // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. + Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"` + + // Optional deadline in seconds for starting the job if it misses scheduled + // time for any reason. Missed jobs executions will be counted as failed ones. + // +optional + StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"` + + // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. + // +optional + ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` + + // Suspend flag tells the controller to suspend subsequent executions, it does + // not apply to already started executions. Defaults to false. + // +optional + Suspend *bool `json:"suspend,omitempty" protobuf:"varint,4,opt,name=suspend"` + + // JobTemplate is the object that describes the job that will be created when + // executing a CronJob. + JobTemplate JobTemplateSpec `json:"jobTemplate" protobuf:"bytes,5,opt,name=jobTemplate"` + + // The number of successful finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty" protobuf:"varint,6,opt,name=successfulJobsHistoryLimit"` + + // The number of failed finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty" protobuf:"varint,7,opt,name=failedJobsHistoryLimit"` +} + +// ConcurrencyPolicy describes how the job will be handled. +// Only one of the following concurrent policies may be specified. 
+// If none of the following policies is specified, the default one +// is AllowConcurrent. +type ConcurrencyPolicy string + +const ( + // AllowConcurrent allows CronJobs to run concurrently. + AllowConcurrent ConcurrencyPolicy = "Allow" + + // ForbidConcurrent forbids concurrent runs, skipping next run if previous + // hasn't finished yet. + ForbidConcurrent ConcurrencyPolicy = "Forbid" + + // ReplaceConcurrent cancels currently running job and replaces it with a new one. + ReplaceConcurrent ConcurrencyPolicy = "Replace" +) + +// CronJobStatus represents the current state of a cron job. +type CronJobStatus struct { + // Active holds pointers to currently running jobs. + // +optional + Active []v1.ObjectReference `json:"active,omitempty" protobuf:"bytes,1,rep,name=active"` + + // LastScheduleTime keeps information of when was the last time the job was successfully scheduled. + // +optional + LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty" protobuf:"bytes,4,opt,name=lastScheduleTime"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..c0b53b8e0 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go @@ -0,0 +1,96 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_CronJob = map[string]string{ + "": "CronJob represents the configuration of a single cron job.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec is a structure defining the expected behavior of a job, including the schedule. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is a structure describing current status of a job. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (CronJob) SwaggerDoc() map[string]string { + return map_CronJob +} + +var map_CronJobList = map[string]string{ + "": "CronJobList is a collection of cron jobs.", + "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is the list of CronJob.", +} + +func (CronJobList) SwaggerDoc() map[string]string { + return map_CronJobList +} + +var map_CronJobSpec = map[string]string{ + "": "CronJobSpec describes how the job execution will look like and when it will actually run.", + "schedule": "Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", + "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", + "concurrencyPolicy": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.", + "suspend": "Suspend flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", + "jobTemplate": "JobTemplate is the object that describes the job that will be created when executing a CronJob.", + "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.", + "failedJobsHistoryLimit": "The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.", +} + +func (CronJobSpec) SwaggerDoc() map[string]string { + return map_CronJobSpec +} + +var map_CronJobStatus = map[string]string{ + "": "CronJobStatus represents the current state of a cron job.", + "active": "Active holds pointers to currently running jobs.", + "lastScheduleTime": "LastScheduleTime keeps information of when was the last time the job was successfully scheduled.", +} + +func (CronJobStatus) SwaggerDoc() map[string]string { + return map_CronJobStatus +} + +var map_JobTemplate = map[string]string{ + "": "JobTemplate describes a template for creating copies of a predefined pod.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "template": "Template defines jobs that will be created from this template http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (JobTemplate) SwaggerDoc() map[string]string { + return map_JobTemplate +} + +var map_JobTemplateSpec = map[string]string{ + "": "JobTemplateSpec describes the data a Job should have when created from a template", + "metadata": "Standard object's metadata of the jobs created from this template. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the job. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (JobTemplateSpec) SwaggerDoc() map[string]string { + return map_JobTemplateSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.conversion.go new file mode 100644 index 000000000..b268575cd --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.conversion.go @@ -0,0 +1,227 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v2alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/client-go/pkg/api" + api_v1 "k8s.io/client-go/pkg/api/v1" + batch "k8s.io/client-go/pkg/apis/batch" + batch_v1 "k8s.io/client-go/pkg/apis/batch/v1" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
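A minimal sketch of how a consumer such as kubewatch might construct one of the batch/v2alpha1 objects declared in types.go above. This is not part of the vendored files in this diff; the object name, namespace, and schedule are invented for illustration, and only the CronJob, CronJobSpec, and ConcurrencyPolicy types shown earlier plus the standard metav1.ObjectMeta are assumed.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	batchv2alpha1 "k8s.io/client-go/pkg/apis/batch/v2alpha1"
)

func main() {
	suspend := false
	// Build a CronJob using only the fields declared in the v2alpha1 types above.
	cj := batchv2alpha1.CronJob{
		ObjectMeta: metav1.ObjectMeta{Name: "example-report", Namespace: "default"},
		Spec: batchv2alpha1.CronJobSpec{
			Schedule:          "*/5 * * * *",                  // standard cron format
			ConcurrencyPolicy: batchv2alpha1.ForbidConcurrent, // skip the next run if the previous one is still active
			Suspend:           &suspend,                       // pointer so "unset" and explicit "false" stay distinct
			// JobTemplate is left at its zero value here; a real CronJob needs a pod template in JobTemplate.Spec.
		},
	}
	fmt.Printf("cron job %s/%s runs on schedule %q\n", cj.Namespace, cj.Name, cj.Spec.Schedule)
}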
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v2alpha1_CronJob_To_batch_CronJob, + Convert_batch_CronJob_To_v2alpha1_CronJob, + Convert_v2alpha1_CronJobList_To_batch_CronJobList, + Convert_batch_CronJobList_To_v2alpha1_CronJobList, + Convert_v2alpha1_CronJobSpec_To_batch_CronJobSpec, + Convert_batch_CronJobSpec_To_v2alpha1_CronJobSpec, + Convert_v2alpha1_CronJobStatus_To_batch_CronJobStatus, + Convert_batch_CronJobStatus_To_v2alpha1_CronJobStatus, + Convert_v2alpha1_JobTemplate_To_batch_JobTemplate, + Convert_batch_JobTemplate_To_v2alpha1_JobTemplate, + Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec, + Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec, + ) +} + +func autoConvert_v2alpha1_CronJob_To_batch_CronJob(in *CronJob, out *batch.CronJob, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v2alpha1_CronJobSpec_To_batch_CronJobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v2alpha1_CronJobStatus_To_batch_CronJobStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v2alpha1_CronJob_To_batch_CronJob(in *CronJob, out *batch.CronJob, s conversion.Scope) error { + return autoConvert_v2alpha1_CronJob_To_batch_CronJob(in, out, s) +} + +func autoConvert_batch_CronJob_To_v2alpha1_CronJob(in *batch.CronJob, out *CronJob, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_batch_CronJobSpec_To_v2alpha1_CronJobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_batch_CronJobStatus_To_v2alpha1_CronJobStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_batch_CronJob_To_v2alpha1_CronJob(in *batch.CronJob, out *CronJob, s conversion.Scope) error { + return autoConvert_batch_CronJob_To_v2alpha1_CronJob(in, out, s) +} + +func autoConvert_v2alpha1_CronJobList_To_batch_CronJobList(in *CronJobList, out *batch.CronJobList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]batch.CronJob, len(*in)) + for i := range *in { + if err := Convert_v2alpha1_CronJob_To_batch_CronJob(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v2alpha1_CronJobList_To_batch_CronJobList(in *CronJobList, out *batch.CronJobList, s conversion.Scope) error { + return autoConvert_v2alpha1_CronJobList_To_batch_CronJobList(in, out, s) +} + +func autoConvert_batch_CronJobList_To_v2alpha1_CronJobList(in *batch.CronJobList, out *CronJobList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CronJob, len(*in)) + for i := range *in { + if err := Convert_batch_CronJob_To_v2alpha1_CronJob(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]CronJob, 0) + } + return nil +} + +func Convert_batch_CronJobList_To_v2alpha1_CronJobList(in *batch.CronJobList, out *CronJobList, s conversion.Scope) error { + return autoConvert_batch_CronJobList_To_v2alpha1_CronJobList(in, out, s) +} + +func autoConvert_v2alpha1_CronJobSpec_To_batch_CronJobSpec(in *CronJobSpec, out *batch.CronJobSpec, s conversion.Scope) error { + out.Schedule = in.Schedule + out.StartingDeadlineSeconds = (*int64)(unsafe.Pointer(in.StartingDeadlineSeconds)) + out.ConcurrencyPolicy = 
batch.ConcurrencyPolicy(in.ConcurrencyPolicy) + out.Suspend = (*bool)(unsafe.Pointer(in.Suspend)) + if err := Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil { + return err + } + out.SuccessfulJobsHistoryLimit = (*int32)(unsafe.Pointer(in.SuccessfulJobsHistoryLimit)) + out.FailedJobsHistoryLimit = (*int32)(unsafe.Pointer(in.FailedJobsHistoryLimit)) + return nil +} + +func Convert_v2alpha1_CronJobSpec_To_batch_CronJobSpec(in *CronJobSpec, out *batch.CronJobSpec, s conversion.Scope) error { + return autoConvert_v2alpha1_CronJobSpec_To_batch_CronJobSpec(in, out, s) +} + +func autoConvert_batch_CronJobSpec_To_v2alpha1_CronJobSpec(in *batch.CronJobSpec, out *CronJobSpec, s conversion.Scope) error { + out.Schedule = in.Schedule + out.StartingDeadlineSeconds = (*int64)(unsafe.Pointer(in.StartingDeadlineSeconds)) + out.ConcurrencyPolicy = ConcurrencyPolicy(in.ConcurrencyPolicy) + out.Suspend = (*bool)(unsafe.Pointer(in.Suspend)) + if err := Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil { + return err + } + out.SuccessfulJobsHistoryLimit = (*int32)(unsafe.Pointer(in.SuccessfulJobsHistoryLimit)) + out.FailedJobsHistoryLimit = (*int32)(unsafe.Pointer(in.FailedJobsHistoryLimit)) + return nil +} + +func Convert_batch_CronJobSpec_To_v2alpha1_CronJobSpec(in *batch.CronJobSpec, out *CronJobSpec, s conversion.Scope) error { + return autoConvert_batch_CronJobSpec_To_v2alpha1_CronJobSpec(in, out, s) +} + +func autoConvert_v2alpha1_CronJobStatus_To_batch_CronJobStatus(in *CronJobStatus, out *batch.CronJobStatus, s conversion.Scope) error { + out.Active = *(*[]api.ObjectReference)(unsafe.Pointer(&in.Active)) + out.LastScheduleTime = (*v1.Time)(unsafe.Pointer(in.LastScheduleTime)) + return nil +} + +func Convert_v2alpha1_CronJobStatus_To_batch_CronJobStatus(in *CronJobStatus, out *batch.CronJobStatus, s conversion.Scope) error { + return autoConvert_v2alpha1_CronJobStatus_To_batch_CronJobStatus(in, out, s) +} + +func autoConvert_batch_CronJobStatus_To_v2alpha1_CronJobStatus(in *batch.CronJobStatus, out *CronJobStatus, s conversion.Scope) error { + out.Active = *(*[]api_v1.ObjectReference)(unsafe.Pointer(&in.Active)) + out.LastScheduleTime = (*v1.Time)(unsafe.Pointer(in.LastScheduleTime)) + return nil +} + +func Convert_batch_CronJobStatus_To_v2alpha1_CronJobStatus(in *batch.CronJobStatus, out *CronJobStatus, s conversion.Scope) error { + return autoConvert_batch_CronJobStatus_To_v2alpha1_CronJobStatus(in, out, s) +} + +func autoConvert_v2alpha1_JobTemplate_To_batch_JobTemplate(in *JobTemplate, out *batch.JobTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_v2alpha1_JobTemplate_To_batch_JobTemplate(in *JobTemplate, out *batch.JobTemplate, s conversion.Scope) error { + return autoConvert_v2alpha1_JobTemplate_To_batch_JobTemplate(in, out, s) +} + +func autoConvert_batch_JobTemplate_To_v2alpha1_JobTemplate(in *batch.JobTemplate, out *JobTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_batch_JobTemplate_To_v2alpha1_JobTemplate(in *batch.JobTemplate, out *JobTemplate, s conversion.Scope) error { + return 
autoConvert_batch_JobTemplate_To_v2alpha1_JobTemplate(in, out, s) +} + +func autoConvert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in *JobTemplateSpec, out *batch.JobTemplateSpec, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := batch_v1.Convert_v1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in *JobTemplateSpec, out *batch.JobTemplateSpec, s conversion.Scope) error { + return autoConvert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in, out, s) +} + +func autoConvert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in *batch.JobTemplateSpec, out *JobTemplateSpec, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := batch_v1.Convert_batch_JobSpec_To_v1_JobSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in *batch.JobTemplateSpec, out *JobTemplateSpec, s conversion.Scope) error { + return autoConvert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..8c9010d9f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.deepcopy.go @@ -0,0 +1,170 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v2alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api_v1 "k8s.io/client-go/pkg/api/v1" + batch_v1 "k8s.io/client-go/pkg/apis/batch/v1" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
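A minimal sketch, not part of the generated file, of one way the generated deep-copy function defined below could be invoked directly. It assumes it sits alongside this package (so the existing conversion import is in scope) and that the caller supplies the *conversion.Cloner, for example the one wired into a registered scheme.

// cloneCronJob is a hypothetical helper, not part of this diff, showing how the
// generated deep-copy function might be called. The caller provides the cloner.
func cloneCronJob(in *CronJob, c *conversion.Cloner) (*CronJob, error) {
	out := &CronJob{}
	// DeepCopy_v2alpha1_CronJob (defined later in this file) copies ObjectMeta, Spec, and Status.
	if err := DeepCopy_v2alpha1_CronJob(in, out, c); err != nil {
		return nil, err
	}
	return out, nil
}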
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_CronJob, InType: reflect.TypeOf(&CronJob{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_CronJobList, InType: reflect.TypeOf(&CronJobList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_CronJobSpec, InType: reflect.TypeOf(&CronJobSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_CronJobStatus, InType: reflect.TypeOf(&CronJobStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_JobTemplate, InType: reflect.TypeOf(&JobTemplate{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v2alpha1_JobTemplateSpec, InType: reflect.TypeOf(&JobTemplateSpec{})}, + ) +} + +func DeepCopy_v2alpha1_CronJob(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CronJob) + out := out.(*CronJob) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v2alpha1_CronJobSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v2alpha1_CronJobStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v2alpha1_CronJobList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CronJobList) + out := out.(*CronJobList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CronJob, len(*in)) + for i := range *in { + if err := DeepCopy_v2alpha1_CronJob(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v2alpha1_CronJobSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CronJobSpec) + out := out.(*CronJobSpec) + *out = *in + if in.StartingDeadlineSeconds != nil { + in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.Suspend != nil { + in, out := &in.Suspend, &out.Suspend + *out = new(bool) + **out = **in + } + if err := DeepCopy_v2alpha1_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, c); err != nil { + return err + } + if in.SuccessfulJobsHistoryLimit != nil { + in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit + *out = new(int32) + **out = **in + } + if in.FailedJobsHistoryLimit != nil { + in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_v2alpha1_CronJobStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CronJobStatus) + out := out.(*CronJobStatus) + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = make([]api_v1.ObjectReference, len(*in)) + copy(*out, *in) + } + if in.LastScheduleTime != nil { + in, out := &in.LastScheduleTime, &out.LastScheduleTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + return nil + } +} + +func DeepCopy_v2alpha1_JobTemplate(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobTemplate) + out := out.(*JobTemplate) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v2alpha1_JobTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v2alpha1_JobTemplateSpec(in interface{}, out interface{}, c 
*conversion.Cloner) error { + { + in := in.(*JobTemplateSpec) + out := out.(*JobTemplateSpec) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := batch_v1.DeepCopy_v1_JobSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.defaults.go new file mode 100644 index 000000000..1c0bd0ee0 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/v2alpha1/zz_generated.defaults.go @@ -0,0 +1,310 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v2alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/client-go/pkg/api/v1" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&CronJob{}, func(obj interface{}) { SetObjectDefaults_CronJob(obj.(*CronJob)) }) + scheme.AddTypeDefaultingFunc(&CronJobList{}, func(obj interface{}) { SetObjectDefaults_CronJobList(obj.(*CronJobList)) }) + scheme.AddTypeDefaultingFunc(&JobTemplate{}, func(obj interface{}) { SetObjectDefaults_JobTemplate(obj.(*JobTemplate)) }) + return nil +} + +func SetObjectDefaults_CronJob(in *CronJob) { + SetDefaults_CronJob(in) + v1.SetDefaults_PodSpec(&in.Spec.JobTemplate.Spec.Template.Spec) + for i := range in.Spec.JobTemplate.Spec.Template.Spec.Volumes { + a := &in.Spec.JobTemplate.Spec.Template.Spec.Volumes[i] + v1.SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + 
v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.JobTemplate.Spec.Template.Spec.InitContainers { + a := &in.Spec.JobTemplate.Spec.Template.Spec.InitContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.JobTemplate.Spec.Template.Spec.Containers { + a := &in.Spec.JobTemplate.Spec.Template.Spec.Containers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_CronJobList(in *CronJobList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_CronJob(a) + } +} + +func SetObjectDefaults_JobTemplate(in *JobTemplate) { + v1.SetDefaults_PodSpec(&in.Template.Spec.Template.Spec) + for i := range in.Template.Spec.Template.Spec.Volumes { + a := &in.Template.Spec.Template.Spec.Volumes[i] + v1.SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + 
v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Template.Spec.Template.Spec.InitContainers { + a := &in.Template.Spec.Template.Spec.InitContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Template.Spec.Template.Spec.Containers { + a := &in.Template.Spec.Template.Spec.Containers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/batch/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/batch/zz_generated.deepcopy.go new file mode 100644 index 000000000..6e75ecdc0 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/batch/zz_generated.deepcopy.go @@ -0,0 +1,291 @@ +// +build !ignore_autogenerated + +/* +Copyright 
2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package batch + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/client-go/pkg/api" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_CronJob, InType: reflect.TypeOf(&CronJob{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_CronJobList, InType: reflect.TypeOf(&CronJobList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_CronJobSpec, InType: reflect.TypeOf(&CronJobSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_CronJobStatus, InType: reflect.TypeOf(&CronJobStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_Job, InType: reflect.TypeOf(&Job{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_JobCondition, InType: reflect.TypeOf(&JobCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_JobList, InType: reflect.TypeOf(&JobList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_JobSpec, InType: reflect.TypeOf(&JobSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_JobStatus, InType: reflect.TypeOf(&JobStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_JobTemplate, InType: reflect.TypeOf(&JobTemplate{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_batch_JobTemplateSpec, InType: reflect.TypeOf(&JobTemplateSpec{})}, + ) +} + +func DeepCopy_batch_CronJob(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CronJob) + out := out.(*CronJob) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_batch_CronJobSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_batch_CronJobStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_batch_CronJobList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CronJobList) + out := out.(*CronJobList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CronJob, len(*in)) + for i := range *in { + if err := DeepCopy_batch_CronJob(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_batch_CronJobSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CronJobSpec) + out := out.(*CronJobSpec) + *out = *in + if in.StartingDeadlineSeconds != nil { + in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds + *out = new(int64) + **out = **in + } + if 
in.Suspend != nil { + in, out := &in.Suspend, &out.Suspend + *out = new(bool) + **out = **in + } + if err := DeepCopy_batch_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, c); err != nil { + return err + } + if in.SuccessfulJobsHistoryLimit != nil { + in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit + *out = new(int32) + **out = **in + } + if in.FailedJobsHistoryLimit != nil { + in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_batch_CronJobStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CronJobStatus) + out := out.(*CronJobStatus) + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = make([]api.ObjectReference, len(*in)) + copy(*out, *in) + } + if in.LastScheduleTime != nil { + in, out := &in.LastScheduleTime, &out.LastScheduleTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + return nil + } +} + +func DeepCopy_batch_Job(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Job) + out := out.(*Job) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_batch_JobSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_batch_JobStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_batch_JobCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobCondition) + out := out.(*JobCondition) + *out = *in + out.LastProbeTime = in.LastProbeTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_batch_JobList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobList) + out := out.(*JobList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Job, len(*in)) + for i := range *in { + if err := DeepCopy_batch_Job(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_batch_JobSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobSpec) + out := out.(*JobSpec) + *out = *in + if in.Parallelism != nil { + in, out := &in.Parallelism, &out.Parallelism + *out = new(int32) + **out = **in + } + if in.Completions != nil { + in, out := &in.Completions, &out.Completions + *out = new(int32) + **out = **in + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if in.ManualSelector != nil { + in, out := &in.ManualSelector, &out.ManualSelector + *out = new(bool) + **out = **in + } + if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_batch_JobStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobStatus) + out := out.(*JobStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]JobCondition, len(*in)) + for i := range *in { + if err := DeepCopy_batch_JobCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if 
in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + *out = new(v1.Time) + **out = (*in).DeepCopy() + } + return nil + } +} + +func DeepCopy_batch_JobTemplate(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobTemplate) + out := out.(*JobTemplate) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_batch_JobTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_batch_JobTemplateSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*JobTemplateSpec) + out := out.(*JobTemplateSpec) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_batch_JobSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/OWNERS b/vendor/k8s.io/client-go/pkg/apis/certificates/OWNERS new file mode 100755 index 000000000..c67bd1172 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/OWNERS @@ -0,0 +1,14 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- deads2k +- caesarxuchao +- liggitt +- sttts +- timothysc +- dims +- errordeveloper +- mbohlool +- david-mcmahon +- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/doc.go b/vendor/k8s.io/client-go/pkg/apis/certificates/doc.go new file mode 100644 index 000000000..a10177469 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=certificates.k8s.io +package certificates diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/helpers.go b/vendor/k8s.io/client-go/pkg/apis/certificates/helpers.go new file mode 100644 index 000000000..2608e4076 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/helpers.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certificates + +import ( + "crypto/x509" + "encoding/pem" + "errors" +) + +// ParseCSR extracts the CSR from the API object and decodes it. 
+func ParseCSR(obj *CertificateSigningRequest) (*x509.CertificateRequest, error) { + // extract PEM from request object + pemBytes := obj.Spec.Request + block, _ := pem.Decode(pemBytes) + if block == nil || block.Type != "CERTIFICATE REQUEST" { + return nil, errors.New("PEM block type must be CERTIFICATE REQUEST") + } + csr, err := x509.ParseCertificateRequest(block.Bytes) + if err != nil { + return nil, err + } + return csr, nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/install/install.go b/vendor/k8s.io/client-go/pkg/apis/certificates/install/install.go new file mode 100644 index 000000000..8850e07aa --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/install/install.go @@ -0,0 +1,51 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the certificates API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/certificates" + "k8s.io/client-go/pkg/apis/certificates/v1beta1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: certificates.GroupName, + VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/certificates", + RootScopedKinds: sets.NewString("CertificateSigningRequest"), + AddInternalObjectsToScheme: certificates.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/register.go b/vendor/k8s.io/client-go/pkg/apis/certificates/register.go new file mode 100644 index 000000000..f9d228d00 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package certificates + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// GroupName is the group name use in this package +const GroupName = "certificates.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CertificateSigningRequest{}, + &CertificateSigningRequestList{}, + ) + return nil +} + +func (obj *CertificateSigningRequest) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } +func (obj *CertificateSigningRequestList) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/types.go b/vendor/k8s.io/client-go/pkg/apis/certificates/types.go new file mode 100644 index 000000000..4a7884a1e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/types.go @@ -0,0 +1,143 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certificates + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient=true +// +nonNamespaced=true + +// Describes a certificate signing request +type CertificateSigningRequest struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // The certificate request itself and any additional information. + // +optional + Spec CertificateSigningRequestSpec + + // Derived information about the request. + // +optional + Status CertificateSigningRequestStatus +} + +// This information is immutable after the request is created. Only the Request +// and Usages fields can be set on creation, other fields are derived by +// Kubernetes and cannot be modified by users. +type CertificateSigningRequestSpec struct { + // Base64-encoded PKCS#10 CSR data + Request []byte + + // usages specifies a set of usage contexts the key will be + // valid for. + // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + Usages []KeyUsage + + // Information about the requesting user. + // See user.Info interface for details. + // +optional + Username string + // UID information about the requesting user. + // See user.Info interface for details. + // +optional + UID string + // Group information about the requesting user. + // See user.Info interface for details. 
+ // +optional + Groups []string + // Extra information about the requesting user. + // See user.Info interface for details. + // +optional + Extra map[string]ExtraValue +} + +// ExtraValue masks the value so protobuf can generate +type ExtraValue []string + +type CertificateSigningRequestStatus struct { + // Conditions applied to the request, such as approval or denial. + // +optional + Conditions []CertificateSigningRequestCondition + + // If request was approved, the controller will place the issued certificate here. + // +optional + Certificate []byte +} + +type RequestConditionType string + +// These are the possible conditions for a certificate request. +const ( + CertificateApproved RequestConditionType = "Approved" + CertificateDenied RequestConditionType = "Denied" +) + +type CertificateSigningRequestCondition struct { + // request approval state, currently Approved or Denied. + Type RequestConditionType + // brief reason for the request state + // +optional + Reason string + // human readable message with details about the request state + // +optional + Message string + // timestamp for the last update to this condition + // +optional + LastUpdateTime metav1.Time +} + +type CertificateSigningRequestList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // +optional + Items []CertificateSigningRequest +} + +// KeyUsages specifies valid usage contexts for keys. +// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 +// https://tools.ietf.org/html/rfc5280#section-4.2.1.12 +type KeyUsage string + +const ( + UsageSigning KeyUsage = "signing" + UsageDigitalSignature KeyUsage = "digital signature" + UsageContentCommittment KeyUsage = "content committment" + UsageKeyEncipherment KeyUsage = "key encipherment" + UsageKeyAgreement KeyUsage = "key agreement" + UsageDataEncipherment KeyUsage = "data encipherment" + UsageCertSign KeyUsage = "cert sign" + UsageCRLSign KeyUsage = "crl sign" + UsageEncipherOnly KeyUsage = "encipher only" + UsageDecipherOnly KeyUsage = "decipher only" + UsageAny KeyUsage = "any" + UsageServerAuth KeyUsage = "server auth" + UsageClientAuth KeyUsage = "client auth" + UsageCodeSigning KeyUsage = "code signing" + UsageEmailProtection KeyUsage = "email protection" + UsageSMIME KeyUsage = "s/mime" + UsageIPsecEndSystem KeyUsage = "ipsec end system" + UsageIPsecTunnel KeyUsage = "ipsec tunnel" + UsageIPsecUser KeyUsage = "ipsec user" + UsageTimestamping KeyUsage = "timestamping" + UsageOCSPSigning KeyUsage = "ocsp signing" + UsageMicrosoftSGC KeyUsage = "microsoft sgc" + UsageNetscapSGC KeyUsage = "netscape sgc" +) diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/conversion.go new file mode 100644 index 000000000..b9cf0b016 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/conversion.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions here. Currently there are none. + + return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.String(), "CertificateSigningRequest", + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/defaults.go new file mode 100644 index 000000000..cd6a29d33 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/defaults.go @@ -0,0 +1,31 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import "k8s.io/apimachinery/pkg/runtime" + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + RegisterDefaults(scheme) + return scheme.AddDefaultingFuncs( + SetDefaults_CertificateSigningRequestSpec, + ) +} +func SetDefaults_CertificateSigningRequestSpec(obj *CertificateSigningRequestSpec) { + if obj.Usages == nil { + obj.Usages = []KeyUsage{UsageDigitalSignature, UsageKeyEncipherment} + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/doc.go new file mode 100644 index 000000000..6f257909d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=certificates.k8s.io +package v1beta1 diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/generated.pb.go new file mode 100644 index 000000000..11833465d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/generated.pb.go @@ -0,0 +1,1674 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/certificates/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/certificates/v1beta1/generated.proto + + It has these top-level messages: + CertificateSigningRequest + CertificateSigningRequestCondition + CertificateSigningRequestList + CertificateSigningRequestSpec + CertificateSigningRequestStatus + ExtraValue +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} } +func (*CertificateSigningRequest) ProtoMessage() {} +func (*CertificateSigningRequest) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{0} +} + +func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} } +func (*CertificateSigningRequestCondition) ProtoMessage() {} +func (*CertificateSigningRequestCondition) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} } +func (*CertificateSigningRequestList) ProtoMessage() {} +func (*CertificateSigningRequestList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + +func (m *CertificateSigningRequestSpec) Reset() { *m = CertificateSigningRequestSpec{} } +func (*CertificateSigningRequestSpec) ProtoMessage() {} +func (*CertificateSigningRequestSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{3} +} + +func (m *CertificateSigningRequestStatus) Reset() { *m = CertificateSigningRequestStatus{} } +func (*CertificateSigningRequestStatus) ProtoMessage() {} +func (*CertificateSigningRequestStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{4} +} + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func init() { + proto.RegisterType((*CertificateSigningRequest)(nil), "k8s.io.client-go.pkg.apis.certificates.v1beta1.CertificateSigningRequest") + proto.RegisterType((*CertificateSigningRequestCondition)(nil), "k8s.io.client-go.pkg.apis.certificates.v1beta1.CertificateSigningRequestCondition") + proto.RegisterType((*CertificateSigningRequestList)(nil), "k8s.io.client-go.pkg.apis.certificates.v1beta1.CertificateSigningRequestList") + proto.RegisterType((*CertificateSigningRequestSpec)(nil), "k8s.io.client-go.pkg.apis.certificates.v1beta1.CertificateSigningRequestSpec") + proto.RegisterType((*CertificateSigningRequestStatus)(nil), "k8s.io.client-go.pkg.apis.certificates.v1beta1.CertificateSigningRequestStatus") + proto.RegisterType((*ExtraValue)(nil), "k8s.io.client-go.pkg.apis.certificates.v1beta1.ExtraValue") +} 
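The remainder of this generated file is hand-rolled protobuf plumbing: Marshal/MarshalTo, Size, String, and Unmarshal methods for each registered type, plus varint helpers. As a quick orientation (an editorial sketch, not part of the vendored file), a round-trip through the generated Marshal/Unmarshal pair looks roughly like the Go program below; it assumes this vendor tree is on the import path and uses ExtraValue, the simplest generated message.

package main

import (
	"fmt"

	certificatesv1beta1 "k8s.io/client-go/pkg/apis/certificates/v1beta1"
)

func main() {
	// ExtraValue is a []string; Marshal emits each element as a
	// length-prefixed protobuf string field with field tag 1.
	in := certificatesv1beta1.ExtraValue{"system:authenticated", "system:masters"}

	data, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	// Unmarshal walks the wire bytes back into a slice of strings.
	var out certificatesv1beta1.ExtraValue
	if err := out.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(out) // [system:authenticated system:masters]
}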
+func (m *CertificateSigningRequest) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CertificateSigningRequest) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *CertificateSigningRequestCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CertificateSigningRequestCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastUpdateTime.Size())) + n4, err := m.LastUpdateTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + return i, nil +} + +func (m *CertificateSigningRequestList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CertificateSigningRequestList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CertificateSigningRequestSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CertificateSigningRequestSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Request != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Request))) + i += copy(data[i:], m.Request) + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Username))) + i += copy(data[i:], m.Username) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.UID))) + i += copy(data[i:], m.UID) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + data[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Usages) > 0 { + for _, s := range 
m.Usages { + data[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Extra) > 0 { + for k := range m.Extra { + data[i] = 0x32 + i++ + v := m.Extra[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n6, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + } + } + return i, nil +} + +func (m *CertificateSigningRequestStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CertificateSigningRequestStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Certificate != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Certificate))) + i += copy(data[i:], m.Certificate) + } + return i, nil +} + +func (m ExtraValue) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m ExtraValue) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *CertificateSigningRequest) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CertificateSigningRequestCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CertificateSigningRequestList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n 
+= 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CertificateSigningRequestSpec) Size() (n int) { + var l int + _ = l + if m.Request != nil { + l = len(m.Request) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Username) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Usages) > 0 { + for _, s := range m.Usages { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *CertificateSigningRequestStatus) Size() (n int) { + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Certificate != nil { + l = len(m.Certificate) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m ExtraValue) Size() (n int) { + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CertificateSigningRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CertificateSigningRequest{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CertificateSigningRequestSpec", "CertificateSigningRequestSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CertificateSigningRequestStatus", "CertificateSigningRequestStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CertificateSigningRequestCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CertificateSigningRequestCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CertificateSigningRequestList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CertificateSigningRequestList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CertificateSigningRequest", "CertificateSigningRequest", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CertificateSigningRequestSpec) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = 
append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&CertificateSigningRequestSpec{`, + `Request:` + valueToStringGenerated(this.Request) + `,`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Usages:` + fmt.Sprintf("%v", this.Usages) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func (this *CertificateSigningRequestStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CertificateSigningRequestStatus{`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "CertificateSigningRequestCondition", "CertificateSigningRequestCondition", 1), `&`, ``, 1) + `,`, + `Certificate:` + valueToStringGenerated(this.Certificate) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CertificateSigningRequest) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateSigningRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateSigningRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CertificateSigningRequestCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateSigningRequestCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateSigningRequestCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = RequestConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CertificateSigningRequestList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateSigningRequestList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateSigningRequestList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CertificateSigningRequest{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CertificateSigningRequestSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 
0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateSigningRequestSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateSigningRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Request = append(m.Request[:0], data[iNdEx:postIndex]...) + if m.Request == nil { + m.Request = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usages", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Usages = append(m.Usages, KeyUsage(data[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + m.Extra[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CertificateSigningRequestStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateSigningRequestStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateSigningRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, CertificateSigningRequestCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Certificate = append(m.Certificate[:0], data[iNdEx:postIndex]...) + if m.Certificate == nil { + m.Certificate = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExtraValue) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 839 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x54, 0xcf, 0x8f, 0xdb, 0x44, + 0x14, 0x8e, 0xf3, 0x6b, 0x93, 0xc9, 0xb2, 0xad, 0x46, 0xa8, 0x32, 0x2b, 0xd5, 0x5e, 0x59, 0x80, + 0xb6, 0xa8, 0xd8, 0x64, 0x41, 0xb0, 0x2a, 0x07, 0x24, 0x97, 0x0a, 0x15, 0x5a, 0x7e, 0xcc, 0x36, + 0x48, 0x20, 0x0e, 0x4c, 0x9c, 0x57, 0xef, 0x34, 0xf1, 0x0f, 0x3c, 0xe3, 0x68, 0x73, 0x41, 0xbd, + 0x71, 0xe5, 0xc8, 0x05, 0x89, 0x3f, 0x67, 0x8f, 0x3d, 0x72, 0x40, 0x11, 0x1b, 0x4e, 0x5c, 0xf8, + 0x03, 0x7a, 0x42, 0x33, 0x9e, 0xc4, 0x66, 0xd3, 0xd0, 0x56, 0xca, 0xcd, 0xf3, 0xcd, 0xf7, 0xbe, + 0xf7, 0xde, 0xf7, 0x9e, 0x07, 0x7d, 0x34, 0x3e, 0xe6, 0x2e, 0x4b, 0xbc, 0x71, 0x3e, 0x84, 0x2c, + 0x06, 0x01, 0xdc, 0x4b, 0xc7, 0xa1, 0x47, 0x53, 0xc6, 0xbd, 0x00, 0x32, 0xc1, 0x1e, 0xb2, 0x80, + 0x4a, 0x74, 0xda, 0x1f, 0x82, 0xa0, 0x7d, 0x2f, 0x84, 0x18, 0x32, 0x2a, 0x60, 0xe4, 0xa6, 0x59, + 0x22, 0x12, 0xec, 0x15, 0x02, 0x6e, 0x29, 0xe0, 0xa6, 0xe3, 0xd0, 0x95, 0x02, 0x6e, 0x55, 0xc0, + 0xd5, 0x02, 0xfb, 0x6f, 0x87, 0x4c, 0x9c, 0xe6, 0x43, 0x37, 0x48, 0x22, 0x2f, 0x4c, 0xc2, 0xc4, + 0x53, 0x3a, 0xc3, 0xfc, 0xa1, 0x3a, 0xa9, 0x83, 0xfa, 0x2a, 0xf4, 0xf7, 0xdf, 0xd3, 0x05, 0xd2, + 0x94, 0x45, 0x34, 0x38, 0x65, 0x31, 0x64, 0xb3, 0xb2, 0xc4, 0x08, 0x04, 0xf5, 0xa6, 0x6b, 0x55, + 0xed, 0x7b, 0x9b, 0xa2, 0xb2, 0x3c, 0x16, 0x2c, 0x82, 0xb5, 0x80, 0xf7, 0x9f, 0x17, 0xc0, 0x83, + 0x53, 0x88, 0xe8, 0x5a, 0xdc, 0xbb, 0x9b, 0xe2, 0x72, 0xc1, 0x26, 0x1e, 0x8b, 0x05, 0x17, 0xd9, + 0x5a, 0x50, 0xa5, 0x27, 0x0e, 0xd9, 0x14, 0xb2, 0xb2, 0x21, 0x38, 0xa3, 0x51, 0x3a, 0x81, 0x67, + 0xf5, 0x74, 0x73, 0xe3, 0xa8, 0x9e, 0xc1, 0x76, 0xfe, 0xae, 0xa3, 0xd7, 0x6e, 0x97, 0xfe, 0x9f, + 0xb0, 0x30, 0x66, 0x71, 0x48, 0xe0, 0x87, 0x1c, 0xb8, 0xc0, 0xdf, 0xa3, 0x8e, 0xb4, 0x6e, 0x44, + 0x05, 0x35, 0x8d, 0x03, 0xe3, 0xb0, 0x77, 0xf4, 0x8e, 0xab, 0x07, 0x59, 0xed, 
0xa4, 0x1c, 0xa5, + 0x64, 0xbb, 0xd3, 0xbe, 0xfb, 0xc5, 0xf0, 0x11, 0x04, 0xe2, 0x3e, 0x08, 0xea, 0xe3, 0xf3, 0xb9, + 0x5d, 0x5b, 0xcc, 0x6d, 0x54, 0x62, 0x64, 0xa5, 0x8a, 0x53, 0xd4, 0xe4, 0x29, 0x04, 0x66, 0x5d, + 0xa9, 0x7f, 0xee, 0xbe, 0xe4, 0x9a, 0xb8, 0x1b, 0x6b, 0x3f, 0x49, 0x21, 0xf0, 0x77, 0x75, 0xee, + 0xa6, 0x3c, 0x11, 0x95, 0x09, 0x9f, 0xa1, 0x36, 0x17, 0x54, 0xe4, 0xdc, 0x6c, 0xa8, 0x9c, 0x5f, + 0x6e, 0x31, 0xa7, 0xd2, 0xf5, 0xf7, 0x74, 0xd6, 0x76, 0x71, 0x26, 0x3a, 0x9f, 0xf3, 0x6b, 0x1d, + 0x39, 0x1b, 0x63, 0x6f, 0x27, 0xf1, 0x88, 0x09, 0x96, 0xc4, 0xf8, 0x18, 0x35, 0xc5, 0x2c, 0x05, + 0x65, 0x78, 0xd7, 0x7f, 0x7d, 0xd9, 0xc2, 0x83, 0x59, 0x0a, 0x4f, 0xe7, 0xf6, 0xab, 0x97, 0xf9, + 0x12, 0x27, 0x2a, 0x02, 0xbf, 0x89, 0xda, 0x19, 0x50, 0x9e, 0xc4, 0xca, 0xce, 0x6e, 0x59, 0x08, + 0x51, 0x28, 0xd1, 0xb7, 0xf8, 0x06, 0xda, 0x89, 0x80, 0x73, 0x1a, 0x82, 0xf2, 0xa0, 0xeb, 0x5f, + 0xd1, 0xc4, 0x9d, 0xfb, 0x05, 0x4c, 0x96, 0xf7, 0xf8, 0x11, 0xda, 0x9b, 0x50, 0x2e, 0x06, 0xe9, + 0x88, 0x0a, 0x78, 0xc0, 0x22, 0x30, 0x9b, 0xca, 0xb5, 0xb7, 0x5e, 0x6c, 0x0f, 0x64, 0x84, 0x7f, + 0x4d, 0xab, 0xef, 0xdd, 0xfb, 0x8f, 0x12, 0xb9, 0xa4, 0xec, 0xfc, 0x63, 0xa0, 0xeb, 0x1b, 0xfd, + 0xb9, 0xc7, 0xb8, 0xc0, 0xdf, 0xad, 0xed, 0xa3, 0xfb, 0x62, 0x75, 0xc8, 0x68, 0xb5, 0x8d, 0x57, + 0x75, 0x2d, 0x9d, 0x25, 0x52, 0xd9, 0xc5, 0x04, 0xb5, 0x98, 0x80, 0x88, 0x9b, 0xf5, 0x83, 0xc6, + 0x61, 0xef, 0xe8, 0xd3, 0xed, 0x2d, 0x86, 0xff, 0x8a, 0x4e, 0xdb, 0xba, 0x2b, 0x13, 0x90, 0x22, + 0x8f, 0xb3, 0x68, 0xfc, 0x4f, 0xc3, 0x72, 0x65, 0xf1, 0x1b, 0x68, 0x27, 0x2b, 0x8e, 0xaa, 0xdf, + 0x5d, 0xbf, 0x27, 0xa7, 0xa4, 0x19, 0x64, 0x79, 0x87, 0x6f, 0xa2, 0x4e, 0xce, 0x21, 0x8b, 0x69, + 0x04, 0x7a, 0xf4, 0xab, 0x3e, 0x07, 0x1a, 0x27, 0x2b, 0x06, 0xbe, 0x8e, 0x1a, 0x39, 0x1b, 0xe9, + 0xd1, 0xf7, 0x34, 0xb1, 0x31, 0xb8, 0xfb, 0x31, 0x91, 0x38, 0x76, 0x50, 0x3b, 0xcc, 0x92, 0x3c, + 0xe5, 0x66, 0xf3, 0xa0, 0x71, 0xd8, 0xf5, 0x91, 0xdc, 0xa0, 0x4f, 0x14, 0x42, 0xf4, 0x0d, 0x3e, + 0x42, 0x9d, 0x31, 0xcc, 0x06, 0x6a, 0x85, 0x5a, 0x8a, 0x75, 0x4d, 0xb2, 0x14, 0xc0, 0x9f, 0xce, + 0xed, 0xce, 0x67, 0xfa, 0x96, 0xac, 0x78, 0xf8, 0x47, 0xd4, 0x82, 0x33, 0x91, 0x51, 0xb3, 0xad, + 0xec, 0xfd, 0x66, 0xbb, 0xff, 0xba, 0x7b, 0x47, 0x6a, 0xdf, 0x89, 0x45, 0x36, 0x2b, 0xdd, 0x56, + 0x18, 0x29, 0xd2, 0xee, 0xe7, 0x08, 0x95, 0x1c, 0x7c, 0x15, 0x35, 0xc6, 0x30, 0x2b, 0x7e, 0x32, + 0x22, 0x3f, 0xf1, 0x57, 0xa8, 0x35, 0xa5, 0x93, 0x1c, 0xf4, 0x5b, 0xf4, 0xe1, 0x4b, 0xd7, 0xa7, + 0xd4, 0xbf, 0x96, 0x12, 0xa4, 0x50, 0xba, 0x55, 0x3f, 0x36, 0x9c, 0xb9, 0x81, 0xec, 0xe7, 0xbc, + 0x18, 0xf8, 0x27, 0x03, 0xa1, 0x60, 0xf9, 0x43, 0x73, 0xd3, 0x50, 0x06, 0x9d, 0x6c, 0xcf, 0xa0, + 0xd5, 0x63, 0x51, 0xbe, 0xc6, 0x2b, 0x88, 0x93, 0x4a, 0x6a, 0xdc, 0x47, 0xbd, 0x8a, 0xb4, 0xb2, + 0x62, 0xd7, 0xbf, 0xb2, 0x98, 0xdb, 0xbd, 0x8a, 0x38, 0xa9, 0x72, 0x9c, 0x0f, 0xb4, 0xaf, 0xaa, + 0x73, 0x6c, 0x2f, 0x7f, 0x22, 0x43, 0xad, 0x45, 0xf7, 0xf2, 0xd2, 0xdf, 0xea, 0xfc, 0xf2, 0x9b, + 0x5d, 0x7b, 0xfc, 0xc7, 0x41, 0xcd, 0xbf, 0x71, 0x7e, 0x61, 0xd5, 0x9e, 0x5c, 0x58, 0xb5, 0xdf, + 0x2f, 0xac, 0xda, 0xe3, 0x85, 0x65, 0x9c, 0x2f, 0x2c, 0xe3, 0xc9, 0xc2, 0x32, 0xfe, 0x5c, 0x58, + 0xc6, 0xcf, 0x7f, 0x59, 0xb5, 0x6f, 0x77, 0x74, 0x67, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x07, + 0x0c, 0x3b, 0x3a, 0x80, 0x08, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/generated.proto new file mode 100644 index 000000000..215dc39a4 --- /dev/null +++ 
b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/generated.proto @@ -0,0 +1,124 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.certificates.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// Describes a certificate signing request +message CertificateSigningRequest { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The certificate request itself and any additional information. + // +optional + optional CertificateSigningRequestSpec spec = 2; + + // Derived information about the request. + // +optional + optional CertificateSigningRequestStatus status = 3; +} + +message CertificateSigningRequestCondition { + // request approval state, currently Approved or Denied. + optional string type = 1; + + // brief reason for the request state + // +optional + optional string reason = 2; + + // human readable message with details about the request state + // +optional + optional string message = 3; + + // timestamp for the last update to this condition + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; +} + +message CertificateSigningRequestList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated CertificateSigningRequest items = 2; +} + +// This information is immutable after the request is created. Only the Request +// and Usages fields can be set on creation, other fields are derived by +// Kubernetes and cannot be modified by users. +message CertificateSigningRequestSpec { + // Base64-encoded PKCS#10 CSR data + optional bytes request = 1; + + // allowedUsages specifies a set of usage contexts the key will be + // valid for. + // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + repeated string keyUsage = 5; + + // Information about the requesting user. + // See user.Info interface for details. + // +optional + optional string username = 2; + + // UID information about the requesting user. + // See user.Info interface for details. + // +optional + optional string uid = 3; + + // Group information about the requesting user. + // See user.Info interface for details. + // +optional + repeated string groups = 4; + + // Extra information about the requesting user. + // See user.Info interface for details. 
+ // +optional + map<string, ExtraValue> extra = 6; +} + +message CertificateSigningRequestStatus { + // Conditions applied to the request, such as approval or denial. + // +optional + repeated CertificateSigningRequestCondition conditions = 1; + + // If request was approved, the controller will place the issued certificate here. + // +optional + optional bytes certificate = 2; +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message ExtraValue { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/helpers.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/helpers.go new file mode 100644 index 000000000..1375063c1 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/helpers.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "crypto/x509" + "encoding/pem" + "errors" +) + +// ParseCSR extracts the CSR from the API object and decodes it. +func ParseCSR(obj *CertificateSigningRequest) (*x509.CertificateRequest, error) { + // extract PEM from request object + pemBytes := obj.Spec.Request + block, _ := pem.Decode(pemBytes) + if block == nil || block.Type != "CERTIFICATE REQUEST" { + return nil, errors.New("PEM block type must be CERTIFICATE REQUEST") + } + csr, err := x509.ParseCertificateRequest(block.Bytes) + if err != nil { + return nil, err + } + return csr, nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/register.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/register.go new file mode 100644 index 000000000..6574de971 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/register.go @@ -0,0 +1,59 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "certificates.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addConversionFuncs, addDefaultingFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CertificateSigningRequest{}, + &CertificateSigningRequestList{}, + ) + + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} + +func (obj *CertificateSigningRequest) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } +func (obj *CertificateSigningRequestList) GetObjectKind() schema.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types.generated.go new file mode 100644 index 000000000..50d908bd7 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types.generated.go @@ -0,0 +1,2624 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +package v1beta1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 + } +} + +func (x *CertificateSigningRequest) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && 
z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CertificateSigningRequest) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CertificateSigningRequest) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } 
+ } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = CertificateSigningRequestSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = CertificateSigningRequestStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CertificateSigningRequest) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = CertificateSigningRequestSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = CertificateSigningRequestStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CertificateSigningRequestSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = len(x.Usages) != 0 + yyq2[2] = 
x.Username != "" + yyq2[3] = x.UID != "" + yyq2[4] = len(x.Groups) != 0 + yyq2[5] = len(x.Extra) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Request == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Request)) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("request")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Request == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Request)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Usages == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceKeyUsage(([]KeyUsage)(x.Usages), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("usages")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Usages == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceKeyUsage(([]KeyUsage)(x.Usages), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Username)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("username")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Username)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("uid")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.UID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Groups == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("groups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Groups == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + z.F.EncSliceStringV(x.Groups, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.Extra == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("extra")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Extra == nil { + r.EncodeNil() + } else { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + h.encMapstringExtraValue((map[string]ExtraValue)(x.Extra), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CertificateSigningRequestSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CertificateSigningRequestSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "request": + if r.TryDecodeAsNil() { + x.Request = nil + } else { + yyv4 := &x.Request + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *yyv4 = r.DecodeBytes(*(*[]byte)(yyv4), false, false) + } + } + case "usages": + if r.TryDecodeAsNil() { + x.Usages = nil + } else { + yyv6 := &x.Usages + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceKeyUsage((*[]KeyUsage)(yyv6), d) + } + } + case "username": + if r.TryDecodeAsNil() { + x.Username = "" + } else { + yyv8 := &x.Username + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "uid": + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv10 := &x.UID + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "groups": + if r.TryDecodeAsNil() { + x.Groups = nil + } else { + yyv12 := &x.Groups + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + z.F.DecSliceStringX(yyv12, false, d) + } + } + case "extra": + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv14 := &x.Extra + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv14), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end 
switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CertificateSigningRequestSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Request = nil + } else { + yyv17 := &x.Request + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *yyv17 = r.DecodeBytes(*(*[]byte)(yyv17), false, false) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Usages = nil + } else { + yyv19 := &x.Usages + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceKeyUsage((*[]KeyUsage)(yyv19), d) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Username = "" + } else { + yyv21 := &x.Username + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UID = "" + } else { + yyv23 := &x.UID + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Groups = nil + } else { + yyv25 := &x.Groups + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + z.F.DecSliceStringX(yyv25, false, d) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Extra = nil + } else { + yyv27 := &x.Extra + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + h.decMapstringExtraValue((*map[string]ExtraValue)(yyv27), d) + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj16-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ExtraValue) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + h.encExtraValue((ExtraValue)(x), e) + } + } +} + +func (x *ExtraValue) 
CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decExtraValue((*ExtraValue)(x), d) + } +} + +func (x *CertificateSigningRequestStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Conditions) != 0 + yyq2[1] = len(x.Certificate) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceCertificateSigningRequestCondition(([]CertificateSigningRequestCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceCertificateSigningRequestCondition(([]CertificateSigningRequestCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Certificate == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Certificate)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("certificate")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Certificate == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Certificate)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CertificateSigningRequestStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CertificateSigningRequestStatus) codecDecodeSelfFromMap(l 
int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv4 := &x.Conditions + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceCertificateSigningRequestCondition((*[]CertificateSigningRequestCondition)(yyv4), d) + } + } + case "certificate": + if r.TryDecodeAsNil() { + x.Certificate = nil + } else { + yyv6 := &x.Certificate + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *yyv6 = r.DecodeBytes(*(*[]byte)(yyv6), false, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CertificateSigningRequestStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv9 := &x.Conditions + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceCertificateSigningRequestCondition((*[]CertificateSigningRequestCondition)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Certificate = nil + } else { + yyv11 := &x.Certificate + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *yyv11 = r.DecodeBytes(*(*[]byte)(yyv11), false, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x RequestConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *RequestConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *CertificateSigningRequestCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if 
z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Reason != "" + yyq2[2] = x.Message != "" + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy13 := &x.LastUpdateTime + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(yy13) { + } else if yym14 { + z.EncBinaryMarshal(yy13) + } else if !yym14 && z.IsJSONHandle() { + z.EncJSONMarshal(yy13) + } else { + z.EncFallback(yy13) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastUpdateTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy15 := &x.LastUpdateTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CertificateSigningRequestCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } 
else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CertificateSigningRequestCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv5 := &x.Reason + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv7 := &x.Message + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + case "lastUpdateTime": + if r.TryDecodeAsNil() { + x.LastUpdateTime = pkg1_v1.Time{} + } else { + yyv9 := &x.LastUpdateTime + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if yym10 { + z.DecBinaryUnmarshal(yyv9) + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv9) + } else { + z.DecFallback(yyv9, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CertificateSigningRequestCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv12 := &x.Type + yyv12.CodecDecodeSelf(d) + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv13 := &x.Reason + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv15 := &x.Message + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastUpdateTime = pkg1_v1.Time{} + } else { + yyv17 := &x.LastUpdateTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CertificateSigningRequestList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := 
z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceCertificateSigningRequest(([]CertificateSigningRequest)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceCertificateSigningRequest(([]CertificateSigningRequest)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CertificateSigningRequestList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CertificateSigningRequestList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceCertificateSigningRequest((*[]CertificateSigningRequest)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CertificateSigningRequestList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceCertificateSigningRequest((*[]CertificateSigningRequest)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x KeyUsage) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *KeyUsage) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x codecSelfer1234) encSliceKeyUsage(v []KeyUsage, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yyv1.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceKeyUsage(v *[]KeyUsage, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []KeyUsage{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + 
if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]KeyUsage, yyrl1) + } + } else { + yyv1 = make([]KeyUsage, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 KeyUsage + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []KeyUsage{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encMapstringExtraValue(v map[string]ExtraValue, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decMapstringExtraValue(v *map[string]ExtraValue, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string]ExtraValue, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 ExtraValue + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv4 := &yymv1 + yyv4.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv5 := &yymk1 + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + *((*string)(yyv5)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = nil + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = nil + } else { + yyv7 := &yymv1 + yyv7.CodecDecodeSelf(d) + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? 
+ z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encExtraValue(v ExtraValue, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyv1)) + } + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decExtraValue(v *ExtraValue, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]string, yyrl1) + } + } else { + yyv1 = make([]string, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 string + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv6 := &yyv1[yyj1] + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []string{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceCertificateSigningRequestCondition(v []CertificateSigningRequestCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCertificateSigningRequestCondition(v *[]CertificateSigningRequestCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []CertificateSigningRequestCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + 
yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]CertificateSigningRequestCondition, yyrl1) + } + } else { + yyv1 = make([]CertificateSigningRequestCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CertificateSigningRequestCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, CertificateSigningRequestCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CertificateSigningRequestCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, CertificateSigningRequestCondition{}) // var yyz1 CertificateSigningRequestCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = CertificateSigningRequestCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []CertificateSigningRequestCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceCertificateSigningRequest(v []CertificateSigningRequest, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCertificateSigningRequest(v *[]CertificateSigningRequest, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []CertificateSigningRequest{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 416) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]CertificateSigningRequest, yyrl1) + } + } else { + yyv1 = make([]CertificateSigningRequest, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CertificateSigningRequest{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, CertificateSigningRequest{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CertificateSigningRequest{} + } else { 
+ yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, CertificateSigningRequest{}) // var yyz1 CertificateSigningRequest + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = CertificateSigningRequest{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []CertificateSigningRequest{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types.go new file mode 100644 index 000000000..a9149ba8d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types.go @@ -0,0 +1,152 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient=true +// +nonNamespaced=true + +// Describes a certificate signing request +type CertificateSigningRequest struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The certificate request itself and any additional information. + // +optional + Spec CertificateSigningRequestSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Derived information about the request. + // +optional + Status CertificateSigningRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// This information is immutable after the request is created. Only the Request +// and Usages fields can be set on creation, other fields are derived by +// Kubernetes and cannot be modified by users. +type CertificateSigningRequestSpec struct { + // Base64-encoded PKCS#10 CSR data + Request []byte `json:"request" protobuf:"bytes,1,opt,name=request"` + + // allowedUsages specifies a set of usage contexts the key will be + // valid for. + // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + Usages []KeyUsage `json:"usages,omitempty" protobuf:"bytes,5,opt,name=keyUsage"` + + // Information about the requesting user. + // See user.Info interface for details. + // +optional + Username string `json:"username,omitempty" protobuf:"bytes,2,opt,name=username"` + // UID information about the requesting user. + // See user.Info interface for details. + // +optional + UID string `json:"uid,omitempty" protobuf:"bytes,3,opt,name=uid"` + // Group information about the requesting user. + // See user.Info interface for details. + // +optional + Groups []string `json:"groups,omitempty" protobuf:"bytes,4,rep,name=groups"` + // Extra information about the requesting user. + // See user.Info interface for details. 
+ // +optional + Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,6,rep,name=extra"` +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type ExtraValue []string + +func (t ExtraValue) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +type CertificateSigningRequestStatus struct { + // Conditions applied to the request, such as approval or denial. + // +optional + Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` + + // If request was approved, the controller will place the issued certificate here. + // +optional + Certificate []byte `json:"certificate,omitempty" protobuf:"bytes,2,opt,name=certificate"` +} + +type RequestConditionType string + +// These are the possible conditions for a certificate request. +const ( + CertificateApproved RequestConditionType = "Approved" + CertificateDenied RequestConditionType = "Denied" +) + +type CertificateSigningRequestCondition struct { + // request approval state, currently Approved or Denied. + Type RequestConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=RequestConditionType"` + // brief reason for the request state + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` + // human readable message with details about the request state + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` + // timestamp for the last update to this condition + // +optional + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,4,opt,name=lastUpdateTime"` +} + +type CertificateSigningRequestList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []CertificateSigningRequest `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// KeyUsages specifies valid usage contexts for keys. 
+// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 +// https://tools.ietf.org/html/rfc5280#section-4.2.1.12 +type KeyUsage string + +const ( + UsageSigning KeyUsage = "signing" + UsageDigitalSignature KeyUsage = "digital signature" + UsageContentCommittment KeyUsage = "content committment" + UsageKeyEncipherment KeyUsage = "key encipherment" + UsageKeyAgreement KeyUsage = "key agreement" + UsageDataEncipherment KeyUsage = "data encipherment" + UsageCertSign KeyUsage = "cert sign" + UsageCRLSign KeyUsage = "crl sign" + UsageEncipherOnly KeyUsage = "encipher only" + UsageDecipherOnly KeyUsage = "decipher only" + UsageAny KeyUsage = "any" + UsageServerAuth KeyUsage = "server auth" + UsageClientAuth KeyUsage = "client auth" + UsageCodeSigning KeyUsage = "code signing" + UsageEmailProtection KeyUsage = "email protection" + UsageSMIME KeyUsage = "s/mime" + UsageIPsecEndSystem KeyUsage = "ipsec end system" + UsageIPsecTunnel KeyUsage = "ipsec tunnel" + UsageIPsecUser KeyUsage = "ipsec user" + UsageTimestamping KeyUsage = "timestamping" + UsageOCSPSigning KeyUsage = "ocsp signing" + UsageMicrosoftSGC KeyUsage = "microsoft sgc" + UsageNetscapSGC KeyUsage = "netscape sgc" +) diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..4fd91df06 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,74 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_CertificateSigningRequest = map[string]string{ + "": "Describes a certificate signing request", + "spec": "The certificate request itself and any additional information.", + "status": "Derived information about the request.", +} + +func (CertificateSigningRequest) SwaggerDoc() map[string]string { + return map_CertificateSigningRequest +} + +var map_CertificateSigningRequestCondition = map[string]string{ + "type": "request approval state, currently Approved or Denied.", + "reason": "brief reason for the request state", + "message": "human readable message with details about the request state", + "lastUpdateTime": "timestamp for the last update to this condition", +} + +func (CertificateSigningRequestCondition) SwaggerDoc() map[string]string { + return map_CertificateSigningRequestCondition +} + +var map_CertificateSigningRequestSpec = map[string]string{ + "": "This information is immutable after the request is created. Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users.", + "request": "Base64-encoded PKCS#10 CSR data", + "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12", + "username": "Information about the requesting user. See user.Info interface for details.", + "uid": "UID information about the requesting user. See user.Info interface for details.", + "groups": "Group information about the requesting user. See user.Info interface for details.", + "extra": "Extra information about the requesting user. See user.Info interface for details.", +} + +func (CertificateSigningRequestSpec) SwaggerDoc() map[string]string { + return map_CertificateSigningRequestSpec +} + +var map_CertificateSigningRequestStatus = map[string]string{ + "conditions": "Conditions applied to the request, such as approval or denial.", + "certificate": "If request was approved, the controller will place the issued certificate here.", +} + +func (CertificateSigningRequestStatus) SwaggerDoc() map[string]string { + return map_CertificateSigningRequestStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.conversion.go new file mode 100644 index 000000000..4c0e07380 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.conversion.go @@ -0,0 +1,179 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! 
+ +package v1beta1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + certificates "k8s.io/client-go/pkg/apis/certificates" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest, + Convert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest, + Convert_v1beta1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition, + Convert_certificates_CertificateSigningRequestCondition_To_v1beta1_CertificateSigningRequestCondition, + Convert_v1beta1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList, + Convert_certificates_CertificateSigningRequestList_To_v1beta1_CertificateSigningRequestList, + Convert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec, + Convert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec, + Convert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus, + Convert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus, + ) +} + +func autoConvert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in *CertificateSigningRequest, out *certificates.CertificateSigningRequest, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in *CertificateSigningRequest, out *certificates.CertificateSigningRequest, s conversion.Scope) error { + return autoConvert_v1beta1_CertificateSigningRequest_To_certificates_CertificateSigningRequest(in, out, s) +} + +func autoConvert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest(in *certificates.CertificateSigningRequest, out *CertificateSigningRequest, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest(in *certificates.CertificateSigningRequest, out *CertificateSigningRequest, s conversion.Scope) error { + return autoConvert_certificates_CertificateSigningRequest_To_v1beta1_CertificateSigningRequest(in, out, s) +} + +func autoConvert_v1beta1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in *CertificateSigningRequestCondition, out *certificates.CertificateSigningRequestCondition, s conversion.Scope) error { + out.Type = certificates.RequestConditionType(in.Type) + out.Reason = in.Reason + 
out.Message = in.Message + out.LastUpdateTime = in.LastUpdateTime + return nil +} + +func Convert_v1beta1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in *CertificateSigningRequestCondition, out *certificates.CertificateSigningRequestCondition, s conversion.Scope) error { + return autoConvert_v1beta1_CertificateSigningRequestCondition_To_certificates_CertificateSigningRequestCondition(in, out, s) +} + +func autoConvert_certificates_CertificateSigningRequestCondition_To_v1beta1_CertificateSigningRequestCondition(in *certificates.CertificateSigningRequestCondition, out *CertificateSigningRequestCondition, s conversion.Scope) error { + out.Type = RequestConditionType(in.Type) + out.Reason = in.Reason + out.Message = in.Message + out.LastUpdateTime = in.LastUpdateTime + return nil +} + +func Convert_certificates_CertificateSigningRequestCondition_To_v1beta1_CertificateSigningRequestCondition(in *certificates.CertificateSigningRequestCondition, out *CertificateSigningRequestCondition, s conversion.Scope) error { + return autoConvert_certificates_CertificateSigningRequestCondition_To_v1beta1_CertificateSigningRequestCondition(in, out, s) +} + +func autoConvert_v1beta1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in *CertificateSigningRequestList, out *certificates.CertificateSigningRequestList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]certificates.CertificateSigningRequest)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in *CertificateSigningRequestList, out *certificates.CertificateSigningRequestList, s conversion.Scope) error { + return autoConvert_v1beta1_CertificateSigningRequestList_To_certificates_CertificateSigningRequestList(in, out, s) +} + +func autoConvert_certificates_CertificateSigningRequestList_To_v1beta1_CertificateSigningRequestList(in *certificates.CertificateSigningRequestList, out *CertificateSigningRequestList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]CertificateSigningRequest, 0) + } else { + out.Items = *(*[]CertificateSigningRequest)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_certificates_CertificateSigningRequestList_To_v1beta1_CertificateSigningRequestList(in *certificates.CertificateSigningRequestList, out *CertificateSigningRequestList, s conversion.Scope) error { + return autoConvert_certificates_CertificateSigningRequestList_To_v1beta1_CertificateSigningRequestList(in, out, s) +} + +func autoConvert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in *CertificateSigningRequestSpec, out *certificates.CertificateSigningRequestSpec, s conversion.Scope) error { + out.Request = *(*[]byte)(unsafe.Pointer(&in.Request)) + out.Usages = *(*[]certificates.KeyUsage)(unsafe.Pointer(&in.Usages)) + out.Username = in.Username + out.UID = in.UID + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]certificates.ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in *CertificateSigningRequestSpec, out *certificates.CertificateSigningRequestSpec, s conversion.Scope) error { + return autoConvert_v1beta1_CertificateSigningRequestSpec_To_certificates_CertificateSigningRequestSpec(in, out, s) +} + +func 
autoConvert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec(in *certificates.CertificateSigningRequestSpec, out *CertificateSigningRequestSpec, s conversion.Scope) error { + if in.Request == nil { + out.Request = make([]byte, 0) + } else { + out.Request = *(*[]byte)(unsafe.Pointer(&in.Request)) + } + out.Usages = *(*[]KeyUsage)(unsafe.Pointer(&in.Usages)) + out.Username = in.Username + out.UID = in.UID + out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) + out.Extra = *(*map[string]ExtraValue)(unsafe.Pointer(&in.Extra)) + return nil +} + +func Convert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec(in *certificates.CertificateSigningRequestSpec, out *CertificateSigningRequestSpec, s conversion.Scope) error { + return autoConvert_certificates_CertificateSigningRequestSpec_To_v1beta1_CertificateSigningRequestSpec(in, out, s) +} + +func autoConvert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in *CertificateSigningRequestStatus, out *certificates.CertificateSigningRequestStatus, s conversion.Scope) error { + out.Conditions = *(*[]certificates.CertificateSigningRequestCondition)(unsafe.Pointer(&in.Conditions)) + out.Certificate = *(*[]byte)(unsafe.Pointer(&in.Certificate)) + return nil +} + +func Convert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in *CertificateSigningRequestStatus, out *certificates.CertificateSigningRequestStatus, s conversion.Scope) error { + return autoConvert_v1beta1_CertificateSigningRequestStatus_To_certificates_CertificateSigningRequestStatus(in, out, s) +} + +func autoConvert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus(in *certificates.CertificateSigningRequestStatus, out *CertificateSigningRequestStatus, s conversion.Scope) error { + out.Conditions = *(*[]CertificateSigningRequestCondition)(unsafe.Pointer(&in.Conditions)) + out.Certificate = *(*[]byte)(unsafe.Pointer(&in.Certificate)) + return nil +} + +func Convert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus(in *certificates.CertificateSigningRequestStatus, out *CertificateSigningRequestStatus, s conversion.Scope) error { + return autoConvert_certificates_CertificateSigningRequestStatus_To_v1beta1_CertificateSigningRequestStatus(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..800cdee47 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,150 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CertificateSigningRequest, InType: reflect.TypeOf(&CertificateSigningRequest{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CertificateSigningRequestCondition, InType: reflect.TypeOf(&CertificateSigningRequestCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CertificateSigningRequestList, InType: reflect.TypeOf(&CertificateSigningRequestList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CertificateSigningRequestSpec, InType: reflect.TypeOf(&CertificateSigningRequestSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CertificateSigningRequestStatus, InType: reflect.TypeOf(&CertificateSigningRequestStatus{})}, + ) +} + +func DeepCopy_v1beta1_CertificateSigningRequest(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequest) + out := out.(*CertificateSigningRequest) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_CertificateSigningRequestSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_CertificateSigningRequestStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_CertificateSigningRequestCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequestCondition) + out := out.(*CertificateSigningRequestCondition) + *out = *in + out.LastUpdateTime = in.LastUpdateTime.DeepCopy() + return nil + } +} + +func DeepCopy_v1beta1_CertificateSigningRequestList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequestList) + out := out.(*CertificateSigningRequestList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CertificateSigningRequest, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_CertificateSigningRequest(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_CertificateSigningRequestSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequestSpec) + out := out.(*CertificateSigningRequestSpec) + *out = *in + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Usages != nil { + in, out := &in.Usages, &out.Usages + *out = make([]KeyUsage, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue) + for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*ExtraValue) + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_CertificateSigningRequestStatus(in interface{}, 
out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequestStatus) + out := out.(*CertificateSigningRequestStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]CertificateSigningRequestCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_CertificateSigningRequestCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.defaults.go new file mode 100644 index 000000000..3c5ae0362 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/v1beta1/zz_generated.defaults.go @@ -0,0 +1,47 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&CertificateSigningRequest{}, func(obj interface{}) { SetObjectDefaults_CertificateSigningRequest(obj.(*CertificateSigningRequest)) }) + scheme.AddTypeDefaultingFunc(&CertificateSigningRequestList{}, func(obj interface{}) { + SetObjectDefaults_CertificateSigningRequestList(obj.(*CertificateSigningRequestList)) + }) + return nil +} + +func SetObjectDefaults_CertificateSigningRequest(in *CertificateSigningRequest) { + SetDefaults_CertificateSigningRequestSpec(&in.Spec) +} + +func SetObjectDefaults_CertificateSigningRequestList(in *CertificateSigningRequestList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_CertificateSigningRequest(a) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/certificates/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/certificates/zz_generated.deepcopy.go new file mode 100644 index 000000000..876902891 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/certificates/zz_generated.deepcopy.go @@ -0,0 +1,150 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package certificates + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_certificates_CertificateSigningRequest, InType: reflect.TypeOf(&CertificateSigningRequest{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_certificates_CertificateSigningRequestCondition, InType: reflect.TypeOf(&CertificateSigningRequestCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_certificates_CertificateSigningRequestList, InType: reflect.TypeOf(&CertificateSigningRequestList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_certificates_CertificateSigningRequestSpec, InType: reflect.TypeOf(&CertificateSigningRequestSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_certificates_CertificateSigningRequestStatus, InType: reflect.TypeOf(&CertificateSigningRequestStatus{})}, + ) +} + +func DeepCopy_certificates_CertificateSigningRequest(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequest) + out := out.(*CertificateSigningRequest) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_certificates_CertificateSigningRequestSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_certificates_CertificateSigningRequestStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_certificates_CertificateSigningRequestCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequestCondition) + out := out.(*CertificateSigningRequestCondition) + *out = *in + out.LastUpdateTime = in.LastUpdateTime.DeepCopy() + return nil + } +} + +func DeepCopy_certificates_CertificateSigningRequestList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequestList) + out := out.(*CertificateSigningRequestList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CertificateSigningRequest, len(*in)) + for i := range *in { + if err := DeepCopy_certificates_CertificateSigningRequest(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_certificates_CertificateSigningRequestSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequestSpec) + out := out.(*CertificateSigningRequestSpec) + *out = *in + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Usages != nil { + in, out := &in.Usages, &out.Usages + *out = make([]KeyUsage, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue) + 
for key, val := range *in { + if newVal, err := c.DeepCopy(&val); err != nil { + return err + } else { + (*out)[key] = *newVal.(*ExtraValue) + } + } + } + return nil + } +} + +func DeepCopy_certificates_CertificateSigningRequestStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CertificateSigningRequestStatus) + out := out.(*CertificateSigningRequestStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]CertificateSigningRequestCondition, len(*in)) + for i := range *in { + if err := DeepCopy_certificates_CertificateSigningRequestCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/OWNERS b/vendor/k8s.io/client-go/pkg/apis/extensions/OWNERS new file mode 100755 index 000000000..494763a69 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/OWNERS @@ -0,0 +1,41 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- nikhiljindal +- bprashanth +- erictune +- pmorie +- sttts +- kargakis +- saad-ali +- janetkuo +- justinsb +- ncdc +- timstclair +- mwielgus +- timothysc +- soltysh +- piosz +- dims +- errordeveloper +- madhusudancs +- rootfs +- jszczepkowski +- mml +- resouer +- mbohlool +- david-mcmahon +- therc +- pweil- +- tmrts +- mqliang +- lukaszo +- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/doc.go b/vendor/k8s.io/client-go/pkg/apis/extensions/doc.go new file mode 100644 index 000000000..87edee41c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package extensions diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/helpers.go b/vendor/k8s.io/client-go/pkg/apis/extensions/helpers.go new file mode 100644 index 000000000..27d3e23ad --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/helpers.go @@ -0,0 +1,37 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package extensions + +import ( + "strings" +) + +// SysctlsFromPodSecurityPolicyAnnotation parses an annotation value of the key +// SysctlsSecurityPolocyAnnotationKey into a slice of sysctls. 
An empty slice +// is returned if annotation is the empty string. +func SysctlsFromPodSecurityPolicyAnnotation(annotation string) ([]string, error) { + if len(annotation) == 0 { + return []string{}, nil + } + + return strings.Split(annotation, ","), nil +} + +// PodAnnotationsFromSysctls creates an annotation value for a slice of Sysctls. +func PodAnnotationsFromSysctls(sysctls []string) string { + return strings.Join(sysctls, ",") +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/install/install.go b/vendor/k8s.io/client-go/pkg/apis/extensions/install/install.go new file mode 100644 index 000000000..1f968e861 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/install/install.go @@ -0,0 +1,51 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/extensions" + "k8s.io/client-go/pkg/apis/extensions/v1beta1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: extensions.GroupName, + VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/extensions", + RootScopedKinds: sets.NewString("PodSecurityPolicy", "ThirdPartyResource"), + AddInternalObjectsToScheme: extensions.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/register.go b/vendor/k8s.io/client-go/pkg/apis/extensions/register.go new file mode 100644 index 000000000..5983636c2 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/register.go @@ -0,0 +1,70 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package extensions + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "extensions" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + // TODO this gets cleaned up when the types are fixed + scheme.AddKnownTypes(SchemeGroupVersion, + &Deployment{}, + &DeploymentList{}, + &DeploymentRollback{}, + &ReplicationControllerDummy{}, + &Scale{}, + &ThirdPartyResource{}, + &ThirdPartyResourceList{}, + &DaemonSetList{}, + &DaemonSet{}, + &ThirdPartyResourceData{}, + &ThirdPartyResourceDataList{}, + &Ingress{}, + &IngressList{}, + &ReplicaSet{}, + &ReplicaSetList{}, + &PodSecurityPolicy{}, + &PodSecurityPolicyList{}, + &NetworkPolicy{}, + &NetworkPolicyList{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/types.go b/vendor/k8s.io/client-go/pkg/apis/extensions/types.go new file mode 100644 index 000000000..945c5fa2b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/types.go @@ -0,0 +1,1124 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +This file (together with pkg/apis/extensions/v1beta1/types.go) contain the experimental +types in kubernetes. These API objects are experimental, meaning that the +APIs may be broken at any time by the kubernetes team. + +DISCLAIMER: The implementation of the experimental API group itself is +a temporary one meant as a stopgap solution until kubernetes has proper +support for multiple API groups. The transition may require changes +beyond registration differences. In other words, experimental API group +support is experimental. +*/ + +package extensions + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/pkg/api" +) + +const ( + // SysctlsPodSecurityPolicyAnnotationKey represents the key of a whitelist of + // allowed safe and unsafe sysctls in a pod spec. It's a comma-separated list of plain sysctl + // names or sysctl patterns (which end in *). The string "*" matches all sysctls. 
+ SysctlsPodSecurityPolicyAnnotationKey string = "security.alpha.kubernetes.io/sysctls" +) + +// describes the attributes of a scale subresource +type ScaleSpec struct { + // desired number of instances for the scaled object. + // +optional + Replicas int32 +} + +// represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 + + // label query over pods that should match the replicas count. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector *metav1.LabelSelector +} + +// +genclient=true +// +noMethods=true + +// represents a scaling request for a resource. +type Scale struct { + metav1.TypeMeta + // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta + + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + Spec ScaleSpec + + // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // +optional + Status ScaleStatus +} + +// Dummy definition +type ReplicationControllerDummy struct { + metav1.TypeMeta +} + +// Alpha-level support for Custom Metrics in HPA (as annotations). +type CustomMetricTarget struct { + // Custom Metric name. + Name string + // Custom Metric value (average). + TargetValue resource.Quantity +} + +type CustomMetricTargetList struct { + Items []CustomMetricTarget +} + +type CustomMetricCurrentStatus struct { + // Custom Metric name. + Name string + // Custom Metric value (average). + CurrentValue resource.Quantity +} + +type CustomMetricCurrentStatusList struct { + Items []CustomMetricCurrentStatus +} + +// +genclient=true +// +nonNamespaced=true + +// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource +// types to the API. It consists of one or more Versions of the api. +type ThirdPartyResource struct { + metav1.TypeMeta + + // Standard object metadata + // +optional + metav1.ObjectMeta + + // Description is the description of this object. + // +optional + Description string + + // Versions are versions for this third party object + Versions []APIVersion +} + +type ThirdPartyResourceList struct { + metav1.TypeMeta + + // Standard list metadata. + // +optional + metav1.ListMeta + + // Items is the list of horizontal pod autoscalers. + Items []ThirdPartyResource +} + +// An APIVersion represents a single concrete version of an object model. +// TODO: we should consider merge this struct with GroupVersion in metav1.go +type APIVersion struct { + // Name of this version (e.g. 'v1'). + Name string +} + +// An internal object, used for versioned storage in etcd. Not exposed to the end user. +type ThirdPartyResourceData struct { + metav1.TypeMeta + // Standard object metadata. + // +optional + metav1.ObjectMeta + + // Data is the raw JSON data for this data. + // +optional + Data []byte +} + +// +genclient=true + +type Deployment struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Specification of the desired behavior of the Deployment. + // +optional + Spec DeploymentSpec + + // Most recently observed status of the Deployment. + // +optional + Status DeploymentStatus +} + +type DeploymentSpec struct { + // Number of desired pods. 
This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + Replicas int32 + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // +optional + Selector *metav1.LabelSelector + + // Template describes the pods that will be created. + Template api.PodTemplateSpec + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + Strategy DeploymentStrategy + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + RevisionHistoryLimit *int32 + + // Indicates that the deployment is paused and will not be processed by the + // deployment controller. + // +optional + Paused bool + + // The config this deployment is rolling back to. Will be cleared after rollback is done. + // +optional + RollbackTo *RollbackConfig + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Once autoRollback is + // implemented, the deployment controller will automatically rollback failed + // deployments. Note that progress will not be estimated during the time a + // deployment is paused. This is not set by default. + ProgressDeadlineSeconds *int32 +} + +// DeploymentRollback stores the information required to rollback a deployment. +type DeploymentRollback struct { + metav1.TypeMeta + // Required: This must match the Name of a deployment. + Name string + // The annotations to be updated to a deployment + // +optional + UpdatedAnnotations map[string]string + // The config of this deployment rollback. + RollbackTo RollbackConfig +} + +type RollbackConfig struct { + // The revision to rollback to. If set to 0, rollbck to the last revision. + // +optional + Revision int64 +} + +const ( + // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added + // to existing RCs (and label key that is added to its pods) to prevent the existing RCs + // to select new pods (and old pods being select by new RC). + DefaultDeploymentUniqueLabelKey string = "pod-template-hash" +) + +type DeploymentStrategy struct { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + Type DeploymentStrategyType + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + RollingUpdate *RollingUpdateDeployment +} + +type DeploymentStrategyType string + +const ( + // Kill all existing pods before creating new ones. + RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" + + // Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one. + RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" +) + +// Spec to control the desired behavior of rolling update. 
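// ---------------------------------------------------------------------------
// Editorial note, not part of the vendored file above: a minimal, hedged
// sketch of how the MaxUnavailable/MaxSurge semantics documented in the
// RollingUpdateDeployment comments resolve to absolute pod counts. It assumes
// the GetValueFromIntOrPercent helper is present in this revision of
// k8s.io/apimachinery/pkg/util/intstr; the pod count and percentages are
// illustrative values only.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	desired := 9 // pods at the start of the rolling update

	maxUnavailable := intstr.FromString("30%")
	maxSurge := intstr.FromString("30%")

	// Per the field comments: percentages are rounded down for MaxUnavailable
	// and rounded up for MaxSurge. 30% of 9 pods is 2.7, so we expect 2 and 3.
	down, _ := intstr.GetValueFromIntOrPercent(&maxUnavailable, desired, false) // 2
	up, _ := intstr.GetValueFromIntOrPercent(&maxSurge, desired, true)          // 3

	fmt.Printf("at most %d pods unavailable, at most %d extra pods above %d during the update\n",
		down, up, desired)
}
// ---------------------------------------------------------------------------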
+type RollingUpdateDeployment struct { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // By default, a fixed value of 1 is used. + // Example: when this is set to 30%, the old RC can be scaled down by 30% + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that at least 70% of original number of pods are available at all times + // during the update. + // +optional + MaxUnavailable intstr.IntOrString + + // The maximum number of pods that can be scheduled above the original number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of total pods at + // the start of the update (ex: 10%). This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // By default, a value of 1 is used. + // Example: when this is set to 30%, the new RC can be scaled up by 30% + // immediately when the rolling update starts. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is atmost 130% of original pods. + // +optional + MaxSurge intstr.IntOrString +} + +type DeploymentStatus struct { + // The generation observed by the deployment controller. + // +optional + ObservedGeneration int64 + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + Replicas int32 + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + UpdatedReplicas int32 + + // Total number of ready pods targeted by this deployment. + // +optional + ReadyReplicas int32 + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + AvailableReplicas int32 + + // Total number of unavailable pods targeted by this deployment. + // +optional + UnavailableReplicas int32 + + // Represents the latest available observations of a deployment's current state. + Conditions []DeploymentCondition +} + +type DeploymentConditionType string + +// These are valid conditions of a deployment. +const ( + // Available means the deployment is available, ie. at least the minimum available + // replicas required are up and running for at least minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // Progressing means the deployment is progressing. Progress for a deployment is + // considered when a new replica set is created or adopted, and when new pods scale + // up or old pods scale down. Progress is not estimated for paused deployments or + // when progressDeadlineSeconds is not specified. + DeploymentProgressing DeploymentConditionType = "Progressing" + // ReplicaFailure is added in a deployment when one of its pods fails to be created + // or deleted. + DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType + // Status of the condition, one of True, False, Unknown. 
+ Status api.ConditionStatus + // The last time this condition was updated. + LastUpdateTime metav1.Time + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time + // The reason for the condition's last transition. + Reason string + // A human readable message indicating details about the transition. + Message string +} + +type DeploymentList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is the list of deployments. + Items []Deployment +} + +type DaemonSetUpdateStrategy struct { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". + // Default is OnDelete. + // +optional + Type DaemonSetUpdateStrategyType + + // Rolling update config params. Present only if type = "RollingUpdate". + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as DeploymentStrategy.RollingUpdate. + // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + RollingUpdate *RollingUpdateDaemonSet +} + +type DaemonSetUpdateStrategyType string + +const ( + // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. + RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" + + // Replace the old daemons only when it's killed + OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete" +) + +// Spec to control the desired behavior of daemon set rolling update. +type RollingUpdateDaemonSet struct { + // The maximum number of DaemonSet pods that can be unavailable during the + // update. Value can be an absolute number (ex: 5) or a percentage of total + // number of DaemonSet pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // This cannot be 0. + // Default value is 1. + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + MaxUnavailable intstr.IntOrString +} + +// DaemonSetSpec is the specification of a daemon set. +type DaemonSetSpec struct { + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // If empty, defaulted to labels on Pod template. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector *metav1.LabelSelector + + // An object that describes the pod that will be created. + // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). + // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template + Template api.PodTemplateSpec + + // An update strategy to replace existing DaemonSet pods with new pods. + // +optional + UpdateStrategy DaemonSetUpdateStrategy + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. 
Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + MinReadySeconds int32 + + // A sequence number representing a specific generation of the template. + // Populated by the system. It can be set only during the creation. + // +optional + TemplateGeneration int64 +} + +// DaemonSetStatus represents the current status of a daemon set. +type DaemonSetStatus struct { + // The number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. + CurrentNumberScheduled int32 + + // The number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. + NumberMisscheduled int32 + + // The total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). + DesiredNumberScheduled int32 + + // The number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running and ready. + NumberReady int32 + + // The most recent generation observed by the daemon set controller. + // +optional + ObservedGeneration int64 + + // The total number of nodes that are running updated daemon pod + // +optional + UpdatedNumberScheduled int32 + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + NumberAvailable int32 + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + NumberUnavailable int32 +} + +// +genclient=true + +// DaemonSet represents the configuration of a daemon set. +type DaemonSet struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // The desired behavior of this daemon set. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec DaemonSetSpec + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status DaemonSetStatus +} + +const ( + // DaemonSetTemplateGenerationKey is the key of the labels that is added + // to daemon set pods to distinguish between old and new pod templates + // during DaemonSet template update. + DaemonSetTemplateGenerationKey string = "pod-template-generation" +) + +// DaemonSetList is a collection of daemon sets. +type DaemonSetList struct { + metav1.TypeMeta + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta + + // A list of daemon sets. + Items []DaemonSet +} + +type ThirdPartyResourceDataList struct { + metav1.TypeMeta + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta + // Items is a list of third party objects + Items []ThirdPartyResourceData +} + +// +genclient=true + +// Ingress is a collection of rules that allow inbound connections to reach the +// endpoints defined by a backend. An Ingress can be configured to give services +// externally-reachable urls, load balance traffic, terminate SSL, offer name +// based virtual hosting etc. 
+type Ingress struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // Spec is the desired state of the Ingress. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec IngressSpec + + // Status is the current state of the Ingress. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status IngressStatus +} + +// IngressList is a collection of Ingress. +type IngressList struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta + + // Items is the list of Ingress. + Items []Ingress +} + +// IngressSpec describes the Ingress the user wishes to exist. +type IngressSpec struct { + // A default backend capable of servicing requests that don't match any + // rule. At least one of 'backend' or 'rules' must be specified. This field + // is optional to allow the loadbalancer controller or defaulting logic to + // specify a global default. + // +optional + Backend *IngressBackend + + // TLS configuration. Currently the Ingress only supports a single TLS + // port, 443. If multiple members of this list specify different hosts, they + // will be multiplexed on the same port according to the hostname specified + // through the SNI TLS extension, if the ingress controller fulfilling the + // ingress supports SNI. + // +optional + TLS []IngressTLS + + // A list of host rules used to configure the Ingress. If unspecified, or + // no rule matches, all traffic is sent to the default backend. + // +optional + Rules []IngressRule + // TODO: Add the ability to specify load-balancer IP through claims +} + +// IngressTLS describes the transport layer security associated with an Ingress. +type IngressTLS struct { + // Hosts are a list of hosts included in the TLS certificate. The values in + // this list must match the name/s used in the tlsSecret. Defaults to the + // wildcard host setting for the loadbalancer controller fulfilling this + // Ingress, if left unspecified. + // +optional + Hosts []string + // SecretName is the name of the secret used to terminate SSL traffic on 443. + // Field is left optional to allow SSL routing based on SNI hostname alone. + // If the SNI host in a listener conflicts with the "Host" header field used + // by an IngressRule, the SNI host is used for termination and value of the + // Host header is used for routing. + // +optional + SecretName string + // TODO: Consider specifying different modes of termination, protocols etc. +} + +// IngressStatus describe the current state of the Ingress. +type IngressStatus struct { + // LoadBalancer contains the current status of the load-balancer. + // +optional + LoadBalancer api.LoadBalancerStatus +} + +// IngressRule represents the rules mapping the paths under a specified host to +// the related backend services. Incoming requests are first evaluated for a host +// match, then routed to the backend associated with the matching IngressRuleValue. +type IngressRule struct { + // Host is the fully qualified domain name of a network host, as defined + // by RFC 3986. Note the following deviations from the "host" part of the + // URI as defined in the RFC: + // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the + // IP in the Spec of the parent Ingress. + // 2. 
The `:` delimiter is not respected because ports are not allowed. + // Currently the port of an Ingress is implicitly :80 for http and + // :443 for https. + // Both these may change in the future. + // Incoming requests are matched against the host before the IngressRuleValue. + // If the host is unspecified, the Ingress routes all traffic based on the + // specified IngressRuleValue. + // +optional + Host string + // IngressRuleValue represents a rule to route requests for this IngressRule. + // If unspecified, the rule defaults to a http catch-all. Whether that sends + // just traffic matching the host to the default backend or all traffic to the + // default backend, is left to the controller fulfilling the Ingress. Http is + // currently the only supported IngressRuleValue. + // +optional + IngressRuleValue +} + +// IngressRuleValue represents a rule to apply against incoming requests. If the +// rule is satisfied, the request is routed to the specified backend. Currently +// mixing different types of rules in a single Ingress is disallowed, so exactly +// one of the following must be set. +type IngressRuleValue struct { + //TODO: + // 1. Consider renaming this resource and the associated rules so they + // aren't tied to Ingress. They can be used to route intra-cluster traffic. + // 2. Consider adding fields for ingress-type specific global options + // usable by a loadbalancer, like http keep-alive. + + // +optional + HTTP *HTTPIngressRuleValue +} + +// HTTPIngressRuleValue is a list of http selectors pointing to backends. +// In the example: http://<host>/<path>?<searchpart> -> backend where +// where parts of the url correspond to RFC 3986, this resource will be used +// to match against everything after the last '/' and before the first '?' +// or '#'. +type HTTPIngressRuleValue struct { + // A collection of paths that map requests to backends. + Paths []HTTPIngressPath + // TODO: Consider adding fields for ingress-type specific global + // options usable by a loadbalancer, like http keep-alive. +} + +// HTTPIngressPath associates a path regex with a backend. Incoming urls matching +// the path are forwarded to the backend. +type HTTPIngressPath struct { + // Path is an extended POSIX regex as defined by IEEE Std 1003.1, + // (i.e this follows the egrep/unix syntax, not the perl syntax) + // matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" + // part of a URL as defined by RFC 3986. Paths must begin with + // a '/'. If unspecified, the path defaults to a catch all sending + // traffic to the backend. + // +optional + Path string + + // Backend defines the referenced service endpoint to which the traffic + // will be forwarded to. + Backend IngressBackend +} + +// IngressBackend describes all endpoints for a given service and port. +type IngressBackend struct { + // Specifies the name of the referenced service. + ServiceName string + + // Specifies the port of the referenced service. + ServicePort intstr.IntOrString +} + +// +genclient=true + +// ReplicaSet represents the configuration of a replica set. +type ReplicaSet struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired behavior of this ReplicaSet. + // +optional + Spec ReplicaSetSpec + + // Status is the current status of this ReplicaSet. This data may be + // out of date by some window of time. + // +optional + Status ReplicaSetStatus +} + +// ReplicaSetList is a collection of ReplicaSets. 
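// ---------------------------------------------------------------------------
// Editorial note, not part of the vendored file above: a hedged sketch of how
// the Ingress rule/path/backend structures defined in this package fit
// together. Host, path, service name, and port are illustrative values only;
// real clients would normally build the versioned v1beta1 types rather than
// these internal ones.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/pkg/apis/extensions"
)

func main() {
	ing := extensions.Ingress{
		Spec: extensions.IngressSpec{
			Rules: []extensions.IngressRule{{
				Host: "example.com",
				IngressRuleValue: extensions.IngressRuleValue{
					HTTP: &extensions.HTTPIngressRuleValue{
						Paths: []extensions.HTTPIngressPath{{
							Path: "/api",
							Backend: extensions.IngressBackend{
								ServiceName: "api-svc",
								ServicePort: intstr.FromInt(8080),
							},
						}},
					},
				},
			}},
		},
	}
	// Requests for example.com/api would be routed to api-svc:8080.
	fmt.Println(ing.Spec.Rules[0].Host)
}
// ---------------------------------------------------------------------------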
+type ReplicaSetList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ReplicaSet +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +// As the internal representation of a ReplicaSet, it must have +// a Template set. +type ReplicaSetSpec struct { + // Replicas is the number of desired replicas. + Replicas int32 + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 + + // Selector is a label query over pods that should match the replica count. + // Must match in order to be controlled. + // If empty, defaulted to labels on pod template. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector *metav1.LabelSelector + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // +optional + Template api.PodTemplateSpec +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +type ReplicaSetStatus struct { + // Replicas is the number of actual replicas. + Replicas int32 + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + FullyLabeledReplicas int32 + + // The number of ready replicas for this replica set. + // +optional + ReadyReplicas int32 + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // +optional + AvailableReplicas int32 + + // ObservedGeneration is the most recent generation observed by the controller. + // +optional + ObservedGeneration int64 + + // Represents the latest available observations of a replica set's current state. + // +optional + Conditions []ReplicaSetCondition +} + +type ReplicaSetConditionType string + +// These are valid conditions of a replica set. +const ( + // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created + // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted + // due to kubelet being down or finalizers are failing. + ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure" +) + +// ReplicaSetCondition describes the state of a replica set at a certain point. +type ReplicaSetCondition struct { + // Type of replica set condition. + Type ReplicaSetConditionType + // Status of the condition, one of True, False, Unknown. + Status api.ConditionStatus + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time + // The reason for the condition's last transition. + // +optional + Reason string + // A human readable message indicating details about the transition. + // +optional + Message string +} + +// +genclient=true +// +nonNamespaced=true + +// PodSecurityPolicy governs the ability to make requests that affect the SecurityContext +// that will be applied to a pod and container. +type PodSecurityPolicy struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the policy enforced. + // +optional + Spec PodSecurityPolicySpec +} + +// PodSecurityPolicySpec defines the policy enforced. +type PodSecurityPolicySpec struct { + // Privileged determines if a pod can request to be run as privileged. 
+ // +optional + Privileged bool + // DefaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. You may not list a capability in both + // DefaultAddCapabilities and RequiredDropCapabilities. + // +optional + DefaultAddCapabilities []api.Capability + // RequiredDropCapabilities are the capabilities that will be dropped from the container. These + // are required to be dropped and cannot be added. + // +optional + RequiredDropCapabilities []api.Capability + // AllowedCapabilities is a list of capabilities that can be requested to add to the container. + // Capabilities in this field may be added at the pod author's discretion. + // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. + // +optional + AllowedCapabilities []api.Capability + // Volumes is a white list of allowed volume plugins. Empty indicates that all plugins + // may be used. + // +optional + Volumes []FSType + // HostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + // +optional + HostNetwork bool + // HostPorts determines which host port ranges are allowed to be exposed. + // +optional + HostPorts []HostPortRange + // HostPID determines if the policy allows the use of HostPID in the pod spec. + // +optional + HostPID bool + // HostIPC determines if the policy allows the use of HostIPC in the pod spec. + // +optional + HostIPC bool + // SELinux is the strategy that will dictate the allowable labels that may be set. + SELinux SELinuxStrategyOptions + // RunAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. + RunAsUser RunAsUserStrategyOptions + // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + SupplementalGroups SupplementalGroupsStrategyOptions + // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. + FSGroup FSGroupStrategyOptions + // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file + // system. If the container specifically requests to run with a non-read only root file system + // the PSP should deny the pod. + // If set to false the container may run with a read only root file system if it wishes but it + // will not be forced to. + // +optional + ReadOnlyRootFilesystem bool +} + +// HostPortRange defines a range of host ports that will be enabled by a policy +// for pods to use. It requires both the start and end to be defined. +type HostPortRange struct { + // Min is the start of the range, inclusive. + Min int + // Max is the end of the range, inclusive. + Max int +} + +// FSType gives strong typing to different file systems that are used by volumes. 
+type FSType string + +var ( + AzureFile FSType = "azureFile" + Flocker FSType = "flocker" + FlexVolume FSType = "flexVolume" + HostPath FSType = "hostPath" + EmptyDir FSType = "emptyDir" + GCEPersistentDisk FSType = "gcePersistentDisk" + AWSElasticBlockStore FSType = "awsElasticBlockStore" + GitRepo FSType = "gitRepo" + Secret FSType = "secret" + NFS FSType = "nfs" + ISCSI FSType = "iscsi" + Glusterfs FSType = "glusterfs" + PersistentVolumeClaim FSType = "persistentVolumeClaim" + RBD FSType = "rbd" + Cinder FSType = "cinder" + CephFS FSType = "cephFS" + DownwardAPI FSType = "downwardAPI" + FC FSType = "fc" + ConfigMap FSType = "configMap" + VsphereVolume FSType = "vsphereVolume" + Quobyte FSType = "quobyte" + AzureDisk FSType = "azureDisk" + PhotonPersistentDisk FSType = "photonPersistentDisk" + Projected FSType = "projected" + PortworxVolume FSType = "portworxVolume" + ScaleIO FSType = "scaleIO" + All FSType = "*" +) + +// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. +type SELinuxStrategyOptions struct { + // Rule is the strategy that will dictate the allowable labels that may be set. + Rule SELinuxStrategy + // seLinuxOptions required to run as; required for MustRunAs + // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context + // +optional + SELinuxOptions *api.SELinuxOptions +} + +// SELinuxStrategy denotes strategy types for generating SELinux options for a +// Security. +type SELinuxStrategy string + +const ( + // container must have SELinux labels of X applied. + SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" + // container may make requests for any SELinux context labels. + SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" +) + +// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. +type RunAsUserStrategyOptions struct { + // Rule is the strategy that will dictate the allowable RunAsUser values that may be set. + Rule RunAsUserStrategy + // Ranges are the allowed ranges of uids that may be used. + // +optional + Ranges []IDRange +} + +// IDRange provides a min/max of an allowed range of IDs. +type IDRange struct { + // Min is the start of the range, inclusive. + Min int64 + // Max is the end of the range, inclusive. + Max int64 +} + +// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a +// SecurityContext. +type RunAsUserStrategy string + +const ( + // container must run as a particular uid. + RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" + // container must run as a non-root uid + RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" + // container may make requests for any uid. + RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" +) + +// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. +type FSGroupStrategyOptions struct { + // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext. + // +optional + Rule FSGroupStrategyType + // Ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. + // +optional + Ranges []IDRange +} + +// FSGroupStrategyType denotes strategy types for generating FSGroup values for a +// SecurityContext +type FSGroupStrategyType string + +const ( + // container must have FSGroup of X applied. 
+ FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" + // container may make requests for any FSGroup labels. + FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" +) + +// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +type SupplementalGroupsStrategyOptions struct { + // Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. + // +optional + Rule SupplementalGroupsStrategyType + // Ranges are the allowed ranges of supplemental groups. If you would like to force a single + // supplemental group then supply a single range with the same start and end. + // +optional + Ranges []IDRange +} + +// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental +// groups for a SecurityContext. +type SupplementalGroupsStrategyType string + +const ( + // container must run as a particular gid. + SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" + // container may make requests for any gid. + SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" +) + +// PodSecurityPolicyList is a list of PodSecurityPolicy objects. +type PodSecurityPolicyList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []PodSecurityPolicy +} + +// +genclient=true + +type NetworkPolicy struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Specification of the desired behavior for this NetworkPolicy. + // +optional + Spec NetworkPolicySpec +} + +type NetworkPolicySpec struct { + // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules + // is applied to any pods selected by this field. Multiple network policies can select the + // same set of pods. In this case, the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. + PodSelector metav1.LabelSelector + + // List of ingress rules to be applied to the selected pods. + // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, + // OR if the traffic source is the pod's local node, + // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy + // objects whose podSelector matches the pod. + // If this field is empty then this NetworkPolicy does not affect ingress isolation. + // If this field is present and contains at least one rule, this policy allows any traffic + // which matches at least one of the ingress rules in this list. + // +optional + Ingress []NetworkPolicyIngressRule +} + +// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. +type NetworkPolicyIngressRule struct { + // List of ports which should be made accessible on the pods selected for this rule. + // Each item in this list is combined using a logical OR. + // If this field is not provided, this rule matches all ports (traffic not restricted by port). + // If this field is empty, this rule matches no ports (no traffic matches). + // If this field is present and contains at least one item, then this rule allows traffic + // only if the traffic matches at least one port in the list. + // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. 
+ // +optional + Ports []NetworkPolicyPort + + // List of sources which should be able to access the pods selected for this rule. + // Items in this list are combined using a logical OR operation. + // If this field is not provided, this rule matches all sources (traffic not restricted by source). + // If this field is empty, this rule matches no sources (no traffic matches). + // If this field is present and contains at least on item, this rule allows traffic only if the + // traffic matches at least one item in the from list. + // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. + // +optional + From []NetworkPolicyPeer +} + +type NetworkPolicyPort struct { + // Optional. The protocol (TCP or UDP) which traffic must match. + // If not specified, this field defaults to TCP. + // +optional + Protocol *api.Protocol + + // If specified, the port on the given protocol. This can + // either be a numerical or named port on a pod. If this field is not provided, + // this matches all port names and numbers. + // If present, only traffic on the specified protocol AND port + // will be matched. + // +optional + Port *intstr.IntOrString +} + +type NetworkPolicyPeer struct { + // Exactly one of the following must be specified. + + // This is a label selector which selects Pods in this namespace. + // This field follows standard label selector semantics. + // If not provided, this selector selects no pods. + // If present but empty, this selector selects all pods in this namespace. + // +optional + PodSelector *metav1.LabelSelector + + // Selects Namespaces using cluster scoped-labels. This + // matches all pods in all namespaces selected by this label selector. + // This field follows standard label selector semantics. + // If omitted, this selector selects no namespaces. + // If present but empty, this selector selects all namespaces. + // +optional + NamespaceSelector *metav1.LabelSelector +} + +// NetworkPolicyList is a list of NetworkPolicy objects. +type NetworkPolicyList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []NetworkPolicy +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/conversion.go new file mode 100644 index 000000000..d53dbc47a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/conversion.go @@ -0,0 +1,262 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + v1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/pkg/apis/extensions" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + err := scheme.AddConversionFuncs( + Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, + Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, + Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, + Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, + Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy, + Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy, + Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, + Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, + Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet, + Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet, + Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec, + Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec, + ) + if err != nil { + return err + } + + // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. + for _, k := range []string{"DaemonSet", "Deployment", "Ingress"} { + kind := k // don't close over range variables + err = scheme.AddFieldLabelConversionFunc("extensions/v1beta1", kind, + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", "metadata.namespace": + return label, value, nil + default: + return "", "", fmt.Errorf("field label %q not supported for %q", label, kind) + } + }, + ) + if err != nil { + return err + } + } + + return nil +} + +func Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { + out.Replicas = int32(in.Replicas) + + out.Selector = nil + out.TargetSelector = "" + if in.Selector != nil { + if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { + out.Selector = in.Selector.MatchLabels + } + + selector, err := metav1.LabelSelectorAsSelector(in.Selector) + if err != nil { + return fmt.Errorf("invalid label selector: %v", err) + } + out.TargetSelector = selector.String() + } + return nil +} + +func Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + + // Normally when 2 fields map to the same internal value we favor the old field, since + // old clients can't be expected to know about new fields but clients that know about the + // new field can be expected to know about the old field (though that's not quite true, due + // to kubectl apply). However, these fields are readonly, so any non-nil value should work. 
+ if in.TargetSelector != "" { + labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) + if err != nil { + out.Selector = nil + return fmt.Errorf("failed to parse target selector: %v", err) + } + out.Selector = labelSelector + } else if in.Selector != nil { + out.Selector = new(metav1.LabelSelector) + selector := make(map[string]string) + for key, val := range in.Selector { + selector[key] = val + } + out.Selector.MatchLabels = selector + } else { + out.Selector = nil + } + return nil +} + +func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error { + out.Replicas = &in.Replicas + out.Selector = in.Selector + if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + return err + } + if in.RevisionHistoryLimit != nil { + out.RevisionHistoryLimit = new(int32) + *out.RevisionHistoryLimit = int32(*in.RevisionHistoryLimit) + } + out.MinReadySeconds = int32(in.MinReadySeconds) + out.Paused = in.Paused + if in.RollbackTo != nil { + out.RollbackTo = new(RollbackConfig) + out.RollbackTo.Revision = int64(in.RollbackTo.Revision) + } else { + out.RollbackTo = nil + } + if in.ProgressDeadlineSeconds != nil { + out.ProgressDeadlineSeconds = new(int32) + *out.ProgressDeadlineSeconds = *in.ProgressDeadlineSeconds + } + return nil +} + +func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { + if in.Replicas != nil { + out.Replicas = *in.Replicas + } + out.Selector = in.Selector + if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + return err + } + out.RevisionHistoryLimit = in.RevisionHistoryLimit + out.MinReadySeconds = in.MinReadySeconds + out.Paused = in.Paused + if in.RollbackTo != nil { + out.RollbackTo = new(extensions.RollbackConfig) + out.RollbackTo.Revision = in.RollbackTo.Revision + } else { + out.RollbackTo = nil + } + if in.ProgressDeadlineSeconds != nil { + out.ProgressDeadlineSeconds = new(int32) + *out.ProgressDeadlineSeconds = *in.ProgressDeadlineSeconds + } + return nil +} + +func Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error { + out.Type = DeploymentStrategyType(in.Type) + if in.RollingUpdate != nil { + out.RollingUpdate = new(RollingUpdateDeployment) + if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { + out.Type = extensions.DeploymentStrategyType(in.Type) + if in.RollingUpdate != nil { + out.RollingUpdate = new(extensions.RollingUpdateDeployment) + if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func 
Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error { + if out.MaxUnavailable == nil { + out.MaxUnavailable = &intstr.IntOrString{} + } + if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil { + return err + } + if out.MaxSurge == nil { + out.MaxSurge = &intstr.IntOrString{} + } + if err := s.Convert(&in.MaxSurge, out.MaxSurge, 0); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { + if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { + return err + } + if err := s.Convert(in.MaxSurge, &out.MaxSurge, 0); err != nil { + return err + } + return nil +} + +func Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *RollingUpdateDaemonSet, s conversion.Scope) error { + if out.MaxUnavailable == nil { + out.MaxUnavailable = &intstr.IntOrString{} + } + if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error { + if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { + return err + } + return nil +} + +func Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *ReplicaSetSpec, s conversion.Scope) error { + out.Replicas = new(int32) + *out.Replicas = int32(in.Replicas) + out.MinReadySeconds = in.MinReadySeconds + out.Selector = in.Selector + if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { + if in.Replicas != nil { + out.Replicas = *in.Replicas + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = in.Selector + if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/defaults.go new file mode 100644 index 000000000..298c568db --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/defaults.go @@ -0,0 +1,138 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/pkg/api/v1" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + RegisterDefaults(scheme) + return scheme.AddDefaultingFuncs( + SetDefaults_DaemonSet, + SetDefaults_Deployment, + SetDefaults_ReplicaSet, + SetDefaults_NetworkPolicy, + ) +} + +func SetDefaults_DaemonSet(obj *DaemonSet) { + labels := obj.Spec.Template.Labels + + // TODO: support templates defined elsewhere when we support them in the API + if labels != nil { + if obj.Spec.Selector == nil { + obj.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: labels, + } + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } + updateStrategy := &obj.Spec.UpdateStrategy + if updateStrategy.Type == "" { + updateStrategy.Type = OnDeleteDaemonSetStrategyType + } + if updateStrategy.Type == RollingUpdateDaemonSetStrategyType { + if updateStrategy.RollingUpdate == nil { + rollingUpdate := RollingUpdateDaemonSet{} + updateStrategy.RollingUpdate = &rollingUpdate + } + if updateStrategy.RollingUpdate.MaxUnavailable == nil { + // Set default MaxUnavailable as 1 by default. + maxUnavailable := intstr.FromInt(1) + updateStrategy.RollingUpdate.MaxUnavailable = &maxUnavailable + } + } +} + +func SetDefaults_Deployment(obj *Deployment) { + // Default labels and selector to labels from pod template spec. + labels := obj.Spec.Template.Labels + + if labels != nil { + if obj.Spec.Selector == nil { + obj.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels} + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } + // Set DeploymentSpec.Replicas to 1 if it is not set. + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } + strategy := &obj.Spec.Strategy + // Set default DeploymentStrategyType as RollingUpdate. + if strategy.Type == "" { + strategy.Type = RollingUpdateDeploymentStrategyType + } + if strategy.Type == RollingUpdateDeploymentStrategyType || strategy.RollingUpdate != nil { + if strategy.RollingUpdate == nil { + rollingUpdate := RollingUpdateDeployment{} + strategy.RollingUpdate = &rollingUpdate + } + if strategy.RollingUpdate.MaxUnavailable == nil { + // Set default MaxUnavailable as 1 by default. + maxUnavailable := intstr.FromInt(1) + strategy.RollingUpdate.MaxUnavailable = &maxUnavailable + } + if strategy.RollingUpdate.MaxSurge == nil { + // Set default MaxSurge as 1 by default. + maxSurge := intstr.FromInt(1) + strategy.RollingUpdate.MaxSurge = &maxSurge + } + } +} + +func SetDefaults_ReplicaSet(obj *ReplicaSet) { + labels := obj.Spec.Template.Labels + + // TODO: support templates defined elsewhere when we support them in the API + if labels != nil { + if obj.Spec.Selector == nil { + obj.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: labels, + } + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } +} + +func SetDefaults_NetworkPolicy(obj *NetworkPolicy) { + // Default any undefined Protocol fields to TCP. + for _, i := range obj.Spec.Ingress { + // TODO: Update Ports to be a pointer to slice as soon as auto-generation supports it. 
+ for _, p := range i.Ports { + if p.Protocol == nil { + proto := v1.ProtocolTCP + p.Protocol = &proto + } + } + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/doc.go new file mode 100644 index 000000000..a397b30e9 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/generated.pb.go new file mode 100644 index 000000000..1c72f7311 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/generated.pb.go @@ -0,0 +1,11993 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto + + It has these top-level messages: + APIVersion + CustomMetricCurrentStatus + CustomMetricCurrentStatusList + CustomMetricTarget + CustomMetricTargetList + DaemonSet + DaemonSetList + DaemonSetSpec + DaemonSetStatus + DaemonSetUpdateStrategy + Deployment + DeploymentCondition + DeploymentList + DeploymentRollback + DeploymentSpec + DeploymentStatus + DeploymentStrategy + FSGroupStrategyOptions + HTTPIngressPath + HTTPIngressRuleValue + HostPortRange + IDRange + Ingress + IngressBackend + IngressList + IngressRule + IngressRuleValue + IngressSpec + IngressStatus + IngressTLS + NetworkPolicy + NetworkPolicyIngressRule + NetworkPolicyList + NetworkPolicyPeer + NetworkPolicyPort + NetworkPolicySpec + PodSecurityPolicy + PodSecurityPolicyList + PodSecurityPolicySpec + ReplicaSet + ReplicaSetCondition + ReplicaSetList + ReplicaSetSpec + ReplicaSetStatus + ReplicationControllerDummy + RollbackConfig + RollingUpdateDaemonSet + RollingUpdateDeployment + RunAsUserStrategyOptions + SELinuxStrategyOptions + Scale + ScaleSpec + ScaleStatus + SupplementalGroupsStrategyOptions + ThirdPartyResource + ThirdPartyResourceData + ThirdPartyResourceDataList + ThirdPartyResourceList +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/pkg/api/v1" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *APIVersion) Reset() { *m = APIVersion{} } +func (*APIVersion) ProtoMessage() {} +func (*APIVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *CustomMetricCurrentStatus) Reset() { *m = CustomMetricCurrentStatus{} } +func (*CustomMetricCurrentStatus) ProtoMessage() {} +func (*CustomMetricCurrentStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *CustomMetricCurrentStatusList) Reset() { *m = CustomMetricCurrentStatusList{} } +func (*CustomMetricCurrentStatusList) ProtoMessage() {} +func (*CustomMetricCurrentStatusList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + +func (m *CustomMetricTarget) Reset() { *m = CustomMetricTarget{} } +func (*CustomMetricTarget) ProtoMessage() {} +func (*CustomMetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *CustomMetricTargetList) Reset() { *m = CustomMetricTargetList{} } +func (*CustomMetricTargetList) ProtoMessage() {} +func (*CustomMetricTargetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } +func (*DeploymentRollback) ProtoMessage() {} +func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func 
(*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } + +func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } +func (*FSGroupStrategyOptions) ProtoMessage() {} +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } + +func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } +func (*HTTPIngressPath) ProtoMessage() {} +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } + +func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } +func (*HTTPIngressRuleValue) ProtoMessage() {} +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } + +func (m *HostPortRange) Reset() { *m = HostPortRange{} } +func (*HostPortRange) ProtoMessage() {} +func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } + +func (m *IDRange) Reset() { *m = IDRange{} } +func (*IDRange) ProtoMessage() {} +func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } + +func (m *Ingress) Reset() { *m = Ingress{} } +func (*Ingress) ProtoMessage() {} +func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } + +func (m *IngressBackend) Reset() { *m = IngressBackend{} } +func (*IngressBackend) ProtoMessage() {} +func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } + +func (m *IngressList) Reset() { *m = IngressList{} } +func (*IngressList) ProtoMessage() {} +func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } + +func (m *IngressRule) Reset() { *m = IngressRule{} } +func (*IngressRule) ProtoMessage() {} +func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } + +func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } +func (*IngressRuleValue) ProtoMessage() {} +func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } + +func (m *IngressSpec) Reset() { *m = IngressSpec{} } +func (*IngressSpec) ProtoMessage() {} +func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } + +func (m *IngressStatus) Reset() { *m = IngressStatus{} } +func (*IngressStatus) ProtoMessage() {} +func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } + +func (m *IngressTLS) Reset() { *m = IngressTLS{} } +func (*IngressTLS) ProtoMessage() {} +func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } + +func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } +func (*NetworkPolicy) ProtoMessage() {} +func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } + +func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } +func (*NetworkPolicyIngressRule) ProtoMessage() {} +func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{31} +} + +func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } +func (*NetworkPolicyList) ProtoMessage() {} +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } + +func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } +func (*NetworkPolicyPeer) ProtoMessage() {} +func (*NetworkPolicyPeer) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{33} } + +func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } +func (*NetworkPolicyPort) ProtoMessage() {} +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } + +func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } +func (*NetworkPolicySpec) ProtoMessage() {} +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } + +func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } +func (*PodSecurityPolicy) ProtoMessage() {} +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } + +func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } +func (*PodSecurityPolicyList) ProtoMessage() {} +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } + +func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } +func (*PodSecurityPolicySpec) ProtoMessage() {} +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } + +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } + +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } + +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } + +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } + +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } + +func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} } +func (*ReplicationControllerDummy) ProtoMessage() {} +func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{44} +} + +func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } +func (*RollbackConfig) ProtoMessage() {} +func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } + +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } + +func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } +func (*RollingUpdateDeployment) ProtoMessage() {} +func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{47} +} + +func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } +func (*RunAsUserStrategyOptions) ProtoMessage() {} +func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{48} +} + +func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } +func (*SELinuxStrategyOptions) ProtoMessage() {} +func (*SELinuxStrategyOptions) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{49} } + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } + +func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } +func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} +func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{53} +} + +func (m *ThirdPartyResource) Reset() { *m = ThirdPartyResource{} } +func (*ThirdPartyResource) ProtoMessage() {} +func (*ThirdPartyResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } + +func (m *ThirdPartyResourceData) Reset() { *m = ThirdPartyResourceData{} } +func (*ThirdPartyResourceData) ProtoMessage() {} +func (*ThirdPartyResourceData) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } + +func (m *ThirdPartyResourceDataList) Reset() { *m = ThirdPartyResourceDataList{} } +func (*ThirdPartyResourceDataList) ProtoMessage() {} +func (*ThirdPartyResourceDataList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{56} +} + +func (m *ThirdPartyResourceList) Reset() { *m = ThirdPartyResourceList{} } +func (*ThirdPartyResourceList) ProtoMessage() {} +func (*ThirdPartyResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } + +func init() { + proto.RegisterType((*APIVersion)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.APIVersion") + proto.RegisterType((*CustomMetricCurrentStatus)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.CustomMetricCurrentStatus") + proto.RegisterType((*CustomMetricCurrentStatusList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.CustomMetricCurrentStatusList") + proto.RegisterType((*CustomMetricTarget)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.CustomMetricTarget") + proto.RegisterType((*CustomMetricTargetList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.CustomMetricTargetList") + proto.RegisterType((*DaemonSet)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DaemonSet") + proto.RegisterType((*DaemonSetList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DaemonSetList") + proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DaemonSetSpec") + proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DaemonSetStatus") + proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DaemonSetUpdateStrategy") + proto.RegisterType((*Deployment)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.Deployment") + proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DeploymentCondition") + proto.RegisterType((*DeploymentList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DeploymentList") + proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DeploymentRollback") + proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DeploymentSpec") + 
proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DeploymentStatus") + proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.DeploymentStrategy") + proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.FSGroupStrategyOptions") + proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.HTTPIngressPath") + proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.HTTPIngressRuleValue") + proto.RegisterType((*HostPortRange)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.HostPortRange") + proto.RegisterType((*IDRange)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IDRange") + proto.RegisterType((*Ingress)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.Ingress") + proto.RegisterType((*IngressBackend)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IngressBackend") + proto.RegisterType((*IngressList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IngressList") + proto.RegisterType((*IngressRule)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IngressRule") + proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IngressRuleValue") + proto.RegisterType((*IngressSpec)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IngressSpec") + proto.RegisterType((*IngressStatus)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IngressStatus") + proto.RegisterType((*IngressTLS)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.IngressTLS") + proto.RegisterType((*NetworkPolicy)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.NetworkPolicy") + proto.RegisterType((*NetworkPolicyIngressRule)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.NetworkPolicyIngressRule") + proto.RegisterType((*NetworkPolicyList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.NetworkPolicyList") + proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.NetworkPolicyPeer") + proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.NetworkPolicyPort") + proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.NetworkPolicySpec") + proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.PodSecurityPolicy") + proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.PodSecurityPolicyList") + proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.PodSecurityPolicySpec") + proto.RegisterType((*ReplicaSet)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ReplicaSet") + proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ReplicaSetCondition") + proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ReplicaSetList") + proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ReplicaSetSpec") + proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ReplicaSetStatus") + proto.RegisterType((*ReplicationControllerDummy)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ReplicationControllerDummy") + proto.RegisterType((*RollbackConfig)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.RollbackConfig") + proto.RegisterType((*RollingUpdateDaemonSet)(nil), 
"k8s.io.client-go.pkg.apis.extensions.v1beta1.RollingUpdateDaemonSet") + proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.RollingUpdateDeployment") + proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.RunAsUserStrategyOptions") + proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.SELinuxStrategyOptions") + proto.RegisterType((*Scale)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ScaleStatus") + proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.SupplementalGroupsStrategyOptions") + proto.RegisterType((*ThirdPartyResource)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ThirdPartyResource") + proto.RegisterType((*ThirdPartyResourceData)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ThirdPartyResourceData") + proto.RegisterType((*ThirdPartyResourceDataList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ThirdPartyResourceDataList") + proto.RegisterType((*ThirdPartyResourceList)(nil), "k8s.io.client-go.pkg.apis.extensions.v1beta1.ThirdPartyResourceList") +} +func (m *APIVersion) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *APIVersion) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + return i, nil +} + +func (m *CustomMetricCurrentStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CustomMetricCurrentStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentValue.Size())) + n1, err := m.CurrentValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + return i, nil +} + +func (m *CustomMetricCurrentStatusList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CustomMetricCurrentStatusList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CustomMetricTarget) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CustomMetricTarget) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, 
uint64(m.TargetValue.Size())) + n2, err := m.TargetValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func (m *CustomMetricTargetList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *CustomMetricTargetList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DaemonSet) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DaemonSet) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n3, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n4, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n5, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *DaemonSetList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DaemonSetList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DaemonSetSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DaemonSetSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Selector != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n7, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n8, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.UpdateStrategy.Size())) + n9, err := m.UpdateStrategy.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.TemplateGeneration)) + return i, nil +} + +func (m *DaemonSetStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + 
return nil, err + } + return data[:n], nil +} + +func (m *DaemonSetStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentNumberScheduled)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NumberMisscheduled)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DesiredNumberScheduled)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NumberReady)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(m.UpdatedNumberScheduled)) + data[i] = 0x38 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NumberAvailable)) + data[i] = 0x40 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NumberUnavailable)) + return i, nil +} + +func (m *DaemonSetUpdateStrategy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DaemonSetUpdateStrategy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.RollingUpdate != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RollingUpdate.Size())) + n10, err := m.RollingUpdate.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + } + return i, nil +} + +func (m *Deployment) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Deployment) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n11, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n12, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n13, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n13 + return i, nil +} + +func (m *DeploymentCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastUpdateTime.Size())) + n14, err := m.LastUpdateTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n14 + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n15, err := 
m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n15 + return i, nil +} + +func (m *DeploymentList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n16, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n16 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DeploymentRollback) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentRollback) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + if len(m.UpdatedAnnotations) > 0 { + for k := range m.UpdatedAnnotations { + data[i] = 0x12 + i++ + v := m.UpdatedAnnotations[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size())) + n17, err := m.RollbackTo.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n17 + return i, nil +} + +func (m *DeploymentSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n18, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n18 + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n19, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n19 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Strategy.Size())) + n20, err := m.Strategy.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n20 + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.RevisionHistoryLimit)) + } + data[i] = 0x38 + i++ + if m.Paused { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if m.RollbackTo != nil { + data[i] = 0x42 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size())) + n21, err := m.RollbackTo.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if m.ProgressDeadlineSeconds != nil { + data[i] = 0x48 
+ i++ + i = encodeVarintGenerated(data, i, uint64(*m.ProgressDeadlineSeconds)) + } + return i, nil +} + +func (m *DeploymentStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.UpdatedReplicas)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AvailableReplicas)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x38 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ReadyReplicas)) + return i, nil +} + +func (m *DeploymentStrategy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *DeploymentStrategy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + if m.RollingUpdate != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.RollingUpdate.Size())) + n22, err := m.RollingUpdate.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n22 + } + return i, nil +} + +func (m *FSGroupStrategyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *FSGroupStrategyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) + i += copy(data[i:], m.Rule) + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HTTPIngressPath) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HTTPIngressPath) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Path))) + i += copy(data[i:], m.Path) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Backend.Size())) + n23, err := m.Backend.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n23 + return i, nil +} + +func (m *HTTPIngressRuleValue) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HTTPIngressRuleValue) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Paths) > 0 { + for _, msg := 
range m.Paths { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HostPortRange) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *HostPortRange) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Min)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Max)) + return i, nil +} + +func (m *IDRange) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IDRange) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Min)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Max)) + return i, nil +} + +func (m *Ingress) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Ingress) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n24, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n24 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n25, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n25 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n26, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n26 + return i, nil +} + +func (m *IngressBackend) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IngressBackend) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ServiceName))) + i += copy(data[i:], m.ServiceName) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ServicePort.Size())) + n27, err := m.ServicePort.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n27 + return i, nil +} + +func (m *IngressList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IngressList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n28, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n28 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *IngressRule) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, 
err + } + return data[:n], nil +} + +func (m *IngressRule) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Host))) + i += copy(data[i:], m.Host) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.IngressRuleValue.Size())) + n29, err := m.IngressRuleValue.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n29 + return i, nil +} + +func (m *IngressRuleValue) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IngressRuleValue) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.HTTP != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.HTTP.Size())) + n30, err := m.HTTP.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n30 + } + return i, nil +} + +func (m *IngressSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IngressSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Backend != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Backend.Size())) + n31, err := m.Backend.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n31 + } + if len(m.TLS) > 0 { + for _, msg := range m.TLS { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *IngressStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IngressStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.LoadBalancer.Size())) + n32, err := m.LoadBalancer.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n32 + return i, nil +} + +func (m *IngressTLS) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *IngressTLS) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.SecretName))) + i += copy(data[i:], m.SecretName) + return i, nil +} + +func (m *NetworkPolicy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NetworkPolicy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, 
uint64(m.ObjectMeta.Size())) + n33, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n33 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n34, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n34 + return i, nil +} + +func (m *NetworkPolicyIngressRule) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NetworkPolicyIngressRule) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.From) > 0 { + for _, msg := range m.From { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NetworkPolicyList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NetworkPolicyList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n35, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n35 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NetworkPolicyPeer) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NetworkPolicyPeer) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PodSelector != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodSelector.Size())) + n36, err := m.PodSelector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n36 + } + if m.NamespaceSelector != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.NamespaceSelector.Size())) + n37, err := m.NamespaceSelector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n37 + } + return i, nil +} + +func (m *NetworkPolicyPort) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *NetworkPolicyPort) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Protocol != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(*m.Protocol))) + i += copy(data[i:], *m.Protocol) + } + if m.Port != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Port.Size())) + n38, err := m.Port.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n38 + } + return i, nil +} + +func (m *NetworkPolicySpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil 
+} + +func (m *NetworkPolicySpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodSelector.Size())) + n39, err := m.PodSelector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n39 + if len(m.Ingress) > 0 { + for _, msg := range m.Ingress { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodSecurityPolicy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodSecurityPolicy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n40, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n40 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n41, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n41 + return i, nil +} + +func (m *PodSecurityPolicyList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodSecurityPolicyList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n42, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n42 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodSecurityPolicySpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodSecurityPolicySpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + if m.Privileged { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if len(m.DefaultAddCapabilities) > 0 { + for _, s := range m.DefaultAddCapabilities { + data[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.RequiredDropCapabilities) > 0 { + for _, s := range m.RequiredDropCapabilities { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.AllowedCapabilities) > 0 { + for _, s := range m.AllowedCapabilities { + data[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + data[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + data[i] = 0x30 + i++ + if m.HostNetwork { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + if len(m.HostPorts) > 
0 { + for _, msg := range m.HostPorts { + data[i] = 0x3a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x40 + i++ + if m.HostPID { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x48 + i++ + if m.HostIPC { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + data[i] = 0x52 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SELinux.Size())) + n43, err := m.SELinux.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n43 + data[i] = 0x5a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RunAsUser.Size())) + n44, err := m.RunAsUser.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n44 + data[i] = 0x62 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SupplementalGroups.Size())) + n45, err := m.SupplementalGroups.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n45 + data[i] = 0x6a + i++ + i = encodeVarintGenerated(data, i, uint64(m.FSGroup.Size())) + n46, err := m.FSGroup.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n46 + data[i] = 0x70 + i++ + if m.ReadOnlyRootFilesystem { + data[i] = 1 + } else { + data[i] = 0 + } + i++ + return i, nil +} + +func (m *ReplicaSet) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicaSet) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n47, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n47 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n48, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n48 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n49, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n49 + return i, nil +} + +func (m *ReplicaSetCondition) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicaSetCondition) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Type))) + i += copy(data[i:], m.Type) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Status))) + i += copy(data[i:], m.Status) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) + n50, err := m.LastTransitionTime.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n50 + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) + i += copy(data[i:], m.Reason) + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Message))) + i += copy(data[i:], m.Message) + return i, nil +} + +func (m *ReplicaSetList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicaSetList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n51, err := 
m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n51 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ReplicaSetSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicaSetSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n52, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n52 + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) + n53, err := m.Template.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n53 + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) + return i, nil +} + +func (m *ReplicaSetStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicaSetStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + data[i] = 0x10 + i++ + i = encodeVarintGenerated(data, i, uint64(m.FullyLabeledReplicas)) + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ReadyReplicas)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + data[i] = 0x32 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ReplicationControllerDummy) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ReplicationControllerDummy) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *RollbackConfig) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RollbackConfig) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Revision)) + return i, nil +} + +func (m *RollingUpdateDaemonSet) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RollingUpdateDaemonSet) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MaxUnavailable != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxUnavailable.Size())) + n54, err := m.MaxUnavailable.MarshalTo(data[i:]) + if err != nil { + return 
0, err + } + i += n54 + } + return i, nil +} + +func (m *RollingUpdateDeployment) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RollingUpdateDeployment) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MaxUnavailable != nil { + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxUnavailable.Size())) + n55, err := m.MaxUnavailable.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n55 + } + if m.MaxSurge != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.MaxSurge.Size())) + n56, err := m.MaxSurge.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n56 + } + return i, nil +} + +func (m *RunAsUserStrategyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RunAsUserStrategyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) + i += copy(data[i:], m.Rule) + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *SELinuxStrategyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SELinuxStrategyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) + i += copy(data[i:], m.Rule) + if m.SELinuxOptions != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size())) + n57, err := m.SELinuxOptions.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n57 + } + return i, nil +} + +func (m *Scale) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Scale) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n58, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n58 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n59, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n59 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n60, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n60 + return i, nil +} + +func (m *ScaleSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScaleSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + return i, nil +} + +func (m *ScaleStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = 
make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ScaleStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k := range m.Selector { + data[i] = 0x12 + i++ + v := m.Selector[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.TargetSelector))) + i += copy(data[i:], m.TargetSelector) + return i, nil +} + +func (m *SupplementalGroupsStrategyOptions) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *SupplementalGroupsStrategyOptions) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) + i += copy(data[i:], m.Rule) + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ThirdPartyResource) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ThirdPartyResource) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n61, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n61 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Description))) + i += copy(data[i:], m.Description) + if len(m.Versions) > 0 { + for _, msg := range m.Versions { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ThirdPartyResourceData) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ThirdPartyResourceData) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n62, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n62 + if m.Data != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Data))) + i += copy(data[i:], m.Data) + } + return i, nil +} + +func (m *ThirdPartyResourceDataList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ThirdPartyResourceDataList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = 
encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+	n63, err := m.ListMeta.MarshalTo(data[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n63
+	if len(m.Items) > 0 {
+		for _, msg := range m.Items {
+			data[i] = 0x12
+			i++
+			i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(data[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *ThirdPartyResourceList) Marshal() (data []byte, err error) {
+	size := m.Size()
+	data = make([]byte, size)
+	n, err := m.MarshalTo(data)
+	if err != nil {
+		return nil, err
+	}
+	return data[:n], nil
+}
+
+func (m *ThirdPartyResourceList) MarshalTo(data []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	data[i] = 0xa
+	i++
+	i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size()))
+	n64, err := m.ListMeta.MarshalTo(data[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n64
+	if len(m.Items) > 0 {
+		for _, msg := range m.Items {
+			data[i] = 0x12
+			i++
+			i = encodeVarintGenerated(data, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(data[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func encodeFixed64Generated(data []byte, offset int, v uint64) int {
+	data[offset] = uint8(v)
+	data[offset+1] = uint8(v >> 8)
+	data[offset+2] = uint8(v >> 16)
+	data[offset+3] = uint8(v >> 24)
+	data[offset+4] = uint8(v >> 32)
+	data[offset+5] = uint8(v >> 40)
+	data[offset+6] = uint8(v >> 48)
+	data[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Generated(data []byte, offset int, v uint32) int {
+	data[offset] = uint8(v)
+	data[offset+1] = uint8(v >> 8)
+	data[offset+2] = uint8(v >> 16)
+	data[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintGenerated(data []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		data[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	data[offset] = uint8(v)
+	return offset + 1
+}
+func (m *APIVersion) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *CustomMetricCurrentStatus) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.CurrentValue.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *CustomMetricCurrentStatusList) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *CustomMetricTarget) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.TargetValue.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *CustomMetricTargetList) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *DaemonSet) Size() (n int) {
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *DaemonSetList) Size() (n int) {
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *DaemonSetSpec) Size() (n int) {
+	var l int
+	_ = l
+	if m.Selector != nil {
+		l = m.Selector.Size()
+		n += 1 + l +
sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + n += 1 + sovGenerated(uint64(m.TemplateGeneration)) + return n +} + +func (m *DaemonSetStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) + n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberReady)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberAvailable)) + n += 1 + sovGenerated(uint64(m.NumberUnavailable)) + return n +} + +func (m *DaemonSetUpdateStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Deployment) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentRollback) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UpdatedAnnotations) > 0 { + for k, v := range m.UpdatedAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + if m.RollbackTo != nil { + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ProgressDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + } + return n +} + +func (m *DeploymentStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if 
len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + return n +} + +func (m *DeploymentStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *FSGroupStrategyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HTTPIngressPath) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HTTPIngressRuleValue) Size() (n int) { + var l int + _ = l + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HostPortRange) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Min)) + n += 1 + sovGenerated(uint64(m.Max)) + return n +} + +func (m *IDRange) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Min)) + n += 1 + sovGenerated(uint64(m.Max)) + return n +} + +func (m *Ingress) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressBackend) Size() (n int) { + var l int + _ = l + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ServicePort.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IngressRule) Size() (n int) { + var l int + _ = l + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = m.IngressRuleValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressRuleValue) Size() (n int) { + var l int + _ = l + if m.HTTP != nil { + l = m.HTTP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *IngressSpec) Size() (n int) { + var l int + _ = l + if m.Backend != nil { + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.TLS) > 0 { + for _, e := range m.TLS { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IngressStatus) Size() (n int) { + var l int + _ = l + l = m.LoadBalancer.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressTLS) Size() (n int) { + var l int + _ = l + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NetworkPolicy) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NetworkPolicyIngressRule) Size() (n int) { + var l int + _ = l + 
if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.From) > 0 { + for _, e := range m.From { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyPeer) Size() (n int) { + var l int + _ = l + if m.PodSelector != nil { + l = m.PodSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NetworkPolicyPort) Size() (n int) { + var l int + _ = l + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NetworkPolicySpec) Size() (n int) { + var l int + _ = l + l = m.PodSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSecurityPolicy) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityPolicyList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSecurityPolicySpec) Size() (n int) { + var l int + _ = l + n += 2 + if len(m.DefaultAddCapabilities) > 0 { + for _, s := range m.DefaultAddCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.RequiredDropCapabilities) > 0 { + for _, s := range m.RequiredDropCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedCapabilities) > 0 { + for _, s := range m.AllowedCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if len(m.HostPorts) > 0 { + for _, e := range m.HostPorts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + n += 2 + l = m.SELinux.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.RunAsUser.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.SupplementalGroups.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FSGroup.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *ReplicaSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetList) Size() (n 
int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ReplicaSetSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + return n +} + +func (m *ReplicaSetStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ReplicationControllerDummy) Size() (n int) { + var l int + _ = l + return n +} + +func (m *RollbackConfig) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Revision)) + return n +} + +func (m *RollingUpdateDaemonSet) Size() (n int) { + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollingUpdateDeployment) Size() (n int) { + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RunAsUserStrategyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SELinuxStrategyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Scale) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleSpec) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + return n +} + +func (m *ScaleStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.TargetSelector) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SupplementalGroupsStrategyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ThirdPartyResource) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Versions) > 0 { + 
for _, e := range m.Versions {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ThirdPartyResourceData) Size() (n int) {
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Data != nil {
+		l = len(m.Data)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *ThirdPartyResourceDataList) Size() (n int) {
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ThirdPartyResourceList) Size() (n int) {
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func sovGenerated(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozGenerated(x uint64) (n int) {
+	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *APIVersion) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&APIVersion{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CustomMetricCurrentStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CustomMetricCurrentStatus{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CustomMetricCurrentStatusList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CustomMetricCurrentStatusList{`,
+		`Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CustomMetricCurrentStatus", "CustomMetricCurrentStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CustomMetricTarget) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CustomMetricTarget{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CustomMetricTargetList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CustomMetricTargetList{`,
+		`Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CustomMetricTarget", "CustomMetricTarget", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DaemonSet) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DaemonSet{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DaemonSetList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DaemonSetList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta",
"k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetSpec{`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetStatus{`, + `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, + `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, + `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, + `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`, + `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, + `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetUpdateStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Deployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Deployment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentList) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&DeploymentList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentRollback) String() string { + if this == nil { + return "nil" + } + keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) + for k := range this.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + mapStringForUpdatedAnnotations := "map[string]string{" + for _, k := range keysForUpdatedAnnotations { + mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) + } + mapStringForUpdatedAnnotations += "}" + s := strings.Join([]string{`&DeploymentRollback{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, + `RollbackTo:` + strings.Replace(strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `RollbackTo:` + strings.Replace(fmt.Sprintf("%v", this.RollbackTo), "RollbackConfig", "RollbackConfig", 1) + `,`, + `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `}`, + }, "") + return s +} +func (this *FSGroupStrategyOptions) String() string { + if this == nil { + return 
"nil" + } + s := strings.Join([]string{`&FSGroupStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPIngressPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPIngressPath{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPIngressRuleValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPIngressRuleValue{`, + `Paths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Paths), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HostPortRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostPortRange{`, + `Min:` + fmt.Sprintf("%v", this.Min) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `}`, + }, "") + return s +} +func (this *IDRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IDRange{`, + `Min:` + fmt.Sprintf("%v", this.Min) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `}`, + }, "") + return s +} +func (this *Ingress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Ingress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressBackend) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressBackend{`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `ServicePort:` + strings.Replace(strings.Replace(this.ServicePort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Ingress", "Ingress", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressRule{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressRuleValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressRuleValue{`, + `HTTP:` + strings.Replace(fmt.Sprintf("%v", this.HTTP), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressSpec) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&IngressSpec{`, + `Backend:` + strings.Replace(fmt.Sprintf("%v", this.Backend), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TLS), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "IngressRule", "IngressRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "k8s_io_kubernetes_pkg_api_v1.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressTLS) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressTLS{`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyIngressRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicyIngressRule{`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyPeer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicyPeer{`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicyPort{`, + `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicySpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicySpec{`, + `PodSelector:` + strings.Replace(strings.Replace(this.PodSelector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 
1), `&`, ``, 1) + `,`, + `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicyList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicySpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicySpec{`, + `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, + `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, + `RequiredDropCapabilities:` + fmt.Sprintf("%v", this.RequiredDropCapabilities) + `,`, + `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, + `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, + `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, + `HostPorts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostPorts), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + `,`, + `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, + `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, + `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, + `RunAsUser:` + strings.Replace(strings.Replace(this.RunAsUser.String(), "RunAsUserStrategyOptions", "RunAsUserStrategyOptions", 1), `&`, ``, 1) + `,`, + `SupplementalGroups:` + strings.Replace(strings.Replace(this.SupplementalGroups.String(), "SupplementalGroupsStrategyOptions", "SupplementalGroupsStrategyOptions", 1), `&`, ``, 1) + `,`, + `FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`, + `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + 
`LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_kubernetes_pkg_api_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerDummy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationControllerDummy{`, + `}`, + }, "") + return s +} +func (this *RollbackConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollbackConfig{`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDaemonSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDaemonSet{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDeployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDeployment{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RunAsUserStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RunAsUserStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), 
`&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SELinuxStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SELinuxStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "k8s_io_kubernetes_pkg_api_v1.SELinuxOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Scale) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Scale{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleSpec{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleStatus) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, + `}`, + }, "") + return s +} +func (this *SupplementalGroupsStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ThirdPartyResource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ThirdPartyResource{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "APIVersion", "APIVersion", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ThirdPartyResourceData) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ThirdPartyResourceData{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + valueToStringGenerated(this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *ThirdPartyResourceDataList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ThirdPartyResourceDataList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ThirdPartyResourceData", "ThirdPartyResourceData", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ThirdPartyResourceList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ThirdPartyResourceList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ThirdPartyResource", "ThirdPartyResource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *APIVersion) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomMetricCurrentStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomMetricCurrentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomMetricCurrentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 
0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomMetricCurrentStatusList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomMetricCurrentStatusList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomMetricCurrentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CustomMetricCurrentStatus{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomMetricTarget) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomMetricTarget: wiretype end group for non-group") + } + if fieldNum <= 0 { + 
return fmt.Errorf("proto: CustomMetricTarget: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomMetricTargetList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomMetricTargetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomMetricTargetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CustomMetricTarget{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSet) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DaemonSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UpdateStrategy.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateGeneration", wireType) + } + m.TemplateGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.TemplateGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) + } + m.CurrentNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) + } + m.NumberMisscheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.NumberMisscheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) + } + m.DesiredNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) + } + m.NumberReady = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.NumberReady |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) + } + m.UpdatedNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) + } + m.NumberAvailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.NumberAvailable |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) + } + m.NumberUnavailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.NumberUnavailable |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetUpdateStrategy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DaemonSetUpdateStrategyType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDaemonSet{} + } + if err := m.RollingUpdate.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Deployment) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Deployment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentRollback) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.UpdatedAnnotations == nil { + m.UpdatedAnnotations = make(map[string]string) + } + m.UpdatedAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + 
m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Strategy.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollbackTo == nil { + m.RollbackTo = &RollbackConfig{} + } + if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressDeadlineSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStrategy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentStrategyType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDeployment{} + } + if err := m.RollingUpdate.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FSGroupStrategyOptions) Unmarshal(data []byte) error { + l := len(data) 
+ iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = FSGroupStrategyType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPIngressPath) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = 
string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Backend.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPIngressRuleValue) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, HTTPIngressPath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostPortRange) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + m.Min = 0 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Min |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Max |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IDRange) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IDRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + m.Min = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Min |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Max |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Ingress) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressBackend) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServicePort.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Ingress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressRule) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.IngressRuleValue.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressRuleValue) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HTTP == nil { + m.HTTP = &HTTPIngressRuleValue{} + } + if err := m.HTTP.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Backend == nil { + m.Backend = &IngressBackend{} + } + if err := m.Backend.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TLS = append(m.TLS, IngressTLS{}) + if err := m.TLS[len(m.TLS)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, IngressRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 
1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LoadBalancer.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressTLS) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts = append(m.Hosts, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ 
+ wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyIngressRule) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.From = append(m.From, NetworkPolicyPeer{}) + if err := m.From[len(m.From)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, NetworkPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyPeer) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) 
+ if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodSelector == nil { + m.PodSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.PodSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyPort) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_kubernetes_pkg_api_v1.Protocol(data[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Port == nil { + m.Port = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.Port.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicySpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PodSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicyList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodSecurityPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Privileged = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_kubernetes_pkg_api_v1.Capability(data[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDropCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_kubernetes_pkg_api_v1.Capability(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_kubernetes_pkg_api_v1.Capability(data[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, FSType(data[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostNetwork = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPorts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostPorts = append(m.HostPorts, HostPortRange{}) + if err := m.HostPorts[len(m.HostPorts)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostPID = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.HostIPC = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinux", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + if err := m.SELinux.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RunAsUser.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SupplementalGroups.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FSGroup.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnlyRootFilesystem = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSet) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen 
int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetCondition) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ReplicaSetConditionType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ReplicaSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + 
m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ReplicaSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerDummy) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerDummy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerDummy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollbackConfig) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy 
< 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDaemonSet) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + } + 
var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxSurge == nil { + m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunAsUserStrategyOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = RunAsUserStrategy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SELinuxStrategyOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SELinuxStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SELinuxStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = SELinuxStrategy(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxOptions == nil { + m.SELinuxOptions = &k8s_io_kubernetes_pkg_api_v1.SELinuxOptions{} + } + if err := m.SELinuxOptions.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scale) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scale: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Selector == nil { + m.Selector = make(map[string]string) + } + m.Selector[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetSelector = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SupplementalGroupsStrategyOptions) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = SupplementalGroupsStrategyType(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ThirdPartyResource) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ThirdPartyResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ThirdPartyResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, APIVersion{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ThirdPartyResourceData) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ThirdPartyResourceData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ThirdPartyResourceData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ThirdPartyResourceDataList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ThirdPartyResourceDataList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ThirdPartyResourceDataList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ThirdPartyResourceData{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ThirdPartyResourceList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 
3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ThirdPartyResourceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ThirdPartyResourceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ThirdPartyResource{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + 
case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 3369 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe4, 0x5b, 0xdb, 0x6f, 0x1b, 0xc7, + 0xd5, 0xf7, 0x8a, 0xa4, 0x45, 0x1d, 0x59, 0x92, 0x35, 0x72, 0x64, 0x46, 0x49, 0x44, 0x67, 0x3f, + 0x7c, 0x89, 0xf3, 0x7d, 0x09, 0xf5, 0xc5, 0xf9, 0x9c, 0x26, 0x4e, 0xe2, 0x44, 0x94, 0x7c, 0x51, + 0x21, 0xc9, 0xcc, 0x90, 0x32, 0x1a, 0xe7, 0xd6, 0x15, 0x39, 0xa2, 0xd6, 0xde, 0x5b, 0x76, 0x67, + 0x15, 0x11, 0x41, 0xdb, 0x00, 0x45, 0xf3, 0x58, 0xb4, 0x2f, 0x45, 0x0a, 0xa4, 0x8f, 0x7d, 0xe8, + 0x4b, 0x9b, 0x3c, 0xb4, 0x69, 0xff, 0x82, 0xfa, 0xa1, 0x28, 0x52, 0xa0, 0x05, 0x8a, 0x22, 0x15, + 0x6a, 0x05, 0xcd, 0x3f, 0xd0, 0xe6, 0xc5, 0x4f, 0xc5, 0xcc, 0xce, 0xde, 0x77, 0x65, 0x93, 0xb2, + 0x89, 0x02, 0x7d, 0xe3, 0xce, 0x9c, 0xf3, 0x3b, 0x97, 0x39, 0x73, 0xe6, 0xcc, 0x85, 0xf0, 0xd2, + 0x8d, 0xe7, 0x9c, 0x9a, 0x6a, 0x2e, 0xdc, 0x70, 0x37, 0x89, 0x6d, 0x10, 0x4a, 0x9c, 0x05, 0xeb, + 0x46, 0x77, 0x41, 0xb1, 0x54, 0x67, 0x81, 0xec, 0x52, 0x62, 0x38, 0xaa, 0x69, 0x38, 0x0b, 0x3b, + 0x4f, 0x6f, 0x12, 0xaa, 0x3c, 0xbd, 0xd0, 0x25, 0x06, 0xb1, 0x15, 0x4a, 0x3a, 0x35, 0xcb, 0x36, + 0xa9, 0x89, 0x9e, 0xf2, 0xd8, 0x6b, 0x21, 0x7b, 0xcd, 0xba, 0xd1, 0xad, 0x31, 0xf6, 0x5a, 0xc8, + 0x5e, 0x13, 0xec, 0x73, 0x4f, 0x75, 0x55, 0xba, 0xed, 0x6e, 0xd6, 0xda, 0xa6, 0xbe, 0xd0, 0x35, + 0xbb, 0xe6, 0x02, 0x47, 0xd9, 0x74, 0xb7, 0xf8, 0x17, 0xff, 0xe0, 0xbf, 0x3c, 0xf4, 0xb9, 0xff, + 0x17, 0xca, 0x29, 0x96, 0xaa, 0x2b, 0xed, 0x6d, 0xd5, 0x20, 0x76, 0xcf, 0x57, 0x6f, 0xc1, 0x26, + 0x8e, 0xe9, 0xda, 0x6d, 0x92, 0xd4, 0xe9, 0x40, 0x2e, 0x67, 0x41, 0x27, 0x54, 0x59, 0xd8, 0x49, + 0x59, 0x32, 0xb7, 0x90, 0xc7, 0x65, 0xbb, 0x06, 0x55, 0xf5, 0xb4, 0x98, 0x67, 0xef, 0xc4, 0xe0, + 0xb4, 0xb7, 0x89, 0xae, 0xa4, 0xf8, 0x9e, 0xc9, 0xe3, 0x73, 0xa9, 0xaa, 0x2d, 0xa8, 0x06, 0x75, + 0xa8, 0x7d, 0x90, 0x4d, 0x0e, 0xb1, 0x77, 0x88, 0x1d, 0x1d, 0x25, 0x45, 0xb7, 0x34, 0x92, 0x65, + 0xd3, 0x93, 0xb9, 0x83, 0x9b, 0x41, 0x2d, 0xd7, 0x00, 0x16, 0x1b, 0x2b, 0x57, 0x89, 0xcd, 0xc6, + 0x0c, 0x9d, 0x82, 0xa2, 0xa1, 0xe8, 0xa4, 0x22, 0x9d, 0x92, 0x4e, 0x8f, 0xd5, 0x8f, 0xdd, 0xdc, + 0xab, 0x1e, 0xd9, 0xdf, 0xab, 0x16, 0xd7, 0x15, 0x9d, 0x60, 0xde, 0x23, 0xff, 0x58, 0x82, 0x07, + 0x97, 0x5c, 0x87, 0x9a, 0xfa, 0x1a, 0xa1, 0xb6, 0xda, 0x5e, 0x72, 0x6d, 0x9b, 0x18, 0xb4, 0x49, + 0x15, 0xea, 0x3a, 0x77, 0xe6, 0x47, 0xd7, 0xa0, 0xb4, 0xa3, 0x68, 0x2e, 0xa9, 0x8c, 0x9c, 0x92, + 0x4e, 0x8f, 0x9f, 0xa9, 0xd5, 0x44, 0x2c, 0x45, 0x1d, 0xe3, 0x47, 0x53, 0xcd, 0x1f, 0xed, 0xda, + 0xab, 0xae, 0x62, 0x50, 0x95, 0xf6, 0xea, 0x27, 0x04, 0xe4, 0x31, 0x21, 0xf7, 0x2a, 0xc3, 0xc2, + 0x1e, 0xa4, 0xfc, 0x7d, 0x09, 0x1e, 0xc9, 0xd5, 0x6d, 0x55, 0x75, 0x28, 0xd2, 0xa1, 0xa4, 0x52, + 0xa2, 0x3b, 0x15, 0xe9, 0x54, 0xe1, 0xf4, 0xf8, 0x99, 0xcb, 0xb5, 0xbe, 0x22, 0xb9, 0x96, 0x0b, + 0x5e, 0x9f, 0x10, 0x7a, 0x95, 0x56, 0x18, 0x3c, 0xf6, 0xa4, 0xc8, 0x3f, 0x94, 0x00, 0x45, 0x79, + 0x5a, 0x8a, 0xdd, 0x25, 0xf4, 0x2e, 0xbc, 0xf4, 0xda, 0xe1, 0xbc, 0x34, 0x23, 0x20, 0xc7, 0x3d, + 0x81, 0x31, 0x27, 0xbd, 0x2f, 0xc1, 0x6c, 0x5a, 0x27, 0xee, 0x9d, 0xad, 0xb8, 0x77, 0x16, 0x0f, + 0xe1, 0x1d, 0x0f, 0x35, 0xc7, 0x2d, 0xbf, 0x1c, 0x81, 0xb1, 0x65, 0x85, 0xe8, 0xa6, 0xd1, 0x24, + 0x14, 
0x7d, 0x13, 0xca, 0x6c, 0x7a, 0x76, 0x14, 0xaa, 0x70, 0x8f, 0x8c, 0x9f, 0xf9, 0xbf, 0x83, + 0xcc, 0x75, 0x6a, 0x8c, 0xba, 0xb6, 0xf3, 0x74, 0xed, 0xca, 0xe6, 0x75, 0xd2, 0xa6, 0x6b, 0x84, + 0x2a, 0x75, 0x24, 0xe4, 0x40, 0xd8, 0x86, 0x03, 0x54, 0xf4, 0x16, 0x14, 0x1d, 0x8b, 0xb4, 0x85, + 0x33, 0x5f, 0xec, 0xd3, 0xac, 0x40, 0xd3, 0xa6, 0x45, 0xda, 0xe1, 0x68, 0xb1, 0x2f, 0xcc, 0x71, + 0xd1, 0x16, 0x1c, 0x75, 0x78, 0x18, 0x54, 0x0a, 0x5c, 0xc2, 0xf9, 0x81, 0x25, 0x78, 0xc1, 0x34, + 0x29, 0x64, 0x1c, 0xf5, 0xbe, 0xb1, 0x40, 0x97, 0x7f, 0x27, 0xc1, 0x44, 0x40, 0xcb, 0x47, 0xec, + 0x8d, 0x94, 0xef, 0x6a, 0x77, 0xe7, 0x3b, 0xc6, 0xcd, 0x3d, 0x77, 0x5c, 0xc8, 0x2a, 0xfb, 0x2d, + 0x11, 0xbf, 0xbd, 0xe9, 0xc7, 0xc3, 0x08, 0x8f, 0x87, 0xe7, 0x06, 0x35, 0x2b, 0x27, 0x0c, 0xbe, + 0x28, 0x44, 0xcc, 0x61, 0xee, 0x44, 0x6f, 0x42, 0xd9, 0x21, 0x1a, 0x69, 0x53, 0xd3, 0x16, 0xe6, + 0x3c, 0x73, 0x97, 0xe6, 0x28, 0x9b, 0x44, 0x6b, 0x0a, 0xd6, 0xfa, 0x31, 0x66, 0x8f, 0xff, 0x85, + 0x03, 0x48, 0xf4, 0x3a, 0x94, 0x29, 0xd1, 0x2d, 0x4d, 0xa1, 0xfe, 0xc4, 0x7a, 0x2a, 0xdf, 0x24, + 0x06, 0xdb, 0x30, 0x3b, 0x2d, 0xc1, 0xc0, 0x07, 0x3f, 0x70, 0x96, 0xdf, 0x8a, 0x03, 0x40, 0xf4, + 0x81, 0x04, 0x93, 0xae, 0xd5, 0x61, 0xa4, 0x94, 0x25, 0xd8, 0x6e, 0x4f, 0x44, 0xc3, 0xc5, 0x41, + 0xdd, 0xb6, 0x11, 0x43, 0xab, 0xcf, 0x0a, 0xe1, 0x93, 0xf1, 0x76, 0x9c, 0x90, 0x8a, 0x16, 0x61, + 0x4a, 0x57, 0x0d, 0x4c, 0x94, 0x4e, 0xaf, 0x49, 0xda, 0xa6, 0xd1, 0x71, 0x2a, 0xc5, 0x53, 0xd2, + 0xe9, 0x52, 0xfd, 0xa4, 0x00, 0x98, 0x5a, 0x8b, 0x77, 0xe3, 0x24, 0x3d, 0xfa, 0x3a, 0x20, 0xdf, + 0xae, 0x4b, 0xde, 0x7a, 0xa1, 0x9a, 0x46, 0xa5, 0x74, 0x4a, 0x3a, 0x5d, 0xa8, 0xcf, 0x09, 0x14, + 0xd4, 0x4a, 0x51, 0xe0, 0x0c, 0x2e, 0xf9, 0x9f, 0x45, 0x98, 0x4a, 0x04, 0x38, 0xba, 0x0a, 0xb3, + 0x6d, 0x2f, 0x7d, 0xae, 0xbb, 0xfa, 0x26, 0xb1, 0x9b, 0xed, 0x6d, 0xd2, 0x71, 0x35, 0xd2, 0xe1, + 0xa3, 0x5e, 0xaa, 0xcf, 0x0b, 0x19, 0xb3, 0x4b, 0x99, 0x54, 0x38, 0x87, 0x9b, 0xe9, 0x6d, 0xf0, + 0xa6, 0x35, 0xd5, 0x71, 0x02, 0xcc, 0x11, 0x8e, 0x19, 0xe8, 0xbd, 0x9e, 0xa2, 0xc0, 0x19, 0x5c, + 0x4c, 0xc7, 0x0e, 0x71, 0x54, 0x9b, 0x74, 0x92, 0x3a, 0x16, 0xe2, 0x3a, 0x2e, 0x67, 0x52, 0xe1, + 0x1c, 0x6e, 0x74, 0x16, 0xc6, 0x3d, 0x69, 0xdc, 0xe3, 0x62, 0x68, 0x82, 0x84, 0xbd, 0x1e, 0x76, + 0xe1, 0x28, 0x1d, 0x33, 0xcd, 0xdc, 0xe4, 0x55, 0x40, 0x27, 0x7f, 0x48, 0xae, 0xa4, 0x28, 0x70, + 0x06, 0x17, 0x33, 0xcd, 0x8b, 0x99, 0x94, 0x69, 0x47, 0xe3, 0xa6, 0x6d, 0x64, 0x52, 0xe1, 0x1c, + 0x6e, 0x16, 0x79, 0x9e, 0xca, 0x8b, 0x3b, 0x8a, 0xaa, 0x29, 0x9b, 0x1a, 0xa9, 0x8c, 0xc6, 0x23, + 0x6f, 0x3d, 0xde, 0x8d, 0x93, 0xf4, 0xe8, 0x12, 0x4c, 0x7b, 0x4d, 0x1b, 0x86, 0x12, 0x80, 0x94, + 0x39, 0xc8, 0x83, 0x02, 0x64, 0x7a, 0x3d, 0x49, 0x80, 0xd3, 0x3c, 0xf2, 0x5f, 0x24, 0x38, 0x99, + 0x33, 0x93, 0xd0, 0xcb, 0x50, 0xa4, 0x3d, 0xcb, 0x5f, 0x7f, 0xff, 0xd7, 0xcf, 0xe8, 0xad, 0x9e, + 0x45, 0x6e, 0xef, 0x55, 0x1f, 0xca, 0x61, 0x63, 0xdd, 0x98, 0x33, 0xa2, 0x6f, 0xc3, 0x84, 0x6d, + 0x6a, 0x9a, 0x6a, 0x74, 0x3d, 0x12, 0x91, 0x4d, 0x2e, 0xf4, 0x39, 0xd3, 0x71, 0x14, 0x23, 0xcc, + 0x96, 0xd3, 0xfb, 0x7b, 0xd5, 0x89, 0x58, 0x1f, 0x8e, 0x8b, 0x93, 0x7f, 0x3d, 0x02, 0xb0, 0x4c, + 0x2c, 0xcd, 0xec, 0xe9, 0xc4, 0x18, 0xc6, 0x0a, 0xfa, 0x76, 0x6c, 0x05, 0x7d, 0xa9, 0xdf, 0x8c, + 0x16, 0xa8, 0x9a, 0xbb, 0x84, 0x76, 0x13, 0x4b, 0xe8, 0xcb, 0x83, 0x8b, 0x38, 0x78, 0x0d, 0xbd, + 0x55, 0x80, 0x99, 0x90, 0x78, 0xc9, 0x34, 0x3a, 0x2a, 0x9f, 0x13, 0x2f, 0xc4, 0x62, 0xe2, 0xf1, + 0x44, 0x4c, 0x9c, 0xcc, 0x60, 0x89, 0xc4, 0xc3, 0xd5, 0x40, 0xfb, 0x11, 0xce, 0x7e, 0x3e, 0x2e, + 0xfc, 0xf6, 0x5e, 0xf5, 0xc0, 
0xa2, 0xbc, 0x16, 0x60, 0xc6, 0x95, 0x45, 0x8f, 0xc1, 0x51, 0x9b, + 0x28, 0x8e, 0x69, 0xf0, 0x34, 0x31, 0x16, 0x1a, 0x85, 0x79, 0x2b, 0x16, 0xbd, 0xe8, 0x09, 0x18, + 0xd5, 0x89, 0xe3, 0x28, 0x5d, 0xc2, 0x33, 0xc2, 0x58, 0x7d, 0x4a, 0x10, 0x8e, 0xae, 0x79, 0xcd, + 0xd8, 0xef, 0x47, 0xd7, 0x61, 0x52, 0x53, 0x1c, 0x11, 0xda, 0x2d, 0x55, 0x27, 0x7c, 0xce, 0x8f, + 0x9f, 0xf9, 0x9f, 0xbb, 0x8b, 0x18, 0xc6, 0x11, 0xae, 0x44, 0xab, 0x31, 0x24, 0x9c, 0x40, 0x46, + 0x3b, 0x80, 0x58, 0x4b, 0xcb, 0x56, 0x0c, 0xc7, 0x73, 0x19, 0x93, 0x37, 0xda, 0xb7, 0xbc, 0x20, + 0xbf, 0xad, 0xa6, 0xd0, 0x70, 0x86, 0x04, 0xf9, 0xf7, 0x12, 0x4c, 0x86, 0x03, 0x36, 0x84, 0x42, + 0xe9, 0xad, 0x78, 0xa1, 0xf4, 0xfc, 0xc0, 0xc1, 0x9b, 0x53, 0x29, 0x7d, 0x58, 0x00, 0x14, 0x12, + 0xb1, 0xd4, 0xb0, 0xa9, 0xb4, 0x6f, 0xdc, 0xc5, 0x3e, 0xe2, 0xa7, 0x12, 0x20, 0x91, 0xac, 0x17, + 0x0d, 0xc3, 0xa4, 0x3c, 0xff, 0xfb, 0x6a, 0xbe, 0x36, 0xb0, 0x9a, 0xbe, 0x06, 0xb5, 0x8d, 0x14, + 0xf6, 0x05, 0x83, 0xda, 0xbd, 0x70, 0xc4, 0xd2, 0x04, 0x38, 0x43, 0x21, 0xf4, 0x0e, 0x80, 0x2d, + 0x30, 0x5b, 0xa6, 0x48, 0x01, 0x2f, 0x0d, 0x90, 0x4d, 0x19, 0xc0, 0x92, 0x69, 0x6c, 0xa9, 0xdd, + 0x30, 0xa1, 0xe1, 0x00, 0x18, 0x47, 0x84, 0xcc, 0x5d, 0x80, 0x93, 0x39, 0xda, 0xa3, 0xe3, 0x50, + 0xb8, 0x41, 0x7a, 0x9e, 0x5b, 0x31, 0xfb, 0x89, 0x4e, 0x44, 0xf7, 0x63, 0x63, 0x62, 0x2b, 0x75, + 0x6e, 0xe4, 0x39, 0x49, 0xfe, 0xb2, 0x14, 0x8d, 0x35, 0x5e, 0xc5, 0x9e, 0x86, 0xb2, 0x4d, 0x2c, + 0x4d, 0x6d, 0x2b, 0x8e, 0xa8, 0x67, 0x78, 0x41, 0x8a, 0x45, 0x1b, 0x0e, 0x7a, 0x63, 0xf5, 0xee, + 0xc8, 0xfd, 0xad, 0x77, 0x0b, 0xf7, 0xba, 0xde, 0x35, 0xa1, 0xec, 0xf8, 0x85, 0x6e, 0x91, 0x83, + 0x2f, 0x1e, 0x22, 0x67, 0x8b, 0x1a, 0x37, 0x10, 0x18, 0x54, 0xb7, 0x81, 0x90, 0xac, 0xba, 0xb6, + 0xd4, 0x67, 0x5d, 0xbb, 0x0a, 0x27, 0x6c, 0xb2, 0xa3, 0x32, 0x35, 0x2e, 0xab, 0x0e, 0x35, 0xed, + 0xde, 0xaa, 0xaa, 0xab, 0x54, 0x94, 0x3d, 0x95, 0xfd, 0xbd, 0xea, 0x09, 0x9c, 0xd1, 0x8f, 0x33, + 0xb9, 0x58, 0x76, 0xb6, 0x14, 0xd7, 0x21, 0x1d, 0x9e, 0xd2, 0xca, 0x61, 0x76, 0x6e, 0xf0, 0x56, + 0x2c, 0x7a, 0x91, 0x1e, 0x0b, 0xee, 0xf2, 0xbd, 0x08, 0xee, 0xc9, 0xfc, 0xc0, 0x46, 0x1b, 0x70, + 0xd2, 0xb2, 0xcd, 0xae, 0x4d, 0x1c, 0x67, 0x99, 0x28, 0x1d, 0x4d, 0x35, 0x88, 0xef, 0xaf, 0x31, + 0x6e, 0xe7, 0x43, 0xfb, 0x7b, 0xd5, 0x93, 0x8d, 0x6c, 0x12, 0x9c, 0xc7, 0x2b, 0x7f, 0x54, 0x84, + 0xe3, 0xc9, 0x55, 0x36, 0xa7, 0x2a, 0x95, 0x06, 0xaa, 0x4a, 0x9f, 0x8c, 0x4c, 0x1b, 0xaf, 0x64, + 0x0f, 0xa2, 0x21, 0x63, 0xea, 0x2c, 0xc2, 0x94, 0xc8, 0x23, 0x7e, 0xa7, 0xa8, 0xcb, 0x83, 0x68, + 0xd8, 0x88, 0x77, 0xe3, 0x24, 0x3d, 0xab, 0x35, 0xc3, 0x12, 0xd2, 0x07, 0x29, 0xc6, 0x6b, 0xcd, + 0xc5, 0x24, 0x01, 0x4e, 0xf3, 0xa0, 0x35, 0x98, 0x71, 0x8d, 0x34, 0x94, 0x17, 0x9d, 0x0f, 0x09, + 0xa8, 0x99, 0x8d, 0x34, 0x09, 0xce, 0xe2, 0x43, 0x3b, 0x00, 0x6d, 0xbf, 0x20, 0x70, 0x2a, 0x47, + 0x79, 0xae, 0xae, 0x0f, 0x3c, 0xb7, 0x82, 0xda, 0x22, 0xcc, 0x88, 0x41, 0x93, 0x83, 0x23, 0x92, + 0xd0, 0x0b, 0x30, 0x61, 0xf3, 0x8d, 0x87, 0x6f, 0x80, 0x57, 0xbc, 0x3f, 0x20, 0xd8, 0x26, 0x70, + 0xb4, 0x13, 0xc7, 0x69, 0xe5, 0x3f, 0x48, 0xd1, 0x25, 0x2a, 0x28, 0xb5, 0xcf, 0xc5, 0xca, 0xaa, + 0xc7, 0x12, 0x65, 0xd5, 0x6c, 0x9a, 0x23, 0x52, 0x55, 0x7d, 0x27, 0xbb, 0xca, 0xbe, 0x78, 0xa8, + 0x2a, 0x3b, 0x5c, 0x6a, 0xef, 0x5c, 0x66, 0x7f, 0x22, 0xc1, 0xec, 0xc5, 0xe6, 0x25, 0xdb, 0x74, + 0x2d, 0x5f, 0xbd, 0x2b, 0x96, 0xe7, 0xab, 0xaf, 0x41, 0xd1, 0x76, 0x35, 0xdf, 0xae, 0xff, 0xf2, + 0xed, 0xc2, 0xae, 0xc6, 0xec, 0x9a, 0x49, 0x70, 0x79, 0x46, 0x31, 0x06, 0xf4, 0x16, 0x1c, 0xb5, + 0x15, 0xa3, 0x4b, 0xfc, 0x45, 0xf8, 0xd9, 0x3e, 0xad, 
0x59, 0x59, 0xc6, 0x8c, 0x3d, 0x52, 0x0a, + 0x72, 0x34, 0x2c, 0x50, 0xe5, 0x9f, 0x48, 0x30, 0x75, 0xb9, 0xd5, 0x6a, 0xac, 0x18, 0x7c, 0x16, + 0x37, 0x14, 0xba, 0xcd, 0xea, 0x04, 0x4b, 0xa1, 0xdb, 0xc9, 0x3a, 0x81, 0xf5, 0x61, 0xde, 0x83, + 0xb6, 0x61, 0x94, 0x65, 0x0f, 0x62, 0x74, 0x06, 0x2c, 0xf1, 0x85, 0xb8, 0xba, 0x07, 0x12, 0xd6, + 0x9f, 0xa2, 0x01, 0xfb, 0xf0, 0xf2, 0x7b, 0x70, 0x22, 0xa2, 0x1e, 0xf3, 0x17, 0x3f, 0x9d, 0x44, + 0x6d, 0x28, 0x31, 0x4d, 0xfc, 0xb3, 0xc7, 0x7e, 0x8f, 0xd0, 0x12, 0x26, 0x87, 0x75, 0x14, 0xfb, + 0x72, 0xb0, 0x87, 0x2d, 0xaf, 0xc1, 0xc4, 0x65, 0xd3, 0xa1, 0x0d, 0xd3, 0xa6, 0xdc, 0x6d, 0xe8, + 0x11, 0x28, 0xe8, 0xaa, 0x21, 0x56, 0xe9, 0x71, 0xc1, 0x53, 0x60, 0xeb, 0x08, 0x6b, 0xe7, 0xdd, + 0xca, 0xae, 0xc8, 0x46, 0x61, 0xb7, 0xb2, 0x8b, 0x59, 0xbb, 0x7c, 0x09, 0x46, 0xc5, 0x70, 0x44, + 0x81, 0x0a, 0x07, 0x03, 0x15, 0x32, 0x80, 0x7e, 0x31, 0x02, 0xa3, 0x42, 0xfb, 0x21, 0x6c, 0xe6, + 0xde, 0x88, 0x6d, 0xe6, 0xce, 0x0d, 0x36, 0xd2, 0xb9, 0x3b, 0xb9, 0x4e, 0x62, 0x27, 0xf7, 0xe2, + 0x80, 0xf8, 0x07, 0x6f, 0xe3, 0x3e, 0x96, 0x60, 0x32, 0x1e, 0x73, 0xe8, 0x2c, 0x8c, 0xb3, 0x35, + 0x45, 0x6d, 0x93, 0xf5, 0xb0, 0x28, 0x0e, 0x0e, 0x56, 0x9a, 0x61, 0x17, 0x8e, 0xd2, 0xa1, 0x6e, + 0xc0, 0xc6, 0xc2, 0x42, 0x38, 0x25, 0xdf, 0xe5, 0x2e, 0x55, 0xb5, 0x9a, 0x77, 0x5f, 0x53, 0x5b, + 0x31, 0xe8, 0x15, 0xbb, 0x49, 0x6d, 0xd5, 0xe8, 0xa6, 0x04, 0xf1, 0x18, 0x8b, 0x22, 0xcb, 0x37, + 0x25, 0x18, 0x17, 0x2a, 0x0f, 0x61, 0x4b, 0xf2, 0x7a, 0x7c, 0x4b, 0xf2, 0xec, 0x80, 0xf3, 0x39, + 0x7b, 0x3f, 0xf2, 0x69, 0x68, 0x0a, 0x9b, 0xc1, 0x2c, 0xc1, 0x6c, 0x9b, 0x0e, 0x4d, 0x26, 0x18, + 0x36, 0xd7, 0x30, 0xef, 0x41, 0xdf, 0x93, 0xe0, 0xb8, 0x9a, 0x98, 0xf3, 0xc2, 0xd7, 0x2f, 0x0f, + 0xa6, 0x5a, 0x00, 0x53, 0xaf, 0x08, 0x79, 0xc7, 0x93, 0x3d, 0x38, 0x25, 0x52, 0x76, 0x21, 0x45, + 0x85, 0x14, 0x28, 0x6e, 0x53, 0x6a, 0x89, 0x41, 0x58, 0x1a, 0x3c, 0xf3, 0x84, 0x2a, 0x95, 0xb9, + 0xf9, 0xad, 0x56, 0x03, 0x73, 0x68, 0xf9, 0xe7, 0x23, 0x81, 0xc3, 0x9a, 0xde, 0x24, 0x09, 0xf2, + 0xad, 0x74, 0x2f, 0xf2, 0xed, 0x78, 0x56, 0xae, 0x45, 0xdf, 0x80, 0x02, 0xd5, 0x06, 0xdd, 0x94, + 0x0a, 0x09, 0xad, 0xd5, 0x66, 0x98, 0xb0, 0x5a, 0xab, 0x4d, 0xcc, 0x20, 0xd1, 0xdb, 0x50, 0x62, + 0xab, 0x19, 0x9b, 0xe3, 0x85, 0xc1, 0x73, 0x08, 0xf3, 0x57, 0x18, 0x61, 0xec, 0xcb, 0xc1, 0x1e, + 0xae, 0xfc, 0x1e, 0x4c, 0xc4, 0x12, 0x01, 0xba, 0x0e, 0xc7, 0x34, 0x53, 0xe9, 0xd4, 0x15, 0x4d, + 0x31, 0xda, 0xc4, 0x4e, 0xa6, 0xc6, 0xec, 0xfd, 0xcc, 0x6a, 0x84, 0x43, 0x24, 0x94, 0xe0, 0x02, + 0x31, 0xda, 0x87, 0x63, 0xd8, 0xb2, 0x02, 0x10, 0x5a, 0x8f, 0xaa, 0x50, 0x62, 0x21, 0xec, 0xad, + 0x4c, 0x63, 0xf5, 0x31, 0xa6, 0x2b, 0x8b, 0x6c, 0x07, 0x7b, 0xed, 0xe8, 0x0c, 0x80, 0x43, 0xda, + 0x36, 0xa1, 0x3c, 0xef, 0x78, 0x27, 0x40, 0x41, 0x06, 0x6e, 0x06, 0x3d, 0x38, 0x42, 0x25, 0xff, + 0x49, 0x82, 0x89, 0x75, 0x42, 0xdf, 0x35, 0xed, 0x1b, 0x0d, 0x53, 0x53, 0xdb, 0xbd, 0x21, 0xe4, + 0xfd, 0xcd, 0x58, 0xde, 0x7f, 0xa5, 0xcf, 0x31, 0x8b, 0x69, 0x9b, 0x97, 0xfd, 0xe5, 0xbf, 0x4b, + 0x50, 0x89, 0x51, 0x46, 0xd3, 0x04, 0x81, 0x92, 0x65, 0xda, 0xd4, 0x5f, 0xe3, 0x0f, 0xa5, 0x01, + 0x4b, 0xa9, 0x91, 0x55, 0x9e, 0xc1, 0x62, 0x0f, 0x9d, 0xd9, 0xb9, 0x65, 0x9b, 0xba, 0x88, 0xfb, + 0xc3, 0x49, 0x21, 0xc4, 0x0e, 0xed, 0xbc, 0x68, 0x9b, 0x3a, 0xe6, 0xd8, 0xf2, 0x1f, 0x25, 0x98, + 0x8e, 0x51, 0x0e, 0x21, 0xa5, 0x2b, 0xf1, 0x94, 0xfe, 0xe2, 0x61, 0x0c, 0xcb, 0x49, 0xec, 0x5f, + 0x25, 0xcd, 0x62, 0x0e, 0x40, 0x5b, 0x30, 0x6e, 0x99, 0x9d, 0xe6, 0x3d, 0xb8, 0x99, 0x9b, 0x62, + 0x2b, 0x64, 0x23, 0xc4, 0xc2, 0x51, 0x60, 0xb4, 0x0b, 0xd3, 0x86, 0xa2, 0x13, 
0xc7, 0x52, 0xda, + 0xa4, 0x79, 0x0f, 0xce, 0x45, 0x1e, 0xe0, 0xb7, 0x05, 0x49, 0x44, 0x9c, 0x16, 0x22, 0xff, 0x2a, + 0x65, 0xb7, 0x69, 0x53, 0xf4, 0x2a, 0x94, 0xf9, 0x23, 0x89, 0xb6, 0xa9, 0x89, 0xa5, 0xed, 0x2c, + 0x1b, 0x9a, 0x86, 0x68, 0xbb, 0xbd, 0x57, 0xfd, 0xef, 0x03, 0x8f, 0x75, 0x7d, 0x42, 0x1c, 0xc0, + 0xa0, 0x75, 0x28, 0x5a, 0x87, 0x29, 0x33, 0xf8, 0xc2, 0xc2, 0x6b, 0x0b, 0x8e, 0x23, 0xff, 0x23, + 0xa9, 0x38, 0x5f, 0x5e, 0xae, 0xdf, 0xb3, 0x01, 0x0b, 0xca, 0x9a, 0xdc, 0x41, 0xb3, 0x61, 0x54, + 0xac, 0xb2, 0x22, 0x2e, 0x2f, 0x1d, 0x26, 0x2e, 0xa3, 0x2b, 0x43, 0xb0, 0x89, 0xf0, 0x1b, 0x7d, + 0x41, 0xf2, 0x5f, 0x25, 0x98, 0xe6, 0x0a, 0xb5, 0x5d, 0x5b, 0xa5, 0xbd, 0xa1, 0x65, 0xd0, 0xad, + 0x58, 0x06, 0x5d, 0xee, 0xd3, 0xd0, 0x94, 0xc6, 0xb9, 0x59, 0xf4, 0x73, 0x09, 0x1e, 0x48, 0x51, + 0x0f, 0x21, 0xc3, 0x90, 0x78, 0x86, 0x79, 0xe5, 0xb0, 0x06, 0xe6, 0x64, 0x99, 0x9b, 0x90, 0x61, + 0x1e, 0x0f, 0xdc, 0x33, 0x00, 0x96, 0xad, 0xee, 0xa8, 0x1a, 0xe9, 0x8a, 0xcb, 0xe0, 0x72, 0x38, + 0x24, 0x8d, 0xa0, 0x07, 0x47, 0xa8, 0xd0, 0xb7, 0x60, 0xb6, 0x43, 0xb6, 0x14, 0x57, 0xa3, 0x8b, + 0x9d, 0xce, 0x92, 0x62, 0x29, 0x9b, 0xaa, 0xa6, 0x52, 0x55, 0xec, 0xb0, 0xc7, 0xea, 0x17, 0xbc, + 0x4b, 0xda, 0x2c, 0x8a, 0xdb, 0x7b, 0xd5, 0xc7, 0x0f, 0xbe, 0x98, 0xf1, 0x89, 0x7b, 0x38, 0x47, + 0x08, 0xfa, 0xae, 0x04, 0x15, 0x9b, 0xbc, 0xe3, 0xaa, 0x36, 0xe9, 0x2c, 0xdb, 0xa6, 0x15, 0xd3, + 0xa0, 0xc0, 0x35, 0xb8, 0xb4, 0xbf, 0x57, 0xad, 0xe0, 0x1c, 0x9a, 0x7e, 0x74, 0xc8, 0x15, 0x84, + 0x28, 0xcc, 0x28, 0x9a, 0x66, 0xbe, 0x4b, 0xe2, 0x1e, 0x28, 0x72, 0xf9, 0xf5, 0xfd, 0xbd, 0xea, + 0xcc, 0x62, 0xba, 0xbb, 0x1f, 0xd1, 0x59, 0xf0, 0x68, 0x01, 0x46, 0x77, 0x4c, 0xcd, 0xd5, 0x89, + 0x53, 0x29, 0x71, 0x49, 0x2c, 0xe3, 0x8e, 0x5e, 0xf5, 0x9a, 0x6e, 0xef, 0x55, 0x8f, 0x5e, 0x6c, + 0xf2, 0xa3, 0x0f, 0x9f, 0x8a, 0xed, 0xd1, 0x58, 0xcd, 0x24, 0xa6, 0x3c, 0x3f, 0x77, 0x2d, 0x87, + 0x39, 0xe6, 0x72, 0xd8, 0x85, 0xa3, 0x74, 0x48, 0x87, 0xb1, 0x6d, 0xb1, 0x6f, 0x77, 0x2a, 0xa3, + 0x03, 0xad, 0x7e, 0xb1, 0x7d, 0x7f, 0x7d, 0x5a, 0x88, 0x1c, 0xf3, 0x9b, 0x1d, 0x1c, 0x4a, 0x40, + 0x4f, 0xc0, 0x28, 0xff, 0x58, 0x59, 0xe6, 0xa7, 0xb5, 0xe5, 0x30, 0x13, 0x5d, 0xf6, 0x9a, 0xb1, + 0xdf, 0xef, 0x93, 0xae, 0x34, 0x96, 0xf8, 0xe1, 0x6a, 0x82, 0x74, 0xa5, 0xb1, 0x84, 0xfd, 0x7e, + 0x64, 0xc1, 0xa8, 0x43, 0x56, 0x55, 0xc3, 0xdd, 0xad, 0xc0, 0x40, 0xd7, 0xc5, 0xcd, 0x0b, 0x9c, + 0x3b, 0x71, 0x14, 0x15, 0x4a, 0x14, 0xfd, 0xd8, 0x17, 0x83, 0x76, 0x61, 0xcc, 0x76, 0x8d, 0x45, + 0x67, 0xc3, 0x21, 0x76, 0x65, 0x9c, 0xcb, 0xec, 0x37, 0x39, 0x63, 0x9f, 0x3f, 0x29, 0x35, 0xf0, + 0x60, 0x40, 0x81, 0x43, 0x61, 0xe8, 0x23, 0x09, 0x90, 0xe3, 0x5a, 0x96, 0x46, 0x74, 0x62, 0x50, + 0x45, 0xe3, 0xa7, 0x61, 0x4e, 0xe5, 0x18, 0xd7, 0xa1, 0xd1, 0xaf, 0xdd, 0x29, 0xa0, 0xa4, 0x32, + 0xc1, 0x51, 0x73, 0x9a, 0x14, 0x67, 0xe8, 0xc1, 0x86, 0x62, 0xcb, 0xe1, 0xbf, 0x2b, 0x13, 0x03, + 0x0d, 0x45, 0xf6, 0xa9, 0x60, 0x38, 0x14, 0xa2, 0x1f, 0xfb, 0x62, 0xd0, 0x55, 0x98, 0xb5, 0x89, + 0xd2, 0xb9, 0x62, 0x68, 0x3d, 0x6c, 0x9a, 0xf4, 0xa2, 0xaa, 0x11, 0xa7, 0xe7, 0x50, 0xa2, 0x57, + 0x26, 0x79, 0xd8, 0x04, 0x4f, 0x2e, 0x70, 0x26, 0x15, 0xce, 0xe1, 0xe6, 0x2f, 0x01, 0xc4, 0x19, + 0xec, 0x70, 0xde, 0xd2, 0x1d, 0xee, 0x25, 0x40, 0xa8, 0xea, 0x7d, 0x7b, 0x09, 0x10, 0x11, 0x71, + 0xf0, 0x11, 0xd2, 0x57, 0x23, 0x30, 0x13, 0x12, 0xdf, 0xf5, 0x4b, 0x80, 0x0c, 0x96, 0x21, 0xbc, + 0x04, 0xc8, 0xbe, 0x4a, 0x2f, 0xdc, 0xef, 0xab, 0xf4, 0xfb, 0xf0, 0x02, 0x81, 0xdf, 0xce, 0x87, + 0x4e, 0xfc, 0xf7, 0xbf, 0x9d, 0x0f, 0x75, 0xcd, 0x29, 0x67, 0x7e, 0x33, 0x12, 0x35, 0xe8, 0x3f, + 0xe8, 
0x0a, 0xf8, 0xf0, 0x2f, 0x0d, 0xe5, 0xcf, 0x0b, 0x70, 0x3c, 0x39, 0x63, 0x63, 0x37, 0x81, + 0xd2, 0x1d, 0x6f, 0x02, 0x1b, 0x70, 0x62, 0xcb, 0xd5, 0xb4, 0x1e, 0x77, 0x48, 0xe4, 0x3a, 0xd0, + 0x3b, 0xb5, 0x7f, 0x58, 0x70, 0x9e, 0xb8, 0x98, 0x41, 0x83, 0x33, 0x39, 0x73, 0x6e, 0x35, 0x0b, + 0x03, 0xdd, 0x6a, 0xa6, 0x2e, 0xd5, 0x8a, 0x77, 0x7f, 0xa9, 0x96, 0x7d, 0x43, 0x59, 0x1a, 0xe0, + 0x86, 0xf2, 0x5e, 0x5c, 0x29, 0x66, 0x24, 0xbe, 0x3b, 0x5d, 0x29, 0xca, 0x0f, 0xc3, 0x9c, 0x60, + 0x63, 0xdf, 0x4b, 0xa6, 0x41, 0x6d, 0x53, 0xd3, 0x88, 0xbd, 0xec, 0xea, 0x7a, 0x4f, 0x3e, 0x0f, + 0x93, 0xf1, 0x7b, 0x6d, 0x6f, 0xe4, 0xbd, 0xab, 0x76, 0x71, 0x97, 0x12, 0x19, 0x79, 0xaf, 0x1d, + 0x07, 0x14, 0xf2, 0x07, 0x12, 0xcc, 0x66, 0xbf, 0xa1, 0x43, 0x1a, 0x4c, 0xea, 0xca, 0x6e, 0xf4, + 0x11, 0xa1, 0x34, 0xe0, 0x8e, 0x1b, 0xed, 0xef, 0x55, 0x27, 0xd7, 0x62, 0x58, 0x38, 0x81, 0x2d, + 0x7f, 0x21, 0xc1, 0xc9, 0x9c, 0x6b, 0xc6, 0xe1, 0x6a, 0x82, 0xae, 0x41, 0x59, 0x57, 0x76, 0x9b, + 0xae, 0xdd, 0x25, 0x03, 0x9f, 0x31, 0xf0, 0x5c, 0xb2, 0x26, 0x50, 0x70, 0x80, 0x27, 0x7f, 0x22, + 0x41, 0x25, 0xaf, 0x1e, 0x44, 0x67, 0x63, 0x17, 0xa2, 0x8f, 0x26, 0x2e, 0x44, 0xa7, 0x53, 0x7c, + 0x43, 0xba, 0x0e, 0xfd, 0x54, 0x82, 0xd9, 0xec, 0xba, 0x19, 0x3d, 0x13, 0xd3, 0xb8, 0x9a, 0xd0, + 0x78, 0x2a, 0xc1, 0x25, 0xf4, 0xdd, 0x86, 0x49, 0x51, 0x5d, 0x0b, 0x18, 0xe1, 0xe5, 0x27, 0x0f, + 0xce, 0xaa, 0x02, 0xcc, 0xaf, 0x13, 0xf9, 0x48, 0xc6, 0xdb, 0x70, 0x02, 0x57, 0xfe, 0xd9, 0x08, + 0x94, 0x9a, 0x6d, 0x45, 0x23, 0x43, 0x28, 0xea, 0xae, 0xc5, 0x8a, 0xba, 0x7e, 0xdf, 0xf9, 0x73, + 0x2d, 0x73, 0xeb, 0xb9, 0xcd, 0x44, 0x3d, 0x77, 0x6e, 0x20, 0xf4, 0x83, 0x4b, 0xb9, 0xe7, 0x61, + 0x2c, 0x50, 0xa2, 0xbf, 0xd5, 0x43, 0xfe, 0x78, 0x04, 0xc6, 0x23, 0x22, 0xfa, 0x5c, 0x7b, 0x76, + 0x62, 0xab, 0xf7, 0x20, 0x7f, 0x29, 0x8a, 0xc8, 0xae, 0xf9, 0xeb, 0xb7, 0xf7, 0x86, 0x2e, 0x7c, + 0x0b, 0x95, 0x5e, 0xd6, 0xcf, 0xc3, 0x24, 0xe5, 0xff, 0xb0, 0x09, 0xce, 0xf8, 0x0a, 0x3c, 0x8a, + 0x83, 0x97, 0x99, 0xad, 0x58, 0x2f, 0x4e, 0x50, 0xcf, 0xbd, 0x00, 0x13, 0x31, 0x61, 0x7d, 0x3d, + 0x79, 0xfb, 0xad, 0x04, 0x8f, 0xde, 0x71, 0x4f, 0x86, 0xea, 0xb1, 0xe9, 0x55, 0x4b, 0x4c, 0xaf, + 0xf9, 0x7c, 0x80, 0x21, 0x3e, 0x96, 0xf8, 0xd1, 0x08, 0xa0, 0xd6, 0xb6, 0x6a, 0x77, 0x1a, 0x8a, + 0x4d, 0x7b, 0x58, 0xfc, 0x8f, 0x6a, 0x08, 0x13, 0xee, 0x2c, 0x8c, 0x77, 0x88, 0xd3, 0xb6, 0x55, + 0xee, 0x2c, 0xb1, 0x57, 0x08, 0xce, 0x41, 0x96, 0xc3, 0x2e, 0x1c, 0xa5, 0x43, 0x5d, 0x28, 0xef, + 0x78, 0xff, 0xd4, 0xf3, 0x6f, 0xde, 0xfa, 0x2d, 0x66, 0xc3, 0xff, 0xfa, 0x85, 0xf1, 0x25, 0x1a, + 0x1c, 0x1c, 0x80, 0xcb, 0x1f, 0x4a, 0x30, 0x9b, 0x76, 0xcc, 0x32, 0x53, 0xfd, 0xfe, 0x3b, 0xe7, + 0x61, 0x28, 0x72, 0x74, 0xe6, 0x95, 0x63, 0xde, 0x89, 0x37, 0x93, 0x8c, 0x79, 0xab, 0xfc, 0xa5, + 0x04, 0x73, 0xd9, 0xaa, 0x0d, 0x61, 0x2b, 0x71, 0x3d, 0xbe, 0x95, 0xe8, 0xf7, 0xd8, 0x20, 0x5b, + 0xef, 0x9c, 0x6d, 0xc5, 0x5e, 0xe6, 0x18, 0x0c, 0xc1, 0xc8, 0xad, 0xb8, 0x91, 0x8b, 0x87, 0x36, + 0x32, 0xdb, 0xc0, 0xfa, 0x13, 0x37, 0x6f, 0xcd, 0x1f, 0xf9, 0xec, 0xd6, 0xfc, 0x91, 0x3f, 0xdf, + 0x9a, 0x3f, 0xf2, 0xfe, 0xfe, 0xbc, 0x74, 0x73, 0x7f, 0x5e, 0xfa, 0x6c, 0x7f, 0x5e, 0xfa, 0xdb, + 0xfe, 0xbc, 0xf4, 0x83, 0x2f, 0xe6, 0x8f, 0x5c, 0x1b, 0x15, 0x98, 0xff, 0x0a, 0x00, 0x00, 0xff, + 0xff, 0x56, 0x18, 0xbd, 0xf5, 0xb1, 0x3c, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/generated.proto new file mode 100644 index 000000000..8ba21bc9b --- /dev/null +++ 
b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/generated.proto @@ -0,0 +1,1005 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.extensions.v1beta1; + +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// An APIVersion represents a single concrete version of an object model. +message APIVersion { + // Name of this version (e.g. 'v1'). + // +optional + optional string name = 1; +} + +message CustomMetricCurrentStatus { + // Custom Metric name. + optional string name = 1; + + // Custom Metric value (average). + optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; +} + +message CustomMetricCurrentStatusList { + repeated CustomMetricCurrentStatus items = 1; +} + +// Alpha-level support for Custom Metrics in HPA (as annotations). +message CustomMetricTarget { + // Custom Metric name. + optional string name = 1; + + // Custom Metric value (average). + optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; +} + +message CustomMetricTargetList { + repeated CustomMetricTarget items = 1; +} + +// DaemonSet represents the configuration of a daemon set. +message DaemonSet { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The desired behavior of this daemon set. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional DaemonSetSpec spec = 2; + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional DaemonSetStatus status = 3; +} + +// DaemonSetList is a collection of daemon sets. +message DaemonSetList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // A list of daemon sets. + repeated DaemonSet items = 2; +} + +// DaemonSetSpec is the specification of a daemon set. +message DaemonSetSpec { + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // If empty, defaulted to labels on Pod template. 
+ // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + + // An object that describes the pod that will be created. + // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). + // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template + optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 2; + + // An update strategy to replace existing DaemonSet pods with new pods. + // +optional + optional DaemonSetUpdateStrategy updateStrategy = 3; + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + optional int32 minReadySeconds = 4; + + // A sequence number representing a specific generation of the template. + // Populated by the system. It can be set only during the creation. + // +optional + optional int64 templateGeneration = 5; +} + +// DaemonSetStatus represents the current status of a daemon set. +message DaemonSetStatus { + // The number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. + // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + optional int32 currentNumberScheduled = 1; + + // The number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. + // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + optional int32 numberMisscheduled = 2; + + // The total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). + // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + optional int32 desiredNumberScheduled = 3; + + // The number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running and ready. + optional int32 numberReady = 4; + + // The most recent generation observed by the daemon set controller. + // +optional + optional int64 observedGeneration = 5; + + // The total number of nodes that are running updated daemon pod + // +optional + optional int32 updatedNumberScheduled = 6; + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + optional int32 numberAvailable = 7; + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + optional int32 numberUnavailable = 8; +} + +message DaemonSetUpdateStrategy { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". + // Default is OnDelete. + // +optional + optional string type = 1; + + // Rolling update config params. Present only if type = "RollingUpdate". + // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as DeploymentStrategy.RollingUpdate. + // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + optional RollingUpdateDaemonSet rollingUpdate = 2; +} + +// Deployment enables declarative updates for Pods and ReplicaSets. +message Deployment { + // Standard object metadata. 
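For orientation, here is a minimal Go sketch of a DaemonSet built from these vendored v1beta1 types; the name, namespace, labels, and image are placeholders, and the selector is written out even though it may be left empty to default to the pod template's labels.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/pkg/apis/extensions/v1beta1"
)

func main() {
	labels := map[string]string{"app": "node-agent"} // placeholder labels

	ds := v1beta1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{Name: "node-agent", Namespace: "default"},
		Spec: v1beta1.DaemonSetSpec{
			// Optional: when omitted, it defaults to the labels on the pod template.
			Selector: &metav1.LabelSelector{MatchLabels: labels},
			// Exactly one copy of this pod runs on every node matching its node selector.
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: v1.PodSpec{
					Containers: []v1.Container{{Name: "agent", Image: "busybox"}},
				},
			},
			// "OnDelete" is the default; "RollingUpdate" honours the RollingUpdateDaemonSet parameters.
			UpdateStrategy: v1beta1.DaemonSetUpdateStrategy{Type: "RollingUpdate"},
		},
	}
	fmt.Println("built daemon set:", ds.Name)
}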
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the Deployment. + // +optional + optional DeploymentSpec spec = 2; + + // Most recently observed status of the Deployment. + // +optional + optional DeploymentStatus status = 3; +} + +// DeploymentCondition describes the state of a deployment at a certain point. +message DeploymentCondition { + // Type of deployment condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + + // Last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + + // The reason for the condition's last transition. + optional string reason = 4; + + // A human readable message indicating details about the transition. + optional string message = 5; +} + +// DeploymentList is a list of Deployments. +message DeploymentList { + // Standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Deployments. + repeated Deployment items = 2; +} + +// DeploymentRollback stores the information required to rollback a deployment. +message DeploymentRollback { + // Required: This must match the Name of a deployment. + optional string name = 1; + + // The annotations to be updated to a deployment + // +optional + map<string, string> updatedAnnotations = 2; + + // The config of this deployment rollback. + optional RollbackConfig rollbackTo = 3; +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +message DeploymentSpec { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + optional int32 replicas = 1; + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template describes the pods that will be created. + optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + optional DeploymentStrategy strategy = 4; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 5; + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + optional int32 revisionHistoryLimit = 6; + + // Indicates that the deployment is paused and will not be processed by the + // deployment controller. + // +optional + optional bool paused = 7; + + // The config this deployment is rolling back to. Will be cleared after rollback is done. + // +optional + optional RollbackConfig rollbackTo = 8; + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. 
The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Once autoRollback is + // implemented, the deployment controller will automatically rollback failed + // deployments. Note that progress will not be estimated during the time a + // deployment is paused. This is not set by default. + optional int32 progressDeadlineSeconds = 9; +} + +// DeploymentStatus is the most recently observed status of the Deployment. +message DeploymentStatus { + // The generation observed by the deployment controller. + // +optional + optional int64 observedGeneration = 1; + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + optional int32 replicas = 2; + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + optional int32 updatedReplicas = 3; + + // Total number of ready pods targeted by this deployment. + // +optional + optional int32 readyReplicas = 7; + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + optional int32 availableReplicas = 4; + + // Total number of unavailable pods targeted by this deployment. + // +optional + optional int32 unavailableReplicas = 5; + + // Represents the latest available observations of a deployment's current state. + repeated DeploymentCondition conditions = 6; +} + +// DeploymentStrategy describes how to replace existing pods with new ones. +message DeploymentStrategy { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + optional string type = 1; + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + optional RollingUpdateDeployment rollingUpdate = 2; +} + +// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. +message FSGroupStrategyOptions { + // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext. + // +optional + optional string rule = 1; + + // Ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. + // +optional + repeated IDRange ranges = 2; +} + +// HTTPIngressPath associates a path regex with a backend. Incoming urls matching +// the path are forwarded to the backend. +message HTTPIngressPath { + // Path is an extended POSIX regex as defined by IEEE Std 1003.1, + // (i.e this follows the egrep/unix syntax, not the perl syntax) + // matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" + // part of a URL as defined by RFC 3986. Paths must begin with + // a '/'. If unspecified, the path defaults to a catch all sending + // traffic to the backend. + // +optional + optional string path = 1; + + // Backend defines the referenced service endpoint to which the traffic + // will be forwarded to. + optional IngressBackend backend = 2; +} + +// HTTPIngressRuleValue is a list of http selectors pointing to backends. 
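As a rough illustration of how DeploymentSpec and DeploymentStrategy fit together, the sketch below builds a Deployment with an explicit replica count (a pointer, so an explicit zero is distinguishable from "unset, default 1") and rolling-update parameters; the object name, labels, and image are placeholders.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/pkg/apis/extensions/v1beta1"
)

func main() {
	replicas := int32(3) // pointer field: distinguishes explicit 0 from "not specified"
	maxUnavailable := intstr.FromString("25%")
	maxSurge := intstr.FromInt(1)
	labels := map[string]string{"app": "demo"} // placeholder labels

	d := v1beta1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
		Spec: v1beta1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{MatchLabels: labels},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec:       v1.PodSpec{Containers: []v1.Container{{Name: "web", Image: "nginx"}}},
			},
			// "Recreate" or "RollingUpdate"; RollingUpdate is the default.
			Strategy: v1beta1.DeploymentStrategy{
				Type: "RollingUpdate",
				RollingUpdate: &v1beta1.RollingUpdateDeployment{
					MaxUnavailable: &maxUnavailable,
					MaxSurge:       &maxSurge,
				},
			},
		},
	}
	fmt.Println("built deployment:", d.Name, "replicas:", *d.Spec.Replicas)
}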
+// In the example: http://<host>/<path>?<searchpart> -> backend where +// where parts of the url correspond to RFC 3986, this resource will be used +// to match against everything after the last '/' and before the first '?' +// or '#'. +message HTTPIngressRuleValue { + // A collection of paths that map requests to backends. + repeated HTTPIngressPath paths = 1; +} + +// Host Port Range defines a range of host ports that will be enabled by a policy +// for pods to use. It requires both the start and end to be defined. +message HostPortRange { + // min is the start of the range, inclusive. + optional int32 min = 1; + + // max is the end of the range, inclusive. + optional int32 max = 2; +} + +// ID Range provides a min/max of an allowed range of IDs. +message IDRange { + // Min is the start of the range, inclusive. + optional int64 min = 1; + + // Max is the end of the range, inclusive. + optional int64 max = 2; +} + +// Ingress is a collection of rules that allow inbound connections to reach the +// endpoints defined by a backend. An Ingress can be configured to give services +// externally-reachable urls, load balance traffic, terminate SSL, offer name +// based virtual hosting etc. +message Ingress { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec is the desired state of the Ingress. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional IngressSpec spec = 2; + + // Status is the current state of the Ingress. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional IngressStatus status = 3; +} + +// IngressBackend describes all endpoints for a given service and port. +message IngressBackend { + // Specifies the name of the referenced service. + optional string serviceName = 1; + + // Specifies the port of the referenced service. + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; +} + +// IngressList is a collection of Ingress. +message IngressList { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Ingress. + repeated Ingress items = 2; +} + +// IngressRule represents the rules mapping the paths under a specified host to +// the related backend services. Incoming requests are first evaluated for a host +// match, then routed to the backend associated with the matching IngressRuleValue. +message IngressRule { + // Host is the fully qualified domain name of a network host, as defined + // by RFC 3986. Note the following deviations from the "host" part of the + // URI as defined in the RFC: + // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the + // IP in the Spec of the parent Ingress. + // 2. The `:` delimiter is not respected because ports are not allowed. + // Currently the port of an Ingress is implicitly :80 for http and + // :443 for https. + // Both these may change in the future. + // Incoming requests are matched against the host before the IngressRuleValue. + // If the host is unspecified, the Ingress routes all traffic based on the + // specified IngressRuleValue. 
+ // +optional + optional string host = 1; + + // IngressRuleValue represents a rule to route requests for this IngressRule. + // If unspecified, the rule defaults to a http catch-all. Whether that sends + // just traffic matching the host to the default backend or all traffic to the + // default backend, is left to the controller fulfilling the Ingress. Http is + // currently the only supported IngressRuleValue. + // +optional + optional IngressRuleValue ingressRuleValue = 2; +} + +// IngressRuleValue represents a rule to apply against incoming requests. If the +// rule is satisfied, the request is routed to the specified backend. Currently +// mixing different types of rules in a single Ingress is disallowed, so exactly +// one of the following must be set. +message IngressRuleValue { + // +optional + optional HTTPIngressRuleValue http = 1; +} + +// IngressSpec describes the Ingress the user wishes to exist. +message IngressSpec { + // A default backend capable of servicing requests that don't match any + // rule. At least one of 'backend' or 'rules' must be specified. This field + // is optional to allow the loadbalancer controller or defaulting logic to + // specify a global default. + // +optional + optional IngressBackend backend = 1; + + // TLS configuration. Currently the Ingress only supports a single TLS + // port, 443. If multiple members of this list specify different hosts, they + // will be multiplexed on the same port according to the hostname specified + // through the SNI TLS extension, if the ingress controller fulfilling the + // ingress supports SNI. + // +optional + repeated IngressTLS tls = 2; + + // A list of host rules used to configure the Ingress. If unspecified, or + // no rule matches, all traffic is sent to the default backend. + // +optional + repeated IngressRule rules = 3; +} + +// IngressStatus describe the current state of the Ingress. +message IngressStatus { + // LoadBalancer contains the current status of the load-balancer. + // +optional + optional k8s.io.kubernetes.pkg.api.v1.LoadBalancerStatus loadBalancer = 1; +} + +// IngressTLS describes the transport layer security associated with an Ingress. +message IngressTLS { + // Hosts are a list of hosts included in the TLS certificate. The values in + // this list must match the name/s used in the tlsSecret. Defaults to the + // wildcard host setting for the loadbalancer controller fulfilling this + // Ingress, if left unspecified. + // +optional + repeated string hosts = 1; + + // SecretName is the name of the secret used to terminate SSL traffic on 443. + // Field is left optional to allow SSL routing based on SNI hostname alone. + // If the SNI host in a listener conflicts with the "Host" header field used + // by an IngressRule, the SNI host is used for termination and value of the + // Host header is used for routing. + // +optional + optional string secretName = 2; +} + +message NetworkPolicy { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior for this NetworkPolicy. + // +optional + optional NetworkPolicySpec spec = 2; +} + +// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. +message NetworkPolicyIngressRule { + // List of ports which should be made accessible on the pods selected for this rule. 
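A minimal sketch of an Ingress assembled from these types, routing one host and path to a service backend and terminating TLS with a named secret; the host, secret, and service names are placeholders.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/pkg/apis/extensions/v1beta1"
)

func main() {
	ing := v1beta1.Ingress{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
		Spec: v1beta1.IngressSpec{
			// TLS is multiplexed by SNI hostname on port 443.
			TLS: []v1beta1.IngressTLS{{Hosts: []string{"example.com"}, SecretName: "web-tls"}},
			Rules: []v1beta1.IngressRule{{
				Host: "example.com",
				IngressRuleValue: v1beta1.IngressRuleValue{
					HTTP: &v1beta1.HTTPIngressRuleValue{
						Paths: []v1beta1.HTTPIngressPath{{
							Path: "/",
							Backend: v1beta1.IngressBackend{
								ServiceName: "web-svc",
								ServicePort: intstr.FromInt(80),
							},
						}},
					},
				},
			}},
		},
	}
	fmt.Println("built ingress for host:", ing.Spec.Rules[0].Host)
}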
+ // Each item in this list is combined using a logical OR. + // If this field is not provided, this rule matches all ports (traffic not restricted by port). + // If this field is empty, this rule matches no ports (no traffic matches). + // If this field is present and contains at least one item, then this rule allows traffic + // only if the traffic matches at least one port in the list. + // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. + // +optional + repeated NetworkPolicyPort ports = 1; + + // List of sources which should be able to access the pods selected for this rule. + // Items in this list are combined using a logical OR operation. + // If this field is not provided, this rule matches all sources (traffic not restricted by source). + // If this field is empty, this rule matches no sources (no traffic matches). + // If this field is present and contains at least on item, this rule allows traffic only if the + // traffic matches at least one item in the from list. + // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. + // +optional + repeated NetworkPolicyPeer from = 2; +} + +// Network Policy List is a list of NetworkPolicy objects. +message NetworkPolicyList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated NetworkPolicy items = 2; +} + +message NetworkPolicyPeer { + // This is a label selector which selects Pods in this namespace. + // This field follows standard label selector semantics. + // If not provided, this selector selects no pods. + // If present but empty, this selector selects all pods in this namespace. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + + // Selects Namespaces using cluster scoped-labels. This + // matches all pods in all namespaces selected by this label selector. + // This field follows standard label selector semantics. + // If omitted, this selector selects no namespaces. + // If present but empty, this selector selects all namespaces. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; +} + +message NetworkPolicyPort { + // Optional. The protocol (TCP or UDP) which traffic must match. + // If not specified, this field defaults to TCP. + // +optional + optional string protocol = 1; + + // If specified, the port on the given protocol. This can + // either be a numerical or named port on a pod. If this field is not provided, + // this matches all port names and numbers. + // If present, only traffic on the specified protocol AND port + // will be matched. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; +} + +message NetworkPolicySpec { + // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules + // is applied to any pods selected by this field. Multiple network policies can select the + // same set of pods. In this case, the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + + // List of ingress rules to be applied to the selected pods. 
+ // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, + // OR if the traffic source is the pod's local node, + // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy + // objects whose podSelector matches the pod. + // If this field is empty then this NetworkPolicy does not affect ingress isolation. + // If this field is present and contains at least one rule, this policy allows any traffic + // which matches at least one of the ingress rules in this list. + // +optional + repeated NetworkPolicyIngressRule ingress = 2; +} + +// Pod Security Policy governs the ability to make requests that affect the Security Context +// that will be applied to a pod and container. +message PodSecurityPolicy { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec defines the policy enforced. + // +optional + optional PodSecurityPolicySpec spec = 2; +} + +// Pod Security Policy List is a list of PodSecurityPolicy objects. +message PodSecurityPolicyList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated PodSecurityPolicy items = 2; +} + +// Pod Security Policy Spec defines the policy enforced. +message PodSecurityPolicySpec { + // privileged determines if a pod can request to be run as privileged. + // +optional + optional bool privileged = 1; + + // DefaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. You may not list a capabiility in both + // DefaultAddCapabilities and RequiredDropCapabilities. + // +optional + repeated string defaultAddCapabilities = 2; + + // RequiredDropCapabilities are the capabilities that will be dropped from the container. These + // are required to be dropped and cannot be added. + // +optional + repeated string requiredDropCapabilities = 3; + + // AllowedCapabilities is a list of capabilities that can be requested to add to the container. + // Capabilities in this field may be added at the pod author's discretion. + // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. + // +optional + repeated string allowedCapabilities = 4; + + // volumes is a white list of allowed volume plugins. Empty indicates that all plugins + // may be used. + // +optional + repeated string volumes = 5; + + // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + // +optional + optional bool hostNetwork = 6; + + // hostPorts determines which host port ranges are allowed to be exposed. + // +optional + repeated HostPortRange hostPorts = 7; + + // hostPID determines if the policy allows the use of HostPID in the pod spec. + // +optional + optional bool hostPID = 8; + + // hostIPC determines if the policy allows the use of HostIPC in the pod spec. + // +optional + optional bool hostIPC = 9; + + // seLinux is the strategy that will dictate the allowable labels that may be set. + optional SELinuxStrategyOptions seLinux = 10; + + // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. 
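To make the selector semantics concrete, the sketch below builds a NetworkPolicy that selects app=redis pods and admits TCP traffic on 6379 only from app=web pods in the same namespace; within a single ingress rule the ports and from lists must both match, while separate rules are ORed. The labels and port number are placeholders.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/pkg/apis/extensions/v1beta1"
)

func main() {
	tcp := v1.ProtocolTCP
	port := intstr.FromInt(6379)

	np := v1beta1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "allow-web-to-redis", Namespace: "default"},
		Spec: v1beta1.NetworkPolicySpec{
			// Required: selects the pods this policy (and its ingress rules) applies to.
			PodSelector: metav1.LabelSelector{MatchLabels: map[string]string{"app": "redis"}},
			Ingress: []v1beta1.NetworkPolicyIngressRule{{
				// Ports AND From must both match within this one rule.
				Ports: []v1beta1.NetworkPolicyPort{{Protocol: &tcp, Port: &port}},
				From: []v1beta1.NetworkPolicyPeer{{
					PodSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
				}},
			}},
		},
	}
	fmt.Println("built network policy:", np.Name)
}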
+ optional RunAsUserStrategyOptions runAsUser = 11; + + // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + optional SupplementalGroupsStrategyOptions supplementalGroups = 12; + + // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. + optional FSGroupStrategyOptions fsGroup = 13; + + // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file + // system. If the container specifically requests to run with a non-read only root file system + // the PSP should deny the pod. + // If set to false the container may run with a read only root file system if it wishes but it + // will not be forced to. + // +optional + optional bool readOnlyRootFilesystem = 14; +} + +// ReplicaSet represents the configuration of a ReplicaSet. +message ReplicaSet { + // If the Labels of a ReplicaSet are empty, they are defaulted to + // be the same as the Pod(s) that the ReplicaSet manages. + // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the specification of the desired behavior of the ReplicaSet. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional ReplicaSetSpec spec = 2; + + // Status is the most recently observed status of the ReplicaSet. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + optional ReplicaSetStatus status = 3; +} + +// ReplicaSetCondition describes the state of a replica set at a certain point. +message ReplicaSetCondition { + // Type of replica set condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// ReplicaSetList is a collection of ReplicaSets. +message ReplicaSetList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ReplicaSets. + // More info: http://kubernetes.io/docs/user-guide/replication-controller + repeated ReplicaSet items = 2; +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +message ReplicaSetSpec { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + // +optional + optional int32 replicas = 1; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. 
+ // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 4; + + // Selector is a label query over pods that should match the replica count. + // If the selector is empty, it is defaulted to the labels present on the pod template. + // Label keys and values that must match in order to be controlled by this replica set. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template + // +optional + optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +message ReplicaSetStatus { + // Replicas is the most recently oberved number of replicas. + // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + optional int32 replicas = 1; + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + optional int32 fullyLabeledReplicas = 2; + + // The number of ready replicas for this replica set. + // +optional + optional int32 readyReplicas = 4; + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // +optional + optional int32 availableReplicas = 5; + + // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. + // +optional + optional int64 observedGeneration = 3; + + // Represents the latest available observations of a replica set's current state. + // +optional + repeated ReplicaSetCondition conditions = 6; +} + +// Dummy definition +message ReplicationControllerDummy { +} + +message RollbackConfig { + // The revision to rollback to. If set to 0, rollbck to the last revision. + // +optional + optional int64 revision = 1; +} + +// Spec to control the desired behavior of daemon set rolling update. +message RollingUpdateDaemonSet { + // The maximum number of DaemonSet pods that can be unavailable during the + // update. Value can be an absolute number (ex: 5) or a percentage of total + // number of DaemonSet pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // This cannot be 0. + // Default value is 1. + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; +} + +// Spec to control the desired behavior of rolling update. +message RollingUpdateDeployment { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. 
+ // This can not be 0 if MaxSurge is 0. + // By default, a fixed value of 1 is used. + // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // By default, a value of 1 is used. + // Example: when this is set to 30%, the new RC can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is atmost 130% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; +} + +// Run A sUser Strategy Options defines the strategy type and any options used to create the strategy. +message RunAsUserStrategyOptions { + // Rule is the strategy that will dictate the allowable RunAsUser values that may be set. + optional string rule = 1; + + // Ranges are the allowed ranges of uids that may be used. + // +optional + repeated IDRange ranges = 2; +} + +// SELinux Strategy Options defines the strategy type and any options used to create the strategy. +message SELinuxStrategyOptions { + // type is the strategy that will dictate the allowable labels that may be set. + optional string rule = 1; + + // seLinuxOptions required to run as; required for MustRunAs + // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context + // +optional + optional k8s.io.kubernetes.pkg.api.v1.SELinuxOptions seLinuxOptions = 2; +} + +// represents a scaling request for a resource. +message Scale { + // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // +optional + optional ScaleSpec spec = 2; + + // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // +optional + optional ScaleStatus status = 3; +} + +// describes the attributes of a scale subresource +message ScaleSpec { + // desired number of instances for the scaled object. + // +optional + optional int32 replicas = 1; +} + +// represents the current status of a scale subresource. +message ScaleStatus { + // actual number of observed instances of the scaled object. + optional int32 replicas = 1; + + // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + map<string, string> selector = 2; + + // label selector for pods that should match the replicas count. 
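The rounding rules for RollingUpdateDeployment can be made concrete with a small arithmetic sketch: with 10 desired pods, maxUnavailable=30% (rounded down) and maxSurge=30% (rounded up), the available count never falls below 7 and the total never exceeds 13 during the update.

package main

import "fmt"

func main() {
	desired := 10
	// maxUnavailable percentages are rounded down, maxSurge percentages are rounded up.
	maxUnavailable := (desired * 30) / 100 // 30% of 10, floor -> 3
	maxSurge := (desired*30 + 99) / 100    // 30% of 10, ceil  -> 3

	fmt.Println("at least", desired-maxUnavailable, "pods stay available") // 7
	fmt.Println("at most", desired+maxSurge, "pods run in total")          // 13
}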
This is a serializated + // version of both map-based and more expressive set-based selectors. This is done to + // avoid introspection in the clients. The string will be in the same format as the + // query-param syntax. If the target type only supports map-based selectors, both this + // field and map-based selector field are populated. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + optional string targetSelector = 3; +} + +// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +message SupplementalGroupsStrategyOptions { + // Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. + // +optional + optional string rule = 1; + + // Ranges are the allowed ranges of supplemental groups. If you would like to force a single + // supplemental group then supply a single range with the same start and end. + // +optional + repeated IDRange ranges = 2; +} + +// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource +// types to the API. It consists of one or more Versions of the api. +message ThirdPartyResource { + // Standard object metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Description is the description of this object. + // +optional + optional string description = 2; + + // Versions are versions for this third party object + // +optional + repeated APIVersion versions = 3; +} + +// An internal object, used for versioned storage in etcd. Not exposed to the end user. +message ThirdPartyResourceData { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Data is the raw JSON data for this data. + // +optional + optional bytes data = 2; +} + +// ThirdPartyResrouceDataList is a list of ThirdPartyResourceData. +message ThirdPartyResourceDataList { + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of ThirdpartyResourceData. + repeated ThirdPartyResourceData items = 2; +} + +// ThirdPartyResourceList is a list of ThirdPartyResources. +message ThirdPartyResourceList { + // Standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of ThirdPartyResources. + repeated ThirdPartyResource items = 2; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/register.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/register.go new file mode 100644 index 000000000..fa5b2e1b2 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/register.go @@ -0,0 +1,67 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "extensions" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Deployment{}, + &DeploymentList{}, + &DeploymentRollback{}, + &ReplicationControllerDummy{}, + &Scale{}, + &ThirdPartyResource{}, + &ThirdPartyResourceList{}, + &DaemonSetList{}, + &DaemonSet{}, + &ThirdPartyResourceData{}, + &ThirdPartyResourceDataList{}, + &Ingress{}, + &IngressList{}, + &ReplicaSet{}, + &ReplicaSetList{}, + &PodSecurityPolicy{}, + &PodSecurityPolicyList{}, + &NetworkPolicy{}, + &NetworkPolicyList{}, + ) + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types.generated.go new file mode 100644 index 000000000..4f179e34f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types.generated.go @@ -0,0 +1,21745 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
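register.go wires these types into a runtime.Scheme through SchemeBuilder and AddToScheme; a minimal sketch of using it against a fresh scheme follows (the generated clientset normally performs this registration for you).

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/pkg/apis/extensions/v1beta1"
)

func main() {
	scheme := runtime.NewScheme()
	// Registers Deployment, DaemonSet, Ingress, NetworkPolicy, etc. under the
	// extensions/v1beta1 GroupVersion, plus the package's defaulting and conversion funcs.
	if err := v1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	fmt.Println(scheme.Recognizes(v1beta1.SchemeGroupVersion.WithKind("Deployment"))) // true
}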
+// ************************************************************ + +package v1beta1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg3_resource "k8s.io/apimachinery/pkg/api/resource" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + pkg5_intstr "k8s.io/apimachinery/pkg/util/intstr" + pkg4_v1 "k8s.io/client-go/pkg/api/v1" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg3_resource.Quantity + var v1 pkg1_v1.TypeMeta + var v2 pkg2_types.UID + var v3 pkg5_intstr.IntOrString + var v4 pkg4_v1.PodTemplateSpec + var v5 time.Time + _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 + } +} + +func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := 
r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv7 := &x.Replicas + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int32)(yyv7)) = int32(r.DecodeInt(32)) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = len(x.Selector) != 0 + yyq2[2] = x.TargetSelector != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncMapStringStringV(x.Selector, false, e) + } + } + } else { + 
r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncMapStringStringV(x.Selector, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TargetSelector)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("targetSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.TargetSelector)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "selector": + if r.TryDecodeAsNil() { + x.Selector = nil + } else { + yyv6 := &x.Selector + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecMapStringStringX(yyv6, false, d) + } + } + case "targetSelector": + if r.TryDecodeAsNil() { + x.TargetSelector = "" + } else { + yyv8 := &x.TargetSelector + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + 
var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv11 := &x.Replicas + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(yyv11)) = int32(r.DecodeInt(32)) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Selector = nil + } else { + yyv13 := &x.Selector + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + z.F.DecMapStringStringX(yyv13, false, d) + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetSelector = "" + } else { + yyv15 := &x.TargetSelector + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, 
string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta 
= pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ScaleSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ScaleStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ScaleSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ScaleStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicationControllerDummy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = 
yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicationControllerDummy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicationControllerDummy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end 
switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicationControllerDummy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv9 := &x.Kind + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv11 := &x.APIVersion + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CustomMetricTarget) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.TargetValue + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.TargetValue + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CustomMetricTarget) CodecDecodeSelf(d *codec1978.Decoder) { + 
var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CustomMetricTarget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "value": + if r.TryDecodeAsNil() { + x.TargetValue = pkg3_resource.Quantity{} + } else { + yyv6 := &x.TargetValue + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CustomMetricTarget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv9 := &x.Name + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TargetValue = pkg3_resource.Quantity{} + } else { + yyv11 := &x.TargetValue + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) + } else { + z.DecFallback(yyv11, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CustomMetricTargetList) CodecEncodeSelf(e *codec1978.Encoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CustomMetricTargetList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CustomMetricTargetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv4 := &x.Items + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CustomMetricTargetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv7 := &x.Items + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CustomMetricCurrentStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.CurrentValue + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("value")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.CurrentValue + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CustomMetricCurrentStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CustomMetricCurrentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = 
h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "value": + if r.TryDecodeAsNil() { + x.CurrentValue = pkg3_resource.Quantity{} + } else { + yyv6 := &x.CurrentValue + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CustomMetricCurrentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv9 := &x.Name + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentValue = pkg3_resource.Quantity{} + } else { + yyv11 := &x.CurrentValue + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) + } else { + z.DecFallback(yyv11, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *CustomMetricCurrentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e) + } + } + } else { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *CustomMetricCurrentStatusList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv4 := &x.Items + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv4), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv7 := &x.Items + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ThirdPartyResource) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + 
} else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = x.Description != "" + yyq2[4] = len(x.Versions) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Description)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("description")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Description)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Versions == nil { + r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + h.encSliceAPIVersion(([]APIVersion)(x.Versions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("versions")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Versions == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encSliceAPIVersion(([]APIVersion)(x.Versions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ThirdPartyResource) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ThirdPartyResource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "description": + if r.TryDecodeAsNil() { + x.Description = "" + } else { + yyv10 := &x.Description + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "versions": + if r.TryDecodeAsNil() { + x.Versions = nil + } else { + yyv12 := &x.Versions + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + h.decSliceAPIVersion((*[]APIVersion)(yyv12), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ThirdPartyResource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + 
} else { + yyv15 := &x.Kind + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv17 := &x.APIVersion + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv19 := &x.ObjectMeta + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else { + z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Description = "" + } else { + yyv21 := &x.Description + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Versions = nil + } else { + yyv23 := &x.Versions + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + h.decSliceAPIVersion((*[]APIVersion)(yyv23), d) + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ThirdPartyResourceList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ThirdPartyResourceList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ThirdPartyResourceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := 
&x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ThirdPartyResourceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *APIVersion) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := 
!z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Name != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *APIVersion) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *APIVersion) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv4 := &x.Name + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *APIVersion) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv7 := &x.Name + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*string)(yyv7)) = r.DecodeString() + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if 
yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ThirdPartyResourceData) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = len(x.Data) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.Data == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("data")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Data == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) + } + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ThirdPartyResourceData) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ThirdPartyResourceData) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "data": + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv10 := &x.Data + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *yyv10 = r.DecodeBytes(*(*[]byte)(yyv10), false, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ThirdPartyResourceData) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := 
z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Data = nil + } else { + yyv19 := &x.Data + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *yyv19 = r.DecodeBytes(*(*[]byte)(yyv19), false, false) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Deployment) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + 
r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Deployment) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Deployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = DeploymentSpec{} + } else { + yyv10 := 
&x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = DeploymentStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = DeploymentSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = DeploymentStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [9]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != nil + yyq2[1] = x.Selector != nil + yyq2[3] = true + yyq2[4] = x.MinReadySeconds != 0 + yyq2[5] = x.RevisionHistoryLimit != nil + yyq2[6] = x.Paused != false + yyq2[7] = x.RollbackTo != nil + yyq2[8] = 
x.ProgressDeadlineSeconds != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(9) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Replicas == nil { + r.EncodeNil() + } else { + yy4 := *x.Replicas + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Replicas == nil { + r.EncodeNil() + } else { + yy6 := *x.Replicas + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy12 := &x.Template + yy12.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.Template + yy14.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy17 := &x.Strategy + yy17.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("strategy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy19 := &x.Strategy + yy19.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.RevisionHistoryLimit == nil { + r.EncodeNil() + } else { + yy25 := *x.RevisionHistoryLimit + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeInt(int64(yy25)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("revisionHistoryLimit")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RevisionHistoryLimit == nil { + r.EncodeNil() + } else { + yy27 := *x.RevisionHistoryLimit + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeInt(int64(yy27)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym30 := z.EncBinary() + _ = yym30 + if false { + } else { + r.EncodeBool(bool(x.Paused)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("paused")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeBool(bool(x.Paused)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + if x.RollbackTo == nil { + r.EncodeNil() + } else { + x.RollbackTo.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RollbackTo == nil { + r.EncodeNil() + } else { + x.RollbackTo.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + if x.ProgressDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy36 := *x.ProgressDeadlineSeconds + yym37 := z.EncBinary() + _ = yym37 + if false { + } else { + r.EncodeInt(int64(yy36)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("progressDeadlineSeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ProgressDeadlineSeconds == nil { + r.EncodeNil() + } else { + yy38 := *x.ProgressDeadlineSeconds + yym39 := z.EncBinary() + _ = yym39 + if false { + } else { + r.EncodeInt(int64(yy38)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := 
string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = pkg4_v1.PodTemplateSpec{} + } else { + yyv8 := &x.Template + yyv8.CodecDecodeSelf(d) + } + case "strategy": + if r.TryDecodeAsNil() { + x.Strategy = DeploymentStrategy{} + } else { + yyv9 := &x.Strategy + yyv9.CodecDecodeSelf(d) + } + case "minReadySeconds": + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv10 := &x.MinReadySeconds + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "revisionHistoryLimit": + if r.TryDecodeAsNil() { + if x.RevisionHistoryLimit != nil { + x.RevisionHistoryLimit = nil + } + } else { + if x.RevisionHistoryLimit == nil { + x.RevisionHistoryLimit = new(int32) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + case "paused": + if r.TryDecodeAsNil() { + x.Paused = false + } else { + yyv14 := &x.Paused + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + case "rollbackTo": + if r.TryDecodeAsNil() { + if x.RollbackTo != nil { + x.RollbackTo = nil + } + } else { + if x.RollbackTo == nil { + x.RollbackTo = new(RollbackConfig) + } + x.RollbackTo.CodecDecodeSelf(d) + } + case "progressDeadlineSeconds": + if r.TryDecodeAsNil() { + if x.ProgressDeadlineSeconds != nil { + x.ProgressDeadlineSeconds = nil + } + } else { + if x.ProgressDeadlineSeconds == nil { + x.ProgressDeadlineSeconds = new(int32) + } + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(x.ProgressDeadlineSeconds)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj19 int + var yyb19 bool + var yyhl19 bool = l >= 0 + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else 
{ + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym23 := z.DecBinary() + _ = yym23 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = pkg4_v1.PodTemplateSpec{} + } else { + yyv24 := &x.Template + yyv24.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Strategy = DeploymentStrategy{} + } else { + yyv25 := &x.Strategy + yyv25.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv26 := &x.MinReadySeconds + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*int32)(yyv26)) = int32(r.DecodeInt(32)) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RevisionHistoryLimit != nil { + x.RevisionHistoryLimit = nil + } + } else { + if x.RevisionHistoryLimit == nil { + x.RevisionHistoryLimit = new(int32) + } + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Paused = false + } else { + yyv30 := &x.Paused + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*bool)(yyv30)) = r.DecodeBool() + } + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RollbackTo != nil { + x.RollbackTo = nil + } + } else { + if x.RollbackTo == nil { + x.RollbackTo = new(RollbackConfig) + } + x.RollbackTo.CodecDecodeSelf(d) + } + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.ProgressDeadlineSeconds != nil { + x.ProgressDeadlineSeconds = nil + } + } else { + if x.ProgressDeadlineSeconds == nil { + x.ProgressDeadlineSeconds = new(int32) + } + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*int32)(x.ProgressDeadlineSeconds)) = int32(r.DecodeInt(32)) + } + } + for { + yyj19++ + if yyhl19 { + yyb19 = yyj19 > l + } else { + yyb19 = r.CheckBreak() + } + if yyb19 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + 
z.DecStructFieldNotFound(yyj19-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentRollback) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[3] = len(x.UpdatedAnnotations) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.UpdatedAnnotations == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("updatedAnnotations")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.UpdatedAnnotations == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy16 := &x.RollbackTo + yy16.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy18 := &x.RollbackTo + yy18.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentRollback) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentRollback) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "updatedAnnotations": + if r.TryDecodeAsNil() { + x.UpdatedAnnotations = nil + } else { + yyv10 := &x.UpdatedAnnotations + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + z.F.DecMapStringStringX(yyv10, false, d) + } + } + case "rollbackTo": + if r.TryDecodeAsNil() { + x.RollbackTo = RollbackConfig{} + } else { + yyv12 := &x.RollbackTo + yyv12.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentRollback) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv14 := &x.Kind + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + 
} else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv16 := &x.APIVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv18 := &x.Name + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UpdatedAnnotations = nil + } else { + yyv20 := &x.UpdatedAnnotations + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + z.F.DecMapStringStringX(yyv20, false, d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RollbackTo = RollbackConfig{} + } else { + yyv22 := &x.RollbackTo + yyv22.CodecDecodeSelf(d) + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RollbackConfig) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Revision != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Revision)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("revision")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Revision)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RollbackConfig) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == 
codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RollbackConfig) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "revision": + if r.TryDecodeAsNil() { + x.Revision = 0 + } else { + yyv4 := &x.Revision + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RollbackConfig) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Revision = 0 + } else { + yyv7 := &x.Revision + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + *((*int64)(yyv7)) = int64(r.DecodeInt(64)) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentStrategy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Type != "" + yyq2[1] = x.RollingUpdate != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Type.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.RollingUpdate == nil { + r.EncodeNil() + } else { + x.RollingUpdate.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rollingUpdate")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RollingUpdate == nil { + r.EncodeNil() + } else { + x.RollingUpdate.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentStrategy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "rollingUpdate": + if r.TryDecodeAsNil() { + if x.RollingUpdate != nil { + x.RollingUpdate = nil + } + } else { + if x.RollingUpdate == nil { + x.RollingUpdate = new(RollingUpdateDeployment) + } + x.RollingUpdate.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentStrategy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv7 := &x.Type + yyv7.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RollingUpdate != nil { + x.RollingUpdate = nil + } + } else { + if x.RollingUpdate == nil { + x.RollingUpdate = new(RollingUpdateDeployment) + } + 
x.RollingUpdate.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x DeploymentStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *DeploymentStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *RollingUpdateDeployment) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.MaxUnavailable != nil + yyq2[1] = x.MaxSurge != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.MaxUnavailable == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { + } else if !yym4 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxUnavailable) + } else { + z.EncFallback(x.MaxUnavailable) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("maxUnavailable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MaxUnavailable == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxUnavailable) + } else { + z.EncFallback(x.MaxUnavailable) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.MaxSurge == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxSurge) { + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxSurge) + } else { + z.EncFallback(x.MaxSurge) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("maxSurge")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MaxSurge == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxSurge) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxSurge) + } else { + z.EncFallback(x.MaxSurge) + } + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RollingUpdateDeployment) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RollingUpdateDeployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "maxUnavailable": + if r.TryDecodeAsNil() { + if x.MaxUnavailable != nil { + x.MaxUnavailable = nil + } + } else { + if x.MaxUnavailable == nil { + x.MaxUnavailable = new(pkg5_intstr.IntOrString) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxUnavailable) + } else { + z.DecFallback(x.MaxUnavailable, false) + } + } + case "maxSurge": + if r.TryDecodeAsNil() { + if x.MaxSurge != nil { + x.MaxSurge = nil + } + } else { + if x.MaxSurge == nil { + x.MaxSurge = new(pkg5_intstr.IntOrString) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxSurge) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxSurge) + } else { + z.DecFallback(x.MaxSurge, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RollingUpdateDeployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.MaxUnavailable != nil { + x.MaxUnavailable = nil + } + } else { + if x.MaxUnavailable == nil { + x.MaxUnavailable = new(pkg5_intstr.IntOrString) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxUnavailable) + } else { + z.DecFallback(x.MaxUnavailable, false) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.MaxSurge != nil { + x.MaxSurge = nil + } + } else { + if x.MaxSurge == nil { + x.MaxSurge = new(pkg5_intstr.IntOrString) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxSurge) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxSurge) + } else { + z.DecFallback(x.MaxSurge, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [7]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ObservedGeneration != 0 + yyq2[1] = x.Replicas != 0 + yyq2[2] = x.UpdatedReplicas != 0 + yyq2[3] = x.ReadyReplicas != 0 + yyq2[4] = x.AvailableReplicas != 0 + yyq2[5] = x.UnavailableReplicas != 0 + yyq2[6] = len(x.Conditions) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(7) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.UpdatedReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("updatedReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.UpdatedReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + 
r.EncodeInt(int64(x.ReadyReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readyReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.ReadyReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(x.UnavailableReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("unavailableReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.UnavailableReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + h.encSliceDeploymentCondition(([]DeploymentCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + h.encSliceDeploymentCondition(([]DeploymentCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 
>= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "observedGeneration": + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv4 := &x.ObservedGeneration + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } + } + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv6 := &x.Replicas + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "updatedReplicas": + if r.TryDecodeAsNil() { + x.UpdatedReplicas = 0 + } else { + yyv8 := &x.UpdatedReplicas + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "readyReplicas": + if r.TryDecodeAsNil() { + x.ReadyReplicas = 0 + } else { + yyv10 := &x.ReadyReplicas + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "availableReplicas": + if r.TryDecodeAsNil() { + x.AvailableReplicas = 0 + } else { + yyv12 := &x.AvailableReplicas + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(yyv12)) = int32(r.DecodeInt(32)) + } + } + case "unavailableReplicas": + if r.TryDecodeAsNil() { + x.UnavailableReplicas = 0 + } else { + yyv14 := &x.UnavailableReplicas + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } + } + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv16 := &x.Conditions + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + h.decSliceDeploymentCondition((*[]DeploymentCondition)(yyv16), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj18 int + var yyb18 bool + var yyhl18 bool = l >= 0 + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv19 := &x.ObservedGeneration + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int64)(yyv19)) = int64(r.DecodeInt(64)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv21 := &x.Replicas + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UpdatedReplicas = 0 + } else { + yyv23 := &x.UpdatedReplicas + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + 
*((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadyReplicas = 0 + } else { + yyv25 := &x.ReadyReplicas + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AvailableReplicas = 0 + } else { + yyv27 := &x.AvailableReplicas + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(yyv27)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UnavailableReplicas = 0 + } else { + yyv29 := &x.UnavailableReplicas + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*int32)(yyv29)) = int32(r.DecodeInt(32)) + } + } + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv31 := &x.Conditions + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + h.decSliceDeploymentCondition((*[]DeploymentCondition)(yyv31), d) + } + } + for { + yyj18++ + if yyhl18 { + yyb18 = yyj18 > l + } else { + yyb18 = r.CheckBreak() + } + if yyb18 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj18-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x DeploymentConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *DeploymentConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *DeploymentCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = true + yyq2[4] = x.Reason != "" + yyq2[5] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf7 := &x.Status + yysf7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf8 := &x.Status + yysf8.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastUpdateTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastUpdateTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastUpdateTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.LastTransitionTime + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(yy15) { + } else if yym16 { + z.EncBinaryMarshal(yy15) + } else if !yym16 && z.IsJSONHandle() { + z.EncJSONMarshal(yy15) + } else { + z.EncFallback(yy15) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.LastTransitionTime + yym18 := z.EncBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.EncExt(yy17) { + } else if yym18 { + z.EncBinaryMarshal(yy17) + } else if !yym18 && z.IsJSONHandle() { + z.EncJSONMarshal(yy17) + } else { + z.EncFallback(yy17) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym24 := z.EncBinary() + _ = yym24 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "lastUpdateTime": + if r.TryDecodeAsNil() { + x.LastUpdateTime = pkg1_v1.Time{} + } else { + yyv6 := &x.LastUpdateTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "lastTransitionTime": + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg1_v1.Time{} + } else { + yyv8 := &x.LastTransitionTime + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv10 := &x.Reason + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv12 := &x.Message + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l 
>= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv15 := &x.Type + yyv15.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv16 := &x.Status + yyv16.CodecDecodeSelf(d) + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastUpdateTime = pkg1_v1.Time{} + } else { + yyv17 := &x.LastUpdateTime + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else if yym18 { + z.DecBinaryUnmarshal(yyv17) + } else if !yym18 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv17) + } else { + z.DecFallback(yyv17, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg1_v1.Time{} + } else { + yyv19 := &x.LastTransitionTime + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else if yym20 { + z.DecBinaryUnmarshal(yyv19) + } else if !yym20 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv19) + } else { + z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv21 := &x.Reason + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv23 := &x.Message + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*string)(yyv23)) = r.DecodeString() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 
int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceDeployment(([]Deployment)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceDeployment(([]Deployment)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DeploymentList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } 
else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DeploymentList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceDeployment((*[]Deployment)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DeploymentList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 
:= z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceDeployment((*[]Deployment)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DaemonSetUpdateStrategy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Type != "" + yyq2[1] = x.RollingUpdate != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Type.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.RollingUpdate == nil { + r.EncodeNil() + } else { + x.RollingUpdate.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rollingUpdate")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RollingUpdate == nil { + r.EncodeNil() + } else { + x.RollingUpdate.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DaemonSetUpdateStrategy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DaemonSetUpdateStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := 
string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "rollingUpdate": + if r.TryDecodeAsNil() { + if x.RollingUpdate != nil { + x.RollingUpdate = nil + } + } else { + if x.RollingUpdate == nil { + x.RollingUpdate = new(RollingUpdateDaemonSet) + } + x.RollingUpdate.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DaemonSetUpdateStrategy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv7 := &x.Type + yyv7.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.RollingUpdate != nil { + x.RollingUpdate = nil + } + } else { + if x.RollingUpdate == nil { + x.RollingUpdate = new(RollingUpdateDaemonSet) + } + x.RollingUpdate.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x DaemonSetUpdateStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *DaemonSetUpdateStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *RollingUpdateDaemonSet) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.MaxUnavailable != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.MaxUnavailable == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { + } else if !yym4 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxUnavailable) 
+ } else { + z.EncFallback(x.MaxUnavailable) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("maxUnavailable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.MaxUnavailable == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(x.MaxUnavailable) + } else { + z.EncFallback(x.MaxUnavailable) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RollingUpdateDaemonSet) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RollingUpdateDaemonSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "maxUnavailable": + if r.TryDecodeAsNil() { + if x.MaxUnavailable != nil { + x.MaxUnavailable = nil + } + } else { + if x.MaxUnavailable == nil { + x.MaxUnavailable = new(pkg5_intstr.IntOrString) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxUnavailable) + } else { + z.DecFallback(x.MaxUnavailable, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RollingUpdateDaemonSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.MaxUnavailable != nil { + x.MaxUnavailable = nil + } + } else { + if x.MaxUnavailable == nil { + x.MaxUnavailable = new(pkg5_intstr.IntOrString) + } + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { + } else if !yym8 && 
z.IsJSONHandle() { + z.DecJSONUnmarshal(x.MaxUnavailable) + } else { + z.DecFallback(x.MaxUnavailable, false) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DaemonSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Selector != nil + yyq2[2] = true + yyq2[3] = x.MinReadySeconds != 0 + yyq2[4] = x.TemplateGeneration != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.Template + yy7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.Template + yy9.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy12 := &x.UpdateStrategy + yy12.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("updateStrategy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy14 := &x.UpdateStrategy + yy14.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.TemplateGeneration)) + } + } else { + r.EncodeInt(0) + } + } else { + if 
yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("templateGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym21 := z.EncBinary() + _ = yym21 + if false { + } else { + r.EncodeInt(int64(x.TemplateGeneration)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DaemonSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DaemonSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = pkg4_v1.PodTemplateSpec{} + } else { + yyv6 := &x.Template + yyv6.CodecDecodeSelf(d) + } + case "updateStrategy": + if r.TryDecodeAsNil() { + x.UpdateStrategy = DaemonSetUpdateStrategy{} + } else { + yyv7 := &x.UpdateStrategy + yyv7.CodecDecodeSelf(d) + } + case "minReadySeconds": + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv8 := &x.MinReadySeconds + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "templateGeneration": + if r.TryDecodeAsNil() { + x.TemplateGeneration = 0 + } else { + yyv10 := &x.TemplateGeneration + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int64)(yyv10)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DaemonSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym14 := z.DecBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = pkg4_v1.PodTemplateSpec{} + } else { + yyv15 := &x.Template + yyv15.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UpdateStrategy = DaemonSetUpdateStrategy{} + } else { + yyv16 := &x.UpdateStrategy + yyv16.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv17 := &x.MinReadySeconds + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TemplateGeneration = 0 + } else { + yyv19 := &x.TemplateGeneration + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int64)(yyv19)) = int64(r.DecodeInt(64)) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DaemonSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[4] = x.ObservedGeneration != 0 + yyq2[5] = x.UpdatedNumberScheduled != 0 + yyq2[6] = x.NumberAvailable != 0 + yyq2[7] = x.NumberUnavailable != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(8) + } else { + yynn2 = 4 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.CurrentNumberScheduled)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentNumberScheduled")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + 
r.EncodeInt(int64(x.CurrentNumberScheduled)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.NumberMisscheduled)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("numberMisscheduled")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.NumberMisscheduled)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.DesiredNumberScheduled)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("desiredNumberScheduled")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.DesiredNumberScheduled)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.NumberReady)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("numberReady")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.NumberReady)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(x.UpdatedNumberScheduled)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("updatedNumberScheduled")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.UpdatedNumberScheduled)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeInt(int64(x.NumberAvailable)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("numberAvailable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeInt(int64(x.NumberAvailable)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeInt(int64(x.NumberUnavailable)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[7] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("numberUnavailable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeInt(int64(x.NumberUnavailable)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DaemonSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DaemonSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "currentNumberScheduled": + if r.TryDecodeAsNil() { + x.CurrentNumberScheduled = 0 + } else { + yyv4 := &x.CurrentNumberScheduled + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "numberMisscheduled": + if r.TryDecodeAsNil() { + x.NumberMisscheduled = 0 + } else { + yyv6 := &x.NumberMisscheduled + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "desiredNumberScheduled": + if r.TryDecodeAsNil() { + x.DesiredNumberScheduled = 0 + } else { + yyv8 := &x.DesiredNumberScheduled + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "numberReady": + if r.TryDecodeAsNil() { + x.NumberReady = 0 + } else { + yyv10 := &x.NumberReady + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "observedGeneration": + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv12 := &x.ObservedGeneration + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int64)(yyv12)) = int64(r.DecodeInt(64)) + } + } + case "updatedNumberScheduled": + if r.TryDecodeAsNil() { + x.UpdatedNumberScheduled = 0 + } else { + yyv14 := &x.UpdatedNumberScheduled + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } + } + case "numberAvailable": + if r.TryDecodeAsNil() { + x.NumberAvailable = 0 + } else { + yyv16 := &x.NumberAvailable + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*int32)(yyv16)) = int32(r.DecodeInt(32)) + } + } + case 
"numberUnavailable": + if r.TryDecodeAsNil() { + x.NumberUnavailable = 0 + } else { + yyv18 := &x.NumberUnavailable + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*int32)(yyv18)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DaemonSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj20 int + var yyb20 bool + var yyhl20 bool = l >= 0 + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentNumberScheduled = 0 + } else { + yyv21 := &x.CurrentNumberScheduled + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NumberMisscheduled = 0 + } else { + yyv23 := &x.NumberMisscheduled + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DesiredNumberScheduled = 0 + } else { + yyv25 := &x.DesiredNumberScheduled + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NumberReady = 0 + } else { + yyv27 := &x.NumberReady + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(yyv27)) = int32(r.DecodeInt(32)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv29 := &x.ObservedGeneration + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*int64)(yyv29)) = int64(r.DecodeInt(64)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.UpdatedNumberScheduled = 0 + } else { + yyv31 := &x.UpdatedNumberScheduled + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + *((*int32)(yyv31)) = int32(r.DecodeInt(32)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + 
x.NumberAvailable = 0 + } else { + yyv33 := &x.NumberAvailable + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + *((*int32)(yyv33)) = int32(r.DecodeInt(32)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NumberUnavailable = 0 + } else { + yyv35 := &x.NumberUnavailable + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*int32)(yyv35)) = int32(r.DecodeInt(32)) + } + } + for { + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj20-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DaemonSet) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 
|| yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DaemonSet) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DaemonSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = DaemonSetSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = DaemonSetStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DaemonSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { 
+ var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = DaemonSetSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = DaemonSetStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *DaemonSetList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceDaemonSet(([]DaemonSet)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceDaemonSet(([]DaemonSet)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *DaemonSetList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *DaemonSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = 
r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceDaemonSet((*[]DaemonSet)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *DaemonSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceDaemonSet((*[]DaemonSet)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ThirdPartyResourceDataList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + 
_, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ThirdPartyResourceDataList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && 
z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ThirdPartyResourceDataList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ThirdPartyResourceDataList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = 
pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Ingress) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + 
z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Ingress) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Ingress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = IngressSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = IngressStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Ingress) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = IngressSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = IngressStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *IngressList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceIngress(([]Ingress)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceIngress(([]Ingress)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *IngressList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *IngressList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + 
z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceIngress((*[]Ingress)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *IngressList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceIngress((*[]Ingress)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *IngressSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 
+ z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Backend != nil + yyq2[1] = len(x.TLS) != 0 + yyq2[2] = len(x.Rules) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Backend == nil { + r.EncodeNil() + } else { + x.Backend.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("backend")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Backend == nil { + r.EncodeNil() + } else { + x.Backend.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.TLS == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("tls")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.TLS == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Rules == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceIngressRule(([]IngressRule)(x.Rules), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rules")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + h.encSliceIngressRule(([]IngressRule)(x.Rules), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *IngressSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *IngressSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "backend": + if r.TryDecodeAsNil() { + if x.Backend != nil { + x.Backend = nil + } + } else { + if x.Backend == nil { + x.Backend = new(IngressBackend) + } + x.Backend.CodecDecodeSelf(d) + } + case "tls": + if r.TryDecodeAsNil() { + x.TLS = nil + } else { + yyv5 := &x.TLS + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSliceIngressTLS((*[]IngressTLS)(yyv5), d) + } + } + case "rules": + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv7 := &x.Rules + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceIngressRule((*[]IngressRule)(yyv7), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *IngressSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Backend != nil { + x.Backend = nil + } + } else { + if x.Backend == nil { + x.Backend = new(IngressBackend) + } + x.Backend.CodecDecodeSelf(d) + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.TLS = nil + } else { + yyv11 := &x.TLS + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceIngressTLS((*[]IngressTLS)(yyv11), d) + } + } + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv13 := &x.Rules + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + h.decSliceIngressRule((*[]IngressRule)(yyv13), d) + } + } + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = r.CheckBreak() + } + if yyb9 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj9-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *IngressTLS) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Hosts) != 0 + yyq2[1] = x.SecretName != "" + var yynn2 int + if yyr2 || yy2arr2 { + 
r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Hosts == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.Hosts, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hosts")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Hosts == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.Hosts, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("secretName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *IngressTLS) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *IngressTLS) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "hosts": + if r.TryDecodeAsNil() { + x.Hosts = nil + } else { + yyv4 := &x.Hosts + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + case "secretName": + if r.TryDecodeAsNil() { + x.SecretName = "" + } else { + yyv6 := &x.SecretName + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *IngressTLS) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Hosts = nil + } else { + yyv9 := &x.Hosts + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + z.F.DecSliceStringX(yyv9, false, d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SecretName = "" + } else { + yyv11 := &x.SecretName + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *IngressStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.LoadBalancer + yy4.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("loadBalancer")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.LoadBalancer + yy6.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *IngressStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *IngressStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // 
default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "loadBalancer": + if r.TryDecodeAsNil() { + x.LoadBalancer = pkg4_v1.LoadBalancerStatus{} + } else { + yyv4 := &x.LoadBalancer + yyv4.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *IngressStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LoadBalancer = pkg4_v1.LoadBalancerStatus{} + } else { + yyv6 := &x.LoadBalancer + yyv6.CodecDecodeSelf(d) + } + for { + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj5-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *IngressRule) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Host != "" + yyq2[1] = x.IngressRuleValue.HTTP != nil && x.HTTP != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Host)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("host")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Host)) + } + } + } + var yyn6 bool + if x.IngressRuleValue.HTTP == nil { + yyn6 = true + goto LABEL6 + } + LABEL6: + if yyr2 || yy2arr2 { + if yyn6 { + r.EncodeNil() + } else { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.HTTP == nil { + r.EncodeNil() + } else { + x.HTTP.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("http")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if yyn6 { + r.EncodeNil() + } else { + if x.HTTP == nil { + r.EncodeNil() + } else { + 
x.HTTP.CodecEncodeSelf(e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *IngressRule) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *IngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "host": + if r.TryDecodeAsNil() { + x.Host = "" + } else { + yyv4 := &x.Host + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "http": + if x.IngressRuleValue.HTTP == nil { + x.IngressRuleValue.HTTP = new(HTTPIngressRuleValue) + } + if r.TryDecodeAsNil() { + if x.HTTP != nil { + x.HTTP = nil + } + } else { + if x.HTTP == nil { + x.HTTP = new(HTTPIngressRuleValue) + } + x.HTTP.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *IngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Host = "" + } else { + yyv8 := &x.Host + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + if x.IngressRuleValue.HTTP == nil { + x.IngressRuleValue.HTTP = new(HTTPIngressRuleValue) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HTTP != nil { + x.HTTP = nil + } + } else { + if x.HTTP == nil { + x.HTTP = new(HTTPIngressRuleValue) + } + x.HTTP.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *IngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.HTTP != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.HTTP == nil { + r.EncodeNil() + } else { + x.HTTP.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("http")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.HTTP == nil { + r.EncodeNil() + } else { + x.HTTP.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *IngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *IngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "http": + if r.TryDecodeAsNil() { + if x.HTTP != nil { + x.HTTP = nil + } + } else { + if x.HTTP == nil { + x.HTTP = new(HTTPIngressRuleValue) + } + x.HTTP.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *IngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj5 int + var yyb5 bool + var yyhl5 bool = l >= 0 + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.HTTP != nil { + x.HTTP = nil + } + } else { + if x.HTTP == nil { + x.HTTP = new(HTTPIngressRuleValue) + } + x.HTTP.CodecDecodeSelf(d) + } + for { + yyj5++ + if yyhl5 { + yyb5 = yyj5 > l + } else { + yyb5 = r.CheckBreak() + } + if yyb5 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj5-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HTTPIngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [1]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(1) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Paths == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("paths")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Paths == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HTTPIngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HTTPIngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "paths": + if r.TryDecodeAsNil() { + x.Paths = nil + } else { + yyv4 := &x.Paths + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv4), d) + } + 
} + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HTTPIngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Paths = nil + } else { + yyv7 := &x.Paths + yym8 := z.DecBinary() + _ = yym8 + if false { + } else { + h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv7), d) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *HTTPIngressPath) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Path != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("path")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Path)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.Backend + yy7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("backend")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.Backend + yy9.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HTTPIngressPath) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { 
+ x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HTTPIngressPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "path": + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv4 := &x.Path + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "backend": + if r.TryDecodeAsNil() { + x.Backend = IngressBackend{} + } else { + yyv6 := &x.Backend + yyv6.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HTTPIngressPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Path = "" + } else { + yyv8 := &x.Path + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Backend = IngressBackend{} + } else { + yyv10 := &x.Backend + yyv10.CodecDecodeSelf(d) + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *IngressBackend) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("serviceName")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } 
else { + r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy7 := &x.ServicePort + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(yy7) { + } else if !yym8 && z.IsJSONHandle() { + z.EncJSONMarshal(yy7) + } else { + z.EncFallback(yy7) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("servicePort")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy9 := &x.ServicePort + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(yy9) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(yy9) + } else { + z.EncFallback(yy9) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *IngressBackend) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *IngressBackend) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "serviceName": + if r.TryDecodeAsNil() { + x.ServiceName = "" + } else { + yyv4 := &x.ServiceName + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "servicePort": + if r.TryDecodeAsNil() { + x.ServicePort = pkg5_intstr.IntOrString{} + } else { + yyv6 := &x.ServicePort + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *IngressBackend) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ServiceName 
= "" + } else { + yyv9 := &x.ServiceName + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ServicePort = pkg5_intstr.IntOrString{} + } else { + yyv11 := &x.ServicePort + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(yyv11) { + } else if !yym12 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv11) + } else { + z.DecFallback(yyv11, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicaSet) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() 
&& z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicaSet) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicaSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = ReplicaSetSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = ReplicaSetStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} 
+ +func (x *ReplicaSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = ReplicaSetSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = ReplicaSetStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicaSetList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicaSetList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicaSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + 
z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceReplicaSet((*[]ReplicaSet)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicaSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceReplicaSet((*[]ReplicaSet)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicaSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var 
h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Replicas != nil + yyq2[1] = x.MinReadySeconds != 0 + yyq2[2] = x.Selector != nil + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Replicas == nil { + r.EncodeNil() + } else { + yy4 := *x.Replicas + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(yy4)) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Replicas == nil { + r.EncodeNil() + } else { + yy6 := *x.Replicas + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(yy6)) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.MinReadySeconds)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym12 := z.EncBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Template + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("template")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Template + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicaSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := 
r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicaSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + case "minReadySeconds": + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv6 := &x.MinReadySeconds + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + case "template": + if r.TryDecodeAsNil() { + x.Template = pkg4_v1.PodTemplateSpec{} + } else { + yyv10 := &x.Template + yyv10.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicaSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Replicas != nil { + x.Replicas = nil + } + } else { + if x.Replicas == nil { + x.Replicas = new(int32) + } + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MinReadySeconds = 0 + } else { + yyv14 := &x.MinReadySeconds + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg1_v1.LabelSelector) + } + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Template = pkg4_v1.PodTemplateSpec{} + } else { + yyv18 := &x.Template + yyv18.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.FullyLabeledReplicas != 0 + yyq2[2] = x.ReadyReplicas != 0 + yyq2[3] = x.AvailableReplicas != 0 + yyq2[4] = x.ObservedGeneration != 0 + yyq2[5] = len(x.Conditions) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("replicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Replicas)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.FullyLabeledReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.FullyLabeledReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.ReadyReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readyReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.ReadyReplicas)) + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeInt(int64(x.AvailableReplicas)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + if x.Conditions == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + h.encSliceReplicaSetCondition(([]ReplicaSetCondition)(x.Conditions), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("conditions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Conditions == nil { + r.EncodeNil() + } else { + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + h.encSliceReplicaSetCondition(([]ReplicaSetCondition)(x.Conditions), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicaSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicaSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "replicas": + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv4 := &x.Replicas + yym5 := z.DecBinary() + _ = yym5 + if false { + 
} else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "fullyLabeledReplicas": + if r.TryDecodeAsNil() { + x.FullyLabeledReplicas = 0 + } else { + yyv6 := &x.FullyLabeledReplicas + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + case "readyReplicas": + if r.TryDecodeAsNil() { + x.ReadyReplicas = 0 + } else { + yyv8 := &x.ReadyReplicas + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "availableReplicas": + if r.TryDecodeAsNil() { + x.AvailableReplicas = 0 + } else { + yyv10 := &x.AvailableReplicas + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case "observedGeneration": + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv12 := &x.ObservedGeneration + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int64)(yyv12)) = int64(r.DecodeInt(64)) + } + } + case "conditions": + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv14 := &x.Conditions + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + h.decSliceReplicaSetCondition((*[]ReplicaSetCondition)(yyv14), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Replicas = 0 + } else { + yyv17 := &x.Replicas + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int32)(yyv17)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FullyLabeledReplicas = 0 + } else { + yyv19 := &x.FullyLabeledReplicas + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*int32)(yyv19)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadyReplicas = 0 + } else { + yyv21 := &x.ReadyReplicas + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AvailableReplicas = 0 + } else { + yyv23 := &x.AvailableReplicas + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv25 := &x.ObservedGeneration + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int64)(yyv25)) = int64(r.DecodeInt(64)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Conditions = nil + } else { + yyv27 := &x.Conditions + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + h.decSliceReplicaSetCondition((*[]ReplicaSetCondition)(yyv27), d) + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj16-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x ReplicaSetConditionType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *ReplicaSetConditionType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *ReplicaSetCondition) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[2] = true + yyq2[3] = x.Reason != "" + yyq2[4] = x.Message != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Type.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("type")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Type.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf7 := &x.Status + yysf7.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yysf8 := &x.Status + yysf8.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.LastTransitionTime + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else if yym11 { + z.EncBinaryMarshal(yy10) + } else if !yym11 && z.IsJSONHandle() { + z.EncJSONMarshal(yy10) + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.LastTransitionTime + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else if yym13 { + z.EncBinaryMarshal(yy12) + } else if !yym13 && z.IsJSONHandle() { + z.EncJSONMarshal(yy12) + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("reason")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("message")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Message)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ReplicaSetCondition) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ReplicaSetCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "type": + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv4 := &x.Type + yyv4.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv5 := &x.Status + yyv5.CodecDecodeSelf(d) + } + case "lastTransitionTime": + if 
r.TryDecodeAsNil() { + x.LastTransitionTime = pkg1_v1.Time{} + } else { + yyv6 := &x.LastTransitionTime + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(yyv6) { + } else if yym7 { + z.DecBinaryUnmarshal(yyv6) + } else if !yym7 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv6) + } else { + z.DecFallback(yyv6, false) + } + } + case "reason": + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv8 := &x.Reason + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv10 := &x.Message + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ReplicaSetCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Type = "" + } else { + yyv13 := &x.Type + yyv13.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = "" + } else { + yyv14 := &x.Status + yyv14.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.LastTransitionTime = pkg1_v1.Time{} + } else { + yyv15 := &x.LastTransitionTime + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else if yym16 { + z.DecBinaryUnmarshal(yyv15) + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv15) + } else { + z.DecFallback(yyv15, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Reason = "" + } else { + yyv17 := &x.Reason + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv19 := &x.Message + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodSecurityPolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodSecurityPolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + 
yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodSecurityPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = PodSecurityPolicySpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodSecurityPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if 
z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = PodSecurityPolicySpec{} + } else { + yyv18 := &x.Spec + yyv18.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [14]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Privileged != false + yyq2[1] = len(x.DefaultAddCapabilities) != 0 + yyq2[2] = len(x.RequiredDropCapabilities) != 0 + yyq2[3] = len(x.AllowedCapabilities) != 0 + yyq2[4] = len(x.Volumes) != 0 + yyq2[5] = x.HostNetwork != false + yyq2[6] = len(x.HostPorts) != 0 + yyq2[7] = x.HostPID != false + yyq2[8] = x.HostIPC != false + yyq2[13] = x.ReadOnlyRootFilesystem != false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(14) + } else { + yynn2 = 4 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.Privileged)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("privileged")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeBool(bool(x.Privileged)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.DefaultAddCapabilities == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.DefaultAddCapabilities), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("defaultAddCapabilities")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DefaultAddCapabilities == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.DefaultAddCapabilities), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.RequiredDropCapabilities == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.RequiredDropCapabilities), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("requiredDropCapabilities")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.RequiredDropCapabilities == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.RequiredDropCapabilities), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.AllowedCapabilities == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.AllowedCapabilities), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("allowedCapabilities")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.AllowedCapabilities == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + h.encSlicev1_Capability(([]pkg4_v1.Capability)(x.AllowedCapabilities), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Volumes == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceFSType(([]FSType)(x.Volumes), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumes")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Volumes == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + h.encSliceFSType(([]FSType)(x.Volumes), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[5] { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.HostNetwork)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[5] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostNetwork")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.HostNetwork)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[6] { + if x.HostPorts == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[6] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPorts")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.HostPorts == nil { + r.EncodeNil() + } else { + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[7] { + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeBool(bool(x.HostPID)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[7] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostPID")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym26 := z.EncBinary() + _ = yym26 + 
if false { + } else { + r.EncodeBool(bool(x.HostPID)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[8] { + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeBool(bool(x.HostIPC)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[8] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("hostIPC")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeBool(bool(x.HostIPC)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy31 := &x.SELinux + yy31.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("seLinux")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy33 := &x.SELinux + yy33.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy36 := &x.RunAsUser + yy36.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy38 := &x.RunAsUser + yy38.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy41 := &x.SupplementalGroups + yy41.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("supplementalGroups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy43 := &x.SupplementalGroups + yy43.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy46 := &x.FSGroup + yy46.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("fsGroup")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy48 := &x.FSGroup + yy48.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[13] { + yym51 := z.EncBinary() + _ = yym51 + if false { + } else { + r.EncodeBool(bool(x.ReadOnlyRootFilesystem)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[13] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("readOnlyRootFilesystem")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym52 := z.EncBinary() + _ = yym52 + if false { + } else { + r.EncodeBool(bool(x.ReadOnlyRootFilesystem)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodSecurityPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + 
} else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodSecurityPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "privileged": + if r.TryDecodeAsNil() { + x.Privileged = false + } else { + yyv4 := &x.Privileged + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*bool)(yyv4)) = r.DecodeBool() + } + } + case "defaultAddCapabilities": + if r.TryDecodeAsNil() { + x.DefaultAddCapabilities = nil + } else { + yyv6 := &x.DefaultAddCapabilities + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv6), d) + } + } + case "requiredDropCapabilities": + if r.TryDecodeAsNil() { + x.RequiredDropCapabilities = nil + } else { + yyv8 := &x.RequiredDropCapabilities + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv8), d) + } + } + case "allowedCapabilities": + if r.TryDecodeAsNil() { + x.AllowedCapabilities = nil + } else { + yyv10 := &x.AllowedCapabilities + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv10), d) + } + } + case "volumes": + if r.TryDecodeAsNil() { + x.Volumes = nil + } else { + yyv12 := &x.Volumes + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + h.decSliceFSType((*[]FSType)(yyv12), d) + } + } + case "hostNetwork": + if r.TryDecodeAsNil() { + x.HostNetwork = false + } else { + yyv14 := &x.HostNetwork + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + case "hostPorts": + if r.TryDecodeAsNil() { + x.HostPorts = nil + } else { + yyv16 := &x.HostPorts + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + h.decSliceHostPortRange((*[]HostPortRange)(yyv16), d) + } + } + case "hostPID": + if r.TryDecodeAsNil() { + x.HostPID = false + } else { + yyv18 := &x.HostPID + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*bool)(yyv18)) = r.DecodeBool() + } + } + case "hostIPC": + if r.TryDecodeAsNil() { + x.HostIPC = false + } else { + yyv20 := &x.HostIPC + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*bool)(yyv20)) = r.DecodeBool() + } + } + case "seLinux": + if r.TryDecodeAsNil() { + x.SELinux = SELinuxStrategyOptions{} + } else { + yyv22 := &x.SELinux + yyv22.CodecDecodeSelf(d) + } + case "runAsUser": + if r.TryDecodeAsNil() { + x.RunAsUser = RunAsUserStrategyOptions{} + } else { + yyv23 := &x.RunAsUser + yyv23.CodecDecodeSelf(d) + } + case "supplementalGroups": + if r.TryDecodeAsNil() { + x.SupplementalGroups = SupplementalGroupsStrategyOptions{} + } else { + yyv24 := &x.SupplementalGroups + yyv24.CodecDecodeSelf(d) + } + case "fsGroup": + if r.TryDecodeAsNil() { + x.FSGroup = FSGroupStrategyOptions{} + } else { + yyv25 := &x.FSGroup + yyv25.CodecDecodeSelf(d) + } + case "readOnlyRootFilesystem": + if r.TryDecodeAsNil() { + x.ReadOnlyRootFilesystem = false + } else { + yyv26 := 
&x.ReadOnlyRootFilesystem + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*bool)(yyv26)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj28 int + var yyb28 bool + var yyhl28 bool = l >= 0 + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Privileged = false + } else { + yyv29 := &x.Privileged + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DefaultAddCapabilities = nil + } else { + yyv31 := &x.DefaultAddCapabilities + yym32 := z.DecBinary() + _ = yym32 + if false { + } else { + h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv31), d) + } + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RequiredDropCapabilities = nil + } else { + yyv33 := &x.RequiredDropCapabilities + yym34 := z.DecBinary() + _ = yym34 + if false { + } else { + h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv33), d) + } + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.AllowedCapabilities = nil + } else { + yyv35 := &x.AllowedCapabilities + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + h.decSlicev1_Capability((*[]pkg4_v1.Capability)(yyv35), d) + } + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Volumes = nil + } else { + yyv37 := &x.Volumes + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + h.decSliceFSType((*[]FSType)(yyv37), d) + } + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostNetwork = false + } else { + yyv39 := &x.HostNetwork + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*bool)(yyv39)) = r.DecodeBool() + } + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostPorts = nil + } else { + yyv41 := &x.HostPorts + yym42 := z.DecBinary() + _ = yym42 + if 
false { + } else { + h.decSliceHostPortRange((*[]HostPortRange)(yyv41), d) + } + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostPID = false + } else { + yyv43 := &x.HostPID + yym44 := z.DecBinary() + _ = yym44 + if false { + } else { + *((*bool)(yyv43)) = r.DecodeBool() + } + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.HostIPC = false + } else { + yyv45 := &x.HostIPC + yym46 := z.DecBinary() + _ = yym46 + if false { + } else { + *((*bool)(yyv45)) = r.DecodeBool() + } + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SELinux = SELinuxStrategyOptions{} + } else { + yyv47 := &x.SELinux + yyv47.CodecDecodeSelf(d) + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RunAsUser = RunAsUserStrategyOptions{} + } else { + yyv48 := &x.RunAsUser + yyv48.CodecDecodeSelf(d) + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.SupplementalGroups = SupplementalGroupsStrategyOptions{} + } else { + yyv49 := &x.SupplementalGroups + yyv49.CodecDecodeSelf(d) + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.FSGroup = FSGroupStrategyOptions{} + } else { + yyv50 := &x.FSGroup + yyv50.CodecDecodeSelf(d) + } + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ReadOnlyRootFilesystem = false + } else { + yyv51 := &x.ReadOnlyRootFilesystem + yym52 := z.DecBinary() + _ = yym52 + if false { + } else { + *((*bool)(yyv51)) = r.DecodeBool() + } + } + for { + yyj28++ + if yyhl28 { + yyb28 = yyj28 > l + } else { + yyb28 = r.CheckBreak() + } + if yyb28 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj28-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x FSType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *FSType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *HostPortRange) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Min)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("min")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Min)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.Max)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("max")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.Max)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *HostPortRange) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *HostPortRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "min": + if r.TryDecodeAsNil() { + x.Min = 0 + } else { + yyv4 := &x.Min + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int32)(yyv4)) = int32(r.DecodeInt(32)) + } + } + case "max": + if r.TryDecodeAsNil() { + x.Max = 0 + } else { + 
yyv6 := &x.Max + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int32)(yyv6)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *HostPortRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Min = 0 + } else { + yyv9 := &x.Min + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*int32)(yyv9)) = int32(r.DecodeInt(32)) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Max = 0 + } else { + yyv11 := &x.Max + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int32)(yyv11)) = int32(r.DecodeInt(32)) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *SELinuxStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.SELinuxOptions != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Rule.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rule")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Rule.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.SELinuxOptions == nil { + r.EncodeNil() + } else { + x.SELinuxOptions.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.SELinuxOptions == nil { + r.EncodeNil() + } else { + x.SELinuxOptions.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SELinuxStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if 
z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SELinuxStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "rule": + if r.TryDecodeAsNil() { + x.Rule = "" + } else { + yyv4 := &x.Rule + yyv4.CodecDecodeSelf(d) + } + case "seLinuxOptions": + if r.TryDecodeAsNil() { + if x.SELinuxOptions != nil { + x.SELinuxOptions = nil + } + } else { + if x.SELinuxOptions == nil { + x.SELinuxOptions = new(pkg4_v1.SELinuxOptions) + } + x.SELinuxOptions.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SELinuxStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Rule = "" + } else { + yyv7 := &x.Rule + yyv7.CodecDecodeSelf(d) + } + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.SELinuxOptions != nil { + x.SELinuxOptions = nil + } + } else { + if x.SELinuxOptions == nil { + x.SELinuxOptions = new(pkg4_v1.SELinuxOptions) + } + x.SELinuxOptions.CodecDecodeSelf(d) + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj6-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x SELinuxStrategy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *SELinuxStrategy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { 
+ *((*string)(x)) = r.DecodeString() + } +} + +func (x *RunAsUserStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = len(x.Ranges) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + x.Rule.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rule")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Rule.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Ranges == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceIDRange(([]IDRange)(x.Ranges), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ranges")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ranges == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceIDRange(([]IDRange)(x.Ranges), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RunAsUserStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RunAsUserStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "rule": + if r.TryDecodeAsNil() { + x.Rule = "" + } else { + yyv4 := &x.Rule + yyv4.CodecDecodeSelf(d) + } + case "ranges": + if r.TryDecodeAsNil() { + x.Ranges = nil + } else { + yyv5 := &x.Ranges + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + 
h.decSliceIDRange((*[]IDRange)(yyv5), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RunAsUserStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Rule = "" + } else { + yyv8 := &x.Rule + yyv8.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ranges = nil + } else { + yyv9 := &x.Ranges + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceIDRange((*[]IDRange)(yyv9), d) + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *IDRange) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Min)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("min")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Min)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeInt(int64(x.Max)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("max")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeInt(int64(x.Max)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *IDRange) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *IDRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "min": + if r.TryDecodeAsNil() { + x.Min = 0 + } else { + yyv4 := &x.Min + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } + } + case "max": + if r.TryDecodeAsNil() { + x.Max = 0 + } else { + yyv6 := &x.Max + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*int64)(yyv6)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *IDRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Min = 0 + } else { + yyv9 := &x.Min + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*int64)(yyv9)) = int64(r.DecodeInt(64)) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Max = 0 + } else { + yyv11 := &x.Max + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*int64)(yyv11)) = int64(r.DecodeInt(64)) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x RunAsUserStrategy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *RunAsUserStrategy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *FSGroupStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h 
codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Rule != "" + yyq2[1] = len(x.Ranges) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Rule.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rule")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Rule.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Ranges == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceIDRange(([]IDRange)(x.Ranges), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ranges")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ranges == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceIDRange(([]IDRange)(x.Ranges), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *FSGroupStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *FSGroupStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "rule": + if r.TryDecodeAsNil() { + x.Rule = "" + } else { + yyv4 := &x.Rule + yyv4.CodecDecodeSelf(d) + } + case "ranges": + if r.TryDecodeAsNil() { + x.Ranges = nil + } else { + yyv5 := &x.Ranges + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + 
h.decSliceIDRange((*[]IDRange)(yyv5), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *FSGroupStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Rule = "" + } else { + yyv8 := &x.Rule + yyv8.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ranges = nil + } else { + yyv9 := &x.Ranges + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceIDRange((*[]IDRange)(yyv9), d) + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x FSGroupStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *FSGroupStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *SupplementalGroupsStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Rule != "" + yyq2[1] = len(x.Ranges) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + x.Rule.CodecEncodeSelf(e) + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rule")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + x.Rule.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Ranges == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceIDRange(([]IDRange)(x.Ranges), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ranges")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ranges == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceIDRange(([]IDRange)(x.Ranges), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *SupplementalGroupsStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "rule": + if r.TryDecodeAsNil() { + x.Rule = "" + } else { + yyv4 := &x.Rule + yyv4.CodecDecodeSelf(d) + } + case "ranges": + if r.TryDecodeAsNil() { + x.Ranges = nil + } else { + yyv5 := &x.Ranges + yym6 := z.DecBinary() + _ = yym6 + if false { + } else { + h.decSliceIDRange((*[]IDRange)(yyv5), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Rule = "" + } else { + yyv8 := &x.Rule + yyv8.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ranges = nil + } else { + yyv9 := &x.Ranges + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceIDRange((*[]IDRange)(yyv9), d) + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x SupplementalGroupsStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x)) + } +} + +func (x *SupplementalGroupsStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *PodSecurityPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } 
else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodSecurityPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodSecurityPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodSecurityPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NetworkPolicy) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NetworkPolicy) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NetworkPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && 
z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = NetworkPolicySpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NetworkPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj11 int + var yyb11 bool + var yyhl11 bool = l >= 0 + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv12 := &x.Kind + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*string)(yyv12)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv14 := &x.APIVersion + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv16 := &x.ObjectMeta + yym17 := z.DecBinary() + _ = yym17 + if false { + } else if z.HasExtensions() && z.DecExt(yyv16) { + } else { + z.DecFallback(yyv16, false) + } + } + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = NetworkPolicySpec{} + } else { + yyv18 := &x.Spec + yyv18.CodecDecodeSelf(d) + } + for { + yyj11++ + if yyhl11 { + yyb11 = yyj11 > l + } else { + yyb11 = r.CheckBreak() + } + if yyb11 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj11-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NetworkPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = len(x.Ingress) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy4 := &x.PodSelector + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else { + z.EncFallback(yy4) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("podSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.PodSelector + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else { + z.EncFallback(yy6) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Ingress == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else { + h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ingress")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ingress == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NetworkPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NetworkPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "podSelector": + if r.TryDecodeAsNil() { + x.PodSelector = pkg1_v1.LabelSelector{} + } else { + yyv4 := &x.PodSelector + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + z.DecFallback(yyv4, false) + } + } + case "ingress": + if r.TryDecodeAsNil() { + x.Ingress = nil + } else { + yyv6 := &x.Ingress + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NetworkPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + 
if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodSelector = pkg1_v1.LabelSelector{} + } else { + yyv9 := &x.PodSelector + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else { + z.DecFallback(yyv9, false) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ingress = nil + } else { + yyv11 := &x.Ingress + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NetworkPolicyIngressRule) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = len(x.Ports) != 0 + yyq2[1] = len(x.From) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Ports == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("ports")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Ports == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.From == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("from")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.From == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NetworkPolicyIngressRule) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, 
_ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NetworkPolicyIngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "ports": + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv4 := &x.Ports + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv4), d) + } + } + case "from": + if r.TryDecodeAsNil() { + x.From = nil + } else { + yyv6 := &x.From + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv6), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NetworkPolicyIngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Ports = nil + } else { + yyv9 := &x.Ports + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv9), d) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.From = nil + } else { + yyv11 := &x.From + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv11), d) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NetworkPolicyPort) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 
:= z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Protocol != nil + yyq2[1] = x.Port != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.Protocol == nil { + r.EncodeNil() + } else { + yy4 := *x.Protocol + yysf5 := &yy4 + yysf5.CodecEncodeSelf(e) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("protocol")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Protocol == nil { + r.EncodeNil() + } else { + yy6 := *x.Protocol + yysf7 := &yy6 + yysf7.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Port == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.Port) { + } else if !yym9 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Port) + } else { + z.EncFallback(x.Port) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("port")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Port == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.Port) { + } else if !yym10 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Port) + } else { + z.EncFallback(x.Port) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NetworkPolicyPort) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NetworkPolicyPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "protocol": + if r.TryDecodeAsNil() { + if x.Protocol != nil { + x.Protocol = nil + } + } else { + if x.Protocol == nil { + x.Protocol = new(pkg4_v1.Protocol) + } + x.Protocol.CodecDecodeSelf(d) 
+ } + case "port": + if r.TryDecodeAsNil() { + if x.Port != nil { + x.Port = nil + } + } else { + if x.Port == nil { + x.Port = new(pkg5_intstr.IntOrString) + } + yym6 := z.DecBinary() + _ = yym6 + if false { + } else if z.HasExtensions() && z.DecExt(x.Port) { + } else if !yym6 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Port) + } else { + z.DecFallback(x.Port, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NetworkPolicyPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj7 int + var yyb7 bool + var yyhl7 bool = l >= 0 + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Protocol != nil { + x.Protocol = nil + } + } else { + if x.Protocol == nil { + x.Protocol = new(pkg4_v1.Protocol) + } + x.Protocol.CodecDecodeSelf(d) + } + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Port != nil { + x.Port = nil + } + } else { + if x.Port == nil { + x.Port = new(pkg5_intstr.IntOrString) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(x.Port) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Port) + } else { + z.DecFallback(x.Port, false) + } + } + for { + yyj7++ + if yyhl7 { + yyb7 = yyj7 > l + } else { + yyb7 = r.CheckBreak() + } + if yyb7 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj7-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NetworkPolicyPeer) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.PodSelector != nil + yyq2[1] = x.NamespaceSelector != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + if x.PodSelector == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.PodSelector) { + } else { + z.EncFallback(x.PodSelector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.PodSelector == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.PodSelector) { + } else { + z.EncFallback(x.PodSelector) + } + } + } + } + if yyr2 || yy2arr2 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.NamespaceSelector == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(x.NamespaceSelector) { + } else { + z.EncFallback(x.NamespaceSelector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespaceSelector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NamespaceSelector == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.EncExt(x.NamespaceSelector) { + } else { + z.EncFallback(x.NamespaceSelector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NetworkPolicyPeer) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NetworkPolicyPeer) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "podSelector": + if r.TryDecodeAsNil() { + if x.PodSelector != nil { + x.PodSelector = nil + } + } else { + if x.PodSelector == nil { + x.PodSelector = new(pkg1_v1.LabelSelector) + } + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(x.PodSelector) { + } else { + z.DecFallback(x.PodSelector, false) + } + } + case "namespaceSelector": + if r.TryDecodeAsNil() { + if x.NamespaceSelector != nil { + x.NamespaceSelector = nil + } + } else { + if x.NamespaceSelector == nil { + x.NamespaceSelector = new(pkg1_v1.LabelSelector) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.NamespaceSelector) { + } else { + z.DecFallback(x.NamespaceSelector, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NetworkPolicyPeer) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + 
if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.PodSelector != nil { + x.PodSelector = nil + } + } else { + if x.PodSelector == nil { + x.PodSelector = new(pkg1_v1.LabelSelector) + } + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(x.PodSelector) { + } else { + z.DecFallback(x.PodSelector, false) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.NamespaceSelector != nil { + x.NamespaceSelector = nil + } + } else { + if x.NamespaceSelector == nil { + x.NamespaceSelector = new(pkg1_v1.LabelSelector) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(x.NamespaceSelector) { + } else { + z.DecFallback(x.NamespaceSelector, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *NetworkPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) 
+ } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *NetworkPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *NetworkPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *NetworkPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceCustomMetricTarget(v []CustomMetricTarget, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []CustomMetricTarget{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]CustomMetricTarget, yyrl1) + } + } else { + yyv1 = 
make([]CustomMetricTarget, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CustomMetricTarget{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, CustomMetricTarget{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CustomMetricTarget{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, CustomMetricTarget{}) // var yyz1 CustomMetricTarget + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = CustomMetricTarget{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []CustomMetricTarget{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceCustomMetricCurrentStatus(v []CustomMetricCurrentStatus, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurrentStatus, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []CustomMetricCurrentStatus{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]CustomMetricCurrentStatus, yyrl1) + } + } else { + yyv1 = make([]CustomMetricCurrentStatus, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CustomMetricCurrentStatus{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, CustomMetricCurrentStatus{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = CustomMetricCurrentStatus{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, CustomMetricCurrentStatus{}) // var yyz1 CustomMetricCurrentStatus + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = CustomMetricCurrentStatus{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } 
else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []CustomMetricCurrentStatus{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceAPIVersion(v []APIVersion, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []APIVersion{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]APIVersion, yyrl1) + } + } else { + yyv1 = make([]APIVersion, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = APIVersion{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, APIVersion{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = APIVersion{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, APIVersion{}) // var yyz1 APIVersion + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = APIVersion{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []APIVersion{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceThirdPartyResource(v []ThirdPartyResource, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ThirdPartyResource{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + 
yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ThirdPartyResource, yyrl1) + } + } else { + yyv1 = make([]ThirdPartyResource, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ThirdPartyResource{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ThirdPartyResource{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ThirdPartyResource{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ThirdPartyResource{}) // var yyz1 ThirdPartyResource + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ThirdPartyResource{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ThirdPartyResource{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceDeploymentCondition(v []DeploymentCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceDeploymentCondition(v *[]DeploymentCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []DeploymentCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]DeploymentCondition, yyrl1) + } + } else { + yyv1 = make([]DeploymentCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = DeploymentCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, DeploymentCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = DeploymentCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, DeploymentCondition{}) // var yyz1 DeploymentCondition + yyc1 = true + } + 
yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = DeploymentCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []DeploymentCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceDeployment(v []Deployment, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Deployment{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 920) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Deployment, yyrl1) + } + } else { + yyv1 = make([]Deployment, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Deployment{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Deployment{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Deployment{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Deployment{}) // var yyz1 Deployment + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Deployment{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Deployment{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceDaemonSet(v []DaemonSet, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []DaemonSet{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } 
else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 872) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]DaemonSet, yyrl1) + } + } else { + yyv1 = make([]DaemonSet, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = DaemonSet{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, DaemonSet{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = DaemonSet{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, DaemonSet{}) // var yyz1 DaemonSet + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = DaemonSet{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []DaemonSet{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceThirdPartyResourceData(v []ThirdPartyResourceData, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceData, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ThirdPartyResourceData{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ThirdPartyResourceData, yyrl1) + } + } else { + yyv1 = make([]ThirdPartyResourceData, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ThirdPartyResourceData{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ThirdPartyResourceData{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ThirdPartyResourceData{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, 
ThirdPartyResourceData{}) // var yyz1 ThirdPartyResourceData + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ThirdPartyResourceData{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ThirdPartyResourceData{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceIngress(v []Ingress, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Ingress{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 336) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Ingress, yyrl1) + } + } else { + yyv1 = make([]Ingress, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Ingress{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Ingress{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Ingress{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Ingress{}) // var yyz1 Ingress + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Ingress{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Ingress{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceIngressTLS(v []IngressTLS, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []IngressTLS{} + yyc1 = true + } else if len(yyv1) 
!= 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]IngressTLS, yyrl1) + } + } else { + yyv1 = make([]IngressTLS, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = IngressTLS{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, IngressTLS{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = IngressTLS{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, IngressTLS{}) // var yyz1 IngressTLS + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = IngressTLS{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []IngressTLS{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceIngressRule(v []IngressRule, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []IngressRule{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]IngressRule, yyrl1) + } + } else { + yyv1 = make([]IngressRule, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = IngressRule{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, IngressRule{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = IngressRule{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, IngressRule{}) // var yyz1 IngressRule + yyc1 = true + } 
+ yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = IngressRule{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []IngressRule{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceHTTPIngressPath(v []HTTPIngressPath, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []HTTPIngressPath{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]HTTPIngressPath, yyrl1) + } + } else { + yyv1 = make([]HTTPIngressPath, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HTTPIngressPath{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, HTTPIngressPath{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HTTPIngressPath{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, HTTPIngressPath{}) // var yyz1 HTTPIngressPath + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = HTTPIngressPath{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []HTTPIngressPath{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceReplicaSet(v []ReplicaSet, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ReplicaSet{} + yyc1 = true + } else if 
len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 856) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ReplicaSet, yyrl1) + } + } else { + yyv1 = make([]ReplicaSet, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicaSet{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ReplicaSet{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicaSet{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ReplicaSet{}) // var yyz1 ReplicaSet + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicaSet{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ReplicaSet{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceReplicaSetCondition(v []ReplicaSetCondition, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceReplicaSetCondition(v *[]ReplicaSetCondition, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ReplicaSetCondition{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 88) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ReplicaSetCondition, yyrl1) + } + } else { + yyv1 = make([]ReplicaSetCondition, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicaSetCondition{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ReplicaSetCondition{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicaSetCondition{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= 
len(yyv1) { + yyv1 = append(yyv1, ReplicaSetCondition{}) // var yyz1 ReplicaSetCondition + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ReplicaSetCondition{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ReplicaSetCondition{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicev1_Capability(v []pkg4_v1.Capability, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yysf2 := &yyv1 + yysf2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicev1_Capability(v *[]pkg4_v1.Capability, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []pkg4_v1.Capability{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]pkg4_v1.Capability, yyrl1) + } + } else { + yyv1 = make([]pkg4_v1.Capability, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 pkg4_v1.Capability + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []pkg4_v1.Capability{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceFSType(v []FSType, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yyv1.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []FSType{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 
= true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]FSType, yyrl1) + } + } else { + yyv1 = make([]FSType, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, "") + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, "") // var yyz1 FSType + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = "" + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []FSType{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceHostPortRange(v []HostPortRange, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []HostPortRange{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]HostPortRange, yyrl1) + } + } else { + yyv1 = make([]HostPortRange, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HostPortRange{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, HostPortRange{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = HostPortRange{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, HostPortRange{}) // var yyz1 HostPortRange + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = HostPortRange{} + } else { + yyv4 := &yyv1[yyj1] + 
yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []HostPortRange{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceIDRange(v []IDRange, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []IDRange{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]IDRange, yyrl1) + } + } else { + yyv1 = make([]IDRange, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = IDRange{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, IDRange{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = IDRange{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, IDRange{}) // var yyz1 IDRange + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = IDRange{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []IDRange{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSlicePodSecurityPolicy(v []PodSecurityPolicy, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodSecurityPolicy{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + 
yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 552) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PodSecurityPolicy, yyrl1) + } + } else { + yyv1 = make([]PodSecurityPolicy, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodSecurityPolicy{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodSecurityPolicy{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodSecurityPolicy{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodSecurityPolicy{}) // var yyz1 PodSecurityPolicy + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodSecurityPolicy{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodSecurityPolicy{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNetworkPolicyIngressRule(v []NetworkPolicyIngressRule, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNetworkPolicyIngressRule(v *[]NetworkPolicyIngressRule, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NetworkPolicyIngressRule{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]NetworkPolicyIngressRule, yyrl1) + } + } else { + yyv1 = make([]NetworkPolicyIngressRule, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicyIngressRule{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NetworkPolicyIngressRule{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicyIngressRule{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NetworkPolicyIngressRule{}) // var yyz1 NetworkPolicyIngressRule + yyc1 
= true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicyIngressRule{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NetworkPolicyIngressRule{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNetworkPolicyPort(v []NetworkPolicyPort, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNetworkPolicyPort(v *[]NetworkPolicyPort, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NetworkPolicyPort{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]NetworkPolicyPort, yyrl1) + } + } else { + yyv1 = make([]NetworkPolicyPort, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicyPort{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NetworkPolicyPort{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicyPort{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NetworkPolicyPort{}) // var yyz1 NetworkPolicyPort + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicyPort{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NetworkPolicyPort{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNetworkPolicyPeer(v []NetworkPolicyPeer, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNetworkPolicyPeer(v *[]NetworkPolicyPeer, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ 
= yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NetworkPolicyPeer{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]NetworkPolicyPeer, yyrl1) + } + } else { + yyv1 = make([]NetworkPolicyPeer, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicyPeer{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NetworkPolicyPeer{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicyPeer{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NetworkPolicyPeer{}) // var yyz1 NetworkPolicyPeer + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicyPeer{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NetworkPolicyPeer{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceNetworkPolicy(v []NetworkPolicy, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceNetworkPolicy(v *[]NetworkPolicy, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []NetworkPolicy{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 312) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]NetworkPolicy, yyrl1) + } + } else { + yyv1 = make([]NetworkPolicy, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicy{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, NetworkPolicy{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicy{} + } else { + yyv3 := &yyv1[yyj1] + 
yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, NetworkPolicy{}) // var yyz1 NetworkPolicy + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = NetworkPolicy{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []NetworkPolicy{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types.go new file mode 100644 index 000000000..43dd2a9b0 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types.go @@ -0,0 +1,1147 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/pkg/api/v1" +) + +// describes the attributes of a scale subresource +type ScaleSpec struct { + // desired number of instances for the scaled object. + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` +} + +// represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` + + // label selector for pods that should match the replicas count. This is a serialized + // version of both map-based and more expressive set-based selectors. This is done to + // avoid introspection in the clients. The string will be in the same format as the + // query-param syntax. If the target type only supports map-based selectors, both this + // field and map-based selector field are populated. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` +} + +// +genclient=true +// +noMethods=true + +// represents a scaling request for a resource. +type Scale struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
+ // +optional + Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // +optional + Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// Dummy definition +type ReplicationControllerDummy struct { + metav1.TypeMeta `json:",inline"` +} + +// Alpha-level support for Custom Metrics in HPA (as annotations). +type CustomMetricTarget struct { + // Custom Metric name. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Custom Metric value (average). + TargetValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"` +} + +type CustomMetricTargetList struct { + Items []CustomMetricTarget `json:"items" protobuf:"bytes,1,rep,name=items"` +} + +type CustomMetricCurrentStatus struct { + // Custom Metric name. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Custom Metric value (average). + CurrentValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"` +} + +type CustomMetricCurrentStatusList struct { + Items []CustomMetricCurrentStatus `json:"items" protobuf:"bytes,1,rep,name=items"` +} + +// +genclient=true +// +nonNamespaced=true + +// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource +// types to the API. It consists of one or more Versions of the api. +type ThirdPartyResource struct { + metav1.TypeMeta `json:",inline"` + + // Standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Description is the description of this object. + // +optional + Description string `json:"description,omitempty" protobuf:"bytes,2,opt,name=description"` + + // Versions are versions for this third party object + // +optional + Versions []APIVersion `json:"versions,omitempty" protobuf:"bytes,3,rep,name=versions"` +} + +// ThirdPartyResourceList is a list of ThirdPartyResources. +type ThirdPartyResourceList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of ThirdPartyResources. + Items []ThirdPartyResource `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// An APIVersion represents a single concrete version of an object model. +type APIVersion struct { + // Name of this version (e.g. 'v1'). + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` +} + +// An internal object, used for versioned storage in etcd. Not exposed to the end user. +type ThirdPartyResourceData struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Data is the raw JSON data for this data. + // +optional + Data []byte `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` +} + +// +genclient=true + +// Deployment enables declarative updates for Pods and ReplicaSets. +type Deployment struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the Deployment. 
+ // +optional + Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the Deployment. + // +optional + Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +type DeploymentSpec struct { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + + // Template describes the pods that will be created. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` + + // Indicates that the deployment is paused and will not be processed by the + // deployment controller. + // +optional + Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` + + // The config this deployment is rolling back to. Will be cleared after rollback is done. + // +optional + RollbackTo *RollbackConfig `json:"rollbackTo,omitempty" protobuf:"bytes,8,opt,name=rollbackTo"` + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Once autoRollback is + // implemented, the deployment controller will automatically rollback failed + // deployments. Note that progress will not be estimated during the time a + // deployment is paused. This is not set by default. + ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` +} + +// DeploymentRollback stores the information required to rollback a deployment. +type DeploymentRollback struct { + metav1.TypeMeta `json:",inline"` + // Required: This must match the Name of a deployment. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // The annotations to be updated to a deployment + // +optional + UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"` + // The config of this deployment rollback. + RollbackTo RollbackConfig `json:"rollbackTo" protobuf:"bytes,3,opt,name=rollbackTo"` +} + +type RollbackConfig struct { + // The revision to rollback to. 
If set to 0, rollback to the last revision. + // +optional + Revision int64 `json:"revision,omitempty" protobuf:"varint,1,opt,name=revision"` +} + +const ( + // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added + // to existing RCs (and label key that is added to its pods) to prevent the existing RCs + // to select new pods (and old pods being selected by new RC). + DefaultDeploymentUniqueLabelKey string = "pod-template-hash" +) + +// DeploymentStrategy describes how to replace existing pods with new ones. +type DeploymentStrategy struct { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +type DeploymentStrategyType string + +const ( + // Kill all existing pods before creating new ones. + RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" + + // Replace the old RCs by new one using rolling update, i.e. gradually scale down the old RCs and scale up the new one. + RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" +) + +// Spec to control the desired behavior of rolling update. +type RollingUpdateDeployment struct { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // By default, a fixed value of 1 is used. + // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // By default, a value of 1 is used. + // Example: when this is set to 30%, the new RC can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods does not exceed + // 130% of desired pods. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is at most 130% of desired pods. + // +optional + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` +} + +// DeploymentStatus is the most recently observed status of the Deployment. +type DeploymentStatus struct { + // The generation observed by the deployment controller.
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` + + // Total number of ready pods targeted by this deployment. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` + + // Total number of unavailable pods targeted by this deployment. + // +optional + UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` + + // Represents the latest available observations of a deployment's current state. + Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` +} + +type DeploymentConditionType string + +// These are valid conditions of a deployment. +const ( + // Available means the deployment is available, ie. at least the minimum available + // replicas required are up and running for at least minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // Progressing means the deployment is progressing. Progress for a deployment is + // considered when a new replica set is created or adopted, and when new pods scale + // up or old pods scale down. Progress is not estimated for paused deployments or + // when progressDeadlineSeconds is not specified. + DeploymentProgressing DeploymentConditionType = "Progressing" + // ReplicaFailure is added in a deployment when one of its pods fails to be created + // or deleted. + DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + // The last time this condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// DeploymentList is a list of Deployments. +type DeploymentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
+ // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Deployments. + Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +type DaemonSetUpdateStrategy struct { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". + // Default is OnDelete. + // +optional + Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + + // Rolling update config params. Present only if type = "RollingUpdate". + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as DeploymentStrategy.RollingUpdate. + // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +type DaemonSetUpdateStrategyType string + +const ( + // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. + RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" + + // Replace the old daemons only when it's killed + OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete" +) + +// Spec to control the desired behavior of daemon set rolling update. +type RollingUpdateDaemonSet struct { + // The maximum number of DaemonSet pods that can be unavailable during the + // update. Value can be an absolute number (ex: 5) or a percentage of total + // number of DaemonSet pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // This cannot be 0. + // Default value is 1. + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` +} + +// DaemonSetSpec is the specification of a daemon set. +type DaemonSetSpec struct { + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // If empty, defaulted to labels on Pod template. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` + + // An object that describes the pod that will be created. + // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). + // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` + + // An update strategy to replace existing DaemonSet pods with new pods. 
+ // +optional + UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"` + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + + // A sequence number representing a specific generation of the template. + // Populated by the system. It can be set only during the creation. + // +optional + TemplateGeneration int64 `json:"templateGeneration,omitempty" protobuf:"varint,5,opt,name=templateGeneration"` +} + +// DaemonSetStatus represents the current status of a daemon set. +type DaemonSetStatus struct { + // The number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. + // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"` + + // The number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. + // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"` + + // The total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). + // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"` + + // The number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running and ready. + NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"` + + // The most recent generation observed by the daemon set controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"` + + // The total number of nodes that are running updated daemon pod + // +optional + UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"` + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"` + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"` +} + +// +genclient=true + +// DaemonSet represents the configuration of a daemon set. +type DaemonSet struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The desired behavior of this daemon set. 
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +const ( + // DaemonSetTemplateGenerationKey is the key of the labels that is added + // to daemon set pods to distinguish between old and new pod templates + // during DaemonSet template update. + DaemonSetTemplateGenerationKey string = "pod-template-generation" +) + +// DaemonSetList is a collection of daemon sets. +type DaemonSetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // A list of daemon sets. + Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ThirdPartyResourceDataList is a list of ThirdPartyResourceData. +type ThirdPartyResourceDataList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of ThirdPartyResourceData. + Items []ThirdPartyResourceData `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true + +// Ingress is a collection of rules that allow inbound connections to reach the +// endpoints defined by a backend. An Ingress can be configured to give services +// externally-reachable urls, load balance traffic, terminate SSL, offer name +// based virtual hosting etc. +type Ingress struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec is the desired state of the Ingress. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the current state of the Ingress. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// IngressList is a collection of Ingress. +type IngressList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Ingress. + Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// IngressSpec describes the Ingress the user wishes to exist. +type IngressSpec struct { + // A default backend capable of servicing requests that don't match any + // rule. At least one of 'backend' or 'rules' must be specified. This field + // is optional to allow the loadbalancer controller or defaulting logic to + // specify a global default. 
+ // +optional + Backend *IngressBackend `json:"backend,omitempty" protobuf:"bytes,1,opt,name=backend"` + + // TLS configuration. Currently the Ingress only supports a single TLS + // port, 443. If multiple members of this list specify different hosts, they + // will be multiplexed on the same port according to the hostname specified + // through the SNI TLS extension, if the ingress controller fulfilling the + // ingress supports SNI. + // +optional + TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` + + // A list of host rules used to configure the Ingress. If unspecified, or + // no rule matches, all traffic is sent to the default backend. + // +optional + Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + // TODO: Add the ability to specify load-balancer IP through claims +} + +// IngressTLS describes the transport layer security associated with an Ingress. +type IngressTLS struct { + // Hosts are a list of hosts included in the TLS certificate. The values in + // this list must match the name/s used in the tlsSecret. Defaults to the + // wildcard host setting for the loadbalancer controller fulfilling this + // Ingress, if left unspecified. + // +optional + Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` + // SecretName is the name of the secret used to terminate SSL traffic on 443. + // Field is left optional to allow SSL routing based on SNI hostname alone. + // If the SNI host in a listener conflicts with the "Host" header field used + // by an IngressRule, the SNI host is used for termination and value of the + // Host header is used for routing. + // +optional + SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"` + // TODO: Consider specifying different modes of termination, protocols etc. +} + +// IngressStatus describe the current state of the Ingress. +type IngressStatus struct { + // LoadBalancer contains the current status of the load-balancer. + // +optional + LoadBalancer v1.LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` +} + +// IngressRule represents the rules mapping the paths under a specified host to +// the related backend services. Incoming requests are first evaluated for a host +// match, then routed to the backend associated with the matching IngressRuleValue. +type IngressRule struct { + // Host is the fully qualified domain name of a network host, as defined + // by RFC 3986. Note the following deviations from the "host" part of the + // URI as defined in the RFC: + // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the + // IP in the Spec of the parent Ingress. + // 2. The `:` delimiter is not respected because ports are not allowed. + // Currently the port of an Ingress is implicitly :80 for http and + // :443 for https. + // Both these may change in the future. + // Incoming requests are matched against the host before the IngressRuleValue. + // If the host is unspecified, the Ingress routes all traffic based on the + // specified IngressRuleValue. + // +optional + Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` + // IngressRuleValue represents a rule to route requests for this IngressRule. + // If unspecified, the rule defaults to a http catch-all. Whether that sends + // just traffic matching the host to the default backend or all traffic to the + // default backend, is left to the controller fulfilling the Ingress. 
Http is + // currently the only supported IngressRuleValue. + // +optional + IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` +} + +// IngressRuleValue represents a rule to apply against incoming requests. If the +// rule is satisfied, the request is routed to the specified backend. Currently +// mixing different types of rules in a single Ingress is disallowed, so exactly +// one of the following must be set. +type IngressRuleValue struct { + //TODO: + // 1. Consider renaming this resource and the associated rules so they + // aren't tied to Ingress. They can be used to route intra-cluster traffic. + // 2. Consider adding fields for ingress-type specific global options + // usable by a loadbalancer, like http keep-alive. + + // +optional + HTTP *HTTPIngressRuleValue `json:"http,omitempty" protobuf:"bytes,1,opt,name=http"` +} + +// HTTPIngressRuleValue is a list of http selectors pointing to backends. +// In the example: http://<host>/<path>?<searchpart> -> backend where +// where parts of the url correspond to RFC 3986, this resource will be used +// to match against everything after the last '/' and before the first '?' +// or '#'. +type HTTPIngressRuleValue struct { + // A collection of paths that map requests to backends. + Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` + // TODO: Consider adding fields for ingress-type specific global + // options usable by a loadbalancer, like http keep-alive. +} + +// HTTPIngressPath associates a path regex with a backend. Incoming urls matching +// the path are forwarded to the backend. +type HTTPIngressPath struct { + // Path is an extended POSIX regex as defined by IEEE Std 1003.1, + // (i.e this follows the egrep/unix syntax, not the perl syntax) + // matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" + // part of a URL as defined by RFC 3986. Paths must begin with + // a '/'. If unspecified, the path defaults to a catch all sending + // traffic to the backend. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` + + // Backend defines the referenced service endpoint to which the traffic + // will be forwarded to. + Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` +} + +// IngressBackend describes all endpoints for a given service and port. +type IngressBackend struct { + // Specifies the name of the referenced service. + ServiceName string `json:"serviceName" protobuf:"bytes,1,opt,name=serviceName"` + + // Specifies the port of the referenced service. + ServicePort intstr.IntOrString `json:"servicePort" protobuf:"bytes,2,opt,name=servicePort"` +} + +// +genclient=true + +// ReplicaSet represents the configuration of a ReplicaSet. +type ReplicaSet struct { + metav1.TypeMeta `json:",inline"` + + // If the Labels of a ReplicaSet are empty, they are defaulted to + // be the same as the Pod(s) that the ReplicaSet manages. + // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the specification of the desired behavior of the ReplicaSet. 
+ // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the most recently observed status of the ReplicaSet. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // +optional + Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ReplicaSetList is a collection of ReplicaSets. +type ReplicaSetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of ReplicaSets. + // More info: http://kubernetes.io/docs/user-guide/replication-controller + Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +type ReplicaSetSpec struct { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + + // Selector is a label query over pods that should match the replica count. + // If the selector is empty, it is defaulted to the labels present on the pod template. + // Label keys and values that must match in order to be controlled by this replica set. + // More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template + // +optional + Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +type ReplicaSetStatus struct { + // Replicas is the most recently observed number of replicas. + // More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` + + // The number of ready replicas for this replica set. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"` + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. 
+ // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"` + + // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` + + // Represents the latest available observations of a replica set's current state. + // +optional + Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` +} + +type ReplicaSetConditionType string + +// These are valid conditions of a replica set. +const ( + // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created + // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted + // due to kubelet being down or finalizers are failing. + ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure" +) + +// ReplicaSetCondition describes the state of a replica set at a certain point. +type ReplicaSetCondition struct { + // Type of replica set condition. + Type ReplicaSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicaSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient=true +// +nonNamespaced=true + +// Pod Security Policy governs the ability to make requests that affect the Security Context +// that will be applied to a pod and container. +type PodSecurityPolicy struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec defines the policy enforced. + // +optional + Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// Pod Security Policy Spec defines the policy enforced. +type PodSecurityPolicySpec struct { + // privileged determines if a pod can request to be run as privileged. + // +optional + Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` + // DefaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. You may not list a capability in both + // DefaultAddCapabilities and RequiredDropCapabilities. 
+ // +optional + RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/kubernetes/pkg/api/v1.Capability"` + // AllowedCapabilities is a list of capabilities that can be requested to add to the container. + // Capabilities in this field may be added at the pod author's discretion. + // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. + // +optional + AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/kubernetes/pkg/api/v1.Capability"` + // volumes is a white list of allowed volume plugins. Empty indicates that all plugins + // may be used. + // +optional + Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"` + // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + // +optional + HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"` + // hostPorts determines which host port ranges are allowed to be exposed. + // +optional + HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"` + // hostPID determines if the policy allows the use of HostPID in the pod spec. + // +optional + HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"` + // hostIPC determines if the policy allows the use of HostIPC in the pod spec. + // +optional + HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"` + // seLinux is the strategy that will dictate the allowable labels that may be set. + SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` + // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. + RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` + // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` + // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. + FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"` + // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file + // system. If the container specifically requests to run with a non-read only root file system + // the PSP should deny the pod. + // If set to false the container may run with a read only root file system if it wishes but it + // will not be forced to. + // +optional + ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"` +} + +// FS Type gives strong typing to different file systems that are used by volumes. 
+type FSType string + +var ( + AzureFile FSType = "azureFile" + Flocker FSType = "flocker" + FlexVolume FSType = "flexVolume" + HostPath FSType = "hostPath" + EmptyDir FSType = "emptyDir" + GCEPersistentDisk FSType = "gcePersistentDisk" + AWSElasticBlockStore FSType = "awsElasticBlockStore" + GitRepo FSType = "gitRepo" + Secret FSType = "secret" + NFS FSType = "nfs" + ISCSI FSType = "iscsi" + Glusterfs FSType = "glusterfs" + PersistentVolumeClaim FSType = "persistentVolumeClaim" + RBD FSType = "rbd" + Cinder FSType = "cinder" + CephFS FSType = "cephFS" + DownwardAPI FSType = "downwardAPI" + FC FSType = "fc" + ConfigMap FSType = "configMap" + Quobyte FSType = "quobyte" + AzureDisk FSType = "azureDisk" + All FSType = "*" +) + +// Host Port Range defines a range of host ports that will be enabled by a policy +// for pods to use. It requires both the start and end to be defined. +type HostPortRange struct { + // min is the start of the range, inclusive. + Min int32 `json:"min" protobuf:"varint,1,opt,name=min"` + // max is the end of the range, inclusive. + Max int32 `json:"max" protobuf:"varint,2,opt,name=max"` +} + +// SELinux Strategy Options defines the strategy type and any options used to create the strategy. +type SELinuxStrategyOptions struct { + // type is the strategy that will dictate the allowable labels that may be set. + Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` + // seLinuxOptions required to run as; required for MustRunAs + // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context + // +optional + SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` +} + +// SELinuxStrategy denotes strategy types for generating SELinux options for a +// Security Context. +type SELinuxStrategy string + +const ( + // container must have SELinux labels of X applied. + SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" + // container may make requests for any SELinux context labels. + SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" +) + +// Run As User Strategy Options defines the strategy type and any options used to create the strategy. +type RunAsUserStrategyOptions struct { + // Rule is the strategy that will dictate the allowable RunAsUser values that may be set. + Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"` + // Ranges are the allowed ranges of uids that may be used. + // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// ID Range provides a min/max of an allowed range of IDs. +type IDRange struct { + // Min is the start of the range, inclusive. + Min int64 `json:"min" protobuf:"varint,1,opt,name=min"` + // Max is the end of the range, inclusive. + Max int64 `json:"max" protobuf:"varint,2,opt,name=max"` +} + +// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a +// Security Context. +type RunAsUserStrategy string + +const ( + // container must run as a particular uid. + RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" + // container must run as a non-root uid + RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" + // container may make requests for any uid. + RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" +) + +// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. 
+type FSGroupStrategyOptions struct { + // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext. + // +optional + Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"` + // Ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. + // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// FSGroupStrategyType denotes strategy types for generating FSGroup values for a +// SecurityContext +type FSGroupStrategyType string + +const ( + // container must have FSGroup of X applied. + FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" + // container may make requests for any FSGroup labels. + FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" +) + +// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +type SupplementalGroupsStrategyOptions struct { + // Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. + // +optional + Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"` + // Ranges are the allowed ranges of supplemental groups. If you would like to force a single + // supplemental group then supply a single range with the same start and end. + // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental +// groups for a SecurityContext. +type SupplementalGroupsStrategyType string + +const ( + // container must run as a particular gid. + SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" + // container may make requests for any gid. + SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" +) + +// Pod Security Policy List is a list of PodSecurityPolicy objects. +type PodSecurityPolicyList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +type NetworkPolicy struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior for this NetworkPolicy. + // +optional + Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +type NetworkPolicySpec struct { + // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules + // is applied to any pods selected by this field. Multiple network policies can select the + // same set of pods. In this case, the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. + PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"` + + // List of ingress rules to be applied to the selected pods. 
+ // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, + // OR if the traffic source is the pod's local node, + // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy + // objects whose podSelector matches the pod. + // If this field is empty then this NetworkPolicy does not affect ingress isolation. + // If this field is present and contains at least one rule, this policy allows any traffic + // which matches at least one of the ingress rules in this list. + // +optional + Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"` +} + +// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. +type NetworkPolicyIngressRule struct { + // List of ports which should be made accessible on the pods selected for this rule. + // Each item in this list is combined using a logical OR. + // If this field is not provided, this rule matches all ports (traffic not restricted by port). + // If this field is empty, this rule matches no ports (no traffic matches). + // If this field is present and contains at least one item, then this rule allows traffic + // only if the traffic matches at least one port in the list. + // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. + // +optional + Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` + + // List of sources which should be able to access the pods selected for this rule. + // Items in this list are combined using a logical OR operation. + // If this field is not provided, this rule matches all sources (traffic not restricted by source). + // If this field is empty, this rule matches no sources (no traffic matches). + // If this field is present and contains at least one item, this rule allows traffic only if the + // traffic matches at least one item in the from list. + // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. + // +optional + From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` +} + +type NetworkPolicyPort struct { + // Optional. The protocol (TCP or UDP) which traffic must match. + // If not specified, this field defaults to TCP. + // +optional + Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/kubernetes/pkg/api/v1.Protocol"` + + // If specified, the port on the given protocol. This can + // either be a numerical or named port on a pod. If this field is not provided, + // this matches all port names and numbers. + // If present, only traffic on the specified protocol AND port + // will be matched. + // +optional + Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` +} + +type NetworkPolicyPeer struct { + // Exactly one of the following must be specified. + + // This is a label selector which selects Pods in this namespace. + // This field follows standard label selector semantics. + // If not provided, this selector selects no pods. + // If present but empty, this selector selects all pods in this namespace. + // +optional + PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"` + + // Selects Namespaces using cluster scoped-labels. This + // matches all pods in all namespaces selected by this label selector. + // This field follows standard label selector semantics. 
+ // If omitted, this selector selects no namespaces. + // If present but empty, this selector selects all namespaces. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"` +} + +// Network Policy List is a list of NetworkPolicy objects. +type NetworkPolicyList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..587615590 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,627 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_APIVersion = map[string]string{ + "": "An APIVersion represents a single concrete version of an object model.", + "name": "Name of this version (e.g. 'v1').", +} + +func (APIVersion) SwaggerDoc() map[string]string { + return map_APIVersion +} + +var map_CustomMetricCurrentStatus = map[string]string{ + "name": "Custom Metric name.", + "value": "Custom Metric value (average).", +} + +func (CustomMetricCurrentStatus) SwaggerDoc() map[string]string { + return map_CustomMetricCurrentStatus +} + +var map_CustomMetricTarget = map[string]string{ + "": "Alpha-level support for Custom Metrics in HPA (as annotations).", + "name": "Custom Metric name.", + "value": "Custom Metric value (average).", +} + +func (CustomMetricTarget) SwaggerDoc() map[string]string { + return map_CustomMetricTarget +} + +var map_DaemonSet = map[string]string{ + "": "DaemonSet represents the configuration of a daemon set.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "The desired behavior of this daemon set. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. This data may be out of date by some window of time. 
Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (DaemonSet) SwaggerDoc() map[string]string { + return map_DaemonSet +} + +var map_DaemonSetList = map[string]string{ + "": "DaemonSetList is a collection of daemon sets.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "A list of daemon sets.", +} + +func (DaemonSetList) SwaggerDoc() map[string]string { + return map_DaemonSetList +} + +var map_DaemonSetSpec = map[string]string{ + "": "DaemonSetSpec is the specification of a daemon set.", + "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template", + "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", + "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", + "templateGeneration": "A sequence number representing a specific generation of the template. Populated by the system. It can be set only during the creation.", +} + +func (DaemonSetSpec) SwaggerDoc() map[string]string { + return map_DaemonSetSpec +} + +var map_DaemonSetStatus = map[string]string{ + "": "DaemonSetStatus represents the current status of a daemon set.", + "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md", + "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md", + "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md", + "numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", + "observedGeneration": "The most recent generation observed by the daemon set controller.", + "updatedNumberScheduled": "The total number of nodes that are running updated daemon pod", + "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", + "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", +} + +func (DaemonSetStatus) SwaggerDoc() map[string]string { + return map_DaemonSetStatus +} + +var map_DaemonSetUpdateStrategy = map[string]string{ + "type": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is OnDelete.", + "rollingUpdate": "Rolling update config params. 
Present only if type = \"RollingUpdate\".", +} + +func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string { + return map_DaemonSetUpdateStrategy +} + +var map_Deployment = map[string]string{ + "": "Deployment enables declarative updates for Pods and ReplicaSets.", + "metadata": "Standard object metadata.", + "spec": "Specification of the desired behavior of the Deployment.", + "status": "Most recently observed status of the Deployment.", +} + +func (Deployment) SwaggerDoc() map[string]string { + return map_Deployment +} + +var map_DeploymentCondition = map[string]string{ + "": "DeploymentCondition describes the state of a deployment at a certain point.", + "type": "Type of deployment condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DeploymentCondition) SwaggerDoc() map[string]string { + return map_DeploymentCondition +} + +var map_DeploymentList = map[string]string{ + "": "DeploymentList is a list of Deployments.", + "metadata": "Standard list metadata.", + "items": "Items is the list of Deployments.", +} + +func (DeploymentList) SwaggerDoc() map[string]string { + return map_DeploymentList +} + +var map_DeploymentRollback = map[string]string{ + "": "DeploymentRollback stores the information required to rollback a deployment.", + "name": "Required: This must match the Name of a deployment.", + "updatedAnnotations": "The annotations to be updated to a deployment", + "rollbackTo": "The config of this deployment rollback.", +} + +func (DeploymentRollback) SwaggerDoc() map[string]string { + return map_DeploymentRollback +} + +var map_DeploymentSpec = map[string]string{ + "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", + "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", + "template": "Template describes the pods that will be created.", + "strategy": "The deployment strategy to use to replace existing pods with new ones.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified.", + "paused": "Indicates that the deployment is paused and will not be processed by the deployment controller.", + "rollbackTo": "The config this deployment is rolling back to. Will be cleared after rollback is done.", + "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Once autoRollback is implemented, the deployment controller will automatically rollback failed deployments. 
Note that progress will not be estimated during the time a deployment is paused. This is not set by default.", +} + +func (DeploymentSpec) SwaggerDoc() map[string]string { + return map_DeploymentSpec +} + +var map_DeploymentStatus = map[string]string{ + "": "DeploymentStatus is the most recently observed status of the Deployment.", + "observedGeneration": "The generation observed by the deployment controller.", + "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", + "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "readyReplicas": "Total number of ready pods targeted by this deployment.", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", + "unavailableReplicas": "Total number of unavailable pods targeted by this deployment.", + "conditions": "Represents the latest available observations of a deployment's current state.", +} + +func (DeploymentStatus) SwaggerDoc() map[string]string { + return map_DeploymentStatus +} + +var map_DeploymentStrategy = map[string]string{ + "": "DeploymentStrategy describes how to replace existing pods with new ones.", + "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", + "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", +} + +func (DeploymentStrategy) SwaggerDoc() map[string]string { + return map_DeploymentStrategy +} + +var map_FSGroupStrategyOptions = map[string]string{ + "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.", + "rule": "Rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", + "ranges": "Ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end.", +} + +func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { + return map_FSGroupStrategyOptions +} + +var map_HTTPIngressPath = map[string]string{ + "": "HTTPIngressPath associates a path regex with a backend. Incoming urls matching the path are forwarded to the backend.", + "path": "Path is an extended POSIX regex as defined by IEEE Std 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. If unspecified, the path defaults to a catch all sending traffic to the backend.", + "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", +} + +func (HTTPIngressPath) SwaggerDoc() map[string]string { + return map_HTTPIngressPath +} + +var map_HTTPIngressRuleValue = map[string]string{ + "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http://<host>/<path>?<searchpart> -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' 
or '#'.", + "paths": "A collection of paths that map requests to backends.", +} + +func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { + return map_HTTPIngressRuleValue +} + +var map_HostPortRange = map[string]string{ + "": "Host Port Range defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined.", + "min": "min is the start of the range, inclusive.", + "max": "max is the end of the range, inclusive.", +} + +func (HostPortRange) SwaggerDoc() map[string]string { + return map_HostPortRange +} + +var map_IDRange = map[string]string{ + "": "ID Range provides a min/max of an allowed range of IDs.", + "min": "Min is the start of the range, inclusive.", + "max": "Max is the end of the range, inclusive.", +} + +func (IDRange) SwaggerDoc() map[string]string { + return map_IDRange +} + +var map_Ingress = map[string]string{ + "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec is the desired state of the Ingress. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is the current state of the Ingress. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (Ingress) SwaggerDoc() map[string]string { + return map_Ingress +} + +var map_IngressBackend = map[string]string{ + "": "IngressBackend describes all endpoints for a given service and port.", + "serviceName": "Specifies the name of the referenced service.", + "servicePort": "Specifies the port of the referenced service.", +} + +func (IngressBackend) SwaggerDoc() map[string]string { + return map_IngressBackend +} + +var map_IngressList = map[string]string{ + "": "IngressList is a collection of Ingress.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is the list of Ingress.", +} + +func (IngressList) SwaggerDoc() map[string]string { + return map_IngressList +} + +var map_IngressRule = map[string]string{ + "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", + "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in the RFC: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the\n\t IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.", +} + +func (IngressRule) SwaggerDoc() map[string]string { + return map_IngressRule +} + +var map_IngressRuleValue = map[string]string{ + "": "IngressRuleValue represents a rule to apply against incoming requests. 
If the rule is satisfied, the request is routed to the specified backend. Currently mixing different types of rules in a single Ingress is disallowed, so exactly one of the following must be set.", +} + +func (IngressRuleValue) SwaggerDoc() map[string]string { + return map_IngressRuleValue +} + +var map_IngressSpec = map[string]string{ + "": "IngressSpec describes the Ingress the user wishes to exist.", + "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", + "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", +} + +func (IngressSpec) SwaggerDoc() map[string]string { + return map_IngressSpec +} + +var map_IngressStatus = map[string]string{ + "": "IngressStatus describe the current state of the Ingress.", + "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", +} + +func (IngressStatus) SwaggerDoc() map[string]string { + return map_IngressStatus +} + +var map_IngressTLS = map[string]string{ + "": "IngressTLS describes the transport layer security associated with an Ingress.", + "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", + "secretName": "SecretName is the name of the secret used to terminate SSL traffic on 443. Field is left optional to allow SSL routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", +} + +func (IngressTLS) SwaggerDoc() map[string]string { + return map_IngressTLS +} + +var map_NetworkPolicy = map[string]string{ + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior for this NetworkPolicy.", +} + +func (NetworkPolicy) SwaggerDoc() map[string]string { + return map_NetworkPolicy +} + +var map_NetworkPolicyIngressRule = map[string]string{ + "": "This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.", + "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is not provided, this rule matches all ports (traffic not restricted by port). If this field is empty, this rule matches no ports (no traffic matches). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is not provided, this rule matches all sources (traffic not restricted by source). 
If this field is empty, this rule matches no sources (no traffic matches). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.", +} + +func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { + return map_NetworkPolicyIngressRule +} + +var map_NetworkPolicyList = map[string]string{ + "": "Network Policy List is a list of NetworkPolicy objects.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (NetworkPolicyList) SwaggerDoc() map[string]string { + return map_NetworkPolicyList +} + +var map_NetworkPolicyPeer = map[string]string{ + "podSelector": "This is a label selector which selects Pods in this namespace. This field follows standard label selector semantics. If not provided, this selector selects no pods. If present but empty, this selector selects all pods in this namespace.", + "namespaceSelector": "Selects Namespaces using cluster scoped-labels. This matches all pods in all namespaces selected by this label selector. This field follows standard label selector semantics. If omitted, this selector selects no namespaces. If present but empty, this selector selects all namespaces.", +} + +func (NetworkPolicyPeer) SwaggerDoc() map[string]string { + return map_NetworkPolicyPeer +} + +var map_NetworkPolicyPort = map[string]string{ + "protocol": "Optional. The protocol (TCP or UDP) which traffic must match. If not specified, this field defaults to TCP.", + "port": "If specified, the port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", +} + +func (NetworkPolicyPort) SwaggerDoc() map[string]string { + return map_NetworkPolicyPort +} + +var map_NetworkPolicySpec = map[string]string{ + "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", + "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not affect ingress isolation. If this field is present and contains at least one rule, this policy allows any traffic which matches at least one of the ingress rules in this list.", +} + +func (NetworkPolicySpec) SwaggerDoc() map[string]string { + return map_NetworkPolicySpec +} + +var map_PodSecurityPolicy = map[string]string{ + "": "Pod Security Policy governs the ability to make requests that affect the Security Context that will be applied to a pod and container.", + "metadata": "Standard object's metadata. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "spec defines the policy enforced.", +} + +func (PodSecurityPolicy) SwaggerDoc() map[string]string { + return map_PodSecurityPolicy +} + +var map_PodSecurityPolicyList = map[string]string{ + "": "Pod Security Policy List is a list of PodSecurityPolicy objects.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (PodSecurityPolicyList) SwaggerDoc() map[string]string { + return map_PodSecurityPolicyList +} + +var map_PodSecurityPolicySpec = map[string]string{ + "": "Pod Security Policy Spec defines the policy enforced.", + "privileged": "privileged determines if a pod can request to be run as privileged.", + "defaultAddCapabilities": "DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both DefaultAddCapabilities and RequiredDropCapabilities.", + "requiredDropCapabilities": "RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", + "allowedCapabilities": "AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.", + "volumes": "volumes is a white list of allowed volume plugins. Empty indicates that all plugins may be used.", + "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", + "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", + "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", + "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", + "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", + "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", + "supplementalGroups": "SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", + "fsGroup": "FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.", + "readOnlyRootFilesystem": "ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", +} + +func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { + return map_PodSecurityPolicySpec +} + +var map_ReplicaSet = map[string]string{ + "": "ReplicaSet represents the configuration of a ReplicaSet.", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", +} + +func (ReplicaSet) SwaggerDoc() map[string]string { + return map_ReplicaSet +} + +var map_ReplicaSetCondition = map[string]string{ + "": "ReplicaSetCondition describes the state of a replica set at a certain point.", + "type": "Type of replica set condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "The last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (ReplicaSetCondition) SwaggerDoc() map[string]string { + return map_ReplicaSetCondition +} + +var map_ReplicaSetList = map[string]string{ + "": "ReplicaSetList is a collection of ReplicaSets.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "items": "List of ReplicaSets. More info: http://kubernetes.io/docs/user-guide/replication-controller", +} + +func (ReplicaSetList) SwaggerDoc() map[string]string { + return map_ReplicaSetList +} + +var map_ReplicaSetSpec = map[string]string{ + "": "ReplicaSetSpec is the specification of a ReplicaSet.", + "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its containers crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "selector": "Selector is a label query over pods that should match the replica count. If the selector is empty, it is defaulted to the labels present on the pod template. Label keys and values that must match in order to be controlled by this replica set. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template", +} + +func (ReplicaSetSpec) SwaggerDoc() map[string]string { + return map_ReplicaSetSpec +} + +var map_ReplicaSetStatus = map[string]string{ + "": "ReplicaSetStatus represents the current status of a ReplicaSet.", + "replicas": "Replicas is the most recently observed number of replicas. 
More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller", + "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", + "readyReplicas": "The number of ready replicas for this replica set.", + "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", + "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", + "conditions": "Represents the latest available observations of a replica set's current state.", +} + +func (ReplicaSetStatus) SwaggerDoc() map[string]string { + return map_ReplicaSetStatus +} + +var map_ReplicationControllerDummy = map[string]string{ + "": "Dummy definition", +} + +func (ReplicationControllerDummy) SwaggerDoc() map[string]string { + return map_ReplicationControllerDummy +} + +var map_RollbackConfig = map[string]string{ + "revision": "The revision to rollback to. If set to 0, rollback to the last revision.", +} + +func (RollbackConfig) SwaggerDoc() map[string]string { + return map_RollbackConfig +} + +var map_RollingUpdateDaemonSet = map[string]string{ + "": "Spec to control the desired behavior of daemon set rolling update.", + "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", +} + +func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { + return map_RollingUpdateDaemonSet +} + +var map_RollingUpdateDeployment = map[string]string{ + "": "Spec to control the desired behavior of rolling update.", + "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. By default, a fixed value of 1 is used. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. By default, a value of 1 is used. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods does not exceed 130% of desired pods. 
Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", +} + +func (RollingUpdateDeployment) SwaggerDoc() map[string]string { + return map_RollingUpdateDeployment +} + +var map_RunAsUserStrategyOptions = map[string]string{ + "": "RunAsUser Strategy Options defines the strategy type and any options used to create the strategy.", + "rule": "Rule is the strategy that will dictate the allowable RunAsUser values that may be set.", + "ranges": "Ranges are the allowed ranges of uids that may be used.", +} + +func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { + return map_RunAsUserStrategyOptions +} + +var map_SELinuxStrategyOptions = map[string]string{ + "": "SELinux Strategy Options defines the strategy type and any options used to create the strategy.", + "rule": "type is the strategy that will dictate the allowable labels that may be set.", + "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context", +} + +func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { + return map_SELinuxStrategyOptions +} + +var map_Scale = map[string]string{ + "": "represents a scaling request for a resource.", + "metadata": "Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.", +} + +func (Scale) SwaggerDoc() map[string]string { + return map_Scale +} + +var map_ScaleSpec = map[string]string{ + "": "describes the attributes of a scale subresource", + "replicas": "desired number of instances for the scaled object.", +} + +func (ScaleSpec) SwaggerDoc() map[string]string { + return map_ScaleSpec +} + +var map_ScaleStatus = map[string]string{ + "": "represents the current status of a scale subresource.", + "replicas": "actual number of observed instances of the scaled object.", + "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "targetSelector": "label selector for pods that should match the replicas count. This is a serialized version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", +} + +func (ScaleStatus) SwaggerDoc() map[string]string { + return map_ScaleStatus +} + +var map_SupplementalGroupsStrategyOptions = map[string]string{ + "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.", + "rule": "Rule is the strategy that will dictate what supplemental groups are used in the SecurityContext.", + "ranges": "Ranges are the allowed ranges of supplemental groups. 
If you would like to force a single supplemental group then supply a single range with the same start and end.", +} + +func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { + return map_SupplementalGroupsStrategyOptions +} + +var map_ThirdPartyResource = map[string]string{ + "": "A ThirdPartyResource is a generic representation of a resource; it is used by add-ons and plugins to add new resource types to the API. It consists of one or more Versions of the api.", + "metadata": "Standard object metadata", + "description": "Description is the description of this object.", + "versions": "Versions are versions for this third party object", +} + +func (ThirdPartyResource) SwaggerDoc() map[string]string { + return map_ThirdPartyResource +} + +var map_ThirdPartyResourceData = map[string]string{ + "": "An internal object, used for versioned storage in etcd. Not exposed to the end user.", + "metadata": "Standard object metadata.", + "data": "Data is the raw JSON data for this data.", +} + +func (ThirdPartyResourceData) SwaggerDoc() map[string]string { + return map_ThirdPartyResourceData +} + +var map_ThirdPartyResourceDataList = map[string]string{ + "": "ThirdPartyResourceDataList is a list of ThirdPartyResourceData.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is the list of ThirdPartyResourceData.", +} + +func (ThirdPartyResourceDataList) SwaggerDoc() map[string]string { + return map_ThirdPartyResourceDataList +} + +var map_ThirdPartyResourceList = map[string]string{ + "": "ThirdPartyResourceList is a list of ThirdPartyResources.", + "metadata": "Standard list metadata.", + "items": "Items is the list of ThirdPartyResources.", +} + +func (ThirdPartyResourceList) SwaggerDoc() map[string]string { + return map_ThirdPartyResourceList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.conversion.go new file mode 100644 index 000000000..2baeaa676 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.conversion.go @@ -0,0 +1,1628 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" + api "k8s.io/client-go/pkg/api" + api_v1 "k8s.io/client-go/pkg/api/v1" + extensions "k8s.io/client-go/pkg/apis/extensions" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_APIVersion_To_extensions_APIVersion, + Convert_extensions_APIVersion_To_v1beta1_APIVersion, + Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus, + Convert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus, + Convert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList, + Convert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList, + Convert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget, + Convert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget, + Convert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList, + Convert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList, + Convert_v1beta1_DaemonSet_To_extensions_DaemonSet, + Convert_extensions_DaemonSet_To_v1beta1_DaemonSet, + Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList, + Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList, + Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec, + Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec, + Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus, + Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus, + Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy, + Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy, + Convert_v1beta1_Deployment_To_extensions_Deployment, + Convert_extensions_Deployment_To_v1beta1_Deployment, + Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition, + Convert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition, + Convert_v1beta1_DeploymentList_To_extensions_DeploymentList, + Convert_extensions_DeploymentList_To_v1beta1_DeploymentList, + Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback, + Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback, + Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, + Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, + Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus, + Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus, + Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy, + Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy, + Convert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions, + Convert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions, + Convert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath, + Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath, + Convert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue, + Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue, + Convert_v1beta1_HostPortRange_To_extensions_HostPortRange, + Convert_extensions_HostPortRange_To_v1beta1_HostPortRange, + Convert_v1beta1_IDRange_To_extensions_IDRange, + Convert_extensions_IDRange_To_v1beta1_IDRange, + Convert_v1beta1_Ingress_To_extensions_Ingress, + Convert_extensions_Ingress_To_v1beta1_Ingress, + Convert_v1beta1_IngressBackend_To_extensions_IngressBackend, + Convert_extensions_IngressBackend_To_v1beta1_IngressBackend, + Convert_v1beta1_IngressList_To_extensions_IngressList, + Convert_extensions_IngressList_To_v1beta1_IngressList, + Convert_v1beta1_IngressRule_To_extensions_IngressRule, + Convert_extensions_IngressRule_To_v1beta1_IngressRule, + 
Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue, + Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue, + Convert_v1beta1_IngressSpec_To_extensions_IngressSpec, + Convert_extensions_IngressSpec_To_v1beta1_IngressSpec, + Convert_v1beta1_IngressStatus_To_extensions_IngressStatus, + Convert_extensions_IngressStatus_To_v1beta1_IngressStatus, + Convert_v1beta1_IngressTLS_To_extensions_IngressTLS, + Convert_extensions_IngressTLS_To_v1beta1_IngressTLS, + Convert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy, + Convert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy, + Convert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule, + Convert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule, + Convert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList, + Convert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList, + Convert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer, + Convert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer, + Convert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort, + Convert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort, + Convert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec, + Convert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec, + Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy, + Convert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy, + Convert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList, + Convert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList, + Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec, + Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec, + Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet, + Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet, + Convert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition, + Convert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition, + Convert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList, + Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList, + Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec, + Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec, + Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus, + Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus, + Convert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy, + Convert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy, + Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig, + Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig, + Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet, + Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet, + Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, + Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, + Convert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions, + Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions, + Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions, + Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions, + Convert_v1beta1_Scale_To_extensions_Scale, + Convert_extensions_Scale_To_v1beta1_Scale, + Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec, + Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec, + 
Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, + Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, + Convert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions, + Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions, + Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource, + Convert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource, + Convert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData, + Convert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData, + Convert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList, + Convert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList, + Convert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList, + Convert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList, + ) +} + +func autoConvert_v1beta1_APIVersion_To_extensions_APIVersion(in *APIVersion, out *extensions.APIVersion, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +func Convert_v1beta1_APIVersion_To_extensions_APIVersion(in *APIVersion, out *extensions.APIVersion, s conversion.Scope) error { + return autoConvert_v1beta1_APIVersion_To_extensions_APIVersion(in, out, s) +} + +func autoConvert_extensions_APIVersion_To_v1beta1_APIVersion(in *extensions.APIVersion, out *APIVersion, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +func Convert_extensions_APIVersion_To_v1beta1_APIVersion(in *extensions.APIVersion, out *APIVersion, s conversion.Scope) error { + return autoConvert_extensions_APIVersion_To_v1beta1_APIVersion(in, out, s) +} + +func autoConvert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in *CustomMetricCurrentStatus, out *extensions.CustomMetricCurrentStatus, s conversion.Scope) error { + out.Name = in.Name + out.CurrentValue = in.CurrentValue + return nil +} + +func Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in *CustomMetricCurrentStatus, out *extensions.CustomMetricCurrentStatus, s conversion.Scope) error { + return autoConvert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in, out, s) +} + +func autoConvert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in *extensions.CustomMetricCurrentStatus, out *CustomMetricCurrentStatus, s conversion.Scope) error { + out.Name = in.Name + out.CurrentValue = in.CurrentValue + return nil +} + +func Convert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in *extensions.CustomMetricCurrentStatus, out *CustomMetricCurrentStatus, s conversion.Scope) error { + return autoConvert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in, out, s) +} + +func autoConvert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in *CustomMetricCurrentStatusList, out *extensions.CustomMetricCurrentStatusList, s conversion.Scope) error { + out.Items = *(*[]extensions.CustomMetricCurrentStatus)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in *CustomMetricCurrentStatusList, out *extensions.CustomMetricCurrentStatusList, s conversion.Scope) error { + return autoConvert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in, out, s) +} + +func 
autoConvert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in *extensions.CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, s conversion.Scope) error { + if in.Items == nil { + out.Items = make([]CustomMetricCurrentStatus, 0) + } else { + out.Items = *(*[]CustomMetricCurrentStatus)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in *extensions.CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, s conversion.Scope) error { + return autoConvert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in, out, s) +} + +func autoConvert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in *CustomMetricTarget, out *extensions.CustomMetricTarget, s conversion.Scope) error { + out.Name = in.Name + out.TargetValue = in.TargetValue + return nil +} + +func Convert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in *CustomMetricTarget, out *extensions.CustomMetricTarget, s conversion.Scope) error { + return autoConvert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in, out, s) +} + +func autoConvert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in *extensions.CustomMetricTarget, out *CustomMetricTarget, s conversion.Scope) error { + out.Name = in.Name + out.TargetValue = in.TargetValue + return nil +} + +func Convert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in *extensions.CustomMetricTarget, out *CustomMetricTarget, s conversion.Scope) error { + return autoConvert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in, out, s) +} + +func autoConvert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in *CustomMetricTargetList, out *extensions.CustomMetricTargetList, s conversion.Scope) error { + out.Items = *(*[]extensions.CustomMetricTarget)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in *CustomMetricTargetList, out *extensions.CustomMetricTargetList, s conversion.Scope) error { + return autoConvert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in, out, s) +} + +func autoConvert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in *extensions.CustomMetricTargetList, out *CustomMetricTargetList, s conversion.Scope) error { + if in.Items == nil { + out.Items = make([]CustomMetricTarget, 0) + } else { + out.Items = *(*[]CustomMetricTarget)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in *extensions.CustomMetricTargetList, out *CustomMetricTargetList, s conversion.Scope) error { + return autoConvert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in, out, s) +} + +func autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet(in, out, s) +} + +func 
autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, out *DaemonSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, out *DaemonSet, s conversion.Scope) error { + return autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in, out, s) +} + +func autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]extensions.DaemonSet, len(*in)) + for i := range *in { + if err := Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in, out, s) +} + +func autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.DaemonSetList, out *DaemonSetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DaemonSet, len(*in)) + for i := range *in { + if err := Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]DaemonSet, 0) + } + return nil +} + +func Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.DaemonSetList, out *DaemonSetList, s conversion.Scope) error { + return autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in, out, s) +} + +func autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.TemplateGeneration = in.TemplateGeneration + return nil +} + +func Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in, out, s) +} + +func autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *DaemonSetSpec, s conversion.Scope) error { + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.TemplateGeneration = in.TemplateGeneration + return nil +} + +func 
Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *DaemonSetSpec, s conversion.Scope) error { + return autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in, out, s) +} + +func autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { + out.CurrentNumberScheduled = in.CurrentNumberScheduled + out.NumberMisscheduled = in.NumberMisscheduled + out.DesiredNumberScheduled = in.DesiredNumberScheduled + out.NumberReady = in.NumberReady + out.ObservedGeneration = in.ObservedGeneration + out.UpdatedNumberScheduled = in.UpdatedNumberScheduled + out.NumberAvailable = in.NumberAvailable + out.NumberUnavailable = in.NumberUnavailable + return nil +} + +func Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in, out, s) +} + +func autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *DaemonSetStatus, s conversion.Scope) error { + out.CurrentNumberScheduled = in.CurrentNumberScheduled + out.NumberMisscheduled = in.NumberMisscheduled + out.DesiredNumberScheduled = in.DesiredNumberScheduled + out.NumberReady = in.NumberReady + out.ObservedGeneration = in.ObservedGeneration + out.UpdatedNumberScheduled = in.UpdatedNumberScheduled + out.NumberAvailable = in.NumberAvailable + out.NumberUnavailable = in.NumberUnavailable + return nil +} + +func Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *DaemonSetStatus, s conversion.Scope) error { + return autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in, out, s) +} + +func autoConvert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { + out.Type = extensions.DaemonSetUpdateStrategyType(in.Type) + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(extensions.RollingUpdateDaemonSet) + if err := Convert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func Convert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in *DaemonSetUpdateStrategy, out *extensions.DaemonSetUpdateStrategy, s conversion.Scope) error { + return autoConvert_v1beta1_DaemonSetUpdateStrategy_To_extensions_DaemonSetUpdateStrategy(in, out, s) +} + +func autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *DaemonSetUpdateStrategy, s conversion.Scope) error { + out.Type = DaemonSetUpdateStrategyType(in.Type) + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDaemonSet) + if err := Convert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func Convert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in *extensions.DaemonSetUpdateStrategy, out *DaemonSetUpdateStrategy, s conversion.Scope) error { + return autoConvert_extensions_DaemonSetUpdateStrategy_To_v1beta1_DaemonSetUpdateStrategy(in, out, s) +} + +func 
autoConvert_v1beta1_Deployment_To_extensions_Deployment(in *Deployment, out *extensions.Deployment, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_Deployment_To_extensions_Deployment(in *Deployment, out *extensions.Deployment, s conversion.Scope) error { + return autoConvert_v1beta1_Deployment_To_extensions_Deployment(in, out, s) +} + +func autoConvert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *Deployment, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *Deployment, s conversion.Scope) error { + return autoConvert_extensions_Deployment_To_v1beta1_Deployment(in, out, s) +} + +func autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in *DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { + out.Type = extensions.DeploymentConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + out.LastUpdateTime = in.LastUpdateTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in *DeploymentCondition, out *extensions.DeploymentCondition, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentCondition_To_extensions_DeploymentCondition(in, out, s) +} + +func autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in *extensions.DeploymentCondition, out *DeploymentCondition, s conversion.Scope) error { + out.Type = DeploymentConditionType(in.Type) + out.Status = api_v1.ConditionStatus(in.Status) + out.LastUpdateTime = in.LastUpdateTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in *extensions.DeploymentCondition, out *DeploymentCondition, s conversion.Scope) error { + return autoConvert_extensions_DeploymentCondition_To_v1beta1_DeploymentCondition(in, out, s) +} + +func autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]extensions.Deployment, len(*in)) + for i := range *in { + if err := Convert_v1beta1_Deployment_To_extensions_Deployment(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in, out, s) +} + +func autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *DeploymentList, s 
conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + if err := Convert_extensions_Deployment_To_v1beta1_Deployment(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]Deployment, 0) + } + return nil +} + +func Convert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *DeploymentList, s conversion.Scope) error { + return autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in, out, s) +} + +func autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error { + out.Name = in.Name + out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations)) + if err := Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in, out, s) +} + +func autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *DeploymentRollback, s conversion.Scope) error { + out.Name = in.Name + out.UpdatedAnnotations = *(*map[string]string)(unsafe.Pointer(&in.UpdatedAnnotations)) + if err := Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *DeploymentRollback, s conversion.Scope) error { + return autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in, out, s) +} + +func autoConvert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { + if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit)) + out.Paused = in.Paused + out.RollbackTo = (*extensions.RollbackConfig)(unsafe.Pointer(in.RollbackTo)) + out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds)) + return nil +} + +func autoConvert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error { + if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { + return err + } + out.MinReadySeconds = 
in.MinReadySeconds + out.RevisionHistoryLimit = (*int32)(unsafe.Pointer(in.RevisionHistoryLimit)) + out.Paused = in.Paused + out.RollbackTo = (*RollbackConfig)(unsafe.Pointer(in.RollbackTo)) + out.ProgressDeadlineSeconds = (*int32)(unsafe.Pointer(in.ProgressDeadlineSeconds)) + return nil +} + +func autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.Replicas = in.Replicas + out.UpdatedReplicas = in.UpdatedReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.UnavailableReplicas = in.UnavailableReplicas + out.Conditions = *(*[]extensions.DeploymentCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +func Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { + return autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in, out, s) +} + +func autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *DeploymentStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.Replicas = in.Replicas + out.UpdatedReplicas = in.UpdatedReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.UnavailableReplicas = in.UnavailableReplicas + out.Conditions = *(*[]DeploymentCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +func Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *DeploymentStatus, s conversion.Scope) error { + return autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in, out, s) +} + +func autoConvert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { + out.Type = extensions.DeploymentStrategyType(in.Type) + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(extensions.RollingUpdateDeployment) + if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func autoConvert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error { + out.Type = DeploymentStrategyType(in.Type) + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDeployment) + if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(*in, *out, s); err != nil { + return err + } + } else { + out.RollingUpdate = nil + } + return nil +} + +func autoConvert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(in *FSGroupStrategyOptions, out *extensions.FSGroupStrategyOptions, s conversion.Scope) error { + out.Rule = extensions.FSGroupStrategyType(in.Rule) + out.Ranges = *(*[]extensions.IDRange)(unsafe.Pointer(&in.Ranges)) + return nil +} + +func Convert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(in *FSGroupStrategyOptions, out *extensions.FSGroupStrategyOptions, s conversion.Scope) error { + return autoConvert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(in, out, s) +} + +func 
autoConvert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(in *extensions.FSGroupStrategyOptions, out *FSGroupStrategyOptions, s conversion.Scope) error { + out.Rule = FSGroupStrategyType(in.Rule) + out.Ranges = *(*[]IDRange)(unsafe.Pointer(&in.Ranges)) + return nil +} + +func Convert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(in *extensions.FSGroupStrategyOptions, out *FSGroupStrategyOptions, s conversion.Scope) error { + return autoConvert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(in, out, s) +} + +func autoConvert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in *HTTPIngressPath, out *extensions.HTTPIngressPath, s conversion.Scope) error { + out.Path = in.Path + if err := Convert_v1beta1_IngressBackend_To_extensions_IngressBackend(&in.Backend, &out.Backend, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in *HTTPIngressPath, out *extensions.HTTPIngressPath, s conversion.Scope) error { + return autoConvert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in, out, s) +} + +func autoConvert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in *extensions.HTTPIngressPath, out *HTTPIngressPath, s conversion.Scope) error { + out.Path = in.Path + if err := Convert_extensions_IngressBackend_To_v1beta1_IngressBackend(&in.Backend, &out.Backend, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in *extensions.HTTPIngressPath, out *HTTPIngressPath, s conversion.Scope) error { + return autoConvert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in, out, s) +} + +func autoConvert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in *HTTPIngressRuleValue, out *extensions.HTTPIngressRuleValue, s conversion.Scope) error { + out.Paths = *(*[]extensions.HTTPIngressPath)(unsafe.Pointer(&in.Paths)) + return nil +} + +func Convert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in *HTTPIngressRuleValue, out *extensions.HTTPIngressRuleValue, s conversion.Scope) error { + return autoConvert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in, out, s) +} + +func autoConvert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in *extensions.HTTPIngressRuleValue, out *HTTPIngressRuleValue, s conversion.Scope) error { + if in.Paths == nil { + out.Paths = make([]HTTPIngressPath, 0) + } else { + out.Paths = *(*[]HTTPIngressPath)(unsafe.Pointer(&in.Paths)) + } + return nil +} + +func Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in *extensions.HTTPIngressRuleValue, out *HTTPIngressRuleValue, s conversion.Scope) error { + return autoConvert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in, out, s) +} + +func autoConvert_v1beta1_HostPortRange_To_extensions_HostPortRange(in *HostPortRange, out *extensions.HostPortRange, s conversion.Scope) error { + out.Min = int(in.Min) + out.Max = int(in.Max) + return nil +} + +func Convert_v1beta1_HostPortRange_To_extensions_HostPortRange(in *HostPortRange, out *extensions.HostPortRange, s conversion.Scope) error { + return autoConvert_v1beta1_HostPortRange_To_extensions_HostPortRange(in, out, s) +} + +func autoConvert_extensions_HostPortRange_To_v1beta1_HostPortRange(in *extensions.HostPortRange, out *HostPortRange, s conversion.Scope) error { + out.Min = int32(in.Min) + out.Max = int32(in.Max) + return nil +} + +func 
Convert_extensions_HostPortRange_To_v1beta1_HostPortRange(in *extensions.HostPortRange, out *HostPortRange, s conversion.Scope) error { + return autoConvert_extensions_HostPortRange_To_v1beta1_HostPortRange(in, out, s) +} + +func autoConvert_v1beta1_IDRange_To_extensions_IDRange(in *IDRange, out *extensions.IDRange, s conversion.Scope) error { + out.Min = in.Min + out.Max = in.Max + return nil +} + +func Convert_v1beta1_IDRange_To_extensions_IDRange(in *IDRange, out *extensions.IDRange, s conversion.Scope) error { + return autoConvert_v1beta1_IDRange_To_extensions_IDRange(in, out, s) +} + +func autoConvert_extensions_IDRange_To_v1beta1_IDRange(in *extensions.IDRange, out *IDRange, s conversion.Scope) error { + out.Min = in.Min + out.Max = in.Max + return nil +} + +func Convert_extensions_IDRange_To_v1beta1_IDRange(in *extensions.IDRange, out *IDRange, s conversion.Scope) error { + return autoConvert_extensions_IDRange_To_v1beta1_IDRange(in, out, s) +} + +func autoConvert_v1beta1_Ingress_To_extensions_Ingress(in *Ingress, out *extensions.Ingress, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_IngressSpec_To_extensions_IngressSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_IngressStatus_To_extensions_IngressStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_Ingress_To_extensions_Ingress(in *Ingress, out *extensions.Ingress, s conversion.Scope) error { + return autoConvert_v1beta1_Ingress_To_extensions_Ingress(in, out, s) +} + +func autoConvert_extensions_Ingress_To_v1beta1_Ingress(in *extensions.Ingress, out *Ingress, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_extensions_IngressSpec_To_v1beta1_IngressSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_extensions_IngressStatus_To_v1beta1_IngressStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_Ingress_To_v1beta1_Ingress(in *extensions.Ingress, out *Ingress, s conversion.Scope) error { + return autoConvert_extensions_Ingress_To_v1beta1_Ingress(in, out, s) +} + +func autoConvert_v1beta1_IngressBackend_To_extensions_IngressBackend(in *IngressBackend, out *extensions.IngressBackend, s conversion.Scope) error { + out.ServiceName = in.ServiceName + out.ServicePort = in.ServicePort + return nil +} + +func Convert_v1beta1_IngressBackend_To_extensions_IngressBackend(in *IngressBackend, out *extensions.IngressBackend, s conversion.Scope) error { + return autoConvert_v1beta1_IngressBackend_To_extensions_IngressBackend(in, out, s) +} + +func autoConvert_extensions_IngressBackend_To_v1beta1_IngressBackend(in *extensions.IngressBackend, out *IngressBackend, s conversion.Scope) error { + out.ServiceName = in.ServiceName + out.ServicePort = in.ServicePort + return nil +} + +func Convert_extensions_IngressBackend_To_v1beta1_IngressBackend(in *extensions.IngressBackend, out *IngressBackend, s conversion.Scope) error { + return autoConvert_extensions_IngressBackend_To_v1beta1_IngressBackend(in, out, s) +} + +func autoConvert_v1beta1_IngressList_To_extensions_IngressList(in *IngressList, out *extensions.IngressList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]extensions.Ingress)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_IngressList_To_extensions_IngressList(in *IngressList, out *extensions.IngressList, s conversion.Scope) error { + return 
autoConvert_v1beta1_IngressList_To_extensions_IngressList(in, out, s) +} + +func autoConvert_extensions_IngressList_To_v1beta1_IngressList(in *extensions.IngressList, out *IngressList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]Ingress, 0) + } else { + out.Items = *(*[]Ingress)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_extensions_IngressList_To_v1beta1_IngressList(in *extensions.IngressList, out *IngressList, s conversion.Scope) error { + return autoConvert_extensions_IngressList_To_v1beta1_IngressList(in, out, s) +} + +func autoConvert_v1beta1_IngressRule_To_extensions_IngressRule(in *IngressRule, out *extensions.IngressRule, s conversion.Scope) error { + out.Host = in.Host + if err := Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_IngressRule_To_extensions_IngressRule(in *IngressRule, out *extensions.IngressRule, s conversion.Scope) error { + return autoConvert_v1beta1_IngressRule_To_extensions_IngressRule(in, out, s) +} + +func autoConvert_extensions_IngressRule_To_v1beta1_IngressRule(in *extensions.IngressRule, out *IngressRule, s conversion.Scope) error { + out.Host = in.Host + if err := Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_IngressRule_To_v1beta1_IngressRule(in *extensions.IngressRule, out *IngressRule, s conversion.Scope) error { + return autoConvert_extensions_IngressRule_To_v1beta1_IngressRule(in, out, s) +} + +func autoConvert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in *IngressRuleValue, out *extensions.IngressRuleValue, s conversion.Scope) error { + out.HTTP = (*extensions.HTTPIngressRuleValue)(unsafe.Pointer(in.HTTP)) + return nil +} + +func Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in *IngressRuleValue, out *extensions.IngressRuleValue, s conversion.Scope) error { + return autoConvert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in, out, s) +} + +func autoConvert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in *extensions.IngressRuleValue, out *IngressRuleValue, s conversion.Scope) error { + out.HTTP = (*HTTPIngressRuleValue)(unsafe.Pointer(in.HTTP)) + return nil +} + +func Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in *extensions.IngressRuleValue, out *IngressRuleValue, s conversion.Scope) error { + return autoConvert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in, out, s) +} + +func autoConvert_v1beta1_IngressSpec_To_extensions_IngressSpec(in *IngressSpec, out *extensions.IngressSpec, s conversion.Scope) error { + out.Backend = (*extensions.IngressBackend)(unsafe.Pointer(in.Backend)) + out.TLS = *(*[]extensions.IngressTLS)(unsafe.Pointer(&in.TLS)) + out.Rules = *(*[]extensions.IngressRule)(unsafe.Pointer(&in.Rules)) + return nil +} + +func Convert_v1beta1_IngressSpec_To_extensions_IngressSpec(in *IngressSpec, out *extensions.IngressSpec, s conversion.Scope) error { + return autoConvert_v1beta1_IngressSpec_To_extensions_IngressSpec(in, out, s) +} + +func autoConvert_extensions_IngressSpec_To_v1beta1_IngressSpec(in *extensions.IngressSpec, out *IngressSpec, s conversion.Scope) error { + out.Backend = (*IngressBackend)(unsafe.Pointer(in.Backend)) + out.TLS = *(*[]IngressTLS)(unsafe.Pointer(&in.TLS)) + out.Rules = 
*(*[]IngressRule)(unsafe.Pointer(&in.Rules)) + return nil +} + +func Convert_extensions_IngressSpec_To_v1beta1_IngressSpec(in *extensions.IngressSpec, out *IngressSpec, s conversion.Scope) error { + return autoConvert_extensions_IngressSpec_To_v1beta1_IngressSpec(in, out, s) +} + +func autoConvert_v1beta1_IngressStatus_To_extensions_IngressStatus(in *IngressStatus, out *extensions.IngressStatus, s conversion.Scope) error { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.LoadBalancer, &out.LoadBalancer, 0); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_IngressStatus_To_extensions_IngressStatus(in *IngressStatus, out *extensions.IngressStatus, s conversion.Scope) error { + return autoConvert_v1beta1_IngressStatus_To_extensions_IngressStatus(in, out, s) +} + +func autoConvert_extensions_IngressStatus_To_v1beta1_IngressStatus(in *extensions.IngressStatus, out *IngressStatus, s conversion.Scope) error { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&in.LoadBalancer, &out.LoadBalancer, 0); err != nil { + return err + } + return nil +} + +func Convert_extensions_IngressStatus_To_v1beta1_IngressStatus(in *extensions.IngressStatus, out *IngressStatus, s conversion.Scope) error { + return autoConvert_extensions_IngressStatus_To_v1beta1_IngressStatus(in, out, s) +} + +func autoConvert_v1beta1_IngressTLS_To_extensions_IngressTLS(in *IngressTLS, out *extensions.IngressTLS, s conversion.Scope) error { + out.Hosts = *(*[]string)(unsafe.Pointer(&in.Hosts)) + out.SecretName = in.SecretName + return nil +} + +func Convert_v1beta1_IngressTLS_To_extensions_IngressTLS(in *IngressTLS, out *extensions.IngressTLS, s conversion.Scope) error { + return autoConvert_v1beta1_IngressTLS_To_extensions_IngressTLS(in, out, s) +} + +func autoConvert_extensions_IngressTLS_To_v1beta1_IngressTLS(in *extensions.IngressTLS, out *IngressTLS, s conversion.Scope) error { + out.Hosts = *(*[]string)(unsafe.Pointer(&in.Hosts)) + out.SecretName = in.SecretName + return nil +} + +func Convert_extensions_IngressTLS_To_v1beta1_IngressTLS(in *extensions.IngressTLS, out *IngressTLS, s conversion.Scope) error { + return autoConvert_extensions_IngressTLS_To_v1beta1_IngressTLS(in, out, s) +} + +func autoConvert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in *NetworkPolicy, out *extensions.NetworkPolicy, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in *NetworkPolicy, out *extensions.NetworkPolicy, s conversion.Scope) error { + return autoConvert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in, out, s) +} + +func autoConvert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in *extensions.NetworkPolicy, out *NetworkPolicy, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in *extensions.NetworkPolicy, out *NetworkPolicy, s conversion.Scope) error { + return autoConvert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in, out, s) +} + +func autoConvert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule(in *NetworkPolicyIngressRule, out *extensions.NetworkPolicyIngressRule, s 
conversion.Scope) error { + out.Ports = *(*[]extensions.NetworkPolicyPort)(unsafe.Pointer(&in.Ports)) + out.From = *(*[]extensions.NetworkPolicyPeer)(unsafe.Pointer(&in.From)) + return nil +} + +func Convert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule(in *NetworkPolicyIngressRule, out *extensions.NetworkPolicyIngressRule, s conversion.Scope) error { + return autoConvert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule(in, out, s) +} + +func autoConvert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in *extensions.NetworkPolicyIngressRule, out *NetworkPolicyIngressRule, s conversion.Scope) error { + out.Ports = *(*[]NetworkPolicyPort)(unsafe.Pointer(&in.Ports)) + out.From = *(*[]NetworkPolicyPeer)(unsafe.Pointer(&in.From)) + return nil +} + +func Convert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in *extensions.NetworkPolicyIngressRule, out *NetworkPolicyIngressRule, s conversion.Scope) error { + return autoConvert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in, out, s) +} + +func autoConvert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList(in *NetworkPolicyList, out *extensions.NetworkPolicyList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]extensions.NetworkPolicy)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList(in *NetworkPolicyList, out *extensions.NetworkPolicyList, s conversion.Scope) error { + return autoConvert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList(in, out, s) +} + +func autoConvert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in *extensions.NetworkPolicyList, out *NetworkPolicyList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]NetworkPolicy, 0) + } else { + out.Items = *(*[]NetworkPolicy)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in *extensions.NetworkPolicyList, out *NetworkPolicyList, s conversion.Scope) error { + return autoConvert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in, out, s) +} + +func autoConvert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(in *NetworkPolicyPeer, out *extensions.NetworkPolicyPeer, s conversion.Scope) error { + out.PodSelector = (*v1.LabelSelector)(unsafe.Pointer(in.PodSelector)) + out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) + return nil +} + +func Convert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(in *NetworkPolicyPeer, out *extensions.NetworkPolicyPeer, s conversion.Scope) error { + return autoConvert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(in, out, s) +} + +func autoConvert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in *extensions.NetworkPolicyPeer, out *NetworkPolicyPeer, s conversion.Scope) error { + out.PodSelector = (*v1.LabelSelector)(unsafe.Pointer(in.PodSelector)) + out.NamespaceSelector = (*v1.LabelSelector)(unsafe.Pointer(in.NamespaceSelector)) + return nil +} + +func Convert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in *extensions.NetworkPolicyPeer, out *NetworkPolicyPeer, s conversion.Scope) error { + return autoConvert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in, out, s) +} + +func autoConvert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(in *NetworkPolicyPort, out 
*extensions.NetworkPolicyPort, s conversion.Scope) error { + out.Protocol = (*api.Protocol)(unsafe.Pointer(in.Protocol)) + out.Port = (*intstr.IntOrString)(unsafe.Pointer(in.Port)) + return nil +} + +func Convert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(in *NetworkPolicyPort, out *extensions.NetworkPolicyPort, s conversion.Scope) error { + return autoConvert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(in, out, s) +} + +func autoConvert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in *extensions.NetworkPolicyPort, out *NetworkPolicyPort, s conversion.Scope) error { + out.Protocol = (*api_v1.Protocol)(unsafe.Pointer(in.Protocol)) + out.Port = (*intstr.IntOrString)(unsafe.Pointer(in.Port)) + return nil +} + +func Convert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in *extensions.NetworkPolicyPort, out *NetworkPolicyPort, s conversion.Scope) error { + return autoConvert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in, out, s) +} + +func autoConvert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(in *NetworkPolicySpec, out *extensions.NetworkPolicySpec, s conversion.Scope) error { + out.PodSelector = in.PodSelector + out.Ingress = *(*[]extensions.NetworkPolicyIngressRule)(unsafe.Pointer(&in.Ingress)) + return nil +} + +func Convert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(in *NetworkPolicySpec, out *extensions.NetworkPolicySpec, s conversion.Scope) error { + return autoConvert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(in, out, s) +} + +func autoConvert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in *extensions.NetworkPolicySpec, out *NetworkPolicySpec, s conversion.Scope) error { + out.PodSelector = in.PodSelector + out.Ingress = *(*[]NetworkPolicyIngressRule)(unsafe.Pointer(&in.Ingress)) + return nil +} + +func Convert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in *extensions.NetworkPolicySpec, out *NetworkPolicySpec, s conversion.Scope) error { + return autoConvert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in, out, s) +} + +func autoConvert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in *PodSecurityPolicy, out *extensions.PodSecurityPolicy, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in *PodSecurityPolicy, out *extensions.PodSecurityPolicy, s conversion.Scope) error { + return autoConvert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in, out, s) +} + +func autoConvert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in *extensions.PodSecurityPolicy, out *PodSecurityPolicy, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in *extensions.PodSecurityPolicy, out *PodSecurityPolicy, s conversion.Scope) error { + return autoConvert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in, out, s) +} + +func autoConvert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList(in *PodSecurityPolicyList, out *extensions.PodSecurityPolicyList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != 
nil { + in, out := &in.Items, &out.Items + *out = make([]extensions.PodSecurityPolicy, len(*in)) + for i := range *in { + if err := Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList(in *PodSecurityPolicyList, out *extensions.PodSecurityPolicyList, s conversion.Scope) error { + return autoConvert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList(in, out, s) +} + +func autoConvert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(in *extensions.PodSecurityPolicyList, out *PodSecurityPolicyList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodSecurityPolicy, len(*in)) + for i := range *in { + if err := Convert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]PodSecurityPolicy, 0) + } + return nil +} + +func Convert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(in *extensions.PodSecurityPolicyList, out *PodSecurityPolicyList, s conversion.Scope) error { + return autoConvert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(in, out, s) +} + +func autoConvert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in *PodSecurityPolicySpec, out *extensions.PodSecurityPolicySpec, s conversion.Scope) error { + out.Privileged = in.Privileged + out.DefaultAddCapabilities = *(*[]api.Capability)(unsafe.Pointer(&in.DefaultAddCapabilities)) + out.RequiredDropCapabilities = *(*[]api.Capability)(unsafe.Pointer(&in.RequiredDropCapabilities)) + out.AllowedCapabilities = *(*[]api.Capability)(unsafe.Pointer(&in.AllowedCapabilities)) + out.Volumes = *(*[]extensions.FSType)(unsafe.Pointer(&in.Volumes)) + out.HostNetwork = in.HostNetwork + if in.HostPorts != nil { + in, out := &in.HostPorts, &out.HostPorts + *out = make([]extensions.HostPortRange, len(*in)) + for i := range *in { + if err := Convert_v1beta1_HostPortRange_To_extensions_HostPortRange(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.HostPorts = nil + } + out.HostPID = in.HostPID + out.HostIPC = in.HostIPC + if err := Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, s); err != nil { + return err + } + if err := Convert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil { + return err + } + if err := Convert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil { + return err + } + if err := Convert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(&in.FSGroup, &out.FSGroup, s); err != nil { + return err + } + out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem + return nil +} + +func Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in *PodSecurityPolicySpec, out *extensions.PodSecurityPolicySpec, s conversion.Scope) error { + return autoConvert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in, out, s) +} + +func autoConvert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in *extensions.PodSecurityPolicySpec, out *PodSecurityPolicySpec, s conversion.Scope) 
error { + out.Privileged = in.Privileged + out.DefaultAddCapabilities = *(*[]api_v1.Capability)(unsafe.Pointer(&in.DefaultAddCapabilities)) + out.RequiredDropCapabilities = *(*[]api_v1.Capability)(unsafe.Pointer(&in.RequiredDropCapabilities)) + out.AllowedCapabilities = *(*[]api_v1.Capability)(unsafe.Pointer(&in.AllowedCapabilities)) + out.Volumes = *(*[]FSType)(unsafe.Pointer(&in.Volumes)) + out.HostNetwork = in.HostNetwork + if in.HostPorts != nil { + in, out := &in.HostPorts, &out.HostPorts + *out = make([]HostPortRange, len(*in)) + for i := range *in { + if err := Convert_extensions_HostPortRange_To_v1beta1_HostPortRange(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.HostPorts = nil + } + out.HostPID = in.HostPID + out.HostIPC = in.HostIPC + if err := Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, s); err != nil { + return err + } + if err := Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil { + return err + } + if err := Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil { + return err + } + if err := Convert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(&in.FSGroup, &out.FSGroup, s); err != nil { + return err + } + out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem + return nil +} + +func Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in *extensions.PodSecurityPolicySpec, out *PodSecurityPolicySpec, s conversion.Scope) error { + return autoConvert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in, out, s) +} + +func autoConvert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in *ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in *ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { + return autoConvert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in, out, s) +} + +func autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaSet, out *ReplicaSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaSet, out *ReplicaSet, s conversion.Scope) error { + return autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in, out, s) +} + +func autoConvert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { + out.Type = extensions.ReplicaSetConditionType(in.Type) + out.Status = api.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func 
Convert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in *ReplicaSetCondition, out *extensions.ReplicaSetCondition, s conversion.Scope) error { + return autoConvert_v1beta1_ReplicaSetCondition_To_extensions_ReplicaSetCondition(in, out, s) +} + +func autoConvert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *ReplicaSetCondition, s conversion.Scope) error { + out.Type = ReplicaSetConditionType(in.Type) + out.Status = api_v1.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +func Convert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in *extensions.ReplicaSetCondition, out *ReplicaSetCondition, s conversion.Scope) error { + return autoConvert_extensions_ReplicaSetCondition_To_v1beta1_ReplicaSetCondition(in, out, s) +} + +func autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in *ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]extensions.ReplicaSet, len(*in)) + for i := range *in { + if err := Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in *ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { + return autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in, out, s) +} + +func autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions.ReplicaSetList, out *ReplicaSetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicaSet, len(*in)) + for i := range *in { + if err := Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]ReplicaSet, 0) + } + return nil +} + +func Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions.ReplicaSetList, out *ReplicaSetList, s conversion.Scope) error { + return autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in, out, s) +} + +func autoConvert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { + if err := v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func autoConvert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *ReplicaSetSpec, s conversion.Scope) error { + if err := v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { + 
out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + out.Conditions = *(*[]extensions.ReplicaSetCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +func Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { + return autoConvert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in, out, s) +} + +func autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *ReplicaSetStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + out.Conditions = *(*[]ReplicaSetCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +func Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *ReplicaSetStatus, s conversion.Scope) error { + return autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in, out, s) +} + +func autoConvert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy(in *ReplicationControllerDummy, out *extensions.ReplicationControllerDummy, s conversion.Scope) error { + return nil +} + +func Convert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy(in *ReplicationControllerDummy, out *extensions.ReplicationControllerDummy, s conversion.Scope) error { + return autoConvert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy(in, out, s) +} + +func autoConvert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in *extensions.ReplicationControllerDummy, out *ReplicationControllerDummy, s conversion.Scope) error { + return nil +} + +func Convert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in *extensions.ReplicationControllerDummy, out *ReplicationControllerDummy, s conversion.Scope) error { + return autoConvert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in, out, s) +} + +func autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error { + out.Revision = in.Revision + return nil +} + +func Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error { + return autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in, out, s) +} + +func autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *RollbackConfig, s conversion.Scope) error { + out.Revision = in.Revision + return nil +} + +func Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *RollbackConfig, s conversion.Scope) error { + return autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in, out, s) +} + +func autoConvert_v1beta1_RollingUpdateDaemonSet_To_extensions_RollingUpdateDaemonSet(in *RollingUpdateDaemonSet, out *extensions.RollingUpdateDaemonSet, s conversion.Scope) error { + // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs 
k8s.io/apimachinery/pkg/util/intstr.IntOrString) + return nil +} + +func autoConvert_extensions_RollingUpdateDaemonSet_To_v1beta1_RollingUpdateDaemonSet(in *extensions.RollingUpdateDaemonSet, out *RollingUpdateDaemonSet, s conversion.Scope) error { + // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) + return nil +} + +func autoConvert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { + // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) + // WARNING: in.MaxSurge requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/util/intstr.IntOrString vs k8s.io/apimachinery/pkg/util/intstr.IntOrString) + return nil +} + +func autoConvert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error { + // WARNING: in.MaxUnavailable requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) + // WARNING: in.MaxSurge requires manual conversion: inconvertible types (k8s.io/apimachinery/pkg/util/intstr.IntOrString vs *k8s.io/apimachinery/pkg/util/intstr.IntOrString) + return nil +} + +func autoConvert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(in *RunAsUserStrategyOptions, out *extensions.RunAsUserStrategyOptions, s conversion.Scope) error { + out.Rule = extensions.RunAsUserStrategy(in.Rule) + out.Ranges = *(*[]extensions.IDRange)(unsafe.Pointer(&in.Ranges)) + return nil +} + +func Convert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(in *RunAsUserStrategyOptions, out *extensions.RunAsUserStrategyOptions, s conversion.Scope) error { + return autoConvert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(in, out, s) +} + +func autoConvert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(in *extensions.RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, s conversion.Scope) error { + out.Rule = RunAsUserStrategy(in.Rule) + out.Ranges = *(*[]IDRange)(unsafe.Pointer(&in.Ranges)) + return nil +} + +func Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(in *extensions.RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, s conversion.Scope) error { + return autoConvert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(in, out, s) +} + +func autoConvert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(in *SELinuxStrategyOptions, out *extensions.SELinuxStrategyOptions, s conversion.Scope) error { + out.Rule = extensions.SELinuxStrategy(in.Rule) + out.SELinuxOptions = (*api.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + return nil +} + +func Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(in *SELinuxStrategyOptions, out *extensions.SELinuxStrategyOptions, s conversion.Scope) error { + return autoConvert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(in, out, s) +} + +func autoConvert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in *extensions.SELinuxStrategyOptions, out 
*SELinuxStrategyOptions, s conversion.Scope) error { + out.Rule = SELinuxStrategy(in.Rule) + out.SELinuxOptions = (*api_v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + return nil +} + +func Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in *extensions.SELinuxStrategyOptions, out *SELinuxStrategyOptions, s conversion.Scope) error { + return autoConvert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in, out, s) +} + +func autoConvert_v1beta1_Scale_To_extensions_Scale(in *Scale, out *extensions.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_Scale_To_extensions_Scale(in *Scale, out *extensions.Scale, s conversion.Scope) error { + return autoConvert_v1beta1_Scale_To_extensions_Scale(in, out, s) +} + +func autoConvert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *Scale, s conversion.Scope) error { + return autoConvert_extensions_Scale_To_v1beta1_Scale(in, out, s) +} + +func autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in *ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +func Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in *ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error { + return autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in, out, s) +} + +func autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +func Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { + return autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s) +} + +func autoConvert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector) + // WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string) + return nil +} + +func autoConvert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(in *SupplementalGroupsStrategyOptions, out *extensions.SupplementalGroupsStrategyOptions, s conversion.Scope) error { + out.Rule = extensions.SupplementalGroupsStrategyType(in.Rule) + out.Ranges = *(*[]extensions.IDRange)(unsafe.Pointer(&in.Ranges)) + return nil +} + 
+func Convert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(in *SupplementalGroupsStrategyOptions, out *extensions.SupplementalGroupsStrategyOptions, s conversion.Scope) error { + return autoConvert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(in, out, s) +} + +func autoConvert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(in *extensions.SupplementalGroupsStrategyOptions, out *SupplementalGroupsStrategyOptions, s conversion.Scope) error { + out.Rule = SupplementalGroupsStrategyType(in.Rule) + out.Ranges = *(*[]IDRange)(unsafe.Pointer(&in.Ranges)) + return nil +} + +func Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(in *extensions.SupplementalGroupsStrategyOptions, out *SupplementalGroupsStrategyOptions, s conversion.Scope) error { + return autoConvert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(in, out, s) +} + +func autoConvert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in *ThirdPartyResource, out *extensions.ThirdPartyResource, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Description = in.Description + out.Versions = *(*[]extensions.APIVersion)(unsafe.Pointer(&in.Versions)) + return nil +} + +func Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in *ThirdPartyResource, out *extensions.ThirdPartyResource, s conversion.Scope) error { + return autoConvert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in, out, s) +} + +func autoConvert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in *extensions.ThirdPartyResource, out *ThirdPartyResource, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Description = in.Description + out.Versions = *(*[]APIVersion)(unsafe.Pointer(&in.Versions)) + return nil +} + +func Convert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in *extensions.ThirdPartyResource, out *ThirdPartyResource, s conversion.Scope) error { + return autoConvert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in, out, s) +} + +func autoConvert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in *ThirdPartyResourceData, out *extensions.ThirdPartyResourceData, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) + return nil +} + +func Convert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in *ThirdPartyResourceData, out *extensions.ThirdPartyResourceData, s conversion.Scope) error { + return autoConvert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in, out, s) +} + +func autoConvert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in *extensions.ThirdPartyResourceData, out *ThirdPartyResourceData, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) + return nil +} + +func Convert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in *extensions.ThirdPartyResourceData, out *ThirdPartyResourceData, s conversion.Scope) error { + return autoConvert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in, out, s) +} + +func autoConvert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList(in *ThirdPartyResourceDataList, out *extensions.ThirdPartyResourceDataList, s conversion.Scope) error { + 
out.ListMeta = in.ListMeta + out.Items = *(*[]extensions.ThirdPartyResourceData)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList(in *ThirdPartyResourceDataList, out *extensions.ThirdPartyResourceDataList, s conversion.Scope) error { + return autoConvert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList(in, out, s) +} + +func autoConvert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in *extensions.ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]ThirdPartyResourceData, 0) + } else { + out.Items = *(*[]ThirdPartyResourceData)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in *extensions.ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, s conversion.Scope) error { + return autoConvert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in, out, s) +} + +func autoConvert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList(in *ThirdPartyResourceList, out *extensions.ThirdPartyResourceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]extensions.ThirdPartyResource)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList(in *ThirdPartyResourceList, out *extensions.ThirdPartyResourceList, s conversion.Scope) error { + return autoConvert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList(in, out, s) +} + +func autoConvert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in *extensions.ThirdPartyResourceList, out *ThirdPartyResourceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]ThirdPartyResource, 0) + } else { + out.Items = *(*[]ThirdPartyResource)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in *extensions.ThirdPartyResourceList, out *ThirdPartyResourceList, s conversion.Scope) error { + return autoConvert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..a8088c8fb --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,1087 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" + api_v1 "k8s.io/client-go/pkg/api/v1" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_APIVersion, InType: reflect.TypeOf(&APIVersion{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CustomMetricCurrentStatus, InType: reflect.TypeOf(&CustomMetricCurrentStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CustomMetricCurrentStatusList, InType: reflect.TypeOf(&CustomMetricCurrentStatusList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CustomMetricTarget, InType: reflect.TypeOf(&CustomMetricTarget{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_CustomMetricTargetList, InType: reflect.TypeOf(&CustomMetricTargetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSet, InType: reflect.TypeOf(&DaemonSet{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSetList, InType: reflect.TypeOf(&DaemonSetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSetSpec, InType: reflect.TypeOf(&DaemonSetSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSetStatus, InType: reflect.TypeOf(&DaemonSetStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DaemonSetUpdateStrategy, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Deployment, InType: reflect.TypeOf(&Deployment{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentCondition, InType: reflect.TypeOf(&DeploymentCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentList, InType: reflect.TypeOf(&DeploymentList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentRollback, InType: reflect.TypeOf(&DeploymentRollback{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentSpec, InType: reflect.TypeOf(&DeploymentSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentStatus, InType: reflect.TypeOf(&DeploymentStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_DeploymentStrategy, InType: reflect.TypeOf(&DeploymentStrategy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_FSGroupStrategyOptions, InType: reflect.TypeOf(&FSGroupStrategyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HTTPIngressPath, InType: reflect.TypeOf(&HTTPIngressPath{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HTTPIngressRuleValue, InType: reflect.TypeOf(&HTTPIngressRuleValue{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_HostPortRange, InType: reflect.TypeOf(&HostPortRange{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IDRange, InType: reflect.TypeOf(&IDRange{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Ingress, InType: reflect.TypeOf(&Ingress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressBackend, InType: reflect.TypeOf(&IngressBackend{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressList, InType: reflect.TypeOf(&IngressList{})}, + 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressRule, InType: reflect.TypeOf(&IngressRule{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressRuleValue, InType: reflect.TypeOf(&IngressRuleValue{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressSpec, InType: reflect.TypeOf(&IngressSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressStatus, InType: reflect.TypeOf(&IngressStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_IngressTLS, InType: reflect.TypeOf(&IngressTLS{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NetworkPolicy, InType: reflect.TypeOf(&NetworkPolicy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NetworkPolicyIngressRule, InType: reflect.TypeOf(&NetworkPolicyIngressRule{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NetworkPolicyList, InType: reflect.TypeOf(&NetworkPolicyList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NetworkPolicyPeer, InType: reflect.TypeOf(&NetworkPolicyPeer{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NetworkPolicyPort, InType: reflect.TypeOf(&NetworkPolicyPort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_NetworkPolicySpec, InType: reflect.TypeOf(&NetworkPolicySpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PodSecurityPolicy, InType: reflect.TypeOf(&PodSecurityPolicy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PodSecurityPolicyList, InType: reflect.TypeOf(&PodSecurityPolicyList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PodSecurityPolicySpec, InType: reflect.TypeOf(&PodSecurityPolicySpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicaSet, InType: reflect.TypeOf(&ReplicaSet{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicaSetCondition, InType: reflect.TypeOf(&ReplicaSetCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicaSetList, InType: reflect.TypeOf(&ReplicaSetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicaSetSpec, InType: reflect.TypeOf(&ReplicaSetSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicaSetStatus, InType: reflect.TypeOf(&ReplicaSetStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ReplicationControllerDummy, InType: reflect.TypeOf(&ReplicationControllerDummy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollbackConfig, InType: reflect.TypeOf(&RollbackConfig{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollingUpdateDaemonSet, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RollingUpdateDeployment, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RunAsUserStrategyOptions, InType: reflect.TypeOf(&RunAsUserStrategyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SELinuxStrategyOptions, InType: reflect.TypeOf(&SELinuxStrategyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Scale, InType: reflect.TypeOf(&Scale{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SupplementalGroupsStrategyOptions, InType: reflect.TypeOf(&SupplementalGroupsStrategyOptions{})}, + 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ThirdPartyResource, InType: reflect.TypeOf(&ThirdPartyResource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ThirdPartyResourceData, InType: reflect.TypeOf(&ThirdPartyResourceData{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ThirdPartyResourceDataList, InType: reflect.TypeOf(&ThirdPartyResourceDataList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ThirdPartyResourceList, InType: reflect.TypeOf(&ThirdPartyResourceList{})}, + ) +} + +func DeepCopy_v1beta1_APIVersion(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*APIVersion) + out := out.(*APIVersion) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_CustomMetricCurrentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CustomMetricCurrentStatus) + out := out.(*CustomMetricCurrentStatus) + *out = *in + out.CurrentValue = in.CurrentValue.DeepCopy() + return nil + } +} + +func DeepCopy_v1beta1_CustomMetricCurrentStatusList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CustomMetricCurrentStatusList) + out := out.(*CustomMetricCurrentStatusList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomMetricCurrentStatus, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_CustomMetricCurrentStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_CustomMetricTarget(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CustomMetricTarget) + out := out.(*CustomMetricTarget) + *out = *in + out.TargetValue = in.TargetValue.DeepCopy() + return nil + } +} + +func DeepCopy_v1beta1_CustomMetricTargetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CustomMetricTargetList) + out := out.(*CustomMetricTargetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomMetricTarget, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_CustomMetricTarget(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_DaemonSet(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSet) + out := out.(*DaemonSet) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_DaemonSetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_DaemonSetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSetList) + out := out.(*DaemonSetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DaemonSet, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_DaemonSet(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_DaemonSetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSetSpec) + out := out.(*DaemonSetSpec) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + if err := 
DeepCopy_v1beta1_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_DaemonSetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSetStatus) + out := out.(*DaemonSetStatus) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_DaemonSetUpdateStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSetUpdateStrategy) + out := out.(*DaemonSetUpdateStrategy) + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDaemonSet) + if err := DeepCopy_v1beta1_RollingUpdateDaemonSet(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1beta1_Deployment(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Deployment) + out := out.(*Deployment) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_DeploymentStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentCondition) + out := out.(*DeploymentCondition) + *out = *in + out.LastUpdateTime = in.LastUpdateTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_v1beta1_DeploymentList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentList) + out := out.(*DeploymentList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_Deployment(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentRollback(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentRollback) + out := out.(*DeploymentRollback) + *out = *in + if in.UpdatedAnnotations != nil { + in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentSpec) + out := out.(*DeploymentSpec) + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, c); err != nil { + return err + } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.RollbackTo != nil { + in, out := &in.RollbackTo, &out.RollbackTo + *out = new(RollbackConfig) + **out = **in + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + *out = new(int32) + 
**out = **in + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentStatus) + out := out.(*DeploymentStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_DeploymentCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_DeploymentStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentStrategy) + out := out.(*DeploymentStrategy) + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDeployment) + if err := DeepCopy_v1beta1_RollingUpdateDeployment(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1beta1_FSGroupStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FSGroupStrategyOptions) + out := out.(*FSGroupStrategyOptions) + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_HTTPIngressPath(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPIngressPath) + out := out.(*HTTPIngressPath) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_HTTPIngressRuleValue(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPIngressRuleValue) + out := out.(*HTTPIngressRuleValue) + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]HTTPIngressPath, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_HostPortRange(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HostPortRange) + out := out.(*HostPortRange) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_IDRange(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IDRange) + out := out.(*IDRange) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_Ingress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Ingress) + out := out.(*Ingress) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_IngressSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_IngressStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_IngressBackend(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressBackend) + out := out.(*IngressBackend) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_IngressList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressList) + out := out.(*IngressList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Ingress, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_Ingress(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_IngressRule(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressRule) + out := out.(*IngressRule) + *out = *in + if err := DeepCopy_v1beta1_IngressRuleValue(&in.IngressRuleValue, 
&out.IngressRuleValue, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_IngressRuleValue(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressRuleValue) + out := out.(*IngressRuleValue) + *out = *in + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(HTTPIngressRuleValue) + if err := DeepCopy_v1beta1_HTTPIngressRuleValue(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_v1beta1_IngressSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressSpec) + out := out.(*IngressSpec) + *out = *in + if in.Backend != nil { + in, out := &in.Backend, &out.Backend + *out = new(IngressBackend) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]IngressTLS, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_IngressTLS(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]IngressRule, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_IngressRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_IngressStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressStatus) + out := out.(*IngressStatus) + *out = *in + if err := api_v1.DeepCopy_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_IngressTLS(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressTLS) + out := out.(*IngressTLS) + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_NetworkPolicy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicy) + out := out.(*NetworkPolicy) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_NetworkPolicySpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_NetworkPolicyIngressRule(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicyIngressRule) + out := out.(*NetworkPolicyIngressRule) + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]NetworkPolicyPort, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_NetworkPolicyPort(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.From != nil { + in, out := &in.From, &out.From + *out = make([]NetworkPolicyPeer, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_NetworkPolicyPeer(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_NetworkPolicyList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicyList) + out := out.(*NetworkPolicyList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NetworkPolicy, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_NetworkPolicy(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_NetworkPolicyPeer(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicyPeer) 
+ out := out.(*NetworkPolicyPeer) + *out = *in + if in.PodSelector != nil { + in, out := &in.PodSelector, &out.PodSelector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + return nil + } +} + +func DeepCopy_v1beta1_NetworkPolicyPort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicyPort) + out := out.(*NetworkPolicyPort) + *out = *in + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(api_v1.Protocol) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(intstr.IntOrString) + **out = **in + } + return nil + } +} + +func DeepCopy_v1beta1_NetworkPolicySpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicySpec) + out := out.(*NetworkPolicySpec) + *out = *in + if newVal, err := c.DeepCopy(&in.PodSelector); err != nil { + return err + } else { + out.PodSelector = *newVal.(*v1.LabelSelector) + } + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]NetworkPolicyIngressRule, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_NetworkPolicyIngressRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_PodSecurityPolicy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityPolicy) + out := out.(*PodSecurityPolicy) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_PodSecurityPolicySpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_PodSecurityPolicyList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityPolicyList) + out := out.(*PodSecurityPolicyList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodSecurityPolicy, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_PodSecurityPolicy(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_PodSecurityPolicySpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityPolicySpec) + out := out.(*PodSecurityPolicySpec) + *out = *in + if in.DefaultAddCapabilities != nil { + in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities + *out = make([]api_v1.Capability, len(*in)) + copy(*out, *in) + } + if in.RequiredDropCapabilities != nil { + in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities + *out = make([]api_v1.Capability, len(*in)) + copy(*out, *in) + } + if in.AllowedCapabilities != nil { + in, out := &in.AllowedCapabilities, &out.AllowedCapabilities + *out = make([]api_v1.Capability, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]FSType, len(*in)) + copy(*out, *in) + } + if in.HostPorts != nil { + in, out := &in.HostPorts, &out.HostPorts + *out = make([]HostPortRange, len(*in)) + copy(*out, *in) + } + if err := DeepCopy_v1beta1_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, c); err != nil { + return err + } + if err := 
DeepCopy_v1beta1_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_FSGroupStrategyOptions(&in.FSGroup, &out.FSGroup, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_ReplicaSet(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSet) + out := out.(*ReplicaSet) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_ReplicaSetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_ReplicaSetStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_ReplicaSetCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSetCondition) + out := out.(*ReplicaSetCondition) + *out = *in + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_v1beta1_ReplicaSetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSetList) + out := out.(*ReplicaSetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicaSet, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_ReplicaSet(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_ReplicaSetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSetSpec) + out := out.(*ReplicaSetSpec) + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := api_v1.DeepCopy_v1_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_ReplicaSetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSetStatus) + out := out.(*ReplicaSetStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicaSetCondition, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_ReplicaSetCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_ReplicationControllerDummy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerDummy) + out := out.(*ReplicationControllerDummy) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_RollbackConfig(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollbackConfig) + out := out.(*RollbackConfig) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_RollingUpdateDaemonSet(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollingUpdateDaemonSet) + out := out.(*RollingUpdateDaemonSet) + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + return nil + } +} + +func DeepCopy_v1beta1_RollingUpdateDeployment(in interface{}, out interface{}, c 
*conversion.Cloner) error { + { + in := in.(*RollingUpdateDeployment) + out := out.(*RollingUpdateDeployment) + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in + } + return nil + } +} + +func DeepCopy_v1beta1_RunAsUserStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RunAsUserStrategyOptions) + out := out.(*RunAsUserStrategyOptions) + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_SELinuxStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SELinuxStrategyOptions) + out := out.(*SELinuxStrategyOptions) + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(api_v1.SELinuxOptions) + **out = **in + } + return nil + } +} + +func DeepCopy_v1beta1_Scale(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Scale) + out := out.(*Scale) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_ScaleStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_ScaleSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleSpec) + out := out.(*ScaleSpec) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_ScaleStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleStatus) + out := out.(*ScaleStatus) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_v1beta1_SupplementalGroupsStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SupplementalGroupsStrategyOptions) + out := out.(*SupplementalGroupsStrategyOptions) + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_ThirdPartyResource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ThirdPartyResource) + out := out.(*ThirdPartyResource) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]APIVersion, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_ThirdPartyResourceData(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ThirdPartyResourceData) + out := out.(*ThirdPartyResourceData) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_ThirdPartyResourceDataList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ThirdPartyResourceDataList) + out := 
out.(*ThirdPartyResourceDataList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ThirdPartyResourceData, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_ThirdPartyResourceData(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_ThirdPartyResourceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ThirdPartyResourceList) + out := out.(*ThirdPartyResourceList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ThirdPartyResource, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_ThirdPartyResource(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.defaults.go new file mode 100644 index 000000000..770faa513 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1/zz_generated.defaults.go @@ -0,0 +1,475 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/client-go/pkg/api/v1" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&DaemonSet{}, func(obj interface{}) { SetObjectDefaults_DaemonSet(obj.(*DaemonSet)) }) + scheme.AddTypeDefaultingFunc(&DaemonSetList{}, func(obj interface{}) { SetObjectDefaults_DaemonSetList(obj.(*DaemonSetList)) }) + scheme.AddTypeDefaultingFunc(&Deployment{}, func(obj interface{}) { SetObjectDefaults_Deployment(obj.(*Deployment)) }) + scheme.AddTypeDefaultingFunc(&DeploymentList{}, func(obj interface{}) { SetObjectDefaults_DeploymentList(obj.(*DeploymentList)) }) + scheme.AddTypeDefaultingFunc(&NetworkPolicy{}, func(obj interface{}) { SetObjectDefaults_NetworkPolicy(obj.(*NetworkPolicy)) }) + scheme.AddTypeDefaultingFunc(&NetworkPolicyList{}, func(obj interface{}) { SetObjectDefaults_NetworkPolicyList(obj.(*NetworkPolicyList)) }) + scheme.AddTypeDefaultingFunc(&ReplicaSet{}, func(obj interface{}) { SetObjectDefaults_ReplicaSet(obj.(*ReplicaSet)) }) + scheme.AddTypeDefaultingFunc(&ReplicaSetList{}, func(obj interface{}) { SetObjectDefaults_ReplicaSetList(obj.(*ReplicaSetList)) }) + return nil +} + +func SetObjectDefaults_DaemonSet(in *DaemonSet) { + SetDefaults_DaemonSet(in) + v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + v1.SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if 
a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_DaemonSetList(in *DaemonSetList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_DaemonSet(a) + } +} + +func SetObjectDefaults_Deployment(in *Deployment) { + SetDefaults_Deployment(in) + v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + v1.SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + 
v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_DeploymentList(in *DeploymentList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Deployment(a) + } +} + +func SetObjectDefaults_NetworkPolicy(in *NetworkPolicy) { + SetDefaults_NetworkPolicy(in) +} + +func SetObjectDefaults_NetworkPolicyList(in *NetworkPolicyList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_NetworkPolicy(a) + } +} + +func SetObjectDefaults_ReplicaSet(in *ReplicaSet) { + SetDefaults_ReplicaSet(in) + v1.SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + v1.SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if 
a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + v1.SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + v1.SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + v1.SetDefaults_ResourceList(&a.Resources.Limits) + v1.SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + v1.SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + v1.SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + v1.SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_ReplicaSetList(in *ReplicaSetList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ReplicaSet(a) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/extensions/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/extensions/zz_generated.deepcopy.go new file mode 100644 index 000000000..93409cb61 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/extensions/zz_generated.deepcopy.go @@ -0,0 +1,1059 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package extensions + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" + api "k8s.io/client-go/pkg/api" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_APIVersion, InType: reflect.TypeOf(&APIVersion{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_CustomMetricCurrentStatus, InType: reflect.TypeOf(&CustomMetricCurrentStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_CustomMetricCurrentStatusList, InType: reflect.TypeOf(&CustomMetricCurrentStatusList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_CustomMetricTarget, InType: reflect.TypeOf(&CustomMetricTarget{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_CustomMetricTargetList, InType: reflect.TypeOf(&CustomMetricTargetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSet, InType: reflect.TypeOf(&DaemonSet{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetList, InType: reflect.TypeOf(&DaemonSetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetSpec, InType: reflect.TypeOf(&DaemonSetSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetStatus, InType: reflect.TypeOf(&DaemonSetStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DaemonSetUpdateStrategy, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_Deployment, InType: reflect.TypeOf(&Deployment{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentCondition, InType: reflect.TypeOf(&DeploymentCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentList, InType: reflect.TypeOf(&DeploymentList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentRollback, InType: reflect.TypeOf(&DeploymentRollback{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentSpec, InType: reflect.TypeOf(&DeploymentSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentStatus, InType: reflect.TypeOf(&DeploymentStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_DeploymentStrategy, InType: reflect.TypeOf(&DeploymentStrategy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_FSGroupStrategyOptions, InType: reflect.TypeOf(&FSGroupStrategyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_HTTPIngressPath, InType: reflect.TypeOf(&HTTPIngressPath{})}, + 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_HTTPIngressRuleValue, InType: reflect.TypeOf(&HTTPIngressRuleValue{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_HostPortRange, InType: reflect.TypeOf(&HostPortRange{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IDRange, InType: reflect.TypeOf(&IDRange{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_Ingress, InType: reflect.TypeOf(&Ingress{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressBackend, InType: reflect.TypeOf(&IngressBackend{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressList, InType: reflect.TypeOf(&IngressList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressRule, InType: reflect.TypeOf(&IngressRule{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressRuleValue, InType: reflect.TypeOf(&IngressRuleValue{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressSpec, InType: reflect.TypeOf(&IngressSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressStatus, InType: reflect.TypeOf(&IngressStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_IngressTLS, InType: reflect.TypeOf(&IngressTLS{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicy, InType: reflect.TypeOf(&NetworkPolicy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicyIngressRule, InType: reflect.TypeOf(&NetworkPolicyIngressRule{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicyList, InType: reflect.TypeOf(&NetworkPolicyList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicyPeer, InType: reflect.TypeOf(&NetworkPolicyPeer{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicyPort, InType: reflect.TypeOf(&NetworkPolicyPort{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_NetworkPolicySpec, InType: reflect.TypeOf(&NetworkPolicySpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_PodSecurityPolicy, InType: reflect.TypeOf(&PodSecurityPolicy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_PodSecurityPolicyList, InType: reflect.TypeOf(&PodSecurityPolicyList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_PodSecurityPolicySpec, InType: reflect.TypeOf(&PodSecurityPolicySpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSet, InType: reflect.TypeOf(&ReplicaSet{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSetCondition, InType: reflect.TypeOf(&ReplicaSetCondition{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSetList, InType: reflect.TypeOf(&ReplicaSetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSetSpec, InType: reflect.TypeOf(&ReplicaSetSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicaSetStatus, InType: reflect.TypeOf(&ReplicaSetStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ReplicationControllerDummy, InType: reflect.TypeOf(&ReplicationControllerDummy{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RollbackConfig, InType: reflect.TypeOf(&RollbackConfig{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RollingUpdateDaemonSet, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RollingUpdateDeployment, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, + 
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_RunAsUserStrategyOptions, InType: reflect.TypeOf(&RunAsUserStrategyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_SELinuxStrategyOptions, InType: reflect.TypeOf(&SELinuxStrategyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_Scale, InType: reflect.TypeOf(&Scale{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_SupplementalGroupsStrategyOptions, InType: reflect.TypeOf(&SupplementalGroupsStrategyOptions{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ThirdPartyResource, InType: reflect.TypeOf(&ThirdPartyResource{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ThirdPartyResourceData, InType: reflect.TypeOf(&ThirdPartyResourceData{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ThirdPartyResourceDataList, InType: reflect.TypeOf(&ThirdPartyResourceDataList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ThirdPartyResourceList, InType: reflect.TypeOf(&ThirdPartyResourceList{})}, + ) +} + +func DeepCopy_extensions_APIVersion(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*APIVersion) + out := out.(*APIVersion) + *out = *in + return nil + } +} + +func DeepCopy_extensions_CustomMetricCurrentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CustomMetricCurrentStatus) + out := out.(*CustomMetricCurrentStatus) + *out = *in + out.CurrentValue = in.CurrentValue.DeepCopy() + return nil + } +} + +func DeepCopy_extensions_CustomMetricCurrentStatusList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CustomMetricCurrentStatusList) + out := out.(*CustomMetricCurrentStatusList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomMetricCurrentStatus, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_CustomMetricCurrentStatus(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_CustomMetricTarget(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CustomMetricTarget) + out := out.(*CustomMetricTarget) + *out = *in + out.TargetValue = in.TargetValue.DeepCopy() + return nil + } +} + +func DeepCopy_extensions_CustomMetricTargetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*CustomMetricTargetList) + out := out.(*CustomMetricTargetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomMetricTarget, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_CustomMetricTarget(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_DaemonSet(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSet) + out := out.(*DaemonSet) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_extensions_DaemonSetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_DaemonSetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := 
in.(*DaemonSetList) + out := out.(*DaemonSetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DaemonSet, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_DaemonSet(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_DaemonSetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSetSpec) + out := out.(*DaemonSetSpec) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + if err := DeepCopy_extensions_DaemonSetUpdateStrategy(&in.UpdateStrategy, &out.UpdateStrategy, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_DaemonSetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSetStatus) + out := out.(*DaemonSetStatus) + *out = *in + return nil + } +} + +func DeepCopy_extensions_DaemonSetUpdateStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DaemonSetUpdateStrategy) + out := out.(*DaemonSetUpdateStrategy) + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDaemonSet) + **out = **in + } + return nil + } +} + +func DeepCopy_extensions_Deployment(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Deployment) + out := out.(*Deployment) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_extensions_DeploymentSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_extensions_DeploymentStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_DeploymentCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentCondition) + out := out.(*DeploymentCondition) + *out = *in + out.LastUpdateTime = in.LastUpdateTime.DeepCopy() + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_extensions_DeploymentList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentList) + out := out.(*DeploymentList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_Deployment(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_DeploymentRollback(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentRollback) + out := out.(*DeploymentRollback) + *out = *in + if in.UpdatedAnnotations != nil { + in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_extensions_DeploymentSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentSpec) + out := out.(*DeploymentSpec) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = 
newVal.(*v1.LabelSelector) + } + } + if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + if err := DeepCopy_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, c); err != nil { + return err + } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.RollbackTo != nil { + in, out := &in.RollbackTo, &out.RollbackTo + *out = new(RollbackConfig) + **out = **in + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + *out = new(int32) + **out = **in + } + return nil + } +} + +func DeepCopy_extensions_DeploymentStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentStatus) + out := out.(*DeploymentStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_DeploymentCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_DeploymentStrategy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*DeploymentStrategy) + out := out.(*DeploymentStrategy) + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDeployment) + **out = **in + } + return nil + } +} + +func DeepCopy_extensions_FSGroupStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*FSGroupStrategyOptions) + out := out.(*FSGroupStrategyOptions) + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_extensions_HTTPIngressPath(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPIngressPath) + out := out.(*HTTPIngressPath) + *out = *in + return nil + } +} + +func DeepCopy_extensions_HTTPIngressRuleValue(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HTTPIngressRuleValue) + out := out.(*HTTPIngressRuleValue) + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]HTTPIngressPath, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_extensions_HostPortRange(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*HostPortRange) + out := out.(*HostPortRange) + *out = *in + return nil + } +} + +func DeepCopy_extensions_IDRange(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IDRange) + out := out.(*IDRange) + *out = *in + return nil + } +} + +func DeepCopy_extensions_Ingress(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Ingress) + out := out.(*Ingress) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_extensions_IngressSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_extensions_IngressStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_IngressBackend(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressBackend) + out := out.(*IngressBackend) + *out = *in + return nil + } +} + +func 
DeepCopy_extensions_IngressList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressList) + out := out.(*IngressList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Ingress, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_Ingress(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_IngressRule(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressRule) + out := out.(*IngressRule) + *out = *in + if err := DeepCopy_extensions_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_IngressRuleValue(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressRuleValue) + out := out.(*IngressRuleValue) + *out = *in + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(HTTPIngressRuleValue) + if err := DeepCopy_extensions_HTTPIngressRuleValue(*in, *out, c); err != nil { + return err + } + } + return nil + } +} + +func DeepCopy_extensions_IngressSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressSpec) + out := out.(*IngressSpec) + *out = *in + if in.Backend != nil { + in, out := &in.Backend, &out.Backend + *out = new(IngressBackend) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]IngressTLS, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_IngressTLS(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]IngressRule, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_IngressRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_IngressStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressStatus) + out := out.(*IngressStatus) + *out = *in + if err := api.DeepCopy_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_IngressTLS(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*IngressTLS) + out := out.(*IngressTLS) + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_extensions_NetworkPolicy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicy) + out := out.(*NetworkPolicy) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_extensions_NetworkPolicySpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_NetworkPolicyIngressRule(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicyIngressRule) + out := out.(*NetworkPolicyIngressRule) + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]NetworkPolicyPort, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_NetworkPolicyPort(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.From != nil { + in, out := &in.From, &out.From + *out = make([]NetworkPolicyPeer, len(*in)) + for i := range *in { + if err := 
DeepCopy_extensions_NetworkPolicyPeer(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_NetworkPolicyList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicyList) + out := out.(*NetworkPolicyList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NetworkPolicy, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_NetworkPolicy(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_NetworkPolicyPeer(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicyPeer) + out := out.(*NetworkPolicyPeer) + *out = *in + if in.PodSelector != nil { + in, out := &in.PodSelector, &out.PodSelector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + return nil + } +} + +func DeepCopy_extensions_NetworkPolicyPort(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicyPort) + out := out.(*NetworkPolicyPort) + *out = *in + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(api.Protocol) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(intstr.IntOrString) + **out = **in + } + return nil + } +} + +func DeepCopy_extensions_NetworkPolicySpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*NetworkPolicySpec) + out := out.(*NetworkPolicySpec) + *out = *in + if newVal, err := c.DeepCopy(&in.PodSelector); err != nil { + return err + } else { + out.PodSelector = *newVal.(*v1.LabelSelector) + } + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]NetworkPolicyIngressRule, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_NetworkPolicyIngressRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_PodSecurityPolicy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityPolicy) + out := out.(*PodSecurityPolicy) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_extensions_PodSecurityPolicySpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_PodSecurityPolicyList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityPolicyList) + out := out.(*PodSecurityPolicyList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodSecurityPolicy, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_PodSecurityPolicy(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_PodSecurityPolicySpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodSecurityPolicySpec) + out := out.(*PodSecurityPolicySpec) + *out = *in + if in.DefaultAddCapabilities != nil { + in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities + *out = make([]api.Capability, len(*in)) + copy(*out, *in) + } + if 
in.RequiredDropCapabilities != nil { + in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities + *out = make([]api.Capability, len(*in)) + copy(*out, *in) + } + if in.AllowedCapabilities != nil { + in, out := &in.AllowedCapabilities, &out.AllowedCapabilities + *out = make([]api.Capability, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]FSType, len(*in)) + copy(*out, *in) + } + if in.HostPorts != nil { + in, out := &in.HostPorts, &out.HostPorts + *out = make([]HostPortRange, len(*in)) + copy(*out, *in) + } + if err := DeepCopy_extensions_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, c); err != nil { + return err + } + if err := DeepCopy_extensions_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, c); err != nil { + return err + } + if err := DeepCopy_extensions_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, c); err != nil { + return err + } + if err := DeepCopy_extensions_FSGroupStrategyOptions(&in.FSGroup, &out.FSGroup, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_ReplicaSet(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSet) + out := out.(*ReplicaSet) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_extensions_ReplicaSetStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_ReplicaSetCondition(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSetCondition) + out := out.(*ReplicaSetCondition) + *out = *in + out.LastTransitionTime = in.LastTransitionTime.DeepCopy() + return nil + } +} + +func DeepCopy_extensions_ReplicaSetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSetList) + out := out.(*ReplicaSetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicaSet, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_ReplicaSet(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_ReplicaSetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSetSpec) + out := out.(*ReplicaSetSpec) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + if err := api.DeepCopy_api_PodTemplateSpec(&in.Template, &out.Template, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_ReplicaSetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicaSetStatus) + out := out.(*ReplicaSetStatus) + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicaSetCondition, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_ReplicaSetCondition(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_ReplicationControllerDummy(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ReplicationControllerDummy) + out := out.(*ReplicationControllerDummy) + *out = *in + return nil + 
} +} + +func DeepCopy_extensions_RollbackConfig(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollbackConfig) + out := out.(*RollbackConfig) + *out = *in + return nil + } +} + +func DeepCopy_extensions_RollingUpdateDaemonSet(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollingUpdateDaemonSet) + out := out.(*RollingUpdateDaemonSet) + *out = *in + return nil + } +} + +func DeepCopy_extensions_RollingUpdateDeployment(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RollingUpdateDeployment) + out := out.(*RollingUpdateDeployment) + *out = *in + return nil + } +} + +func DeepCopy_extensions_RunAsUserStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RunAsUserStrategyOptions) + out := out.(*RunAsUserStrategyOptions) + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_extensions_SELinuxStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SELinuxStrategyOptions) + out := out.(*SELinuxStrategyOptions) + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(api.SELinuxOptions) + **out = **in + } + return nil + } +} + +func DeepCopy_extensions_Scale(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Scale) + out := out.(*Scale) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_extensions_ScaleStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_extensions_ScaleSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleSpec) + out := out.(*ScaleSpec) + *out = *in + return nil + } +} + +func DeepCopy_extensions_ScaleStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ScaleStatus) + out := out.(*ScaleStatus) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + return nil + } +} + +func DeepCopy_extensions_SupplementalGroupsStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*SupplementalGroupsStrategyOptions) + out := out.(*SupplementalGroupsStrategyOptions) + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_extensions_ThirdPartyResource(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ThirdPartyResource) + out := out.(*ThirdPartyResource) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]APIVersion, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_extensions_ThirdPartyResourceData(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ThirdPartyResourceData) + out := out.(*ThirdPartyResourceData) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + 
if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_extensions_ThirdPartyResourceDataList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ThirdPartyResourceDataList) + out := out.(*ThirdPartyResourceDataList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ThirdPartyResourceData, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_ThirdPartyResourceData(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_extensions_ThirdPartyResourceList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ThirdPartyResourceList) + out := out.(*ThirdPartyResourceList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ThirdPartyResource, len(*in)) + for i := range *in { + if err := DeepCopy_extensions_ThirdPartyResource(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/OWNERS b/vendor/k8s.io/client-go/pkg/apis/policy/OWNERS new file mode 100755 index 000000000..8d0eea516 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/OWNERS @@ -0,0 +1,14 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- deads2k +- caesarxuchao +- sttts +- ncdc +- timothysc +- dims +- mml +- mbohlool +- david-mcmahon +- jianhuiz diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/doc.go b/vendor/k8s.io/client-go/pkg/apis/policy/doc.go new file mode 100644 index 000000000..8feeef8e4 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/install/install.go b/vendor/k8s.io/client-go/pkg/apis/policy/install/install.go new file mode 100644 index 000000000..6253d5bc2 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/install/install.go @@ -0,0 +1,49 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. 
+package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/policy" + "k8s.io/client-go/pkg/apis/policy/v1beta1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: policy.GroupName, + VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/policy", + AddInternalObjectsToScheme: policy.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/register.go b/vendor/k8s.io/client-go/pkg/apis/policy/register.go new file mode 100644 index 000000000..5aadc3f1b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/register.go @@ -0,0 +1,54 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "policy" + +// SchemeGroupVersion is the group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + // TODO this gets cleaned up when the types are fixed + scheme.AddKnownTypes(SchemeGroupVersion, + &PodDisruptionBudget{}, + &PodDisruptionBudgetList{}, + &Eviction{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/types.go b/vendor/k8s.io/client-go/pkg/apis/policy/types.go new file mode 100644 index 000000000..ef2ff713a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/types.go @@ -0,0 +1,113 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. +type PodDisruptionBudgetSpec struct { + // An eviction is allowed if at least "minAvailable" pods selected by + // "selector" will still be available after the eviction, i.e. even in the + // absence of the evicted pod. So for example you can prevent all voluntary + // evictions by specifying "100%". + // +optional + MinAvailable intstr.IntOrString + + // Label query over pods whose evictions are managed by the disruption + // budget. + // +optional + Selector *metav1.LabelSelector +} + +// PodDisruptionBudgetStatus represents information about the status of a +// PodDisruptionBudget. Status may trail the actual state of a system. +type PodDisruptionBudgetStatus struct { + // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other + // status information is valid only if observedGeneration equals the PDB's object generation. + // +optional + ObservedGeneration int64 + + // DisruptedPods contains information about pods whose eviction was + // processed by the API server eviction subresource handler but has not + // yet been observed by the PodDisruptionBudget controller. + // A pod will be in this map from the time when the API server processed the + // eviction request to the time when the pod is seen by the PDB controller + // as having been marked for deletion (or after a timeout). The key in the map is the name of the pod + // and the value is the time when the API server processed the eviction request. If + // the deletion didn't occur and a pod is still there, it will be removed from + // the list automatically by the PodDisruptionBudget controller after some time. + // If everything goes smoothly, this map should be empty most of the time. + // A large number of entries in the map may indicate problems with pod deletions. + DisruptedPods map[string]metav1.Time + + // Number of pod disruptions that are currently allowed. + PodDisruptionsAllowed int32 + + // current number of healthy pods + CurrentHealthy int32 + + // minimum desired number of healthy pods + DesiredHealthy int32 + + // total number of pods counted by this disruption budget + ExpectedPods int32 +} + +// +genclient=true + +// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods +type PodDisruptionBudget struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Specification of the desired behavior of the PodDisruptionBudget. + // +optional + Spec PodDisruptionBudgetSpec + // Most recently observed status of the PodDisruptionBudget. + // +optional + Status PodDisruptionBudgetStatus +} + +// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. +type PodDisruptionBudgetList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + Items []PodDisruptionBudget +} + +// +genclient=true +// +noMethods=true + +// Eviction evicts a pod from its node subject to certain policies and safety constraints. +// This is a subresource of Pod. 
A request to cause such an eviction is +// created by POSTing to .../pods/<pod name>/eviction. +type Eviction struct { + metav1.TypeMeta + + // ObjectMeta describes the pod that is being evicted. + // +optional + metav1.ObjectMeta + + // DeleteOptions may be provided + // +optional + DeleteOptions *metav1.DeleteOptions +} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/doc.go new file mode 100644 index 000000000..aadbe1416 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package policy is for any kind of policy object. Suitable examples, even if +// they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, +// NetworkPolicy, etc. +package v1beta1 diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.pb.go new file mode 100644 index 000000000..3b68f56fa --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.pb.go @@ -0,0 +1,1375 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/policy/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/policy/v1beta1/generated.proto + + It has these top-level messages: + Eviction + PodDisruptionBudget + PodDisruptionBudgetList + PodDisruptionBudgetSpec + PodDisruptionBudgetStatus +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *Eviction) Reset() { *m = Eviction{} } +func (*Eviction) ProtoMessage() {} +func (*Eviction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } +func (*PodDisruptionBudget) ProtoMessage() {} +func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } +func (*PodDisruptionBudgetList) ProtoMessage() {} +func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } +func (*PodDisruptionBudgetSpec) ProtoMessage() {} +func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } +func (*PodDisruptionBudgetStatus) ProtoMessage() {} +func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{4} +} + +func init() { + proto.RegisterType((*Eviction)(nil), "k8s.io.client-go.pkg.apis.policy.v1beta1.Eviction") + proto.RegisterType((*PodDisruptionBudget)(nil), "k8s.io.client-go.pkg.apis.policy.v1beta1.PodDisruptionBudget") + proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.client-go.pkg.apis.policy.v1beta1.PodDisruptionBudgetList") + proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.client-go.pkg.apis.policy.v1beta1.PodDisruptionBudgetSpec") + proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.client-go.pkg.apis.policy.v1beta1.PodDisruptionBudgetStatus") +} +func (m *Eviction) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Eviction) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + if m.DeleteOptions != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DeleteOptions.Size())) + n2, err := m.DeleteOptions.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *PodDisruptionBudget) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodDisruptionBudget) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n3, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n4, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) + n5, err := m.Status.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil +} + +func (m *PodDisruptionBudgetList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + 
return data[:n], nil +} + +func (m *PodDisruptionBudgetList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodDisruptionBudgetSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodDisruptionBudgetSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.MinAvailable.Size())) + n7, err := m.MinAvailable.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + if m.Selector != nil { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n8, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func (m *PodDisruptionBudgetStatus) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodDisruptionBudgetStatus) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0x8 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) + if len(m.DisruptedPods) > 0 { + for k := range m.DisruptedPods { + data[i] = 0x12 + i++ + v := m.DisruptedPods[k] + msgSize := (&v).Size() + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64((&v).Size())) + n9, err := (&v).MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + } + } + data[i] = 0x18 + i++ + i = encodeVarintGenerated(data, i, uint64(m.PodDisruptionsAllowed)) + data[i] = 0x20 + i++ + i = encodeVarintGenerated(data, i, uint64(m.CurrentHealthy)) + data[i] = 0x28 + i++ + i = encodeVarintGenerated(data, i, uint64(m.DesiredHealthy)) + data[i] = 0x30 + i++ + i = encodeVarintGenerated(data, i, uint64(m.ExpectedPods)) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *Eviction) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + if m.DeleteOptions != nil { + l = m.DeleteOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodDisruptionBudget) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodDisruptionBudgetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodDisruptionBudgetSpec) Size() (n int) { + var l int + _ = l + l = m.MinAvailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodDisruptionBudgetStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if len(m.DisruptedPods) > 0 { + for k, v := range m.DisruptedPods { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + n += 1 + sovGenerated(uint64(m.PodDisruptionsAllowed)) + n += 1 + sovGenerated(uint64(m.CurrentHealthy)) + n += 1 + sovGenerated(uint64(m.DesiredHealthy)) + n += 1 + sovGenerated(uint64(m.ExpectedPods)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Eviction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Eviction{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudget) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDisruptionBudget{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodDisruptionBudgetSpec", "PodDisruptionBudgetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodDisruptionBudgetStatus", "PodDisruptionBudgetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDisruptionBudgetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDisruptionBudgetSpec{`, + `MinAvailable:` + 
strings.Replace(strings.Replace(this.MinAvailable.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetStatus) String() string { + if this == nil { + return "nil" + } + keysForDisruptedPods := make([]string, 0, len(this.DisruptedPods)) + for k := range this.DisruptedPods { + keysForDisruptedPods = append(keysForDisruptedPods, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) + mapStringForDisruptedPods := "map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time{" + for _, k := range keysForDisruptedPods { + mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k]) + } + mapStringForDisruptedPods += "}" + s := strings.Join([]string{`&PodDisruptionBudgetStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `DisruptedPods:` + mapStringForDisruptedPods + `,`, + `PodDisruptionsAllowed:` + fmt.Sprintf("%v", this.PodDisruptionsAllowed) + `,`, + `CurrentHealthy:` + fmt.Sprintf("%v", this.CurrentHealthy) + `,`, + `DesiredHealthy:` + fmt.Sprintf("%v", this.DesiredHealthy) + `,`, + `ExpectedPods:` + fmt.Sprintf("%v", this.ExpectedPods) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Eviction) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Eviction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Eviction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeleteOptions == nil { + m.DeleteOptions = &k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions{} + } + if err := 
m.DeleteOptions.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudget) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudget: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudget: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudgetList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudgetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudgetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodDisruptionBudget{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudgetSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudgetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudgetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MinAvailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MinAvailable.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudgetStatus) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudgetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudgetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DisruptedPods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + if m.DisruptedPods == nil { + m.DisruptedPods = make(map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time) + } + m.DisruptedPods[mapkey] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PodDisruptionsAllowed", wireType) + } + m.PodDisruptionsAllowed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.PodDisruptionsAllowed |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentHealthy", wireType) + } + m.CurrentHealthy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.CurrentHealthy |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredHealthy", wireType) + } + m.DesiredHealthy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.DesiredHealthy |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPods", wireType) + } + m.ExpectedPods = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + m.ExpectedPods |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] 
< 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 773 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x94, 0xcb, 0x6e, 0xf3, 0x44, + 0x18, 0x86, 0xe3, 0x26, 0x29, 0x61, 0x9a, 0x54, 0x65, 0xa0, 0x10, 0x22, 0xe1, 0xa2, 0xac, 0x5a, + 0x04, 0x63, 0xda, 0x22, 0x54, 0x58, 0x54, 0xd4, 0xa4, 0x82, 0xa2, 0x56, 0xa9, 0x5c, 0x24, 0x24, + 0x04, 0x12, 0x63, 0xfb, 0xc3, 0x99, 0xc6, 0x27, 0x8d, 0xc7, 0xa1, 0xd9, 0x71, 0x09, 0x2c, 0xb8, + 0xa8, 0x4a, 0x6c, 0xba, 0x44, 0x08, 0x55, 0x34, 0x70, 0x0b, 0xec, 0x91, 0xc7, 0x93, 0x83, 0x9b, + 0x44, 0x0d, 0xea, 0xaf, 0x7f, 0xe7, 0x39, 0x3c, 0xef, 0xfb, 0x9d, 0xc6, 0xe8, 0x93, 0xfe, 0x51, + 0x42, 0x58, 0x64, 0xf4, 0x53, 0x1b, 0x78, 0x08, 0x02, 0x12, 0x23, 0xee, 0x7b, 0x06, 0x8d, 0x59, + 0x62, 0xc4, 0x91, 0xcf, 0x9c, 0xa1, 0x31, 0xd8, 0xb7, 0x41, 0xd0, 0x7d, 0xc3, 0x83, 0x10, 0x38, + 0x15, 0xe0, 0x92, 0x98, 0x47, 0x22, 0xc2, 0x7b, 0x39, 0x4a, 0xa6, 0x28, 0x89, 0xfb, 0x1e, 0xc9, + 0x50, 0x92, 0xa3, 0x44, 0xa1, 0xad, 0x0f, 0x3c, 0x26, 0x7a, 0xa9, 0x4d, 0x9c, 0x28, 0x30, 0xbc, + 0xc8, 0x8b, 0x0c, 0xa9, 0x60, 0xa7, 0x3f, 0xca, 0x95, 0x5c, 0xc8, 0xaf, 0x5c, 0xb9, 0xf5, 0x91, + 0x0a, 0x8a, 0xc6, 0x2c, 0xa0, 0x4e, 0x8f, 0x85, 0xc0, 0x87, 0xd3, 0xb0, 0x02, 0x10, 0xd4, 0x18, + 0xcc, 0xc5, 0xd3, 0x32, 0x96, 0x51, 0x3c, 0x0d, 0x05, 0x0b, 0x60, 0x0e, 0xf8, 0xf8, 0x29, 0x20, + 0x71, 0x7a, 0x10, 0xd0, 0x39, 0xee, 0x70, 0x19, 0x97, 0x0a, 0xe6, 0x1b, 0x2c, 0x14, 0x89, 0xe0, + 0x73, 0xd0, 0x4c, 0x4e, 0x09, 0xf0, 0x01, 0xf0, 0x69, 0x42, 0x70, 0x43, 0x83, 0xd8, 0x87, 0x45, + 0x39, 0xbd, 0xbf, 0xb4, 0x3d, 0x0b, 0x6e, 0xb7, 0xff, 0xd0, 0x50, 0xed, 0x74, 0xc0, 0x1c, 0xc1, + 0xa2, 0x10, 0xff, 0x80, 0x6a, 0x59, 0xa5, 0x5c, 0x2a, 0x68, 0x53, 0x7b, 0x57, 0xdb, 0xdd, 0x38, + 0xf8, 0x90, 0xa8, 0x8e, 0xcd, 0x06, 0x3e, 0xed, 0x59, 0x76, 0x9b, 0x0c, 0xf6, 0x49, 0xd7, 0xbe, + 0x06, 0x47, 0x5c, 0x80, 0xa0, 0x26, 0xbe, 0xbd, 0xdf, 0x29, 0x8d, 0xee, 0x77, 0xd0, 0x74, 0xcf, + 0x9a, 0xa8, 0x62, 0x1f, 0x35, 0x5c, 0xf0, 0x41, 0x40, 0x37, 0xce, 0x1c, 0x93, 0xe6, 0x9a, 0xb4, + 0x39, 0x5c, 0xcd, 0xa6, 0x33, 0x8b, 0x9a, 0xaf, 0x8d, 0xee, 0x77, 0x1a, 0x85, 0x2d, 0xab, 0x28, + 0xde, 0xfe, 0x6d, 0x0d, 0xbd, 0x7e, 0x19, 0xb9, 0x1d, 0x96, 
0xf0, 0x54, 0x6e, 0x99, 0xa9, 0xeb, + 0x81, 0x78, 0x09, 0x79, 0xba, 0xa8, 0x92, 0xc4, 0xe0, 0xa8, 0xf4, 0x4c, 0xb2, 0xf2, 0xdc, 0x93, + 0x05, 0xf1, 0x5e, 0xc5, 0xe0, 0x98, 0x75, 0xe5, 0x57, 0xc9, 0x56, 0x96, 0x54, 0xc7, 0x3e, 0x5a, + 0x4f, 0x04, 0x15, 0x69, 0xd2, 0x2c, 0x4b, 0x9f, 0xce, 0x33, 0x7d, 0xa4, 0x96, 0xb9, 0xa9, 0x9c, + 0xd6, 0xf3, 0xb5, 0xa5, 0x3c, 0xda, 0x7f, 0x6a, 0xe8, 0xad, 0x05, 0xd4, 0x39, 0x4b, 0x04, 0xfe, + 0x6e, 0xae, 0xa2, 0x64, 0xb5, 0x8a, 0x66, 0xb4, 0xac, 0xe7, 0x96, 0x72, 0xad, 0x8d, 0x77, 0x66, + 0xaa, 0xe9, 0xa0, 0x2a, 0x13, 0x10, 0x64, 0xd3, 0x52, 0xde, 0xdd, 0x38, 0x38, 0x7e, 0x5e, 0x9a, + 0x66, 0x43, 0x59, 0x55, 0xcf, 0x32, 0x51, 0x2b, 0xd7, 0x6e, 0xff, 0xb3, 0x38, 0xbd, 0xac, 0xdc, + 0xf8, 0x1a, 0xd5, 0x03, 0x16, 0x9e, 0x0c, 0x28, 0xf3, 0xa9, 0xed, 0xc3, 0x93, 0x43, 0x93, 0xbd, + 0x6a, 0x92, 0xbf, 0x6a, 0x72, 0x16, 0x8a, 0x2e, 0xbf, 0x12, 0x9c, 0x85, 0x9e, 0xf9, 0x86, 0x72, + 0xae, 0x5f, 0xcc, 0xa8, 0x59, 0x05, 0x6d, 0xfc, 0x3d, 0xaa, 0x25, 0xe0, 0x83, 0x23, 0x22, 0xfe, + 0xff, 0x5e, 0xc7, 0x39, 0xb5, 0xc1, 0xbf, 0x52, 0xa8, 0x59, 0xcf, 0x6a, 0x39, 0x5e, 0x59, 0x13, + 0xc9, 0xf6, 0xbf, 0x15, 0xf4, 0xf6, 0xd2, 0xde, 0xe3, 0xaf, 0x10, 0x8e, 0x6c, 0xf9, 0xb3, 0x71, + 0xbf, 0xc8, 0xff, 0x14, 0x2c, 0x0a, 0x65, 0xba, 0x65, 0xb3, 0xa5, 0x82, 0xc7, 0xdd, 0xb9, 0x1b, + 0xd6, 0x02, 0x0a, 0xff, 0xaa, 0xa1, 0x86, 0x9b, 0xdb, 0x80, 0x7b, 0x19, 0xb9, 0xe3, 0xf6, 0x7d, + 0xf3, 0x22, 0xa6, 0x94, 0x74, 0x66, 0x95, 0x4f, 0x43, 0xc1, 0x87, 0xe6, 0xb6, 0x0a, 0xb0, 0x51, + 0x38, 0xb3, 0x8a, 0x41, 0xe0, 0x0b, 0x84, 0xdd, 0x89, 0x64, 0x72, 0xe2, 0xfb, 0xd1, 0x4f, 0xe0, + 0xca, 0x07, 0x54, 0x35, 0xdf, 0x51, 0x0a, 0xdb, 0x05, 0xdf, 0xf1, 0x25, 0x6b, 0x01, 0x88, 0x8f, + 0xd1, 0xa6, 0x93, 0x72, 0x0e, 0xa1, 0xf8, 0x12, 0xa8, 0x2f, 0x7a, 0xc3, 0x66, 0x45, 0x4a, 0xbd, + 0xa9, 0xa4, 0x36, 0x3f, 0x2f, 0x9c, 0x5a, 0x8f, 0x6e, 0x67, 0xbc, 0x0b, 0x09, 0xe3, 0xe0, 0x8e, + 0xf9, 0x6a, 0x91, 0xef, 0x14, 0x4e, 0xad, 0x47, 0xb7, 0xf1, 0x11, 0xaa, 0xc3, 0x4d, 0x0c, 0xce, + 0xb8, 0xc6, 0xeb, 0x92, 0x9e, 0x0c, 0xda, 0xe9, 0xcc, 0x99, 0x55, 0xb8, 0xd9, 0xf2, 0x11, 0x9e, + 0x2f, 0x22, 0xde, 0x42, 0xe5, 0x3e, 0x0c, 0x65, 0xcb, 0x5f, 0xb5, 0xb2, 0x4f, 0xfc, 0x19, 0xaa, + 0x0e, 0xa8, 0x9f, 0x82, 0x9a, 0xc6, 0xf7, 0x56, 0x9b, 0xc6, 0xaf, 0x59, 0x00, 0x56, 0x0e, 0x7e, + 0xba, 0x76, 0xa4, 0x99, 0x7b, 0xb7, 0x0f, 0x7a, 0xe9, 0xee, 0x41, 0x2f, 0xfd, 0xfe, 0xa0, 0x97, + 0x7e, 0x1e, 0xe9, 0xda, 0xed, 0x48, 0xd7, 0xee, 0x46, 0xba, 0xf6, 0xd7, 0x48, 0xd7, 0x7e, 0xf9, + 0x5b, 0x2f, 0x7d, 0xfb, 0x8a, 0x6a, 0xfa, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x1b, 0xe9, 0x2f, + 0xf1, 0x61, 0x08, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.proto new file mode 100644 index 000000000..d14d5de30 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/generated.proto @@ -0,0 +1,109 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. 
Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.policy.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// Eviction evicts a pod from its node subject to certain policies and safety constraints. +// This is a subresource of Pod. A request to cause such an eviction is +// created by POSTing to .../pods/<pod name>/evictions. +message Eviction { + // ObjectMeta describes the pod that is being evicted. + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // DeleteOptions may be provided + optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; +} + +// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods +message PodDisruptionBudget { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the PodDisruptionBudget. + optional PodDisruptionBudgetSpec spec = 2; + + // Most recently observed status of the PodDisruptionBudget. + optional PodDisruptionBudgetStatus status = 3; +} + +// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. +message PodDisruptionBudgetList { + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated PodDisruptionBudget items = 2; +} + +// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. +message PodDisruptionBudgetSpec { + // An eviction is allowed if at least "minAvailable" pods selected by + // "selector" will still be available after the eviction, i.e. even in the + // absence of the evicted pod. So for example you can prevent all voluntary + // evictions by specifying "100%". + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; + + // Label query over pods whose evictions are managed by the disruption + // budget. + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; +} + +// PodDisruptionBudgetStatus represents information about the status of a +// PodDisruptionBudget. Status may trail the actual state of a system. +message PodDisruptionBudgetStatus { + // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other + // status information is valid only if observedGeneration equals the PDB's object generation. + // +optional + optional int64 observedGeneration = 1; + + // DisruptedPods contains information about pods whose eviction was + // processed by the API server eviction subresource handler but has not + // yet been observed by the PodDisruptionBudget controller. + // A pod will be in this map from the time when the API server processed the + // eviction request to the time when the pod is seen by the PDB controller + // as having been marked for deletion (or after a timeout). The key in the map is the name of the pod + // and the value is the time when the API server processed the eviction request. If + // the deletion didn't occur and a pod is still there, it will be removed from + // the list automatically by the PodDisruptionBudget controller after some time. 
+ // If everything goes smoothly, this map should be empty most of the time. + // A large number of entries in the map may indicate problems with pod deletions. + map<string, k8s.io.apimachinery.pkg.apis.meta.v1.Time> disruptedPods = 2; + + // Number of pod disruptions that are currently allowed. + optional int32 disruptionsAllowed = 3; + + // current number of healthy pods + optional int32 currentHealthy = 4; + + // minimum desired number of healthy pods + optional int32 desiredHealthy = 5; + + // total number of pods counted by this disruption budget + optional int32 expectedPods = 6; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/register.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/register.go new file mode 100644 index 000000000..52bd65c8b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "policy" + +// SchemeGroupVersion is the group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PodDisruptionBudget{}, + &PodDisruptionBudgetList{}, + &Eviction{}, + ) + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.generated.go new file mode 100644 index 000000000..049bfe8e6 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.generated.go @@ -0,0 +1,2203 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +package v1beta1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg2_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg3_types "k8s.io/apimachinery/pkg/types" + pkg1_intstr "k8s.io/apimachinery/pkg/util/intstr" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg2_v1.LabelSelector + var v1 pkg3_types.UID + var v2 pkg1_intstr.IntOrString + var v3 time.Time + _, _, _, _ = v0, v1, v2, v3 + } +} + +func (x *PodDisruptionBudgetSpec) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [2]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = true + yyq2[1] = x.Selector != nil + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(2) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yy4 := &x.MinAvailable + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(yy4) { + } else if !yym5 && z.IsJSONHandle() { + z.EncJSONMarshal(yy4) + } else { + z.EncFallback(yy4) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minAvailable")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy6 := &x.MinAvailable + yym7 := z.EncBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.EncExt(yy6) { + } else if !yym7 && z.IsJSONHandle() { + z.EncJSONMarshal(yy6) + } else { + z.EncFallback(yy6) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.Selector == nil { + r.EncodeNil() + } else { + yym9 := z.EncBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("selector")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Selector == nil { + r.EncodeNil() + } else { + yym10 := 
z.EncBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.EncExt(x.Selector) { + } else { + z.EncFallback(x.Selector) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodDisruptionBudgetSpec) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "minAvailable": + if r.TryDecodeAsNil() { + x.MinAvailable = pkg1_intstr.IntOrString{} + } else { + yyv4 := &x.MinAvailable + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv4) + } else { + z.DecFallback(yyv4, false) + } + } + case "selector": + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg2_v1.LabelSelector) + } + yym7 := z.DecBinary() + _ = yym7 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.MinAvailable = pkg1_intstr.IntOrString{} + } else { + yyv9 := &x.MinAvailable + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else if !yym10 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv9) + } else { + z.DecFallback(yyv9, false) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.Selector != nil { + x.Selector = nil + } + } else { + if x.Selector == nil { + x.Selector = new(pkg2_v1.LabelSelector) + } + yym12 := z.DecBinary() + _ = yym12 + if false { + } else if z.HasExtensions() && z.DecExt(x.Selector) { + } else { + z.DecFallback(x.Selector, false) + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj8-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodDisruptionBudgetStatus) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [6]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.ObservedGeneration != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(6) + } else { + yynn2 = 5 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.ObservedGeneration)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.DisruptedPods == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + h.encMapstringv1_Time((map[string]pkg2_v1.Time)(x.DisruptedPods), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("disruptedPods")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DisruptedPods == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + h.encMapstringv1_Time((map[string]pkg2_v1.Time)(x.DisruptedPods), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeInt(int64(x.PodDisruptionsAllowed)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("disruptionsAllowed")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeInt(int64(x.PodDisruptionsAllowed)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeInt(int64(x.CurrentHealthy)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("currentHealthy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + 
r.EncodeInt(int64(x.CurrentHealthy)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeInt(int64(x.DesiredHealthy)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("desiredHealthy")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeInt(int64(x.DesiredHealthy)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeInt(int64(x.ExpectedPods)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("expectedPods")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeInt(int64(x.ExpectedPods)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodDisruptionBudgetStatus) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "observedGeneration": + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv4 := &x.ObservedGeneration + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } + } + case "disruptedPods": + if r.TryDecodeAsNil() { + x.DisruptedPods = nil + } else { + yyv6 := &x.DisruptedPods + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + h.decMapstringv1_Time((*map[string]pkg2_v1.Time)(yyv6), d) + } + } + case "disruptionsAllowed": + if r.TryDecodeAsNil() { + x.PodDisruptionsAllowed = 0 + } else { + yyv8 := &x.PodDisruptionsAllowed + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*int32)(yyv8)) = int32(r.DecodeInt(32)) + } + } + case "currentHealthy": + if r.TryDecodeAsNil() { + x.CurrentHealthy = 0 + } else { + yyv10 := &x.CurrentHealthy + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*int32)(yyv10)) = int32(r.DecodeInt(32)) + } + } + case 
"desiredHealthy": + if r.TryDecodeAsNil() { + x.DesiredHealthy = 0 + } else { + yyv12 := &x.DesiredHealthy + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*int32)(yyv12)) = int32(r.DecodeInt(32)) + } + } + case "expectedPods": + if r.TryDecodeAsNil() { + x.ExpectedPods = 0 + } else { + yyv14 := &x.ExpectedPods + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*int32)(yyv14)) = int32(r.DecodeInt(32)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObservedGeneration = 0 + } else { + yyv17 := &x.ObservedGeneration + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*int64)(yyv17)) = int64(r.DecodeInt(64)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DisruptedPods = nil + } else { + yyv19 := &x.DisruptedPods + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decMapstringv1_Time((*map[string]pkg2_v1.Time)(yyv19), d) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.PodDisruptionsAllowed = 0 + } else { + yyv21 := &x.PodDisruptionsAllowed + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*int32)(yyv21)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CurrentHealthy = 0 + } else { + yyv23 := &x.CurrentHealthy + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*int32)(yyv23)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.DesiredHealthy = 0 + } else { + yyv25 := &x.DesiredHealthy + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*int32)(yyv25)) = int32(r.DecodeInt(32)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ExpectedPods = 0 + } else { + yyv27 := &x.ExpectedPods + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*int32)(yyv27)) = int32(r.DecodeInt(32)) + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else 
{ + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj16-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodDisruptionBudget) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = true + yyq2[4] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yy15 := &x.Spec + yy15.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("spec")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy17 := &x.Spec + yy17.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + yy20 := &x.Status + yy20.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("status")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy22 := &x.Status + yy22.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodDisruptionBudget) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodDisruptionBudget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "spec": + if r.TryDecodeAsNil() { + x.Spec = PodDisruptionBudgetSpec{} + } else { + yyv10 := &x.Spec + yyv10.CodecDecodeSelf(d) + } + case "status": + if r.TryDecodeAsNil() { + x.Status = PodDisruptionBudgetStatus{} + } else { + yyv11 := &x.Status + yyv11.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodDisruptionBudget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if 
yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Spec = PodDisruptionBudgetSpec{} + } else { + yyv19 := &x.Spec + yyv19.CodecDecodeSelf(d) + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Status = PodDisruptionBudgetStatus{} + } else { + yyv20 := &x.Status + yyv20.CodecDecodeSelf(d) + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *PodDisruptionBudgetList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PodDisruptionBudgetList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PodDisruptionBudgetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + 
*((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PodDisruptionBudgetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg2_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Eviction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[3] = x.DeleteOptions != nil + var yynn2 int + if yyr2 || 
yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 0 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.DeleteOptions == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else if z.HasExtensions() && z.EncExt(x.DeleteOptions) { + } else { + z.EncFallback(x.DeleteOptions) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("deleteOptions")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.DeleteOptions == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.EncExt(x.DeleteOptions) { + } else { + z.EncFallback(x.DeleteOptions) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Eviction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := 
r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Eviction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "deleteOptions": + if r.TryDecodeAsNil() { + if x.DeleteOptions != nil { + x.DeleteOptions = nil + } + } else { + if x.DeleteOptions == nil { + x.DeleteOptions = new(pkg2_v1.DeleteOptions) + } + yym11 := z.DecBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.DecExt(x.DeleteOptions) { + } else { + z.DecFallback(x.DeleteOptions, false) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Eviction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } 
+ yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + if x.DeleteOptions != nil { + x.DeleteOptions = nil + } + } else { + if x.DeleteOptions == nil { + x.DeleteOptions = new(pkg2_v1.DeleteOptions) + } + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(x.DeleteOptions) { + } else { + z.DecFallback(x.DeleteOptions, false) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encMapstringv1_Time(v map[string]pkg2_v1.Time, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeMapStart(len(v)) + for yyk1, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + yym2 := z.EncBinary() + _ = yym2 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyk1)) + } + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy3 := &yyv1 + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(yy3) { + } else if yym4 { + z.EncBinaryMarshal(yy3) + } else if !yym4 && z.IsJSONHandle() { + z.EncJSONMarshal(yy3) + } else { + z.EncFallback(yy3) + } + } + z.EncSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) decMapstringv1_Time(v *map[string]pkg2_v1.Time, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyl1 := r.ReadMapStart() + yybh1 := z.DecBasicHandle() + if yyv1 == nil { + yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) + yyv1 = make(map[string]pkg2_v1.Time, yyrl1) + *v = yyv1 + } + var yymk1 string + var yymv1 pkg2_v1.Time + var yymg1 bool + if yybh1.MapValueReset { + yymg1 = true + } + if yyl1 > 0 { + for yyj1 := 0; yyj1 < yyl1; yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv2 := &yymk1 + yym3 := z.DecBinary() + _ = yym3 + if false { + } else { + *((*string)(yyv2)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = pkg2_v1.Time{} + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = pkg2_v1.Time{} + } else { + yyv4 := &yymv1 + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else if yym5 { + z.DecBinaryUnmarshal(yyv4) + } else if !yym5 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv4) + } else { + z.DecFallback(yyv4, false) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } else if yyl1 < 0 { + for yyj1 := 0; !r.CheckBreak(); yyj1++ { + z.DecSendContainerState(codecSelfer_containerMapKey1234) + if r.TryDecodeAsNil() { + yymk1 = "" + } else { + yyv6 := &yymk1 + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + + if yymg1 { + yymv1 = yyv1[yymk1] + } else { + yymv1 = pkg2_v1.Time{} + } + z.DecSendContainerState(codecSelfer_containerMapValue1234) + if r.TryDecodeAsNil() { + yymv1 = pkg2_v1.Time{} + } else { + yyv8 := &yymv1 + yym9 := z.DecBinary() + _ = yym9 + if false { + } 
else if z.HasExtensions() && z.DecExt(yyv8) { + } else if yym9 { + z.DecBinaryUnmarshal(yyv8) + } else if !yym9 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv8) + } else { + z.DecFallback(yyv8, false) + } + } + + if yyv1 != nil { + yyv1[yymk1] = yymv1 + } + } + } // else len==0: TODO: Should we clear map entries? + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x codecSelfer1234) encSlicePodDisruptionBudget(v []PodDisruptionBudget, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePodDisruptionBudget(v *[]PodDisruptionBudget, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PodDisruptionBudget{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 328) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PodDisruptionBudget, yyrl1) + } + } else { + yyv1 = make([]PodDisruptionBudget, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodDisruptionBudget{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PodDisruptionBudget{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodDisruptionBudget{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PodDisruptionBudget{}) // var yyz1 PodDisruptionBudget + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PodDisruptionBudget{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PodDisruptionBudget{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.go new file mode 100644 index 000000000..ca5ea3730 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types.go @@ -0,0 +1,105 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. +type PodDisruptionBudgetSpec struct { + // An eviction is allowed if at least "minAvailable" pods selected by + // "selector" will still be available after the eviction, i.e. even in the + // absence of the evicted pod. So for example you can prevent all voluntary + // evictions by specifying "100%". + MinAvailable intstr.IntOrString `json:"minAvailable,omitempty" protobuf:"bytes,1,opt,name=minAvailable"` + + // Label query over pods whose evictions are managed by the disruption + // budget. + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` +} + +// PodDisruptionBudgetStatus represents information about the status of a +// PodDisruptionBudget. Status may trail the actual state of a system. +type PodDisruptionBudgetStatus struct { + // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other + // status informatio is valid only if observedGeneration equals to PDB's object generation. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // DisruptedPods contains information about pods whose eviction was + // processed by the API server eviction subresource handler but has not + // yet been observed by the PodDisruptionBudget controller. + // A pod will be in this map from the time when the API server processed the + // eviction request to the time when the pod is seen by PDB controller + // as having been marked for deletion (or after a timeout). The key in the map is the name of the pod + // and the value is the time when the API server processed the eviction request. If + // the deletion didn't occur and a pod is still there it will be removed from + // the list automatically by PodDisruptionBudget controller after some time. + // If everything goes smooth this map should be empty for the most of the time. + // Large number of entries in the map may indicate problems with pod deletions. + DisruptedPods map[string]metav1.Time `json:"disruptedPods" protobuf:"bytes,2,rep,name=disruptedPods"` + + // Number of pod disruptions that are currently allowed. 
+ PodDisruptionsAllowed int32 `json:"disruptionsAllowed" protobuf:"varint,3,opt,name=disruptionsAllowed"` + + // current number of healthy pods + CurrentHealthy int32 `json:"currentHealthy" protobuf:"varint,4,opt,name=currentHealthy"` + + // minimum desired number of healthy pods + DesiredHealthy int32 `json:"desiredHealthy" protobuf:"varint,5,opt,name=desiredHealthy"` + + // total number of pods counted by this disruption budget + ExpectedPods int32 `json:"expectedPods" protobuf:"varint,6,opt,name=expectedPods"` +} + +// +genclient=true + +// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods +type PodDisruptionBudget struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the PodDisruptionBudget. + Spec PodDisruptionBudgetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // Most recently observed status of the PodDisruptionBudget. + Status PodDisruptionBudgetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. +type PodDisruptionBudgetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true +// +noMethods=true + +// Eviction evicts a pod from its node subject to certain policies and safety constraints. +// This is a subresource of Pod. A request to cause such an eviction is +// created by POSTing to .../pods/<pod name>/evictions. +type Eviction struct { + metav1.TypeMeta `json:",inline"` + + // ObjectMeta describes the pod that is being evicted. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // DeleteOptions may be provided + DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..d919856b2 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,82 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Eviction = map[string]string{ + "": "Eviction evicts a pod from its node subject to certain policies and safety constraints. This is a subresource of Pod. A request to cause such an eviction is created by POSTing to .../pods/<pod name>/evictions.", + "metadata": "ObjectMeta describes the pod that is being evicted.", + "deleteOptions": "DeleteOptions may be provided", +} + +func (Eviction) SwaggerDoc() map[string]string { + return map_Eviction +} + +var map_PodDisruptionBudget = map[string]string{ + "": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", + "spec": "Specification of the desired behavior of the PodDisruptionBudget.", + "status": "Most recently observed status of the PodDisruptionBudget.", +} + +func (PodDisruptionBudget) SwaggerDoc() map[string]string { + return map_PodDisruptionBudget +} + +var map_PodDisruptionBudgetList = map[string]string{ + "": "PodDisruptionBudgetList is a collection of PodDisruptionBudgets.", +} + +func (PodDisruptionBudgetList) SwaggerDoc() map[string]string { + return map_PodDisruptionBudgetList +} + +var map_PodDisruptionBudgetSpec = map[string]string{ + "": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.", + "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".", + "selector": "Label query over pods whose evictions are managed by the disruption budget.", +} + +func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { + return map_PodDisruptionBudgetSpec +} + +var map_PodDisruptionBudgetStatus = map[string]string{ + "": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.", + "observedGeneration": "Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other status informatio is valid only if observedGeneration equals to PDB's object generation.", + "disruptedPods": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. 
Large number of entries in the map may indicate problems with pod deletions.", + "disruptionsAllowed": "Number of pod disruptions that are currently allowed.", + "currentHealthy": "current number of healthy pods", + "desiredHealthy": "minimum desired number of healthy pods", + "expectedPods": "total number of pods counted by this disruption budget", +} + +func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string { + return map_PodDisruptionBudgetStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.conversion.go new file mode 100644 index 000000000..5a54f5039 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.conversion.go @@ -0,0 +1,172 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + policy "k8s.io/client-go/pkg/apis/policy" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_Eviction_To_policy_Eviction, + Convert_policy_Eviction_To_v1beta1_Eviction, + Convert_v1beta1_PodDisruptionBudget_To_policy_PodDisruptionBudget, + Convert_policy_PodDisruptionBudget_To_v1beta1_PodDisruptionBudget, + Convert_v1beta1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList, + Convert_policy_PodDisruptionBudgetList_To_v1beta1_PodDisruptionBudgetList, + Convert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec, + Convert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec, + Convert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus, + Convert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus, + ) +} + +func autoConvert_v1beta1_Eviction_To_policy_Eviction(in *Eviction, out *policy.Eviction, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.DeleteOptions = (*v1.DeleteOptions)(unsafe.Pointer(in.DeleteOptions)) + return nil +} + +func Convert_v1beta1_Eviction_To_policy_Eviction(in *Eviction, out *policy.Eviction, s conversion.Scope) error { + return autoConvert_v1beta1_Eviction_To_policy_Eviction(in, out, s) +} + +func autoConvert_policy_Eviction_To_v1beta1_Eviction(in *policy.Eviction, out *Eviction, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.DeleteOptions = (*v1.DeleteOptions)(unsafe.Pointer(in.DeleteOptions)) + return nil +} + +func Convert_policy_Eviction_To_v1beta1_Eviction(in *policy.Eviction, out *Eviction, s conversion.Scope) error { + return autoConvert_policy_Eviction_To_v1beta1_Eviction(in, out, s) +} + +func autoConvert_v1beta1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in *PodDisruptionBudget, out *policy.PodDisruptionBudget, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in *PodDisruptionBudget, out *policy.PodDisruptionBudget, s conversion.Scope) error { + return autoConvert_v1beta1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in, out, s) +} + +func autoConvert_policy_PodDisruptionBudget_To_v1beta1_PodDisruptionBudget(in *policy.PodDisruptionBudget, out *PodDisruptionBudget, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_policy_PodDisruptionBudget_To_v1beta1_PodDisruptionBudget(in *policy.PodDisruptionBudget, out *PodDisruptionBudget, s conversion.Scope) error { + return autoConvert_policy_PodDisruptionBudget_To_v1beta1_PodDisruptionBudget(in, out, s) +} + +func autoConvert_v1beta1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in *PodDisruptionBudgetList, out *policy.PodDisruptionBudgetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]policy.PodDisruptionBudget)(unsafe.Pointer(&in.Items)) + return nil +} + +func 
Convert_v1beta1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in *PodDisruptionBudgetList, out *policy.PodDisruptionBudgetList, s conversion.Scope) error { + return autoConvert_v1beta1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in, out, s) +} + +func autoConvert_policy_PodDisruptionBudgetList_To_v1beta1_PodDisruptionBudgetList(in *policy.PodDisruptionBudgetList, out *PodDisruptionBudgetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]PodDisruptionBudget, 0) + } else { + out.Items = *(*[]PodDisruptionBudget)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_policy_PodDisruptionBudgetList_To_v1beta1_PodDisruptionBudgetList(in *policy.PodDisruptionBudgetList, out *PodDisruptionBudgetList, s conversion.Scope) error { + return autoConvert_policy_PodDisruptionBudgetList_To_v1beta1_PodDisruptionBudgetList(in, out, s) +} + +func autoConvert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in *PodDisruptionBudgetSpec, out *policy.PodDisruptionBudgetSpec, s conversion.Scope) error { + out.MinAvailable = in.MinAvailable + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + return nil +} + +func Convert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in *PodDisruptionBudgetSpec, out *policy.PodDisruptionBudgetSpec, s conversion.Scope) error { + return autoConvert_v1beta1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in, out, s) +} + +func autoConvert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec(in *policy.PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, s conversion.Scope) error { + out.MinAvailable = in.MinAvailable + out.Selector = (*v1.LabelSelector)(unsafe.Pointer(in.Selector)) + return nil +} + +func Convert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec(in *policy.PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, s conversion.Scope) error { + return autoConvert_policy_PodDisruptionBudgetSpec_To_v1beta1_PodDisruptionBudgetSpec(in, out, s) +} + +func autoConvert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in *PodDisruptionBudgetStatus, out *policy.PodDisruptionBudgetStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.DisruptedPods = *(*map[string]v1.Time)(unsafe.Pointer(&in.DisruptedPods)) + out.PodDisruptionsAllowed = in.PodDisruptionsAllowed + out.CurrentHealthy = in.CurrentHealthy + out.DesiredHealthy = in.DesiredHealthy + out.ExpectedPods = in.ExpectedPods + return nil +} + +func Convert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in *PodDisruptionBudgetStatus, out *policy.PodDisruptionBudgetStatus, s conversion.Scope) error { + return autoConvert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in, out, s) +} + +func autoConvert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus(in *policy.PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, s conversion.Scope) error { + out.ObservedGeneration = in.ObservedGeneration + out.DisruptedPods = *(*map[string]v1.Time)(unsafe.Pointer(&in.DisruptedPods)) + out.PodDisruptionsAllowed = in.PodDisruptionsAllowed + out.CurrentHealthy = in.CurrentHealthy + out.DesiredHealthy = in.DesiredHealthy + out.ExpectedPods = in.ExpectedPods + return nil +} + +func Convert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus(in *policy.PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, s 
conversion.Scope) error { + return autoConvert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudgetStatus(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..395e6689e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,137 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Eviction, InType: reflect.TypeOf(&Eviction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PodDisruptionBudget, InType: reflect.TypeOf(&PodDisruptionBudget{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PodDisruptionBudgetList, InType: reflect.TypeOf(&PodDisruptionBudgetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PodDisruptionBudgetSpec, InType: reflect.TypeOf(&PodDisruptionBudgetSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PodDisruptionBudgetStatus, InType: reflect.TypeOf(&PodDisruptionBudgetStatus{})}, + ) +} + +func DeepCopy_v1beta1_Eviction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Eviction) + out := out.(*Eviction) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.DeleteOptions != nil { + in, out := &in.DeleteOptions, &out.DeleteOptions + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.DeleteOptions) + } + } + return nil + } +} + +func DeepCopy_v1beta1_PodDisruptionBudget(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodDisruptionBudget) + out := out.(*PodDisruptionBudget) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1beta1_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_v1beta1_PodDisruptionBudgetStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1beta1_PodDisruptionBudgetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodDisruptionBudgetList) + out := out.(*PodDisruptionBudgetList) + *out = 
*in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodDisruptionBudget, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_PodDisruptionBudget(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_PodDisruptionBudgetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodDisruptionBudgetSpec) + out := out.(*PodDisruptionBudgetSpec) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + return nil + } +} + +func DeepCopy_v1beta1_PodDisruptionBudgetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodDisruptionBudgetStatus) + out := out.(*PodDisruptionBudgetStatus) + *out = *in + if in.DisruptedPods != nil { + in, out := &in.DisruptedPods, &out.DisruptedPods + *out = make(map[string]v1.Time) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/policy/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/policy/zz_generated.deepcopy.go new file mode 100644 index 000000000..298ec5fda --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/policy/zz_generated.deepcopy.go @@ -0,0 +1,137 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package policy + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
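// ---------------------------------------------------------------------------
// Editor's note: standalone illustrative sketch, not part of the vendored
// patch. RegisterDeepCopies is exported precisely so the generated deep-copy
// functions can be installed into an arbitrary runtime.Scheme; the sketch
// below does that for the policy/v1beta1 group. The alias policyv1beta1 is
// an assumption for the example.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	policyv1beta1 "k8s.io/client-go/pkg/apis/policy/v1beta1"
)

func main() {
	scheme := runtime.NewScheme()
	if err := policyv1beta1.RegisterDeepCopies(scheme); err != nil {
		fmt.Println("failed to register deep-copy functions:", err)
		return
	}
	fmt.Println("policy/v1beta1 deep-copy functions registered")
}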
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_policy_Eviction, InType: reflect.TypeOf(&Eviction{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_policy_PodDisruptionBudget, InType: reflect.TypeOf(&PodDisruptionBudget{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_policy_PodDisruptionBudgetList, InType: reflect.TypeOf(&PodDisruptionBudgetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_policy_PodDisruptionBudgetSpec, InType: reflect.TypeOf(&PodDisruptionBudgetSpec{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_policy_PodDisruptionBudgetStatus, InType: reflect.TypeOf(&PodDisruptionBudgetStatus{})}, + ) +} + +func DeepCopy_policy_Eviction(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Eviction) + out := out.(*Eviction) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.DeleteOptions != nil { + in, out := &in.DeleteOptions, &out.DeleteOptions + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.DeleteOptions) + } + } + return nil + } +} + +func DeepCopy_policy_PodDisruptionBudget(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodDisruptionBudget) + out := out.(*PodDisruptionBudget) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_policy_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + if err := DeepCopy_policy_PodDisruptionBudgetStatus(&in.Status, &out.Status, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_policy_PodDisruptionBudgetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodDisruptionBudgetList) + out := out.(*PodDisruptionBudgetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodDisruptionBudget, len(*in)) + for i := range *in { + if err := DeepCopy_policy_PodDisruptionBudget(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_policy_PodDisruptionBudgetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodDisruptionBudgetSpec) + out := out.(*PodDisruptionBudgetSpec) + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if newVal, err := c.DeepCopy(*in); err != nil { + return err + } else { + *out = newVal.(*v1.LabelSelector) + } + } + return nil + } +} + +func DeepCopy_policy_PodDisruptionBudgetStatus(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodDisruptionBudgetStatus) + out := out.(*PodDisruptionBudgetStatus) + *out = *in + if in.DisruptedPods != nil { + in, out := &in.DisruptedPods, &out.DisruptedPods + *out = make(map[string]v1.Time) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/OWNERS b/vendor/k8s.io/client-go/pkg/apis/rbac/OWNERS new file mode 100755 index 000000000..a35477b92 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/OWNERS @@ -0,0 +1,17 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- deads2k +- sttts +- ncdc +- timothysc +- dims +- krousey +- mml +- mbohlool +- david-mcmahon +- ericchiang +- lixiaobing10051267 +- jianhuiz +- 
liggitt diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/doc.go b/vendor/k8s.io/client-go/pkg/apis/rbac/doc.go new file mode 100644 index 000000000..c5f057484 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=rbac.authorization.k8s.io +package rbac diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/helpers.go b/vendor/k8s.io/client-go/pkg/apis/rbac/helpers.go new file mode 100644 index 000000000..a2ec9e8a5 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/helpers.go @@ -0,0 +1,340 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rbac + +import ( + "fmt" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +func RoleRefGroupKind(roleRef RoleRef) schema.GroupKind { + return schema.GroupKind{Group: roleRef.APIGroup, Kind: roleRef.Kind} +} + +func VerbMatches(rule PolicyRule, requestedVerb string) bool { + for _, ruleVerb := range rule.Verbs { + if ruleVerb == VerbAll { + return true + } + if ruleVerb == requestedVerb { + return true + } + } + + return false +} + +func APIGroupMatches(rule PolicyRule, requestedGroup string) bool { + for _, ruleGroup := range rule.APIGroups { + if ruleGroup == APIGroupAll { + return true + } + if ruleGroup == requestedGroup { + return true + } + } + + return false +} + +func ResourceMatches(rule PolicyRule, requestedResource string) bool { + for _, ruleResource := range rule.Resources { + if ruleResource == ResourceAll { + return true + } + if ruleResource == requestedResource { + return true + } + } + + return false +} + +func ResourceNameMatches(rule PolicyRule, requestedName string) bool { + if len(rule.ResourceNames) == 0 { + return true + } + + for _, ruleName := range rule.ResourceNames { + if ruleName == requestedName { + return true + } + } + + return false +} + +func NonResourceURLMatches(rule PolicyRule, requestedURL string) bool { + for _, ruleURL := range rule.NonResourceURLs { + if ruleURL == NonResourceAll { + return true + } + if ruleURL == requestedURL { + return true + } + if strings.HasSuffix(ruleURL, "*") && strings.HasPrefix(requestedURL, strings.TrimRight(ruleURL, "*")) { + return true + } + } + + return false +} + +// subjectsStrings returns users, groups, serviceaccounts, unknown for display purposes. 
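// ---------------------------------------------------------------------------
// Editor's note: standalone illustrative sketch, not part of the vendored
// patch. SubjectsStrings (defined just below) splits a binding's subjects
// into users, groups, service accounts and everything else for display; the
// subject names used here are made up for the example.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"

	"k8s.io/client-go/pkg/apis/rbac"
)

func main() {
	subjects := []rbac.Subject{
		{Kind: rbac.UserKind, Name: "alice"},
		{Kind: rbac.GroupKind, Name: "system:masters"},
		{Kind: rbac.ServiceAccountKind, Namespace: "kube-system", Name: "default"},
	}
	users, groups, sas, others := rbac.SubjectsStrings(subjects)
	fmt.Println("users:", users, "groups:", groups, "serviceaccounts:", sas, "others:", others)
}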
+func SubjectsStrings(subjects []Subject) ([]string, []string, []string, []string) { + users := []string{} + groups := []string{} + sas := []string{} + others := []string{} + + for _, subject := range subjects { + switch subject.Kind { + case ServiceAccountKind: + sas = append(sas, fmt.Sprintf("%s/%s", subject.Namespace, subject.Name)) + + case UserKind: + users = append(users, subject.Name) + + case GroupKind: + groups = append(groups, subject.Name) + + default: + others = append(others, fmt.Sprintf("%s/%s/%s", subject.Kind, subject.Namespace, subject.Name)) + } + } + + return users, groups, sas, others +} + +// PolicyRuleBuilder let's us attach methods. A no-no for API types. +// We use it to construct rules in code. It's more compact than trying to write them +// out in a literal and allows us to perform some basic checking during construction +type PolicyRuleBuilder struct { + PolicyRule PolicyRule +} + +func NewRule(verbs ...string) *PolicyRuleBuilder { + return &PolicyRuleBuilder{ + PolicyRule: PolicyRule{Verbs: sets.NewString(verbs...).List()}, + } +} + +func (r *PolicyRuleBuilder) Groups(groups ...string) *PolicyRuleBuilder { + r.PolicyRule.APIGroups = combine(r.PolicyRule.APIGroups, groups) + return r +} + +func (r *PolicyRuleBuilder) Resources(resources ...string) *PolicyRuleBuilder { + r.PolicyRule.Resources = combine(r.PolicyRule.Resources, resources) + return r +} + +func (r *PolicyRuleBuilder) Names(names ...string) *PolicyRuleBuilder { + r.PolicyRule.ResourceNames = combine(r.PolicyRule.ResourceNames, names) + return r +} + +func (r *PolicyRuleBuilder) URLs(urls ...string) *PolicyRuleBuilder { + r.PolicyRule.NonResourceURLs = combine(r.PolicyRule.NonResourceURLs, urls) + return r +} + +func (r *PolicyRuleBuilder) RuleOrDie() PolicyRule { + ret, err := r.Rule() + if err != nil { + panic(err) + } + return ret +} + +func combine(s1, s2 []string) []string { + s := sets.NewString(s1...) + s.Insert(s2...) + return s.List() +} + +func (r *PolicyRuleBuilder) Rule() (PolicyRule, error) { + if len(r.PolicyRule.Verbs) == 0 { + return PolicyRule{}, fmt.Errorf("verbs are required: %#v", r.PolicyRule) + } + + switch { + case len(r.PolicyRule.NonResourceURLs) > 0: + if len(r.PolicyRule.APIGroups) != 0 || len(r.PolicyRule.Resources) != 0 || len(r.PolicyRule.ResourceNames) != 0 { + return PolicyRule{}, fmt.Errorf("non-resource rule may not have apiGroups, resources, or resourceNames: %#v", r.PolicyRule) + } + case len(r.PolicyRule.Resources) > 0: + if len(r.PolicyRule.NonResourceURLs) != 0 { + return PolicyRule{}, fmt.Errorf("resource rule may not have nonResourceURLs: %#v", r.PolicyRule) + } + if len(r.PolicyRule.APIGroups) == 0 { + // this a common bug + return PolicyRule{}, fmt.Errorf("resource rule must have apiGroups: %#v", r.PolicyRule) + } + default: + return PolicyRule{}, fmt.Errorf("a rule must have either nonResourceURLs or resources: %#v", r.PolicyRule) + } + + return r.PolicyRule, nil +} + +// ClusterRoleBindingBuilder let's us attach methods. A no-no for API types. +// We use it to construct bindings in code. It's more compact than trying to write them +// out in a literal. 
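// ---------------------------------------------------------------------------
// Editor's note: standalone illustrative sketch, not part of the vendored
// patch. PolicyRuleBuilder (defined above) builds PolicyRules fluently and
// validates them: Rule returns an error for inconsistent input, RuleOrDie
// panics instead. Verbs, groups and resources below are example values.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"

	"k8s.io/client-go/pkg/apis/rbac"
)

func main() {
	// A resource rule: get/list/watch pods in the core ("") API group.
	readPods := rbac.NewRule("get", "list", "watch").
		Groups("").
		Resources("pods").
		RuleOrDie()
	fmt.Printf("%#v\n", readPods)

	// Mixing resources and non-resource URLs in one rule is rejected.
	if _, err := rbac.NewRule("get").Groups("").Resources("pods").URLs("/healthz").Rule(); err != nil {
		fmt.Println("invalid rule rejected:", err)
	}
}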
+type ClusterRoleBindingBuilder struct { + ClusterRoleBinding ClusterRoleBinding +} + +func NewClusterBinding(clusterRoleName string) *ClusterRoleBindingBuilder { + return &ClusterRoleBindingBuilder{ + ClusterRoleBinding: ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: clusterRoleName}, + RoleRef: RoleRef{ + APIGroup: GroupName, + Kind: "ClusterRole", + Name: clusterRoleName, + }, + }, + } +} + +func (r *ClusterRoleBindingBuilder) Groups(groups ...string) *ClusterRoleBindingBuilder { + for _, group := range groups { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: GroupKind, APIGroup: GroupName, Name: group}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) Users(users ...string) *ClusterRoleBindingBuilder { + for _, user := range users { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: UserKind, APIGroup: GroupName, Name: user}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *ClusterRoleBindingBuilder { + for _, saName := range serviceAccountNames { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: ServiceAccountKind, Namespace: namespace, Name: saName}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) BindingOrDie() ClusterRoleBinding { + ret, err := r.Binding() + if err != nil { + panic(err) + } + return ret +} + +func (r *ClusterRoleBindingBuilder) Binding() (ClusterRoleBinding, error) { + if len(r.ClusterRoleBinding.Subjects) == 0 { + return ClusterRoleBinding{}, fmt.Errorf("subjects are required: %#v", r.ClusterRoleBinding) + } + + return r.ClusterRoleBinding, nil +} + +// RoleBindingBuilder let's us attach methods. It is similar to +// ClusterRoleBindingBuilder above. +type RoleBindingBuilder struct { + RoleBinding RoleBinding +} + +// NewRoleBinding creates a RoleBinding builder that can be used +// to define the subjects of a role binding. At least one of +// the `Groups`, `Users` or `SAs` method must be called before +// calling the `Binding*` methods. +func NewRoleBinding(roleName, namespace string) *RoleBindingBuilder { + return &RoleBindingBuilder{ + RoleBinding: RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + Namespace: namespace, + }, + RoleRef: RoleRef{ + APIGroup: GroupName, + Kind: "Role", + Name: roleName, + }, + }, + } +} + +func NewRoleBindingForClusterRole(roleName, namespace string) *RoleBindingBuilder { + return &RoleBindingBuilder{ + RoleBinding: RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + Namespace: namespace, + }, + RoleRef: RoleRef{ + APIGroup: GroupName, + Kind: "ClusterRole", + Name: roleName, + }, + }, + } +} + +// Groups adds the specified groups as the subjects of the RoleBinding. +func (r *RoleBindingBuilder) Groups(groups ...string) *RoleBindingBuilder { + for _, group := range groups { + r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, Subject{Kind: GroupKind, Name: group}) + } + return r +} + +// Users adds the specified users as the subjects of the RoleBinding. +func (r *RoleBindingBuilder) Users(users ...string) *RoleBindingBuilder { + for _, user := range users { + r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, Subject{Kind: UserKind, Name: user}) + } + return r +} + +// SAs adds the specified service accounts as the subjects of the +// RoleBinding. 
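// ---------------------------------------------------------------------------
// Editor's note: standalone illustrative sketch, not part of the vendored
// patch. The binding builders in this file construct ClusterRoleBindings and
// RoleBindings in code; BindingOrDie panics when no subjects were added. The
// role names, namespace and subjects below are example values only.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"

	"k8s.io/client-go/pkg/apis/rbac"
)

func main() {
	// Bind the "view" ClusterRole to every authenticated user.
	crb := rbac.NewClusterBinding("view").
		Groups("system:authenticated").
		BindingOrDie()
	fmt.Println("cluster role binding:", crb.Name, "->", crb.RoleRef.Kind)

	// Bind the "edit" ClusterRole to a service account inside one namespace.
	rb := rbac.NewRoleBindingForClusterRole("edit", "demo").
		SAs("demo", "builder").
		BindingOrDie()
	fmt.Println("role binding:", rb.Namespace+"/"+rb.Name, "->", rb.RoleRef.Kind)
}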
+func (r *RoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *RoleBindingBuilder { + for _, saName := range serviceAccountNames { + r.RoleBinding.Subjects = append(r.RoleBinding.Subjects, Subject{Kind: ServiceAccountKind, Namespace: namespace, Name: saName}) + } + return r +} + +// BindingOrDie calls the binding method and panics if there is an error. +func (r *RoleBindingBuilder) BindingOrDie() RoleBinding { + ret, err := r.Binding() + if err != nil { + panic(err) + } + return ret +} + +// Binding builds and returns the RoleBinding API object from the builder +// object. +func (r *RoleBindingBuilder) Binding() (RoleBinding, error) { + if len(r.RoleBinding.Subjects) == 0 { + return RoleBinding{}, fmt.Errorf("subjects are required: %#v", r.RoleBinding) + } + + return r.RoleBinding, nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/install/install.go b/vendor/k8s.io/client-go/pkg/apis/rbac/install/install.go new file mode 100644 index 000000000..f92c11e06 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/install/install.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the batch API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/rbac" + "k8s.io/client-go/pkg/apis/rbac/v1alpha1" + "k8s.io/client-go/pkg/apis/rbac/v1beta1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: rbac.GroupName, + VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version, v1alpha1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/rbac", + RootScopedKinds: sets.NewString("ClusterRole", "ClusterRoleBinding"), + AddInternalObjectsToScheme: rbac.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, + v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/register.go b/vendor/k8s.io/client-go/pkg/apis/rbac/register.go new file mode 100644 index 000000000..f4a838bd8 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rbac + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "rbac.authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Role{}, + &RoleBinding{}, + &RoleBindingList{}, + &RoleList{}, + + &ClusterRole{}, + &ClusterRoleBinding{}, + &ClusterRoleBindingList{}, + &ClusterRoleList{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/types.go b/vendor/k8s.io/client-go/pkg/apis/rbac/types.go new file mode 100644 index 000000000..ddc2456a0 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/types.go @@ -0,0 +1,188 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rbac + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Authorization is calculated against +// 1. evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + + GroupKind = "Group" + ServiceAccountKind = "ServiceAccount" + UserKind = "User" + + // AutoUpdateAnnotationKey is the name of an annotation which prevents reconciliation if set to "false" + AutoUpdateAnnotationKey = "rbac.authorization.kubernetes.io/autoupdate" +) + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +type PolicyRule struct { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + Verbs []string + + // APIGroups is the name of the APIGroup that contains the resources. 
+ // If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. + APIGroups []string + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + Resources []string + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + ResourceNames []string + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // If an action is not a resource API request, then the URL is split on '/' and is checked against the NonResourceURLs to look for a match. + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. + NonResourceURLs []string +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +type Subject struct { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognized the kind value, the Authorizer should report an error. + Kind string + // APIGroup holds the API group of the referenced subject. + // Defaults to "" for ServiceAccount subjects. + // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. + APIGroup string + // Name of the object being referenced. + Name string + // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty + // the Authorizer should report an error. + Namespace string +} + +// RoleRef contains information that points to the role being used +type RoleRef struct { + // APIGroup is the group for the resource being referenced + APIGroup string + // Kind is the type of resource being referenced + Kind string + // Name is the name of resource being referenced + Name string +} + +// +genclient=true + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +type Role struct { + metav1.TypeMeta + // Standard object's metadata. + metav1.ObjectMeta + + // Rules holds all the PolicyRules for this Role + Rules []PolicyRule +} + +// +genclient=true + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. +type RoleBinding struct { + metav1.TypeMeta + metav1.ObjectMeta + + // Subjects holds references to the objects the role applies to. + Subjects []Subject + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef RoleRef +} + +// RoleBindingList is a collection of RoleBindings +type RoleBindingList struct { + metav1.TypeMeta + // Standard object's metadata. 
+ metav1.ListMeta + + // Items is a list of roleBindings + Items []RoleBinding +} + +// RoleList is a collection of Roles +type RoleList struct { + metav1.TypeMeta + // Standard object's metadata. + metav1.ListMeta + + // Items is a list of roles + Items []Role +} + +// +genclient=true +// +nonNamespaced=true + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +type ClusterRole struct { + metav1.TypeMeta + // Standard object's metadata. + metav1.ObjectMeta + + // Rules holds all the PolicyRules for this ClusterRole + Rules []PolicyRule +} + +// +genclient=true +// +nonNamespaced=true + +// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. +type ClusterRoleBinding struct { + metav1.TypeMeta + // Standard object's metadata. + metav1.ObjectMeta + + // Subjects holds references to the objects the role applies to. + Subjects []Subject + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef RoleRef +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +type ClusterRoleBindingList struct { + metav1.TypeMeta + // Standard object's metadata. + metav1.ListMeta + + // Items is a list of ClusterRoleBindings + Items []ClusterRoleBinding +} + +// ClusterRoleList is a collection of ClusterRoles +type ClusterRoleList struct { + metav1.TypeMeta + // Standard object's metadata. + metav1.ListMeta + + // Items is a list of ClusterRoles + Items []ClusterRole +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/conversion.go new file mode 100644 index 000000000..22b3c4076 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/conversion.go @@ -0,0 +1,81 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime/schema" + api "k8s.io/client-go/pkg/apis/rbac" +) + +// allAuthenticated matches k8s.io/apiserver/pkg/authentication/user.AllAuthenticated, +// but we don't want an client library (which must include types), depending on a server library +const allAuthenticated = "system:authenticated" + +func Convert_v1alpha1_Subject_To_rbac_Subject(in *Subject, out *api.Subject, s conversion.Scope) error { + if err := autoConvert_v1alpha1_Subject_To_rbac_Subject(in, out, s); err != nil { + return err + } + + // specifically set the APIGroup for the three subjects recognized in v1alpha1 + switch { + case in.Kind == ServiceAccountKind: + out.APIGroup = "" + case in.Kind == UserKind: + out.APIGroup = GroupName + case in.Kind == GroupKind: + out.APIGroup = GroupName + default: + // For unrecognized kinds, use the group portion of the APIVersion if we can get it + if gv, err := schema.ParseGroupVersion(in.APIVersion); err == nil { + out.APIGroup = gv.Group + } + } + + // User * in v1alpha1 will only match all authenticated users + // This is only for compatibility with old RBAC bindings + // Special treatment for * should not be included in v1beta1 + if out.Kind == UserKind && out.APIGroup == GroupName && out.Name == "*" { + out.Kind = GroupKind + out.Name = allAuthenticated + } + + return nil +} + +func Convert_rbac_Subject_To_v1alpha1_Subject(in *api.Subject, out *Subject, s conversion.Scope) error { + if err := autoConvert_rbac_Subject_To_v1alpha1_Subject(in, out, s); err != nil { + return err + } + + switch { + case in.Kind == ServiceAccountKind && in.APIGroup == "": + // Make service accounts v1 + out.APIVersion = "v1" + case in.Kind == UserKind && in.APIGroup == GroupName: + // users in the rbac API group get v1alpha + out.APIVersion = SchemeGroupVersion.String() + case in.Kind == GroupKind && in.APIGroup == GroupName: + // groups in the rbac API group get v1alpha + out.APIVersion = SchemeGroupVersion.String() + default: + // otherwise, they get an unspecified version of a group + out.APIVersion = schema.GroupVersion{Group: in.APIGroup}.String() + } + + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/defaults.go new file mode 100644 index 000000000..49e934916 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/defaults.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + RegisterDefaults(scheme) + return scheme.AddDefaultingFuncs( + SetDefaults_ClusterRoleBinding, + SetDefaults_RoleBinding, + SetDefaults_Subject, + ) +} + +func SetDefaults_ClusterRoleBinding(obj *ClusterRoleBinding) { + if len(obj.RoleRef.APIGroup) == 0 { + obj.RoleRef.APIGroup = GroupName + } +} +func SetDefaults_RoleBinding(obj *RoleBinding) { + if len(obj.RoleRef.APIGroup) == 0 { + obj.RoleRef.APIGroup = GroupName + } +} +func SetDefaults_Subject(obj *Subject) { + if len(obj.APIVersion) == 0 { + switch obj.Kind { + case ServiceAccountKind: + obj.APIVersion = "v1" + case UserKind: + obj.APIVersion = SchemeGroupVersion.String() + case GroupKind: + obj.APIVersion = SchemeGroupVersion.String() + } + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/doc.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/doc.go new file mode 100644 index 000000000..f43f7bc08 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=rbac.authorization.k8s.io +package v1alpha1 diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/generated.pb.go new file mode 100644 index 000000000..406c12c17 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/generated.pb.go @@ -0,0 +1,2817 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto + + It has these top-level messages: + ClusterRole + ClusterRoleBinding + ClusterRoleBindingBuilder + ClusterRoleBindingList + ClusterRoleList + PolicyRule + PolicyRuleBuilder + Role + RoleBinding + RoleBindingList + RoleList + RoleRef + Subject +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
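// ---------------------------------------------------------------------------
// Editor's note: standalone illustrative sketch, not part of the vendored
// patch. It exercises the v1alpha1 defaulting shown above: a Subject with an
// empty APIVersion is defaulted from its Kind ("v1" for service accounts,
// the rbac group/version for users and groups). The alias rbacv1alpha1 and
// the subject names are assumptions for the example.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"

	rbacv1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1"
)

func main() {
	sa := rbacv1alpha1.Subject{Kind: rbacv1alpha1.ServiceAccountKind, Namespace: "demo", Name: "builder"}
	user := rbacv1alpha1.Subject{Kind: rbacv1alpha1.UserKind, Name: "alice"}

	rbacv1alpha1.SetDefaults_Subject(&sa)
	rbacv1alpha1.SetDefaults_Subject(&user)

	fmt.Println(sa.APIVersion)   // expected "v1" per SetDefaults_Subject above
	fmt.Println(user.APIVersion) // expected the rbac v1alpha1 group/version string
}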
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *ClusterRoleBindingBuilder) Reset() { *m = ClusterRoleBindingBuilder{} } +func (*ClusterRoleBindingBuilder) ProtoMessage() {} +func (*ClusterRoleBindingBuilder) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *PolicyRuleBuilder) Reset() { *m = PolicyRuleBuilder{} } +func (*PolicyRuleBuilder) ProtoMessage() {} +func (*PolicyRuleBuilder) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *RoleRef) Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func init() { + proto.RegisterType((*ClusterRole)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.ClusterRole") + proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.ClusterRoleBinding") + proto.RegisterType((*ClusterRoleBindingBuilder)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.ClusterRoleBindingBuilder") + proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.ClusterRoleBindingList") + proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.ClusterRoleList") + proto.RegisterType((*PolicyRule)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.PolicyRule") + proto.RegisterType((*PolicyRuleBuilder)(nil), 
"k8s.io.client-go.pkg.apis.rbac.v1alpha1.PolicyRuleBuilder") + proto.RegisterType((*Role)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.Role") + proto.RegisterType((*RoleBinding)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.RoleBinding") + proto.RegisterType((*RoleBindingList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.RoleBindingList") + proto.RegisterType((*RoleList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.RoleList") + proto.RegisterType((*RoleRef)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.RoleRef") + proto.RegisterType((*Subject)(nil), "k8s.io.client-go.pkg.apis.rbac.v1alpha1.Subject") +} +func (m *ClusterRole) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRole) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ClusterRoleBinding) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleBinding) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n2, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size())) + n3, err := m.RoleRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *ClusterRoleBindingBuilder) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleBindingBuilder) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ClusterRoleBinding.Size())) + n4, err := m.ClusterRoleBinding.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + return i, nil +} + +func (m *ClusterRoleBindingList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleBindingList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + 
} + i += n + } + } + return i, nil +} + +func (m *ClusterRoleList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PolicyRule) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PolicyRule) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + data[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + data[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + data[i] = 0x32 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *PolicyRuleBuilder) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PolicyRuleBuilder) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.PolicyRule.Size())) + n7, err := m.PolicyRule.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + return i, nil +} + +func (m *Role) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Role) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n8, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m 
*RoleBinding) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleBinding) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size())) + n10, err := m.RoleRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + return i, nil +} + +func (m *RoleBindingList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleBindingList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n11, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RoleList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n12, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RoleRef) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleRef) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIGroup))) + i += copy(data[i:], m.APIGroup) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + return i, nil +} + +func (m *Subject) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Subject) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) + i += 
copy(data[i:], m.APIVersion) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) + i += copy(data[i:], m.Namespace) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *ClusterRole) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRoleBinding) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterRoleBindingBuilder) Size() (n int) { + var l int + _ = l + l = m.ClusterRoleBinding.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterRoleBindingList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRoleList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRule) Size() (n int) { + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRuleBuilder) Size() (n int) { + var l int + _ = l + l = m.PolicyRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Role) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleBinding) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + 
if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RoleBindingList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleRef) Size() (n int) { + var l int + _ = l + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Subject) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterRole) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRole{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBindingBuilder) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleBindingBuilder{`, + `ClusterRoleBinding:` + strings.Replace(strings.Replace(this.ClusterRoleBinding.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBindingList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", 
"k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRuleBuilder) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PolicyRuleBuilder{`, + `PolicyRule:` + strings.Replace(strings.Replace(this.PolicyRule.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Role) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Role{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleBindingList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleRef{`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Subject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Subject{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func 
valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterRole) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBinding) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { 
+ break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBindingBuilder) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBindingBuilder: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBindingBuilder: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleBinding", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClusterRoleBinding.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBindingList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > 
l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRole{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRule) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + 
if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRuleBuilder) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRuleBuilder: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRuleBuilder: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PolicyRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PolicyRule.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return 
nil +} +func (m *Role) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Role: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBinding) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err 
!= nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RoleBinding{}) + if 
err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Role{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleRef) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Subject) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + 
if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 847 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x54, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xce, 0xb4, 0x09, 0x4d, 0x5e, 0xa9, 0x4a, 0x07, 0x09, 0x99, 0x1e, 0x9c, 0xca, 0xa7, 0x0a, + 0x16, 0x9b, 0x96, 0x05, 0xf6, 0x00, 0x87, 0x35, 0x07, 0x54, 0xb1, 0x94, 0x6a, 0x56, 0xac, 0xc4, + 0x6a, 0x25, 0x98, 0x38, 0xb3, 0xc9, 0x10, 0xff, 0xd2, 0x8c, 0x1d, 0xb1, 0x42, 0x48, 0x1c, 0x39, + 0xf2, 0x57, 0x70, 0xe4, 0x80, 0xc4, 0x91, 0x13, 0x97, 0x0a, 0x2e, 0x3d, 0xc2, 0x25, 0xa2, 0xe6, + 0x0f, 0x01, 0x79, 0x3c, 0xfe, 0x51, 0x9c, 0xaa, 0x3f, 0x90, 0x22, 0x21, 0x71, 0x4a, 0xfc, 0xde, + 0xf7, 0x7d, 0xf3, 0xbe, 0x79, 0xf6, 0x07, 0xf7, 0x66, 0xf7, 0xa4, 0xcd, 0x23, 0x67, 0x96, 0x8e, + 0x98, 0x08, 0x59, 0xc2, 0xa4, 0x13, 0xcf, 0x26, 0x0e, 0x8d, 0xb9, 0x74, 0xc4, 0x88, 0x7a, 0xce, + 0xfc, 0x80, 0xfa, 0xf1, 0x94, 0x1e, 0x38, 0x13, 0x16, 0x32, 0x41, 0x13, 0x36, 0xb6, 0x63, 0x11, + 0x25, 0x11, 0xde, 0x2f, 0x98, 0x76, 0xcd, 0xb4, 0xe3, 0xd9, 0xc4, 0xce, 0x99, 0x76, 0xce, 0xb4, + 0x4b, 0xe6, 0xee, 0x6b, 0x13, 0x9e, 0x4c, 0xd3, 0x91, 0xed, 0x45, 0x81, 0x33, 0x89, 0x26, 0x91, + 0xa3, 0x04, 0x46, 0xe9, 0x53, 0xf5, 0xa4, 0x1e, 0xd4, 0xbf, 0x42, 0x78, 0xf7, 0xae, 0x1e, 0x89, + 0xc6, 0x3c, 0xa0, 0xde, 0x94, 0x87, 0x4c, 0x3c, 0xab, 0x87, 0x0a, 0x58, 0x42, 0x9d, 0x79, 0x6b, + 0x9c, 0x5d, 0xe7, 0x32, 0x96, 0x48, 0xc3, 0x84, 0x07, 0xac, 0x45, 0x78, 0xeb, 0x2a, 0x82, 0xf4, + 0xa6, 0x2c, 0xa0, 0x2d, 0xde, 0x1b, 0x97, 0xf1, 0xd2, 0x84, 0xfb, 0x0e, 0x0f, 0x13, 0x99, 0x88, + 0x16, 0xa9, 0xe1, 0x49, 0x32, 0x31, 0x67, 0xa2, 0x36, 0xc4, 0xbe, 0xa0, 0x41, 0xec, 0xb3, 0x65, + 0x9e, 0xee, 0x5c, 0xba, 0x9c, 0x25, 0x68, 0xeb, 0x17, 0x04, 0x9b, 0xef, 0xf9, 0xa9, 0x4c, 0x98, + 0x20, 0x91, 0xcf, 0xf0, 0x67, 0xd0, 0xcf, 0x2f, 0x6b, 0x4c, 0x13, 0x6a, 0xa0, 0x3d, 0xb4, 0xbf, + 0x79, 0xf8, 0xba, 0xad, 0x77, 0xd6, 0x9c, 0xbd, 0xde, 0x5a, 0x8e, 0xb6, 0xe7, 0x07, 0xf6, 0x47, + 0xa3, 0xcf, 0x99, 0x97, 0x7c, 0xc8, 0x12, 0xea, 0xe2, 0xd3, 0xc5, 0xb0, 0x93, 0x2d, 0x86, 0x50, + 0xd7, 0x48, 0xa5, 0x8a, 0x3f, 0x81, 0x9e, 0x48, 0x7d, 0x26, 0x8d, 0xb5, 0xbd, 0xf5, 0xfd, 0xcd, + 0xc3, 0xbb, 0xf6, 0x75, 0x5f, 0x09, 0xfb, 0x24, 0xf2, 0xb9, 0xf7, 0x8c, 0xa4, 0x3e, 0x73, 0xb7, + 0xf4, 0x11, 0xbd, 0xfc, 0x49, 0x92, 0x42, 0xd1, 0xfa, 0x71, 0x0d, 0x70, 0xc3, 0x8c, 0xcb, 0xc3, + 0x31, 0x0f, 0x27, 0x2b, 0xf0, 0xf4, 0x29, 0xf4, 0x65, 0xaa, 0x1a, 0xa5, 0xad, 0x83, 0xeb, 0xdb, + 0x7a, 0x58, 0x30, 0xdd, 0x17, 0xf4, 0x11, 0x7d, 0x5d, 0x90, 0xa4, 0x12, 0xc5, 0x4f, 0x60, 0x43, + 0x44, 0x3e, 0x23, 0xec, 0xa9, 0xb1, 0xae, 0x1c, 0xdc, 0x40, 0x9f, 0x14, 0x44, 0x77, 0x5b, 0xeb, + 0x6f, 0xe8, 0x02, 0x29, 0x25, 0xad, 0xef, 0x10, 0xbc, 0xdc, 0xbe, 0x37, 0x37, 0xe5, 0xfe, 0x98, + 0x09, 0xfc, 0x0d, 0x02, 0xec, 0xb5, 0xba, 0xfa, 0x26, 0xdf, 0xb9, 0xfe, 0x1c, 0x4b, 0x4e, 0xd8, + 0xd5, 0x23, 0x2d, 0xd9, 0x1a, 0x59, 0x72, 0xa6, 0xf5, 0x3b, 0x82, 0x97, 0xda, 0xd0, 0x07, 0x5c, + 0x26, 0xf8, 0x49, 0x6b, 0xc9, 0xf6, 0xf5, 0x96, 0x9c, 0xb3, 0xd5, 0x8a, 0xab, 0xfb, 0x2f, 0x2b, + 0x8d, 0x05, 0x53, 0xe8, 0xf1, 0x84, 0x05, 0xe5, 0x76, 0xff, 0x9d, 0xeb, 0xea, 0xe5, 
0x3d, 0xca, + 0x25, 0x49, 0xa1, 0x6c, 0xfd, 0x8a, 0x60, 0xbb, 0x01, 0x5e, 0x81, 0xa9, 0xc7, 0x17, 0x4d, 0xbd, + 0x79, 0x3b, 0x53, 0xcb, 0xdd, 0xfc, 0x85, 0x00, 0xea, 0xef, 0x15, 0x0f, 0xa1, 0x37, 0x67, 0x62, + 0x24, 0x0d, 0xb4, 0xb7, 0xbe, 0x3f, 0x70, 0x07, 0x39, 0xfe, 0x51, 0x5e, 0x20, 0x45, 0x1d, 0xbf, + 0x0a, 0x03, 0x1a, 0xf3, 0xf7, 0x45, 0x94, 0xc6, 0xd2, 0x58, 0x57, 0xa0, 0xad, 0x6c, 0x31, 0x1c, + 0xdc, 0x3f, 0x39, 0x2a, 0x8a, 0xa4, 0xee, 0xe7, 0x60, 0xc1, 0x64, 0x94, 0x0a, 0x8f, 0x49, 0xa3, + 0x5b, 0x83, 0x49, 0x59, 0x24, 0x75, 0x1f, 0xbf, 0x0d, 0x5b, 0xe5, 0xc3, 0x31, 0x0d, 0x98, 0x34, + 0x7a, 0x8a, 0xb0, 0x93, 0x2d, 0x86, 0x5b, 0xa4, 0xd9, 0x20, 0x17, 0x71, 0xf8, 0x5d, 0xd8, 0x0e, + 0xa3, 0xb0, 0x84, 0x7c, 0x4c, 0x1e, 0x48, 0xe3, 0x39, 0x45, 0x7d, 0x31, 0x5b, 0x0c, 0xb7, 0x8f, + 0x2f, 0xb6, 0xc8, 0x3f, 0xb1, 0xd6, 0x57, 0xb0, 0xd3, 0x08, 0x2c, 0xfd, 0x2d, 0x4d, 0x01, 0xe2, + 0xaa, 0xa8, 0x57, 0x7a, 0xbb, 0x04, 0xac, 0x02, 0xa9, 0xae, 0x91, 0x86, 0xb6, 0xf5, 0x33, 0x82, + 0xee, 0x7f, 0x3f, 0xd1, 0xbf, 0x5f, 0x83, 0xcd, 0xff, 0xa3, 0xfc, 0x06, 0x51, 0x9e, 0xa7, 0xc8, + 0x6a, 0xa3, 0xf1, 0xf6, 0x29, 0x72, 0x75, 0x26, 0xfe, 0x84, 0xa0, 0xbf, 0xa2, 0x30, 0x7c, 0x78, + 0xd1, 0x86, 0x7d, 0x43, 0x1b, 0xcb, 0xe7, 0xff, 0x12, 0xca, 0x0d, 0xe1, 0x3b, 0xd0, 0x2f, 0x03, + 0x4c, 0x4d, 0x3f, 0xa8, 0xa7, 0x29, 0x33, 0x8e, 0x54, 0x08, 0xbc, 0x07, 0xdd, 0x19, 0x0f, 0xc7, + 0xc6, 0x9a, 0x42, 0x3e, 0xaf, 0x91, 0xdd, 0x0f, 0x78, 0x38, 0x26, 0xaa, 0x93, 0x23, 0x42, 0x1a, + 0x30, 0xf5, 0x0e, 0x35, 0x10, 0x79, 0x74, 0x11, 0xd5, 0xb1, 0x7e, 0x40, 0xb0, 0xa1, 0xdf, 0xbf, + 0x4a, 0x0f, 0x5d, 0xaa, 0x77, 0x08, 0x40, 0x63, 0xfe, 0x88, 0x09, 0xc9, 0xa3, 0x50, 0x9f, 0x5b, + 0x7d, 0x29, 0xf7, 0x4f, 0x8e, 0x74, 0x87, 0x34, 0x50, 0x57, 0xcf, 0x80, 0x1d, 0x18, 0xe4, 0xbf, + 0x32, 0xa6, 0x1e, 0x33, 0xba, 0x0a, 0xb6, 0xa3, 0x61, 0x83, 0xe3, 0xb2, 0x41, 0x6a, 0x8c, 0xfb, + 0xca, 0xe9, 0xb9, 0xd9, 0x39, 0x3b, 0x37, 0x3b, 0xbf, 0x9d, 0x9b, 0x9d, 0xaf, 0x33, 0x13, 0x9d, + 0x66, 0x26, 0x3a, 0xcb, 0x4c, 0xf4, 0x47, 0x66, 0xa2, 0x6f, 0xff, 0x34, 0x3b, 0x8f, 0xfb, 0xe5, + 0xc5, 0xff, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x53, 0x64, 0x1d, 0x0e, 0x87, 0x0c, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/generated.proto new file mode 100644 index 000000000..5a75fb240 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/generated.proto @@ -0,0 +1,202 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+
+syntax = 'proto2';
+
+package k8s.io.kubernetes.pkg.apis.rbac.v1alpha1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
+import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto";
+import "k8s.io/kubernetes/pkg/api/v1/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1alpha1";
+
+// ClusterRole is a cluster-level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
+message ClusterRole {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Rules holds all the PolicyRules for this ClusterRole
+  repeated PolicyRule rules = 2;
+}
+
+// ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference a ClusterRole in the global namespace,
+// and adds who information via Subject.
+message ClusterRoleBinding {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Subjects holds references to the objects the role applies to.
+  repeated Subject subjects = 2;
+
+  // RoleRef can only reference a ClusterRole in the global namespace.
+  // If the RoleRef cannot be resolved, the Authorizer must return an error.
+  optional RoleRef roleRef = 3;
+}
+
+// +k8s:deepcopy-gen=false
+// ClusterRoleBindingBuilder lets us attach methods. A no-no for API types.
+// We use it to construct bindings in code. It's more compact than trying to write them
+// out in a literal.
+message ClusterRoleBindingBuilder {
+  optional ClusterRoleBinding clusterRoleBinding = 1;
+}
+
+// ClusterRoleBindingList is a collection of ClusterRoleBindings
+message ClusterRoleBindingList {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of ClusterRoleBindings
+  repeated ClusterRoleBinding items = 2;
+}
+
+// ClusterRoleList is a collection of ClusterRoles
+message ClusterRoleList {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of ClusterRoles
+  repeated ClusterRole items = 2;
+}
+
+// PolicyRule holds information that describes a policy rule, but does not contain information
+// about who the rule applies to or which namespace the rule applies to.
+message PolicyRule {
+  // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
+  repeated string verbs = 1;
+
+  // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of
+  // the enumerated resources in any API group will be allowed.
+  // +optional
+  repeated string apiGroups = 3;
+
+  // Resources is a list of resources this rule applies to. ResourceAll represents all resources.
+  // +optional
+  repeated string resources = 4;
+
+  // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
+  // +optional
+  repeated string resourceNames = 5;
+
+  // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path.
+  // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different.
+  // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.
+  // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both.
+  // +optional
+  repeated string nonResourceURLs = 6;
+}
+
+// +k8s:deepcopy-gen=false
+// PolicyRuleBuilder lets us attach methods. A no-no for API types.
+// We use it to construct rules in code. It's more compact than trying to write them
+// out in a literal and allows us to perform some basic checking during construction
+message PolicyRuleBuilder {
+  optional PolicyRule policyRule = 1;
+}
+
+// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
+message Role {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Rules holds all the PolicyRules for this Role
+  repeated PolicyRule rules = 2;
+}
+
+// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace.
+// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given
+// namespace only have effect in that namespace.
+message RoleBinding {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Subjects holds references to the objects the role applies to.
+  repeated Subject subjects = 2;
+
+  // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace.
+  // If the RoleRef cannot be resolved, the Authorizer must return an error.
+  optional RoleRef roleRef = 3;
+}
+
+// RoleBindingList is a collection of RoleBindings
+message RoleBindingList {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of RoleBindings
+  repeated RoleBinding items = 2;
+}
+
+// RoleList is a collection of Roles
+message RoleList {
+  // Standard object's metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of Roles
+  repeated Role items = 2;
+}
+
+// RoleRef contains information that points to the role being used
+message RoleRef {
+  // APIGroup is the group for the resource being referenced
+  optional string apiGroup = 1;
+
+  // Kind is the type of resource being referenced
+  optional string kind = 2;
+
+  // Name is the name of the resource being referenced
+  optional string name = 3;
+}
+
+// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference,
+// or a value for non-objects such as user and group names.
+message Subject {
+  // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount".
+  // If the Authorizer does not recognize the kind value, the Authorizer should report an error.
+  optional string kind = 1;
+
+  // APIVersion holds the API group and version of the referenced subject.
+  // Defaults to "v1" for ServiceAccount subjects.
+  // Defaults to "rbac.authorization.k8s.io/v1alpha1" for User and Group subjects.
+ // +k8s:conversion-gen=false + // +optional + optional string apiVersion = 2; + + // Name of the object being referenced. + optional string name = 3; + + // Namespace of the referenced object. If the object kind is non-namespaced, such as "User" or "Group", and this value is not empty, + // the Authorizer should report an error. + // +optional + optional string namespace = 4; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/helpers.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/helpers.go new file mode 100644 index 000000000..f417f3be0 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/helpers.go @@ -0,0 +1,146 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PolicyRuleBuilder lets us attach methods. A no-no for API types. +// We use it to construct rules in code. It's more compact than trying to write them +// out in a literal and allows us to perform some basic checking during construction +type PolicyRuleBuilder struct { + PolicyRule PolicyRule `protobuf:"bytes,1,opt,name=policyRule"` +} + +func NewRule(verbs ...string) *PolicyRuleBuilder { + return &PolicyRuleBuilder{ + PolicyRule: PolicyRule{Verbs: verbs}, + } +} + +func (r *PolicyRuleBuilder) Groups(groups ...string) *PolicyRuleBuilder { + r.PolicyRule.APIGroups = append(r.PolicyRule.APIGroups, groups...) + return r +} + +func (r *PolicyRuleBuilder) Resources(resources ...string) *PolicyRuleBuilder { + r.PolicyRule.Resources = append(r.PolicyRule.Resources, resources...) + return r +} + +func (r *PolicyRuleBuilder) Names(names ...string) *PolicyRuleBuilder { + r.PolicyRule.ResourceNames = append(r.PolicyRule.ResourceNames, names...) + return r +} + +func (r *PolicyRuleBuilder) URLs(urls ...string) *PolicyRuleBuilder { + r.PolicyRule.NonResourceURLs = append(r.PolicyRule.NonResourceURLs, urls...)
+ return r +} + +func (r *PolicyRuleBuilder) RuleOrDie() PolicyRule { + ret, err := r.Rule() + if err != nil { + panic(err) + } + return ret +} + +func (r *PolicyRuleBuilder) Rule() (PolicyRule, error) { + if len(r.PolicyRule.Verbs) == 0 { + return PolicyRule{}, fmt.Errorf("verbs are required: %#v", r.PolicyRule) + } + + switch { + case len(r.PolicyRule.NonResourceURLs) > 0: + if len(r.PolicyRule.APIGroups) != 0 || len(r.PolicyRule.Resources) != 0 || len(r.PolicyRule.ResourceNames) != 0 { + return PolicyRule{}, fmt.Errorf("non-resource rule may not have apiGroups, resources, or resourceNames: %#v", r.PolicyRule) + } + case len(r.PolicyRule.Resources) > 0: + if len(r.PolicyRule.NonResourceURLs) != 0 { + return PolicyRule{}, fmt.Errorf("resource rule may not have nonResourceURLs: %#v", r.PolicyRule) + } + if len(r.PolicyRule.APIGroups) == 0 { + // this is a common bug + return PolicyRule{}, fmt.Errorf("resource rule must have apiGroups: %#v", r.PolicyRule) + } + default: + return PolicyRule{}, fmt.Errorf("a rule must have either nonResourceURLs or resources: %#v", r.PolicyRule) + } + + return r.PolicyRule, nil +} + +// ClusterRoleBindingBuilder lets us attach methods. A no-no for API types. +// We use it to construct bindings in code. It's more compact than trying to write them +// out in a literal. +type ClusterRoleBindingBuilder struct { + ClusterRoleBinding ClusterRoleBinding `protobuf:"bytes,1,opt,name=clusterRoleBinding"` +} + +func NewClusterBinding(clusterRoleName string) *ClusterRoleBindingBuilder { + return &ClusterRoleBindingBuilder{ + ClusterRoleBinding: ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: clusterRoleName}, + RoleRef: RoleRef{ + APIGroup: GroupName, + Kind: "ClusterRole", + Name: clusterRoleName, + }, + }, + } +} + +func (r *ClusterRoleBindingBuilder) Groups(groups ...string) *ClusterRoleBindingBuilder { + for _, group := range groups { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: GroupKind, Name: group}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) Users(users ...string) *ClusterRoleBindingBuilder { + for _, user := range users { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: UserKind, Name: user}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *ClusterRoleBindingBuilder { + for _, saName := range serviceAccountNames { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: ServiceAccountKind, Namespace: namespace, Name: saName}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) BindingOrDie() ClusterRoleBinding { + ret, err := r.Binding() + if err != nil { + panic(err) + } + return ret +} + +func (r *ClusterRoleBindingBuilder) Binding() (ClusterRoleBinding, error) { + if len(r.ClusterRoleBinding.Subjects) == 0 { + return ClusterRoleBinding{}, fmt.Errorf("subjects are required: %#v", r.ClusterRoleBinding) + } + + return r.ClusterRoleBinding, nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/register.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/register.go new file mode 100644 index 000000000..3977e99c5 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "rbac.authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Role{}, + &RoleBinding{}, + &RoleBindingList{}, + &RoleList{}, + + &ClusterRole{}, + &ClusterRoleBinding{}, + &ClusterRoleBindingList{}, + &ClusterRoleList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types.generated.go new file mode 100644 index 000000000..64ba53a9b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types.generated.go @@ -0,0 +1,4879 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +package v1alpha1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 + } +} + +func (x *PolicyRule) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = len(x.APIGroups) != 0 + yyq2[2] = len(x.Resources) != 0 + yyq2[3] = len(x.ResourceNames) != 0 + yyq2[4] = len(x.NonResourceURLs) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Verbs == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.Verbs, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("verbs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Verbs == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.Verbs, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.APIGroups == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncSliceStringV(x.APIGroups, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiGroups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.APIGroups == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncSliceStringV(x.APIGroups, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Resources == nil { + r.EncodeNil() + } 
else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + z.F.EncSliceStringV(x.Resources, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resources")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Resources == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + z.F.EncSliceStringV(x.Resources, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.ResourceNames == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + z.F.EncSliceStringV(x.ResourceNames, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceNames")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceNames == nil { + r.EncodeNil() + } else { + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + z.F.EncSliceStringV(x.ResourceNames, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.NonResourceURLs == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncSliceStringV(x.NonResourceURLs, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nonResourceURLs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NonResourceURLs == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + z.F.EncSliceStringV(x.NonResourceURLs, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PolicyRule) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PolicyRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "verbs": + if r.TryDecodeAsNil() { + x.Verbs = nil + } else { + 
yyv4 := &x.Verbs + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + case "apiGroups": + if r.TryDecodeAsNil() { + x.APIGroups = nil + } else { + yyv6 := &x.APIGroups + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecSliceStringX(yyv6, false, d) + } + } + case "resources": + if r.TryDecodeAsNil() { + x.Resources = nil + } else { + yyv8 := &x.Resources + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceStringX(yyv8, false, d) + } + } + case "resourceNames": + if r.TryDecodeAsNil() { + x.ResourceNames = nil + } else { + yyv10 := &x.ResourceNames + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + z.F.DecSliceStringX(yyv10, false, d) + } + } + case "nonResourceURLs": + if r.TryDecodeAsNil() { + x.NonResourceURLs = nil + } else { + yyv12 := &x.NonResourceURLs + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + z.F.DecSliceStringX(yyv12, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PolicyRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Verbs = nil + } else { + yyv15 := &x.Verbs + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + z.F.DecSliceStringX(yyv15, false, d) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIGroups = nil + } else { + yyv17 := &x.APIGroups + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + z.F.DecSliceStringX(yyv17, false, d) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Resources = nil + } else { + yyv19 := &x.Resources + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + z.F.DecSliceStringX(yyv19, false, d) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ResourceNames = nil + } else { + yyv21 := &x.ResourceNames + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + z.F.DecSliceStringX(yyv21, false, d) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NonResourceURLs = nil + } else { + yyv23 := &x.NonResourceURLs + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + z.F.DecSliceStringX(yyv23, false, d) + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = 
r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Subject) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.APIVersion != "" + yyq2[3] = x.Namespace != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Subject) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } 
else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Subject) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv10 := &x.Namespace + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Subject) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv17 := &x.Name + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + 
*((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv19 := &x.Namespace + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RoleRef) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiGroup")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleRef) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + 
x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleRef) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "apiGroup": + if r.TryDecodeAsNil() { + x.APIGroup = "" + } else { + yyv4 := &x.APIGroup + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv6 := &x.Kind + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RoleRef) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIGroup = "" + } else { + yyv11 := &x.APIGroup + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv15 := &x.Name + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Role) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, 
z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rules")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Role) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == 
codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Role) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "rules": + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv10 := &x.Rules + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Role) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if 
z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv19 := &x.Rules + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym15 := 
z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subjects")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy18 := &x.RoleRef + yy18.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("roleRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy20 := &x.RoleRef + yy20.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleBinding) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "subjects": + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv10 := &x.Subjects + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv10), d) + } + } + case "roleRef": + if r.TryDecodeAsNil() { + x.RoleRef = RoleRef{} + } else { + yyv12 := &x.RoleRef + yyv12.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func 
(x *RoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv14 := &x.Kind + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv16 := &x.APIVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv18 := &x.ObjectMeta + yym19 := z.DecBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.DecExt(yyv18) { + } else { + z.DecFallback(yyv18, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv20 := &x.Subjects + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv20), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RoleRef = RoleRef{} + } else { + yyv22 := &x.RoleRef + yyv22.CodecDecodeSelf(d) + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + 
r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceRoleBinding(([]RoleBinding)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceRoleBinding(([]RoleBinding)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleBindingList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 
>= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceRoleBinding((*[]RoleBinding)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceRoleBinding((*[]RoleBinding)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) 
+} + +func (x *RoleList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceRole(([]Role)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceRole(([]Role)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { 
+ } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceRole((*[]Role)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := 
&x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceRole((*[]Role)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRole) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + 
if x.Rules == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rules")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRole) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRole) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "rules": + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv10 := &x.Rules + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRole) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + 
if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv19 := &x.Rules + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + 
z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subjects")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy18 := &x.RoleRef + yy18.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("roleRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy20 := &x.RoleRef + yy20.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRoleBinding) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + 
*((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "subjects": + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv10 := &x.Subjects + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv10), d) + } + } + case "roleRef": + if r.TryDecodeAsNil() { + x.RoleRef = RoleRef{} + } else { + yyv12 := &x.RoleRef + yyv12.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv14 := &x.Kind + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv16 := &x.APIVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv18 := &x.ObjectMeta + yym19 := z.DecBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.DecExt(yyv18) { + } else { + z.DecFallback(yyv18, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv20 := &x.Subjects + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv20), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RoleRef = RoleRef{} + } else { + yyv22 := &x.RoleRef + yyv22.CodecDecodeSelf(d) + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, 
"") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRoleBindingList) CodecDecodeSelf(d 
*codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRoleList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := 
&x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceClusterRole(([]ClusterRole)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceClusterRole(([]ClusterRole)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRoleList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceClusterRole((*[]ClusterRole)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var 
yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceClusterRole((*[]ClusterRole)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSlicePolicyRule(v []PolicyRule, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePolicyRule(v *[]PolicyRule, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PolicyRule{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 120) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PolicyRule, yyrl1) + } + } else { + yyv1 = make([]PolicyRule, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PolicyRule{} + } else { + yyv2 := 
&yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PolicyRule{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PolicyRule{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PolicyRule{}) // var yyz1 PolicyRule + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PolicyRule{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PolicyRule{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceSubject(v []Subject, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceSubject(v *[]Subject, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Subject{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Subject, yyrl1) + } + } else { + yyv1 = make([]Subject, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Subject{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Subject{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Subject{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Subject{}) // var yyz1 Subject + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Subject{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Subject{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceRoleBinding(v []RoleBinding, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + 
+func (x codecSelfer1234) decSliceRoleBinding(v *[]RoleBinding, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []RoleBinding{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 328) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]RoleBinding, yyrl1) + } + } else { + yyv1 = make([]RoleBinding, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = RoleBinding{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, RoleBinding{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = RoleBinding{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, RoleBinding{}) // var yyz1 RoleBinding + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = RoleBinding{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []RoleBinding{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceRole(v []Role, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceRole(v *[]Role, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Role{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Role, yyrl1) + } + } else { + yyv1 = make([]Role, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Role{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Role{}) + 
yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Role{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Role{}) // var yyz1 Role + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Role{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Role{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceClusterRoleBinding(v []ClusterRoleBinding, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceClusterRoleBinding(v *[]ClusterRoleBinding, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ClusterRoleBinding{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 328) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ClusterRoleBinding, yyrl1) + } + } else { + yyv1 = make([]ClusterRoleBinding, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRoleBinding{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ClusterRoleBinding{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRoleBinding{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ClusterRoleBinding{}) // var yyz1 ClusterRoleBinding + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRoleBinding{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ClusterRoleBinding{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceClusterRole(v []ClusterRole, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + 
+func (x codecSelfer1234) decSliceClusterRole(v *[]ClusterRole, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ClusterRole{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ClusterRole, yyrl1) + } + } else { + yyv1 = make([]ClusterRole, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRole{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ClusterRole{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRole{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ClusterRole{}) // var yyz1 ClusterRole + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRole{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ClusterRole{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types.go new file mode 100644 index 000000000..e9f8efb3b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types.go @@ -0,0 +1,209 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Authorization is calculated against +// 1. evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + + GroupKind = "Group" + ServiceAccountKind = "ServiceAccount" + UserKind = "User" + + // AutoUpdateAnnotationKey is the name of an annotation which prevents reconciliation if set to "false" + AutoUpdateAnnotationKey = "rbac.authorization.kubernetes.io/autoupdate" +) + +// Authorization is calculated against +// 1. 
evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +type PolicyRule struct { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. + // +optional + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,3,rep,name=apiGroups"` + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // +optional + Resources []string `json:"resources,omitempty" protobuf:"bytes,4,rep,name=resources"` + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"` + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. + // +optional + NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,6,rep,name=nonResourceURLs"` +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +type Subject struct { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognized the kind value, the Authorizer should report an error. + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // APIVersion holds the API group and version of the referenced subject. + // Defaults to "v1" for ServiceAccount subjects. + // Defaults to "rbac.authorization.k8s.io/v1alpha1" for User and Group subjects. + // +k8s:conversion-gen=false + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt.name=apiVersion"` + // Name of the object being referenced. + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` + // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty + // the Authorizer should report an error. 
+ // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` +} + +// RoleRef contains information that points to the role being used +type RoleRef struct { + // APIGroup is the group for the resource being referenced + APIGroup string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"` + // Kind is the type of resource being referenced + Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` + // Name is the name of resource being referenced + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` +} + +// +genclient=true + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +type Role struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules holds all the PolicyRules for this Role + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` +} + +// +genclient=true + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. +type RoleBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Subjects holds references to the objects the role applies to. + Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"` + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` +} + +// RoleBindingList is a collection of RoleBindings +type RoleBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of RoleBindings + Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// RoleList is a collection of Roles +type RoleList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of Roles + Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true +// +nonNamespaced=true + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +type ClusterRole struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules holds all the PolicyRules for this ClusterRole + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` +} + +// +genclient=true +// +nonNamespaced=true + +// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. +type ClusterRoleBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Subjects holds references to the objects the role applies to. + Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"` + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +type ClusterRoleBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoleBindings + Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ClusterRoleList is a collection of ClusterRoles +type ClusterRoleList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoles + Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..d58a722af --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,148 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_ClusterRole = map[string]string{ + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", +} + +func (ClusterRole) SwaggerDoc() map[string]string { + return map_ClusterRole +} + +var map_ClusterRoleBinding = map[string]string{ + "": "ClusterRoleBinding references a ClusterRole, but not contain it. 
It can reference a ClusterRole in the global namespace, and adds who information via Subject.", + "metadata": "Standard object's metadata.", + "subjects": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (ClusterRoleBinding) SwaggerDoc() map[string]string { + return map_ClusterRoleBinding +} + +var map_ClusterRoleBindingList = map[string]string{ + "": "ClusterRoleBindingList is a collection of ClusterRoleBindings", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoleBindings", +} + +func (ClusterRoleBindingList) SwaggerDoc() map[string]string { + return map_ClusterRoleBindingList +} + +var map_ClusterRoleList = map[string]string{ + "": "ClusterRoleList is a collection of ClusterRoles", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoles", +} + +func (ClusterRoleList) SwaggerDoc() map[string]string { + return map_ClusterRoleList +} + +var map_PolicyRule = map[string]string{ + "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", + "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", + "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", +} + +func (PolicyRule) SwaggerDoc() map[string]string { + return map_PolicyRule +} + +var map_Role = map[string]string{ + "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this Role", +} + +func (Role) SwaggerDoc() map[string]string { + return map_Role +} + +var map_RoleBinding = map[string]string{ + "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", + "metadata": "Standard object's metadata.", + "subjects": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. 
If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (RoleBinding) SwaggerDoc() map[string]string { + return map_RoleBinding +} + +var map_RoleBindingList = map[string]string{ + "": "RoleBindingList is a collection of RoleBindings", + "metadata": "Standard object's metadata.", + "items": "Items is a list of RoleBindings", +} + +func (RoleBindingList) SwaggerDoc() map[string]string { + return map_RoleBindingList +} + +var map_RoleList = map[string]string{ + "": "RoleList is a collection of Roles", + "metadata": "Standard object's metadata.", + "items": "Items is a list of Roles", +} + +func (RoleList) SwaggerDoc() map[string]string { + return map_RoleList +} + +var map_RoleRef = map[string]string{ + "": "RoleRef contains information that points to the role being used", + "apiGroup": "APIGroup is the group for the resource being referenced", + "kind": "Kind is the type of resource being referenced", + "name": "Name is the name of resource being referenced", +} + +func (RoleRef) SwaggerDoc() map[string]string { + return map_RoleRef +} + +var map_Subject = map[string]string{ + "": "Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names.", + "kind": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.", + "apiVersion": "APIVersion holds the API group and version of the referenced subject. Defaults to \"v1\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io/v1alpha1\" for User and Group subjects.", + "name": "Name of the object being referenced.", + "namespace": "Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.", +} + +func (Subject) SwaggerDoc() map[string]string { + return map_Subject +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go new file mode 100644 index 000000000..1471d529f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,445 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1alpha1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + rbac "k8s.io/client-go/pkg/apis/rbac" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole, + Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole, + Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding, + Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding, + Convert_v1alpha1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder, + Convert_rbac_ClusterRoleBindingBuilder_To_v1alpha1_ClusterRoleBindingBuilder, + Convert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList, + Convert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList, + Convert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList, + Convert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList, + Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule, + Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule, + Convert_v1alpha1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder, + Convert_rbac_PolicyRuleBuilder_To_v1alpha1_PolicyRuleBuilder, + Convert_v1alpha1_Role_To_rbac_Role, + Convert_rbac_Role_To_v1alpha1_Role, + Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding, + Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding, + Convert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList, + Convert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList, + Convert_v1alpha1_RoleList_To_rbac_RoleList, + Convert_rbac_RoleList_To_v1alpha1_RoleList, + Convert_v1alpha1_RoleRef_To_rbac_RoleRef, + Convert_rbac_RoleRef_To_v1alpha1_RoleRef, + Convert_v1alpha1_Subject_To_rbac_Subject, + Convert_rbac_Subject_To_v1alpha1_Subject, + ) +} + +func autoConvert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules)) + return nil +} + +func Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in, out, s) +} + +func autoConvert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if in.Rules == nil { + out.Rules = make([]PolicyRule, 0) + } else { + out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) + } + return nil +} + +func Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error { + return autoConvert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in, out, s) +} + +func autoConvert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]rbac.Subject, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_Subject_To_rbac_Subject(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + if err := Convert_v1alpha1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in, out, s) +} + +func autoConvert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error { + out.ObjectMeta = 
in.ObjectMeta + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + for i := range *in { + if err := Convert_rbac_Subject_To_v1alpha1_Subject(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subjects = make([]Subject, 0) + } + if err := Convert_rbac_RoleRef_To_v1alpha1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { + return err + } + return nil +} + +func Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in, out, s) +} + +func autoConvert_v1alpha1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in *ClusterRoleBindingBuilder, out *rbac.ClusterRoleBindingBuilder, s conversion.Scope) error { + if err := Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(&in.ClusterRoleBinding, &out.ClusterRoleBinding, s); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in *ClusterRoleBindingBuilder, out *rbac.ClusterRoleBindingBuilder, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in, out, s) +} + +func autoConvert_rbac_ClusterRoleBindingBuilder_To_v1alpha1_ClusterRoleBindingBuilder(in *rbac.ClusterRoleBindingBuilder, out *ClusterRoleBindingBuilder, s conversion.Scope) error { + if err := Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(&in.ClusterRoleBinding, &out.ClusterRoleBinding, s); err != nil { + return err + } + return nil +} + +func Convert_rbac_ClusterRoleBindingBuilder_To_v1alpha1_ClusterRoleBindingBuilder(in *rbac.ClusterRoleBindingBuilder, out *ClusterRoleBindingBuilder, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleBindingBuilder_To_v1alpha1_ClusterRoleBindingBuilder(in, out, s) +} + +func autoConvert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]rbac.ClusterRoleBinding, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in, out, s) +} + +func autoConvert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(*in)) + for i := range *in { + if err := Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]ClusterRoleBinding, 0) + } + return nil +} + +func Convert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in, 
out, s) +} + +func autoConvert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]rbac.ClusterRole)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in, out, s) +} + +func autoConvert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]ClusterRole, 0) + } else { + out.Items = *(*[]ClusterRole)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in, out, s) +} + +func autoConvert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error { + out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) + out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) + out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) + return nil +} + +func Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error { + return autoConvert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in, out, s) +} + +func autoConvert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error { + if in.Verbs == nil { + out.Verbs = make([]string, 0) + } else { + out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) + } + out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) + out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) + return nil +} + +func Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error { + return autoConvert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in, out, s) +} + +func autoConvert_v1alpha1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in *PolicyRuleBuilder, out *rbac.PolicyRuleBuilder, s conversion.Scope) error { + if err := Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(&in.PolicyRule, &out.PolicyRule, s); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in *PolicyRuleBuilder, out *rbac.PolicyRuleBuilder, s conversion.Scope) error { + return autoConvert_v1alpha1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in, out, s) +} + +func autoConvert_rbac_PolicyRuleBuilder_To_v1alpha1_PolicyRuleBuilder(in *rbac.PolicyRuleBuilder, out *PolicyRuleBuilder, s conversion.Scope) error { + if err := Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(&in.PolicyRule, &out.PolicyRule, s); err != nil { + return err + } + return nil +} + +func Convert_rbac_PolicyRuleBuilder_To_v1alpha1_PolicyRuleBuilder(in *rbac.PolicyRuleBuilder, out *PolicyRuleBuilder, s conversion.Scope) error { + return autoConvert_rbac_PolicyRuleBuilder_To_v1alpha1_PolicyRuleBuilder(in, out, s) +} + +func autoConvert_v1alpha1_Role_To_rbac_Role(in *Role, 
out *rbac.Role, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules)) + return nil +} + +func Convert_v1alpha1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { + return autoConvert_v1alpha1_Role_To_rbac_Role(in, out, s) +} + +func autoConvert_rbac_Role_To_v1alpha1_Role(in *rbac.Role, out *Role, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if in.Rules == nil { + out.Rules = make([]PolicyRule, 0) + } else { + out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) + } + return nil +} + +func Convert_rbac_Role_To_v1alpha1_Role(in *rbac.Role, out *Role, s conversion.Scope) error { + return autoConvert_rbac_Role_To_v1alpha1_Role(in, out, s) +} + +func autoConvert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]rbac.Subject, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_Subject_To_rbac_Subject(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subjects = nil + } + if err := Convert_v1alpha1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error { + return autoConvert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in, out, s) +} + +func autoConvert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + for i := range *in { + if err := Convert_rbac_Subject_To_v1alpha1_Subject(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Subjects = make([]Subject, 0) + } + if err := Convert_rbac_RoleRef_To_v1alpha1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { + return err + } + return nil +} + +func Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error { + return autoConvert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in, out, s) +} + +func autoConvert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]rbac.RoleBinding, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error { + return autoConvert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in, out, s) +} + +func autoConvert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + if err := Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]RoleBinding, 0) + } + return nil +} + +func 
Convert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error { + return autoConvert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in, out, s) +} + +func autoConvert_v1alpha1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]rbac.Role)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1alpha1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error { + return autoConvert_v1alpha1_RoleList_To_rbac_RoleList(in, out, s) +} + +func autoConvert_rbac_RoleList_To_v1alpha1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]Role, 0) + } else { + out.Items = *(*[]Role)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_rbac_RoleList_To_v1alpha1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error { + return autoConvert_rbac_RoleList_To_v1alpha1_RoleList(in, out, s) +} + +func autoConvert_v1alpha1_RoleRef_To_rbac_RoleRef(in *RoleRef, out *rbac.RoleRef, s conversion.Scope) error { + out.APIGroup = in.APIGroup + out.Kind = in.Kind + out.Name = in.Name + return nil +} + +func Convert_v1alpha1_RoleRef_To_rbac_RoleRef(in *RoleRef, out *rbac.RoleRef, s conversion.Scope) error { + return autoConvert_v1alpha1_RoleRef_To_rbac_RoleRef(in, out, s) +} + +func autoConvert_rbac_RoleRef_To_v1alpha1_RoleRef(in *rbac.RoleRef, out *RoleRef, s conversion.Scope) error { + out.APIGroup = in.APIGroup + out.Kind = in.Kind + out.Name = in.Name + return nil +} + +func Convert_rbac_RoleRef_To_v1alpha1_RoleRef(in *rbac.RoleRef, out *RoleRef, s conversion.Scope) error { + return autoConvert_rbac_RoleRef_To_v1alpha1_RoleRef(in, out, s) +} + +func autoConvert_v1alpha1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error { + out.Kind = in.Kind + // INFO: in.APIVersion opted out of conversion generation + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +func autoConvert_rbac_Subject_To_v1alpha1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error { + out.Kind = in.Kind + // WARNING: in.APIGroup requires manual conversion: does not exist in peer-type + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..f4dfd3ca7 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,258 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_ClusterRole, InType: reflect.TypeOf(&ClusterRole{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_ClusterRoleBinding, InType: reflect.TypeOf(&ClusterRoleBinding{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_ClusterRoleBindingList, InType: reflect.TypeOf(&ClusterRoleBindingList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_ClusterRoleList, InType: reflect.TypeOf(&ClusterRoleList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_PolicyRule, InType: reflect.TypeOf(&PolicyRule{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_Role, InType: reflect.TypeOf(&Role{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_RoleBinding, InType: reflect.TypeOf(&RoleBinding{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_RoleBindingList, InType: reflect.TypeOf(&RoleBindingList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_RoleList, InType: reflect.TypeOf(&RoleList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_RoleRef, InType: reflect.TypeOf(&RoleRef{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_Subject, InType: reflect.TypeOf(&Subject{})}, + ) +} + +func DeepCopy_v1alpha1_ClusterRole(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRole) + out := out.(*ClusterRole) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + if err := DeepCopy_v1alpha1_PolicyRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1alpha1_ClusterRoleBinding(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRoleBinding) + out := out.(*ClusterRoleBinding) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1alpha1_ClusterRoleBindingList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRoleBindingList) + out := out.(*ClusterRoleBindingList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(*in)) + for i := range *in { + if err := DeepCopy_v1alpha1_ClusterRoleBinding(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1alpha1_ClusterRoleList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRoleList) + out := out.(*ClusterRoleList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRole, len(*in)) + for i := range *in { + if err := 
DeepCopy_v1alpha1_ClusterRole(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1alpha1_PolicyRule(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PolicyRule) + out := out.(*PolicyRule) + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1alpha1_Role(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Role) + out := out.(*Role) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + if err := DeepCopy_v1alpha1_PolicyRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1alpha1_RoleBinding(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleBinding) + out := out.(*RoleBinding) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1alpha1_RoleBindingList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleBindingList) + out := out.(*RoleBindingList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + if err := DeepCopy_v1alpha1_RoleBinding(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1alpha1_RoleList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleList) + out := out.(*RoleList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Role, len(*in)) + for i := range *in { + if err := DeepCopy_v1alpha1_Role(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1alpha1_RoleRef(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleRef) + out := out.(*RoleRef) + *out = *in + return nil + } +} + +func DeepCopy_v1alpha1_Subject(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Subject) + out := out.(*Subject) + *out = *in + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.defaults.go new file mode 100644 index 000000000..1a5749be3 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,66 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&ClusterRoleBinding{}, func(obj interface{}) { SetObjectDefaults_ClusterRoleBinding(obj.(*ClusterRoleBinding)) }) + scheme.AddTypeDefaultingFunc(&ClusterRoleBindingList{}, func(obj interface{}) { SetObjectDefaults_ClusterRoleBindingList(obj.(*ClusterRoleBindingList)) }) + scheme.AddTypeDefaultingFunc(&RoleBinding{}, func(obj interface{}) { SetObjectDefaults_RoleBinding(obj.(*RoleBinding)) }) + scheme.AddTypeDefaultingFunc(&RoleBindingList{}, func(obj interface{}) { SetObjectDefaults_RoleBindingList(obj.(*RoleBindingList)) }) + return nil +} + +func SetObjectDefaults_ClusterRoleBinding(in *ClusterRoleBinding) { + SetDefaults_ClusterRoleBinding(in) + for i := range in.Subjects { + a := &in.Subjects[i] + SetDefaults_Subject(a) + } +} + +func SetObjectDefaults_ClusterRoleBindingList(in *ClusterRoleBindingList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ClusterRoleBinding(a) + } +} + +func SetObjectDefaults_RoleBinding(in *RoleBinding) { + SetDefaults_RoleBinding(in) + for i := range in.Subjects { + a := &in.Subjects[i] + SetDefaults_Subject(a) + } +} + +func SetObjectDefaults_RoleBindingList(in *RoleBindingList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_RoleBinding(a) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/defaults.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/defaults.go new file mode 100644 index 000000000..6c29ae500 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/defaults.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + RegisterDefaults(scheme) + return scheme.AddDefaultingFuncs( + SetDefaults_ClusterRoleBinding, + SetDefaults_RoleBinding, + SetDefaults_Subject, + ) +} + +func SetDefaults_ClusterRoleBinding(obj *ClusterRoleBinding) { + if len(obj.RoleRef.APIGroup) == 0 { + obj.RoleRef.APIGroup = GroupName + } +} +func SetDefaults_RoleBinding(obj *RoleBinding) { + if len(obj.RoleRef.APIGroup) == 0 { + obj.RoleRef.APIGroup = GroupName + } +} +func SetDefaults_Subject(obj *Subject) { + if len(obj.APIGroup) == 0 { + switch obj.Kind { + case ServiceAccountKind: + obj.APIGroup = "" + case UserKind: + obj.APIGroup = GroupName + case GroupKind: + obj.APIGroup = GroupName + } + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/doc.go new file mode 100644 index 000000000..a9ad4296e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=rbac.authorization.k8s.io +package v1beta1 diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/generated.pb.go new file mode 100644 index 000000000..5f553a57e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/generated.pb.go @@ -0,0 +1,2816 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/rbac/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/rbac/v1beta1/generated.proto + + It has these top-level messages: + ClusterRole + ClusterRoleBinding + ClusterRoleBindingBuilder + ClusterRoleBindingList + ClusterRoleList + PolicyRule + PolicyRuleBuilder + Role + RoleBinding + RoleBindingList + RoleList + RoleRef + Subject +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *ClusterRoleBindingBuilder) Reset() { *m = ClusterRoleBindingBuilder{} } +func (*ClusterRoleBindingBuilder) ProtoMessage() {} +func (*ClusterRoleBindingBuilder) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func (m *PolicyRuleBuilder) Reset() { *m = PolicyRuleBuilder{} } +func (*PolicyRuleBuilder) ProtoMessage() {} +func (*PolicyRuleBuilder) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } + +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *RoleRef) Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func init() { + proto.RegisterType((*ClusterRole)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.ClusterRole") + proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.ClusterRoleBinding") + proto.RegisterType((*ClusterRoleBindingBuilder)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.ClusterRoleBindingBuilder") + proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.ClusterRoleBindingList") + proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.ClusterRoleList") + proto.RegisterType((*PolicyRule)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.PolicyRule") + proto.RegisterType((*PolicyRuleBuilder)(nil), 
"k8s.io.client-go.pkg.apis.rbac.v1beta1.PolicyRuleBuilder") + proto.RegisterType((*Role)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.Role") + proto.RegisterType((*RoleBinding)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.RoleBinding") + proto.RegisterType((*RoleBindingList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.RoleBindingList") + proto.RegisterType((*RoleList)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.RoleList") + proto.RegisterType((*RoleRef)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.RoleRef") + proto.RegisterType((*Subject)(nil), "k8s.io.client-go.pkg.apis.rbac.v1beta1.Subject") +} +func (m *ClusterRole) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRole) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ClusterRoleBinding) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleBinding) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n2, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size())) + n3, err := m.RoleRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *ClusterRoleBindingBuilder) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleBindingBuilder) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ClusterRoleBinding.Size())) + n4, err := m.ClusterRoleBinding.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + return i, nil +} + +func (m *ClusterRoleBindingList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleBindingList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n5 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i 
+= n + } + } + return i, nil +} + +func (m *ClusterRoleList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *ClusterRoleList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PolicyRule) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PolicyRule) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + data[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + data[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + data[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + data[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + data[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + data[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + data[i] = uint8(l) + i++ + i += copy(data[i:], s) + } + } + return i, nil +} + +func (m *PolicyRuleBuilder) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PolicyRuleBuilder) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.PolicyRule.Size())) + n7, err := m.PolicyRule.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n7 + return i, nil +} + +func (m *Role) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Role) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n8, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n8 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m 
*RoleBinding) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleBinding) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n9 + if len(m.Subjects) > 0 { + for _, msg := range m.Subjects { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size())) + n10, err := m.RoleRef.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n10 + return i, nil +} + +func (m *RoleBindingList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleBindingList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n11, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n11 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RoleList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n12, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n12 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *RoleRef) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *RoleRef) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIGroup))) + i += copy(data[i:], m.APIGroup) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + return i, nil +} + +func (m *Subject) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *Subject) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) + i += copy(data[i:], m.Kind) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.APIGroup))) + i += 
copy(data[i:], m.APIGroup) + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Name))) + i += copy(data[i:], m.Name) + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) + i += copy(data[i:], m.Namespace) + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *ClusterRole) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRoleBinding) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterRoleBindingBuilder) Size() (n int) { + var l int + _ = l + l = m.ClusterRoleBinding.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterRoleBindingList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRoleList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRule) Size() (n int) { + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRuleBuilder) Size() (n int) { + var l int + _ = l + l = m.PolicyRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Role) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleBinding) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + 
if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RoleBindingList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleRef) Size() (n int) { + var l int + _ = l + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Subject) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterRole) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRole{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBindingBuilder) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleBindingBuilder{`, + `ClusterRoleBinding:` + strings.Replace(strings.Replace(this.ClusterRoleBinding.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBindingList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", 
"k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRuleBuilder) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PolicyRuleBuilder{`, + `PolicyRule:` + strings.Replace(strings.Replace(this.PolicyRule.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Role) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Role{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleBinding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, + `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleBindingList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleRef{`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Subject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Subject{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v 
interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterRole) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBinding) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBindingBuilder) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBindingBuilder: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBindingBuilder: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleBinding", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClusterRoleBinding.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} +func (m *ClusterRoleBindingList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRole{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRule) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(data[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(data[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRuleBuilder) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRuleBuilder: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRuleBuilder: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PolicyRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PolicyRule.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil 
+} +func (m *Role) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Role: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBinding) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RoleBinding{}) + if err 
:= m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Role{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleRef) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Subject) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if 
err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 830 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x54, 0xbf, 0x8f, 0xe3, 0x44, + 0x14, 0xce, 0x64, 0x13, 0x6d, 0x3c, 0xcb, 0x2a, 0xec, 0x20, 0x21, 0x93, 0xc2, 0x89, 0xdc, 0xb0, + 0x88, 0x3b, 0xfb, 0xf6, 0xee, 0xc4, 0x21, 0x21, 0x0a, 0x4c, 0x81, 0x4e, 0x1c, 0xcb, 0x69, 0x10, + 0x88, 0x5f, 0x42, 0x37, 0x71, 0xe6, 0xbc, 0x43, 0xfc, 0x4b, 0x33, 0xe3, 0x88, 0x13, 0x14, 0x74, + 0xb4, 0xfc, 0x13, 0x74, 0xd7, 0xd1, 0x52, 0x51, 0x2d, 0x54, 0x57, 0x6e, 0x15, 0xb1, 0xe6, 0x0f, + 0x01, 0xd9, 0x1e, 0xff, 0x08, 0x4e, 0xd8, 0xb0, 0x48, 0x91, 0x90, 0xa8, 0x92, 0x79, 0xef, 0xfb, + 0xde, 0xbc, 0xef, 0xbd, 0xf1, 0x07, 0xef, 0xcd, 0x5f, 0x17, 0x16, 0x8b, 0xec, 0x79, 0x32, 0xa5, + 0x3c, 0xa4, 0x92, 0x0a, 0x3b, 0x9e, 0x7b, 0x36, 0x89, 0x99, 0xb0, 0xf9, 0x94, 0xb8, 0xf6, 0xe2, + 0x64, 0x4a, 0x25, 0x39, 0xb1, 0x3d, 0x1a, 0x52, 0x4e, 0x24, 0x9d, 0x59, 0x31, 0x8f, 0x64, 0x84, + 0x5e, 0x2e, 0x88, 0x56, 0x4d, 0xb4, 0xe2, 0xb9, 0x67, 0x65, 0x44, 0x2b, 0x23, 0x5a, 0x8a, 0x38, + 0xba, 0xe9, 0x31, 0x79, 0x96, 0x4c, 0x2d, 0x37, 0x0a, 0x6c, 0x2f, 0xf2, 0x22, 0x3b, 0xe7, 0x4f, + 0x93, 0xc7, 0xf9, 0x29, 0x3f, 0xe4, 0xff, 0x8a, 0xba, 0xa3, 0xbb, 0xaa, 0x21, 0x12, 0xb3, 0x80, + 0xb8, 0x67, 0x2c, 0xa4, 0xfc, 0x49, 0xdd, 0x52, 0x40, 0x25, 0xb1, 0x17, 0xad, 0x6e, 0x46, 0xf6, + 0x26, 0x16, 0x4f, 0x42, 0xc9, 0x02, 0xda, 0x22, 0xbc, 0x76, 0x15, 0x41, 0xb8, 0x67, 0x34, 0x20, + 0x2d, 0xde, 0x9d, 0x4d, 0xbc, 0x44, 0x32, 0xdf, 0x66, 0xa1, 0x14, 0x92, 0xb7, 0x48, 0x0d, 0x4d, + 0x82, 0xf2, 0x05, 0xe5, 0xb5, 0x20, 0xfa, 0x15, 0x09, 0x62, 0x9f, 0xae, 0xd3, 0x74, 0x63, 0xe3, + 0x6a, 0xd6, 0xa0, 0xcd, 0x5f, 0x00, 0x3c, 0x78, 0xdb, 0x4f, 0x84, 0xa4, 0x1c, 0x47, 0x3e, 0x45, + 0x8f, 0xe0, 0x20, 0x1b, 0xd6, 0x8c, 0x48, 0xa2, 0x83, 0x09, 0x38, 0x3e, 0xb8, 0x7d, 0xcb, 0x52, + 0x2b, 0x6b, 0xf6, 0x5e, 0x2f, 0x2d, 0x43, 0x5b, 0x8b, 0x13, 0xeb, 0xfd, 0xe9, 0x97, 0xd4, 0x95, + 0xef, 0x51, 0x49, 0x1c, 0x74, 0xbe, 0x1c, 0x77, 0xd2, 0xe5, 0x18, 0xd6, 0x31, 0x5c, 0x55, 0x45, + 0x1f, 0xc3, 0x3e, 0x4f, 0x7c, 0x2a, 0xf4, 0xee, 0x64, 0xef, 0xf8, 0xe0, 0xf6, 0x1d, 0x6b, 0xcb, + 0x17, 0x61, 0x3d, 0x8c, 0x7c, 0xe6, 0x3e, 0xc1, 0x89, 0x4f, 0x9d, 0x43, 0x75, 0x43, 0x3f, 0x3b, + 0x09, 0x5c, 0x14, 0x34, 0x7f, 0xec, 0x42, 0xd4, 0xd0, 0xe2, 0xb0, 0x70, 0xc6, 0x42, 0x6f, 0x07, + 0x92, 0xbe, 0x80, 0x03, 0x91, 0xe4, 0x89, 0x52, 0xd5, 0xad, 0xad, 0x55, 0x7d, 0x50, 0x10, 0x9d, + 0xe7, 0xd5, 0x0d, 0x03, 0x15, 0x10, 0xb8, 0xaa, 0x89, 0x3e, 0x83, 0xfb, 0x3c, 0xf2, 0x29, 0xa6, + 0x8f, 0xf5, 0xbd, 0x55, 0x01, 0x57, 0x96, 0xc7, 0x05, 0xcf, 0x19, 0xaa, 0xf2, 0xfb, 0x2a, 0x80, + 0xcb, 0x8a, 0xe6, 0x0f, 0x00, 0xbe, 0xd4, 0x9e, 0x9a, 0x93, 0x30, 0x7f, 0x46, 0x39, 0xfa, 0x0e, + 0x40, 0xe4, 0xb6, 0xb2, 0x6a, 0x8e, 0x6f, 0x6c, 0xdd, 0xc6, 0x9a, 0x0b, 0x46, 0xaa, 0xa3, 0x35, + 0x2b, 0xc3, 0x6b, 0xae, 0x34, 0x2f, 0x00, 0x7c, 0xb1, 0x0d, 0x7d, 0xc0, 0x84, 0x44, 0x9f, 0xb7, + 0x36, 0x6c, 0x6d, 0xb7, 0xe1, 0x8c, 0x9d, 0xef, 0xb7, 0x9a, 0x7e, 0x19, 0x69, 0x6c, 0xf7, 0x11, + 0xec, 0x33, 0x49, 0x83, 0x72, 0xb5, 0xff, 0x4a, 0x74, 0xf5, 0x70, 0xef, 0x67, 0x15, 0x71, 
0x51, + 0xd8, 0xfc, 0x15, 0xc0, 0x61, 0x03, 0xbc, 0x03, 0x4d, 0x9f, 0xac, 0x6a, 0xba, 0x7b, 0x2d, 0x4d, + 0xeb, 0xc5, 0xfc, 0x01, 0x20, 0xac, 0x3f, 0x55, 0x34, 0x86, 0xfd, 0x05, 0xe5, 0x53, 0xa1, 0x83, + 0xc9, 0xde, 0xb1, 0xe6, 0x68, 0x19, 0xfe, 0xa3, 0x2c, 0x80, 0x8b, 0x38, 0x7a, 0x15, 0x6a, 0x24, + 0x66, 0xef, 0xf0, 0x28, 0x89, 0x8b, 0x76, 0x34, 0xe7, 0x30, 0x5d, 0x8e, 0xb5, 0xb7, 0x1e, 0xde, + 0x2f, 0x82, 0xb8, 0xce, 0x67, 0x60, 0x4e, 0x45, 0x94, 0x70, 0x97, 0x0a, 0x7d, 0xaf, 0x06, 0xe3, + 0x32, 0x88, 0xeb, 0x3c, 0xba, 0x07, 0x0f, 0xcb, 0xc3, 0x29, 0x09, 0xa8, 0xd0, 0x7b, 0x39, 0xe1, + 0x28, 0x5d, 0x8e, 0x0f, 0x71, 0x33, 0x81, 0x57, 0x71, 0xe8, 0x4d, 0x38, 0x0c, 0xa3, 0xb0, 0x84, + 0x7c, 0x88, 0x1f, 0x08, 0xbd, 0x9f, 0x53, 0x5f, 0x48, 0x97, 0xe3, 0xe1, 0xe9, 0x6a, 0x0a, 0xff, + 0x15, 0x6b, 0x7e, 0x03, 0x8f, 0x1a, 0x5e, 0xa5, 0x3e, 0x24, 0x0f, 0xc2, 0xb8, 0x0a, 0xaa, 0x8d, + 0x5e, 0xcb, 0xfb, 0x2a, 0x2b, 0xaa, 0x63, 0xb8, 0x51, 0xda, 0xfc, 0x19, 0xc0, 0xde, 0x7f, 0xde, + 0xca, 0x9f, 0x76, 0xe1, 0xc1, 0xff, 0x1e, 0xbe, 0xb5, 0x87, 0x67, 0x06, 0xb2, 0x5b, 0x53, 0xbc, + 0xb6, 0x81, 0x5c, 0xed, 0x86, 0x3f, 0x01, 0x38, 0xd8, 0x91, 0x0d, 0xe2, 0x55, 0x15, 0x37, 0xff, + 0x99, 0x8a, 0xf5, 0xed, 0x7f, 0x0d, 0xcb, 0xfd, 0xa0, 0x1b, 0x70, 0x50, 0x5a, 0x57, 0xde, 0xbc, + 0x56, 0x37, 0x53, 0xba, 0x1b, 0xae, 0x10, 0x68, 0x02, 0x7b, 0x73, 0x16, 0xce, 0xf4, 0x6e, 0x8e, + 0x7c, 0x4e, 0x21, 0x7b, 0xef, 0xb2, 0x70, 0x86, 0xf3, 0x4c, 0x86, 0x08, 0x49, 0x40, 0xf3, 0x07, + 0xd4, 0x40, 0x64, 0xa6, 0x85, 0xf3, 0x8c, 0xf9, 0x14, 0xc0, 0x7d, 0xf5, 0xf8, 0xaa, 0x7a, 0x60, + 0x63, 0xbd, 0x66, 0x7f, 0xdd, 0x6d, 0xfa, 0xfb, 0xfb, 0xdb, 0x91, 0x0d, 0xb5, 0xec, 0x57, 0xc4, + 0xc4, 0xa5, 0x7a, 0x2f, 0x87, 0x1d, 0x29, 0x98, 0x76, 0x5a, 0x26, 0x70, 0x8d, 0x71, 0x5e, 0x39, + 0xbf, 0x34, 0x3a, 0xcf, 0x2e, 0x8d, 0xce, 0xc5, 0xa5, 0xd1, 0xf9, 0x36, 0x35, 0xc0, 0x79, 0x6a, + 0x80, 0x67, 0xa9, 0x01, 0x7e, 0x4b, 0x0d, 0xf0, 0xfd, 0xef, 0x46, 0xe7, 0xd3, 0x7d, 0x35, 0xf1, + 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x96, 0xa1, 0xd4, 0x72, 0x0c, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/generated.proto new file mode 100644 index 000000000..542e2b027 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/generated.proto @@ -0,0 +1,200 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.rbac.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". 
+option go_package = "v1beta1"; + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +message ClusterRole { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this ClusterRole + repeated PolicyRule rules = 2; +} + +// ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. +message ClusterRoleBinding { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Subjects holds references to the objects the role applies to. + repeated Subject subjects = 2; + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + optional RoleRef roleRef = 3; +} + +// +k8s:deepcopy-gen=false +// ClusterRoleBindingBuilder lets us attach methods. A no-no for API types. +// We use it to construct bindings in code. It's more compact than trying to write them +// out in a literal. +message ClusterRoleBindingBuilder { + optional ClusterRoleBinding clusterRoleBinding = 1; +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +message ClusterRoleBindingList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ClusterRoleBindings + repeated ClusterRoleBinding items = 2; +} + +// ClusterRoleList is a collection of ClusterRoles +message ClusterRoleList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ClusterRoles + repeated ClusterRole items = 2; +} + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +message PolicyRule { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + repeated string verbs = 1; + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. + // +optional + repeated string apiGroups = 2; + + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // +optional + repeated string resources = 3; + + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +optional + repeated string resourceNames = 4; + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. + // +optional + repeated string nonResourceURLs = 5; +} + +// +k8s:deepcopy-gen=false +// PolicyRuleBuilder lets us attach methods. A no-no for API types. +// We use it to construct rules in code.
It's more compact than trying to write them +out in a literal and allows us to perform some basic checking during construction +message PolicyRuleBuilder { + optional PolicyRule policyRule = 1; +} + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +message Role { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this Role + repeated PolicyRule rules = 2; +} + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. +message RoleBinding { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Subjects holds references to the objects the role applies to. + repeated Subject subjects = 2; + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + optional RoleRef roleRef = 3; +} + +// RoleBindingList is a collection of RoleBindings +message RoleBindingList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of RoleBindings + repeated RoleBinding items = 2; +} + +// RoleList is a collection of Roles +message RoleList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of Roles + repeated Role items = 2; +} + +// RoleRef contains information that points to the role being used +message RoleRef { + // APIGroup is the group for the resource being referenced + optional string apiGroup = 1; + + // Kind is the type of resource being referenced + optional string kind = 2; + + // Name is the name of resource being referenced + optional string name = 3; +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +message Subject { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognize the kind value, the Authorizer should report an error. + optional string kind = 1; + + // APIGroup holds the API group of the referenced subject. + // Defaults to "" for ServiceAccount subjects. + // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. + // +optional + optional string apiGroup = 2; + + // Name of the object being referenced. + optional string name = 3; + + // Namespace of the referenced object. If the object kind is non-namespaced, such as "User" or "Group", and this value is not empty, + // the Authorizer should report an error. + // +optional + optional string namespace = 4; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/helpers.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/helpers.go new file mode 100644 index 000000000..063b9ed3f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/helpers.go @@ -0,0 +1,146 @@ +/* +Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// PolicyRuleBuilder lets us attach methods. A no-no for API types.
+// We use it to construct rules in code. It's more compact than trying to write them
+// out in a literal and allows us to perform some basic checking during construction.
+type PolicyRuleBuilder struct {
+	PolicyRule PolicyRule `protobuf:"bytes,1,opt,name=policyRule"`
+}
+
+func NewRule(verbs ...string) *PolicyRuleBuilder {
+	return &PolicyRuleBuilder{
+		PolicyRule: PolicyRule{Verbs: verbs},
+	}
+}
+
+func (r *PolicyRuleBuilder) Groups(groups ...string) *PolicyRuleBuilder {
+	r.PolicyRule.APIGroups = append(r.PolicyRule.APIGroups, groups...)
+	return r
+}
+
+func (r *PolicyRuleBuilder) Resources(resources ...string) *PolicyRuleBuilder {
+	r.PolicyRule.Resources = append(r.PolicyRule.Resources, resources...)
+	return r
+}
+
+func (r *PolicyRuleBuilder) Names(names ...string) *PolicyRuleBuilder {
+	r.PolicyRule.ResourceNames = append(r.PolicyRule.ResourceNames, names...)
+	return r
+}
+
+func (r *PolicyRuleBuilder) URLs(urls ...string) *PolicyRuleBuilder {
+	r.PolicyRule.NonResourceURLs = append(r.PolicyRule.NonResourceURLs, urls...)
+	return r
+}
+
+func (r *PolicyRuleBuilder) RuleOrDie() PolicyRule {
+	ret, err := r.Rule()
+	if err != nil {
+		panic(err)
+	}
+	return ret
+}
+
+func (r *PolicyRuleBuilder) Rule() (PolicyRule, error) {
+	if len(r.PolicyRule.Verbs) == 0 {
+		return PolicyRule{}, fmt.Errorf("verbs are required: %#v", r.PolicyRule)
+	}
+
+	switch {
+	case len(r.PolicyRule.NonResourceURLs) > 0:
+		if len(r.PolicyRule.APIGroups) != 0 || len(r.PolicyRule.Resources) != 0 || len(r.PolicyRule.ResourceNames) != 0 {
+			return PolicyRule{}, fmt.Errorf("non-resource rule may not have apiGroups, resources, or resourceNames: %#v", r.PolicyRule)
+		}
+	case len(r.PolicyRule.Resources) > 0:
+		if len(r.PolicyRule.NonResourceURLs) != 0 {
+			return PolicyRule{}, fmt.Errorf("resource rule may not have nonResourceURLs: %#v", r.PolicyRule)
+		}
+		if len(r.PolicyRule.APIGroups) == 0 {
+			// this is a common bug
+			return PolicyRule{}, fmt.Errorf("resource rule must have apiGroups: %#v", r.PolicyRule)
+		}
+	default:
+		return PolicyRule{}, fmt.Errorf("a rule must have either nonResourceURLs or resources: %#v", r.PolicyRule)
+	}
+
+	return r.PolicyRule, nil
+}
+
+// ClusterRoleBindingBuilder lets us attach methods. A no-no for API types.
+// We use it to construct bindings in code. It's more compact than trying to write them
+// out in a literal.
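+//
+// An illustrative sketch of the intended chaining (the role name, group, verbs,
+// and resources below are examples only, not values defined in this package):
+//
+//	binding := NewClusterBinding("view").Groups("system:authenticated").BindingOrDie()
+//	rule := NewRule("get", "list").Groups("").Resources("pods").RuleOrDie()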
+type ClusterRoleBindingBuilder struct { + ClusterRoleBinding ClusterRoleBinding `protobuf:"bytes,1,opt,name=clusterRoleBinding"` +} + +func NewClusterBinding(clusterRoleName string) *ClusterRoleBindingBuilder { + return &ClusterRoleBindingBuilder{ + ClusterRoleBinding: ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: clusterRoleName}, + RoleRef: RoleRef{ + APIGroup: GroupName, + Kind: "ClusterRole", + Name: clusterRoleName, + }, + }, + } +} + +func (r *ClusterRoleBindingBuilder) Groups(groups ...string) *ClusterRoleBindingBuilder { + for _, group := range groups { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: GroupKind, Name: group}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) Users(users ...string) *ClusterRoleBindingBuilder { + for _, user := range users { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: UserKind, Name: user}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) SAs(namespace string, serviceAccountNames ...string) *ClusterRoleBindingBuilder { + for _, saName := range serviceAccountNames { + r.ClusterRoleBinding.Subjects = append(r.ClusterRoleBinding.Subjects, Subject{Kind: ServiceAccountKind, Namespace: namespace, Name: saName}) + } + return r +} + +func (r *ClusterRoleBindingBuilder) BindingOrDie() ClusterRoleBinding { + ret, err := r.Binding() + if err != nil { + panic(err) + } + return ret +} + +func (r *ClusterRoleBindingBuilder) Binding() (ClusterRoleBinding, error) { + if len(r.ClusterRoleBinding.Subjects) == 0 { + return ClusterRoleBinding{}, fmt.Errorf("subjects are required: %#v", r.ClusterRoleBinding) + } + + return r.ClusterRoleBinding, nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/register.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/register.go new file mode 100644 index 000000000..cab2e77e4 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "rbac.authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. 
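+//
+// Callers normally register through the exported AddToScheme rather than calling
+// addKnownTypes directly; a minimal illustrative sketch (the scheme variable name
+// is arbitrary):
+//
+//	s := runtime.NewScheme()
+//	if err := AddToScheme(s); err != nil {
+//		panic(err)
+//	}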
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Role{}, + &RoleBinding{}, + &RoleBindingList{}, + &RoleList{}, + + &ClusterRole{}, + &ClusterRoleBinding{}, + &ClusterRoleBindingList{}, + &ClusterRoleList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types.generated.go new file mode 100644 index 000000000..56de3e3e7 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types.generated.go @@ -0,0 +1,4879 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package v1beta1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 + } +} + +func (x *PolicyRule) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = len(x.APIGroups) != 0 + yyq2[2] = len(x.Resources) != 0 + yyq2[3] = len(x.ResourceNames) != 0 + yyq2[4] = len(x.NonResourceURLs) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Verbs == nil { + r.EncodeNil() + } else { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + z.F.EncSliceStringV(x.Verbs, false, e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("verbs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Verbs == nil { + r.EncodeNil() + } else { + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + z.F.EncSliceStringV(x.Verbs, false, e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + if x.APIGroups == nil { + r.EncodeNil() + } else { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + z.F.EncSliceStringV(x.APIGroups, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiGroups")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.APIGroups == nil { + r.EncodeNil() + } else { + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + z.F.EncSliceStringV(x.APIGroups, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + if x.Resources == nil { + r.EncodeNil() + } else { + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + z.F.EncSliceStringV(x.Resources, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resources")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Resources == nil { + r.EncodeNil() + } else { + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + z.F.EncSliceStringV(x.Resources, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + if x.ResourceNames == nil { + r.EncodeNil() + } else { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + z.F.EncSliceStringV(x.ResourceNames, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("resourceNames")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.ResourceNames == nil { + r.EncodeNil() + } else { + yym14 
:= z.EncBinary() + _ = yym14 + if false { + } else { + z.F.EncSliceStringV(x.ResourceNames, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.NonResourceURLs == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncSliceStringV(x.NonResourceURLs, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("nonResourceURLs")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.NonResourceURLs == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + z.F.EncSliceStringV(x.NonResourceURLs, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *PolicyRule) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *PolicyRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "verbs": + if r.TryDecodeAsNil() { + x.Verbs = nil + } else { + yyv4 := &x.Verbs + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + z.F.DecSliceStringX(yyv4, false, d) + } + } + case "apiGroups": + if r.TryDecodeAsNil() { + x.APIGroups = nil + } else { + yyv6 := &x.APIGroups + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + z.F.DecSliceStringX(yyv6, false, d) + } + } + case "resources": + if r.TryDecodeAsNil() { + x.Resources = nil + } else { + yyv8 := &x.Resources + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + z.F.DecSliceStringX(yyv8, false, d) + } + } + case "resourceNames": + if r.TryDecodeAsNil() { + x.ResourceNames = nil + } else { + yyv10 := &x.ResourceNames + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + z.F.DecSliceStringX(yyv10, false, d) + } + } + case "nonResourceURLs": + if r.TryDecodeAsNil() { + x.NonResourceURLs = nil + } else { + yyv12 := &x.NonResourceURLs + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + z.F.DecSliceStringX(yyv12, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *PolicyRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Verbs = nil + } else { + yyv15 := &x.Verbs + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + z.F.DecSliceStringX(yyv15, false, d) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIGroups = nil + } else { + yyv17 := &x.APIGroups + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + z.F.DecSliceStringX(yyv17, false, d) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Resources = nil + } else { + yyv19 := &x.Resources + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + z.F.DecSliceStringX(yyv19, false, d) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ResourceNames = nil + } else { + yyv21 := &x.ResourceNames + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + z.F.DecSliceStringX(yyv21, false, d) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.NonResourceURLs = nil + } else { + yyv23 := &x.NonResourceURLs + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + z.F.DecSliceStringX(yyv23, false, d) + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Subject) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.APIGroup != "" + yyq2[3] = x.Namespace != "" + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiGroup")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[3] { + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[3] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("namespace")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Subject) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Subject) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, 
true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiGroup": + if r.TryDecodeAsNil() { + x.APIGroup = "" + } else { + yyv6 := &x.APIGroup + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "namespace": + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv10 := &x.Namespace + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Subject) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIGroup = "" + } else { + yyv15 := &x.APIGroup + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv17 := &x.Name + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Namespace = "" + } else { + yyv19 := &x.Namespace + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RoleRef) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := 
!z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [3]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(3) + } else { + yynn2 = 3 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiGroup")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIGroup)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("name")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Name)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleRef) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleRef) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "apiGroup": + if r.TryDecodeAsNil() { + x.APIGroup = "" + } else { + yyv4 := &x.APIGroup + yym5 := z.DecBinary() + _ = yym5 + if 
false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv6 := &x.Kind + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "name": + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv8 := &x.Name + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RoleRef) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIGroup = "" + } else { + yyv11 := &x.APIGroup + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*string)(yyv11)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Name = "" + } else { + yyv15 := &x.Name + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj10-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *Role) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := 
z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rules")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *Role) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *Role) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + 
case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "rules": + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv10 := &x.Rules + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *Role) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv19 := &x.Rules + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && 
z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subjects")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy18 := &x.RoleRef + yy18.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("roleRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy20 := &x.RoleRef + yy20.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleBinding) CodecDecodeSelf(d *codec1978.Decoder) 
{ + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "subjects": + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv10 := &x.Subjects + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv10), d) + } + } + case "roleRef": + if r.TryDecodeAsNil() { + x.RoleRef = RoleRef{} + } else { + yyv12 := &x.RoleRef + yyv12.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv14 := &x.Kind + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv16 := &x.APIVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 
{ + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv18 := &x.ObjectMeta + yym19 := z.DecBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.DecExt(yyv18) { + } else { + z.DecFallback(yyv18, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv20 := &x.Subjects + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv20), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RoleRef = RoleRef{} + } else { + yyv22 := &x.RoleRef + yyv22.CodecDecodeSelf(d) + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if 
yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceRoleBinding(([]RoleBinding)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceRoleBinding(([]RoleBinding)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleBindingList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + 
h.decSliceRoleBinding((*[]RoleBinding)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceRoleBinding((*[]RoleBinding)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *RoleList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + 
r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceRole(([]Role)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceRole(([]Role)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *RoleList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *RoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) 
+ yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceRole((*[]Role)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *RoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceRole((*[]Role)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRole) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := 
z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("rules")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Rules == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRole) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 
0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRole) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "rules": + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv10 := &x.Rules + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRole) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv17 := &x.ObjectMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + 
z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Rules = nil + } else { + yyv19 := &x.Rules + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSlicePolicyRule((*[]PolicyRule)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 2 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else 
{ + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("subjects")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Subjects == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceSubject(([]Subject)(x.Subjects), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy18 := &x.RoleRef + yy18.CodecEncodeSelf(e) + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("roleRef")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy20 := &x.RoleRef + yy20.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRoleBinding) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "subjects": + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv10 := &x.Subjects + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv10), d) + } + } + case "roleRef": + if r.TryDecodeAsNil() { + x.RoleRef = RoleRef{} + } else { + yyv12 := &x.RoleRef + yyv12.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRoleBinding) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj13 int + var yyb13 bool + var yyhl13 bool = l >= 0 + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv14 := &x.Kind + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*string)(yyv14)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv16 := &x.APIVersion + yym17 := z.DecBinary() + _ = yym17 + if false { + } else { + *((*string)(yyv16)) = r.DecodeString() + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv18 := &x.ObjectMeta + yym19 := z.DecBinary() + _ = yym19 + if false { + } else if z.HasExtensions() && z.DecExt(yyv18) { + } else { + z.DecFallback(yyv18, false) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Subjects = nil + } else { + yyv20 := &x.Subjects + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + h.decSliceSubject((*[]Subject)(yyv20), d) + } + } + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.RoleRef = RoleRef{} + } else { + yyv22 := &x.RoleRef + yyv22.CodecDecodeSelf(d) + } + for { + yyj13++ + if yyhl13 { + yyb13 = yyj13 > l + } else { + yyb13 = r.CheckBreak() + } + if yyb13 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj13-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + 
r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRoleBindingList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for 
yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + 
z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *ClusterRoleList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceClusterRole(([]ClusterRole)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceClusterRole(([]ClusterRole)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *ClusterRoleList) CodecDecodeSelf(d 
*codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *ClusterRoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceClusterRole((*[]ClusterRole)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *ClusterRoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceClusterRole((*[]ClusterRole)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSlicePolicyRule(v []PolicyRule, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSlicePolicyRule(v *[]PolicyRule, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []PolicyRule{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 120) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]PolicyRule, yyrl1) + } + } else { + yyv1 = make([]PolicyRule, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PolicyRule{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, PolicyRule{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = PolicyRule{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, PolicyRule{}) // var yyz1 PolicyRule + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = PolicyRule{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []PolicyRule{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceSubject(v []Subject, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := 
codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceSubject(v *[]Subject, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Subject{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Subject, yyrl1) + } + } else { + yyv1 = make([]Subject, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Subject{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Subject{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Subject{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Subject{}) // var yyz1 Subject + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Subject{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Subject{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceRoleBinding(v []RoleBinding, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceRoleBinding(v *[]RoleBinding, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []RoleBinding{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 328) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]RoleBinding, yyrl1) + } + } else { + yyv1 = make([]RoleBinding, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + 
for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = RoleBinding{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, RoleBinding{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = RoleBinding{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, RoleBinding{}) // var yyz1 RoleBinding + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = RoleBinding{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []RoleBinding{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceRole(v []Role, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceRole(v *[]Role, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []Role{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]Role, yyrl1) + } + } else { + yyv1 = make([]Role, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Role{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, Role{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = Role{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, Role{}) // var yyz1 Role + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = Role{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []Role{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceClusterRoleBinding(v []ClusterRoleBinding, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceClusterRoleBinding(v *[]ClusterRoleBinding, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ClusterRoleBinding{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 328) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ClusterRoleBinding, yyrl1) + } + } else { + yyv1 = make([]ClusterRoleBinding, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRoleBinding{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ClusterRoleBinding{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRoleBinding{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ClusterRoleBinding{}) // var yyz1 ClusterRoleBinding + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRoleBinding{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ClusterRoleBinding{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer1234) encSliceClusterRole(v []ClusterRole, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceClusterRole(v *[]ClusterRole, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []ClusterRole{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]ClusterRole, yyrl1) + } + } else { + yyv1 = make([]ClusterRole, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 
= true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRole{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, ClusterRole{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRole{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, ClusterRole{}) // var yyz1 ClusterRole + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = ClusterRole{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []ClusterRole{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types.go new file mode 100644 index 000000000..89a47db9e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types.go @@ -0,0 +1,207 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Authorization is calculated against +// 1. evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + + GroupKind = "Group" + ServiceAccountKind = "ServiceAccount" + UserKind = "User" + + // AutoUpdateAnnotationKey is the name of an annotation which prevents reconciliation if set to "false" + AutoUpdateAnnotationKey = "rbac.authorization.kubernetes.io/autoupdate" +) + +// Authorization is calculated against +// 1. evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +type PolicyRule struct { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. + // +optional + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` + // Resources is a list of resources this rule applies to. 
ResourceAll represents all resources. + // +optional + Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` + + // NonResourceURLs is a set of partial URLs that a user should have access to. *s are allowed, but only as the full, final step in the path + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. + // +optional + NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,5,rep,name=nonResourceURLs"` +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +type Subject struct { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognize the kind value, the Authorizer should report an error. + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // APIGroup holds the API group of the referenced subject. + // Defaults to "" for ServiceAccount subjects. + // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. + // +optional + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt.name=apiGroup"` + // Name of the object being referenced. + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` + // Namespace of the referenced object. If the object kind is non-namespaced, such as "User" or "Group", and this value is not empty, + // the Authorizer should report an error. + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` +} + +// RoleRef contains information that points to the role being used +type RoleRef struct { + // APIGroup is the group for the resource being referenced + APIGroup string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"` + // Kind is the type of resource being referenced + Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` + // Name is the name of the resource being referenced + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` +} + +// +genclient=true + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +type Role struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules holds all the PolicyRules for this Role + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` +} + +// +genclient=true + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. +type RoleBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata.
+ // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Subjects holds references to the objects the role applies to. + Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"` + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` +} + +// RoleBindingList is a collection of RoleBindings +type RoleBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of RoleBindings + Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// RoleList is a collection of Roles +type RoleList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of Roles + Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient=true +// +nonNamespaced=true + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +type ClusterRole struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules holds all the PolicyRules for this ClusterRole + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` +} + +// +genclient=true +// +nonNamespaced=true + +// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. +type ClusterRoleBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Subjects holds references to the objects the role applies to. + Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"` + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +type ClusterRoleBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoleBindings + Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ClusterRoleList is a collection of ClusterRoles +type ClusterRoleList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoles + Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..1463d8fea --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,148 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_ClusterRole = map[string]string{ + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", +} + +func (ClusterRole) SwaggerDoc() map[string]string { + return map_ClusterRole +} + +var map_ClusterRoleBinding = map[string]string{ + "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", + "metadata": "Standard object's metadata.", + "subjects": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. 
If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (ClusterRoleBinding) SwaggerDoc() map[string]string { + return map_ClusterRoleBinding +} + +var map_ClusterRoleBindingList = map[string]string{ + "": "ClusterRoleBindingList is a collection of ClusterRoleBindings", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoleBindings", +} + +func (ClusterRoleBindingList) SwaggerDoc() map[string]string { + return map_ClusterRoleBindingList +} + +var map_ClusterRoleList = map[string]string{ + "": "ClusterRoleList is a collection of ClusterRoles", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoles", +} + +func (ClusterRoleList) SwaggerDoc() map[string]string { + return map_ClusterRoleList +} + +var map_PolicyRule = map[string]string{ + "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", + "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", + "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", +} + +func (PolicyRule) SwaggerDoc() map[string]string { + return map_PolicyRule +} + +var map_Role = map[string]string{ + "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this Role", +} + +func (Role) SwaggerDoc() map[string]string { + return map_Role +} + +var map_RoleBinding = map[string]string{ + "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", + "metadata": "Standard object's metadata.", + "subjects": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. 
If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (RoleBinding) SwaggerDoc() map[string]string { + return map_RoleBinding +} + +var map_RoleBindingList = map[string]string{ + "": "RoleBindingList is a collection of RoleBindings", + "metadata": "Standard object's metadata.", + "items": "Items is a list of RoleBindings", +} + +func (RoleBindingList) SwaggerDoc() map[string]string { + return map_RoleBindingList +} + +var map_RoleList = map[string]string{ + "": "RoleList is a collection of Roles", + "metadata": "Standard object's metadata.", + "items": "Items is a list of Roles", +} + +func (RoleList) SwaggerDoc() map[string]string { + return map_RoleList +} + +var map_RoleRef = map[string]string{ + "": "RoleRef contains information that points to the role being used", + "apiGroup": "APIGroup is the group for the resource being referenced", + "kind": "Kind is the type of resource being referenced", + "name": "Name is the name of resource being referenced", +} + +func (RoleRef) SwaggerDoc() map[string]string { + return map_RoleRef +} + +var map_Subject = map[string]string{ + "": "Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names.", + "kind": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.", + "apiGroup": "APIGroup holds the API group of the referenced subject. Defaults to \"\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io\" for User and Group subjects.", + "name": "Name of the object being referenced.", + "namespace": "Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.", +} + +func (Subject) SwaggerDoc() map[string]string { + return map_Subject +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.conversion.go new file mode 100644 index 000000000..6a9b31f9d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.conversion.go @@ -0,0 +1,389 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1beta1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + rbac "k8s.io/client-go/pkg/apis/rbac" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_ClusterRole_To_rbac_ClusterRole, + Convert_rbac_ClusterRole_To_v1beta1_ClusterRole, + Convert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding, + Convert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding, + Convert_v1beta1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder, + Convert_rbac_ClusterRoleBindingBuilder_To_v1beta1_ClusterRoleBindingBuilder, + Convert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList, + Convert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList, + Convert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList, + Convert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList, + Convert_v1beta1_PolicyRule_To_rbac_PolicyRule, + Convert_rbac_PolicyRule_To_v1beta1_PolicyRule, + Convert_v1beta1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder, + Convert_rbac_PolicyRuleBuilder_To_v1beta1_PolicyRuleBuilder, + Convert_v1beta1_Role_To_rbac_Role, + Convert_rbac_Role_To_v1beta1_Role, + Convert_v1beta1_RoleBinding_To_rbac_RoleBinding, + Convert_rbac_RoleBinding_To_v1beta1_RoleBinding, + Convert_v1beta1_RoleBindingList_To_rbac_RoleBindingList, + Convert_rbac_RoleBindingList_To_v1beta1_RoleBindingList, + Convert_v1beta1_RoleList_To_rbac_RoleList, + Convert_rbac_RoleList_To_v1beta1_RoleList, + Convert_v1beta1_RoleRef_To_rbac_RoleRef, + Convert_rbac_RoleRef_To_v1beta1_RoleRef, + Convert_v1beta1_Subject_To_rbac_Subject, + Convert_rbac_Subject_To_v1beta1_Subject, + ) +} + +func autoConvert_v1beta1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules)) + return nil +} + +func Convert_v1beta1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterRole_To_rbac_ClusterRole(in, out, s) +} + +func autoConvert_rbac_ClusterRole_To_v1beta1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if in.Rules == nil { + out.Rules = make([]PolicyRule, 0) + } else { + out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) + } + return nil +} + +func Convert_rbac_ClusterRole_To_v1beta1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error { + return autoConvert_rbac_ClusterRole_To_v1beta1_ClusterRole(in, out, s) +} + +func autoConvert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Subjects = *(*[]rbac.Subject)(unsafe.Pointer(&in.Subjects)) + if err := Convert_v1beta1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in, out, s) +} + +func autoConvert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if in.Subjects == nil { + out.Subjects = make([]Subject, 0) + } else { + out.Subjects = *(*[]Subject)(unsafe.Pointer(&in.Subjects)) + } + if err := Convert_rbac_RoleRef_To_v1beta1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { + return err 
+ } + return nil +} + +func Convert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(in, out, s) +} + +func autoConvert_v1beta1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in *ClusterRoleBindingBuilder, out *rbac.ClusterRoleBindingBuilder, s conversion.Scope) error { + if err := Convert_v1beta1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(&in.ClusterRoleBinding, &out.ClusterRoleBinding, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in *ClusterRoleBindingBuilder, out *rbac.ClusterRoleBindingBuilder, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterRoleBindingBuilder_To_rbac_ClusterRoleBindingBuilder(in, out, s) +} + +func autoConvert_rbac_ClusterRoleBindingBuilder_To_v1beta1_ClusterRoleBindingBuilder(in *rbac.ClusterRoleBindingBuilder, out *ClusterRoleBindingBuilder, s conversion.Scope) error { + if err := Convert_rbac_ClusterRoleBinding_To_v1beta1_ClusterRoleBinding(&in.ClusterRoleBinding, &out.ClusterRoleBinding, s); err != nil { + return err + } + return nil +} + +func Convert_rbac_ClusterRoleBindingBuilder_To_v1beta1_ClusterRoleBindingBuilder(in *rbac.ClusterRoleBindingBuilder, out *ClusterRoleBindingBuilder, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleBindingBuilder_To_v1beta1_ClusterRoleBindingBuilder(in, out, s) +} + +func autoConvert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]rbac.ClusterRoleBinding)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in, out, s) +} + +func autoConvert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]ClusterRoleBinding, 0) + } else { + out.Items = *(*[]ClusterRoleBinding)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleBindingList_To_v1beta1_ClusterRoleBindingList(in, out, s) +} + +func autoConvert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]rbac.ClusterRole)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { + return autoConvert_v1beta1_ClusterRoleList_To_rbac_ClusterRoleList(in, out, s) +} + +func autoConvert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]ClusterRole, 0) + } else { + out.Items = *(*[]ClusterRole)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func 
Convert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { + return autoConvert_rbac_ClusterRoleList_To_v1beta1_ClusterRoleList(in, out, s) +} + +func autoConvert_v1beta1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error { + out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) + out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) + out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) + return nil +} + +func Convert_v1beta1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error { + return autoConvert_v1beta1_PolicyRule_To_rbac_PolicyRule(in, out, s) +} + +func autoConvert_rbac_PolicyRule_To_v1beta1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error { + if in.Verbs == nil { + out.Verbs = make([]string, 0) + } else { + out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) + } + out.APIGroups = *(*[]string)(unsafe.Pointer(&in.APIGroups)) + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) + out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) + return nil +} + +func Convert_rbac_PolicyRule_To_v1beta1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error { + return autoConvert_rbac_PolicyRule_To_v1beta1_PolicyRule(in, out, s) +} + +func autoConvert_v1beta1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in *PolicyRuleBuilder, out *rbac.PolicyRuleBuilder, s conversion.Scope) error { + if err := Convert_v1beta1_PolicyRule_To_rbac_PolicyRule(&in.PolicyRule, &out.PolicyRule, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in *PolicyRuleBuilder, out *rbac.PolicyRuleBuilder, s conversion.Scope) error { + return autoConvert_v1beta1_PolicyRuleBuilder_To_rbac_PolicyRuleBuilder(in, out, s) +} + +func autoConvert_rbac_PolicyRuleBuilder_To_v1beta1_PolicyRuleBuilder(in *rbac.PolicyRuleBuilder, out *PolicyRuleBuilder, s conversion.Scope) error { + if err := Convert_rbac_PolicyRule_To_v1beta1_PolicyRule(&in.PolicyRule, &out.PolicyRule, s); err != nil { + return err + } + return nil +} + +func Convert_rbac_PolicyRuleBuilder_To_v1beta1_PolicyRuleBuilder(in *rbac.PolicyRuleBuilder, out *PolicyRuleBuilder, s conversion.Scope) error { + return autoConvert_rbac_PolicyRuleBuilder_To_v1beta1_PolicyRuleBuilder(in, out, s) +} + +func autoConvert_v1beta1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]rbac.PolicyRule)(unsafe.Pointer(&in.Rules)) + return nil +} + +func Convert_v1beta1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { + return autoConvert_v1beta1_Role_To_rbac_Role(in, out, s) +} + +func autoConvert_rbac_Role_To_v1beta1_Role(in *rbac.Role, out *Role, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if in.Rules == nil { + out.Rules = make([]PolicyRule, 0) + } else { + out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) + } + return nil +} + +func Convert_rbac_Role_To_v1beta1_Role(in *rbac.Role, out *Role, s conversion.Scope) error { + return autoConvert_rbac_Role_To_v1beta1_Role(in, out, s) +} + +func autoConvert_v1beta1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out 
*rbac.RoleBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Subjects = *(*[]rbac.Subject)(unsafe.Pointer(&in.Subjects)) + if err := Convert_v1beta1_RoleRef_To_rbac_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { + return err + } + return nil +} + +func Convert_v1beta1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error { + return autoConvert_v1beta1_RoleBinding_To_rbac_RoleBinding(in, out, s) +} + +func autoConvert_rbac_RoleBinding_To_v1beta1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if in.Subjects == nil { + out.Subjects = make([]Subject, 0) + } else { + out.Subjects = *(*[]Subject)(unsafe.Pointer(&in.Subjects)) + } + if err := Convert_rbac_RoleRef_To_v1beta1_RoleRef(&in.RoleRef, &out.RoleRef, s); err != nil { + return err + } + return nil +} + +func Convert_rbac_RoleBinding_To_v1beta1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error { + return autoConvert_rbac_RoleBinding_To_v1beta1_RoleBinding(in, out, s) +} + +func autoConvert_v1beta1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]rbac.RoleBinding)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error { + return autoConvert_v1beta1_RoleBindingList_To_rbac_RoleBindingList(in, out, s) +} + +func autoConvert_rbac_RoleBindingList_To_v1beta1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]RoleBinding, 0) + } else { + out.Items = *(*[]RoleBinding)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_rbac_RoleBindingList_To_v1beta1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error { + return autoConvert_rbac_RoleBindingList_To_v1beta1_RoleBindingList(in, out, s) +} + +func autoConvert_v1beta1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]rbac.Role)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error { + return autoConvert_v1beta1_RoleList_To_rbac_RoleList(in, out, s) +} + +func autoConvert_rbac_RoleList_To_v1beta1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]Role, 0) + } else { + out.Items = *(*[]Role)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_rbac_RoleList_To_v1beta1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error { + return autoConvert_rbac_RoleList_To_v1beta1_RoleList(in, out, s) +} + +func autoConvert_v1beta1_RoleRef_To_rbac_RoleRef(in *RoleRef, out *rbac.RoleRef, s conversion.Scope) error { + out.APIGroup = in.APIGroup + out.Kind = in.Kind + out.Name = in.Name + return nil +} + +func Convert_v1beta1_RoleRef_To_rbac_RoleRef(in *RoleRef, out *rbac.RoleRef, s conversion.Scope) error { + return autoConvert_v1beta1_RoleRef_To_rbac_RoleRef(in, out, s) +} + +func autoConvert_rbac_RoleRef_To_v1beta1_RoleRef(in *rbac.RoleRef, out *RoleRef, s conversion.Scope) error { + out.APIGroup = in.APIGroup + out.Kind = in.Kind + out.Name = in.Name + 
return nil +} + +func Convert_rbac_RoleRef_To_v1beta1_RoleRef(in *rbac.RoleRef, out *RoleRef, s conversion.Scope) error { + return autoConvert_rbac_RoleRef_To_v1beta1_RoleRef(in, out, s) +} + +func autoConvert_v1beta1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error { + out.Kind = in.Kind + out.APIGroup = in.APIGroup + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +func Convert_v1beta1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error { + return autoConvert_v1beta1_Subject_To_rbac_Subject(in, out, s) +} + +func autoConvert_rbac_Subject_To_v1beta1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error { + out.Kind = in.Kind + out.APIGroup = in.APIGroup + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +func Convert_rbac_Subject_To_v1beta1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error { + return autoConvert_rbac_Subject_To_v1beta1_Subject(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..dccae9b4f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,258 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterRole, InType: reflect.TypeOf(&ClusterRole{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterRoleBinding, InType: reflect.TypeOf(&ClusterRoleBinding{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterRoleBindingList, InType: reflect.TypeOf(&ClusterRoleBindingList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterRoleList, InType: reflect.TypeOf(&ClusterRoleList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_PolicyRule, InType: reflect.TypeOf(&PolicyRule{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Role, InType: reflect.TypeOf(&Role{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RoleBinding, InType: reflect.TypeOf(&RoleBinding{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RoleBindingList, InType: reflect.TypeOf(&RoleBindingList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RoleList, InType: reflect.TypeOf(&RoleList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_RoleRef, InType: reflect.TypeOf(&RoleRef{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Subject, InType: reflect.TypeOf(&Subject{})}, + ) +} + +func DeepCopy_v1beta1_ClusterRole(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRole) + out := out.(*ClusterRole) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_PolicyRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_ClusterRoleBinding(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRoleBinding) + out := out.(*ClusterRoleBinding) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_ClusterRoleBindingList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRoleBindingList) + out := out.(*ClusterRoleBindingList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_ClusterRoleBinding(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_ClusterRoleList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRoleList) + out := out.(*ClusterRoleList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRole, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_ClusterRole(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_PolicyRule(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PolicyRule) + out := out.(*PolicyRule) + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + 
in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_Role(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Role) + out := out.(*Role) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_PolicyRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_RoleBinding(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleBinding) + out := out.(*RoleBinding) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_v1beta1_RoleBindingList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleBindingList) + out := out.(*RoleBindingList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_RoleBinding(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_RoleList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleList) + out := out.(*RoleList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Role, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_Role(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1beta1_RoleRef(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleRef) + out := out.(*RoleRef) + *out = *in + return nil + } +} + +func DeepCopy_v1beta1_Subject(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Subject) + out := out.(*Subject) + *out = *in + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.defaults.go new file mode 100644 index 000000000..fa5bfb6ab --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/v1beta1/zz_generated.defaults.go @@ -0,0 +1,66 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&ClusterRoleBinding{}, func(obj interface{}) { SetObjectDefaults_ClusterRoleBinding(obj.(*ClusterRoleBinding)) }) + scheme.AddTypeDefaultingFunc(&ClusterRoleBindingList{}, func(obj interface{}) { SetObjectDefaults_ClusterRoleBindingList(obj.(*ClusterRoleBindingList)) }) + scheme.AddTypeDefaultingFunc(&RoleBinding{}, func(obj interface{}) { SetObjectDefaults_RoleBinding(obj.(*RoleBinding)) }) + scheme.AddTypeDefaultingFunc(&RoleBindingList{}, func(obj interface{}) { SetObjectDefaults_RoleBindingList(obj.(*RoleBindingList)) }) + return nil +} + +func SetObjectDefaults_ClusterRoleBinding(in *ClusterRoleBinding) { + SetDefaults_ClusterRoleBinding(in) + for i := range in.Subjects { + a := &in.Subjects[i] + SetDefaults_Subject(a) + } +} + +func SetObjectDefaults_ClusterRoleBindingList(in *ClusterRoleBindingList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ClusterRoleBinding(a) + } +} + +func SetObjectDefaults_RoleBinding(in *RoleBinding) { + SetDefaults_RoleBinding(in) + for i := range in.Subjects { + a := &in.Subjects[i] + SetDefaults_Subject(a) + } +} + +func SetObjectDefaults_RoleBindingList(in *RoleBindingList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_RoleBinding(a) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/rbac/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/rbac/zz_generated.deepcopy.go new file mode 100644 index 000000000..db5d08cdf --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/rbac/zz_generated.deepcopy.go @@ -0,0 +1,258 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package rbac + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_ClusterRole, InType: reflect.TypeOf(&ClusterRole{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_ClusterRoleBinding, InType: reflect.TypeOf(&ClusterRoleBinding{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_ClusterRoleBindingList, InType: reflect.TypeOf(&ClusterRoleBindingList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_ClusterRoleList, InType: reflect.TypeOf(&ClusterRoleList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_PolicyRule, InType: reflect.TypeOf(&PolicyRule{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_Role, InType: reflect.TypeOf(&Role{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_RoleBinding, InType: reflect.TypeOf(&RoleBinding{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_RoleBindingList, InType: reflect.TypeOf(&RoleBindingList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_RoleList, InType: reflect.TypeOf(&RoleList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_RoleRef, InType: reflect.TypeOf(&RoleRef{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_rbac_Subject, InType: reflect.TypeOf(&Subject{})}, + ) +} + +func DeepCopy_rbac_ClusterRole(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRole) + out := out.(*ClusterRole) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + if err := DeepCopy_rbac_PolicyRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_rbac_ClusterRoleBinding(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRoleBinding) + out := out.(*ClusterRoleBinding) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_rbac_ClusterRoleBindingList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRoleBindingList) + out := out.(*ClusterRoleBindingList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(*in)) + for i := range *in { + if err := DeepCopy_rbac_ClusterRoleBinding(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_rbac_ClusterRoleList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*ClusterRoleList) + out := out.(*ClusterRoleList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRole, len(*in)) + for i := range *in { + if err := DeepCopy_rbac_ClusterRole(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_rbac_PolicyRule(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PolicyRule) + out := out.(*PolicyRule) + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = 
make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_rbac_Role(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Role) + out := out.(*Role) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + if err := DeepCopy_rbac_PolicyRule(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_rbac_RoleBinding(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleBinding) + out := out.(*RoleBinding) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + return nil + } +} + +func DeepCopy_rbac_RoleBindingList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleBindingList) + out := out.(*RoleBindingList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + if err := DeepCopy_rbac_RoleBinding(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_rbac_RoleList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleList) + out := out.(*RoleList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Role, len(*in)) + for i := range *in { + if err := DeepCopy_rbac_Role(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_rbac_RoleRef(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*RoleRef) + out := out.(*RoleRef) + *out = *in + return nil + } +} + +func DeepCopy_rbac_Subject(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*Subject) + out := out.(*Subject) + *out = *in + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/doc.go b/vendor/k8s.io/client-go/pkg/apis/settings/doc.go new file mode 100644 index 000000000..90ccf882a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +groupName=settings.k8s.io +package settings diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/install/install.go b/vendor/k8s.io/client-go/pkg/apis/settings/install/install.go new file mode 100644 index 000000000..ccdc70400 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/install/install.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the settings API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/settings" + "k8s.io/client-go/pkg/apis/settings/v1alpha1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: settings.GroupName, + VersionPreferenceOrder: []string{v1alpha1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/settings", + AddInternalObjectsToScheme: settings.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/register.go b/vendor/k8s.io/client-go/pkg/apis/settings/register.go new file mode 100644 index 000000000..858470127 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package settings + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// GroupName is the group name use in this package +const GroupName = "settings.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PodPreset{}, + &PodPresetList{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/types.go b/vendor/k8s.io/client-go/pkg/apis/settings/types.go new file mode 100644 index 000000000..f89a16d25 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/types.go @@ -0,0 +1,63 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package settings + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api" +) + +// +genclient=true + +// PodPreset is a policy resource that defines additional runtime +// requirements for a Pod. +type PodPreset struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // +optional + Spec PodPresetSpec +} + +// PodPresetSpec is a description of a pod injection policy. +type PodPresetSpec struct { + // Selector is a label query over a set of resources, in this case pods. + // Required. + Selector metav1.LabelSelector + // Env defines the collection of EnvVar to inject into containers. + // +optional + Env []api.EnvVar + // EnvFrom defines the collection of EnvFromSource to inject into containers. + // +optional + EnvFrom []api.EnvFromSource + // Volumes defines the collection of Volume to inject into the pod. + // +optional + Volumes []api.Volume + // VolumeMounts defines the collection of VolumeMount to inject into containers. + // +optional + VolumeMounts []api.VolumeMount +} + +// PodPresetList is a list of PodPreset objects. +type PodPresetList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []PodPreset +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/doc.go b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/doc.go new file mode 100644 index 000000000..86390b523 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=settings.k8s.io +package v1alpha1 diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/generated.pb.go new file mode 100644 index 000000000..88f85877a --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/generated.pb.go @@ -0,0 +1,924 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/settings/v1alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/settings/v1alpha1/generated.proto + + It has these top-level messages: + PodPreset + PodPresetList + PodPresetSpec +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/pkg/api/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *PodPreset) Reset() { *m = PodPreset{} } +func (*PodPreset) ProtoMessage() {} +func (*PodPreset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *PodPresetList) Reset() { *m = PodPresetList{} } +func (*PodPresetList) ProtoMessage() {} +func (*PodPresetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *PodPresetSpec) Reset() { *m = PodPresetSpec{} } +func (*PodPresetSpec) ProtoMessage() {} +func (*PodPresetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func init() { + proto.RegisterType((*PodPreset)(nil), "k8s.io.client-go.pkg.apis.settings.v1alpha1.PodPreset") + proto.RegisterType((*PodPresetList)(nil), "k8s.io.client-go.pkg.apis.settings.v1alpha1.PodPresetList") + proto.RegisterType((*PodPresetSpec)(nil), "k8s.io.client-go.pkg.apis.settings.v1alpha1.PodPresetSpec") +} +func (m *PodPreset) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodPreset) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func (m *PodPresetList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodPresetList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n3, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n3 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodPresetSpec) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PodPresetSpec) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) + n4, err := m.Selector.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Env) > 0 { + for _, msg := range m.Env { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.EnvFrom) > 0 { + for _, msg := range m.EnvFrom { + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Volumes) > 0 { + for _, msg := range m.Volumes { + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if 
len(m.VolumeMounts) > 0 { + for _, msg := range m.VolumeMounts { + data[i] = 0x2a + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *PodPreset) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodPresetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodPresetSpec) Size() (n int) { + var l int + _ = l + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.EnvFrom) > 0 { + for _, e := range m.EnvFrom { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PodPreset) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodPreset{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodPresetSpec", "PodPresetSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodPresetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodPresetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodPreset", "PodPreset", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodPresetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodPresetSpec{`, + `Selector:` + strings.Replace(strings.Replace(this.Selector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Env:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "k8s_io_kubernetes_pkg_api_v1.EnvVar", 1), `&`, ``, 1) + `,`, + `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "k8s_io_kubernetes_pkg_api_v1.EnvFromSource", 1), `&`, ``, 1) + `,`, + `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "k8s_io_kubernetes_pkg_api_v1.Volume", 1), `&`, ``, 1) + `,`, + `VolumeMounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeMounts), "VolumeMount", "k8s_io_kubernetes_pkg_api_v1.VolumeMount", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PodPreset) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodPreset: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodPreset: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodPresetList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { 
+ return fmt.Errorf("proto: PodPresetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodPresetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodPreset{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodPresetSpec) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodPresetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodPresetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, k8s_io_kubernetes_pkg_api_v1.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EnvFrom = append(m.EnvFrom, k8s_io_kubernetes_pkg_api_v1.EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, k8s_io_kubernetes_pkg_api_v1.Volume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMounts = append(m.VolumeMounts, k8s_io_kubernetes_pkg_api_v1.VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 550 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x9b, 0x6d, 0x4b, 0xeb, 0xb4, 0x8b, 0x12, 0x3c, 0x84, 0x1e, 0xb2, 0x4b, 0xf1, 0xb0, + 0xea, 0x3a, 0xb1, 0xab, 0xa8, 0xa0, 0xa7, 0xc8, 0x0a, 0x82, 0xcb, 0x2e, 0x29, 0xf4, 0x20, 0x2b, + 0x38, 0x4d, 0x9f, 0x69, 0x6c, 0x93, 0x09, 0x33, 0x93, 0xa0, 0x37, 0x3f, 0x82, 0x5f, 0x4a, 0x28, + 0xe8, 0x61, 0x8f, 0x9e, 0x16, 0x5b, 0xbf, 0x88, 0xcc, 0x74, 0xd2, 0x44, 0xba, 0x65, 0xab, 0xb7, + 0x79, 0x8f, 0xf7, 0xff, 0xbd, 0xff, 0x3f, 0x2f, 0xe8, 0xc5, 0xe4, 0x19, 0xc7, 0x21, 0x75, 0x26, + 0xe9, 0x10, 0x58, 0x0c, 0x02, 0xb8, 0x93, 0x4c, 0x02, 0x87, 0x24, 0x21, 0x77, 0x38, 0x08, 0x11, + 0xc6, 0x01, 0x77, 0xb2, 0x1e, 0x99, 0x26, 0x63, 0xd2, 0x73, 0x02, 0x88, 0x81, 0x11, 0x01, 0x23, + 0x9c, 0x30, 0x2a, 0xa8, 0x79, 0xb8, 0x54, 0xe3, 0x42, 0x8d, 0x93, 0x49, 0x80, 0xa5, 0x1a, 0xe7, + 0x6a, 0x9c, 0xab, 0x3b, 0x0f, 0x82, 0x50, 0x8c, 0xd3, 0x21, 0xf6, 0x69, 0xe4, 0x04, 0x34, 0xa0, + 0x8e, 0x82, 0x0c, 0xd3, 0x0f, 0xaa, 0x52, 0x85, 0x7a, 0x2d, 0xe1, 0x9d, 0xc7, 0xda, 0x1a, 0x49, + 0xc2, 0x88, 0xf8, 0xe3, 0x30, 0x06, 0xf6, 0xb9, 0x30, 0x17, 0x81, 0x20, 0x4e, 0xb6, 0x66, 0xa9, + 0xe3, 0x6c, 0x52, 0xb1, 0x34, 0x16, 0x61, 0x04, 0x6b, 0x82, 0x27, 0xd7, 0x09, 0xb8, 0x3f, 0x86, + 0x88, 0xac, 0xe9, 0x4a, 0xf6, 0x38, 0xb0, 0x0c, 0x58, 0xe1, 0x0d, 0x3e, 0x91, 0x28, 0x99, 0xc2, + 0x55, 0xf6, 0x0e, 0x37, 0x7e, 0xef, 0x2b, 0xa6, 0xbb, 0x3f, 0x0c, 0x74, 0xe3, 0x8c, 0x8e, 0xce, + 0x18, 0x70, 0x10, 0xe6, 0x7b, 0xd4, 0x94, 0xa9, 0x47, 0x44, 0x10, 0xcb, 0xd8, 0x37, 0x0e, 0x5a, + 0x47, 0x0f, 0xb1, 0x3e, 0x40, 0xd9, 0x7c, 0x71, 0x02, 0x39, 0x8d, 0xb3, 0x1e, 0x3e, 0x1d, 0x7e, + 0x04, 0x5f, 0x9c, 0x80, 0x20, 0xae, 0x39, 0xbb, 0xdc, 0xab, 0x2c, 0x2e, 0xf7, 0x50, 0xd1, 0xf3, + 0x56, 0x54, 0xf3, 0x1d, 0xaa, 0xf1, 0x04, 0x7c, 0x6b, 0x47, 0xd1, 0x9f, 0xe3, 0x7f, 0x39, 0x2f, + 0x5e, 0x19, 0xed, 0x27, 0xe0, 0xbb, 0x6d, 0xbd, 0xa8, 0x26, 0x2b, 0x4f, 0x61, 0xbb, 0xdf, 0x0d, + 0xb4, 0xbb, 0x9a, 0x7a, 0x13, 0x72, 0x61, 0x9e, 0xaf, 0x45, 0xc2, 0xdb, 0x45, 0x92, 0x6a, 0x15, + 0xe8, 0x96, 0xde, 0xd3, 0xcc, 0x3b, 0xa5, 0x38, 0xe7, 0xa8, 0x1e, 0x0a, 0x88, 0xb8, 0xb5, 0xb3, + 0x5f, 0x3d, 0x68, 0x1d, 0x3d, 0xfd, 0xcf, 0x3c, 0xee, 0xae, 0xde, 0x51, 0x7f, 0x2d, 0x69, 0xde, + 0x12, 0xda, 0xfd, 0x56, 
0x2d, 0xa5, 0x91, 0x29, 0x4d, 0x82, 0x9a, 0x1c, 0xa6, 0xe0, 0x0b, 0xca, + 0x74, 0x9a, 0x47, 0x5b, 0xa6, 0x21, 0x43, 0x98, 0xf6, 0xb5, 0xb4, 0x88, 0x94, 0x77, 0xbc, 0x15, + 0xd6, 0x7c, 0x89, 0xaa, 0x10, 0x67, 0x3a, 0xd0, 0x9d, 0xcd, 0x81, 0x24, 0xf5, 0x38, 0xce, 0x06, + 0x84, 0xb9, 0x2d, 0x8d, 0xab, 0x1e, 0xc7, 0x99, 0x27, 0xd5, 0xe6, 0x00, 0x35, 0x20, 0xce, 0x5e, + 0x31, 0x1a, 0x59, 0x55, 0x05, 0xba, 0x7f, 0x2d, 0x48, 0x0e, 0xf7, 0x69, 0xca, 0x7c, 0x70, 0x6f, + 0x6a, 0x5e, 0x43, 0xb7, 0xbd, 0x1c, 0x66, 0x9e, 0xa2, 0x46, 0x46, 0xa7, 0x69, 0x04, 0xdc, 0xaa, + 0x6d, 0x63, 0x70, 0xa0, 0x86, 0x0b, 0xe0, 0xb2, 0xe6, 0x5e, 0x4e, 0x31, 0x7d, 0xd4, 0x5e, 0x3e, + 0x4f, 0x68, 0x1a, 0x0b, 0x6e, 0xd5, 0x15, 0xf5, 0xee, 0x36, 0x54, 0xa5, 0x70, 0x6f, 0x6b, 0x74, + 0xbb, 0xd4, 0xe4, 0xde, 0x5f, 0x50, 0xf7, 0xde, 0x6c, 0x6e, 0x57, 0x2e, 0xe6, 0x76, 0xe5, 0xe7, + 0xdc, 0xae, 0x7c, 0x59, 0xd8, 0xc6, 0x6c, 0x61, 0x1b, 0x17, 0x0b, 0xdb, 0xf8, 0xb5, 0xb0, 0x8d, + 0xaf, 0xbf, 0xed, 0xca, 0xdb, 0x66, 0xfe, 0x4f, 0xfc, 0x09, 0x00, 0x00, 0xff, 0xff, 0x91, 0x84, + 0x03, 0x10, 0x2f, 0x05, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/generated.proto new file mode 100644 index 000000000..b5a80fa63 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/generated.proto @@ -0,0 +1,76 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.settings.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// PodPreset is a policy resource that defines additional runtime +// requirements for a Pod. +message PodPreset { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // +optional + optional PodPresetSpec spec = 2; +} + +// PodPresetList is a list of PodPreset objects. +message PodPresetList { + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated PodPreset items = 2; +} + +// PodPresetSpec is a description of a pod injection policy. +message PodPresetSpec { + // Selector is a label query over a set of resources, in this case pods. + // Required. + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + + // Env defines the collection of EnvVar to inject into containers. 
+ // +optional + repeated k8s.io.kubernetes.pkg.api.v1.EnvVar env = 2; + + // EnvFrom defines the collection of EnvFromSource to inject into containers. + // +optional + repeated k8s.io.kubernetes.pkg.api.v1.EnvFromSource envFrom = 3; + + // Volumes defines the collection of Volume to inject into the pod. + // +optional + repeated k8s.io.kubernetes.pkg.api.v1.Volume volumes = 4; + + // VolumeMounts defines the collection of VolumeMount to inject into containers. + // +optional + repeated k8s.io.kubernetes.pkg.api.v1.VolumeMount volumeMounts = 5; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/register.go b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/register.go new file mode 100644 index 000000000..45afb50ca --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/register.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "settings.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PodPreset{}, + &PodPresetList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/types.go new file mode 100644 index 000000000..5b0c5d19f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/types.go @@ -0,0 +1,67 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/pkg/api/v1" +) + +// +genclient=true + +// PodPreset is a policy resource that defines additional runtime +// requirements for a Pod. 
+type PodPreset struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // +optional + Spec PodPresetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// PodPresetSpec is a description of a pod injection policy. +type PodPresetSpec struct { + // Selector is a label query over a set of resources, in this case pods. + // Required. + Selector metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` + + // Env defines the collection of EnvVar to inject into containers. + // +optional + Env []v1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"` + // EnvFrom defines the collection of EnvFromSource to inject into containers. + // +optional + EnvFrom []v1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,3,rep,name=envFrom"` + // Volumes defines the collection of Volume to inject into the pod. + // +optional + Volumes []v1.Volume `json:"volumes,omitempty" protobuf:"bytes,4,rep,name=volumes"` + // VolumeMounts defines the collection of VolumeMount to inject into containers. + // +optional + VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty" protobuf:"bytes,5,rep,name=volumeMounts"` +} + +// PodPresetList is a list of PodPreset objects. +type PodPresetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []PodPreset `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 000000000..5b4dc665d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,61 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
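As a rough orientation for reviewers (not part of the vendored diff): the PodPreset, PodPresetSpec, and PodPresetList structs just added can be used directly as plain Go values. The preset name, labels, and env value below are made up for illustration; the field names and import paths come from the types.go file above.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/pkg/api/v1"
	settingsv1alpha1 "k8s.io/client-go/pkg/apis/settings/v1alpha1"
)

func main() {
	// Hypothetical preset: inject DB_HOST into every pod labeled role=frontend.
	preset := settingsv1alpha1.PodPreset{
		ObjectMeta: metav1.ObjectMeta{Name: "frontend-env", Namespace: "default"},
		Spec: settingsv1alpha1.PodPresetSpec{
			Selector: metav1.LabelSelector{
				MatchLabels: map[string]string{"role": "frontend"},
			},
			Env: []v1.EnvVar{{Name: "DB_HOST", Value: "db.default.svc"}},
		},
	}
	fmt.Println(preset.Spec.Env[0].Name) // DB_HOST
}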
+// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_PodPreset = map[string]string{ + "": "PodPreset is a policy resource that defines additional runtime requirements for a Pod.", +} + +func (PodPreset) SwaggerDoc() map[string]string { + return map_PodPreset +} + +var map_PodPresetList = map[string]string{ + "": "PodPresetList is a list of PodPreset objects.", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (PodPresetList) SwaggerDoc() map[string]string { + return map_PodPresetList +} + +var map_PodPresetSpec = map[string]string{ + "": "PodPresetSpec is a description of a pod injection policy.", + "selector": "Selector is a label query over a set of resources, in this case pods. Required.", + "env": "Env defines the collection of EnvVar to inject into containers.", + "envFrom": "EnvFrom defines the collection of EnvFromSource to inject into containers.", + "volumes": "Volumes defines the collection of Volume to inject into the pod.", + "volumeMounts": "VolumeMounts defines the collection of VolumeMount to inject into containers.", +} + +func (PodPresetSpec) SwaggerDoc() map[string]string { + return map_PodPresetSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.conversion.go new file mode 100644 index 000000000..304af7ea0 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,159 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1alpha1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/client-go/pkg/api" + v1 "k8s.io/client-go/pkg/api/v1" + settings "k8s.io/client-go/pkg/apis/settings" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1alpha1_PodPreset_To_settings_PodPreset, + Convert_settings_PodPreset_To_v1alpha1_PodPreset, + Convert_v1alpha1_PodPresetList_To_settings_PodPresetList, + Convert_settings_PodPresetList_To_v1alpha1_PodPresetList, + Convert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec, + Convert_settings_PodPresetSpec_To_v1alpha1_PodPresetSpec, + ) +} + +func autoConvert_v1alpha1_PodPreset_To_settings_PodPreset(in *PodPreset, out *settings.PodPreset, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_v1alpha1_PodPreset_To_settings_PodPreset(in *PodPreset, out *settings.PodPreset, s conversion.Scope) error { + return autoConvert_v1alpha1_PodPreset_To_settings_PodPreset(in, out, s) +} + +func autoConvert_settings_PodPreset_To_v1alpha1_PodPreset(in *settings.PodPreset, out *PodPreset, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_settings_PodPresetSpec_To_v1alpha1_PodPresetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func Convert_settings_PodPreset_To_v1alpha1_PodPreset(in *settings.PodPreset, out *PodPreset, s conversion.Scope) error { + return autoConvert_settings_PodPreset_To_v1alpha1_PodPreset(in, out, s) +} + +func autoConvert_v1alpha1_PodPresetList_To_settings_PodPresetList(in *PodPresetList, out *settings.PodPresetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]settings.PodPreset, len(*in)) + for i := range *in { + if err := Convert_v1alpha1_PodPreset_To_settings_PodPreset(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +func Convert_v1alpha1_PodPresetList_To_settings_PodPresetList(in *PodPresetList, out *settings.PodPresetList, s conversion.Scope) error { + return autoConvert_v1alpha1_PodPresetList_To_settings_PodPresetList(in, out, s) +} + +func autoConvert_settings_PodPresetList_To_v1alpha1_PodPresetList(in *settings.PodPresetList, out *PodPresetList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodPreset, len(*in)) + for i := range *in { + if err := Convert_settings_PodPreset_To_v1alpha1_PodPreset(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = make([]PodPreset, 0) + } + return nil +} + +func Convert_settings_PodPresetList_To_v1alpha1_PodPresetList(in *settings.PodPresetList, out *PodPresetList, s conversion.Scope) error { + return autoConvert_settings_PodPresetList_To_v1alpha1_PodPresetList(in, out, s) +} + +func autoConvert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec(in *PodPresetSpec, out *settings.PodPresetSpec, s conversion.Scope) error { + out.Selector = in.Selector + out.Env = *(*[]api.EnvVar)(unsafe.Pointer(&in.Env)) + out.EnvFrom = *(*[]api.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]api.Volume, len(*in)) + for i := range *in { + // TODO: Inefficient conversion - can we improve it? 
+ if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + out.VolumeMounts = *(*[]api.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + return nil +} + +func Convert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec(in *PodPresetSpec, out *settings.PodPresetSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_PodPresetSpec_To_settings_PodPresetSpec(in, out, s) +} + +func autoConvert_settings_PodPresetSpec_To_v1alpha1_PodPresetSpec(in *settings.PodPresetSpec, out *PodPresetSpec, s conversion.Scope) error { + out.Selector = in.Selector + out.Env = *(*[]v1.EnvVar)(unsafe.Pointer(&in.Env)) + out.EnvFrom = *(*[]v1.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + // TODO: Inefficient conversion - can we improve it? + if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + out.VolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + return nil +} + +func Convert_settings_PodPresetSpec_To_v1alpha1_PodPresetSpec(in *settings.PodPresetSpec, out *PodPresetSpec, s conversion.Scope) error { + return autoConvert_settings_PodPresetSpec_To_v1alpha1_PodPresetSpec(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..017d8ccb0 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,124 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api_v1 "k8s.io/client-go/pkg/api/v1" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. 
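A note on the generated conversions above: Env, EnvFrom, and VolumeMounts are converted by reinterpreting the slice through an unsafe.Pointer cast, which conversion-gen only emits when the external and internal element types share an identical memory layout; Volumes falls back to an element-by-element s.Convert call because its layouts differ. A minimal standalone sketch of the same zero-copy pattern, using two hypothetical but layout-identical types (not taken from the diff):

package main

import (
	"fmt"
	"unsafe"
)

// Two distinct types with identical field order and types.
type external struct{ Name, Value string }
type internal struct{ Name, Value string }

func main() {
	in := []external{{Name: "DB_HOST", Value: "db"}}
	// Reinterpret the slice header in place; safe only because the element
	// layouts match, which is what the generated code relies on for Env,
	// EnvFrom and VolumeMounts.
	out := *(*[]internal)(unsafe.Pointer(&in))
	fmt.Println(out[0].Name, out[0].Value)
}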
+func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_PodPreset, InType: reflect.TypeOf(&PodPreset{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_PodPresetList, InType: reflect.TypeOf(&PodPresetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_PodPresetSpec, InType: reflect.TypeOf(&PodPresetSpec{})}, + ) +} + +func DeepCopy_v1alpha1_PodPreset(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPreset) + out := out.(*PodPreset) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_v1alpha1_PodPresetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_v1alpha1_PodPresetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPresetList) + out := out.(*PodPresetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodPreset, len(*in)) + for i := range *in { + if err := DeepCopy_v1alpha1_PodPreset(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_v1alpha1_PodPresetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPresetSpec) + out := out.(*PodPresetSpec) + *out = *in + if newVal, err := c.DeepCopy(&in.Selector); err != nil { + return err + } else { + out.Selector = *newVal.(*v1.LabelSelector) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]api_v1.EnvVar, len(*in)) + for i := range *in { + if err := api_v1.DeepCopy_v1_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]api_v1.EnvFromSource, len(*in)) + for i := range *in { + if err := api_v1.DeepCopy_v1_EnvFromSource(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]api_v1.Volume, len(*in)) + for i := range *in { + if err := api_v1.DeepCopy_v1_Volume(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]api_v1.VolumeMount, len(*in)) + copy(*out, *in) + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.defaults.go new file mode 100644 index 000000000..c178a3072 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,98 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! 
+ +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/client-go/pkg/api/v1" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&PodPreset{}, func(obj interface{}) { SetObjectDefaults_PodPreset(obj.(*PodPreset)) }) + scheme.AddTypeDefaultingFunc(&PodPresetList{}, func(obj interface{}) { SetObjectDefaults_PodPresetList(obj.(*PodPresetList)) }) + return nil +} + +func SetObjectDefaults_PodPreset(in *PodPreset) { + for i := range in.Spec.Env { + a := &in.Spec.Env[i] + if a.ValueFrom != nil { + if a.ValueFrom.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(a.ValueFrom.FieldRef) + } + } + } + for i := range in.Spec.Volumes { + a := &in.Spec.Volumes[i] + v1.SetDefaults_Volume(a) + if a.VolumeSource.Secret != nil { + v1.SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + v1.SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + v1.SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + v1.SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + v1.SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + v1.SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + v1.SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + v1.SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + } + } + if a.VolumeSource.ScaleIO != nil { + v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } +} + +func SetObjectDefaults_PodPresetList(in *PodPresetList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_PodPreset(a) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/settings/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/settings/zz_generated.deepcopy.go new file mode 100644 index 000000000..eb109b553 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/settings/zz_generated.deepcopy.go @@ -0,0 +1,124 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
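SetObjectDefaults_PodPreset above walks a preset's env vars and volumes and applies the upstream v1 defaulting helpers. A short sketch of invoking it directly on a hand-built object (the secret volume and its name are invented for the example; in normal use the scheme registered via RegisterDefaults runs this during decoding):

package main

import (
	"fmt"

	"k8s.io/client-go/pkg/api/v1"
	settingsv1alpha1 "k8s.io/client-go/pkg/apis/settings/v1alpha1"
)

func main() {
	preset := settingsv1alpha1.PodPreset{
		Spec: settingsv1alpha1.PodPresetSpec{
			Volumes: []v1.Volume{{
				Name: "certs",
				VolumeSource: v1.VolumeSource{
					Secret: &v1.SecretVolumeSource{SecretName: "frontend-certs"},
				},
			}},
		},
	}
	settingsv1alpha1.SetObjectDefaults_PodPreset(&preset)
	// SetDefaults_SecretVolumeSource fills in DefaultMode when it is unset.
	fmt.Println(*preset.Spec.Volumes[0].Secret.DefaultMode)
}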
+ +package settings + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + api "k8s.io/client-go/pkg/api" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_settings_PodPreset, InType: reflect.TypeOf(&PodPreset{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_settings_PodPresetList, InType: reflect.TypeOf(&PodPresetList{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_settings_PodPresetSpec, InType: reflect.TypeOf(&PodPresetSpec{})}, + ) +} + +func DeepCopy_settings_PodPreset(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPreset) + out := out.(*PodPreset) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if err := DeepCopy_settings_PodPresetSpec(&in.Spec, &out.Spec, c); err != nil { + return err + } + return nil + } +} + +func DeepCopy_settings_PodPresetList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPresetList) + out := out.(*PodPresetList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodPreset, len(*in)) + for i := range *in { + if err := DeepCopy_settings_PodPreset(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} + +func DeepCopy_settings_PodPresetSpec(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*PodPresetSpec) + out := out.(*PodPresetSpec) + *out = *in + if newVal, err := c.DeepCopy(&in.Selector); err != nil { + return err + } else { + out.Selector = *newVal.(*v1.LabelSelector) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]api.EnvVar, len(*in)) + for i := range *in { + if err := api.DeepCopy_api_EnvVar(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]api.EnvFromSource, len(*in)) + for i := range *in { + if err := api.DeepCopy_api_EnvFromSource(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]api.Volume, len(*in)) + for i := range *in { + if err := api.DeepCopy_api_Volume(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]api.VolumeMount, len(*in)) + copy(*out, *in) + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/OWNERS b/vendor/k8s.io/client-go/pkg/apis/storage/OWNERS new file mode 100755 index 000000000..d59ed6e1d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/OWNERS @@ -0,0 +1,3 @@ +reviewers: +- deads2k +- mbohlool diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/doc.go b/vendor/k8s.io/client-go/pkg/apis/storage/doc.go new file mode 100644 index 000000000..ef6d2a9ba --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=storage.k8s.io +package storage diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/install/install.go b/vendor/k8s.io/client-go/pkg/apis/storage/install/install.go new file mode 100644 index 000000000..fb590fecd --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/install/install.go @@ -0,0 +1,54 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. +package install + +import ( + "k8s.io/apimachinery/pkg/apimachinery/announced" + "k8s.io/apimachinery/pkg/apimachinery/registered" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/apis/storage" + "k8s.io/client-go/pkg/apis/storage/v1" + "k8s.io/client-go/pkg/apis/storage/v1beta1" +) + +func init() { + Install(api.GroupFactoryRegistry, api.Registry, api.Scheme) +} + +// Install registers the API group and adds types to a scheme +func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { + if err := announced.NewGroupMetaFactory( + &announced.GroupMetaFactoryArgs{ + GroupName: storage.GroupName, + // TODO: change the order when GKE supports v1 + VersionPreferenceOrder: []string{v1beta1.SchemeGroupVersion.Version, v1.SchemeGroupVersion.Version}, + ImportPrefix: "k8s.io/client-go/pkg/apis/storage", + RootScopedKinds: sets.NewString("StorageClass"), + AddInternalObjectsToScheme: storage.AddToScheme, + }, + announced.VersionToSchemeFunc{ + v1.SchemeGroupVersion.Version: v1.AddToScheme, + v1beta1.SchemeGroupVersion.Version: v1beta1.AddToScheme, + }, + ).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/register.go b/vendor/k8s.io/client-go/pkg/apis/storage/register.go new file mode 100644 index 000000000..aaa619b4d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "storage.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &StorageClass{}, + &StorageClassList{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/types.go b/vendor/k8s.io/client-go/pkg/apis/storage/types.go new file mode 100644 index 000000000..3d589de5c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/types.go @@ -0,0 +1,60 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient=true +// +nonNamespaced=true + +// StorageClass describes a named "class" of storage offered in a cluster. +// Different classes might map to quality-of-service levels, or to backup policies, +// or to arbitrary policies determined by the cluster administrators. Kubernetes +// itself is unopinionated about what classes represent. This concept is sometimes +// called "profiles" in other storage systems. +// The name of a StorageClass object is significant, and is how users can request a particular class. +type StorageClass struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // provisioner is the driver expected to handle this StorageClass. + // This is an optionally-prefixed name, like a label key. + // For example: "kubernetes.io/gce-pd" or "kubernetes.io/aws-ebs". + // This value may not be empty. + Provisioner string + + // parameters holds parameters for the provisioner. + // These values are opaque to the system and are passed directly + // to the provisioner. The only validation done on keys is that they are + // not empty. The maximum number of parameters is + // 512, with a cumulative max size of 256K + // +optional + Parameters map[string]string +} + +// StorageClassList is a collection of storage classes. 
+type StorageClassList struct { + metav1.TypeMeta + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta + + // Items is the list of StorageClasses + Items []StorageClass +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/doc.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1/doc.go new file mode 100644 index 000000000..2b8d05cfd --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=storage.k8s.io +package v1 diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1/generated.pb.go new file mode 100644 index 000000000..531282ba6 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1/generated.pb.go @@ -0,0 +1,730 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/storage/v1/generated.proto +// DO NOT EDIT! + +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/storage/v1/generated.proto + + It has these top-level messages: + StorageClass + StorageClassList +*/ +package v1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *StorageClass) Reset() { *m = StorageClass{} } +func (*StorageClass) ProtoMessage() {} +func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *StorageClassList) Reset() { *m = StorageClassList{} } +func (*StorageClassList) ProtoMessage() {} +func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func init() { + proto.RegisterType((*StorageClass)(nil), "k8s.io.client-go.pkg.apis.storage.v1.StorageClass") + proto.RegisterType((*StorageClassList)(nil), "k8s.io.client-go.pkg.apis.storage.v1.StorageClassList") +} +func (m *StorageClass) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StorageClass) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Provisioner))) + i += copy(data[i:], m.Provisioner) + if len(m.Parameters) > 0 { + for k := range m.Parameters { + data[i] = 0x1a + i++ + v := m.Parameters[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + return i, nil +} + +func (m *StorageClassList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StorageClassList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *StorageClass) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Provisioner) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k + 
_ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *StorageClassList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *StorageClass) String() string { + if this == nil { + return "nil" + } + keysForParameters := make([]string, 0, len(this.Parameters)) + for k := range this.Parameters { + keysForParameters = append(keysForParameters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + mapStringForParameters := "map[string]string{" + for _, k := range keysForParameters { + mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) + } + mapStringForParameters += "}" + s := strings.Join([]string{`&StorageClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, + `Parameters:` + mapStringForParameters + `,`, + `}`, + }, "") + return s +} +func (this *StorageClassList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *StorageClass) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = 
%d for field Provisioner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provisioner = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageClassList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StorageClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err 
:= skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 474 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x92, 0x4f, 0x6f, 0xd3, 0x30, + 0x18, 0xc6, 0xe3, 0x54, 0x95, 0x36, 0x17, 0x44, 0x15, 0x38, 0x54, 0x3d, 0x64, 0xd5, 0x04, 0x52, + 0x2f, 0xd8, 0x74, 0x63, 0x68, 0x42, 0xe2, 0xd2, 0x89, 0x03, 0x12, 0x88, 0x29, 0x5c, 0x10, 0xe2, + 0x80, 0xdb, 0xbd, 0xa4, 0x26, 0x4d, 0x1c, 0xd9, 0x6f, 0x02, 0xbd, 0xf1, 0x11, 0xf8, 0x58, 0x15, + 0xa7, 0x1d, 0x39, 0x0d, 0x1a, 0xbe, 0x08, 0xca, 0x1f, 0x96, 0x88, 0x6c, 0xa2, 0xda, 0x2d, 0xaf, + 0xed, 0xdf, 0xe3, 0xe7, 0x79, 0x1c, 0x7a, 0x14, 0x1c, 0x1b, 0x26, 0x15, 0x0f, 0x92, 0x19, 0xe8, + 0x08, 0x10, 0x0c, 0x8f, 0x03, 0x9f, 0x8b, 0x58, 0x1a, 0x6e, 0x50, 0x69, 0xe1, 0x03, 0x4f, 0x27, + 0xdc, 0x87, 0x08, 0xb4, 0x40, 0x38, 0x63, 0xb1, 0x56, 0xa8, 0x9c, 0x07, 0x25, 0xc6, 0x6a, 0x8c, + 0xc5, 0x81, 0xcf, 0x72, 0x8c, 0x55, 0x18, 0x4b, 0x27, 0xc3, 0x87, 0xbe, 0xc4, 0x45, 0x32, 0x63, + 0x73, 0x15, 0x72, 0x5f, 0xf9, 0x8a, 0x17, 0xf4, 0x2c, 0xf9, 0x58, 0x4c, 0xc5, 0x50, 0x7c, 0x95, + 0xaa, 0xc3, 0xc7, 0x95, 0x19, 0x11, 0xcb, 0x50, 0xcc, 0x17, 0x32, 0x02, 0xbd, 0xaa, 0xed, 0x84, + 0x80, 0xe2, 0x0a, 0x2f, 0x43, 0x7e, 0x1d, 0xa5, 0x93, 0x08, 0x65, 0x08, 0x2d, 0xe0, 0xc9, 0xff, + 0x00, 0x33, 0x5f, 0x40, 0x28, 0x5a, 0xdc, 0xe1, 0x75, 0x5c, 0x82, 0x72, 0xc9, 0x65, 0x84, 0x06, + 0x75, 0x0b, 0x6a, 0x64, 0x32, 0xa0, 0x53, 0xd0, 0x75, 0x20, 0xf8, 0x22, 0xc2, 0x78, 0x79, 0x55, + 0xbf, 0xfb, 0x3f, 0x6d, 0x7a, 0xeb, 0x4d, 0xd9, 0xe3, 0xc9, 0x52, 0x18, 0xe3, 0x7c, 0xa0, 0x3b, + 0x79, 0xfe, 0x33, 0x81, 0x62, 0x40, 0x46, 0x64, 0xdc, 0x3b, 0x78, 0xc4, 0xaa, 0x37, 0x68, 0xda, + 0xa9, 0x5f, 0x21, 0x3f, 0xcd, 0xd2, 0x09, 0x7b, 0x3d, 0xfb, 0x04, 0x73, 0x7c, 0x05, 0x28, 0xa6, + 0xce, 0xfa, 0x62, 0xcf, 0xca, 0x2e, 0xf6, 0x68, 0xbd, 0xe6, 0x5d, 0xaa, 0x3a, 0x47, 0xb4, 0x17, + 0x6b, 0x95, 0x4a, 0x23, 0x55, 0x04, 0x7a, 0x60, 0x8f, 0xc8, 0x78, 0x77, 0x7a, 0xb7, 0x42, 0x7a, + 0xa7, 0xf5, 0x96, 0xd7, 0x3c, 0xe7, 0x7c, 0xa6, 0x34, 0x16, 0x5a, 0x84, 0x80, 0xa0, 0xcd, 0xa0, + 0x33, 0xea, 0x8c, 0x7b, 0x07, 0x27, 0x6c, 0xab, 0xdf, 0x83, 0x35, 0x13, 0xb2, 0xd3, 0x4b, 0x95, + 0xe7, 0x11, 0xea, 0x55, 0xed, 0xb6, 0xde, 0xf0, 0x1a, 0x57, 0x0d, 0x9f, 0xd1, 0x3b, 0xff, 0x20, + 0x4e, 0x9f, 0x76, 0x02, 0x58, 0x15, 0xfd, 0xec, 0x7a, 0xf9, 0xa7, 0x73, 0x8f, 0x76, 0x53, 0xb1, + 0x4c, 0xa0, 0x8c, 0xe3, 0x95, 0xc3, 0x53, 0xfb, 0x98, 0xec, 0x7f, 0x27, 0xb4, 0xdf, 0xbc, 0xff, + 0xa5, 0x34, 0xe8, 0xbc, 0x6f, 0xb5, 0xcc, 0xb6, 0x6b, 0x39, 0xa7, 0x8b, 0x8e, 0xfb, 0x95, 0xeb, + 0x9d, 0xbf, 0x2b, 0x8d, 0x86, 0xdf, 0xd2, 0xae, 0x44, 0x08, 0xcd, 0xc0, 0x2e, 0x5a, 0x3a, 0xbc, + 0x41, 0x4b, 0xd3, 0xdb, 0x95, 0x7e, 0xf7, 0x45, 0xae, 0xe4, 0x95, 0x82, 0xd3, 0xfb, 0xeb, 0x8d, + 0x6b, 0x9d, 0x6f, 0x5c, 0xeb, 0xc7, 0xc6, 0xb5, 0xbe, 0x66, 0x2e, 0x59, 0x67, 0x2e, 0x39, 0xcf, + 0x5c, 0xf2, 0x2b, 0x73, 0xc9, 0xb7, 0xdf, 0xae, 0xf5, 0xce, 0x4e, 0x27, 0x7f, 0x02, 0x00, 0x00, + 0xff, 0xff, 0xe8, 0xe1, 0xb9, 0x93, 0xec, 0x03, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/generated.proto 
b/vendor/k8s.io/client-go/pkg/apis/storage/v1/generated.proto new file mode 100644 index 000000000..92e18cf9e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1/generated.proto @@ -0,0 +1,63 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.storage.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. +// +// StorageClasses are non-namespaced; the name of the storage class +// according to etcd is in ObjectMeta.Name. +message StorageClass { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Provisioner indicates the type of the provisioner. + optional string provisioner = 2; + + // Parameters holds the parameters for the provisioner that should + // create volumes of this storage class. + // +optional + map<string, string> parameters = 3; +} + +// StorageClassList is a collection of storage classes. +message StorageClassList { + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of StorageClasses + repeated StorageClass items = 2; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/register.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1/register.go new file mode 100644 index 000000000..24d6bfa7d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "storage.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &StorageClass{}, + &StorageClassList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/types.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1/types.go new file mode 100644 index 000000000..0591b397c --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1/types.go @@ -0,0 +1,57 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient=true +// +nonNamespaced=true + +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. +// +// StorageClasses are non-namespaced; the name of the storage class +// according to etcd is in ObjectMeta.Name. +type StorageClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Provisioner indicates the type of the provisioner. + Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` + + // Parameters holds the parameters for the provisioner that should + // create volumes of this storage class. + // +optional + Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` +} + +// StorageClassList is a collection of storage classes. 
+type StorageClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of StorageClasses + Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1/types_swagger_doc_generated.go new file mode 100644 index 000000000..6f2717708 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1/types_swagger_doc_generated.go @@ -0,0 +1,51 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_StorageClass = map[string]string{ + "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "provisioner": "Provisioner indicates the type of the provisioner.", + "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", +} + +func (StorageClass) SwaggerDoc() map[string]string { + return map_StorageClass +} + +var map_StorageClassList = map[string]string{ + "": "StorageClassList is a collection of storage classes.", + "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is the list of StorageClasses", +} + +func (StorageClassList) SwaggerDoc() map[string]string { + return map_StorageClassList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.conversion.go new file mode 100644 index 000000000..3c0c5d4b3 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.conversion.go @@ -0,0 +1,89 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + storage "k8s.io/client-go/pkg/apis/storage" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_StorageClass_To_storage_StorageClass, + Convert_storage_StorageClass_To_v1_StorageClass, + Convert_v1_StorageClassList_To_storage_StorageClassList, + Convert_storage_StorageClassList_To_v1_StorageClassList, + ) +} + +func autoConvert_v1_StorageClass_To_storage_StorageClass(in *StorageClass, out *storage.StorageClass, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Provisioner = in.Provisioner + out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters)) + return nil +} + +func Convert_v1_StorageClass_To_storage_StorageClass(in *StorageClass, out *storage.StorageClass, s conversion.Scope) error { + return autoConvert_v1_StorageClass_To_storage_StorageClass(in, out, s) +} + +func autoConvert_storage_StorageClass_To_v1_StorageClass(in *storage.StorageClass, out *StorageClass, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Provisioner = in.Provisioner + out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters)) + return nil +} + +func Convert_storage_StorageClass_To_v1_StorageClass(in *storage.StorageClass, out *StorageClass, s conversion.Scope) error { + return autoConvert_storage_StorageClass_To_v1_StorageClass(in, out, s) +} + +func autoConvert_v1_StorageClassList_To_storage_StorageClassList(in *StorageClassList, out *storage.StorageClassList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]storage.StorageClass)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1_StorageClassList_To_storage_StorageClassList(in *StorageClassList, out *storage.StorageClassList, s conversion.Scope) error { + return autoConvert_v1_StorageClassList_To_storage_StorageClassList(in, out, s) +} + +func autoConvert_storage_StorageClassList_To_v1_StorageClassList(in *storage.StorageClassList, out *StorageClassList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]StorageClass, 0) + } else { + out.Items = *(*[]StorageClass)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_storage_StorageClassList_To_v1_StorageClassList(in *storage.StorageClassList, out *StorageClassList, s conversion.Scope) error { + return autoConvert_storage_StorageClassList_To_v1_StorageClassList(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..8b3786ab1 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.deepcopy.go @@ -0,0 +1,80 @@ +// +build 
!ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_StorageClass, InType: reflect.TypeOf(&StorageClass{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1_StorageClassList, InType: reflect.TypeOf(&StorageClassList{})}, + ) +} + +func DeepCopy_v1_StorageClass(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClass) + out := out.(*StorageClass) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*meta_v1.ObjectMeta) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_v1_StorageClassList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClassList) + out := out.(*StorageClassList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageClass, len(*in)) + for i := range *in { + if err := DeepCopy_v1_StorageClass(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.defaults.go new file mode 100644 index 000000000..6df448eb9 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. 
+// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/doc.go new file mode 100644 index 000000000..364321710 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +groupName=storage.k8s.io +package v1beta1 diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.pb.go new file mode 100644 index 000000000..f1130cf37 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.pb.go @@ -0,0 +1,731 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.proto + + It has these top-level messages: + StorageClass + StorageClassList +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *StorageClass) Reset() { *m = StorageClass{} } +func (*StorageClass) ProtoMessage() {} +func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *StorageClassList) Reset() { *m = StorageClassList{} } +func (*StorageClassList) ProtoMessage() {} +func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func init() { + proto.RegisterType((*StorageClass)(nil), "k8s.io.client-go.pkg.apis.storage.v1beta1.StorageClass") + proto.RegisterType((*StorageClassList)(nil), "k8s.io.client-go.pkg.apis.storage.v1beta1.StorageClassList") +} +func (m *StorageClass) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StorageClass) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Provisioner))) + i += copy(data[i:], m.Provisioner) + if len(m.Parameters) > 0 { + for k := range m.Parameters { + data[i] = 0x1a + i++ + v := m.Parameters[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + return i, nil +} + +func (m *StorageClassList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StorageClassList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *StorageClass) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Provisioner) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { 
+ _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *StorageClassList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *StorageClass) String() string { + if this == nil { + return "nil" + } + keysForParameters := make([]string, 0, len(this.Parameters)) + for k := range this.Parameters { + keysForParameters = append(keysForParameters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + mapStringForParameters := "map[string]string{" + for _, k := range keysForParameters { + mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) + } + mapStringForParameters += "}" + s := strings.Join([]string{`&StorageClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, + `Parameters:` + mapStringForParameters + `,`, + `}`, + }, "") + return s +} +func (this *StorageClassList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *StorageClass) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Provisioner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provisioner = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageClassList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StorageClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + 
next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 486 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x92, 0xcf, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x33, 0x5d, 0x8a, 0xbb, 0x53, 0xc5, 0x12, 0x3d, 0x94, 0x1e, 0xb2, 0x65, 0x4f, 0x55, + 0x74, 0xc6, 0xae, 0x3f, 0x28, 0x0b, 0x5e, 0x2a, 0x82, 0x82, 0xe2, 0x12, 0x6f, 0xa2, 0xe0, 0xa4, + 0xfb, 0x4c, 0xc7, 0x34, 0x99, 0x30, 0xf3, 0x12, 0x2c, 0x78, 0xf0, 0x4f, 0xf0, 0xcf, 0xea, 0xcd, + 0x3d, 0x7a, 0x5a, 0x6c, 0xf4, 0x0f, 0x91, 0xfc, 0x70, 0x13, 0xcc, 0x16, 0x17, 0x6f, 0x99, 0x99, + 0xf7, 0xf9, 0xbe, 0xef, 0xfb, 0xbe, 0xd0, 0xa3, 0x60, 0x6a, 0x98, 0x54, 0x3c, 0x48, 0x3c, 0xd0, + 0x11, 0x20, 0x18, 0x1e, 0x07, 0x3e, 0x17, 0xb1, 0x34, 0xdc, 0xa0, 0xd2, 0xc2, 0x07, 0x9e, 0x4e, + 0x3c, 0x40, 0x31, 0xe1, 0x3e, 0x44, 0xa0, 0x05, 0xc2, 0x09, 0x8b, 0xb5, 0x42, 0x65, 0xdf, 0x2e, + 0x59, 0x56, 0xb3, 0x2c, 0x0e, 0x7c, 0x96, 0xb3, 0xac, 0x62, 0x59, 0xc5, 0x0e, 0xef, 0xfa, 0x12, + 0x17, 0x89, 0xc7, 0xe6, 0x2a, 0xe4, 0xbe, 0xf2, 0x15, 0x2f, 0x24, 0xbc, 0xe4, 0x43, 0x71, 0x2a, + 0x0e, 0xc5, 0x57, 0x29, 0x3d, 0x7c, 0x50, 0xd9, 0x12, 0xb1, 0x0c, 0xc5, 0x7c, 0x21, 0x23, 0xd0, + 0xab, 0xda, 0x58, 0x08, 0x28, 0x78, 0xda, 0x32, 0x34, 0xe4, 0xdb, 0x28, 0x9d, 0x44, 0x28, 0x43, + 0x68, 0x01, 0x8f, 0xfe, 0x05, 0x98, 0xf9, 0x02, 0x42, 0xd1, 0xe2, 0xee, 0x6f, 0xe3, 0x12, 0x94, + 0x4b, 0x2e, 0x23, 0x34, 0xa8, 0x5b, 0x50, 0x63, 0x26, 0x03, 0x3a, 0x05, 0x5d, 0x0f, 0x04, 0x9f, + 0x44, 0x18, 0x2f, 0xe1, 0xa2, 0x99, 0xee, 0x6c, 0x5d, 0xd0, 0x05, 0xd5, 0x07, 0xbf, 0x3a, 0xf4, + 0xea, 0xeb, 0x32, 0xfa, 0x27, 0x4b, 0x61, 0x8c, 0xfd, 0x9e, 0xee, 0xe6, 0x69, 0x9d, 0x08, 0x14, + 0x03, 0x32, 0x22, 0xe3, 0xde, 0xe1, 0x3d, 0x56, 0xad, 0xad, 0x69, 0xbe, 0x5e, 0x5c, 0x5e, 0xcd, + 0xd2, 0x09, 0x7b, 0xe5, 0x7d, 0x84, 0x39, 0xbe, 0x04, 0x14, 0x33, 0x7b, 0x7d, 0xb6, 0x6f, 0x65, + 0x67, 0xfb, 0xb4, 0xbe, 0x73, 0xcf, 0x55, 0xed, 0x87, 0xb4, 0x17, 0x6b, 0x95, 0x4a, 0x23, 0x55, + 0x04, 0x7a, 0xd0, 0x19, 0x91, 0xf1, 0xde, 0xec, 0x46, 0x85, 0xf4, 0x8e, 0xeb, 0x27, 0xb7, 0x59, + 0x67, 0x7f, 0xa6, 0x34, 0x16, 0x5a, 0x84, 0x80, 0xa0, 0xcd, 0x60, 0x67, 0xb4, 0x33, 0xee, 0x1d, + 0x3e, 0x63, 0x97, 0xff, 0xa3, 0x58, 0x73, 0x4c, 0x76, 0x7c, 0x2e, 0xf5, 0x34, 0x42, 0xbd, 0xaa, + 0x2d, 0xd7, 0x0f, 0x6e, 0xa3, 0xdf, 0xf0, 0x31, 0xbd, 0xfe, 0x17, 0x62, 0xf7, 0xe9, 0x4e, 0x00, + 0xab, 0x22, 0xa4, 0x3d, 0x37, 0xff, 0xb4, 0x6f, 0xd2, 0x6e, 0x2a, 0x96, 0x09, 0x94, 0x33, 0xb9, + 0xe5, 0xe1, 0xa8, 0x33, 0x25, 0x07, 0xdf, 0x08, 0xed, 0x37, 0xfb, 0xbf, 0x90, 0x06, 0xed, 0xb7, + 0xad, 0xa8, 0xd9, 0xe5, 0xa2, 0xce, 0xe9, 0x22, 0xe8, 0x7e, 0xe5, 0x7a, 0xf7, 0xcf, 0x4d, 0x23, + 0xe6, 0x77, 0xb4, 0x2b, 0x11, 0x42, 0x33, 0xe8, 0x14, 0x51, 0x4d, 0xff, 0x37, 0xaa, 0xd9, 0xb5, + 0xaa, 0x49, 0xf7, 0x79, 0x2e, 0xe7, 0x96, 0xaa, 0xb3, 0x5b, 0xeb, 0x8d, 0x63, 0x9d, 0x6e, 0x1c, + 0xeb, 0xfb, 0xc6, 0xb1, 0xbe, 0x64, 0x0e, 0x59, 0x67, 0x0e, 0x39, 0xcd, 0x1c, 0xf2, 0x23, 0x73, + 0xc8, 0xd7, 0x9f, 0x8e, 0xf5, 0xe6, 0x4a, 0xa5, 0xf6, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x99, 0xdf, + 0x66, 0x94, 0x33, 0x04, 0x00, 0x00, +} diff 
--git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.proto b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.proto new file mode 100644 index 000000000..0b9dbaead --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/generated.proto @@ -0,0 +1,64 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.storage.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; +import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. +// +// StorageClasses are non-namespaced; the name of the storage class +// according to etcd is in ObjectMeta.Name. +message StorageClass { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Provisioner indicates the type of the provisioner. + optional string provisioner = 2; + + // Parameters holds the parameters for the provisioner that should + // create volumes of this storage class. + // +optional + map<string, string> parameters = 3; +} + +// StorageClassList is a collection of storage classes. +message StorageClassList { + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of StorageClasses + repeated StorageClass items = 2; +} + diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/register.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/register.go new file mode 100644 index 000000000..70087f379 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "storage.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &StorageClass{}, + &StorageClassList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.generated.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.generated.go new file mode 100644 index 000000000..ddc516b28 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.generated.go @@ -0,0 +1,985 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. +// ************************************************************ + +package v1beta1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + pkg2_types "k8s.io/apimachinery/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_v1.TypeMeta + var v1 pkg2_types.UID + var v2 time.Time + _, _, _ = v0, v1, v2 + } +} + +func (x *StorageClass) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = len(x.Parameters) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ObjectMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ObjectMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Provisioner)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("provisioner")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Provisioner)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Parameters == nil { 
+ r.EncodeNil() + } else { + yym18 := z.EncBinary() + _ = yym18 + if false { + } else { + z.F.EncMapStringStringV(x.Parameters, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("parameters")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Parameters == nil { + r.EncodeNil() + } else { + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + z.F.EncMapStringStringV(x.Parameters, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StorageClass) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StorageClass) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv8 := &x.ObjectMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "provisioner": + if r.TryDecodeAsNil() { + x.Provisioner = "" + } else { + yyv10 := &x.Provisioner + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "parameters": + if r.TryDecodeAsNil() { + x.Parameters = nil + } else { + yyv12 := &x.Parameters + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + z.F.DecMapStringStringX(yyv12, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StorageClass) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 
bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv15 := &x.Kind + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv17 := &x.APIVersion + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg1_v1.ObjectMeta{} + } else { + yyv19 := &x.ObjectMeta + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else { + z.DecFallback(yyv19, false) + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Provisioner = "" + } else { + yyv21 := &x.Provisioner + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Parameters = nil + } else { + yyv23 := &x.Parameters + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + z.F.DecMapStringStringX(yyv23, false, d) + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj14-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *StorageClassList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [4]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(4) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + 
z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := &x.ListMeta + yym11 := z.EncBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.EncExt(yy10) { + } else { + z.EncFallback(yy10) + } + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy12 := &x.ListMeta + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(yy12) { + } else { + z.EncFallback(yy12) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym15 := z.EncBinary() + _ = yym15 + if false { + } else { + h.encSliceStorageClass(([]StorageClass)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + h.encSliceStorageClass(([]StorageClass)(x.Items), e) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StorageClassList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap1234 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray1234 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StorageClassList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } 
+ z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys3Slc = r.DecodeBytes(yys3Slc, true, true) + yys3 := string(yys3Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys3 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv4 := &x.Kind + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv6 := &x.APIVersion + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv8 := &x.ListMeta + yym9 := z.DecBinary() + _ = yym9 + if false { + } else if z.HasExtensions() && z.DecExt(yyv8) { + } else { + z.DecFallback(yyv8, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv10 := &x.Items + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + h.decSliceStorageClass((*[]StorageClass)(yyv10), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StorageClassList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + yyv13 := &x.Kind + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + yyv15 := &x.APIVersion + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_v1.ListMeta{} + } else { + yyv17 := &x.ListMeta + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(yyv17) { + } else { + z.DecFallback(yyv17, false) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv19 := &x.Items + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + h.decSliceStorageClass((*[]StorageClass)(yyv19), d) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj12-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceStorageClass(v 
[]StorageClass, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv1 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy2 := &yyv1 + yy2.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceStorageClass(v *[]StorageClass, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []StorageClass{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else if yyl1 > 0 { + var yyrr1, yyrl1 int + var yyrt1 bool + _, _ = yyrl1, yyrt1 + yyrr1 = yyl1 // len(yyv1) + if yyl1 > cap(yyv1) { + + yyrg1 := len(yyv1) > 0 + yyv21 := yyv1 + yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1 { + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]StorageClass, yyrl1) + } + } else { + yyv1 = make([]StorageClass, yyrl1) + } + yyc1 = true + yyrr1 = len(yyv1) + if yyrg1 { + copy(yyv1, yyv21) + } + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + yyj1 := 0 + for ; yyj1 < yyrr1; yyj1++ { + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = StorageClass{} + } else { + yyv2 := &yyv1[yyj1] + yyv2.CodecDecodeSelf(d) + } + + } + if yyrt1 { + for ; yyj1 < yyl1; yyj1++ { + yyv1 = append(yyv1, StorageClass{}) + yyh1.ElemContainerState(yyj1) + if r.TryDecodeAsNil() { + yyv1[yyj1] = StorageClass{} + } else { + yyv3 := &yyv1[yyj1] + yyv3.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj1 := 0 + for ; !r.CheckBreak(); yyj1++ { + + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, StorageClass{}) // var yyz1 StorageClass + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + if yyj1 < len(yyv1) { + if r.TryDecodeAsNil() { + yyv1[yyj1] = StorageClass{} + } else { + yyv4 := &yyv1[yyj1] + yyv4.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = []StorageClass{} + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.go new file mode 100644 index 000000000..afbd5bcdb --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types.go @@ -0,0 +1,57 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient=true +// +nonNamespaced=true + +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. 
+// +// StorageClasses are non-namespaced; the name of the storage class +// according to etcd is in ObjectMeta.Name. +type StorageClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Provisioner indicates the type of the provisioner. + Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` + + // Parameters holds the parameters for the provisioner that should + // create volumes of this storage class. + // +optional + Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` +} + +// StorageClassList is a collection of storage classes. +type StorageClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of StorageClasses + Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 000000000..e8362e381 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,51 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_StorageClass = map[string]string{ + "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", + "metadata": "Standard object's metadata. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "provisioner": "Provisioner indicates the type of the provisioner.", + "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", +} + +func (StorageClass) SwaggerDoc() map[string]string { + return map_StorageClass +} + +var map_StorageClassList = map[string]string{ + "": "StorageClassList is a collection of storage classes.", + "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is the list of StorageClasses", +} + +func (StorageClassList) SwaggerDoc() map[string]string { + return map_StorageClassList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.conversion.go new file mode 100644 index 000000000..f5b1b2aa4 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.conversion.go @@ -0,0 +1,89 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! + +package v1beta1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + storage "k8s.io/client-go/pkg/apis/storage" + unsafe "unsafe" +) + +func init() { + SchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1beta1_StorageClass_To_storage_StorageClass, + Convert_storage_StorageClass_To_v1beta1_StorageClass, + Convert_v1beta1_StorageClassList_To_storage_StorageClassList, + Convert_storage_StorageClassList_To_v1beta1_StorageClassList, + ) +} + +func autoConvert_v1beta1_StorageClass_To_storage_StorageClass(in *StorageClass, out *storage.StorageClass, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Provisioner = in.Provisioner + out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters)) + return nil +} + +func Convert_v1beta1_StorageClass_To_storage_StorageClass(in *StorageClass, out *storage.StorageClass, s conversion.Scope) error { + return autoConvert_v1beta1_StorageClass_To_storage_StorageClass(in, out, s) +} + +func autoConvert_storage_StorageClass_To_v1beta1_StorageClass(in *storage.StorageClass, out *StorageClass, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Provisioner = in.Provisioner + out.Parameters = *(*map[string]string)(unsafe.Pointer(&in.Parameters)) + return nil +} + +func Convert_storage_StorageClass_To_v1beta1_StorageClass(in *storage.StorageClass, out *StorageClass, s conversion.Scope) error { + return autoConvert_storage_StorageClass_To_v1beta1_StorageClass(in, out, s) +} + +func autoConvert_v1beta1_StorageClassList_To_storage_StorageClassList(in *StorageClassList, out *storage.StorageClassList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]storage.StorageClass)(unsafe.Pointer(&in.Items)) + return nil +} + +func Convert_v1beta1_StorageClassList_To_storage_StorageClassList(in *StorageClassList, out *storage.StorageClassList, s conversion.Scope) error { + return autoConvert_v1beta1_StorageClassList_To_storage_StorageClassList(in, out, s) +} + +func autoConvert_storage_StorageClassList_To_v1beta1_StorageClassList(in *storage.StorageClassList, out *StorageClassList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items == nil { + out.Items = make([]StorageClass, 0) + } else { + out.Items = *(*[]StorageClass)(unsafe.Pointer(&in.Items)) + } + return nil +} + +func Convert_storage_StorageClassList_To_v1beta1_StorageClassList(in *storage.StorageClassList, out *StorageClassList, s conversion.Scope) error { + return autoConvert_storage_StorageClassList_To_v1beta1_StorageClassList(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..f2ea4f2fb --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,80 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StorageClass, InType: reflect.TypeOf(&StorageClass{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StorageClassList, InType: reflect.TypeOf(&StorageClassList{})}, + ) +} + +func DeepCopy_v1beta1_StorageClass(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClass) + out := out.(*StorageClass) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_v1beta1_StorageClassList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClassList) + out := out.(*StorageClassList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageClass, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_StorageClass(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.defaults.go new file mode 100644 index 000000000..e24e70be3 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/v1beta1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by defaulter-gen. Do not edit it manually! + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/storage/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/storage/zz_generated.deepcopy.go new file mode 100644 index 000000000..2ef9f1767 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/storage/zz_generated.deepcopy.go @@ -0,0 +1,80 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package storage + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_storage_StorageClass, InType: reflect.TypeOf(&StorageClass{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_storage_StorageClassList, InType: reflect.TypeOf(&StorageClassList{})}, + ) +} + +func DeepCopy_storage_StorageClass(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClass) + out := out.(*StorageClass) + *out = *in + if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { + return err + } else { + out.ObjectMeta = *newVal.(*v1.ObjectMeta) + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } + return nil + } +} + +func DeepCopy_storage_StorageClassList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClassList) + out := out.(*StorageClassList) + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageClass, len(*in)) + for i := range *in { + if err := DeepCopy_storage_StorageClass(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/pkg/util/doc.go b/vendor/k8s.io/client-go/pkg/util/doc.go new file mode 100644 index 000000000..1747db550 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/util/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package util implements various utility functions used in both testing and implementation +// of Kubernetes. Package util may not depend on any other package in the Kubernetes +// package tree. 
+package util diff --git a/vendor/k8s.io/kubernetes/pkg/util/parsers/parsers.go b/vendor/k8s.io/client-go/pkg/util/parsers/parsers.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/util/parsers/parsers.go rename to vendor/k8s.io/client-go/pkg/util/parsers/parsers.go index a02f18d3e..4e70cc682 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/parsers/parsers.go +++ b/vendor/k8s.io/client-go/pkg/util/parsers/parsers.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/kubernetes/pkg/util/template.go b/vendor/k8s.io/client-go/pkg/util/template.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/util/template.go rename to vendor/k8s.io/client-go/pkg/util/template.go index 1f9668533..d09d7dc86 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/template.go +++ b/vendor/k8s.io/client-go/pkg/util/template.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/kubernetes/pkg/util/umask.go b/vendor/k8s.io/client-go/pkg/util/umask.go similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/util/umask.go rename to vendor/k8s.io/client-go/pkg/util/umask.go index 48311f4e3..35ccce50b 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/umask.go +++ b/vendor/k8s.io/client-go/pkg/util/umask.go @@ -1,7 +1,7 @@ // +build !windows /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/kubernetes/pkg/util/umask_windows.go b/vendor/k8s.io/client-go/pkg/util/umask_windows.go similarity index 86% rename from vendor/k8s.io/kubernetes/pkg/util/umask_windows.go rename to vendor/k8s.io/client-go/pkg/util/umask_windows.go index 0f97c26ed..7a1ba1538 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/umask_windows.go +++ b/vendor/k8s.io/client-go/pkg/util/umask_windows.go @@ -1,7 +1,7 @@ // +build windows /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -22,6 +22,6 @@ import ( "errors" ) -func Umask(mask int) (old int, err error) { +func Umask(mask int) (int, error) { return 0, errors.New("platform and architecture is not supported") } diff --git a/vendor/k8s.io/client-go/pkg/util/util.go b/vendor/k8s.io/client-go/pkg/util/util.go new file mode 100644 index 000000000..356b295a3 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/util/util.go @@ -0,0 +1,131 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + "os" + "reflect" + "regexp" +) + +// Takes a list of strings and compiles them into a list of regular expressions +func CompileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) { + regexps := []*regexp.Regexp{} + for _, regexpStr := range regexpStrings { + r, err := regexp.Compile(regexpStr) + if err != nil { + return []*regexp.Regexp{}, err + } + regexps = append(regexps, r) + } + return regexps, nil +} + +// Detects if using systemd as the init system +// Please note that simply reading /proc/1/cmdline can be misleading because +// some installation of various init programs can automatically make /sbin/init +// a symlink or even a renamed version of their main program. +// TODO(dchen1107): realiably detects the init system using on the system: +// systemd, upstart, initd, etc. +func UsingSystemdInitSystem() bool { + if _, err := os.Stat("/run/systemd/system"); err == nil { + return true + } + + return false +} + +// Tests whether all pointer fields in a struct are nil. This is useful when, +// for example, an API struct is handled by plugins which need to distinguish +// "no plugin accepted this spec" from "this spec is empty". +// +// This function is only valid for structs and pointers to structs. Any other +// type will cause a panic. Passing a typed nil pointer will return true. +func AllPtrFieldsNil(obj interface{}) bool { + v := reflect.ValueOf(obj) + if !v.IsValid() { + panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj)) + } + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return true + } + v = v.Elem() + } + for i := 0; i < v.NumField(); i++ { + if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() { + return false + } + } + return true +} + +func FileExists(filename string) (bool, error) { + if _, err := os.Stat(filename); os.IsNotExist(err) { + return false, nil + } else if err != nil { + return false, err + } + return true, nil +} + +// ReadDirNoStat returns a string of files/directories contained +// in dirname without calling lstat on them. +func ReadDirNoStat(dirname string) ([]string, error) { + if dirname == "" { + dirname = "." + } + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + return f.Readdirnames(-1) +} + +// IntPtr returns a pointer to an int +func IntPtr(i int) *int { + o := i + return &o +} + +// Int32Ptr returns a pointer to an int32 +func Int32Ptr(i int32) *int32 { + o := i + return &o +} + +// IntPtrDerefOr dereference the int ptr and returns it i not nil, +// else returns def. +func IntPtrDerefOr(ptr *int, def int) int { + if ptr != nil { + return *ptr + } + return def +} + +// Int32PtrDerefOr dereference the int32 ptr and returns it i not nil, +// else returns def. 
+func Int32PtrDerefOr(ptr *int32, def int32) int32 { + if ptr != nil { + return *ptr + } + return def +} diff --git a/vendor/k8s.io/kubernetes/pkg/version/.gitattributes b/vendor/k8s.io/client-go/pkg/version/.gitattributes similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/version/.gitattributes rename to vendor/k8s.io/client-go/pkg/version/.gitattributes diff --git a/vendor/k8s.io/kubernetes/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go similarity index 89% rename from vendor/k8s.io/kubernetes/pkg/version/base.go rename to vendor/k8s.io/client-go/pkg/version/base.go index 0b41ab88f..f0f7338c7 100644 --- a/vendor/k8s.io/kubernetes/pkg/version/base.go +++ b/vendor/k8s.io/client-go/pkg/version/base.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -39,10 +39,10 @@ var ( // them irrelevant. (Next we'll take it out, which may muck with // scripts consuming the kubectl version output - but most of // these should be looking at gitVersion already anyways.) - gitMajor string = "1" // major version, always numeric - gitMinor string = "3" // minor version, numeric possibly followed by "+" + gitMajor string = "1" // major version, always numeric + gitMinor string = "6+" // minor version, numeric possibly followed by "+" - // semantic version, dervied by build scripts (see + // semantic version, derived by build scripts (see // https://github.com/kubernetes/kubernetes/blob/master/docs/design/versioning.md // for a detailed discussion of this field) // @@ -51,7 +51,7 @@ var ( // semantic version is a git hash, but the version itself is no // longer the direct output of "git describe", but a slight // translation to be semver compliant. - gitVersion string = "v1.3.4+$Format:%h$" + gitVersion string = "v1.6.1-beta.0+$Format:%h$" gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty" diff --git a/vendor/k8s.io/client-go/pkg/version/doc.go b/vendor/k8s.io/client-go/pkg/version/doc.go new file mode 100644 index 000000000..ccedec76f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/version/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package version supplies version information collected at build time to +// kubernetes components. +package version diff --git a/vendor/k8s.io/client-go/pkg/version/version.go b/vendor/k8s.io/client-go/pkg/version/version.go new file mode 100644 index 000000000..8c8350d13 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/version/version.go @@ -0,0 +1,42 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "fmt" + "runtime" + + apimachineryversion "k8s.io/apimachinery/pkg/version" +) + +// Get returns the overall codebase version. It's for detecting +// what code a binary was built from. +func Get() apimachineryversion.Info { + // These variables typically come from -ldflags settings and in + // their absence fallback to the settings in pkg/version/base.go + return apimachineryversion.Info{ + Major: gitMajor, + Minor: gitMinor, + GitVersion: gitVersion, + GitCommit: gitCommit, + GitTreeState: gitTreeState, + BuildDate: buildDate, + GoVersion: runtime.Version(), + Compiler: runtime.Compiler, + Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), + } +} diff --git a/vendor/k8s.io/client-go/rest/OWNERS b/vendor/k8s.io/client-go/rest/OWNERS new file mode 100755 index 000000000..8d97da007 --- /dev/null +++ b/vendor/k8s.io/client-go/rest/OWNERS @@ -0,0 +1,24 @@ +reviewers: +- thockin +- smarterclayton +- caesarxuchao +- wojtek-t +- deads2k +- brendandburns +- liggitt +- nikhiljindal +- gmarek +- erictune +- sttts +- luxas +- dims +- errordeveloper +- hongchaodeng +- krousey +- resouer +- cjcullen +- rmmh +- lixiaobing10051267 +- asalkeld +- juanvallejo +- lojies diff --git a/vendor/k8s.io/client-go/rest/client.go b/vendor/k8s.io/client-go/rest/client.go new file mode 100644 index 000000000..524e0d8eb --- /dev/null +++ b/vendor/k8s.io/client-go/rest/client.go @@ -0,0 +1,258 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "fmt" + "mime" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/flowcontrol" +) + +const ( + // Environment variables: Note that the duration should be long enough that the backoff + // persists for some reasonable time (i.e. 120 seconds). The typical base might be "1". + envBackoffBase = "KUBE_CLIENT_BACKOFF_BASE" + envBackoffDuration = "KUBE_CLIENT_BACKOFF_DURATION" +) + +// Interface captures the set of operations for generically interacting with Kubernetes REST apis. +type Interface interface { + GetRateLimiter() flowcontrol.RateLimiter + Verb(verb string) *Request + Post() *Request + Put() *Request + Patch(pt types.PatchType) *Request + Get() *Request + Delete() *Request + APIVersion() schema.GroupVersion +} + +// RESTClient imposes common Kubernetes API conventions on a set of resource paths. +// The baseURL is expected to point to an HTTP or HTTPS path that is the parent +// of one or more resources. 
The server should return a decodable API resource +// object, or an api.Status object which contains information about the reason for +// any failure. +// +// Most consumers should use client.New() to get a Kubernetes API client. +type RESTClient struct { + // base is the root URL for all invocations of the client + base *url.URL + // versionedAPIPath is a path segment connecting the base URL to the resource root + versionedAPIPath string + + // contentConfig is the information used to communicate with the server. + contentConfig ContentConfig + + // serializers contain all serializers for underlying content type. + serializers Serializers + + // creates BackoffManager that is passed to requests. + createBackoffMgr func() BackoffManager + + // TODO extract this into a wrapper interface via the RESTClient interface in kubectl. + Throttle flowcontrol.RateLimiter + + // Set specific behavior of the client. If not set http.DefaultClient will be used. + Client *http.Client +} + +type Serializers struct { + Encoder runtime.Encoder + Decoder runtime.Decoder + StreamingSerializer runtime.Serializer + Framer runtime.Framer + RenegotiatedDecoder func(contentType string, params map[string]string) (runtime.Decoder, error) +} + +// NewRESTClient creates a new RESTClient. This client performs generic REST functions +// such as Get, Put, Post, and Delete on specified paths. Codec controls encoding and +// decoding of responses from the server. +func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConfig, maxQPS float32, maxBurst int, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) { + base := *baseURL + if !strings.HasSuffix(base.Path, "/") { + base.Path += "/" + } + base.RawQuery = "" + base.Fragment = "" + + if config.GroupVersion == nil { + config.GroupVersion = &schema.GroupVersion{} + } + if len(config.ContentType) == 0 { + config.ContentType = "application/json" + } + serializers, err := createSerializers(config) + if err != nil { + return nil, err + } + + var throttle flowcontrol.RateLimiter + if maxQPS > 0 && rateLimiter == nil { + throttle = flowcontrol.NewTokenBucketRateLimiter(maxQPS, maxBurst) + } else if rateLimiter != nil { + throttle = rateLimiter + } + return &RESTClient{ + base: &base, + versionedAPIPath: versionedAPIPath, + contentConfig: config, + serializers: *serializers, + createBackoffMgr: readExpBackoffConfig, + Throttle: throttle, + Client: client, + }, nil +} + +// GetRateLimiter returns rate limier for a given client, or nil if it's called on a nil client +func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter { + if c == nil { + return nil + } + return c.Throttle +} + +// readExpBackoffConfig handles the internal logic of determining what the +// backoff policy is. By default if no information is available, NoBackoff. +// TODO Generalize this see #17727 . +func readExpBackoffConfig() BackoffManager { + backoffBase := os.Getenv(envBackoffBase) + backoffDuration := os.Getenv(envBackoffDuration) + + backoffBaseInt, errBase := strconv.ParseInt(backoffBase, 10, 64) + backoffDurationInt, errDuration := strconv.ParseInt(backoffDuration, 10, 64) + if errBase != nil || errDuration != nil { + return &NoBackoff{} + } + return &URLBackoff{ + Backoff: flowcontrol.NewBackOff( + time.Duration(backoffBaseInt)*time.Second, + time.Duration(backoffDurationInt)*time.Second)} +} + +// createSerializers creates all necessary serializers for given contentType. 
+// TODO: the negotiated serializer passed to this method should probably return +// serializers that control decoding and versioning without this package +// being aware of the types. Depends on whether RESTClient must deal with +// generic infrastructure. +func createSerializers(config ContentConfig) (*Serializers, error) { + mediaTypes := config.NegotiatedSerializer.SupportedMediaTypes() + contentType := config.ContentType + mediaType, _, err := mime.ParseMediaType(contentType) + if err != nil { + return nil, fmt.Errorf("the content type specified in the client configuration is not recognized: %v", err) + } + info, ok := runtime.SerializerInfoForMediaType(mediaTypes, mediaType) + if !ok { + if len(contentType) != 0 || len(mediaTypes) == 0 { + return nil, fmt.Errorf("no serializers registered for %s", contentType) + } + info = mediaTypes[0] + } + + internalGV := schema.GroupVersions{ + { + Group: config.GroupVersion.Group, + Version: runtime.APIVersionInternal, + }, + // always include the legacy group as a decoding target to handle non-error `Status` return types + { + Group: "", + Version: runtime.APIVersionInternal, + }, + } + + s := &Serializers{ + Encoder: config.NegotiatedSerializer.EncoderForVersion(info.Serializer, *config.GroupVersion), + Decoder: config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV), + + RenegotiatedDecoder: func(contentType string, params map[string]string) (runtime.Decoder, error) { + info, ok := runtime.SerializerInfoForMediaType(mediaTypes, contentType) + if !ok { + return nil, fmt.Errorf("serializer for %s not registered", contentType) + } + return config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV), nil + }, + } + if info.StreamSerializer != nil { + s.StreamingSerializer = info.StreamSerializer.Serializer + s.Framer = info.StreamSerializer.Framer + } + + return s, nil +} + +// Verb begins a request with a verb (GET, POST, PUT, DELETE). +// +// Example usage of RESTClient's request building interface: +// c, err := NewRESTClient(...) +// if err != nil { ... } +// resp, err := c.Verb("GET"). +// Path("pods"). +// SelectorParam("labels", "area=staging"). +// Timeout(10*time.Second). +// Do() +// if err != nil { ... } +// list, ok := resp.(*api.PodList) +// +func (c *RESTClient) Verb(verb string) *Request { + backoff := c.createBackoffMgr() + + if c.Client == nil { + return NewRequest(nil, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle) + } + return NewRequest(c.Client, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle) +} + +// Post begins a POST request. Short for c.Verb("POST"). +func (c *RESTClient) Post() *Request { + return c.Verb("POST") +} + +// Put begins a PUT request. Short for c.Verb("PUT"). +func (c *RESTClient) Put() *Request { + return c.Verb("PUT") +} + +// Patch begins a PATCH request. Short for c.Verb("Patch"). +func (c *RESTClient) Patch(pt types.PatchType) *Request { + return c.Verb("PATCH").SetHeader("Content-Type", string(pt)) +} + +// Get begins a GET request. Short for c.Verb("GET"). +func (c *RESTClient) Get() *Request { + return c.Verb("GET") +} + +// Delete begins a DELETE request. Short for c.Verb("DELETE"). +func (c *RESTClient) Delete() *Request { + return c.Verb("DELETE") +} + +// APIVersion returns the APIVersion this RESTClient is expected to use. 
+func (c *RESTClient) APIVersion() schema.GroupVersion { + return *c.contentConfig.GroupVersion +} diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go new file mode 100644 index 000000000..2a2c03dff --- /dev/null +++ b/vendor/k8s.io/client-go/rest/config.go @@ -0,0 +1,384 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "path" + gruntime "runtime" + "strings" + "time" + + "github.com/golang/glog" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/pkg/api" + "k8s.io/client-go/pkg/version" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + certutil "k8s.io/client-go/util/cert" + "k8s.io/client-go/util/flowcontrol" +) + +const ( + DefaultQPS float32 = 5.0 + DefaultBurst int = 10 +) + +// Config holds the common attributes that can be passed to a Kubernetes client on +// initialization. +type Config struct { + // Host must be a host string, a host:port pair, or a URL to the base of the apiserver. + // If a URL is given then the (optional) Path of that URL represents a prefix that must + // be appended to all request URIs used to access the apiserver. This allows a frontend + // proxy to easily relocate all of the apiserver endpoints. + Host string + // APIPath is a sub-path that points to an API root. + APIPath string + // Prefix is the sub path of the server. If not specified, the client will set + // a default value. Use "/" to indicate the server root should be used + Prefix string + + // ContentConfig contains settings that affect how objects are transformed when + // sent to the server. + ContentConfig + + // Server requires Basic authentication + Username string + Password string + + // Server requires Bearer authentication. This client will not attempt to use + // refresh tokens for an OAuth2 flow. + // TODO: demonstrate an OAuth2 compatible client. + BearerToken string + + // Impersonate is the configuration that RESTClient will use for impersonation. + Impersonate ImpersonationConfig + + // Server requires plugin-specified authentication. + AuthProvider *clientcmdapi.AuthProviderConfig + + // Callback to persist config for AuthProvider. + AuthConfigPersister AuthProviderConfigPersister + + // TLSClientConfig contains settings to enable transport layer security + TLSClientConfig + + // UserAgent is an optional field that specifies the caller of this request. + UserAgent string + + // Transport may be used for custom HTTP behavior. This attribute may not + // be specified with the TLS client certificate options. Use WrapTransport + // for most client level operations. + Transport http.RoundTripper + // WrapTransport will be invoked for custom HTTP behavior after the underlying + // transport is initialized (either the transport created from TLSClientConfig, + // Transport, or http.DefaultTransport). 
The config may layer other RoundTrippers
+	// on top of the returned RoundTripper.
+	WrapTransport func(rt http.RoundTripper) http.RoundTripper
+
+	// QPS indicates the maximum QPS to the master from this client.
+	// If it's zero, the created RESTClient will use DefaultQPS: 5
+	QPS float32
+
+	// Maximum burst for throttle.
+	// If it's zero, the created RESTClient will use DefaultBurst: 10.
+	Burst int
+
+	// Rate limiter for limiting connections to the master from this client. If present, overwrites QPS/Burst.
+	RateLimiter flowcontrol.RateLimiter
+
+	// The maximum length of time to wait before giving up on a server request. A value of zero means no timeout.
+	Timeout time.Duration
+
+	// Version forces a specific version to be used (if registered)
+	// Do we need this?
+	// Version string
+}
+
+// ImpersonationConfig has all the available impersonation options
+type ImpersonationConfig struct {
+	// UserName is the username to impersonate on each request.
+	UserName string
+	// Groups are the groups to impersonate on each request.
+	Groups []string
+	// Extra is a free-form field which can be used to link some authentication information
+	// to authorization information. This field allows you to impersonate it.
+	Extra map[string][]string
+}
+
+// TLSClientConfig contains settings to enable transport layer security
+type TLSClientConfig struct {
+	// Server should be accessed without verifying the TLS certificate. For testing only.
+	Insecure bool
+	// ServerName is passed to the server for SNI and is used in the client to check server
+	// certificates against. If ServerName is empty, the hostname used to contact the
+	// server is used.
+	ServerName string
+
+	// Server requires TLS client certificate authentication
+	CertFile string
+	// Server requires TLS client certificate authentication
+	KeyFile string
+	// Trusted root certificates for server
+	CAFile string
+
+	// CertData holds PEM-encoded bytes (typically read from a client certificate file).
+	// CertData takes precedence over CertFile
+	CertData []byte
+	// KeyData holds PEM-encoded bytes (typically read from a client certificate key file).
+	// KeyData takes precedence over KeyFile
+	KeyData []byte
+	// CAData holds PEM-encoded bytes (typically read from a root certificates bundle).
+	// CAData takes precedence over CAFile
+	CAData []byte
+}
+
+type ContentConfig struct {
+	// AcceptContentTypes specifies the types the client will accept and is optional.
+	// If not set, ContentType will be used to define the Accept header
+	AcceptContentTypes string
+	// ContentType specifies the wire format used to communicate with the server.
+	// This value will be set as the Accept header on requests made to the server, and
+	// as the default content type on any object sent to the server. If not set,
+	// "application/json" is used.
+	ContentType string
+	// GroupVersion is the API version to talk to. Must be provided when initializing
+	// a RESTClient directly. When initializing a Client, will be set with the default
+	// code version.
+	GroupVersion *schema.GroupVersion
+	// NegotiatedSerializer is used for obtaining encoders and decoders for multiple
+	// supported media types.
+	NegotiatedSerializer runtime.NegotiatedSerializer
+}
+
+// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config
+// object. Note that a RESTClient may require fields that are optional when initializing a Client.
+// A RESTClient created by this method is generic - it expects to operate on an API that follows
+// the Kubernetes conventions, but may not be the Kubernetes API.
+func RESTClientFor(config *Config) (*RESTClient, error) {
+	if config.GroupVersion == nil {
+		return nil, fmt.Errorf("GroupVersion is required when initializing a RESTClient")
+	}
+	if config.NegotiatedSerializer == nil {
+		return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient")
+	}
+	qps := config.QPS
+	if config.QPS == 0.0 {
+		qps = DefaultQPS
+	}
+	burst := config.Burst
+	if config.Burst == 0 {
+		burst = DefaultBurst
+	}
+
+	baseURL, versionedAPIPath, err := defaultServerUrlFor(config)
+	if err != nil {
+		return nil, err
+	}
+
+	transport, err := TransportFor(config)
+	if err != nil {
+		return nil, err
+	}
+
+	var httpClient *http.Client
+	if transport != http.DefaultTransport {
+		httpClient = &http.Client{Transport: transport}
+		if config.Timeout > 0 {
+			httpClient.Timeout = config.Timeout
+		}
+	}
+
+	return NewRESTClient(baseURL, versionedAPIPath, config.ContentConfig, qps, burst, config.RateLimiter, httpClient)
+}
+
+// UnversionedRESTClientFor is the same as RESTClientFor, except that it allows
+// the config.Version to be empty.
+func UnversionedRESTClientFor(config *Config) (*RESTClient, error) {
+	if config.NegotiatedSerializer == nil {
+		return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient")
+	}
+
+	baseURL, versionedAPIPath, err := defaultServerUrlFor(config)
+	if err != nil {
+		return nil, err
+	}
+
+	transport, err := TransportFor(config)
+	if err != nil {
+		return nil, err
+	}
+
+	var httpClient *http.Client
+	if transport != http.DefaultTransport {
+		httpClient = &http.Client{Transport: transport}
+		if config.Timeout > 0 {
+			httpClient.Timeout = config.Timeout
+		}
+	}
+
+	versionConfig := config.ContentConfig
+	if versionConfig.GroupVersion == nil {
+		v := metav1.SchemeGroupVersion
+		versionConfig.GroupVersion = &v
+	}
+
+	return NewRESTClient(baseURL, versionedAPIPath, versionConfig, config.QPS, config.Burst, config.RateLimiter, httpClient)
+}
+
+// SetKubernetesDefaults sets default values on the provided client config for accessing the
+// Kubernetes API or returns an error if any of the defaults are impossible or invalid.
+func SetKubernetesDefaults(config *Config) error {
+	if len(config.UserAgent) == 0 {
+		config.UserAgent = DefaultKubernetesUserAgent()
+	}
+	return nil
+}
+
+// DefaultKubernetesUserAgent returns the default user agent that clients can use.
+func DefaultKubernetesUserAgent() string {
+	commit := version.Get().GitCommit
+	if len(commit) > 7 {
+		commit = commit[:7]
+	}
+	if len(commit) == 0 {
+		commit = "unknown"
+	}
+	version := version.Get().GitVersion
+	seg := strings.SplitN(version, "-", 2)
+	version = seg[0]
+	return fmt.Sprintf("%s/%s (%s/%s) kubernetes/%s", path.Base(os.Args[0]), version, gruntime.GOOS, gruntime.GOARCH, commit)
+}
+
+// InClusterConfig returns a config object which uses the service account
+// kubernetes gives to pods. It's intended for clients that expect to be
+// running inside a pod running on kubernetes. It will return an error if
+// called from a process not running in a kubernetes environment.
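+//
+// A minimal sketch of in-cluster use; the ContentConfig fields (GroupVersion,
+// NegotiatedSerializer) still have to be filled in before the config can be
+// handed to RESTClientFor:
+//
+//	config, err := InClusterConfig()
+//	if err != nil {
+//		// not running inside a pod
+//	}
+//	config.GroupVersion = &schema.GroupVersion{Version: "v1"}
+//	config.NegotiatedSerializer = api.Codecs // e.g. the codec factory from k8s.io/client-go/pkg/api
+//	client, err := RESTClientFor(config)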
+func InClusterConfig() (*Config, error) { + host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT") + if len(host) == 0 || len(port) == 0 { + return nil, fmt.Errorf("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined") + } + + token, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/" + api.ServiceAccountTokenKey) + if err != nil { + return nil, err + } + tlsClientConfig := TLSClientConfig{} + rootCAFile := "/var/run/secrets/kubernetes.io/serviceaccount/" + api.ServiceAccountRootCAKey + if _, err := certutil.NewPool(rootCAFile); err != nil { + glog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) + } else { + tlsClientConfig.CAFile = rootCAFile + } + + return &Config{ + // TODO: switch to using cluster DNS. + Host: "https://" + net.JoinHostPort(host, port), + BearerToken: string(token), + TLSClientConfig: tlsClientConfig, + }, nil +} + +// IsConfigTransportTLS returns true if and only if the provided +// config will result in a protected connection to the server when it +// is passed to restclient.RESTClientFor(). Use to determine when to +// send credentials over the wire. +// +// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are +// still possible. +func IsConfigTransportTLS(config Config) bool { + baseURL, _, err := defaultServerUrlFor(&config) + if err != nil { + return false + } + return baseURL.Scheme == "https" +} + +// LoadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData, +// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are +// either populated or were empty to start. +func LoadTLSFiles(c *Config) error { + var err error + c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile) + if err != nil { + return err + } + + c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile) + if err != nil { + return err + } + + c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile) + if err != nil { + return err + } + return nil +} + +// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, +// or an error if an error occurred reading the file +func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { + if len(data) > 0 { + return data, nil + } + if len(file) > 0 { + fileData, err := ioutil.ReadFile(file) + if err != nil { + return []byte{}, err + } + return fileData, nil + } + return nil, nil +} + +func AddUserAgent(config *Config, userAgent string) *Config { + fullUserAgent := DefaultKubernetesUserAgent() + "/" + userAgent + config.UserAgent = fullUserAgent + return config +} + +// AnonymousClientConfig returns a copy of the given config with all user credentials (cert/key, bearer token, and username/password) removed +func AnonymousClientConfig(config *Config) *Config { + // copy only known safe fields + return &Config{ + Host: config.Host, + APIPath: config.APIPath, + Prefix: config.Prefix, + ContentConfig: config.ContentConfig, + TLSClientConfig: TLSClientConfig{ + Insecure: config.Insecure, + ServerName: config.ServerName, + CAFile: config.TLSClientConfig.CAFile, + CAData: config.TLSClientConfig.CAData, + }, + RateLimiter: config.RateLimiter, + UserAgent: config.UserAgent, + Transport: config.Transport, + WrapTransport: config.WrapTransport, + QPS: config.QPS, + Burst: config.Burst, + Timeout: config.Timeout, + } +} diff --git a/vendor/k8s.io/client-go/rest/plugin.go 
b/vendor/k8s.io/client-go/rest/plugin.go new file mode 100644 index 000000000..cf8fbabfd --- /dev/null +++ b/vendor/k8s.io/client-go/rest/plugin.go @@ -0,0 +1,73 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "fmt" + "net/http" + "sync" + + "github.com/golang/glog" + + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +type AuthProvider interface { + // WrapTransport allows the plugin to create a modified RoundTripper that + // attaches authorization headers (or other info) to requests. + WrapTransport(http.RoundTripper) http.RoundTripper + // Login allows the plugin to initialize its configuration. It must not + // require direct user interaction. + Login() error +} + +// Factory generates an AuthProvider plugin. +// clusterAddress is the address of the current cluster. +// config is the initial configuration for this plugin. +// persister allows the plugin to save updated configuration. +type Factory func(clusterAddress string, config map[string]string, persister AuthProviderConfigPersister) (AuthProvider, error) + +// AuthProviderConfigPersister allows a plugin to persist configuration info +// for just itself. +type AuthProviderConfigPersister interface { + Persist(map[string]string) error +} + +// All registered auth provider plugins. +var pluginsLock sync.Mutex +var plugins = make(map[string]Factory) + +func RegisterAuthProviderPlugin(name string, plugin Factory) error { + pluginsLock.Lock() + defer pluginsLock.Unlock() + if _, found := plugins[name]; found { + return fmt.Errorf("Auth Provider Plugin %q was registered twice", name) + } + glog.V(4).Infof("Registered Auth Provider Plugin %q", name) + plugins[name] = plugin + return nil +} + +func GetAuthProvider(clusterAddress string, apc *clientcmdapi.AuthProviderConfig, persister AuthProviderConfigPersister) (AuthProvider, error) { + pluginsLock.Lock() + defer pluginsLock.Unlock() + p, ok := plugins[apc.Name] + if !ok { + return nil, fmt.Errorf("No Auth Provider found for name %q", apc.Name) + } + return p(clusterAddress, apc.Config, persister) +} diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go new file mode 100644 index 000000000..b87ddaff5 --- /dev/null +++ b/vendor/k8s.io/client-go/rest/request.go @@ -0,0 +1,1247 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package rest
+
+import (
+	"bytes"
+	"context"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"net/url"
+	"path"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang/glog"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
+	"k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/client-go/pkg/api/v1"
+	restclientwatch "k8s.io/client-go/rest/watch"
+	"k8s.io/client-go/tools/metrics"
+	"k8s.io/client-go/util/flowcontrol"
+)
+
+var (
+	// specialParams lists parameters that are handled specially and which users of Request
+	// are therefore not allowed to set manually.
+	specialParams = sets.NewString("timeout")
+
+	// longThrottleLatency defines threshold for logging requests. All requests being
+	// throttled for more than longThrottleLatency will be logged.
+	longThrottleLatency = 50 * time.Millisecond
+)
+
+// HTTPClient is an interface for testing a request object.
+type HTTPClient interface {
+	Do(req *http.Request) (*http.Response, error)
+}
+
+// ResponseWrapper is an interface for getting a response.
+// The response may be either accessed as raw data (the whole output is put into memory) or as a stream.
+type ResponseWrapper interface {
+	DoRaw() ([]byte, error)
+	Stream() (io.ReadCloser, error)
+}
+
+// RequestConstructionError is returned when there's an error assembling a request.
+type RequestConstructionError struct {
+	Err error
+}
+
+// Error returns a textual description of 'r'.
+func (r *RequestConstructionError) Error() string {
+	return fmt.Sprintf("request construction error: '%v'", r.Err)
+}
+
+// Request allows for building up a request to a server in a chained fashion.
+// Any errors are stored until the end of your call, so you only have to
+// check once.
+type Request struct {
+	// required
+	client HTTPClient
+	verb   string
+
+	baseURL     *url.URL
+	content     ContentConfig
+	serializers Serializers
+
+	// generic components accessible via method setters
+	pathPrefix string
+	subpath    string
+	params     url.Values
+	headers    http.Header
+
+	// structural elements of the request that are part of the Kubernetes API conventions
+	namespace    string
+	namespaceSet bool
+	resource     string
+	resourceName string
+	subresource  string
+	timeout      time.Duration
+
+	// output
+	err  error
+	body io.Reader
+
+	// This is only used for per-request timeouts, deadlines, and cancellations.
+	ctx context.Context
+
+	backoffMgr BackoffManager
+	throttle   flowcontrol.RateLimiter
+}
+
+// NewRequest creates a new request helper object for accessing runtime.Objects on a server.
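+// Most callers obtain a Request through RESTClient.Verb (or the Get/Post/Put/Patch/Delete
+// shorthands) rather than constructing one directly.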
+func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter) *Request {
+	if backoff == nil {
+		glog.V(2).Infof("Not implementing request backoff strategy.")
+		backoff = &NoBackoff{}
+	}
+
+	pathPrefix := "/"
+	if baseURL != nil {
+		pathPrefix = path.Join(pathPrefix, baseURL.Path)
+	}
+	r := &Request{
+		client:      client,
+		verb:        verb,
+		baseURL:     baseURL,
+		pathPrefix:  path.Join(pathPrefix, versionedAPIPath),
+		content:     content,
+		serializers: serializers,
+		backoffMgr:  backoff,
+		throttle:    throttle,
+	}
+	switch {
+	case len(content.AcceptContentTypes) > 0:
+		r.SetHeader("Accept", content.AcceptContentTypes)
+	case len(content.ContentType) > 0:
+		r.SetHeader("Accept", content.ContentType+", */*")
+	}
+	return r
+}
+
+// Prefix adds segments to the relative beginning of the request path. These
+// items will be placed before the optional Namespace, Resource, or Name sections.
+// Setting AbsPath will clear any previously set Prefix segments.
+func (r *Request) Prefix(segments ...string) *Request {
+	if r.err != nil {
+		return r
+	}
+	r.pathPrefix = path.Join(r.pathPrefix, path.Join(segments...))
+	return r
+}
+
+// Suffix appends segments to the end of the path. These items will be placed after the prefix and optional
+// Namespace, Resource, or Name sections.
+func (r *Request) Suffix(segments ...string) *Request {
+	if r.err != nil {
+		return r
+	}
+	r.subpath = path.Join(r.subpath, path.Join(segments...))
+	return r
+}
+
+// Resource sets the resource to access (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Resource(resource string) *Request {
+	if r.err != nil {
+		return r
+	}
+	if len(r.resource) != 0 {
+		r.err = fmt.Errorf("resource already set to %q, cannot change to %q", r.resource, resource)
+		return r
+	}
+	if msgs := IsValidPathSegmentName(resource); len(msgs) != 0 {
+		r.err = fmt.Errorf("invalid resource %q: %v", resource, msgs)
+		return r
+	}
+	r.resource = resource
+	return r
+}
+
+// SubResource sets a sub-resource path which can be multiple segments after the resource
+// name but before the suffix.
+func (r *Request) SubResource(subresources ...string) *Request {
+	if r.err != nil {
+		return r
+	}
+	subresource := path.Join(subresources...)
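+	// e.g. SubResource("status") makes the final URL end in .../<resource>/<name>/status.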
+	if len(r.subresource) != 0 {
+		r.err = fmt.Errorf("subresource already set to %q, cannot change to %q", r.subresource, subresource)
+		return r
+	}
+	for _, s := range subresources {
+		if msgs := IsValidPathSegmentName(s); len(msgs) != 0 {
+			r.err = fmt.Errorf("invalid subresource %q: %v", s, msgs)
+			return r
+		}
+	}
+	r.subresource = subresource
+	return r
+}
+
+// Name sets the name of a resource to access (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Name(resourceName string) *Request {
+	if r.err != nil {
+		return r
+	}
+	if len(resourceName) == 0 {
+		r.err = fmt.Errorf("resource name may not be empty")
+		return r
+	}
+	if len(r.resourceName) != 0 {
+		r.err = fmt.Errorf("resource name already set to %q, cannot change to %q", r.resourceName, resourceName)
+		return r
+	}
+	if msgs := IsValidPathSegmentName(resourceName); len(msgs) != 0 {
+		r.err = fmt.Errorf("invalid resource name %q: %v", resourceName, msgs)
+		return r
+	}
+	r.resourceName = resourceName
+	return r
+}
+
+// Namespace applies the namespace scope to a request (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Namespace(namespace string) *Request {
+	if r.err != nil {
+		return r
+	}
+	if r.namespaceSet {
+		r.err = fmt.Errorf("namespace already set to %q, cannot change to %q", r.namespace, namespace)
+		return r
+	}
+	if msgs := IsValidPathSegmentName(namespace); len(msgs) != 0 {
+		r.err = fmt.Errorf("invalid namespace %q: %v", namespace, msgs)
+		return r
+	}
+	r.namespaceSet = true
+	r.namespace = namespace
+	return r
+}
+
+// NamespaceIfScoped is a convenience function to set a namespace if scoped is true
+func (r *Request) NamespaceIfScoped(namespace string, scoped bool) *Request {
+	if scoped {
+		return r.Namespace(namespace)
+	}
+	return r
+}
+
+// AbsPath overwrites an existing path with the segments provided. Trailing slashes are preserved
+// when a single segment is passed.
+func (r *Request) AbsPath(segments ...string) *Request {
+	if r.err != nil {
+		return r
+	}
+	r.pathPrefix = path.Join(r.baseURL.Path, path.Join(segments...))
+	if len(segments) == 1 && (len(r.baseURL.Path) > 1 || len(segments[0]) > 1) && strings.HasSuffix(segments[0], "/") {
+		// preserve any trailing slashes for legacy behavior
+		r.pathPrefix += "/"
+	}
+	return r
+}
+
+// RequestURI overwrites existing path and parameters with the value of the provided server relative
+// URI. Some parameters (those in specialParams) cannot be overwritten.
+func (r *Request) RequestURI(uri string) *Request {
+	if r.err != nil {
+		return r
+	}
+	locator, err := url.Parse(uri)
+	if err != nil {
+		r.err = err
+		return r
+	}
+	r.pathPrefix = locator.Path
+	if len(locator.Query()) > 0 {
+		if r.params == nil {
+			r.params = make(url.Values)
+		}
+		for k, v := range locator.Query() {
+			r.params[k] = v
+		}
+	}
+	return r
+}
+
+const (
+	// A constant that clients can use to refer in a field selector to the object name field.
+	// Will be automatically emitted as the correct name for the API version.
+ nodeUnschedulable = "spec.unschedulable" + objectNameField = "metadata.name" + podHost = "spec.nodeName" + podStatus = "status.phase" + secretType = "type" + + eventReason = "reason" + eventSource = "source" + eventType = "type" + eventInvolvedKind = "involvedObject.kind" + eventInvolvedNamespace = "involvedObject.namespace" + eventInvolvedName = "involvedObject.name" + eventInvolvedUID = "involvedObject.uid" + eventInvolvedAPIVersion = "involvedObject.apiVersion" + eventInvolvedResourceVersion = "involvedObject.resourceVersion" + eventInvolvedFieldPath = "involvedObject.fieldPath" +) + +type clientFieldNameToAPIVersionFieldName map[string]string + +func (c clientFieldNameToAPIVersionFieldName) filterField(field, value string) (newField, newValue string, err error) { + newFieldName, ok := c[field] + if !ok { + return "", "", fmt.Errorf("%v - %v - no field mapping defined", field, value) + } + return newFieldName, value, nil +} + +type resourceTypeToFieldMapping map[string]clientFieldNameToAPIVersionFieldName + +func (r resourceTypeToFieldMapping) filterField(resourceType, field, value string) (newField, newValue string, err error) { + fMapping, ok := r[resourceType] + if !ok { + return "", "", fmt.Errorf("%v - %v - %v - no field mapping defined", resourceType, field, value) + } + return fMapping.filterField(field, value) +} + +type versionToResourceToFieldMapping map[schema.GroupVersion]resourceTypeToFieldMapping + +// filterField transforms the given field/value selector for the given groupVersion and resource +func (v versionToResourceToFieldMapping) filterField(groupVersion *schema.GroupVersion, resourceType, field, value string) (newField, newValue string, err error) { + rMapping, ok := v[*groupVersion] + if !ok { + // no groupVersion overrides registered, default to identity mapping + return field, value, nil + } + newField, newValue, err = rMapping.filterField(resourceType, field, value) + if err != nil { + // no groupVersionResource overrides registered, default to identity mapping + return field, value, nil + } + return newField, newValue, nil +} + +var fieldMappings = versionToResourceToFieldMapping{ + v1.SchemeGroupVersion: resourceTypeToFieldMapping{ + "nodes": clientFieldNameToAPIVersionFieldName{ + objectNameField: objectNameField, + nodeUnschedulable: nodeUnschedulable, + }, + "pods": clientFieldNameToAPIVersionFieldName{ + objectNameField: objectNameField, + podHost: podHost, + podStatus: podStatus, + }, + "secrets": clientFieldNameToAPIVersionFieldName{ + secretType: secretType, + }, + "serviceAccounts": clientFieldNameToAPIVersionFieldName{ + objectNameField: objectNameField, + }, + "endpoints": clientFieldNameToAPIVersionFieldName{ + objectNameField: objectNameField, + }, + "events": clientFieldNameToAPIVersionFieldName{ + objectNameField: objectNameField, + eventReason: eventReason, + eventSource: eventSource, + eventType: eventType, + eventInvolvedKind: eventInvolvedKind, + eventInvolvedNamespace: eventInvolvedNamespace, + eventInvolvedName: eventInvolvedName, + eventInvolvedUID: eventInvolvedUID, + eventInvolvedAPIVersion: eventInvolvedAPIVersion, + eventInvolvedResourceVersion: eventInvolvedResourceVersion, + eventInvolvedFieldPath: eventInvolvedFieldPath, + }, + }, +} + +// FieldsSelectorParam adds the given selector as a query parameter with the name paramName. 
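+// The selector is first rewritten through fieldMappings above so that client-side field
+// names (for example "spec.nodeName" on pods) match what the API server expects.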
+func (r *Request) FieldsSelectorParam(s fields.Selector) *Request { + if r.err != nil { + return r + } + if s == nil { + return r + } + if s.Empty() { + return r + } + s2, err := s.Transform(func(field, value string) (newField, newValue string, err error) { + return fieldMappings.filterField(r.content.GroupVersion, r.resource, field, value) + }) + if err != nil { + r.err = err + return r + } + return r.setParam(metav1.FieldSelectorQueryParam(r.content.GroupVersion.String()), s2.String()) +} + +// LabelsSelectorParam adds the given selector as a query parameter +func (r *Request) LabelsSelectorParam(s labels.Selector) *Request { + if r.err != nil { + return r + } + if s == nil { + return r + } + if s.Empty() { + return r + } + return r.setParam(metav1.LabelSelectorQueryParam(r.content.GroupVersion.String()), s.String()) +} + +// UintParam creates a query parameter with the given value. +func (r *Request) UintParam(paramName string, u uint64) *Request { + if r.err != nil { + return r + } + return r.setParam(paramName, strconv.FormatUint(u, 10)) +} + +// Param creates a query parameter with the given string value. +func (r *Request) Param(paramName, s string) *Request { + if r.err != nil { + return r + } + return r.setParam(paramName, s) +} + +// VersionedParams will take the provided object, serialize it to a map[string][]string using the +// implicit RESTClient API version and the default parameter codec, and then add those as parameters +// to the request. Use this to provide versioned query parameters from client libraries. +func (r *Request) VersionedParams(obj runtime.Object, codec runtime.ParameterCodec) *Request { + if r.err != nil { + return r + } + params, err := codec.EncodeParameters(obj, *r.content.GroupVersion) + if err != nil { + r.err = err + return r + } + for k, v := range params { + for _, value := range v { + // TODO: Move it to setParam method, once we get rid of + // FieldSelectorParam & LabelSelectorParam methods. + if k == metav1.LabelSelectorQueryParam(r.content.GroupVersion.String()) && value == "" { + // Don't set an empty selector for backward compatibility. + // Since there is no way to get the difference between empty + // and unspecified string, we don't set it to avoid having + // labelSelector= param in every request. + continue + } + if k == metav1.FieldSelectorQueryParam(r.content.GroupVersion.String()) { + if len(value) == 0 { + // Don't set an empty selector for backward compatibility. + // Since there is no way to get the difference between empty + // and unspecified string, we don't set it to avoid having + // fieldSelector= param in every request. + continue + } + // TODO: Filtering should be handled somewhere else. 
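+				// Re-parse the encoded selector so its field names can be passed
+				// through fieldMappings, mirroring FieldsSelectorParam above.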
+ selector, err := fields.ParseSelector(value) + if err != nil { + r.err = fmt.Errorf("unparsable field selector: %v", err) + return r + } + filteredSelector, err := selector.Transform( + func(field, value string) (newField, newValue string, err error) { + return fieldMappings.filterField(r.content.GroupVersion, r.resource, field, value) + }) + if err != nil { + r.err = fmt.Errorf("untransformable field selector: %v", err) + return r + } + value = filteredSelector.String() + } + + r.setParam(k, value) + } + } + return r +} + +func (r *Request) setParam(paramName, value string) *Request { + if specialParams.Has(paramName) { + r.err = fmt.Errorf("must set %v through the corresponding function, not directly.", paramName) + return r + } + if r.params == nil { + r.params = make(url.Values) + } + r.params[paramName] = append(r.params[paramName], value) + return r +} + +func (r *Request) SetHeader(key, value string) *Request { + if r.headers == nil { + r.headers = http.Header{} + } + r.headers.Set(key, value) + return r +} + +// Timeout makes the request use the given duration as a timeout. Sets the "timeout" +// parameter. +func (r *Request) Timeout(d time.Duration) *Request { + if r.err != nil { + return r + } + r.timeout = d + return r +} + +// Body makes the request use obj as the body. Optional. +// If obj is a string, try to read a file of that name. +// If obj is a []byte, send it directly. +// If obj is an io.Reader, use it directly. +// If obj is a runtime.Object, marshal it correctly, and set Content-Type header. +// If obj is a runtime.Object and nil, do nothing. +// Otherwise, set an error. +func (r *Request) Body(obj interface{}) *Request { + if r.err != nil { + return r + } + switch t := obj.(type) { + case string: + data, err := ioutil.ReadFile(t) + if err != nil { + r.err = err + return r + } + glogBody("Request Body", data) + r.body = bytes.NewReader(data) + case []byte: + glogBody("Request Body", t) + r.body = bytes.NewReader(t) + case io.Reader: + r.body = t + case runtime.Object: + // callers may pass typed interface pointers, therefore we must check nil with reflection + if reflect.ValueOf(t).IsNil() { + return r + } + data, err := runtime.Encode(r.serializers.Encoder, t) + if err != nil { + r.err = err + return r + } + glogBody("Request Body", data) + r.body = bytes.NewReader(data) + r.SetHeader("Content-Type", r.content.ContentType) + default: + r.err = fmt.Errorf("unknown type used for body: %+v", obj) + } + return r +} + +// Context adds a context to the request. Contexts are only used for +// timeouts, deadlines, and cancellations. +func (r *Request) Context(ctx context.Context) *Request { + r.ctx = ctx + return r +} + +// URL returns the current working URL. +func (r *Request) URL() *url.URL { + p := r.pathPrefix + if r.namespaceSet && len(r.namespace) > 0 { + p = path.Join(p, "namespaces", r.namespace) + } + if len(r.resource) != 0 { + p = path.Join(p, strings.ToLower(r.resource)) + } + // Join trims trailing slashes, so preserve r.pathPrefix's trailing slash for backwards compatibility if nothing was changed + if len(r.resourceName) != 0 || len(r.subpath) != 0 || len(r.subresource) != 0 { + p = path.Join(p, r.resourceName, r.subresource, r.subpath) + } + + finalURL := &url.URL{} + if r.baseURL != nil { + *finalURL = *r.baseURL + } + finalURL.Path = p + + query := url.Values{} + for key, values := range r.params { + for _, value := range values { + query.Add(key, value) + } + } + + // timeout is handled specially here. 
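+	// For example, Timeout(32*time.Second) is emitted as "timeout=32s" in the query string.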
+	if r.timeout != 0 {
+		query.Set("timeout", r.timeout.String())
+	}
+	finalURL.RawQuery = query.Encode()
+	return finalURL
+}
+
+// finalURLTemplate is similar to URL(), but will make all specific parameter values equal
+// - instead of name or namespace, "{name}" and "{namespace}" will be used, and all query
+// parameters will be reset. This creates a copy of the request so as not to change the
+// underlying object. This means some useful request info (like the types of field
+// selectors in use) will be lost.
+// TODO: preserve field selector keys
+func (r Request) finalURLTemplate() url.URL {
+	if len(r.resourceName) != 0 {
+		r.resourceName = "{name}"
+	}
+	if r.namespaceSet && len(r.namespace) != 0 {
+		r.namespace = "{namespace}"
+	}
+	newParams := url.Values{}
+	v := []string{"{value}"}
+	for k := range r.params {
+		newParams[k] = v
+	}
+	r.params = newParams
+	url := r.URL()
+	return *url
+}
+
+func (r *Request) tryThrottle() {
+	now := time.Now()
+	if r.throttle != nil {
+		r.throttle.Accept()
+	}
+	if latency := time.Since(now); latency > longThrottleLatency {
+		glog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String())
+	}
+}
+
+// Watch attempts to begin watching the requested location.
+// Returns a watch.Interface, or an error.
+func (r *Request) Watch() (watch.Interface, error) {
+	// We specifically don't want to rate limit watches, so we
+	// don't use r.throttle here.
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.serializers.Framer == nil {
+		return nil, fmt.Errorf("watching resources is not possible with this client (content-type: %s)", r.content.ContentType)
+	}
+
+	url := r.URL().String()
+	req, err := http.NewRequest(r.verb, url, r.body)
+	if err != nil {
+		return nil, err
+	}
+	if r.ctx != nil {
+		req = req.WithContext(r.ctx)
+	}
+	req.Header = r.headers
+	client := r.client
+	if client == nil {
+		client = http.DefaultClient
+	}
+	r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL()))
+	resp, err := client.Do(req)
+	updateURLMetrics(r, resp, err)
+	if r.baseURL != nil {
+		if err != nil {
+			r.backoffMgr.UpdateBackoff(r.baseURL, err, 0)
+		} else {
+			r.backoffMgr.UpdateBackoff(r.baseURL, err, resp.StatusCode)
+		}
+	}
+	if err != nil {
+		// The watch stream mechanism handles many common partial data errors, so closed
+		// connections can be retried in many cases.
+		if net.IsProbableEOF(err) {
+			return watch.NewEmptyWatch(), nil
+		}
+		return nil, err
+	}
+	if resp.StatusCode != http.StatusOK {
+		defer resp.Body.Close()
+		if result := r.transformResponse(resp, req); result.err != nil {
+			return nil, result.err
+		}
+		return nil, fmt.Errorf("for request '%+v', got status: %v", url, resp.StatusCode)
+	}
+	framer := r.serializers.Framer.NewFrameReader(resp.Body)
+	decoder := streaming.NewDecoder(framer, r.serializers.StreamingSerializer)
+	return watch.NewStreamWatcher(restclientwatch.NewDecoder(decoder, r.serializers.Decoder)), nil
+}
+
+// updateURLMetrics is a convenience function for pushing metrics.
+// It also handles corner cases for incomplete/invalid request data.
+func updateURLMetrics(req *Request, resp *http.Response, err error) {
+	url := "none"
+	if req.baseURL != nil {
+		url = req.baseURL.Host
+	}
+
+	// Errors can be arbitrary strings. Unbound label cardinality is not suitable for a metric
+	// system so we just report them as `<error>`.
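+	// A DNS or connection failure, for example, is counted as ("<error>", verb, host)
+	// rather than leaking the raw error string into the label set.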
+ if err != nil { + metrics.RequestResult.Increment("<error>", req.verb, url) + } else { + //Metrics for failure codes + metrics.RequestResult.Increment(strconv.Itoa(resp.StatusCode), req.verb, url) + } +} + +// Stream formats and executes the request, and offers streaming of the response. +// Returns io.ReadCloser which could be used for streaming of the response, or an error +// Any non-2xx http status code causes an error. If we get a non-2xx code, we try to convert the body into an APIStatus object. +// If we can, we return that as an error. Otherwise, we create an error that lists the http status and the content of the response. +func (r *Request) Stream() (io.ReadCloser, error) { + if r.err != nil { + return nil, r.err + } + + r.tryThrottle() + + url := r.URL().String() + req, err := http.NewRequest(r.verb, url, nil) + if err != nil { + return nil, err + } + if r.ctx != nil { + req = req.WithContext(r.ctx) + } + req.Header = r.headers + client := r.client + if client == nil { + client = http.DefaultClient + } + r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL())) + resp, err := client.Do(req) + updateURLMetrics(r, resp, err) + if r.baseURL != nil { + if err != nil { + r.backoffMgr.UpdateBackoff(r.URL(), err, 0) + } else { + r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode) + } + } + if err != nil { + return nil, err + } + + switch { + case (resp.StatusCode >= 200) && (resp.StatusCode < 300): + return resp.Body, nil + + default: + // ensure we close the body before returning the error + defer resp.Body.Close() + + result := r.transformResponse(resp, req) + err := result.Error() + if err == nil { + err = fmt.Errorf("%d while accessing %v: %s", result.statusCode, url, string(result.body)) + } + return nil, err + } +} + +// request connects to the server and invokes the provided function when a server response is +// received. It handles retry behavior and up front validation of requests. It will invoke +// fn at most once. It will return an error if a problem occurred prior to connecting to the +// server - the provided function is responsible for handling server errors. +func (r *Request) request(fn func(*http.Request, *http.Response)) error { + //Metrics for total request latency + start := time.Now() + defer func() { + metrics.RequestLatency.Observe(r.verb, r.finalURLTemplate(), time.Since(start)) + }() + + if r.err != nil { + glog.V(4).Infof("Error in request: %v", r.err) + return r.err + } + + // TODO: added to catch programmer errors (invoking operations with an object with an empty namespace) + if (r.verb == "GET" || r.verb == "PUT" || r.verb == "DELETE") && r.namespaceSet && len(r.resourceName) > 0 && len(r.namespace) == 0 { + return fmt.Errorf("an empty namespace may not be set when a resource name is provided") + } + if (r.verb == "POST") && r.namespaceSet && len(r.namespace) == 0 { + return fmt.Errorf("an empty namespace may not be set during creation") + } + + client := r.client + if client == nil { + client = http.DefaultClient + } + + // Right now we make about ten retry attempts if we get a Retry-After response. + // TODO: Change to a timeout based approach. 
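+	// Together with the 1-second synthetic Retry-After used below for connection resets,
+	// this bounds a persistently failing GET to roughly ten retry rounds.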
+ maxRetries := 10 + retries := 0 + for { + url := r.URL().String() + req, err := http.NewRequest(r.verb, url, r.body) + if err != nil { + return err + } + if r.ctx != nil { + req = req.WithContext(r.ctx) + } + req.Header = r.headers + + r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL())) + if retries > 0 { + // We are retrying the request that we already send to apiserver + // at least once before. + // This request should also be throttled with the client-internal throttler. + r.tryThrottle() + } + resp, err := client.Do(req) + updateURLMetrics(r, resp, err) + if err != nil { + r.backoffMgr.UpdateBackoff(r.URL(), err, 0) + } else { + r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode) + } + if err != nil { + // "Connection reset by peer" is usually a transient error. + // Thus in case of "GET" operations, we simply retry it. + // We are not automatically retrying "write" operations, as + // they are not idempotent. + if !net.IsConnectionReset(err) || r.verb != "GET" { + return err + } + // For the purpose of retry, we set the artificial "retry-after" response. + // TODO: Should we clean the original response if it exists? + resp = &http.Response{ + StatusCode: http.StatusInternalServerError, + Header: http.Header{"Retry-After": []string{"1"}}, + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + + done := func() bool { + // Ensure the response body is fully read and closed + // before we reconnect, so that we reuse the same TCP + // connection. + defer func() { + const maxBodySlurpSize = 2 << 10 + if resp.ContentLength <= maxBodySlurpSize { + io.Copy(ioutil.Discard, &io.LimitedReader{R: resp.Body, N: maxBodySlurpSize}) + } + resp.Body.Close() + }() + + retries++ + if seconds, wait := checkWait(resp); wait && retries < maxRetries { + if seeker, ok := r.body.(io.Seeker); ok && r.body != nil { + _, err := seeker.Seek(0, 0) + if err != nil { + glog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body) + fn(req, resp) + return true + } + } + + glog.V(4).Infof("Got a Retry-After %s response for attempt %d to %v", seconds, retries, url) + r.backoffMgr.Sleep(time.Duration(seconds) * time.Second) + return false + } + fn(req, resp) + return true + }() + if done { + return nil + } + } +} + +// Do formats and executes the request. Returns a Result object for easy response +// processing. +// +// Error type: +// * If the request can't be constructed, or an error happened earlier while building its +// arguments: *RequestConstructionError +// * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError +// * http.Client.Do errors are returned directly. +func (r *Request) Do() Result { + r.tryThrottle() + + var result Result + err := r.request(func(req *http.Request, resp *http.Response) { + result = r.transformResponse(resp, req) + }) + if err != nil { + return Result{err: err} + } + return result +} + +// DoRaw executes the request but does not process the response body. 
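+// Callers that want the body decoded into an object should use Do instead.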
+func (r *Request) DoRaw() ([]byte, error) { + r.tryThrottle() + + var result Result + err := r.request(func(req *http.Request, resp *http.Response) { + result.body, result.err = ioutil.ReadAll(resp.Body) + glogBody("Response Body", result.body) + if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent { + result.err = r.transformUnstructuredResponseError(resp, req, result.body) + } + }) + if err != nil { + return nil, err + } + return result.body, result.err +} + +// transformResponse converts an API response into a structured API object +func (r *Request) transformResponse(resp *http.Response, req *http.Request) Result { + var body []byte + if resp.Body != nil { + if data, err := ioutil.ReadAll(resp.Body); err == nil { + body = data + } + } + + glogBody("Response Body", body) + + // verify the content type is accurate + contentType := resp.Header.Get("Content-Type") + decoder := r.serializers.Decoder + if len(contentType) > 0 && (decoder == nil || (len(r.content.ContentType) > 0 && contentType != r.content.ContentType)) { + mediaType, params, err := mime.ParseMediaType(contentType) + if err != nil { + return Result{err: errors.NewInternalError(err)} + } + decoder, err = r.serializers.RenegotiatedDecoder(mediaType, params) + if err != nil { + // if we fail to negotiate a decoder, treat this as an unstructured error + switch { + case resp.StatusCode == http.StatusSwitchingProtocols: + // no-op, we've been upgraded + case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent: + return Result{err: r.transformUnstructuredResponseError(resp, req, body)} + } + return Result{ + body: body, + contentType: contentType, + statusCode: resp.StatusCode, + } + } + } + + switch { + case resp.StatusCode == http.StatusSwitchingProtocols: + // no-op, we've been upgraded + case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent: + // calculate an unstructured error from the response which the Result object may use if the caller + // did not return a structured error. + retryAfter, _ := retryAfterSeconds(resp) + err := r.newUnstructuredResponseError(body, isTextResponse(resp), resp.StatusCode, req.Method, retryAfter) + return Result{ + body: body, + contentType: contentType, + statusCode: resp.StatusCode, + decoder: decoder, + err: err, + } + } + + return Result{ + body: body, + contentType: contentType, + statusCode: resp.StatusCode, + decoder: decoder, + } +} + +// glogBody logs a body output that could be either JSON or protobuf. It explicitly guards against +// allocating a new string for the body output unless necessary. Uses a simple heuristic to determine +// whether the body is printable. +func glogBody(prefix string, body []byte) { + if glog.V(8) { + if bytes.IndexFunc(body, func(r rune) bool { + return r < 0x0a + }) != -1 { + glog.Infof("%s:\n%s", prefix, hex.Dump(body)) + } else { + glog.Infof("%s: %s", prefix, string(body)) + } + } +} + +// maxUnstructuredResponseTextBytes is an upper bound on how much output to include in the unstructured error. +const maxUnstructuredResponseTextBytes = 2048 + +// transformUnstructuredResponseError handles an error from the server that is not in a structured form. +// It is expected to transform any response that is not recognizable as a clear server sent error from the +// K8S API using the information provided with the request. 
In practice, HTTP proxies and client libraries +// introduce a level of uncertainty to the responses returned by servers that in common use result in +// unexpected responses. The rough structure is: +// +// 1. Assume the server sends you something sane - JSON + well defined error objects + proper codes +// - this is the happy path +// - when you get this output, trust what the server sends +// 2. Guard against empty fields / bodies in received JSON and attempt to cull sufficient info from them to +// generate a reasonable facsimile of the original failure. +// - Be sure to use a distinct error type or flag that allows a client to distinguish between this and error 1 above +// 3. Handle true disconnect failures / completely malformed data by moving up to a more generic client error +// 4. Distinguish between various connection failures like SSL certificates, timeouts, proxy errors, unexpected +// initial contact, the presence of mismatched body contents from posted content types +// - Give these a separate distinct error type and capture as much as possible of the original message +// +// TODO: introduce transformation of generic http.Client.Do() errors that separates 4. +func (r *Request) transformUnstructuredResponseError(resp *http.Response, req *http.Request, body []byte) error { + if body == nil && resp.Body != nil { + if data, err := ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: maxUnstructuredResponseTextBytes}); err == nil { + body = data + } + } + retryAfter, _ := retryAfterSeconds(resp) + return r.newUnstructuredResponseError(body, isTextResponse(resp), resp.StatusCode, req.Method, retryAfter) +} + +// newUnstructuredResponseError instantiates the appropriate generic error for the provided input. It also logs the body. +func (r *Request) newUnstructuredResponseError(body []byte, isTextResponse bool, statusCode int, method string, retryAfter int) error { + // cap the amount of output we create + if len(body) > maxUnstructuredResponseTextBytes { + body = body[:maxUnstructuredResponseTextBytes] + } + + message := "unknown" + if isTextResponse { + message = strings.TrimSpace(string(body)) + } + var groupResource schema.GroupResource + if len(r.resource) > 0 { + groupResource.Group = r.content.GroupVersion.Group + groupResource.Resource = r.resource + } + return errors.NewGenericServerResponse( + statusCode, + method, + groupResource, + r.resourceName, + message, + retryAfter, + true, + ) +} + +// isTextResponse returns true if the response appears to be a textual media type. +func isTextResponse(resp *http.Response) bool { + contentType := resp.Header.Get("Content-Type") + if len(contentType) == 0 { + return true + } + media, _, err := mime.ParseMediaType(contentType) + if err != nil { + return false + } + return strings.HasPrefix(media, "text/") +} + +// checkWait returns true along with a number of seconds if the server instructed us to wait +// before retrying. +func checkWait(resp *http.Response) (int, bool) { + switch r := resp.StatusCode; { + // any 500 error code and 429 can trigger a wait + case r == errors.StatusTooManyRequests, r >= 500: + default: + return 0, false + } + i, ok := retryAfterSeconds(resp) + return i, ok +} + +// retryAfterSeconds returns the value of the Retry-After header and true, or 0 and false if +// the header was missing or not a valid number. 
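+// Only integer values are understood; an HTTP-date form of Retry-After is treated as absent.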
+func retryAfterSeconds(resp *http.Response) (int, bool) { + if h := resp.Header.Get("Retry-After"); len(h) > 0 { + if i, err := strconv.Atoi(h); err == nil { + return i, true + } + } + return 0, false +} + +// Result contains the result of calling Request.Do(). +type Result struct { + body []byte + contentType string + err error + statusCode int + + decoder runtime.Decoder +} + +// Raw returns the raw result. +func (r Result) Raw() ([]byte, error) { + return r.body, r.err +} + +// Get returns the result as an object, which means it passes through the decoder. +// If the returned object is of type Status and has .Status != StatusSuccess, the +// additional information in Status will be used to enrich the error. +func (r Result) Get() (runtime.Object, error) { + if r.err != nil { + // Check whether the result has a Status object in the body and prefer that. + return nil, r.Error() + } + if r.decoder == nil { + return nil, fmt.Errorf("serializer for %s doesn't exist", r.contentType) + } + + // decode, but if the result is Status return that as an error instead. + out, _, err := r.decoder.Decode(r.body, nil, nil) + if err != nil { + return nil, err + } + switch t := out.(type) { + case *metav1.Status: + // any status besides StatusSuccess is considered an error. + if t.Status != metav1.StatusSuccess { + return nil, errors.FromObject(t) + } + } + return out, nil +} + +// StatusCode returns the HTTP status code of the request. (Only valid if no +// error was returned.) +func (r Result) StatusCode(statusCode *int) Result { + *statusCode = r.statusCode + return r +} + +// Into stores the result into obj, if possible. If obj is nil it is ignored. +// If the returned object is of type Status and has .Status != StatusSuccess, the +// additional information in Status will be used to enrich the error. +func (r Result) Into(obj runtime.Object) error { + if r.err != nil { + // Check whether the result has a Status object in the body and prefer that. + return r.Error() + } + if r.decoder == nil { + return fmt.Errorf("serializer for %s doesn't exist", r.contentType) + } + + out, _, err := r.decoder.Decode(r.body, nil, obj) + if err != nil || out == obj { + return err + } + // if a different object is returned, see if it is Status and avoid double decoding + // the object. + switch t := out.(type) { + case *metav1.Status: + // any status besides StatusSuccess is considered an error. + if t.Status != metav1.StatusSuccess { + return errors.FromObject(t) + } + } + return nil +} + +// WasCreated updates the provided bool pointer to whether the server returned +// 201 created or a different response. +func (r Result) WasCreated(wasCreated *bool) Result { + *wasCreated = r.statusCode == http.StatusCreated + return r +} + +// Error returns the error executing the request, nil if no error occurred. +// If the returned object is of type Status and has Status != StatusSuccess, the +// additional information in Status will be used to enrich the error. +// See the Request.Do() comment for what errors you might get. +func (r Result) Error() error { + // if we have received an unexpected server error, and we have a body and decoder, we can try to extract + // a Status object. 
+ if r.err == nil || !errors.IsUnexpectedServerError(r.err) || len(r.body) == 0 || r.decoder == nil { + return r.err + } + + // attempt to convert the body into a Status object + // to be backwards compatible with old servers that do not return a version, default to "v1" + out, _, err := r.decoder.Decode(r.body, &schema.GroupVersionKind{Version: "v1"}, nil) + if err != nil { + glog.V(5).Infof("body was not decodable (unable to check for Status): %v", err) + return r.err + } + switch t := out.(type) { + case *metav1.Status: + // because we default the kind, we *must* check for StatusFailure + if t.Status == metav1.StatusFailure { + return errors.FromObject(t) + } + } + return r.err +} + +// NameMayNotBe specifies strings that cannot be used as names specified as path segments (like the REST API or etcd store) +var NameMayNotBe = []string{".", ".."} + +// NameMayNotContain specifies substrings that cannot be used in names specified as path segments (like the REST API or etcd store) +var NameMayNotContain = []string{"/", "%"} + +// IsValidPathSegmentName validates the name can be safely encoded as a path segment +func IsValidPathSegmentName(name string) []string { + for _, illegalName := range NameMayNotBe { + if name == illegalName { + return []string{fmt.Sprintf(`may not be '%s'`, illegalName)} + } + } + + var errors []string + for _, illegalContent := range NameMayNotContain { + if strings.Contains(name, illegalContent) { + errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent)) + } + } + + return errors +} + +// IsValidPathSegmentPrefix validates the name can be used as a prefix for a name which will be encoded as a path segment +// It does not check for exact matches with disallowed names, since an arbitrary suffix might make the name valid +func IsValidPathSegmentPrefix(name string) []string { + var errors []string + for _, illegalContent := range NameMayNotContain { + if strings.Contains(name, illegalContent) { + errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent)) + } + } + + return errors +} + +// ValidatePathSegmentName validates the name can be safely encoded as a path segment +func ValidatePathSegmentName(name string, prefix bool) []string { + if prefix { + return IsValidPathSegmentPrefix(name) + } else { + return IsValidPathSegmentName(name) + } +} diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go new file mode 100644 index 000000000..ba43752bc --- /dev/null +++ b/vendor/k8s.io/client-go/rest/transport.go @@ -0,0 +1,99 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "crypto/tls" + "net/http" + + "k8s.io/client-go/transport" +) + +// TLSConfigFor returns a tls.Config that will provide the transport level security defined +// by the provided Config. Will return nil if no transport level security is requested. 
+func TLSConfigFor(config *Config) (*tls.Config, error) { + cfg, err := config.TransportConfig() + if err != nil { + return nil, err + } + return transport.TLSConfigFor(cfg) +} + +// TransportFor returns an http.RoundTripper that will provide the authentication +// or transport level security defined by the provided Config. Will return the +// default http.DefaultTransport if no special case behavior is needed. +func TransportFor(config *Config) (http.RoundTripper, error) { + cfg, err := config.TransportConfig() + if err != nil { + return nil, err + } + return transport.New(cfg) +} + +// HTTPWrappersForConfig wraps a round tripper with any relevant layered behavior from the +// config. Exposed to allow more clients that need HTTP-like behavior but then must hijack +// the underlying connection (like WebSocket or HTTP2 clients). Pure HTTP clients should use +// the higher level TransportFor or RESTClientFor methods. +func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) { + cfg, err := config.TransportConfig() + if err != nil { + return nil, err + } + return transport.HTTPWrappersForConfig(cfg, rt) +} + +// TransportConfig converts a client config to an appropriate transport config. +func (c *Config) TransportConfig() (*transport.Config, error) { + wt := c.WrapTransport + if c.AuthProvider != nil { + provider, err := GetAuthProvider(c.Host, c.AuthProvider, c.AuthConfigPersister) + if err != nil { + return nil, err + } + if wt != nil { + previousWT := wt + wt = func(rt http.RoundTripper) http.RoundTripper { + return provider.WrapTransport(previousWT(rt)) + } + } else { + wt = provider.WrapTransport + } + } + return &transport.Config{ + UserAgent: c.UserAgent, + Transport: c.Transport, + WrapTransport: wt, + TLS: transport.TLSConfig{ + Insecure: c.Insecure, + ServerName: c.ServerName, + CAFile: c.CAFile, + CAData: c.CAData, + CertFile: c.CertFile, + CertData: c.CertData, + KeyFile: c.KeyFile, + KeyData: c.KeyData, + }, + Username: c.Username, + Password: c.Password, + BearerToken: c.BearerToken, + Impersonate: transport.ImpersonationConfig{ + UserName: c.Impersonate.UserName, + Groups: c.Impersonate.Groups, + Extra: c.Impersonate.Extra, + }, + }, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/url_utils.go b/vendor/k8s.io/client-go/rest/url_utils.go similarity index 87% rename from vendor/k8s.io/kubernetes/pkg/client/restclient/url_utils.go rename to vendor/k8s.io/client-go/rest/url_utils.go index 9a83d7874..14f94650a 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/restclient/url_utils.go +++ b/vendor/k8s.io/client-go/rest/url_utils.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,29 +14,26 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restclient +package rest import ( "fmt" "net/url" "path" - "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/apimachinery/pkg/runtime/schema" ) // DefaultServerURL converts a host, host:port, or URL string to the default base server API path // to use with a Client at a given API version following the standard conventions for a // Kubernetes API. 
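The transport helpers just added (TLSConfigFor, TransportFor, HTTPWrappersForConfig) can be exercised with a short sketch like the following; the helper name is illustrative, and callers that only need a plain HTTP client can stop at TransportFor, which already layers the authentication wrappers on top of the TLS transport:

package example

import (
	"net/http"

	"k8s.io/client-go/rest"
)

// httpClientFor builds an *http.Client whose RoundTripper carries the TLS and
// authentication behavior described by the rest.Config.
func httpClientFor(config *rest.Config) (*http.Client, error) {
	rt, err := rest.TransportFor(config)
	if err != nil {
		return nil, err
	}
	return &http.Client{Transport: rt}, nil
}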
-func DefaultServerURL(host, apiPath string, groupVersion unversioned.GroupVersion, defaultTLS bool) (*url.URL, string, error) { +func DefaultServerURL(host, apiPath string, groupVersion schema.GroupVersion, defaultTLS bool) (*url.URL, string, error) { if host == "" { return nil, "", fmt.Errorf("host must be a URL or a host:port pair") } base := host hostURL, err := url.Parse(base) - if err != nil { - return nil, "", err - } - if hostURL.Scheme == "" { + if err != nil || hostURL.Scheme == "" || hostURL.Host == "" { scheme := "http://" if defaultTLS { scheme = "https://" @@ -89,5 +86,5 @@ func defaultServerUrlFor(config *Config) (*url.URL, string, error) { if config.GroupVersion != nil { return DefaultServerURL(host, config.APIPath, *config.GroupVersion, defaultTLS) } - return DefaultServerURL(host, config.APIPath, unversioned.GroupVersion{}, defaultTLS) + return DefaultServerURL(host, config.APIPath, schema.GroupVersion{}, defaultTLS) } diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/urlbackoff.go b/vendor/k8s.io/client-go/rest/urlbackoff.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/client/restclient/urlbackoff.go rename to vendor/k8s.io/client-go/rest/urlbackoff.go index 6c672f08a..eff848abc 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/restclient/urlbackoff.go +++ b/vendor/k8s.io/client-go/rest/urlbackoff.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,15 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restclient +package rest import ( "net/url" "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/util/flowcontrol" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/util/flowcontrol" ) // Set of resp. Codes that we backoff for. diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/versions.go b/vendor/k8s.io/client-go/rest/versions.go similarity index 92% rename from vendor/k8s.io/kubernetes/pkg/client/restclient/versions.go rename to vendor/k8s.io/client-go/rest/versions.go index e12c05c10..9d41812f2 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/restclient/versions.go +++ b/vendor/k8s.io/client-go/rest/versions.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package restclient +package rest import ( "encoding/json" @@ -22,7 +22,7 @@ import ( "net/http" "path" - "k8s.io/kubernetes/pkg/api/unversioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( @@ -57,7 +57,7 @@ func ServerAPIVersions(c *Config) (groupVersions []string, err error) { if err != nil { return nil, err } - var v unversioned.APIVersions + var v metav1.APIVersions defer resp.Body.Close() err = json.NewDecoder(resp.Body).Decode(&v) if err != nil { @@ -71,7 +71,7 @@ func ServerAPIVersions(c *Config) (groupVersions []string, err error) { if err != nil { return nil, err } - var apiGroupList unversioned.APIGroupList + var apiGroupList metav1.APIGroupList defer resp2.Body.Close() err = json.NewDecoder(resp2.Body).Decode(&apiGroupList) if err != nil { diff --git a/vendor/k8s.io/client-go/rest/watch/decoder.go b/vendor/k8s.io/client-go/rest/watch/decoder.go new file mode 100644 index 000000000..73bb63add --- /dev/null +++ b/vendor/k8s.io/client-go/rest/watch/decoder.go @@ -0,0 +1,72 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package versioned + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/streaming" + "k8s.io/apimachinery/pkg/watch" +) + +// Decoder implements the watch.Decoder interface for io.ReadClosers that +// have contents which consist of a series of watchEvent objects encoded +// with the given streaming decoder. The internal objects will be then +// decoded by the embedded decoder. +type Decoder struct { + decoder streaming.Decoder + embeddedDecoder runtime.Decoder +} + +// NewDecoder creates an Decoder for the given writer and codec. +func NewDecoder(decoder streaming.Decoder, embeddedDecoder runtime.Decoder) *Decoder { + return &Decoder{ + decoder: decoder, + embeddedDecoder: embeddedDecoder, + } +} + +// Decode blocks until it can return the next object in the reader. Returns an error +// if the reader is closed or an object can't be decoded. +func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) { + var got metav1.WatchEvent + res, _, err := d.decoder.Decode(nil, &got) + if err != nil { + return "", nil, err + } + if res != &got { + return "", nil, fmt.Errorf("unable to decode to metav1.Event") + } + switch got.Type { + case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error): + default: + return "", nil, fmt.Errorf("got invalid watch event type: %v", got.Type) + } + + obj, err := runtime.Decode(d.embeddedDecoder, got.Object.Raw) + if err != nil { + return "", nil, fmt.Errorf("unable to decode watch event: %v", err) + } + return watch.EventType(got.Type), obj, nil +} + +// Close closes the underlying r. 
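A hedged sketch of how the watch Decoder above is normally wired into a watch.Interface: the streaming decoder yields framed metav1.WatchEvent messages and the embedded decoder handles the objects inside them. Both decoders and the helper name are assumed, since constructing them depends on the caller's serializer setup:

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
	"k8s.io/apimachinery/pkg/watch"
	restwatch "k8s.io/client-go/rest/watch"
)

// newStreamWatcher adapts a framed event stream into a watch.Interface.
func newStreamWatcher(eventDecoder streaming.Decoder, objectDecoder runtime.Decoder) watch.Interface {
	return watch.NewStreamWatcher(restwatch.NewDecoder(eventDecoder, objectDecoder))
}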
+func (d *Decoder) Close() { + d.decoder.Close() +} diff --git a/vendor/k8s.io/kubernetes/pkg/watch/versioned/encoder.go b/vendor/k8s.io/client-go/rest/watch/encoder.go similarity index 77% rename from vendor/k8s.io/kubernetes/pkg/watch/versioned/encoder.go rename to vendor/k8s.io/client-go/rest/watch/encoder.go index 8438ee984..e55aa12d9 100644 --- a/vendor/k8s.io/kubernetes/pkg/watch/versioned/encoder.go +++ b/vendor/k8s.io/client-go/rest/watch/encoder.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,14 +19,16 @@ package versioned import ( "encoding/json" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer/streaming" - "k8s.io/kubernetes/pkg/watch" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/streaming" + "k8s.io/apimachinery/pkg/watch" ) // Encoder serializes watch.Events into io.Writer. The internal objects // are encoded using embedded encoder, and the outer Event is serialized // using encoder. +// TODO: this type is only used by tests type Encoder struct { encoder streaming.Encoder embeddedEncoder runtime.Encoder @@ -47,5 +49,8 @@ func (e *Encoder) Encode(event *watch.Event) error { return err } // FIXME: get rid of json.RawMessage. - return e.encoder.Encode(&Event{string(event.Type), runtime.RawExtension{Raw: json.RawMessage(data)}}) + return e.encoder.Encode(&metav1.WatchEvent{ + Type: string(event.Type), + Object: runtime.RawExtension{Raw: json.RawMessage(data)}, + }) } diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/auth/clientauth.go b/vendor/k8s.io/client-go/tools/auth/clientauth.go similarity index 97% rename from vendor/k8s.io/kubernetes/pkg/client/unversioned/auth/clientauth.go rename to vendor/k8s.io/client-go/tools/auth/clientauth.go index 64b3ef6be..2213b9878 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/auth/clientauth.go +++ b/vendor/k8s.io/client-go/tools/auth/clientauth.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -68,7 +68,7 @@ import ( "io/ioutil" "os" - "k8s.io/kubernetes/pkg/client/restclient" + restclient "k8s.io/client-go/rest" ) // Info holds Kubernetes API authorization config. 
It is intended diff --git a/vendor/k8s.io/client-go/tools/cache/OWNERS b/vendor/k8s.io/client-go/tools/cache/OWNERS new file mode 100755 index 000000000..e923c7709 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/OWNERS @@ -0,0 +1,41 @@ +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- nikhiljindal +- bprashanth +- erictune +- davidopp +- pmorie +- kargakis +- janetkuo +- justinsb +- eparis +- soltysh +- jsafrane +- dims +- madhusudancs +- hongchaodeng +- krousey +- markturansky +- fgrzadkowski +- xiang90 +- mml +- ingvagabund +- resouer +- jessfraz +- david-mcmahon +- mfojtik +- '249043822' +- lixiaobing10051267 +- ddysher +- mqliang +- feihujiang +- sdminonne diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go new file mode 100644 index 000000000..06bc04f85 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -0,0 +1,349 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "sync" + "time" + + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/clock" +) + +// Config contains all the settings for a Controller. +type Config struct { + // The queue for your objects; either a FIFO or + // a DeltaFIFO. Your Process() function should accept + // the output of this Queue's Pop() method. + Queue + + // Something that can list and watch your objects. + ListerWatcher + + // Something that can process your objects. + Process ProcessFunc + + // The type of your objects. + ObjectType runtime.Object + + // Reprocess everything at least this often. + // Note that if it takes longer for you to clear the queue than this + // period, you will end up processing items in the order determined + // by FIFO.Replace(). Currently, this is random. If this is a + // problem, we can change that replacement policy to append new + // things to the end of the queue instead of replacing the entire + // queue. + FullResyncPeriod time.Duration + + // ShouldResync, if specified, is invoked when the controller's reflector determines the next + // periodic sync should occur. If this returns true, it means the reflector should proceed with + // the resync. + ShouldResync ShouldResyncFunc + + // If true, when Process() returns an error, re-enqueue the object. + // TODO: add interface to let you inject a delay/backoff or drop + // the object completely if desired. Pass the object in + // question to this interface as a parameter. + RetryOnError bool +} + +// ShouldResyncFunc is a type of function that indicates if a reflector should perform a +// resync or not. It can be used by a shared informer to support multiple event handlers with custom +// resync periods. +type ShouldResyncFunc func() bool + +// ProcessFunc processes a single object. 
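For context, a deliberately simplified sketch of wiring the Config above into a running low-level controller by hand. The lister-watcher, stop channel, resync period, and the Pod type (and its import path) are stand-ins; most callers use NewInformer instead:

package example

import (
	"time"

	"k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/tools/cache"
)

// runPodController drives a controller directly from a Config.
func runPodController(lw cache.ListerWatcher, stopCh <-chan struct{}) {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, store)

	cfg := &cache.Config{
		Queue:            fifo,
		ListerWatcher:    lw,
		ObjectType:       &v1.Pod{},
		FullResyncPeriod: 30 * time.Second,
		Process: func(obj interface{}) error {
			// obj is a cache.Deltas, ordered oldest to newest.
			for _, d := range obj.(cache.Deltas) {
				_ = d // handle d.Type / d.Object here
			}
			return nil
		},
	}
	cache.New(cfg).Run(stopCh)
}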
+type ProcessFunc func(obj interface{}) error + +// Controller is a generic controller framework. +type controller struct { + config Config + reflector *Reflector + reflectorMutex sync.RWMutex + clock clock.Clock +} + +type Controller interface { + Run(stopCh <-chan struct{}) + HasSynced() bool + LastSyncResourceVersion() string +} + +// New makes a new Controller from the given Config. +func New(c *Config) Controller { + ctlr := &controller{ + config: *c, + clock: &clock.RealClock{}, + } + return ctlr +} + +// Run begins processing items, and will continue until a value is sent down stopCh. +// It's an error to call Run more than once. +// Run blocks; call via go. +func (c *controller) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + go func() { + <-stopCh + c.config.Queue.Close() + }() + r := NewReflector( + c.config.ListerWatcher, + c.config.ObjectType, + c.config.Queue, + c.config.FullResyncPeriod, + ) + r.ShouldResync = c.config.ShouldResync + r.clock = c.clock + + c.reflectorMutex.Lock() + c.reflector = r + c.reflectorMutex.Unlock() + + r.RunUntil(stopCh) + + wait.Until(c.processLoop, time.Second, stopCh) +} + +// Returns true once this controller has completed an initial resource listing +func (c *controller) HasSynced() bool { + return c.config.Queue.HasSynced() +} + +func (c *controller) LastSyncResourceVersion() string { + if c.reflector == nil { + return "" + } + return c.reflector.LastSyncResourceVersion() +} + +// processLoop drains the work queue. +// TODO: Consider doing the processing in parallel. This will require a little thought +// to make sure that we don't end up processing the same object multiple times +// concurrently. +// +// TODO: Plumb through the stopCh here (and down to the queue) so that this can +// actually exit when the controller is stopped. Or just give up on this stuff +// ever being stoppable. Converting this whole package to use Context would +// also be helpful. +func (c *controller) processLoop() { + for { + obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process)) + if err != nil { + if err == FIFOClosedError { + return + } + if c.config.RetryOnError { + // This is the safe way to re-enqueue. + c.config.Queue.AddIfNotPresent(obj) + } + } + } +} + +// ResourceEventHandler can handle notifications for events that happen to a +// resource. The events are informational only, so you can't return an +// error. +// * OnAdd is called when an object is added. +// * OnUpdate is called when an object is modified. Note that oldObj is the +// last known state of the object-- it is possible that several changes +// were combined together, so you can't use this to see every single +// change. OnUpdate is also called when a re-list happens, and it will +// get called even if nothing changed. This is useful for periodically +// evaluating or syncing something. +// * OnDelete will get the final state of the item if it is known, otherwise +// it will get an object of type DeletedFinalStateUnknown. This can +// happen if the watch is closed and misses the delete event and we don't +// notice the deletion until the subsequent re-list. +type ResourceEventHandler interface { + OnAdd(obj interface{}) + OnUpdate(oldObj, newObj interface{}) + OnDelete(obj interface{}) +} + +// ResourceEventHandlerFuncs is an adaptor to let you easily specify as many or +// as few of the notification functions as you want while still implementing +// ResourceEventHandler. 
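The OnDelete caveat above matters in practice: a handler that only cares about deletions should unwrap DeletedFinalStateUnknown tombstones, which arrive when the final delete event was missed. A small sketch (the function name and logging are illustrative):

package example

import (
	"github.com/golang/glog"

	"k8s.io/client-go/tools/cache"
)

// deletionOnlyHandler reacts to deletes and unwraps tombstones before use.
func deletionOnlyHandler() cache.ResourceEventHandler {
	return cache.ResourceEventHandlerFuncs{
		DeleteFunc: func(obj interface{}) {
			if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
				obj = tombstone.Obj
			}
			glog.V(4).Infof("object deleted: %#v", obj)
		},
	}
}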
+type ResourceEventHandlerFuncs struct { + AddFunc func(obj interface{}) + UpdateFunc func(oldObj, newObj interface{}) + DeleteFunc func(obj interface{}) +} + +// OnAdd calls AddFunc if it's not nil. +func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) { + if r.AddFunc != nil { + r.AddFunc(obj) + } +} + +// OnUpdate calls UpdateFunc if it's not nil. +func (r ResourceEventHandlerFuncs) OnUpdate(oldObj, newObj interface{}) { + if r.UpdateFunc != nil { + r.UpdateFunc(oldObj, newObj) + } +} + +// OnDelete calls DeleteFunc if it's not nil. +func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) { + if r.DeleteFunc != nil { + r.DeleteFunc(obj) + } +} + +// DeletionHandlingMetaNamespaceKeyFunc checks for +// DeletedFinalStateUnknown objects before calling +// MetaNamespaceKeyFunc. +func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) { + if d, ok := obj.(DeletedFinalStateUnknown); ok { + return d.Key, nil + } + return MetaNamespaceKeyFunc(obj) +} + +// NewInformer returns a Store and a controller for populating the store +// while also providing event notifications. You should only used the returned +// Store for Get/List operations; Add/Modify/Deletes will cause the event +// notifications to be faulty. +// +// Parameters: +// * lw is list and watch functions for the source of the resource you want to +// be informed of. +// * objType is an object of the type that you expect to receive. +// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate +// calls, even if nothing changed). Otherwise, re-list will be delayed as +// long as possible (until the upstream source closes the watch or times out, +// or you stop the controller). +// * h is the object you want notifications sent to. +// +func NewInformer( + lw ListerWatcher, + objType runtime.Object, + resyncPeriod time.Duration, + h ResourceEventHandler, +) (Store, Controller) { + // This will hold the client state, as we know it. + clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc) + + // This will hold incoming changes. Note how we pass clientState in as a + // KeyLister, that way resync operations will result in the correct set + // of update/delete deltas. + fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, clientState) + + cfg := &Config{ + Queue: fifo, + ListerWatcher: lw, + ObjectType: objType, + FullResyncPeriod: resyncPeriod, + RetryOnError: false, + + Process: func(obj interface{}) error { + // from oldest to newest + for _, d := range obj.(Deltas) { + switch d.Type { + case Sync, Added, Updated: + if old, exists, err := clientState.Get(d.Object); err == nil && exists { + if err := clientState.Update(d.Object); err != nil { + return err + } + h.OnUpdate(old, d.Object) + } else { + if err := clientState.Add(d.Object); err != nil { + return err + } + h.OnAdd(d.Object) + } + case Deleted: + if err := clientState.Delete(d.Object); err != nil { + return err + } + h.OnDelete(d.Object) + } + } + return nil + }, + } + return clientState, New(cfg) +} + +// NewIndexerInformer returns a Indexer and a controller for populating the index +// while also providing event notifications. You should only used the returned +// Index for Get/List operations; Add/Modify/Deletes will cause the event +// notifications to be faulty. +// +// Parameters: +// * lw is list and watch functions for the source of the resource you want to +// be informed of. +// * objType is an object of the type that you expect to receive. 
+// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate +// calls, even if nothing changed). Otherwise, re-list will be delayed as +// long as possible (until the upstream source closes the watch or times out, +// or you stop the controller). +// * h is the object you want notifications sent to. +// +func NewIndexerInformer( + lw ListerWatcher, + objType runtime.Object, + resyncPeriod time.Duration, + h ResourceEventHandler, + indexers Indexers, +) (Indexer, Controller) { + // This will hold the client state, as we know it. + clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) + + // This will hold incoming changes. Note how we pass clientState in as a + // KeyLister, that way resync operations will result in the correct set + // of update/delete deltas. + fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, clientState) + + cfg := &Config{ + Queue: fifo, + ListerWatcher: lw, + ObjectType: objType, + FullResyncPeriod: resyncPeriod, + RetryOnError: false, + + Process: func(obj interface{}) error { + // from oldest to newest + for _, d := range obj.(Deltas) { + switch d.Type { + case Sync, Added, Updated: + if old, exists, err := clientState.Get(d.Object); err == nil && exists { + if err := clientState.Update(d.Object); err != nil { + return err + } + h.OnUpdate(old, d.Object) + } else { + if err := clientState.Add(d.Object); err != nil { + return err + } + h.OnAdd(d.Object) + } + case Deleted: + if err := clientState.Delete(d.Object); err != nil { + return err + } + h.OnDelete(d.Object) + } + } + return nil + }, + } + return clientState, New(cfg) +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go similarity index 86% rename from vendor/k8s.io/kubernetes/pkg/client/cache/delta_fifo.go rename to vendor/k8s.io/client-go/tools/cache/delta_fifo.go index 3cb077faf..a71db6048 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ import ( "fmt" "sync" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/sets" "github.com/golang/glog" ) @@ -118,6 +118,12 @@ type DeltaFIFO struct { // purpose of figuring out which items have been deleted // when Replace() or Delete() is called. knownObjects KeyListerGetter + + // Indication the queue is closed. + // Used to indicate a queue is closed so a control loop can exit when a queue is empty. + // Currently, not used to gate any of CRED operations. + closed bool + closedLock sync.Mutex } var ( @@ -132,6 +138,14 @@ var ( ErrZeroLengthDeltasObject = errors.New("0 length Deltas object; can't get key") ) +// Close the queue. +func (f *DeltaFIFO) Close() { + f.closedLock.Lock() + defer f.closedLock.Unlock() + f.closed = true + f.cond.Broadcast() +} + // KeyOf exposes f's keyFunc, but also detects the key of a Deltas object or // DeletedFinalStateUnknown objects. 
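To make the informer constructors above concrete, a minimal sketch of NewInformer feeding a set of handler callbacks. The lister-watcher, resync period, and the Pod type with its import path are placeholders for whatever the caller actually watches:

package example

import (
	"time"

	"k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/tools/cache"
)

// watchPods wires NewInformer to handler callbacks and runs the controller
// until stopCh is closed.
func watchPods(lw cache.ListerWatcher, stopCh <-chan struct{}) cache.Store {
	store, controller := cache.NewInformer(
		lw,
		&v1.Pod{},
		30*time.Second, // resync period; 0 disables periodic re-list
		cache.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { /* react to adds */ },
			UpdateFunc: func(oldObj, newObj interface{}) { /* react to updates */ },
			DeleteFunc: func(obj interface{}) { /* react to deletes */ },
		},
	)
	go controller.Run(stopCh)
	return store // read-only view for Get/List
}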
func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) { @@ -228,15 +242,21 @@ func (f *DeltaFIFO) AddIfNotPresent(obj interface{}) error { } f.lock.Lock() defer f.lock.Unlock() + f.addIfNotPresent(id, deltas) + return nil +} + +// addIfNotPresent inserts deltas under id if it does not exist, and assumes the caller +// already holds the fifo lock. +func (f *DeltaFIFO) addIfNotPresent(id string, deltas Deltas) { f.populated = true if _, exists := f.items[id]; exists { - return nil + return } f.queue = append(f.queue, id) f.items[id] = deltas f.cond.Broadcast() - return nil } // re-listing and watching can deliver the same update multiple times in any @@ -381,13 +401,25 @@ func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err err return d, exists, nil } +// Checks if the queue is closed +func (f *DeltaFIFO) IsClosed() bool { + f.closedLock.Lock() + defer f.closedLock.Unlock() + if f.closed { + return true + } + return false +} + // Pop blocks until an item is added to the queue, and then returns it. If // multiple items are ready, they are returned in the order in which they were // added/updated. The item is removed from the queue (and the store) before it // is returned, so if you don't successfully process it, you need to add it back // with AddIfNotPresent(). // process function is called under lock, so it is safe update data structures -// in it that need to be in sync with the queue (e.g. knownKeys). +// in it that need to be in sync with the queue (e.g. knownKeys). The PopProcessFunc +// may return an instance of ErrRequeue with a nested error to indicate the current +// item should be requeued (equivalent to calling AddIfNotPresent under the lock). // // Pop returns a 'Deltas', which has a complete list of all the things // that happened to the object (deltas) while it was sitting in the queue. @@ -396,6 +428,13 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { defer f.lock.Unlock() for { for len(f.queue) == 0 { + // When the queue is empty, invocation of Pop() is blocked until new item is enqueued. + // When Close() is called, the f.closed is set and the condition is broadcasted. + // Which causes this loop to continue and return from the Pop(). + if f.IsClosed() { + return nil, FIFOClosedError + } + f.cond.Wait() } id := f.queue[0] @@ -409,9 +448,14 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) { continue } delete(f.items, id) + err := process(item) + if e, ok := err.(ErrRequeue); ok { + f.addIfNotPresent(id, item) + err = e.Err + } // Don't need to copyDeltas here, because we're transferring // ownership to the caller. - return item, process(item) + return item, err } } @@ -424,11 +468,6 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { defer f.lock.Unlock() keys := make(sets.String, len(list)) - if !f.populated { - f.populated = true - f.initialPopulationCount = len(list) - } - for _, item := range list { key, err := f.KeyOf(item) if err != nil { @@ -454,6 +493,12 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { return err } } + + if !f.populated { + f.populated = true + f.initialPopulationCount = len(list) + } + return nil } @@ -461,6 +506,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { // TODO(lavalamp): This may be racy-- we aren't properly locked // with knownObjects. Unproven. 
knownKeys := f.knownObjects.ListKeys() + queuedDeletions := 0 for _, k := range knownKeys { if keys.Has(k) { continue @@ -474,34 +520,69 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { deletedObj = nil glog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k) } + queuedDeletions++ if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil { return err } } + + if !f.populated { + f.populated = true + f.initialPopulationCount = len(list) + queuedDeletions + } + return nil } // Resync will send a sync event for each item func (f *DeltaFIFO) Resync() error { - f.lock.RLock() - defer f.lock.RUnlock() - for _, k := range f.knownObjects.ListKeys() { - obj, exists, err := f.knownObjects.GetByKey(k) - if err != nil { - glog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, k) - continue - } else if !exists { - glog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", k) - continue - } + f.lock.Lock() + defer f.lock.Unlock() - if err := f.queueActionLocked(Sync, obj); err != nil { - return fmt.Errorf("couldn't queue object: %v", err) + keys := f.knownObjects.ListKeys() + for _, k := range keys { + if err := f.syncKeyLocked(k); err != nil { + return err } } return nil } +func (f *DeltaFIFO) syncKey(key string) error { + f.lock.Lock() + defer f.lock.Unlock() + + return f.syncKeyLocked(key) +} + +func (f *DeltaFIFO) syncKeyLocked(key string) error { + obj, exists, err := f.knownObjects.GetByKey(key) + if err != nil { + glog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key) + return nil + } else if !exists { + glog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key) + return nil + } + + // If we are doing Resync() and there is already an event queued for that object, + // we ignore the Resync for it. This is to avoid the race, in which the resync + // comes with the previous value of object (since queueing an event for the object + // doesn't trigger changing the underlying store <knownObjects>. + id, err := f.KeyOf(obj) + if err != nil { + return KeyError{obj, err} + } + if len(f.items[id]) > 0 { + return nil + } + + if err := f.queueActionLocked(Sync, obj); err != nil { + return fmt.Errorf("couldn't queue object: %v", err) + } + return nil +} + // A KeyListerGetter is anything that knows how to list its keys and look up by key. type KeyListerGetter interface { KeyLister diff --git a/vendor/k8s.io/client-go/tools/cache/doc.go b/vendor/k8s.io/client-go/tools/cache/doc.go new file mode 100644 index 000000000..4f593f0d3 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cache is a client-side caching mechanism. It is useful for +// reducing the number of server calls you'd otherwise need to make. 
+// Reflector watches a server and updates a Store. Two stores are provided; +// one that simply caches objects (for example, to allow a scheduler to +// list currently available nodes), and one that additionally acts as +// a FIFO queue (for example, to allow a scheduler to process incoming +// pods). +package cache diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache.go rename to vendor/k8s.io/client-go/tools/cache/expiration_cache.go index ad8684e8c..befac1c75 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache.go +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/util" + "k8s.io/client-go/util/clock" ) // ExpirationCache implements the store interface @@ -38,7 +38,7 @@ import ( type ExpirationCache struct { cacheStorage ThreadSafeStore keyFunc KeyFunc - clock util.Clock + clock clock.Clock expirationPolicy ExpirationPolicy // expirationLock is a write lock used to guarantee that we don't clobber // newly inserted objects because of a stale expiration timestamp comparison @@ -58,7 +58,7 @@ type TTLPolicy struct { Ttl time.Duration // Clock used to calculate ttl expiration - Clock util.Clock + Clock clock.Clock } // IsExpired returns true if the given object is older than the ttl, or it can't @@ -73,7 +73,7 @@ type timestampedEntry struct { timestamp time.Time } -// getTimestampedEntry returnes the timestampedEntry stored under the given key. +// getTimestampedEntry returns the timestampedEntry stored under the given key. func (c *ExpirationCache) getTimestampedEntry(key string) (*timestampedEntry, bool) { item, _ := c.cacheStorage.Get(key) if tsEntry, ok := item.(*timestampedEntry); ok { @@ -202,7 +202,7 @@ func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store { return &ExpirationCache{ cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}), keyFunc: keyFunc, - clock: util.RealClock{}, - expirationPolicy: &TTLPolicy{ttl, util.RealClock{}}, + clock: clock.RealClock{}, + expirationPolicy: &TTLPolicy{ttl, clock.RealClock{}}, } } diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache_fakes.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go similarity index 87% rename from vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache_fakes.go rename to vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go index 3b9597705..ab2f57687 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/expiration_cache_fakes.go +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,8 +17,8 @@ limitations under the License. 
package cache import ( - "k8s.io/kubernetes/pkg/util" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/util/clock" ) type fakeThreadSafeMap struct { @@ -43,7 +43,7 @@ func (p *FakeExpirationPolicy) IsExpired(obj *timestampedEntry) bool { return !p.NeverExpire.Has(key) } -func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock util.Clock) Store { +func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock clock.Clock) Store { cacheStorage := NewThreadSafeStore(Indexers{}, Indices{}) return &ExpirationCache{ cacheStorage: &fakeThreadSafeMap{cacheStorage, deletedKeys}, diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/fake_custom_store.go b/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go similarity index 97% rename from vendor/k8s.io/kubernetes/pkg/client/cache/fake_custom_store.go rename to vendor/k8s.io/client-go/tools/cache/fake_custom_store.go index ccd69ef7b..8d71c2474 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/fake_custom_store.go +++ b/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go similarity index 81% rename from vendor/k8s.io/kubernetes/pkg/client/cache/fifo.go rename to vendor/k8s.io/client-go/tools/cache/fifo.go index eaa35e62c..3f6e2a948 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/fifo.go +++ b/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,21 +17,40 @@ limitations under the License. package cache import ( + "errors" "sync" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/sets" ) // PopProcessFunc is passed to Pop() method of Queue interface. // It is supposed to process the element popped from the queue. type PopProcessFunc func(interface{}) error +// ErrRequeue may be returned by a PopProcessFunc to safely requeue +// the current item. The value of Err will be returned from Pop. +type ErrRequeue struct { + // Err is returned by the Pop function + Err error +} + +var FIFOClosedError error = errors.New("DeltaFIFO: manipulating with closed queue") + +func (e ErrRequeue) Error() string { + if e.Err == nil { + return "the popped item should be requeued without returning an error" + } + return e.Err.Error() +} + // Queue is exactly like a Store, but has a Pop() method too. type Queue interface { Store // Pop blocks until it has something to process. // It returns the object that was process and the result of processing. + // The PopProcessFunc may return an ErrRequeue{...} to indicate the item + // should be requeued before releasing the lock on the queue. Pop(PopProcessFunc) (interface{}, error) // AddIfNotPresent adds a value previously @@ -42,6 +61,9 @@ type Queue interface { // Return true if the first batch of items has been popped HasSynced() bool + + // Close queue + Close() } // Helper function for popping from Queue. 
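A short sketch of the requeue contract described above: a PopProcessFunc can wrap its failure in ErrRequeue so the item is put back before the queue lock is released. The queue, the handle callback, and the error message are illustrative:

package example

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

// popWithRetry pops one item and asks the queue to requeue it on failure.
func popWithRetry(q cache.Queue, handle func(interface{}) error) (interface{}, error) {
	return q.Pop(cache.PopProcessFunc(func(obj interface{}) error {
		if err := handle(obj); err != nil {
			// Returning ErrRequeue re-adds obj (via AddIfNotPresent) under the
			// queue lock; Pop itself returns the wrapped error.
			return cache.ErrRequeue{Err: fmt.Errorf("requeued after failure: %v", err)}
		}
		return nil
	}))
}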
@@ -84,12 +106,26 @@ type FIFO struct { // keyFunc is used to make the key used for queued item insertion and retrieval, and // should be deterministic. keyFunc KeyFunc + + // Indication the queue is closed. + // Used to indicate a queue is closed so a control loop can exit when a queue is empty. + // Currently, not used to gate any of CRED operations. + closed bool + closedLock sync.Mutex } var ( _ = Queue(&FIFO{}) // FIFO is a Queue ) +// Close the queue. +func (f *FIFO) Close() { + f.closedLock.Lock() + defer f.closedLock.Unlock() + f.closed = true + f.cond.Broadcast() +} + // Return true if an Add/Update/Delete/AddIfNotPresent are called first, // or an Update called first but the first batch of items inserted by Replace() has been popped func (f *FIFO) HasSynced() bool { @@ -129,15 +165,21 @@ func (f *FIFO) AddIfNotPresent(obj interface{}) error { } f.lock.Lock() defer f.lock.Unlock() + f.addIfNotPresent(id, obj) + return nil +} + +// addIfNotPresent assumes the fifo lock is already held and adds the the provided +// item to the queue under id if it does not already exist. +func (f *FIFO) addIfNotPresent(id string, obj interface{}) { f.populated = true if _, exists := f.items[id]; exists { - return nil + return } f.queue = append(f.queue, id) f.items[id] = obj f.cond.Broadcast() - return nil } // Update is the same as Add in this implementation. @@ -200,6 +242,16 @@ func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) { return item, exists, nil } +// Checks if the queue is closed +func (f *FIFO) IsClosed() bool { + f.closedLock.Lock() + defer f.closedLock.Unlock() + if f.closed { + return true + } + return false +} + // Pop waits until an item is ready and processes it. If multiple items are // ready, they are returned in the order in which they were added/updated. // The item is removed from the queue (and the store) before it is processed, @@ -211,6 +263,13 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { defer f.lock.Unlock() for { for len(f.queue) == 0 { + // When the queue is empty, invocation of Pop() is blocked until new item is enqueued. + // When Close() is called, the f.closed is set and the condition is broadcasted. + // Which causes this loop to continue and return from the Pop(). + if f.IsClosed() { + return nil, FIFOClosedError + } + f.cond.Wait() } id := f.queue[0] @@ -224,7 +283,12 @@ func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) { continue } delete(f.items, id) - return item, process(item) + err := process(item) + if e, ok := err.(ErrRequeue); ok { + f.addIfNotPresent(id, item) + err = e.Err + } + return item, err } } diff --git a/vendor/k8s.io/client-go/tools/cache/index.go b/vendor/k8s.io/client-go/tools/cache/index.go new file mode 100644 index 000000000..42fc6c7a2 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/index.go @@ -0,0 +1,85 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Indexer is a storage interface that lets you list objects using multiple indexing functions +type Indexer interface { + Store + // Retrieve list of objects that match on the named indexing function + Index(indexName string, obj interface{}) ([]interface{}, error) + // ListIndexFuncValues returns the list of generated values of an Index func + ListIndexFuncValues(indexName string) []string + // ByIndex lists object that match on the named indexing function with the exact key + ByIndex(indexName, indexKey string) ([]interface{}, error) + // GetIndexer return the indexers + GetIndexers() Indexers + + // AddIndexers adds more indexers to this store. If you call this after you already have data + // in the store, the results are undefined. + AddIndexers(newIndexers Indexers) error +} + +// IndexFunc knows how to provide an indexed value for an object. +type IndexFunc func(obj interface{}) ([]string, error) + +// IndexFuncToKeyFuncAdapter adapts an indexFunc to a keyFunc. This is only useful if your index function returns +// unique values for every object. This is conversion can create errors when more than one key is found. You +// should prefer to make proper key and index functions. +func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc { + return func(obj interface{}) (string, error) { + indexKeys, err := indexFunc(obj) + if err != nil { + return "", err + } + if len(indexKeys) > 1 { + return "", fmt.Errorf("too many keys: %v", indexKeys) + } + if len(indexKeys) == 0 { + return "", fmt.Errorf("unexpected empty indexKeys") + } + return indexKeys[0], nil + } +} + +const ( + NamespaceIndex string = "namespace" +) + +// MetaNamespaceIndexFunc is a default index function that indexes based on an object's namespace +func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) { + meta, err := meta.Accessor(obj) + if err != nil { + return []string{""}, fmt.Errorf("object has no meta: %v", err) + } + return []string{meta.GetNamespace()}, nil +} + +// Index maps the indexed value to a set of keys in the store that match on that value +type Index map[string]sets.String + +// Indexers maps a name to a IndexFunc +type Indexers map[string]IndexFunc + +// Indices maps a name to an Index +type Indices map[string]Index diff --git a/vendor/k8s.io/client-go/tools/cache/listers.go b/vendor/k8s.io/client-go/tools/cache/listers.go new file mode 100644 index 000000000..27d51a6b3 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/listers.go @@ -0,0 +1,160 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// AppendFunc is used to add a matching item to whatever list the caller is using +type AppendFunc func(interface{}) + +func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error { + for _, m := range store.List() { + metadata, err := meta.Accessor(m) + if err != nil { + return err + } + if selector.Matches(labels.Set(metadata.GetLabels())) { + appendFn(m) + } + } + return nil +} + +func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error { + if namespace == metav1.NamespaceAll { + for _, m := range indexer.List() { + metadata, err := meta.Accessor(m) + if err != nil { + return err + } + if selector.Matches(labels.Set(metadata.GetLabels())) { + appendFn(m) + } + } + return nil + } + + items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace}) + if err != nil { + // Ignore error; do slow search without index. + glog.Warningf("can not retrieve list of objects using index : %v", err) + for _, m := range indexer.List() { + metadata, err := meta.Accessor(m) + if err != nil { + return err + } + if metadata.GetNamespace() == namespace && selector.Matches(labels.Set(metadata.GetLabels())) { + appendFn(m) + } + + } + return nil + } + for _, m := range items { + metadata, err := meta.Accessor(m) + if err != nil { + return err + } + if selector.Matches(labels.Set(metadata.GetLabels())) { + appendFn(m) + } + } + + return nil +} + +// GenericLister is a lister skin on a generic Indexer +type GenericLister interface { + // List will return all objects across namespaces + List(selector labels.Selector) (ret []runtime.Object, err error) + // Get will attempt to retrieve assuming that name==key + Get(name string) (runtime.Object, error) + // ByNamespace will give you a GenericNamespaceLister for one namespace + ByNamespace(namespace string) GenericNamespaceLister +} + +// GenericNamespaceLister is a lister skin on a generic Indexer +type GenericNamespaceLister interface { + // List will return all objects in this namespace + List(selector labels.Selector) (ret []runtime.Object, err error) + // Get will attempt to retrieve by namespace and name + Get(name string) (runtime.Object, error) +} + +func NewGenericLister(indexer Indexer, resource schema.GroupResource) GenericLister { + return &genericLister{indexer: indexer, resource: resource} +} + +type genericLister struct { + indexer Indexer + resource schema.GroupResource +} + +func (s *genericLister) List(selector labels.Selector) (ret []runtime.Object, err error) { + err = ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(runtime.Object)) + }) + return ret, err +} + +func (s *genericLister) ByNamespace(namespace string) GenericNamespaceLister { + return &genericNamespaceLister{indexer: s.indexer, namespace: namespace, resource: s.resource} +} + +func (s *genericLister) Get(name string) (runtime.Object, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(s.resource, name) + } + return obj.(runtime.Object), nil +} + +type genericNamespaceLister struct { + indexer Indexer + namespace string + resource schema.GroupResource +} + +func (s 
*genericNamespaceLister) List(selector labels.Selector) (ret []runtime.Object, err error) { + err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(runtime.Object)) + }) + return ret, err +} + +func (s *genericNamespaceLister) Get(name string) (runtime.Object, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(s.resource, name) + } + return obj.(runtime.Object), nil +} diff --git a/vendor/k8s.io/client-go/tools/cache/listwatch.go b/vendor/k8s.io/client-go/tools/cache/listwatch.go new file mode 100644 index 000000000..af01d4745 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/listwatch.go @@ -0,0 +1,162 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" +) + +// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource. +type ListerWatcher interface { + // List should return a list type object; the Items field will be extracted, and the + // ResourceVersion field will be used to start the watch in the right place. + List(options metav1.ListOptions) (runtime.Object, error) + // Watch should begin a watch at the specified version. + Watch(options metav1.ListOptions) (watch.Interface, error) +} + +// ListFunc knows how to list resources +type ListFunc func(options metav1.ListOptions) (runtime.Object, error) + +// WatchFunc knows how to watch resources +type WatchFunc func(options metav1.ListOptions) (watch.Interface, error) + +// ListWatch knows how to list and watch a set of apiserver resources. It satisfies the ListerWatcher interface. +// It is a convenience function for users of NewReflector, etc. +// ListFunc and WatchFunc must not be nil +type ListWatch struct { + ListFunc ListFunc + WatchFunc WatchFunc +} + +// Getter interface knows how to access Get method from RESTClient. +type Getter interface { + Get() *restclient.Request +} + +// NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace and field selector. +func NewListWatchFromClient(c Getter, resource string, namespace string, fieldSelector fields.Selector) *ListWatch { + listFunc := func(options metav1.ListOptions) (runtime.Object, error) { + return c.Get(). + Namespace(namespace). + Resource(resource). + VersionedParams(&options, metav1.ParameterCodec). + FieldsSelectorParam(fieldSelector). + Do(). + Get() + } + watchFunc := func(options metav1.ListOptions) (watch.Interface, error) { + options.Watch = true + return c.Get(). + Namespace(namespace). + Resource(resource). + VersionedParams(&options, metav1.ParameterCodec). + FieldsSelectorParam(fieldSelector). 
+ Watch() + } + return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc} +} + +func timeoutFromListOptions(options metav1.ListOptions) time.Duration { + if options.TimeoutSeconds != nil { + return time.Duration(*options.TimeoutSeconds) * time.Second + } + return 0 +} + +// List a set of apiserver resources +func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) { + return lw.ListFunc(options) +} + +// Watch a set of apiserver resources +func (lw *ListWatch) Watch(options metav1.ListOptions) (watch.Interface, error) { + return lw.WatchFunc(options) +} + +// TODO: check for watch expired error and retry watch from latest point? Same issue exists for Until. +func ListWatchUntil(timeout time.Duration, lw ListerWatcher, conditions ...watch.ConditionFunc) (*watch.Event, error) { + if len(conditions) == 0 { + return nil, nil + } + + list, err := lw.List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + initialItems, err := meta.ExtractList(list) + if err != nil { + return nil, err + } + + // use the initial items as simulated "adds" + var lastEvent *watch.Event + currIndex := 0 + passedConditions := 0 + for _, condition := range conditions { + // check the next condition against the previous event and short circuit waiting for the next watch + if lastEvent != nil { + done, err := condition(*lastEvent) + if err != nil { + return lastEvent, err + } + if done { + passedConditions = passedConditions + 1 + continue + } + } + + ConditionSucceeded: + for currIndex < len(initialItems) { + lastEvent = &watch.Event{Type: watch.Added, Object: initialItems[currIndex]} + currIndex++ + + done, err := condition(*lastEvent) + if err != nil { + return lastEvent, err + } + if done { + passedConditions = passedConditions + 1 + break ConditionSucceeded + } + } + } + if passedConditions == len(conditions) { + return lastEvent, nil + } + remainingConditions := conditions[passedConditions:] + + metaObj, err := meta.ListAccessor(list) + if err != nil { + return nil, err + } + currResourceVersion := metaObj.GetResourceVersion() + + watchInterface, err := lw.Watch(metav1.ListOptions{ResourceVersion: currResourceVersion}) + if err != nil { + return nil, err + } + + return watch.Until(timeout, watchInterface, remainingConditions...) +} diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go new file mode 100644 index 000000000..cc7399114 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go @@ -0,0 +1,135 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "fmt" + "os" + "reflect" + "strconv" + "sync" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/client-go/pkg/api" +) + +var mutationDetectionEnabled = false + +func init() { + mutationDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_CACHE_MUTATION_DETECTOR")) +} + +type CacheMutationDetector interface { + AddObject(obj interface{}) + Run(stopCh <-chan struct{}) +} + +func NewCacheMutationDetector(name string) CacheMutationDetector { + if !mutationDetectionEnabled { + return dummyMutationDetector{} + } + return &defaultCacheMutationDetector{name: name, period: 1 * time.Second} +} + +type dummyMutationDetector struct{} + +func (dummyMutationDetector) Run(stopCh <-chan struct{}) { +} +func (dummyMutationDetector) AddObject(obj interface{}) { +} + +// defaultCacheMutationDetector gives a way to detect if a cached object has been mutated +// It has a list of cached objects and their copies. I haven't thought of a way +// to see WHO is mutating it, just that it's getting mutated. +type defaultCacheMutationDetector struct { + name string + period time.Duration + + lock sync.Mutex + cachedObjs []cacheObj + + // failureFunc is injectable for unit testing. If you don't have it, the process will panic. + // This panic is intentional, since turning on this detection indicates you want a strong + // failure signal. This failure is effectively a p0 bug and you can't trust process results + // after a mutation anyway. + failureFunc func(message string) +} + +// cacheObj holds the actual object and a copy +type cacheObj struct { + cached interface{} + copied interface{} +} + +func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) { + // we DON'T want protection from panics. If we're running this code, we want to die + go func() { + for { + d.CompareObjects() + + select { + case <-stopCh: + return + case <-time.After(d.period): + } + } + }() +} + +// AddObject makes a deep copy of the object for later comparison. It only works on runtime.Object +// but that covers the vast majority of our cached objects +func (d *defaultCacheMutationDetector) AddObject(obj interface{}) { + if _, ok := obj.(DeletedFinalStateUnknown); ok { + return + } + if _, ok := obj.(runtime.Object); !ok { + return + } + + copiedObj, err := api.Scheme.Copy(obj.(runtime.Object)) + if err != nil { + return + } + + d.lock.Lock() + defer d.lock.Unlock() + d.cachedObjs = append(d.cachedObjs, cacheObj{cached: obj, copied: copiedObj}) +} + +func (d *defaultCacheMutationDetector) CompareObjects() { + d.lock.Lock() + defer d.lock.Unlock() + + altered := false + for i, obj := range d.cachedObjs { + if !reflect.DeepEqual(obj.cached, obj.copied) { + fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectDiff(obj.cached, obj.copied)) + altered = true + } + } + + if altered { + msg := fmt.Sprintf("cache %s modified", d.name) + if d.failureFunc != nil { + d.failureFunc(msg) + return + } + panic(msg) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go similarity index 84% rename from vendor/k8s.io/kubernetes/pkg/client/cache/reflector.go rename to vendor/k8s.io/client-go/tools/cache/reflector.go index 3a5025a28..b43f43a5e 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -34,27 +34,19 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - apierrs "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/runtime" - utilruntime "k8s.io/kubernetes/pkg/util/runtime" - "k8s.io/kubernetes/pkg/util/wait" - "k8s.io/kubernetes/pkg/watch" + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/util/clock" ) -// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource. -type ListerWatcher interface { - // List should return a list type object; the Items field will be extracted, and the - // ResourceVersion field will be used to start the watch in the right place. - List(options api.ListOptions) (runtime.Object, error) - // Watch should begin a watch at the specified version. - Watch(options api.ListOptions) (watch.Interface, error) -} - // Reflector watches a specified resource and causes all changes to be reflected in the given store. type Reflector struct { - // name identifies this reflector. By default it will be a file:line if possible. + // name identifies this reflector. By default it will be a file:line if possible. name string // The type of object we expect to place in the store. @@ -67,10 +59,9 @@ type Reflector struct { // the beginning of the next one. period time.Duration resyncPeriod time.Duration - // now() returns current time - exposed for testing purposes - now func() time.Time - // nextResync is approximate time of next resync (0 if not scheduled) - nextResync time.Time + ShouldResync func() bool + // clock allows tests to manipulate time + clock clock.Clock // lastSyncResourceVersion is the resource version token last // observed when doing a sync with the underlying store // it is thread safe, but not synchronized with the underlying store @@ -85,12 +76,6 @@ var ( // However, it can be modified to avoid periodic resync to break the // TCP connection. minWatchTimeout = 5 * time.Minute - // If we are within 'forceResyncThreshold' from the next planned resync - // and are just before issuing Watch(), resync will be forced now. - forceResyncThreshold = 3 * time.Second - // We try to set timeouts for Watch() so that we will finish about - // than 'timeoutThreshold' from next planned periodic resync. - timeoutThreshold = 1 * time.Second ) // NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector @@ -120,14 +105,14 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, expectedType: reflect.TypeOf(expectedType), period: time.Second, resyncPeriod: resyncPeriod, - now: time.Now, + clock: &clock.RealClock{}, } return r } -// internalPackages are packages that ignored when creating a default reflector name. These packages are in the common +// internalPackages are packages that ignored when creating a default reflector name. 
These packages are in the common // call chains to NewReflector, so they'd be low entropy names for reflectors -var internalPackages = []string{"kubernetes/pkg/client/cache/", "kubernetes/pkg/controller/framework/", "/runtime/asm_"} +var internalPackages = []string{"client-go/tools/cache/", "/runtime/asm_"} // getDefaultReflectorName walks back through the call stack until we find a caller from outside of the ignoredPackages // it returns back a shortpath/filename:line to aid in identification of this reflector when it starts logging @@ -164,10 +149,10 @@ func hasPackage(file string, ignoredPackages []string) bool { return false } -// trimPackagePrefix reduces dulpicate values off the front of a package name. +// trimPackagePrefix reduces duplicate values off the front of a package name. func trimPackagePrefix(file string) string { - if l := strings.LastIndex(file, "k8s.io/kubernetes/pkg/"); l >= 0 { - return file[l+len("k8s.io/kubernetes/"):] + if l := strings.LastIndex(file, "k8s.io/client-go/pkg/"); l >= 0 { + return file[l+len("k8s.io/client-go/"):] } if l := strings.LastIndex(file, "/src/"); l >= 0 { return file[l+5:] @@ -234,16 +219,14 @@ var ( // required, and a cleanup function. func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) { if r.resyncPeriod == 0 { - r.nextResync = time.Time{} return neverExitWatch, func() bool { return false } } // The cleanup function is required: imagine the scenario where watches // always fail so we end up listing frequently. Then, if we don't // manually stop the timer, we could end up with many timers active // concurrently. - r.nextResync = r.now().Add(r.resyncPeriod) - t := time.NewTimer(r.resyncPeriod) - return t.C, t.Stop + t := r.clock.NewTimer(r.resyncPeriod) + return t.C(), t.Stop } // ListAndWatch first lists all items and get the resource version at the moment of call, @@ -258,7 +241,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { // Explicitly set "0" as resource version - it's fine for the List() // to be served from cache and potentially be delayed relative to // etcd contents. Reflector framework will catch up via Watch() eventually. - options := api.ListOptions{ResourceVersion: "0"} + options := metav1.ListOptions{ResourceVersion: "0"} list, err := r.listerWatcher.List(options) if err != nil { return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err) @@ -278,18 +261,24 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { r.setLastSyncResourceVersion(resourceVersion) resyncerrc := make(chan error, 1) + cancelCh := make(chan struct{}) + defer close(cancelCh) go func() { for { select { case <-resyncCh: case <-stopCh: return - } - glog.V(4).Infof("%s: next resync planned for %#v, forcing now", r.name, r.nextResync) - if err := r.store.Resync(); err != nil { - resyncerrc <- err + case <-cancelCh: return } + if r.ShouldResync == nil || r.ShouldResync() { + glog.V(4).Infof("%s: forcing resync", r.name) + if err := r.store.Resync(); err != nil { + resyncerrc <- err + return + } + } cleanup() resyncCh, cleanup = r.resyncChan() } @@ -297,7 +286,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { for { timemoutseconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) - options = api.ListOptions{ + options = metav1.ListOptions{ ResourceVersion: resourceVersion, // We want to avoid situations of hanging watchers. Stop any wachers that do not // receive any events within the timeout window. 
@@ -349,7 +338,7 @@ func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) err // watchHandler watches w and keeps *resourceVersion up to date. func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error { - start := time.Now() + start := r.clock.Now() eventCount := 0 // Stopping the watcher should be idempotent and if we return from this function there's no way @@ -382,14 +371,23 @@ loop: newResourceVersion := meta.GetResourceVersion() switch event.Type { case watch.Added: - r.store.Add(event.Object) + err := r.store.Add(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to add watch event object (%#v) to store: %v", r.name, event.Object, err)) + } case watch.Modified: - r.store.Update(event.Object) + err := r.store.Update(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to update watch event object (%#v) to store: %v", r.name, event.Object, err)) + } case watch.Deleted: // TODO: Will any consumers need access to the "last known // state", which is passed in event.Object? If so, may need // to change this. - r.store.Delete(event.Object) + err := r.store.Delete(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", r.name, event.Object, err)) + } default: utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", r.name, event)) } @@ -399,7 +397,7 @@ loop: } } - watchDuration := time.Now().Sub(start) + watchDuration := r.clock.Now().Sub(start) if watchDuration < 1*time.Second && eventCount == 0 { glog.V(4).Infof("%s: Unexpected watch close - watch lasted less than a second and no items received", r.name) return errors.New("very short watch") diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go new file mode 100644 index 000000000..1349f335d --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -0,0 +1,581 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/clock" + + "github.com/golang/glog" +) + +// SharedInformer has a shared data cache and is capable of distributing notifications for changes +// to the cache to multiple listeners who registered via AddEventHandler. If you use this, there is +// one behavior change compared to a standard Informer. When you receive a notification, the cache +// will be AT LEAST as fresh as the notification, but it MAY be more fresh. You should NOT depend +// on the contents of the cache exactly matching the notification you've received in handler +// functions. If there was a create, followed by a delete, the cache may NOT have your item. 
This +// has advantages over the broadcaster since it allows us to share a common cache across many +// controllers. Extending the broadcaster would have required us keep duplicate caches for each +// watch. +type SharedInformer interface { + // AddEventHandler adds an event handler to the shared informer using the shared informer's resync + // period. Events to a single handler are delivered sequentially, but there is no coordination + // between different handlers. + AddEventHandler(handler ResourceEventHandler) + // AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the + // specified resync period. Events to a single handler are delivered sequentially, but there is + // no coordination between different handlers. + AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) + // GetStore returns the Store. + GetStore() Store + // GetController gives back a synthetic interface that "votes" to start the informer + GetController() Controller + // Run starts the shared informer, which will be stopped when stopCh is closed. + Run(stopCh <-chan struct{}) + // HasSynced returns true if the shared informer's store has synced. + HasSynced() bool + // LastSyncResourceVersion is the resource version observed when last synced with the underlying + // store. The value returned is not synchronized with access to the underlying store and is not + // thread-safe. + LastSyncResourceVersion() string +} + +type SharedIndexInformer interface { + SharedInformer + // AddIndexers add indexers to the informer before it starts. + AddIndexers(indexers Indexers) error + GetIndexer() Indexer +} + +// NewSharedInformer creates a new instance for the listwatcher. +func NewSharedInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer { + return NewSharedIndexInformer(lw, objType, resyncPeriod, Indexers{}) +} + +// NewSharedIndexInformer creates a new instance for the listwatcher. +func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer { + realClock := &clock.RealClock{} + sharedIndexInformer := &sharedIndexInformer{ + processor: &sharedProcessor{clock: realClock}, + indexer: NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers), + listerWatcher: lw, + objectType: objType, + resyncCheckPeriod: defaultEventHandlerResyncPeriod, + defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod, + cacheMutationDetector: NewCacheMutationDetector(fmt.Sprintf("%T", objType)), + clock: realClock, + } + return sharedIndexInformer +} + +// InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced. +type InformerSynced func() bool + +// syncedPollPeriod controls how often you look at the status of your sync funcs +const syncedPollPeriod = 100 * time.Millisecond + +// WaitForCacheSync waits for caches to populate. 
It returns true if it was successful, false +// if the controller should shut down +func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { + err := wait.PollUntil(syncedPollPeriod, + func() (bool, error) { + for _, syncFunc := range cacheSyncs { + if !syncFunc() { + return false, nil + } + } + return true, nil + }, + stopCh) + if err != nil { + glog.V(2).Infof("stop requested") + return false + } + + glog.V(4).Infof("caches populated") + return true +} + +type sharedIndexInformer struct { + indexer Indexer + controller Controller + + processor *sharedProcessor + cacheMutationDetector CacheMutationDetector + + // This block is tracked to handle late initialization of the controller + listerWatcher ListerWatcher + objectType runtime.Object + + // resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call + // shouldResync to check if any of our listeners need a resync. + resyncCheckPeriod time.Duration + // defaultEventHandlerResyncPeriod is the default resync period for any handlers added via + // AddEventHandler (i.e. they don't specify one and just want to use the shared informer's default + // value). + defaultEventHandlerResyncPeriod time.Duration + // clock allows for testability + clock clock.Clock + + started bool + startedLock sync.Mutex + + // blockDeltas gives a way to stop all event distribution so that a late event handler + // can safely join the shared informer. + blockDeltas sync.Mutex + // stopCh is the channel used to stop the main Run process. We have to track it so that + // late joiners can have a proper stop + stopCh <-chan struct{} +} + +// dummyController hides the fact that a SharedInformer is different from a dedicated one +// where a caller can `Run`. The run method is disconnected in this case, because higher +// level logic will decide when to start the SharedInformer and related controller. +// Because returning information back is always asynchronous, the legacy callers shouldn't +// notice any change in behavior.
+type dummyController struct { + informer *sharedIndexInformer +} + +func (v *dummyController) Run(stopCh <-chan struct{}) { +} + +func (v *dummyController) HasSynced() bool { + return v.informer.HasSynced() +} + +func (c *dummyController) LastSyncResourceVersion() string { + return "" +} + +type updateNotification struct { + oldObj interface{} + newObj interface{} +} + +type addNotification struct { + newObj interface{} +} + +type deleteNotification struct { + oldObj interface{} +} + +func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, nil, s.indexer) + + cfg := &Config{ + Queue: fifo, + ListerWatcher: s.listerWatcher, + ObjectType: s.objectType, + FullResyncPeriod: s.resyncCheckPeriod, + RetryOnError: false, + ShouldResync: s.processor.shouldResync, + + Process: s.HandleDeltas, + } + + func() { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + s.controller = New(cfg) + s.controller.(*controller).clock = s.clock + s.started = true + }() + + s.stopCh = stopCh + s.cacheMutationDetector.Run(stopCh) + s.processor.run(stopCh) + s.controller.Run(stopCh) +} + +func (s *sharedIndexInformer) isStarted() bool { + s.startedLock.Lock() + defer s.startedLock.Unlock() + return s.started +} + +func (s *sharedIndexInformer) HasSynced() bool { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.controller == nil { + return false + } + return s.controller.HasSynced() +} + +func (s *sharedIndexInformer) LastSyncResourceVersion() string { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.controller == nil { + return "" + } + return s.controller.LastSyncResourceVersion() +} + +func (s *sharedIndexInformer) GetStore() Store { + return s.indexer +} + +func (s *sharedIndexInformer) GetIndexer() Indexer { + return s.indexer +} + +func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.started { + return fmt.Errorf("informer has already started") + } + + return s.indexer.AddIndexers(indexers) +} + +func (s *sharedIndexInformer) GetController() Controller { + return &dummyController{informer: s} +} + +func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) { + s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod) +} + +func determineResyncPeriod(desired, check time.Duration) time.Duration { + if desired == 0 { + return desired + } + if check == 0 { + glog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired) + return 0 + } + if desired < check { + glog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check) + return check + } + return desired +} + +const minimumResyncPeriod = 1 * time.Second + +func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if resyncPeriod > 0 { + if resyncPeriod < minimumResyncPeriod { + glog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod) + resyncPeriod = minimumResyncPeriod + } + + if resyncPeriod < s.resyncCheckPeriod { + if s.started { + glog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. 
Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod) + resyncPeriod = s.resyncCheckPeriod + } else { + // if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update + // resyncCheckPeriod to match resyncPeriod and adjust the resync periods of all the listeners + // accordingly + s.resyncCheckPeriod = resyncPeriod + s.processor.resyncCheckPeriodChanged(resyncPeriod) + } + } + } + + listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now()) + + if !s.started { + s.processor.addListener(listener) + return + } + + // in order to safely join, we have to + // 1. stop sending add/update/delete notifications + // 2. do a list against the store + // 3. send synthetic "Add" events to the new handler + // 4. unblock + s.blockDeltas.Lock() + defer s.blockDeltas.Unlock() + + s.processor.addListener(listener) + + go listener.run(s.stopCh) + go listener.pop(s.stopCh) + + items := s.indexer.List() + for i := range items { + listener.add(addNotification{newObj: items[i]}) + } +} + +func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error { + s.blockDeltas.Lock() + defer s.blockDeltas.Unlock() + + // from oldest to newest + for _, d := range obj.(Deltas) { + switch d.Type { + case Sync, Added, Updated: + isSync := d.Type == Sync + s.cacheMutationDetector.AddObject(d.Object) + if old, exists, err := s.indexer.Get(d.Object); err == nil && exists { + if err := s.indexer.Update(d.Object); err != nil { + return err + } + s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object}, isSync) + } else { + if err := s.indexer.Add(d.Object); err != nil { + return err + } + s.processor.distribute(addNotification{newObj: d.Object}, isSync) + } + case Deleted: + if err := s.indexer.Delete(d.Object); err != nil { + return err + } + s.processor.distribute(deleteNotification{oldObj: d.Object}, false) + } + } + return nil +} + +type sharedProcessor struct { + listenersLock sync.RWMutex + listeners []*processorListener + syncingListeners []*processorListener + clock clock.Clock +} + +func (p *sharedProcessor) addListener(listener *processorListener) { + p.listenersLock.Lock() + defer p.listenersLock.Unlock() + + p.listeners = append(p.listeners, listener) + p.syncingListeners = append(p.syncingListeners, listener) +} + +func (p *sharedProcessor) distribute(obj interface{}, sync bool) { + p.listenersLock.RLock() + defer p.listenersLock.RUnlock() + + if sync { + for _, listener := range p.syncingListeners { + listener.add(obj) + } + } else { + for _, listener := range p.listeners { + listener.add(obj) + } + } +} + +func (p *sharedProcessor) run(stopCh <-chan struct{}) { + p.listenersLock.RLock() + defer p.listenersLock.RUnlock() + + for _, listener := range p.listeners { + go listener.run(stopCh) + go listener.pop(stopCh) + } +} + +// shouldResync queries every listener to determine if any of them need a resync, based on each +// listener's resyncPeriod. +func (p *sharedProcessor) shouldResync() bool { + p.listenersLock.Lock() + defer p.listenersLock.Unlock() + + p.syncingListeners = []*processorListener{} + + resyncNeeded := false + now := p.clock.Now() + for _, listener := range p.listeners { + // need to loop through all the listeners to see if they need to resync so we can prepare any + // listeners that are going to be resyncing. 
+ if listener.shouldResync(now) { + resyncNeeded = true + p.syncingListeners = append(p.syncingListeners, listener) + listener.determineNextResync(now) + } + } + return resyncNeeded +} + +func (p *sharedProcessor) resyncCheckPeriodChanged(resyncCheckPeriod time.Duration) { + p.listenersLock.RLock() + defer p.listenersLock.RUnlock() + + for _, listener := range p.listeners { + resyncPeriod := determineResyncPeriod(listener.requestedResyncPeriod, resyncCheckPeriod) + listener.setResyncPeriod(resyncPeriod) + } +} + +type processorListener struct { + // lock/cond protects access to 'pendingNotifications'. + lock sync.RWMutex + cond sync.Cond + + // pendingNotifications is an unbounded slice that holds all notifications not yet distributed + // there is one per listener, but a failing/stalled listener will have infinite pendingNotifications + // added until we OOM. + // TODO This is no worse that before, since reflectors were backed by unbounded DeltaFIFOs, but + // we should try to do something better + pendingNotifications []interface{} + + nextCh chan interface{} + + handler ResourceEventHandler + + // requestedResyncPeriod is how frequently the listener wants a full resync from the shared informer + requestedResyncPeriod time.Duration + // resyncPeriod is how frequently the listener wants a full resync from the shared informer. This + // value may differ from requestedResyncPeriod if the shared informer adjusts it to align with the + // informer's overall resync check period. + resyncPeriod time.Duration + // nextResync is the earliest time the listener should get a full resync + nextResync time.Time + // resyncLock guards access to resyncPeriod and nextResync + resyncLock sync.Mutex +} + +func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time) *processorListener { + ret := &processorListener{ + pendingNotifications: []interface{}{}, + nextCh: make(chan interface{}), + handler: handler, + requestedResyncPeriod: requestedResyncPeriod, + resyncPeriod: resyncPeriod, + } + + ret.cond.L = &ret.lock + + ret.determineNextResync(now) + + return ret +} + +func (p *processorListener) add(notification interface{}) { + p.lock.Lock() + defer p.lock.Unlock() + + p.pendingNotifications = append(p.pendingNotifications, notification) + p.cond.Broadcast() +} + +func (p *processorListener) pop(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + for { + blockingGet := func() (interface{}, bool) { + p.lock.Lock() + defer p.lock.Unlock() + + for len(p.pendingNotifications) == 0 { + // check if we're shutdown + select { + case <-stopCh: + return nil, true + default: + } + p.cond.Wait() + } + + nt := p.pendingNotifications[0] + p.pendingNotifications = p.pendingNotifications[1:] + return nt, false + } + + notification, stopped := blockingGet() + if stopped { + return + } + + select { + case <-stopCh: + return + case p.nextCh <- notification: + } + } +} + +func (p *processorListener) run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + for { + var next interface{} + select { + case <-stopCh: + func() { + p.lock.Lock() + defer p.lock.Unlock() + p.cond.Broadcast() + }() + return + case next = <-p.nextCh: + } + + switch notification := next.(type) { + case updateNotification: + p.handler.OnUpdate(notification.oldObj, notification.newObj) + case addNotification: + p.handler.OnAdd(notification.newObj) + case deleteNotification: + p.handler.OnDelete(notification.oldObj) + default: + utilruntime.HandleError(fmt.Errorf("unrecognized 
notification: %#v", next)) + } + } +} + +// shouldResync deterimines if the listener needs a resync. If the listener's resyncPeriod is 0, +// this always returns false. +func (p *processorListener) shouldResync(now time.Time) bool { + p.resyncLock.Lock() + defer p.resyncLock.Unlock() + + if p.resyncPeriod == 0 { + return false + } + + return now.After(p.nextResync) || now.Equal(p.nextResync) +} + +func (p *processorListener) determineNextResync(now time.Time) { + p.resyncLock.Lock() + defer p.resyncLock.Unlock() + + p.nextResync = now.Add(p.resyncPeriod) +} + +func (p *processorListener) setResyncPeriod(resyncPeriod time.Duration) { + p.resyncLock.Lock() + defer p.resyncLock.Unlock() + + p.resyncPeriod = resyncPeriod +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/store.go b/vendor/k8s.io/client-go/tools/cache/store.go old mode 100644 new mode 100755 similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/client/cache/store.go rename to vendor/k8s.io/client-go/tools/cache/store.go index 71115f2ce..ee6b3bd46 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/store.go +++ b/vendor/k8s.io/client-go/tools/cache/store.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,7 +20,7 @@ import ( "fmt" "strings" - "k8s.io/kubernetes/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/meta" ) // Store is a generic object storage interface. Reflector knows how to watch a server @@ -99,7 +99,7 @@ func SplitMetaNamespaceKey(key string) (namespace, name string, err error) { // name only, no namespace return "", parts[0], nil case 2: - // name and namespace + // namespace and name return parts[0], parts[1], nil } diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/thread_safe_store.go b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go similarity index 97% rename from vendor/k8s.io/kubernetes/pkg/client/cache/thread_safe_store.go rename to vendor/k8s.io/client-go/tools/cache/thread_safe_store.go index 11077e25b..4d6a01297 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/thread_safe_store.go +++ b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,7 +20,7 @@ import ( "fmt" "sync" - "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/sets" ) // ThreadSafeStore is an interface that allows concurrent access to a storage backend. 
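For orientation, a minimal sketch of how a consumer such as kubewatch might drive the shared informer API vendored above; the handler type and function names are illustrative assumptions rather than part of this change, and only the signatures visible in this diff (NewSharedIndexInformer, AddEventHandler, Run, HasSynced, WaitForCacheSync) are relied on:

// Illustrative sketch only (assumed names, not vendored code).
package informerexample

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/cache"
)

// loggingHandler satisfies cache.ResourceEventHandler (OnAdd/OnUpdate/OnDelete).
type loggingHandler struct{}

func (loggingHandler) OnAdd(obj interface{})               { fmt.Printf("added: %v\n", obj) }
func (loggingHandler) OnUpdate(oldObj, newObj interface{}) { fmt.Printf("updated: %v\n", newObj) }
func (loggingHandler) OnDelete(obj interface{})            { fmt.Printf("deleted: %v\n", obj) }

// runSharedInformer wires a ListerWatcher to a shared index informer, registers a
// handler, starts it, and blocks until the cache has synced or stopCh closes.
func runSharedInformer(lw cache.ListerWatcher, objType runtime.Object, stopCh <-chan struct{}) error {
	informer := cache.NewSharedIndexInformer(lw, objType, 30*time.Second, cache.Indexers{})
	informer.AddEventHandler(loggingHandler{})
	go informer.Run(stopCh)
	if !cache.WaitForCacheSync(stopCh, informer.HasSynced) {
		return fmt.Errorf("cache did not sync before shutdown")
	}
	return nil
}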
@@ -151,7 +151,7 @@ func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, returnKeySet := sets.String{} for _, indexKey := range indexKeys { set := index[indexKey] - for _, key := range set.List() { + for _, key := range set.UnsortedList() { returnKeySet.Insert(key) } } @@ -261,12 +261,13 @@ func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) error { } index := c.indices[name] + if index == nil { + continue + } for _, indexValue := range indexValues { - if index != nil { - set := index[indexValue] - if set != nil { - set.Delete(key) - } + set := index[indexValue] + if set != nil { + set.Delete(key) } } } diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/undelta_store.go b/vendor/k8s.io/client-go/tools/cache/undelta_store.go similarity index 97% rename from vendor/k8s.io/kubernetes/pkg/client/cache/undelta_store.go rename to vendor/k8s.io/client-go/tools/cache/undelta_store.go index 4a8a4500e..117df46c4 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/undelta_store.go +++ b/vendor/k8s.io/client-go/tools/cache/undelta_store.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go new file mode 100644 index 000000000..43e26487c --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go @@ -0,0 +1,183 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "encoding/base64" + "errors" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" +) + +func init() { + sDec, _ := base64.StdEncoding.DecodeString("REDACTED+") + redactedBytes = []byte(string(sDec)) +} + +// IsConfigEmpty returns true if the config is empty. 
+func IsConfigEmpty(config *Config) bool { + return len(config.AuthInfos) == 0 && len(config.Clusters) == 0 && len(config.Contexts) == 0 && + len(config.CurrentContext) == 0 && + len(config.Preferences.Extensions) == 0 && !config.Preferences.Colors && + len(config.Extensions) == 0 +} + +// MinifyConfig reads the current context and uses that to keep only the relevant pieces of config. +// This is useful for making secrets based on kubeconfig files +func MinifyConfig(config *Config) error { + if len(config.CurrentContext) == 0 { + return errors.New("current-context must exist in order to minify") + } + + currContext, exists := config.Contexts[config.CurrentContext] + if !exists { + return fmt.Errorf("cannot locate context %v", config.CurrentContext) + } + + newContexts := map[string]*Context{} + newContexts[config.CurrentContext] = currContext + + newClusters := map[string]*Cluster{} + if len(currContext.Cluster) > 0 { + if _, exists := config.Clusters[currContext.Cluster]; !exists { + return fmt.Errorf("cannot locate cluster %v", currContext.Cluster) + } + + newClusters[currContext.Cluster] = config.Clusters[currContext.Cluster] + } + + newAuthInfos := map[string]*AuthInfo{} + if len(currContext.AuthInfo) > 0 { + if _, exists := config.AuthInfos[currContext.AuthInfo]; !exists { + return fmt.Errorf("cannot locate user %v", currContext.AuthInfo) + } + + newAuthInfos[currContext.AuthInfo] = config.AuthInfos[currContext.AuthInfo] + } + + config.AuthInfos = newAuthInfos + config.Clusters = newClusters + config.Contexts = newContexts + + return nil +} + +var redactedBytes []byte + +// ShortenConfig redacts raw data entries from the config object for a human-readable view. +func ShortenConfig(config *Config) { + // trick json encoder into printing a human readable string in the raw data + // by base64 decoding what we want to print.
Relies on implementation of + // http://golang.org/pkg/encoding/json/#Marshal using base64 to encode []byte + for key, authInfo := range config.AuthInfos { + if len(authInfo.ClientKeyData) > 0 { + authInfo.ClientKeyData = redactedBytes + } + if len(authInfo.ClientCertificateData) > 0 { + authInfo.ClientCertificateData = redactedBytes + } + config.AuthInfos[key] = authInfo + } + for key, cluster := range config.Clusters { + if len(cluster.CertificateAuthorityData) > 0 { + cluster.CertificateAuthorityData = redactedBytes + } + config.Clusters[key] = cluster + } +} + +// Flatten changes the config object into a self contained config (useful for making secrets) +func FlattenConfig(config *Config) error { + for key, authInfo := range config.AuthInfos { + baseDir, err := MakeAbs(path.Dir(authInfo.LocationOfOrigin), "") + if err != nil { + return err + } + + if err := FlattenContent(&authInfo.ClientCertificate, &authInfo.ClientCertificateData, baseDir); err != nil { + return err + } + if err := FlattenContent(&authInfo.ClientKey, &authInfo.ClientKeyData, baseDir); err != nil { + return err + } + + config.AuthInfos[key] = authInfo + } + for key, cluster := range config.Clusters { + baseDir, err := MakeAbs(path.Dir(cluster.LocationOfOrigin), "") + if err != nil { + return err + } + + if err := FlattenContent(&cluster.CertificateAuthority, &cluster.CertificateAuthorityData, baseDir); err != nil { + return err + } + + config.Clusters[key] = cluster + } + + return nil +} + +func FlattenContent(path *string, contents *[]byte, baseDir string) error { + if len(*path) != 0 { + if len(*contents) > 0 { + return errors.New("cannot have values for both path and contents") + } + + var err error + absPath := ResolvePath(*path, baseDir) + *contents, err = ioutil.ReadFile(absPath) + if err != nil { + return err + } + + *path = "" + } + + return nil +} + +// ResolvePath returns the path as an absolute paths, relative to the given base directory +func ResolvePath(path string, base string) string { + // Don't resolve empty paths + if len(path) > 0 { + // Don't resolve absolute paths + if !filepath.IsAbs(path) { + return filepath.Join(base, path) + } + } + + return path +} + +func MakeAbs(path, base string) (string, error) { + if filepath.IsAbs(path) { + return path, nil + } + if len(base) == 0 { + cwd, err := os.Getwd() + if err != nil { + return "", err + } + base = cwd + } + return filepath.Join(base, path), nil +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go b/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go new file mode 100644 index 000000000..5fbbe3f13 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go @@ -0,0 +1,66 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package latest + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/apimachinery/pkg/runtime/serializer/versioning" + "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/client-go/tools/clientcmd/api/v1" +) + +// Version is the string that represents the current external default version. +const Version = "v1" + +var ExternalVersion = schema.GroupVersion{Group: "", Version: "v1"} + +// OldestVersion is the string that represents the oldest server version supported, +// for client code that wants to hardcode the lowest common denominator. +const OldestVersion = "v1" + +// Versions is the list of versions that are recognized in code. The order provided +// may be assumed to be least feature rich to most feature rich, and clients may +// choose to prefer the latter items in the list over the former items when presented +// with a set of versions to choose. +var Versions = []string{"v1"} + +var ( + Codec runtime.Codec + Scheme *runtime.Scheme +) + +func init() { + Scheme = runtime.NewScheme() + if err := api.AddToScheme(Scheme); err != nil { + // Programmer error, detect immediately + panic(err) + } + if err := v1.AddToScheme(Scheme); err != nil { + // Programmer error, detect immediately + panic(err) + } + yamlSerializer := json.NewYAMLSerializer(json.DefaultMetaFactory, Scheme, Scheme) + Codec = versioning.NewDefaultingCodecForScheme( + Scheme, + yamlSerializer, + yamlSerializer, + schema.GroupVersion{Version: Version}, + runtime.InternalGroupVersioner, + ) +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/register.go b/vendor/k8s.io/client-go/tools/clientcmd/api/register.go new file mode 100644 index 000000000..2eec3881c --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/register.go @@ -0,0 +1,46 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package api + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +// TODO this should be in the "kubeconfig" group +var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Config{}, + ) + return nil +} + +func (obj *Config) GetObjectKind() schema.ObjectKind { return obj } +func (obj *Config) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} +func (obj *Config) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go new file mode 100644 index 000000000..fa9d40a9b --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go @@ -0,0 +1,178 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// Where possible, json tags match the cli argument names. +// Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted. + +// Config holds the information needed to build connect to remote kubernetes clusters as a given user +// IMPORTANT if you add fields to this struct, please update IsConfigEmpty() +type Config struct { + // Legacy field from pkg/api/types.go TypeMeta. + // TODO(jlowdermilk): remove this after eliminating downstream dependencies. + // +optional + Kind string `json:"kind,omitempty"` + // DEPRECATED: APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). + // Because a cluster can run multiple API groups and potentially multiple versions of each, it no longer makes sense to specify + // a single value for the cluster version. + // This field isn't really needed anyway, so we are deprecating it without replacement. + // It will be ignored if it is present. + // +optional + APIVersion string `json:"apiVersion,omitempty"` + // Preferences holds general information to be use for cli interactions + Preferences Preferences `json:"preferences"` + // Clusters is a map of referencable names to cluster configs + Clusters map[string]*Cluster `json:"clusters"` + // AuthInfos is a map of referencable names to user configs + AuthInfos map[string]*AuthInfo `json:"users"` + // Contexts is a map of referencable names to context configs + Contexts map[string]*Context `json:"contexts"` + // CurrentContext is the name of the context that you would like to use by default + CurrentContext string `json:"current-context"` + // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// IMPORTANT if you add fields to this struct, please update IsConfigEmpty() +type Preferences struct { + // +optional + Colors bool `json:"colors,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// Cluster contains information about how to communicate with a kubernetes cluster +type Cluster struct { + // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + LocationOfOrigin string + // Server is the address of the kubernetes cluster (https://hostname:port). + Server string `json:"server"` + // APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). + // +optional + APIVersion string `json:"api-version,omitempty"` + // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. + // +optional + InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` + // CertificateAuthority is the path to a cert file for the certificate authority. + // +optional + CertificateAuthority string `json:"certificate-authority,omitempty"` + // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority + // +optional + CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. +type AuthInfo struct { + // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + LocationOfOrigin string + // ClientCertificate is the path to a client cert file for TLS. + // +optional + ClientCertificate string `json:"client-certificate,omitempty"` + // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate + // +optional + ClientCertificateData []byte `json:"client-certificate-data,omitempty"` + // ClientKey is the path to a client key file for TLS. + // +optional + ClientKey string `json:"client-key,omitempty"` + // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey + // +optional + ClientKeyData []byte `json:"client-key-data,omitempty"` + // Token is the bearer token for authentication to the kubernetes cluster. + // +optional + Token string `json:"token,omitempty"` + // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence. + // +optional + TokenFile string `json:"tokenFile,omitempty"` + // Impersonate is the username to act-as. + // +optional + Impersonate string `json:"act-as,omitempty"` + // Username is the username for basic authentication to the kubernetes cluster. + // +optional + Username string `json:"username,omitempty"` + // Password is the password for basic authentication to the kubernetes cluster. 
+ // +optional + Password string `json:"password,omitempty"` + // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. + // +optional + AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) +type Context struct { + // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + LocationOfOrigin string + // Cluster is the name of the cluster for this context + Cluster string `json:"cluster"` + // AuthInfo is the name of the authInfo for this context + AuthInfo string `json:"user"` + // Namespace is the default namespace to use on unspecified requests + // +optional + Namespace string `json:"namespace,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// AuthProviderConfig holds the configuration for a specified auth provider. +type AuthProviderConfig struct { + Name string `json:"name"` + // +optional + Config map[string]string `json:"config,omitempty"` +} + +// NewConfig is a convenience function that returns a new Config object with non-nil maps +func NewConfig() *Config { + return &Config{ + Preferences: *NewPreferences(), + Clusters: make(map[string]*Cluster), + AuthInfos: make(map[string]*AuthInfo), + Contexts: make(map[string]*Context), + Extensions: make(map[string]runtime.Object), + } +} + +// NewConfig is a convenience function that returns a new Config object with non-nil maps +func NewContext() *Context { + return &Context{Extensions: make(map[string]runtime.Object)} +} + +// NewConfig is a convenience function that returns a new Config object with non-nil maps +func NewCluster() *Cluster { + return &Cluster{Extensions: make(map[string]runtime.Object)} +} + +// NewConfig is a convenience function that returns a new Config object with non-nil maps +func NewAuthInfo() *AuthInfo { + return &AuthInfo{Extensions: make(map[string]runtime.Object)} +} + +// NewConfig is a convenience function that returns a new Config object with non-nil maps +func NewPreferences() *Preferences { + return &Preferences{Extensions: make(map[string]runtime.Object)} +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go new file mode 100644 index 000000000..b47bfbca2 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go @@ -0,0 +1,227 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "sort" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/clientcmd/api" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + return scheme.AddConversionFuncs( + func(in *Cluster, out *api.Cluster, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *api.Cluster, out *Cluster, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *Preferences, out *api.Preferences, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *api.Preferences, out *Preferences, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *AuthInfo, out *api.AuthInfo, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *api.AuthInfo, out *AuthInfo, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *Context, out *api.Context, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *api.Context, out *Context, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + + func(in *Config, out *api.Config, s conversion.Scope) error { + out.CurrentContext = in.CurrentContext + if err := s.Convert(&in.Preferences, &out.Preferences, 0); err != nil { + return err + } + + out.Clusters = make(map[string]*api.Cluster) + if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil { + return err + } + out.AuthInfos = make(map[string]*api.AuthInfo) + if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil { + return err + } + out.Contexts = make(map[string]*api.Context) + if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil { + return err + } + out.Extensions = make(map[string]runtime.Object) + if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil { + return err + } + return nil + }, + func(in *api.Config, out *Config, s conversion.Scope) error { + out.CurrentContext = in.CurrentContext + if err := s.Convert(&in.Preferences, &out.Preferences, 0); err != nil { + return err + } + + out.Clusters = make([]NamedCluster, 0, 0) + if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil { + return err + } + out.AuthInfos = make([]NamedAuthInfo, 0, 0) + if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil { + return err + } + out.Contexts = make([]NamedContext, 0, 0) + if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil { + return err + } + out.Extensions = make([]NamedExtension, 0, 0) + if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil { + return err + } + return nil + }, + func(in *[]NamedCluster, out *map[string]*api.Cluster, s conversion.Scope) error { + for _, curr := range *in { + newCluster := api.NewCluster() + if err := s.Convert(&curr.Cluster, newCluster, 0); err != nil { + return err + } + (*out)[curr.Name] = newCluster + } + + return nil + }, + func(in *map[string]*api.Cluster, out *[]NamedCluster, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newCluster := (*in)[key] + oldCluster := &Cluster{} + if err := s.Convert(newCluster, oldCluster, 0); err != nil { + return err + } + + 
namedCluster := NamedCluster{key, *oldCluster} + *out = append(*out, namedCluster) + } + + return nil + }, + func(in *[]NamedAuthInfo, out *map[string]*api.AuthInfo, s conversion.Scope) error { + for _, curr := range *in { + newAuthInfo := api.NewAuthInfo() + if err := s.Convert(&curr.AuthInfo, newAuthInfo, 0); err != nil { + return err + } + (*out)[curr.Name] = newAuthInfo + } + + return nil + }, + func(in *map[string]*api.AuthInfo, out *[]NamedAuthInfo, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newAuthInfo := (*in)[key] + oldAuthInfo := &AuthInfo{} + if err := s.Convert(newAuthInfo, oldAuthInfo, 0); err != nil { + return err + } + + namedAuthInfo := NamedAuthInfo{key, *oldAuthInfo} + *out = append(*out, namedAuthInfo) + } + + return nil + }, + func(in *[]NamedContext, out *map[string]*api.Context, s conversion.Scope) error { + for _, curr := range *in { + newContext := api.NewContext() + if err := s.Convert(&curr.Context, newContext, 0); err != nil { + return err + } + (*out)[curr.Name] = newContext + } + + return nil + }, + func(in *map[string]*api.Context, out *[]NamedContext, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newContext := (*in)[key] + oldContext := &Context{} + if err := s.Convert(newContext, oldContext, 0); err != nil { + return err + } + + namedContext := NamedContext{key, *oldContext} + *out = append(*out, namedContext) + } + + return nil + }, + func(in *[]NamedExtension, out *map[string]runtime.Object, s conversion.Scope) error { + for _, curr := range *in { + var newExtension runtime.Object + if err := s.Convert(&curr.Extension, &newExtension, 0); err != nil { + return err + } + (*out)[curr.Name] = newExtension + } + + return nil + }, + func(in *map[string]runtime.Object, out *[]NamedExtension, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newExtension := (*in)[key] + oldExtension := &runtime.RawExtension{} + if err := s.Convert(newExtension, oldExtension, 0); err != nil { + return err + } + + namedExtension := NamedExtension{key, *oldExtension} + *out = append(*out, namedExtension) + } + + return nil + }, + ) +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go new file mode 100644 index 000000000..bc87706db --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go @@ -0,0 +1,46 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +// TODO this should be in the "kubeconfig" group +var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addConversionFuncs) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Config{}, + ) + return nil +} + +func (obj *Config) GetObjectKind() schema.ObjectKind { return obj } +func (obj *Config) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} +func (obj *Config) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go new file mode 100644 index 000000000..a0777ba53 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go @@ -0,0 +1,170 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// Where possible, json tags match the cli argument names. +// Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted. + +// Config holds the information needed to build connect to remote kubernetes clusters as a given user +type Config struct { + // Legacy field from pkg/api/types.go TypeMeta. + // TODO(jlowdermilk): remove this after eliminating downstream dependencies. + // +optional + Kind string `json:"kind,omitempty"` + // DEPRECATED: APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). + // Because a cluster can run multiple API groups and potentially multiple versions of each, it no longer makes sense to specify + // a single value for the cluster version. + // This field isn't really needed anyway, so we are deprecating it without replacement. + // It will be ignored if it is present. + // +optional + APIVersion string `json:"apiVersion,omitempty"` + // Preferences holds general information to be use for cli interactions + Preferences Preferences `json:"preferences"` + // Clusters is a map of referencable names to cluster configs + Clusters []NamedCluster `json:"clusters"` + // AuthInfos is a map of referencable names to user configs + AuthInfos []NamedAuthInfo `json:"users"` + // Contexts is a map of referencable names to context configs + Contexts []NamedContext `json:"contexts"` + // CurrentContext is the name of the context that you would like to use by default + CurrentContext string `json:"current-context"` + // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions []NamedExtension `json:"extensions,omitempty"` +} + +type Preferences struct { + // +optional + Colors bool `json:"colors,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions []NamedExtension `json:"extensions,omitempty"` +} + +// Cluster contains information about how to communicate with a kubernetes cluster +type Cluster struct { + // Server is the address of the kubernetes cluster (https://hostname:port). + Server string `json:"server"` + // APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). + // +optional + APIVersion string `json:"api-version,omitempty"` + // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. + // +optional + InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` + // CertificateAuthority is the path to a cert file for the certificate authority. + // +optional + CertificateAuthority string `json:"certificate-authority,omitempty"` + // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority + // +optional + CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions []NamedExtension `json:"extensions,omitempty"` +} + +// AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. +type AuthInfo struct { + // ClientCertificate is the path to a client cert file for TLS. + // +optional + ClientCertificate string `json:"client-certificate,omitempty"` + // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate + // +optional + ClientCertificateData []byte `json:"client-certificate-data,omitempty"` + // ClientKey is the path to a client key file for TLS. + // +optional + ClientKey string `json:"client-key,omitempty"` + // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey + // +optional + ClientKeyData []byte `json:"client-key-data,omitempty"` + // Token is the bearer token for authentication to the kubernetes cluster. + // +optional + Token string `json:"token,omitempty"` + // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence. + // +optional + TokenFile string `json:"tokenFile,omitempty"` + // Impersonate is the username to imperonate. The name matches the flag. + // +optional + Impersonate string `json:"as,omitempty"` + // Username is the username for basic authentication to the kubernetes cluster. + // +optional + Username string `json:"username,omitempty"` + // Password is the password for basic authentication to the kubernetes cluster. + // +optional + Password string `json:"password,omitempty"` + // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. + // +optional + AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` + // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions []NamedExtension `json:"extensions,omitempty"` +} + +// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) +type Context struct { + // Cluster is the name of the cluster for this context + Cluster string `json:"cluster"` + // AuthInfo is the name of the authInfo for this context + AuthInfo string `json:"user"` + // Namespace is the default namespace to use on unspecified requests + // +optional + Namespace string `json:"namespace,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions []NamedExtension `json:"extensions,omitempty"` +} + +// NamedCluster relates nicknames to cluster information +type NamedCluster struct { + // Name is the nickname for this Cluster + Name string `json:"name"` + // Cluster holds the cluster information + Cluster Cluster `json:"cluster"` +} + +// NamedContext relates nicknames to context information +type NamedContext struct { + // Name is the nickname for this Context + Name string `json:"name"` + // Context holds the context information + Context Context `json:"context"` +} + +// NamedAuthInfo relates nicknames to auth information +type NamedAuthInfo struct { + // Name is the nickname for this AuthInfo + Name string `json:"name"` + // AuthInfo holds the auth information + AuthInfo AuthInfo `json:"user"` +} + +// NamedExtension relates nicknames to extension information +type NamedExtension struct { + // Name is the nickname for this Extension + Name string `json:"name"` + // Extension holds the extension information + Extension runtime.RawExtension `json:"extension"` +} + +// AuthProviderConfig holds the configuration for a specified auth provider. +type AuthProviderConfig struct { + Name string `json:"name"` + Config map[string]string `json:"config"` +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/auth_loaders.go b/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go similarity index 75% rename from vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/auth_loaders.go rename to vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go index 8b10ce2bc..12331f6e6 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/auth_loaders.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,7 +23,8 @@ import ( "io/ioutil" "os" - clientauth "k8s.io/kubernetes/pkg/client/unversioned/auth" + "github.com/howeyc/gopass" + clientauth "k8s.io/client-go/tools/auth" ) // AuthLoaders are used to build clientauth.Info objects. @@ -46,10 +47,13 @@ type PromptingAuthLoader struct { // LoadAuth parses an AuthInfo object from a file path. It prompts user and creates file if it doesn't exist. func (a *PromptingAuthLoader) LoadAuth(path string) (*clientauth.Info, error) { - var auth clientauth.Info // Prompt for user/pass and write a file if none exists. 
if _, err := os.Stat(path); os.IsNotExist(err) { - auth = *a.Prompt() + authPtr, err := a.Prompt() + auth := *authPtr + if err != nil { + return nil, err + } data, err := json.Marshal(auth) if err != nil { return &auth, err @@ -65,19 +69,30 @@ func (a *PromptingAuthLoader) LoadAuth(path string) (*clientauth.Info, error) { } // Prompt pulls the user and password from a reader -func (a *PromptingAuthLoader) Prompt() *clientauth.Info { +func (a *PromptingAuthLoader) Prompt() (*clientauth.Info, error) { + var err error auth := &clientauth.Info{} - auth.User = promptForString("Username", a.reader) - auth.Password = promptForString("Password", a.reader) - - return auth + auth.User, err = promptForString("Username", a.reader, true) + if err != nil { + return nil, err + } + auth.Password, err = promptForString("Password", nil, false) + if err != nil { + return nil, err + } + return auth, nil } -func promptForString(field string, r io.Reader) string { +func promptForString(field string, r io.Reader, show bool) (result string, err error) { fmt.Printf("Please enter %s: ", field) - var result string - fmt.Fscan(r, &result) - return result + if show { + _, err = fmt.Fscan(r, &result) + } else { + var data []byte + data, err = gopass.GetPasswdMasked() + result = string(data) + } + return result, err } // NewPromptingAuthLoader is an AuthLoader that parses an AuthInfo object from a file path. It prompts user and creates file if it doesn't exist. diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go new file mode 100644 index 000000000..0411e6235 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go @@ -0,0 +1,537 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package clientcmd + +import ( + "fmt" + "io" + "io/ioutil" + "net/url" + "os" + "strings" + + "github.com/golang/glog" + "github.com/imdario/mergo" + + "k8s.io/client-go/pkg/api" + restclient "k8s.io/client-go/rest" + clientauth "k8s.io/client-go/tools/auth" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +var ( + // ClusterDefaults has the same behavior as the old EnvVar and DefaultCluster fields + // DEPRECATED will be replaced + ClusterDefaults = clientcmdapi.Cluster{Server: getDefaultServer()} + // DefaultClientConfig represents the legacy behavior of this package for defaulting + // DEPRECATED will be replace + DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{ + ClusterDefaults: ClusterDefaults, + }, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}} +) + +// getDefaultServer returns a default setting for DefaultClientConfig +// DEPRECATED +func getDefaultServer() string { + if server := os.Getenv("KUBERNETES_MASTER"); len(server) > 0 { + return server + } + return "http://localhost:8080" +} + +// ClientConfig is used to make it easy to get an api server client +type ClientConfig interface { + // RawConfig returns the merged result of all overrides + RawConfig() (clientcmdapi.Config, error) + // ClientConfig returns a complete client config + ClientConfig() (*restclient.Config, error) + // Namespace returns the namespace resulting from the merged + // result of all overrides and a boolean indicating if it was + // overridden + Namespace() (string, bool, error) + // ConfigAccess returns the rules for loading/persisting the config. + ConfigAccess() ConfigAccess +} + +type PersistAuthProviderConfigForUser func(user string) restclient.AuthProviderConfigPersister + +type promptedCredentials struct { + username string + password string +} + +// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information +type DirectClientConfig struct { + config clientcmdapi.Config + contextName string + overrides *ConfigOverrides + fallbackReader io.Reader + configAccess ConfigAccess + // promptedCredentials store the credentials input by the user + promptedCredentials promptedCredentials +} + +// NewDefaultClientConfig creates a DirectClientConfig using the config.CurrentContext as the context name +func NewDefaultClientConfig(config clientcmdapi.Config, overrides *ConfigOverrides) ClientConfig { + return &DirectClientConfig{config, config.CurrentContext, overrides, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}} +} + +// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information +func NewNonInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, configAccess ConfigAccess) ClientConfig { + return &DirectClientConfig{config, contextName, overrides, nil, configAccess, promptedCredentials{}} +} + +// NewInteractiveClientConfig creates a DirectClientConfig using the passed context name and a reader in case auth information is not provided via files or flags +func NewInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, fallbackReader io.Reader, configAccess ConfigAccess) ClientConfig { + return &DirectClientConfig{config, contextName, overrides, fallbackReader, configAccess, promptedCredentials{}} +} + +func (config *DirectClientConfig) RawConfig() 
(clientcmdapi.Config, error) { + return config.config, nil +} + +// ClientConfig implements ClientConfig +func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { + // check that getAuthInfo, getContext, and getCluster do not return an error. + // Do this before checking if the curent config is usable in the event that an + // AuthInfo, Context, or Cluster config with user-defined names are not found. + // This provides a user with the immediate cause for error if one is found + configAuthInfo, err := config.getAuthInfo() + if err != nil { + return nil, err + } + + _, err = config.getContext() + if err != nil { + return nil, err + } + + configClusterInfo, err := config.getCluster() + if err != nil { + return nil, err + } + + if err := config.ConfirmUsable(); err != nil { + return nil, err + } + + clientConfig := &restclient.Config{} + clientConfig.Host = configClusterInfo.Server + + if len(config.overrides.Timeout) > 0 { + timeout, err := ParseTimeout(config.overrides.Timeout) + if err != nil { + return nil, err + } + clientConfig.Timeout = timeout + } + + if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 { + u.RawQuery = "" + u.Fragment = "" + clientConfig.Host = u.String() + } + if len(configAuthInfo.Impersonate) > 0 { + clientConfig.Impersonate = restclient.ImpersonationConfig{UserName: configAuthInfo.Impersonate} + } + + // only try to read the auth information if we are secure + if restclient.IsConfigTransportTLS(*clientConfig) { + var err error + + // mergo is a first write wins for map value and a last writing wins for interface values + // NOTE: This behavior changed with https://github.com/imdario/mergo/commit/d304790b2ed594794496464fadd89d2bb266600a. + // Our mergo.Merge version is older than this change. + var persister restclient.AuthProviderConfigPersister + if config.configAccess != nil { + authInfoName, _ := config.getAuthInfoName() + persister = PersisterForUser(config.configAccess, authInfoName) + } + userAuthPartialConfig, err := config.getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister) + if err != nil { + return nil, err + } + mergo.Merge(clientConfig, userAuthPartialConfig) + + serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo) + if err != nil { + return nil, err + } + mergo.Merge(clientConfig, serverAuthPartialConfig) + } + + return clientConfig, nil +} + +// clientauth.Info object contain both user identification and server identification. We want different precedence orders for +// both, so we have to split the objects and merge them separately +// we want this order of precedence for the server identification +// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files) +// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) +// 3. 
load the ~/.kubernetes_auth file as a default +func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) { + mergedConfig := &restclient.Config{} + + // configClusterInfo holds the information identify the server provided by .kubeconfig + configClientConfig := &restclient.Config{} + configClientConfig.CAFile = configClusterInfo.CertificateAuthority + configClientConfig.CAData = configClusterInfo.CertificateAuthorityData + configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify + mergo.Merge(mergedConfig, configClientConfig) + + return mergedConfig, nil +} + +// clientauth.Info object contain both user identification and server identification. We want different precedence orders for +// both, so we have to split the objects and merge them separately +// we want this order of precedence for user identifcation +// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files) +// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) +// 3. if there is not enough information to idenfity the user, load try the ~/.kubernetes_auth file +// 4. if there is not enough information to identify the user, prompt if possible +func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister) (*restclient.Config, error) { + mergedConfig := &restclient.Config{} + + // blindly overwrite existing values based on precedence + if len(configAuthInfo.Token) > 0 { + mergedConfig.BearerToken = configAuthInfo.Token + } else if len(configAuthInfo.TokenFile) > 0 { + tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile) + if err != nil { + return nil, err + } + mergedConfig.BearerToken = string(tokenBytes) + } + if len(configAuthInfo.Impersonate) > 0 { + mergedConfig.Impersonate = restclient.ImpersonationConfig{UserName: configAuthInfo.Impersonate} + } + if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 { + mergedConfig.CertFile = configAuthInfo.ClientCertificate + mergedConfig.CertData = configAuthInfo.ClientCertificateData + mergedConfig.KeyFile = configAuthInfo.ClientKey + mergedConfig.KeyData = configAuthInfo.ClientKeyData + } + if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 { + mergedConfig.Username = configAuthInfo.Username + mergedConfig.Password = configAuthInfo.Password + } + if configAuthInfo.AuthProvider != nil { + mergedConfig.AuthProvider = configAuthInfo.AuthProvider + mergedConfig.AuthConfigPersister = persistAuthConfig + } + + // if there still isn't enough information to authenticate the user, try prompting + if !canIdentifyUser(*mergedConfig) && (fallbackReader != nil) { + if len(config.promptedCredentials.username) > 0 && len(config.promptedCredentials.password) > 0 { + mergedConfig.Username = config.promptedCredentials.username + mergedConfig.Password = config.promptedCredentials.password + return mergedConfig, nil + } + prompter := NewPromptingAuthLoader(fallbackReader) + promptedAuthInfo, err := prompter.Prompt() + if err != nil { + return nil, err + } + promptedConfig := makeUserIdentificationConfig(*promptedAuthInfo) + previouslyMergedConfig := mergedConfig + mergedConfig = &restclient.Config{} + mergo.Merge(mergedConfig, promptedConfig) + mergo.Merge(mergedConfig, previouslyMergedConfig) 
+ config.promptedCredentials.username = mergedConfig.Username + config.promptedCredentials.password = mergedConfig.Password + } + + return mergedConfig, nil +} + +// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged using mergo for only user identification information +func makeUserIdentificationConfig(info clientauth.Info) *restclient.Config { + config := &restclient.Config{} + config.Username = info.User + config.Password = info.Password + config.CertFile = info.CertFile + config.KeyFile = info.KeyFile + config.BearerToken = info.BearerToken + return config +} + +// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged using mergo for only server identification information +func makeServerIdentificationConfig(info clientauth.Info) restclient.Config { + config := restclient.Config{} + config.CAFile = info.CAFile + if info.Insecure != nil { + config.Insecure = *info.Insecure + } + return config +} + +func canIdentifyUser(config restclient.Config) bool { + return len(config.Username) > 0 || + (len(config.CertFile) > 0 || len(config.CertData) > 0) || + len(config.BearerToken) > 0 || + config.AuthProvider != nil +} + +// Namespace implements ClientConfig +func (config *DirectClientConfig) Namespace() (string, bool, error) { + if err := config.ConfirmUsable(); err != nil { + return "", false, err + } + + configContext, err := config.getContext() + if err != nil { + return "", false, err + } + + if len(configContext.Namespace) == 0 { + return api.NamespaceDefault, false, nil + } + + overridden := false + if config.overrides != nil && config.overrides.Context.Namespace != "" { + overridden = true + } + return configContext.Namespace, overridden, nil +} + +// ConfigAccess implements ClientConfig +func (config *DirectClientConfig) ConfigAccess() ConfigAccess { + return config.configAccess +} + +// ConfirmUsable looks a particular context and determines if that particular part of the config is useable. There might still be errors in the config, +// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible. +func (config *DirectClientConfig) ConfirmUsable() error { + validationErrors := make([]error, 0) + + var contextName string + if len(config.contextName) != 0 { + contextName = config.contextName + } else { + contextName = config.config.CurrentContext + } + + if len(contextName) > 0 { + _, exists := config.config.Contexts[contextName] + if !exists { + validationErrors = append(validationErrors, &errContextNotFound{contextName}) + } + } + + authInfoName, _ := config.getAuthInfoName() + authInfo, _ := config.getAuthInfo() + validationErrors = append(validationErrors, validateAuthInfo(authInfoName, authInfo)...) + clusterName, _ := config.getClusterName() + cluster, _ := config.getCluster() + validationErrors = append(validationErrors, validateClusterInfo(clusterName, cluster)...) 
+ // when direct client config is specified, and our only error is that no server is defined, we should + // return a standard "no config" error + if len(validationErrors) == 1 && validationErrors[0] == ErrEmptyCluster { + return newErrConfigurationInvalid([]error{ErrEmptyConfig}) + } + return newErrConfigurationInvalid(validationErrors) +} + +// getContextName returns the default, or user-set context name, and a boolean that indicates +// whether the default context name has been overwritten by a user-set flag, or left as its default value +func (config *DirectClientConfig) getContextName() (string, bool) { + if len(config.overrides.CurrentContext) != 0 { + return config.overrides.CurrentContext, true + } + if len(config.contextName) != 0 { + return config.contextName, false + } + + return config.config.CurrentContext, false +} + +// getAuthInfoName returns a string containing the current authinfo name for the current context, +// and a boolean indicating whether the default authInfo name is overwritten by a user-set flag, or +// left as its default value +func (config *DirectClientConfig) getAuthInfoName() (string, bool) { + if len(config.overrides.Context.AuthInfo) != 0 { + return config.overrides.Context.AuthInfo, true + } + context, _ := config.getContext() + return context.AuthInfo, false +} + +// getClusterName returns a string containing the default, or user-set cluster name, and a boolean +// indicating whether the default clusterName has been overwritten by a user-set flag, or left as +// its default value +func (config *DirectClientConfig) getClusterName() (string, bool) { + if len(config.overrides.Context.Cluster) != 0 { + return config.overrides.Context.Cluster, true + } + context, _ := config.getContext() + return context.Cluster, false +} + +// getContext returns the clientcmdapi.Context, or an error if a required context is not found. +func (config *DirectClientConfig) getContext() (clientcmdapi.Context, error) { + contexts := config.config.Contexts + contextName, required := config.getContextName() + + mergedContext := clientcmdapi.NewContext() + if configContext, exists := contexts[contextName]; exists { + mergo.Merge(mergedContext, configContext) + } else if required { + return clientcmdapi.Context{}, fmt.Errorf("context %q does not exist", contextName) + } + mergo.Merge(mergedContext, config.overrides.Context) + + return *mergedContext, nil +} + +// getAuthInfo returns the clientcmdapi.AuthInfo, or an error if a required auth info is not found. +func (config *DirectClientConfig) getAuthInfo() (clientcmdapi.AuthInfo, error) { + authInfos := config.config.AuthInfos + authInfoName, required := config.getAuthInfoName() + + mergedAuthInfo := clientcmdapi.NewAuthInfo() + if configAuthInfo, exists := authInfos[authInfoName]; exists { + mergo.Merge(mergedAuthInfo, configAuthInfo) + } else if required { + return clientcmdapi.AuthInfo{}, fmt.Errorf("auth info %q does not exist", authInfoName) + } + mergo.Merge(mergedAuthInfo, config.overrides.AuthInfo) + + return *mergedAuthInfo, nil +} + +// getCluster returns the clientcmdapi.Cluster, or an error if a required cluster is not found. 
+func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) { + clusterInfos := config.config.Clusters + clusterInfoName, required := config.getClusterName() + + mergedClusterInfo := clientcmdapi.NewCluster() + mergo.Merge(mergedClusterInfo, config.overrides.ClusterDefaults) + if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists { + mergo.Merge(mergedClusterInfo, configClusterInfo) + } else if required { + return clientcmdapi.Cluster{}, fmt.Errorf("cluster %q does not exist", clusterInfoName) + } + mergo.Merge(mergedClusterInfo, config.overrides.ClusterInfo) + // An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data + // otherwise, a kubeconfig containing a CA reference would return an error that "CA and insecure-skip-tls-verify couldn't both be set" + caLen := len(config.overrides.ClusterInfo.CertificateAuthority) + caDataLen := len(config.overrides.ClusterInfo.CertificateAuthorityData) + if config.overrides.ClusterInfo.InsecureSkipTLSVerify && caLen == 0 && caDataLen == 0 { + mergedClusterInfo.CertificateAuthority = "" + mergedClusterInfo.CertificateAuthorityData = nil + } + + return *mergedClusterInfo, nil +} + +// inClusterClientConfig makes a config that will work from within a kubernetes cluster container environment. +// Can take options overrides for flags explicitly provided to the command inside the cluster container. +type inClusterClientConfig struct { + overrides *ConfigOverrides + inClusterConfigProvider func() (*restclient.Config, error) +} + +var _ ClientConfig = &inClusterClientConfig{} + +func (config *inClusterClientConfig) RawConfig() (clientcmdapi.Config, error) { + return clientcmdapi.Config{}, fmt.Errorf("inCluster environment config doesn't support multiple clusters") +} + +func (config *inClusterClientConfig) ClientConfig() (*restclient.Config, error) { + if config.inClusterConfigProvider == nil { + config.inClusterConfigProvider = restclient.InClusterConfig + } + + icc, err := config.inClusterConfigProvider() + if err != nil { + return nil, err + } + + // in-cluster configs only takes a host, token, or CA file + // if any of them were individually provided, ovewrite anything else + if config.overrides != nil { + if server := config.overrides.ClusterInfo.Server; len(server) > 0 { + icc.Host = server + } + if token := config.overrides.AuthInfo.Token; len(token) > 0 { + icc.BearerToken = token + } + if certificateAuthorityFile := config.overrides.ClusterInfo.CertificateAuthority; len(certificateAuthorityFile) > 0 { + icc.TLSClientConfig.CAFile = certificateAuthorityFile + } + } + + return icc, err +} + +func (config *inClusterClientConfig) Namespace() (string, bool, error) { + // This way assumes you've set the POD_NAMESPACE environment variable using the downward API. + // This check has to be done first for backwards compatibility with the way InClusterConfig was originally set up + if ns := os.Getenv("POD_NAMESPACE"); ns != "" { + return ns, true, nil + } + + // Fall back to the namespace associated with the service account token, if available + if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { + if ns := strings.TrimSpace(string(data)); len(ns) > 0 { + return ns, true, nil + } + } + + return "default", false, nil +} + +func (config *inClusterClientConfig) ConfigAccess() ConfigAccess { + return NewDefaultClientConfigLoadingRules() +} + +// Possible returns true if loading an inside-kubernetes-cluster is possible. 
+func (config *inClusterClientConfig) Possible() bool { + fi, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount/token") + return os.Getenv("KUBERNETES_SERVICE_HOST") != "" && + os.Getenv("KUBERNETES_SERVICE_PORT") != "" && + err == nil && !fi.IsDir() +} + +// BuildConfigFromFlags is a helper function that builds configs from a master +// url or a kubeconfig filepath. These are passed in as command line flags for cluster +// components. Warnings should reflect this usage. If neither masterUrl or kubeconfigPath +// are passed in we fallback to inClusterConfig. If inClusterConfig fails, we fallback +// to the default config. +func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*restclient.Config, error) { + if kubeconfigPath == "" && masterUrl == "" { + glog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.") + kubeconfig, err := restclient.InClusterConfig() + if err == nil { + return kubeconfig, nil + } + glog.Warning("error creating inClusterConfig, falling back to default config: ", err) + } + return NewNonInteractiveDeferredLoadingClientConfig( + &ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}, + &ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}}).ClientConfig() +} + +// BuildConfigFromKubeconfigGetter is a helper function that builds configs from a master +// url and a kubeconfigGetter. +func BuildConfigFromKubeconfigGetter(masterUrl string, kubeconfigGetter KubeconfigGetter) (*restclient.Config, error) { + // TODO: We do not need a DeferredLoader here. Refactor code and see if we can use DirectClientConfig here. + cc := NewNonInteractiveDeferredLoadingClientConfig( + &ClientConfigGetter{kubeconfigGetter: kubeconfigGetter}, + &ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}}) + return cc.ClientConfig() +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/config.go b/vendor/k8s.io/client-go/tools/clientcmd/config.go new file mode 100644 index 000000000..16ccdaf20 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/config.go @@ -0,0 +1,472 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "errors" + "os" + "path" + "path/filepath" + "reflect" + "sort" + + "github.com/golang/glog" + + restclient "k8s.io/client-go/rest" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +// ConfigAccess is used by subcommands and methods in this package to load and modify the appropriate config files +type ConfigAccess interface { + // GetLoadingPrecedence returns the slice of files that should be used for loading and inspecting the config + GetLoadingPrecedence() []string + // GetStartingConfig returns the config that subcommands should being operating against. 
It may or may not be merged depending on loading rules + GetStartingConfig() (*clientcmdapi.Config, error) + // GetDefaultFilename returns the name of the file you should write into (create if necessary), if you're trying to create a new stanza as opposed to updating an existing one. + GetDefaultFilename() string + // IsExplicitFile indicates whether or not this command is interested in exactly one file. This implementation only ever does that via a flag, but implementations that handle local, global, and flags may have more + IsExplicitFile() bool + // GetExplicitFile returns the particular file this command is operating against. This implementation only ever has one, but implementations that handle local, global, and flags may have more + GetExplicitFile() string +} + +type PathOptions struct { + // GlobalFile is the full path to the file to load as the global (final) option + GlobalFile string + // EnvVar is the env var name that points to the list of kubeconfig files to load + EnvVar string + // ExplicitFileFlag is the name of the flag to use for prompting for the kubeconfig file + ExplicitFileFlag string + + // GlobalFileSubpath is an optional value used for displaying help + GlobalFileSubpath string + + LoadingRules *ClientConfigLoadingRules +} + +func (o *PathOptions) GetEnvVarFiles() []string { + if len(o.EnvVar) == 0 { + return []string{} + } + + envVarValue := os.Getenv(o.EnvVar) + if len(envVarValue) == 0 { + return []string{} + } + + return filepath.SplitList(envVarValue) +} + +func (o *PathOptions) GetLoadingPrecedence() []string { + if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 { + return envVarFiles + } + + return []string{o.GlobalFile} +} + +func (o *PathOptions) GetStartingConfig() (*clientcmdapi.Config, error) { + // don't mutate the original + loadingRules := *o.LoadingRules + loadingRules.Precedence = o.GetLoadingPrecedence() + + clientConfig := NewNonInteractiveDeferredLoadingClientConfig(&loadingRules, &ConfigOverrides{}) + rawConfig, err := clientConfig.RawConfig() + if os.IsNotExist(err) { + return clientcmdapi.NewConfig(), nil + } + if err != nil { + return nil, err + } + + return &rawConfig, nil +} + +func (o *PathOptions) GetDefaultFilename() string { + if o.IsExplicitFile() { + return o.GetExplicitFile() + } + + if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 { + if len(envVarFiles) == 1 { + return envVarFiles[0] + } + + // if any of the envvar files already exists, return it + for _, envVarFile := range envVarFiles { + if _, err := os.Stat(envVarFile); err == nil { + return envVarFile + } + } + + // otherwise, return the last one in the list + return envVarFiles[len(envVarFiles)-1] + } + + return o.GlobalFile +} + +func (o *PathOptions) IsExplicitFile() bool { + if len(o.LoadingRules.ExplicitPath) > 0 { + return true + } + + return false +} + +func (o *PathOptions) GetExplicitFile() string { + return o.LoadingRules.ExplicitPath +} + +func NewDefaultPathOptions() *PathOptions { + ret := &PathOptions{ + GlobalFile: RecommendedHomeFile, + EnvVar: RecommendedConfigPathEnvVar, + ExplicitFileFlag: RecommendedConfigPathFlag, + + GlobalFileSubpath: path.Join(RecommendedHomeDir, RecommendedFileName), + + LoadingRules: NewDefaultClientConfigLoadingRules(), + } + ret.LoadingRules.DoNotResolvePaths = true + + return ret +} + +// ModifyConfig takes a Config object, iterates through Clusters, AuthInfos, and Contexts, uses the LocationOfOrigin if specified or +// uses the default destination file to write the results into. 
This results in multiple file reads, but it's very easy to follow. +// Preferences and CurrentContext should always be set in the default destination file. Since we can't distinguish between empty and missing values +// (no nil strings), we're forced have separate handling for them. In the kubeconfig cases, newConfig should have at most one difference, +// that means that this code will only write into a single file. If you want to relativizePaths, you must provide a fully qualified path in any +// modified element. +func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config, relativizePaths bool) error { + possibleSources := configAccess.GetLoadingPrecedence() + // sort the possible kubeconfig files so we always "lock" in the same order + // to avoid deadlock (note: this can fail w/ symlinks, but... come on). + sort.Strings(possibleSources) + for _, filename := range possibleSources { + if err := lockFile(filename); err != nil { + return err + } + defer unlockFile(filename) + } + + startingConfig, err := configAccess.GetStartingConfig() + if err != nil { + return err + } + + // We need to find all differences, locate their original files, read a partial config to modify only that stanza and write out the file. + // Special case the test for current context and preferences since those always write to the default file. + if reflect.DeepEqual(*startingConfig, newConfig) { + // nothing to do + return nil + } + + if startingConfig.CurrentContext != newConfig.CurrentContext { + if err := writeCurrentContext(configAccess, newConfig.CurrentContext); err != nil { + return err + } + } + + if !reflect.DeepEqual(startingConfig.Preferences, newConfig.Preferences) { + if err := writePreferences(configAccess, newConfig.Preferences); err != nil { + return err + } + } + + // Search every cluster, authInfo, and context. 
First from new to old for differences, then from old to new for deletions + for key, cluster := range newConfig.Clusters { + startingCluster, exists := startingConfig.Clusters[key] + if !reflect.DeepEqual(cluster, startingCluster) || !exists { + destinationFile := cluster.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite, err := getConfigFromFile(destinationFile) + if err != nil { + return err + } + t := *cluster + + configToWrite.Clusters[key] = &t + configToWrite.Clusters[key].LocationOfOrigin = destinationFile + if relativizePaths { + if err := RelativizeClusterLocalPaths(configToWrite.Clusters[key]); err != nil { + return err + } + } + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, context := range newConfig.Contexts { + startingContext, exists := startingConfig.Contexts[key] + if !reflect.DeepEqual(context, startingContext) || !exists { + destinationFile := context.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite, err := getConfigFromFile(destinationFile) + if err != nil { + return err + } + configToWrite.Contexts[key] = context + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, authInfo := range newConfig.AuthInfos { + startingAuthInfo, exists := startingConfig.AuthInfos[key] + if !reflect.DeepEqual(authInfo, startingAuthInfo) || !exists { + destinationFile := authInfo.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite, err := getConfigFromFile(destinationFile) + if err != nil { + return err + } + t := *authInfo + configToWrite.AuthInfos[key] = &t + configToWrite.AuthInfos[key].LocationOfOrigin = destinationFile + if relativizePaths { + if err := RelativizeAuthInfoLocalPaths(configToWrite.AuthInfos[key]); err != nil { + return err + } + } + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, cluster := range startingConfig.Clusters { + if _, exists := newConfig.Clusters[key]; !exists { + destinationFile := cluster.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite, err := getConfigFromFile(destinationFile) + if err != nil { + return err + } + delete(configToWrite.Clusters, key) + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, context := range startingConfig.Contexts { + if _, exists := newConfig.Contexts[key]; !exists { + destinationFile := context.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite, err := getConfigFromFile(destinationFile) + if err != nil { + return err + } + delete(configToWrite.Contexts, key) + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, authInfo := range startingConfig.AuthInfos { + if _, exists := newConfig.AuthInfos[key]; !exists { + destinationFile := authInfo.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite, err := getConfigFromFile(destinationFile) + if err != nil { + return err + } + delete(configToWrite.AuthInfos, key) + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + 
} + } + + return nil +} + +func PersisterForUser(configAccess ConfigAccess, user string) restclient.AuthProviderConfigPersister { + return &persister{configAccess, user} +} + +type persister struct { + configAccess ConfigAccess + user string +} + +func (p *persister) Persist(config map[string]string) error { + newConfig, err := p.configAccess.GetStartingConfig() + if err != nil { + return err + } + authInfo, ok := newConfig.AuthInfos[p.user] + if ok && authInfo.AuthProvider != nil { + authInfo.AuthProvider.Config = config + ModifyConfig(p.configAccess, *newConfig, false) + } + return nil +} + +// writeCurrentContext takes three possible paths. +// If newCurrentContext is the same as the startingConfig's current context, then we exit. +// If newCurrentContext has a value, then that value is written into the default destination file. +// If newCurrentContext is empty, then we find the config file that is setting the CurrentContext and clear the value from that file +func writeCurrentContext(configAccess ConfigAccess, newCurrentContext string) error { + if startingConfig, err := configAccess.GetStartingConfig(); err != nil { + return err + } else if startingConfig.CurrentContext == newCurrentContext { + return nil + } + + if configAccess.IsExplicitFile() { + file := configAccess.GetExplicitFile() + currConfig, err := getConfigFromFile(file) + if err != nil { + return err + } + currConfig.CurrentContext = newCurrentContext + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + + if len(newCurrentContext) > 0 { + destinationFile := configAccess.GetDefaultFilename() + config, err := getConfigFromFile(destinationFile) + if err != nil { + return err + } + config.CurrentContext = newCurrentContext + + if err := WriteToFile(*config, destinationFile); err != nil { + return err + } + + return nil + } + + // we're supposed to be clearing the current context. We need to find the first spot in the chain that is setting it and clear it + for _, file := range configAccess.GetLoadingPrecedence() { + if _, err := os.Stat(file); err == nil { + currConfig, err := getConfigFromFile(file) + if err != nil { + return err + } + + if len(currConfig.CurrentContext) > 0 { + currConfig.CurrentContext = newCurrentContext + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + } + } + + return errors.New("no config found to write context") +} + +func writePreferences(configAccess ConfigAccess, newPrefs clientcmdapi.Preferences) error { + if startingConfig, err := configAccess.GetStartingConfig(); err != nil { + return err + } else if reflect.DeepEqual(startingConfig.Preferences, newPrefs) { + return nil + } + + if configAccess.IsExplicitFile() { + file := configAccess.GetExplicitFile() + currConfig, err := getConfigFromFile(file) + if err != nil { + return err + } + currConfig.Preferences = newPrefs + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + + for _, file := range configAccess.GetLoadingPrecedence() { + currConfig, err := getConfigFromFile(file) + if err != nil { + return err + } + + if !reflect.DeepEqual(currConfig.Preferences, newPrefs) { + currConfig.Preferences = newPrefs + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + } + + return errors.New("no config found to write preferences") +} + +// getConfigFromFile tries to read a kubeconfig file and if it can't, returns an error. One exception, missing files result in empty configs, not an error. 
+func getConfigFromFile(filename string) (*clientcmdapi.Config, error) { + config, err := LoadFromFile(filename) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + if config == nil { + config = clientcmdapi.NewConfig() + } + return config, nil +} + +// GetConfigFromFileOrDie tries to read a kubeconfig file and if it can't, it calls exit. One exception, missing files result in empty configs, not an exit +func GetConfigFromFileOrDie(filename string) *clientcmdapi.Config { + config, err := getConfigFromFile(filename) + if err != nil { + glog.FatalDepth(1, err) + } + + return config +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/doc.go new file mode 100644 index 000000000..c07ace6a5 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/doc.go @@ -0,0 +1,37 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package clientcmd provides one stop shopping for building a working client from a fixed config, +from a .kubeconfig file, from command line flags, or from any merged combination. + +Sample usage from merged .kubeconfig files (local directory, home directory) + + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + // if you want to change the loading rules (which files in which order), you can do so here + + configOverrides := &clientcmd.ConfigOverrides{} + // if you want to change override values or bind them to flags, there are methods to help you + + kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) + config, err := kubeConfig.ClientConfig() + if err != nil { + // Do something + } + client, err := metav1.New(config) + // ... +*/ +package clientcmd diff --git a/vendor/k8s.io/client-go/tools/clientcmd/helpers.go b/vendor/k8s.io/client-go/tools/clientcmd/helpers.go new file mode 100644 index 000000000..b609d1a76 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/helpers.go @@ -0,0 +1,35 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "fmt" + "strconv" + "time" +) + +// ParseTimeout returns a parsed duration from a string +// A duration string value must be a positive integer, optionally followed by a corresponding time unit (s|m|h). 
+func ParseTimeout(duration string) (time.Duration, error) { + if i, err := strconv.ParseInt(duration, 10, 64); err == nil && i >= 0 { + return (time.Duration(i) * time.Second), nil + } + if requestTimeout, err := time.ParseDuration(duration); err == nil { + return requestTimeout, nil + } + return 0, fmt.Errorf("Invalid timeout value. Timeout must be a single integer in seconds, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h)") +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go b/vendor/k8s.io/client-go/tools/clientcmd/loader.go similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go rename to vendor/k8s.io/client-go/tools/clientcmd/loader.go index 8988355de..1fcc51038 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/loader.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/loader.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,18 +23,20 @@ import ( "os" "path" "path/filepath" + "reflect" goruntime "runtime" "strings" "github.com/golang/glog" "github.com/imdario/mergo" - "k8s.io/kubernetes/pkg/api/unversioned" - clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" - clientcmdlatest "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest" - "k8s.io/kubernetes/pkg/runtime" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/homedir" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + restclient "k8s.io/client-go/rest" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest" + "k8s.io/client-go/util/homedir" ) const ( @@ -65,6 +67,9 @@ func currentMigrationRules() map[string]string { type ClientConfigLoader interface { ConfigAccess + // IsDefaultConfig returns true if the returned config matches the defaults. + IsDefaultConfig(*restclient.Config) bool + // Load returns the latest config Load() (*clientcmdapi.Config, error) } @@ -85,7 +90,7 @@ func (g *ClientConfigGetter) GetLoadingPrecedence() []string { return nil } func (g *ClientConfigGetter) GetStartingConfig() (*clientcmdapi.Config, error) { - return nil, nil + return g.kubeconfigGetter() } func (g *ClientConfigGetter) GetDefaultFilename() string { return "" @@ -96,6 +101,9 @@ func (g *ClientConfigGetter) IsExplicitFile() bool { func (g *ClientConfigGetter) GetExplicitFile() string { return "" } +func (g *ClientConfigGetter) IsDefaultConfig(config *restclient.Config) bool { + return false +} // ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config // Callers can put the chain together however they want, but we'd recommend: @@ -112,6 +120,10 @@ type ClientConfigLoadingRules struct { // DoNotResolvePaths indicates whether or not to resolve paths with respect to the originating files. This is phrased as a negative so // that a default object that doesn't set this will usually get the behavior it wants. DoNotResolvePaths bool + + // DefaultClientConfig is an optional field indicating what rules to use to calculate a default configuration. + // This should match the overrides passed in to ClientConfig loader. 
+ DefaultClientConfig ClientConfig } // ClientConfigLoadingRules implements the ClientConfigLoader interface. @@ -192,6 +204,7 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { // first merge all of our maps mapConfig := clientcmdapi.NewConfig() + for _, kubeconfig := range kubeconfigs { mergo.Merge(mapConfig, kubeconfig) } @@ -215,7 +228,6 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { errlist = append(errlist, err) } } - return config, utilerrors.NewAggregate(errlist) } @@ -230,14 +242,17 @@ func (rules *ClientConfigLoadingRules) Migrate() error { if _, err := os.Stat(destination); err == nil { // if the destination already exists, do nothing continue + } else if os.IsPermission(err) { + // if we can't access the file, skip it + continue } else if !os.IsNotExist(err) { // if we had an error other than non-existence, fail return err } if sourceInfo, err := os.Stat(source); err != nil { - if os.IsNotExist(err) { - // if the source file doesn't exist, there's no work to do. + if os.IsNotExist(err) || os.IsPermission(err) { + // if the source file doesn't exist or we can't access it, there's no work to do. continue } @@ -314,6 +329,18 @@ func (rules *ClientConfigLoadingRules) GetExplicitFile() string { return rules.ExplicitPath } +// IsDefaultConfig returns true if the provided configuration matches the default +func (rules *ClientConfigLoadingRules) IsDefaultConfig(config *restclient.Config) bool { + if rules.DefaultClientConfig == nil { + return false + } + defaultConfig, err := rules.DefaultClientConfig.ClientConfig() + if err != nil { + return false + } + return reflect.DeepEqual(config, defaultConfig) +} + // LoadFromFile takes a filename and deserializes the contents into Config object func LoadFromFile(filename string) (*clientcmdapi.Config, error) { kubeconfigBytes, err := ioutil.ReadFile(filename) @@ -361,7 +388,7 @@ func Load(data []byte) (*clientcmdapi.Config, error) { if len(data) == 0 { return config, nil } - decoded, _, err := clientcmdlatest.Codec.Decode(data, &unversioned.GroupVersionKind{Version: clientcmdlatest.Version, Kind: "Config"}, config) + decoded, _, err := clientcmdlatest.Codec.Decode(data, &schema.GroupVersionKind{Version: clientcmdlatest.Version, Kind: "Config"}, config) if err != nil { return nil, err } @@ -527,7 +554,7 @@ func GetClusterFileReferences(cluster *clientcmdapi.Cluster) []*string { } func GetAuthInfoFileReferences(authInfo *clientcmdapi.AuthInfo) []*string { - return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey} + return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey, &authInfo.TokenFile} } // ResolvePaths updates the given refs to be absolute paths, relative to the given base directory diff --git a/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go new file mode 100644 index 000000000..92c1a5a00 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go @@ -0,0 +1,154 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "io" + "sync" + + "github.com/golang/glog" + + restclient "k8s.io/client-go/rest" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a client config loader. +// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that +// the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before +// the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid +// passing extraneous information down a call stack +type DeferredLoadingClientConfig struct { + loader ClientConfigLoader + overrides *ConfigOverrides + fallbackReader io.Reader + + clientConfig ClientConfig + loadingLock sync.Mutex + + // provided for testing + icc InClusterConfig +} + +// InClusterConfig abstracts details of whether the client is running in a cluster for testing. +type InClusterConfig interface { + ClientConfig + Possible() bool +} + +// NewNonInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name +func NewNonInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides) ClientConfig { + return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}} +} + +// NewInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name and the fallback auth reader +func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig { + return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}, fallbackReader: fallbackReader} +} + +func (config *DeferredLoadingClientConfig) createClientConfig() (ClientConfig, error) { + if config.clientConfig == nil { + config.loadingLock.Lock() + defer config.loadingLock.Unlock() + + if config.clientConfig == nil { + mergedConfig, err := config.loader.Load() + if err != nil { + return nil, err + } + + var mergedClientConfig ClientConfig + if config.fallbackReader != nil { + mergedClientConfig = NewInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides, config.fallbackReader, config.loader) + } else { + mergedClientConfig = NewNonInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides, config.loader) + } + + config.clientConfig = mergedClientConfig + } + } + + return config.clientConfig, nil +} + +func (config *DeferredLoadingClientConfig) RawConfig() (clientcmdapi.Config, error) { + mergedConfig, err := config.createClientConfig() + if err != nil { + return clientcmdapi.Config{}, err + } + + return mergedConfig.RawConfig() +} + +// ClientConfig implements ClientConfig +func (config *DeferredLoadingClientConfig) ClientConfig() (*restclient.Config, error) { + mergedClientConfig, err := config.createClientConfig() + if err != nil { + return nil, err + } + + // load the configuration and return on non-empty errors and if the + // content differs from the default config + mergedConfig, err := mergedClientConfig.ClientConfig() + switch { + case err != nil: + if !IsEmptyConfig(err) { + // return on any error except empty config + return nil, err + } + case 
mergedConfig != nil: + // the configuration is valid, but if this is equal to the defaults we should try + // in-cluster configuration + if !config.loader.IsDefaultConfig(mergedConfig) { + return mergedConfig, nil + } + } + + // check for in-cluster configuration and use it + if config.icc.Possible() { + glog.V(4).Infof("Using in-cluster configuration") + return config.icc.ClientConfig() + } + + // return the result of the merged client config + return mergedConfig, err +} + +// Namespace implements KubeConfig +func (config *DeferredLoadingClientConfig) Namespace() (string, bool, error) { + mergedKubeConfig, err := config.createClientConfig() + if err != nil { + return "", false, err + } + + ns, ok, err := mergedKubeConfig.Namespace() + // if we get an error and it is not empty config, or if the merged config defined an explicit namespace, or + // if in-cluster config is not possible, return immediately + if (err != nil && !IsEmptyConfig(err)) || ok || !config.icc.Possible() { + // return on any error except empty config + return ns, ok, err + } + + glog.V(4).Infof("Using in-cluster namespace") + + // allow the namespace from the service account token directory to be used. + return config.icc.Namespace() +} + +// ConfigAccess implements ClientConfig +func (config *DeferredLoadingClientConfig) ConfigAccess() ConfigAccess { + return config.loader +} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go b/vendor/k8s.io/client-go/tools/clientcmd/overrides.go similarity index 79% rename from vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go rename to vendor/k8s.io/client-go/tools/clientcmd/overrides.go index f6dda97f1..f18aa6a4a 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/overrides.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/overrides.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,16 +21,19 @@ import ( "github.com/spf13/pflag" - clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) // ConfigOverrides holds values that should override whatever information is pulled from the actual Config object. You can't // simply use an actual Config object, because Configs hold maps, but overrides are restricted to "at most one" type ConfigOverrides struct { - AuthInfo clientcmdapi.AuthInfo - ClusterInfo clientcmdapi.Cluster - Context clientcmdapi.Context - CurrentContext string + AuthInfo clientcmdapi.AuthInfo + // ClusterDefaults are applied before the configured cluster info is loaded. + ClusterDefaults clientcmdapi.Cluster + ClusterInfo clientcmdapi.Cluster + Context clientcmdapi.Context + CurrentContext string + Timeout string } // ConfigOverrideFlags holds the flag names to be used for binding command line flags. Notice that this structure tightly @@ -40,6 +43,7 @@ type ConfigOverrideFlags struct { ClusterOverrideFlags ClusterOverrideFlags ContextOverrideFlags ContextOverrideFlags CurrentContext FlagInfo + Timeout FlagInfo } // AuthOverrideFlags holds the flag names to be used for binding command line flags for AuthInfo objects @@ -81,16 +85,23 @@ type FlagInfo struct { Description string } +// AddSecretAnnotation add secret flag to Annotation. 
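The ConfigOverrides, ConfigOverrideFlags and FlagInfo types above exist so a CLI can bind kubeconfig override flags and defer loading until after flag parsing, via the DeferredLoadingClientConfig added earlier in this change. A minimal sketch of that call pattern, assuming the NewDefaultClientConfigLoadingRules helper from loading.go; the command name and output are illustrative only, not taken from this repository:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	flags := pflag.NewFlagSet("kubewatch", pflag.ExitOnError)

	// Loading rules honour KUBECONFIG / ~/.kube/config; overrides are filled from flags.
	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
	overrides := &clientcmd.ConfigOverrides{}
	clientcmd.BindOverrideFlags(overrides, flags, clientcmd.RecommendedConfigOverrideFlags(""))
	flags.Parse(os.Args[1:])

	// Loading is deferred until ClientConfig() is called, so the parsed
	// flag values are already in place by then.
	kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides)
	restConfig, err := kubeConfig.ClientConfig()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("API server:", restConfig.Host)
}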
+func (f FlagInfo) AddSecretAnnotation(flags *pflag.FlagSet) FlagInfo { + flags.SetAnnotation(f.LongName, "classified", []string{"true"}) + return f +} + // BindStringFlag binds the flag based on the provided info. If LongName == "", nothing is registered -func (f FlagInfo) BindStringFlag(flags *pflag.FlagSet, target *string) { +func (f FlagInfo) BindStringFlag(flags *pflag.FlagSet, target *string) FlagInfo { // you can't register a flag without a long name if len(f.LongName) > 0 { flags.StringVarP(target, f.LongName, f.ShortName, f.Default, f.Description) } + return f } // BindBoolFlag binds the flag based on the provided info. If LongName == "", nothing is registered -func (f FlagInfo) BindBoolFlag(flags *pflag.FlagSet, target *bool) { +func (f FlagInfo) BindBoolFlag(flags *pflag.FlagSet, target *bool) FlagInfo { // you can't register a flag without a long name if len(f.LongName) > 0 { // try to parse Default as a bool. If it fails, assume false @@ -101,6 +112,7 @@ func (f FlagInfo) BindBoolFlag(flags *pflag.FlagSet, target *bool) { flags.BoolVarP(target, f.LongName, f.ShortName, boolVal, f.Description) } + return f } const ( @@ -119,17 +131,30 @@ const ( FlagImpersonate = "as" FlagUsername = "username" FlagPassword = "password" + FlagTimeout = "request-timeout" ) +// RecommendedConfigOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing +func RecommendedConfigOverrideFlags(prefix string) ConfigOverrideFlags { + return ConfigOverrideFlags{ + AuthOverrideFlags: RecommendedAuthOverrideFlags(prefix), + ClusterOverrideFlags: RecommendedClusterOverrideFlags(prefix), + ContextOverrideFlags: RecommendedContextOverrideFlags(prefix), + + CurrentContext: FlagInfo{prefix + FlagContext, "", "", "The name of the kubeconfig context to use"}, + Timeout: FlagInfo{prefix + FlagTimeout, "", "0", "The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). 
A value of zero means don't timeout requests."}, + } +} + // RecommendedAuthOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing func RecommendedAuthOverrideFlags(prefix string) AuthOverrideFlags { return AuthOverrideFlags{ - ClientCertificate: FlagInfo{prefix + FlagCertFile, "", "", "Path to a client certificate file for TLS."}, - ClientKey: FlagInfo{prefix + FlagKeyFile, "", "", "Path to a client key file for TLS."}, - Token: FlagInfo{prefix + FlagBearerToken, "", "", "Bearer token for authentication to the API server."}, - Impersonate: FlagInfo{prefix + FlagImpersonate, "", "", "Username to impersonate for the operation."}, - Username: FlagInfo{prefix + FlagUsername, "", "", "Username for basic authentication to the API server."}, - Password: FlagInfo{prefix + FlagPassword, "", "", "Password for basic authentication to the API server."}, + ClientCertificate: FlagInfo{prefix + FlagCertFile, "", "", "Path to a client certificate file for TLS"}, + ClientKey: FlagInfo{prefix + FlagKeyFile, "", "", "Path to a client key file for TLS"}, + Token: FlagInfo{prefix + FlagBearerToken, "", "", "Bearer token for authentication to the API server"}, + Impersonate: FlagInfo{prefix + FlagImpersonate, "", "", "Username to impersonate for the operation"}, + Username: FlagInfo{prefix + FlagUsername, "", "", "Username for basic authentication to the API server"}, + Password: FlagInfo{prefix + FlagPassword, "", "", "Password for basic authentication to the API server"}, } } @@ -138,18 +163,8 @@ func RecommendedClusterOverrideFlags(prefix string) ClusterOverrideFlags { return ClusterOverrideFlags{ APIServer: FlagInfo{prefix + FlagAPIServer, "", "", "The address and port of the Kubernetes API server"}, APIVersion: FlagInfo{prefix + FlagAPIVersion, "", "", "DEPRECATED: The API version to use when talking to the server"}, - CertificateAuthority: FlagInfo{prefix + FlagCAFile, "", "", "Path to a cert. file for the certificate authority."}, - InsecureSkipTLSVerify: FlagInfo{prefix + FlagInsecure, "", "false", "If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure."}, - } -} - -// RecommendedConfigOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing -func RecommendedConfigOverrideFlags(prefix string) ConfigOverrideFlags { - return ConfigOverrideFlags{ - AuthOverrideFlags: RecommendedAuthOverrideFlags(prefix), - ClusterOverrideFlags: RecommendedClusterOverrideFlags(prefix), - ContextOverrideFlags: RecommendedContextOverrideFlags(prefix), - CurrentContext: FlagInfo{prefix + FlagContext, "", "", "The name of the kubeconfig context to use"}, + CertificateAuthority: FlagInfo{prefix + FlagCAFile, "", "", "Path to a cert. file for the certificate authority"}, + InsecureSkipTLSVerify: FlagInfo{prefix + FlagInsecure, "", "false", "If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure"}, } } @@ -158,18 +173,27 @@ func RecommendedContextOverrideFlags(prefix string) ContextOverrideFlags { return ContextOverrideFlags{ ClusterName: FlagInfo{prefix + FlagClusterName, "", "", "The name of the kubeconfig cluster to use"}, AuthInfoName: FlagInfo{prefix + FlagAuthInfoName, "", "", "The name of the kubeconfig user to use"}, - Namespace: FlagInfo{prefix + FlagNamespace, "", "", "If present, the namespace scope for this CLI request."}, + Namespace: FlagInfo{prefix + FlagNamespace, "n", "", "If present, the namespace scope for this CLI request"}, } } +// BindOverrideFlags is a convenience method to bind the specified flags to their associated variables +func BindOverrideFlags(overrides *ConfigOverrides, flags *pflag.FlagSet, flagNames ConfigOverrideFlags) { + BindAuthInfoFlags(&overrides.AuthInfo, flags, flagNames.AuthOverrideFlags) + BindClusterFlags(&overrides.ClusterInfo, flags, flagNames.ClusterOverrideFlags) + BindContextFlags(&overrides.Context, flags, flagNames.ContextOverrideFlags) + flagNames.CurrentContext.BindStringFlag(flags, &overrides.CurrentContext) + flagNames.Timeout.BindStringFlag(flags, &overrides.Timeout) +} + // BindAuthInfoFlags is a convenience method to bind the specified flags to their associated variables func BindAuthInfoFlags(authInfo *clientcmdapi.AuthInfo, flags *pflag.FlagSet, flagNames AuthOverrideFlags) { - flagNames.ClientCertificate.BindStringFlag(flags, &authInfo.ClientCertificate) - flagNames.ClientKey.BindStringFlag(flags, &authInfo.ClientKey) - flagNames.Token.BindStringFlag(flags, &authInfo.Token) - flagNames.Impersonate.BindStringFlag(flags, &authInfo.Impersonate) - flagNames.Username.BindStringFlag(flags, &authInfo.Username) - flagNames.Password.BindStringFlag(flags, &authInfo.Password) + flagNames.ClientCertificate.BindStringFlag(flags, &authInfo.ClientCertificate).AddSecretAnnotation(flags) + flagNames.ClientKey.BindStringFlag(flags, &authInfo.ClientKey).AddSecretAnnotation(flags) + flagNames.Token.BindStringFlag(flags, &authInfo.Token).AddSecretAnnotation(flags) + flagNames.Impersonate.BindStringFlag(flags, &authInfo.Impersonate).AddSecretAnnotation(flags) + flagNames.Username.BindStringFlag(flags, &authInfo.Username).AddSecretAnnotation(flags) + flagNames.Password.BindStringFlag(flags, &authInfo.Password).AddSecretAnnotation(flags) } // BindClusterFlags is a convenience method to bind the specified flags to their associated variables @@ -182,14 +206,6 @@ func BindClusterFlags(clusterInfo *clientcmdapi.Cluster, flags *pflag.FlagSet, f flagNames.InsecureSkipTLSVerify.BindBoolFlag(flags, &clusterInfo.InsecureSkipTLSVerify) } -// BindOverrideFlags is a convenience method to bind the specified flags to their associated variables -func BindOverrideFlags(overrides *ConfigOverrides, flags *pflag.FlagSet, flagNames ConfigOverrideFlags) { - BindAuthInfoFlags(&overrides.AuthInfo, flags, flagNames.AuthOverrideFlags) - BindClusterFlags(&overrides.ClusterInfo, flags, flagNames.ClusterOverrideFlags) - BindContextFlags(&overrides.Context, flags, flagNames.ContextOverrideFlags) - flagNames.CurrentContext.BindStringFlag(flags, &overrides.CurrentContext) -} - // BindFlags is a convenience method to bind the specified flags to their associated variables func BindContextFlags(contextInfo *clientcmdapi.Context, flags *pflag.FlagSet, flagNames ContextOverrideFlags) { flagNames.ClusterName.BindStringFlag(flags, &contextInfo.Cluster) diff --git a/vendor/k8s.io/client-go/tools/clientcmd/validation.go 
b/vendor/k8s.io/client-go/tools/clientcmd/validation.go new file mode 100644 index 000000000..ceeeb042e --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/validation.go @@ -0,0 +1,270 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "errors" + "fmt" + "os" + "reflect" + "strings" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/validation" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +var ( + ErrNoContext = errors.New("no context chosen") + ErrEmptyConfig = errors.New("no configuration has been provided") + // message is for consistency with old behavior + ErrEmptyCluster = errors.New("cluster has no server defined") +) + +type errContextNotFound struct { + ContextName string +} + +func (e *errContextNotFound) Error() string { + return fmt.Sprintf("context was not found for specified context: %v", e.ContextName) +} + +// IsContextNotFound returns a boolean indicating whether the error is known to +// report that a context was not found +func IsContextNotFound(err error) bool { + if err == nil { + return false + } + if _, ok := err.(*errContextNotFound); ok || err == ErrNoContext { + return true + } + return strings.Contains(err.Error(), "context was not found for specified context") +} + +// IsEmptyConfig returns true if the provided error indicates the provided configuration +// is empty. +func IsEmptyConfig(err error) bool { + switch t := err.(type) { + case errConfigurationInvalid: + return len(t) == 1 && t[0] == ErrEmptyConfig + } + return err == ErrEmptyConfig +} + +// errConfigurationInvalid is a set of errors indicating the configuration is invalid. +type errConfigurationInvalid []error + +// errConfigurationInvalid implements error and Aggregate +var _ error = errConfigurationInvalid{} +var _ utilerrors.Aggregate = errConfigurationInvalid{} + +func newErrConfigurationInvalid(errs []error) error { + switch len(errs) { + case 0: + return nil + default: + return errConfigurationInvalid(errs) + } +} + +// Error implements the error interface +func (e errConfigurationInvalid) Error() string { + return fmt.Sprintf("invalid configuration: %v", utilerrors.NewAggregate(e).Error()) +} + +// Errors implements the AggregateError interface +func (e errConfigurationInvalid) Errors() []error { + return e +} + +// IsConfigurationInvalid returns true if the provided error indicates the configuration is invalid. +func IsConfigurationInvalid(err error) bool { + switch err.(type) { + case *errContextNotFound, errConfigurationInvalid: + return true + } + return IsContextNotFound(err) +} + +// Validate checks for errors in the Config. It does not return early so that it can find as many errors as possible. 
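The error predicates above (IsContextNotFound, IsEmptyConfig, IsConfigurationInvalid) are what callers branch on after validating a loaded kubeconfig. A sketch of that flow, assuming the kubeconfig path is passed as the first argument; the messages are illustrative:

package main

import (
	"fmt"
	"os"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.LoadFromFile(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, "cannot read kubeconfig:", err)
		os.Exit(1)
	}

	err = clientcmd.Validate(*cfg)
	switch {
	case err == nil:
		fmt.Println("kubeconfig is valid, current context:", cfg.CurrentContext)
	case clientcmd.IsEmptyConfig(err):
		fmt.Println("kubeconfig is empty; falling back to defaults")
	case clientcmd.IsContextNotFound(err):
		fmt.Println("current-context does not exist:", err)
	case clientcmd.IsConfigurationInvalid(err):
		fmt.Println("kubeconfig is invalid:", err)
	}
}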
+func Validate(config clientcmdapi.Config) error { + validationErrors := make([]error, 0) + + if clientcmdapi.IsConfigEmpty(&config) { + return newErrConfigurationInvalid([]error{ErrEmptyConfig}) + } + + if len(config.CurrentContext) != 0 { + if _, exists := config.Contexts[config.CurrentContext]; !exists { + validationErrors = append(validationErrors, &errContextNotFound{config.CurrentContext}) + } + } + + for contextName, context := range config.Contexts { + validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) + } + + for authInfoName, authInfo := range config.AuthInfos { + validationErrors = append(validationErrors, validateAuthInfo(authInfoName, *authInfo)...) + } + + for clusterName, clusterInfo := range config.Clusters { + validationErrors = append(validationErrors, validateClusterInfo(clusterName, *clusterInfo)...) + } + + return newErrConfigurationInvalid(validationErrors) +} + +// ConfirmUsable looks a particular context and determines if that particular part of the config is useable. There might still be errors in the config, +// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible. +func ConfirmUsable(config clientcmdapi.Config, passedContextName string) error { + validationErrors := make([]error, 0) + + if clientcmdapi.IsConfigEmpty(&config) { + return newErrConfigurationInvalid([]error{ErrEmptyConfig}) + } + + var contextName string + if len(passedContextName) != 0 { + contextName = passedContextName + } else { + contextName = config.CurrentContext + } + + if len(contextName) == 0 { + return ErrNoContext + } + + context, exists := config.Contexts[contextName] + if !exists { + validationErrors = append(validationErrors, &errContextNotFound{contextName}) + } + + if exists { + validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) + validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *config.AuthInfos[context.AuthInfo])...) + validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *config.Clusters[context.Cluster])...) + } + + return newErrConfigurationInvalid(validationErrors) +} + +// validateClusterInfo looks for conflicts and errors in the cluster info +func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) []error { + validationErrors := make([]error, 0) + + if reflect.DeepEqual(clientcmdapi.Cluster{}, clusterInfo) { + return []error{ErrEmptyCluster} + } + + if len(clusterInfo.Server) == 0 { + if len(clusterName) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("default cluster has no server defined")) + } else { + validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName)) + } + } + // Make sure CA data and CA file aren't both specified + if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. 
certificate-authority-data will override.", clusterName)) + } + if len(clusterInfo.CertificateAuthority) != 0 { + clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) + defer clientCertCA.Close() + if err != nil { + validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) + } + } + + return validationErrors +} + +// validateAuthInfo looks for conflicts and errors in the auth info +func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []error { + validationErrors := make([]error, 0) + + usingAuthPath := false + methods := make([]string, 0, 3) + if len(authInfo.Token) != 0 { + methods = append(methods, "token") + } + if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 { + methods = append(methods, "basicAuth") + } + + if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { + // Make sure cert data and file aren't both specified + if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override.", authInfoName)) + } + // Make sure key data and file aren't both specified + if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) + } + // Make sure a key is specified + if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method.", authInfoName)) + } + + if len(authInfo.ClientCertificate) != 0 { + clientCertFile, err := os.Open(authInfo.ClientCertificate) + defer clientCertFile.Close() + if err != nil { + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) + } + } + if len(authInfo.ClientKey) != 0 { + clientKeyFile, err := os.Open(authInfo.ClientKey) + defer clientKeyFile.Close() + if err != nil { + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) + } + } + } + + // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case + if (len(methods) > 1) && (!usingAuthPath) { + validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) + } + + return validationErrors +} + +// validateContext looks for errors in the context. 
It is not transitive, so errors in the reference authInfo or cluster configs are not included in this return +func validateContext(contextName string, context clientcmdapi.Context, config clientcmdapi.Config) []error { + validationErrors := make([]error, 0) + + if len(context.AuthInfo) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("user was not specified for context %q", contextName)) + } else if _, exists := config.AuthInfos[context.AuthInfo]; !exists { + validationErrors = append(validationErrors, fmt.Errorf("user %q was not found for context %q", context.AuthInfo, contextName)) + } + + if len(context.Cluster) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("cluster was not specified for context %q", contextName)) + } else if _, exists := config.Clusters[context.Cluster]; !exists { + validationErrors = append(validationErrors, fmt.Errorf("cluster %q was not found for context %q", context.Cluster, contextName)) + } + + if len(context.Namespace) != 0 { + if len(validation.IsDNS1123Label(context.Namespace)) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("namespace %q for context %q does not conform to the kubernetes DNS_LABEL rules", context.Namespace, contextName)) + } + } + + return validationErrors +} diff --git a/vendor/k8s.io/client-go/tools/metrics/OWNERS b/vendor/k8s.io/client-go/tools/metrics/OWNERS new file mode 100755 index 000000000..ff5179807 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/metrics/OWNERS @@ -0,0 +1,7 @@ +reviewers: +- wojtek-t +- eparis +- krousey +- jayunit100 +- fgrzadkowski +- tmrts diff --git a/vendor/k8s.io/client-go/tools/metrics/metrics.go b/vendor/k8s.io/client-go/tools/metrics/metrics.go new file mode 100644 index 000000000..a01306c65 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/metrics/metrics.go @@ -0,0 +1,61 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package metrics provides abstractions for registering which metrics +// to record. +package metrics + +import ( + "net/url" + "sync" + "time" +) + +var registerMetrics sync.Once + +// LatencyMetric observes client latency partitioned by verb and url. +type LatencyMetric interface { + Observe(verb string, u url.URL, latency time.Duration) +} + +// ResultMetric counts response codes partitioned by method and host. +type ResultMetric interface { + Increment(code string, method string, host string) +} + +var ( + // RequestLatency is the latency metric that rest clients will update. + RequestLatency LatencyMetric = noopLatency{} + // RequestResult is the result metric that rest clients will update. + RequestResult ResultMetric = noopResult{} +) + +// Register registers metrics for the rest client to use. This can +// only be called once. 
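LatencyMetric and ResultMetric are deliberately tiny interfaces so any metrics backend can be plugged in; the rest client only ever calls Observe and Increment. A log-based stand-in (purely illustrative, not a real backend) would look like:

package main

import (
	"log"
	"net/url"
	"time"

	"k8s.io/client-go/tools/metrics"
)

type logLatency struct{}

func (logLatency) Observe(verb string, u url.URL, latency time.Duration) {
	log.Printf("request %s %s took %s", verb, u.String(), latency)
}

type logResult struct{}

func (logResult) Increment(code, method, host string) {
	log.Printf("response %s for %s %s", code, method, host)
}

func main() {
	// Register may only be called once; later calls are ignored because of sync.Once.
	metrics.Register(logLatency{}, logResult{})
}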
+func Register(lm LatencyMetric, rm ResultMetric) { + registerMetrics.Do(func() { + RequestLatency = lm + RequestResult = rm + }) +} + +type noopLatency struct{} + +func (noopLatency) Observe(string, url.URL, time.Duration) {} + +type noopResult struct{} + +func (noopResult) Increment(string, string, string) {} diff --git a/vendor/k8s.io/client-go/transport/OWNERS b/vendor/k8s.io/client-go/transport/OWNERS new file mode 100755 index 000000000..bf0ba5b9f --- /dev/null +++ b/vendor/k8s.io/client-go/transport/OWNERS @@ -0,0 +1,7 @@ +reviewers: +- smarterclayton +- wojtek-t +- deads2k +- liggitt +- krousey +- caesarxuchao diff --git a/vendor/k8s.io/kubernetes/pkg/client/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go similarity index 96% rename from vendor/k8s.io/kubernetes/pkg/client/transport/cache.go rename to vendor/k8s.io/client-go/transport/cache.go index 8c07f5391..8d76def34 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/transport/cache.go +++ b/vendor/k8s.io/client-go/transport/cache.go @@ -1,5 +1,5 @@ /* -Copyright 2015 The Kubernetes Authors All rights reserved. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,7 +23,7 @@ import ( "sync" "time" - utilnet "k8s.io/kubernetes/pkg/util/net" + utilnet "k8s.io/apimachinery/pkg/util/net" ) // TlsTransportCache caches TLS http.RoundTrippers different configurations. The diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go new file mode 100644 index 000000000..820594ba3 --- /dev/null +++ b/vendor/k8s.io/client-go/transport/config.go @@ -0,0 +1,95 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +import "net/http" + +// Config holds various options for establishing a transport. +type Config struct { + // UserAgent is an optional field that specifies the caller of this + // request. + UserAgent string + + // The base TLS configuration for this transport. + TLS TLSConfig + + // Username and password for basic authentication + Username string + Password string + + // Bearer token for authentication + BearerToken string + + // Impersonate is the config that this Config will impersonate using + Impersonate ImpersonationConfig + + // Transport may be used for custom HTTP behavior. This attribute may + // not be specified with the TLS client certificate options. Use + // WrapTransport for most client level operations. + Transport http.RoundTripper + + // WrapTransport will be invoked for custom HTTP behavior after the + // underlying transport is initialized (either the transport created + // from TLSClientConfig, Transport, or http.DefaultTransport). The + // config may layer other RoundTrippers on top of the returned + // RoundTripper. 
+ WrapTransport func(rt http.RoundTripper) http.RoundTripper +} + +// ImpersonationConfig has all the available impersonation options +type ImpersonationConfig struct { + // UserName matches user.Info.GetName() + UserName string + // Groups matches user.Info.GetGroups() + Groups []string + // Extra matches user.Info.GetExtra() + Extra map[string][]string +} + +// HasCA returns whether the configuration has a certificate authority or not. +func (c *Config) HasCA() bool { + return len(c.TLS.CAData) > 0 || len(c.TLS.CAFile) > 0 +} + +// HasBasicAuth returns whether the configuration has basic authentication or not. +func (c *Config) HasBasicAuth() bool { + return len(c.Username) != 0 +} + +// HasTokenAuth returns whether the configuration has token authentication or not. +func (c *Config) HasTokenAuth() bool { + return len(c.BearerToken) != 0 +} + +// HasCertAuth returns whether the configuration has certificate authentication or not. +func (c *Config) HasCertAuth() bool { + return len(c.TLS.CertData) != 0 || len(c.TLS.CertFile) != 0 +} + +// TLSConfig holds the information needed to set up a TLS transport. +type TLSConfig struct { + CAFile string // Path of the PEM-encoded server trusted root certificates. + CertFile string // Path of the PEM-encoded client certificate. + KeyFile string // Path of the PEM-encoded client key. + + Insecure bool // Server should be accessed without verifying the certificate. For testing only. + ServerName string // Override for the server name passed to the server for SNI and used to verify certificates. + + CAData []byte // Bytes of the PEM-encoded server trusted root certificates. Supercedes CAFile. + CertData []byte // Bytes of the PEM-encoded client certificate. Supercedes CertFile. + KeyData []byte // Bytes of the PEM-encoded client key. Supercedes KeyFile. +} diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go new file mode 100644 index 000000000..a6f396fbb --- /dev/null +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -0,0 +1,436 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +import ( + "fmt" + "net/http" + "strings" + "time" + + "github.com/golang/glog" +) + +// HTTPWrappersForConfig wraps a round tripper with any relevant layered +// behavior from the config. Exposed to allow more clients that need HTTP-like +// behavior but then must hijack the underlying connection (like WebSocket or +// HTTP2 clients). Pure HTTP clients should use the RoundTripper returned from +// New. 
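HTTPWrappersForConfig only layers authentication, user agent and debug logging on top of an existing RoundTripper; it does not touch TLS. A sketch of wrapping the default transport with a bearer token and user agent; the token source, user agent string and URL are assumptions:

package main

import (
	"log"
	"net/http"
	"os"

	"k8s.io/client-go/transport"
)

func main() {
	cfg := &transport.Config{
		UserAgent:   "kubewatch",             // illustrative value
		BearerToken: os.Getenv("KUBE_TOKEN"), // hypothetical token source
	}

	rt, err := transport.HTTPWrappersForConfig(cfg, http.DefaultTransport)
	if err != nil {
		log.Fatal(err)
	}

	client := &http.Client{Transport: rt}
	resp, err := client.Get("https://kubernetes.default.svc/healthz") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}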
+func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) { + if config.WrapTransport != nil { + rt = config.WrapTransport(rt) + } + + rt = DebugWrappers(rt) + + // Set authentication wrappers + switch { + case config.HasBasicAuth() && config.HasTokenAuth(): + return nil, fmt.Errorf("username/password or bearer token may be set, but not both") + case config.HasTokenAuth(): + rt = NewBearerAuthRoundTripper(config.BearerToken, rt) + case config.HasBasicAuth(): + rt = NewBasicAuthRoundTripper(config.Username, config.Password, rt) + } + if len(config.UserAgent) > 0 { + rt = NewUserAgentRoundTripper(config.UserAgent, rt) + } + if len(config.Impersonate.UserName) > 0 || + len(config.Impersonate.Groups) > 0 || + len(config.Impersonate.Extra) > 0 { + rt = NewImpersonatingRoundTripper(config.Impersonate, rt) + } + return rt, nil +} + +// DebugWrappers wraps a round tripper and logs based on the current log level. +func DebugWrappers(rt http.RoundTripper) http.RoundTripper { + switch { + case bool(glog.V(9)): + rt = newDebuggingRoundTripper(rt, debugCurlCommand, debugURLTiming, debugResponseHeaders) + case bool(glog.V(8)): + rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus, debugResponseHeaders) + case bool(glog.V(7)): + rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus) + case bool(glog.V(6)): + rt = newDebuggingRoundTripper(rt, debugURLTiming) + } + + return rt +} + +type requestCanceler interface { + CancelRequest(*http.Request) +} + +type authProxyRoundTripper struct { + username string + groups []string + extra map[string][]string + + rt http.RoundTripper +} + +// NewAuthProxyRoundTripper provides a roundtripper which will add auth proxy fields to requests for +// authentication terminating proxy cases +// assuming you pull the user from the context: +// username is the user.Info.GetName() of the user +// groups is the user.Info.GetGroups() of the user +// extra is the user.Info.GetExtra() of the user +// extra can contain any additional information that the authenticator +// thought was interesting, for example authorization scopes. +// In order to faithfully round-trip through an impersonation flow, these keys +// MUST be lowercase. +func NewAuthProxyRoundTripper(username string, groups []string, extra map[string][]string, rt http.RoundTripper) http.RoundTripper { + return &authProxyRoundTripper{ + username: username, + groups: groups, + extra: extra, + rt: rt, + } +} + +func (rt *authProxyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + req = cloneRequest(req) + SetAuthProxyHeaders(req, rt.username, rt.groups, rt.extra) + + return rt.rt.RoundTrip(req) +} + +// SetAuthProxyHeaders stomps the auth proxy header fields. It mutates its argument. 
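NewAuthProxyRoundTripper is aimed at authenticating front proxies: it stamps X-Remote-User, X-Remote-Group and X-Remote-Extra-* headers on every outgoing request. A minimal sketch; the user, group and extra values are made up:

package main

import (
	"net/http"

	"k8s.io/client-go/transport"
)

// newProxyClient returns a client that identifies requests as user "jane".
func newProxyClient() *http.Client {
	rt := transport.NewAuthProxyRoundTripper(
		"jane",                                  // becomes X-Remote-User
		[]string{"developers"},                  // becomes X-Remote-Group
		map[string][]string{"scopes": {"view"}}, // becomes X-Remote-Extra-scopes
		http.DefaultTransport,
	)
	return &http.Client{Transport: rt}
}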
+func SetAuthProxyHeaders(req *http.Request, username string, groups []string, extra map[string][]string) { + req.Header.Del("X-Remote-User") + req.Header.Del("X-Remote-Group") + for key := range req.Header { + if strings.HasPrefix(strings.ToLower(key), strings.ToLower("X-Remote-Extra-")) { + req.Header.Del(key) + } + } + + req.Header.Set("X-Remote-User", username) + for _, group := range groups { + req.Header.Add("X-Remote-Group", group) + } + for key, values := range extra { + for _, value := range values { + req.Header.Add("X-Remote-Extra-"+key, value) + } + } +} + +func (rt *authProxyRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.rt.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *authProxyRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } + +type userAgentRoundTripper struct { + agent string + rt http.RoundTripper +} + +func NewUserAgentRoundTripper(agent string, rt http.RoundTripper) http.RoundTripper { + return &userAgentRoundTripper{agent, rt} +} + +func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("User-Agent")) != 0 { + return rt.rt.RoundTrip(req) + } + req = cloneRequest(req) + req.Header.Set("User-Agent", rt.agent) + return rt.rt.RoundTrip(req) +} + +func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.rt.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *userAgentRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } + +type basicAuthRoundTripper struct { + username string + password string + rt http.RoundTripper +} + +// NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a +// request unless it has already been set. +func NewBasicAuthRoundTripper(username, password string, rt http.RoundTripper) http.RoundTripper { + return &basicAuthRoundTripper{username, password, rt} +} + +func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("Authorization")) != 0 { + return rt.rt.RoundTrip(req) + } + req = cloneRequest(req) + req.SetBasicAuth(rt.username, rt.password) + return rt.rt.RoundTrip(req) +} + +func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.rt.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *basicAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } + +// These correspond to the headers used in pkg/apis/authentication. We don't want the package dependency, +// but you must not change the values. +const ( + // ImpersonateUserHeader is used to impersonate a particular user during an API server request + ImpersonateUserHeader = "Impersonate-User" + + // ImpersonateGroupHeader is used to impersonate a particular group during an API server request. + // It can be repeated multiplied times for multiple groups. + ImpersonateGroupHeader = "Impersonate-Group" + + // ImpersonateUserExtraHeaderPrefix is a prefix for a header used to impersonate an entry in the + // extra map[string][]string for user.Info. The key for the `extra` map is suffix. + // The same key can be repeated multiple times to have multiple elements in the slice under a single key. 
+ // For instance: + // Impersonate-Extra-Foo: one + // Impersonate-Extra-Foo: two + // results in extra["Foo"] = []string{"one", "two"} + ImpersonateUserExtraHeaderPrefix = "Impersonate-Extra-" +) + +type impersonatingRoundTripper struct { + impersonate ImpersonationConfig + delegate http.RoundTripper +} + +// NewImpersonatingRoundTripper will add an Act-As header to a request unless it has already been set. +func NewImpersonatingRoundTripper(impersonate ImpersonationConfig, delegate http.RoundTripper) http.RoundTripper { + return &impersonatingRoundTripper{impersonate, delegate} +} + +func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + // use the user header as marker for the rest. + if len(req.Header.Get(ImpersonateUserHeader)) != 0 { + return rt.delegate.RoundTrip(req) + } + req = cloneRequest(req) + req.Header.Set(ImpersonateUserHeader, rt.impersonate.UserName) + + for _, group := range rt.impersonate.Groups { + req.Header.Add(ImpersonateGroupHeader, group) + } + for k, vv := range rt.impersonate.Extra { + for _, v := range vv { + req.Header.Add(ImpersonateUserExtraHeaderPrefix+k, v) + } + } + + return rt.delegate.RoundTrip(req) +} + +func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.delegate.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.delegate } + +type bearerAuthRoundTripper struct { + bearer string + rt http.RoundTripper +} + +// NewBearerAuthRoundTripper adds the provided bearer token to a request +// unless the authorization header has already been set. +func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTripper { + return &bearerAuthRoundTripper{bearer, rt} +} + +func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("Authorization")) != 0 { + return rt.rt.RoundTrip(req) + } + + req = cloneRequest(req) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", rt.bearer)) + return rt.rt.RoundTrip(req) +} + +func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.rt.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *bearerAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. 
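The impersonation wrapper is used when a client with its own credentials wants requests evaluated as another subject. A sketch using the ImpersonationConfig from config.go; the user name, group and extra key are examples only:

package main

import (
	"net/http"

	"k8s.io/client-go/transport"
)

// impersonatingClient wraps base so every request carries Impersonate-* headers.
func impersonatingClient(base http.RoundTripper) *http.Client {
	rt := transport.NewImpersonatingRoundTripper(transport.ImpersonationConfig{
		UserName: "system:serviceaccount:default:kubewatch", // example subject
		Groups:   []string{"system:serviceaccounts"},
		Extra:    map[string][]string{"reason": {"debugging"}},
	}, base)
	return &http.Client{Transport: rt}
}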
+func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} + +// requestInfo keeps track of information about a request/response combination +type requestInfo struct { + RequestHeaders http.Header + RequestVerb string + RequestURL string + + ResponseStatus string + ResponseHeaders http.Header + ResponseErr error + + Duration time.Duration +} + +// newRequestInfo creates a new RequestInfo based on an http request +func newRequestInfo(req *http.Request) *requestInfo { + return &requestInfo{ + RequestURL: req.URL.String(), + RequestVerb: req.Method, + RequestHeaders: req.Header, + } +} + +// complete adds information about the response to the requestInfo +func (r *requestInfo) complete(response *http.Response, err error) { + if err != nil { + r.ResponseErr = err + return + } + r.ResponseStatus = response.Status + r.ResponseHeaders = response.Header +} + +// toCurl returns a string that can be run as a command in a terminal (minus the body) +func (r *requestInfo) toCurl() string { + headers := "" + for key, values := range r.RequestHeaders { + for _, value := range values { + headers += fmt.Sprintf(` -H %q`, fmt.Sprintf("%s: %s", key, value)) + } + } + + return fmt.Sprintf("curl -k -v -X%s %s %s", r.RequestVerb, headers, r.RequestURL) +} + +// debuggingRoundTripper will display information about the requests passing +// through it based on what is configured +type debuggingRoundTripper struct { + delegatedRoundTripper http.RoundTripper + + levels map[debugLevel]bool +} + +type debugLevel int + +const ( + debugJustURL debugLevel = iota + debugURLTiming + debugCurlCommand + debugRequestHeaders + debugResponseStatus + debugResponseHeaders +) + +func newDebuggingRoundTripper(rt http.RoundTripper, levels ...debugLevel) *debuggingRoundTripper { + drt := &debuggingRoundTripper{ + delegatedRoundTripper: rt, + levels: make(map[debugLevel]bool, len(levels)), + } + for _, v := range levels { + drt.levels[v] = true + } + return drt +} + +func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.delegatedRoundTripper.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented") + } +} + +func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + reqInfo := newRequestInfo(req) + + if rt.levels[debugJustURL] { + glog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL) + } + if rt.levels[debugCurlCommand] { + glog.Infof("%s", reqInfo.toCurl()) + + } + if rt.levels[debugRequestHeaders] { + glog.Infof("Request Headers:") + for key, values := range reqInfo.RequestHeaders { + for _, value := range values { + glog.Infof(" %s: %s", key, value) + } + } + } + + startTime := time.Now() + response, err := rt.delegatedRoundTripper.RoundTrip(req) + reqInfo.Duration = time.Since(startTime) + + reqInfo.complete(response, err) + + if rt.levels[debugURLTiming] { + glog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + } + if rt.levels[debugResponseStatus] { + glog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + } + if rt.levels[debugResponseHeaders] { + glog.Infof("Response Headers:") + for key, values := range 
reqInfo.ResponseHeaders { + for _, value := range values { + glog.Infof(" %s: %s", key, value) + } + } + } + + return response, err +} + +func (rt *debuggingRoundTripper) WrappedRoundTripper() http.RoundTripper { + return rt.delegatedRoundTripper +} diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go new file mode 100644 index 000000000..15be0a3e6 --- /dev/null +++ b/vendor/k8s.io/client-go/transport/transport.go @@ -0,0 +1,141 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" +) + +// New returns an http.RoundTripper that will provide the authentication +// or transport level security defined by the provided Config. +func New(config *Config) (http.RoundTripper, error) { + // Set transport level security + if config.Transport != nil && (config.HasCA() || config.HasCertAuth() || config.TLS.Insecure) { + return nil, fmt.Errorf("using a custom transport with TLS certificate options or the insecure flag is not allowed") + } + + var ( + rt http.RoundTripper + err error + ) + + if config.Transport != nil { + rt = config.Transport + } else { + rt, err = tlsCache.get(config) + if err != nil { + return nil, err + } + } + + return HTTPWrappersForConfig(config, rt) +} + +// TLSConfigFor returns a tls.Config that will provide the transport level security defined +// by the provided Config. Will return nil if no transport level security is requested. +func TLSConfigFor(c *Config) (*tls.Config, error) { + if !(c.HasCA() || c.HasCertAuth() || c.TLS.Insecure) { + return nil, nil + } + if c.HasCA() && c.TLS.Insecure { + return nil, fmt.Errorf("specifying a root certificates file with the insecure flag is not allowed") + } + if err := loadTLSFiles(c); err != nil { + return nil, err + } + + tlsConfig := &tls.Config{ + // Can't use SSLv3 because of POODLE and BEAST + // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher + // Can't use TLSv1.1 because of RC4 cipher usage + MinVersion: tls.VersionTLS12, + InsecureSkipVerify: c.TLS.Insecure, + ServerName: c.TLS.ServerName, + } + + if c.HasCA() { + tlsConfig.RootCAs = rootCertPool(c.TLS.CAData) + } + + if c.HasCertAuth() { + cert, err := tls.X509KeyPair(c.TLS.CertData, c.TLS.KeyData) + if err != nil { + return nil, err + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + + return tlsConfig, nil +} + +// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData, +// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are +// either populated or were empty to start. 
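transport.New is the one-stop entry point: it builds (or reuses, via the TLS transport cache) a transport from the file or byte fields in TLSConfig and then applies the same HTTP wrappers shown earlier. A sketch with a CA bundle and client certificate; the paths are placeholders:

package main

import (
	"log"
	"net/http"

	"k8s.io/client-go/transport"
)

func main() {
	cfg := &transport.Config{
		TLS: transport.TLSConfig{
			CAFile:   "/etc/kubewatch/ca.crt",     // placeholder path
			CertFile: "/etc/kubewatch/client.crt", // placeholder path
			KeyFile:  "/etc/kubewatch/client.key", // placeholder path
		},
	}

	rt, err := transport.New(cfg)
	if err != nil {
		log.Fatal(err)
	}
	client := &http.Client{Transport: rt}
	log.Printf("configured HTTPS client transport: %T", client.Transport)
}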
+func loadTLSFiles(c *Config) error { + var err error + c.TLS.CAData, err = dataFromSliceOrFile(c.TLS.CAData, c.TLS.CAFile) + if err != nil { + return err + } + + c.TLS.CertData, err = dataFromSliceOrFile(c.TLS.CertData, c.TLS.CertFile) + if err != nil { + return err + } + + c.TLS.KeyData, err = dataFromSliceOrFile(c.TLS.KeyData, c.TLS.KeyFile) + if err != nil { + return err + } + return nil +} + +// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, +// or an error if an error occurred reading the file +func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { + if len(data) > 0 { + return data, nil + } + if len(file) > 0 { + fileData, err := ioutil.ReadFile(file) + if err != nil { + return []byte{}, err + } + return fileData, nil + } + return nil, nil +} + +// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs". +// When caData is not empty, it will be the ONLY information used in the CertPool. +func rootCertPool(caData []byte) *x509.CertPool { + // What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go + // code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values + // It doesn't allow trusting either/or, but hopefully that won't be an issue + if len(caData) == 0 { + return nil + } + + // if we have caData, use it + certPool := x509.NewCertPool() + certPool.AppendCertsFromPEM(caData) + return certPool +} diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go new file mode 100644 index 000000000..6854d4152 --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -0,0 +1,215 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cert + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + cryptorand "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "math" + "math/big" + "net" + "time" +) + +const ( + rsaKeySize = 2048 + duration365d = time.Hour * 24 * 365 +) + +// Config containes the basic fields required for creating a certificate +type Config struct { + CommonName string + Organization []string + AltNames AltNames + Usages []x509.ExtKeyUsage +} + +// AltNames contains the domain names and IP addresses that will be added +// to the API Server's x509 certificate SubAltNames field. The values will +// be passed directly to the x509.Certificate object. 
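Taken together, the helpers that follow in this file implement a small CA workflow: generate a key, self-sign a CA certificate, then issue a leaf certificate for a host. A sketch of that flow; the host names are examples, and the PEM encoding and writing use the helpers from pem.go and io.go later in this change:

package main

import (
	"crypto/x509"
	"log"
	"net"

	"k8s.io/client-go/util/cert"
)

func main() {
	caKey, err := cert.NewPrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	caCert, err := cert.NewSelfSignedCACert(cert.Config{CommonName: "kubewatch-ca"}, caKey)
	if err != nil {
		log.Fatal(err)
	}

	serverKey, err := cert.NewPrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	serverCert, err := cert.NewSignedCert(cert.Config{
		CommonName: "kubewatch.example.com", // example host
		AltNames: cert.AltNames{
			DNSNames: []string{"kubewatch.example.com"},
			IPs:      []net.IP{net.ParseIP("10.0.0.1")},
		},
		Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
	}, serverKey, caCert, caKey)
	if err != nil {
		log.Fatal(err)
	}

	// Persist the leaf pair using the io.go and pem.go helpers.
	if err := cert.WriteCert("server.crt", cert.EncodeCertPEM(serverCert)); err != nil {
		log.Fatal(err)
	}
	if err := cert.WriteKey("server.key", cert.EncodePrivateKeyPEM(serverKey)); err != nil {
		log.Fatal(err)
	}
}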
+type AltNames struct { + DNSNames []string + IPs []net.IP +} + +// NewPrivateKey creates an RSA private key +func NewPrivateKey() (*rsa.PrivateKey, error) { + return rsa.GenerateKey(cryptorand.Reader, rsaKeySize) +} + +// NewSelfSignedCACert creates a CA certificate +func NewSelfSignedCACert(cfg Config, key *rsa.PrivateKey) (*x509.Certificate, error) { + now := time.Now() + tmpl := x509.Certificate{ + SerialNumber: new(big.Int).SetInt64(0), + Subject: pkix.Name{ + CommonName: cfg.CommonName, + Organization: cfg.Organization, + }, + NotBefore: now.UTC(), + NotAfter: now.Add(duration365d * 10).UTC(), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + IsCA: true, + } + + certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key) + if err != nil { + return nil, err + } + return x509.ParseCertificate(certDERBytes) +} + +// NewSignedCert creates a signed certificate using the given CA certificate and key +func NewSignedCert(cfg Config, key *rsa.PrivateKey, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, error) { + serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64)) + if err != nil { + return nil, err + } + if len(cfg.CommonName) == 0 { + return nil, errors.New("must specify a CommonName") + } + if len(cfg.Usages) == 0 { + return nil, errors.New("must specify at least one ExtKeyUsage") + } + + certTmpl := x509.Certificate{ + Subject: pkix.Name{ + CommonName: cfg.CommonName, + Organization: cfg.Organization, + }, + DNSNames: cfg.AltNames.DNSNames, + IPAddresses: cfg.AltNames.IPs, + SerialNumber: serial, + NotBefore: caCert.NotBefore, + NotAfter: time.Now().Add(duration365d).UTC(), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: cfg.Usages, + } + certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &certTmpl, caCert, key.Public(), caKey) + if err != nil { + return nil, err + } + return x509.ParseCertificate(certDERBytes) +} + +// MakeEllipticPrivateKeyPEM creates an ECDSA private key +func MakeEllipticPrivateKeyPEM() ([]byte, error) { + privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader) + if err != nil { + return nil, err + } + + derBytes, err := x509.MarshalECPrivateKey(privateKey) + if err != nil { + return nil, err + } + + privateKeyPemBlock := &pem.Block{ + Type: ECPrivateKeyBlockType, + Bytes: derBytes, + } + return pem.EncodeToMemory(privateKeyPemBlock), nil +} + +// GenerateSelfSignedCertKey creates a self-signed certificate and key for the given host. 
+// Host may be an IP or a DNS name +// You may also specify additional subject alt names (either ip or dns names) for the certificate +func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS []string) ([]byte, []byte, error) { + priv, err := rsa.GenerateKey(cryptorand.Reader, 2048) + if err != nil { + return nil, nil, err + } + + template := x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + CommonName: fmt.Sprintf("%s@%d", host, time.Now().Unix()), + }, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour * 24 * 365), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + IsCA: true, + } + + if ip := net.ParseIP(host); ip != nil { + template.IPAddresses = append(template.IPAddresses, ip) + } else { + template.DNSNames = append(template.DNSNames, host) + } + + template.IPAddresses = append(template.IPAddresses, alternateIPs...) + template.DNSNames = append(template.DNSNames, alternateDNS...) + + derBytes, err := x509.CreateCertificate(cryptorand.Reader, &template, &template, &priv.PublicKey, priv) + if err != nil { + return nil, nil, err + } + + // Generate cert + certBuffer := bytes.Buffer{} + if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: derBytes}); err != nil { + return nil, nil, err + } + + // Generate key + keyBuffer := bytes.Buffer{} + if err := pem.Encode(&keyBuffer, &pem.Block{Type: RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil { + return nil, nil, err + } + + return certBuffer.Bytes(), keyBuffer.Bytes(), nil +} + +// FormatBytesCert receives byte array certificate and formats in human-readable format +func FormatBytesCert(cert []byte) (string, error) { + block, _ := pem.Decode(cert) + c, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return "", fmt.Errorf("failed to parse certificate [%v]", err) + } + return FormatCert(c), nil +} + +// FormatCert receives certificate and formats in human-readable format +func FormatCert(c *x509.Certificate) string { + var ips []string + for _, ip := range c.IPAddresses { + ips = append(ips, ip.String()) + } + altNames := append(ips, c.DNSNames...) + res := fmt.Sprintf( + "Issuer: CN=%s | Subject: CN=%s | CA: %t\n", + c.Issuer.CommonName, c.Subject.CommonName, c.IsCA, + ) + res += fmt.Sprintf("Not before: %s Not After: %s", c.NotBefore, c.NotAfter) + if len(altNames) > 0 { + res += fmt.Sprintf("\nAlternate Names: %v", altNames) + } + return res +} diff --git a/vendor/k8s.io/client-go/util/cert/csr.go b/vendor/k8s.io/client-go/util/cert/csr.go new file mode 100644 index 000000000..280d249fe --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/csr.go @@ -0,0 +1,75 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cert + +import ( + cryptorand "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "net" +) + +// MakeCSR generates a PEM-encoded CSR using the supplied private key, subject, and SANs. +// All key types that are implemented via crypto.Signer are supported (This includes *rsa.PrivateKey and *ecdsa.PrivateKey.) +func MakeCSR(privateKey interface{}, subject *pkix.Name, dnsSANs []string, ipSANs []net.IP) (csr []byte, err error) { + template := &x509.CertificateRequest{ + Subject: *subject, + DNSNames: dnsSANs, + IPAddresses: ipSANs, + } + + return MakeCSRFromTemplate(privateKey, template) +} + +// MakeCSRFromTemplate generates a PEM-encoded CSR using the supplied private +// key and certificate request as a template. All key types that are +// implemented via crypto.Signer are supported (This includes *rsa.PrivateKey +// and *ecdsa.PrivateKey.) +func MakeCSRFromTemplate(privateKey interface{}, template *x509.CertificateRequest) ([]byte, error) { + t := *template + t.SignatureAlgorithm = sigType(privateKey) + + csrDER, err := x509.CreateCertificateRequest(cryptorand.Reader, &t, privateKey) + if err != nil { + return nil, err + } + + csrPemBlock := &pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: csrDER, + } + + return pem.EncodeToMemory(csrPemBlock), nil +} + +func sigType(privateKey interface{}) x509.SignatureAlgorithm { + // Customize the signature for RSA keys, depending on the key size + if privateKey, ok := privateKey.(*rsa.PrivateKey); ok { + keySize := privateKey.N.BitLen() + switch { + case keySize >= 4096: + return x509.SHA512WithRSA + case keySize >= 3072: + return x509.SHA384WithRSA + default: + return x509.SHA256WithRSA + } + } + return x509.UnknownSignatureAlgorithm +} diff --git a/vendor/k8s.io/client-go/util/cert/io.go b/vendor/k8s.io/client-go/util/cert/io.go new file mode 100644 index 000000000..b6b690383 --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/io.go @@ -0,0 +1,150 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cert + +import ( + "crypto/x509" + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +// CanReadCertAndKey returns true if the certificate and key files already exists, +// otherwise returns false. If lost one of cert and key, returns error. +func CanReadCertAndKey(certPath, keyPath string) (bool, error) { + certReadable := canReadFile(certPath) + keyReadable := canReadFile(keyPath) + + if certReadable == false && keyReadable == false { + return false, nil + } + + if certReadable == false { + return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", certPath) + } + + if keyReadable == false { + return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", keyPath) + } + + return true, nil +} + +// If the file represented by path exists and +// readable, returns true otherwise returns false. 
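A common pattern built from these helpers is "use the serving cert pair if it already exists, otherwise generate a self-signed one", combining CanReadCertAndKey with GenerateSelfSignedCertKey from cert.go and the WriteCert/WriteKey helpers just below. A sketch; the paths and host name are placeholders:

package main

import (
	"log"

	"k8s.io/client-go/util/cert"
)

func ensureServingCerts(certPath, keyPath, host string) error {
	ok, err := cert.CanReadCertAndKey(certPath, keyPath)
	if err != nil || ok {
		// Either both files exist (nothing to do), or exactly one does (an error).
		return err
	}

	// Neither file exists: generate a self-signed pair for the host.
	certPEM, keyPEM, err := cert.GenerateSelfSignedCertKey(host, nil, nil)
	if err != nil {
		return err
	}
	if err := cert.WriteCert(certPath, certPEM); err != nil {
		return err
	}
	return cert.WriteKey(keyPath, keyPEM)
}

func main() {
	if err := ensureServingCerts("/tmp/kubewatch.crt", "/tmp/kubewatch.key", "localhost"); err != nil {
		log.Fatal(err)
	}
}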
+func canReadFile(path string) bool { + f, err := os.Open(path) + if err != nil { + return false + } + + defer f.Close() + + return true +} + +// WriteCert writes the pem-encoded certificate data to certPath. +// The certificate file will be created with file mode 0644. +// If the certificate file already exists, it will be overwritten. +// The parent directory of the certPath will be created as needed with file mode 0755. +func WriteCert(certPath string, data []byte) error { + if err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil { + return err + } + if err := ioutil.WriteFile(certPath, data, os.FileMode(0644)); err != nil { + return err + } + return nil +} + +// WriteKey writes the pem-encoded key data to keyPath. +// The key file will be created with file mode 0600. +// If the key file already exists, it will be overwritten. +// The parent directory of the keyPath will be created as needed with file mode 0755. +func WriteKey(keyPath string, data []byte) error { + if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil { + return err + } + if err := ioutil.WriteFile(keyPath, data, os.FileMode(0600)); err != nil { + return err + } + return nil +} + +// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it +// can't find one, it will generate a new key and store it there. +func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) { + loadedData, err := ioutil.ReadFile(keyPath) + if err == nil { + return loadedData, false, err + } + if !os.IsNotExist(err) { + return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err) + } + + generatedData, err := MakeEllipticPrivateKeyPEM() + if err != nil { + return nil, false, fmt.Errorf("error generating key: %v", err) + } + if err := WriteKey(keyPath, generatedData); err != nil { + return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err) + } + return generatedData, true, nil +} + +// NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file. +// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates +func NewPool(filename string) (*x509.CertPool, error) { + certs, err := CertsFromFile(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} + +// CertsFromFile returns the x509.Certificates contained in the given PEM-encoded file. +// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates +func CertsFromFile(file string) ([]*x509.Certificate, error) { + pemBlock, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + certs, err := ParseCertsPEM(pemBlock) + if err != nil { + return nil, fmt.Errorf("error reading %s: %s", file, err) + } + return certs, nil +} + +// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file. +// Returns an error if the file could not be read or if the private key could not be parsed. 
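
A small sketch of the io helpers above working together; the file paths are illustrative only and assume the process can create and write to them.

package main

import (
	"log"

	"k8s.io/client-go/util/cert"
)

func main() {
	// Reuse an existing key at this path, or generate one on first run.
	keyPEM, generated, err := cert.LoadOrGenerateKeyFile("/var/run/example/tls.key")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("key loaded (%d bytes), newly generated: %v", len(keyPEM), generated)

	// Build a CA pool from a PEM bundle, e.g. for tls.Config{ClientCAs: pool}.
	pool, err := cert.NewPool("/var/run/example/ca.crt")
	if err != nil {
		log.Fatal(err)
	}
	_ = pool
}
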
+func PrivateKeyFromFile(file string) (interface{}, error) { + pemBlock, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + key, err := ParsePrivateKeyPEM(pemBlock) + if err != nil { + return nil, fmt.Errorf("error reading %s: %v", file, err) + } + return key, nil +} diff --git a/vendor/k8s.io/client-go/util/cert/pem.go b/vendor/k8s.io/client-go/util/cert/pem.go new file mode 100644 index 000000000..899845857 --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/pem.go @@ -0,0 +1,138 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cert + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" +) + +const ( + // ECPrivateKeyBlockType is a possible value for pem.Block.Type. + ECPrivateKeyBlockType = "EC PRIVATE KEY" + // RSAPrivateKeyBlockType is a possible value for pem.Block.Type. + RSAPrivateKeyBlockType = "RSA PRIVATE KEY" + // CertificateBlockType is a possible value for pem.Block.Type. + CertificateBlockType = "CERTIFICATE" + // CertificateRequestBlockType is a possible value for pem.Block.Type. + CertificateRequestBlockType = "CERTIFICATE REQUEST" + // PrivateKeyBlockType is a possible value for pem.Block.Type. + PrivateKeyBlockType = "PRIVATE KEY" + // PublicKeyBlockType is a possible value for pem.Block.Type. + PublicKeyBlockType = "PUBLIC KEY" +) + +// EncodePublicKeyPEM returns PEM-endcode public data +func EncodePublicKeyPEM(key *rsa.PublicKey) ([]byte, error) { + der, err := x509.MarshalPKIXPublicKey(key) + if err != nil { + return []byte{}, err + } + block := pem.Block{ + Type: PublicKeyBlockType, + Bytes: der, + } + return pem.EncodeToMemory(&block), nil +} + +// EncodePrivateKeyPEM returns PEM-encoded private key data +func EncodePrivateKeyPEM(key *rsa.PrivateKey) []byte { + block := pem.Block{ + Type: RSAPrivateKeyBlockType, + Bytes: x509.MarshalPKCS1PrivateKey(key), + } + return pem.EncodeToMemory(&block) +} + +// EncodeCertPEM returns PEM-endcoded certificate data +func EncodeCertPEM(cert *x509.Certificate) []byte { + block := pem.Block{ + Type: CertificateBlockType, + Bytes: cert.Raw, + } + return pem.EncodeToMemory(&block) +} + +// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data. 
+// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY" +func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) { + var privateKeyPemBlock *pem.Block + for { + privateKeyPemBlock, keyData = pem.Decode(keyData) + if privateKeyPemBlock == nil { + break + } + + switch privateKeyPemBlock.Type { + case ECPrivateKeyBlockType: + // ECDSA Private Key in ASN.1 format + if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil { + return key, nil + } + case RSAPrivateKeyBlockType: + // RSA Private Key in PKCS#1 format + if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil { + return key, nil + } + case PrivateKeyBlockType: + // RSA or ECDSA Private Key in unencrypted PKCS#8 format + if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil { + return key, nil + } + } + + // tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks + // originally, only the first PEM block was parsed and expected to be a key block + } + + // we read all the PEM blocks and didn't recognize one + return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key") +} + +// ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array +// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates +func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { + ok := false + certs := []*x509.Certificate{} + for len(pemCerts) > 0 { + var block *pem.Block + block, pemCerts = pem.Decode(pemCerts) + if block == nil { + break + } + // Only use PEM "CERTIFICATE" blocks without extra headers + if block.Type != CertificateBlockType || len(block.Headers) != 0 { + continue + } + + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return certs, err + } + + certs = append(certs, cert) + ok = true + } + + if !ok { + return certs, errors.New("could not read any certificates") + } + return certs, nil +} diff --git a/vendor/k8s.io/client-go/util/clock/clock.go b/vendor/k8s.io/client-go/util/clock/clock.go new file mode 100644 index 000000000..c303a212a --- /dev/null +++ b/vendor/k8s.io/client-go/util/clock/clock.go @@ -0,0 +1,327 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clock + +import ( + "sync" + "time" +) + +// Clock allows for injecting fake or real clocks into code that +// needs to do arbitrary things based on time. +type Clock interface { + Now() time.Time + Since(time.Time) time.Duration + After(d time.Duration) <-chan time.Time + NewTimer(d time.Duration) Timer + Sleep(d time.Duration) + Tick(d time.Duration) <-chan time.Time +} + +var ( + _ = Clock(RealClock{}) + _ = Clock(&FakeClock{}) + _ = Clock(&IntervalClock{}) +) + +// RealClock really calls time.Now() +type RealClock struct{} + +// Now returns the current time. +func (RealClock) Now() time.Time { + return time.Now() +} + +// Since returns time since the specified timestamp. 
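
The pem.go helpers round-trip keys and certificates between Go types and PEM. A brief sketch using an RSA key; as documented above, ParsePrivateKeyPEM also accepts EC and unencrypted PKCS#8 blocks.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"log"

	"k8s.io/client-go/util/cert"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}

	// Encode to a PKCS#1 "RSA PRIVATE KEY" PEM block...
	pemBytes := cert.EncodePrivateKeyPEM(key)

	// ...and parse it back.
	parsed, err := cert.ParsePrivateKeyPEM(pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	if _, ok := parsed.(*rsa.PrivateKey); ok {
		log.Println("roundtrip ok")
	}
}
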
+func (RealClock) Since(ts time.Time) time.Duration { + return time.Since(ts) +} + +// Same as time.After(d). +func (RealClock) After(d time.Duration) <-chan time.Time { + return time.After(d) +} + +func (RealClock) NewTimer(d time.Duration) Timer { + return &realTimer{ + timer: time.NewTimer(d), + } +} + +func (RealClock) Tick(d time.Duration) <-chan time.Time { + return time.Tick(d) +} + +func (RealClock) Sleep(d time.Duration) { + time.Sleep(d) +} + +// FakeClock implements Clock, but returns an arbitrary time. +type FakeClock struct { + lock sync.RWMutex + time time.Time + + // waiters are waiting for the fake time to pass their specified time + waiters []fakeClockWaiter +} + +type fakeClockWaiter struct { + targetTime time.Time + stepInterval time.Duration + skipIfBlocked bool + destChan chan time.Time + fired bool +} + +func NewFakeClock(t time.Time) *FakeClock { + return &FakeClock{ + time: t, + } +} + +// Now returns f's time. +func (f *FakeClock) Now() time.Time { + f.lock.RLock() + defer f.lock.RUnlock() + return f.time +} + +// Since returns time since the time in f. +func (f *FakeClock) Since(ts time.Time) time.Duration { + f.lock.RLock() + defer f.lock.RUnlock() + return f.time.Sub(ts) +} + +// Fake version of time.After(d). +func (f *FakeClock) After(d time.Duration) <-chan time.Time { + f.lock.Lock() + defer f.lock.Unlock() + stopTime := f.time.Add(d) + ch := make(chan time.Time, 1) // Don't block! + f.waiters = append(f.waiters, fakeClockWaiter{ + targetTime: stopTime, + destChan: ch, + }) + return ch +} + +// Fake version of time.NewTimer(d). +func (f *FakeClock) NewTimer(d time.Duration) Timer { + f.lock.Lock() + defer f.lock.Unlock() + stopTime := f.time.Add(d) + ch := make(chan time.Time, 1) // Don't block! + timer := &fakeTimer{ + fakeClock: f, + waiter: fakeClockWaiter{ + targetTime: stopTime, + destChan: ch, + }, + } + f.waiters = append(f.waiters, timer.waiter) + return timer +} + +func (f *FakeClock) Tick(d time.Duration) <-chan time.Time { + f.lock.Lock() + defer f.lock.Unlock() + tickTime := f.time.Add(d) + ch := make(chan time.Time, 1) // hold one tick + f.waiters = append(f.waiters, fakeClockWaiter{ + targetTime: tickTime, + stepInterval: d, + skipIfBlocked: true, + destChan: ch, + }) + + return ch +} + +// Move clock by Duration, notify anyone that's called After, Tick, or NewTimer +func (f *FakeClock) Step(d time.Duration) { + f.lock.Lock() + defer f.lock.Unlock() + f.setTimeLocked(f.time.Add(d)) +} + +// Sets the time. +func (f *FakeClock) SetTime(t time.Time) { + f.lock.Lock() + defer f.lock.Unlock() + f.setTimeLocked(t) +} + +// Actually changes the time and checks any waiters. f must be write-locked. +func (f *FakeClock) setTimeLocked(t time.Time) { + f.time = t + newWaiters := make([]fakeClockWaiter, 0, len(f.waiters)) + for i := range f.waiters { + w := &f.waiters[i] + if !w.targetTime.After(t) { + + if w.skipIfBlocked { + select { + case w.destChan <- t: + w.fired = true + default: + } + } else { + w.destChan <- t + w.fired = true + } + + if w.stepInterval > 0 { + for !w.targetTime.After(t) { + w.targetTime = w.targetTime.Add(w.stepInterval) + } + newWaiters = append(newWaiters, *w) + } + + } else { + newWaiters = append(newWaiters, f.waiters[i]) + } + } + f.waiters = newWaiters +} + +// Returns true if After has been called on f but not yet satisfied (so you can +// write race-free tests). 
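
FakeClock exists so tests can drive time-dependent code without sleeping; a minimal sketch of advancing the fake time past a pending After.

package main

import (
	"log"
	"time"

	"k8s.io/client-go/util/clock"
)

func main() {
	fc := clock.NewFakeClock(time.Now())

	// Nothing fires until the fake time is advanced.
	ch := fc.After(5 * time.Second)
	log.Println("waiters pending:", fc.HasWaiters()) // true

	// Step past the deadline; the channel receives the new fake time.
	fc.Step(10 * time.Second)
	t := <-ch
	log.Println("fired at fake time:", t)
}
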
+func (f *FakeClock) HasWaiters() bool { + f.lock.RLock() + defer f.lock.RUnlock() + return len(f.waiters) > 0 +} + +func (f *FakeClock) Sleep(d time.Duration) { + f.Step(d) +} + +// IntervalClock implements Clock, but each invocation of Now steps the clock forward the specified duration +type IntervalClock struct { + Time time.Time + Duration time.Duration +} + +// Now returns i's time. +func (i *IntervalClock) Now() time.Time { + i.Time = i.Time.Add(i.Duration) + return i.Time +} + +// Since returns time since the time in i. +func (i *IntervalClock) Since(ts time.Time) time.Duration { + return i.Time.Sub(ts) +} + +// Unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) After(d time.Duration) <-chan time.Time { + panic("IntervalClock doesn't implement After") +} + +// Unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) NewTimer(d time.Duration) Timer { + panic("IntervalClock doesn't implement NewTimer") +} + +// Unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) Tick(d time.Duration) <-chan time.Time { + panic("IntervalClock doesn't implement Tick") +} + +func (*IntervalClock) Sleep(d time.Duration) { + panic("IntervalClock doesn't implement Sleep") +} + +// Timer allows for injecting fake or real timers into code that +// needs to do arbitrary things based on time. +type Timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +var ( + _ = Timer(&realTimer{}) + _ = Timer(&fakeTimer{}) +) + +// realTimer is backed by an actual time.Timer. +type realTimer struct { + timer *time.Timer +} + +// C returns the underlying timer's channel. +func (r *realTimer) C() <-chan time.Time { + return r.timer.C +} + +// Stop calls Stop() on the underlying timer. +func (r *realTimer) Stop() bool { + return r.timer.Stop() +} + +// Reset calls Reset() on the underlying timer. +func (r *realTimer) Reset(d time.Duration) bool { + return r.timer.Reset(d) +} + +// fakeTimer implements Timer based on a FakeClock. +type fakeTimer struct { + fakeClock *FakeClock + waiter fakeClockWaiter +} + +// C returns the channel that notifies when this timer has fired. +func (f *fakeTimer) C() <-chan time.Time { + return f.waiter.destChan +} + +// Stop stops the timer and returns true if the timer has not yet fired, or false otherwise. +func (f *fakeTimer) Stop() bool { + f.fakeClock.lock.Lock() + defer f.fakeClock.lock.Unlock() + + newWaiters := make([]fakeClockWaiter, 0, len(f.fakeClock.waiters)) + for i := range f.fakeClock.waiters { + w := &f.fakeClock.waiters[i] + if w != &f.waiter { + newWaiters = append(newWaiters, *w) + } + } + + f.fakeClock.waiters = newWaiters + + return !f.waiter.fired +} + +// Reset resets the timer to the fake clock's "now" + d. It returns true if the timer has not yet +// fired, or false otherwise. +func (f *fakeTimer) Reset(d time.Duration) bool { + f.fakeClock.lock.Lock() + defer f.fakeClock.lock.Unlock() + + active := !f.waiter.fired + + f.waiter.fired = false + f.waiter.targetTime = f.fakeClock.time.Add(d) + + return active +} diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go new file mode 100644 index 000000000..030b15a56 --- /dev/null +++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go @@ -0,0 +1,149 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flowcontrol + +import ( + "sync" + "time" + + "k8s.io/client-go/util/clock" + "k8s.io/client-go/util/integer" +) + +type backoffEntry struct { + backoff time.Duration + lastUpdate time.Time +} + +type Backoff struct { + sync.Mutex + Clock clock.Clock + defaultDuration time.Duration + maxDuration time.Duration + perItemBackoff map[string]*backoffEntry +} + +func NewFakeBackOff(initial, max time.Duration, tc *clock.FakeClock) *Backoff { + return &Backoff{ + perItemBackoff: map[string]*backoffEntry{}, + Clock: tc, + defaultDuration: initial, + maxDuration: max, + } +} + +func NewBackOff(initial, max time.Duration) *Backoff { + return &Backoff{ + perItemBackoff: map[string]*backoffEntry{}, + Clock: clock.RealClock{}, + defaultDuration: initial, + maxDuration: max, + } +} + +// Get the current backoff Duration +func (p *Backoff) Get(id string) time.Duration { + p.Lock() + defer p.Unlock() + var delay time.Duration + entry, ok := p.perItemBackoff[id] + if ok { + delay = entry.backoff + } + return delay +} + +// move backoff to the next mark, capping at maxDuration +func (p *Backoff) Next(id string, eventTime time.Time) { + p.Lock() + defer p.Unlock() + entry, ok := p.perItemBackoff[id] + if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { + entry = p.initEntryUnsafe(id) + } else { + delay := entry.backoff * 2 // exponential + entry.backoff = time.Duration(integer.Int64Min(int64(delay), int64(p.maxDuration))) + } + entry.lastUpdate = p.Clock.Now() +} + +// Reset forces clearing of all backoff data for a given key. +func (p *Backoff) Reset(id string) { + p.Lock() + defer p.Unlock() + delete(p.perItemBackoff, id) +} + +// Returns True if the elapsed time since eventTime is smaller than the current backoff window +func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool { + p.Lock() + defer p.Unlock() + entry, ok := p.perItemBackoff[id] + if !ok { + return false + } + if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { + return false + } + return p.Clock.Now().Sub(eventTime) < entry.backoff +} + +// Returns True if time since lastupdate is less than the current backoff window. +func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool { + p.Lock() + defer p.Unlock() + entry, ok := p.perItemBackoff[id] + if !ok { + return false + } + if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { + return false + } + return eventTime.Sub(entry.lastUpdate) < entry.backoff +} + +// Garbage collect records that have aged past maxDuration. Backoff users are expected +// to invoke this periodically. 
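
A sketch of the per-item backoff bookkeeping Backoff provides; the item id is illustrative.

package main

import (
	"log"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// 1s initial backoff, capped at 30s.
	b := flowcontrol.NewBackOff(1*time.Second, 30*time.Second)

	id := "item-1"
	for i := 0; i < 4; i++ {
		// First failure starts at the initial duration, then doubles up to the cap.
		b.Next(id, b.Clock.Now())
		log.Printf("attempt %d: backoff is now %v", i+1, b.Get(id))
	}

	// Once the item succeeds, drop its record.
	b.Reset(id)
}
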
+func (p *Backoff) GC() { + p.Lock() + defer p.Unlock() + now := p.Clock.Now() + for id, entry := range p.perItemBackoff { + if now.Sub(entry.lastUpdate) > p.maxDuration*2 { + // GC when entry has not been updated for 2*maxDuration + delete(p.perItemBackoff, id) + } + } +} + +func (p *Backoff) DeleteEntry(id string) { + p.Lock() + defer p.Unlock() + delete(p.perItemBackoff, id) +} + +// Take a lock on *Backoff, before calling initEntryUnsafe +func (p *Backoff) initEntryUnsafe(id string) *backoffEntry { + entry := &backoffEntry{backoff: p.defaultDuration} + p.perItemBackoff[id] = entry + return entry +} + +// After 2*maxDuration we restart the backoff factor to the beginning +func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool { + return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/throttle.go b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go similarity index 87% rename from vendor/k8s.io/kubernetes/pkg/util/flowcontrol/throttle.go rename to vendor/k8s.io/client-go/util/flowcontrol/throttle.go index a63817ca8..881a2f57d 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/throttle.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go @@ -1,5 +1,5 @@ /* -Copyright 2014 The Kubernetes Authors All rights reserved. +Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -35,10 +35,13 @@ type RateLimiter interface { // Usually we use token bucket rate limiter. In that case, // 1.0 means no tokens are available; 0.0 means we have a full bucket of tokens to use. Saturation() float64 + // QPS returns QPS of this rate limiter + QPS() float32 } type tokenBucketRateLimiter struct { limiter *ratelimit.Bucket + qps float32 } // NewTokenBucketRateLimiter creates a rate limiter which implements a token bucket approach. @@ -48,7 +51,10 @@ type tokenBucketRateLimiter struct { // The maximum number of tokens in the bucket is capped at 'burst'. 
func NewTokenBucketRateLimiter(qps float32, burst int) RateLimiter { limiter := ratelimit.NewBucketWithRate(float64(qps), int64(burst)) - return &tokenBucketRateLimiter{limiter} + return &tokenBucketRateLimiter{ + limiter: limiter, + qps: qps, + } } func (t *tokenBucketRateLimiter) TryAccept() bool { @@ -69,6 +75,10 @@ func (t *tokenBucketRateLimiter) Accept() { func (t *tokenBucketRateLimiter) Stop() { } +func (t *tokenBucketRateLimiter) QPS() float32 { + return t.qps +} + type fakeAlwaysRateLimiter struct{} func NewFakeAlwaysRateLimiter() RateLimiter { @@ -87,16 +97,18 @@ func (t *fakeAlwaysRateLimiter) Stop() {} func (t *fakeAlwaysRateLimiter) Accept() {} +func (t *fakeAlwaysRateLimiter) QPS() float32 { + return 1 +} + type fakeNeverRateLimiter struct { wg sync.WaitGroup } func NewFakeNeverRateLimiter() RateLimiter { - wg := sync.WaitGroup{} - wg.Add(1) - return &fakeNeverRateLimiter{ - wg: wg, - } + rl := fakeNeverRateLimiter{} + rl.wg.Add(1) + return &rl } func (t *fakeNeverRateLimiter) TryAccept() bool { @@ -114,3 +126,7 @@ func (t *fakeNeverRateLimiter) Stop() { func (t *fakeNeverRateLimiter) Accept() { t.wg.Wait() } + +func (t *fakeNeverRateLimiter) QPS() float32 { + return 1 +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/homedir/homedir.go b/vendor/k8s.io/client-go/util/homedir/homedir.go similarity index 84% rename from vendor/k8s.io/kubernetes/pkg/util/homedir/homedir.go rename to vendor/k8s.io/client-go/util/homedir/homedir.go index 57171e109..816db57f5 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/homedir/homedir.go +++ b/vendor/k8s.io/client-go/util/homedir/homedir.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors All rights reserved. +Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -24,6 +24,13 @@ import ( // HomeDir returns the home directory for the current user func HomeDir() string { if runtime.GOOS == "windows" { + + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); len(home) > 0 { + if _, err := os.Stat(home); err == nil { + return home + } + } if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 { homeDir := homeDrive + homePath if _, err := os.Stat(homeDir); err == nil { diff --git a/vendor/k8s.io/client-go/util/integer/integer.go b/vendor/k8s.io/client-go/util/integer/integer.go new file mode 100644 index 000000000..c6ea106f9 --- /dev/null +++ b/vendor/k8s.io/client-go/util/integer/integer.go @@ -0,0 +1,67 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
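
The QPS() accessor added to the token bucket limiter above makes the configured rate observable; a brief usage sketch.

package main

import (
	"log"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// 5 requests per second, bursts of up to 10.
	limiter := flowcontrol.NewTokenBucketRateLimiter(5, 10)
	log.Println("configured QPS:", limiter.QPS())

	for i := 0; i < 3; i++ {
		limiter.Accept() // blocks until a token is available
		log.Println("request", i, "allowed")
	}
}
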
+*/ + +package integer + +func IntMax(a, b int) int { + if b > a { + return b + } + return a +} + +func IntMin(a, b int) int { + if b < a { + return b + } + return a +} + +func Int32Max(a, b int32) int32 { + if b > a { + return b + } + return a +} + +func Int32Min(a, b int32) int32 { + if b < a { + return b + } + return a +} + +func Int64Max(a, b int64) int64 { + if b > a { + return b + } + return a +} + +func Int64Min(a, b int64) int64 { + if b < a { + return b + } + return a +} + +// RoundToInt32 rounds floats into integer numbers. +func RoundToInt32(a float64) int32 { + if a < 0 { + return int32(a - 0.5) + } + return int32(a + 0.5) +} diff --git a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go new file mode 100644 index 000000000..35caed4fa --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go @@ -0,0 +1,211 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import ( + "math" + "sync" + "time" + + "github.com/juju/ratelimit" +) + +type RateLimiter interface { + // When gets an item and gets to decide how long that item should wait + When(item interface{}) time.Duration + // Forget indicates that an item is finished being retried. Doesn't matter whether its for perm failing + // or for success, we'll stop tracking it + Forget(item interface{}) + // NumRequeues returns back how many failures the item has had + NumRequeues(item interface{}) int +} + +// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has +// both overall and per-item rate limitting. The overall is a token bucket and the per-item is exponential +func DefaultControllerRateLimiter() RateLimiter { + return NewMaxOfRateLimiter( + NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second), + // 10 qps, 100 bucket size. 
This is only for retry speed and its only the overall factor (not per item) + &BucketRateLimiter{Bucket: ratelimit.NewBucketWithRate(float64(10), int64(100))}, + ) +} + +// BucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API +type BucketRateLimiter struct { + *ratelimit.Bucket +} + +var _ RateLimiter = &BucketRateLimiter{} + +func (r *BucketRateLimiter) When(item interface{}) time.Duration { + return r.Bucket.Take(1) +} + +func (r *BucketRateLimiter) NumRequeues(item interface{}) int { + return 0 +} + +func (r *BucketRateLimiter) Forget(item interface{}) { +} + +// ItemExponentialFailureRateLimiter does a simple baseDelay*10^<num-failures> limit +// dealing with max failures and expiration are up to the caller +type ItemExponentialFailureRateLimiter struct { + failuresLock sync.Mutex + failures map[interface{}]int + + baseDelay time.Duration + maxDelay time.Duration +} + +var _ RateLimiter = &ItemExponentialFailureRateLimiter{} + +func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter { + return &ItemExponentialFailureRateLimiter{ + failures: map[interface{}]int{}, + baseDelay: baseDelay, + maxDelay: maxDelay, + } +} + +func DefaultItemBasedRateLimiter() RateLimiter { + return NewItemExponentialFailureRateLimiter(time.Millisecond, 1000*time.Second) +} + +func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + exp := r.failures[item] + r.failures[item] = r.failures[item] + 1 + + // The backoff is capped such that 'calculated' value never overflows. + backoff := float64(r.baseDelay.Nanoseconds()) * math.Pow(2, float64(exp)) + if backoff > math.MaxInt64 { + return r.maxDelay + } + + calculated := time.Duration(backoff) + if calculated > r.maxDelay { + return r.maxDelay + } + + return calculated +} + +func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + return r.failures[item] +} + +func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + delete(r.failures, item) +} + +// ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that +type ItemFastSlowRateLimiter struct { + failuresLock sync.Mutex + failures map[interface{}]int + + maxFastAttempts int + fastDelay time.Duration + slowDelay time.Duration +} + +var _ RateLimiter = &ItemFastSlowRateLimiter{} + +func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter { + return &ItemFastSlowRateLimiter{ + failures: map[interface{}]int{}, + fastDelay: fastDelay, + slowDelay: slowDelay, + maxFastAttempts: maxFastAttempts, + } +} + +func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + r.failures[item] = r.failures[item] + 1 + + if r.failures[item] <= r.maxFastAttempts { + return r.fastDelay + } + + return r.slowDelay +} + +func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + return r.failures[item] +} + +func (r *ItemFastSlowRateLimiter) Forget(item interface{}) { + r.failuresLock.Lock() + defer r.failuresLock.Unlock() + + delete(r.failures, item) +} + +// MaxOfRateLimiter calls every RateLimiter and returns the worst case response +// When used with a token bucket limiter, the burst could be 
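
A sketch of how the per-item exponential limiter grows its delay, using the same base and cap as DefaultControllerRateLimiter; the item key is illustrative.

package main

import (
	"log"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	rl := workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second)

	item := "my-key" // illustrative work item
	for i := 0; i < 5; i++ {
		// 5ms, 10ms, 20ms, 40ms, 80ms ... doubling per failure until the cap.
		log.Printf("requeue %d after %v (failures so far: %d)", i+1, rl.When(item), rl.NumRequeues(item))
	}

	// On success, stop tracking the item so its delay resets.
	rl.Forget(item)
}
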
apparently exceeded in cases where particular items +// were separately delayed a longer time. +type MaxOfRateLimiter struct { + limiters []RateLimiter +} + +func (r *MaxOfRateLimiter) When(item interface{}) time.Duration { + ret := time.Duration(0) + for _, limiter := range r.limiters { + curr := limiter.When(item) + if curr > ret { + ret = curr + } + } + + return ret +} + +func NewMaxOfRateLimiter(limiters ...RateLimiter) RateLimiter { + return &MaxOfRateLimiter{limiters: limiters} +} + +func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int { + ret := 0 + for _, limiter := range r.limiters { + curr := limiter.NumRequeues(item) + if curr > ret { + ret = curr + } + } + + return ret +} + +func (r *MaxOfRateLimiter) Forget(item interface{}) { + for _, limiter := range r.limiters { + limiter.Forget(item) + } +} diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go new file mode 100644 index 000000000..593ad9ad4 --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -0,0 +1,246 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import ( + "sort" + "time" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/util/clock" +) + +// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to +// requeue items after failures without ending up in a hot-loop. +type DelayingInterface interface { + Interface + // AddAfter adds an item to the workqueue after the indicated duration has passed + AddAfter(item interface{}, duration time.Duration) +} + +// NewDelayingQueue constructs a new workqueue with delayed queuing ability +func NewDelayingQueue() DelayingInterface { + return newDelayingQueue(clock.RealClock{}, "") +} + +func NewNamedDelayingQueue(name string) DelayingInterface { + return newDelayingQueue(clock.RealClock{}, name) +} + +func newDelayingQueue(clock clock.Clock, name string) DelayingInterface { + ret := &delayingType{ + Interface: NewNamed(name), + clock: clock, + heartbeat: clock.Tick(maxWait), + stopCh: make(chan struct{}), + waitingTimeByEntry: map[t]time.Time{}, + waitingForAddCh: make(chan waitFor, 1000), + metrics: newRetryMetrics(name), + } + + go ret.waitingLoop() + + return ret +} + +// delayingType wraps an Interface and provides delayed re-enquing +type delayingType struct { + Interface + + // clock tracks time for delayed firing + clock clock.Clock + + // stopCh lets us signal a shutdown to the waiting loop + stopCh chan struct{} + + // heartbeat ensures we wait no more than maxWait before firing + // + // TODO: replace with Ticker (and add to clock) so this can be cleaned up. + // clock.Tick will leak. 
+ heartbeat <-chan time.Time + + // waitingForAdd is an ordered slice of items to be added to the contained work queue + waitingForAdd []waitFor + // waitingTimeByEntry holds wait time by entry, so we can lookup pre-existing indexes + waitingTimeByEntry map[t]time.Time + // waitingForAddCh is a buffered channel that feeds waitingForAdd + waitingForAddCh chan waitFor + + // metrics counts the number of retries + metrics retryMetrics +} + +// waitFor holds the data to add and the time it should be added +type waitFor struct { + data t + readyAt time.Time +} + +// ShutDown gives a way to shut off this queue +func (q *delayingType) ShutDown() { + q.Interface.ShutDown() + close(q.stopCh) +} + +// AddAfter adds the given item to the work queue after the given delay +func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { + // don't add if we're already shutting down + if q.ShuttingDown() { + return + } + + q.metrics.retry() + + // immediately add things with no delay + if duration <= 0 { + q.Add(item) + return + } + + select { + case <-q.stopCh: + // unblock if ShutDown() is called + case q.waitingForAddCh <- waitFor{data: item, readyAt: q.clock.Now().Add(duration)}: + } +} + +// maxWait keeps a max bound on the wait time. It's just insurance against weird things happening. +// Checking the queue every 10 seconds isn't expensive and we know that we'll never end up with an +// expired item sitting for more than 10 seconds. +const maxWait = 10 * time.Second + +// waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added. +func (q *delayingType) waitingLoop() { + defer utilruntime.HandleCrash() + + // Make a placeholder channel to use when there are no items in our list + never := make(<-chan time.Time) + + for { + if q.Interface.ShuttingDown() { + // discard waiting entries + q.waitingForAdd = nil + q.waitingTimeByEntry = nil + return + } + + now := q.clock.Now() + + // Add ready entries + readyEntries := 0 + for _, entry := range q.waitingForAdd { + if entry.readyAt.After(now) { + break + } + q.Add(entry.data) + delete(q.waitingTimeByEntry, entry.data) + readyEntries++ + } + q.waitingForAdd = q.waitingForAdd[readyEntries:] + + // Set up a wait for the first item's readyAt (if one exists) + nextReadyAt := never + if len(q.waitingForAdd) > 0 { + nextReadyAt = q.clock.After(q.waitingForAdd[0].readyAt.Sub(now)) + } + + select { + case <-q.stopCh: + return + + case <-q.heartbeat: + // continue the loop, which will add ready items + + case <-nextReadyAt: + // continue the loop, which will add ready items + + case waitEntry := <-q.waitingForAddCh: + if waitEntry.readyAt.After(q.clock.Now()) { + q.waitingForAdd = insert(q.waitingForAdd, q.waitingTimeByEntry, waitEntry) + } else { + q.Add(waitEntry.data) + } + + drained := false + for !drained { + select { + case waitEntry := <-q.waitingForAddCh: + if waitEntry.readyAt.After(q.clock.Now()) { + q.waitingForAdd = insert(q.waitingForAdd, q.waitingTimeByEntry, waitEntry) + } else { + q.Add(waitEntry.data) + } + default: + drained = true + } + } + } + } +} + +// inserts the given entry into the sorted entries list +// same semantics as append()... the given slice may be modified, +// and the returned value should be used +// +// TODO: This should probably be converted to use container/heap to improve +// running time for a large number of items. 
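
A minimal sketch of delayed requeueing with the delaying queue; the item value and delay are illustrative.

package main

import (
	"log"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.NewDelayingQueue()
	defer q.ShutDown()

	// The item becomes available to Get() only after the delay has elapsed.
	q.AddAfter("retry-me", 2*time.Second)

	item, shutdown := q.Get()
	if shutdown {
		return
	}
	log.Println("got item after delay:", item)
	q.Done(item)
}
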
+func insert(entries []waitFor, knownEntries map[t]time.Time, entry waitFor) []waitFor { + // if the entry is already in our retry list and the existing time is before the new one, just skip it + existingTime, exists := knownEntries[entry.data] + if exists && existingTime.Before(entry.readyAt) { + return entries + } + + // if the entry exists and is scheduled for later, go ahead and remove the entry + if exists { + if existingIndex := findEntryIndex(entries, existingTime, entry.data); existingIndex >= 0 && existingIndex < len(entries) { + entries = append(entries[:existingIndex], entries[existingIndex+1:]...) + } + } + + insertionIndex := sort.Search(len(entries), func(i int) bool { + return entry.readyAt.Before(entries[i].readyAt) + }) + + // grow by 1 + entries = append(entries, waitFor{}) + // shift items from the insertion point to the end + copy(entries[insertionIndex+1:], entries[insertionIndex:]) + // insert the record + entries[insertionIndex] = entry + + knownEntries[entry.data] = entry.readyAt + + return entries +} + +// findEntryIndex returns the index for an existing entry +func findEntryIndex(entries []waitFor, existingTime time.Time, data t) int { + index := sort.Search(len(entries), func(i int) bool { + return entries[i].readyAt.After(existingTime) || existingTime == entries[i].readyAt + }) + + // we know this is the earliest possible index, but there could be multiple with the same time + // iterate from here to find the dupe + for ; index < len(entries); index++ { + if entries[index].data == data { + break + } + } + + return index +} diff --git a/vendor/k8s.io/client-go/util/workqueue/doc.go b/vendor/k8s.io/client-go/util/workqueue/doc.go new file mode 100644 index 000000000..2a00c74ac --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/doc.go @@ -0,0 +1,26 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package workqueue provides a simple queue that supports the following +// features: +// * Fair: items processed in the order in which they are added. +// * Stingy: a single item will not be processed multiple times concurrently, +// and if an item is added multiple times before it can be processed, it +// will only be processed once. +// * Multiple consumers and producers. In particular, it is allowed for an +// item to be reenqueued while it is being processed. +// * Shutdown notifications. +package workqueue diff --git a/vendor/k8s.io/client-go/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go new file mode 100644 index 000000000..a481bdfb2 --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/metrics.go @@ -0,0 +1,195 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import ( + "sync" + "time" +) + +// This file provides abstractions for setting the provider (e.g., prometheus) +// of metrics. + +type queueMetrics interface { + add(item t) + get(item t) + done(item t) +} + +// GaugeMetric represents a single numerical value that can arbitrarily go up +// and down. +type GaugeMetric interface { + Inc() + Dec() +} + +// CounterMetric represents a single numerical value that only ever +// goes up. +type CounterMetric interface { + Inc() +} + +// SummaryMetric captures individual observations. +type SummaryMetric interface { + Observe(float64) +} + +type noopMetric struct{} + +func (noopMetric) Inc() {} +func (noopMetric) Dec() {} +func (noopMetric) Observe(float64) {} + +type defaultQueueMetrics struct { + // current depth of a workqueue + depth GaugeMetric + // total number of adds handled by a workqueue + adds CounterMetric + // how long an item stays in a workqueue + latency SummaryMetric + // how long processing an item from a workqueue takes + workDuration SummaryMetric + addTimes map[t]time.Time + processingStartTimes map[t]time.Time +} + +func (m *defaultQueueMetrics) add(item t) { + if m == nil { + return + } + + m.adds.Inc() + m.depth.Inc() + if _, exists := m.addTimes[item]; !exists { + m.addTimes[item] = time.Now() + } +} + +func (m *defaultQueueMetrics) get(item t) { + if m == nil { + return + } + + m.depth.Dec() + m.processingStartTimes[item] = time.Now() + if startTime, exists := m.addTimes[item]; exists { + m.latency.Observe(sinceInMicroseconds(startTime)) + delete(m.addTimes, item) + } +} + +func (m *defaultQueueMetrics) done(item t) { + if m == nil { + return + } + + if startTime, exists := m.processingStartTimes[item]; exists { + m.workDuration.Observe(sinceInMicroseconds(startTime)) + delete(m.processingStartTimes, item) + } +} + +// Gets the time since the specified start in microseconds. +func sinceInMicroseconds(start time.Time) float64 { + return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) +} + +type retryMetrics interface { + retry() +} + +type defaultRetryMetrics struct { + retries CounterMetric +} + +func (m *defaultRetryMetrics) retry() { + if m == nil { + return + } + + m.retries.Inc() +} + +// MetricsProvider generates various metrics used by the queue. 
+type MetricsProvider interface { + NewDepthMetric(name string) GaugeMetric + NewAddsMetric(name string) CounterMetric + NewLatencyMetric(name string) SummaryMetric + NewWorkDurationMetric(name string) SummaryMetric + NewRetriesMetric(name string) CounterMetric +} + +type noopMetricsProvider struct{} + +func (_ noopMetricsProvider) NewDepthMetric(name string) GaugeMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewAddsMetric(name string) CounterMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewLatencyMetric(name string) SummaryMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewWorkDurationMetric(name string) SummaryMetric { + return noopMetric{} +} + +func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric { + return noopMetric{} +} + +var metricsFactory = struct { + metricsProvider MetricsProvider + setProviders sync.Once +}{ + metricsProvider: noopMetricsProvider{}, +} + +func newQueueMetrics(name string) queueMetrics { + var ret *defaultQueueMetrics + if len(name) == 0 { + return ret + } + return &defaultQueueMetrics{ + depth: metricsFactory.metricsProvider.NewDepthMetric(name), + adds: metricsFactory.metricsProvider.NewAddsMetric(name), + latency: metricsFactory.metricsProvider.NewLatencyMetric(name), + workDuration: metricsFactory.metricsProvider.NewWorkDurationMetric(name), + addTimes: map[t]time.Time{}, + processingStartTimes: map[t]time.Time{}, + } +} + +func newRetryMetrics(name string) retryMetrics { + var ret *defaultRetryMetrics + if len(name) == 0 { + return ret + } + return &defaultRetryMetrics{ + retries: metricsFactory.metricsProvider.NewRetriesMetric(name), + } +} + +// SetProvider sets the metrics provider of the metricsFactory. +func SetProvider(metricsProvider MetricsProvider) { + metricsFactory.setProviders.Do(func() { + metricsFactory.metricsProvider = metricsProvider + }) +} diff --git a/vendor/k8s.io/client-go/util/workqueue/parallelizer.go b/vendor/k8s.io/client-go/util/workqueue/parallelizer.go new file mode 100644 index 000000000..be668c423 --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/parallelizer.go @@ -0,0 +1,52 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import ( + "sync" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +type DoWorkPieceFunc func(piece int) + +// Parallelize is a very simple framework that allow for parallelizing +// N independent pieces of work. 
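
SetProvider lets callers plug in their own metrics backend (the comments above mention Prometheus as one possibility). A sketch of a hypothetical provider that just logs, installed before the first named queue is built; the provider and metric types here are illustrative, not part of this change.

package main

import (
	"log"

	"k8s.io/client-go/util/workqueue"
)

// Trivial metric implementations that log instead of exporting anything.
type logCounter struct{ name string }

func (c logCounter) Inc() { log.Println(c.name, "+1") }

type logGauge struct{ name string }

func (g logGauge) Inc() { log.Println(g.name, "+1") }
func (g logGauge) Dec() { log.Println(g.name, "-1") }

type logSummary struct{ name string }

func (s logSummary) Observe(v float64) { log.Println(s.name, "observed", v) }

// logProvider satisfies workqueue.MetricsProvider.
type logProvider struct{}

func (logProvider) NewDepthMetric(name string) workqueue.GaugeMetric { return logGauge{name + "_depth"} }
func (logProvider) NewAddsMetric(name string) workqueue.CounterMetric { return logCounter{name + "_adds"} }
func (logProvider) NewLatencyMetric(name string) workqueue.SummaryMetric { return logSummary{name + "_latency_us"} }
func (logProvider) NewWorkDurationMetric(name string) workqueue.SummaryMetric { return logSummary{name + "_work_us"} }
func (logProvider) NewRetriesMetric(name string) workqueue.CounterMetric { return logCounter{name + "_retries"} }

func main() {
	// Must be installed before the first named queue is built; it can only be set once.
	workqueue.SetProvider(logProvider{})

	q := workqueue.NewNamed("example")
	q.Add("item")
	q.ShutDown()
}
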
+func Parallelize(workers, pieces int, doWorkPiece DoWorkPieceFunc) { + toProcess := make(chan int, pieces) + for i := 0; i < pieces; i++ { + toProcess <- i + } + close(toProcess) + + if pieces < workers { + workers = pieces + } + + wg := sync.WaitGroup{} + wg.Add(workers) + for i := 0; i < workers; i++ { + go func() { + defer utilruntime.HandleCrash() + defer wg.Done() + for piece := range toProcess { + doWorkPiece(piece) + } + }() + } + wg.Wait() +} diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go new file mode 100644 index 000000000..3e1a49fe2 --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -0,0 +1,172 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import ( + "sync" +) + +type Interface interface { + Add(item interface{}) + Len() int + Get() (item interface{}, shutdown bool) + Done(item interface{}) + ShutDown() + ShuttingDown() bool +} + +// New constructs a new workqueue (see the package comment). +func New() *Type { + return NewNamed("") +} + +func NewNamed(name string) *Type { + return &Type{ + dirty: set{}, + processing: set{}, + cond: sync.NewCond(&sync.Mutex{}), + metrics: newQueueMetrics(name), + } +} + +// Type is a work queue (see the package comment). +type Type struct { + // queue defines the order in which we will work on items. Every + // element of queue should be in the dirty set and not in the + // processing set. + queue []t + + // dirty defines all of the items that need to be processed. + dirty set + + // Things that are currently being processed are in the processing set. + // These things may be simultaneously in the dirty set. When we finish + // processing something and remove it from this set, we'll check if + // it's in the dirty set, and if so, add it to the queue. + processing set + + cond *sync.Cond + + shuttingDown bool + + metrics queueMetrics +} + +type empty struct{} +type t interface{} +type set map[t]empty + +func (s set) has(item t) bool { + _, exists := s[item] + return exists +} + +func (s set) insert(item t) { + s[item] = empty{} +} + +func (s set) delete(item t) { + delete(s, item) +} + +// Add marks item as needing processing. +func (q *Type) Add(item interface{}) { + q.cond.L.Lock() + defer q.cond.L.Unlock() + if q.shuttingDown { + return + } + if q.dirty.has(item) { + return + } + + q.metrics.add(item) + + q.dirty.insert(item) + if q.processing.has(item) { + return + } + + q.queue = append(q.queue, item) + q.cond.Signal() +} + +// Len returns the current queue length, for informational purposes only. You +// shouldn't e.g. gate a call to Add() or Get() on Len() being a particular +// value, that can't be synchronized properly. +func (q *Type) Len() int { + q.cond.L.Lock() + defer q.cond.L.Unlock() + return len(q.queue) +} + +// Get blocks until it can return an item to be processed. If shutdown = true, +// the caller should end their goroutine. 
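
Parallelize fans independent pieces of work over a bounded worker pool; a small sketch.

package main

import (
	"log"
	"sync/atomic"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	var processed int64

	// Process 100 independent pieces with at most 8 concurrent workers.
	workqueue.Parallelize(8, 100, func(piece int) {
		atomic.AddInt64(&processed, 1)
	})

	log.Println("processed:", atomic.LoadInt64(&processed)) // 100
}
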
You must call Done with item when you +// have finished processing it. +func (q *Type) Get() (item interface{}, shutdown bool) { + q.cond.L.Lock() + defer q.cond.L.Unlock() + for len(q.queue) == 0 && !q.shuttingDown { + q.cond.Wait() + } + if len(q.queue) == 0 { + // We must be shutting down. + return nil, true + } + + item, q.queue = q.queue[0], q.queue[1:] + + q.metrics.get(item) + + q.processing.insert(item) + q.dirty.delete(item) + + return item, false +} + +// Done marks item as done processing, and if it has been marked as dirty again +// while it was being processed, it will be re-added to the queue for +// re-processing. +func (q *Type) Done(item interface{}) { + q.cond.L.Lock() + defer q.cond.L.Unlock() + + q.metrics.done(item) + + q.processing.delete(item) + if q.dirty.has(item) { + q.queue = append(q.queue, item) + q.cond.Signal() + } +} + +// ShutDown will cause q to ignore all new items added to it. As soon as the +// worker goroutines have drained the existing items in the queue, they will be +// instructed to exit. +func (q *Type) ShutDown() { + q.cond.L.Lock() + defer q.cond.L.Unlock() + q.shuttingDown = true + q.cond.Broadcast() +} + +func (q *Type) ShuttingDown() bool { + q.cond.L.Lock() + defer q.cond.L.Unlock() + + return q.shuttingDown +} diff --git a/vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go b/vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go new file mode 100644 index 000000000..417ac001b --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go @@ -0,0 +1,69 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +// RateLimitingInterface is an interface that rate limits items being added to the queue. +type RateLimitingInterface interface { + DelayingInterface + + // AddRateLimited adds an item to the workqueue after the rate limiter says its ok + AddRateLimited(item interface{}) + + // Forget indicates that an item is finished being retried. Doesn't matter whether its for perm failing + // or for success, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you + // still have to call `Done` on the queue. + Forget(item interface{}) + + // NumRequeues returns back how many times the item was requeued + NumRequeues(item interface{}) int +} + +// NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability +// Remember to call Forget! If you don't, you may end up tracking failures forever. 
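
Taken together, the queue types above support the usual controller worker pattern: add keys through the rate limiter, pair Get with Done in a worker, and Forget on success. A sketch with a placeholder processing function; the key is illustrative.

package main

import (
	"log"

	"k8s.io/client-go/util/workqueue"
)

func process(item interface{}) error {
	log.Println("processing", item)
	return nil // placeholder for real work
}

func main() {
	q := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())

	// Enqueue through the rate limiter; the first add for a key is only briefly delayed.
	q.AddRateLimited("object-key")

	item, shutdown := q.Get() // blocks until the delayed add lands
	if shutdown {
		return
	}
	if err := process(item); err != nil {
		q.AddRateLimited(item) // retry later with backoff
	} else {
		q.Forget(item) // clear the per-item backoff on success
	}
	q.Done(item) // always pair Get with Done

	q.ShutDown()
}
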
+func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface { + return &rateLimitingType{ + DelayingInterface: NewDelayingQueue(), + rateLimiter: rateLimiter, + } +} + +func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface { + return &rateLimitingType{ + DelayingInterface: NewNamedDelayingQueue(name), + rateLimiter: rateLimiter, + } +} + +// rateLimitingType wraps an Interface and provides rateLimited re-enquing +type rateLimitingType struct { + DelayingInterface + + rateLimiter RateLimiter +} + +// AddRateLimited AddAfter's the item based on the time when the rate limiter says its ok +func (q *rateLimitingType) AddRateLimited(item interface{}) { + q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item)) +} + +func (q *rateLimitingType) NumRequeues(item interface{}) int { + return q.rateLimiter.NumRequeues(item) +} + +func (q *rateLimitingType) Forget(item interface{}) { + q.rateLimiter.Forget(item) +} diff --git a/vendor/k8s.io/client-go/util/workqueue/timed_queue.go b/vendor/k8s.io/client-go/util/workqueue/timed_queue.go new file mode 100644 index 000000000..2ad90bfdf --- /dev/null +++ b/vendor/k8s.io/client-go/util/workqueue/timed_queue.go @@ -0,0 +1,52 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workqueue + +import "time" + +type TimedWorkQueue struct { + *Type +} + +type TimedWorkQueueItem struct { + StartTime time.Time + Object interface{} +} + +func NewTimedWorkQueue() *TimedWorkQueue { + return &TimedWorkQueue{New()} +} + +// Add adds the obj along with the current timestamp to the queue. +func (q TimedWorkQueue) Add(timedItem *TimedWorkQueueItem) { + q.Type.Add(timedItem) +} + +// Get gets the obj along with its timestamp from the queue. +func (q TimedWorkQueue) Get() (timedItem *TimedWorkQueueItem, shutdown bool) { + origin, shutdown := q.Type.Get() + if origin == nil { + return nil, shutdown + } + timedItem, _ = origin.(*TimedWorkQueueItem) + return timedItem, shutdown +} + +func (q TimedWorkQueue) Done(timedItem *TimedWorkQueueItem) error { + q.Type.Done(timedItem) + return nil +} diff --git a/vendor/k8s.io/kubernetes/LICENSE b/vendor/k8s.io/kubernetes/LICENSE deleted file mode 100644 index 6b4d837a4..000000000 --- a/vendor/k8s.io/kubernetes/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2014 The Kubernetes Authors All rights reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
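The two workqueue files vendored earlier in this diff (rate_limitting_queue.go and timed_queue.go) are easiest to read next to a consumer, so here is a minimal, illustrative sketch of how a controller loop might drive them. It is not part of this diff or of kubewatch itself: workqueue.DefaultControllerRateLimiter() is assumed to be present in the same vendored package (it lives in default_rate_limiters.go, not shown in this hunk), and the key value, the retry cap of 5, and the process helper are made up for the example.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

// process is a hypothetical work function; a real controller would sync the
// object identified by key against the cluster here.
func process(key string) error {
	fmt.Println("processing", key)
	return nil
}

func main() {
	// Rate-limited queue: AddRateLimited re-enqueues an item after whatever
	// delay the RateLimiter computes for it. DefaultControllerRateLimiter is
	// assumed to be vendored alongside rate_limitting_queue.go.
	q := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	defer q.ShutDown()

	q.Add("default/my-pod") // illustrative key

	key, shutdown := q.Get()
	if shutdown {
		return
	}
	defer q.Done(key)

	if err := process(key.(string)); err != nil && q.NumRequeues(key) < 5 {
		// Failed: requeue after the rate limiter's backoff for this item.
		q.AddRateLimited(key)
		return
	}
	// Succeeded (or gave up): drop the item's rate-limiting history.
	q.Forget(key)

	// TimedWorkQueue: the same Type-backed queue, but every item carries the
	// time it was first enqueued.
	tq := workqueue.NewTimedWorkQueue()
	defer tq.ShutDown()
	tq.Add(&workqueue.TimedWorkQueueItem{StartTime: time.Now(), Object: "some-object"})
	if item, shut := tq.Get(); !shut && item != nil {
		fmt.Println("enqueued at", item.StartTime, "object:", item.Object)
		_ = tq.Done(item)
	}
}

The Forget/NumRequeues split is the point of the wrapper: the caller decides when to cap retries, while the queue only tracks per-item backoff state.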
diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/deep_copy_generated.go b/vendor/k8s.io/kubernetes/federation/apis/federation/deep_copy_generated.go deleted file mode 100644 index 0b03ff9b3..000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/deep_copy_generated.go +++ /dev/null @@ -1,145 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package federation - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_federation_Cluster, - DeepCopy_federation_ClusterCondition, - DeepCopy_federation_ClusterList, - DeepCopy_federation_ClusterSpec, - DeepCopy_federation_ClusterStatus, - DeepCopy_federation_ServerAddressByClientCIDR, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. - panic(err) - } -} - -func DeepCopy_federation_Cluster(in Cluster, out *Cluster, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_federation_ClusterSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_federation_ClusterStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_federation_ClusterCondition(in ClusterCondition, out *ClusterCondition, c *conversion.Cloner) error { - out.Type = in.Type - out.Status = in.Status - if err := unversioned.DeepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func DeepCopy_federation_ClusterList(in ClusterList, out *ClusterList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Cluster, len(in)) - for i := range in { - if err := DeepCopy_federation_Cluster(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_federation_ClusterSpec(in ClusterSpec, out *ClusterSpec, c *conversion.Cloner) error { - if in.ServerAddressByClientCIDRs != nil { - in, out := in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs - *out = make([]ServerAddressByClientCIDR, len(in)) - for i := range in { - if err := 
DeepCopy_federation_ServerAddressByClientCIDR(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.ServerAddressByClientCIDRs = nil - } - if in.SecretRef != nil { - in, out := in.SecretRef, &out.SecretRef - *out = new(api.LocalObjectReference) - if err := api.DeepCopy_api_LocalObjectReference(*in, *out, c); err != nil { - return err - } - } else { - out.SecretRef = nil - } - return nil -} - -func DeepCopy_federation_ClusterStatus(in ClusterStatus, out *ClusterStatus, c *conversion.Cloner) error { - if in.Conditions != nil { - in, out := in.Conditions, &out.Conditions - *out = make([]ClusterCondition, len(in)) - for i := range in { - if err := DeepCopy_federation_ClusterCondition(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - if in.Zones != nil { - in, out := in.Zones, &out.Zones - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Zones = nil - } - out.Region = in.Region - return nil -} - -func DeepCopy_federation_ServerAddressByClientCIDR(in ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, c *conversion.Cloner) error { - out.ClientCIDR = in.ClientCIDR - out.ServerAddress = in.ServerAddress - return nil -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/install/install.go b/vendor/k8s.io/kubernetes/federation/apis/federation/install/install.go deleted file mode 100644 index a1dc24cab..000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/install/install.go +++ /dev/null @@ -1,129 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package install - -import ( - "fmt" - - "github.com/golang/glog" - - "k8s.io/kubernetes/federation/apis/federation" - "k8s.io/kubernetes/federation/apis/federation/v1beta1" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/sets" -) - -const importPrefix = "k8s.io/kubernetes/federation/apis/federation" - -var accessor = meta.NewAccessor() - -// availableVersions lists all known external versions for this group from most preferred to least preferred -var availableVersions = []unversioned.GroupVersion{v1beta1.SchemeGroupVersion} - -func init() { - registered.RegisterVersions(availableVersions) - externalVersions := []unversioned.GroupVersion{} - for _, v := range availableVersions { - if registered.IsAllowedVersion(v) { - externalVersions = append(externalVersions, v) - } - } - if len(externalVersions) == 0 { - glog.V(4).Infof("No version is registered for group %v", federation.GroupName) - return - } - - if err := registered.EnableVersions(externalVersions...); err != nil { - glog.V(4).Infof("%v", err) - return - } - if err := enableVersions(externalVersions); err != nil { - glog.V(4).Infof("%v", err) - return - } -} - -// TODO: enableVersions should be centralized rather than spread in each API -// group. -// We can combine registered.RegisterVersions, registered.EnableVersions and -// registered.RegisterGroup once we have moved enableVersions there. -func enableVersions(externalVersions []unversioned.GroupVersion) error { - addVersionsToScheme(externalVersions...) - preferredExternalVersion := externalVersions[0] - - groupMeta := apimachinery.GroupMeta{ - GroupVersion: preferredExternalVersion, - GroupVersions: externalVersions, - RESTMapper: newRESTMapper(externalVersions), - SelfLinker: runtime.SelfLinker(accessor), - InterfacesFor: interfacesFor, - } - - if err := registered.RegisterGroup(groupMeta); err != nil { - return err - } - api.RegisterRESTMapper(groupMeta.RESTMapper) - return nil -} - -func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { - // the list of kinds that are scoped at the root of the api hierarchy - // if a kind is not enumerated here, it is assumed to have a namespace scope - rootScoped := sets.NewString( - "Cluster", - ) - - ignoredKinds := sets.NewString() - - return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) -} - -// interfacesFor returns the default Codec and ResourceVersioner for a given version -// string, or an error if the version is not known. 
-func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { - switch version { - case v1beta1.SchemeGroupVersion: - return &meta.VersionInterfaces{ - ObjectConvertor: api.Scheme, - MetadataAccessor: accessor, - }, nil - default: - g, _ := registered.Group(federation.GroupName) - return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) - } -} - -func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { - // add the internal version to Scheme - federation.AddToScheme(api.Scheme) - // add the enabled external versions to Scheme - for _, v := range externalVersions { - if !registered.IsEnabledVersion(v) { - glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) - continue - } - switch v { - case v1beta1.SchemeGroupVersion: - v1beta1.AddToScheme(api.Scheme) - } - } -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/register.go b/vendor/k8s.io/kubernetes/federation/apis/federation/register.go deleted file mode 100644 index 2cc7f1f0e..000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/register.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package federation - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// GroupName is the group name use in this package -const GroupName = "federation" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns back a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -// Adds the list of known types to api.Scheme. -func AddToScheme(scheme *runtime.Scheme) { - addKnownTypes(scheme) -} - -func addKnownTypes(scheme *runtime.Scheme) { - scheme.AddKnownTypes(SchemeGroupVersion, - &Cluster{}, - &ClusterList{}, - &api.ListOptions{}, - &api.DeleteOptions{}, - ) -} - -func (obj *Cluster) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *ClusterList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/types.generated.go b/vendor/k8s.io/kubernetes/federation/apis/federation/types.generated.go deleted file mode 100644 index 95ebc5994..000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/types.generated.go +++ /dev/null @@ -1,2333 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package federation - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg1_api "k8s.io/kubernetes/pkg/api" - pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg3_types "k8s.io/kubernetes/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_api.LocalObjectReference - var v1 pkg2_unversioned.Time - var v2 pkg3_types.UID - var v3 time.Time - _, _, _, _ = v0, v1, v2, v3 - } -} - -func (x *ServerAddressByClientCIDR) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClientCIDR)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clientCIDR")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClientCIDR)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServerAddress)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serverAddress")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServerAddress)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServerAddressByClientCIDR) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServerAddressByClientCIDR) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "clientCIDR": - if r.TryDecodeAsNil() { - x.ClientCIDR = "" - } else { - x.ClientCIDR = string(r.DecodeString()) - } - case "serverAddress": - if r.TryDecodeAsNil() { - x.ServerAddress = "" - } else { - x.ServerAddress = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServerAddressByClientCIDR) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClientCIDR = "" - } else { - x.ClientCIDR = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServerAddress = "" - } else { - x.ServerAddress = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.SecretRef != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.ServerAddressByClientCIDRs == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceServerAddressByClientCIDR(([]ServerAddressByClientCIDR)(x.ServerAddressByClientCIDRs), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serverAddressByClientCIDRs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ServerAddressByClientCIDRs == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceServerAddressByClientCIDR(([]ServerAddressByClientCIDR)(x.ServerAddressByClientCIDRs), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretRef == 
nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "serverAddressByClientCIDRs": - if r.TryDecodeAsNil() { - x.ServerAddressByClientCIDRs = nil - } else { - yyv4 := &x.ServerAddressByClientCIDRs - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceServerAddressByClientCIDR((*[]ServerAddressByClientCIDR)(yyv4), d) - } - } - case "secretRef": - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(pkg1_api.LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServerAddressByClientCIDRs = nil - } else { - yyv8 := &x.ServerAddressByClientCIDRs - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - h.decSliceServerAddressByClientCIDR((*[]ServerAddressByClientCIDR)(yyv8), d) - } - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(pkg1_api.LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ClusterConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ClusterConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ClusterCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = true - yyq2[3] = true - yyq2[4] = x.Reason != "" - yyq2[5] = x.Message != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yysf7 := &x.Status - yysf7.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yysf8 := &x.Status - yysf8.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.LastProbeTime - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else if yym11 { - z.EncBinaryMarshal(yy10) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(yy10) - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.LastProbeTime - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if yym13 { - z.EncBinaryMarshal(yy12) - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.LastTransitionTime - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(yy15) { - } else if yym16 { - z.EncBinaryMarshal(yy15) - } else if !yym16 && z.IsJSONHandle() { - z.EncJSONMarshal(yy15) - } else { - z.EncFallback(yy15) - } - } else { - r.EncodeNil() - } - } else 
{ - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.LastTransitionTime - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(yy17) { - } else if yym18 { - z.EncBinaryMarshal(yy17) - } else if !yym18 && z.IsJSONHandle() { - z.EncJSONMarshal(yy17) - } else { - z.EncFallback(yy17) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ClusterConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg1_api.ConditionStatus(r.DecodeString()) - } - case 
"lastProbeTime": - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_unversioned.Time{} - } else { - yyv6 := &x.LastProbeTime - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if yym7 { - z.DecBinaryUnmarshal(yyv6) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv8 := &x.LastTransitionTime - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else if yym9 { - z.DecBinaryUnmarshal(yyv8) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv8) - } else { - z.DecFallback(yyv8, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ClusterConditionType(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg1_api.ConditionStatus(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_unversioned.Time{} - } else { - yyv15 := &x.LastProbeTime - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.DecExt(yyv15) { - } else if yym16 { - z.DecBinaryUnmarshal(yyv15) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv15) - } else { - z.DecFallback(yyv15, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv17 := &x.LastTransitionTime - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else if yym18 { - z.DecBinaryUnmarshal(yyv17) - } else if !yym18 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv17) - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - 
if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Conditions) != 0 - yyq2[1] = len(x.Zones) != 0 - yyq2[2] = x.Region != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceClusterCondition(([]ClusterCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceClusterCondition(([]ClusterCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Zones == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - z.F.EncSliceStringV(x.Zones, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("zones")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Zones == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - z.F.EncSliceStringV(x.Zones, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Region)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("region")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Region)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv4 := &x.Conditions - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceClusterCondition((*[]ClusterCondition)(yyv4), d) - } - } - case "zones": - if r.TryDecodeAsNil() { - x.Zones = nil - } else { - yyv6 := &x.Zones - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecSliceStringX(yyv6, false, d) - } - } - case "region": - if r.TryDecodeAsNil() { - x.Region = "" - } else { - x.Region = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv10 := &x.Conditions - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceClusterCondition((*[]ClusterCondition)(yyv10), d) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Zones = nil - } else { - yyv12 := &x.Zones - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - z.F.DecSliceStringX(yyv12, false, d) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Region = "" - } else { - x.Region = string(r.DecodeString()) - } - for { - yyj9++ - if 
yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Cluster) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || 
yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Cluster) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Cluster) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ClusterSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ClusterStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Cluster) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_api.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ClusterSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ClusterStatus{} - } else { - yyv12 := &x.Status - 
yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceCluster(([]Cluster)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceCluster(([]Cluster)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceCluster((*[]Cluster)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta 
- yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceCluster((*[]Cluster)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceServerAddressByClientCIDR(v []ServerAddressByClientCIDR, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceServerAddressByClientCIDR(v *[]ServerAddressByClientCIDR, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ServerAddressByClientCIDR{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ServerAddressByClientCIDR, yyrl1) - } - } else { - yyv1 = make([]ServerAddressByClientCIDR, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServerAddressByClientCIDR{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ServerAddressByClientCIDR{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServerAddressByClientCIDR{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, 
ServerAddressByClientCIDR{}) // var yyz1 ServerAddressByClientCIDR - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServerAddressByClientCIDR{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ServerAddressByClientCIDR{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceClusterCondition(v []ClusterCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceClusterCondition(v *[]ClusterCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ClusterCondition{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ClusterCondition, yyrl1) - } - } else { - yyv1 = make([]ClusterCondition, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterCondition{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ClusterCondition{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterCondition{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ClusterCondition{}) // var yyz1 ClusterCondition - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterCondition{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ClusterCondition{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceCluster(v []Cluster, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCluster(v *[]Cluster, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := 
z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Cluster{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 336) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Cluster, yyrl1) - } - } else { - yyv1 = make([]Cluster, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Cluster{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Cluster{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Cluster{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Cluster{}) // var yyz1 Cluster - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Cluster{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Cluster{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/types.go b/vendor/k8s.io/kubernetes/federation/apis/federation/types.go deleted file mode 100644 index 10a9d8bee..000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/types.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package federation - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" -) - -// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. -type ServerAddressByClientCIDR struct { - // The CIDR with which clients can match their IP to figure out the server address that they should use. - ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"` - // Address of this server, suitable for a client that matches the above CIDR. - // This can be a hostname, hostname:port, IP or IP:port. - ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"` -} - -// ClusterSpec describes the attributes of a kubernetes cluster. -type ClusterSpec struct { - // A map of client CIDR to server address. - // This is to help clients reach servers in the most network-efficient way possible. 
- // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" patchStrategy:"merge" patchMergeKey:"clientCIDR"` - // Name of the secret containing kubeconfig to access this cluster. - // The secret is read from the kubernetes cluster that is hosting federation control plane. - // Admin needs to ensure that the required secret exists. Secret should be in the same namespace where federation control plane is hosted and it should have kubeconfig in its data with key "kubeconfig". - // This will later be changed to a reference to secret in federation control plane when the federation control plane supports secrets. - // This can be left empty if the cluster allows insecure access. - SecretRef *api.LocalObjectReference `json:"secretRef,omitempty"` -} - -type ClusterConditionType string - -// These are valid conditions of a cluster. -const ( - // ClusterReady means the cluster is ready to accept workloads. - ClusterReady ClusterConditionType = "Ready" - // ClusterOffline means the cluster is temporarily down or not reachable - ClusterOffline ClusterConditionType = "Offline" -) - -// ClusterCondition describes current state of a cluster. -type ClusterCondition struct { - // Type of cluster condition, Complete or Failed. - Type ClusterConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status api.ConditionStatus `json:"status"` - // Last time the condition was checked. - LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty"` - // Last time the condition transitioned from one status to another. - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"` - // (brief) reason for the condition's last transition. - Reason string `json:"reason,omitempty"` - // Human readable message indicating details about last transition. - Message string `json:"message,omitempty"` -} - -// ClusterStatus is information about the current status of a cluster updated by cluster controller periodically. -type ClusterStatus struct { - // Conditions is an array of current cluster conditions. - Conditions []ClusterCondition `json:"conditions,omitempty"` - // Zones is the list of availability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. - // These will always be in the same region. - Zones []string `json:"zones,omitempty"` - // Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'. - Region string `json:"region,omitempty"` -} - -// +genclient=true,nonNamespaced=true - -// Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation. -type Cluster struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - api.ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the behavior of the Cluster. - Spec ClusterSpec `json:"spec,omitempty"` - // Status describes the current status of a Cluster - Status ClusterStatus `json:"status,omitempty"` -} - -// A list of all the kubernetes clusters registered to the federation -type ClusterList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata.
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty"` - - // List of Cluster objects. - Items []Cluster `json:"items"` -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/conversion.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/conversion.go deleted file mode 100644 index f8d396e86..000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/conversion.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/runtime" -) - -func addConversionFuncs(scheme *runtime.Scheme) { - err := api.Scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.String(), "Cluster", - func(label, value string) (string, string, error) { - switch label { - case "metadata.name": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. - panic(err) - } -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/conversion_generated.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/conversion_generated.go deleted file mode 100644 index 5877d0082..000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/conversion_generated.go +++ /dev/null @@ -1,296 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! 
- -package v1beta1 - -import ( - federation "k8s.io/kubernetes/federation/apis/federation" - api "k8s.io/kubernetes/pkg/api" - v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedConversionFuncs( - Convert_v1beta1_Cluster_To_federation_Cluster, - Convert_federation_Cluster_To_v1beta1_Cluster, - Convert_v1beta1_ClusterCondition_To_federation_ClusterCondition, - Convert_federation_ClusterCondition_To_v1beta1_ClusterCondition, - Convert_v1beta1_ClusterList_To_federation_ClusterList, - Convert_federation_ClusterList_To_v1beta1_ClusterList, - Convert_v1beta1_ClusterSpec_To_federation_ClusterSpec, - Convert_federation_ClusterSpec_To_v1beta1_ClusterSpec, - Convert_v1beta1_ClusterStatus_To_federation_ClusterStatus, - Convert_federation_ClusterStatus_To_v1beta1_ClusterStatus, - Convert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR, - Convert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR, - ); err != nil { - // if one of the conversion functions is malformed, detect it immediately. - panic(err) - } -} - -func autoConvert_v1beta1_Cluster_To_federation_Cluster(in *Cluster, out *federation.Cluster, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v1beta1_ClusterSpec_To_federation_ClusterSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_ClusterStatus_To_federation_ClusterStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_Cluster_To_federation_Cluster(in *Cluster, out *federation.Cluster, s conversion.Scope) error { - return autoConvert_v1beta1_Cluster_To_federation_Cluster(in, out, s) -} - -func autoConvert_federation_Cluster_To_v1beta1_Cluster(in *federation.Cluster, out *Cluster, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_federation_ClusterSpec_To_v1beta1_ClusterSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_federation_ClusterStatus_To_v1beta1_ClusterStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_federation_Cluster_To_v1beta1_Cluster(in *federation.Cluster, out *Cluster, s conversion.Scope) error { - return autoConvert_federation_Cluster_To_v1beta1_Cluster(in, out, s) -} - -func autoConvert_v1beta1_ClusterCondition_To_federation_ClusterCondition(in *ClusterCondition, out *federation.ClusterCondition, s conversion.Scope) error { - out.Type = federation.ClusterConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_v1beta1_ClusterCondition_To_federation_ClusterCondition(in *ClusterCondition, out *federation.ClusterCondition, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterCondition_To_federation_ClusterCondition(in, out, s) -} - -func autoConvert_federation_ClusterCondition_To_v1beta1_ClusterCondition(in *federation.ClusterCondition, out *ClusterCondition, s conversion.Scope) error { - out.Type = ClusterConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_federation_ClusterCondition_To_v1beta1_ClusterCondition(in *federation.ClusterCondition, out *ClusterCondition, s conversion.Scope) error { - return autoConvert_federation_ClusterCondition_To_v1beta1_ClusterCondition(in, out, s) -} - -func autoConvert_v1beta1_ClusterList_To_federation_ClusterList(in *ClusterList, out *federation.ClusterList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]federation.Cluster, len(*in)) - for i := range *in { - if err := Convert_v1beta1_Cluster_To_federation_Cluster(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1beta1_ClusterList_To_federation_ClusterList(in *ClusterList, out *federation.ClusterList, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterList_To_federation_ClusterList(in, out, s) -} - -func autoConvert_federation_ClusterList_To_v1beta1_ClusterList(in *federation.ClusterList, out *ClusterList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - 
return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Cluster, len(*in)) - for i := range *in { - if err := Convert_federation_Cluster_To_v1beta1_Cluster(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_federation_ClusterList_To_v1beta1_ClusterList(in *federation.ClusterList, out *ClusterList, s conversion.Scope) error { - return autoConvert_federation_ClusterList_To_v1beta1_ClusterList(in, out, s) -} - -func autoConvert_v1beta1_ClusterSpec_To_federation_ClusterSpec(in *ClusterSpec, out *federation.ClusterSpec, s conversion.Scope) error { - if in.ServerAddressByClientCIDRs != nil { - in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs - *out = make([]federation.ServerAddressByClientCIDR, len(*in)) - for i := range *in { - if err := Convert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.ServerAddressByClientCIDRs = nil - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(api.LocalObjectReference) - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(*in, *out, 0); err != nil { - return err - } - } else { - out.SecretRef = nil - } - return nil -} - -func Convert_v1beta1_ClusterSpec_To_federation_ClusterSpec(in *ClusterSpec, out *federation.ClusterSpec, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterSpec_To_federation_ClusterSpec(in, out, s) -} - -func autoConvert_federation_ClusterSpec_To_v1beta1_ClusterSpec(in *federation.ClusterSpec, out *ClusterSpec, s conversion.Scope) error { - if in.ServerAddressByClientCIDRs != nil { - in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs - *out = make([]ServerAddressByClientCIDR, len(*in)) - for i := range *in { - if err := Convert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.ServerAddressByClientCIDRs = nil - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(v1.LocalObjectReference) - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(*in, *out, 0); err != nil { - return err - } - } else { - out.SecretRef = nil - } - return nil -} - -func Convert_federation_ClusterSpec_To_v1beta1_ClusterSpec(in *federation.ClusterSpec, out *ClusterSpec, s conversion.Scope) error { - return autoConvert_federation_ClusterSpec_To_v1beta1_ClusterSpec(in, out, s) -} - -func autoConvert_v1beta1_ClusterStatus_To_federation_ClusterStatus(in *ClusterStatus, out *federation.ClusterStatus, s conversion.Scope) error { - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]federation.ClusterCondition, len(*in)) - for i := range *in { - if err := Convert_v1beta1_ClusterCondition_To_federation_ClusterCondition(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - out.Zones = in.Zones - out.Region = in.Region - return nil -} - -func Convert_v1beta1_ClusterStatus_To_federation_ClusterStatus(in *ClusterStatus, out *federation.ClusterStatus, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterStatus_To_federation_ClusterStatus(in, out, s) -} - -func autoConvert_federation_ClusterStatus_To_v1beta1_ClusterStatus(in *federation.ClusterStatus, out *ClusterStatus, s conversion.Scope) error { - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ClusterCondition, len(*in)) - for i := range *in { - if err := Convert_federation_ClusterCondition_To_v1beta1_ClusterCondition(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - out.Zones = in.Zones - out.Region = in.Region - return nil -} - -func Convert_federation_ClusterStatus_To_v1beta1_ClusterStatus(in *federation.ClusterStatus, out *ClusterStatus, s conversion.Scope) error { - return autoConvert_federation_ClusterStatus_To_v1beta1_ClusterStatus(in, out, s) -} - -func autoConvert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in *ServerAddressByClientCIDR, out *federation.ServerAddressByClientCIDR, s conversion.Scope) error { - out.ClientCIDR = in.ClientCIDR - out.ServerAddress = in.ServerAddress - return nil -} - -func Convert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in *ServerAddressByClientCIDR, out *federation.ServerAddressByClientCIDR, s conversion.Scope) error { - return autoConvert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in, out, s) -} - -func autoConvert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(in *federation.ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, s conversion.Scope) error { - out.ClientCIDR = in.ClientCIDR - out.ServerAddress = in.ServerAddress - return nil -} - -func Convert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(in *federation.ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, s conversion.Scope) error { - return autoConvert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/deep_copy_generated.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/deep_copy_generated.go deleted file mode 100644 index 0d53b4a6a..000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/deep_copy_generated.go +++ /dev/null @@ -1,146 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1beta1 - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_v1beta1_Cluster, - DeepCopy_v1beta1_ClusterCondition, - DeepCopy_v1beta1_ClusterList, - DeepCopy_v1beta1_ClusterSpec, - DeepCopy_v1beta1_ClusterStatus, - DeepCopy_v1beta1_ServerAddressByClientCIDR, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. - panic(err) - } -} - -func DeepCopy_v1beta1_Cluster(in Cluster, out *Cluster, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_ClusterSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_ClusterStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_ClusterCondition(in ClusterCondition, out *ClusterCondition, c *conversion.Cloner) error { - out.Type = in.Type - out.Status = in.Status - if err := unversioned.DeepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func DeepCopy_v1beta1_ClusterList(in ClusterList, out *ClusterList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Cluster, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_Cluster(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1beta1_ClusterSpec(in ClusterSpec, out *ClusterSpec, c *conversion.Cloner) error { - if in.ServerAddressByClientCIDRs != nil { - in, out := in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs - *out = make([]ServerAddressByClientCIDR, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_ServerAddressByClientCIDR(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.ServerAddressByClientCIDRs = nil - } - if in.SecretRef != nil { - in, out := in.SecretRef, &out.SecretRef - *out = new(v1.LocalObjectReference) - if err := v1.DeepCopy_v1_LocalObjectReference(*in, *out, c); err != nil { - return err - } - } else { - out.SecretRef = nil - } - return nil -} - -func 
DeepCopy_v1beta1_ClusterStatus(in ClusterStatus, out *ClusterStatus, c *conversion.Cloner) error { - if in.Conditions != nil { - in, out := in.Conditions, &out.Conditions - *out = make([]ClusterCondition, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_ClusterCondition(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - if in.Zones != nil { - in, out := in.Zones, &out.Zones - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Zones = nil - } - out.Region = in.Region - return nil -} - -func DeepCopy_v1beta1_ServerAddressByClientCIDR(in ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, c *conversion.Cloner) error { - out.ClientCIDR = in.ClientCIDR - out.ServerAddress = in.ServerAddress - return nil -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/defaults.go deleted file mode 100644 index b096ce2fd..000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/defaults.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/kubernetes/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) { -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/doc.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/doc.go deleted file mode 100644 index cfdb87c53..000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +genconversion=true -package v1beta1 diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.pb.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.pb.go deleted file mode 100644 index e9193b608..000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.pb.go +++ /dev/null @@ -1,1398 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto -// DO NOT EDIT! - -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto - - It has these top-level messages: - Cluster - ClusterCondition - ClusterList - ClusterSpec - ClusterStatus - ServerAddressByClientCIDR -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -func (m *Cluster) Reset() { *m = Cluster{} } -func (m *Cluster) String() string { return proto.CompactTextString(m) } -func (*Cluster) ProtoMessage() {} - -func (m *ClusterCondition) Reset() { *m = ClusterCondition{} } -func (m *ClusterCondition) String() string { return proto.CompactTextString(m) } -func (*ClusterCondition) ProtoMessage() {} - -func (m *ClusterList) Reset() { *m = ClusterList{} } -func (m *ClusterList) String() string { return proto.CompactTextString(m) } -func (*ClusterList) ProtoMessage() {} - -func (m *ClusterSpec) Reset() { *m = ClusterSpec{} } -func (m *ClusterSpec) String() string { return proto.CompactTextString(m) } -func (*ClusterSpec) ProtoMessage() {} - -func (m *ClusterStatus) Reset() { *m = ClusterStatus{} } -func (m *ClusterStatus) String() string { return proto.CompactTextString(m) } -func (*ClusterStatus) ProtoMessage() {} - -func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } -func (m *ServerAddressByClientCIDR) String() string { return proto.CompactTextString(m) } -func (*ServerAddressByClientCIDR) ProtoMessage() {} - -func init() { - proto.RegisterType((*Cluster)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.Cluster") - proto.RegisterType((*ClusterCondition)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.ClusterCondition") - proto.RegisterType((*ClusterList)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.ClusterList") - proto.RegisterType((*ClusterSpec)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.ClusterSpec") - proto.RegisterType((*ClusterStatus)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.ClusterStatus") - proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.kubernetes.federation.apis.federation.v1beta1.ServerAddressByClientCIDR") -} -func (m *Cluster) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Cluster) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, 
uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *ClusterCondition) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ClusterCondition) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i += copy(data[i:], m.Status) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) - n4, err := m.LastProbeTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n4 - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n5, err := m.LastTransitionTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n5 - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - return i, nil -} - -func (m *ClusterList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ClusterList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ClusterSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ClusterSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, msg := range m.ServerAddressByClientCIDRs { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.SecretRef != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) - n7, err := m.SecretRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n7 - } - return i, nil -} - -func (m *ClusterStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ClusterStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - 
} - i += n - } - } - if len(m.Zones) > 0 { - for _, s := range m.Zones { - data[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Region))) - i += copy(data[i:], m.Region) - return i, nil -} - -func (m *ServerAddressByClientCIDR) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ServerAddressByClientCIDR) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ClientCIDR))) - i += copy(data[i:], m.ClientCIDR) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ServerAddress))) - i += copy(data[i:], m.ServerAddress) - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *Cluster) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ClusterCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastProbeTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ClusterList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ClusterSpec) Size() (n int) { - var l int - _ = l - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, e := range m.ServerAddressByClientCIDRs { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ClusterStatus) Size() (n int) { - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Zones) > 0 { - for _, s := range m.Zones { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.Region) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ServerAddressByClientCIDR) Size() (n int) { - var l int - _ = l - l = len(m.ClientCIDR) - n += 1 + l + 
sovGenerated(uint64(l)) - l = len(m.ServerAddress) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Cluster) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Cluster: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Cluster: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterCondition) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - 
break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = ClusterConditionType(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 
6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Cluster{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] 
- iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) - if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &k8s_io_kubernetes_pkg_api_v1.LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, ClusterCondition{}) - if err := 
m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Zones = append(m.Zones, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Region = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServerAddressByClientCIDR) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServerAddressByClientCIDR: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServerAddressByClientCIDR: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientCIDR", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientCIDR = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerAddress", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServerAddress = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto deleted file mode 100644 index 811f40f43..000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto +++ /dev/null @@ -1,114 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
- -syntax = 'proto2'; - -package k8s.io.kubernetes.federation.apis.federation.v1beta1; - -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation. -message Cluster { - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec defines the behavior of the Cluster. - optional ClusterSpec spec = 2; - - // Status describes the current status of a Cluster - optional ClusterStatus status = 3; -} - -// ClusterCondition describes current state of a cluster. -message ClusterCondition { - // Type of cluster condition, Complete or Failed. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time the condition was checked. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; - - // Last time the condition transit from one status to another. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; - - // (brief) reason for the condition's last transition. - optional string reason = 5; - - // Human readable message indicating details about last transition. - optional string message = 6; -} - -// A list of all the kubernetes clusters registered to the federation -message ClusterList { - // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of Cluster objects. - repeated Cluster items = 2; -} - -// ClusterSpec describes the attributes of a kubernetes cluster. -message ClusterSpec { - // A map of client CIDR to server address. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 1; - - // Name of the secret containing kubeconfig to access this cluster. - // The secret is read from the kubernetes cluster that is hosting federation control plane. - // Admin needs to ensure that the required secret exists. Secret should be in the same namespace where federation control plane is hosted and it should have kubeconfig in its data with key "kubeconfig". - // This will later be changed to a reference to secret in federation control plane when the federation control plane supports secrets. - // This can be left empty if the cluster allows insecure access. - optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference secretRef = 2; -} - -// ClusterStatus is information about the current status of a cluster updated by cluster controller periodically. -message ClusterStatus { - // Conditions is an array of current cluster conditions. - repeated ClusterCondition conditions = 1; - - // Zones is the list of availability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. - // These will always be in the same region. 
- repeated string zones = 5; - - // Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'. - optional string region = 6; -} - -// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. -message ServerAddressByClientCIDR { - // The CIDR with which clients can match their IP to figure out the server address that they should use. - optional string clientCIDR = 1; - - // Address of this server, suitable for a client that matches the above CIDR. - // This can be a hostname, hostname:port, IP or IP:port. - optional string serverAddress = 2; -} - diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/register.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/register.go deleted file mode 100644 index 46dc3f10b..000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/register.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" -) - -// GroupName is the group name use in this package -const GroupName = "federation" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Adds the list of known types to api.Scheme. -func AddToScheme(scheme *runtime.Scheme) { - addKnownTypes(scheme) - addDefaultingFuncs(scheme) - addConversionFuncs(scheme) -} - -func addKnownTypes(scheme *runtime.Scheme) { - scheme.AddKnownTypes(SchemeGroupVersion, - &Cluster{}, - &ClusterList{}, - &v1.ListOptions{}, - &v1.DeleteOptions{}, - ) - versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) -} - -func (obj *Cluster) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *ClusterList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types.generated.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types.generated.go deleted file mode 100644 index e4d7246f2..000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types.generated.go +++ /dev/null @@ -1,2333 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v1beta1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg1_v1 "k8s.io/kubernetes/pkg/api/v1" - pkg3_types "k8s.io/kubernetes/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg2_unversioned.Time - var v1 pkg1_v1.LocalObjectReference - var v2 pkg3_types.UID - var v3 time.Time - _, _, _, _ = v0, v1, v2, v3 - } -} - -func (x *ServerAddressByClientCIDR) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClientCIDR)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clientCIDR")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClientCIDR)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServerAddress)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serverAddress")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServerAddress)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServerAddressByClientCIDR) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServerAddressByClientCIDR) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "clientCIDR": - if r.TryDecodeAsNil() { - x.ClientCIDR = "" - } else { - x.ClientCIDR = string(r.DecodeString()) - } - case "serverAddress": - if r.TryDecodeAsNil() { - x.ServerAddress = "" - } else { - x.ServerAddress = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServerAddressByClientCIDR) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClientCIDR = "" - } else { - x.ClientCIDR = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServerAddress = "" - } else { - x.ServerAddress = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.SecretRef != nil - var 
yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.ServerAddressByClientCIDRs == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceServerAddressByClientCIDR(([]ServerAddressByClientCIDR)(x.ServerAddressByClientCIDRs), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serverAddressByClientCIDRs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ServerAddressByClientCIDRs == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceServerAddressByClientCIDR(([]ServerAddressByClientCIDR)(x.ServerAddressByClientCIDRs), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "serverAddressByClientCIDRs": - if r.TryDecodeAsNil() { - x.ServerAddressByClientCIDRs = nil - } else { - yyv4 := &x.ServerAddressByClientCIDRs - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceServerAddressByClientCIDR((*[]ServerAddressByClientCIDR)(yyv4), d) - } - } - case "secretRef": - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(pkg1_v1.LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - default: - 
z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServerAddressByClientCIDRs = nil - } else { - yyv8 := &x.ServerAddressByClientCIDRs - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - h.decSliceServerAddressByClientCIDR((*[]ServerAddressByClientCIDR)(yyv8), d) - } - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(pkg1_v1.LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ClusterConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ClusterConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ClusterCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = true - yyq2[3] = true - yyq2[4] = x.Reason != "" - yyq2[5] = x.Message != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yysf7 := &x.Status - yysf7.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yysf8 := &x.Status - yysf8.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.LastProbeTime - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else if yym11 { - z.EncBinaryMarshal(yy10) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(yy10) - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.LastProbeTime - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if yym13 { - z.EncBinaryMarshal(yy12) - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.LastTransitionTime - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(yy15) { - } else if yym16 { - z.EncBinaryMarshal(yy15) - } else if !yym16 && z.IsJSONHandle() { - z.EncJSONMarshal(yy15) - } else { - z.EncFallback(yy15) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.LastTransitionTime - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(yy17) { - } else if yym18 { - z.EncBinaryMarshal(yy17) - } else if !yym18 && z.IsJSONHandle() { - z.EncJSONMarshal(yy17) - } else { - z.EncFallback(yy17) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if 
z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ClusterConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg1_v1.ConditionStatus(r.DecodeString()) - } - case "lastProbeTime": - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_unversioned.Time{} - } else { - yyv6 := &x.LastProbeTime - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if yym7 { - z.DecBinaryUnmarshal(yyv6) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv8 := &x.LastTransitionTime - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else if yym9 { - z.DecBinaryUnmarshal(yyv8) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv8) - } else { - z.DecFallback(yyv8, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ClusterConditionType(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg1_v1.ConditionStatus(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > 
l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_unversioned.Time{} - } else { - yyv15 := &x.LastProbeTime - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.DecExt(yyv15) { - } else if yym16 { - z.DecBinaryUnmarshal(yyv15) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv15) - } else { - z.DecFallback(yyv15, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv17 := &x.LastTransitionTime - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else if yym18 { - z.DecBinaryUnmarshal(yyv17) - } else if !yym18 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv17) - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Conditions) != 0 - yyq2[1] = len(x.Zones) != 0 - yyq2[2] = x.Region != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceClusterCondition(([]ClusterCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - 
h.encSliceClusterCondition(([]ClusterCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Zones == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - z.F.EncSliceStringV(x.Zones, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("zones")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Zones == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - z.F.EncSliceStringV(x.Zones, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Region)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("region")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Region)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv4 := &x.Conditions - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceClusterCondition((*[]ClusterCondition)(yyv4), d) - } - } - case "zones": - if r.TryDecodeAsNil() { - x.Zones = nil - } else { - yyv6 := &x.Zones - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecSliceStringX(yyv6, false, d) - } - } - case "region": - if r.TryDecodeAsNil() { - x.Region = "" - } else { - x.Region = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv10 := &x.Conditions - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSliceClusterCondition((*[]ClusterCondition)(yyv10), d) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Zones = nil - } else { - yyv12 := &x.Zones - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - z.F.DecSliceStringX(yyv12, false, d) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Region = "" - } else { - x.Region = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Cluster) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Cluster) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Cluster) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ClusterSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ClusterStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if 
r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Cluster) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ClusterSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ClusterStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceCluster(([]Cluster)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceCluster(([]Cluster)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceCluster((*[]Cluster)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceCluster((*[]Cluster)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceServerAddressByClientCIDR(v []ServerAddressByClientCIDR, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceServerAddressByClientCIDR(v *[]ServerAddressByClientCIDR, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ServerAddressByClientCIDR{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ServerAddressByClientCIDR, yyrl1) - } - } else { - yyv1 = make([]ServerAddressByClientCIDR, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServerAddressByClientCIDR{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ServerAddressByClientCIDR{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServerAddressByClientCIDR{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ServerAddressByClientCIDR{}) // var yyz1 ServerAddressByClientCIDR - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServerAddressByClientCIDR{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ServerAddressByClientCIDR{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceClusterCondition(v []ClusterCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceClusterCondition(v *[]ClusterCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ClusterCondition{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ClusterCondition, yyrl1) - } - } else { - yyv1 = make([]ClusterCondition, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterCondition{} - } else { - yyv2 := 
&yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ClusterCondition{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterCondition{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ClusterCondition{}) // var yyz1 ClusterCondition - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterCondition{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ClusterCondition{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceCluster(v []Cluster, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCluster(v *[]Cluster, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Cluster{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 336) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Cluster, yyrl1) - } - } else { - yyv1 = make([]Cluster, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Cluster{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Cluster{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Cluster{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Cluster{}) // var yyz1 Cluster - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Cluster{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Cluster{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types.go b/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types.go deleted file mode 100644 index f15988e2f..000000000 --- a/vendor/k8s.io/kubernetes/federation/apis/federation/v1beta1/types.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All 
rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" -) - -// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. -type ServerAddressByClientCIDR struct { - // The CIDR with which clients can match their IP to figure out the server address that they should use. - ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"` - // Address of this server, suitable for a client that matches the above CIDR. - // This can be a hostname, hostname:port, IP or IP:port. - ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"` -} - -// ClusterSpec describes the attributes of a kubernetes cluster. -type ClusterSpec struct { - // A map of client CIDR to server address. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" patchStrategy:"merge" patchMergeKey:"clientCIDR" protobuf:"bytes,1,rep,name=serverAddressByClientCIDRs"` - // Name of the secret containing kubeconfig to access this cluster. - // The secret is read from the kubernetes cluster that is hosting federation control plane. - // Admin needs to ensure that the required secret exists. Secret should be in the same namespace where federation control plane is hosted and it should have kubeconfig in its data with key "kubeconfig". - // This will later be changed to a reference to secret in federation control plane when the federation control plane supports secrets. - // This can be left empty if the cluster allows insecure access. - SecretRef *v1.LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,2,opt,name=secretRef"` -} - -type ClusterConditionType string - -// These are valid conditions of a cluster. -const ( - // ClusterReady means the cluster is ready to accept workloads. - ClusterReady ClusterConditionType = "Ready" - // ClusterOffline means the cluster is temporarily down or not reachable - ClusterOffline ClusterConditionType = "Offline" -) - -// ClusterCondition describes current state of a cluster. -type ClusterCondition struct { - // Type of cluster condition, Complete or Failed. - Type ClusterConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ClusterConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` - // Last time the condition was checked. - LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` - // Last time the condition transit from one status to another. 
- LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` - // (brief) reason for the condition's last transition. - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - // Human readable message indicating details about last transition. - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` -} - -// ClusterStatus is information about the current status of a cluster updated by cluster controller peridocally. -type ClusterStatus struct { - // Conditions is an array of current cluster conditions. - Conditions []ClusterCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` - // Zones is the list of avaliability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. - // These will always be in the same region. - Zones []string `json:"zones,omitempty" protobuf:"bytes,5,rep,name=zones"` - // Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'. - Region string `json:"region,omitempty" protobuf:"bytes,6,opt,name=region"` -} - -// +genclient=true,nonNamespaced=true - -// Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation. -type Cluster struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the behavior of the Cluster. - Spec ClusterSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status describes the current status of a Cluster - Status ClusterStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// A list of all the kubernetes clusters registered to the federation -type ClusterList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of Cluster objects. - Items []Cluster `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/clientset.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/clientset.go deleted file mode 100644 index f8163ef83..000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/clientset.go +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package federation_internalclientset - -import ( - "github.com/golang/glog" - unversionedcore "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned" - unversionedfederation "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned" - restclient "k8s.io/kubernetes/pkg/client/restclient" - discovery "k8s.io/kubernetes/pkg/client/typed/discovery" - "k8s.io/kubernetes/pkg/util/flowcontrol" -) - -type Interface interface { - Discovery() discovery.DiscoveryInterface - Federation() unversionedfederation.FederationInterface - Core() unversionedcore.CoreInterface -} - -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. -type Clientset struct { - *discovery.DiscoveryClient - *unversionedfederation.FederationClient - *unversionedcore.CoreClient -} - -// Federation retrieves the FederationClient -func (c *Clientset) Federation() unversionedfederation.FederationInterface { - if c == nil { - return nil - } - return c.FederationClient -} - -// Core retrieves the CoreClient -func (c *Clientset) Core() unversionedcore.CoreInterface { - if c == nil { - return nil - } - return c.CoreClient -} - -// Discovery retrieves the DiscoveryClient -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - return c.DiscoveryClient -} - -// NewForConfig creates a new Clientset for the given config. -func NewForConfig(c *restclient.Config) (*Clientset, error) { - configShallowCopy := *c - if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { - configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) - } - var clientset Clientset - var err error - clientset.FederationClient, err = unversionedfederation.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - clientset.CoreClient, err = unversionedcore.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - - clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) - if err != nil { - glog.Errorf("failed to create the DiscoveryClient: %v", err) - return nil, err - } - return &clientset, nil -} - -// NewForConfigOrDie creates a new Clientset for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *Clientset { - var clientset Clientset - clientset.FederationClient = unversionedfederation.NewForConfigOrDie(c) - clientset.CoreClient = unversionedcore.NewForConfigOrDie(c) - - clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &clientset -} - -// New creates a new Clientset for the given RESTClient. -func New(c *restclient.RESTClient) *Clientset { - var clientset Clientset - clientset.FederationClient = unversionedfederation.New(c) - clientset.CoreClient = unversionedcore.New(c) - - clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) - return &clientset -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/doc.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/doc.go deleted file mode 100644 index 40d4accea..000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with arguments: --clientset-name=federation_internalclientset --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/Service] --input=[../../federation/apis/federation/,api/] - -// This package has the automatically generated clientset. -package federation_internalclientset diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/import_known_versions.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/import_known_versions.go deleted file mode 100644 index af8c2e743..000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/import_known_versions.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package federation_internalclientset - -// These imports are the API groups the client will support. -import ( - _ "k8s.io/kubernetes/federation/apis/federation/install" -) - -func init() { -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/core_client.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/core_client.go deleted file mode 100644 index d308d0fe1..000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/core_client.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - registered "k8s.io/kubernetes/pkg/apimachinery/registered" - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -type CoreInterface interface { - GetRESTClient() *restclient.RESTClient - ServicesGetter -} - -// CoreClient is used to interact with features provided by the Core group. -type CoreClient struct { - *restclient.RESTClient -} - -func (c *CoreClient) Services(namespace string) ServiceInterface { - return newServices(c, namespace) -} - -// NewForConfig creates a new CoreClient for the given config. -func NewForConfig(c *restclient.Config) (*CoreClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &CoreClient{client}, nil -} - -// NewForConfigOrDie creates a new CoreClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *CoreClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new CoreClient for the given RESTClient. -func New(c *restclient.RESTClient) *CoreClient { - return &CoreClient{c} -} - -func setConfigDefaults(config *restclient.Config) error { - // if core group is not registered, return an error - g, err := registered.Group("") - if err != nil { - return err - } - config.APIPath = "/api" - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - // TODO: Unconditionally set the config.Version, until we fix the config. - //if config.Version == "" { - copyGroupVersion := g.GroupVersion - config.GroupVersion = ©GroupVersion - //} - - config.NegotiatedSerializer = api.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// GetRESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *CoreClient) GetRESTClient() *restclient.RESTClient { - if c == nil { - return nil - } - return c.RESTClient -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/doc.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/doc.go deleted file mode 100644 index 30cff08b9..000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// This package is generated by client-gen with arguments: --clientset-name=federation_internalclientset --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/Service] --input=[../../federation/apis/federation/,api/] - -// This package has the automatically generated typed clients. -package unversioned diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/generated_expansion.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/generated_expansion.go deleted file mode 100644 index 65df6665a..000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/generated_expansion.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -type ServiceExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/service.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/service.go deleted file mode 100644 index 006f601c2..000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/core/unversioned/service.go +++ /dev/null @@ -1,149 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ServicesGetter has a method to return a ServiceInterface. -// A group's client should implement this interface. -type ServicesGetter interface { - Services(namespace string) ServiceInterface -} - -// ServiceInterface has methods to work with Service resources. 
-type ServiceInterface interface { - Create(*api.Service) (*api.Service, error) - Update(*api.Service) (*api.Service, error) - UpdateStatus(*api.Service) (*api.Service, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.Service, error) - List(opts api.ListOptions) (*api.ServiceList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - ServiceExpansion -} - -// services implements ServiceInterface -type services struct { - client *CoreClient - ns string -} - -// newServices returns a Services -func newServices(c *CoreClient, namespace string) *services { - return &services{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Create(service *api.Service) (result *api.Service, err error) { - result = &api.Service{} - err = c.client.Post(). - Namespace(c.ns). - Resource("services"). - Body(service). - Do(). - Into(result) - return -} - -// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Update(service *api.Service) (result *api.Service, err error) { - result = &api.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - Body(service). - Do(). - Into(result) - return -} - -func (c *services) UpdateStatus(service *api.Service) (result *api.Service, err error) { - result = &api.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - SubResource("status"). - Body(service). - Do(). - Into(result) - return -} - -// Delete takes name of the service and deletes it. Returns an error if one occurs. -func (c *services) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("services"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *services) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the service, and returns the corresponding service object, and an error if there is any. -func (c *services) Get(name string) (result *api.Service, err error) { - result = &api.Service{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Services that match those selectors. -func (c *services) List(opts api.ListOptions) (result *api.ServiceList, err error) { - result = &api.ServiceList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested services. -func (c *services) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, api.ParameterCodec). 
- Watch() -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/cluster.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/cluster.go deleted file mode 100644 index e27095145..000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/cluster.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - federation "k8s.io/kubernetes/federation/apis/federation" - api "k8s.io/kubernetes/pkg/api" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ClustersGetter has a method to return a ClusterInterface. -// A group's client should implement this interface. -type ClustersGetter interface { - Clusters() ClusterInterface -} - -// ClusterInterface has methods to work with Cluster resources. -type ClusterInterface interface { - Create(*federation.Cluster) (*federation.Cluster, error) - Update(*federation.Cluster) (*federation.Cluster, error) - UpdateStatus(*federation.Cluster) (*federation.Cluster, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*federation.Cluster, error) - List(opts api.ListOptions) (*federation.ClusterList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - ClusterExpansion -} - -// clusters implements ClusterInterface -type clusters struct { - client *FederationClient -} - -// newClusters returns a Clusters -func newClusters(c *FederationClient) *clusters { - return &clusters{ - client: c, - } -} - -// Create takes the representation of a cluster and creates it. Returns the server's representation of the cluster, and an error, if there is any. -func (c *clusters) Create(cluster *federation.Cluster) (result *federation.Cluster, err error) { - result = &federation.Cluster{} - err = c.client.Post(). - Resource("clusters"). - Body(cluster). - Do(). - Into(result) - return -} - -// Update takes the representation of a cluster and updates it. Returns the server's representation of the cluster, and an error, if there is any. -func (c *clusters) Update(cluster *federation.Cluster) (result *federation.Cluster, err error) { - result = &federation.Cluster{} - err = c.client.Put(). - Resource("clusters"). - Name(cluster.Name). - Body(cluster). - Do(). - Into(result) - return -} - -func (c *clusters) UpdateStatus(cluster *federation.Cluster) (result *federation.Cluster, err error) { - result = &federation.Cluster{} - err = c.client.Put(). - Resource("clusters"). - Name(cluster.Name). - SubResource("status"). - Body(cluster). - Do(). - Into(result) - return -} - -// Delete takes name of the cluster and deletes it. Returns an error if one occurs. 
-func (c *clusters) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Resource("clusters"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusters) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Resource("clusters"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the cluster, and returns the corresponding cluster object, and an error if there is any. -func (c *clusters) Get(name string) (result *federation.Cluster, err error) { - result = &federation.Cluster{} - err = c.client.Get(). - Resource("clusters"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Clusters that match those selectors. -func (c *clusters) List(opts api.ListOptions) (result *federation.ClusterList, err error) { - result = &federation.ClusterList{} - err = c.client.Get(). - Resource("clusters"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusters. -func (c *clusters) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("clusters"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/doc.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/doc.go deleted file mode 100644 index 30cff08b9..000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with arguments: --clientset-name=federation_internalclientset --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/Service] --input=[../../federation/apis/federation/,api/] - -// This package has the automatically generated typed clients. -package unversioned diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/federation_client.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/federation_client.go deleted file mode 100644 index be2a8a153..000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/federation_client.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - registered "k8s.io/kubernetes/pkg/apimachinery/registered" - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -type FederationInterface interface { - GetRESTClient() *restclient.RESTClient - ClustersGetter -} - -// FederationClient is used to interact with features provided by the Federation group. -type FederationClient struct { - *restclient.RESTClient -} - -func (c *FederationClient) Clusters() ClusterInterface { - return newClusters(c) -} - -// NewForConfig creates a new FederationClient for the given config. -func NewForConfig(c *restclient.Config) (*FederationClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &FederationClient{client}, nil -} - -// NewForConfigOrDie creates a new FederationClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *FederationClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new FederationClient for the given RESTClient. -func New(c *restclient.RESTClient) *FederationClient { - return &FederationClient{c} -} - -func setConfigDefaults(config *restclient.Config) error { - // if federation group is not registered, return an error - g, err := registered.Group("federation") - if err != nil { - return err - } - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - // TODO: Unconditionally set the config.Version, until we fix the config. - //if config.Version == "" { - copyGroupVersion := g.GroupVersion - config.GroupVersion = ©GroupVersion - //} - - config.NegotiatedSerializer = api.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// GetRESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FederationClient) GetRESTClient() *restclient.RESTClient { - if c == nil { - return nil - } - return c.RESTClient -} diff --git a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/generated_expansion.go b/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/generated_expansion.go deleted file mode 100644 index 8888bf9bd..000000000 --- a/vendor/k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset/typed/federation/unversioned/generated_expansion.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -type ClusterExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/api/OWNERS b/vendor/k8s.io/kubernetes/pkg/api/OWNERS deleted file mode 100644 index d28472e0f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -assignees: - - bgrant0607 - - erictune - - lavalamp - - smarterclayton - - thockin diff --git a/vendor/k8s.io/kubernetes/pkg/api/annotations/annotations.go b/vendor/k8s.io/kubernetes/pkg/api/annotations/annotations.go deleted file mode 100644 index dbdf11cba..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/annotations/annotations.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package annotations - -const kubectlPrefix = "kubectl.kubernetes.io/" - -// LastAppliedConfigAnnotation is the annotation used to store the previous -// configuration of a resource for use in a three way diff by UpdateApplyAnnotation. -const LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration" diff --git a/vendor/k8s.io/kubernetes/pkg/api/annotations/doc.go b/vendor/k8s.io/kubernetes/pkg/api/annotations/doc.go deleted file mode 100644 index 6a422ea4f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/annotations/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package annotations defines annotation keys that shared between server and client -package annotations diff --git a/vendor/k8s.io/kubernetes/pkg/api/context.go b/vendor/k8s.io/kubernetes/pkg/api/context.go deleted file mode 100644 index 7e8639571..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/context.go +++ /dev/null @@ -1,121 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - stderrs "errors" - "time" - - "golang.org/x/net/context" - "k8s.io/kubernetes/pkg/auth/user" -) - -// Context carries values across API boundaries. -// This context matches the context.Context interface -// (https://blog.golang.org/context), for the purposes -// of passing the api.Context through to the storage tier. -// TODO: Determine the extent that this abstraction+interface -// is used by the api, and whether we can remove. -type Context interface { - // Value returns the value associated with key or nil if none. - Value(key interface{}) interface{} - - // Deadline returns the time when this Context will be canceled, if any. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that is closed when this Context is canceled - // or times out. - Done() <-chan struct{} - - // Err indicates why this context was canceled, after the Done channel - // is closed. - Err() error -} - -// The key type is unexported to prevent collisions -type key int - -// namespaceKey is the context key for the request namespace. -const namespaceKey key = 0 - -// userKey is the context key for the request user. -const userKey key = 1 - -// NewContext instantiates a base context object for request flows. -func NewContext() Context { - return context.TODO() -} - -// NewDefaultContext instantiates a base context object for request flows in the default namespace -func NewDefaultContext() Context { - return WithNamespace(NewContext(), NamespaceDefault) -} - -// WithValue returns a copy of parent in which the value associated with key is val. -func WithValue(parent Context, key interface{}, val interface{}) Context { - internalCtx, ok := parent.(context.Context) - if !ok { - panic(stderrs.New("Invalid context type")) - } - return context.WithValue(internalCtx, key, val) -} - -// WithNamespace returns a copy of parent in which the namespace value is set -func WithNamespace(parent Context, namespace string) Context { - return WithValue(parent, namespaceKey, namespace) -} - -// NamespaceFrom returns the value of the namespace key on the ctx -func NamespaceFrom(ctx Context) (string, bool) { - namespace, ok := ctx.Value(namespaceKey).(string) - return namespace, ok -} - -// NamespaceValue returns the value of the namespace key on the ctx, or the empty string if none -func NamespaceValue(ctx Context) string { - namespace, _ := NamespaceFrom(ctx) - return namespace -} - -// ValidNamespace returns false if the namespace on the context differs from the resource. If the resource has no namespace, it is set to the value in the context. 
-func ValidNamespace(ctx Context, resource *ObjectMeta) bool { - ns, ok := NamespaceFrom(ctx) - if len(resource.Namespace) == 0 { - resource.Namespace = ns - } - return ns == resource.Namespace && ok -} - -// WithNamespaceDefaultIfNone returns a context whose namespace is the default if and only if the parent context has no namespace value -func WithNamespaceDefaultIfNone(parent Context) Context { - namespace, ok := NamespaceFrom(parent) - if !ok || len(namespace) == 0 { - return WithNamespace(parent, NamespaceDefault) - } - return parent -} - -// WithUser returns a copy of parent in which the user value is set -func WithUser(parent Context, user user.Info) Context { - return WithValue(parent, userKey, user) -} - -// UserFrom returns the value of the user key on the ctx -func UserFrom(ctx Context) (user.Info, bool) { - user, ok := ctx.Value(userKey).(user.Info) - return user, ok -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/conversion.go b/vendor/k8s.io/kubernetes/pkg/api/conversion.go deleted file mode 100644 index 7ae1e0184..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/conversion.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/util/intstr" -) - -func init() { - Scheme.AddDefaultingFuncs( - func(obj *ListOptions) { - if obj.LabelSelector == nil { - obj.LabelSelector = labels.Everything() - } - if obj.FieldSelector == nil { - obj.FieldSelector = fields.Everything() - } - }, - ) - Scheme.AddConversionFuncs( - Convert_unversioned_TypeMeta_To_unversioned_TypeMeta, - Convert_unversioned_ListMeta_To_unversioned_ListMeta, - Convert_intstr_IntOrString_To_intstr_IntOrString, - Convert_unversioned_Time_To_unversioned_Time, - Convert_Slice_string_To_unversioned_Time, - Convert_string_To_labels_Selector, - Convert_string_To_fields_Selector, - Convert_Pointer_bool_To_bool, - Convert_bool_To_Pointer_bool, - Convert_Pointer_string_To_string, - Convert_string_To_Pointer_string, - Convert_labels_Selector_To_string, - Convert_fields_Selector_To_string, - Convert_resource_Quantity_To_resource_Quantity, - ) -} - -func Convert_Pointer_string_To_string(in **string, out *string, s conversion.Scope) error { - if *in == nil { - *out = "" - return nil - } - *out = **in - return nil -} - -func Convert_string_To_Pointer_string(in *string, out **string, s conversion.Scope) error { - if in == nil { - stringVar := "" - *out = &stringVar - return nil - } - *out = in - return nil -} - -func Convert_Pointer_bool_To_bool(in **bool, out *bool, s conversion.Scope) error { - if *in == nil { - *out = false - return nil - } - *out = **in - return nil -} - -func Convert_bool_To_Pointer_bool(in *bool, out **bool, s conversion.Scope) error { - if in == nil { - boolVar := false - *out = &boolVar - return nil - } - 
*out = in - return nil -} - -func Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(in, out *unversioned.TypeMeta, s conversion.Scope) error { - // These values are explicitly not copied - //out.APIVersion = in.APIVersion - //out.Kind = in.Kind - return nil -} - -func Convert_unversioned_ListMeta_To_unversioned_ListMeta(in, out *unversioned.ListMeta, s conversion.Scope) error { - out.ResourceVersion = in.ResourceVersion - out.SelfLink = in.SelfLink - return nil -} - -func Convert_intstr_IntOrString_To_intstr_IntOrString(in, out *intstr.IntOrString, s conversion.Scope) error { - out.Type = in.Type - out.IntVal = in.IntVal - out.StrVal = in.StrVal - return nil -} - -func Convert_unversioned_Time_To_unversioned_Time(in *unversioned.Time, out *unversioned.Time, s conversion.Scope) error { - // Cannot deep copy these, because time.Time has unexported fields. - *out = *in - return nil -} - -// Convert_Slice_string_To_unversioned_Time allows converting a URL query parameter value -func Convert_Slice_string_To_unversioned_Time(input *[]string, out *unversioned.Time, s conversion.Scope) error { - str := "" - if len(*input) > 0 { - str = (*input)[0] - } - return out.UnmarshalQueryParameter(str) -} - -func Convert_string_To_labels_Selector(in *string, out *labels.Selector, s conversion.Scope) error { - selector, err := labels.Parse(*in) - if err != nil { - return err - } - *out = selector - return nil -} -func Convert_string_To_fields_Selector(in *string, out *fields.Selector, s conversion.Scope) error { - selector, err := fields.ParseSelector(*in) - if err != nil { - return err - } - *out = selector - return nil -} -func Convert_labels_Selector_To_string(in *labels.Selector, out *string, s conversion.Scope) error { - if *in == nil { - return nil - } - *out = (*in).String() - return nil -} -func Convert_fields_Selector_To_string(in *fields.Selector, out *string, s conversion.Scope) error { - if *in == nil { - return nil - } - *out = (*in).String() - return nil -} -func Convert_resource_Quantity_To_resource_Quantity(in *resource.Quantity, out *resource.Quantity, s conversion.Scope) error { - *out = *in - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/api/deep_copy_generated.go deleted file mode 100644 index f3181b8a7..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/deep_copy_generated.go +++ /dev/null @@ -1,3300 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
- -package api - -import ( - resource "k8s.io/kubernetes/pkg/api/resource" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" - fields "k8s.io/kubernetes/pkg/fields" - labels "k8s.io/kubernetes/pkg/labels" - runtime "k8s.io/kubernetes/pkg/runtime" - types "k8s.io/kubernetes/pkg/types" - intstr "k8s.io/kubernetes/pkg/util/intstr" -) - -func init() { - if err := Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_api_AWSElasticBlockStoreVolumeSource, - DeepCopy_api_Affinity, - DeepCopy_api_AttachedVolume, - DeepCopy_api_AzureFileVolumeSource, - DeepCopy_api_Binding, - DeepCopy_api_Capabilities, - DeepCopy_api_CephFSVolumeSource, - DeepCopy_api_CinderVolumeSource, - DeepCopy_api_ComponentCondition, - DeepCopy_api_ComponentStatus, - DeepCopy_api_ComponentStatusList, - DeepCopy_api_ConfigMap, - DeepCopy_api_ConfigMapKeySelector, - DeepCopy_api_ConfigMapList, - DeepCopy_api_ConfigMapVolumeSource, - DeepCopy_api_Container, - DeepCopy_api_ContainerImage, - DeepCopy_api_ContainerPort, - DeepCopy_api_ContainerState, - DeepCopy_api_ContainerStateRunning, - DeepCopy_api_ContainerStateTerminated, - DeepCopy_api_ContainerStateWaiting, - DeepCopy_api_ContainerStatus, - DeepCopy_api_ConversionError, - DeepCopy_api_DaemonEndpoint, - DeepCopy_api_DeleteOptions, - DeepCopy_api_DownwardAPIVolumeFile, - DeepCopy_api_DownwardAPIVolumeSource, - DeepCopy_api_EmptyDirVolumeSource, - DeepCopy_api_EndpointAddress, - DeepCopy_api_EndpointPort, - DeepCopy_api_EndpointSubset, - DeepCopy_api_Endpoints, - DeepCopy_api_EndpointsList, - DeepCopy_api_EnvVar, - DeepCopy_api_EnvVarSource, - DeepCopy_api_Event, - DeepCopy_api_EventList, - DeepCopy_api_EventSource, - DeepCopy_api_ExecAction, - DeepCopy_api_ExportOptions, - DeepCopy_api_FCVolumeSource, - DeepCopy_api_FlexVolumeSource, - DeepCopy_api_FlockerVolumeSource, - DeepCopy_api_GCEPersistentDiskVolumeSource, - DeepCopy_api_GitRepoVolumeSource, - DeepCopy_api_GlusterfsVolumeSource, - DeepCopy_api_HTTPGetAction, - DeepCopy_api_HTTPHeader, - DeepCopy_api_Handler, - DeepCopy_api_HostPathVolumeSource, - DeepCopy_api_ISCSIVolumeSource, - DeepCopy_api_KeyToPath, - DeepCopy_api_Lifecycle, - DeepCopy_api_LimitRange, - DeepCopy_api_LimitRangeItem, - DeepCopy_api_LimitRangeList, - DeepCopy_api_LimitRangeSpec, - DeepCopy_api_List, - DeepCopy_api_ListOptions, - DeepCopy_api_LoadBalancerIngress, - DeepCopy_api_LoadBalancerStatus, - DeepCopy_api_LocalObjectReference, - DeepCopy_api_NFSVolumeSource, - DeepCopy_api_Namespace, - DeepCopy_api_NamespaceList, - DeepCopy_api_NamespaceSpec, - DeepCopy_api_NamespaceStatus, - DeepCopy_api_Node, - DeepCopy_api_NodeAddress, - DeepCopy_api_NodeAffinity, - DeepCopy_api_NodeCondition, - DeepCopy_api_NodeDaemonEndpoints, - DeepCopy_api_NodeList, - DeepCopy_api_NodeProxyOptions, - DeepCopy_api_NodeResources, - DeepCopy_api_NodeSelector, - DeepCopy_api_NodeSelectorRequirement, - DeepCopy_api_NodeSelectorTerm, - DeepCopy_api_NodeSpec, - DeepCopy_api_NodeStatus, - DeepCopy_api_NodeSystemInfo, - DeepCopy_api_ObjectFieldSelector, - DeepCopy_api_ObjectMeta, - DeepCopy_api_ObjectReference, - DeepCopy_api_OwnerReference, - DeepCopy_api_PersistentVolume, - DeepCopy_api_PersistentVolumeClaim, - DeepCopy_api_PersistentVolumeClaimList, - DeepCopy_api_PersistentVolumeClaimSpec, - DeepCopy_api_PersistentVolumeClaimStatus, - DeepCopy_api_PersistentVolumeClaimVolumeSource, - DeepCopy_api_PersistentVolumeList, - DeepCopy_api_PersistentVolumeSource, - DeepCopy_api_PersistentVolumeSpec, - DeepCopy_api_PersistentVolumeStatus, - 
DeepCopy_api_Pod, - DeepCopy_api_PodAffinity, - DeepCopy_api_PodAffinityTerm, - DeepCopy_api_PodAntiAffinity, - DeepCopy_api_PodAttachOptions, - DeepCopy_api_PodCondition, - DeepCopy_api_PodExecOptions, - DeepCopy_api_PodList, - DeepCopy_api_PodLogOptions, - DeepCopy_api_PodProxyOptions, - DeepCopy_api_PodSecurityContext, - DeepCopy_api_PodSpec, - DeepCopy_api_PodStatus, - DeepCopy_api_PodStatusResult, - DeepCopy_api_PodTemplate, - DeepCopy_api_PodTemplateList, - DeepCopy_api_PodTemplateSpec, - DeepCopy_api_Preconditions, - DeepCopy_api_PreferredSchedulingTerm, - DeepCopy_api_Probe, - DeepCopy_api_RBDVolumeSource, - DeepCopy_api_RangeAllocation, - DeepCopy_api_ReplicationController, - DeepCopy_api_ReplicationControllerList, - DeepCopy_api_ReplicationControllerSpec, - DeepCopy_api_ReplicationControllerStatus, - DeepCopy_api_ResourceFieldSelector, - DeepCopy_api_ResourceQuota, - DeepCopy_api_ResourceQuotaList, - DeepCopy_api_ResourceQuotaSpec, - DeepCopy_api_ResourceQuotaStatus, - DeepCopy_api_ResourceRequirements, - DeepCopy_api_SELinuxOptions, - DeepCopy_api_Secret, - DeepCopy_api_SecretKeySelector, - DeepCopy_api_SecretList, - DeepCopy_api_SecretVolumeSource, - DeepCopy_api_SecurityContext, - DeepCopy_api_SerializedReference, - DeepCopy_api_Service, - DeepCopy_api_ServiceAccount, - DeepCopy_api_ServiceAccountList, - DeepCopy_api_ServiceList, - DeepCopy_api_ServicePort, - DeepCopy_api_ServiceProxyOptions, - DeepCopy_api_ServiceSpec, - DeepCopy_api_ServiceStatus, - DeepCopy_api_TCPSocketAction, - DeepCopy_api_Taint, - DeepCopy_api_Toleration, - DeepCopy_api_Volume, - DeepCopy_api_VolumeMount, - DeepCopy_api_VolumeSource, - DeepCopy_api_VsphereVirtualDiskVolumeSource, - DeepCopy_api_WeightedPodAffinityTerm, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. 
- panic(err) - } -} - -func DeepCopy_api_AWSElasticBlockStoreVolumeSource(in AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, c *conversion.Cloner) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -func DeepCopy_api_Affinity(in Affinity, out *Affinity, c *conversion.Cloner) error { - if in.NodeAffinity != nil { - in, out := in.NodeAffinity, &out.NodeAffinity - *out = new(NodeAffinity) - if err := DeepCopy_api_NodeAffinity(*in, *out, c); err != nil { - return err - } - } else { - out.NodeAffinity = nil - } - if in.PodAffinity != nil { - in, out := in.PodAffinity, &out.PodAffinity - *out = new(PodAffinity) - if err := DeepCopy_api_PodAffinity(*in, *out, c); err != nil { - return err - } - } else { - out.PodAffinity = nil - } - if in.PodAntiAffinity != nil { - in, out := in.PodAntiAffinity, &out.PodAntiAffinity - *out = new(PodAntiAffinity) - if err := DeepCopy_api_PodAntiAffinity(*in, *out, c); err != nil { - return err - } - } else { - out.PodAntiAffinity = nil - } - return nil -} - -func DeepCopy_api_AttachedVolume(in AttachedVolume, out *AttachedVolume, c *conversion.Cloner) error { - out.Name = in.Name - out.DevicePath = in.DevicePath - return nil -} - -func DeepCopy_api_AzureFileVolumeSource(in AzureFileVolumeSource, out *AzureFileVolumeSource, c *conversion.Cloner) error { - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly - return nil -} - -func DeepCopy_api_Binding(in Binding, out *Binding, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectReference(in.Target, &out.Target, c); err != nil { - return err - } - return nil -} - -func DeepCopy_api_Capabilities(in Capabilities, out *Capabilities, c *conversion.Cloner) error { - if in.Add != nil { - in, out := in.Add, &out.Add - *out = make([]Capability, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.Add = nil - } - if in.Drop != nil { - in, out := in.Drop, &out.Drop - *out = make([]Capability, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.Drop = nil - } - return nil -} - -func DeepCopy_api_CephFSVolumeSource(in CephFSVolumeSource, out *CephFSVolumeSource, c *conversion.Cloner) error { - if in.Monitors != nil { - in, out := in.Monitors, &out.Monitors - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Monitors = nil - } - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - if in.SecretRef != nil { - in, out := in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - if err := DeepCopy_api_LocalObjectReference(*in, *out, c); err != nil { - return err - } - } else { - out.SecretRef = nil - } - out.ReadOnly = in.ReadOnly - return nil -} - -func DeepCopy_api_CinderVolumeSource(in CinderVolumeSource, out *CinderVolumeSource, c *conversion.Cloner) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func DeepCopy_api_ComponentCondition(in ComponentCondition, out *ComponentCondition, c *conversion.Cloner) error { - out.Type = in.Type - out.Status = in.Status - out.Message = in.Message - out.Error = in.Error - return nil -} - -func DeepCopy_api_ComponentStatus(in ComponentStatus, out *ComponentStatus, c 
*conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if in.Conditions != nil { - in, out := in.Conditions, &out.Conditions - *out = make([]ComponentCondition, len(in)) - for i := range in { - if err := DeepCopy_api_ComponentCondition(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - return nil -} - -func DeepCopy_api_ComponentStatusList(in ComponentStatusList, out *ComponentStatusList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ComponentStatus, len(in)) - for i := range in { - if err := DeepCopy_api_ComponentStatus(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_api_ConfigMap(in ConfigMap, out *ConfigMap, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if in.Data != nil { - in, out := in.Data, &out.Data - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.Data = nil - } - return nil -} - -func DeepCopy_api_ConfigMapKeySelector(in ConfigMapKeySelector, out *ConfigMapKeySelector, c *conversion.Cloner) error { - if err := DeepCopy_api_LocalObjectReference(in.LocalObjectReference, &out.LocalObjectReference, c); err != nil { - return err - } - out.Key = in.Key - return nil -} - -func DeepCopy_api_ConfigMapList(in ConfigMapList, out *ConfigMapList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ConfigMap, len(in)) - for i := range in { - if err := DeepCopy_api_ConfigMap(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_api_ConfigMapVolumeSource(in ConfigMapVolumeSource, out *ConfigMapVolumeSource, c *conversion.Cloner) error { - if err := DeepCopy_api_LocalObjectReference(in.LocalObjectReference, &out.LocalObjectReference, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]KeyToPath, len(in)) - for i := range in { - if err := DeepCopy_api_KeyToPath(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_api_Container(in Container, out *Container, c *conversion.Cloner) error { - out.Name = in.Name - out.Image = in.Image - if in.Command != nil { - in, out := in.Command, &out.Command - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Command = nil - } - if in.Args != nil { - in, out := in.Args, &out.Args - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Args = nil - } - out.WorkingDir = in.WorkingDir - if in.Ports != nil { - in, out := in.Ports, 
&out.Ports - *out = make([]ContainerPort, len(in)) - for i := range in { - if err := DeepCopy_api_ContainerPort(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Ports = nil - } - if in.Env != nil { - in, out := in.Env, &out.Env - *out = make([]EnvVar, len(in)) - for i := range in { - if err := DeepCopy_api_EnvVar(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Env = nil - } - if err := DeepCopy_api_ResourceRequirements(in.Resources, &out.Resources, c); err != nil { - return err - } - if in.VolumeMounts != nil { - in, out := in.VolumeMounts, &out.VolumeMounts - *out = make([]VolumeMount, len(in)) - for i := range in { - if err := DeepCopy_api_VolumeMount(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.VolumeMounts = nil - } - if in.LivenessProbe != nil { - in, out := in.LivenessProbe, &out.LivenessProbe - *out = new(Probe) - if err := DeepCopy_api_Probe(*in, *out, c); err != nil { - return err - } - } else { - out.LivenessProbe = nil - } - if in.ReadinessProbe != nil { - in, out := in.ReadinessProbe, &out.ReadinessProbe - *out = new(Probe) - if err := DeepCopy_api_Probe(*in, *out, c); err != nil { - return err - } - } else { - out.ReadinessProbe = nil - } - if in.Lifecycle != nil { - in, out := in.Lifecycle, &out.Lifecycle - *out = new(Lifecycle) - if err := DeepCopy_api_Lifecycle(*in, *out, c); err != nil { - return err - } - } else { - out.Lifecycle = nil - } - out.TerminationMessagePath = in.TerminationMessagePath - out.ImagePullPolicy = in.ImagePullPolicy - if in.SecurityContext != nil { - in, out := in.SecurityContext, &out.SecurityContext - *out = new(SecurityContext) - if err := DeepCopy_api_SecurityContext(*in, *out, c); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - out.Stdin = in.Stdin - out.StdinOnce = in.StdinOnce - out.TTY = in.TTY - return nil -} - -func DeepCopy_api_ContainerImage(in ContainerImage, out *ContainerImage, c *conversion.Cloner) error { - if in.Names != nil { - in, out := in.Names, &out.Names - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Names = nil - } - out.SizeBytes = in.SizeBytes - return nil -} - -func DeepCopy_api_ContainerPort(in ContainerPort, out *ContainerPort, c *conversion.Cloner) error { - out.Name = in.Name - out.HostPort = in.HostPort - out.ContainerPort = in.ContainerPort - out.Protocol = in.Protocol - out.HostIP = in.HostIP - return nil -} - -func DeepCopy_api_ContainerState(in ContainerState, out *ContainerState, c *conversion.Cloner) error { - if in.Waiting != nil { - in, out := in.Waiting, &out.Waiting - *out = new(ContainerStateWaiting) - if err := DeepCopy_api_ContainerStateWaiting(*in, *out, c); err != nil { - return err - } - } else { - out.Waiting = nil - } - if in.Running != nil { - in, out := in.Running, &out.Running - *out = new(ContainerStateRunning) - if err := DeepCopy_api_ContainerStateRunning(*in, *out, c); err != nil { - return err - } - } else { - out.Running = nil - } - if in.Terminated != nil { - in, out := in.Terminated, &out.Terminated - *out = new(ContainerStateTerminated) - if err := DeepCopy_api_ContainerStateTerminated(*in, *out, c); err != nil { - return err - } - } else { - out.Terminated = nil - } - return nil -} - -func DeepCopy_api_ContainerStateRunning(in ContainerStateRunning, out *ContainerStateRunning, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_Time(in.StartedAt, &out.StartedAt, c); err != nil { - return err - } - return nil -} - -func 
DeepCopy_api_ContainerStateTerminated(in ContainerStateTerminated, out *ContainerStateTerminated, c *conversion.Cloner) error { - out.ExitCode = in.ExitCode - out.Signal = in.Signal - out.Reason = in.Reason - out.Message = in.Message - if err := unversioned.DeepCopy_unversioned_Time(in.StartedAt, &out.StartedAt, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Time(in.FinishedAt, &out.FinishedAt, c); err != nil { - return err - } - out.ContainerID = in.ContainerID - return nil -} - -func DeepCopy_api_ContainerStateWaiting(in ContainerStateWaiting, out *ContainerStateWaiting, c *conversion.Cloner) error { - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func DeepCopy_api_ContainerStatus(in ContainerStatus, out *ContainerStatus, c *conversion.Cloner) error { - out.Name = in.Name - if err := DeepCopy_api_ContainerState(in.State, &out.State, c); err != nil { - return err - } - if err := DeepCopy_api_ContainerState(in.LastTerminationState, &out.LastTerminationState, c); err != nil { - return err - } - out.Ready = in.Ready - out.RestartCount = in.RestartCount - out.Image = in.Image - out.ImageID = in.ImageID - out.ContainerID = in.ContainerID - return nil -} - -func DeepCopy_api_ConversionError(in ConversionError, out *ConversionError, c *conversion.Cloner) error { - if in.In == nil { - out.In = nil - } else if newVal, err := c.DeepCopy(in.In); err != nil { - return err - } else { - out.In = newVal.(interface{}) - } - if in.Out == nil { - out.Out = nil - } else if newVal, err := c.DeepCopy(in.Out); err != nil { - return err - } else { - out.Out = newVal.(interface{}) - } - out.Message = in.Message - return nil -} - -func DeepCopy_api_DaemonEndpoint(in DaemonEndpoint, out *DaemonEndpoint, c *conversion.Cloner) error { - out.Port = in.Port - return nil -} - -func DeepCopy_api_DeleteOptions(in DeleteOptions, out *DeleteOptions, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if in.GracePeriodSeconds != nil { - in, out := in.GracePeriodSeconds, &out.GracePeriodSeconds - *out = new(int64) - **out = *in - } else { - out.GracePeriodSeconds = nil - } - if in.Preconditions != nil { - in, out := in.Preconditions, &out.Preconditions - *out = new(Preconditions) - if err := DeepCopy_api_Preconditions(*in, *out, c); err != nil { - return err - } - } else { - out.Preconditions = nil - } - if in.OrphanDependents != nil { - in, out := in.OrphanDependents, &out.OrphanDependents - *out = new(bool) - **out = *in - } else { - out.OrphanDependents = nil - } - return nil -} - -func DeepCopy_api_DownwardAPIVolumeFile(in DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, c *conversion.Cloner) error { - out.Path = in.Path - if in.FieldRef != nil { - in, out := in.FieldRef, &out.FieldRef - *out = new(ObjectFieldSelector) - if err := DeepCopy_api_ObjectFieldSelector(*in, *out, c); err != nil { - return err - } - } else { - out.FieldRef = nil - } - if in.ResourceFieldRef != nil { - in, out := in.ResourceFieldRef, &out.ResourceFieldRef - *out = new(ResourceFieldSelector) - if err := DeepCopy_api_ResourceFieldSelector(*in, *out, c); err != nil { - return err - } - } else { - out.ResourceFieldRef = nil - } - return nil -} - -func DeepCopy_api_DownwardAPIVolumeSource(in DownwardAPIVolumeSource, out *DownwardAPIVolumeSource, c *conversion.Cloner) error { - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]DownwardAPIVolumeFile, len(in)) - for i := range in { 
- if err := DeepCopy_api_DownwardAPIVolumeFile(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_api_EmptyDirVolumeSource(in EmptyDirVolumeSource, out *EmptyDirVolumeSource, c *conversion.Cloner) error { - out.Medium = in.Medium - return nil -} - -func DeepCopy_api_EndpointAddress(in EndpointAddress, out *EndpointAddress, c *conversion.Cloner) error { - out.IP = in.IP - out.Hostname = in.Hostname - if in.TargetRef != nil { - in, out := in.TargetRef, &out.TargetRef - *out = new(ObjectReference) - if err := DeepCopy_api_ObjectReference(*in, *out, c); err != nil { - return err - } - } else { - out.TargetRef = nil - } - return nil -} - -func DeepCopy_api_EndpointPort(in EndpointPort, out *EndpointPort, c *conversion.Cloner) error { - out.Name = in.Name - out.Port = in.Port - out.Protocol = in.Protocol - return nil -} - -func DeepCopy_api_EndpointSubset(in EndpointSubset, out *EndpointSubset, c *conversion.Cloner) error { - if in.Addresses != nil { - in, out := in.Addresses, &out.Addresses - *out = make([]EndpointAddress, len(in)) - for i := range in { - if err := DeepCopy_api_EndpointAddress(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Addresses = nil - } - if in.NotReadyAddresses != nil { - in, out := in.NotReadyAddresses, &out.NotReadyAddresses - *out = make([]EndpointAddress, len(in)) - for i := range in { - if err := DeepCopy_api_EndpointAddress(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.NotReadyAddresses = nil - } - if in.Ports != nil { - in, out := in.Ports, &out.Ports - *out = make([]EndpointPort, len(in)) - for i := range in { - if err := DeepCopy_api_EndpointPort(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Ports = nil - } - return nil -} - -func DeepCopy_api_Endpoints(in Endpoints, out *Endpoints, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if in.Subsets != nil { - in, out := in.Subsets, &out.Subsets - *out = make([]EndpointSubset, len(in)) - for i := range in { - if err := DeepCopy_api_EndpointSubset(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Subsets = nil - } - return nil -} - -func DeepCopy_api_EndpointsList(in EndpointsList, out *EndpointsList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Endpoints, len(in)) - for i := range in { - if err := DeepCopy_api_Endpoints(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_api_EnvVar(in EnvVar, out *EnvVar, c *conversion.Cloner) error { - out.Name = in.Name - out.Value = in.Value - if in.ValueFrom != nil { - in, out := in.ValueFrom, &out.ValueFrom - *out = new(EnvVarSource) - if err := DeepCopy_api_EnvVarSource(*in, *out, c); err != nil { - return err - } - } else { - out.ValueFrom = nil - } - return nil -} - -func DeepCopy_api_EnvVarSource(in EnvVarSource, out *EnvVarSource, c *conversion.Cloner) error { - if in.FieldRef != nil { - in, out := in.FieldRef, &out.FieldRef - *out = 
new(ObjectFieldSelector) - if err := DeepCopy_api_ObjectFieldSelector(*in, *out, c); err != nil { - return err - } - } else { - out.FieldRef = nil - } - if in.ResourceFieldRef != nil { - in, out := in.ResourceFieldRef, &out.ResourceFieldRef - *out = new(ResourceFieldSelector) - if err := DeepCopy_api_ResourceFieldSelector(*in, *out, c); err != nil { - return err - } - } else { - out.ResourceFieldRef = nil - } - if in.ConfigMapKeyRef != nil { - in, out := in.ConfigMapKeyRef, &out.ConfigMapKeyRef - *out = new(ConfigMapKeySelector) - if err := DeepCopy_api_ConfigMapKeySelector(*in, *out, c); err != nil { - return err - } - } else { - out.ConfigMapKeyRef = nil - } - if in.SecretKeyRef != nil { - in, out := in.SecretKeyRef, &out.SecretKeyRef - *out = new(SecretKeySelector) - if err := DeepCopy_api_SecretKeySelector(*in, *out, c); err != nil { - return err - } - } else { - out.SecretKeyRef = nil - } - return nil -} - -func DeepCopy_api_Event(in Event, out *Event, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectReference(in.InvolvedObject, &out.InvolvedObject, c); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - if err := DeepCopy_api_EventSource(in.Source, &out.Source, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Time(in.FirstTimestamp, &out.FirstTimestamp, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Time(in.LastTimestamp, &out.LastTimestamp, c); err != nil { - return err - } - out.Count = in.Count - out.Type = in.Type - return nil -} - -func DeepCopy_api_EventList(in EventList, out *EventList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Event, len(in)) - for i := range in { - if err := DeepCopy_api_Event(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_api_EventSource(in EventSource, out *EventSource, c *conversion.Cloner) error { - out.Component = in.Component - out.Host = in.Host - return nil -} - -func DeepCopy_api_ExecAction(in ExecAction, out *ExecAction, c *conversion.Cloner) error { - if in.Command != nil { - in, out := in.Command, &out.Command - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Command = nil - } - return nil -} - -func DeepCopy_api_ExportOptions(in ExportOptions, out *ExportOptions, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Export = in.Export - out.Exact = in.Exact - return nil -} - -func DeepCopy_api_FCVolumeSource(in FCVolumeSource, out *FCVolumeSource, c *conversion.Cloner) error { - if in.TargetWWNs != nil { - in, out := in.TargetWWNs, &out.TargetWWNs - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.TargetWWNs = nil - } - if in.Lun != nil { - in, out := in.Lun, &out.Lun - *out = new(int32) - **out = *in - } else { - out.Lun = nil - } - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func 
DeepCopy_api_FlexVolumeSource(in FlexVolumeSource, out *FlexVolumeSource, c *conversion.Cloner) error { - out.Driver = in.Driver - out.FSType = in.FSType - if in.SecretRef != nil { - in, out := in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - if err := DeepCopy_api_LocalObjectReference(*in, *out, c); err != nil { - return err - } - } else { - out.SecretRef = nil - } - out.ReadOnly = in.ReadOnly - if in.Options != nil { - in, out := in.Options, &out.Options - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.Options = nil - } - return nil -} - -func DeepCopy_api_FlockerVolumeSource(in FlockerVolumeSource, out *FlockerVolumeSource, c *conversion.Cloner) error { - out.DatasetName = in.DatasetName - return nil -} - -func DeepCopy_api_GCEPersistentDiskVolumeSource(in GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, c *conversion.Cloner) error { - out.PDName = in.PDName - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -func DeepCopy_api_GitRepoVolumeSource(in GitRepoVolumeSource, out *GitRepoVolumeSource, c *conversion.Cloner) error { - out.Repository = in.Repository - out.Revision = in.Revision - out.Directory = in.Directory - return nil -} - -func DeepCopy_api_GlusterfsVolumeSource(in GlusterfsVolumeSource, out *GlusterfsVolumeSource, c *conversion.Cloner) error { - out.EndpointsName = in.EndpointsName - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -func DeepCopy_api_HTTPGetAction(in HTTPGetAction, out *HTTPGetAction, c *conversion.Cloner) error { - out.Path = in.Path - if err := intstr.DeepCopy_intstr_IntOrString(in.Port, &out.Port, c); err != nil { - return err - } - out.Host = in.Host - out.Scheme = in.Scheme - if in.HTTPHeaders != nil { - in, out := in.HTTPHeaders, &out.HTTPHeaders - *out = make([]HTTPHeader, len(in)) - for i := range in { - if err := DeepCopy_api_HTTPHeader(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.HTTPHeaders = nil - } - return nil -} - -func DeepCopy_api_HTTPHeader(in HTTPHeader, out *HTTPHeader, c *conversion.Cloner) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -func DeepCopy_api_Handler(in Handler, out *Handler, c *conversion.Cloner) error { - if in.Exec != nil { - in, out := in.Exec, &out.Exec - *out = new(ExecAction) - if err := DeepCopy_api_ExecAction(*in, *out, c); err != nil { - return err - } - } else { - out.Exec = nil - } - if in.HTTPGet != nil { - in, out := in.HTTPGet, &out.HTTPGet - *out = new(HTTPGetAction) - if err := DeepCopy_api_HTTPGetAction(*in, *out, c); err != nil { - return err - } - } else { - out.HTTPGet = nil - } - if in.TCPSocket != nil { - in, out := in.TCPSocket, &out.TCPSocket - *out = new(TCPSocketAction) - if err := DeepCopy_api_TCPSocketAction(*in, *out, c); err != nil { - return err - } - } else { - out.TCPSocket = nil - } - return nil -} - -func DeepCopy_api_HostPathVolumeSource(in HostPathVolumeSource, out *HostPathVolumeSource, c *conversion.Cloner) error { - out.Path = in.Path - return nil -} - -func DeepCopy_api_ISCSIVolumeSource(in ISCSIVolumeSource, out *ISCSIVolumeSource, c *conversion.Cloner) error { - out.TargetPortal = in.TargetPortal - out.IQN = in.IQN - out.Lun = in.Lun - out.ISCSIInterface = in.ISCSIInterface - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func DeepCopy_api_KeyToPath(in KeyToPath, out *KeyToPath, c *conversion.Cloner) error { - out.Key = in.Key - out.Path = in.Path - 
return nil -} - -func DeepCopy_api_Lifecycle(in Lifecycle, out *Lifecycle, c *conversion.Cloner) error { - if in.PostStart != nil { - in, out := in.PostStart, &out.PostStart - *out = new(Handler) - if err := DeepCopy_api_Handler(*in, *out, c); err != nil { - return err - } - } else { - out.PostStart = nil - } - if in.PreStop != nil { - in, out := in.PreStop, &out.PreStop - *out = new(Handler) - if err := DeepCopy_api_Handler(*in, *out, c); err != nil { - return err - } - } else { - out.PreStop = nil - } - return nil -} - -func DeepCopy_api_LimitRange(in LimitRange, out *LimitRange, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_api_LimitRangeSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - return nil -} - -func DeepCopy_api_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conversion.Cloner) error { - out.Type = in.Type - if in.Max != nil { - in, out := in.Max, &out.Max - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Max = nil - } - if in.Min != nil { - in, out := in.Min, &out.Min - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Min = nil - } - if in.Default != nil { - in, out := in.Default, &out.Default - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Default = nil - } - if in.DefaultRequest != nil { - in, out := in.DefaultRequest, &out.DefaultRequest - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.DefaultRequest = nil - } - if in.MaxLimitRequestRatio != nil { - in, out := in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.MaxLimitRequestRatio = nil - } - return nil -} - -func DeepCopy_api_LimitRangeList(in LimitRangeList, out *LimitRangeList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]LimitRange, len(in)) - for i := range in { - if err := DeepCopy_api_LimitRange(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_api_LimitRangeSpec(in LimitRangeSpec, out *LimitRangeSpec, c *conversion.Cloner) error { - if in.Limits != nil { - in, out := in.Limits, &out.Limits - *out = make([]LimitRangeItem, len(in)) - for i := range in { - if err := 
DeepCopy_api_LimitRangeItem(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Limits = nil - } - return nil -} - -func DeepCopy_api_List(in List, out *List, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]runtime.Object, len(in)) - for i := range in { - if newVal, err := c.DeepCopy(in[i]); err != nil { - return err - } else { - (*out)[i] = newVal.(runtime.Object) - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_api_ListOptions(in ListOptions, out *ListOptions, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if in.LabelSelector == nil { - out.LabelSelector = nil - } else if newVal, err := c.DeepCopy(in.LabelSelector); err != nil { - return err - } else { - out.LabelSelector = newVal.(labels.Selector) - } - if in.FieldSelector == nil { - out.FieldSelector = nil - } else if newVal, err := c.DeepCopy(in.FieldSelector); err != nil { - return err - } else { - out.FieldSelector = newVal.(fields.Selector) - } - out.Watch = in.Watch - out.ResourceVersion = in.ResourceVersion - if in.TimeoutSeconds != nil { - in, out := in.TimeoutSeconds, &out.TimeoutSeconds - *out = new(int64) - **out = *in - } else { - out.TimeoutSeconds = nil - } - return nil -} - -func DeepCopy_api_LoadBalancerIngress(in LoadBalancerIngress, out *LoadBalancerIngress, c *conversion.Cloner) error { - out.IP = in.IP - out.Hostname = in.Hostname - return nil -} - -func DeepCopy_api_LoadBalancerStatus(in LoadBalancerStatus, out *LoadBalancerStatus, c *conversion.Cloner) error { - if in.Ingress != nil { - in, out := in.Ingress, &out.Ingress - *out = make([]LoadBalancerIngress, len(in)) - for i := range in { - if err := DeepCopy_api_LoadBalancerIngress(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Ingress = nil - } - return nil -} - -func DeepCopy_api_LocalObjectReference(in LocalObjectReference, out *LocalObjectReference, c *conversion.Cloner) error { - out.Name = in.Name - return nil -} - -func DeepCopy_api_NFSVolumeSource(in NFSVolumeSource, out *NFSVolumeSource, c *conversion.Cloner) error { - out.Server = in.Server - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -func DeepCopy_api_Namespace(in Namespace, out *Namespace, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_api_NamespaceSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_api_NamespaceStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_api_NamespaceList(in NamespaceList, out *NamespaceList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Namespace, len(in)) - for i := range in { - if err := DeepCopy_api_Namespace(in[i], 
&(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_api_NamespaceSpec(in NamespaceSpec, out *NamespaceSpec, c *conversion.Cloner) error { - if in.Finalizers != nil { - in, out := in.Finalizers, &out.Finalizers - *out = make([]FinalizerName, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.Finalizers = nil - } - return nil -} - -func DeepCopy_api_NamespaceStatus(in NamespaceStatus, out *NamespaceStatus, c *conversion.Cloner) error { - out.Phase = in.Phase - return nil -} - -func DeepCopy_api_Node(in Node, out *Node, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_api_NodeSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_api_NodeStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_api_NodeAddress(in NodeAddress, out *NodeAddress, c *conversion.Cloner) error { - out.Type = in.Type - out.Address = in.Address - return nil -} - -func DeepCopy_api_NodeAffinity(in NodeAffinity, out *NodeAffinity, c *conversion.Cloner) error { - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = new(NodeSelector) - if err := DeepCopy_api_NodeSelector(*in, *out, c); err != nil { - return err - } - } else { - out.RequiredDuringSchedulingIgnoredDuringExecution = nil - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]PreferredSchedulingTerm, len(in)) - for i := range in { - if err := DeepCopy_api_PreferredSchedulingTerm(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.PreferredDuringSchedulingIgnoredDuringExecution = nil - } - return nil -} - -func DeepCopy_api_NodeCondition(in NodeCondition, out *NodeCondition, c *conversion.Cloner) error { - out.Type = in.Type - out.Status = in.Status - if err := unversioned.DeepCopy_unversioned_Time(in.LastHeartbeatTime, &out.LastHeartbeatTime, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func DeepCopy_api_NodeDaemonEndpoints(in NodeDaemonEndpoints, out *NodeDaemonEndpoints, c *conversion.Cloner) error { - if err := DeepCopy_api_DaemonEndpoint(in.KubeletEndpoint, &out.KubeletEndpoint, c); err != nil { - return err - } - return nil -} - -func DeepCopy_api_NodeList(in NodeList, out *NodeList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Node, len(in)) - for i := range in { - if err := DeepCopy_api_Node(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_api_NodeProxyOptions(in NodeProxyOptions, out *NodeProxyOptions, c *conversion.Cloner) error { - if err := 
unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Path = in.Path - return nil -} - -func DeepCopy_api_NodeResources(in NodeResources, out *NodeResources, c *conversion.Cloner) error { - if in.Capacity != nil { - in, out := in.Capacity, &out.Capacity - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Capacity = nil - } - return nil -} - -func DeepCopy_api_NodeSelector(in NodeSelector, out *NodeSelector, c *conversion.Cloner) error { - if in.NodeSelectorTerms != nil { - in, out := in.NodeSelectorTerms, &out.NodeSelectorTerms - *out = make([]NodeSelectorTerm, len(in)) - for i := range in { - if err := DeepCopy_api_NodeSelectorTerm(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.NodeSelectorTerms = nil - } - return nil -} - -func DeepCopy_api_NodeSelectorRequirement(in NodeSelectorRequirement, out *NodeSelectorRequirement, c *conversion.Cloner) error { - out.Key = in.Key - out.Operator = in.Operator - if in.Values != nil { - in, out := in.Values, &out.Values - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Values = nil - } - return nil -} - -func DeepCopy_api_NodeSelectorTerm(in NodeSelectorTerm, out *NodeSelectorTerm, c *conversion.Cloner) error { - if in.MatchExpressions != nil { - in, out := in.MatchExpressions, &out.MatchExpressions - *out = make([]NodeSelectorRequirement, len(in)) - for i := range in { - if err := DeepCopy_api_NodeSelectorRequirement(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.MatchExpressions = nil - } - return nil -} - -func DeepCopy_api_NodeSpec(in NodeSpec, out *NodeSpec, c *conversion.Cloner) error { - out.PodCIDR = in.PodCIDR - out.ExternalID = in.ExternalID - out.ProviderID = in.ProviderID - out.Unschedulable = in.Unschedulable - return nil -} - -func DeepCopy_api_NodeStatus(in NodeStatus, out *NodeStatus, c *conversion.Cloner) error { - if in.Capacity != nil { - in, out := in.Capacity, &out.Capacity - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Capacity = nil - } - if in.Allocatable != nil { - in, out := in.Allocatable, &out.Allocatable - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Allocatable = nil - } - out.Phase = in.Phase - if in.Conditions != nil { - in, out := in.Conditions, &out.Conditions - *out = make([]NodeCondition, len(in)) - for i := range in { - if err := DeepCopy_api_NodeCondition(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - if in.Addresses != nil { - in, out := in.Addresses, &out.Addresses - *out = make([]NodeAddress, len(in)) - for i := range in { - if err := DeepCopy_api_NodeAddress(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Addresses = nil - } - if err := DeepCopy_api_NodeDaemonEndpoints(in.DaemonEndpoints, &out.DaemonEndpoints, c); err != nil { - return err - } - if err := DeepCopy_api_NodeSystemInfo(in.NodeInfo, &out.NodeInfo, c); err != nil { - return err - } - if in.Images != nil { - 
in, out := in.Images, &out.Images - *out = make([]ContainerImage, len(in)) - for i := range in { - if err := DeepCopy_api_ContainerImage(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Images = nil - } - if in.VolumesInUse != nil { - in, out := in.VolumesInUse, &out.VolumesInUse - *out = make([]UniqueVolumeName, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.VolumesInUse = nil - } - if in.VolumesAttached != nil { - in, out := in.VolumesAttached, &out.VolumesAttached - *out = make([]AttachedVolume, len(in)) - for i := range in { - if err := DeepCopy_api_AttachedVolume(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.VolumesAttached = nil - } - return nil -} - -func DeepCopy_api_NodeSystemInfo(in NodeSystemInfo, out *NodeSystemInfo, c *conversion.Cloner) error { - out.MachineID = in.MachineID - out.SystemUUID = in.SystemUUID - out.BootID = in.BootID - out.KernelVersion = in.KernelVersion - out.OSImage = in.OSImage - out.ContainerRuntimeVersion = in.ContainerRuntimeVersion - out.KubeletVersion = in.KubeletVersion - out.KubeProxyVersion = in.KubeProxyVersion - out.OperatingSystem = in.OperatingSystem - out.Architecture = in.Architecture - return nil -} - -func DeepCopy_api_ObjectFieldSelector(in ObjectFieldSelector, out *ObjectFieldSelector, c *conversion.Cloner) error { - out.APIVersion = in.APIVersion - out.FieldPath = in.FieldPath - return nil -} - -func DeepCopy_api_ObjectMeta(in ObjectMeta, out *ObjectMeta, c *conversion.Cloner) error { - out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = in.Namespace - out.SelfLink = in.SelfLink - out.UID = in.UID - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation - if err := unversioned.DeepCopy_unversioned_Time(in.CreationTimestamp, &out.CreationTimestamp, c); err != nil { - return err - } - if in.DeletionTimestamp != nil { - in, out := in.DeletionTimestamp, &out.DeletionTimestamp - *out = new(unversioned.Time) - if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { - return err - } - } else { - out.DeletionTimestamp = nil - } - if in.DeletionGracePeriodSeconds != nil { - in, out := in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds - *out = new(int64) - **out = *in - } else { - out.DeletionGracePeriodSeconds = nil - } - if in.Labels != nil { - in, out := in.Labels, &out.Labels - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.Labels = nil - } - if in.Annotations != nil { - in, out := in.Annotations, &out.Annotations - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.Annotations = nil - } - if in.OwnerReferences != nil { - in, out := in.OwnerReferences, &out.OwnerReferences - *out = make([]OwnerReference, len(in)) - for i := range in { - if err := DeepCopy_api_OwnerReference(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.OwnerReferences = nil - } - if in.Finalizers != nil { - in, out := in.Finalizers, &out.Finalizers - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Finalizers = nil - } - return nil -} - -func DeepCopy_api_ObjectReference(in ObjectReference, out *ObjectReference, c *conversion.Cloner) error { - out.Kind = in.Kind - out.Namespace = in.Namespace - out.Name = in.Name - out.UID = in.UID - out.APIVersion = in.APIVersion - out.ResourceVersion = in.ResourceVersion - out.FieldPath = in.FieldPath - return nil -} - -func 
DeepCopy_api_OwnerReference(in OwnerReference, out *OwnerReference, c *conversion.Cloner) error { - out.APIVersion = in.APIVersion - out.Kind = in.Kind - out.Name = in.Name - out.UID = in.UID - if in.Controller != nil { - in, out := in.Controller, &out.Controller - *out = new(bool) - **out = *in - } else { - out.Controller = nil - } - return nil -} - -func DeepCopy_api_PersistentVolume(in PersistentVolume, out *PersistentVolume, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_api_PersistentVolumeSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_api_PersistentVolumeStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_api_PersistentVolumeClaim(in PersistentVolumeClaim, out *PersistentVolumeClaim, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_api_PersistentVolumeClaimSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_api_PersistentVolumeClaimStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_api_PersistentVolumeClaimList(in PersistentVolumeClaimList, out *PersistentVolumeClaimList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]PersistentVolumeClaim, len(in)) - for i := range in { - if err := DeepCopy_api_PersistentVolumeClaim(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_api_PersistentVolumeClaimSpec(in PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, c *conversion.Cloner) error { - if in.AccessModes != nil { - in, out := in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.AccessModes = nil - } - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := DeepCopy_api_ResourceRequirements(in.Resources, &out.Resources, c); err != nil { - return err - } - out.VolumeName = in.VolumeName - return nil -} - -func DeepCopy_api_PersistentVolumeClaimStatus(in PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, c *conversion.Cloner) error { - out.Phase = in.Phase - if in.AccessModes != nil { - in, out := in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.AccessModes = nil - } - if in.Capacity != nil { - in, out := in.Capacity, &out.Capacity - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } 
else { - out.Capacity = nil - } - return nil -} - -func DeepCopy_api_PersistentVolumeClaimVolumeSource(in PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, c *conversion.Cloner) error { - out.ClaimName = in.ClaimName - out.ReadOnly = in.ReadOnly - return nil -} - -func DeepCopy_api_PersistentVolumeList(in PersistentVolumeList, out *PersistentVolumeList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]PersistentVolume, len(in)) - for i := range in { - if err := DeepCopy_api_PersistentVolume(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_api_PersistentVolumeSource(in PersistentVolumeSource, out *PersistentVolumeSource, c *conversion.Cloner) error { - if in.GCEPersistentDisk != nil { - in, out := in.GCEPersistentDisk, &out.GCEPersistentDisk - *out = new(GCEPersistentDiskVolumeSource) - if err := DeepCopy_api_GCEPersistentDiskVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.GCEPersistentDisk = nil - } - if in.AWSElasticBlockStore != nil { - in, out := in.AWSElasticBlockStore, &out.AWSElasticBlockStore - *out = new(AWSElasticBlockStoreVolumeSource) - if err := DeepCopy_api_AWSElasticBlockStoreVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.AWSElasticBlockStore = nil - } - if in.HostPath != nil { - in, out := in.HostPath, &out.HostPath - *out = new(HostPathVolumeSource) - if err := DeepCopy_api_HostPathVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.HostPath = nil - } - if in.Glusterfs != nil { - in, out := in.Glusterfs, &out.Glusterfs - *out = new(GlusterfsVolumeSource) - if err := DeepCopy_api_GlusterfsVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.Glusterfs = nil - } - if in.NFS != nil { - in, out := in.NFS, &out.NFS - *out = new(NFSVolumeSource) - if err := DeepCopy_api_NFSVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.NFS = nil - } - if in.RBD != nil { - in, out := in.RBD, &out.RBD - *out = new(RBDVolumeSource) - if err := DeepCopy_api_RBDVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.RBD = nil - } - if in.ISCSI != nil { - in, out := in.ISCSI, &out.ISCSI - *out = new(ISCSIVolumeSource) - if err := DeepCopy_api_ISCSIVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.ISCSI = nil - } - if in.FlexVolume != nil { - in, out := in.FlexVolume, &out.FlexVolume - *out = new(FlexVolumeSource) - if err := DeepCopy_api_FlexVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.FlexVolume = nil - } - if in.Cinder != nil { - in, out := in.Cinder, &out.Cinder - *out = new(CinderVolumeSource) - if err := DeepCopy_api_CinderVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.Cinder = nil - } - if in.CephFS != nil { - in, out := in.CephFS, &out.CephFS - *out = new(CephFSVolumeSource) - if err := DeepCopy_api_CephFSVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.CephFS = nil - } - if in.FC != nil { - in, out := in.FC, &out.FC - *out = new(FCVolumeSource) - if err := DeepCopy_api_FCVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.FC = nil - } - if 
in.Flocker != nil { - in, out := in.Flocker, &out.Flocker - *out = new(FlockerVolumeSource) - if err := DeepCopy_api_FlockerVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.Flocker = nil - } - if in.AzureFile != nil { - in, out := in.AzureFile, &out.AzureFile - *out = new(AzureFileVolumeSource) - if err := DeepCopy_api_AzureFileVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.AzureFile = nil - } - if in.VsphereVolume != nil { - in, out := in.VsphereVolume, &out.VsphereVolume - *out = new(VsphereVirtualDiskVolumeSource) - if err := DeepCopy_api_VsphereVirtualDiskVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.VsphereVolume = nil - } - return nil -} - -func DeepCopy_api_PersistentVolumeSpec(in PersistentVolumeSpec, out *PersistentVolumeSpec, c *conversion.Cloner) error { - if in.Capacity != nil { - in, out := in.Capacity, &out.Capacity - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Capacity = nil - } - if err := DeepCopy_api_PersistentVolumeSource(in.PersistentVolumeSource, &out.PersistentVolumeSource, c); err != nil { - return err - } - if in.AccessModes != nil { - in, out := in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.AccessModes = nil - } - if in.ClaimRef != nil { - in, out := in.ClaimRef, &out.ClaimRef - *out = new(ObjectReference) - if err := DeepCopy_api_ObjectReference(*in, *out, c); err != nil { - return err - } - } else { - out.ClaimRef = nil - } - out.PersistentVolumeReclaimPolicy = in.PersistentVolumeReclaimPolicy - return nil -} - -func DeepCopy_api_PersistentVolumeStatus(in PersistentVolumeStatus, out *PersistentVolumeStatus, c *conversion.Cloner) error { - out.Phase = in.Phase - out.Message = in.Message - out.Reason = in.Reason - return nil -} - -func DeepCopy_api_Pod(in Pod, out *Pod, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_api_PodSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_api_PodStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_api_PodAffinity(in PodAffinity, out *PodAffinity, c *conversion.Cloner) error { - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = make([]PodAffinityTerm, len(in)) - for i := range in { - if err := DeepCopy_api_PodAffinityTerm(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.RequiredDuringSchedulingIgnoredDuringExecution = nil - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]WeightedPodAffinityTerm, len(in)) - for i := range in { - if err := DeepCopy_api_WeightedPodAffinityTerm(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.PreferredDuringSchedulingIgnoredDuringExecution = nil - } - return nil -} - -func DeepCopy_api_PodAffinityTerm(in 
PodAffinityTerm, out *PodAffinityTerm, c *conversion.Cloner) error { - if in.LabelSelector != nil { - in, out := in.LabelSelector, &out.LabelSelector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.LabelSelector = nil - } - if in.Namespaces != nil { - in, out := in.Namespaces, &out.Namespaces - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Namespaces = nil - } - out.TopologyKey = in.TopologyKey - return nil -} - -func DeepCopy_api_PodAntiAffinity(in PodAntiAffinity, out *PodAntiAffinity, c *conversion.Cloner) error { - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = make([]PodAffinityTerm, len(in)) - for i := range in { - if err := DeepCopy_api_PodAffinityTerm(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.RequiredDuringSchedulingIgnoredDuringExecution = nil - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]WeightedPodAffinityTerm, len(in)) - for i := range in { - if err := DeepCopy_api_WeightedPodAffinityTerm(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.PreferredDuringSchedulingIgnoredDuringExecution = nil - } - return nil -} - -func DeepCopy_api_PodAttachOptions(in PodAttachOptions, out *PodAttachOptions, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - return nil -} - -func DeepCopy_api_PodCondition(in PodCondition, out *PodCondition, c *conversion.Cloner) error { - out.Type = in.Type - out.Status = in.Status - if err := unversioned.DeepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func DeepCopy_api_PodExecOptions(in PodExecOptions, out *PodExecOptions, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - if in.Command != nil { - in, out := in.Command, &out.Command - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Command = nil - } - return nil -} - -func DeepCopy_api_PodList(in PodList, out *PodList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Pod, len(in)) - for i := range in { - if err := DeepCopy_api_Pod(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_api_PodLogOptions(in PodLogOptions, out *PodLogOptions, c *conversion.Cloner) error { - if err := 
unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Container = in.Container - out.Follow = in.Follow - out.Previous = in.Previous - if in.SinceSeconds != nil { - in, out := in.SinceSeconds, &out.SinceSeconds - *out = new(int64) - **out = *in - } else { - out.SinceSeconds = nil - } - if in.SinceTime != nil { - in, out := in.SinceTime, &out.SinceTime - *out = new(unversioned.Time) - if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { - return err - } - } else { - out.SinceTime = nil - } - out.Timestamps = in.Timestamps - if in.TailLines != nil { - in, out := in.TailLines, &out.TailLines - *out = new(int64) - **out = *in - } else { - out.TailLines = nil - } - if in.LimitBytes != nil { - in, out := in.LimitBytes, &out.LimitBytes - *out = new(int64) - **out = *in - } else { - out.LimitBytes = nil - } - return nil -} - -func DeepCopy_api_PodProxyOptions(in PodProxyOptions, out *PodProxyOptions, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Path = in.Path - return nil -} - -func DeepCopy_api_PodSecurityContext(in PodSecurityContext, out *PodSecurityContext, c *conversion.Cloner) error { - out.HostNetwork = in.HostNetwork - out.HostPID = in.HostPID - out.HostIPC = in.HostIPC - if in.SELinuxOptions != nil { - in, out := in.SELinuxOptions, &out.SELinuxOptions - *out = new(SELinuxOptions) - if err := DeepCopy_api_SELinuxOptions(*in, *out, c); err != nil { - return err - } - } else { - out.SELinuxOptions = nil - } - if in.RunAsUser != nil { - in, out := in.RunAsUser, &out.RunAsUser - *out = new(int64) - **out = *in - } else { - out.RunAsUser = nil - } - if in.RunAsNonRoot != nil { - in, out := in.RunAsNonRoot, &out.RunAsNonRoot - *out = new(bool) - **out = *in - } else { - out.RunAsNonRoot = nil - } - if in.SupplementalGroups != nil { - in, out := in.SupplementalGroups, &out.SupplementalGroups - *out = make([]int64, len(in)) - copy(*out, in) - } else { - out.SupplementalGroups = nil - } - if in.FSGroup != nil { - in, out := in.FSGroup, &out.FSGroup - *out = new(int64) - **out = *in - } else { - out.FSGroup = nil - } - return nil -} - -func DeepCopy_api_PodSpec(in PodSpec, out *PodSpec, c *conversion.Cloner) error { - if in.Volumes != nil { - in, out := in.Volumes, &out.Volumes - *out = make([]Volume, len(in)) - for i := range in { - if err := DeepCopy_api_Volume(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Volumes = nil - } - if in.InitContainers != nil { - in, out := in.InitContainers, &out.InitContainers - *out = make([]Container, len(in)) - for i := range in { - if err := DeepCopy_api_Container(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.InitContainers = nil - } - if in.Containers != nil { - in, out := in.Containers, &out.Containers - *out = make([]Container, len(in)) - for i := range in { - if err := DeepCopy_api_Container(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Containers = nil - } - out.RestartPolicy = in.RestartPolicy - if in.TerminationGracePeriodSeconds != nil { - in, out := in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds - *out = new(int64) - **out = *in - } else { - out.TerminationGracePeriodSeconds = nil - } - if in.ActiveDeadlineSeconds != nil { - in, out := in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - *out = new(int64) - **out = *in - } else { - out.ActiveDeadlineSeconds = nil - } 
- out.DNSPolicy = in.DNSPolicy - if in.NodeSelector != nil { - in, out := in.NodeSelector, &out.NodeSelector - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.NodeSelector = nil - } - out.ServiceAccountName = in.ServiceAccountName - out.NodeName = in.NodeName - if in.SecurityContext != nil { - in, out := in.SecurityContext, &out.SecurityContext - *out = new(PodSecurityContext) - if err := DeepCopy_api_PodSecurityContext(*in, *out, c); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - if in.ImagePullSecrets != nil { - in, out := in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]LocalObjectReference, len(in)) - for i := range in { - if err := DeepCopy_api_LocalObjectReference(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.ImagePullSecrets = nil - } - out.Hostname = in.Hostname - out.Subdomain = in.Subdomain - return nil -} - -func DeepCopy_api_PodStatus(in PodStatus, out *PodStatus, c *conversion.Cloner) error { - out.Phase = in.Phase - if in.Conditions != nil { - in, out := in.Conditions, &out.Conditions - *out = make([]PodCondition, len(in)) - for i := range in { - if err := DeepCopy_api_PodCondition(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - out.Message = in.Message - out.Reason = in.Reason - out.HostIP = in.HostIP - out.PodIP = in.PodIP - if in.StartTime != nil { - in, out := in.StartTime, &out.StartTime - *out = new(unversioned.Time) - if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { - return err - } - } else { - out.StartTime = nil - } - if in.InitContainerStatuses != nil { - in, out := in.InitContainerStatuses, &out.InitContainerStatuses - *out = make([]ContainerStatus, len(in)) - for i := range in { - if err := DeepCopy_api_ContainerStatus(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.InitContainerStatuses = nil - } - if in.ContainerStatuses != nil { - in, out := in.ContainerStatuses, &out.ContainerStatuses - *out = make([]ContainerStatus, len(in)) - for i := range in { - if err := DeepCopy_api_ContainerStatus(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.ContainerStatuses = nil - } - return nil -} - -func DeepCopy_api_PodStatusResult(in PodStatusResult, out *PodStatusResult, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_api_PodStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_api_PodTemplate(in PodTemplate, out *PodTemplate, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_api_PodTemplateSpec(in.Template, &out.Template, c); err != nil { - return err - } - return nil -} - -func DeepCopy_api_PodTemplateList(in PodTemplateList, out *PodTemplateList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, 
&out.Items - *out = make([]PodTemplate, len(in)) - for i := range in { - if err := DeepCopy_api_PodTemplate(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_api_PodTemplateSpec(in PodTemplateSpec, out *PodTemplateSpec, c *conversion.Cloner) error { - if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_api_PodSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - return nil -} - -func DeepCopy_api_Preconditions(in Preconditions, out *Preconditions, c *conversion.Cloner) error { - if in.UID != nil { - in, out := in.UID, &out.UID - *out = new(types.UID) - **out = *in - } else { - out.UID = nil - } - return nil -} - -func DeepCopy_api_PreferredSchedulingTerm(in PreferredSchedulingTerm, out *PreferredSchedulingTerm, c *conversion.Cloner) error { - out.Weight = in.Weight - if err := DeepCopy_api_NodeSelectorTerm(in.Preference, &out.Preference, c); err != nil { - return err - } - return nil -} - -func DeepCopy_api_Probe(in Probe, out *Probe, c *conversion.Cloner) error { - if err := DeepCopy_api_Handler(in.Handler, &out.Handler, c); err != nil { - return err - } - out.InitialDelaySeconds = in.InitialDelaySeconds - out.TimeoutSeconds = in.TimeoutSeconds - out.PeriodSeconds = in.PeriodSeconds - out.SuccessThreshold = in.SuccessThreshold - out.FailureThreshold = in.FailureThreshold - return nil -} - -func DeepCopy_api_RBDVolumeSource(in RBDVolumeSource, out *RBDVolumeSource, c *conversion.Cloner) error { - if in.CephMonitors != nil { - in, out := in.CephMonitors, &out.CephMonitors - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.CephMonitors = nil - } - out.RBDImage = in.RBDImage - out.FSType = in.FSType - out.RBDPool = in.RBDPool - out.RadosUser = in.RadosUser - out.Keyring = in.Keyring - if in.SecretRef != nil { - in, out := in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - if err := DeepCopy_api_LocalObjectReference(*in, *out, c); err != nil { - return err - } - } else { - out.SecretRef = nil - } - out.ReadOnly = in.ReadOnly - return nil -} - -func DeepCopy_api_RangeAllocation(in RangeAllocation, out *RangeAllocation, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - out.Range = in.Range - if in.Data != nil { - in, out := in.Data, &out.Data - *out = make([]byte, len(in)) - copy(*out, in) - } else { - out.Data = nil - } - return nil -} - -func DeepCopy_api_ReplicationController(in ReplicationController, out *ReplicationController, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ReplicationControllerSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_api_ReplicationControllerStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_api_ReplicationControllerList(in ReplicationControllerList, out *ReplicationControllerList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, 
&out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ReplicationController, len(in)) - for i := range in { - if err := DeepCopy_api_ReplicationController(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_api_ReplicationControllerSpec(in ReplicationControllerSpec, out *ReplicationControllerSpec, c *conversion.Cloner) error { - out.Replicas = in.Replicas - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.Selector = nil - } - if in.Template != nil { - in, out := in.Template, &out.Template - *out = new(PodTemplateSpec) - if err := DeepCopy_api_PodTemplateSpec(*in, *out, c); err != nil { - return err - } - } else { - out.Template = nil - } - return nil -} - -func DeepCopy_api_ReplicationControllerStatus(in ReplicationControllerStatus, out *ReplicationControllerStatus, c *conversion.Cloner) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ObservedGeneration = in.ObservedGeneration - return nil -} - -func DeepCopy_api_ResourceFieldSelector(in ResourceFieldSelector, out *ResourceFieldSelector, c *conversion.Cloner) error { - out.ContainerName = in.ContainerName - out.Resource = in.Resource - if err := resource.DeepCopy_resource_Quantity(in.Divisor, &out.Divisor, c); err != nil { - return err - } - return nil -} - -func DeepCopy_api_ResourceQuota(in ResourceQuota, out *ResourceQuota, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ResourceQuotaSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_api_ResourceQuotaStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_api_ResourceQuotaList(in ResourceQuotaList, out *ResourceQuotaList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ResourceQuota, len(in)) - for i := range in { - if err := DeepCopy_api_ResourceQuota(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_api_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec, c *conversion.Cloner) error { - if in.Hard != nil { - in, out := in.Hard, &out.Hard - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Hard = nil - } - if in.Scopes != nil { - in, out := in.Scopes, &out.Scopes - *out = make([]ResourceQuotaScope, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.Scopes = nil - } - return nil -} - -func DeepCopy_api_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaStatus, c *conversion.Cloner) error { - if in.Hard != nil { - in, out := in.Hard, &out.Hard - *out = make(ResourceList) - for key, val := range in { - newVal := 
new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Hard = nil - } - if in.Used != nil { - in, out := in.Used, &out.Used - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Used = nil - } - return nil -} - -func DeepCopy_api_ResourceRequirements(in ResourceRequirements, out *ResourceRequirements, c *conversion.Cloner) error { - if in.Limits != nil { - in, out := in.Limits, &out.Limits - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Limits = nil - } - if in.Requests != nil { - in, out := in.Requests, &out.Requests - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Requests = nil - } - return nil -} - -func DeepCopy_api_SELinuxOptions(in SELinuxOptions, out *SELinuxOptions, c *conversion.Cloner) error { - out.User = in.User - out.Role = in.Role - out.Type = in.Type - out.Level = in.Level - return nil -} - -func DeepCopy_api_Secret(in Secret, out *Secret, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if in.Data != nil { - in, out := in.Data, &out.Data - *out = make(map[string][]byte) - for key, val := range in { - if newVal, err := c.DeepCopy(val); err != nil { - return err - } else { - (*out)[key] = newVal.([]byte) - } - } - } else { - out.Data = nil - } - out.Type = in.Type - return nil -} - -func DeepCopy_api_SecretKeySelector(in SecretKeySelector, out *SecretKeySelector, c *conversion.Cloner) error { - if err := DeepCopy_api_LocalObjectReference(in.LocalObjectReference, &out.LocalObjectReference, c); err != nil { - return err - } - out.Key = in.Key - return nil -} - -func DeepCopy_api_SecretList(in SecretList, out *SecretList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Secret, len(in)) - for i := range in { - if err := DeepCopy_api_Secret(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_api_SecretVolumeSource(in SecretVolumeSource, out *SecretVolumeSource, c *conversion.Cloner) error { - out.SecretName = in.SecretName - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]KeyToPath, len(in)) - for i := range in { - if err := DeepCopy_api_KeyToPath(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_api_SecurityContext(in SecurityContext, out *SecurityContext, c *conversion.Cloner) error { - if in.Capabilities != nil { - in, out := in.Capabilities, &out.Capabilities - *out = new(Capabilities) - if 
err := DeepCopy_api_Capabilities(*in, *out, c); err != nil { - return err - } - } else { - out.Capabilities = nil - } - if in.Privileged != nil { - in, out := in.Privileged, &out.Privileged - *out = new(bool) - **out = *in - } else { - out.Privileged = nil - } - if in.SELinuxOptions != nil { - in, out := in.SELinuxOptions, &out.SELinuxOptions - *out = new(SELinuxOptions) - if err := DeepCopy_api_SELinuxOptions(*in, *out, c); err != nil { - return err - } - } else { - out.SELinuxOptions = nil - } - if in.RunAsUser != nil { - in, out := in.RunAsUser, &out.RunAsUser - *out = new(int64) - **out = *in - } else { - out.RunAsUser = nil - } - if in.RunAsNonRoot != nil { - in, out := in.RunAsNonRoot, &out.RunAsNonRoot - *out = new(bool) - **out = *in - } else { - out.RunAsNonRoot = nil - } - if in.ReadOnlyRootFilesystem != nil { - in, out := in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem - *out = new(bool) - **out = *in - } else { - out.ReadOnlyRootFilesystem = nil - } - return nil -} - -func DeepCopy_api_SerializedReference(in SerializedReference, out *SerializedReference, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectReference(in.Reference, &out.Reference, c); err != nil { - return err - } - return nil -} - -func DeepCopy_api_Service(in Service, out *Service, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ServiceSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_api_ServiceStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_api_ServiceAccount(in ServiceAccount, out *ServiceAccount, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if in.Secrets != nil { - in, out := in.Secrets, &out.Secrets - *out = make([]ObjectReference, len(in)) - for i := range in { - if err := DeepCopy_api_ObjectReference(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Secrets = nil - } - if in.ImagePullSecrets != nil { - in, out := in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]LocalObjectReference, len(in)) - for i := range in { - if err := DeepCopy_api_LocalObjectReference(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.ImagePullSecrets = nil - } - return nil -} - -func DeepCopy_api_ServiceAccountList(in ServiceAccountList, out *ServiceAccountList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ServiceAccount, len(in)) - for i := range in { - if err := DeepCopy_api_ServiceAccount(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_api_ServiceList(in ServiceList, out *ServiceList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, 
&out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Service, len(in)) - for i := range in { - if err := DeepCopy_api_Service(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_api_ServicePort(in ServicePort, out *ServicePort, c *conversion.Cloner) error { - out.Name = in.Name - out.Protocol = in.Protocol - out.Port = in.Port - if err := intstr.DeepCopy_intstr_IntOrString(in.TargetPort, &out.TargetPort, c); err != nil { - return err - } - out.NodePort = in.NodePort - return nil -} - -func DeepCopy_api_ServiceProxyOptions(in ServiceProxyOptions, out *ServiceProxyOptions, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Path = in.Path - return nil -} - -func DeepCopy_api_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Cloner) error { - out.Type = in.Type - if in.Ports != nil { - in, out := in.Ports, &out.Ports - *out = make([]ServicePort, len(in)) - for i := range in { - if err := DeepCopy_api_ServicePort(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Ports = nil - } - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.Selector = nil - } - out.ClusterIP = in.ClusterIP - if in.ExternalIPs != nil { - in, out := in.ExternalIPs, &out.ExternalIPs - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.ExternalIPs = nil - } - out.LoadBalancerIP = in.LoadBalancerIP - out.SessionAffinity = in.SessionAffinity - if in.LoadBalancerSourceRanges != nil { - in, out := in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.LoadBalancerSourceRanges = nil - } - return nil -} - -func DeepCopy_api_ServiceStatus(in ServiceStatus, out *ServiceStatus, c *conversion.Cloner) error { - if err := DeepCopy_api_LoadBalancerStatus(in.LoadBalancer, &out.LoadBalancer, c); err != nil { - return err - } - return nil -} - -func DeepCopy_api_TCPSocketAction(in TCPSocketAction, out *TCPSocketAction, c *conversion.Cloner) error { - if err := intstr.DeepCopy_intstr_IntOrString(in.Port, &out.Port, c); err != nil { - return err - } - return nil -} - -func DeepCopy_api_Taint(in Taint, out *Taint, c *conversion.Cloner) error { - out.Key = in.Key - out.Value = in.Value - out.Effect = in.Effect - return nil -} - -func DeepCopy_api_Toleration(in Toleration, out *Toleration, c *conversion.Cloner) error { - out.Key = in.Key - out.Operator = in.Operator - out.Value = in.Value - out.Effect = in.Effect - return nil -} - -func DeepCopy_api_Volume(in Volume, out *Volume, c *conversion.Cloner) error { - out.Name = in.Name - if err := DeepCopy_api_VolumeSource(in.VolumeSource, &out.VolumeSource, c); err != nil { - return err - } - return nil -} - -func DeepCopy_api_VolumeMount(in VolumeMount, out *VolumeMount, c *conversion.Cloner) error { - out.Name = in.Name - out.ReadOnly = in.ReadOnly - out.MountPath = in.MountPath - out.SubPath = in.SubPath - return nil -} - -func DeepCopy_api_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion.Cloner) error { - if in.HostPath != nil { - in, out := in.HostPath, &out.HostPath - *out = new(HostPathVolumeSource) 
- if err := DeepCopy_api_HostPathVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.HostPath = nil - } - if in.EmptyDir != nil { - in, out := in.EmptyDir, &out.EmptyDir - *out = new(EmptyDirVolumeSource) - if err := DeepCopy_api_EmptyDirVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.EmptyDir = nil - } - if in.GCEPersistentDisk != nil { - in, out := in.GCEPersistentDisk, &out.GCEPersistentDisk - *out = new(GCEPersistentDiskVolumeSource) - if err := DeepCopy_api_GCEPersistentDiskVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.GCEPersistentDisk = nil - } - if in.AWSElasticBlockStore != nil { - in, out := in.AWSElasticBlockStore, &out.AWSElasticBlockStore - *out = new(AWSElasticBlockStoreVolumeSource) - if err := DeepCopy_api_AWSElasticBlockStoreVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.AWSElasticBlockStore = nil - } - if in.GitRepo != nil { - in, out := in.GitRepo, &out.GitRepo - *out = new(GitRepoVolumeSource) - if err := DeepCopy_api_GitRepoVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.GitRepo = nil - } - if in.Secret != nil { - in, out := in.Secret, &out.Secret - *out = new(SecretVolumeSource) - if err := DeepCopy_api_SecretVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.Secret = nil - } - if in.NFS != nil { - in, out := in.NFS, &out.NFS - *out = new(NFSVolumeSource) - if err := DeepCopy_api_NFSVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.NFS = nil - } - if in.ISCSI != nil { - in, out := in.ISCSI, &out.ISCSI - *out = new(ISCSIVolumeSource) - if err := DeepCopy_api_ISCSIVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.ISCSI = nil - } - if in.Glusterfs != nil { - in, out := in.Glusterfs, &out.Glusterfs - *out = new(GlusterfsVolumeSource) - if err := DeepCopy_api_GlusterfsVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.Glusterfs = nil - } - if in.PersistentVolumeClaim != nil { - in, out := in.PersistentVolumeClaim, &out.PersistentVolumeClaim - *out = new(PersistentVolumeClaimVolumeSource) - if err := DeepCopy_api_PersistentVolumeClaimVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.PersistentVolumeClaim = nil - } - if in.RBD != nil { - in, out := in.RBD, &out.RBD - *out = new(RBDVolumeSource) - if err := DeepCopy_api_RBDVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.RBD = nil - } - if in.FlexVolume != nil { - in, out := in.FlexVolume, &out.FlexVolume - *out = new(FlexVolumeSource) - if err := DeepCopy_api_FlexVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.FlexVolume = nil - } - if in.Cinder != nil { - in, out := in.Cinder, &out.Cinder - *out = new(CinderVolumeSource) - if err := DeepCopy_api_CinderVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.Cinder = nil - } - if in.CephFS != nil { - in, out := in.CephFS, &out.CephFS - *out = new(CephFSVolumeSource) - if err := DeepCopy_api_CephFSVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.CephFS = nil - } - if in.Flocker != nil { - in, out := in.Flocker, &out.Flocker - *out = new(FlockerVolumeSource) - if err := DeepCopy_api_FlockerVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.Flocker = nil - } - if in.DownwardAPI != nil { - in, out := in.DownwardAPI, &out.DownwardAPI - *out = new(DownwardAPIVolumeSource) - if err := 
DeepCopy_api_DownwardAPIVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.DownwardAPI = nil - } - if in.FC != nil { - in, out := in.FC, &out.FC - *out = new(FCVolumeSource) - if err := DeepCopy_api_FCVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.FC = nil - } - if in.AzureFile != nil { - in, out := in.AzureFile, &out.AzureFile - *out = new(AzureFileVolumeSource) - if err := DeepCopy_api_AzureFileVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.AzureFile = nil - } - if in.ConfigMap != nil { - in, out := in.ConfigMap, &out.ConfigMap - *out = new(ConfigMapVolumeSource) - if err := DeepCopy_api_ConfigMapVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.ConfigMap = nil - } - if in.VsphereVolume != nil { - in, out := in.VsphereVolume, &out.VsphereVolume - *out = new(VsphereVirtualDiskVolumeSource) - if err := DeepCopy_api_VsphereVirtualDiskVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.VsphereVolume = nil - } - return nil -} - -func DeepCopy_api_VsphereVirtualDiskVolumeSource(in VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, c *conversion.Cloner) error { - out.VolumePath = in.VolumePath - out.FSType = in.FSType - return nil -} - -func DeepCopy_api_WeightedPodAffinityTerm(in WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, c *conversion.Cloner) error { - out.Weight = in.Weight - if err := DeepCopy_api_PodAffinityTerm(in.PodAffinityTerm, &out.PodAffinityTerm, c); err != nil { - return err - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/doc.go b/vendor/k8s.io/kubernetes/pkg/api/doc.go deleted file mode 100644 index 8a54f7acc..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package api contains the latest (or "internal") version of the -// Kubernetes API objects. This is the API objects as represented in memory. -// The contract presented to clients is located in the versioned packages, -// which are sub-directories. The first one is "v1". Those packages -// describe how a particular version is serialized to storage/network. -package api diff --git a/vendor/k8s.io/kubernetes/pkg/api/endpoints/util.go b/vendor/k8s.io/kubernetes/pkg/api/endpoints/util.go deleted file mode 100644 index 501d58f28..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/endpoints/util.go +++ /dev/null @@ -1,238 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
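Editor's note: the generated DeepCopy_api_* helpers removed above all follow one pattern: copy value fields directly, allocate fresh memory for pointer, slice and map fields, and delegate to the matching helper for nested types. A minimal sketch of how a caller outside the package might have used one of them; the clonePodSpec wrapper and the caller-supplied *conversion.Cloner are illustrative assumptions, not part of the removed code.

package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/conversion"
)

// clonePodSpec returns an independent copy of in: every nested slice, map and
// pointer is reallocated by the generated helper, so mutating the result
// cannot alias the original.
func clonePodSpec(in api.PodSpec, c *conversion.Cloner) (api.PodSpec, error) {
	var out api.PodSpec
	if err := api.DeepCopy_api_PodSpec(in, &out, c); err != nil {
		return api.PodSpec{}, err
	}
	return out, nil
}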
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package endpoints - -import ( - "bytes" - "crypto/md5" - "encoding/hex" - "hash" - "sort" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/types" - hashutil "k8s.io/kubernetes/pkg/util/hash" -) - -const ( - // TODO: to be deleted after v1.3 is released - // Its value is the json representation of map[string(IP)][HostRecord] - // example: '{"10.245.1.6":{"HostName":"my-webserver"}}' - PodHostnamesAnnotation = "endpoints.beta.kubernetes.io/hostnames-map" -) - -// TODO: to be deleted after v1.3 is released -type HostRecord struct { - HostName string -} - -// RepackSubsets takes a slice of EndpointSubset objects, expands it to the full -// representation, and then repacks that into the canonical layout. This -// ensures that code which operates on these objects can rely on the common -// form for things like comparison. The result is a newly allocated slice. -func RepackSubsets(subsets []api.EndpointSubset) []api.EndpointSubset { - // First map each unique port definition to the sets of hosts that - // offer it. - allAddrs := map[addressKey]*api.EndpointAddress{} - portToAddrReadyMap := map[api.EndpointPort]addressSet{} - for i := range subsets { - for _, port := range subsets[i].Ports { - for k := range subsets[i].Addresses { - mapAddressByPort(&subsets[i].Addresses[k], port, true, allAddrs, portToAddrReadyMap) - } - for k := range subsets[i].NotReadyAddresses { - mapAddressByPort(&subsets[i].NotReadyAddresses[k], port, false, allAddrs, portToAddrReadyMap) - } - } - } - - // Next, map the sets of hosts to the sets of ports they offer. - // Go does not allow maps or slices as keys to maps, so we have - // to synthesize an artificial key and do a sort of 2-part - // associative entity. - type keyString string - keyToAddrReadyMap := map[keyString]addressSet{} - addrReadyMapKeyToPorts := map[keyString][]api.EndpointPort{} - for port, addrs := range portToAddrReadyMap { - key := keyString(hashAddresses(addrs)) - keyToAddrReadyMap[key] = addrs - addrReadyMapKeyToPorts[key] = append(addrReadyMapKeyToPorts[key], port) - } - - // Next, build the N-to-M association the API wants. - final := []api.EndpointSubset{} - for key, ports := range addrReadyMapKeyToPorts { - var readyAddrs, notReadyAddrs []api.EndpointAddress - for addr, ready := range keyToAddrReadyMap[key] { - if ready { - readyAddrs = append(readyAddrs, *addr) - } else { - notReadyAddrs = append(notReadyAddrs, *addr) - } - } - final = append(final, api.EndpointSubset{Addresses: readyAddrs, NotReadyAddresses: notReadyAddrs, Ports: ports}) - } - - // Finally, sort it. - return SortSubsets(final) -} - -// The sets of hosts must be de-duped, using IP+UID as the key. -type addressKey struct { - ip string - uid types.UID -} - -// mapAddressByPort adds an address into a map by its ports, registering the address with a unique pointer, and preserving -// any existing ready state. -func mapAddressByPort(addr *api.EndpointAddress, port api.EndpointPort, ready bool, allAddrs map[addressKey]*api.EndpointAddress, portToAddrReadyMap map[api.EndpointPort]addressSet) *api.EndpointAddress { - // use addressKey to distinguish between two endpoints that are identical addresses - // but may have come from different hosts, for attribution. For instance, Mesos - // assigns pods the node IP, but the pods are distinct. - key := addressKey{ip: addr.IP} - if addr.TargetRef != nil { - key.uid = addr.TargetRef.UID - } - - // Accumulate the address. 
The full EndpointAddress structure is preserved for use when - // we rebuild the subsets so that the final TargetRef has all of the necessary data. - existingAddress := allAddrs[key] - if existingAddress == nil { - // Make a copy so we don't write to the - // input args of this function. - existingAddress = &api.EndpointAddress{} - *existingAddress = *addr - allAddrs[key] = existingAddress - } - - // Remember that this port maps to this address. - if _, found := portToAddrReadyMap[port]; !found { - portToAddrReadyMap[port] = addressSet{} - } - // if we have not yet recorded this port for this address, or if the previous - // state was ready, write the current ready state. not ready always trumps - // ready. - if wasReady, found := portToAddrReadyMap[port][existingAddress]; !found || wasReady { - portToAddrReadyMap[port][existingAddress] = ready - } - return existingAddress -} - -type addressSet map[*api.EndpointAddress]bool - -type addrReady struct { - addr *api.EndpointAddress - ready bool -} - -func hashAddresses(addrs addressSet) string { - // Flatten the list of addresses into a string so it can be used as a - // map key. Unfortunately, DeepHashObject is implemented in terms of - // spew, and spew does not handle non-primitive map keys well. So - // first we collapse it into a slice, sort the slice, then hash that. - slice := make([]addrReady, 0, len(addrs)) - for k, ready := range addrs { - slice = append(slice, addrReady{k, ready}) - } - sort.Sort(addrsReady(slice)) - hasher := md5.New() - hashutil.DeepHashObject(hasher, slice) - return hex.EncodeToString(hasher.Sum(nil)[0:]) -} - -func lessAddrReady(a, b addrReady) bool { - // ready is not significant to hashing since we can't have duplicate addresses - return LessEndpointAddress(a.addr, b.addr) -} - -type addrsReady []addrReady - -func (sl addrsReady) Len() int { return len(sl) } -func (sl addrsReady) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } -func (sl addrsReady) Less(i, j int) bool { - return lessAddrReady(sl[i], sl[j]) -} - -func LessEndpointAddress(a, b *api.EndpointAddress) bool { - ipComparison := bytes.Compare([]byte(a.IP), []byte(b.IP)) - if ipComparison != 0 { - return ipComparison < 0 - } - if b.TargetRef == nil { - return false - } - if a.TargetRef == nil { - return true - } - return a.TargetRef.UID < b.TargetRef.UID -} - -type addrPtrsByIpAndUID []*api.EndpointAddress - -func (sl addrPtrsByIpAndUID) Len() int { return len(sl) } -func (sl addrPtrsByIpAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } -func (sl addrPtrsByIpAndUID) Less(i, j int) bool { - return LessEndpointAddress(sl[i], sl[j]) -} - -// SortSubsets sorts an array of EndpointSubset objects in place. For ease of -// use it returns the input slice. 
-func SortSubsets(subsets []api.EndpointSubset) []api.EndpointSubset { - for i := range subsets { - ss := &subsets[i] - sort.Sort(addrsByIpAndUID(ss.Addresses)) - sort.Sort(addrsByIpAndUID(ss.NotReadyAddresses)) - sort.Sort(portsByHash(ss.Ports)) - } - sort.Sort(subsetsByHash(subsets)) - return subsets -} - -func hashObject(hasher hash.Hash, obj interface{}) []byte { - hashutil.DeepHashObject(hasher, obj) - return hasher.Sum(nil) -} - -type subsetsByHash []api.EndpointSubset - -func (sl subsetsByHash) Len() int { return len(sl) } -func (sl subsetsByHash) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } -func (sl subsetsByHash) Less(i, j int) bool { - hasher := md5.New() - h1 := hashObject(hasher, sl[i]) - h2 := hashObject(hasher, sl[j]) - return bytes.Compare(h1, h2) < 0 -} - -type addrsByIpAndUID []api.EndpointAddress - -func (sl addrsByIpAndUID) Len() int { return len(sl) } -func (sl addrsByIpAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } -func (sl addrsByIpAndUID) Less(i, j int) bool { - return LessEndpointAddress(&sl[i], &sl[j]) -} - -type portsByHash []api.EndpointPort - -func (sl portsByHash) Len() int { return len(sl) } -func (sl portsByHash) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } -func (sl portsByHash) Less(i, j int) bool { - hasher := md5.New() - h1 := hashObject(hasher, sl[i]) - h2 := hashObject(hasher, sl[j]) - return bytes.Compare(h1, h2) < 0 -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/errors/doc.go b/vendor/k8s.io/kubernetes/pkg/api/errors/doc.go deleted file mode 100644 index 3a2eb2a0a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/errors/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package errors provides detailed error types for api field validation. -package errors diff --git a/vendor/k8s.io/kubernetes/pkg/api/errors/errors.go b/vendor/k8s.io/kubernetes/pkg/api/errors/errors.go deleted file mode 100644 index 89e83c2e3..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/errors/errors.go +++ /dev/null @@ -1,456 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package errors - -import ( - "encoding/json" - "fmt" - "net/http" - "strings" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/validation/field" -) - -// HTTP Status codes not in the golang http package. 
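Editor's note: the removed endpoints utilities canonicalize Endpoints subsets so that two logically equal subset lists compare equal. A rough usage sketch, assuming only the api types referenced in the hunk above; the "http" port name and the IPs are made-up values.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/endpoints"
)

func main() {
	// Two subsets offering the same (hypothetical) "http" port from different
	// addresses; RepackSubsets merges them into one canonical, sorted subset.
	subsets := []api.EndpointSubset{
		{Addresses: []api.EndpointAddress{{IP: "10.0.0.2"}}, Ports: []api.EndpointPort{{Name: "http"}}},
		{Addresses: []api.EndpointAddress{{IP: "10.0.0.1"}}, Ports: []api.EndpointPort{{Name: "http"}}},
	}
	repacked := endpoints.RepackSubsets(subsets)
	fmt.Printf("repacked into %d subset(s)\n", len(repacked)) // expected: 1
}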
-const ( - StatusUnprocessableEntity = 422 - StatusTooManyRequests = 429 - // HTTP recommendations are for servers to define 5xx error codes - // for scenarios not covered by behavior. In this case, ServerTimeout - // is an indication that a transient server error has occurred and the - // client *should* retry, with an optional Retry-After header to specify - // the back off window. - StatusServerTimeout = 504 -) - -// StatusError is an error intended for consumption by a REST API server; it can also be -// reconstructed by clients from a REST response. Public to allow easy type switches. -type StatusError struct { - ErrStatus unversioned.Status -} - -// APIStatus is exposed by errors that can be converted to an api.Status object -// for finer grained details. -type APIStatus interface { - Status() unversioned.Status -} - -var _ error = &StatusError{} - -// Error implements the Error interface. -func (e *StatusError) Error() string { - return e.ErrStatus.Message -} - -// Status allows access to e's status without having to know the detailed workings -// of StatusError. Used by pkg/apiserver. -func (e *StatusError) Status() unversioned.Status { - return e.ErrStatus -} - -// DebugError reports extended info about the error to debug output. -func (e *StatusError) DebugError() (string, []interface{}) { - if out, err := json.MarshalIndent(e.ErrStatus, "", " "); err == nil { - return "server response object: %s", []interface{}{string(out)} - } - return "server response object: %#v", []interface{}{e.ErrStatus} -} - -// UnexpectedObjectError can be returned by FromObject if it's passed a non-status object. -type UnexpectedObjectError struct { - Object runtime.Object -} - -// Error returns an error message describing 'u'. -func (u *UnexpectedObjectError) Error() string { - return fmt.Sprintf("unexpected object: %v", u.Object) -} - -// FromObject generates an StatusError from an unversioned.Status, if that is the type of obj; otherwise, -// returns an UnexpecteObjectError. -func FromObject(obj runtime.Object) error { - switch t := obj.(type) { - case *unversioned.Status: - return &StatusError{*t} - } - return &UnexpectedObjectError{obj} -} - -// NewNotFound returns a new error which indicates that the resource of the kind and the name was not found. -func NewNotFound(qualifiedResource unversioned.GroupResource, name string) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusNotFound, - Reason: unversioned.StatusReasonNotFound, - Details: &unversioned.StatusDetails{ - Group: qualifiedResource.Group, - Kind: qualifiedResource.Resource, - Name: name, - }, - Message: fmt.Sprintf("%s %q not found", qualifiedResource.String(), name), - }} -} - -// NewAlreadyExists returns an error indicating the item requested exists by that identifier. -func NewAlreadyExists(qualifiedResource unversioned.GroupResource, name string) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusConflict, - Reason: unversioned.StatusReasonAlreadyExists, - Details: &unversioned.StatusDetails{ - Group: qualifiedResource.Group, - Kind: qualifiedResource.Resource, - Name: name, - }, - Message: fmt.Sprintf("%s %q already exists", qualifiedResource.String(), name), - }} -} - -// NewUnauthorized returns an error indicating the client is not authorized to perform the requested -// action. 
-func NewUnauthorized(reason string) *StatusError { - message := reason - if len(message) == 0 { - message = "not authorized" - } - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusUnauthorized, - Reason: unversioned.StatusReasonUnauthorized, - Message: message, - }} -} - -// NewForbidden returns an error indicating the requested action was forbidden -func NewForbidden(qualifiedResource unversioned.GroupResource, name string, err error) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusForbidden, - Reason: unversioned.StatusReasonForbidden, - Details: &unversioned.StatusDetails{ - Group: qualifiedResource.Group, - Kind: qualifiedResource.Resource, - Name: name, - }, - Message: fmt.Sprintf("%s %q is forbidden: %v", qualifiedResource.String(), name, err), - }} -} - -// NewConflict returns an error indicating the item can't be updated as provided. -func NewConflict(qualifiedResource unversioned.GroupResource, name string, err error) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusConflict, - Reason: unversioned.StatusReasonConflict, - Details: &unversioned.StatusDetails{ - Group: qualifiedResource.Group, - Kind: qualifiedResource.Resource, - Name: name, - }, - Message: fmt.Sprintf("Operation cannot be fulfilled on %s %q: %v", qualifiedResource.String(), name, err), - }} -} - -// NewGone returns an error indicating the item no longer available at the server and no forwarding address is known. -func NewGone(message string) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusGone, - Reason: unversioned.StatusReasonGone, - Message: message, - }} -} - -// NewInvalid returns an error indicating the item is invalid and cannot be processed. -func NewInvalid(qualifiedKind unversioned.GroupKind, name string, errs field.ErrorList) *StatusError { - causes := make([]unversioned.StatusCause, 0, len(errs)) - for i := range errs { - err := errs[i] - causes = append(causes, unversioned.StatusCause{ - Type: unversioned.CauseType(err.Type), - Message: err.ErrorBody(), - Field: err.Field, - }) - } - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: StatusUnprocessableEntity, // RFC 4918: StatusUnprocessableEntity - Reason: unversioned.StatusReasonInvalid, - Details: &unversioned.StatusDetails{ - Group: qualifiedKind.Group, - Kind: qualifiedKind.Kind, - Name: name, - Causes: causes, - }, - Message: fmt.Sprintf("%s %q is invalid: %v", qualifiedKind.String(), name, errs.ToAggregate()), - }} -} - -// NewBadRequest creates an error that indicates that the request is invalid and can not be processed. -func NewBadRequest(reason string) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusBadRequest, - Reason: unversioned.StatusReasonBadRequest, - Message: reason, - }} -} - -// NewServiceUnavailable creates an error that indicates that the requested service is unavailable. -func NewServiceUnavailable(reason string) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusServiceUnavailable, - Reason: unversioned.StatusReasonServiceUnavailable, - Message: reason, - }} -} - -// NewMethodNotSupported returns an error indicating the requested action is not supported on this kind. 
-func NewMethodNotSupported(qualifiedResource unversioned.GroupResource, action string) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusMethodNotAllowed, - Reason: unversioned.StatusReasonMethodNotAllowed, - Details: &unversioned.StatusDetails{ - Group: qualifiedResource.Group, - Kind: qualifiedResource.Resource, - }, - Message: fmt.Sprintf("%s is not supported on resources of kind %q", action, qualifiedResource.String()), - }} -} - -// NewServerTimeout returns an error indicating the requested action could not be completed due to a -// transient error, and the client should try again. -func NewServerTimeout(qualifiedResource unversioned.GroupResource, operation string, retryAfterSeconds int) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusInternalServerError, - Reason: unversioned.StatusReasonServerTimeout, - Details: &unversioned.StatusDetails{ - Group: qualifiedResource.Group, - Kind: qualifiedResource.Resource, - Name: operation, - RetryAfterSeconds: int32(retryAfterSeconds), - }, - Message: fmt.Sprintf("The %s operation against %s could not be completed at this time, please try again.", operation, qualifiedResource.String()), - }} -} - -// NewServerTimeoutForKind should not exist. Server timeouts happen when accessing resources, the Kind is just what we -// happened to be looking at when the request failed. This delegates to keep code sane, but we should work towards removing this. -func NewServerTimeoutForKind(qualifiedKind unversioned.GroupKind, operation string, retryAfterSeconds int) *StatusError { - return NewServerTimeout(unversioned.GroupResource{Group: qualifiedKind.Group, Resource: qualifiedKind.Kind}, operation, retryAfterSeconds) -} - -// NewInternalError returns an error indicating the item is invalid and cannot be processed. -func NewInternalError(err error) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: http.StatusInternalServerError, - Reason: unversioned.StatusReasonInternalError, - Details: &unversioned.StatusDetails{ - Causes: []unversioned.StatusCause{{Message: err.Error()}}, - }, - Message: fmt.Sprintf("Internal error occurred: %v", err), - }} -} - -// NewTimeoutError returns an error indicating that a timeout occurred before the request -// could be completed. Clients may retry, but the operation may still complete. -func NewTimeoutError(message string, retryAfterSeconds int) *StatusError { - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: StatusServerTimeout, - Reason: unversioned.StatusReasonTimeout, - Message: fmt.Sprintf("Timeout: %s", message), - Details: &unversioned.StatusDetails{ - RetryAfterSeconds: int32(retryAfterSeconds), - }, - }} -} - -// NewGenericServerResponse returns a new error for server responses that are not in a recognizable form. 
-func NewGenericServerResponse(code int, verb string, qualifiedResource unversioned.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError { - reason := unversioned.StatusReasonUnknown - message := fmt.Sprintf("the server responded with the status code %d but did not return more information", code) - switch code { - case http.StatusConflict: - if verb == "POST" { - reason = unversioned.StatusReasonAlreadyExists - } else { - reason = unversioned.StatusReasonConflict - } - message = "the server reported a conflict" - case http.StatusNotFound: - reason = unversioned.StatusReasonNotFound - message = "the server could not find the requested resource" - case http.StatusBadRequest: - reason = unversioned.StatusReasonBadRequest - message = "the server rejected our request for an unknown reason" - case http.StatusUnauthorized: - reason = unversioned.StatusReasonUnauthorized - message = "the server has asked for the client to provide credentials" - case http.StatusForbidden: - reason = unversioned.StatusReasonForbidden - message = "the server does not allow access to the requested resource" - case http.StatusMethodNotAllowed: - reason = unversioned.StatusReasonMethodNotAllowed - message = "the server does not allow this method on the requested resource" - case StatusUnprocessableEntity: - reason = unversioned.StatusReasonInvalid - message = "the server rejected our request due to an error in our request" - case StatusServerTimeout: - reason = unversioned.StatusReasonServerTimeout - message = "the server cannot complete the requested operation at this time, try again later" - case StatusTooManyRequests: - reason = unversioned.StatusReasonTimeout - message = "the server has received too many requests and has asked us to try again later" - default: - if code >= 500 { - reason = unversioned.StatusReasonInternalError - message = "an error on the server has prevented the request from succeeding" - } - } - switch { - case !qualifiedResource.IsEmpty() && len(name) > 0: - message = fmt.Sprintf("%s (%s %s %s)", message, strings.ToLower(verb), qualifiedResource.String(), name) - case !qualifiedResource.IsEmpty(): - message = fmt.Sprintf("%s (%s %s)", message, strings.ToLower(verb), qualifiedResource.String()) - } - var causes []unversioned.StatusCause - if isUnexpectedResponse { - causes = []unversioned.StatusCause{ - { - Type: unversioned.CauseTypeUnexpectedServerResponse, - Message: serverMessage, - }, - } - } else { - causes = nil - } - return &StatusError{unversioned.Status{ - Status: unversioned.StatusFailure, - Code: int32(code), - Reason: reason, - Details: &unversioned.StatusDetails{ - Group: qualifiedResource.Group, - Kind: qualifiedResource.Resource, - Name: name, - - Causes: causes, - RetryAfterSeconds: int32(retryAfterSeconds), - }, - Message: message, - }} -} - -// IsNotFound returns true if the specified error was created by NewNotFound. -func IsNotFound(err error) bool { - return reasonForError(err) == unversioned.StatusReasonNotFound -} - -// IsAlreadyExists determines if the err is an error which indicates that a specified resource already exists. -func IsAlreadyExists(err error) bool { - return reasonForError(err) == unversioned.StatusReasonAlreadyExists -} - -// IsConflict determines if the err is an error which indicates the provided update conflicts. 
-func IsConflict(err error) bool { - return reasonForError(err) == unversioned.StatusReasonConflict -} - -// IsInvalid determines if the err is an error which indicates the provided resource is not valid. -func IsInvalid(err error) bool { - return reasonForError(err) == unversioned.StatusReasonInvalid -} - -// IsMethodNotSupported determines if the err is an error which indicates the provided action could not -// be performed because it is not supported by the server. -func IsMethodNotSupported(err error) bool { - return reasonForError(err) == unversioned.StatusReasonMethodNotAllowed -} - -// IsBadRequest determines if err is an error which indicates that the request is invalid. -func IsBadRequest(err error) bool { - return reasonForError(err) == unversioned.StatusReasonBadRequest -} - -// IsUnauthorized determines if err is an error which indicates that the request is unauthorized and -// requires authentication by the user. -func IsUnauthorized(err error) bool { - return reasonForError(err) == unversioned.StatusReasonUnauthorized -} - -// IsForbidden determines if err is an error which indicates that the request is forbidden and cannot -// be completed as requested. -func IsForbidden(err error) bool { - return reasonForError(err) == unversioned.StatusReasonForbidden -} - -// IsServerTimeout determines if err is an error which indicates that the request needs to be retried -// by the client. -func IsServerTimeout(err error) bool { - return reasonForError(err) == unversioned.StatusReasonServerTimeout -} - -// IsUnexpectedServerError returns true if the server response was not in the expected API format, -// and may be the result of another HTTP actor. -func IsUnexpectedServerError(err error) bool { - switch t := err.(type) { - case APIStatus: - if d := t.Status().Details; d != nil { - for _, cause := range d.Causes { - if cause.Type == unversioned.CauseTypeUnexpectedServerResponse { - return true - } - } - } - } - return false -} - -// IsUnexpectedObjectError determines if err is due to an unexpected object from the master. -func IsUnexpectedObjectError(err error) bool { - _, ok := err.(*UnexpectedObjectError) - return err != nil && ok -} - -// SuggestsClientDelay returns true if this error suggests a client delay as well as the -// suggested seconds to wait, or false if the error does not imply a wait. -func SuggestsClientDelay(err error) (int, bool) { - switch t := err.(type) { - case APIStatus: - if t.Status().Details != nil { - switch t.Status().Reason { - case unversioned.StatusReasonServerTimeout, unversioned.StatusReasonTimeout: - return int(t.Status().Details.RetryAfterSeconds), true - } - } - } - return 0, false -} - -func reasonForError(err error) unversioned.StatusReason { - switch t := err.(type) { - case APIStatus: - return t.Status().Reason - } - return unversioned.StatusReasonUnknown -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/generate.go b/vendor/k8s.io/kubernetes/pkg/api/generate.go deleted file mode 100644 index 2cca5e52f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/generate.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "fmt" - - utilrand "k8s.io/kubernetes/pkg/util/rand" -) - -// NameGenerator generates names for objects. Some backends may have more information -// available to guide selection of new names and this interface hides those details. -type NameGenerator interface { - // GenerateName generates a valid name from the base name, adding a random suffix to the - // the base. If base is valid, the returned name must also be valid. The generator is - // responsible for knowing the maximum valid name length. - GenerateName(base string) string -} - -// GenerateName will resolve the object name of the provided ObjectMeta to a generated version if -// necessary. It expects that validation for ObjectMeta has already completed (that Base is a -// valid name) and that the NameGenerator generates a name that is also valid. -func GenerateName(u NameGenerator, meta *ObjectMeta) { - if len(meta.GenerateName) == 0 || len(meta.Name) != 0 { - return - } - meta.Name = u.GenerateName(meta.GenerateName) -} - -// simpleNameGenerator generates random names. -type simpleNameGenerator struct{} - -// SimpleNameGenerator is a generator that returns the name plus a random suffix of five alphanumerics -// when a name is requested. The string is guaranteed to not exceed the length of a standard Kubernetes -// name (63 characters) -var SimpleNameGenerator NameGenerator = simpleNameGenerator{} - -const ( - // TODO: make this flexible for non-core resources with alternate naming rules. - maxNameLength = 63 - randomLength = 5 - maxGeneratedNameLength = maxNameLength - randomLength -) - -func (simpleNameGenerator) GenerateName(base string) string { - if len(base) > maxGeneratedNameLength { - base = base[:maxGeneratedNameLength] - } - return fmt.Sprintf("%s%s", base, utilrand.String(randomLength)) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/helpers.go b/vendor/k8s.io/kubernetes/pkg/api/helpers.go deleted file mode 100644 index 1349ef62b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/helpers.go +++ /dev/null @@ -1,498 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package api - -import ( - "crypto/md5" - "encoding/json" - "fmt" - "reflect" - "strings" - "time" - - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util/sets" - - "github.com/davecgh/go-spew/spew" -) - -// Conversion error conveniently packages up errors in conversions. -type ConversionError struct { - In, Out interface{} - Message string -} - -// Return a helpful string about the error -func (c *ConversionError) Error() string { - return spew.Sprintf( - "Conversion error: %s. (in: %v(%+v) out: %v)", - c.Message, reflect.TypeOf(c.In), c.In, reflect.TypeOf(c.Out), - ) -} - -// Semantic can do semantic deep equality checks for api objects. -// Example: api.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true -var Semantic = conversion.EqualitiesOrDie( - func(a, b resource.Quantity) bool { - // Ignore formatting, only care that numeric value stayed the same. - // TODO: if we decide it's important, it should be safe to start comparing the format. - // - // Uninitialized quantities are equivalent to 0 quantities. - return a.Cmp(b) == 0 - }, - func(a, b unversioned.Time) bool { - return a.UTC() == b.UTC() - }, - func(a, b labels.Selector) bool { - return a.String() == b.String() - }, - func(a, b fields.Selector) bool { - return a.String() == b.String() - }, -) - -var standardResourceQuotaScopes = sets.NewString( - string(ResourceQuotaScopeTerminating), - string(ResourceQuotaScopeNotTerminating), - string(ResourceQuotaScopeBestEffort), - string(ResourceQuotaScopeNotBestEffort), -) - -// IsStandardResourceQuotaScope returns true if the scope is a standard value -func IsStandardResourceQuotaScope(str string) bool { - return standardResourceQuotaScopes.Has(str) -} - -var podObjectCountQuotaResources = sets.NewString( - string(ResourcePods), -) - -var podComputeQuotaResources = sets.NewString( - string(ResourceCPU), - string(ResourceMemory), - string(ResourceLimitsCPU), - string(ResourceLimitsMemory), - string(ResourceRequestsCPU), - string(ResourceRequestsMemory), -) - -// IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope -func IsResourceQuotaScopeValidForResource(scope ResourceQuotaScope, resource string) bool { - switch scope { - case ResourceQuotaScopeTerminating, ResourceQuotaScopeNotTerminating, ResourceQuotaScopeNotBestEffort: - return podObjectCountQuotaResources.Has(resource) || podComputeQuotaResources.Has(resource) - case ResourceQuotaScopeBestEffort: - return podObjectCountQuotaResources.Has(resource) - default: - return true - } -} - -var standardContainerResources = sets.NewString( - string(ResourceCPU), - string(ResourceMemory), -) - -// IsStandardContainerResourceName returns true if the container can make a resource request -// for the specified resource -func IsStandardContainerResourceName(str string) bool { - return standardContainerResources.Has(str) -} - -var standardLimitRangeTypes = sets.NewString( - string(LimitTypePod), - string(LimitTypeContainer), -) - -// IsStandardLimitRangeType returns true if the type is Pod or Container -func IsStandardLimitRangeType(str string) bool { - return standardLimitRangeTypes.Has(str) -} - -var standardQuotaResources = sets.NewString( - string(ResourceCPU), - string(ResourceMemory), - string(ResourceRequestsCPU), - string(ResourceRequestsMemory), - 
string(ResourceLimitsCPU), - string(ResourceLimitsMemory), - string(ResourcePods), - string(ResourceQuotas), - string(ResourceServices), - string(ResourceReplicationControllers), - string(ResourceSecrets), - string(ResourcePersistentVolumeClaims), - string(ResourceConfigMaps), - string(ResourceServicesNodePorts), - string(ResourceServicesLoadBalancers), -) - -// IsStandardQuotaResourceName returns true if the resource is known to -// the quota tracking system -func IsStandardQuotaResourceName(str string) bool { - return standardQuotaResources.Has(str) -} - -var standardResources = sets.NewString( - string(ResourceCPU), - string(ResourceMemory), - string(ResourceRequestsCPU), - string(ResourceRequestsMemory), - string(ResourceLimitsCPU), - string(ResourceLimitsMemory), - string(ResourcePods), - string(ResourceQuotas), - string(ResourceServices), - string(ResourceReplicationControllers), - string(ResourceSecrets), - string(ResourceConfigMaps), - string(ResourcePersistentVolumeClaims), - string(ResourceStorage), -) - -// IsStandardResourceName returns true if the resource is known to the system -func IsStandardResourceName(str string) bool { - return standardResources.Has(str) -} - -var integerResources = sets.NewString( - string(ResourcePods), - string(ResourceQuotas), - string(ResourceServices), - string(ResourceReplicationControllers), - string(ResourceSecrets), - string(ResourceConfigMaps), - string(ResourcePersistentVolumeClaims), - string(ResourceServicesNodePorts), - string(ResourceServicesLoadBalancers), -) - -// IsIntegerResourceName returns true if the resource is measured in integer values -func IsIntegerResourceName(str string) bool { - return integerResources.Has(str) -} - -// NewDeleteOptions returns a DeleteOptions indicating the resource should -// be deleted within the specified grace period. Use zero to indicate -// immediate deletion. If you would prefer to use the default grace period, -// use &api.DeleteOptions{} directly. -func NewDeleteOptions(grace int64) *DeleteOptions { - return &DeleteOptions{GracePeriodSeconds: &grace} -} - -// NewPreconditionDeleteOptions returns a DeleteOptions with a UID precondition set. -func NewPreconditionDeleteOptions(uid string) *DeleteOptions { - u := types.UID(uid) - p := Preconditions{UID: &u} - return &DeleteOptions{Preconditions: &p} -} - -// NewUIDPreconditions returns a Preconditions with UID set. -func NewUIDPreconditions(uid string) *Preconditions { - u := types.UID(uid) - return &Preconditions{UID: &u} -} - -// this function aims to check if the service's ClusterIP is set or not -// the objective is not to perform validation here -func IsServiceIPSet(service *Service) bool { - return service.Spec.ClusterIP != ClusterIPNone && service.Spec.ClusterIP != "" -} - -// this function aims to check if the service's cluster IP is requested or not -func IsServiceIPRequested(service *Service) bool { - return service.Spec.ClusterIP == "" -} - -var standardFinalizers = sets.NewString( - string(FinalizerKubernetes), - FinalizerOrphan, -) - -func IsStandardFinalizerName(str string) bool { - return standardFinalizers.Has(str) -} - -// SingleObject returns a ListOptions for watching a single object. 
-func SingleObject(meta ObjectMeta) ListOptions { - return ListOptions{ - FieldSelector: fields.OneTermEqualSelector("metadata.name", meta.Name), - ResourceVersion: meta.ResourceVersion, - } -} - -// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, -// only if they do not already exist -func AddToNodeAddresses(addresses *[]NodeAddress, addAddresses ...NodeAddress) { - for _, add := range addAddresses { - exists := false - for _, existing := range *addresses { - if existing.Address == add.Address && existing.Type == add.Type { - exists = true - break - } - } - if !exists { - *addresses = append(*addresses, add) - } - } -} - -func HashObject(obj runtime.Object, codec runtime.Codec) (string, error) { - data, err := runtime.Encode(codec, obj) - if err != nil { - return "", err - } - return fmt.Sprintf("%x", md5.Sum(data)), nil -} - -// TODO: make method on LoadBalancerStatus? -func LoadBalancerStatusEqual(l, r *LoadBalancerStatus) bool { - return ingressSliceEqual(l.Ingress, r.Ingress) -} - -func ingressSliceEqual(lhs, rhs []LoadBalancerIngress) bool { - if len(lhs) != len(rhs) { - return false - } - for i := range lhs { - if !ingressEqual(&lhs[i], &rhs[i]) { - return false - } - } - return true -} - -func ingressEqual(lhs, rhs *LoadBalancerIngress) bool { - if lhs.IP != rhs.IP { - return false - } - if lhs.Hostname != rhs.Hostname { - return false - } - return true -} - -// TODO: make method on LoadBalancerStatus? -func LoadBalancerStatusDeepCopy(lb *LoadBalancerStatus) *LoadBalancerStatus { - c := &LoadBalancerStatus{} - c.Ingress = make([]LoadBalancerIngress, len(lb.Ingress)) - for i := range lb.Ingress { - c.Ingress[i] = lb.Ingress[i] - } - return c -} - -// GetAccessModesAsString returns a string representation of an array of access modes. -// modes, when present, are always in the same order: RWO,ROX,RWX. -func GetAccessModesAsString(modes []PersistentVolumeAccessMode) string { - modes = removeDuplicateAccessModes(modes) - modesStr := []string{} - if containsAccessMode(modes, ReadWriteOnce) { - modesStr = append(modesStr, "RWO") - } - if containsAccessMode(modes, ReadOnlyMany) { - modesStr = append(modesStr, "ROX") - } - if containsAccessMode(modes, ReadWriteMany) { - modesStr = append(modesStr, "RWX") - } - return strings.Join(modesStr, ",") -} - -// GetAccessModesAsString returns an array of AccessModes from a string created by GetAccessModesAsString -func GetAccessModesFromString(modes string) []PersistentVolumeAccessMode { - strmodes := strings.Split(modes, ",") - accessModes := []PersistentVolumeAccessMode{} - for _, s := range strmodes { - s = strings.Trim(s, " ") - switch { - case s == "RWO": - accessModes = append(accessModes, ReadWriteOnce) - case s == "ROX": - accessModes = append(accessModes, ReadOnlyMany) - case s == "RWX": - accessModes = append(accessModes, ReadWriteMany) - } - } - return accessModes -} - -// removeDuplicateAccessModes returns an array of access modes without any duplicates -func removeDuplicateAccessModes(modes []PersistentVolumeAccessMode) []PersistentVolumeAccessMode { - accessModes := []PersistentVolumeAccessMode{} - for _, m := range modes { - if !containsAccessMode(accessModes, m) { - accessModes = append(accessModes, m) - } - } - return accessModes -} - -func containsAccessMode(modes []PersistentVolumeAccessMode, mode PersistentVolumeAccessMode) bool { - for _, m := range modes { - if m == mode { - return true - } - } - return false -} - -// ParseRFC3339 parses an RFC3339 date in either RFC3339Nano or RFC3339 format. 
-func ParseRFC3339(s string, nowFn func() unversioned.Time) (unversioned.Time, error) { - if t, timeErr := time.Parse(time.RFC3339Nano, s); timeErr == nil { - return unversioned.Time{Time: t}, nil - } - t, err := time.Parse(time.RFC3339, s) - if err != nil { - return unversioned.Time{}, err - } - return unversioned.Time{Time: t}, nil -} - -// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements -// labels.Selector. -func NodeSelectorRequirementsAsSelector(nsm []NodeSelectorRequirement) (labels.Selector, error) { - if len(nsm) == 0 { - return labels.Nothing(), nil - } - selector := labels.NewSelector() - for _, expr := range nsm { - var op labels.Operator - switch expr.Operator { - case NodeSelectorOpIn: - op = labels.InOperator - case NodeSelectorOpNotIn: - op = labels.NotInOperator - case NodeSelectorOpExists: - op = labels.ExistsOperator - case NodeSelectorOpDoesNotExist: - op = labels.DoesNotExistOperator - case NodeSelectorOpGt: - op = labels.GreaterThanOperator - case NodeSelectorOpLt: - op = labels.LessThanOperator - default: - return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) - } - r, err := labels.NewRequirement(expr.Key, op, sets.NewString(expr.Values...)) - if err != nil { - return nil, err - } - selector = selector.Add(*r) - } - return selector, nil -} - -const ( - // AffinityAnnotationKey represents the key of affinity data (json serialized) - // in the Annotations of a Pod. - AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity" - - // TolerationsAnnotationKey represents the key of tolerations data (json serialized) - // in the Annotations of a Pod. - TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations" - - // TaintsAnnotationKey represents the key of taints data (json serialized) - // in the Annotations of a Node. - TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints" - - // SeccompPodAnnotationKey represents the key of a seccomp profile applied - // to all containers of a pod. - SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" - - // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied - // to one container of a pod. - SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" -) - -// GetAffinityFromPod gets the json serialized affinity data from Pod.Annotations -// and converts it to the Affinity type in api. -func GetAffinityFromPodAnnotations(annotations map[string]string) (Affinity, error) { - var affinity Affinity - if len(annotations) > 0 && annotations[AffinityAnnotationKey] != "" { - err := json.Unmarshal([]byte(annotations[AffinityAnnotationKey]), &affinity) - if err != nil { - return affinity, err - } - } - return affinity, nil -} - -// GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations -// and converts it to the []Toleration type in api. -func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]Toleration, error) { - var tolerations []Toleration - if len(annotations) > 0 && annotations[TolerationsAnnotationKey] != "" { - err := json.Unmarshal([]byte(annotations[TolerationsAnnotationKey]), &tolerations) - if err != nil { - return tolerations, err - } - } - return tolerations, nil -} - -// GetTaintsFromNodeAnnotations gets the json serialized taints data from Pod.Annotations -// and converts it to the []Taint type in api. 
-func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]Taint, error) { - var taints []Taint - if len(annotations) > 0 && annotations[TaintsAnnotationKey] != "" { - err := json.Unmarshal([]byte(annotations[TaintsAnnotationKey]), &taints) - if err != nil { - return []Taint{}, err - } - } - return taints, nil -} - -// TolerationToleratesTaint checks if the toleration tolerates the taint. -func TolerationToleratesTaint(toleration Toleration, taint Taint) bool { - if len(toleration.Effect) != 0 && toleration.Effect != taint.Effect { - return false - } - - if toleration.Key != taint.Key { - return false - } - // TODO: Use proper defaulting when Toleration becomes a field of PodSpec - if (len(toleration.Operator) == 0 || toleration.Operator == TolerationOpEqual) && toleration.Value == taint.Value { - return true - } - if toleration.Operator == TolerationOpExists { - return true - } - return false - -} - -// TaintToleratedByTolerations checks if taint is tolerated by any of the tolerations. -func TaintToleratedByTolerations(taint Taint, tolerations []Toleration) bool { - tolerated := false - for _, toleration := range tolerations { - if TolerationToleratesTaint(toleration, taint) { - tolerated = true - break - } - } - return tolerated -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/install/install.go b/vendor/k8s.io/kubernetes/pkg/api/install/install.go deleted file mode 100644 index bed5f0791..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/install/install.go +++ /dev/null @@ -1,251 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the v1 monolithic api, making it available as an -// option to all of the API encoding/decoding machinery. 
-package install - -import ( - "fmt" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/apimachinery" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/watch/versioned" -) - -const importPrefix = "k8s.io/kubernetes/pkg/api" - -var accessor = meta.NewAccessor() - -// availableVersions lists all known external versions for this group from most preferred to least preferred -var availableVersions = []unversioned.GroupVersion{v1.SchemeGroupVersion} - -func init() { - registered.RegisterVersions(availableVersions) - externalVersions := []unversioned.GroupVersion{} - for _, v := range availableVersions { - if registered.IsAllowedVersion(v) { - externalVersions = append(externalVersions, v) - } - } - if len(externalVersions) == 0 { - glog.V(4).Infof("No version is registered for group %v", api.GroupName) - return - } - - if err := registered.EnableVersions(externalVersions...); err != nil { - glog.V(4).Infof("%v", err) - return - } - if err := enableVersions(externalVersions); err != nil { - glog.V(4).Infof("%v", err) - return - } -} - -// TODO: enableVersions should be centralized rather than spread in each API -// group. -// We can combine registered.RegisterVersions, registered.EnableVersions and -// registered.RegisterGroup once we have moved enableVersions there. -func enableVersions(externalVersions []unversioned.GroupVersion) error { - addVersionsToScheme(externalVersions...) - preferredExternalVersion := externalVersions[0] - - groupMeta := apimachinery.GroupMeta{ - GroupVersion: preferredExternalVersion, - GroupVersions: externalVersions, - RESTMapper: newRESTMapper(externalVersions), - SelfLinker: runtime.SelfLinker(accessor), - InterfacesFor: interfacesFor, - } - - if err := registered.RegisterGroup(groupMeta); err != nil { - return err - } - api.RegisterRESTMapper(groupMeta.RESTMapper) - return nil -} - -// userResources is a group of resources mostly used by a kubectl user -var userResources = []string{"rc", "svc", "pods", "pvc"} - -func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { - // the list of kinds that are scoped at the root of the api hierarchy - // if a kind is not enumerated here, it is assumed to have a namespace scope - rootScoped := sets.NewString( - "Node", - "Namespace", - "PersistentVolume", - "ComponentStatus", - ) - - // these kinds should be excluded from the list of resources - ignoredKinds := sets.NewString( - "ListOptions", - "DeleteOptions", - "Status", - "PodLogOptions", - "PodExecOptions", - "PodAttachOptions", - "PodProxyOptions", - "NodeProxyOptions", - "ServiceProxyOptions", - "ThirdPartyResource", - "ThirdPartyResourceData", - "ThirdPartyResourceList") - - mapper := api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) - // setup aliases for groups of resources - mapper.AddResourceAlias("all", userResources...) - - return mapper -} - -// InterfacesFor returns the default Codec and ResourceVersioner for a given version -// string, or an error if the version is not known. 
-func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { - switch version { - case v1.SchemeGroupVersion: - return &meta.VersionInterfaces{ - ObjectConvertor: api.Scheme, - MetadataAccessor: accessor, - }, nil - default: - g, _ := registered.Group(api.GroupName) - return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) - } -} - -func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { - // add the internal version to Scheme - api.AddToScheme(api.Scheme) - // add the enabled external versions to Scheme - for _, v := range externalVersions { - if !registered.IsEnabledVersion(v) { - glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) - continue - } - switch v { - case v1.SchemeGroupVersion: - v1.AddToScheme(api.Scheme) - } - } - - // This is a "fast-path" that avoids reflection for common types. It focuses on the objects that are - // converted the most in the cluster. - // TODO: generate one of these for every external API group - this is to prove the impact - api.Scheme.AddGenericConversionFunc(func(objA, objB interface{}, s conversion.Scope) (bool, error) { - switch a := objA.(type) { - case *v1.Pod: - switch b := objB.(type) { - case *api.Pod: - return true, v1.Convert_v1_Pod_To_api_Pod(a, b, s) - } - case *api.Pod: - switch b := objB.(type) { - case *v1.Pod: - return true, v1.Convert_api_Pod_To_v1_Pod(a, b, s) - } - - case *v1.Event: - switch b := objB.(type) { - case *api.Event: - return true, v1.Convert_v1_Event_To_api_Event(a, b, s) - } - case *api.Event: - switch b := objB.(type) { - case *v1.Event: - return true, v1.Convert_api_Event_To_v1_Event(a, b, s) - } - - case *v1.ReplicationController: - switch b := objB.(type) { - case *api.ReplicationController: - return true, v1.Convert_v1_ReplicationController_To_api_ReplicationController(a, b, s) - } - case *api.ReplicationController: - switch b := objB.(type) { - case *v1.ReplicationController: - return true, v1.Convert_api_ReplicationController_To_v1_ReplicationController(a, b, s) - } - - case *v1.Node: - switch b := objB.(type) { - case *api.Node: - return true, v1.Convert_v1_Node_To_api_Node(a, b, s) - } - case *api.Node: - switch b := objB.(type) { - case *v1.Node: - return true, v1.Convert_api_Node_To_v1_Node(a, b, s) - } - - case *v1.Namespace: - switch b := objB.(type) { - case *api.Namespace: - return true, v1.Convert_v1_Namespace_To_api_Namespace(a, b, s) - } - case *api.Namespace: - switch b := objB.(type) { - case *v1.Namespace: - return true, v1.Convert_api_Namespace_To_v1_Namespace(a, b, s) - } - - case *v1.Service: - switch b := objB.(type) { - case *api.Service: - return true, v1.Convert_v1_Service_To_api_Service(a, b, s) - } - case *api.Service: - switch b := objB.(type) { - case *v1.Service: - return true, v1.Convert_api_Service_To_v1_Service(a, b, s) - } - - case *v1.Endpoints: - switch b := objB.(type) { - case *api.Endpoints: - return true, v1.Convert_v1_Endpoints_To_api_Endpoints(a, b, s) - } - case *api.Endpoints: - switch b := objB.(type) { - case *v1.Endpoints: - return true, v1.Convert_api_Endpoints_To_v1_Endpoints(a, b, s) - } - - case *versioned.Event: - switch b := objB.(type) { - case *versioned.InternalEvent: - return true, versioned.Convert_versioned_Event_to_versioned_InternalEvent(a, b, s) - } - case *versioned.InternalEvent: - switch b := objB.(type) { - case *versioned.Event: - return true, versioned.Convert_versioned_InternalEvent_to_versioned_Event(a, b, s) - } - } - return false, 
nil - }) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/mapper.go b/vendor/k8s.io/kubernetes/pkg/api/mapper.go deleted file mode 100644 index 0216771ee..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/mapper.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "strings" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/util/sets" -) - -var RESTMapper meta.RESTMapper - -func init() { - RESTMapper = meta.MultiRESTMapper{} -} - -func RegisterRESTMapper(m meta.RESTMapper) { - RESTMapper = append(RESTMapper.(meta.MultiRESTMapper), m) -} - -func NewDefaultRESTMapper(defaultGroupVersions []unversioned.GroupVersion, interfacesFunc meta.VersionInterfacesFunc, - importPathPrefix string, ignoredKinds, rootScoped sets.String) *meta.DefaultRESTMapper { - - mapper := meta.NewDefaultRESTMapper(defaultGroupVersions, interfacesFunc) - // enumerate all supported versions, get the kinds, and register with the mapper how to address - // our resources. - for _, gv := range defaultGroupVersions { - for kind, oType := range Scheme.KnownTypes(gv) { - gvk := gv.WithKind(kind) - // TODO: Remove import path check. - // We check the import path because we currently stuff both "api" and "extensions" objects - // into the same group within Scheme since Scheme has no notion of groups yet. - if !strings.Contains(oType.PkgPath(), importPathPrefix) || ignoredKinds.Has(kind) { - continue - } - scope := meta.RESTScopeNamespace - if rootScoped.Has(kind) { - scope = meta.RESTScopeRoot - } - mapper.Add(gvk, scope) - } - } - return mapper -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta.go b/vendor/k8s.io/kubernetes/pkg/api/meta.go deleted file mode 100644 index 9d5dae2c5..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/meta.go +++ /dev/null @@ -1,129 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/meta/metatypes" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util" -) - -// FillObjectMetaSystemFields populates fields that are managed by the system on ObjectMeta. 
-func FillObjectMetaSystemFields(ctx Context, meta *ObjectMeta) { - meta.CreationTimestamp = unversioned.Now() - meta.UID = util.NewUUID() - meta.SelfLink = "" -} - -// HasObjectMetaSystemFieldValues returns true if fields that are managed by the system on ObjectMeta have values. -func HasObjectMetaSystemFieldValues(meta *ObjectMeta) bool { - return !meta.CreationTimestamp.Time.IsZero() || - len(meta.UID) != 0 -} - -// ObjectMetaFor returns a pointer to a provided object's ObjectMeta. -// TODO: allow runtime.Unknown to extract this object -// TODO: Remove this function and use meta.Accessor() instead. -func ObjectMetaFor(obj runtime.Object) (*ObjectMeta, error) { - v, err := conversion.EnforcePtr(obj) - if err != nil { - return nil, err - } - var meta *ObjectMeta - err = runtime.FieldPtr(v, "ObjectMeta", &meta) - return meta, err -} - -// ListMetaFor returns a pointer to a provided object's ListMeta, -// or an error if the object does not have that pointer. -// TODO: allow runtime.Unknown to extract this object -func ListMetaFor(obj runtime.Object) (*unversioned.ListMeta, error) { - v, err := conversion.EnforcePtr(obj) - if err != nil { - return nil, err - } - var meta *unversioned.ListMeta - err = runtime.FieldPtr(v, "ListMeta", &meta) - return meta, err -} - -func (obj *ObjectMeta) GetObjectMeta() meta.Object { return obj } - -func (obj *ObjectReference) GetObjectKind() unversioned.ObjectKind { return obj } - -// Namespace implements meta.Object for any object with an ObjectMeta typed field. Allows -// fast, direct access to metadata fields for API objects. -func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace } -func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace } -func (meta *ObjectMeta) GetName() string { return meta.Name } -func (meta *ObjectMeta) SetName(name string) { meta.Name = name } -func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName } -func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName } -func (meta *ObjectMeta) GetUID() types.UID { return meta.UID } -func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid } -func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion } -func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } -func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink } -func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } -func (meta *ObjectMeta) GetCreationTimestamp() unversioned.Time { return meta.CreationTimestamp } -func (meta *ObjectMeta) SetCreationTimestamp(creationTimestamp unversioned.Time) { - meta.CreationTimestamp = creationTimestamp -} -func (meta *ObjectMeta) GetDeletionTimestamp() *unversioned.Time { return meta.DeletionTimestamp } -func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *unversioned.Time) { - meta.DeletionTimestamp = deletionTimestamp -} -func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels } -func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels } -func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations } -func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations } -func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } -func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } - -func (meta *ObjectMeta) 
GetOwnerReferences() []metatypes.OwnerReference { - ret := make([]metatypes.OwnerReference, len(meta.OwnerReferences)) - for i := 0; i < len(meta.OwnerReferences); i++ { - ret[i].Kind = meta.OwnerReferences[i].Kind - ret[i].Name = meta.OwnerReferences[i].Name - ret[i].UID = meta.OwnerReferences[i].UID - ret[i].APIVersion = meta.OwnerReferences[i].APIVersion - if meta.OwnerReferences[i].Controller != nil { - value := *meta.OwnerReferences[i].Controller - ret[i].Controller = &value - } - } - return ret -} - -func (meta *ObjectMeta) SetOwnerReferences(references []metatypes.OwnerReference) { - newReferences := make([]OwnerReference, len(references)) - for i := 0; i < len(references); i++ { - newReferences[i].Kind = references[i].Kind - newReferences[i].Name = references[i].Name - newReferences[i].UID = references[i].UID - newReferences[i].APIVersion = references[i].APIVersion - if references[i].Controller != nil { - value := *references[i].Controller - newReferences[i].Controller = &value - } - } - meta.OwnerReferences = newReferences -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/doc.go b/vendor/k8s.io/kubernetes/pkg/api/meta/doc.go deleted file mode 100644 index 4a132184f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package meta provides functions for retrieving API metadata from objects -// belonging to the Kubernetes API -package meta diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/errors.go b/vendor/k8s.io/kubernetes/pkg/api/meta/errors.go deleted file mode 100644 index 2b89bcb8a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/errors.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package meta - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api/unversioned" -) - -// AmbiguousResourceError is returned if the RESTMapper finds multiple matches for a resource -type AmbiguousResourceError struct { - PartialResource unversioned.GroupVersionResource - - MatchingResources []unversioned.GroupVersionResource - MatchingKinds []unversioned.GroupVersionKind -} - -func (e *AmbiguousResourceError) Error() string { - switch { - case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0: - return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialResource, e.MatchingResources, e.MatchingKinds) - case len(e.MatchingKinds) > 0: - return fmt.Sprintf("%v matches multiple kinds %v", e.PartialResource, e.MatchingKinds) - case len(e.MatchingResources) > 0: - return fmt.Sprintf("%v matches multiple resources %v", e.PartialResource, e.MatchingResources) - - } - - return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialResource) -} - -func IsAmbiguousResourceError(err error) bool { - if err == nil { - return false - } - - _, ok := err.(*AmbiguousResourceError) - return ok -} - -// NoResourceMatchError is returned if the RESTMapper can't find any match for a resource -type NoResourceMatchError struct { - PartialResource unversioned.GroupVersionResource -} - -func (e *NoResourceMatchError) Error() string { - return fmt.Sprintf("no matches for %v", e.PartialResource) -} - -func IsNoResourceMatchError(err error) bool { - if err == nil { - return false - } - - _, ok := err.(*NoResourceMatchError) - return ok -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/help.go b/vendor/k8s.io/kubernetes/pkg/api/meta/help.go deleted file mode 100644 index cdc07930f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/help.go +++ /dev/null @@ -1,134 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "fmt" - "reflect" - - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" -) - -// IsListType returns true if the provided Object has a slice called Items -func IsListType(obj runtime.Object) bool { - _, err := GetItemsPtr(obj) - return err == nil -} - -// GetItemsPtr returns a pointer to the list object's Items member. -// If 'list' doesn't have an Items member, it's not really a list type -// and an error will be returned. -// This function will either return a pointer to a slice, or an error, but not both. 
-func GetItemsPtr(list runtime.Object) (interface{}, error) { - v, err := conversion.EnforcePtr(list) - if err != nil { - return nil, err - } - items := v.FieldByName("Items") - if !items.IsValid() { - return nil, fmt.Errorf("no Items field in %#v", list) - } - switch items.Kind() { - case reflect.Interface, reflect.Ptr: - target := reflect.TypeOf(items.Interface()).Elem() - if target.Kind() != reflect.Slice { - return nil, fmt.Errorf("items: Expected slice, got %s", target.Kind()) - } - return items.Interface(), nil - case reflect.Slice: - return items.Addr().Interface(), nil - default: - return nil, fmt.Errorf("items: Expected slice, got %s", items.Kind()) - } -} - -// ExtractList returns obj's Items element as an array of runtime.Objects. -// Returns an error if obj is not a List type (does not have an Items member). -func ExtractList(obj runtime.Object) ([]runtime.Object, error) { - itemsPtr, err := GetItemsPtr(obj) - if err != nil { - return nil, err - } - items, err := conversion.EnforcePtr(itemsPtr) - if err != nil { - return nil, err - } - list := make([]runtime.Object, items.Len()) - for i := range list { - raw := items.Index(i) - switch item := raw.Interface().(type) { - case runtime.RawExtension: - switch { - case item.Object != nil: - list[i] = item.Object - case item.Raw != nil: - // TODO: Set ContentEncoding and ContentType correctly. - list[i] = &runtime.Unknown{Raw: item.Raw} - default: - list[i] = nil - } - case runtime.Object: - list[i] = item - default: - var found bool - if list[i], found = raw.Addr().Interface().(runtime.Object); !found { - return nil, fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind()) - } - } - } - return list, nil -} - -// objectSliceType is the type of a slice of Objects -var objectSliceType = reflect.TypeOf([]runtime.Object{}) - -// SetList sets the given list object's Items member have the elements given in -// objects. -// Returns an error if list is not a List type (does not have an Items member), -// or if any of the objects are not of the right type. -func SetList(list runtime.Object, objects []runtime.Object) error { - itemsPtr, err := GetItemsPtr(list) - if err != nil { - return err - } - items, err := conversion.EnforcePtr(itemsPtr) - if err != nil { - return err - } - if items.Type() == objectSliceType { - items.Set(reflect.ValueOf(objects)) - return nil - } - slice := reflect.MakeSlice(items.Type(), len(objects), len(objects)) - for i := range objects { - dest := slice.Index(i) - src, err := conversion.EnforcePtr(objects[i]) - if err != nil { - return err - } - if src.Type().AssignableTo(dest.Type()) { - dest.Set(src) - } else if src.Type().ConvertibleTo(dest.Type()) { - dest.Set(src.Convert(dest.Type())) - } else { - return fmt.Errorf("item[%d]: can't assign or convert %v into %v", i, src.Type(), dest.Type()) - } - } - items.Set(slice) - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/interfaces.go b/vendor/k8s.io/kubernetes/pkg/api/meta/interfaces.go deleted file mode 100644 index 286bdc0dd..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/interfaces.go +++ /dev/null @@ -1,180 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "k8s.io/kubernetes/pkg/api/meta/metatypes" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/types" -) - -// VersionInterfaces contains the interfaces one should use for dealing with types of a particular version. -type VersionInterfaces struct { - runtime.ObjectConvertor - MetadataAccessor -} - -type ObjectMetaAccessor interface { - GetObjectMeta() Object -} - -// Object lets you work with object metadata from any of the versioned or -// internal API objects. Attempting to set or retrieve a field on an object that does -// not support that field (Name, UID, Namespace on lists) will be a no-op and return -// a default value. -type Object interface { - GetNamespace() string - SetNamespace(namespace string) - GetName() string - SetName(name string) - GetGenerateName() string - SetGenerateName(name string) - GetUID() types.UID - SetUID(uid types.UID) - GetResourceVersion() string - SetResourceVersion(version string) - GetSelfLink() string - SetSelfLink(selfLink string) - GetCreationTimestamp() unversioned.Time - SetCreationTimestamp(timestamp unversioned.Time) - GetDeletionTimestamp() *unversioned.Time - SetDeletionTimestamp(timestamp *unversioned.Time) - GetLabels() map[string]string - SetLabels(labels map[string]string) - GetAnnotations() map[string]string - SetAnnotations(annotations map[string]string) - GetFinalizers() []string - SetFinalizers(finalizers []string) - GetOwnerReferences() []metatypes.OwnerReference - SetOwnerReferences([]metatypes.OwnerReference) -} - -var _ Object = &runtime.Unstructured{} - -type ListMetaAccessor interface { - GetListMeta() List -} - -// List lets you work with list metadata from any of the versioned or -// internal API objects. Attempting to set or retrieve a field on an object that does -// not support that field will be a no-op and return a default value. -type List unversioned.List - -// Type exposes the type and APIVersion of versioned or internal API objects. -type Type unversioned.Type - -// MetadataAccessor lets you work with object and list metadata from any of the versioned or -// internal API objects. Attempting to set or retrieve a field on an object that does -// not support that field (Name, UID, Namespace on lists) will be a no-op and return -// a default value. -// -// MetadataAccessor exposes Interface in a way that can be used with multiple objects. 
-type MetadataAccessor interface { - APIVersion(obj runtime.Object) (string, error) - SetAPIVersion(obj runtime.Object, version string) error - - Kind(obj runtime.Object) (string, error) - SetKind(obj runtime.Object, kind string) error - - Namespace(obj runtime.Object) (string, error) - SetNamespace(obj runtime.Object, namespace string) error - - Name(obj runtime.Object) (string, error) - SetName(obj runtime.Object, name string) error - - GenerateName(obj runtime.Object) (string, error) - SetGenerateName(obj runtime.Object, name string) error - - UID(obj runtime.Object) (types.UID, error) - SetUID(obj runtime.Object, uid types.UID) error - - SelfLink(obj runtime.Object) (string, error) - SetSelfLink(obj runtime.Object, selfLink string) error - - Labels(obj runtime.Object) (map[string]string, error) - SetLabels(obj runtime.Object, labels map[string]string) error - - Annotations(obj runtime.Object) (map[string]string, error) - SetAnnotations(obj runtime.Object, annotations map[string]string) error - - runtime.ResourceVersioner -} - -type RESTScopeName string - -const ( - RESTScopeNameNamespace RESTScopeName = "namespace" - RESTScopeNameRoot RESTScopeName = "root" -) - -// RESTScope contains the information needed to deal with REST resources that are in a resource hierarchy -type RESTScope interface { - // Name of the scope - Name() RESTScopeName - // ParamName is the optional name of the parameter that should be inserted in the resource url - // If empty, no param will be inserted - ParamName() string - // ArgumentName is the optional name that should be used for the variable holding the value. - ArgumentName() string - // ParamDescription is the optional description to use to document the parameter in api documentation - ParamDescription() string -} - -// RESTMapping contains the information needed to deal with objects of a specific -// resource and kind in a RESTful manner. -type RESTMapping struct { - // Resource is a string representing the name of this resource as a REST client would see it - Resource string - - GroupVersionKind unversioned.GroupVersionKind - - // Scope contains the information needed to deal with REST Resources that are in a resource hierarchy - Scope RESTScope - - runtime.ObjectConvertor - MetadataAccessor -} - -// RESTMapper allows clients to map resources to kind, and map kind and version -// to interfaces for manipulating those objects. It is primarily intended for -// consumers of Kubernetes compatible REST APIs as defined in docs/devel/api-conventions.md. -// -// The Kubernetes API provides versioned resources and object kinds which are scoped -// to API groups. In other words, kinds and resources should not be assumed to be -// unique across groups. -// -// TODO(caesarxuchao): Add proper multi-group support so that kinds & resources are -// scoped to groups. See http://issues.k8s.io/12413 and http://issues.k8s.io/10009. -type RESTMapper interface { - // KindFor takes a partial resource and returns back the single match. Returns an error if there are multiple matches - KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) - - // KindsFor takes a partial resource and returns back the list of potential kinds in priority order - KindsFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionKind, error) - - // ResourceFor takes a partial resource and returns back the single match. 
Returns an error if there are multiple matches - ResourceFor(input unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error) - - // ResourcesFor takes a partial resource and returns back the list of potential resource in priority order - ResourcesFor(input unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) - - RESTMapping(gk unversioned.GroupKind, versions ...string) (*RESTMapping, error) - - AliasesForResource(resource string) ([]string, bool) - ResourceSingularizer(resource string) (singular string, err error) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/meta.go b/vendor/k8s.io/kubernetes/pkg/api/meta/meta.go deleted file mode 100644 index 5f185f39f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/meta.go +++ /dev/null @@ -1,656 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "fmt" - "reflect" - - "k8s.io/kubernetes/pkg/api/meta/metatypes" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/types" - - "github.com/golang/glog" -) - -func ListAccessor(obj interface{}) (List, error) { - if listMetaAccessor, ok := obj.(ListMetaAccessor); ok { - if om := listMetaAccessor.GetListMeta(); om != nil { - return om, nil - } - } - if listMetaAccessor, ok := obj.(unversioned.ListMetaAccessor); ok { - if om := listMetaAccessor.GetListMeta(); om != nil { - return om, nil - } - } - // we may get passed an object that is directly portable to List - if list, ok := obj.(List); ok { - return list, nil - } - glog.V(4).Infof("Calling ListAccessor on non-internal object: %v", reflect.TypeOf(obj)) - // legacy path for objects that do not implement List and ListMetaAccessor via - // reflection - very slow code path. - v, err := conversion.EnforcePtr(obj) - if err != nil { - return nil, err - } - t := v.Type() - if v.Kind() != reflect.Struct { - return nil, fmt.Errorf("expected struct, but got %v: %v (%#v)", v.Kind(), t, v.Interface()) - } - a := &genericAccessor{} - listMeta := v.FieldByName("ListMeta") - if listMeta.IsValid() { - // look for the ListMeta fields - if err := extractFromListMeta(listMeta, a); err != nil { - return nil, fmt.Errorf("unable to find list fields on %#v: %v", listMeta, err) - } - } else { - return nil, fmt.Errorf("unable to find listMeta on %#v", v) - } - return a, nil -} - -// Accessor takes an arbitrary object pointer and returns meta.Interface. -// obj must be a pointer to an API type. An error is returned if the minimum -// required fields are missing. Fields that are not required return the default -// value and are a no-op if set. 
-func Accessor(obj interface{}) (Object, error) { - if objectMetaAccessor, ok := obj.(ObjectMetaAccessor); ok { - if om := objectMetaAccessor.GetObjectMeta(); om != nil { - return om, nil - } - } - // we may get passed an object that is directly portable to Object - if object, ok := obj.(Object); ok { - return object, nil - } - - glog.V(4).Infof("Calling Accessor on non-internal object: %v", reflect.TypeOf(obj)) - // legacy path for objects that do not implement Object and ObjectMetaAccessor via - // reflection - very slow code path. - v, err := conversion.EnforcePtr(obj) - if err != nil { - return nil, err - } - t := v.Type() - if v.Kind() != reflect.Struct { - return nil, fmt.Errorf("expected struct, but got %v: %v (%#v)", v.Kind(), t, v.Interface()) - } - - typeMeta := v.FieldByName("TypeMeta") - if !typeMeta.IsValid() { - return nil, fmt.Errorf("struct %v lacks embedded TypeMeta type", t) - } - - a := &genericAccessor{} - if err := extractFromTypeMeta(typeMeta, a); err != nil { - return nil, fmt.Errorf("unable to find type fields on %#v: %v", typeMeta, err) - } - - objectMeta := v.FieldByName("ObjectMeta") - if objectMeta.IsValid() { - // look for the ObjectMeta fields - if err := extractFromObjectMeta(objectMeta, a); err != nil { - return nil, fmt.Errorf("unable to find object fields on %#v: %v", objectMeta, err) - } - } else { - listMeta := v.FieldByName("ListMeta") - if listMeta.IsValid() { - // look for the ListMeta fields - if err := extractFromListMeta(listMeta, a); err != nil { - return nil, fmt.Errorf("unable to find list fields on %#v: %v", listMeta, err) - } - } else { - // look for the older TypeMeta with all metadata - if err := extractFromObjectMeta(typeMeta, a); err != nil { - return nil, fmt.Errorf("unable to find object fields on %#v: %v", typeMeta, err) - } - } - } - - return a, nil -} - -// TypeAccessor returns an interface that allows retrieving and modifying the APIVersion -// and Kind of an in-memory internal object. -// TODO: this interface is used to test code that does not have ObjectMeta or ListMeta -// in round tripping (objects which can use apiVersion/kind, but do not fit the Kube -// api conventions). 
-func TypeAccessor(obj interface{}) (Type, error) { - if typed, ok := obj.(runtime.Object); ok { - return objectAccessor{typed}, nil - } - v, err := conversion.EnforcePtr(obj) - if err != nil { - return nil, err - } - t := v.Type() - if v.Kind() != reflect.Struct { - return nil, fmt.Errorf("expected struct, but got %v: %v (%#v)", v.Kind(), t, v.Interface()) - } - - typeMeta := v.FieldByName("TypeMeta") - if !typeMeta.IsValid() { - return nil, fmt.Errorf("struct %v lacks embedded TypeMeta type", t) - } - a := &genericAccessor{} - if err := extractFromTypeMeta(typeMeta, a); err != nil { - return nil, fmt.Errorf("unable to find type fields on %#v: %v", typeMeta, err) - } - return a, nil -} - -type objectAccessor struct { - runtime.Object -} - -func (obj objectAccessor) GetKind() string { - return obj.GetObjectKind().GroupVersionKind().Kind -} - -func (obj objectAccessor) SetKind(kind string) { - gvk := obj.GetObjectKind().GroupVersionKind() - gvk.Kind = kind - obj.GetObjectKind().SetGroupVersionKind(gvk) -} - -func (obj objectAccessor) GetAPIVersion() string { - return obj.GetObjectKind().GroupVersionKind().GroupVersion().String() -} - -func (obj objectAccessor) SetAPIVersion(version string) { - gvk := obj.GetObjectKind().GroupVersionKind() - gv, err := unversioned.ParseGroupVersion(version) - if err != nil { - gv = unversioned.GroupVersion{Version: version} - } - gvk.Group, gvk.Version = gv.Group, gv.Version - obj.GetObjectKind().SetGroupVersionKind(gvk) -} - -// NewAccessor returns a MetadataAccessor that can retrieve -// or manipulate resource version on objects derived from core API -// metadata concepts. -func NewAccessor() MetadataAccessor { - return resourceAccessor{} -} - -// resourceAccessor implements ResourceVersioner and SelfLinker. -type resourceAccessor struct{} - -func (resourceAccessor) Kind(obj runtime.Object) (string, error) { - return objectAccessor{obj}.GetKind(), nil -} - -func (resourceAccessor) SetKind(obj runtime.Object, kind string) error { - objectAccessor{obj}.SetKind(kind) - return nil -} - -func (resourceAccessor) APIVersion(obj runtime.Object) (string, error) { - return objectAccessor{obj}.GetAPIVersion(), nil -} - -func (resourceAccessor) SetAPIVersion(obj runtime.Object, version string) error { - objectAccessor{obj}.SetAPIVersion(version) - return nil -} - -func (resourceAccessor) Namespace(obj runtime.Object) (string, error) { - accessor, err := Accessor(obj) - if err != nil { - return "", err - } - return accessor.GetNamespace(), nil -} - -func (resourceAccessor) SetNamespace(obj runtime.Object, namespace string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetNamespace(namespace) - return nil -} - -func (resourceAccessor) Name(obj runtime.Object) (string, error) { - accessor, err := Accessor(obj) - if err != nil { - return "", err - } - return accessor.GetName(), nil -} - -func (resourceAccessor) SetName(obj runtime.Object, name string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetName(name) - return nil -} - -func (resourceAccessor) GenerateName(obj runtime.Object) (string, error) { - accessor, err := Accessor(obj) - if err != nil { - return "", err - } - return accessor.GetGenerateName(), nil -} - -func (resourceAccessor) SetGenerateName(obj runtime.Object, name string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetGenerateName(name) - return nil -} - -func (resourceAccessor) UID(obj runtime.Object) (types.UID, error) { - 
accessor, err := Accessor(obj) - if err != nil { - return "", err - } - return accessor.GetUID(), nil -} - -func (resourceAccessor) SetUID(obj runtime.Object, uid types.UID) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetUID(uid) - return nil -} - -func (resourceAccessor) SelfLink(obj runtime.Object) (string, error) { - accessor, err := Accessor(obj) - if err != nil { - return "", err - } - return accessor.GetSelfLink(), nil -} - -func (resourceAccessor) SetSelfLink(obj runtime.Object, selfLink string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetSelfLink(selfLink) - return nil -} - -func (resourceAccessor) Labels(obj runtime.Object) (map[string]string, error) { - accessor, err := Accessor(obj) - if err != nil { - return nil, err - } - return accessor.GetLabels(), nil -} - -func (resourceAccessor) SetLabels(obj runtime.Object, labels map[string]string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetLabels(labels) - return nil -} - -func (resourceAccessor) Annotations(obj runtime.Object) (map[string]string, error) { - accessor, err := Accessor(obj) - if err != nil { - return nil, err - } - return accessor.GetAnnotations(), nil -} - -func (resourceAccessor) SetAnnotations(obj runtime.Object, annotations map[string]string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetAnnotations(annotations) - return nil -} - -func (resourceAccessor) ResourceVersion(obj runtime.Object) (string, error) { - accessor, err := Accessor(obj) - if err != nil { - return "", err - } - return accessor.GetResourceVersion(), nil -} - -func (resourceAccessor) SetResourceVersion(obj runtime.Object, version string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetResourceVersion(version) - return nil -} - -// extractFromOwnerReference extracts v to o. v is the OwnerReferences field of an object. -func extractFromOwnerReference(v reflect.Value, o *metatypes.OwnerReference) error { - if err := runtime.Field(v, "APIVersion", &o.APIVersion); err != nil { - return err - } - if err := runtime.Field(v, "Kind", &o.Kind); err != nil { - return err - } - if err := runtime.Field(v, "Name", &o.Name); err != nil { - return err - } - if err := runtime.Field(v, "UID", &o.UID); err != nil { - return err - } - var controllerPtr *bool - if err := runtime.Field(v, "Controller", &controllerPtr); err != nil { - return err - } - if controllerPtr != nil { - controller := *controllerPtr - o.Controller = &controller - } - return nil -} - -// setOwnerReference sets v to o. v is the OwnerReferences field of an object. -func setOwnerReference(v reflect.Value, o *metatypes.OwnerReference) error { - if err := runtime.SetField(o.APIVersion, v, "APIVersion"); err != nil { - return err - } - if err := runtime.SetField(o.Kind, v, "Kind"); err != nil { - return err - } - if err := runtime.SetField(o.Name, v, "Name"); err != nil { - return err - } - if err := runtime.SetField(o.UID, v, "UID"); err != nil { - return err - } - if o.Controller != nil { - controller := *(o.Controller) - if err := runtime.SetField(&controller, v, "Controller"); err != nil { - return err - } - } - return nil -} - -// genericAccessor contains pointers to strings that can modify an arbitrary -// struct and implements the Accessor interface. 
-type genericAccessor struct { - namespace *string - name *string - generateName *string - uid *types.UID - apiVersion *string - kind *string - resourceVersion *string - selfLink *string - creationTimestamp *unversioned.Time - deletionTimestamp **unversioned.Time - labels *map[string]string - annotations *map[string]string - ownerReferences reflect.Value - finalizers *[]string -} - -func (a genericAccessor) GetNamespace() string { - if a.namespace == nil { - return "" - } - return *a.namespace -} - -func (a genericAccessor) SetNamespace(namespace string) { - if a.namespace == nil { - return - } - *a.namespace = namespace -} - -func (a genericAccessor) GetName() string { - if a.name == nil { - return "" - } - return *a.name -} - -func (a genericAccessor) SetName(name string) { - if a.name == nil { - return - } - *a.name = name -} - -func (a genericAccessor) GetGenerateName() string { - if a.generateName == nil { - return "" - } - return *a.generateName -} - -func (a genericAccessor) SetGenerateName(generateName string) { - if a.generateName == nil { - return - } - *a.generateName = generateName -} - -func (a genericAccessor) GetUID() types.UID { - if a.uid == nil { - return "" - } - return *a.uid -} - -func (a genericAccessor) SetUID(uid types.UID) { - if a.uid == nil { - return - } - *a.uid = uid -} - -func (a genericAccessor) GetAPIVersion() string { - return *a.apiVersion -} - -func (a genericAccessor) SetAPIVersion(version string) { - *a.apiVersion = version -} - -func (a genericAccessor) GetKind() string { - return *a.kind -} - -func (a genericAccessor) SetKind(kind string) { - *a.kind = kind -} - -func (a genericAccessor) GetResourceVersion() string { - return *a.resourceVersion -} - -func (a genericAccessor) SetResourceVersion(version string) { - *a.resourceVersion = version -} - -func (a genericAccessor) GetSelfLink() string { - return *a.selfLink -} - -func (a genericAccessor) SetSelfLink(selfLink string) { - *a.selfLink = selfLink -} - -func (a genericAccessor) GetCreationTimestamp() unversioned.Time { - return *a.creationTimestamp -} - -func (a genericAccessor) SetCreationTimestamp(timestamp unversioned.Time) { - *a.creationTimestamp = timestamp -} - -func (a genericAccessor) GetDeletionTimestamp() *unversioned.Time { - return *a.deletionTimestamp -} - -func (a genericAccessor) SetDeletionTimestamp(timestamp *unversioned.Time) { - *a.deletionTimestamp = timestamp -} - -func (a genericAccessor) GetLabels() map[string]string { - if a.labels == nil { - return nil - } - return *a.labels -} - -func (a genericAccessor) SetLabels(labels map[string]string) { - *a.labels = labels -} - -func (a genericAccessor) GetAnnotations() map[string]string { - if a.annotations == nil { - return nil - } - return *a.annotations -} - -func (a genericAccessor) SetAnnotations(annotations map[string]string) { - if a.annotations == nil { - emptyAnnotations := make(map[string]string) - a.annotations = &emptyAnnotations - } - *a.annotations = annotations -} - -func (a genericAccessor) GetFinalizers() []string { - if a.finalizers == nil { - return nil - } - return *a.finalizers -} - -func (a genericAccessor) SetFinalizers(finalizers []string) { - *a.finalizers = finalizers -} - -func (a genericAccessor) GetOwnerReferences() []metatypes.OwnerReference { - var ret []metatypes.OwnerReference - s := a.ownerReferences - if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { - glog.Errorf("expect %v to be a pointer to slice", s) - return ret - } - s = s.Elem() - // Set the capacity to one element greater 
to avoid copy if the caller later append an element. - ret = make([]metatypes.OwnerReference, s.Len(), s.Len()+1) - for i := 0; i < s.Len(); i++ { - if err := extractFromOwnerReference(s.Index(i), &ret[i]); err != nil { - glog.Errorf("extractFromOwnerReference failed: %v", err) - return ret - } - } - return ret -} - -func (a genericAccessor) SetOwnerReferences(references []metatypes.OwnerReference) { - s := a.ownerReferences - if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { - glog.Errorf("expect %v to be a pointer to slice", s) - } - s = s.Elem() - newReferences := reflect.MakeSlice(s.Type(), len(references), len(references)) - for i := 0; i < len(references); i++ { - if err := setOwnerReference(newReferences.Index(i), &references[i]); err != nil { - glog.Errorf("setOwnerReference failed: %v", err) - return - } - } - s.Set(newReferences) -} - -// extractFromTypeMeta extracts pointers to version and kind fields from an object -func extractFromTypeMeta(v reflect.Value, a *genericAccessor) error { - if err := runtime.FieldPtr(v, "APIVersion", &a.apiVersion); err != nil { - return err - } - if err := runtime.FieldPtr(v, "Kind", &a.kind); err != nil { - return err - } - return nil -} - -// extractFromObjectMeta extracts pointers to metadata fields from an object -func extractFromObjectMeta(v reflect.Value, a *genericAccessor) error { - if err := runtime.FieldPtr(v, "Namespace", &a.namespace); err != nil { - return err - } - if err := runtime.FieldPtr(v, "Name", &a.name); err != nil { - return err - } - if err := runtime.FieldPtr(v, "GenerateName", &a.generateName); err != nil { - return err - } - if err := runtime.FieldPtr(v, "UID", &a.uid); err != nil { - return err - } - if err := runtime.FieldPtr(v, "ResourceVersion", &a.resourceVersion); err != nil { - return err - } - if err := runtime.FieldPtr(v, "SelfLink", &a.selfLink); err != nil { - return err - } - if err := runtime.FieldPtr(v, "Labels", &a.labels); err != nil { - return err - } - if err := runtime.FieldPtr(v, "Annotations", &a.annotations); err != nil { - return err - } - if err := runtime.FieldPtr(v, "Finalizers", &a.finalizers); err != nil { - return err - } - ownerReferences := v.FieldByName("OwnerReferences") - if !ownerReferences.IsValid() { - return fmt.Errorf("struct %#v lacks OwnerReferences type", v) - } - if ownerReferences.Kind() != reflect.Slice { - return fmt.Errorf("expect %v to be a slice", ownerReferences.Kind()) - } - a.ownerReferences = ownerReferences.Addr() - return nil -} - -// extractFromObjectMeta extracts pointers to metadata fields from a list object -func extractFromListMeta(v reflect.Value, a *genericAccessor) error { - if err := runtime.FieldPtr(v, "ResourceVersion", &a.resourceVersion); err != nil { - return err - } - if err := runtime.FieldPtr(v, "SelfLink", &a.selfLink); err != nil { - return err - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/metatypes/types.go b/vendor/k8s.io/kubernetes/pkg/api/meta/metatypes/types.go deleted file mode 100644 index e7ac3e08d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/metatypes/types.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// The types defined in this package are used by the meta package to represent -// the in-memory version of objects. We cannot reuse the __internal version of -// API objects because it causes import cycle. -package metatypes - -import "k8s.io/kubernetes/pkg/types" - -type OwnerReference struct { - APIVersion string - Kind string - UID types.UID - Name string - Controller *bool -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/multirestmapper.go b/vendor/k8s.io/kubernetes/pkg/api/meta/multirestmapper.go deleted file mode 100644 index b720f8fa2..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/multirestmapper.go +++ /dev/null @@ -1,200 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "fmt" - "strings" - - "k8s.io/kubernetes/pkg/api/unversioned" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/sets" -) - -// MultiRESTMapper is a wrapper for multiple RESTMappers. -type MultiRESTMapper []RESTMapper - -func (m MultiRESTMapper) String() string { - nested := []string{} - for _, t := range m { - currString := fmt.Sprintf("%v", t) - splitStrings := strings.Split(currString, "\n") - nested = append(nested, strings.Join(splitStrings, "\n\t")) - } - - return fmt.Sprintf("MultiRESTMapper{\n\t%s\n}", strings.Join(nested, "\n\t")) -} - -// ResourceSingularizer converts a REST resource name from plural to singular (e.g., from pods to pod) -// This implementation supports multiple REST schemas and return the first match. 
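Illustrative sketch (not part of the diff): composing the MultiRESTMapper defined below from two DefaultRESTMappers so lookups fall through the delegates in order. The group/version strings and registered kinds are arbitrary, and the stub VersionInterfacesFunc exists only because the constructor requires one.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/meta"
	"k8s.io/kubernetes/pkg/api/unversioned"
)

func main() {
	coreGV := unversioned.GroupVersion{Group: "", Version: "v1"}
	extGV := unversioned.GroupVersion{Group: "extensions", Version: "v1beta1"}

	// Only RESTMapping needs real version interfaces, so an empty stub is enough here.
	stub := func(unversioned.GroupVersion) (*meta.VersionInterfaces, error) {
		return &meta.VersionInterfaces{}, nil
	}

	coreMapper := meta.NewDefaultRESTMapper([]unversioned.GroupVersion{coreGV}, stub)
	coreMapper.Add(unversioned.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}, meta.RESTScopeNamespace)

	extMapper := meta.NewDefaultRESTMapper([]unversioned.GroupVersion{extGV}, stub)
	extMapper.Add(unversioned.GroupVersionKind{Group: "extensions", Version: "v1beta1", Kind: "Deployment"}, meta.RESTScopeNamespace)

	// Each call tries the delegates in order; "no match" errors are skipped.
	mapper := meta.MultiRESTMapper{coreMapper, extMapper}

	singular, _ := mapper.ResourceSingularizer("pods")
	fmt.Println(singular) // pod

	gvk, _ := mapper.KindFor(unversioned.GroupVersionResource{Resource: "deployments"})
	fmt.Println(gvk) // the Deployment kind registered in the extensions mapper
}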
-func (m MultiRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { - for _, t := range m { - singular, err = t.ResourceSingularizer(resource) - if err == nil { - return - } - } - return -} - -func (m MultiRESTMapper) ResourcesFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) { - allGVRs := []unversioned.GroupVersionResource{} - for _, t := range m { - gvrs, err := t.ResourcesFor(resource) - // ignore "no match" errors, but any other error percolates back up - if IsNoResourceMatchError(err) { - continue - } - if err != nil { - return nil, err - } - - // walk the existing values to de-dup - for _, curr := range gvrs { - found := false - for _, existing := range allGVRs { - if curr == existing { - found = true - break - } - } - - if !found { - allGVRs = append(allGVRs, curr) - } - } - } - - if len(allGVRs) == 0 { - return nil, &NoResourceMatchError{PartialResource: resource} - } - - return allGVRs, nil -} - -func (m MultiRESTMapper) KindsFor(resource unversioned.GroupVersionResource) (gvk []unversioned.GroupVersionKind, err error) { - allGVKs := []unversioned.GroupVersionKind{} - for _, t := range m { - gvks, err := t.KindsFor(resource) - // ignore "no match" errors, but any other error percolates back up - if IsNoResourceMatchError(err) { - continue - } - if err != nil { - return nil, err - } - - // walk the existing values to de-dup - for _, curr := range gvks { - found := false - for _, existing := range allGVKs { - if curr == existing { - found = true - break - } - } - - if !found { - allGVKs = append(allGVKs, curr) - } - } - } - - if len(allGVKs) == 0 { - return nil, &NoResourceMatchError{PartialResource: resource} - } - - return allGVKs, nil -} - -func (m MultiRESTMapper) ResourceFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error) { - resources, err := m.ResourcesFor(resource) - if err != nil { - return unversioned.GroupVersionResource{}, err - } - if len(resources) == 1 { - return resources[0], nil - } - - return unversioned.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources} -} - -func (m MultiRESTMapper) KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) { - kinds, err := m.KindsFor(resource) - if err != nil { - return unversioned.GroupVersionKind{}, err - } - if len(kinds) == 1 { - return kinds[0], nil - } - - return unversioned.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds} -} - -// RESTMapping provides the REST mapping for the resource based on the -// kind and version. This implementation supports multiple REST schemas and -// return the first match. -func (m MultiRESTMapper) RESTMapping(gk unversioned.GroupKind, versions ...string) (*RESTMapping, error) { - allMappings := []*RESTMapping{} - errors := []error{} - - for _, t := range m { - currMapping, err := t.RESTMapping(gk, versions...) 
- // ignore "no match" errors, but any other error percolates back up - if IsNoResourceMatchError(err) { - continue - } - if err != nil { - errors = append(errors, err) - continue - } - - allMappings = append(allMappings, currMapping) - } - - // if we got exactly one mapping, then use it even if other requested failed - if len(allMappings) == 1 { - return allMappings[0], nil - } - if len(allMappings) > 1 { - return nil, fmt.Errorf("multiple matches found for %v in %v", gk, versions) - } - if len(errors) > 0 { - return nil, utilerrors.NewAggregate(errors) - } - return nil, fmt.Errorf("no match found for %v in %v", gk, versions) -} - -// AliasesForResource finds the first alias response for the provided mappers. -func (m MultiRESTMapper) AliasesForResource(alias string) ([]string, bool) { - seenAliases := sets.NewString() - allAliases := []string{} - handled := false - - for _, t := range m { - if currAliases, currOk := t.AliasesForResource(alias); currOk { - for _, currAlias := range currAliases { - if !seenAliases.Has(currAlias) { - allAliases = append(allAliases, currAlias) - seenAliases.Insert(currAlias) - } - } - handled = true - } - } - return allAliases, handled -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/priority.go b/vendor/k8s.io/kubernetes/pkg/api/meta/priority.go deleted file mode 100644 index 24f38f78f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/priority.go +++ /dev/null @@ -1,173 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api/unversioned" -) - -const ( - AnyGroup = "*" - AnyVersion = "*" - AnyResource = "*" - AnyKind = "*" -) - -// PriorityRESTMapper is a wrapper for automatically choosing a particular Resource or Kind -// when multiple matches are possible -type PriorityRESTMapper struct { - // Delegate is the RESTMapper to use to locate all the Kind and Resource matches - Delegate RESTMapper - - // ResourcePriority is a list of priority patterns to apply to matching resources. - // The list of all matching resources is narrowed based on the patterns until only one remains. - // A pattern with no matches is skipped. A pattern with more than one match uses its - // matches as the list to continue matching against. - ResourcePriority []unversioned.GroupVersionResource - - // KindPriority is a list of priority patterns to apply to matching kinds. - // The list of all matching kinds is narrowed based on the patterns until only one remains. - // A pattern with no matches is skipped. A pattern with more than one match uses its - // matches as the list to continue matching against. - KindPriority []unversioned.GroupVersionKind -} - -func (m PriorityRESTMapper) String() string { - return fmt.Sprintf("PriorityRESTMapper{\n\t%v\n\t%v\n\t%v\n}", m.ResourcePriority, m.KindPriority, m.Delegate) -} - -// ResourceFor finds all resources, then passes them through the ResourcePriority patterns to find a single matching hit. 
-func (m PriorityRESTMapper) ResourceFor(partiallySpecifiedResource unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error) { - originalGVRs, err := m.Delegate.ResourcesFor(partiallySpecifiedResource) - if err != nil { - return unversioned.GroupVersionResource{}, err - } - if len(originalGVRs) == 1 { - return originalGVRs[0], nil - } - - remainingGVRs := append([]unversioned.GroupVersionResource{}, originalGVRs...) - for _, pattern := range m.ResourcePriority { - matchedGVRs := []unversioned.GroupVersionResource{} - for _, gvr := range remainingGVRs { - if resourceMatches(pattern, gvr) { - matchedGVRs = append(matchedGVRs, gvr) - } - } - - switch len(matchedGVRs) { - case 0: - // if you have no matches, then nothing matched this pattern just move to the next - continue - case 1: - // one match, return - return matchedGVRs[0], nil - default: - // more than one match, use the matched hits as the list moving to the next pattern. - // this way you can have a series of selection criteria - remainingGVRs = matchedGVRs - } - } - - return unversioned.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingResources: originalGVRs} -} - -// KindFor finds all kinds, then passes them through the KindPriority patterns to find a single matching hit. -func (m PriorityRESTMapper) KindFor(partiallySpecifiedResource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) { - originalGVKs, err := m.Delegate.KindsFor(partiallySpecifiedResource) - if err != nil { - return unversioned.GroupVersionKind{}, err - } - if len(originalGVKs) == 1 { - return originalGVKs[0], nil - } - - remainingGVKs := append([]unversioned.GroupVersionKind{}, originalGVKs...) - for _, pattern := range m.KindPriority { - matchedGVKs := []unversioned.GroupVersionKind{} - for _, gvr := range remainingGVKs { - if kindMatches(pattern, gvr) { - matchedGVKs = append(matchedGVKs, gvr) - } - } - - switch len(matchedGVKs) { - case 0: - // if you have no matches, then nothing matched this pattern just move to the next - continue - case 1: - // one match, return - return matchedGVKs[0], nil - default: - // more than one match, use the matched hits as the list moving to the next pattern. - // this way you can have a series of selection criteria - remainingGVKs = matchedGVKs - } - } - - return unversioned.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingKinds: originalGVKs} -} - -func resourceMatches(pattern unversioned.GroupVersionResource, resource unversioned.GroupVersionResource) bool { - if pattern.Group != AnyGroup && pattern.Group != resource.Group { - return false - } - if pattern.Version != AnyVersion && pattern.Version != resource.Version { - return false - } - if pattern.Resource != AnyResource && pattern.Resource != resource.Resource { - return false - } - - return true -} - -func kindMatches(pattern unversioned.GroupVersionKind, kind unversioned.GroupVersionKind) bool { - if pattern.Group != AnyGroup && pattern.Group != kind.Group { - return false - } - if pattern.Version != AnyVersion && pattern.Version != kind.Version { - return false - } - if pattern.Kind != AnyKind && pattern.Kind != kind.Kind { - return false - } - - return true -} - -func (m PriorityRESTMapper) RESTMapping(gk unversioned.GroupKind, versions ...string) (mapping *RESTMapping, err error) { - return m.Delegate.RESTMapping(gk, versions...) 
-} - -func (m PriorityRESTMapper) AliasesForResource(alias string) (aliases []string, ok bool) { - return m.Delegate.AliasesForResource(alias) -} - -func (m PriorityRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { - return m.Delegate.ResourceSingularizer(resource) -} - -func (m PriorityRESTMapper) ResourcesFor(partiallySpecifiedResource unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) { - return m.Delegate.ResourcesFor(partiallySpecifiedResource) -} - -func (m PriorityRESTMapper) KindsFor(partiallySpecifiedResource unversioned.GroupVersionResource) (gvk []unversioned.GroupVersionKind, err error) { - return m.Delegate.KindsFor(partiallySpecifiedResource) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/meta/restmapper.go b/vendor/k8s.io/kubernetes/pkg/api/meta/restmapper.go deleted file mode 100644 index 4e07ab741..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/meta/restmapper.go +++ /dev/null @@ -1,520 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// TODO: move everything in this file to pkg/api/rest -package meta - -import ( - "fmt" - "sort" - "strings" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// Implements RESTScope interface -type restScope struct { - name RESTScopeName - paramName string - argumentName string - paramDescription string -} - -func (r *restScope) Name() RESTScopeName { - return r.name -} -func (r *restScope) ParamName() string { - return r.paramName -} -func (r *restScope) ArgumentName() string { - return r.argumentName -} -func (r *restScope) ParamDescription() string { - return r.paramDescription -} - -var RESTScopeNamespace = &restScope{ - name: RESTScopeNameNamespace, - paramName: "namespaces", - argumentName: "namespace", - paramDescription: "object name and auth scope, such as for teams and projects", -} - -var RESTScopeRoot = &restScope{ - name: RESTScopeNameRoot, -} - -// DefaultRESTMapper exposes mappings between the types defined in a -// runtime.Scheme. It assumes that all types defined the provided scheme -// can be mapped with the provided MetadataAccessor and Codec interfaces. -// -// The resource name of a Kind is defined as the lowercase, -// English-plural version of the Kind string. -// When converting from resource to Kind, the singular version of the -// resource name is also accepted for convenience. -// -// TODO: Only accept plural for some operations for increased control? 
-// (`get pod bar` vs `get pods bar`) -type DefaultRESTMapper struct { - defaultGroupVersions []unversioned.GroupVersion - - resourceToKind map[unversioned.GroupVersionResource]unversioned.GroupVersionKind - kindToPluralResource map[unversioned.GroupVersionKind]unversioned.GroupVersionResource - kindToScope map[unversioned.GroupVersionKind]RESTScope - singularToPlural map[unversioned.GroupVersionResource]unversioned.GroupVersionResource - pluralToSingular map[unversioned.GroupVersionResource]unversioned.GroupVersionResource - - interfacesFunc VersionInterfacesFunc - - // aliasToResource is used for mapping aliases to resources - aliasToResource map[string][]string -} - -func (m *DefaultRESTMapper) String() string { - return fmt.Sprintf("DefaultRESTMapper{kindToPluralResource=%v}", m.kindToPluralResource) -} - -var _ RESTMapper = &DefaultRESTMapper{} - -// VersionInterfacesFunc returns the appropriate typer, and metadata accessor for a -// given api version, or an error if no such api version exists. -type VersionInterfacesFunc func(version unversioned.GroupVersion) (*VersionInterfaces, error) - -// NewDefaultRESTMapper initializes a mapping between Kind and APIVersion -// to a resource name and back based on the objects in a runtime.Scheme -// and the Kubernetes API conventions. Takes a group name, a priority list of the versions -// to search when an object has no default version (set empty to return an error), -// and a function that retrieves the correct metadata for a given version. -func NewDefaultRESTMapper(defaultGroupVersions []unversioned.GroupVersion, f VersionInterfacesFunc) *DefaultRESTMapper { - resourceToKind := make(map[unversioned.GroupVersionResource]unversioned.GroupVersionKind) - kindToPluralResource := make(map[unversioned.GroupVersionKind]unversioned.GroupVersionResource) - kindToScope := make(map[unversioned.GroupVersionKind]RESTScope) - singularToPlural := make(map[unversioned.GroupVersionResource]unversioned.GroupVersionResource) - pluralToSingular := make(map[unversioned.GroupVersionResource]unversioned.GroupVersionResource) - aliasToResource := make(map[string][]string) - // TODO: verify name mappings work correctly when versions differ - - return &DefaultRESTMapper{ - resourceToKind: resourceToKind, - kindToPluralResource: kindToPluralResource, - kindToScope: kindToScope, - defaultGroupVersions: defaultGroupVersions, - singularToPlural: singularToPlural, - pluralToSingular: pluralToSingular, - aliasToResource: aliasToResource, - interfacesFunc: f, - } -} - -func (m *DefaultRESTMapper) Add(kind unversioned.GroupVersionKind, scope RESTScope) { - plural, singular := KindToResource(kind) - - m.singularToPlural[singular] = plural - m.pluralToSingular[plural] = singular - - m.resourceToKind[singular] = kind - m.resourceToKind[plural] = kind - - m.kindToPluralResource[kind] = plural - m.kindToScope[kind] = scope -} - -// unpluralizedSuffixes is a list of resource suffixes that are the same plural and singular -// This is only is only necessary because some bits of code are lazy and don't actually use the RESTMapper like they should. -// TODO eliminate this so that different callers can correctly map to resources. This probably means updating all -// callers to use the RESTMapper they mean. -var unpluralizedSuffixes = []string{ - "endpoints", -} - -// KindToResource converts Kind to a resource name. -// Broken. This method only "sort of" works when used outside of this package. It assumes that Kinds and Resources match -// and they aren't guaranteed to do so. 
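Illustrative sketch (not part of the diff): what the pluralization rules in KindToResource, defined below, produce for a few kinds, including the unpluralized "endpoints" special case. The kind names are arbitrary examples.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/meta"
	"k8s.io/kubernetes/pkg/api/unversioned"
)

func main() {
	for _, kind := range []string{"Pod", "Policy", "ComponentStatus", "Endpoints"} {
		gvk := unversioned.GroupVersionKind{Group: "", Version: "v1", Kind: kind}
		plural, singular := meta.KindToResource(gvk)
		fmt.Printf("%s -> plural=%q singular=%q\n", kind, plural.Resource, singular.Resource)
	}
	// Pod -> "pods"/"pod", Policy -> "policies"/"policy",
	// ComponentStatus -> "componentstatuses"/"componentstatus",
	// Endpoints -> "endpoints"/"endpoints" (unpluralized suffix)
}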
-func KindToResource(kind unversioned.GroupVersionKind) ( /*plural*/ unversioned.GroupVersionResource /*singular*/, unversioned.GroupVersionResource) { - kindName := kind.Kind - if len(kindName) == 0 { - return unversioned.GroupVersionResource{}, unversioned.GroupVersionResource{} - } - singularName := strings.ToLower(kindName) - singular := kind.GroupVersion().WithResource(singularName) - - for _, skip := range unpluralizedSuffixes { - if strings.HasSuffix(singularName, skip) { - return singular, singular - } - } - - switch string(singularName[len(singularName)-1]) { - case "s": - return kind.GroupVersion().WithResource(singularName + "es"), singular - case "y": - return kind.GroupVersion().WithResource(strings.TrimSuffix(singularName, "y") + "ies"), singular - } - - return kind.GroupVersion().WithResource(singularName + "s"), singular -} - -// ResourceSingularizer implements RESTMapper -// It converts a resource name from plural to singular (e.g., from pods to pod) -func (m *DefaultRESTMapper) ResourceSingularizer(resourceType string) (string, error) { - partialResource := unversioned.GroupVersionResource{Resource: resourceType} - resources, err := m.ResourcesFor(partialResource) - if err != nil { - return resourceType, err - } - - singular := unversioned.GroupVersionResource{} - for _, curr := range resources { - currSingular, ok := m.pluralToSingular[curr] - if !ok { - continue - } - if singular.IsEmpty() { - singular = currSingular - continue - } - - if currSingular.Resource != singular.Resource { - return resourceType, fmt.Errorf("multiple possibile singular resources (%v) found for %v", resources, resourceType) - } - } - - if singular.IsEmpty() { - return resourceType, fmt.Errorf("no singular of resource %v has been defined", resourceType) - } - - return singular.Resource, nil -} - -// coerceResourceForMatching makes the resource lower case and converts internal versions to unspecified (legacy behavior) -func coerceResourceForMatching(resource unversioned.GroupVersionResource) unversioned.GroupVersionResource { - resource.Resource = strings.ToLower(resource.Resource) - if resource.Version == runtime.APIVersionInternal { - resource.Version = "" - } - - return resource -} - -func (m *DefaultRESTMapper) ResourcesFor(input unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) { - resource := coerceResourceForMatching(input) - - hasResource := len(resource.Resource) > 0 - hasGroup := len(resource.Group) > 0 - hasVersion := len(resource.Version) > 0 - - if !hasResource { - return nil, fmt.Errorf("a resource must be present, got: %v", resource) - } - - ret := []unversioned.GroupVersionResource{} - switch { - // fully qualified. 
Find the exact match - case hasGroup && hasVersion: - for plural, singular := range m.pluralToSingular { - if singular == resource { - ret = append(ret, plural) - break - } - if plural == resource { - ret = append(ret, plural) - break - } - } - - case hasGroup: - requestedGroupResource := resource.GroupResource() - for plural, singular := range m.pluralToSingular { - if singular.GroupResource() == requestedGroupResource { - ret = append(ret, plural) - } - if plural.GroupResource() == requestedGroupResource { - ret = append(ret, plural) - } - } - - case hasVersion: - for plural, singular := range m.pluralToSingular { - if singular.Version == resource.Version && singular.Resource == resource.Resource { - ret = append(ret, plural) - } - if plural.Version == resource.Version && plural.Resource == resource.Resource { - ret = append(ret, plural) - } - } - - default: - for plural, singular := range m.pluralToSingular { - if singular.Resource == resource.Resource { - ret = append(ret, plural) - } - if plural.Resource == resource.Resource { - ret = append(ret, plural) - } - } - } - - if len(ret) == 0 { - return nil, &NoResourceMatchError{PartialResource: resource} - } - - sort.Sort(resourceByPreferredGroupVersion{ret, m.defaultGroupVersions}) - return ret, nil -} - -func (m *DefaultRESTMapper) ResourceFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error) { - resources, err := m.ResourcesFor(resource) - if err != nil { - return unversioned.GroupVersionResource{}, err - } - if len(resources) == 1 { - return resources[0], nil - } - - return unversioned.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources} -} - -func (m *DefaultRESTMapper) KindsFor(input unversioned.GroupVersionResource) ([]unversioned.GroupVersionKind, error) { - resource := coerceResourceForMatching(input) - - hasResource := len(resource.Resource) > 0 - hasGroup := len(resource.Group) > 0 - hasVersion := len(resource.Version) > 0 - - if !hasResource { - return nil, fmt.Errorf("a resource must be present, got: %v", resource) - } - - ret := []unversioned.GroupVersionKind{} - switch { - // fully qualified. 
Find the exact match - case hasGroup && hasVersion: - kind, exists := m.resourceToKind[resource] - if exists { - ret = append(ret, kind) - } - - case hasGroup: - requestedGroupResource := resource.GroupResource() - for currResource, currKind := range m.resourceToKind { - if currResource.GroupResource() == requestedGroupResource { - ret = append(ret, currKind) - } - } - - case hasVersion: - for currResource, currKind := range m.resourceToKind { - if currResource.Version == resource.Version && currResource.Resource == resource.Resource { - ret = append(ret, currKind) - } - } - - default: - for currResource, currKind := range m.resourceToKind { - if currResource.Resource == resource.Resource { - ret = append(ret, currKind) - } - } - } - - if len(ret) == 0 { - return nil, &NoResourceMatchError{PartialResource: input} - } - - sort.Sort(kindByPreferredGroupVersion{ret, m.defaultGroupVersions}) - return ret, nil -} - -func (m *DefaultRESTMapper) KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) { - kinds, err := m.KindsFor(resource) - if err != nil { - return unversioned.GroupVersionKind{}, err - } - if len(kinds) == 1 { - return kinds[0], nil - } - - return unversioned.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds} -} - -type kindByPreferredGroupVersion struct { - list []unversioned.GroupVersionKind - sortOrder []unversioned.GroupVersion -} - -func (o kindByPreferredGroupVersion) Len() int { return len(o.list) } -func (o kindByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] } -func (o kindByPreferredGroupVersion) Less(i, j int) bool { - lhs := o.list[i] - rhs := o.list[j] - if lhs == rhs { - return false - } - - if lhs.GroupVersion() == rhs.GroupVersion() { - return lhs.Kind < rhs.Kind - } - - // otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order - lhsIndex := -1 - rhsIndex := -1 - - for i := range o.sortOrder { - if o.sortOrder[i] == lhs.GroupVersion() { - lhsIndex = i - } - if o.sortOrder[i] == rhs.GroupVersion() { - rhsIndex = i - } - } - - if rhsIndex == -1 { - return true - } - - return lhsIndex < rhsIndex -} - -type resourceByPreferredGroupVersion struct { - list []unversioned.GroupVersionResource - sortOrder []unversioned.GroupVersion -} - -func (o resourceByPreferredGroupVersion) Len() int { return len(o.list) } -func (o resourceByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] } -func (o resourceByPreferredGroupVersion) Less(i, j int) bool { - lhs := o.list[i] - rhs := o.list[j] - if lhs == rhs { - return false - } - - if lhs.GroupVersion() == rhs.GroupVersion() { - return lhs.Resource < rhs.Resource - } - - // otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order - lhsIndex := -1 - rhsIndex := -1 - - for i := range o.sortOrder { - if o.sortOrder[i] == lhs.GroupVersion() { - lhsIndex = i - } - if o.sortOrder[i] == rhs.GroupVersion() { - rhsIndex = i - } - } - - if rhsIndex == -1 { - return true - } - - return lhsIndex < rhsIndex -} - -// RESTMapping returns a struct representing the resource path and conversion interfaces a -// RESTClient should use to operate on the provided group/kind in order of versions. If a version search -// order is not provided, the search order provided to DefaultRESTMapper will be used to resolve which -// version should be used to access the named group/kind. 
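Illustrative sketch (not part of the diff): resolving a RESTMapping for a group/kind with the DefaultRESTMapper, letting the default group versions pick the version as the comment above describes. The stub VersionInterfacesFunc stands in for the real convertor/accessor wiring.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/meta"
	"k8s.io/kubernetes/pkg/api/unversioned"
)

func main() {
	gv := unversioned.GroupVersion{Group: "", Version: "v1"}

	stub := func(unversioned.GroupVersion) (*meta.VersionInterfaces, error) {
		return &meta.VersionInterfaces{}, nil
	}

	mapper := meta.NewDefaultRESTMapper([]unversioned.GroupVersion{gv}, stub)
	mapper.Add(unversioned.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}, meta.RESTScopeNamespace)

	// No explicit version is passed, so the default group versions decide.
	mapping, err := mapper.RESTMapping(unversioned.GroupKind{Kind: "Pod"})
	if err != nil {
		panic(err)
	}
	fmt.Println(mapping.Resource, mapping.GroupVersionKind, mapping.Scope.Name()) // "pods", the v1 Pod kind, namespace scope
}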
-func (m *DefaultRESTMapper) RESTMapping(gk unversioned.GroupKind, versions ...string) (*RESTMapping, error) { - // Pick an appropriate version - var gvk *unversioned.GroupVersionKind - hadVersion := false - for _, version := range versions { - if len(version) == 0 || version == runtime.APIVersionInternal { - continue - } - - currGVK := gk.WithVersion(version) - hadVersion = true - if _, ok := m.kindToPluralResource[currGVK]; ok { - gvk = &currGVK - break - } - } - // Use the default preferred versions - if !hadVersion && (gvk == nil) { - for _, gv := range m.defaultGroupVersions { - if gv.Group != gk.Group { - continue - } - - currGVK := gk.WithVersion(gv.Version) - if _, ok := m.kindToPluralResource[currGVK]; ok { - gvk = &currGVK - break - } - } - } - if gvk == nil { - return nil, fmt.Errorf("no kind named %q is registered in versions %q", gk, versions) - } - - // Ensure we have a REST mapping - resource, ok := m.kindToPluralResource[*gvk] - if !ok { - found := []unversioned.GroupVersion{} - for _, gv := range m.defaultGroupVersions { - if _, ok := m.kindToPluralResource[*gvk]; ok { - found = append(found, gv) - } - } - if len(found) > 0 { - return nil, fmt.Errorf("object with kind %q exists in versions %v, not %v", gvk.Kind, found, gvk.GroupVersion().String()) - } - return nil, fmt.Errorf("the provided version %q and kind %q cannot be mapped to a supported object", gvk.GroupVersion().String(), gvk.Kind) - } - - // Ensure we have a REST scope - scope, ok := m.kindToScope[*gvk] - if !ok { - return nil, fmt.Errorf("the provided version %q and kind %q cannot be mapped to a supported scope", gvk.GroupVersion().String(), gvk.Kind) - } - - interfaces, err := m.interfacesFunc(gvk.GroupVersion()) - if err != nil { - return nil, fmt.Errorf("the provided version %q has no relevant versions", gvk.GroupVersion().String()) - } - - retVal := &RESTMapping{ - Resource: resource.Resource, - GroupVersionKind: *gvk, - Scope: scope, - - ObjectConvertor: interfaces.ObjectConvertor, - MetadataAccessor: interfaces.MetadataAccessor, - } - - return retVal, nil -} - -// AddResourceAlias maps aliases to resources -func (m *DefaultRESTMapper) AddResourceAlias(alias string, resources ...string) { - if len(resources) == 0 { - return - } - m.aliasToResource[alias] = resources -} - -// AliasesForResource returns whether a resource has an alias or not -func (m *DefaultRESTMapper) AliasesForResource(alias string) ([]string, bool) { - if res, ok := m.aliasToResource[alias]; ok { - return res, true - } - return nil, false -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/node_example.json b/vendor/k8s.io/kubernetes/pkg/api/node_example.json deleted file mode 100644 index 260183484..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/node_example.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "kind": "Node", - "apiVersion": "v1", - "metadata": { - "name": "e2e-test-wojtekt-minion-etd6", - "selfLink": "/api/v1/nodes/e2e-test-wojtekt-minion-etd6", - "uid": "a7e89222-e8e5-11e4-8fde-42010af09327", - "resourceVersion": "379", - "creationTimestamp": "2015-04-22T11:49:39Z" - }, - "spec": { - "externalID": "15488322946290398375" - }, - "status": { - "capacity": { - "cpu": "1", - "memory": "1745152Ki" - }, - "conditions": [ - { - "type": "Ready", - "status": "True", - "lastHeartbeatTime": "2015-04-22T11:58:17Z", - "lastTransitionTime": "2015-04-22T11:49:52Z", - "reason": "kubelet is posting ready status" - } - ], - "addresses": [ - { - "type": "ExternalIP", - "address": "104.197.49.213" - }, - { - "type": "LegacyHostIP", - "address": 
"104.197.20.11" - } - ], - "nodeInfo": { - "machineID": "", - "systemUUID": "D59FA3FA-7B5B-7287-5E1A-1D79F13CB577", - "bootID": "44a832f3-8cfb-4de5-b7d2-d66030b6cd95", - "kernelVersion": "3.16.0-0.bpo.4-amd64", - "osImage": "Debian GNU/Linux 7 (wheezy)", - "containerRuntimeVersion": "docker://1.5.0", - "kubeletVersion": "v0.15.0-484-g0c8ee980d705a3-dirty", - "kubeProxyVersion": "v0.15.0-484-g0c8ee980d705a3-dirty" - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/pod/util.go b/vendor/k8s.io/kubernetes/pkg/api/pod/util.go deleted file mode 100644 index 1bdacfe20..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/pod/util.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package pod - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/util/intstr" -) - -const ( - // TODO: to be de!eted after v1.3 is released. PodSpec has a dedicated Hostname field. - // The annotation value is a string specifying the hostname to be used for the pod e.g 'my-webserver-1' - PodHostnameAnnotation = "pod.beta.kubernetes.io/hostname" - - // TODO: to be de!eted after v1.3 is released. PodSpec has a dedicated Subdomain field. - // The annotation value is a string specifying the subdomain e.g. "my-web-service" - // If specified, on the the pod itself, "<hostname>.my-web-service.<namespace>.svc.<cluster domain>" would resolve to - // the pod's IP. - // If there is a headless service named "my-web-service" in the same namespace as the pod, then, - // <hostname>.my-web-service.<namespace>.svc.<cluster domain>" would be resolved by the cluster DNS Server. - PodSubdomainAnnotation = "pod.beta.kubernetes.io/subdomain" -) - -// FindPort locates the container port for the given pod and portName. If the -// targetPort is a number, use that. If the targetPort is a string, look that -// string up in all named ports in all containers in the target pod. If no -// match is found, fail. 
-func FindPort(pod *api.Pod, svcPort *api.ServicePort) (int, error) { - portName := svcPort.TargetPort - switch portName.Type { - case intstr.String: - name := portName.StrVal - for _, container := range pod.Spec.Containers { - for _, port := range container.Ports { - if port.Name == name && port.Protocol == svcPort.Protocol { - return int(port.ContainerPort), nil - } - } - } - case intstr.Int: - return portName.IntValue(), nil - } - - return 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/pod_example.json b/vendor/k8s.io/kubernetes/pkg/api/pod_example.json deleted file mode 100644 index 8284240a2..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/pod_example.json +++ /dev/null @@ -1,102 +0,0 @@ -{ - "kind": "Pod", - "apiVersion": "v1", - "metadata": { - "name": "etcd-server-e2e-test-wojtekt-master", - "namespace": "default", - "selfLink": "/api/v1/namespaces/default/pods/etcd-server-e2e-test-wojtekt-master", - "uid": "a671734a-e8e5-11e4-8fde-42010af09327", - "resourceVersion": "22", - "creationTimestamp": "2015-04-22T11:49:36Z", - "annotations": { - "kubernetes.io/config.mirror": "mirror", - "kubernetes.io/config.source": "file" - } - }, - "spec": { - "volumes": [ - { - "name": "varetcd", - "hostPath": { - "path": "/mnt/master-pd/var/etcd" - }, - "emptyDir": null, - "gcePersistentDisk": null, - "awsElasticBlockStore": null, - "gitRepo": null, - "secret": null, - "nfs": null, - "iscsi": null, - "glusterfs": null - } - ], - "containers": [ - { - "name": "etcd-container", - "image": "gcr.io/google_containers/etcd:2.0.9", - "command": [ - "/usr/local/bin/etcd", - "--addr", - "127.0.0.1:4001", - "--bind-addr", - "127.0.0.1:4001", - "--data-dir", - "/var/etcd/data" - ], - "ports": [ - { - "name": "serverport", - "hostPort": 2380, - "containerPort": 2380, - "protocol": "TCP" - }, - { - "name": "clientport", - "hostPort": 4001, - "containerPort": 4001, - "protocol": "TCP" - } - ], - "resources": {}, - "volumeMounts": [ - { - "name": "varetcd", - "mountPath": "/var/etcd" - } - ], - "terminationMessagePath": "/dev/termination-log", - "imagePullPolicy": "IfNotPresent", - "capabilities": {} - } - ], - "restartPolicy": "Always", - "dnsPolicy": "ClusterFirst", - "nodeName": "e2e-test-wojtekt-master", - "hostNetwork": true - }, - "status": { - "phase": "Running", - "conditions": [ - { - "type": "Ready", - "status": "True" - } - ], - "containerStatuses": [ - { - "name": "etcd-container", - "state": { - "running": { - "startedAt": "2015-04-22T11:49:32Z" - } - }, - "lastState": {}, - "ready": true, - "restartCount": 0, - "image": "gcr.io/google_containers/etcd:2.0.9", - "imageID": "docker://b6b9a86dc06aa1361357ca1b105feba961f6a4145adca6c54e142c0be0fe87b0", - "containerID": "docker://3cbbf818f1addfc252957b4504f56ef2907a313fe6afc47fc75373674255d46d" - } - ] - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/ref.go b/vendor/k8s.io/kubernetes/pkg/api/ref.go deleted file mode 100644 index 08dede071..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/ref.go +++ /dev/null @@ -1,113 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "errors" - "fmt" - "net/url" - "strings" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -var ( - // Errors that could be returned by GetReference. - ErrNilObject = errors.New("can't reference a nil object") - ErrNoSelfLink = errors.New("selfLink was empty, can't make reference") -) - -// GetReference returns an ObjectReference which refers to the given -// object, or an error if the object doesn't follow the conventions -// that would allow this. -// TODO: should take a meta.Interface see http://issue.k8s.io/7127 -func GetReference(obj runtime.Object) (*ObjectReference, error) { - if obj == nil { - return nil, ErrNilObject - } - if ref, ok := obj.(*ObjectReference); ok { - // Don't make a reference to a reference. - return ref, nil - } - meta, err := meta.Accessor(obj) - if err != nil { - return nil, err - } - - gvk := obj.GetObjectKind().GroupVersionKind() - - // if the object referenced is actually persisted, we can just get kind from meta - // if we are building an object reference to something not yet persisted, we should fallback to scheme - kind := gvk.Kind - if len(kind) == 0 { - // TODO: this is wrong - gvks, _, err := Scheme.ObjectKinds(obj) - if err != nil { - return nil, err - } - kind = gvks[0].Kind - } - - // if the object referenced is actually persisted, we can also get version from meta - version := gvk.GroupVersion().String() - if len(version) == 0 { - selfLink := meta.GetSelfLink() - if len(selfLink) == 0 { - return nil, ErrNoSelfLink - } - selfLinkUrl, err := url.Parse(selfLink) - if err != nil { - return nil, err - } - // example paths: /<prefix>/<version>/* - parts := strings.Split(selfLinkUrl.Path, "/") - if len(parts) < 3 { - return nil, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version) - } - version = parts[2] - } - - return &ObjectReference{ - Kind: kind, - APIVersion: version, - Name: meta.GetName(), - Namespace: meta.GetNamespace(), - UID: meta.GetUID(), - ResourceVersion: meta.GetResourceVersion(), - }, nil -} - -// GetPartialReference is exactly like GetReference, but allows you to set the FieldPath. -func GetPartialReference(obj runtime.Object, fieldPath string) (*ObjectReference, error) { - ref, err := GetReference(obj) - if err != nil { - return nil, err - } - ref.FieldPath = fieldPath - return ref, nil -} - -// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that -// intend only to get a reference to that object. This simplifies the event recording interface. 
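Illustrative sketch (not part of the diff): building an ObjectReference with the GetReference and GetPartialReference helpers shown above. The Pod metadata, UID, and field path are invented; setting TypeMeta explicitly avoids the scheme/selfLink fallbacks.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/unversioned"
)

func main() {
	pod := &api.Pod{
		TypeMeta: unversioned.TypeMeta{Kind: "Pod", APIVersion: "v1"},
		ObjectMeta: api.ObjectMeta{
			Name:      "example",
			Namespace: "default",
			UID:       "1234-abcd",
		},
	}

	// Build an ObjectReference suitable for events, bindings, and the like.
	ref, err := api.GetReference(pod)
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.Kind, ref.Namespace+"/"+ref.Name, ref.UID)

	// GetPartialReference additionally pins a field path inside the object.
	fieldRef, _ := api.GetPartialReference(pod, "spec.containers{web}")
	fmt.Println(fieldRef.FieldPath)
}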
-func (obj *ObjectReference) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { - obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() -} -func (obj *ObjectReference) GroupVersionKind() unversioned.GroupVersionKind { - return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/register.go b/vendor/k8s.io/kubernetes/pkg/api/register.go deleted file mode 100644 index 631a7f958..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/register.go +++ /dev/null @@ -1,115 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer" -) - -// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered. -var Scheme = runtime.NewScheme() - -// Codecs provides access to encoding and decoding for the scheme -var Codecs = serializer.NewCodecFactory(Scheme) - -// GroupName is the group name used in this package -const GroupName = "" - -// SchemeGroupVersion is the group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Unversioned is the group version for unversioned API objects -// TODO: this should be v1 probably -var Unversioned = unversioned.GroupVersion{Group: "", Version: "v1"} - -// ParameterCodec handles versioning of objects that are converted to query parameters.
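Illustrative sketch (not part of the diff): the small helpers defined below qualify bare names with the (empty) core group, and AddToScheme registers the internal core types into a scheme of the caller's own. The scheme variable name is arbitrary.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/runtime"
)

func main() {
	// Qualify bare names with the core group.
	fmt.Println(api.Kind("Pod"))      // group-qualified kind
	fmt.Println(api.Resource("pods")) // group-qualified resource

	// Register the internal core types into a fresh scheme, as callers of
	// this package did via the AddToScheme helper defined below.
	scheme := runtime.NewScheme()
	api.AddToScheme(scheme)
}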
-var ParameterCodec = runtime.NewParameterCodec(Scheme) - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns back a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -func AddToScheme(scheme *runtime.Scheme) { - if err := Scheme.AddIgnoredConversionType(&unversioned.TypeMeta{}, &unversioned.TypeMeta{}); err != nil { - panic(err) - } - scheme.AddKnownTypes(SchemeGroupVersion, - &Pod{}, - &PodList{}, - &PodStatusResult{}, - &PodTemplate{}, - &PodTemplateList{}, - &ReplicationControllerList{}, - &ReplicationController{}, - &ServiceList{}, - &Service{}, - &ServiceProxyOptions{}, - &NodeList{}, - &Node{}, - &NodeProxyOptions{}, - &Endpoints{}, - &EndpointsList{}, - &Binding{}, - &Event{}, - &EventList{}, - &List{}, - &LimitRange{}, - &LimitRangeList{}, - &ResourceQuota{}, - &ResourceQuotaList{}, - &Namespace{}, - &NamespaceList{}, - &ServiceAccount{}, - &ServiceAccountList{}, - &Secret{}, - &SecretList{}, - &PersistentVolume{}, - &PersistentVolumeList{}, - &PersistentVolumeClaim{}, - &PersistentVolumeClaimList{}, - &DeleteOptions{}, - &ListOptions{}, - &PodAttachOptions{}, - &PodLogOptions{}, - &PodExecOptions{}, - &PodProxyOptions{}, - &ComponentStatus{}, - &ComponentStatusList{}, - &SerializedReference{}, - &RangeAllocation{}, - &ConfigMap{}, - &ConfigMapList{}, - ) - - // Register Unversioned types under their own special group - Scheme.AddUnversionedTypes(Unversioned, - &unversioned.ExportOptions{}, - &unversioned.Status{}, - &unversioned.APIVersions{}, - &unversioned.APIGroupList{}, - &unversioned.APIGroup{}, - &unversioned.APIResourceList{}, - ) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/replication_controller_example.json b/vendor/k8s.io/kubernetes/pkg/api/replication_controller_example.json deleted file mode 100644 index 5c3c4fe71..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/replication_controller_example.json +++ /dev/null @@ -1,82 +0,0 @@ -{ - "kind": "ReplicationController", - "apiVersion": "v1", - "metadata": { - "name": "elasticsearch-logging-controller", - "namespace": "default", - "selfLink": "/api/v1/namespaces/default/replicationcontrollers/elasticsearch-logging-controller", - "uid": "aa76f162-e8e5-11e4-8fde-42010af09327", - "resourceVersion": "98", - "creationTimestamp": "2015-04-22T11:49:43Z", - "labels": { - "kubernetes.io/cluster-service": "true", - "name": "elasticsearch-logging" - } - }, - "spec": { - "replicas": 1, - "selector": { - "name": "elasticsearch-logging" - }, - "template": { - "metadata": { - "creationTimestamp": null, - "labels": { - "kubernetes.io/cluster-service": "true", - "name": "elasticsearch-logging" - } - }, - "spec": { - "volumes": [ - { - "name": "es-persistent-storage", - "hostPath": null, - "emptyDir": { - "medium": "" - }, - "gcePersistentDisk": null, - "awsElasticBlockStore": null, - "gitRepo": null, - "secret": null, - "nfs": null, - "iscsi": null, - "glusterfs": null - } - ], - "containers": [ - { - "name": "elasticsearch-logging", - "image": "gcr.io/google_containers/elasticsearch:1.0", - "ports": [ - { - "name": "db", - "containerPort": 9200, - "protocol": "TCP" - }, - { - "name": "transport", - "containerPort": 9300, - "protocol": "TCP" - } - ], - "resources": {}, - "volumeMounts": [ - { - "name": "es-persistent-storage", - 
"mountPath": "/data" - } - ], - "terminationMessagePath": "/dev/termination-log", - "imagePullPolicy": "IfNotPresent", - "capabilities": {} - } - ], - "restartPolicy": "Always", - "dnsPolicy": "ClusterFirst" - } - } - }, - "status": { - "replicas": 1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/requestcontext.go b/vendor/k8s.io/kubernetes/pkg/api/requestcontext.go deleted file mode 100644 index 2c27d6862..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/requestcontext.go +++ /dev/null @@ -1,115 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "errors" - "net/http" - "sync" -) - -// RequestContextMapper keeps track of the context associated with a particular request -type RequestContextMapper interface { - // Get returns the context associated with the given request (if any), and true if the request has an associated context, and false if it does not. - Get(req *http.Request) (Context, bool) - // Update maps the request to the given context. If no context was previously associated with the request, an error is returned. - // Update should only be called with a descendant context of the previously associated context. - // Updating to an unrelated context may return an error in the future. - // The context associated with a request should only be updated by a limited set of callers. - // Valid examples include the authentication layer, or an audit/tracing layer. - Update(req *http.Request, context Context) error -} - -type requestContextMap struct { - contexts map[*http.Request]Context - lock sync.Mutex -} - -// NewRequestContextMapper returns a new RequestContextMapper. -// The returned mapper must be added as a request filter using NewRequestContextFilter. -func NewRequestContextMapper() RequestContextMapper { - return &requestContextMap{ - contexts: make(map[*http.Request]Context), - } -} - -// Get returns the context associated with the given request (if any), and true if the request has an associated context, and false if it does not. -// Get will only return a valid context when called from inside the filter chain set up by NewRequestContextFilter() -func (c *requestContextMap) Get(req *http.Request) (Context, bool) { - c.lock.Lock() - defer c.lock.Unlock() - context, ok := c.contexts[req] - return context, ok -} - -// Update maps the request to the given context. -// If no context was previously associated with the request, an error is returned and the context is ignored. -func (c *requestContextMap) Update(req *http.Request, context Context) error { - c.lock.Lock() - defer c.lock.Unlock() - if _, ok := c.contexts[req]; !ok { - return errors.New("No context associated") - } - // TODO: ensure the new context is a descendant of the existing one - c.contexts[req] = context - return nil -} - -// init maps the request to the given context and returns true if there was no context associated with the request already. 
-// if a context was already associated with the request, it ignores the given context and returns false. -// init is intentionally unexported to ensure that all init calls are paired with a remove after a request is handled -func (c *requestContextMap) init(req *http.Request, context Context) bool { - c.lock.Lock() - defer c.lock.Unlock() - if _, exists := c.contexts[req]; exists { - return false - } - c.contexts[req] = context - return true -} - -// remove is intentionally unexported to ensure that the context is not removed until a request is handled -func (c *requestContextMap) remove(req *http.Request) { - c.lock.Lock() - defer c.lock.Unlock() - delete(c.contexts, req) -} - -// NewRequestContextFilter ensures there is a Context object associated with the request before calling the passed handler. -// After the passed handler runs, the context is cleaned up. -func NewRequestContextFilter(mapper RequestContextMapper, handler http.Handler) (http.Handler, error) { - if mapper, ok := mapper.(*requestContextMap); ok { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - if mapper.init(req, NewContext()) { - // If we were the ones to successfully initialize, pair with a remove - defer mapper.remove(req) - } - handler.ServeHTTP(w, req) - }), nil - } else { - return handler, errors.New("Unknown RequestContextMapper implementation.") - } - -} - -// IsEmpty returns true if there are no contexts registered, or an error if it could not be determined. Intended for use by tests. -func IsEmpty(requestsToContexts RequestContextMapper) (bool, error) { - if requestsToContexts, ok := requestsToContexts.(*requestContextMap); ok { - return len(requestsToContexts.contexts) == 0, nil - } - return true, errors.New("Unknown RequestContextMapper implementation") -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/deep_copy.go b/vendor/k8s.io/kubernetes/pkg/api/resource/deep_copy.go deleted file mode 100644 index 4efc0406f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/resource/deep_copy.go +++ /dev/null @@ -1,32 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - inf "gopkg.in/inf.v0" - - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func DeepCopy_resource_Quantity(in Quantity, out *Quantity, c *conversion.Cloner) error { - *out = in - if in.d.Dec != nil { - tmp := &inf.Dec{} - out.d.Dec = tmp.Set(in.d.Dec) - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/api/resource/generated.pb.go deleted file mode 100644 index cf9447a32..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/resource/generated.pb.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/api/resource/generated.proto -// DO NOT EDIT! - -/* - Package resource is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/api/resource/generated.proto - - It has these top-level messages: - Quantity -*/ -package resource - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -func (m *Quantity) Reset() { *m = Quantity{} } -func (*Quantity) ProtoMessage() {} - -func init() { - proto.RegisterType((*Quantity)(nil), "k8s.io.kubernetes.pkg.api.resource.Quantity") -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource/generated.proto b/vendor/k8s.io/kubernetes/pkg/api/resource/generated.proto deleted file mode 100644 index e1c2a3d60..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/resource/generated.proto +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.api.resource; - -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "resource"; - -// Quantity is a fixed-point representation of a number. -// It provides convenient marshaling/unmarshaling in JSON and YAML, -// in addition to String() and Int64() accessors. -// -// The serialization format is: -// -// <quantity> ::= <signedNumber><suffix> -// (Note that <suffix> may be empty, from the "" case in <decimalSI>.) -// <digit> ::= 0 | 1 | ... | 9 -// <digits> ::= <digit> | <digit><digits> -// <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> -// <sign> ::= "+" | "-" -// <signedNumber> ::= <number> | <sign><number> -// <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> -// <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei -// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) -// <decimalSI> ::= m | "" | k | M | G | T | P | E -// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) -// <decimalExponent> ::= "e" <signedNumber> | "E" <signedNumber> -// -// No matter which of the three exponent forms is used, no quantity may represent -// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal -// places. Numbers larger or more precise will be capped or rounded up. 
-// (E.g.: 0.1m will rounded up to 1m.) -// This may be extended in the future if we require larger or smaller quantities. -// -// When a Quantity is parsed from a string, it will remember the type of suffix -// it had, and will use the same type again when it is serialized. -// -// Before serializing, Quantity will be put in "canonical form". -// This means that Exponent/suffix will be adjusted up or down (with a -// corresponding increase or decrease in Mantissa) such that: -// a. No precision is lost -// b. No fractional digits will be emitted -// c. The exponent (or suffix) is as large as possible. -// The sign will be omitted unless the number is negative. -// -// Examples: -// 1.5 will be serialized as "1500m" -// 1.5Gi will be serialized as "1536Mi" -// -// NOTE: We reserve the right to amend this canonical format, perhaps to -// allow 1.5 to be canonical. -// TODO: Remove above disclaimer after all bikeshedding about format is over, -// or after March 2015. -// -// Note that the quantity will NEVER be internally represented by a -// floating point number. That is the whole point of this exercise. -// -// Non-canonical values will still parse as long as they are well formed, -// but will be re-emitted in their canonical form. (So always use canonical -// form, or don't diff.) -// -// This format is intended to make it difficult to use these numbers without -// writing some sort of special handling code in the hopes that that will -// cause implementors to also use a fixed point implementation. -// -// +gencopy=false -// +protobuf=true -// +protobuf.embed=string -// +protobuf.options.marshal=false -// +protobuf.options.(gogoproto.goproto_stringer)=false -message Quantity { - optional string string = 1; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource_helpers.go b/vendor/k8s.io/kubernetes/pkg/api/resource_helpers.go deleted file mode 100644 index 4c55b120e..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/resource_helpers.go +++ /dev/null @@ -1,209 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/unversioned" -) - -// Returns string version of ResourceName. -func (self ResourceName) String() string { - return string(self) -} - -// Returns the CPU limit if specified. -func (self *ResourceList) Cpu() *resource.Quantity { - if val, ok := (*self)[ResourceCPU]; ok { - return &val - } - return &resource.Quantity{Format: resource.DecimalSI} -} - -// Returns the Memory limit if specified. 
-func (self *ResourceList) Memory() *resource.Quantity { - if val, ok := (*self)[ResourceMemory]; ok { - return &val - } - return &resource.Quantity{Format: resource.BinarySI} -} - -func (self *ResourceList) Pods() *resource.Quantity { - if val, ok := (*self)[ResourcePods]; ok { - return &val - } - return &resource.Quantity{} -} - -func (self *ResourceList) NvidiaGPU() *resource.Quantity { - if val, ok := (*self)[ResourceNvidiaGPU]; ok { - return &val - } - return &resource.Quantity{} -} - -func GetContainerStatus(statuses []ContainerStatus, name string) (ContainerStatus, bool) { - for i := range statuses { - if statuses[i].Name == name { - return statuses[i], true - } - } - return ContainerStatus{}, false -} - -func GetExistingContainerStatus(statuses []ContainerStatus, name string) ContainerStatus { - for i := range statuses { - if statuses[i].Name == name { - return statuses[i] - } - } - return ContainerStatus{} -} - -// IsPodReady returns true if a pod is ready; false otherwise. -func IsPodReady(pod *Pod) bool { - return IsPodReadyConditionTrue(pod.Status) -} - -// IsPodReady retruns true if a pod is ready; false otherwise. -func IsPodReadyConditionTrue(status PodStatus) bool { - condition := GetPodReadyCondition(status) - return condition != nil && condition.Status == ConditionTrue -} - -// Extracts the pod ready condition from the given status and returns that. -// Returns nil if the condition is not present. -func GetPodReadyCondition(status PodStatus) *PodCondition { - _, condition := GetPodCondition(&status, PodReady) - return condition -} - -// GetPodCondition extracts the provided condition from the given status and returns that. -// Returns nil and -1 if the condition is not present, and the the index of the located condition. -func GetPodCondition(status *PodStatus, conditionType PodConditionType) (int, *PodCondition) { - if status == nil { - return -1, nil - } - for i := range status.Conditions { - if status.Conditions[i].Type == conditionType { - return i, &status.Conditions[i] - } - } - return -1, nil -} - -// GetNodeCondition extracts the provided condition from the given status and returns that. -// Returns nil and -1 if the condition is not present, and the the index of the located condition. -func GetNodeCondition(status *NodeStatus, conditionType NodeConditionType) (int, *NodeCondition) { - if status == nil { - return -1, nil - } - for i := range status.Conditions { - if status.Conditions[i].Type == conditionType { - return i, &status.Conditions[i] - } - } - return -1, nil -} - -// Updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the -// status has changed. -// Returns true if pod condition has changed or has been added. -func UpdatePodCondition(status *PodStatus, condition *PodCondition) bool { - condition.LastTransitionTime = unversioned.Now() - // Try to find this pod condition. - conditionIndex, oldCondition := GetPodCondition(status, condition.Type) - - if oldCondition == nil { - // We are adding new pod condition. - status.Conditions = append(status.Conditions, *condition) - return true - } else { - // We are updating an existing condition, so we need to check if it has changed. 
- if condition.Status == oldCondition.Status { - condition.LastTransitionTime = oldCondition.LastTransitionTime - } - - isEqual := condition.Status == oldCondition.Status && - condition.Reason == oldCondition.Reason && - condition.Message == oldCondition.Message && - condition.LastProbeTime.Equal(oldCondition.LastProbeTime) && - condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime) - - status.Conditions[conditionIndex] = *condition - // Return true if one of the fields have changed. - return !isEqual - } -} - -// IsNodeReady returns true if a node is ready; false otherwise. -func IsNodeReady(node *Node) bool { - for _, c := range node.Status.Conditions { - if c.Type == NodeReady { - return c.Status == ConditionTrue - } - } - return false -} - -// PodRequestsAndLimits returns a dictionary of all defined resources summed up for all -// containers of the pod. -func PodRequestsAndLimits(pod *Pod) (reqs map[ResourceName]resource.Quantity, limits map[ResourceName]resource.Quantity, err error) { - reqs, limits = map[ResourceName]resource.Quantity{}, map[ResourceName]resource.Quantity{} - for _, container := range pod.Spec.Containers { - for name, quantity := range container.Resources.Requests { - if value, ok := reqs[name]; !ok { - reqs[name] = *quantity.Copy() - } else { - value.Add(quantity) - reqs[name] = value - } - } - for name, quantity := range container.Resources.Limits { - if value, ok := limits[name]; !ok { - limits[name] = *quantity.Copy() - } else { - value.Add(quantity) - limits[name] = value - } - } - } - // init containers define the minimum of any resource - for _, container := range pod.Spec.InitContainers { - for name, quantity := range container.Resources.Requests { - value, ok := reqs[name] - if !ok { - reqs[name] = *quantity.Copy() - continue - } - if quantity.Cmp(value) > 0 { - reqs[name] = *quantity.Copy() - } - } - for name, quantity := range container.Resources.Limits { - value, ok := limits[name] - if !ok { - limits[name] = *quantity.Copy() - continue - } - if quantity.Cmp(value) > 0 { - limits[name] = *quantity.Copy() - } - } - } - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/create.go b/vendor/k8s.io/kubernetes/pkg/api/rest/create.go deleted file mode 100644 index fa95b7f93..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/rest/create.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rest - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/validation" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/validation/field" -) - -// RESTCreateStrategy defines the minimum validation, accepted input, and -// name generation behavior to create an object that follows Kubernetes -// API conventions. -type RESTCreateStrategy interface { - runtime.ObjectTyper - // The name generate is used when the standard GenerateName field is set. 
- // The NameGenerator will be invoked prior to validation. - api.NameGenerator - - // NamespaceScoped returns true if the object must be within a namespace. - NamespaceScoped() bool - // PrepareForCreate is invoked on create before validation to normalize - // the object. For example: remove fields that are not to be persisted, - // sort order-insensitive list fields, etc. This should not remove fields - // whose presence would be considered a validation error. - PrepareForCreate(obj runtime.Object) - // Validate is invoked after default fields in the object have been filled in before - // the object is persisted. This method should not mutate the object. - Validate(ctx api.Context, obj runtime.Object) field.ErrorList - // Canonicalize is invoked after validation has succeeded but before the - // object has been persisted. This method may mutate the object. - Canonicalize(obj runtime.Object) -} - -// BeforeCreate ensures that common operations for all resources are performed on creation. It only returns -// errors that can be converted to api.Status. It invokes PrepareForCreate, then GenerateName, then Validate. -// It returns nil if the object should be created. -func BeforeCreate(strategy RESTCreateStrategy, ctx api.Context, obj runtime.Object) error { - objectMeta, kind, kerr := objectMetaAndKind(strategy, obj) - if kerr != nil { - return kerr - } - - if strategy.NamespaceScoped() { - if !api.ValidNamespace(ctx, objectMeta) { - return errors.NewBadRequest("the namespace of the provided object does not match the namespace sent on the request") - } - } else { - objectMeta.Namespace = api.NamespaceNone - } - objectMeta.DeletionTimestamp = nil - objectMeta.DeletionGracePeriodSeconds = nil - strategy.PrepareForCreate(obj) - api.FillObjectMetaSystemFields(ctx, objectMeta) - api.GenerateName(strategy, objectMeta) - - if errs := strategy.Validate(ctx, obj); len(errs) > 0 { - return errors.NewInvalid(kind.GroupKind(), objectMeta.Name, errs) - } - - // Custom validation (including name validation) passed - // Now run common validation on object meta - // Do this *after* custom validation so that specific error messages are shown whenever possible - if errs := validation.ValidateObjectMeta(objectMeta, strategy.NamespaceScoped(), validation.ValidatePathSegmentName, field.NewPath("metadata")); len(errs) > 0 { - return errors.NewInvalid(kind.GroupKind(), objectMeta.Name, errs) - } - - strategy.Canonicalize(obj) - - return nil -} - -// CheckGeneratedNameError checks whether an error that occurred creating a resource is due -// to generation being unable to pick a valid name. -func CheckGeneratedNameError(strategy RESTCreateStrategy, err error, obj runtime.Object) error { - if !errors.IsAlreadyExists(err) { - return err - } - - objectMeta, kind, kerr := objectMetaAndKind(strategy, obj) - if kerr != nil { - return kerr - } - - if len(objectMeta.GenerateName) == 0 { - return err - } - - return errors.NewServerTimeoutForKind(kind.GroupKind(), "POST", 0) -} - -// objectMetaAndKind retrieves kind and ObjectMeta from a runtime object, or returns an error. 
-func objectMetaAndKind(typer runtime.ObjectTyper, obj runtime.Object) (*api.ObjectMeta, unversioned.GroupVersionKind, error) { - objectMeta, err := api.ObjectMetaFor(obj) - if err != nil { - return nil, unversioned.GroupVersionKind{}, errors.NewInternalError(err) - } - kinds, _, err := typer.ObjectKinds(obj) - if err != nil { - return nil, unversioned.GroupVersionKind{}, errors.NewInternalError(err) - } - return objectMeta, kinds[0], nil -} - -// NamespaceScopedStrategy has a method to tell if the object must be in a namespace. -type NamespaceScopedStrategy interface { - // NamespaceScoped returns if the object must be in a namespace. - NamespaceScoped() bool -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/delete.go b/vendor/k8s.io/kubernetes/pkg/api/rest/delete.go deleted file mode 100644 index 34965d52f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/rest/delete.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rest - -import ( - "fmt" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// RESTDeleteStrategy defines deletion behavior on an object that follows Kubernetes -// API conventions. -type RESTDeleteStrategy interface { - runtime.ObjectTyper -} - -// RESTGracefulDeleteStrategy must be implemented by the registry that supports -// graceful deletion. -type RESTGracefulDeleteStrategy interface { - // CheckGracefulDelete should return true if the object can be gracefully deleted and set - // any default values on the DeleteOptions. - CheckGracefulDelete(obj runtime.Object, options *api.DeleteOptions) bool -} - -// BeforeDelete tests whether the object can be gracefully deleted. If graceful is set the object -// should be gracefully deleted, if gracefulPending is set the object has already been gracefully deleted -// (and the provided grace period is longer than the time to deletion), and an error is returned if the -// condition cannot be checked or the gracePeriodSeconds is invalid. The options argument may be updated with -// default values if graceful is true. -func BeforeDelete(strategy RESTDeleteStrategy, ctx api.Context, obj runtime.Object, options *api.DeleteOptions) (graceful, gracefulPending bool, err error) { - objectMeta, gvk, kerr := objectMetaAndKind(strategy, obj) - if kerr != nil { - return false, false, kerr - } - // Checking the Preconditions here to fail early. They'll be enforced later on when we actually do the deletion, too. - if options.Preconditions != nil && options.Preconditions.UID != nil && *options.Preconditions.UID != objectMeta.UID { - return false, false, errors.NewConflict(unversioned.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, objectMeta.Name, fmt.Errorf("the UID in the precondition (%s) does not match the UID in record (%s). 
The object might have been deleted and then recreated", *options.Preconditions.UID, objectMeta.UID)) - } - gracefulStrategy, ok := strategy.(RESTGracefulDeleteStrategy) - if !ok { - return false, false, nil - } - // if the object is already being deleted - if objectMeta.DeletionTimestamp != nil { - // if we are already being deleted, we may only shorten the deletion grace period - // this means the object was gracefully deleted previously but deletionGracePeriodSeconds was not set, - // so we force deletion immediately - if objectMeta.DeletionGracePeriodSeconds == nil { - return false, false, nil - } - // only a shorter grace period may be provided by a user - if options.GracePeriodSeconds != nil { - period := int64(*options.GracePeriodSeconds) - if period > *objectMeta.DeletionGracePeriodSeconds { - return false, true, nil - } - now := unversioned.NewTime(unversioned.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds))) - objectMeta.DeletionTimestamp = &now - objectMeta.DeletionGracePeriodSeconds = &period - options.GracePeriodSeconds = &period - return true, false, nil - } - // graceful deletion is pending, do nothing - options.GracePeriodSeconds = objectMeta.DeletionGracePeriodSeconds - return false, true, nil - } - - if !gracefulStrategy.CheckGracefulDelete(obj, options) { - return false, false, nil - } - now := unversioned.NewTime(unversioned.Now().Add(time.Second * time.Duration(*options.GracePeriodSeconds))) - objectMeta.DeletionTimestamp = &now - objectMeta.DeletionGracePeriodSeconds = options.GracePeriodSeconds - return true, false, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/doc.go b/vendor/k8s.io/kubernetes/pkg/api/rest/doc.go deleted file mode 100644 index 8fed0e9f4..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/rest/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package rest defines common logic around changes to Kubernetes resources. -package rest diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/export.go b/vendor/k8s.io/kubernetes/pkg/api/rest/export.go deleted file mode 100644 index e12f65de3..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/rest/export.go +++ /dev/null @@ -1,28 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package rest - -import ( - "k8s.io/kubernetes/pkg/runtime" -) - -// RESTExportStrategy is the interface that defines how to export a Kubernetes object -type RESTExportStrategy interface { - // Export strips fields that can not be set by the user. If 'exact' is false - // fields specific to the cluster are also stripped - Export(obj runtime.Object, exact bool) error -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/rest.go b/vendor/k8s.io/kubernetes/pkg/api/rest/rest.go deleted file mode 100644 index 4d5b5ba97..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/rest/rest.go +++ /dev/null @@ -1,306 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rest - -import ( - "io" - "net/http" - "net/url" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/watch" -) - -//TODO: -// Storage interfaces need to be separated into two groups; those that operate -// on collections and those that operate on individually named items. -// Collection interfaces: -// (Method: Current -> Proposed) -// GET: Lister -> CollectionGetter -// WATCH: Watcher -> CollectionWatcher -// CREATE: Creater -> CollectionCreater -// DELETE: (n/a) -> CollectionDeleter -// UPDATE: (n/a) -> CollectionUpdater -// -// Single item interfaces: -// (Method: Current -> Proposed) -// GET: Getter -> NamedGetter -// WATCH: (n/a) -> NamedWatcher -// CREATE: (n/a) -> NamedCreater -// DELETE: Deleter -> NamedDeleter -// UPDATE: Update -> NamedUpdater - -// Storage is a generic interface for RESTful storage services. -// Resources which are exported to the RESTful API of apiserver need to implement this interface. It is expected -// that objects may implement any of the below interfaces. -type Storage interface { - // New returns an empty object that can be used with Create and Update after request data has been put into it. - // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) - New() runtime.Object -} - -// KindProvider specifies a different kind for its API than for its internal storage. This is necessary for external -// objects that are not compiled into the api server. For such objects, there is no in-memory representation for -// the object, so they must be represented as generic objects (e.g. runtime.Unknown), but when we present the object as part of -// API discovery we want to present the specific kind, not the generic internal representation. -type KindProvider interface { - Kind() string -} - -// Lister is an object that can retrieve resources that match the provided field and label criteria. -type Lister interface { - // NewList returns an empty object that can be used with the List call. - // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) - NewList() runtime.Object - // List selects resources in the storage which match to the selector. 'options' can be nil. 
- List(ctx api.Context, options *api.ListOptions) (runtime.Object, error) -} - -// Exporter is an object that knows how to strip a RESTful resource for export -type Exporter interface { - // Export an object. Fields that are not user specified (e.g. Status, ObjectMeta.ResourceVersion) are stripped out - // Returns the stripped object. If 'exact' is true, fields that are specific to the cluster (e.g. namespace) are - // retained, otherwise they are stripped also. - Export(ctx api.Context, name string, opts unversioned.ExportOptions) (runtime.Object, error) -} - -// Getter is an object that can retrieve a named RESTful resource. -type Getter interface { - // Get finds a resource in the storage by name and returns it. - // Although it can return an arbitrary error value, IsNotFound(err) is true for the - // returned error value err when the specified resource is not found. - Get(ctx api.Context, name string) (runtime.Object, error) -} - -// GetterWithOptions is an object that retrieve a named RESTful resource and takes -// additional options on the get request. It allows a caller to also receive the -// subpath of the GET request. -type GetterWithOptions interface { - // Get finds a resource in the storage by name and returns it. - // Although it can return an arbitrary error value, IsNotFound(err) is true for the - // returned error value err when the specified resource is not found. - // The options object passed to it is of the same type returned by the NewGetOptions - // method. - Get(ctx api.Context, name string, options runtime.Object) (runtime.Object, error) - - // NewGetOptions returns an empty options object that will be used to pass - // options to the Get method. It may return a bool and a string, if true, the - // value of the request path below the object will be included as the named - // string in the serialization of the runtime object. E.g., returning "path" - // will convert the trailing request scheme value to "path" in the map[string][]string - // passed to the converter. - NewGetOptions() (runtime.Object, bool, string) -} - -// Deleter is an object that can delete a named RESTful resource. -type Deleter interface { - // Delete finds a resource in the storage and deletes it. - // Although it can return an arbitrary error value, IsNotFound(err) is true for the - // returned error value err when the specified resource is not found. - // Delete *may* return the object that was deleted, or a status object indicating additional - // information about deletion. - Delete(ctx api.Context, name string) (runtime.Object, error) -} - -// GracefulDeleter knows how to pass deletion options to allow delayed deletion of a -// RESTful object. -type GracefulDeleter interface { - // Delete finds a resource in the storage and deletes it. - // If options are provided, the resource will attempt to honor them or return an invalid - // request error. - // Although it can return an arbitrary error value, IsNotFound(err) is true for the - // returned error value err when the specified resource is not found. - // Delete *may* return the object that was deleted, or a status object indicating additional - // information about deletion. 
- Delete(ctx api.Context, name string, options *api.DeleteOptions) (runtime.Object, error) -} - -// GracefulDeleteAdapter adapts the Deleter interface to GracefulDeleter -type GracefulDeleteAdapter struct { - Deleter -} - -// Delete implements RESTGracefulDeleter in terms of Deleter -func (w GracefulDeleteAdapter) Delete(ctx api.Context, name string, options *api.DeleteOptions) (runtime.Object, error) { - return w.Deleter.Delete(ctx, name) -} - -// CollectionDeleter is an object that can delete a collection -// of RESTful resources. -type CollectionDeleter interface { - // DeleteCollection selects all resources in the storage matching given 'listOptions' - // and deletes them. If 'options' are provided, the resource will attempt to honor - // them or return an invalid request error. - // DeleteCollection may not be atomic - i.e. it may delete some objects and still - // return an error after it. On success, returns a list of deleted objects. - DeleteCollection(ctx api.Context, options *api.DeleteOptions, listOptions *api.ListOptions) (runtime.Object, error) -} - -// Creater is an object that can create an instance of a RESTful object. -type Creater interface { - // New returns an empty object that can be used with Create after request data has been put into it. - // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) - New() runtime.Object - - // Create creates a new version of a resource. - Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) -} - -// NamedCreater is an object that can create an instance of a RESTful object using a name parameter. -type NamedCreater interface { - // New returns an empty object that can be used with Create after request data has been put into it. - // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) - New() runtime.Object - - // Create creates a new version of a resource. It expects a name parameter from the path. - // This is needed for create operations on subresources which include the name of the parent - // resource in the path. - Create(ctx api.Context, name string, obj runtime.Object) (runtime.Object, error) -} - -// UpdatedObjectInfo provides information about an updated object to an Updater. -// It requires access to the old object in order to return the newly updated object. -type UpdatedObjectInfo interface { - // Returns preconditions built from the updated object, if applicable. - // May return nil, or a preconditions object containing nil fields, - // if no preconditions can be determined from the updated object. - Preconditions() *api.Preconditions - - // UpdatedObject returns the updated object, given a context and old object. - // The only time an empty oldObj should be passed in is if a "create on update" is occurring (there is no oldObj). - UpdatedObject(ctx api.Context, oldObj runtime.Object) (newObj runtime.Object, err error) -} - -// Updater is an object that can update an instance of a RESTful object. -type Updater interface { - // New returns an empty object that can be used with Update after request data has been put into it. - // This object must be a pointer type for use with Codec.DecodeInto([]byte, runtime.Object) - New() runtime.Object - - // Update finds a resource in the storage and updates it. Some implementations - // may allow updates creates the object - they should set the created boolean - // to true. 
- Update(ctx api.Context, name string, objInfo UpdatedObjectInfo) (runtime.Object, bool, error) -} - -// CreaterUpdater is a storage object that must support both create and update. -// Go prevents embedded interfaces that implement the same method. -type CreaterUpdater interface { - Creater - Update(ctx api.Context, name string, objInfo UpdatedObjectInfo) (runtime.Object, bool, error) -} - -// CreaterUpdater must satisfy the Updater interface. -var _ Updater = CreaterUpdater(nil) - -// Patcher is a storage object that supports both get and update. -type Patcher interface { - Getter - Updater -} - -// Watcher should be implemented by all Storage objects that -// want to offer the ability to watch for changes through the watch api. -type Watcher interface { - // 'label' selects on labels; 'field' selects on the object's fields. Not all fields - // are supported; an error should be returned if 'field' tries to select on a field that - // isn't supported. 'resourceVersion' allows for continuing/starting a watch at a - // particular version. - Watch(ctx api.Context, options *api.ListOptions) (watch.Interface, error) -} - -// StandardStorage is an interface covering the common verbs. Provided for testing whether a -// resource satisfies the normal storage methods. Use Storage when passing opaque storage objects. -type StandardStorage interface { - Getter - Lister - CreaterUpdater - GracefulDeleter - CollectionDeleter - Watcher -} - -// Redirector know how to return a remote resource's location. -type Redirector interface { - // ResourceLocation should return the remote location of the given resource, and an optional transport to use to request it, or an error. - ResourceLocation(ctx api.Context, id string) (remoteLocation *url.URL, transport http.RoundTripper, err error) -} - -// Responder abstracts the normal response behavior for a REST method and is passed to callers that -// may wish to handle the response directly in some cases, but delegate to the normal error or object -// behavior in other cases. -type Responder interface { - // Object writes the provided object to the response. Invoking this method multiple times is undefined. - Object(statusCode int, obj runtime.Object) - // Error writes the provided error to the response. This method may only be invoked once. - Error(err error) -} - -// Connecter is a storage object that responds to a connection request. -type Connecter interface { - // Connect returns an http.Handler that will handle the request/response for a given API invocation. - // The provided responder may be used for common API responses. The responder will write both status - // code and body, so the ServeHTTP method should exit after invoking the responder. The Handler will - // be used for a single API request and then discarded. The Responder is guaranteed to write to the - // same http.ResponseWriter passed to ServeHTTP. - Connect(ctx api.Context, id string, options runtime.Object, r Responder) (http.Handler, error) - - // NewConnectOptions returns an empty options object that will be used to pass - // options to the Connect method. If nil, then a nil options object is passed to - // Connect. It may return a bool and a string. If true, the value of the request - // path below the object will be included as the named string in the serialization - // of the runtime object. 
- NewConnectOptions() (runtime.Object, bool, string) - - // ConnectMethods returns the list of HTTP methods handled by Connect - ConnectMethods() []string -} - -// ResourceStreamer is an interface implemented by objects that prefer to be streamed from the server -// instead of decoded directly. -type ResourceStreamer interface { - // InputStream should return an io.ReadCloser if the provided object supports streaming. The desired - // api version and a accept header (may be empty) are passed to the call. If no error occurs, - // the caller may return a flag indicating whether the result should be flushed as writes occur - // and a content type string that indicates the type of the stream. - // If a null stream is returned, a StatusNoContent response wil be generated. - InputStream(apiVersion, acceptHeader string) (stream io.ReadCloser, flush bool, mimeType string, err error) -} - -// StorageMetadata is an optional interface that callers can implement to provide additional -// information about their Storage objects. -type StorageMetadata interface { - // ProducesMIMETypes returns a list of the MIME types the specified HTTP verb (GET, POST, DELETE, - // PATCH) can respond with. - ProducesMIMETypes(verb string) []string -} - -// ConnectRequest is an object passed to admission control for Connect operations -type ConnectRequest struct { - // Name is the name of the object on which the connect request was made - Name string - - // Options is the options object passed to the connect request. See the NewConnectOptions method on Connecter - Options runtime.Object - - // ResourcePath is the path for the resource in the REST server (ie. "pods/proxy") - ResourcePath string -} - -func (obj *ConnectRequest) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind } diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/types.go b/vendor/k8s.io/kubernetes/pkg/api/rest/types.go deleted file mode 100644 index 0e7f048ba..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/rest/types.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rest - -import ( - "k8s.io/kubernetes/pkg/runtime" -) - -// ObjectFunc is a function to act on a given object. An error may be returned -// if the hook cannot be completed. An ObjectFunc may transform the provided -// object. -type ObjectFunc func(obj runtime.Object) error - -// AllFuncs returns an ObjectFunc that attempts to run all of the provided functions -// in order, returning early if there are any errors. 
-func AllFuncs(fns ...ObjectFunc) ObjectFunc { - return func(obj runtime.Object) error { - for _, fn := range fns { - if fn == nil { - continue - } - if err := fn(obj); err != nil { - return err - } - } - return nil - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/rest/update.go b/vendor/k8s.io/kubernetes/pkg/api/rest/update.go deleted file mode 100644 index bc5ed0c5f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/rest/update.go +++ /dev/null @@ -1,175 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rest - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/validation" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/validation/field" -) - -// RESTUpdateStrategy defines the minimum validation, accepted input, and -// name generation behavior to update an object that follows Kubernetes -// API conventions. A resource may have many UpdateStrategies, depending on -// the call pattern in use. -type RESTUpdateStrategy interface { - runtime.ObjectTyper - // NamespaceScoped returns true if the object must be within a namespace. - NamespaceScoped() bool - // AllowCreateOnUpdate returns true if the object can be created by a PUT. - AllowCreateOnUpdate() bool - // PrepareForUpdate is invoked on update before validation to normalize - // the object. For example: remove fields that are not to be persisted, - // sort order-insensitive list fields, etc. This should not remove fields - // whose presence would be considered a validation error. - PrepareForUpdate(obj, old runtime.Object) - // ValidateUpdate is invoked after default fields in the object have been - // filled in before the object is persisted. This method should not mutate - // the object. - ValidateUpdate(ctx api.Context, obj, old runtime.Object) field.ErrorList - // Canonicalize is invoked after validation has succeeded but before the - // object has been persisted. This method may mutate the object. - Canonicalize(obj runtime.Object) - // AllowUnconditionalUpdate returns true if the object can be updated - // unconditionally (irrespective of the latest resource version), when - // there is no resource version specified in the object. - AllowUnconditionalUpdate() bool -} - -// TODO: add other common fields that require global validation. -func validateCommonFields(obj, old runtime.Object) (field.ErrorList, error) { - allErrs := field.ErrorList{} - objectMeta, err := api.ObjectMetaFor(obj) - if err != nil { - return nil, fmt.Errorf("failed to get new object metadata: %v", err) - } - oldObjectMeta, err := api.ObjectMetaFor(old) - if err != nil { - return nil, fmt.Errorf("failed to get old object metadata: %v", err) - } - allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(objectMeta, oldObjectMeta, field.NewPath("metadata"))...) - - return allErrs, nil -} - -// BeforeUpdate ensures that common operations for all resources are performed on update. 
It only returns -// errors that can be converted to api.Status. It will invoke update validation with the provided existing -// and updated objects. -func BeforeUpdate(strategy RESTUpdateStrategy, ctx api.Context, obj, old runtime.Object) error { - objectMeta, kind, kerr := objectMetaAndKind(strategy, obj) - if kerr != nil { - return kerr - } - if strategy.NamespaceScoped() { - if !api.ValidNamespace(ctx, objectMeta) { - return errors.NewBadRequest("the namespace of the provided object does not match the namespace sent on the request") - } - } else { - objectMeta.Namespace = api.NamespaceNone - } - - strategy.PrepareForUpdate(obj, old) - - // Ensure some common fields, like UID, are validated for all resources. - errs, err := validateCommonFields(obj, old) - if err != nil { - return errors.NewInternalError(err) - } - - errs = append(errs, strategy.ValidateUpdate(ctx, obj, old)...) - if len(errs) > 0 { - return errors.NewInvalid(kind.GroupKind(), objectMeta.Name, errs) - } - - strategy.Canonicalize(obj) - - return nil -} - -// TransformFunc is a function to transform and return newObj -type TransformFunc func(ctx api.Context, newObj runtime.Object, oldObj runtime.Object) (transformedNewObj runtime.Object, err error) - -// defaultUpdatedObjectInfo implements UpdatedObjectInfo -type defaultUpdatedObjectInfo struct { - // obj is the updated object - obj runtime.Object - - // copier makes a copy of the object before returning it. - // this allows repeated calls to UpdatedObject() to return - // pristine data, even if the returned value is mutated. - copier runtime.ObjectCopier - - // transformers is an optional list of transforming functions that modify or - // replace obj using information from the context, old object, or other sources. - transformers []TransformFunc -} - -// DefaultUpdatedObjectInfo returns an UpdatedObjectInfo impl based on the specified object. -func DefaultUpdatedObjectInfo(obj runtime.Object, copier runtime.ObjectCopier, transformers ...TransformFunc) UpdatedObjectInfo { - return &defaultUpdatedObjectInfo{obj, copier, transformers} -} - -// Preconditions satisfies the UpdatedObjectInfo interface. -func (i *defaultUpdatedObjectInfo) Preconditions() *api.Preconditions { - // Attempt to get the UID out of the object - accessor, err := meta.Accessor(i.obj) - if err != nil { - // If no UID can be read, no preconditions are possible - return nil - } - - // If empty, no preconditions needed - uid := accessor.GetUID() - if len(uid) == 0 { - return nil - } - - return &api.Preconditions{UID: &uid} -} - -// UpdatedObject satisfies the UpdatedObjectInfo interface. -// It returns a copy of the held obj, passed through any configured transformers. -func (i *defaultUpdatedObjectInfo) UpdatedObject(ctx api.Context, oldObj runtime.Object) (runtime.Object, error) { - var err error - // Start with the configured object - newObj := i.obj - - // If the original is non-nil (might be nil if the first transformer builds the object from the oldObj), make a copy, - // so we don't return the original. BeforeUpdate can mutate the returned object, doing things like clearing ResourceVersion. - // If we're re-called, we need to be able to return the pristine version. 
- if newObj != nil { - newObj, err = i.copier.Copy(newObj) - if err != nil { - return nil, err - } - } - - // Allow any configured transformers to update the new object - for _, transformer := range i.transformers { - newObj, err = transformer(ctx, newObj, oldObj) - if err != nil { - return nil, err - } - } - - return newObj, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/service/annotations.go b/vendor/k8s.io/kubernetes/pkg/api/service/annotations.go deleted file mode 100644 index 9d57fa4c2..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/service/annotations.go +++ /dev/null @@ -1,28 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package service - -const ( - // AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers - // - // It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to - // allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow - // access only from the CIDRs currently allocated to MIT & the USPS. - // - // Not all cloud providers support this annotation, though AWS & GCE do. - AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/service/util.go b/vendor/k8s.io/kubernetes/pkg/api/service/util.go deleted file mode 100644 index b6611d237..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/service/util.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package service - -import ( - "fmt" - "strings" - - "k8s.io/kubernetes/pkg/api" - netsets "k8s.io/kubernetes/pkg/util/net/sets" -) - -const ( - defaultLoadBalancerSourceRanges = "0.0.0.0/0" -) - -// IsAllowAll checks whether the netsets.IPNet allows traffic from 0.0.0.0/0 -func IsAllowAll(ipnets netsets.IPNet) bool { - for _, s := range ipnets.StringSlice() { - if s == "0.0.0.0/0" { - return true - } - } - return false -} - -// GetLoadBalancerSourceRanges first try to parse and verify LoadBalancerSourceRanges field from a service. -// If the field is not specified, turn to parse and verify the AnnotationLoadBalancerSourceRangesKey annotation from a service, -// extracting the source ranges to allow, and if not present returns a default (allow-all) value. 
-func GetLoadBalancerSourceRanges(service *api.Service) (netsets.IPNet, error) { - var ipnets netsets.IPNet - var err error - // if SourceRange field is specified, ignore sourceRange annotation - if len(service.Spec.LoadBalancerSourceRanges) > 0 { - specs := service.Spec.LoadBalancerSourceRanges - ipnets, err = netsets.ParseIPNets(specs...) - - if err != nil { - return nil, fmt.Errorf("service.Spec.LoadBalancerSourceRanges: %v is not valid. Expecting a list of IP ranges. For example, 10.0.0.0/24. Error msg: %v", specs, err) - } - } else { - val := service.Annotations[AnnotationLoadBalancerSourceRangesKey] - val = strings.TrimSpace(val) - if val == "" { - val = defaultLoadBalancerSourceRanges - } - specs := strings.Split(val, ",") - ipnets, err = netsets.ParseIPNets(specs...) - if err != nil { - return nil, fmt.Errorf("%s: %s is not valid. Expecting a comma-separated list of source IP ranges. For example, 10.0.0.0/24,192.168.2.0/24", AnnotationLoadBalancerSourceRangesKey, val) - } - } - return ipnets, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/types.generated.go b/vendor/k8s.io/kubernetes/pkg/api/types.generated.go deleted file mode 100644 index b2dcd4afb..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/types.generated.go +++ /dev/null @@ -1,59756 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package api - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg3_resource "k8s.io/kubernetes/pkg/api/resource" - pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg6_fields "k8s.io/kubernetes/pkg/fields" - pkg5_labels "k8s.io/kubernetes/pkg/labels" - pkg7_runtime "k8s.io/kubernetes/pkg/runtime" - pkg1_types "k8s.io/kubernetes/pkg/types" - pkg4_intstr "k8s.io/kubernetes/pkg/util/intstr" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg3_resource.Quantity - var v1 pkg2_unversioned.Time - var v2 pkg6_fields.Selector - var v3 pkg5_labels.Selector - var v4 pkg7_runtime.Object - var v5 pkg1_types.UID - var v6 pkg4_intstr.IntOrString - var v7 time.Time - _, _, _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5, v6, v7 - } -} - -func (x *ObjectMeta) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [14]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Name != "" - yyq2[1] = x.GenerateName != "" - yyq2[2] = x.Namespace != "" - yyq2[3] = x.SelfLink != "" - yyq2[4] = x.UID != "" - yyq2[5] = x.ResourceVersion != "" - yyq2[6] = x.Generation != 0 - yyq2[7] = true - yyq2[8] = x.DeletionTimestamp != nil - yyq2[9] = x.DeletionGracePeriodSeconds != nil - yyq2[10] = len(x.Labels) != 0 - yyq2[11] = len(x.Annotations) != 0 - yyq2[12] = len(x.OwnerReferences) != 0 - yyq2[13] = len(x.Finalizers) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(14) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("generateName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else 
{ - r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selfLink")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeInt(int64(x.Generation)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("generation")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeInt(int64(x.Generation)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yy25 := &x.CreationTimestamp - yym26 := z.EncBinary() - _ = yym26 - if false { - } else if z.HasExtensions() && z.EncExt(yy25) { - } else if yym26 { - z.EncBinaryMarshal(yy25) - } else if !yym26 && z.IsJSONHandle() { - z.EncJSONMarshal(yy25) - } else { - z.EncFallback(yy25) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("creationTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy27 := &x.CreationTimestamp - yym28 := z.EncBinary() - _ = yym28 - if false { - } else if z.HasExtensions() && z.EncExt(yy27) { - } else if yym28 { - z.EncBinaryMarshal(yy27) - } else if !yym28 && z.IsJSONHandle() { - z.EncJSONMarshal(yy27) - } else { - z.EncFallback(yy27) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.DeletionTimestamp == nil { - r.EncodeNil() - } else { - yym30 := z.EncBinary() - _ = yym30 - if false { - } else if z.HasExtensions() && 
z.EncExt(x.DeletionTimestamp) { - } else if yym30 { - z.EncBinaryMarshal(x.DeletionTimestamp) - } else if !yym30 && z.IsJSONHandle() { - z.EncJSONMarshal(x.DeletionTimestamp) - } else { - z.EncFallback(x.DeletionTimestamp) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletionTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DeletionTimestamp == nil { - r.EncodeNil() - } else { - yym31 := z.EncBinary() - _ = yym31 - if false { - } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { - } else if yym31 { - z.EncBinaryMarshal(x.DeletionTimestamp) - } else if !yym31 && z.IsJSONHandle() { - z.EncJSONMarshal(x.DeletionTimestamp) - } else { - z.EncFallback(x.DeletionTimestamp) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.DeletionGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy33 := *x.DeletionGracePeriodSeconds - yym34 := z.EncBinary() - _ = yym34 - if false { - } else { - r.EncodeInt(int64(yy33)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletionGracePeriodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DeletionGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy35 := *x.DeletionGracePeriodSeconds - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - r.EncodeInt(int64(yy35)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.Labels == nil { - r.EncodeNil() - } else { - yym38 := z.EncBinary() - _ = yym38 - if false { - } else { - z.F.EncMapStringStringV(x.Labels, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("labels")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Labels == nil { - r.EncodeNil() - } else { - yym39 := z.EncBinary() - _ = yym39 - if false { - } else { - z.F.EncMapStringStringV(x.Labels, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.Annotations == nil { - r.EncodeNil() - } else { - yym41 := z.EncBinary() - _ = yym41 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("annotations")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Annotations == nil { - r.EncodeNil() - } else { - yym42 := z.EncBinary() - _ = yym42 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - if x.OwnerReferences == nil { - r.EncodeNil() - } else { - yym44 := z.EncBinary() - _ = yym44 - if false { - } else { - h.encSliceOwnerReference(([]OwnerReference)(x.OwnerReferences), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ownerReferences")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - 
if x.OwnerReferences == nil { - r.EncodeNil() - } else { - yym45 := z.EncBinary() - _ = yym45 - if false { - } else { - h.encSliceOwnerReference(([]OwnerReference)(x.OwnerReferences), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym47 := z.EncBinary() - _ = yym47 - if false { - } else { - z.F.EncSliceStringV(x.Finalizers, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("finalizers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym48 := z.EncBinary() - _ = yym48 - if false { - } else { - z.F.EncSliceStringV(x.Finalizers, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ObjectMeta) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ObjectMeta) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "generateName": - if r.TryDecodeAsNil() { - x.GenerateName = "" - } else { - x.GenerateName = string(r.DecodeString()) - } - case "namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - case "selfLink": - if r.TryDecodeAsNil() { - x.SelfLink = "" - } else { - x.SelfLink = string(r.DecodeString()) - } - case "uid": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg1_types.UID(r.DecodeString()) - } - case "resourceVersion": - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - case "generation": - if r.TryDecodeAsNil() { - x.Generation = 0 - } else { - x.Generation = int64(r.DecodeInt(64)) - } - case "creationTimestamp": - if r.TryDecodeAsNil() { - x.CreationTimestamp = pkg2_unversioned.Time{} - } else { - yyv11 := &x.CreationTimestamp - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else if yym12 { - 
z.DecBinaryUnmarshal(yyv11) - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv11) - } else { - z.DecFallback(yyv11, false) - } - } - case "deletionTimestamp": - if r.TryDecodeAsNil() { - if x.DeletionTimestamp != nil { - x.DeletionTimestamp = nil - } - } else { - if x.DeletionTimestamp == nil { - x.DeletionTimestamp = new(pkg2_unversioned.Time) - } - yym14 := z.DecBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym14 { - z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym14 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.DeletionTimestamp) - } else { - z.DecFallback(x.DeletionTimestamp, false) - } - } - case "deletionGracePeriodSeconds": - if r.TryDecodeAsNil() { - if x.DeletionGracePeriodSeconds != nil { - x.DeletionGracePeriodSeconds = nil - } - } else { - if x.DeletionGracePeriodSeconds == nil { - x.DeletionGracePeriodSeconds = new(int64) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - case "labels": - if r.TryDecodeAsNil() { - x.Labels = nil - } else { - yyv17 := &x.Labels - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - z.F.DecMapStringStringX(yyv17, false, d) - } - } - case "annotations": - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv19 := &x.Annotations - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - z.F.DecMapStringStringX(yyv19, false, d) - } - } - case "ownerReferences": - if r.TryDecodeAsNil() { - x.OwnerReferences = nil - } else { - yyv21 := &x.OwnerReferences - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - h.decSliceOwnerReference((*[]OwnerReference)(yyv21), d) - } - } - case "finalizers": - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv23 := &x.Finalizers - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - z.F.DecSliceStringX(yyv23, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj25 int - var yyb25 bool - var yyhl25 bool = l >= 0 - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.GenerateName = "" - } else { - x.GenerateName = string(r.DecodeString()) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SelfLink = "" - } else { - x.SelfLink = string(r.DecodeString()) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg1_types.UID(r.DecodeString()) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Generation = 0 - } else { - x.Generation = int64(r.DecodeInt(64)) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CreationTimestamp = pkg2_unversioned.Time{} - } else { - yyv33 := &x.CreationTimestamp - yym34 := z.DecBinary() - _ = yym34 - if false { - } else if z.HasExtensions() && z.DecExt(yyv33) { - } else if yym34 { - z.DecBinaryUnmarshal(yyv33) - } else if !yym34 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv33) - } else { - z.DecFallback(yyv33, false) - } - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DeletionTimestamp != nil { - x.DeletionTimestamp = nil - } - } else { - if x.DeletionTimestamp == nil { - x.DeletionTimestamp = new(pkg2_unversioned.Time) - } - yym36 := z.DecBinary() - _ = yym36 - if false { - } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym36 { - z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym36 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.DeletionTimestamp) - } else { - z.DecFallback(x.DeletionTimestamp, false) - } - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DeletionGracePeriodSeconds != nil { - x.DeletionGracePeriodSeconds = nil - } - } else { - if x.DeletionGracePeriodSeconds == nil { - x.DeletionGracePeriodSeconds = new(int64) - } - yym38 := z.DecBinary() - _ = yym38 - if false { - } else { - *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Labels = nil - } else { - yyv39 := &x.Labels - yym40 := z.DecBinary() - _ = yym40 - if false { - } else { - z.F.DecMapStringStringX(yyv39, false, d) - } - } - yyj25++ - if 
yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv41 := &x.Annotations - yym42 := z.DecBinary() - _ = yym42 - if false { - } else { - z.F.DecMapStringStringX(yyv41, false, d) - } - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.OwnerReferences = nil - } else { - yyv43 := &x.OwnerReferences - yym44 := z.DecBinary() - _ = yym44 - if false { - } else { - h.decSliceOwnerReference((*[]OwnerReference)(yyv43), d) - } - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv45 := &x.Finalizers - yym46 := z.DecBinary() - _ = yym46 - if false { - } else { - z.F.DecSliceStringX(yyv45, false, d) - } - } - for { - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj25-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [21]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.VolumeSource.HostPath != nil && x.HostPath != nil - yyq2[2] = x.VolumeSource.EmptyDir != nil && x.EmptyDir != nil - yyq2[3] = x.VolumeSource.GCEPersistentDisk != nil && x.GCEPersistentDisk != nil - yyq2[4] = x.VolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil - yyq2[5] = x.VolumeSource.GitRepo != nil && x.GitRepo != nil - yyq2[6] = x.VolumeSource.Secret != nil && x.Secret != nil - yyq2[7] = x.VolumeSource.NFS != nil && x.NFS != nil - yyq2[8] = x.VolumeSource.ISCSI != nil && x.ISCSI != nil - yyq2[9] = x.VolumeSource.Glusterfs != nil && x.Glusterfs != nil - yyq2[10] = x.VolumeSource.PersistentVolumeClaim != nil && x.PersistentVolumeClaim != nil - yyq2[11] = x.VolumeSource.RBD != nil && x.RBD != nil - yyq2[12] = x.VolumeSource.FlexVolume != nil && x.FlexVolume != nil - yyq2[13] = x.VolumeSource.Cinder != nil && x.Cinder != nil - yyq2[14] = x.VolumeSource.CephFS != nil && x.CephFS != nil - yyq2[15] = x.VolumeSource.Flocker != nil && x.Flocker != nil - yyq2[16] = x.VolumeSource.DownwardAPI != nil && x.DownwardAPI != nil - yyq2[17] = x.VolumeSource.FC != nil && x.FC != nil - yyq2[18] = x.VolumeSource.AzureFile != nil && x.AzureFile != nil - yyq2[19] = x.VolumeSource.ConfigMap != nil && x.ConfigMap != nil - yyq2[20] = x.VolumeSource.VsphereVolume != nil && x.VsphereVolume != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(21) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || 
yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - var yyn6 bool - if x.VolumeSource.HostPath == nil { - yyn6 = true - goto LABEL6 - } - LABEL6: - if yyr2 || yy2arr2 { - if yyn6 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn6 { - r.EncodeNil() - } else { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } - } - } - var yyn9 bool - if x.VolumeSource.EmptyDir == nil { - yyn9 = true - goto LABEL9 - } - LABEL9: - if yyr2 || yy2arr2 { - if yyn9 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.EmptyDir == nil { - r.EncodeNil() - } else { - x.EmptyDir.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("emptyDir")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn9 { - r.EncodeNil() - } else { - if x.EmptyDir == nil { - r.EncodeNil() - } else { - x.EmptyDir.CodecEncodeSelf(e) - } - } - } - } - var yyn12 bool - if x.VolumeSource.GCEPersistentDisk == nil { - yyn12 = true - goto LABEL12 - } - LABEL12: - if yyr2 || yy2arr2 { - if yyn12 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn12 { - r.EncodeNil() - } else { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } - } - } - var yyn15 bool - if x.VolumeSource.AWSElasticBlockStore == nil { - yyn15 = true - goto LABEL15 - } - LABEL15: - if yyr2 || yy2arr2 { - if yyn15 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn15 { - r.EncodeNil() - } else { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } - } - } - var yyn18 bool - if x.VolumeSource.GitRepo == nil { - yyn18 = true - goto LABEL18 - } - LABEL18: - if 
yyr2 || yy2arr2 { - if yyn18 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.GitRepo == nil { - r.EncodeNil() - } else { - x.GitRepo.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gitRepo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn18 { - r.EncodeNil() - } else { - if x.GitRepo == nil { - r.EncodeNil() - } else { - x.GitRepo.CodecEncodeSelf(e) - } - } - } - } - var yyn21 bool - if x.VolumeSource.Secret == nil { - yyn21 = true - goto LABEL21 - } - LABEL21: - if yyr2 || yy2arr2 { - if yyn21 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.Secret == nil { - r.EncodeNil() - } else { - x.Secret.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secret")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn21 { - r.EncodeNil() - } else { - if x.Secret == nil { - r.EncodeNil() - } else { - x.Secret.CodecEncodeSelf(e) - } - } - } - } - var yyn24 bool - if x.VolumeSource.NFS == nil { - yyn24 = true - goto LABEL24 - } - LABEL24: - if yyr2 || yy2arr2 { - if yyn24 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn24 { - r.EncodeNil() - } else { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } - } - } - var yyn27 bool - if x.VolumeSource.ISCSI == nil { - yyn27 = true - goto LABEL27 - } - LABEL27: - if yyr2 || yy2arr2 { - if yyn27 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iscsi")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn27 { - r.EncodeNil() - } else { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } - } - } - var yyn30 bool - if x.VolumeSource.Glusterfs == nil { - yyn30 = true - goto LABEL30 - } - LABEL30: - if yyr2 || yy2arr2 { - if yyn30 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn30 { - r.EncodeNil() - } else { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } - } - } - var yyn33 bool - if x.VolumeSource.PersistentVolumeClaim == nil { - yyn33 = true - goto LABEL33 - } - LABEL33: - if yyr2 || yy2arr2 { - if yyn33 { - r.EncodeNil() - } else 
{ - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.PersistentVolumeClaim == nil { - r.EncodeNil() - } else { - x.PersistentVolumeClaim.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeClaim")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn33 { - r.EncodeNil() - } else { - if x.PersistentVolumeClaim == nil { - r.EncodeNil() - } else { - x.PersistentVolumeClaim.CodecEncodeSelf(e) - } - } - } - } - var yyn36 bool - if x.VolumeSource.RBD == nil { - yyn36 = true - goto LABEL36 - } - LABEL36: - if yyr2 || yy2arr2 { - if yyn36 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rbd")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn36 { - r.EncodeNil() - } else { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } - } - } - var yyn39 bool - if x.VolumeSource.FlexVolume == nil { - yyn39 = true - goto LABEL39 - } - LABEL39: - if yyr2 || yy2arr2 { - if yyn39 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn39 { - r.EncodeNil() - } else { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } - } - } - var yyn42 bool - if x.VolumeSource.Cinder == nil { - yyn42 = true - goto LABEL42 - } - LABEL42: - if yyr2 || yy2arr2 { - if yyn42 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cinder")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn42 { - r.EncodeNil() - } else { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } - } - } - var yyn45 bool - if x.VolumeSource.CephFS == nil { - yyn45 = true - goto LABEL45 - } - LABEL45: - if yyr2 || yy2arr2 { - if yyn45 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[14] { - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cephfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn45 { - r.EncodeNil() - } else { - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } - } - } - var yyn48 bool - if x.VolumeSource.Flocker == nil { - yyn48 = true - goto LABEL48 - } - LABEL48: - if yyr2 || yy2arr2 { - if yyn48 { - 
r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[15] { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flocker")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn48 { - r.EncodeNil() - } else { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } - } - } - var yyn51 bool - if x.VolumeSource.DownwardAPI == nil { - yyn51 = true - goto LABEL51 - } - LABEL51: - if yyr2 || yy2arr2 { - if yyn51 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[16] { - if x.DownwardAPI == nil { - r.EncodeNil() - } else { - x.DownwardAPI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("downwardAPI")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn51 { - r.EncodeNil() - } else { - if x.DownwardAPI == nil { - r.EncodeNil() - } else { - x.DownwardAPI.CodecEncodeSelf(e) - } - } - } - } - var yyn54 bool - if x.VolumeSource.FC == nil { - yyn54 = true - goto LABEL54 - } - LABEL54: - if yyr2 || yy2arr2 { - if yyn54 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[17] { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[17] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fc")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn54 { - r.EncodeNil() - } else { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } - } - } - var yyn57 bool - if x.VolumeSource.AzureFile == nil { - yyn57 = true - goto LABEL57 - } - LABEL57: - if yyr2 || yy2arr2 { - if yyn57 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[18] { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[18] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn57 { - r.EncodeNil() - } else { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } - } - } - var yyn60 bool - if x.VolumeSource.ConfigMap == nil { - yyn60 = true - goto LABEL60 - } - LABEL60: - if yyr2 || yy2arr2 { - if yyn60 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[19] { - if x.ConfigMap == nil { - r.EncodeNil() - } else { - x.ConfigMap.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[19] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("configMap")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn60 { - r.EncodeNil() - } else { - if x.ConfigMap == nil { - r.EncodeNil() - } else { - x.ConfigMap.CodecEncodeSelf(e) - } - } - } - } - var yyn63 bool - if x.VolumeSource.VsphereVolume == nil { - yyn63 = true - goto LABEL63 - } - LABEL63: - if yyr2 || yy2arr2 { - if yyn63 { - 
r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[20] { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[20] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn63 { - r.EncodeNil() - } else { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Volume) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Volume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "hostPath": - if x.VolumeSource.HostPath == nil { - x.VolumeSource.HostPath = new(HostPathVolumeSource) - } - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - case "emptyDir": - if x.VolumeSource.EmptyDir == nil { - x.VolumeSource.EmptyDir = new(EmptyDirVolumeSource) - } - if r.TryDecodeAsNil() { - if x.EmptyDir != nil { - x.EmptyDir = nil - } - } else { - if x.EmptyDir == nil { - x.EmptyDir = new(EmptyDirVolumeSource) - } - x.EmptyDir.CodecDecodeSelf(d) - } - case "gcePersistentDisk": - if x.VolumeSource.GCEPersistentDisk == nil { - x.VolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - case "awsElasticBlockStore": - if x.VolumeSource.AWSElasticBlockStore == nil { - x.VolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - 
x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - case "gitRepo": - if x.VolumeSource.GitRepo == nil { - x.VolumeSource.GitRepo = new(GitRepoVolumeSource) - } - if r.TryDecodeAsNil() { - if x.GitRepo != nil { - x.GitRepo = nil - } - } else { - if x.GitRepo == nil { - x.GitRepo = new(GitRepoVolumeSource) - } - x.GitRepo.CodecDecodeSelf(d) - } - case "secret": - if x.VolumeSource.Secret == nil { - x.VolumeSource.Secret = new(SecretVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Secret != nil { - x.Secret = nil - } - } else { - if x.Secret == nil { - x.Secret = new(SecretVolumeSource) - } - x.Secret.CodecDecodeSelf(d) - } - case "nfs": - if x.VolumeSource.NFS == nil { - x.VolumeSource.NFS = new(NFSVolumeSource) - } - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - case "iscsi": - if x.VolumeSource.ISCSI == nil { - x.VolumeSource.ISCSI = new(ISCSIVolumeSource) - } - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - case "glusterfs": - if x.VolumeSource.Glusterfs == nil { - x.VolumeSource.Glusterfs = new(GlusterfsVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - case "persistentVolumeClaim": - if x.VolumeSource.PersistentVolumeClaim == nil { - x.VolumeSource.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - if r.TryDecodeAsNil() { - if x.PersistentVolumeClaim != nil { - x.PersistentVolumeClaim = nil - } - } else { - if x.PersistentVolumeClaim == nil { - x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - x.PersistentVolumeClaim.CodecDecodeSelf(d) - } - case "rbd": - if x.VolumeSource.RBD == nil { - x.VolumeSource.RBD = new(RBDVolumeSource) - } - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - case "flexVolume": - if x.VolumeSource.FlexVolume == nil { - x.VolumeSource.FlexVolume = new(FlexVolumeSource) - } - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - case "cinder": - if x.VolumeSource.Cinder == nil { - x.VolumeSource.Cinder = new(CinderVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - case "cephfs": - if x.VolumeSource.CephFS == nil { - x.VolumeSource.CephFS = new(CephFSVolumeSource) - } - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - case "flocker": - if x.VolumeSource.Flocker == nil { - x.VolumeSource.Flocker = new(FlockerVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - case "downwardAPI": - if x.VolumeSource.DownwardAPI == nil { - x.VolumeSource.DownwardAPI = new(DownwardAPIVolumeSource) - } - if 
r.TryDecodeAsNil() { - if x.DownwardAPI != nil { - x.DownwardAPI = nil - } - } else { - if x.DownwardAPI == nil { - x.DownwardAPI = new(DownwardAPIVolumeSource) - } - x.DownwardAPI.CodecDecodeSelf(d) - } - case "fc": - if x.VolumeSource.FC == nil { - x.VolumeSource.FC = new(FCVolumeSource) - } - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - case "azureFile": - if x.VolumeSource.AzureFile == nil { - x.VolumeSource.AzureFile = new(AzureFileVolumeSource) - } - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - case "configMap": - if x.VolumeSource.ConfigMap == nil { - x.VolumeSource.ConfigMap = new(ConfigMapVolumeSource) - } - if r.TryDecodeAsNil() { - if x.ConfigMap != nil { - x.ConfigMap = nil - } - } else { - if x.ConfigMap == nil { - x.ConfigMap = new(ConfigMapVolumeSource) - } - x.ConfigMap.CodecDecodeSelf(d) - } - case "vsphereVolume": - if x.VolumeSource.VsphereVolume == nil { - x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj25 int - var yyb25 bool - var yyhl25 bool = l >= 0 - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - if x.VolumeSource.HostPath == nil { - x.VolumeSource.HostPath = new(HostPathVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - if x.VolumeSource.EmptyDir == nil { - x.VolumeSource.EmptyDir = new(EmptyDirVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.EmptyDir != nil { - x.EmptyDir = nil - } - } else { - if x.EmptyDir == nil { - x.EmptyDir = new(EmptyDirVolumeSource) - } - x.EmptyDir.CodecDecodeSelf(d) - } - if x.VolumeSource.GCEPersistentDisk == nil { - x.VolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - if x.VolumeSource.AWSElasticBlockStore == nil { - x.VolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - if x.VolumeSource.GitRepo == nil { - x.VolumeSource.GitRepo = new(GitRepoVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GitRepo != nil { - x.GitRepo = nil - } - } else { - if x.GitRepo == nil { - x.GitRepo = new(GitRepoVolumeSource) - } - x.GitRepo.CodecDecodeSelf(d) - } - if x.VolumeSource.Secret == nil { - x.VolumeSource.Secret = new(SecretVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Secret != nil { - x.Secret = nil - } - } else { - if x.Secret == nil { - x.Secret = new(SecretVolumeSource) - } - x.Secret.CodecDecodeSelf(d) - } - if x.VolumeSource.NFS == nil { - x.VolumeSource.NFS = new(NFSVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - if x.VolumeSource.ISCSI == nil { - x.VolumeSource.ISCSI = new(ISCSIVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - if x.VolumeSource.Glusterfs == nil { - x.VolumeSource.Glusterfs = new(GlusterfsVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - if x.VolumeSource.PersistentVolumeClaim == nil { - x.VolumeSource.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > 
l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PersistentVolumeClaim != nil { - x.PersistentVolumeClaim = nil - } - } else { - if x.PersistentVolumeClaim == nil { - x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - x.PersistentVolumeClaim.CodecDecodeSelf(d) - } - if x.VolumeSource.RBD == nil { - x.VolumeSource.RBD = new(RBDVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - if x.VolumeSource.FlexVolume == nil { - x.VolumeSource.FlexVolume = new(FlexVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - if x.VolumeSource.Cinder == nil { - x.VolumeSource.Cinder = new(CinderVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - if x.VolumeSource.CephFS == nil { - x.VolumeSource.CephFS = new(CephFSVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - if x.VolumeSource.Flocker == nil { - x.VolumeSource.Flocker = new(FlockerVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - if x.VolumeSource.DownwardAPI == nil { - x.VolumeSource.DownwardAPI = new(DownwardAPIVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DownwardAPI != nil { - x.DownwardAPI = nil - } - } else { - if x.DownwardAPI == nil { - x.DownwardAPI = new(DownwardAPIVolumeSource) - } - x.DownwardAPI.CodecDecodeSelf(d) - } - if x.VolumeSource.FC == nil { 
- x.VolumeSource.FC = new(FCVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - if x.VolumeSource.AzureFile == nil { - x.VolumeSource.AzureFile = new(AzureFileVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - if x.VolumeSource.ConfigMap == nil { - x.VolumeSource.ConfigMap = new(ConfigMapVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ConfigMap != nil { - x.ConfigMap = nil - } - } else { - if x.ConfigMap == nil { - x.ConfigMap = new(ConfigMapVolumeSource) - } - x.ConfigMap.CodecDecodeSelf(d) - } - if x.VolumeSource.VsphereVolume == nil { - x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - for { - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj25-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [20]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.HostPath != nil - yyq2[1] = x.EmptyDir != nil - yyq2[2] = x.GCEPersistentDisk != nil - yyq2[3] = x.AWSElasticBlockStore != nil - yyq2[4] = x.GitRepo != nil - yyq2[5] = x.Secret != nil - yyq2[6] = x.NFS != nil - yyq2[7] = x.ISCSI != nil - yyq2[8] = x.Glusterfs != nil - yyq2[9] = x.PersistentVolumeClaim != nil - yyq2[10] = x.RBD != nil - yyq2[11] = x.FlexVolume != nil - yyq2[12] = x.Cinder != nil - yyq2[13] = x.CephFS != nil - yyq2[14] = x.Flocker != nil - yyq2[15] = x.DownwardAPI != nil - yyq2[16] = x.FC != nil - yyq2[17] = x.AzureFile != nil - yyq2[18] = x.ConfigMap != nil - yyq2[19] = x.VsphereVolume != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(20) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if 
b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.EmptyDir == nil { - r.EncodeNil() - } else { - x.EmptyDir.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("emptyDir")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.EmptyDir == nil { - r.EncodeNil() - } else { - x.EmptyDir.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.GitRepo == nil { - r.EncodeNil() - } else { - x.GitRepo.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gitRepo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.GitRepo == nil { - r.EncodeNil() - } else { - x.GitRepo.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.Secret == nil { - r.EncodeNil() - } else { - x.Secret.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secret")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Secret == nil { - r.EncodeNil() - } else { - x.Secret.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iscsi")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.PersistentVolumeClaim == nil { - r.EncodeNil() - } else { - x.PersistentVolumeClaim.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeClaim")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PersistentVolumeClaim == nil { - r.EncodeNil() - } else { - x.PersistentVolumeClaim.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rbd")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cinder")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - if x.CephFS == nil { - 
r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cephfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[14] { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flocker")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[15] { - if x.DownwardAPI == nil { - r.EncodeNil() - } else { - x.DownwardAPI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("downwardAPI")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DownwardAPI == nil { - r.EncodeNil() - } else { - x.DownwardAPI.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[16] { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fc")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[17] { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[17] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[18] { - if x.ConfigMap == nil { - r.EncodeNil() - } else { - x.ConfigMap.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[18] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("configMap")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ConfigMap == nil { - r.EncodeNil() - } else { - x.ConfigMap.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[19] { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[19] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } - } - if yyr2 || 
yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *VolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *VolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "hostPath": - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - case "emptyDir": - if r.TryDecodeAsNil() { - if x.EmptyDir != nil { - x.EmptyDir = nil - } - } else { - if x.EmptyDir == nil { - x.EmptyDir = new(EmptyDirVolumeSource) - } - x.EmptyDir.CodecDecodeSelf(d) - } - case "gcePersistentDisk": - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - case "awsElasticBlockStore": - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - case "gitRepo": - if r.TryDecodeAsNil() { - if x.GitRepo != nil { - x.GitRepo = nil - } - } else { - if x.GitRepo == nil { - x.GitRepo = new(GitRepoVolumeSource) - } - x.GitRepo.CodecDecodeSelf(d) - } - case "secret": - if r.TryDecodeAsNil() { - if x.Secret != nil { - x.Secret = nil - } - } else { - if x.Secret == nil { - x.Secret = new(SecretVolumeSource) - } - x.Secret.CodecDecodeSelf(d) - } - case "nfs": - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - case "iscsi": - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - case "glusterfs": - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - case "persistentVolumeClaim": - if r.TryDecodeAsNil() 
{ - if x.PersistentVolumeClaim != nil { - x.PersistentVolumeClaim = nil - } - } else { - if x.PersistentVolumeClaim == nil { - x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - x.PersistentVolumeClaim.CodecDecodeSelf(d) - } - case "rbd": - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - case "flexVolume": - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - case "cinder": - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - case "cephfs": - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - case "flocker": - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - case "downwardAPI": - if r.TryDecodeAsNil() { - if x.DownwardAPI != nil { - x.DownwardAPI = nil - } - } else { - if x.DownwardAPI == nil { - x.DownwardAPI = new(DownwardAPIVolumeSource) - } - x.DownwardAPI.CodecDecodeSelf(d) - } - case "fc": - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - case "azureFile": - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - case "configMap": - if r.TryDecodeAsNil() { - if x.ConfigMap != nil { - x.ConfigMap = nil - } - } else { - if x.ConfigMap == nil { - x.ConfigMap = new(ConfigMapVolumeSource) - } - x.ConfigMap.CodecDecodeSelf(d) - } - case "vsphereVolume": - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj24 int - var yyb24 bool - var yyhl24 bool = l >= 0 - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.EmptyDir != nil { - x.EmptyDir = nil - } - } else { - if x.EmptyDir == nil { - x.EmptyDir = new(EmptyDirVolumeSource) - } - x.EmptyDir.CodecDecodeSelf(d) - } - yyj24++ - 
if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GitRepo != nil { - x.GitRepo = nil - } - } else { - if x.GitRepo == nil { - x.GitRepo = new(GitRepoVolumeSource) - } - x.GitRepo.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Secret != nil { - x.Secret = nil - } - } else { - if x.Secret == nil { - x.Secret = new(SecretVolumeSource) - } - x.Secret.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PersistentVolumeClaim != nil { - x.PersistentVolumeClaim = nil - } - } else { - if x.PersistentVolumeClaim == nil { - x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - x.PersistentVolumeClaim.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - 
yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DownwardAPI != nil { - x.DownwardAPI = nil - } - } else { - if x.DownwardAPI == nil { - x.DownwardAPI = new(DownwardAPIVolumeSource) - } - x.DownwardAPI.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - if x.ConfigMap != nil { - x.ConfigMap = nil - } - } else { - if x.ConfigMap == nil { - x.ConfigMap = new(ConfigMapVolumeSource) - } - x.ConfigMap.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - for { - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj24-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [14]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.GCEPersistentDisk != nil - yyq2[1] = x.AWSElasticBlockStore != nil - yyq2[2] = x.HostPath != nil - yyq2[3] = x.Glusterfs != nil - yyq2[4] = x.NFS != nil - yyq2[5] = x.RBD != nil - yyq2[6] = x.ISCSI != nil - yyq2[7] = x.FlexVolume != nil - yyq2[8] = x.Cinder != nil - yyq2[9] = x.CephFS != nil - yyq2[10] = x.FC != nil - yyq2[11] = x.Flocker != nil - yyq2[12] = x.AzureFile != nil - yyq2[13] = x.VsphereVolume != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(14) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPath")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rbd")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iscsi")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cinder")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("cephfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fc")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flocker")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; 
yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "gcePersistentDisk": - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - case "awsElasticBlockStore": - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - case "hostPath": - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - case "glusterfs": - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - case "nfs": - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - case "rbd": - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - case "iscsi": - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - case "flexVolume": - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - case "cinder": - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - case "cephfs": - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - case "fc": - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - case "flocker": - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - case "azureFile": - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - case "vsphereVolume": - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj18 int - var yyb18 bool - var yyhl18 bool = l >= 0 - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - 
x.FlexVolume.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - for { - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj18-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeClaimVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClaimName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("claimName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClaimName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeClaimVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "claimName": - if r.TryDecodeAsNil() { - x.ClaimName = "" - } else { - x.ClaimName = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClaimName = "" - } else { - 
x.ClaimName = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolume) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - 
r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolume) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PersistentVolumeSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = PersistentVolumeStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.Spec = PersistentVolumeSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = PersistentVolumeStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [18]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = len(x.AccessModes) != 0 - yyq2[2] = x.ClaimRef != nil - yyq2[3] = x.PersistentVolumeReclaimPolicy != "" - yyq2[4] = x.PersistentVolumeSource.GCEPersistentDisk != nil && x.GCEPersistentDisk != nil - yyq2[5] = x.PersistentVolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil - yyq2[6] = x.PersistentVolumeSource.HostPath != nil && x.HostPath != nil - yyq2[7] = x.PersistentVolumeSource.Glusterfs != nil && x.Glusterfs != nil - yyq2[8] = x.PersistentVolumeSource.NFS != nil && x.NFS != nil - yyq2[9] = x.PersistentVolumeSource.RBD != nil && x.RBD != nil - yyq2[10] = x.PersistentVolumeSource.ISCSI != nil && x.ISCSI != nil - yyq2[11] = x.PersistentVolumeSource.FlexVolume != nil && x.FlexVolume != nil - yyq2[12] = x.PersistentVolumeSource.Cinder != nil && x.Cinder != nil - yyq2[13] = x.PersistentVolumeSource.CephFS != nil && x.CephFS != nil - yyq2[14] = x.PersistentVolumeSource.FC != nil && x.FC != nil - yyq2[15] = x.PersistentVolumeSource.Flocker != nil && x.Flocker != nil - yyq2[16] = x.PersistentVolumeSource.AzureFile != nil && x.AzureFile != nil - yyq2[17] = x.PersistentVolumeSource.VsphereVolume != nil && x.VsphereVolume != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(18) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("capacity")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("accessModes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.ClaimRef == nil { - r.EncodeNil() - } else { - x.ClaimRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("claimRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ClaimRef == nil { - r.EncodeNil() - } else { - x.ClaimRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - x.PersistentVolumeReclaimPolicy.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeReclaimPolicy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.PersistentVolumeReclaimPolicy.CodecEncodeSelf(e) - } - } - var yyn15 bool - if x.PersistentVolumeSource.GCEPersistentDisk == nil { - yyn15 = true - goto LABEL15 - } - LABEL15: - if yyr2 || yy2arr2 { - if yyn15 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn15 { - r.EncodeNil() - } else { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } - } - } - var yyn18 bool - if x.PersistentVolumeSource.AWSElasticBlockStore == nil { - yyn18 = true - goto LABEL18 - } - LABEL18: - if yyr2 || yy2arr2 { - if yyn18 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn18 { - r.EncodeNil() - } else { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } - } - } - var yyn21 bool - if 
x.PersistentVolumeSource.HostPath == nil { - yyn21 = true - goto LABEL21 - } - LABEL21: - if yyr2 || yy2arr2 { - if yyn21 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn21 { - r.EncodeNil() - } else { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } - } - } - var yyn24 bool - if x.PersistentVolumeSource.Glusterfs == nil { - yyn24 = true - goto LABEL24 - } - LABEL24: - if yyr2 || yy2arr2 { - if yyn24 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn24 { - r.EncodeNil() - } else { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } - } - } - var yyn27 bool - if x.PersistentVolumeSource.NFS == nil { - yyn27 = true - goto LABEL27 - } - LABEL27: - if yyr2 || yy2arr2 { - if yyn27 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn27 { - r.EncodeNil() - } else { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } - } - } - var yyn30 bool - if x.PersistentVolumeSource.RBD == nil { - yyn30 = true - goto LABEL30 - } - LABEL30: - if yyr2 || yy2arr2 { - if yyn30 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rbd")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn30 { - r.EncodeNil() - } else { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } - } - } - var yyn33 bool - if x.PersistentVolumeSource.ISCSI == nil { - yyn33 = true - goto LABEL33 - } - LABEL33: - if yyr2 || yy2arr2 { - if yyn33 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iscsi")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn33 { - r.EncodeNil() - } else { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } - } - } - var yyn36 bool - if 
x.PersistentVolumeSource.FlexVolume == nil { - yyn36 = true - goto LABEL36 - } - LABEL36: - if yyr2 || yy2arr2 { - if yyn36 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn36 { - r.EncodeNil() - } else { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } - } - } - var yyn39 bool - if x.PersistentVolumeSource.Cinder == nil { - yyn39 = true - goto LABEL39 - } - LABEL39: - if yyr2 || yy2arr2 { - if yyn39 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cinder")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn39 { - r.EncodeNil() - } else { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } - } - } - var yyn42 bool - if x.PersistentVolumeSource.CephFS == nil { - yyn42 = true - goto LABEL42 - } - LABEL42: - if yyr2 || yy2arr2 { - if yyn42 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cephfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn42 { - r.EncodeNil() - } else { - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } - } - } - var yyn45 bool - if x.PersistentVolumeSource.FC == nil { - yyn45 = true - goto LABEL45 - } - LABEL45: - if yyr2 || yy2arr2 { - if yyn45 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[14] { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fc")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn45 { - r.EncodeNil() - } else { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } - } - } - var yyn48 bool - if x.PersistentVolumeSource.Flocker == nil { - yyn48 = true - goto LABEL48 - } - LABEL48: - if yyr2 || yy2arr2 { - if yyn48 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[15] { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flocker")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn48 { - r.EncodeNil() - } else { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } - } - } - var yyn51 bool - if 
x.PersistentVolumeSource.AzureFile == nil { - yyn51 = true - goto LABEL51 - } - LABEL51: - if yyr2 || yy2arr2 { - if yyn51 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[16] { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn51 { - r.EncodeNil() - } else { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } - } - } - var yyn54 bool - if x.PersistentVolumeSource.VsphereVolume == nil { - yyn54 = true - goto LABEL54 - } - LABEL54: - if yyr2 || yy2arr2 { - if yyn54 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[17] { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[17] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn54 { - r.EncodeNil() - } else { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "capacity": - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv4 := &x.Capacity - yyv4.CodecDecodeSelf(d) - } - case "accessModes": - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv5 := &x.AccessModes - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv5), d) - } - } - case "claimRef": - if r.TryDecodeAsNil() { - if x.ClaimRef != nil { - x.ClaimRef = nil - } - } else { - if x.ClaimRef == nil { - x.ClaimRef = 
new(ObjectReference) - } - x.ClaimRef.CodecDecodeSelf(d) - } - case "persistentVolumeReclaimPolicy": - if r.TryDecodeAsNil() { - x.PersistentVolumeReclaimPolicy = "" - } else { - x.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(r.DecodeString()) - } - case "gcePersistentDisk": - if x.PersistentVolumeSource.GCEPersistentDisk == nil { - x.PersistentVolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - case "awsElasticBlockStore": - if x.PersistentVolumeSource.AWSElasticBlockStore == nil { - x.PersistentVolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - case "hostPath": - if x.PersistentVolumeSource.HostPath == nil { - x.PersistentVolumeSource.HostPath = new(HostPathVolumeSource) - } - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - case "glusterfs": - if x.PersistentVolumeSource.Glusterfs == nil { - x.PersistentVolumeSource.Glusterfs = new(GlusterfsVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - case "nfs": - if x.PersistentVolumeSource.NFS == nil { - x.PersistentVolumeSource.NFS = new(NFSVolumeSource) - } - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - case "rbd": - if x.PersistentVolumeSource.RBD == nil { - x.PersistentVolumeSource.RBD = new(RBDVolumeSource) - } - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - case "iscsi": - if x.PersistentVolumeSource.ISCSI == nil { - x.PersistentVolumeSource.ISCSI = new(ISCSIVolumeSource) - } - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - case "flexVolume": - if x.PersistentVolumeSource.FlexVolume == nil { - x.PersistentVolumeSource.FlexVolume = new(FlexVolumeSource) - } - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - case "cinder": - if x.PersistentVolumeSource.Cinder == nil { - x.PersistentVolumeSource.Cinder = new(CinderVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - case "cephfs": - if x.PersistentVolumeSource.CephFS == nil { - x.PersistentVolumeSource.CephFS = new(CephFSVolumeSource) - } - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - 
x.CephFS.CodecDecodeSelf(d) - } - case "fc": - if x.PersistentVolumeSource.FC == nil { - x.PersistentVolumeSource.FC = new(FCVolumeSource) - } - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - case "flocker": - if x.PersistentVolumeSource.Flocker == nil { - x.PersistentVolumeSource.Flocker = new(FlockerVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - case "azureFile": - if x.PersistentVolumeSource.AzureFile == nil { - x.PersistentVolumeSource.AzureFile = new(AzureFileVolumeSource) - } - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - case "vsphereVolume": - if x.PersistentVolumeSource.VsphereVolume == nil { - x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj23 int - var yyb23 bool - var yyhl23 bool = l >= 0 - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv24 := &x.Capacity - yyv24.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv25 := &x.AccessModes - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv25), d) - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ClaimRef != nil { - x.ClaimRef = nil - } - } else { - if x.ClaimRef == nil { - x.ClaimRef = new(ObjectReference) - } - x.ClaimRef.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PersistentVolumeReclaimPolicy = "" - } else { - x.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(r.DecodeString()) - } - if x.PersistentVolumeSource.GCEPersistentDisk == nil { - x.PersistentVolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - 
yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.AWSElasticBlockStore == nil { - x.PersistentVolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.HostPath == nil { - x.PersistentVolumeSource.HostPath = new(HostPathVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.Glusterfs == nil { - x.PersistentVolumeSource.Glusterfs = new(GlusterfsVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.NFS == nil { - x.PersistentVolumeSource.NFS = new(NFSVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.RBD == nil { - x.PersistentVolumeSource.RBD = new(RBDVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.ISCSI == nil { - x.PersistentVolumeSource.ISCSI = new(ISCSIVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - 
} - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.FlexVolume == nil { - x.PersistentVolumeSource.FlexVolume = new(FlexVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.Cinder == nil { - x.PersistentVolumeSource.Cinder = new(CinderVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.CephFS == nil { - x.PersistentVolumeSource.CephFS = new(CephFSVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.FC == nil { - x.PersistentVolumeSource.FC = new(FCVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.Flocker == nil { - x.PersistentVolumeSource.Flocker = new(FlockerVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.AzureFile == nil { - x.PersistentVolumeSource.AzureFile = new(AzureFileVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.VsphereVolume == nil { - x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - for { - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj23-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x PersistentVolumeReclaimPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PersistentVolumeReclaimPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *PersistentVolumeStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Phase != "" - yyq2[1] = x.Message != "" - yyq2[2] = x.Reason != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Phase.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("phase")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Phase.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - 
yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "phase": - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = PersistentVolumePhase(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = PersistentVolumePhase(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSlicePersistentVolume(([]PersistentVolume)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSlicePersistentVolume(([]PersistentVolume)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSlicePersistentVolume((*[]PersistentVolume)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSlicePersistentVolume((*[]PersistentVolume)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = 
r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeClaim) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 
|| yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeClaim) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeClaim) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PersistentVolumeClaimSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = PersistentVolumeClaimStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeClaim) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - 
yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PersistentVolumeClaimSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = PersistentVolumeClaimStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeClaimList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSlicePersistentVolumeClaim(([]PersistentVolumeClaim)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - 
yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSlicePersistentVolumeClaim(([]PersistentVolumeClaim)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeClaimList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeClaimList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - 
case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeClaimList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeClaimSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.AccessModes) != 0 - yyq2[1] = x.Selector != nil - yyq2[2] = true - yyq2[3] = x.VolumeName != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("accessModes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.Resources - yy10.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resources")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.Resources - yy12.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeClaimSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if 
r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "accessModes": - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv4 := &x.AccessModes - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv4), d) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg2_unversioned.LabelSelector) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "resources": - if r.TryDecodeAsNil() { - x.Resources = ResourceRequirements{} - } else { - yyv8 := &x.Resources - yyv8.CodecDecodeSelf(d) - } - case "volumeName": - if r.TryDecodeAsNil() { - x.VolumeName = "" - } else { - x.VolumeName = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv11 := &x.AccessModes - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv11), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg2_unversioned.LabelSelector) - } - yym14 := z.DecBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Resources = ResourceRequirements{} - } else { - yyv15 := &x.Resources - yyv15.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeName = "" - } else { - x.VolumeName = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x 
*PersistentVolumeClaimStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Phase != "" - yyq2[1] = len(x.AccessModes) != 0 - yyq2[2] = len(x.Capacity) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Phase.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("phase")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Phase.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("accessModes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("capacity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeClaimStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "phase": - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = PersistentVolumeClaimPhase(r.DecodeString()) - } - case "accessModes": - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv5 := &x.AccessModes - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv5), d) - } - } - case "capacity": - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv7 := &x.Capacity - yyv7.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = PersistentVolumeClaimPhase(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv10 := &x.AccessModes - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv10), d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv12 := &x.Capacity - yyv12.CodecDecodeSelf(d) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x PersistentVolumeAccessMode) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PersistentVolumeAccessMode) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x PersistentVolumePhase) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r 
:= codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PersistentVolumePhase) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x PersistentVolumeClaimPhase) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PersistentVolumeClaimPhase) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *HostPathVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HostPathVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HostPathVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 
- for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HostPathVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EmptyDirVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Medium != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Medium.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("medium")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Medium.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EmptyDirVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EmptyDirVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { 
- var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "medium": - if r.TryDecodeAsNil() { - x.Medium = "" - } else { - x.Medium = StorageMedium(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EmptyDirVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Medium = "" - } else { - x.Medium = StorageMedium(r.DecodeString()) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x StorageMedium) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *StorageMedium) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x Protocol) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *Protocol) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *GCEPersistentDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FSType != "" - yyq2[2] = x.Partition != 0 - yyq2[3] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - 
r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PDName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("pdName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PDName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.Partition)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("partition")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.Partition)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *GCEPersistentDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 
{ - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "pdName": - if r.TryDecodeAsNil() { - x.PDName = "" - } else { - x.PDName = string(r.DecodeString()) - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - case "partition": - if r.TryDecodeAsNil() { - x.Partition = 0 - } else { - x.Partition = int32(r.DecodeInt(32)) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PDName = "" - } else { - x.PDName = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Partition = 0 - } else { - x.Partition = int32(r.DecodeInt(32)) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ISCSIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.TargetPortal != "" - yyq2[1] = x.IQN != "" - yyq2[2] = x.Lun != 0 - yyq2[3] = x.ISCSIInterface != "" - yyq2[4] = x.FSType != "" - yyq2[5] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TargetPortal)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetPortal")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TargetPortal)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IQN)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iqn")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IQN)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.Lun)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lun")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.Lun)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ISCSIInterface)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iscsiInterface")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ISCSIInterface)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || 
yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ISCSIVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ISCSIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "targetPortal": - if r.TryDecodeAsNil() { - x.TargetPortal = "" - } else { - x.TargetPortal = string(r.DecodeString()) - } - case "iqn": - if r.TryDecodeAsNil() { - x.IQN = "" - } else { - x.IQN = string(r.DecodeString()) - } - case "lun": - if r.TryDecodeAsNil() { - x.Lun = 0 - } else { - x.Lun = int32(r.DecodeInt(32)) - } - case "iscsiInterface": - if r.TryDecodeAsNil() { - x.ISCSIInterface = "" - } else { - x.ISCSIInterface = string(r.DecodeString()) - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetPortal = "" - } else { - x.TargetPortal = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.IQN = "" - } else { - x.IQN = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Lun = 
0 - } else { - x.Lun = int32(r.DecodeInt(32)) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ISCSIInterface = "" - } else { - x.ISCSIInterface = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *FCVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.FSType != "" - yyq2[3] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.TargetWWNs == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.TargetWWNs, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetWWNs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TargetWWNs == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.TargetWWNs, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Lun == nil { - r.EncodeNil() - } else { - yy7 := *x.Lun - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(yy7)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lun")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Lun == nil { - r.EncodeNil() - } else { - yy9 := *x.Lun - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *FCVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *FCVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "targetWWNs": - if r.TryDecodeAsNil() { - x.TargetWWNs = nil - } else { - yyv4 := &x.TargetWWNs - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecSliceStringX(yyv4, false, d) - } - } - case "lun": - if r.TryDecodeAsNil() { - if x.Lun != nil { - x.Lun = nil - } - } else { - if x.Lun == nil { - x.Lun = new(int32) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int32)(x.Lun)) = int32(r.DecodeInt(32)) - } - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *FCVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - 
if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetWWNs = nil - } else { - yyv11 := &x.TargetWWNs - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - z.F.DecSliceStringX(yyv11, false, d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Lun != nil { - x.Lun = nil - } - } else { - if x.Lun == nil { - x.Lun = new(int32) - } - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*int32)(x.Lun)) = int32(r.DecodeInt(32)) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *FlexVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FSType != "" - yyq2[2] = x.SecretRef != nil - yyq2[3] = x.ReadOnly != false - yyq2[4] = len(x.Options) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Driver)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("driver")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Driver)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if 
false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.Options == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - z.F.EncMapStringStringV(x.Options, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("options")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Options == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - z.F.EncMapStringStringV(x.Options, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *FlexVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *FlexVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "driver": - if r.TryDecodeAsNil() { - x.Driver = "" - } else { - x.Driver = string(r.DecodeString()) - } - case "fsType": - if r.TryDecodeAsNil() { - 
x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - case "secretRef": - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - case "options": - if r.TryDecodeAsNil() { - x.Options = nil - } else { - yyv8 := &x.Options - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - z.F.DecMapStringStringX(yyv8, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *FlexVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Driver = "" - } else { - x.Driver = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Options = nil - } else { - yyv15 := &x.Options - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - z.F.DecMapStringStringX(yyv15, false, d) - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *AWSElasticBlockStoreVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, 
yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FSType != "" - yyq2[2] = x.Partition != 0 - yyq2[3] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.Partition)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("partition")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.Partition)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *AWSElasticBlockStoreVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *AWSElasticBlockStoreVolumeSource) 
codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "volumeID": - if r.TryDecodeAsNil() { - x.VolumeID = "" - } else { - x.VolumeID = string(r.DecodeString()) - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - case "partition": - if r.TryDecodeAsNil() { - x.Partition = 0 - } else { - x.Partition = int32(r.DecodeInt(32)) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeID = "" - } else { - x.VolumeID = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Partition = 0 - } else { - x.Partition = int32(r.DecodeInt(32)) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *GitRepoVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Revision != "" - yyq2[2] = x.Directory != "" - var yynn2 int - if 
yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Repository)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("repository")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Repository)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Revision)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("revision")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Revision)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Directory)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("directory")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Directory)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *GitRepoVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *GitRepoVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "repository": - if r.TryDecodeAsNil() { - 
x.Repository = "" - } else { - x.Repository = string(r.DecodeString()) - } - case "revision": - if r.TryDecodeAsNil() { - x.Revision = "" - } else { - x.Revision = string(r.DecodeString()) - } - case "directory": - if r.TryDecodeAsNil() { - x.Directory = "" - } else { - x.Directory = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *GitRepoVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Repository = "" - } else { - x.Repository = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Revision = "" - } else { - x.Revision = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Directory = "" - } else { - x.Directory = string(r.DecodeString()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SecretVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.SecretName != "" - yyq2[1] = len(x.Items) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Items == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) - } - } - } 
else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SecretVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SecretVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "secretName": - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - x.SecretName = string(r.DecodeString()) - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv5 := &x.Items - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceKeyToPath((*[]KeyToPath)(yyv5), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SecretVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - x.SecretName = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv9 := &x.Items - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceKeyToPath((*[]KeyToPath)(yyv9), d) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Server)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("server")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Server)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NFSVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = 
z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "server": - if r.TryDecodeAsNil() { - x.Server = "" - } else { - x.Server = string(r.DecodeString()) - } - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Server = "" - } else { - x.Server = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *GlusterfsVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EndpointsName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("endpoints")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.EndpointsName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *GlusterfsVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *GlusterfsVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "endpoints": - if r.TryDecodeAsNil() { - x.EndpointsName = "" - } else { - x.EndpointsName = string(r.DecodeString()) - } - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *GlusterfsVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EndpointsName = "" - } else { - x.EndpointsName = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RBDVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.FSType != "" - yyq2[3] = x.RBDPool != "" - yyq2[4] = x.RadosUser != "" - yyq2[5] = x.Keyring != "" - yyq2[6] = x.SecretRef != nil - yyq2[7] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.CephMonitors == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.CephMonitors, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("monitors")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CephMonitors == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.CephMonitors, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RBDImage)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("image")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RBDImage)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RBDPool)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("pool")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RBDPool)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RadosUser)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("user")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RadosUser)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Keyring)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("keyring")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Keyring)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RBDVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - 
yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RBDVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "monitors": - if r.TryDecodeAsNil() { - x.CephMonitors = nil - } else { - yyv4 := &x.CephMonitors - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecSliceStringX(yyv4, false, d) - } - } - case "image": - if r.TryDecodeAsNil() { - x.RBDImage = "" - } else { - x.RBDImage = string(r.DecodeString()) - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - case "pool": - if r.TryDecodeAsNil() { - x.RBDPool = "" - } else { - x.RBDPool = string(r.DecodeString()) - } - case "user": - if r.TryDecodeAsNil() { - x.RadosUser = "" - } else { - x.RadosUser = string(r.DecodeString()) - } - case "keyring": - if r.TryDecodeAsNil() { - x.Keyring = "" - } else { - x.Keyring = string(r.DecodeString()) - } - case "secretRef": - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RBDVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CephMonitors = nil - } else { - yyv14 := &x.CephMonitors - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - z.F.DecSliceStringX(yyv14, false, d) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RBDImage = "" - } else { - x.RBDImage = string(r.DecodeString()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RBDPool = "" - } else { - x.RBDPool = string(r.DecodeString()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RadosUser = "" - } else { - x.RadosUser = string(r.DecodeString()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Keyring = "" - } else { - x.Keyring = string(r.DecodeString()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CinderVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FSType != "" - yyq2[2] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 
- if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CinderVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CinderVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "volumeID": - if r.TryDecodeAsNil() { - x.VolumeID = "" - } else { - x.VolumeID = string(r.DecodeString()) - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CinderVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.VolumeID = "" - } else { - x.VolumeID = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CephFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Path != "" - yyq2[2] = x.User != "" - yyq2[3] = x.SecretFile != "" - yyq2[4] = x.SecretRef != nil - yyq2[5] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Monitors == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.Monitors, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("monitors")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Monitors == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.Monitors, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("user")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } 
else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretFile)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretFile)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CephFSVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CephFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "monitors": - if r.TryDecodeAsNil() { - x.Monitors = nil - } else { - yyv4 := &x.Monitors - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecSliceStringX(yyv4, false, d) - } - } - case "path": - if 
r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "user": - if r.TryDecodeAsNil() { - x.User = "" - } else { - x.User = string(r.DecodeString()) - } - case "secretFile": - if r.TryDecodeAsNil() { - x.SecretFile = "" - } else { - x.SecretFile = string(r.DecodeString()) - } - case "secretRef": - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CephFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Monitors = nil - } else { - yyv12 := &x.Monitors - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - z.F.DecSliceStringX(yyv12, false, d) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.User = "" - } else { - x.User = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SecretFile = "" - } else { - x.SecretFile = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *FlockerVolumeSource) CodecEncodeSelf(e 
*codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DatasetName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("datasetName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DatasetName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *FlockerVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *FlockerVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "datasetName": - if r.TryDecodeAsNil() { - x.DatasetName = "" - } else { - x.DatasetName = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *FlockerVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DatasetName = "" - } else { - x.DatasetName = string(r.DecodeString()) - } - for { - yyj5++ - if yyhl5 { 
- yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DownwardAPIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Items) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Items == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DownwardAPIVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DownwardAPIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4 := &x.Items - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DownwardAPIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv7 := &x.Items - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DownwardAPIVolumeFile) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FieldRef != nil - yyq2[2] = x.ResourceFieldRef != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.FieldRef == nil { - r.EncodeNil() - } else { - x.FieldRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fieldRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FieldRef == nil { - r.EncodeNil() - } else { - x.FieldRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.ResourceFieldRef == nil { - r.EncodeNil() - } else { - x.ResourceFieldRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ResourceFieldRef == nil { - r.EncodeNil() - } else { - x.ResourceFieldRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DownwardAPIVolumeFile) CodecDecodeSelf(d 
*codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DownwardAPIVolumeFile) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "fieldRef": - if r.TryDecodeAsNil() { - if x.FieldRef != nil { - x.FieldRef = nil - } - } else { - if x.FieldRef == nil { - x.FieldRef = new(ObjectFieldSelector) - } - x.FieldRef.CodecDecodeSelf(d) - } - case "resourceFieldRef": - if r.TryDecodeAsNil() { - if x.ResourceFieldRef != nil { - x.ResourceFieldRef = nil - } - } else { - if x.ResourceFieldRef == nil { - x.ResourceFieldRef = new(ResourceFieldSelector) - } - x.ResourceFieldRef.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DownwardAPIVolumeFile) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FieldRef != nil { - x.FieldRef = nil - } - } else { - if x.FieldRef == nil { - x.FieldRef = new(ObjectFieldSelector) - } - x.FieldRef.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ResourceFieldRef != nil { - x.ResourceFieldRef = nil - } - } else { - if x.ResourceFieldRef == nil { - x.ResourceFieldRef = new(ResourceFieldSelector) - } - x.ResourceFieldRef.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - 
yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *AzureFileVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ShareName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("shareName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ShareName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *AzureFileVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *AzureFileVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "secretName": - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - x.SecretName = string(r.DecodeString()) - } - case "shareName": - if r.TryDecodeAsNil() { - x.ShareName = "" - } else { - x.ShareName = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *AzureFileVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - x.SecretName = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ShareName = "" - } else { - x.ShareName = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *VsphereVirtualDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FSType != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumePath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumePath")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumePath)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *VsphereVirtualDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "volumePath": - if r.TryDecodeAsNil() { - x.VolumePath = "" - } else { - x.VolumePath = string(r.DecodeString()) - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumePath = "" - } else { - x.VolumePath = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Items) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Items == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ConfigMapVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ConfigMapVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { 
- if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4 := &x.Items - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceKeyToPath((*[]KeyToPath)(yyv4), d) - } - } - case "Name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ConfigMapVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv8 := &x.Items - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - h.decSliceKeyToPath((*[]KeyToPath)(yyv8), d) - } - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *KeyToPath) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := 
z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *KeyToPath) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *KeyToPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *KeyToPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerPort) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = 
yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Name != "" - yyq2[1] = x.HostPort != 0 - yyq2[3] = x.Protocol != "" - yyq2[4] = x.HostIP != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.HostPort)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.HostPort)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.ContainerPort)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerPort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.ContainerPort)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - x.Protocol.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("protocol")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Protocol.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostIP")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerPort) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() 
- if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "hostPort": - if r.TryDecodeAsNil() { - x.HostPort = 0 - } else { - x.HostPort = int32(r.DecodeInt(32)) - } - case "containerPort": - if r.TryDecodeAsNil() { - x.ContainerPort = 0 - } else { - x.ContainerPort = int32(r.DecodeInt(32)) - } - case "protocol": - if r.TryDecodeAsNil() { - x.Protocol = "" - } else { - x.Protocol = Protocol(r.DecodeString()) - } - case "hostIP": - if r.TryDecodeAsNil() { - x.HostIP = "" - } else { - x.HostIP = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostPort = 0 - } else { - x.HostPort = int32(r.DecodeInt(32)) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerPort = 0 - } else { - x.ContainerPort = int32(r.DecodeInt(32)) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Protocol = "" - } else { - x.Protocol = Protocol(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostIP = "" - } 
else { - x.HostIP = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *VolumeMount) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.ReadOnly != false - yyq2[3] = x.SubPath != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MountPath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("mountPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MountPath)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SubPath)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SubPath)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *VolumeMount) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - 
yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *VolumeMount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - case "mountPath": - if r.TryDecodeAsNil() { - x.MountPath = "" - } else { - x.MountPath = string(r.DecodeString()) - } - case "subPath": - if r.TryDecodeAsNil() { - x.SubPath = "" - } else { - x.SubPath = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *VolumeMount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MountPath = "" - } else { - x.MountPath = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SubPath = "" - } else { - x.SubPath = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - 
z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EnvVar) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Value != "" - yyq2[2] = x.ValueFrom != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.ValueFrom == nil { - r.EncodeNil() - } else { - x.ValueFrom.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("valueFrom")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ValueFrom == nil { - r.EncodeNil() - } else { - x.ValueFrom.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EnvVar) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EnvVar) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = 
z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - case "valueFrom": - if r.TryDecodeAsNil() { - if x.ValueFrom != nil { - x.ValueFrom = nil - } - } else { - if x.ValueFrom == nil { - x.ValueFrom = new(EnvVarSource) - } - x.ValueFrom.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EnvVar) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ValueFrom != nil { - x.ValueFrom = nil - } - } else { - if x.ValueFrom == nil { - x.ValueFrom = new(EnvVarSource) - } - x.ValueFrom.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EnvVarSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.FieldRef != nil - yyq2[1] = x.ResourceFieldRef != nil - yyq2[2] = x.ConfigMapKeyRef != nil - yyq2[3] = x.SecretKeyRef != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.FieldRef == nil { - r.EncodeNil() - } else { - x.FieldRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if 
yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fieldRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FieldRef == nil { - r.EncodeNil() - } else { - x.FieldRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.ResourceFieldRef == nil { - r.EncodeNil() - } else { - x.ResourceFieldRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ResourceFieldRef == nil { - r.EncodeNil() - } else { - x.ResourceFieldRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.ConfigMapKeyRef == nil { - r.EncodeNil() - } else { - x.ConfigMapKeyRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("configMapKeyRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ConfigMapKeyRef == nil { - r.EncodeNil() - } else { - x.ConfigMapKeyRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.SecretKeyRef == nil { - r.EncodeNil() - } else { - x.SecretKeyRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretKeyRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretKeyRef == nil { - r.EncodeNil() - } else { - x.SecretKeyRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EnvVarSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EnvVarSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "fieldRef": - if r.TryDecodeAsNil() { - if x.FieldRef != 
nil { - x.FieldRef = nil - } - } else { - if x.FieldRef == nil { - x.FieldRef = new(ObjectFieldSelector) - } - x.FieldRef.CodecDecodeSelf(d) - } - case "resourceFieldRef": - if r.TryDecodeAsNil() { - if x.ResourceFieldRef != nil { - x.ResourceFieldRef = nil - } - } else { - if x.ResourceFieldRef == nil { - x.ResourceFieldRef = new(ResourceFieldSelector) - } - x.ResourceFieldRef.CodecDecodeSelf(d) - } - case "configMapKeyRef": - if r.TryDecodeAsNil() { - if x.ConfigMapKeyRef != nil { - x.ConfigMapKeyRef = nil - } - } else { - if x.ConfigMapKeyRef == nil { - x.ConfigMapKeyRef = new(ConfigMapKeySelector) - } - x.ConfigMapKeyRef.CodecDecodeSelf(d) - } - case "secretKeyRef": - if r.TryDecodeAsNil() { - if x.SecretKeyRef != nil { - x.SecretKeyRef = nil - } - } else { - if x.SecretKeyRef == nil { - x.SecretKeyRef = new(SecretKeySelector) - } - x.SecretKeyRef.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EnvVarSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FieldRef != nil { - x.FieldRef = nil - } - } else { - if x.FieldRef == nil { - x.FieldRef = new(ObjectFieldSelector) - } - x.FieldRef.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ResourceFieldRef != nil { - x.ResourceFieldRef = nil - } - } else { - if x.ResourceFieldRef == nil { - x.ResourceFieldRef = new(ResourceFieldSelector) - } - x.ResourceFieldRef.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ConfigMapKeyRef != nil { - x.ConfigMapKeyRef = nil - } - } else { - if x.ConfigMapKeyRef == nil { - x.ConfigMapKeyRef = new(ConfigMapKeySelector) - } - x.ConfigMapKeyRef.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretKeyRef != nil { - x.SecretKeyRef = nil - } - } else { - if x.SecretKeyRef == nil { - x.SecretKeyRef = new(SecretKeySelector) - } - x.SecretKeyRef.CodecDecodeSelf(d) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ObjectFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { 
- } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fieldPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ObjectFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ObjectFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "fieldPath": - if r.TryDecodeAsNil() { - x.FieldPath = "" - } else { - x.FieldPath = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ObjectFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FieldPath = "" - } else { - x.FieldPath = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.ContainerName != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resource")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.Divisor - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(yy10) - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("divisor")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.Divisor - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if 
z.HasExtensions() && z.EncExt(yy12) { - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "containerName": - if r.TryDecodeAsNil() { - x.ContainerName = "" - } else { - x.ContainerName = string(r.DecodeString()) - } - case "resource": - if r.TryDecodeAsNil() { - x.Resource = "" - } else { - x.Resource = string(r.DecodeString()) - } - case "divisor": - if r.TryDecodeAsNil() { - x.Divisor = pkg3_resource.Quantity{} - } else { - yyv6 := &x.Divisor - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerName = "" - } else { - x.ContainerName = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Resource = "" - } else { - x.Resource = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Divisor = pkg3_resource.Quantity{} - } else { - yyv11 := &x.Divisor - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv11) - } else { - z.DecFallback(yyv11, false) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ConfigMapKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ConfigMapKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 
- for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - case "Name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ConfigMapKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SecretKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SecretKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SecretKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - case "Name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SecretKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HTTPHeader) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else 
{ - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HTTPHeader) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HTTPHeader) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HTTPHeader) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - 
x.Name = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HTTPGetAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Path != "" - yyq2[1] = true - yyq2[2] = x.Host != "" - yyq2[3] = x.Scheme != "" - yyq2[4] = len(x.HTTPHeaders) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy7 := &x.Port - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) - } else { - z.EncFallback(yy7) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.Port - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("host")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - x.Scheme.CodecEncodeSelf(e) - } else { - 
r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("scheme")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Scheme.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.HTTPHeaders == nil { - r.EncodeNil() - } else { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - h.encSliceHTTPHeader(([]HTTPHeader)(x.HTTPHeaders), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("httpHeaders")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HTTPHeaders == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - h.encSliceHTTPHeader(([]HTTPHeader)(x.HTTPHeaders), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HTTPGetAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HTTPGetAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "port": - if r.TryDecodeAsNil() { - x.Port = pkg4_intstr.IntOrString{} - } else { - yyv5 := &x.Port - yym6 := z.DecBinary() - _ = yym6 - if false { - } else if z.HasExtensions() && z.DecExt(yyv5) { - } else if !yym6 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv5) - } else { - z.DecFallback(yyv5, false) - } - } - case "host": - if r.TryDecodeAsNil() { - x.Host = "" - } else { - x.Host = string(r.DecodeString()) - } - case "scheme": - if r.TryDecodeAsNil() { - x.Scheme = "" - } else { - x.Scheme = URIScheme(r.DecodeString()) - } - case "httpHeaders": - if r.TryDecodeAsNil() { - x.HTTPHeaders = nil - } else { - yyv9 := &x.HTTPHeaders - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceHTTPHeader((*[]HTTPHeader)(yyv9), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HTTPGetAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = pkg4_intstr.IntOrString{} - } else { - yyv13 := &x.Port - yym14 := z.DecBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.DecExt(yyv13) { - } else if !yym14 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv13) - } else { - z.DecFallback(yyv13, false) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Host = "" - } else { - x.Host = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Scheme = "" - } else { - x.Scheme = URIScheme(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HTTPHeaders = nil - } else { - yyv17 := &x.HTTPHeaders - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - h.decSliceHTTPHeader((*[]HTTPHeader)(yyv17), d) - } - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x URIScheme) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *URIScheme) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *TCPSocketAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = 
yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.Port - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else if !yym5 && z.IsJSONHandle() { - z.EncJSONMarshal(yy4) - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.Port - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(yy6) - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *TCPSocketAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *TCPSocketAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "port": - if r.TryDecodeAsNil() { - x.Port = pkg4_intstr.IntOrString{} - } else { - yyv4 := &x.Port - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else if !yym5 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4) - } else { - z.DecFallback(yyv4, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *TCPSocketAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = pkg4_intstr.IntOrString{} 
- } else { - yyv7 := &x.Port - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(yyv7) { - } else if !yym8 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv7) - } else { - z.DecFallback(yyv7, false) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ExecAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Command) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Command == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("command")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Command == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ExecAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ExecAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "command": - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv4 := &x.Command - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - 
z.F.DecSliceStringX(yyv4, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ExecAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv7 := &x.Command - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - z.F.DecSliceStringX(yyv7, false, d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Probe) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.InitialDelaySeconds != 0 - yyq2[1] = x.TimeoutSeconds != 0 - yyq2[2] = x.PeriodSeconds != 0 - yyq2[3] = x.SuccessThreshold != 0 - yyq2[4] = x.FailureThreshold != 0 - yyq2[5] = x.Handler.Exec != nil && x.Exec != nil - yyq2[6] = x.Handler.HTTPGet != nil && x.HTTPGet != nil - yyq2[7] = x.Handler.TCPSocket != nil && x.TCPSocket != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.InitialDelaySeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("initialDelaySeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.InitialDelaySeconds)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.TimeoutSeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("timeoutSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.TimeoutSeconds)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.PeriodSeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("periodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.PeriodSeconds)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.SuccessThreshold)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("successThreshold")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.SuccessThreshold)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.FailureThreshold)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("failureThreshold")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(x.FailureThreshold)) - } - } - } - var yyn18 bool - if x.Handler.Exec == nil { - yyn18 = true - goto LABEL18 - } - LABEL18: - if yyr2 || yy2arr2 { - if yyn18 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.Exec == nil { - r.EncodeNil() - } else { - x.Exec.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("exec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn18 { - r.EncodeNil() - } else { - if x.Exec == nil { - r.EncodeNil() - } else { - x.Exec.CodecEncodeSelf(e) - } - } - } - } - var yyn21 bool - if x.Handler.HTTPGet == nil { - yyn21 = true - goto LABEL21 - } - LABEL21: - if yyr2 || yy2arr2 { - if yyn21 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.HTTPGet == nil { - r.EncodeNil() - } else { - x.HTTPGet.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("httpGet")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn21 { - r.EncodeNil() - } else { - if x.HTTPGet == nil { - r.EncodeNil() - } else { - x.HTTPGet.CodecEncodeSelf(e) - } - } - } - } - var yyn24 bool - if x.Handler.TCPSocket == nil { - yyn24 = true - goto LABEL24 - } - LABEL24: - if yyr2 || yy2arr2 { - if yyn24 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.TCPSocket == nil { - r.EncodeNil() - } else { - x.TCPSocket.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tcpSocket")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn24 { - r.EncodeNil() - } else { - if x.TCPSocket == nil { - r.EncodeNil() - } else { - x.TCPSocket.CodecEncodeSelf(e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Probe) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Probe) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "initialDelaySeconds": - if r.TryDecodeAsNil() { - x.InitialDelaySeconds = 0 - } else { - x.InitialDelaySeconds = int32(r.DecodeInt(32)) - } - case "timeoutSeconds": - if r.TryDecodeAsNil() { - x.TimeoutSeconds = 0 - } else { - x.TimeoutSeconds = int32(r.DecodeInt(32)) - } - case "periodSeconds": - if r.TryDecodeAsNil() { - x.PeriodSeconds = 0 - } else { - x.PeriodSeconds = int32(r.DecodeInt(32)) - } - case "successThreshold": - if r.TryDecodeAsNil() { - x.SuccessThreshold = 0 - } else { - x.SuccessThreshold = int32(r.DecodeInt(32)) - } - case "failureThreshold": - if r.TryDecodeAsNil() { - x.FailureThreshold = 0 - } else { - x.FailureThreshold = int32(r.DecodeInt(32)) - } - case "exec": - if x.Handler.Exec == nil { - x.Handler.Exec = new(ExecAction) - } - if r.TryDecodeAsNil() { - if x.Exec != nil { - x.Exec = nil - } - } else { - if x.Exec == nil { - x.Exec = new(ExecAction) - } - x.Exec.CodecDecodeSelf(d) - } - case "httpGet": - if x.Handler.HTTPGet == nil { - x.Handler.HTTPGet = new(HTTPGetAction) - } - if r.TryDecodeAsNil() { - if x.HTTPGet != nil { - x.HTTPGet = nil - } - } else { - if x.HTTPGet == nil { - x.HTTPGet = new(HTTPGetAction) - } - x.HTTPGet.CodecDecodeSelf(d) - } - case "tcpSocket": - if x.Handler.TCPSocket == nil { - x.Handler.TCPSocket = new(TCPSocketAction) - } - if r.TryDecodeAsNil() { - if x.TCPSocket != nil { - x.TCPSocket = nil - } - } else { - if x.TCPSocket == nil { - x.TCPSocket = new(TCPSocketAction) - } - x.TCPSocket.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.InitialDelaySeconds = 0 - } else { - x.InitialDelaySeconds = int32(r.DecodeInt(32)) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TimeoutSeconds = 0 - } else { - x.TimeoutSeconds = int32(r.DecodeInt(32)) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PeriodSeconds = 0 - } else { - x.PeriodSeconds = int32(r.DecodeInt(32)) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SuccessThreshold = 0 - } else { - x.SuccessThreshold = int32(r.DecodeInt(32)) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FailureThreshold = 0 - } else { - x.FailureThreshold = int32(r.DecodeInt(32)) - } - if x.Handler.Exec == nil { - x.Handler.Exec = new(ExecAction) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Exec != nil { - x.Exec = nil - } - } else { - if x.Exec == nil { - x.Exec = new(ExecAction) - } - x.Exec.CodecDecodeSelf(d) - } - if x.Handler.HTTPGet == nil { - x.Handler.HTTPGet = new(HTTPGetAction) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HTTPGet != nil { - x.HTTPGet = nil - } - } else { - if x.HTTPGet == nil { - x.HTTPGet = new(HTTPGetAction) - } - x.HTTPGet.CodecDecodeSelf(d) - } - if x.Handler.TCPSocket == nil { - x.Handler.TCPSocket = new(TCPSocketAction) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TCPSocket != nil { - x.TCPSocket = nil - } - } else { - if x.TCPSocket == nil { - x.TCPSocket = new(TCPSocketAction) - } - x.TCPSocket.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x PullPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x 
*PullPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x Capability) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *Capability) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *Capabilities) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Add) != 0 - yyq2[1] = len(x.Drop) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Add == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceCapability(([]Capability)(x.Add), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("add")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Add == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceCapability(([]Capability)(x.Add), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Drop == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceCapability(([]Capability)(x.Drop), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("drop")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Drop == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceCapability(([]Capability)(x.Drop), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Capabilities) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - 
x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Capabilities) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "add": - if r.TryDecodeAsNil() { - x.Add = nil - } else { - yyv4 := &x.Add - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceCapability((*[]Capability)(yyv4), d) - } - } - case "drop": - if r.TryDecodeAsNil() { - x.Drop = nil - } else { - yyv6 := &x.Drop - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceCapability((*[]Capability)(yyv6), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Capabilities) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Add = nil - } else { - yyv9 := &x.Add - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceCapability((*[]Capability)(yyv9), d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Drop = nil - } else { - yyv11 := &x.Drop - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceCapability((*[]Capability)(yyv11), d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceRequirements) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Limits) != 0 - yyq2[1] = len(x.Requests) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Limits == nil { - r.EncodeNil() - } else { - x.Limits.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("limits")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Limits == nil { - r.EncodeNil() - } else { - x.Limits.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Requests == nil { - r.EncodeNil() - } else { - x.Requests.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("requests")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Requests == nil { - r.EncodeNil() - } else { - x.Requests.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceRequirements) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceRequirements) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "limits": - if r.TryDecodeAsNil() { - x.Limits = nil - } else { - yyv4 := &x.Limits - yyv4.CodecDecodeSelf(d) - } - case "requests": - if r.TryDecodeAsNil() { - x.Requests = nil - } else { - yyv5 := &x.Requests - yyv5.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceRequirements) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Limits = nil - } else { - yyv7 := &x.Limits - yyv7.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - 
yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Requests = nil - } else { - yyv8 := &x.Requests - yyv8.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [18]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = len(x.Command) != 0 - yyq2[3] = len(x.Args) != 0 - yyq2[4] = x.WorkingDir != "" - yyq2[5] = len(x.Ports) != 0 - yyq2[6] = len(x.Env) != 0 - yyq2[7] = true - yyq2[8] = len(x.VolumeMounts) != 0 - yyq2[9] = x.LivenessProbe != nil - yyq2[10] = x.ReadinessProbe != nil - yyq2[11] = x.Lifecycle != nil - yyq2[12] = x.TerminationMessagePath != "" - yyq2[14] = x.SecurityContext != nil - yyq2[15] = x.Stdin != false - yyq2[16] = x.StdinOnce != false - yyq2[17] = x.TTY != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(18) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Image)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("image")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Image)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Command == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("command")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Command == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Args == nil { - r.EncodeNil() - } else { 
- yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - z.F.EncSliceStringV(x.Args, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("args")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Args == nil { - r.EncodeNil() - } else { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - z.F.EncSliceStringV(x.Args, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.WorkingDir)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("workingDir")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.WorkingDir)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.Ports == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - h.encSliceContainerPort(([]ContainerPort)(x.Ports), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ports")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ports == nil { - r.EncodeNil() - } else { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - h.encSliceContainerPort(([]ContainerPort)(x.Ports), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.Env == nil { - r.EncodeNil() - } else { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - h.encSliceEnvVar(([]EnvVar)(x.Env), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("env")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Env == nil { - r.EncodeNil() - } else { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - h.encSliceEnvVar(([]EnvVar)(x.Env), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yy25 := &x.Resources - yy25.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resources")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy27 := &x.Resources - yy27.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.VolumeMounts == nil { - r.EncodeNil() - } else { - yym30 := z.EncBinary() - _ = yym30 - if false { - } else { - h.encSliceVolumeMount(([]VolumeMount)(x.VolumeMounts), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeMounts")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VolumeMounts == nil { - r.EncodeNil() - } else { - yym31 := z.EncBinary() - _ = yym31 - if 
false { - } else { - h.encSliceVolumeMount(([]VolumeMount)(x.VolumeMounts), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.LivenessProbe == nil { - r.EncodeNil() - } else { - x.LivenessProbe.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("livenessProbe")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LivenessProbe == nil { - r.EncodeNil() - } else { - x.LivenessProbe.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.ReadinessProbe == nil { - r.EncodeNil() - } else { - x.ReadinessProbe.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readinessProbe")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ReadinessProbe == nil { - r.EncodeNil() - } else { - x.ReadinessProbe.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.Lifecycle == nil { - r.EncodeNil() - } else { - x.Lifecycle.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lifecycle")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Lifecycle == nil { - r.EncodeNil() - } else { - x.Lifecycle.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - yym42 := z.EncBinary() - _ = yym42 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TerminationMessagePath)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("terminationMessagePath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym43 := z.EncBinary() - _ = yym43 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TerminationMessagePath)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.ImagePullPolicy.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("imagePullPolicy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.ImagePullPolicy.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[14] { - if x.SecurityContext == nil { - r.EncodeNil() - } else { - x.SecurityContext.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("securityContext")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecurityContext == nil { - r.EncodeNil() - } else { - x.SecurityContext.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[15] { - yym51 := z.EncBinary() - _ = yym51 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } else { - r.EncodeBool(false) - } - } else 
{ - if yyq2[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stdin")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym52 := z.EncBinary() - _ = yym52 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[16] { - yym54 := z.EncBinary() - _ = yym54 - if false { - } else { - r.EncodeBool(bool(x.StdinOnce)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stdinOnce")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym55 := z.EncBinary() - _ = yym55 - if false { - } else { - r.EncodeBool(bool(x.StdinOnce)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[17] { - yym57 := z.EncBinary() - _ = yym57 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[17] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tty")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym58 := z.EncBinary() - _ = yym58 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Container) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Container) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "image": - if r.TryDecodeAsNil() { - x.Image = "" - } else { - x.Image = string(r.DecodeString()) - } - case "command": - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv6 := &x.Command - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecSliceStringX(yyv6, false, d) - } - } - case "args": - if r.TryDecodeAsNil() { - x.Args = nil - } else { - yyv8 := &x.Args - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - z.F.DecSliceStringX(yyv8, false, d) - } - } - case "workingDir": - if 
r.TryDecodeAsNil() { - x.WorkingDir = "" - } else { - x.WorkingDir = string(r.DecodeString()) - } - case "ports": - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv11 := &x.Ports - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceContainerPort((*[]ContainerPort)(yyv11), d) - } - } - case "env": - if r.TryDecodeAsNil() { - x.Env = nil - } else { - yyv13 := &x.Env - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceEnvVar((*[]EnvVar)(yyv13), d) - } - } - case "resources": - if r.TryDecodeAsNil() { - x.Resources = ResourceRequirements{} - } else { - yyv15 := &x.Resources - yyv15.CodecDecodeSelf(d) - } - case "volumeMounts": - if r.TryDecodeAsNil() { - x.VolumeMounts = nil - } else { - yyv16 := &x.VolumeMounts - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - h.decSliceVolumeMount((*[]VolumeMount)(yyv16), d) - } - } - case "livenessProbe": - if r.TryDecodeAsNil() { - if x.LivenessProbe != nil { - x.LivenessProbe = nil - } - } else { - if x.LivenessProbe == nil { - x.LivenessProbe = new(Probe) - } - x.LivenessProbe.CodecDecodeSelf(d) - } - case "readinessProbe": - if r.TryDecodeAsNil() { - if x.ReadinessProbe != nil { - x.ReadinessProbe = nil - } - } else { - if x.ReadinessProbe == nil { - x.ReadinessProbe = new(Probe) - } - x.ReadinessProbe.CodecDecodeSelf(d) - } - case "lifecycle": - if r.TryDecodeAsNil() { - if x.Lifecycle != nil { - x.Lifecycle = nil - } - } else { - if x.Lifecycle == nil { - x.Lifecycle = new(Lifecycle) - } - x.Lifecycle.CodecDecodeSelf(d) - } - case "terminationMessagePath": - if r.TryDecodeAsNil() { - x.TerminationMessagePath = "" - } else { - x.TerminationMessagePath = string(r.DecodeString()) - } - case "imagePullPolicy": - if r.TryDecodeAsNil() { - x.ImagePullPolicy = "" - } else { - x.ImagePullPolicy = PullPolicy(r.DecodeString()) - } - case "securityContext": - if r.TryDecodeAsNil() { - if x.SecurityContext != nil { - x.SecurityContext = nil - } - } else { - if x.SecurityContext == nil { - x.SecurityContext = new(SecurityContext) - } - x.SecurityContext.CodecDecodeSelf(d) - } - case "stdin": - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - x.Stdin = bool(r.DecodeBool()) - } - case "stdinOnce": - if r.TryDecodeAsNil() { - x.StdinOnce = false - } else { - x.StdinOnce = bool(r.DecodeBool()) - } - case "tty": - if r.TryDecodeAsNil() { - x.TTY = false - } else { - x.TTY = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj27 int - var yyb27 bool - var yyhl27 bool = l >= 0 - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Image = "" - } else { - x.Image = string(r.DecodeString()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv30 := &x.Command - yym31 := z.DecBinary() - _ = yym31 - if false { - } else { - z.F.DecSliceStringX(yyv30, false, d) - } - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Args = nil - } else { - yyv32 := &x.Args - yym33 := z.DecBinary() - _ = yym33 - if false { - } else { - z.F.DecSliceStringX(yyv32, false, d) - } - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.WorkingDir = "" - } else { - x.WorkingDir = string(r.DecodeString()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv35 := &x.Ports - yym36 := z.DecBinary() - _ = yym36 - if false { - } else { - h.decSliceContainerPort((*[]ContainerPort)(yyv35), d) - } - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Env = nil - } else { - yyv37 := &x.Env - yym38 := z.DecBinary() - _ = yym38 - if false { - } else { - h.decSliceEnvVar((*[]EnvVar)(yyv37), d) - } - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Resources = ResourceRequirements{} - } else { - yyv39 := &x.Resources - yyv39.CodecDecodeSelf(d) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeMounts = nil - } else { - yyv40 := &x.VolumeMounts - yym41 := z.DecBinary() - _ = yym41 - if false { - } else { - h.decSliceVolumeMount((*[]VolumeMount)(yyv40), d) - } - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LivenessProbe != nil { - x.LivenessProbe = nil - } - } else { - if x.LivenessProbe == nil { - x.LivenessProbe = new(Probe) - } - x.LivenessProbe.CodecDecodeSelf(d) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ReadinessProbe != nil { - x.ReadinessProbe = nil - } - } else { - if x.ReadinessProbe == nil { - x.ReadinessProbe = new(Probe) - } - 
x.ReadinessProbe.CodecDecodeSelf(d) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Lifecycle != nil { - x.Lifecycle = nil - } - } else { - if x.Lifecycle == nil { - x.Lifecycle = new(Lifecycle) - } - x.Lifecycle.CodecDecodeSelf(d) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TerminationMessagePath = "" - } else { - x.TerminationMessagePath = string(r.DecodeString()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ImagePullPolicy = "" - } else { - x.ImagePullPolicy = PullPolicy(r.DecodeString()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecurityContext != nil { - x.SecurityContext = nil - } - } else { - if x.SecurityContext == nil { - x.SecurityContext = new(SecurityContext) - } - x.SecurityContext.CodecDecodeSelf(d) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - x.Stdin = bool(r.DecodeBool()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.StdinOnce = false - } else { - x.StdinOnce = bool(r.DecodeBool()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TTY = false - } else { - x.TTY = bool(r.DecodeBool()) - } - for { - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj27-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Handler) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Exec != nil - yyq2[1] = x.HTTPGet != nil - yyq2[2] = x.TCPSocket != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - 
} - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Exec == nil { - r.EncodeNil() - } else { - x.Exec.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("exec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Exec == nil { - r.EncodeNil() - } else { - x.Exec.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.HTTPGet == nil { - r.EncodeNil() - } else { - x.HTTPGet.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("httpGet")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HTTPGet == nil { - r.EncodeNil() - } else { - x.HTTPGet.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.TCPSocket == nil { - r.EncodeNil() - } else { - x.TCPSocket.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tcpSocket")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TCPSocket == nil { - r.EncodeNil() - } else { - x.TCPSocket.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Handler) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Handler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "exec": - if r.TryDecodeAsNil() { - if x.Exec != nil { - x.Exec = nil - } - } else { - if x.Exec == nil { - x.Exec = new(ExecAction) - } - x.Exec.CodecDecodeSelf(d) - } - case "httpGet": - if r.TryDecodeAsNil() { - if x.HTTPGet != nil { - x.HTTPGet = nil - } - } else { - if x.HTTPGet == nil { - x.HTTPGet = new(HTTPGetAction) - } - x.HTTPGet.CodecDecodeSelf(d) - } - case "tcpSocket": - if r.TryDecodeAsNil() { - if 
x.TCPSocket != nil { - x.TCPSocket = nil - } - } else { - if x.TCPSocket == nil { - x.TCPSocket = new(TCPSocketAction) - } - x.TCPSocket.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Handler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Exec != nil { - x.Exec = nil - } - } else { - if x.Exec == nil { - x.Exec = new(ExecAction) - } - x.Exec.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HTTPGet != nil { - x.HTTPGet = nil - } - } else { - if x.HTTPGet == nil { - x.HTTPGet = new(HTTPGetAction) - } - x.HTTPGet.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TCPSocket != nil { - x.TCPSocket = nil - } - } else { - if x.TCPSocket == nil { - x.TCPSocket = new(TCPSocketAction) - } - x.TCPSocket.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Lifecycle) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.PostStart != nil - yyq2[1] = x.PreStop != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.PostStart == nil { - r.EncodeNil() - } else { - x.PostStart.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("postStart")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PostStart == nil { - r.EncodeNil() - } else { - x.PostStart.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.PreStop == nil { - r.EncodeNil() - } else { - x.PreStop.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("preStop")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PreStop == nil { - r.EncodeNil() - } else { - x.PreStop.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Lifecycle) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Lifecycle) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "postStart": - if r.TryDecodeAsNil() { - if x.PostStart != nil { - x.PostStart = nil - } - } else { - if x.PostStart == nil { - x.PostStart = new(Handler) - } - x.PostStart.CodecDecodeSelf(d) - } - case "preStop": - if r.TryDecodeAsNil() { - if x.PreStop != nil { - x.PreStop = nil - } - } else { - if x.PreStop == nil { - x.PreStop = new(Handler) - } - x.PreStop.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Lifecycle) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PostStart != nil { - x.PostStart = nil - } - } else { - if x.PostStart == nil { - x.PostStart = new(Handler) - } - x.PostStart.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PreStop != nil { - x.PreStop = nil - } - } else { - if x.PreStop == nil { - x.PreStop = new(Handler) - } - x.PreStop.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ConditionStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ConditionStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ContainerStateWaiting) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Reason != "" - yyq2[1] = x.Message != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerStateWaiting) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - 
} -} - -func (x *ContainerStateWaiting) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerStateWaiting) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerStateRunning) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.StartedAt - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else if yym5 { - z.EncBinaryMarshal(yy4) - } else if !yym5 && z.IsJSONHandle() { - z.EncJSONMarshal(yy4) - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startedAt")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.StartedAt - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else if yym7 { - 
z.EncBinaryMarshal(yy6) - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(yy6) - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerStateRunning) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerStateRunning) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "startedAt": - if r.TryDecodeAsNil() { - x.StartedAt = pkg2_unversioned.Time{} - } else { - yyv4 := &x.StartedAt - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else if yym5 { - z.DecBinaryUnmarshal(yyv4) - } else if !yym5 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4) - } else { - z.DecFallback(yyv4, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerStateRunning) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.StartedAt = pkg2_unversioned.Time{} - } else { - yyv7 := &x.StartedAt - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(yyv7) { - } else if yym8 { - z.DecBinaryUnmarshal(yyv7) - } else if !yym8 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv7) - } else { - z.DecFallback(yyv7, false) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerStateTerminated) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := 
z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Signal != 0 - yyq2[2] = x.Reason != "" - yyq2[3] = x.Message != "" - yyq2[4] = true - yyq2[5] = true - yyq2[6] = x.ContainerID != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.ExitCode)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("exitCode")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.ExitCode)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.Signal)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("signal")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.Signal)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy16 := &x.StartedAt - yym17 := z.EncBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.EncExt(yy16) { - } else if yym17 { - z.EncBinaryMarshal(yy16) - } else if !yym17 && z.IsJSONHandle() { - z.EncJSONMarshal(yy16) - } else { - z.EncFallback(yy16) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startedAt")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy18 := &x.StartedAt - yym19 := z.EncBinary() - _ = yym19 - if false { - } else if z.HasExtensions() && z.EncExt(yy18) { - 
} else if yym19 { - z.EncBinaryMarshal(yy18) - } else if !yym19 && z.IsJSONHandle() { - z.EncJSONMarshal(yy18) - } else { - z.EncFallback(yy18) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yy21 := &x.FinishedAt - yym22 := z.EncBinary() - _ = yym22 - if false { - } else if z.HasExtensions() && z.EncExt(yy21) { - } else if yym22 { - z.EncBinaryMarshal(yy21) - } else if !yym22 && z.IsJSONHandle() { - z.EncJSONMarshal(yy21) - } else { - z.EncFallback(yy21) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("finishedAt")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy23 := &x.FinishedAt - yym24 := z.EncBinary() - _ = yym24 - if false { - } else if z.HasExtensions() && z.EncExt(yy23) { - } else if yym24 { - z.EncBinaryMarshal(yy23) - } else if !yym24 && z.IsJSONHandle() { - z.EncJSONMarshal(yy23) - } else { - z.EncFallback(yy23) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym27 := z.EncBinary() - _ = yym27 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerStateTerminated) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerStateTerminated) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "exitCode": - if r.TryDecodeAsNil() { - x.ExitCode = 0 - } else { - x.ExitCode = int32(r.DecodeInt(32)) - } - case "signal": - if r.TryDecodeAsNil() { - x.Signal = 0 - } else { - x.Signal = int32(r.DecodeInt(32)) - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - 
x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - case "startedAt": - if r.TryDecodeAsNil() { - x.StartedAt = pkg2_unversioned.Time{} - } else { - yyv8 := &x.StartedAt - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else if yym9 { - z.DecBinaryUnmarshal(yyv8) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv8) - } else { - z.DecFallback(yyv8, false) - } - } - case "finishedAt": - if r.TryDecodeAsNil() { - x.FinishedAt = pkg2_unversioned.Time{} - } else { - yyv10 := &x.FinishedAt - yym11 := z.DecBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.DecExt(yyv10) { - } else if yym11 { - z.DecBinaryUnmarshal(yyv10) - } else if !yym11 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv10) - } else { - z.DecFallback(yyv10, false) - } - } - case "containerID": - if r.TryDecodeAsNil() { - x.ContainerID = "" - } else { - x.ContainerID = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerStateTerminated) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExitCode = 0 - } else { - x.ExitCode = int32(r.DecodeInt(32)) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Signal = 0 - } else { - x.Signal = int32(r.DecodeInt(32)) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.StartedAt = pkg2_unversioned.Time{} - } else { - yyv18 := &x.StartedAt - yym19 := z.DecBinary() - _ = yym19 - if false { - } else if z.HasExtensions() && z.DecExt(yyv18) { - } else if yym19 { - z.DecBinaryUnmarshal(yyv18) - } else if !yym19 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv18) - } else { - z.DecFallback(yyv18, false) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FinishedAt = pkg2_unversioned.Time{} - } else { - yyv20 := &x.FinishedAt - yym21 := z.DecBinary() - _ = yym21 - if false { - } else if z.HasExtensions() && z.DecExt(yyv20) { - } else if yym21 { - z.DecBinaryUnmarshal(yyv20) - } else if !yym21 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv20) - } else { - z.DecFallback(yyv20, false) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerID = "" - } else { - x.ContainerID = string(r.DecodeString()) - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerState) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Waiting != nil - yyq2[1] = x.Running != nil - yyq2[2] = x.Terminated != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Waiting == nil { - r.EncodeNil() - } else { - x.Waiting.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("waiting")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Waiting == nil { - r.EncodeNil() - } else { - x.Waiting.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Running == nil { - r.EncodeNil() - } else { - x.Running.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("running")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Running == nil { - r.EncodeNil() - } else { - x.Running.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Terminated == nil { - r.EncodeNil() - } else { - x.Terminated.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("terminated")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Terminated == nil { - r.EncodeNil() - } else { - x.Terminated.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerState) 
CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerState) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "waiting": - if r.TryDecodeAsNil() { - if x.Waiting != nil { - x.Waiting = nil - } - } else { - if x.Waiting == nil { - x.Waiting = new(ContainerStateWaiting) - } - x.Waiting.CodecDecodeSelf(d) - } - case "running": - if r.TryDecodeAsNil() { - if x.Running != nil { - x.Running = nil - } - } else { - if x.Running == nil { - x.Running = new(ContainerStateRunning) - } - x.Running.CodecDecodeSelf(d) - } - case "terminated": - if r.TryDecodeAsNil() { - if x.Terminated != nil { - x.Terminated = nil - } - } else { - if x.Terminated == nil { - x.Terminated = new(ContainerStateTerminated) - } - x.Terminated.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerState) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Waiting != nil { - x.Waiting = nil - } - } else { - if x.Waiting == nil { - x.Waiting = new(ContainerStateWaiting) - } - x.Waiting.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Running != nil { - x.Running = nil - } - } else { - if x.Running == nil { - x.Running = new(ContainerStateRunning) - } - x.Running.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Terminated != nil { - x.Terminated = nil - } - } else { - if x.Terminated == nil { - x.Terminated = 
new(ContainerStateTerminated) - } - x.Terminated.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = true - yyq2[2] = true - yyq2[7] = x.ContainerID != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 5 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy7 := &x.State - yy7.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("state")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.State - yy9.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy12 := &x.LastTerminationState - yy12.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastState")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.LastTerminationState - yy14.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeBool(bool(x.Ready)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ready")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeBool(bool(x.Ready)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(x.RestartCount)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("restartCount")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeInt(int64(x.RestartCount)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - 
} else { - r.EncodeString(codecSelferC_UTF81234, string(x.Image)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("image")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Image)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ImageID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("imageID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym27 := z.EncBinary() - _ = yym27 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ImageID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym30 := z.EncBinary() - _ = yym30 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "state": - if r.TryDecodeAsNil() { - x.State = ContainerState{} - } else { - yyv5 := &x.State - yyv5.CodecDecodeSelf(d) - } - case "lastState": - if r.TryDecodeAsNil() { - x.LastTerminationState = ContainerState{} - } else { - yyv6 := &x.LastTerminationState - yyv6.CodecDecodeSelf(d) - } - case "ready": - if r.TryDecodeAsNil() { - x.Ready = false 
- } else { - x.Ready = bool(r.DecodeBool()) - } - case "restartCount": - if r.TryDecodeAsNil() { - x.RestartCount = 0 - } else { - x.RestartCount = int32(r.DecodeInt(32)) - } - case "image": - if r.TryDecodeAsNil() { - x.Image = "" - } else { - x.Image = string(r.DecodeString()) - } - case "imageID": - if r.TryDecodeAsNil() { - x.ImageID = "" - } else { - x.ImageID = string(r.DecodeString()) - } - case "containerID": - if r.TryDecodeAsNil() { - x.ContainerID = "" - } else { - x.ContainerID = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.State = ContainerState{} - } else { - yyv14 := &x.State - yyv14.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTerminationState = ContainerState{} - } else { - yyv15 := &x.LastTerminationState - yyv15.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ready = false - } else { - x.Ready = bool(r.DecodeBool()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RestartCount = 0 - } else { - x.RestartCount = int32(r.DecodeInt(32)) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Image = "" - } else { - x.Image = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ImageID = "" - } else { - x.ImageID = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerID 
= "" - } else { - x.ContainerID = string(r.DecodeString()) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x PodPhase) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PodPhase) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x PodConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PodConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *PodCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = true - yyq2[3] = true - yyq2[4] = x.Reason != "" - yyq2[5] = x.Message != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Status.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Status.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.LastProbeTime - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else if yym11 { - z.EncBinaryMarshal(yy10) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(yy10) - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.LastProbeTime - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if yym13 { - z.EncBinaryMarshal(yy12) - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.LastTransitionTime - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(yy15) { - } else if yym16 { - z.EncBinaryMarshal(yy15) - } else if !yym16 && z.IsJSONHandle() { - z.EncJSONMarshal(yy15) - } else { - z.EncFallback(yy15) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.LastTransitionTime - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(yy17) { - } else if yym18 { - z.EncBinaryMarshal(yy17) - } else if !yym18 && z.IsJSONHandle() { - z.EncJSONMarshal(yy17) - } else { - z.EncFallback(yy17) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r 
:= codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = PodConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = ConditionStatus(r.DecodeString()) - } - case "lastProbeTime": - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_unversioned.Time{} - } else { - yyv6 := &x.LastProbeTime - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if yym7 { - z.DecBinaryUnmarshal(yyv6) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv8 := &x.LastTransitionTime - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else if yym9 { - z.DecBinaryUnmarshal(yyv8) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv8) - } else { - z.DecFallback(yyv8, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = PodConditionType(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = ConditionStatus(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_unversioned.Time{} - } else { - yyv15 := &x.LastProbeTime - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.DecExt(yyv15) { - } else if yym16 { - z.DecBinaryUnmarshal(yyv15) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv15) - } else { - z.DecFallback(yyv15, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv17 := &x.LastTransitionTime - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else if yym18 { - z.DecBinaryUnmarshal(yyv17) - } else if !yym18 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv17) - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x RestartPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *RestartPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *PodList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - 
r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSlicePod(([]Pod)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSlicePod(([]Pod)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if 
r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSlicePod((*[]Pod)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSlicePod((*[]Pod)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x DNSPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *DNSPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *NodeSelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - 
yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.NodeSelectorTerms == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceNodeSelectorTerm(([]NodeSelectorTerm)(x.NodeSelectorTerms), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeSelectorTerms")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NodeSelectorTerms == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceNodeSelectorTerm(([]NodeSelectorTerm)(x.NodeSelectorTerms), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeSelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "nodeSelectorTerms": - if r.TryDecodeAsNil() { - x.NodeSelectorTerms = nil - } else { - yyv4 := &x.NodeSelectorTerms - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeSelectorTerms = nil - } else { - yyv7 := &x.NodeSelectorTerms - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { 
- h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeSelectorTerm) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.MatchExpressions == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceNodeSelectorRequirement(([]NodeSelectorRequirement)(x.MatchExpressions), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("matchExpressions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MatchExpressions == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceNodeSelectorRequirement(([]NodeSelectorRequirement)(x.MatchExpressions), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeSelectorTerm) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeSelectorTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "matchExpressions": - if r.TryDecodeAsNil() { - x.MatchExpressions = nil - } else { - yyv4 := &x.MatchExpressions - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv4), d) - } - } - default: - 
z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeSelectorTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MatchExpressions = nil - } else { - yyv7 := &x.MatchExpressions - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeSelectorRequirement) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = len(x.Values) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Operator.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("operator")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Operator.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Values == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - z.F.EncSliceStringV(x.Values, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("values")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Values == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - z.F.EncSliceStringV(x.Values, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeSelectorRequirement) CodecDecodeSelf(d *codec1978.Decoder) { - 
var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeSelectorRequirement) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - case "operator": - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - x.Operator = NodeSelectorOperator(r.DecodeString()) - } - case "values": - if r.TryDecodeAsNil() { - x.Values = nil - } else { - yyv6 := &x.Values - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecSliceStringX(yyv6, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeSelectorRequirement) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - x.Operator = NodeSelectorOperator(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Values = nil - } else { - yyv11 := &x.Values - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - z.F.DecSliceStringX(yyv11, false, d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x NodeSelectorOperator) CodecEncodeSelf(e *codec1978.Encoder) { 
- var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *NodeSelectorOperator) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.NodeAffinity != nil - yyq2[1] = x.PodAffinity != nil - yyq2[2] = x.PodAntiAffinity != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.NodeAffinity == nil { - r.EncodeNil() - } else { - x.NodeAffinity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeAffinity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NodeAffinity == nil { - r.EncodeNil() - } else { - x.NodeAffinity.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.PodAffinity == nil { - r.EncodeNil() - } else { - x.PodAffinity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podAffinity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PodAffinity == nil { - r.EncodeNil() - } else { - x.PodAffinity.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.PodAntiAffinity == nil { - r.EncodeNil() - } else { - x.PodAntiAffinity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podAntiAffinity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PodAntiAffinity == nil { - r.EncodeNil() - } else { - x.PodAntiAffinity.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Affinity) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - 
x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Affinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "nodeAffinity": - if r.TryDecodeAsNil() { - if x.NodeAffinity != nil { - x.NodeAffinity = nil - } - } else { - if x.NodeAffinity == nil { - x.NodeAffinity = new(NodeAffinity) - } - x.NodeAffinity.CodecDecodeSelf(d) - } - case "podAffinity": - if r.TryDecodeAsNil() { - if x.PodAffinity != nil { - x.PodAffinity = nil - } - } else { - if x.PodAffinity == nil { - x.PodAffinity = new(PodAffinity) - } - x.PodAffinity.CodecDecodeSelf(d) - } - case "podAntiAffinity": - if r.TryDecodeAsNil() { - if x.PodAntiAffinity != nil { - x.PodAntiAffinity = nil - } - } else { - if x.PodAntiAffinity == nil { - x.PodAntiAffinity = new(PodAntiAffinity) - } - x.PodAntiAffinity.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Affinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NodeAffinity != nil { - x.NodeAffinity = nil - } - } else { - if x.NodeAffinity == nil { - x.NodeAffinity = new(NodeAffinity) - } - x.NodeAffinity.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PodAffinity != nil { - x.PodAffinity = nil - } - } else { - if x.PodAffinity == nil { - x.PodAffinity = new(PodAffinity) - } - x.PodAffinity.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PodAntiAffinity != nil { - x.PodAntiAffinity = nil - } - } else { - if x.PodAntiAffinity == nil { - x.PodAntiAffinity = new(PodAntiAffinity) - } - x.PodAntiAffinity.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodAffinity) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 - yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodAffinity) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "requiredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) - } - } - case "preferredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodAntiAffinity) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = 
len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 - yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodAntiAffinity) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodAntiAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - 
yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "requiredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) - } - } - case "preferredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodAntiAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *WeightedPodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Weight)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("weight")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - 
yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Weight)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.PodAffinityTerm - yy7.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podAffinityTerm")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.PodAffinityTerm - yy9.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *WeightedPodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *WeightedPodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "weight": - if r.TryDecodeAsNil() { - x.Weight = 0 - } else { - x.Weight = int(r.DecodeInt(codecSelferBitsize1234)) - } - case "podAffinityTerm": - if r.TryDecodeAsNil() { - x.PodAffinityTerm = PodAffinityTerm{} - } else { - yyv5 := &x.PodAffinityTerm - yyv5.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *WeightedPodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Weight = 0 - } else { - x.Weight = int(r.DecodeInt(codecSelferBitsize1234)) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodAffinityTerm = PodAffinityTerm{} - } else { - yyv8 := &x.PodAffinityTerm - yyv8.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.LabelSelector != nil - yyq2[2] = x.TopologyKey != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.LabelSelector == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { - } else { - z.EncFallback(x.LabelSelector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("labelSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LabelSelector == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { - } else { - z.EncFallback(x.LabelSelector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Namespaces == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - z.F.EncSliceStringV(x.Namespaces, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespaces")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Namespaces == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - z.F.EncSliceStringV(x.Namespaces, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("topologyKey")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) 
- } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "labelSelector": - if r.TryDecodeAsNil() { - if x.LabelSelector != nil { - x.LabelSelector = nil - } - } else { - if x.LabelSelector == nil { - x.LabelSelector = new(pkg2_unversioned.LabelSelector) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { - } else { - z.DecFallback(x.LabelSelector, false) - } - } - case "namespaces": - if r.TryDecodeAsNil() { - x.Namespaces = nil - } else { - yyv6 := &x.Namespaces - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecSliceStringX(yyv6, false, d) - } - } - case "topologyKey": - if r.TryDecodeAsNil() { - x.TopologyKey = "" - } else { - x.TopologyKey = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LabelSelector != nil { - x.LabelSelector = nil - } - } else { - if x.LabelSelector == nil { - x.LabelSelector = new(pkg2_unversioned.LabelSelector) - } - yym11 := z.DecBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { - } else { - z.DecFallback(x.LabelSelector, false) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespaces = nil - } else { - yyv12 := &x.Namespaces - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - z.F.DecSliceStringX(yyv12, false, d) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TopologyKey = "" - } else { - x.TopologyKey = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.RequiredDuringSchedulingIgnoredDuringExecution != nil - yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeAffinity) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "requiredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - if x.RequiredDuringSchedulingIgnoredDuringExecution != nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } - } else { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) - } - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) - } - case "preferredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv5 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv5), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RequiredDuringSchedulingIgnoredDuringExecution != nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } - } else { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) - } - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv9 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv9), d) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PreferredSchedulingTerm) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 
{ - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Weight)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("weight")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Weight)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.Preference - yy7.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preference")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.Preference - yy9.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PreferredSchedulingTerm) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PreferredSchedulingTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "weight": - if r.TryDecodeAsNil() { - x.Weight = 0 - } else { - x.Weight = int32(r.DecodeInt(32)) - } - case "preference": - if r.TryDecodeAsNil() { - x.Preference = NodeSelectorTerm{} - } else { - yyv5 := &x.Preference - yyv5.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PreferredSchedulingTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Weight = 0 - } else { - x.Weight = int32(r.DecodeInt(32)) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 
> l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Preference = NodeSelectorTerm{} - } else { - yyv8 := &x.Preference - yyv8.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Taint) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Value != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Effect.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("effect")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Effect.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Taint) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Taint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - case "effect": - if r.TryDecodeAsNil() { - x.Effect = "" - } else { - x.Effect = TaintEffect(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Taint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Effect = "" - } else { - x.Effect = TaintEffect(r.DecodeString()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x TaintEffect) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *TaintEffect) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *Toleration) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } 
else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Key != "" - yyq2[1] = x.Operator != "" - yyq2[2] = x.Value != "" - yyq2[3] = x.Effect != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - x.Operator.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("operator")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Operator.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - x.Effect.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("effect")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Effect.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Toleration) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Toleration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - 
var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - case "operator": - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - x.Operator = TolerationOperator(r.DecodeString()) - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - case "effect": - if r.TryDecodeAsNil() { - x.Effect = "" - } else { - x.Effect = TaintEffect(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Toleration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - x.Operator = TolerationOperator(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Effect = "" - } else { - x.Effect = TaintEffect(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x TolerationOperator) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *TolerationOperator) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } 
-} - -func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [13]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.RestartPolicy != "" - yyq2[3] = x.TerminationGracePeriodSeconds != nil - yyq2[4] = x.ActiveDeadlineSeconds != nil - yyq2[5] = x.DNSPolicy != "" - yyq2[6] = len(x.NodeSelector) != 0 - yyq2[8] = x.NodeName != "" - yyq2[9] = x.SecurityContext != nil - yyq2[10] = len(x.ImagePullSecrets) != 0 - yyq2[11] = x.Hostname != "" - yyq2[12] = x.Subdomain != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(13) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Volumes == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceVolume(([]Volume)(x.Volumes), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Volumes == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceVolume(([]Volume)(x.Volumes), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Containers == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceContainer(([]Container)(x.Containers), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Containers == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceContainer(([]Container)(x.Containers), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - x.RestartPolicy.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("restartPolicy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.RestartPolicy.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.TerminationGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy13 := *x.TerminationGracePeriodSeconds - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(yy13)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("terminationGracePeriodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TerminationGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy15 := *x.TerminationGracePeriodSeconds - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(yy15)) - } - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy18 := *x.ActiveDeadlineSeconds - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(yy18)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy20 := *x.ActiveDeadlineSeconds - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeInt(int64(yy20)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - x.DNSPolicy.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("dnsPolicy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.DNSPolicy.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.NodeSelector == nil { - r.EncodeNil() - } else { - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - z.F.EncMapStringStringV(x.NodeSelector, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NodeSelector == nil { - r.EncodeNil() - } else { - yym27 := z.EncBinary() - _ = yym27 - if false { - } else { - z.F.EncMapStringStringV(x.NodeSelector, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceAccountName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym30 := z.EncBinary() - _ = yym30 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - yym32 := z.EncBinary() - _ = yym32 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NodeName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym33 := z.EncBinary() - _ = yym33 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NodeName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.SecurityContext == nil { - r.EncodeNil() - } else { - x.SecurityContext.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("securityContext")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecurityContext == nil { - r.EncodeNil() - } else 
{ - x.SecurityContext.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.ImagePullSecrets == nil { - r.EncodeNil() - } else { - yym38 := z.EncBinary() - _ = yym38 - if false { - } else { - h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("imagePullSecrets")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ImagePullSecrets == nil { - r.EncodeNil() - } else { - yym39 := z.EncBinary() - _ = yym39 - if false { - } else { - h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - yym41 := z.EncBinary() - _ = yym41 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostname")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym42 := z.EncBinary() - _ = yym42 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - yym44 := z.EncBinary() - _ = yym44 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subdomain")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym45 := z.EncBinary() - _ = yym45 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "volumes": - if r.TryDecodeAsNil() { - x.Volumes = nil - } else { - yyv4 := &x.Volumes - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceVolume((*[]Volume)(yyv4), d) - } - } - case "containers": - if r.TryDecodeAsNil() { - x.Containers = nil - } else { - yyv6 := &x.Containers - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceContainer((*[]Container)(yyv6), d) - } - } - case "restartPolicy": - if r.TryDecodeAsNil() { - x.RestartPolicy = "" - } else { - x.RestartPolicy = RestartPolicy(r.DecodeString()) - } - case "terminationGracePeriodSeconds": - if r.TryDecodeAsNil() { - if x.TerminationGracePeriodSeconds != nil { - x.TerminationGracePeriodSeconds = nil - } - } else { - if x.TerminationGracePeriodSeconds == nil { - x.TerminationGracePeriodSeconds = new(int64) - } - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - case "activeDeadlineSeconds": - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - case "dnsPolicy": - if r.TryDecodeAsNil() { - x.DNSPolicy = "" - } else { - x.DNSPolicy = DNSPolicy(r.DecodeString()) - } - case "nodeSelector": - if r.TryDecodeAsNil() { - x.NodeSelector = nil - } else { - yyv14 := &x.NodeSelector - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - z.F.DecMapStringStringX(yyv14, false, d) - } - } - case "serviceAccountName": - if r.TryDecodeAsNil() { - x.ServiceAccountName = "" - } else { - x.ServiceAccountName = string(r.DecodeString()) - } - case "nodeName": - if r.TryDecodeAsNil() { - x.NodeName = "" - } else { - x.NodeName = string(r.DecodeString()) - } - case "securityContext": - if r.TryDecodeAsNil() { - if x.SecurityContext != nil { - x.SecurityContext = nil - } - } else { - if x.SecurityContext == nil { - x.SecurityContext = new(PodSecurityContext) - } - x.SecurityContext.CodecDecodeSelf(d) - } - case "imagePullSecrets": - if r.TryDecodeAsNil() { - x.ImagePullSecrets = nil - } else { - yyv19 := &x.ImagePullSecrets - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv19), d) - } - } - case "hostname": - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - x.Hostname = string(r.DecodeString()) - } - case "subdomain": - if r.TryDecodeAsNil() { - x.Subdomain = "" - } else { - x.Subdomain = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj23 int - var yyb23 bool - var yyhl23 bool = l >= 0 - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Volumes = nil - } else { - yyv24 := &x.Volumes - yym25 := z.DecBinary() - _ = yym25 - if false { - } else { - h.decSliceVolume((*[]Volume)(yyv24), d) - } - } - 
yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Containers = nil - } else { - yyv26 := &x.Containers - yym27 := z.DecBinary() - _ = yym27 - if false { - } else { - h.decSliceContainer((*[]Container)(yyv26), d) - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RestartPolicy = "" - } else { - x.RestartPolicy = RestartPolicy(r.DecodeString()) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TerminationGracePeriodSeconds != nil { - x.TerminationGracePeriodSeconds = nil - } - } else { - if x.TerminationGracePeriodSeconds == nil { - x.TerminationGracePeriodSeconds = new(int64) - } - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym32 := z.DecBinary() - _ = yym32 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DNSPolicy = "" - } else { - x.DNSPolicy = DNSPolicy(r.DecodeString()) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeSelector = nil - } else { - yyv34 := &x.NodeSelector - yym35 := z.DecBinary() - _ = yym35 - if false { - } else { - z.F.DecMapStringStringX(yyv34, false, d) - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServiceAccountName = "" - } else { - x.ServiceAccountName = string(r.DecodeString()) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeName = "" - } else { - x.NodeName = string(r.DecodeString()) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - 
return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecurityContext != nil { - x.SecurityContext = nil - } - } else { - if x.SecurityContext == nil { - x.SecurityContext = new(PodSecurityContext) - } - x.SecurityContext.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ImagePullSecrets = nil - } else { - yyv39 := &x.ImagePullSecrets - yym40 := z.DecBinary() - _ = yym40 - if false { - } else { - h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv39), d) - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - x.Hostname = string(r.DecodeString()) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Subdomain = "" - } else { - x.Subdomain = string(r.DecodeString()) - } - for { - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj23-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodSecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.HostNetwork != false - yyq2[1] = x.HostPID != false - yyq2[2] = x.HostIPC != false - yyq2[3] = x.SELinuxOptions != nil - yyq2[4] = x.RunAsUser != nil - yyq2[5] = x.RunAsNonRoot != nil - yyq2[6] = len(x.SupplementalGroups) != 0 - yyq2[7] = x.FSGroup != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.HostNetwork)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostNetwork")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.HostNetwork)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.HostPID)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPID")) 
- z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.HostPID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.HostIPC)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostIPC")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.HostIPC)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.RunAsUser == nil { - r.EncodeNil() - } else { - yy16 := *x.RunAsUser - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(yy16)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RunAsUser == nil { - r.EncodeNil() - } else { - yy18 := *x.RunAsUser - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(yy18)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.RunAsNonRoot == nil { - r.EncodeNil() - } else { - yy21 := *x.RunAsNonRoot - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeBool(bool(yy21)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsNonRoot")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RunAsNonRoot == nil { - r.EncodeNil() - } else { - yy23 := *x.RunAsNonRoot - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeBool(bool(yy23)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.SupplementalGroups == nil { - r.EncodeNil() - } else { - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - z.F.EncSliceInt64V(x.SupplementalGroups, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("supplementalGroups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SupplementalGroups == nil { - r.EncodeNil() - } else { - yym27 := z.EncBinary() - _ = yym27 - if false { - } else { - z.F.EncSliceInt64V(x.SupplementalGroups, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.FSGroup == nil { - r.EncodeNil() - } else { - yy29 := *x.FSGroup - 
yym30 := z.EncBinary() - _ = yym30 - if false { - } else { - r.EncodeInt(int64(yy29)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsGroup")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FSGroup == nil { - r.EncodeNil() - } else { - yy31 := *x.FSGroup - yym32 := z.EncBinary() - _ = yym32 - if false { - } else { - r.EncodeInt(int64(yy31)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSecurityContext) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "hostNetwork": - if r.TryDecodeAsNil() { - x.HostNetwork = false - } else { - x.HostNetwork = bool(r.DecodeBool()) - } - case "hostPID": - if r.TryDecodeAsNil() { - x.HostPID = false - } else { - x.HostPID = bool(r.DecodeBool()) - } - case "hostIPC": - if r.TryDecodeAsNil() { - x.HostIPC = false - } else { - x.HostIPC = bool(r.DecodeBool()) - } - case "seLinuxOptions": - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - case "runAsUser": - if r.TryDecodeAsNil() { - if x.RunAsUser != nil { - x.RunAsUser = nil - } - } else { - if x.RunAsUser == nil { - x.RunAsUser = new(int64) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) - } - } - case "runAsNonRoot": - if r.TryDecodeAsNil() { - if x.RunAsNonRoot != nil { - x.RunAsNonRoot = nil - } - } else { - if x.RunAsNonRoot == nil { - x.RunAsNonRoot = new(bool) - } - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() - } - } - case "supplementalGroups": - if r.TryDecodeAsNil() { - x.SupplementalGroups = nil - } else { - yyv12 := &x.SupplementalGroups - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - z.F.DecSliceInt64X(yyv12, false, d) - } - } - case "fsGroup": - if r.TryDecodeAsNil() { - if x.FSGroup != nil { - x.FSGroup = nil - 
} - } else { - if x.FSGroup == nil { - x.FSGroup = new(int64) - } - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*int64)(x.FSGroup)) = int64(r.DecodeInt(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj16 int - var yyb16 bool - var yyhl16 bool = l >= 0 - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostNetwork = false - } else { - x.HostNetwork = bool(r.DecodeBool()) - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostPID = false - } else { - x.HostPID = bool(r.DecodeBool()) - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostIPC = false - } else { - x.HostIPC = bool(r.DecodeBool()) - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RunAsUser != nil { - x.RunAsUser = nil - } - } else { - if x.RunAsUser == nil { - x.RunAsUser = new(int64) - } - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RunAsNonRoot != nil { - x.RunAsNonRoot = nil - } - } else { - if x.RunAsNonRoot == nil { - x.RunAsNonRoot = new(bool) - } - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SupplementalGroups = nil - } else { - yyv25 := &x.SupplementalGroups - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - z.F.DecSliceInt64X(yyv25, false, d) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FSGroup != nil { - x.FSGroup = nil - } - } else { - if x.FSGroup == nil { - x.FSGroup = new(int64) - } - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*int64)(x.FSGroup)) = int64(r.DecodeInt(64)) - } - } - for { - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj16-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Phase != "" - yyq2[1] = len(x.Conditions) != 0 - yyq2[2] = x.Message != "" - yyq2[3] = x.Reason != "" - yyq2[4] = x.HostIP != "" - yyq2[5] = x.PodIP != "" - yyq2[6] = x.StartTime != nil - yyq2[7] = len(x.ContainerStatuses) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Phase.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("phase")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Phase.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSlicePodCondition(([]PodCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSlicePodCondition(([]PodCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - 
r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostIP")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodIP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podIP")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodIP)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.StartTime == nil { - r.EncodeNil() - } else { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym22 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym22 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.StartTime == nil { - r.EncodeNil() - } else { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym23 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym23 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.ContainerStatuses == nil { - r.EncodeNil() - } else { - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - h.encSliceContainerStatus(([]ContainerStatus)(x.ContainerStatuses), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerStatuses")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ContainerStatuses == nil { - r.EncodeNil() - } else { - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - h.encSliceContainerStatus(([]ContainerStatus)(x.ContainerStatuses), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } 
-} - -func (x *PodStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "phase": - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = PodPhase(r.DecodeString()) - } - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv5 := &x.Conditions - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSlicePodCondition((*[]PodCondition)(yyv5), d) - } - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "hostIP": - if r.TryDecodeAsNil() { - x.HostIP = "" - } else { - x.HostIP = string(r.DecodeString()) - } - case "podIP": - if r.TryDecodeAsNil() { - x.PodIP = "" - } else { - x.PodIP = string(r.DecodeString()) - } - case "startTime": - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg2_unversioned.Time) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym12 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - case "containerStatuses": - if r.TryDecodeAsNil() { - x.ContainerStatuses = nil - } else { - yyv13 := &x.ContainerStatuses - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceContainerStatus((*[]ContainerStatus)(yyv13), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj15 int - var yyb15 bool - var yyhl15 bool = l >= 0 - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = 
PodPhase(r.DecodeString()) - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv17 := &x.Conditions - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - h.decSlicePodCondition((*[]PodCondition)(yyv17), d) - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostIP = "" - } else { - x.HostIP = string(r.DecodeString()) - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodIP = "" - } else { - x.PodIP = string(r.DecodeString()) - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg2_unversioned.Time) - } - yym24 := z.DecBinary() - _ = yym24 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym24 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym24 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerStatuses = nil - } else { - yyv25 := &x.ContainerStatuses - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - h.decSliceContainerStatus((*[]ContainerStatus)(yyv25), d) - } - } - for { - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj15-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodStatusResult) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 
:= z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Status - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Status - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodStatusResult) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodStatusResult) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 
- z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = PodStatus{} - } else { - yyv5 := &x.Status - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodStatusResult) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv9 := &x.ObjectMeta - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = PodStatus{} - } else { - yyv10 := &x.Status - yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Pod) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 
int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Pod) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Pod) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PodSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = PodStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Pod) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PodSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = PodStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - 
z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PodSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv7 := &x.ObjectMeta - yyv7.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PodSpec{} - } else { - yyv8 := &x.Spec - yyv8.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodTemplate) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Template - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Template - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if 
yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodTemplate) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "template": - if r.TryDecodeAsNil() { - x.Template = PodTemplateSpec{} - } else { - yyv5 := &x.Template - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv9 := &x.ObjectMeta - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.Template = PodTemplateSpec{} - } else { - yyv10 := &x.Template - yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodTemplateList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSlicePodTemplate(([]PodTemplate)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSlicePodTemplate(([]PodTemplate)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if 
false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodTemplateList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodTemplateList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSlicePodTemplate((*[]PodTemplate)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodTemplateList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSlicePodTemplate((*[]PodTemplate)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicationControllerSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.Template != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - z.F.EncMapStringStringV(x.Selector, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - z.F.EncMapStringStringV(x.Selector, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if 
x.Template == nil { - r.EncodeNil() - } else { - x.Template.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Template == nil { - r.EncodeNil() - } else { - x.Template.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicationControllerSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicationControllerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "selector": - if r.TryDecodeAsNil() { - x.Selector = nil - } else { - yyv5 := &x.Selector - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - z.F.DecMapStringStringX(yyv5, false, d) - } - } - case "template": - if r.TryDecodeAsNil() { - if x.Template != nil { - x.Template = nil - } - } else { - if x.Template == nil { - x.Template = new(PodTemplateSpec) - } - x.Template.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicationControllerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Selector = nil - } else { - yyv10 := &x.Selector - yym11 := z.DecBinary() 
- _ = yym11 - if false { - } else { - z.F.DecMapStringStringX(yyv10, false, d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Template != nil { - x.Template = nil - } - } else { - if x.Template == nil { - x.Template = new(PodTemplateSpec) - } - x.Template.CodecDecodeSelf(d) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FullyLabeledReplicas != 0 - yyq2[2] = x.ObservedGeneration != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.FullyLabeledReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.FullyLabeledReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicationControllerStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - 
} else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicationControllerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "fullyLabeledReplicas": - if r.TryDecodeAsNil() { - x.FullyLabeledReplicas = 0 - } else { - x.FullyLabeledReplicas = int32(r.DecodeInt(32)) - } - case "observedGeneration": - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FullyLabeledReplicas = 0 - } else { - x.FullyLabeledReplicas = int32(r.DecodeInt(32)) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicationController) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 
:= !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicationController) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicationController) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ReplicationControllerSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ReplicationControllerStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicationController) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ReplicationControllerSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ReplicationControllerStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicationControllerList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceReplicationController(([]ReplicationController)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceReplicationController(([]ReplicationController)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicationControllerList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicationControllerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceReplicationController((*[]ReplicationController)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicationControllerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceReplicationController((*[]ReplicationController)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ServiceList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceService(([]Service)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceService(([]Service)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if 
yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServiceList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServiceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceService((*[]Service)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServiceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l 
>= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceService((*[]Service)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ServiceAffinity) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ServiceAffinity) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x ServiceType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ServiceType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ServiceStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = 
true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.LoadBalancer - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("loadBalancer")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.LoadBalancer - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServiceStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServiceStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "loadBalancer": - if r.TryDecodeAsNil() { - x.LoadBalancer = LoadBalancerStatus{} - } else { - yyv4 := &x.LoadBalancer - yyv4.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServiceStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LoadBalancer = LoadBalancerStatus{} - } else { - yyv6 := &x.LoadBalancer - yyv6.CodecDecodeSelf(d) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LoadBalancerStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { 
- yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Ingress) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Ingress == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceLoadBalancerIngress(([]LoadBalancerIngress)(x.Ingress), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ingress")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ingress == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceLoadBalancerIngress(([]LoadBalancerIngress)(x.Ingress), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LoadBalancerStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LoadBalancerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "ingress": - if r.TryDecodeAsNil() { - x.Ingress = nil - } else { - yyv4 := &x.Ingress - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceLoadBalancerIngress((*[]LoadBalancerIngress)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LoadBalancerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ingress = nil - } else { - yyv7 := &x.Ingress - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceLoadBalancerIngress((*[]LoadBalancerIngress)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LoadBalancerIngress) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.IP != "" - yyq2[1] = x.Hostname != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ip")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IP)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostname")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LoadBalancerIngress) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LoadBalancerIngress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "ip": - if r.TryDecodeAsNil() { - x.IP = "" - } else { - x.IP = string(r.DecodeString()) - } - case "hostname": - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - x.Hostname = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LoadBalancerIngress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.IP = "" - } else { - x.IP = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - x.Hostname = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Type != "" - yyq2[3] = x.ClusterIP != "" - yyq2[4] = len(x.ExternalIPs) != 0 - yyq2[5] = x.LoadBalancerIP != "" - yyq2[6] = x.SessionAffinity != "" - yyq2[7] = len(x.LoadBalancerSourceRanges) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Type.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Ports == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceServicePort(([]ServicePort)(x.Ports), e) - } - } - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ports")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ports == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceServicePort(([]ServicePort)(x.Ports), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - z.F.EncMapStringStringV(x.Selector, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - z.F.EncMapStringStringV(x.Selector, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterIP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterIP")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterIP)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.ExternalIPs == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - z.F.EncSliceStringV(x.ExternalIPs, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("externalIPs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ExternalIPs == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - z.F.EncSliceStringV(x.ExternalIPs, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.LoadBalancerIP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("loadBalancerIP")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.LoadBalancerIP)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - x.SessionAffinity.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("sessionAffinity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.SessionAffinity.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.LoadBalancerSourceRanges == nil { - r.EncodeNil() - } else { - 
yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("loadBalancerSourceRanges")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LoadBalancerSourceRanges == nil { - r.EncodeNil() - } else { - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServiceSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServiceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ServiceType(r.DecodeString()) - } - case "ports": - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv5 := &x.Ports - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceServicePort((*[]ServicePort)(yyv5), d) - } - } - case "selector": - if r.TryDecodeAsNil() { - x.Selector = nil - } else { - yyv7 := &x.Selector - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - z.F.DecMapStringStringX(yyv7, false, d) - } - } - case "clusterIP": - if r.TryDecodeAsNil() { - x.ClusterIP = "" - } else { - x.ClusterIP = string(r.DecodeString()) - } - case "externalIPs": - if r.TryDecodeAsNil() { - x.ExternalIPs = nil - } else { - yyv10 := &x.ExternalIPs - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - z.F.DecSliceStringX(yyv10, false, d) - } - } - case "loadBalancerIP": - if r.TryDecodeAsNil() { - x.LoadBalancerIP = "" - } else { - x.LoadBalancerIP = string(r.DecodeString()) - } - case "sessionAffinity": - if r.TryDecodeAsNil() { - x.SessionAffinity = "" - } else { - x.SessionAffinity = ServiceAffinity(r.DecodeString()) - } - case "loadBalancerSourceRanges": - if r.TryDecodeAsNil() { - x.LoadBalancerSourceRanges = nil - } else { - yyv14 := &x.LoadBalancerSourceRanges - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - z.F.DecSliceStringX(yyv14, false, d) - } - } - default: - 
z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj16 int - var yyb16 bool - var yyhl16 bool = l >= 0 - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ServiceType(r.DecodeString()) - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv18 := &x.Ports - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - h.decSliceServicePort((*[]ServicePort)(yyv18), d) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Selector = nil - } else { - yyv20 := &x.Selector - yym21 := z.DecBinary() - _ = yym21 - if false { - } else { - z.F.DecMapStringStringX(yyv20, false, d) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterIP = "" - } else { - x.ClusterIP = string(r.DecodeString()) - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExternalIPs = nil - } else { - yyv23 := &x.ExternalIPs - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - z.F.DecSliceStringX(yyv23, false, d) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LoadBalancerIP = "" - } else { - x.LoadBalancerIP = string(r.DecodeString()) - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SessionAffinity = "" - } else { - x.SessionAffinity = ServiceAffinity(r.DecodeString()) - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LoadBalancerSourceRanges = nil - } else { - yyv27 := &x.LoadBalancerSourceRanges - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - z.F.DecSliceStringX(yyv27, false, d) - } - } - for { - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if 
yyb16 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj16-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ServicePort) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 5 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Protocol.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("protocol")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Protocol.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy13 := &x.TargetPort - yym14 := z.EncBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.EncExt(yy13) { - } else if !yym14 && z.IsJSONHandle() { - z.EncJSONMarshal(yy13) - } else { - z.EncFallback(yy13) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetPort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy15 := &x.TargetPort - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(yy15) { - } else if !yym16 && z.IsJSONHandle() { - z.EncJSONMarshal(yy15) - } else { - z.EncFallback(yy15) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeInt(int64(x.NodePort)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodePort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(x.NodePort)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServicePort) 
CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServicePort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "protocol": - if r.TryDecodeAsNil() { - x.Protocol = "" - } else { - x.Protocol = Protocol(r.DecodeString()) - } - case "port": - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - case "targetPort": - if r.TryDecodeAsNil() { - x.TargetPort = pkg4_intstr.IntOrString{} - } else { - yyv7 := &x.TargetPort - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(yyv7) { - } else if !yym8 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv7) - } else { - z.DecFallback(yyv7, false) - } - } - case "nodePort": - if r.TryDecodeAsNil() { - x.NodePort = 0 - } else { - x.NodePort = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServicePort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Protocol = "" - } else { - x.Protocol = Protocol(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = 
r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetPort = pkg4_intstr.IntOrString{} - } else { - yyv14 := &x.TargetPort - yym15 := z.DecBinary() - _ = yym15 - if false { - } else if z.HasExtensions() && z.DecExt(yyv14) { - } else if !yym15 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv14) - } else { - z.DecFallback(yyv14, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodePort = 0 - } else { - x.NodePort = int32(r.DecodeInt(32)) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Service) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Service) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Service) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ServiceSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ServiceStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Service) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ServiceSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ServiceStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ServiceAccount) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = len(x.ImagePullSecrets) != 0 - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Secrets == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceObjectReference(([]ObjectReference)(x.Secrets), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secrets")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Secrets == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - 
h.encSliceObjectReference(([]ObjectReference)(x.Secrets), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.ImagePullSecrets == nil { - r.EncodeNil() - } else { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("imagePullSecrets")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ImagePullSecrets == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServiceAccount) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServiceAccount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := 
string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "secrets": - if r.TryDecodeAsNil() { - x.Secrets = nil - } else { - yyv5 := &x.Secrets - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceObjectReference((*[]ObjectReference)(yyv5), d) - } - } - case "imagePullSecrets": - if r.TryDecodeAsNil() { - x.ImagePullSecrets = nil - } else { - yyv7 := &x.ImagePullSecrets - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv7), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServiceAccount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv12 := &x.ObjectMeta - yyv12.CodecDecodeSelf(d) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Secrets = nil - } else { - yyv13 := &x.Secrets - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceObjectReference((*[]ObjectReference)(yyv13), d) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ImagePullSecrets = nil - } else { - yyv15 := &x.ImagePullSecrets - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv15), d) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x 
*ServiceAccountList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceServiceAccount(([]ServiceAccount)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceServiceAccount(([]ServiceAccount)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServiceAccountList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 
:= z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServiceAccountList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceServiceAccount((*[]ServiceAccount)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServiceAccountList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceServiceAccount((*[]ServiceAccount)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = 
string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Endpoints) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Subsets == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceEndpointSubset(([]EndpointSubset)(x.Subsets), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Subsets")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Subsets == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceEndpointSubset(([]EndpointSubset)(x.Subsets), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := 
z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Endpoints) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Endpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "Subsets": - if r.TryDecodeAsNil() { - x.Subsets = nil - } else { - yyv5 := &x.Subsets - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceEndpointSubset((*[]EndpointSubset)(yyv5), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Endpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Subsets = nil - } else { - yyv11 := &x.Subsets - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceEndpointSubset((*[]EndpointSubset)(yyv11), d) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EndpointSubset) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Addresses == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceEndpointAddress(([]EndpointAddress)(x.Addresses), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Addresses")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Addresses == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceEndpointAddress(([]EndpointAddress)(x.Addresses), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.NotReadyAddresses == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceEndpointAddress(([]EndpointAddress)(x.NotReadyAddresses), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("NotReadyAddresses")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NotReadyAddresses == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceEndpointAddress(([]EndpointAddress)(x.NotReadyAddresses), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Ports == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceEndpointPort(([]EndpointPort)(x.Ports), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Ports")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ports == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - h.encSliceEndpointPort(([]EndpointPort)(x.Ports), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EndpointSubset) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EndpointSubset) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "Addresses": - if r.TryDecodeAsNil() { - x.Addresses = nil - } else { - yyv4 := &x.Addresses - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceEndpointAddress((*[]EndpointAddress)(yyv4), d) - } - } - case "NotReadyAddresses": - if r.TryDecodeAsNil() { - x.NotReadyAddresses = nil - } else { - yyv6 := &x.NotReadyAddresses - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceEndpointAddress((*[]EndpointAddress)(yyv6), d) - } - } - case "Ports": - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv8 := &x.Ports - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - h.decSliceEndpointPort((*[]EndpointPort)(yyv8), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EndpointSubset) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Addresses = nil - } else { - yyv11 := &x.Addresses - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceEndpointAddress((*[]EndpointAddress)(yyv11), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NotReadyAddresses = nil - } else { - yyv13 := &x.NotReadyAddresses - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceEndpointAddress((*[]EndpointAddress)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv15 := &x.Ports - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - h.decSliceEndpointPort((*[]EndpointPort)(yyv15), d) - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EndpointAddress) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Hostname != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IP)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("IP")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IP)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostname")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.TargetRef == nil { - r.EncodeNil() - } else { - x.TargetRef.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("TargetRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TargetRef == nil { - r.EncodeNil() - } else { - x.TargetRef.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EndpointAddress) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == 
codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EndpointAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "IP": - if r.TryDecodeAsNil() { - x.IP = "" - } else { - x.IP = string(r.DecodeString()) - } - case "hostname": - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - x.Hostname = string(r.DecodeString()) - } - case "TargetRef": - if r.TryDecodeAsNil() { - if x.TargetRef != nil { - x.TargetRef = nil - } - } else { - if x.TargetRef == nil { - x.TargetRef = new(ObjectReference) - } - x.TargetRef.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EndpointAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.IP = "" - } else { - x.IP = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - x.Hostname = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TargetRef != nil { - x.TargetRef = nil - } - } else { - if x.TargetRef == nil { - x.TargetRef = new(ObjectReference) - } - x.TargetRef.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EndpointPort) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 
3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Protocol.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Protocol")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Protocol.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EndpointPort) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EndpointPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "Name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "Port": - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - case "Protocol": - if r.TryDecodeAsNil() { - x.Protocol = "" - } else { - x.Protocol = Protocol(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EndpointPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r 
:= codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Protocol = "" - } else { - x.Protocol = Protocol(r.DecodeString()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EndpointsList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceEndpoints(([]Endpoints)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceEndpoints(([]Endpoints)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - 
r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EndpointsList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EndpointsList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceEndpoints((*[]Endpoints)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EndpointsList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - 
_, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceEndpoints((*[]Endpoints)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.PodCIDR != "" - yyq2[1] = x.ExternalID != "" - yyq2[2] = x.ProviderID != "" - yyq2[3] = x.Unschedulable != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podCIDR")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ExternalID)) - } - } else { - 
r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("externalID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ExternalID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ProviderID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("providerID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ProviderID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.Unschedulable)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("unschedulable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.Unschedulable)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "podCIDR": - if r.TryDecodeAsNil() { - x.PodCIDR = "" - } else { - x.PodCIDR = string(r.DecodeString()) - } - case "externalID": - if r.TryDecodeAsNil() { - x.ExternalID = "" - } else { - x.ExternalID = string(r.DecodeString()) - } - case "providerID": - if r.TryDecodeAsNil() { - x.ProviderID = "" - } else { - x.ProviderID = string(r.DecodeString()) - } - case "unschedulable": - if r.TryDecodeAsNil() { - x.Unschedulable = false - 
} else { - x.Unschedulable = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodCIDR = "" - } else { - x.PodCIDR = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExternalID = "" - } else { - x.ExternalID = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ProviderID = "" - } else { - x.ProviderID = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Unschedulable = false - } else { - x.Unschedulable = bool(r.DecodeBool()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonEndpoint) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonEndpoint) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 
:= r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonEndpoint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "Port": - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DaemonEndpoint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeDaemonEndpoints) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.KubeletEndpoint - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeletEndpoint")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.KubeletEndpoint - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeDaemonEndpoints) CodecDecodeSelf(d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeDaemonEndpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kubeletEndpoint": - if r.TryDecodeAsNil() { - x.KubeletEndpoint = DaemonEndpoint{} - } else { - yyv4 := &x.KubeletEndpoint - yyv4.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeDaemonEndpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeletEndpoint = DaemonEndpoint{} - } else { - yyv6 := &x.KubeletEndpoint - yyv6.CodecDecodeSelf(d) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [10]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(10) - } else { - yynn2 = 10 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MachineID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("machineID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := 
z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MachineID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SystemUUID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("systemUUID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SystemUUID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.BootID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("bootID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.BootID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KernelVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kernelVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KernelVersion)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.OSImage)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("osImage")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.OSImage)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntimeVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerRuntimeVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntimeVersion)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeletVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeletVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeletVersion)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, 
string(x.KubeProxyVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeProxyVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeProxyVersion)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("operatingSystem")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym31 := z.EncBinary() - _ = yym31 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Architecture)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("architecture")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym32 := z.EncBinary() - _ = yym32 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Architecture)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeSystemInfo) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeSystemInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "machineID": - if r.TryDecodeAsNil() { - x.MachineID = "" - } else { - x.MachineID = string(r.DecodeString()) - } - case "systemUUID": - if r.TryDecodeAsNil() { - x.SystemUUID = "" - } else { - x.SystemUUID = string(r.DecodeString()) - } - case "bootID": - if r.TryDecodeAsNil() { - x.BootID = "" - } else { - x.BootID = string(r.DecodeString()) - } - case "kernelVersion": - if r.TryDecodeAsNil() { - x.KernelVersion = "" - } else { - x.KernelVersion = string(r.DecodeString()) - } - case "osImage": - if r.TryDecodeAsNil() { - x.OSImage 
= "" - } else { - x.OSImage = string(r.DecodeString()) - } - case "containerRuntimeVersion": - if r.TryDecodeAsNil() { - x.ContainerRuntimeVersion = "" - } else { - x.ContainerRuntimeVersion = string(r.DecodeString()) - } - case "kubeletVersion": - if r.TryDecodeAsNil() { - x.KubeletVersion = "" - } else { - x.KubeletVersion = string(r.DecodeString()) - } - case "kubeProxyVersion": - if r.TryDecodeAsNil() { - x.KubeProxyVersion = "" - } else { - x.KubeProxyVersion = string(r.DecodeString()) - } - case "operatingSystem": - if r.TryDecodeAsNil() { - x.OperatingSystem = "" - } else { - x.OperatingSystem = string(r.DecodeString()) - } - case "architecture": - if r.TryDecodeAsNil() { - x.Architecture = "" - } else { - x.Architecture = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MachineID = "" - } else { - x.MachineID = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SystemUUID = "" - } else { - x.SystemUUID = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.BootID = "" - } else { - x.BootID = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KernelVersion = "" - } else { - x.KernelVersion = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.OSImage = "" - } else { - x.OSImage = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerRuntimeVersion = "" - } else { - x.ContainerRuntimeVersion = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeletVersion = "" - } else { - x.KubeletVersion = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - 
yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeProxyVersion = "" - } else { - x.KubeProxyVersion = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.OperatingSystem = "" - } else { - x.OperatingSystem = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Architecture = "" - } else { - x.Architecture = string(r.DecodeString()) - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [10]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Capacity) != 0 - yyq2[1] = len(x.Allocatable) != 0 - yyq2[2] = x.Phase != "" - yyq2[3] = len(x.Conditions) != 0 - yyq2[4] = len(x.Addresses) != 0 - yyq2[5] = true - yyq2[6] = true - yyq2[7] = len(x.Images) != 0 - yyq2[8] = len(x.VolumesInUse) != 0 - yyq2[9] = len(x.VolumesAttached) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(10) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("capacity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Allocatable == nil { - r.EncodeNil() - } else { - x.Allocatable.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("allocatable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Allocatable == nil { - r.EncodeNil() - } else { - x.Allocatable.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - x.Phase.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("phase")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Phase.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - h.encSliceNodeCondition(([]NodeCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - h.encSliceNodeCondition(([]NodeCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.Addresses == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceNodeAddress(([]NodeAddress)(x.Addresses), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("addresses")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Addresses == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - h.encSliceNodeAddress(([]NodeAddress)(x.Addresses), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yy19 := &x.DaemonEndpoints - yy19.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("daemonEndpoints")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy21 := &x.DaemonEndpoints - yy21.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yy24 := &x.NodeInfo - yy24.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeInfo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy26 := &x.NodeInfo - yy26.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.Images == nil { - r.EncodeNil() - } else { - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - h.encSliceContainerImage(([]ContainerImage)(x.Images), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("images")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Images == nil { - r.EncodeNil() - } else { - yym30 := z.EncBinary() - _ = yym30 - if false { - } else { - h.encSliceContainerImage(([]ContainerImage)(x.Images), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.VolumesInUse == nil { - r.EncodeNil() - } else { - yym32 := z.EncBinary() - _ = yym32 - if false { - } else { - h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e) - } - } - } else { - 
r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumesInUse")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VolumesInUse == nil { - r.EncodeNil() - } else { - yym33 := z.EncBinary() - _ = yym33 - if false { - } else { - h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.VolumesAttached == nil { - r.EncodeNil() - } else { - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumesAttached")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VolumesAttached == nil { - r.EncodeNil() - } else { - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "capacity": - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv4 := &x.Capacity - yyv4.CodecDecodeSelf(d) - } - case "allocatable": - if r.TryDecodeAsNil() { - x.Allocatable = nil - } else { - yyv5 := &x.Allocatable - yyv5.CodecDecodeSelf(d) - } - case "phase": - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = NodePhase(r.DecodeString()) - } - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv7 := &x.Conditions - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceNodeCondition((*[]NodeCondition)(yyv7), d) - } - } - case "addresses": - if r.TryDecodeAsNil() { - x.Addresses = nil - } else { - yyv9 := &x.Addresses - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceNodeAddress((*[]NodeAddress)(yyv9), d) - } - } - case 
"daemonEndpoints": - if r.TryDecodeAsNil() { - x.DaemonEndpoints = NodeDaemonEndpoints{} - } else { - yyv11 := &x.DaemonEndpoints - yyv11.CodecDecodeSelf(d) - } - case "nodeInfo": - if r.TryDecodeAsNil() { - x.NodeInfo = NodeSystemInfo{} - } else { - yyv12 := &x.NodeInfo - yyv12.CodecDecodeSelf(d) - } - case "images": - if r.TryDecodeAsNil() { - x.Images = nil - } else { - yyv13 := &x.Images - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceContainerImage((*[]ContainerImage)(yyv13), d) - } - } - case "volumesInUse": - if r.TryDecodeAsNil() { - x.VolumesInUse = nil - } else { - yyv15 := &x.VolumesInUse - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv15), d) - } - } - case "volumesAttached": - if r.TryDecodeAsNil() { - x.VolumesAttached = nil - } else { - yyv17 := &x.VolumesAttached - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - h.decSliceAttachedVolume((*[]AttachedVolume)(yyv17), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj19 int - var yyb19 bool - var yyhl19 bool = l >= 0 - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv20 := &x.Capacity - yyv20.CodecDecodeSelf(d) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Allocatable = nil - } else { - yyv21 := &x.Allocatable - yyv21.CodecDecodeSelf(d) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = NodePhase(r.DecodeString()) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv23 := &x.Conditions - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - h.decSliceNodeCondition((*[]NodeCondition)(yyv23), d) - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Addresses = nil - } else { - yyv25 := &x.Addresses - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - h.decSliceNodeAddress((*[]NodeAddress)(yyv25), d) - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - 
x.DaemonEndpoints = NodeDaemonEndpoints{} - } else { - yyv27 := &x.DaemonEndpoints - yyv27.CodecDecodeSelf(d) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeInfo = NodeSystemInfo{} - } else { - yyv28 := &x.NodeInfo - yyv28.CodecDecodeSelf(d) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Images = nil - } else { - yyv29 := &x.Images - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - h.decSliceContainerImage((*[]ContainerImage)(yyv29), d) - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumesInUse = nil - } else { - yyv31 := &x.VolumesInUse - yym32 := z.DecBinary() - _ = yym32 - if false { - } else { - h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv31), d) - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumesAttached = nil - } else { - yyv33 := &x.VolumesAttached - yym34 := z.DecBinary() - _ = yym34 - if false { - } else { - h.decSliceAttachedVolume((*[]AttachedVolume)(yyv33), d) - } - } - for { - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj19-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x UniqueVolumeName) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *UniqueVolumeName) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *AttachedVolume) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Name.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Name.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("devicePath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *AttachedVolume) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *AttachedVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = UniqueVolumeName(r.DecodeString()) - } - case "devicePath": - if r.TryDecodeAsNil() { - x.DevicePath = "" - } else { - x.DevicePath = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *AttachedVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = UniqueVolumeName(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DevicePath = "" - } else { - x.DevicePath = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } 
else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerImage) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.SizeBytes != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Names == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.Names, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("names")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Names == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.Names, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.SizeBytes)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("sizeBytes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.SizeBytes)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerImage) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerImage) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "names": - if r.TryDecodeAsNil() { - x.Names = nil - } else { - yyv4 := &x.Names - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecSliceStringX(yyv4, false, d) - } - } - case "sizeBytes": - if r.TryDecodeAsNil() { - x.SizeBytes = 0 - } else { - x.SizeBytes = int64(r.DecodeInt(64)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerImage) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Names = nil - } else { - yyv8 := &x.Names - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - z.F.DecSliceStringX(yyv8, false, d) - } - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SizeBytes = 0 - } else { - x.SizeBytes = int64(r.DecodeInt(64)) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x NodePhase) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *NodePhase) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x NodeConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *NodeConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *NodeCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = true - yyq2[3] = true - yyq2[4] = x.Reason != "" - yyq2[5] = x.Message != "" - var 
yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Status.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Status.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.LastHeartbeatTime - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else if yym11 { - z.EncBinaryMarshal(yy10) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(yy10) - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastHeartbeatTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.LastHeartbeatTime - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if yym13 { - z.EncBinaryMarshal(yy12) - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.LastTransitionTime - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(yy15) { - } else if yym16 { - z.EncBinaryMarshal(yy15) - } else if !yym16 && z.IsJSONHandle() { - z.EncJSONMarshal(yy15) - } else { - z.EncFallback(yy15) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.LastTransitionTime - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(yy17) { - } else if yym18 { - z.EncBinaryMarshal(yy17) - } else if !yym18 && z.IsJSONHandle() { - z.EncJSONMarshal(yy17) - } else { - z.EncFallback(yy17) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - 
r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = NodeConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = ConditionStatus(r.DecodeString()) - } - case "lastHeartbeatTime": - if r.TryDecodeAsNil() { - x.LastHeartbeatTime = pkg2_unversioned.Time{} - } else { - yyv6 := &x.LastHeartbeatTime - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if yym7 { - z.DecBinaryUnmarshal(yyv6) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv8 := &x.LastTransitionTime - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else if yym9 { - z.DecBinaryUnmarshal(yyv8) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv8) - } else { - z.DecFallback(yyv8, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 
bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = NodeConditionType(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = ConditionStatus(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastHeartbeatTime = pkg2_unversioned.Time{} - } else { - yyv15 := &x.LastHeartbeatTime - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.DecExt(yyv15) { - } else if yym16 { - z.DecBinaryUnmarshal(yyv15) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv15) - } else { - z.DecFallback(yyv15, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv17 := &x.LastTransitionTime - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else if yym18 { - z.DecBinaryUnmarshal(yyv17) - } else if !yym18 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv17) - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x NodeAddressType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *NodeAddressType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *NodeAddress) CodecEncodeSelf(e 
*codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Address)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("address")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Address)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeAddress) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = NodeAddressType(r.DecodeString()) - } - case "address": - if r.TryDecodeAsNil() { - x.Address = "" - } else { - x.Address = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var 
yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = NodeAddressType(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Address = "" - } else { - x.Address = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeResources) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Capacity) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("capacity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeResources) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeResources) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - 
yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "capacity": - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv4 := &x.Capacity - yyv4.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeResources) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv6 := &x.Capacity - yyv6.CodecDecodeSelf(d) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ResourceName) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ResourceName) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x ResourceList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - h.encResourceList((ResourceList)(x), e) - } - } -} - -func (x *ResourceList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - h.decResourceList((*ResourceList)(x), d) - } -} - -func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } 
else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = NodeSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = NodeStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = NodeSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = NodeStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := 
z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceNode(([]Node)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceNode(([]Node)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - 
yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceNode((*[]Node)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceNode((*[]Node)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if 
yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NamespaceSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceFinalizerName(([]FinalizerName)(x.Finalizers), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Finalizers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceFinalizerName(([]FinalizerName)(x.Finalizers), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NamespaceSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NamespaceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "Finalizers": - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv4 := &x.Finalizers - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceFinalizerName((*[]FinalizerName)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NamespaceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = 
h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv7 := &x.Finalizers - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceFinalizerName((*[]FinalizerName)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x FinalizerName) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *FinalizerName) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *NamespaceStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Phase != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Phase.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("phase")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Phase.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NamespaceStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NamespaceStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "phase": - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = NamespacePhase(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NamespaceStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = NamespacePhase(r.DecodeString()) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x NamespacePhase) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *NamespacePhase) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *Namespace) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() 
- } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Namespace) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Namespace) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else 
{ - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = NamespaceSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = NamespaceStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Namespace) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = NamespaceSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = NamespaceStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NamespaceList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - 
r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceNamespace(([]Namespace)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceNamespace(([]Namespace)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NamespaceList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NamespaceList) codecDecodeSelfFromMap(l int, d 
*codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceNamespace((*[]Namespace)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NamespaceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceNamespace((*[]Namespace)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Binding) CodecEncodeSelf(e *codec1978.Encoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy9 := &x.Target - yy9.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("target")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Target - yy11.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Binding) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Binding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "target": - if r.TryDecodeAsNil() { - x.Target = ObjectReference{} - } else { - yyv5 := &x.Target - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Binding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv9 := &x.ObjectMeta - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Target = ObjectReference{} - } else { - yyv10 := &x.Target - yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Preconditions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - 
_, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.UID != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.UID == nil { - r.EncodeNil() - } else { - yy4 := *x.UID - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.UID == nil { - r.EncodeNil() - } else { - yy6 := *x.UID - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Preconditions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Preconditions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "uid": - if r.TryDecodeAsNil() { - if x.UID != nil { - x.UID = nil - } - } else { - if x.UID == nil { - x.UID = new(pkg1_types.UID) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(x.UID) { - } else { - *((*string)(x.UID)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Preconditions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if 
x.UID != nil { - x.UID = nil - } - } else { - if x.UID == nil { - x.UID = new(pkg1_types.UID) - } - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(x.UID) { - } else { - *((*string)(x.UID)) = r.DecodeString() - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.GracePeriodSeconds != nil - yyq2[1] = x.Preconditions != nil - yyq2[2] = x.OrphanDependents != nil - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.GracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy4 := *x.GracePeriodSeconds - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gracePeriodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.GracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy6 := *x.GracePeriodSeconds - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Preconditions == nil { - r.EncodeNil() - } else { - x.Preconditions.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preconditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Preconditions == nil { - r.EncodeNil() - } else { - x.Preconditions.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.OrphanDependents == nil { - r.EncodeNil() - } else { - yy12 := *x.OrphanDependents - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(yy12)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("orphanDependents")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.OrphanDependents == nil { - r.EncodeNil() - } else { - yy14 := *x.OrphanDependents - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeBool(bool(yy14)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) 
- } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeleteOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "gracePeriodSeconds": - if r.TryDecodeAsNil() { - if x.GracePeriodSeconds != nil { - x.GracePeriodSeconds = nil - } - } else { - if x.GracePeriodSeconds == nil { - x.GracePeriodSeconds = new(int64) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - case "preconditions": - if r.TryDecodeAsNil() { - if x.Preconditions != nil { - x.Preconditions = nil - } - } else { - if x.Preconditions == nil { - x.Preconditions = new(Preconditions) - } - x.Preconditions.CodecDecodeSelf(d) - } - case "orphanDependents": - if r.TryDecodeAsNil() { - if x.OrphanDependents != nil { - x.OrphanDependents = nil - } - } else { - if x.OrphanDependents == nil { - x.OrphanDependents = new(bool) - } - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*bool)(x.OrphanDependents)) = r.DecodeBool() - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = 
string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GracePeriodSeconds != nil { - x.GracePeriodSeconds = nil - } - } else { - if x.GracePeriodSeconds == nil { - x.GracePeriodSeconds = new(int64) - } - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Preconditions != nil { - x.Preconditions = nil - } - } else { - if x.Preconditions == nil { - x.Preconditions = new(Preconditions) - } - x.Preconditions.CodecDecodeSelf(d) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.OrphanDependents != nil { - x.OrphanDependents = nil - } - } else { - if x.OrphanDependents == nil { - x.OrphanDependents = new(bool) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*bool)(x.OrphanDependents)) = r.DecodeBool() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ExportOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - 
r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.Export)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("export")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.Export)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.Exact)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("exact")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.Exact)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ExportOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ExportOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ 
{ - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "export": - if r.TryDecodeAsNil() { - x.Export = false - } else { - x.Export = bool(r.DecodeBool()) - } - case "exact": - if r.TryDecodeAsNil() { - x.Exact = false - } else { - x.Exact = bool(r.DecodeBool()) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ExportOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Export = false - } else { - x.Export = bool(r.DecodeBool()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Exact = false - } else { - x.Exact = bool(r.DecodeBool()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ListOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[5] = x.Kind != "" - yyq2[6] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 5 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.LabelSelector == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ 
= yym4 - if false { - } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { - } else { - z.EncFallback(x.LabelSelector) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("LabelSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LabelSelector == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { - } else { - z.EncFallback(x.LabelSelector) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.FieldSelector == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.FieldSelector) { - } else { - z.EncFallback(x.FieldSelector) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("FieldSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FieldSelector == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.FieldSelector) { - } else { - z.EncFallback(x.FieldSelector) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.Watch)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Watch")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.Watch)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ResourceVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.TimeoutSeconds == nil { - r.EncodeNil() - } else { - yy16 := *x.TimeoutSeconds - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(yy16)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("TimeoutSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TimeoutSeconds == nil { - r.EncodeNil() - } else { - yy18 := *x.TimeoutSeconds - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(yy18)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - 
if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ListOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ListOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "LabelSelector": - if r.TryDecodeAsNil() { - x.LabelSelector = nil - } else { - yyv4 := &x.LabelSelector - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, true) - } - } - case "FieldSelector": - if r.TryDecodeAsNil() { - x.FieldSelector = nil - } else { - yyv6 := &x.FieldSelector - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else { - z.DecFallback(yyv6, true) - } - } - case "Watch": - if r.TryDecodeAsNil() { - x.Watch = false - } else { - x.Watch = bool(r.DecodeBool()) - } - case "ResourceVersion": - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - case "TimeoutSeconds": - if r.TryDecodeAsNil() { - if x.TimeoutSeconds != nil { - x.TimeoutSeconds = nil - } - } else { - if x.TimeoutSeconds == nil { - x.TimeoutSeconds = new(int64) - } - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64)) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // 
end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ListOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LabelSelector = nil - } else { - yyv15 := &x.LabelSelector - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.DecExt(yyv15) { - } else { - z.DecFallback(yyv15, true) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FieldSelector = nil - } else { - yyv17 := &x.FieldSelector - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else { - z.DecFallback(yyv17, true) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Watch = false - } else { - x.Watch = bool(r.DecodeBool()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TimeoutSeconds != nil { - x.TimeoutSeconds = nil - } - } else { - if x.TimeoutSeconds == nil { - x.TimeoutSeconds = new(int64) - } - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodLogOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - 
r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [10]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[8] = x.Kind != "" - yyq2[9] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(10) - } else { - yynn2 = 8 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Container")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.Follow)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Follow")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.Follow)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.Previous)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Previous")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.Previous)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.SinceSeconds == nil { - r.EncodeNil() - } else { - yy13 := *x.SinceSeconds - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(yy13)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("SinceSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SinceSeconds == nil { - r.EncodeNil() - } else { - yy15 := *x.SinceSeconds - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(yy15)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.SinceTime == nil { - r.EncodeNil() - } else { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(x.SinceTime) { - } else if yym18 { - z.EncBinaryMarshal(x.SinceTime) - } else if !yym18 && z.IsJSONHandle() { - z.EncJSONMarshal(x.SinceTime) - } else { - z.EncFallback(x.SinceTime) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("SinceTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SinceTime == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else if z.HasExtensions() && z.EncExt(x.SinceTime) { - } else if yym19 { - z.EncBinaryMarshal(x.SinceTime) - } else if !yym19 && z.IsJSONHandle() { - z.EncJSONMarshal(x.SinceTime) - } else { - 
z.EncFallback(x.SinceTime) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeBool(bool(x.Timestamps)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Timestamps")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeBool(bool(x.Timestamps)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.TailLines == nil { - r.EncodeNil() - } else { - yy24 := *x.TailLines - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeInt(int64(yy24)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("TailLines")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TailLines == nil { - r.EncodeNil() - } else { - yy26 := *x.TailLines - yym27 := z.EncBinary() - _ = yym27 - if false { - } else { - r.EncodeInt(int64(yy26)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.LimitBytes == nil { - r.EncodeNil() - } else { - yy29 := *x.LimitBytes - yym30 := z.EncBinary() - _ = yym30 - if false { - } else { - r.EncodeInt(int64(yy29)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("LimitBytes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LimitBytes == nil { - r.EncodeNil() - } else { - yy31 := *x.LimitBytes - yym32 := z.EncBinary() - _ = yym32 - if false { - } else { - r.EncodeInt(int64(yy31)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - yym34 := z.EncBinary() - _ = yym34 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - yym37 := z.EncBinary() - _ = yym37 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym38 := z.EncBinary() - _ = yym38 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodLogOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 
0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodLogOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "Container": - if r.TryDecodeAsNil() { - x.Container = "" - } else { - x.Container = string(r.DecodeString()) - } - case "Follow": - if r.TryDecodeAsNil() { - x.Follow = false - } else { - x.Follow = bool(r.DecodeBool()) - } - case "Previous": - if r.TryDecodeAsNil() { - x.Previous = false - } else { - x.Previous = bool(r.DecodeBool()) - } - case "SinceSeconds": - if r.TryDecodeAsNil() { - if x.SinceSeconds != nil { - x.SinceSeconds = nil - } - } else { - if x.SinceSeconds == nil { - x.SinceSeconds = new(int64) - } - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*int64)(x.SinceSeconds)) = int64(r.DecodeInt(64)) - } - } - case "SinceTime": - if r.TryDecodeAsNil() { - if x.SinceTime != nil { - x.SinceTime = nil - } - } else { - if x.SinceTime == nil { - x.SinceTime = new(pkg2_unversioned.Time) - } - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(x.SinceTime) { - } else if yym10 { - z.DecBinaryUnmarshal(x.SinceTime) - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.SinceTime) - } else { - z.DecFallback(x.SinceTime, false) - } - } - case "Timestamps": - if r.TryDecodeAsNil() { - x.Timestamps = false - } else { - x.Timestamps = bool(r.DecodeBool()) - } - case "TailLines": - if r.TryDecodeAsNil() { - if x.TailLines != nil { - x.TailLines = nil - } - } else { - if x.TailLines == nil { - x.TailLines = new(int64) - } - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*int64)(x.TailLines)) = int64(r.DecodeInt(64)) - } - } - case "LimitBytes": - if r.TryDecodeAsNil() { - if x.LimitBytes != nil { - x.LimitBytes = nil - } - } else { - if x.LimitBytes == nil { - x.LimitBytes = new(int64) - } - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*int64)(x.LimitBytes)) = int64(r.DecodeInt(64)) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodLogOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj18 int - var yyb18 bool - var yyhl18 bool = l >= 0 - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Container = "" - } else { - x.Container = string(r.DecodeString()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Follow = false - } else { - x.Follow = bool(r.DecodeBool()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Previous = false - } else { - x.Previous = bool(r.DecodeBool()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SinceSeconds != nil { - x.SinceSeconds = nil - } - } else { - if x.SinceSeconds == nil { - x.SinceSeconds = new(int64) - } - yym23 := z.DecBinary() - _ = yym23 - if false { - } else { - *((*int64)(x.SinceSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SinceTime != nil { - x.SinceTime = nil - } - } else { - if x.SinceTime == nil { - x.SinceTime = new(pkg2_unversioned.Time) - } - yym25 := z.DecBinary() - _ = yym25 - if false { - } else if z.HasExtensions() && z.DecExt(x.SinceTime) { - } else if yym25 { - z.DecBinaryUnmarshal(x.SinceTime) - } else if !yym25 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.SinceTime) - } else { - z.DecFallback(x.SinceTime, false) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Timestamps = false - } else { - x.Timestamps = bool(r.DecodeBool()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TailLines != nil { - x.TailLines = nil - } - } else { - if x.TailLines == nil { - x.TailLines = new(int64) - } - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*int64)(x.TailLines)) = int64(r.DecodeInt(64)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LimitBytes != nil { - x.LimitBytes = nil - } - } else { - if x.LimitBytes == nil { - x.LimitBytes = new(int64) - } - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - *((*int64)(x.LimitBytes)) = int64(r.DecodeInt(64)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj18-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodAttachOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Stdin != false - yyq2[1] = x.Stdout != false - yyq2[2] = x.Stderr != false - yyq2[3] = x.TTY != false - yyq2[4] = x.Container != "" - yyq2[5] = x.Kind != "" - yyq2[6] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stdin")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.Stdout)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stdout")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.Stdout)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.Stderr)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stderr")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.Stderr)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { 
- z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tty")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("container")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodAttachOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodAttachOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc 
= r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "stdin": - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - x.Stdin = bool(r.DecodeBool()) - } - case "stdout": - if r.TryDecodeAsNil() { - x.Stdout = false - } else { - x.Stdout = bool(r.DecodeBool()) - } - case "stderr": - if r.TryDecodeAsNil() { - x.Stderr = false - } else { - x.Stderr = bool(r.DecodeBool()) - } - case "tty": - if r.TryDecodeAsNil() { - x.TTY = false - } else { - x.TTY = bool(r.DecodeBool()) - } - case "container": - if r.TryDecodeAsNil() { - x.Container = "" - } else { - x.Container = string(r.DecodeString()) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodAttachOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - x.Stdin = bool(r.DecodeBool()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stdout = false - } else { - x.Stdout = bool(r.DecodeBool()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stderr = false - } else { - x.Stderr = bool(r.DecodeBool()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TTY = false - } else { - x.TTY = bool(r.DecodeBool()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Container = "" - } else { - x.Container = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { 
- x.APIVersion = string(r.DecodeString()) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodExecOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[6] = x.Kind != "" - yyq2[7] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 6 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Stdin")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.Stdout)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Stdout")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.Stdout)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.Stderr)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Stderr")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.Stderr)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("TTY")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Container")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Command == nil { - r.EncodeNil() - } else { - 
yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Command")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Command == nil { - r.EncodeNil() - } else { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodExecOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodExecOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "Stdin": - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - x.Stdin = bool(r.DecodeBool()) - } - case "Stdout": - if r.TryDecodeAsNil() { - x.Stdout = false - } else { - x.Stdout = bool(r.DecodeBool()) - } - case "Stderr": - if r.TryDecodeAsNil() { - x.Stderr = false - } else { - 
x.Stderr = bool(r.DecodeBool()) - } - case "TTY": - if r.TryDecodeAsNil() { - x.TTY = false - } else { - x.TTY = bool(r.DecodeBool()) - } - case "Container": - if r.TryDecodeAsNil() { - x.Container = "" - } else { - x.Container = string(r.DecodeString()) - } - case "Command": - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv9 := &x.Command - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - z.F.DecSliceStringX(yyv9, false, d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodExecOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - x.Stdin = bool(r.DecodeBool()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stdout = false - } else { - x.Stdout = bool(r.DecodeBool()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stderr = false - } else { - x.Stderr = bool(r.DecodeBool()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TTY = false - } else { - x.TTY = bool(r.DecodeBool()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Container = "" - } else { - x.Container = string(r.DecodeString()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv19 := &x.Command - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - z.F.DecSliceStringX(yyv19, false, d) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Kind != "" - yyq2[2] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "Path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Kind != "" - yyq2[2] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - 
yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "Path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "kind": - if r.TryDecodeAsNil() { 
- x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Kind != "" - yyq2[2] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - 
} else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServiceProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "Path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServiceProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { 
- x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *OwnerReference) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[4] = x.Controller != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 4 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.Controller == nil { - r.EncodeNil() - } else { - yy16 := *x.Controller - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeBool(bool(yy16)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("controller")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Controller == nil { - r.EncodeNil() - } else { - yy18 := *x.Controller - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeBool(bool(yy18)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *OwnerReference) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *OwnerReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "uid": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg1_types.UID(r.DecodeString()) - } - case "controller": - if r.TryDecodeAsNil() { - if x.Controller != nil { - x.Controller = nil - } - } else { - if x.Controller == nil { - x.Controller = new(bool) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*bool)(x.Controller)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *OwnerReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg1_types.UID(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Controller != nil { - x.Controller = nil - } - } else { - if x.Controller == nil { - x.Controller = new(bool) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*bool)(x.Controller)) = r.DecodeBool() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.Namespace != "" - yyq2[2] = x.Name != "" - yyq2[3] = x.UID != "" - yyq2[4] = x.APIVersion != "" - yyq2[5] = x.ResourceVersion != "" - yyq2[6] = x.FieldPath != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fieldPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 
:= z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "uid": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg1_types.UID(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "resourceVersion": - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - case "fieldPath": - if r.TryDecodeAsNil() { - x.FieldPath = "" - } else { - x.FieldPath = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - yyj11++ 
- if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg1_types.UID(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FieldPath = "" - } else { - x.FieldPath = string(r.DecodeString()) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LocalObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LocalObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := 
r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LocalObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "Name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LocalObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SerializedReference) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = x.Kind != "" - yyq2[2] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.Reference - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reference")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.Reference - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SerializedReference) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SerializedReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "reference": - if r.TryDecodeAsNil() { - x.Reference = ObjectReference{} - } else { - yyv4 := &x.Reference - yyv4.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SerializedReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - 
} - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reference = ObjectReference{} - } else { - yyv8 := &x.Reference - yyv8.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EventSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Component != "" - yyq2[1] = x.Host != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Component)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("component")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Component)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("host")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EventSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := 
r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EventSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "component": - if r.TryDecodeAsNil() { - x.Component = "" - } else { - x.Component = string(r.DecodeString()) - } - case "host": - if r.TryDecodeAsNil() { - x.Host = "" - } else { - x.Host = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EventSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Component = "" - } else { - x.Component = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Host = "" - } else { - x.Host = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Event) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [11]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = x.Reason != "" - yyq2[3] = x.Message != "" - yyq2[4] = true - yyq2[5] = true - yyq2[6] = true - yyq2[7] = x.Count != 0 - yyq2[8] = x.Type != "" - yyq2[9] = x.Kind != "" - yyq2[10] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(11) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := 
&x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.InvolvedObject - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("involvedObject")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.InvolvedObject - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Source - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("source")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Source - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yy25 := &x.FirstTimestamp - yym26 := z.EncBinary() - _ = yym26 - if false { - } else if z.HasExtensions() && z.EncExt(yy25) { - } else if yym26 { - z.EncBinaryMarshal(yy25) - } else if !yym26 && z.IsJSONHandle() { - z.EncJSONMarshal(yy25) - } else { - z.EncFallback(yy25) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("firstTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy27 := &x.FirstTimestamp - yym28 := z.EncBinary() - _ = yym28 - if false { - } else if z.HasExtensions() && z.EncExt(yy27) { - } else if yym28 { - z.EncBinaryMarshal(yy27) - } else if !yym28 && z.IsJSONHandle() { - z.EncJSONMarshal(yy27) - } else { - z.EncFallback(yy27) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yy30 := &x.LastTimestamp - yym31 := z.EncBinary() - _ = yym31 - if false { - } else 
if z.HasExtensions() && z.EncExt(yy30) { - } else if yym31 { - z.EncBinaryMarshal(yy30) - } else if !yym31 && z.IsJSONHandle() { - z.EncJSONMarshal(yy30) - } else { - z.EncFallback(yy30) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy32 := &x.LastTimestamp - yym33 := z.EncBinary() - _ = yym33 - if false { - } else if z.HasExtensions() && z.EncExt(yy32) { - } else if yym33 { - z.EncBinaryMarshal(yy32) - } else if !yym33 && z.IsJSONHandle() { - z.EncJSONMarshal(yy32) - } else { - z.EncFallback(yy32) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - r.EncodeInt(int64(x.Count)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("count")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - r.EncodeInt(int64(x.Count)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - yym38 := z.EncBinary() - _ = yym38 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Type)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym39 := z.EncBinary() - _ = yym39 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Type)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - yym41 := z.EncBinary() - _ = yym41 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym42 := z.EncBinary() - _ = yym42 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - yym44 := z.EncBinary() - _ = yym44 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym45 := z.EncBinary() - _ = yym45 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Event) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - 
if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Event) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "involvedObject": - if r.TryDecodeAsNil() { - x.InvolvedObject = ObjectReference{} - } else { - yyv5 := &x.InvolvedObject - yyv5.CodecDecodeSelf(d) - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - case "source": - if r.TryDecodeAsNil() { - x.Source = EventSource{} - } else { - yyv8 := &x.Source - yyv8.CodecDecodeSelf(d) - } - case "firstTimestamp": - if r.TryDecodeAsNil() { - x.FirstTimestamp = pkg2_unversioned.Time{} - } else { - yyv9 := &x.FirstTimestamp - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { - } else if yym10 { - z.DecBinaryUnmarshal(yyv9) - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv9) - } else { - z.DecFallback(yyv9, false) - } - } - case "lastTimestamp": - if r.TryDecodeAsNil() { - x.LastTimestamp = pkg2_unversioned.Time{} - } else { - yyv11 := &x.LastTimestamp - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else if yym12 { - z.DecBinaryUnmarshal(yyv11) - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv11) - } else { - z.DecFallback(yyv11, false) - } - } - case "count": - if r.TryDecodeAsNil() { - x.Count = 0 - } else { - x.Count = int32(r.DecodeInt(32)) - } - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = string(r.DecodeString()) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Event) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj17 int - var yyb17 bool - var yyhl17 bool = l >= 0 - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv18 := &x.ObjectMeta - yyv18.CodecDecodeSelf(d) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.InvolvedObject = ObjectReference{} - } else { - yyv19 := &x.InvolvedObject - yyv19.CodecDecodeSelf(d) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Source = EventSource{} - } else { - yyv22 := &x.Source - yyv22.CodecDecodeSelf(d) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FirstTimestamp = pkg2_unversioned.Time{} - } else { - yyv23 := &x.FirstTimestamp - yym24 := z.DecBinary() - _ = yym24 - if false { - } else if z.HasExtensions() && z.DecExt(yyv23) { - } else if yym24 { - z.DecBinaryUnmarshal(yyv23) - } else if !yym24 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv23) - } else { - z.DecFallback(yyv23, false) - } - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTimestamp = pkg2_unversioned.Time{} - } else { - yyv25 := &x.LastTimestamp - yym26 := z.DecBinary() - _ = yym26 - if false { - } else if z.HasExtensions() && z.DecExt(yyv25) { - } else if yym26 { - z.DecBinaryUnmarshal(yyv25) - } else if !yym26 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv25) - } else { - z.DecFallback(yyv25, false) - } - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Count = 0 - } else { - x.Count = int32(r.DecodeInt(32)) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = string(r.DecodeString()) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj17-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EventList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceEvent(([]Event)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceEvent(([]Event)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if 
false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EventList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EventList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceEvent((*[]Event)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EventList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - 
z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceEvent((*[]Event)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *List) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceruntime_Object(([]pkg7_runtime.Object)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceruntime_Object(([]pkg7_runtime.Object)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *List) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *List) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceruntime_Object((*[]pkg7_runtime.Object)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *List) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceruntime_Object((*[]pkg7_runtime.Object)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x LimitType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *LimitType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *LimitRangeItem) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Type != "" - yyq2[1] = len(x.Max) != 0 - yyq2[2] = len(x.Min) != 0 - yyq2[3] = len(x.Default) != 0 - yyq2[4] = len(x.DefaultRequest) != 0 - yyq2[5] = len(x.MaxLimitRequestRatio) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - 
x.Type.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Max == nil { - r.EncodeNil() - } else { - x.Max.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("max")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Max == nil { - r.EncodeNil() - } else { - x.Max.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Min == nil { - r.EncodeNil() - } else { - x.Min.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("min")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Min == nil { - r.EncodeNil() - } else { - x.Min.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Default == nil { - r.EncodeNil() - } else { - x.Default.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("default")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Default == nil { - r.EncodeNil() - } else { - x.Default.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.DefaultRequest == nil { - r.EncodeNil() - } else { - x.DefaultRequest.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("defaultRequest")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DefaultRequest == nil { - r.EncodeNil() - } else { - x.DefaultRequest.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.MaxLimitRequestRatio == nil { - r.EncodeNil() - } else { - x.MaxLimitRequestRatio.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxLimitRequestRatio")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MaxLimitRequestRatio == nil { - r.EncodeNil() - } else { - x.MaxLimitRequestRatio.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LimitRangeItem) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LimitRangeItem) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = LimitType(r.DecodeString()) - } - case "max": - if r.TryDecodeAsNil() { - x.Max = nil - } else { - yyv5 := &x.Max - yyv5.CodecDecodeSelf(d) - } - case "min": - if r.TryDecodeAsNil() { - x.Min = nil - } else { - yyv6 := &x.Min - yyv6.CodecDecodeSelf(d) - } - case "default": - if r.TryDecodeAsNil() { - x.Default = nil - } else { - yyv7 := &x.Default - yyv7.CodecDecodeSelf(d) - } - case "defaultRequest": - if r.TryDecodeAsNil() { - x.DefaultRequest = nil - } else { - yyv8 := &x.DefaultRequest - yyv8.CodecDecodeSelf(d) - } - case "maxLimitRequestRatio": - if r.TryDecodeAsNil() { - x.MaxLimitRequestRatio = nil - } else { - yyv9 := &x.MaxLimitRequestRatio - yyv9.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LimitRangeItem) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = LimitType(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Max = nil - } else { - yyv12 := &x.Max - yyv12.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Min = nil - } else { - yyv13 := &x.Min - yyv13.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Default = nil - } else { - yyv14 := &x.Default - yyv14.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = 
r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DefaultRequest = nil - } else { - yyv15 := &x.DefaultRequest - yyv15.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxLimitRequestRatio = nil - } else { - yyv16 := &x.MaxLimitRequestRatio - yyv16.CodecDecodeSelf(d) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LimitRangeSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Limits == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceLimitRangeItem(([]LimitRangeItem)(x.Limits), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("limits")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Limits == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceLimitRangeItem(([]LimitRangeItem)(x.Limits), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LimitRangeSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LimitRangeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "limits": - if r.TryDecodeAsNil() { - x.Limits = nil - } else { - yyv4 := &x.Limits - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LimitRangeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Limits = nil - } else { - yyv7 := &x.Limits - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LimitRange) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LimitRange) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LimitRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = LimitRangeSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LimitRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv9 := &x.ObjectMeta - 
yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = LimitRangeSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LimitRangeList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceLimitRange(([]LimitRange)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceLimitRange(([]LimitRange)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - 
} else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LimitRangeList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LimitRangeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceLimitRange((*[]LimitRange)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LimitRangeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var 
yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceLimitRange((*[]LimitRange)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ResourceQuotaScope) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ResourceQuotaScope) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ResourceQuotaSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Hard) != 0 - yyq2[1] = len(x.Scopes) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Hard == nil { - r.EncodeNil() - } else { - x.Hard.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("hard")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Hard == nil { - r.EncodeNil() - } else { - x.Hard.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Scopes == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("scopes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Scopes == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceQuotaSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceQuotaSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "hard": - if r.TryDecodeAsNil() { - x.Hard = nil - } else { - yyv4 := &x.Hard - yyv4.CodecDecodeSelf(d) - } - case "scopes": - if r.TryDecodeAsNil() { - x.Scopes = nil - } else { - yyv5 := &x.Scopes - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv5), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceQuotaSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hard = nil - } else { - yyv8 := &x.Hard - yyv8.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - 
} - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Scopes = nil - } else { - yyv9 := &x.Scopes - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv9), d) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceQuotaStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Hard) != 0 - yyq2[1] = len(x.Used) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Hard == nil { - r.EncodeNil() - } else { - x.Hard.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hard")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Hard == nil { - r.EncodeNil() - } else { - x.Hard.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Used == nil { - r.EncodeNil() - } else { - x.Used.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("used")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Used == nil { - r.EncodeNil() - } else { - x.Used.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceQuotaStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceQuotaStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 
bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "hard": - if r.TryDecodeAsNil() { - x.Hard = nil - } else { - yyv4 := &x.Hard - yyv4.CodecDecodeSelf(d) - } - case "used": - if r.TryDecodeAsNil() { - x.Used = nil - } else { - yyv5 := &x.Used - yyv5.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceQuotaStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hard = nil - } else { - yyv7 := &x.Hard - yyv7.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Used = nil - } else { - yyv8 := &x.Used - yyv8.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceQuota) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceQuota) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceQuota) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ResourceQuotaSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ResourceQuotaStatus{} - } else 
{ - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceQuota) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ResourceQuotaSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ResourceQuotaStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceQuotaList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && 
z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceResourceQuota(([]ResourceQuota)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceResourceQuota(([]ResourceQuota)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceQuotaList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceQuotaList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 
:= 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceResourceQuota((*[]ResourceQuota)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceQuotaList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceResourceQuota((*[]ResourceQuota)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Secret) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { 
- } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = len(x.Data) != 0 - yyq2[2] = x.Type != "" - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Data == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encMapstringSliceuint8((map[string][]uint8)(x.Data), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("data")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Data == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encMapstringSliceuint8((map[string][]uint8)(x.Data), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - x.Type.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Secret) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Secret) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "data": - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv5 := &x.Data - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decMapstringSliceuint8((*map[string][]uint8)(yyv5), d) - } - } - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = SecretType(r.DecodeString()) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Secret) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv11 := &x.ObjectMeta - yyv11.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv12 := &x.Data - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - h.decMapstringSliceuint8((*map[string][]uint8)(yyv12), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = SecretType(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x SecretType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *SecretType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *SecretList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceSecret(([]Secret)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceSecret(([]Secret)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if 
false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SecretList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SecretList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceSecret((*[]Secret)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SecretList) codecDecodeSelfFromArray(l int, d 
*codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceSecret((*[]Secret)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ConfigMap) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = len(x.Data) != 0 - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Data == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - z.F.EncMapStringStringV(x.Data, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("data")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Data == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - z.F.EncMapStringStringV(x.Data, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ConfigMap) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ConfigMap) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "data": - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv5 := &x.Data - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - z.F.DecMapStringStringX(yyv5, false, d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = 
string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ConfigMap) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv11 := &x.Data - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - z.F.DecMapStringStringX(yyv11, false, d) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ConfigMapList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - 
z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceConfigMap(([]ConfigMap)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceConfigMap(([]ConfigMap)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ConfigMapList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ConfigMapList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 
:= z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceConfigMap((*[]ConfigMap)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ConfigMapList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceConfigMap((*[]ConfigMap)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x PatchType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PatchType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x ComponentConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - 
_, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ComponentConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ComponentCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.Message != "" - yyq2[3] = x.Error != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Status.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Status.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Error)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("error")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Error)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ComponentCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if 
yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ComponentCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ComponentConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = ConditionStatus(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - case "error": - if r.TryDecodeAsNil() { - x.Error = "" - } else { - x.Error = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ComponentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ComponentConditionType(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = ConditionStatus(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Error = "" - } else { - x.Error = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x 
*ComponentStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = len(x.Conditions) != 0 - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceComponentCondition(([]ComponentCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceComponentCondition(([]ComponentCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ComponentStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && 
z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ComponentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv5 := &x.Conditions - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceComponentCondition((*[]ComponentCondition)(yyv5), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ComponentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv11 := &x.Conditions - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceComponentCondition((*[]ComponentCondition)(yyv11), d) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = 
string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ComponentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceComponentStatus(([]ComponentStatus)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceComponentStatus(([]ComponentStatus)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if 
yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ComponentStatusList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ComponentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceComponentStatus((*[]ComponentStatus)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ComponentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - 
h.decSliceComponentStatus((*[]ComponentStatus)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Capabilities != nil - yyq2[1] = x.Privileged != nil - yyq2[2] = x.SELinuxOptions != nil - yyq2[3] = x.RunAsUser != nil - yyq2[4] = x.RunAsNonRoot != nil - yyq2[5] = x.ReadOnlyRootFilesystem != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Capabilities == nil { - r.EncodeNil() - } else { - x.Capabilities.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("capabilities")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Capabilities == nil { - r.EncodeNil() - } else { - x.Capabilities.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Privileged == nil { - r.EncodeNil() - } else { - yy7 := *x.Privileged - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(yy7)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("privileged")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Privileged == nil { - r.EncodeNil() - } else { - yy9 := *x.Privileged - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(yy9)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SELinuxOptions 
== nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.RunAsUser == nil { - r.EncodeNil() - } else { - yy15 := *x.RunAsUser - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(yy15)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RunAsUser == nil { - r.EncodeNil() - } else { - yy17 := *x.RunAsUser - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeInt(int64(yy17)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.RunAsNonRoot == nil { - r.EncodeNil() - } else { - yy20 := *x.RunAsNonRoot - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeBool(bool(yy20)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsNonRoot")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RunAsNonRoot == nil { - r.EncodeNil() - } else { - yy22 := *x.RunAsNonRoot - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeBool(bool(yy22)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.ReadOnlyRootFilesystem == nil { - r.EncodeNil() - } else { - yy25 := *x.ReadOnlyRootFilesystem - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeBool(bool(yy25)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnlyRootFilesystem")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ReadOnlyRootFilesystem == nil { - r.EncodeNil() - } else { - yy27 := *x.ReadOnlyRootFilesystem - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeBool(bool(yy27)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SecurityContext) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if 
r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "capabilities": - if r.TryDecodeAsNil() { - if x.Capabilities != nil { - x.Capabilities = nil - } - } else { - if x.Capabilities == nil { - x.Capabilities = new(Capabilities) - } - x.Capabilities.CodecDecodeSelf(d) - } - case "privileged": - if r.TryDecodeAsNil() { - if x.Privileged != nil { - x.Privileged = nil - } - } else { - if x.Privileged == nil { - x.Privileged = new(bool) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*bool)(x.Privileged)) = r.DecodeBool() - } - } - case "seLinuxOptions": - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - case "runAsUser": - if r.TryDecodeAsNil() { - if x.RunAsUser != nil { - x.RunAsUser = nil - } - } else { - if x.RunAsUser == nil { - x.RunAsUser = new(int64) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) - } - } - case "runAsNonRoot": - if r.TryDecodeAsNil() { - if x.RunAsNonRoot != nil { - x.RunAsNonRoot = nil - } - } else { - if x.RunAsNonRoot == nil { - x.RunAsNonRoot = new(bool) - } - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() - } - } - case "readOnlyRootFilesystem": - if r.TryDecodeAsNil() { - if x.ReadOnlyRootFilesystem != nil { - x.ReadOnlyRootFilesystem = nil - } - } else { - if x.ReadOnlyRootFilesystem == nil { - x.ReadOnlyRootFilesystem = new(bool) - } - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*bool)(x.ReadOnlyRootFilesystem)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Capabilities != nil { - x.Capabilities = nil - } - } else { - if x.Capabilities == nil { - x.Capabilities = new(Capabilities) - } - x.Capabilities.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Privileged != nil { - x.Privileged = nil - } - } else { - if x.Privileged == nil { - x.Privileged = new(bool) - } - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*bool)(x.Privileged)) = r.DecodeBool() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else 
{ - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RunAsUser != nil { - x.RunAsUser = nil - } - } else { - if x.RunAsUser == nil { - x.RunAsUser = new(int64) - } - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RunAsNonRoot != nil { - x.RunAsNonRoot = nil - } - } else { - if x.RunAsNonRoot == nil { - x.RunAsNonRoot = new(bool) - } - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ReadOnlyRootFilesystem != nil { - x.ReadOnlyRootFilesystem = nil - } - } else { - if x.ReadOnlyRootFilesystem == nil { - x.ReadOnlyRootFilesystem = new(bool) - } - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*bool)(x.ReadOnlyRootFilesystem)) = r.DecodeBool() - } - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SELinuxOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.User != "" - yyq2[1] = x.Role != "" - yyq2[2] = x.Type != "" - yyq2[3] = x.Level != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("user")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Role)) - } - } else { - 
r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("role")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Role)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Type)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Type)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Level)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("level")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Level)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SELinuxOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SELinuxOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "user": - if r.TryDecodeAsNil() { - x.User = "" - } else { - x.User = string(r.DecodeString()) - } - case "role": - if r.TryDecodeAsNil() { - x.Role = "" - } else { - x.Role = string(r.DecodeString()) - } - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = string(r.DecodeString()) - } - case "level": - if r.TryDecodeAsNil() { - x.Level = "" - } else { - x.Level = 
string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SELinuxOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.User = "" - } else { - x.User = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Role = "" - } else { - x.Role = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Level = "" - } else { - x.Level = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RangeAllocation) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Range)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("range")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym10 := z.EncBinary() - _ = yym10 - if false { 
- } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Range)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Data == nil { - r.EncodeNil() - } else { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("data")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Data == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RangeAllocation) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RangeAllocation) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - 
x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "range": - if r.TryDecodeAsNil() { - x.Range = "" - } else { - x.Range = string(r.DecodeString()) - } - case "data": - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv6 := &x.Data - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *yyv6 = r.DecodeBytes(*(*[]byte)(yyv6), false, false) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RangeAllocation) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv11 := &x.ObjectMeta - yyv11.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Range = "" - } else { - x.Range = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv13 := &x.Data - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *yyv13 = r.DecodeBytes(*(*[]byte)(yyv13), false, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceOwnerReference(v []OwnerReference, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceOwnerReference(v 
*[]OwnerReference, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []OwnerReference{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]OwnerReference, yyrl1) - } - } else { - yyv1 = make([]OwnerReference, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = OwnerReference{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, OwnerReference{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = OwnerReference{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, OwnerReference{}) // var yyz1 OwnerReference - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = OwnerReference{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []OwnerReference{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePersistentVolumeAccessMode(v []PersistentVolumeAccessMode, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePersistentVolumeAccessMode(v *[]PersistentVolumeAccessMode, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PersistentVolumeAccessMode{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PersistentVolumeAccessMode, yyrl1) - } - } else { - yyv1 = make([]PersistentVolumeAccessMode, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = PersistentVolumeAccessMode(r.DecodeString()) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - 
yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = PersistentVolumeAccessMode(r.DecodeString()) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 PersistentVolumeAccessMode - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = PersistentVolumeAccessMode(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PersistentVolumeAccessMode{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePersistentVolume(v []PersistentVolume, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePersistentVolume(v *[]PersistentVolume, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PersistentVolume{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 456) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PersistentVolume, yyrl1) - } - } else { - yyv1 = make([]PersistentVolume, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PersistentVolume{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PersistentVolume{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PersistentVolume{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PersistentVolume{}) // var yyz1 PersistentVolume - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PersistentVolume{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PersistentVolume{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePersistentVolumeClaim(v []PersistentVolumeClaim, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) 
- } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePersistentVolumeClaim(v *[]PersistentVolumeClaim, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PersistentVolumeClaim{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 352) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PersistentVolumeClaim, yyrl1) - } - } else { - yyv1 = make([]PersistentVolumeClaim, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PersistentVolumeClaim{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PersistentVolumeClaim{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PersistentVolumeClaim{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PersistentVolumeClaim{}) // var yyz1 PersistentVolumeClaim - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PersistentVolumeClaim{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PersistentVolumeClaim{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceKeyToPath(v []KeyToPath, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceKeyToPath(v *[]KeyToPath, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []KeyToPath{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]KeyToPath, yyrl1) - } - } else { - yyv1 = make([]KeyToPath, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - 
yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = KeyToPath{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, KeyToPath{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = KeyToPath{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, KeyToPath{}) // var yyz1 KeyToPath - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = KeyToPath{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []KeyToPath{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceDownwardAPIVolumeFile(v []DownwardAPIVolumeFile, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceDownwardAPIVolumeFile(v *[]DownwardAPIVolumeFile, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []DownwardAPIVolumeFile{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]DownwardAPIVolumeFile, yyrl1) - } - } else { - yyv1 = make([]DownwardAPIVolumeFile, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = DownwardAPIVolumeFile{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, DownwardAPIVolumeFile{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = DownwardAPIVolumeFile{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, DownwardAPIVolumeFile{}) // var yyz1 DownwardAPIVolumeFile - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = DownwardAPIVolumeFile{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []DownwardAPIVolumeFile{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceHTTPHeader(v []HTTPHeader, e *codec1978.Encoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceHTTPHeader(v *[]HTTPHeader, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []HTTPHeader{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]HTTPHeader, yyrl1) - } - } else { - yyv1 = make([]HTTPHeader, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPHeader{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, HTTPHeader{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPHeader{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, HTTPHeader{}) // var yyz1 HTTPHeader - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPHeader{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []HTTPHeader{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceCapability(v []Capability, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCapability(v *[]Capability, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Capability{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Capability, yyrl1) - } - } else { - yyv1 = make([]Capability, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - 
yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = Capability(r.DecodeString()) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = Capability(r.DecodeString()) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 Capability - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = Capability(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Capability{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceContainerPort(v []ContainerPort, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceContainerPort(v *[]ContainerPort, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ContainerPort{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ContainerPort, yyrl1) - } - } else { - yyv1 = make([]ContainerPort, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerPort{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ContainerPort{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerPort{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ContainerPort{}) // var yyz1 ContainerPort - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerPort{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ContainerPort{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceEnvVar(v []EnvVar, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEnvVar(v *[]EnvVar, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []EnvVar{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]EnvVar, yyrl1) - } - } else { - yyv1 = make([]EnvVar, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EnvVar{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, EnvVar{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EnvVar{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, EnvVar{}) // var yyz1 EnvVar - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = EnvVar{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []EnvVar{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceVolumeMount(v []VolumeMount, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceVolumeMount(v *[]VolumeMount, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []VolumeMount{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]VolumeMount, yyrl1) - } - } else { - yyv1 = make([]VolumeMount, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = VolumeMount{} - } 
else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, VolumeMount{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = VolumeMount{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, VolumeMount{}) // var yyz1 VolumeMount - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = VolumeMount{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []VolumeMount{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePod(v []Pod, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePod(v *[]Pod, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Pod{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 624) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Pod, yyrl1) - } - } else { - yyv1 = make([]Pod, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Pod{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Pod{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Pod{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Pod{}) // var yyz1 Pod - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Pod{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Pod{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNodeSelectorTerm(v []NodeSelectorTerm, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x 
codecSelfer1234) decSliceNodeSelectorTerm(v *[]NodeSelectorTerm, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NodeSelectorTerm{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NodeSelectorTerm, yyrl1) - } - } else { - yyv1 = make([]NodeSelectorTerm, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeSelectorTerm{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NodeSelectorTerm{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeSelectorTerm{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NodeSelectorTerm{}) // var yyz1 NodeSelectorTerm - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeSelectorTerm{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NodeSelectorTerm{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNodeSelectorRequirement(v []NodeSelectorRequirement, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNodeSelectorRequirement(v *[]NodeSelectorRequirement, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NodeSelectorRequirement{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NodeSelectorRequirement, yyrl1) - } - } else { - yyv1 = make([]NodeSelectorRequirement, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - 
yyv1[yyj1] = NodeSelectorRequirement{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NodeSelectorRequirement{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeSelectorRequirement{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NodeSelectorRequirement{}) // var yyz1 NodeSelectorRequirement - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeSelectorRequirement{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NodeSelectorRequirement{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePodAffinityTerm(v []PodAffinityTerm, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePodAffinityTerm(v *[]PodAffinityTerm, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PodAffinityTerm{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PodAffinityTerm, yyrl1) - } - } else { - yyv1 = make([]PodAffinityTerm, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodAffinityTerm{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PodAffinityTerm{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodAffinityTerm{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PodAffinityTerm{}) // var yyz1 PodAffinityTerm - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodAffinityTerm{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PodAffinityTerm{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceWeightedPodAffinityTerm(v []WeightedPodAffinityTerm, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceWeightedPodAffinityTerm(v *[]WeightedPodAffinityTerm, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []WeightedPodAffinityTerm{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]WeightedPodAffinityTerm, yyrl1) - } - } else { - yyv1 = make([]WeightedPodAffinityTerm, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = WeightedPodAffinityTerm{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, WeightedPodAffinityTerm{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = WeightedPodAffinityTerm{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, WeightedPodAffinityTerm{}) // var yyz1 WeightedPodAffinityTerm - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = WeightedPodAffinityTerm{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []WeightedPodAffinityTerm{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePreferredSchedulingTerm(v []PreferredSchedulingTerm, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePreferredSchedulingTerm(v *[]PreferredSchedulingTerm, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PreferredSchedulingTerm{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } 
else { - yyv1 = make([]PreferredSchedulingTerm, yyrl1) - } - } else { - yyv1 = make([]PreferredSchedulingTerm, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PreferredSchedulingTerm{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PreferredSchedulingTerm{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PreferredSchedulingTerm{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PreferredSchedulingTerm{}) // var yyz1 PreferredSchedulingTerm - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PreferredSchedulingTerm{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PreferredSchedulingTerm{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceVolume(v []Volume, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceVolume(v *[]Volume, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Volume{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 176) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Volume, yyrl1) - } - } else { - yyv1 = make([]Volume, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Volume{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Volume{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Volume{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Volume{}) // var yyz1 Volume - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Volume{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == 
nil { - yyv1 = []Volume{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceContainer(v []Container, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceContainer(v *[]Container, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Container{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 256) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Container, yyrl1) - } - } else { - yyv1 = make([]Container, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Container{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Container{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Container{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Container{}) // var yyz1 Container - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Container{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Container{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceLocalObjectReference(v []LocalObjectReference, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceLocalObjectReference(v *[]LocalObjectReference, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []LocalObjectReference{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { 
- yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]LocalObjectReference, yyrl1) - } - } else { - yyv1 = make([]LocalObjectReference, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LocalObjectReference{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, LocalObjectReference{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LocalObjectReference{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, LocalObjectReference{}) // var yyz1 LocalObjectReference - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = LocalObjectReference{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []LocalObjectReference{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePodCondition(v []PodCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePodCondition(v *[]PodCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PodCondition{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PodCondition, yyrl1) - } - } else { - yyv1 = make([]PodCondition, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodCondition{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PodCondition{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodCondition{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PodCondition{}) // var yyz1 PodCondition - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodCondition{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < 
len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PodCondition{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceContainerStatus(v []ContainerStatus, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceContainerStatus(v *[]ContainerStatus, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ContainerStatus{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 120) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ContainerStatus, yyrl1) - } - } else { - yyv1 = make([]ContainerStatus, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerStatus{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ContainerStatus{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerStatus{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ContainerStatus{}) // var yyz1 ContainerStatus - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerStatus{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ContainerStatus{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePodTemplate(v []PodTemplate, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePodTemplate(v *[]PodTemplate, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PodTemplate{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - 
yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 672) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PodTemplate, yyrl1) - } - } else { - yyv1 = make([]PodTemplate, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodTemplate{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PodTemplate{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodTemplate{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PodTemplate{}) // var yyz1 PodTemplate - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodTemplate{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PodTemplate{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceReplicationController(v []ReplicationController, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceReplicationController(v *[]ReplicationController, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ReplicationController{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ReplicationController, yyrl1) - } - } else { - yyv1 = make([]ReplicationController, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicationController{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ReplicationController{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicationController{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ReplicationController{}) // var yyz1 ReplicationController - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if 
r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicationController{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ReplicationController{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceService(v []Service, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceService(v *[]Service, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Service{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 408) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Service, yyrl1) - } - } else { - yyv1 = make([]Service, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Service{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Service{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Service{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Service{}) // var yyz1 Service - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Service{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Service{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceLoadBalancerIngress(v []LoadBalancerIngress, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceLoadBalancerIngress(v *[]LoadBalancerIngress, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []LoadBalancerIngress{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var 
yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]LoadBalancerIngress, yyrl1) - } - } else { - yyv1 = make([]LoadBalancerIngress, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LoadBalancerIngress{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, LoadBalancerIngress{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LoadBalancerIngress{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, LoadBalancerIngress{}) // var yyz1 LoadBalancerIngress - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = LoadBalancerIngress{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []LoadBalancerIngress{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceServicePort(v []ServicePort, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceServicePort(v *[]ServicePort, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ServicePort{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 80) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ServicePort, yyrl1) - } - } else { - yyv1 = make([]ServicePort, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServicePort{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ServicePort{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServicePort{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ServicePort{}) // var yyz1 ServicePort - yyc1 = true - } - 
yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServicePort{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ServicePort{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceObjectReference(v []ObjectReference, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceObjectReference(v *[]ObjectReference, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ObjectReference{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ObjectReference, yyrl1) - } - } else { - yyv1 = make([]ObjectReference, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ObjectReference{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ObjectReference{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ObjectReference{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ObjectReference{}) // var yyz1 ObjectReference - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ObjectReference{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ObjectReference{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceServiceAccount(v []ServiceAccount, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceServiceAccount(v *[]ServiceAccount, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ServiceAccount{} - yyc1 
= true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 288) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ServiceAccount, yyrl1) - } - } else { - yyv1 = make([]ServiceAccount, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServiceAccount{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ServiceAccount{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServiceAccount{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ServiceAccount{}) // var yyz1 ServiceAccount - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServiceAccount{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ServiceAccount{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceEndpointSubset(v []EndpointSubset, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEndpointSubset(v *[]EndpointSubset, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []EndpointSubset{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]EndpointSubset, yyrl1) - } - } else { - yyv1 = make([]EndpointSubset, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointSubset{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, EndpointSubset{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointSubset{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if 
yyj1 >= len(yyv1) { - yyv1 = append(yyv1, EndpointSubset{}) // var yyz1 EndpointSubset - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointSubset{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []EndpointSubset{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceEndpointAddress(v []EndpointAddress, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEndpointAddress(v *[]EndpointAddress, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []EndpointAddress{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]EndpointAddress, yyrl1) - } - } else { - yyv1 = make([]EndpointAddress, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointAddress{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, EndpointAddress{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointAddress{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, EndpointAddress{}) // var yyz1 EndpointAddress - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointAddress{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []EndpointAddress{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceEndpointPort(v []EndpointPort, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEndpointPort(v *[]EndpointPort, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := 
z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []EndpointPort{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]EndpointPort, yyrl1) - } - } else { - yyv1 = make([]EndpointPort, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointPort{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, EndpointPort{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointPort{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, EndpointPort{}) // var yyz1 EndpointPort - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointPort{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []EndpointPort{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceEndpoints(v []Endpoints, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEndpoints(v *[]Endpoints, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Endpoints{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Endpoints, yyrl1) - } - } else { - yyv1 = make([]Endpoints, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Endpoints{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Endpoints{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Endpoints{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - 
yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Endpoints{}) // var yyz1 Endpoints - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Endpoints{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Endpoints{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNodeCondition(v []NodeCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNodeCondition(v *[]NodeCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NodeCondition{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NodeCondition, yyrl1) - } - } else { - yyv1 = make([]NodeCondition, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeCondition{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NodeCondition{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeCondition{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NodeCondition{}) // var yyz1 NodeCondition - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeCondition{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NodeCondition{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNodeAddress(v []NodeAddress, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNodeAddress(v *[]NodeAddress, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := 
z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NodeAddress{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NodeAddress, yyrl1) - } - } else { - yyv1 = make([]NodeAddress, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeAddress{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NodeAddress{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeAddress{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NodeAddress{}) // var yyz1 NodeAddress - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeAddress{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NodeAddress{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceContainerImage(v []ContainerImage, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceContainerImage(v *[]ContainerImage, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ContainerImage{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ContainerImage, yyrl1) - } - } else { - yyv1 = make([]ContainerImage, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerImage{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ContainerImage{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerImage{} - } else { - yyv3 := &yyv1[yyj1] - 
yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ContainerImage{}) // var yyz1 ContainerImage - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerImage{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ContainerImage{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceUniqueVolumeName(v []UniqueVolumeName, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceUniqueVolumeName(v *[]UniqueVolumeName, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []UniqueVolumeName{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]UniqueVolumeName, yyrl1) - } - } else { - yyv1 = make([]UniqueVolumeName, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = UniqueVolumeName(r.DecodeString()) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = UniqueVolumeName(r.DecodeString()) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 UniqueVolumeName - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = UniqueVolumeName(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []UniqueVolumeName{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceAttachedVolume(v []AttachedVolume, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceAttachedVolume(v *[]AttachedVolume, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if 
yyl1 == 0 { - if yyv1 == nil { - yyv1 = []AttachedVolume{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]AttachedVolume, yyrl1) - } - } else { - yyv1 = make([]AttachedVolume, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = AttachedVolume{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, AttachedVolume{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = AttachedVolume{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, AttachedVolume{}) // var yyz1 AttachedVolume - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = AttachedVolume{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []AttachedVolume{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encResourceList(v ResourceList, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk1, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yyk1.CodecEncodeSelf(e) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3 := &yyv1 - yym4 := z.EncBinary() - _ = yym4 - if false { - } else if z.HasExtensions() && z.EncExt(yy3) { - } else if !yym4 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3) - } else { - z.EncFallback(yy3) - } - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decResourceList(v *ResourceList, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyl1 := r.ReadMapStart() - yybh1 := z.DecBasicHandle() - if yyv1 == nil { - yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 72) - yyv1 = make(map[ResourceName]pkg3_resource.Quantity, yyrl1) - *v = yyv1 - } - var yymk1 ResourceName - var yymv1 pkg3_resource.Quantity - var yymg1 bool - if yybh1.MapValueReset { - yymg1 = true - } - if yyl1 > 0 { - for yyj1 := 0; yyj1 < yyl1; yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yymk1 = ResourceName(r.DecodeString()) - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = pkg3_resource.Quantity{} - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = pkg3_resource.Quantity{} - } else { - yyv3 := &yymv1 - yym4 := z.DecBinary() - _ = yym4 - if false { - } else if z.HasExtensions() && z.DecExt(yyv3) { - } else if !yym4 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv3) - } else { - z.DecFallback(yyv3, false) - } - } 
- - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } else if yyl1 < 0 { - for yyj1 := 0; !r.CheckBreak(); yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yymk1 = ResourceName(r.DecodeString()) - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = pkg3_resource.Quantity{} - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = pkg3_resource.Quantity{} - } else { - yyv6 := &yymv1 - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } // else len==0: TODO: Should we clear map entries? - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) encSliceNode(v []Node, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNode(v *[]Node, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Node{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 616) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Node, yyrl1) - } - } else { - yyv1 = make([]Node, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Node{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Node{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Node{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Node{}) // var yyz1 Node - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Node{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Node{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceFinalizerName(v []FinalizerName, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x 
codecSelfer1234) decSliceFinalizerName(v *[]FinalizerName, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []FinalizerName{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]FinalizerName, yyrl1) - } - } else { - yyv1 = make([]FinalizerName, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = FinalizerName(r.DecodeString()) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = FinalizerName(r.DecodeString()) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 FinalizerName - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = FinalizerName(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []FinalizerName{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNamespace(v []Namespace, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNamespace(v *[]Namespace, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Namespace{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Namespace, yyrl1) - } - } else { - yyv1 = make([]Namespace, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Namespace{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Namespace{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Namespace{} - } else { - yyv3 := 
&yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Namespace{}) // var yyz1 Namespace - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Namespace{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Namespace{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceEvent(v []Event, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEvent(v *[]Event, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Event{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 488) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Event, yyrl1) - } - } else { - yyv1 = make([]Event, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Event{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Event{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Event{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Event{}) // var yyz1 Event - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Event{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Event{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceruntime_Object(v []pkg7_runtime.Object, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyv1 == nil { - r.EncodeNil() - } else { - yym2 := z.EncBinary() - _ = yym2 - if false { - } else if z.HasExtensions() && z.EncExt(yyv1) { - } else { - z.EncFallback(yyv1) - } - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceruntime_Object(v *[]pkg7_runtime.Object, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []pkg7_runtime.Object{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]pkg7_runtime.Object, yyrl1) - } - } else { - yyv1 = make([]pkg7_runtime.Object, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = nil - } else { - yyv2 := &yyv1[yyj1] - yym3 := z.DecBinary() - _ = yym3 - if false { - } else if z.HasExtensions() && z.DecExt(yyv2) { - } else { - z.DecFallback(yyv2, true) - } - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, nil) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = nil - } else { - yyv4 := &yyv1[yyj1] - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, true) - } - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, nil) // var yyz1 pkg7_runtime.Object - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = nil - } else { - yyv6 := &yyv1[yyj1] - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else { - z.DecFallback(yyv6, true) - } - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []pkg7_runtime.Object{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceLimitRangeItem(v []LimitRangeItem, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceLimitRangeItem(v *[]LimitRangeItem, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []LimitRangeItem{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]LimitRangeItem, yyrl1) - } - } else { - yyv1 = make([]LimitRangeItem, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - 
yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LimitRangeItem{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, LimitRangeItem{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LimitRangeItem{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, LimitRangeItem{}) // var yyz1 LimitRangeItem - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = LimitRangeItem{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []LimitRangeItem{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceLimitRange(v []LimitRange, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceLimitRange(v *[]LimitRange, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []LimitRange{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]LimitRange, yyrl1) - } - } else { - yyv1 = make([]LimitRange, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LimitRange{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, LimitRange{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LimitRange{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, LimitRange{}) // var yyz1 LimitRange - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = LimitRange{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []LimitRange{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceResourceQuotaScope(v []ResourceQuotaScope, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceResourceQuotaScope(v *[]ResourceQuotaScope, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ResourceQuotaScope{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ResourceQuotaScope, yyrl1) - } - } else { - yyv1 = make([]ResourceQuotaScope, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = ResourceQuotaScope(r.DecodeString()) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = ResourceQuotaScope(r.DecodeString()) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 ResourceQuotaScope - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = ResourceQuotaScope(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ResourceQuotaScope{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceResourceQuota(v []ResourceQuota, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceResourceQuota(v *[]ResourceQuota, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ResourceQuota{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 288) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ResourceQuota, yyrl1) - } - } else { - yyv1 = make([]ResourceQuota, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; 
yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ResourceQuota{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ResourceQuota{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ResourceQuota{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ResourceQuota{}) // var yyz1 ResourceQuota - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ResourceQuota{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ResourceQuota{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encMapstringSliceuint8(v map[string][]uint8, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk1, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyk1)) - } - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyv1 == nil { - r.EncodeNil() - } else { - yym3 := z.EncBinary() - _ = yym3 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(yyv1)) - } - } - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decMapstringSliceuint8(v *map[string][]uint8, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyl1 := r.ReadMapStart() - yybh1 := z.DecBasicHandle() - if yyv1 == nil { - yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) - yyv1 = make(map[string][]uint8, yyrl1) - *v = yyv1 - } - var yymk1 string - var yymv1 []uint8 - var yymg1 bool - if yybh1.MapValueReset { - yymg1 = true - } - if yyl1 > 0 { - for yyj1 := 0; yyj1 < yyl1; yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yymk1 = string(r.DecodeString()) - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv3 := &yymv1 - yym4 := z.DecBinary() - _ = yym4 - if false { - } else { - *yyv3 = r.DecodeBytes(*(*[]byte)(yyv3), false, false) - } - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } else if yyl1 < 0 { - for yyj1 := 0; !r.CheckBreak(); yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yymk1 = string(r.DecodeString()) - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv6 := &yymv1 - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *yyv6 = r.DecodeBytes(*(*[]byte)(yyv6), false, false) - } - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } // else len==0: TODO: Should we clear map entries? 
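The encSlice*/decSlice* and encMapstringSliceuint8/decMapstringSliceuint8 helpers above are generated for the ugorji codec runtime; callers never invoke them directly but go through an Encoder/Decoder. A minimal round-trip sketch, assuming the upstream github.com/ugorji/go/codec import path and a simplified stand-in map type (the real API types dispatch through their generated CodecEncodeSelf/CodecDecodeSelf methods):

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var jh codec.JsonHandle // any codec handle (Binc, CBOR, Msgpack) drives the same generated paths

	// Mirrors the map[string][]uint8 shape handled by enc/decMapstringSliceuint8 above.
	in := map[string][]uint8{"token": []byte("s3cr3t")}

	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &jh).Encode(in); err != nil {
		panic(err)
	}

	out := map[string][]uint8{}
	if err := codec.NewDecoderBytes(buf, &jh).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out["token"]) // s3cr3t
}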
- z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) encSliceuint8(v []uint8, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - r.EncodeUint(uint64(yyv1)) - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceuint8(v *[]uint8, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []uint8{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 1) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]uint8, yyrl1) - } - } else { - yyv1 = make([]uint8, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = 0 - } else { - yyv1[yyj1] = uint8(r.DecodeUint(8)) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, 0) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = 0 - } else { - yyv1[yyj1] = uint8(r.DecodeUint(8)) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, 0) // var yyz1 uint8 - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = 0 - } else { - yyv1[yyj1] = uint8(r.DecodeUint(8)) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []uint8{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceSecret(v []Secret, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceSecret(v *[]Secret, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Secret{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Secret, yyrl1) - } - } else { - yyv1 = make([]Secret, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - 
yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Secret{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Secret{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Secret{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Secret{}) // var yyz1 Secret - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Secret{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Secret{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceConfigMap(v []ConfigMap, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceConfigMap(v *[]ConfigMap, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ConfigMap{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 248) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ConfigMap, yyrl1) - } - } else { - yyv1 = make([]ConfigMap, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ConfigMap{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ConfigMap{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ConfigMap{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ConfigMap{}) // var yyz1 ConfigMap - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ConfigMap{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ConfigMap{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceComponentCondition(v []ComponentCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := 
range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceComponentCondition(v *[]ComponentCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ComponentCondition{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ComponentCondition, yyrl1) - } - } else { - yyv1 = make([]ComponentCondition, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ComponentCondition{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ComponentCondition{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ComponentCondition{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ComponentCondition{}) // var yyz1 ComponentCondition - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ComponentCondition{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ComponentCondition{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceComponentStatus(v []ComponentStatus, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceComponentStatus(v *[]ComponentStatus, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ComponentStatus{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ComponentStatus, yyrl1) - } - } else { - yyv1 = make([]ComponentStatus, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != 
len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ComponentStatus{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ComponentStatus{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ComponentStatus{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ComponentStatus{}) // var yyz1 ComponentStatus - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ComponentStatus{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ComponentStatus{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/types.go b/vendor/k8s.io/kubernetes/pkg/api/types.go deleted file mode 100644 index 6d11c183b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/types.go +++ /dev/null @@ -1,2884 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util/intstr" -) - -// Common string formats -// --------------------- -// Many fields in this API have formatting requirements. The commonly used -// formats are defined here. -// -// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier" -// in the C language. This is captured by the following regex: -// [A-Za-z_][A-Za-z0-9_]* -// This defines the format, but not the length restriction, which should be -// specified at the definition of any field of this type. -// -// DNS_LABEL: This is a string, no more than 63 characters long, that conforms -// to the definition of a "label" in RFCs 1035 and 1123. This is captured -// by the following regex: -// [a-z0-9]([-a-z0-9]*[a-z0-9])? -// -// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms -// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured -// by the following regex: -// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* -// or more simply: -// DNS_LABEL(\.DNS_LABEL)* -// -// IANA_SVC_NAME: This is a string, no more than 15 characters long, that -// conforms to the definition of IANA service name in RFC 6335. -// It must contains at least one letter [a-z] and it must contains only [a-z0-9-]. -// Hypens ('-') cannot be leading or trailing character of the string -// and cannot be adjacent to other hyphens. 
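As an illustration of the DNS_LABEL and DNS_SUBDOMAIN conventions quoted above, a small sketch using those regexes verbatim; the helper names and explicit length checks are mine (upstream Kubernetes performs this validation in its own validation package):

package main

import (
	"fmt"
	"regexp"
)

var (
	dnsLabel     = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`)
	dnsSubdomain = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
)

// isDNSLabel enforces the 63-character limit stated above on top of the regex.
func isDNSLabel(s string) bool { return len(s) <= 63 && dnsLabel.MatchString(s) }

// isDNSSubdomain enforces the 253-character limit stated above on top of the regex.
func isDNSSubdomain(s string) bool { return len(s) <= 253 && dnsSubdomain.MatchString(s) }

func main() {
	fmt.Println(isDNSLabel("kubewatch"))            // true
	fmt.Println(isDNSLabel("Kube_Watch"))           // false: uppercase and '_' are not allowed
	fmt.Println(isDNSSubdomain("k8s.skippbox.com")) // true
}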
- -// ObjectMeta is metadata that all persisted resources must have, which includes all objects -// users must create. -type ObjectMeta struct { - // Name is unique within a namespace. Name is required when creating resources, although - // some resources may allow a client to request the generation of an appropriate name - // automatically. Name is primarily intended for creation idempotence and configuration - // definition. - Name string `json:"name,omitempty"` - - // GenerateName indicates that the name should be made unique by the server prior to persisting - // it. A non-empty value for the field indicates the name will be made unique (and the name - // returned to the client will be different than the name passed). The value of this field will - // be combined with a unique suffix on the server if the Name field has not been provided. - // The provided value must be valid within the rules for Name, and may be truncated by the length - // of the suffix required to make the value unique on the server. - // - // If this field is specified, and Name is not present, the server will NOT return a 409 if the - // generated name exists - instead, it will either return 201 Created or 500 with Reason - // ServerTimeout indicating a unique name could not be found in the time allotted, and the client - // should retry (optionally after the time indicated in the Retry-After header). - GenerateName string `json:"generateName,omitempty"` - - // Namespace defines the space within which name must be unique. An empty namespace is - // equivalent to the "default" namespace, but "default" is the canonical representation. - // Not all objects are required to be scoped to a namespace - the value of this field for - // those objects will be empty. - Namespace string `json:"namespace,omitempty"` - - // SelfLink is a URL representing this object. - SelfLink string `json:"selfLink,omitempty"` - - // UID is the unique in time and space value for this object. It is typically generated by - // the server on successful creation of a resource and is not allowed to change on PUT - // operations. - UID types.UID `json:"uid,omitempty"` - - // An opaque value that represents the version of this resource. May be used for optimistic - // concurrency, change detection, and the watch operation on a resource or set of resources. - // Clients must treat these values as opaque and values may only be valid for a particular - // resource or set of resources. Only servers will generate resource versions. - ResourceVersion string `json:"resourceVersion,omitempty"` - - // A sequence number representing a specific generation of the desired state. - // Populated by the system. Read-only. - Generation int64 `json:"generation,omitempty"` - - // CreationTimestamp is a timestamp representing the server time when this object was - // created. It is not guaranteed to be set in happens-before order across separate operations. - // Clients may not set this value. It is represented in RFC3339 form and is in UTC. - CreationTimestamp unversioned.Time `json:"creationTimestamp,omitempty"` - - // DeletionTimestamp is the time after which this resource will be deleted. This - // field is set by the server when a graceful deletion is requested by the user, and is not - // directly settable by a client. The resource will be deleted (no longer visible from - // resource lists, and not reachable by name) after the time in this field. 
Once set, this - // value may not be unset or be set further into the future, although it may be shortened - // or the resource may be deleted prior to this time. For example, a user may request that - // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination - // signal to the containers in the pod. Once the resource is deleted in the API, the Kubelet - // will send a hard termination signal to the container. - DeletionTimestamp *unversioned.Time `json:"deletionTimestamp,omitempty"` - - // DeletionGracePeriodSeconds records the graceful deletion value set when graceful deletion - // was requested. Represents the most recent grace period, and may only be shortened once set. - DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"` - - // Labels are key value pairs that may be used to scope and select individual resources. - // Label keys are of the form: - // label-key ::= prefixed-name | name - // prefixed-name ::= prefix '/' name - // prefix ::= DNS_SUBDOMAIN - // name ::= DNS_LABEL - // The prefix is optional. If the prefix is not specified, the key is assumed to be private - // to the user. Other system components that wish to use labels must specify a prefix. The - // "kubernetes.io/" prefix is reserved for use by kubernetes components. - // TODO: replace map[string]string with labels.LabelSet type - Labels map[string]string `json:"labels,omitempty"` - - // Annotations are unstructured key value data stored with a resource that may be set by - // external tooling. They are not queryable and should be preserved when modifying - // objects. Annotation keys have the same formatting restrictions as Label keys. See the - // comments on Labels for details. - Annotations map[string]string `json:"annotations,omitempty"` - - // List of objects depended by this object. If ALL objects in the list have - // been deleted, this object will be garbage collected. If this object is managed by a controller, - // then an entry in this list will point to this controller, with the controller field set to true. - // There cannot be more than one managing controller. - OwnerReferences []OwnerReference `json:"ownerReferences,omitempty"` - - // Must be empty before the object is deleted from the registry. Each entry - // is an identifier for the responsible component that will remove the entry - // from the list. If the deletionTimestamp of the object is non-nil, entries - // in this list can only be removed. - Finalizers []string `json:"finalizers,omitempty"` -} - -const ( - // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients - NamespaceDefault string = "default" - // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces - NamespaceAll string = "" - // NamespaceNone is the argument for a context when there is no namespace. - NamespaceNone string = "" - // NamespaceSystem is the system namespace where we place system components. - NamespaceSystem string = "kube-system" - // TerminationMessagePathDefault means the default path to capture the application termination message running in a container - TerminationMessagePathDefault string = "/dev/termination-log" -) - -// Volume represents a named volume in a pod that may be accessed by any containers in the pod. -type Volume struct { - // Required: This must be a DNS_LABEL. Each volume in a pod must have - // a unique name. 
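A short sketch of the ObjectMeta conventions just described: GenerateName instead of Name, the canonical "default" namespace, and both prefixed and bare label keys. It assumes the upstream k8s.io/kubernetes/pkg/api package that this vendored copy mirrors; the field values are illustrative only.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
)

func main() {
	meta := api.ObjectMeta{
		GenerateName: "kubewatch-",         // server appends a unique suffix; Name is left empty
		Namespace:    api.NamespaceDefault, // "default" is the canonical form of the empty namespace
		Labels: map[string]string{
			"skippbox.com/component": "watcher",   // prefixed-name: DNS_SUBDOMAIN prefix + '/' + DNS_LABEL name
			"app":                    "kubewatch", // bare name: assumed private to the user
		},
	}
	fmt.Println(meta.GenerateName, meta.Namespace)
}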
- Name string `json:"name"` - // The VolumeSource represents the location and type of a volume to mount. - // This is optional for now. If not specified, the Volume is implied to be an EmptyDir. - // This implied behavior is deprecated and will be removed in a future version. - VolumeSource `json:",inline,omitempty"` -} - -// VolumeSource represents the source location of a volume to mount. -// Only one of its members may be specified. -type VolumeSource struct { - // HostPath represents file or directory on the host machine that is - // directly exposed to the container. This is generally used for system - // agents or other privileged things that are allowed to see the host - // machine. Most containers will NOT need this. - // --- - // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not - // mount host directories as read/write. - HostPath *HostPathVolumeSource `json:"hostPath,omitempty"` - // EmptyDir represents a temporary directory that shares a pod's lifetime. - EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty"` - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty"` - // AWSElasticBlockStore represents an AWS EBS disk that is attached to a - // kubelet's host machine and then exposed to the pod. - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty"` - // GitRepo represents a git repository at a particular revision. - GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty"` - // Secret represents a secret that should populate this volume. - Secret *SecretVolumeSource `json:"secret,omitempty"` - // NFS represents an NFS mount on the host that shares a pod's lifetime - NFS *NFSVolumeSource `json:"nfs,omitempty"` - // ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty"` - // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime - Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty"` - // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace - PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty"` - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime - RBD *RBDVolumeSource `json:"rbd,omitempty"` - // FlexVolume represents a generic volume resource that is - // provisioned/attached using a exec based plugin. This is an alpha feature and may change in future. - FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty"` - - // Cinder represents a cinder volume attached and mounted on kubelets host machine - Cinder *CinderVolumeSource `json:"cinder,omitempty"` - - // CephFS represents a Cephfs mount on the host that shares a pod's lifetime - CephFS *CephFSVolumeSource `json:"cephfs,omitempty"` - - // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running - Flocker *FlockerVolumeSource `json:"flocker,omitempty"` - - // DownwardAPI represents metadata about the pod that should populate this volume - DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty"` - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. 
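Because VolumeSource is a union in which only one member may be specified, here is a minimal sketch of a pod volume that picks exactly one (a memory-backed EmptyDir); same package assumption as above, names are illustrative.

package example

import "k8s.io/kubernetes/pkg/api"

// scratchVolume sets exactly one VolumeSource member; all other members stay nil.
func scratchVolume() api.Volume {
	return api.Volume{
		Name: "scratch", // DNS_LABEL, unique within the pod
		VolumeSource: api.VolumeSource{
			EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}, // tmpfs on the node
		},
	}
}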
- FC *FCVolumeSource `json:"fc,omitempty"` - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty"` - // ConfigMap represents a configMap that should populate this volume - ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty"` - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty"` -} - -// Similar to VolumeSource but meant for the administrator who creates PVs. -// Exactly one of its members must be set. -type PersistentVolumeSource struct { - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty"` - // AWSElasticBlockStore represents an AWS EBS disk that is attached to a - // kubelet's host machine and then exposed to the pod. - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty"` - // HostPath represents a directory on the host. - // Provisioned by a developer or tester. - // This is useful for single-node development and testing only! - // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. - HostPath *HostPathVolumeSource `json:"hostPath,omitempty"` - // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod - Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty"` - // NFS represents an NFS mount on the host that shares a pod's lifetime - NFS *NFSVolumeSource `json:"nfs,omitempty"` - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime - RBD *RBDVolumeSource `json:"rbd,omitempty"` - // ISCSIVolumeSource represents an ISCSI resource that is attached to a - // kubelet's host machine and then exposed to the pod. - ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty"` - // FlexVolume represents a generic volume resource that is - // provisioned/attached using a exec based plugin. This is an alpha feature and may change in future. - FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty"` - // Cinder represents a cinder volume attached and mounted on kubelets host machine - Cinder *CinderVolumeSource `json:"cinder,omitempty"` - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime - CephFS *CephFSVolumeSource `json:"cephfs,omitempty"` - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - FC *FCVolumeSource `json:"fc,omitempty"` - // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running - Flocker *FlockerVolumeSource `json:"flocker,omitempty"` - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty"` - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty"` -} - -type PersistentVolumeClaimVolumeSource struct { - // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume - ClaimName string `json:"claimName"` - // Optional: Defaults to false (read/write). 
ReadOnly here - // will force the ReadOnly setting in VolumeMounts - ReadOnly bool `json:"readOnly,omitempty"` -} - -// +genclient=true,nonNamespaced=true - -type PersistentVolume struct { - unversioned.TypeMeta `json:",inline"` - ObjectMeta `json:"metadata,omitempty"` - - //Spec defines a persistent volume owned by the cluster - Spec PersistentVolumeSpec `json:"spec,omitempty"` - - // Status represents the current information about persistent volume. - Status PersistentVolumeStatus `json:"status,omitempty"` -} - -type PersistentVolumeSpec struct { - // Resources represents the actual resources of the volume - Capacity ResourceList `json:"capacity"` - // Source represents the location and type of a volume to mount. - PersistentVolumeSource `json:",inline"` - // AccessModes contains all ways the volume can be mounted - AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"` - // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. - // ClaimRef is expected to be non-nil when bound. - // claim.VolumeName is the authoritative bind between PV and PVC. - // When set to non-nil value, PVC.Spec.Selector of the referenced PVC is - // ignored, i.e. labels of this PV do not need to match PVC selector. - ClaimRef *ObjectReference `json:"claimRef,omitempty"` - // Optional: what happens to a persistent volume when released from its claim. - PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty"` -} - -// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes -type PersistentVolumeReclaimPolicy string - -const ( - // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim. - // The volume plugin must support Recycling. - PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle" - // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. - // The volume plugin must support Deletion. - PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" - // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. - // The default policy is Retain. - PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" -) - -type PersistentVolumeStatus struct { - // Phase indicates if a volume is available, bound to a claim, or released by a claim - Phase PersistentVolumePhase `json:"phase,omitempty"` - // A human-readable message indicating details about why the volume is in this state. 
- Message string `json:"message,omitempty"` - // Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI - Reason string `json:"reason,omitempty"` -} - -type PersistentVolumeList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - Items []PersistentVolume `json:"items"` -} - -// +genclient=true - -// PersistentVolumeClaim is a user's request for and claim to a persistent volume -type PersistentVolumeClaim struct { - unversioned.TypeMeta `json:",inline"` - ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the volume requested by a pod author - Spec PersistentVolumeClaimSpec `json:"spec,omitempty"` - - // Status represents the current information about a claim - Status PersistentVolumeClaimStatus `json:"status,omitempty"` -} - -type PersistentVolumeClaimList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - Items []PersistentVolumeClaim `json:"items"` -} - -// PersistentVolumeClaimSpec describes the common attributes of storage devices -// and allows a Source for provider-specific attributes -type PersistentVolumeClaimSpec struct { - // Contains the types of access modes required - AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"` - // A label query over volumes to consider for binding. This selector is - // ignored when VolumeName is set - Selector *unversioned.LabelSelector `json:"selector,omitempty"` - // Resources represents the minimum resources required - Resources ResourceRequirements `json:"resources,omitempty"` - // VolumeName is the binding reference to the PersistentVolume backing this - // claim. When set to non-empty value Selector is not evaluated - VolumeName string `json:"volumeName,omitempty"` -} - -type PersistentVolumeClaimStatus struct { - // Phase represents the current phase of PersistentVolumeClaim - Phase PersistentVolumeClaimPhase `json:"phase,omitempty"` - // AccessModes contains all ways the volume backing the PVC can be mounted - AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty"` - // Represents the actual resources of the underlying volume - Capacity ResourceList `json:"capacity,omitempty"` -} - -type PersistentVolumeAccessMode string - -const ( - // can be mounted read/write mode to exactly 1 host - ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" - // can be mounted in read-only mode to many hosts - ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" - // can be mounted in read/write mode to many hosts - ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany" -) - -type PersistentVolumePhase string - -const ( - // used for PersistentVolumes that are not available - VolumePending PersistentVolumePhase = "Pending" - // used for PersistentVolumes that are not yet bound - // Available volumes are held by the binder and matched to PersistentVolumeClaims - VolumeAvailable PersistentVolumePhase = "Available" - // used for PersistentVolumes that are bound - VolumeBound PersistentVolumePhase = "Bound" - // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted - // released volumes must be recycled before becoming available again - // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource - VolumeReleased PersistentVolumePhase = "Released" - // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim - 
VolumeFailed PersistentVolumePhase = "Failed" -) - -type PersistentVolumeClaimPhase string - -const ( - // used for PersistentVolumeClaims that are not yet bound - ClaimPending PersistentVolumeClaimPhase = "Pending" - // used for PersistentVolumeClaims that are bound - ClaimBound PersistentVolumeClaimPhase = "Bound" - // used for PersistentVolumeClaims that lost their underlying - // PersistentVolume. The claim was bound to a PersistentVolume and this - // volume does not exist any longer and all data on it was lost. - ClaimLost PersistentVolumeClaimPhase = "Lost" -) - -// Represents a host path mapped into a pod. -// Host path volumes do not support ownership management or SELinux relabeling. -type HostPathVolumeSource struct { - Path string `json:"path"` -} - -// Represents an empty directory for a pod. -// Empty directory volumes support ownership management and SELinux relabeling. -type EmptyDirVolumeSource struct { - // TODO: Longer term we want to represent the selection of underlying - // media more like a scheduling problem - user says what traits they - // need, we give them a backing store that satisifies that. For now - // this will cover the most common needs. - // Optional: what type of storage medium should back this directory. - // The default is "" which means to use the node's default medium. - Medium StorageMedium `json:"medium,omitempty"` -} - -// StorageMedium defines ways that storage can be allocated to a volume. -type StorageMedium string - -const ( - StorageMediumDefault StorageMedium = "" // use whatever the default is for the node - StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) -) - -// Protocol defines network protocols supported for things like container ports. -type Protocol string - -const ( - // ProtocolTCP is the TCP protocol. - ProtocolTCP Protocol = "TCP" - // ProtocolUDP is the UDP protocol. - ProtocolUDP Protocol = "UDP" -) - -// Represents a Persistent Disk resource in Google Compute Engine. -// -// A GCE PD must exist before mounting to a container. The disk must -// also be in the same GCE project and zone as the kubelet. A GCE PD -// can only be mounted as read/write once or read-only many times. GCE -// PDs support ownership management and SELinux relabeling. -type GCEPersistentDiskVolumeSource struct { - // Unique name of the PD resource. Used to identify the disk in GCE - PDName string `json:"pdName"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - FSType string `json:"fsType,omitempty"` - // Optional: Partition on the disk to mount. - // If omitted, kubelet will attempt to mount the device name. - // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. - Partition int32 `json:"partition,omitempty"` - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - ReadOnly bool `json:"readOnly,omitempty"` -} - -// Represents an ISCSI disk. -// ISCSI volumes can only be mounted as read/write once. -// ISCSI volumes support ownership management and SELinux relabeling. 
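Pulling the preceding pieces together (capacity, access modes, reclaim policy, and a single PersistentVolumeSource member), a sketch of a PersistentVolume; it uses the HostPath source, which as noted above is only suitable for single-node development and testing. Same package assumption as above; api.ResourceStorage and resource.MustParse are assumed from the upstream api and api/resource packages.

package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
)

// devPV sketches a PersistentVolume for local development: 1Gi capacity,
// mountable read/write by a single node, retained after its claim is released.
func devPV() api.PersistentVolume {
	return api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{Name: "dev-pv"},
		Spec: api.PersistentVolumeSpec{
			Capacity: api.ResourceList{
				api.ResourceStorage: resource.MustParse("1Gi"),
			},
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{Path: "/tmp/dev-pv"},
			},
			PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRetain,
		},
	}
}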
-type ISCSIVolumeSource struct { - // Required: iSCSI target portal - // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) - TargetPortal string `json:"targetPortal,omitempty"` - // Required: target iSCSI Qualified Name - IQN string `json:"iqn,omitempty"` - // Required: iSCSI target lun number - Lun int32 `json:"lun,omitempty"` - // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. - ISCSIInterface string `json:"iscsiInterface,omitempty"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - FSType string `json:"fsType,omitempty"` - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - ReadOnly bool `json:"readOnly,omitempty"` -} - -// Represents a Fibre Channel volume. -// Fibre Channel volumes can only be mounted as read/write once. -// Fibre Channel volumes support ownership management and SELinux relabeling. -type FCVolumeSource struct { - // Required: FC target world wide names (WWNs) - TargetWWNs []string `json:"targetWWNs"` - // Required: FC target lun number - Lun *int32 `json:"lun"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - FSType string `json:"fsType,omitempty"` - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - ReadOnly bool `json:"readOnly,omitempty"` -} - -// FlexVolume represents a generic volume resource that is -// provisioned/attached using a exec based plugin. This is an alpha feature and may change in future. -type FlexVolumeSource struct { - // Driver is the name of the driver to use for this volume. - Driver string `json:"driver"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - FSType string `json:"fsType,omitempty"` - // Optional: SecretRef is reference to the secret object containing - // sensitive information to pass to the plugin scripts. This may be - // empty if no secret object is specified. If the secret object - // contains more than one secret, all secrets are passed to the plugin - // scripts. - SecretRef *LocalObjectReference `json:"secretRef,omitempty"` - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - ReadOnly bool `json:"readOnly,omitempty"` - // Optional: Extra driver options if any. - Options map[string]string `json:"options,omitempty"` -} - -// Represents a Persistent Disk resource in AWS. -// -// An AWS EBS disk must exist before mounting to a container. The disk -// must also be in the same AWS zone as the kubelet. A AWS EBS disk -// can only be mounted as read/write once. AWS EBS volumes support -// ownership management and SELinux relabeling. -type AWSElasticBlockStoreVolumeSource struct { - // Unique id of the persistent disk resource. Used to identify the disk in AWS - VolumeID string `json:"volumeID"` - // Filesystem type to mount. 
- // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - FSType string `json:"fsType,omitempty"` - // Optional: Partition on the disk to mount. - // If omitted, kubelet will attempt to mount the device name. - // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. - Partition int32 `json:"partition,omitempty"` - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - ReadOnly bool `json:"readOnly,omitempty"` -} - -// Represents a volume that is populated with the contents of a git repository. -// Git repo volumes do not support ownership management. -// Git repo volumes support SELinux relabeling. -type GitRepoVolumeSource struct { - // Repository URL - Repository string `json:"repository"` - // Commit hash, this is optional - Revision string `json:"revision,omitempty"` - // Clone target, this is optional - // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the - // git repository. Otherwise, if specified, the volume will contain the git repository in - // the subdirectory with the given name. - Directory string `json:"directory,omitempty"` - // TODO: Consider credentials here. -} - -// Adapts a Secret into a volume. -// -// The contents of the target Secret's Data field will be presented in a volume -// as files using the keys in the Data field as the file names. -// Secret volumes support ownership management and SELinux relabeling. -type SecretVolumeSource struct { - // Name of the secret in the pod's namespace to use. - SecretName string `json:"secretName,omitempty"` - // If unspecified, each key-value pair in the Data field of the referenced - // Secret will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the Secret, - // the volume setup will error. Paths must be relative and may not contain - // the '..' path or start with '..'. - Items []KeyToPath `json:"items,omitempty"` -} - -// Represents an NFS mount that lasts the lifetime of a pod. -// NFS volumes do not support ownership management or SELinux relabeling. -type NFSVolumeSource struct { - // Server is the hostname or IP address of the NFS server - Server string `json:"server"` - - // Path is the exported NFS share - Path string `json:"path"` - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the NFS export to be mounted with read-only permissions - ReadOnly bool `json:"readOnly,omitempty"` -} - -// Represents a Glusterfs mount that lasts the lifetime of a pod. -// Glusterfs volumes do not support ownership management or SELinux relabeling. -type GlusterfsVolumeSource struct { - // Required: EndpointsName is the endpoint name that details Glusterfs topology - EndpointsName string `json:"endpoints"` - - // Required: Path is the Glusterfs volume path - Path string `json:"path"` - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the Glusterfs to be mounted with read-only permissions - ReadOnly bool `json:"readOnly,omitempty"` -} - -// Represents a Rados Block Device mount that lasts the lifetime of a pod. 
-// RBD volumes support ownership management and SELinux relabeling. -type RBDVolumeSource struct { - // Required: CephMonitors is a collection of Ceph monitors - CephMonitors []string `json:"monitors"` - // Required: RBDImage is the rados image name - RBDImage string `json:"image"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - FSType string `json:"fsType,omitempty"` - // Optional: RadosPool is the rados pool name,default is rbd - RBDPool string `json:"pool,omitempty"` - // Optional: RBDUser is the rados user name, default is admin - RadosUser string `json:"user,omitempty"` - // Optional: Keyring is the path to key ring for RBDUser, default is /etc/ceph/keyring - Keyring string `json:"keyring,omitempty"` - // Optional: SecretRef is name of the authentication secret for RBDUser, default is nil. - SecretRef *LocalObjectReference `json:"secretRef,omitempty"` - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - ReadOnly bool `json:"readOnly,omitempty"` -} - -// Represents a cinder volume resource in Openstack. A Cinder volume -// must exist before mounting to a container. The volume must also be -// in the same region as the kubelet. Cinder volumes support ownership -// management and SELinux relabeling. -type CinderVolumeSource struct { - // Unique id of the volume used to identify the cinder volume - VolumeID string `json:"volumeID"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - FSType string `json:"fsType,omitempty"` - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - ReadOnly bool `json:"readOnly,omitempty"` -} - -// Represents a Ceph Filesystem mount that lasts the lifetime of a pod -// Cephfs volumes do not support ownership management or SELinux relabeling. -type CephFSVolumeSource struct { - // Required: Monitors is a collection of Ceph monitors - Monitors []string `json:"monitors"` - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / - Path string `json:"path,omitempty"` - // Optional: User is the rados user name, default is admin - User string `json:"user,omitempty"` - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - SecretFile string `json:"secretFile,omitempty"` - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - SecretRef *LocalObjectReference `json:"secretRef,omitempty"` - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - ReadOnly bool `json:"readOnly,omitempty"` -} - -// Represents a Flocker volume mounted by the Flocker agent. -// Flocker volumes do not support ownership management or SELinux relabeling. -type FlockerVolumeSource struct { - // Required: the volume name. This is going to be store on metadata -> name on the payload for Flocker - DatasetName string `json:"datasetName"` -} - -// Represents a volume containing downward API info. -// Downward API volumes support ownership management and SELinux relabeling. 
-type DownwardAPIVolumeSource struct { - // Items is a list of DownwardAPIVolume file - Items []DownwardAPIVolumeFile `json:"items,omitempty"` -} - -// Represents a single file containing information from the downward API -type DownwardAPIVolumeFile struct { - // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' - Path string `json:"path"` - // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. - FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty"` - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty"` -} - -// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. -type AzureFileVolumeSource struct { - // the name of secret that contains Azure Storage Account Name and Key - SecretName string `json:"secretName"` - // Share Name - ShareName string `json:"shareName"` - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - ReadOnly bool `json:"readOnly,omitempty"` -} - -// Represents a vSphere volume resource. -type VsphereVirtualDiskVolumeSource struct { - // Path that identifies vSphere volume vmdk - VolumePath string `json:"volumePath"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - FSType string `json:"fsType,omitempty"` -} - -// Adapts a ConfigMap into a volume. -// -// The contents of the target ConfigMap's Data field will be presented in a -// volume as files using the keys in the Data field as the file names, unless -// the items element is populated with specific mappings of keys to paths. -// ConfigMap volumes support ownership management and SELinux relabeling. -type ConfigMapVolumeSource struct { - LocalObjectReference `json:",inline"` - // If unspecified, each key-value pair in the Data field of the referenced - // ConfigMap will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error. Paths must be relative and may not contain - // the '..' path or start with '..'. - Items []KeyToPath `json:"items,omitempty"` -} - -// Maps a string key to a path within a volume. -type KeyToPath struct { - // The key to project. - Key string `json:"key"` - - // The relative path of the file to map the key to. - // May not be an absolute path. - // May not contain the path element '..'. - // May not start with the string '..'. - Path string `json:"path"` -} - -// ContainerPort represents a network port in a single container -type ContainerPort struct { - // Optional: If specified, this must be an IANA_SVC_NAME Each named port - // in a pod must have a unique name. - Name string `json:"name,omitempty"` - // Optional: If specified, this must be a valid port number, 0 < x < 65536. - // If HostNetwork is specified, this must match ContainerPort. 
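A sketch of the ConfigMap-as-volume adaptation described above, projecting one key to an explicit path via KeyToPath. Same package assumption as above; LocalObjectReference's Name field and the ConfigMap name are assumptions for illustration.

package example

import "k8s.io/kubernetes/pkg/api"

// configVolume projects a single key of a ConfigMap into the volume; unlisted
// keys are not materialized, per the ConfigMapVolumeSource comments above.
func configVolume() api.Volume {
	return api.Volume{
		Name: "app-config",
		VolumeSource: api.VolumeSource{
			ConfigMap: &api.ConfigMapVolumeSource{
				LocalObjectReference: api.LocalObjectReference{Name: "kubewatch-config"},
				Items: []api.KeyToPath{
					{Key: "config.yaml", Path: "kubewatch/config.yaml"}, // relative, must not contain '..'
				},
			},
		},
	}
}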
- HostPort int32 `json:"hostPort,omitempty"` - // Required: This must be a valid port number, 0 < x < 65536. - ContainerPort int32 `json:"containerPort"` - // Required: Supports "TCP" and "UDP". - Protocol Protocol `json:"protocol,omitempty"` - // Optional: What host IP to bind the external port to. - HostIP string `json:"hostIP,omitempty"` -} - -// VolumeMount describes a mounting of a Volume within a container. -type VolumeMount struct { - // Required: This must match the Name of a Volume [above]. - Name string `json:"name"` - // Optional: Defaults to false (read-write). - ReadOnly bool `json:"readOnly,omitempty"` - // Required. Must not contain ':'. - MountPath string `json:"mountPath"` - // Path within the volume from which the container's volume should be mounted. - // Defaults to "" (volume's root). - SubPath string `json:"subPath,omitempty"` -} - -// EnvVar represents an environment variable present in a Container. -type EnvVar struct { - // Required: This must be a C_IDENTIFIER. - Name string `json:"name"` - // Optional: no more than one of the following may be specified. - // Optional: Defaults to ""; variable references $(VAR_NAME) are expanded - // using the previous defined environment variables in the container and - // any service environment variables. If a variable cannot be resolved, - // the reference in the input string will be unchanged. The $(VAR_NAME) - // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped - // references will never be expanded, regardless of whether the variable - // exists or not. - Value string `json:"value,omitempty"` - // Optional: Specifies a source the value of this var should come from. - ValueFrom *EnvVarSource `json:"valueFrom,omitempty"` -} - -// EnvVarSource represents a source for the value of an EnvVar. -// Only one of its fields may be set. -type EnvVarSource struct { - // Selects a field of the pod; only name and namespace are supported. - FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty"` - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty"` - // Selects a key of a ConfigMap. - ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty"` - // Selects a key of a secret in the pod's namespace. - SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty"` -} - -// ObjectFieldSelector selects an APIVersioned field of an object. -type ObjectFieldSelector struct { - // Required: Version of the schema the FieldPath is written in terms of. - // If no value is specified, it will be defaulted to the APIVersion of the - // enclosing object. - APIVersion string `json:"apiVersion"` - // Required: Path of the field to select in the specified API version - FieldPath string `json:"fieldPath"` -} - -// ResourceFieldSelector represents container resources (cpu, memory) and their output format -type ResourceFieldSelector struct { - // Container name: required for volumes, optional for env vars - ContainerName string `json:"containerName,omitempty"` - // Required: resource to select - Resource string `json:"resource"` - // Specifies the output format of the exposed resources, defaults to "1" - Divisor resource.Quantity `json:"divisor,omitempty"` -} - -// Selects a key from a ConfigMap. -type ConfigMapKeySelector struct { - // The ConfigMap to select from. - LocalObjectReference `json:",inline"` - // The key to select. 
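The EnvVar/EnvVarSource split described above allows both literal values (with $(VAR_NAME) expansion) and values resolved from the pod via the downward API or from ConfigMaps/Secrets. A small sketch, same package assumption as above:

package example

import "k8s.io/kubernetes/pkg/api"

// containerEnv shows a downward-API value followed by a literal value that
// expands a previously defined variable via the $(VAR_NAME) syntax.
func containerEnv() []api.EnvVar {
	return []api.EnvVar{
		{
			Name: "POD_NAME",
			ValueFrom: &api.EnvVarSource{
				FieldRef: &api.ObjectFieldSelector{FieldPath: "metadata.name"},
			},
		},
		{
			Name:  "GREETING",
			Value: "hello from $(POD_NAME)", // expanded; $$(POD_NAME) would stay literal
		},
	}
}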
- Key string `json:"key"` -} - -// SecretKeySelector selects a key of a Secret. -type SecretKeySelector struct { - // The name of the secret in the pod's namespace to select from. - LocalObjectReference `json:",inline"` - // The key of the secret to select from. Must be a valid secret key. - Key string `json:"key"` -} - -// HTTPHeader describes a custom header to be used in HTTP probes -type HTTPHeader struct { - // The header field name - Name string `json:"name"` - // The header field value - Value string `json:"value"` -} - -// HTTPGetAction describes an action based on HTTP Get requests. -type HTTPGetAction struct { - // Optional: Path to access on the HTTP server. - Path string `json:"path,omitempty"` - // Required: Name or number of the port to access on the container. - Port intstr.IntOrString `json:"port,omitempty"` - // Optional: Host name to connect to, defaults to the pod IP. You - // probably want to set "Host" in httpHeaders instead. - Host string `json:"host,omitempty"` - // Optional: Scheme to use for connecting to the host, defaults to HTTP. - Scheme URIScheme `json:"scheme,omitempty"` - // Optional: Custom headers to set in the request. HTTP allows repeated headers. - HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty"` -} - -// URIScheme identifies the scheme used for connection to a host for Get actions -type URIScheme string - -const ( - // URISchemeHTTP means that the scheme used will be http:// - URISchemeHTTP URIScheme = "HTTP" - // URISchemeHTTPS means that the scheme used will be https:// - URISchemeHTTPS URIScheme = "HTTPS" -) - -// TCPSocketAction describes an action based on opening a socket -type TCPSocketAction struct { - // Required: Port to connect to. - Port intstr.IntOrString `json:"port,omitempty"` -} - -// ExecAction describes a "run in container" action. -type ExecAction struct { - // Command is the command line to execute inside the container, the working directory for the - // command is root ('/') in the container's filesystem. The command is simply exec'd, it is - // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - // a shell, you need to explicitly call out to that shell. - Command []string `json:"command,omitempty"` -} - -// Probe describes a health check to be performed against a container to determine whether it is -// alive or ready to receive traffic. -type Probe struct { - // The action taken to determine the health of a container - Handler `json:",inline"` - // Length of time before health checking is activated. In seconds. - InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty"` - // Length of time before health checking times out. In seconds. - TimeoutSeconds int32 `json:"timeoutSeconds,omitempty"` - // How often (in seconds) to perform the probe. - PeriodSeconds int32 `json:"periodSeconds,omitempty"` - // Minimum consecutive successes for the probe to be considered successful after having failed. - // Must be 1 for liveness. - SuccessThreshold int32 `json:"successThreshold,omitempty"` - // Minimum consecutive failures for the probe to be considered failed after having succeeded. - FailureThreshold int32 `json:"failureThreshold,omitempty"` -} - -// PullPolicy describes a policy for if/when to pull a container image -type PullPolicy string - -const ( - // PullAlways means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. - PullAlways PullPolicy = "Always" - // PullNever means that kubelet never pulls an image, but only uses a local image. 
Container will fail if the image isn't present
- PullNever PullPolicy = "Never"
- // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.
- PullIfNotPresent PullPolicy = "IfNotPresent"
-)
-
-// Capability represents a POSIX capability type
-type Capability string
-
-// Capabilities represent POSIX capabilities that can be added to or removed from a running container.
-type Capabilities struct {
- // Added capabilities
- Add []Capability `json:"add,omitempty"`
- // Removed capabilities
- Drop []Capability `json:"drop,omitempty"`
-}
-
-// ResourceRequirements describes the compute resource requirements.
-type ResourceRequirements struct {
- // Limits describes the maximum amount of compute resources allowed.
- Limits ResourceList `json:"limits,omitempty"`
- // Requests describes the minimum amount of compute resources required.
- // If Request is omitted for a container, it defaults to Limits if that is explicitly specified,
- // otherwise to an implementation-defined value
- Requests ResourceList `json:"requests,omitempty"`
-}
-
-// Container represents a single container that is expected to be run on the host.
-type Container struct {
- // Required: This must be a DNS_LABEL. Each container in a pod must
- // have a unique name.
- Name string `json:"name"`
- // Required.
- Image string `json:"image"`
- // Optional: The docker image's entrypoint is used if this is not provided; cannot be updated.
- // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
- // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
- // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
- // regardless of whether the variable exists or not.
- Command []string `json:"command,omitempty"`
- // Optional: The docker image's cmd is used if this is not provided; cannot be updated.
- // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
- // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
- // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
- // regardless of whether the variable exists or not.
- Args []string `json:"args,omitempty"`
- // Optional: Defaults to Docker's default.
- WorkingDir string `json:"workingDir,omitempty"`
- Ports []ContainerPort `json:"ports,omitempty"`
- Env []EnvVar `json:"env,omitempty"`
- // Compute resource requirements.
- Resources ResourceRequirements `json:"resources,omitempty"`
- VolumeMounts []VolumeMount `json:"volumeMounts,omitempty"`
- LivenessProbe *Probe `json:"livenessProbe,omitempty"`
- ReadinessProbe *Probe `json:"readinessProbe,omitempty"`
- Lifecycle *Lifecycle `json:"lifecycle,omitempty"`
- // Required.
- TerminationMessagePath string `json:"terminationMessagePath,omitempty"`
- // Required: Policy for pulling images for this container
- ImagePullPolicy PullPolicy `json:"imagePullPolicy"`
- // Optional: SecurityContext defines the security options the container should be run with.
- // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
- SecurityContext *SecurityContext `json:"securityContext,omitempty"`
-
- // Variables for interactive containers, these have very specialized use-cases (e.g. debugging)
- // and shouldn't be used for general purpose containers.
- Stdin bool `json:"stdin,omitempty"` - StdinOnce bool `json:"stdinOnce,omitempty"` - TTY bool `json:"tty,omitempty"` -} - -// Handler defines a specific action that should be taken -// TODO: pass structured data to these actions, and document that data here. -type Handler struct { - // One and only one of the following should be specified. - // Exec specifies the action to take. - Exec *ExecAction `json:"exec,omitempty"` - // HTTPGet specifies the http request to perform. - HTTPGet *HTTPGetAction `json:"httpGet,omitempty"` - // TCPSocket specifies an action involving a TCP port. - // TODO: implement a realistic TCP lifecycle hook - TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty"` -} - -// Lifecycle describes actions that the management system should take in response to container lifecycle -// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks -// until the action is complete, unless the container process fails, in which case the handler is aborted. -type Lifecycle struct { - // PostStart is called immediately after a container is created. If the handler fails, the container - // is terminated and restarted. - PostStart *Handler `json:"postStart,omitempty"` - // PreStop is called immediately before a container is terminated. The reason for termination is - // passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. - PreStop *Handler `json:"preStop,omitempty"` -} - -// The below types are used by kube_client and api_server. - -type ConditionStatus string - -// These are valid condition statuses. "ConditionTrue" means a resource is in the condition; -// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes -// can't decide if a resource is in the condition or not. In the future, we could add other -// intermediate conditions, e.g. ConditionDegraded. -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -type ContainerStateWaiting struct { - // A brief CamelCase string indicating details about why the container is in waiting state. - Reason string `json:"reason,omitempty"` - // A human-readable message indicating details about why the container is in waiting state. - Message string `json:"message,omitempty"` -} - -type ContainerStateRunning struct { - StartedAt unversioned.Time `json:"startedAt,omitempty"` -} - -type ContainerStateTerminated struct { - ExitCode int32 `json:"exitCode"` - Signal int32 `json:"signal,omitempty"` - Reason string `json:"reason,omitempty"` - Message string `json:"message,omitempty"` - StartedAt unversioned.Time `json:"startedAt,omitempty"` - FinishedAt unversioned.Time `json:"finishedAt,omitempty"` - ContainerID string `json:"containerID,omitempty"` -} - -// ContainerState holds a possible state of container. -// Only one of its members may be specified. -// If none of them is specified, the default one is ContainerStateWaiting. -type ContainerState struct { - Waiting *ContainerStateWaiting `json:"waiting,omitempty"` - Running *ContainerStateRunning `json:"running,omitempty"` - Terminated *ContainerStateTerminated `json:"terminated,omitempty"` -} - -type ContainerStatus struct { - // Each container in a pod must have a unique name. 
- Name string `json:"name"` - State ContainerState `json:"state,omitempty"` - LastTerminationState ContainerState `json:"lastState,omitempty"` - // Ready specifies whether the container has passed its readiness check. - Ready bool `json:"ready"` - // Note that this is calculated from dead containers. But those containers are subject to - // garbage collection. This value will get capped at 5 by GC. - RestartCount int32 `json:"restartCount"` - Image string `json:"image"` - ImageID string `json:"imageID"` - ContainerID string `json:"containerID,omitempty"` -} - -// PodPhase is a label for the condition of a pod at the current time. -type PodPhase string - -// These are the valid statuses of pods. -const ( - // PodPending means the pod has been accepted by the system, but one or more of the containers - // has not been started. This includes time before being bound to a node, as well as time spent - // pulling images onto the host. - PodPending PodPhase = "Pending" - // PodRunning means the pod has been bound to a node and all of the containers have been started. - // At least one container is still running or is in the process of being restarted. - PodRunning PodPhase = "Running" - // PodSucceeded means that all containers in the pod have voluntarily terminated - // with a container exit code of 0, and the system is not going to restart any of these containers. - PodSucceeded PodPhase = "Succeeded" - // PodFailed means that all containers in the pod have terminated, and at least one container has - // terminated in a failure (exited with a non-zero exit code or was stopped by the system). - PodFailed PodPhase = "Failed" - // PodUnknown means that for some reason the state of the pod could not be obtained, typically due - // to an error in communicating with the host of the pod. - PodUnknown PodPhase = "Unknown" -) - -type PodConditionType string - -// These are valid conditions of pod. -const ( - // PodScheduled represents status of the scheduling process for this pod. - PodScheduled PodConditionType = "PodScheduled" - // PodReady means the pod is able to service requests and should be added to the - // load balancing pools of all matching services. - PodReady PodConditionType = "Ready" - // PodInitialized means that all init containers in the pod have started successfully. - PodInitialized PodConditionType = "Initialized" -) - -type PodCondition struct { - Type PodConditionType `json:"type"` - Status ConditionStatus `json:"status"` - LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty"` - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"` - Reason string `json:"reason,omitempty"` - Message string `json:"message,omitempty"` -} - -// RestartPolicy describes how the container should be restarted. -// Only one of the following restart policies may be specified. -// If none of the following policies is specified, the default one -// is RestartPolicyAlways. -type RestartPolicy string - -const ( - RestartPolicyAlways RestartPolicy = "Always" - RestartPolicyOnFailure RestartPolicy = "OnFailure" - RestartPolicyNever RestartPolicy = "Never" -) - -// PodList is a list of Pods. -type PodList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - - Items []Pod `json:"items"` -} - -// DNSPolicy defines how a pod's DNS will be configured. 
-type DNSPolicy string
-
-const (
- // DNSClusterFirst indicates that the pod should use cluster DNS
- // first, if it is available, then fall back on the default (as
- // determined by kubelet) DNS settings.
- DNSClusterFirst DNSPolicy = "ClusterFirst"
-
- // DNSDefault indicates that the pod should use the default (as
- // determined by kubelet) DNS settings.
- DNSDefault DNSPolicy = "Default"
-)
-
-// A node selector represents the union of the results of one or more label queries
-// over a set of nodes; that is, it represents the OR of the selectors represented
-// by the node selector terms.
-type NodeSelector struct {
- //Required. A list of node selector terms. The terms are ORed.
- NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms"`
-}
-
-// A null or empty node selector term matches no objects.
-type NodeSelectorTerm struct {
- //Required. A list of node selector requirements. The requirements are ANDed.
- MatchExpressions []NodeSelectorRequirement `json:"matchExpressions"`
-}
-
-// A node selector requirement is a selector that contains values, a key, and an operator
-// that relates the key and values.
-type NodeSelectorRequirement struct {
- // The label key that the selector applies to.
- Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key"`
- // Represents a key's relationship to a set of values.
- // Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
- Operator NodeSelectorOperator `json:"operator"`
- // An array of string values. If the operator is In or NotIn,
- // the values array must be non-empty. If the operator is Exists or DoesNotExist,
- // the values array must be empty. If the operator is Gt or Lt, the values
- // array must have a single element, which will be interpreted as an integer.
- // This array is replaced during a strategic merge patch.
- Values []string `json:"values,omitempty"`
-}
-
-// A node selector operator is the set of operators that can be used in
-// a node selector requirement.
-type NodeSelectorOperator string
-
-const (
- NodeSelectorOpIn NodeSelectorOperator = "In"
- NodeSelectorOpNotIn NodeSelectorOperator = "NotIn"
- NodeSelectorOpExists NodeSelectorOperator = "Exists"
- NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist"
- NodeSelectorOpGt NodeSelectorOperator = "Gt"
- NodeSelectorOpLt NodeSelectorOperator = "Lt"
-)
-
-// Affinity is a group of affinity scheduling rules.
-type Affinity struct {
- // Describes node affinity scheduling rules for the pod.
- NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty"`
- // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
- PodAffinity *PodAffinity `json:"podAffinity,omitempty"`
- // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
- PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty"`
-}
-
-// Pod affinity is a group of inter pod affinity scheduling rules.
-type PodAffinity struct {
- // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
- // If the affinity requirements specified by this field are not met at
- // scheduling time, the pod will not be scheduled onto the node.
- // If the affinity requirements specified by this field cease to be met
- // at some point during pod execution (e.g. due to a pod label update), the
- // system will try to eventually evict the pod from its node.
- // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` -} - -// Pod anti affinity is a group of inter pod anti affinity scheduling rules. -type PodAntiAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` - // The scheduler will prefer to schedule pods to nodes that satisfy - // the anti-affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. 
- // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling anti-affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` -} - -// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) -type WeightedPodAffinityTerm struct { - // weight associated with matching the corresponding podAffinityTerm, - // in the range 1-100. - Weight int `json:"weight"` - // Required. A pod affinity term, associated with the corresponding weight. - PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm"` -} - -// Defines a set of pods (namely those matching the labelSelector -// relative to the given namespace(s)) that this pod should be -// co-located (affinity) or not co-located (anti-affinity) with, -// where co-located is defined as running on a node whose value of -// the label with key <topologyKey> matches that of any node on which -// a pod of the set of pods is running. -type PodAffinityTerm struct { - // A label query over a set of resources, in this case pods. - LabelSelector *unversioned.LabelSelector `json:"labelSelector,omitempty"` - // namespaces specifies which namespaces the labelSelector applies to (matches against); - // nil list means "this pod's namespace," empty list means "all namespaces" - // The json tag here is not "omitempty" since we need to distinguish nil and empty. - // See https://golang.org/pkg/encoding/json/#Marshal for more details. - Namespaces []string `json:"namespaces"` - // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - // the labelSelector in the specified namespaces, where co-located is defined as running on a node - // whose value of the label with key topologyKey matches that of any node on which any of the - // selected pods is running. - // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" - // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); - // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. - TopologyKey string `json:"topologyKey,omitempty"` -} - -// Node affinity is a group of node affinity scheduling rules. -type NodeAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to an update), the system - // will try to eventually evict the pod from its node. - // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` - - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. 
due to an update), the system
- // may or may not try to eventually evict the pod from its node.
- RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
- // The scheduler will prefer to schedule pods to nodes that satisfy
- // the affinity expressions specified by this field, but it may choose
- // a node that violates one or more of the expressions. The node that is
- // most preferred is the one with the greatest sum of weights, i.e.
- // for each node that meets all of the scheduling requirements (resource
- // request, requiredDuringScheduling affinity expressions, etc.),
- // compute a sum by iterating through the elements of this field and adding
- // "weight" to the sum if the node matches the corresponding matchExpressions; the
- // node(s) with the highest sum are the most preferred.
- PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"`
-}
-
-// An empty preferred scheduling term matches all objects with implicit weight 0
-// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
-type PreferredSchedulingTerm struct {
- // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
- Weight int32 `json:"weight"`
- // A node selector term, associated with the corresponding weight.
- Preference NodeSelectorTerm `json:"preference"`
-}
-
-// The node this Taint is attached to has the effect "effect" on
-// any pod that does not tolerate the Taint.
-type Taint struct {
- // Required. The taint key to be applied to a node.
- Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key"`
- // Required. The taint value corresponding to the taint key.
- Value string `json:"value,omitempty"`
- // Required. The effect of the taint on pods
- // that do not tolerate the taint.
- // Valid effects are NoSchedule and PreferNoSchedule.
- Effect TaintEffect `json:"effect"`
-}
-
-type TaintEffect string
-
-const (
- // Do not allow new pods to schedule onto the node unless they tolerate the taint,
- // but allow all pods submitted to Kubelet without going through the scheduler
- // to start, and allow all already-running pods to continue running.
- // Enforced by the scheduler.
- TaintEffectNoSchedule TaintEffect = "NoSchedule"
- // Like TaintEffectNoSchedule, but the scheduler tries not to schedule
- // new pods onto the node, rather than prohibiting new pods from scheduling
- // onto the node entirely. Enforced by the scheduler.
- TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
- // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
- // Do not allow new pods to schedule onto the node unless they tolerate the taint,
- // do not allow pods to start on Kubelet unless they tolerate the taint,
- // but allow all already-running pods to continue running.
- // Enforced by the scheduler and Kubelet.
- // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
- // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
- // Do not allow new pods to schedule onto the node unless they tolerate the taint,
- // do not allow pods to start on Kubelet unless they tolerate the taint,
- // and evict any already-running pods that do not tolerate the taint.
- // Enforced by the scheduler and Kubelet.
- // TaintEffectNoScheduleNoAdmitNoExecute = "NoScheduleNoAdmitNoExecute" -) - -// The pod this Toleration is attached to tolerates any taint that matches -// the triple <key,value,effect> using the matching operator <operator>. -type Toleration struct { - // Required. Key is the taint key that the toleration applies to. - Key string `json:"key,omitempty" patchStrategy:"merge" patchMergeKey:"key"` - // operator represents a key's relationship to the value. - // Valid operators are Exists and Equal. Defaults to Equal. - // Exists is equivalent to wildcard for value, so that a pod can - // tolerate all taints of a particular category. - Operator TolerationOperator `json:"operator,omitempty"` - // Value is the taint value the toleration matches to. - // If the operator is Exists, the value should be empty, otherwise just a regular string. - Value string `json:"value,omitempty"` - // Effect indicates the taint effect to match. Empty means match all taint effects. - // When specified, allowed values are NoSchedule and PreferNoSchedule. - Effect TaintEffect `json:"effect,omitempty"` - // TODO: For forgiveness (#1574), we'd eventually add at least a grace period - // here, and possibly an occurrence threshold and period. -} - -// A toleration operator is the set of operators that can be used in a toleration. -type TolerationOperator string - -const ( - TolerationOpExists TolerationOperator = "Exists" - TolerationOpEqual TolerationOperator = "Equal" -) - -// PodSpec is a description of a pod -type PodSpec struct { - Volumes []Volume `json:"volumes"` - // List of initialization containers belonging to the pod. - InitContainers []Container `json:"-"` - // List of containers belonging to the pod. - Containers []Container `json:"containers"` - RestartPolicy RestartPolicy `json:"restartPolicy,omitempty"` - // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - // Value must be non-negative integer. The value zero indicates delete immediately. - // If this value is nil, the default grace period will be used instead. - // The grace period is the duration in seconds after the processes running in the pod are sent - // a termination signal and the time when the processes are forcibly halted with a kill signal. - // Set this value longer than the expected cleanup time for your process. - TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"` - // Optional duration in seconds relative to the StartTime that the pod may be active on a node - // before the system actively tries to terminate the pod; value must be positive integer - ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"` - // Required: Set DNS policy. - DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty"` - // NodeSelector is a selector which must be true for the pod to fit on a node - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - - // ServiceAccountName is the name of the ServiceAccount to use to run this pod - // The pod will be allowed to use secrets referenced by the ServiceAccount - ServiceAccountName string `json:"serviceAccountName"` - - // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. - NodeName string `json:"nodeName,omitempty"` - // SecurityContext holds pod-level security attributes and common container settings. - // Optional: Defaults to empty. 
See type description for default values of each field. - SecurityContext *PodSecurityContext `json:"securityContext,omitempty"` - // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - // If specified, these secrets will be passed to individual puller implementations for them to use. For example, - // in the case of docker, only DockerConfig type secrets are honored. - ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty"` - // Specifies the hostname of the Pod. - // If not specified, the pod's hostname will be set to a system-defined value. - Hostname string `json:"hostname,omitempty"` - // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". - // If not specified, the pod will not have a domainname at all. - Subdomain string `json:"subdomain,omitempty"` -} - -// PodSecurityContext holds pod-level security attributes and common container settings. -// Some fields are also present in container.securityContext. Field values of -// container.securityContext take precedence over field values of PodSecurityContext. -type PodSecurityContext struct { - // Use the host's network namespace. If this option is set, the ports that will be - // used must be specified. - // Optional: Default to false - HostNetwork bool `json:"hostNetwork,omitempty"` - // Use the host's pid namespace. - // Optional: Default to false. - HostPID bool `json:"hostPID,omitempty"` - // Use the host's ipc namespace. - // Optional: Default to false. - HostIPC bool `json:"hostIPC,omitempty"` - // The SELinux context to be applied to all containers. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in SecurityContext. If set in - // both SecurityContext and PodSecurityContext, the value specified in SecurityContext - // takes precedence for that container. - SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty"` - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence - // for that container. - RunAsUser *int64 `json:"runAsUser,omitempty"` - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"` - // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID. If unspecified, no groups will be added to - // any container. - SupplementalGroups []int64 `json:"supplementalGroups,omitempty"` - // A special supplemental group that applies to all containers in a pod. - // Some volume types allow the Kubelet to change the ownership of that volume - // to be owned by the pod: - // - // 1. The owning GID will be the FSGroup - // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) - // 3. 
The permission bits are OR'd with rw-rw---- - // - // If unset, the Kubelet will not modify the ownership and permissions of any volume. - FSGroup *int64 `json:"fsGroup,omitempty"` -} - -// PodStatus represents information about the status of a pod. Status may trail the actual -// state of a system. -type PodStatus struct { - Phase PodPhase `json:"phase,omitempty"` - Conditions []PodCondition `json:"conditions,omitempty"` - // A human readable message indicating details about why the pod is in this state. - Message string `json:"message,omitempty"` - // A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk' - Reason string `json:"reason,omitempty"` - - HostIP string `json:"hostIP,omitempty"` - PodIP string `json:"podIP,omitempty"` - - // Date and time at which the object was acknowledged by the Kubelet. - // This is before the Kubelet pulled the container image(s) for the pod. - StartTime *unversioned.Time `json:"startTime,omitempty"` - - // The list has one entry per init container in the manifest. The most recent successful - // init container will have ready = true, the most recently started container will have - // startTime set. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses - InitContainerStatuses []ContainerStatus `json:"-"` - // The list has one entry per container in the manifest. Each entry is - // currently the output of `docker inspect`. This output format is *not* - // final and should not be relied upon. - // TODO: Make real decisions about what our info should look like. Re-enable fuzz test - // when we have done this. - ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty"` -} - -// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded -type PodStatusResult struct { - unversioned.TypeMeta `json:",inline"` - ObjectMeta `json:"metadata,omitempty"` - // Status represents the current information about a pod. This data may not be up - // to date. - Status PodStatus `json:"status,omitempty"` -} - -// +genclient=true - -// Pod is a collection of containers, used as either input (create, update) or as output (list, get). -type Pod struct { - unversioned.TypeMeta `json:",inline"` - ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the behavior of a pod. - Spec PodSpec `json:"spec,omitempty"` - - // Status represents the current information about a pod. This data may not be up - // to date. - Status PodStatus `json:"status,omitempty"` -} - -// PodTemplateSpec describes the data a pod should have when created from a template -type PodTemplateSpec struct { - // Metadata of the pods created from this template. - ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the behavior of a pod. - Spec PodSpec `json:"spec,omitempty"` -} - -// +genclient=true - -// PodTemplate describes a template for creating copies of a predefined pod. -type PodTemplate struct { - unversioned.TypeMeta `json:",inline"` - ObjectMeta `json:"metadata,omitempty"` - - // Template defines the pods that will be created from this pod template - Template PodTemplateSpec `json:"template,omitempty"` -} - -// PodTemplateList is a list of PodTemplates. -type PodTemplateList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - - Items []PodTemplate `json:"items"` -} - -// ReplicationControllerSpec is the specification of a replication controller. 
-// As the internal representation of a replication controller, it may have either -// a TemplateRef or a Template set. -type ReplicationControllerSpec struct { - // Replicas is the number of desired replicas. - Replicas int32 `json:"replicas"` - - // Selector is a label query over pods that should match the Replicas count. - Selector map[string]string `json:"selector"` - - // TemplateRef is a reference to an object that describes the pod that will be created if - // insufficient replicas are detected. This reference is ignored if a Template is set. - // Must be set before converting to a versioned API object - //TemplateRef *ObjectReference `json:"templateRef,omitempty"` - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. Internally, this takes precedence over a - // TemplateRef. - Template *PodTemplateSpec `json:"template,omitempty"` -} - -// ReplicationControllerStatus represents the current status of a replication -// controller. -type ReplicationControllerStatus struct { - // Replicas is the number of actual replicas. - Replicas int32 `json:"replicas"` - - // The number of pods that have labels matching the labels of the pod template of the replication controller. - FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty"` - - // ObservedGeneration is the most recent generation observed by the controller. - ObservedGeneration int64 `json:"observedGeneration,omitempty"` -} - -// +genclient=true - -// ReplicationController represents the configuration of a replication controller. -type ReplicationController struct { - unversioned.TypeMeta `json:",inline"` - ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the desired behavior of this replication controller. - Spec ReplicationControllerSpec `json:"spec,omitempty"` - - // Status is the current status of this replication controller. This data may be - // out of date by some window of time. - Status ReplicationControllerStatus `json:"status,omitempty"` -} - -// ReplicationControllerList is a collection of replication controllers. -type ReplicationControllerList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - - Items []ReplicationController `json:"items"` -} - -const ( - // ClusterIPNone - do not assign a cluster IP - // no proxying required and no environment variables should be created for pods - ClusterIPNone = "None" -) - -// ServiceList holds a list of services. -type ServiceList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - - Items []Service `json:"items"` -} - -// Session Affinity Type string -type ServiceAffinity string - -const ( - // ServiceAffinityClientIP is the Client IP based. - ServiceAffinityClientIP ServiceAffinity = "ClientIP" - - // ServiceAffinityNone - no session affinity. - ServiceAffinityNone ServiceAffinity = "None" -) - -// Service Type string describes ingress methods for a service -type ServiceType string - -const ( - // ServiceTypeClusterIP means a service will only be accessible inside the - // cluster, via the ClusterIP. - ServiceTypeClusterIP ServiceType = "ClusterIP" - - // ServiceTypeNodePort means a service will be exposed on one port of - // every node, in addition to 'ClusterIP' type. - ServiceTypeNodePort ServiceType = "NodePort" - - // ServiceTypeLoadBalancer means a service will be exposed via an - // external load balancer (if the cloud provider supports it), in addition - // to 'NodePort' type. 
- ServiceTypeLoadBalancer ServiceType = "LoadBalancer"
-)
-
-// ServiceStatus represents the current status of a service
-type ServiceStatus struct {
- // LoadBalancer contains the current status of the load-balancer,
- // if one is present.
- LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty"`
-}
-
-// LoadBalancerStatus represents the status of a load-balancer
-type LoadBalancerStatus struct {
- // Ingress is a list containing ingress points for the load-balancer;
- // traffic intended for the service should be sent to these ingress points.
- Ingress []LoadBalancerIngress `json:"ingress,omitempty"`
-}
-
-// LoadBalancerIngress represents the status of a load-balancer ingress point:
-// traffic intended for the service should be sent to an ingress point.
-type LoadBalancerIngress struct {
- // IP is set for load-balancer ingress points that are IP based
- // (typically GCE or OpenStack load-balancers)
- IP string `json:"ip,omitempty"`
-
- // Hostname is set for load-balancer ingress points that are DNS based
- // (typically AWS load-balancers)
- Hostname string `json:"hostname,omitempty"`
-}
-
-// ServiceSpec describes the attributes that a user creates on a service
-type ServiceSpec struct {
- // Type determines how the service will be exposed. Valid options: ClusterIP, NodePort, LoadBalancer
- Type ServiceType `json:"type,omitempty"`
-
- // Required: The list of ports that are exposed by this service.
- Ports []ServicePort `json:"ports"`
-
- // This service will route traffic to pods having labels matching this selector. If empty or not present,
- // the service is assumed to have endpoints set by an external process and Kubernetes will not modify
- // those endpoints.
- Selector map[string]string `json:"selector"`
-
- // ClusterIP is usually assigned by the master. If specified by the user
- // we will try to respect it or else fail the request. This field can
- // not be changed by updates.
- // Valid values are None, empty string (""), or a valid IP address
- // None can be specified for headless services when proxying is not required
- ClusterIP string `json:"clusterIP,omitempty"`
-
- // ExternalIPs are used by external load balancers, or can be set by
- // users to handle external traffic that arrives at a node.
- ExternalIPs []string `json:"externalIPs,omitempty"`
-
- // Only applies to Service Type: LoadBalancer
- // LoadBalancer will get created with the IP specified in this field.
- // This feature depends on whether the underlying cloud-provider supports specifying
- // the loadBalancerIP when a load balancer is created.
- // This field will be ignored if the cloud-provider does not support the feature.
- LoadBalancerIP string `json:"loadBalancerIP,omitempty"`
-
- // Optional: Supports "ClientIP" and "None". Used to maintain session affinity.
- SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty"`
-
- // Optional: If specified and supported by the platform, traffic through the cloud-provider
- // load-balancer will be restricted to the specified client IPs. This field will be ignored if the
- // cloud-provider does not support the feature.
- LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty"`
-}
-
-type ServicePort struct {
- // Optional if only one ServicePort is defined on this service: The
- // name of this port within the service. This must be a DNS_LABEL.
- // All ports within a ServiceSpec must have unique names. This maps to
- // the 'Name' field in EndpointPort objects.
- Name string `json:"name"` - - // The IP protocol for this port. Supports "TCP" and "UDP". - Protocol Protocol `json:"protocol"` - - // The port that will be exposed on the service. - Port int32 `json:"port"` - - // Optional: The target port on pods selected by this service. If this - // is a string, it will be looked up as a named port in the target - // Pod's container ports. If this is not specified, the value - // of the 'port' field is used (an identity map). - // This field is ignored for services with clusterIP=None, and should be - // omitted or set equal to the 'port' field. - TargetPort intstr.IntOrString `json:"targetPort"` - - // The port on each node on which this service is exposed. - // Default is to auto-allocate a port if the ServiceType of this Service requires one. - NodePort int32 `json:"nodePort"` -} - -// +genclient=true - -// Service is a named abstraction of software service (for example, mysql) consisting of local port -// (for example 3306) that the proxy listens on, and the selector that determines which pods -// will answer requests sent through the proxy. -type Service struct { - unversioned.TypeMeta `json:",inline"` - ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the behavior of a service. - Spec ServiceSpec `json:"spec,omitempty"` - - // Status represents the current status of a service. - Status ServiceStatus `json:"status,omitempty"` -} - -// +genclient=true - -// ServiceAccount binds together: -// * a name, understood by users, and perhaps by peripheral systems, for an identity -// * a principal that can be authenticated and authorized -// * a set of secrets -type ServiceAccount struct { - unversioned.TypeMeta `json:",inline"` - ObjectMeta `json:"metadata,omitempty"` - - // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount - Secrets []ObjectReference `json:"secrets"` - - // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images - // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets - // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. - ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty"` -} - -// ServiceAccountList is a list of ServiceAccount objects -type ServiceAccountList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - - Items []ServiceAccount `json:"items"` -} - -// +genclient=true - -// Endpoints is a collection of endpoints that implement the actual service. Example: -// Name: "mysvc", -// Subsets: [ -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// }, -// { -// Addresses: [{"ip": "10.10.3.3"}], -// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] -// }, -// ] -type Endpoints struct { - unversioned.TypeMeta `json:",inline"` - ObjectMeta `json:"metadata,omitempty"` - - // The set of all endpoints is the union of all subsets. - Subsets []EndpointSubset -} - -// EndpointSubset is a group of addresses with a common set of ports. The -// expanded set of endpoints is the Cartesian product of Addresses x Ports. 
-// For example, given: -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// } -// The resulting set of endpoints can be viewed as: -// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], -// b: [ 10.10.1.1:309, 10.10.2.2:309 ] -type EndpointSubset struct { - Addresses []EndpointAddress - NotReadyAddresses []EndpointAddress - Ports []EndpointPort -} - -// EndpointAddress is a tuple that describes single IP address. -type EndpointAddress struct { - // The IP of this endpoint. - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, see #4447. - IP string - // Optional: Hostname of this endpoint - // Meant to be used by DNS servers etc. - Hostname string `json:"hostname,omitempty"` - // Optional: The kubernetes object related to the entry point. - TargetRef *ObjectReference -} - -// EndpointPort is a tuple that describes a single port. -type EndpointPort struct { - // The name of this port (corresponds to ServicePort.Name). Optional - // if only one port is defined. Must be a DNS_LABEL. - Name string - - // The port number. - Port int32 - - // The IP protocol for this port. - Protocol Protocol -} - -// EndpointsList is a list of endpoints. -type EndpointsList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - - Items []Endpoints `json:"items"` -} - -// NodeSpec describes the attributes that a node is created with. -type NodeSpec struct { - // PodCIDR represents the pod IP range assigned to the node - // Note: assigning IP ranges to nodes might need to be revisited when we support migratable IPs. - PodCIDR string `json:"podCIDR,omitempty"` - - // External ID of the node assigned by some machine database (e.g. a cloud provider) - ExternalID string `json:"externalID,omitempty"` - - // ID of the node assigned by the cloud provider - // Note: format is "<ProviderName>://<ProviderSpecificNodeID>" - ProviderID string `json:"providerID,omitempty"` - - // Unschedulable controls node schedulability of new pods. By default node is schedulable. - Unschedulable bool `json:"unschedulable,omitempty"` -} - -// DaemonEndpoint contains information about a single Daemon endpoint. -type DaemonEndpoint struct { - /* - The port tag was not properly in quotes in earlier releases, so it must be - uppercased for backwards compat (since it was falling back to var name of - 'Port'). - */ - - // Port number of the given endpoint. - Port int32 `json:"Port"` -} - -// NodeDaemonEndpoints lists ports opened by daemons running on the Node. -type NodeDaemonEndpoints struct { - // Endpoint on which Kubelet is listening. - KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty"` -} - -// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. -type NodeSystemInfo struct { - // Machine ID reported by the node. - MachineID string `json:"machineID"` - // System UUID reported by the node. - SystemUUID string `json:"systemUUID"` - // Boot ID reported by the node. - BootID string `json:"bootID"` - // Kernel Version reported by the node. - KernelVersion string `json:"kernelVersion"` - // OS Image reported by the node. - OSImage string `json:"osImage"` - // ContainerRuntime Version reported by the node. - ContainerRuntimeVersion string `json:"containerRuntimeVersion"` - // Kubelet Version reported by the node. 
- KubeletVersion string `json:"kubeletVersion"`
- // KubeProxy Version reported by the node.
- KubeProxyVersion string `json:"kubeProxyVersion"`
- // The Operating System reported by the node
- OperatingSystem string `json:"operatingSystem"`
- // The Architecture reported by the node
- Architecture string `json:"architecture"`
-}
-
-// NodeStatus is information about the current status of a node.
-type NodeStatus struct {
- // Capacity represents the total resources of a node.
- Capacity ResourceList `json:"capacity,omitempty"`
- // Allocatable represents the resources of a node that are available for scheduling.
- Allocatable ResourceList `json:"allocatable,omitempty"`
- // NodePhase is the current lifecycle phase of the node.
- Phase NodePhase `json:"phase,omitempty"`
- // Conditions is an array of current node conditions.
- Conditions []NodeCondition `json:"conditions,omitempty"`
- // Queried from cloud provider, if available.
- Addresses []NodeAddress `json:"addresses,omitempty"`
- // Endpoints of daemons running on the Node.
- DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty"`
- // Set of ids/uuids to uniquely identify the node.
- NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty"`
- // List of container images on this node
- Images []ContainerImage `json:"images,omitempty"`
- // List of attachable volumes in use (mounted) by the node.
- VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty"`
- // List of volumes that are attached to the node.
- VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty"`
-}
-
-type UniqueVolumeName string
-
-// AttachedVolume describes a volume attached to a node
-type AttachedVolume struct {
- // Name of the attached volume
- Name UniqueVolumeName `json:"name"`
-
- // DevicePath represents the device path where the volume should be available
- DevicePath string `json:"devicePath"`
-}
-
-// Describe a container image
-type ContainerImage struct {
- // Names by which this image is known.
- Names []string `json:"names"`
- // The size of the image in bytes.
- SizeBytes int64 `json:"sizeBytes,omitempty"`
-}
-
-type NodePhase string
-
-// These are the valid phases of node.
-const (
- // NodePending means the node has been created/added by the system, but not configured.
- NodePending NodePhase = "Pending"
- // NodeRunning means the node has been configured and has Kubernetes components running.
- NodeRunning NodePhase = "Running"
- // NodeTerminated means the node has been removed from the cluster.
- NodeTerminated NodePhase = "Terminated"
-)
-
-type NodeConditionType string
-
-// These are valid conditions of node. Currently, we don't have enough information to decide
-// node condition. In the future, we will add more. The proposed set of conditions are:
-// NodeReady, NodeReachable
-const (
- // NodeReady means kubelet is healthy and ready to accept pods.
- NodeReady NodeConditionType = "Ready"
- // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk
- // space on the node.
- NodeOutOfDisk NodeConditionType = "OutOfDisk"
- // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory.
- NodeMemoryPressure NodeConditionType = "MemoryPressure"
- // NodeNetworkUnavailable means that network for the node is not correctly configured.
- NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" -) - -type NodeCondition struct { - Type NodeConditionType `json:"type"` - Status ConditionStatus `json:"status"` - LastHeartbeatTime unversioned.Time `json:"lastHeartbeatTime,omitempty"` - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"` - Reason string `json:"reason,omitempty"` - Message string `json:"message,omitempty"` -} - -type NodeAddressType string - -// These are valid address types of node. NodeLegacyHostIP is used to transit -// from out-dated HostIP field to NodeAddress. -const ( - NodeLegacyHostIP NodeAddressType = "LegacyHostIP" - NodeHostName NodeAddressType = "Hostname" - NodeExternalIP NodeAddressType = "ExternalIP" - NodeInternalIP NodeAddressType = "InternalIP" -) - -type NodeAddress struct { - Type NodeAddressType `json:"type"` - Address string `json:"address"` -} - -// NodeResources is an object for conveying resource information about a node. -// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. -type NodeResources struct { - // Capacity represents the available resources of a node - Capacity ResourceList `json:"capacity,omitempty"` -} - -// ResourceName is the name identifying various resources in a ResourceList. -type ResourceName string - -// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters, -// with the -, _, and . characters allowed anywhere, except the first or last character. -// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than -// camel case, separating compound words. -// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name. -const ( - // CPU, in cores. (500m = .5 cores) - ResourceCPU ResourceName = "cpu" - // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceMemory ResourceName = "memory" - // Volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) - ResourceStorage ResourceName = "storage" - // NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned. - ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu" - // Number of Pods that may be running on this Node: see ResourcePods -) - -// ResourceList is a set of (resource name, quantity) pairs. -type ResourceList map[ResourceName]resource.Quantity - -// +genclient=true,nonNamespaced=true - -// Node is a worker node in Kubernetes -// The name of the node according to etcd is in ObjectMeta.Name. -type Node struct { - unversioned.TypeMeta `json:",inline"` - ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the behavior of a node. - Spec NodeSpec `json:"spec,omitempty"` - - // Status describes the current status of a Node - Status NodeStatus `json:"status,omitempty"` -} - -// NodeList is a list of nodes. 
-type NodeList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - - Items []Node `json:"items"` -} - -// NamespaceSpec describes the attributes on a Namespace -type NamespaceSpec struct { - // Finalizers is an opaque list of values that must be empty to permanently remove object from storage - Finalizers []FinalizerName -} - -type FinalizerName string - -// These are internal finalizer values to Kubernetes, must be qualified name unless defined here -const ( - FinalizerKubernetes FinalizerName = "kubernetes" - FinalizerOrphan string = "orphan" -) - -// NamespaceStatus is information about the current status of a Namespace. -type NamespaceStatus struct { - // Phase is the current lifecycle phase of the namespace. - Phase NamespacePhase `json:"phase,omitempty"` -} - -type NamespacePhase string - -// These are the valid phases of a namespace. -const ( - // NamespaceActive means the namespace is available for use in the system - NamespaceActive NamespacePhase = "Active" - // NamespaceTerminating means the namespace is undergoing graceful termination - NamespaceTerminating NamespacePhase = "Terminating" -) - -// +genclient=true,nonNamespaced=true - -// A namespace provides a scope for Names. -// Use of multiple namespaces is optional -type Namespace struct { - unversioned.TypeMeta `json:",inline"` - ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the behavior of the Namespace. - Spec NamespaceSpec `json:"spec,omitempty"` - - // Status describes the current status of a Namespace - Status NamespaceStatus `json:"status,omitempty"` -} - -// NamespaceList is a list of Namespaces. -type NamespaceList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - - Items []Namespace `json:"items"` -} - -// Binding ties one object to another - for example, a pod is bound to a node by a scheduler. -type Binding struct { - unversioned.TypeMeta `json:",inline"` - // ObjectMeta describes the object that is being bound. - ObjectMeta `json:"metadata,omitempty"` - - // Target is the object to bind to. - Target ObjectReference `json:"target"` -} - -// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. -type Preconditions struct { - // Specifies the target UID. - UID *types.UID `json:"uid,omitempty"` -} - -// DeleteOptions may be provided when deleting an API object -type DeleteOptions struct { - unversioned.TypeMeta `json:",inline"` - - // Optional duration in seconds before the object should be deleted. Value must be non-negative integer. - // The value zero indicates delete immediately. If this value is nil, the default grace period for the - // specified type will be used. - GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty"` - - // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be - // returned. - Preconditions *Preconditions `json:"preconditions,omitempty"` - - // Should the dependent objects be orphaned. If true/false, the "orphan" - // finalizer will be added to/removed from the object's finalizers list. - OrphanDependents *bool `json:"orphanDependents,omitempty"` -} - -// ExportOptions is the query options to the standard REST get call. -type ExportOptions struct { - unversioned.TypeMeta `json:",inline"` - // Should this value be exported. Export strips fields that a user can not specify. - Export bool `json:"export"` - // Should the export be exact. 
Exact export maintains cluster-specific fields like 'Namespace' - Exact bool `json:"exact"` -} - -// ListOptions is the query options to a standard REST list call, and has future support for -// watch calls. -type ListOptions struct { - unversioned.TypeMeta `json:",inline"` - - // A selector based on labels - LabelSelector labels.Selector - // A selector based on fields - FieldSelector fields.Selector - // If true, watch for changes to this list - Watch bool - // The resource version to watch (no effect on list yet) - ResourceVersion string - // Timeout for the list/watch call. - TimeoutSeconds *int64 -} - -// PodLogOptions is the query options for a Pod's logs REST call -type PodLogOptions struct { - unversioned.TypeMeta - - // Container for which to return logs - Container string - // If true, follow the logs for the pod - Follow bool - // If true, return previous terminated container logs - Previous bool - // A relative time in seconds before the current time from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - SinceSeconds *int64 - // An RFC3339 timestamp from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - SinceTime *unversioned.Time - // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line - // of log output. - Timestamps bool - // If set, the number of lines from the end of the logs to show. If not specified, - // logs are shown from the creation of the container or sinceSeconds or sinceTime - TailLines *int64 - // If set, the number of bytes to read from the server before terminating the - // log output. This may not display a complete final line of logging, and may return - // slightly more or slightly less than the specified limit. - LimitBytes *int64 -} - -// PodAttachOptions is the query options to a Pod's remote attach call -// TODO: merge w/ PodExecOptions below for stdin, stdout, etc -type PodAttachOptions struct { - unversioned.TypeMeta `json:",inline"` - - // Stdin if true indicates that stdin is to be redirected for the attach call - Stdin bool `json:"stdin,omitempty"` - - // Stdout if true indicates that stdout is to be redirected for the attach call - Stdout bool `json:"stdout,omitempty"` - - // Stderr if true indicates that stderr is to be redirected for the attach call - Stderr bool `json:"stderr,omitempty"` - - // TTY if true indicates that a tty will be allocated for the attach call - TTY bool `json:"tty,omitempty"` - - // Container to attach to. - Container string `json:"container,omitempty"` -} - -// PodExecOptions is the query options to a Pod's remote exec call -type PodExecOptions struct { - unversioned.TypeMeta - - // Stdin if true indicates that stdin is to be redirected for the exec call - Stdin bool - - // Stdout if true indicates that stdout is to be redirected for the exec call - Stdout bool - - // Stderr if true indicates that stderr is to be redirected for the exec call - Stderr bool - - // TTY if true indicates that a tty will be allocated for the exec call - TTY bool - - // Container in which to execute the command. - Container string - - // Command is the remote command to execute; argv array; not executed within a shell. 
- Command []string -} - -// PodProxyOptions is the query options to a Pod's proxy call -type PodProxyOptions struct { - unversioned.TypeMeta - - // Path is the URL path to use for the current proxy request - Path string -} - -// NodeProxyOptions is the query options to a Node's proxy call -type NodeProxyOptions struct { - unversioned.TypeMeta - - // Path is the URL path to use for the current proxy request - Path string -} - -// ServiceProxyOptions is the query options to a Service's proxy call. -type ServiceProxyOptions struct { - unversioned.TypeMeta - - // Path is the part of URLs that include service endpoints, suffixes, - // and parameters to use for the current proxy request to service. - // For example, the whole request URL is - // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. - // Path is _search?q=user:kimchy. - Path string -} - -// OwnerReference contains enough information to let you identify an owning -// object. Currently, an owning object must be in the same namespace, so there -// is no namespace field. -type OwnerReference struct { - // API version of the referent. - APIVersion string `json:"apiVersion"` - // Kind of the referent. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - Kind string `json:"kind"` - // Name of the referent. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names - Name string `json:"name"` - // UID of the referent. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids - UID types.UID `json:"uid"` - // If true, this reference points to the managing controller. - Controller *bool `json:"controller,omitempty"` -} - -// ObjectReference contains enough information to let you inspect or modify the referred object. -type ObjectReference struct { - Kind string `json:"kind,omitempty"` - Namespace string `json:"namespace,omitempty"` - Name string `json:"name,omitempty"` - UID types.UID `json:"uid,omitempty"` - APIVersion string `json:"apiVersion,omitempty"` - ResourceVersion string `json:"resourceVersion,omitempty"` - - // Optional. If referring to a piece of an object instead of an entire object, this string - // should contain information to identify the sub-object. For example, if the object - // reference is to a container within a pod, this would take on a value like: - // "spec.containers{name}" (where "name" refers to the name of the container that triggered - // the event) or if no container name is specified "spec.containers[2]" (container with - // index 2 in this pod). This syntax is chosen only to have some well-defined way of - // referencing a part of an object. - // TODO: this design is not final and this field is subject to change in the future. - FieldPath string `json:"fieldPath,omitempty"` -} - -// LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. -type LocalObjectReference struct { - //TODO: Add other useful fields. apiVersion, kind, uid? - Name string -} - -type SerializedReference struct { - unversioned.TypeMeta `json:",inline"` - Reference ObjectReference `json:"reference,omitempty"` -} - -type EventSource struct { - // Component from which the event is generated. - Component string `json:"component,omitempty"` - // Host name on which the event is generated. 
- Host string `json:"host,omitempty"` -} - -// Valid values for event types (new types could be added in future) -const ( - // Information only and will not cause any problems - EventTypeNormal string = "Normal" - // These events are to warn that something might go wrong - EventTypeWarning string = "Warning" -) - -// +genclient=true - -// Event is a report of an event somewhere in the cluster. -// TODO: Decide whether to store these separately or with the object they apply to. -type Event struct { - unversioned.TypeMeta `json:",inline"` - ObjectMeta `json:"metadata,omitempty"` - - // Required. The object that this event is about. - InvolvedObject ObjectReference `json:"involvedObject,omitempty"` - - // Optional; this should be a short, machine understandable string that gives the reason - // for this event being generated. For example, if the event is reporting that a container - // can't start, the Reason might be "ImageNotFound". - // TODO: provide exact specification for format. - Reason string `json:"reason,omitempty"` - - // Optional. A human-readable description of the status of this operation. - // TODO: decide on maximum length. - Message string `json:"message,omitempty"` - - // Optional. The component reporting this event. Should be a short machine understandable string. - Source EventSource `json:"source,omitempty"` - - // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) - FirstTimestamp unversioned.Time `json:"firstTimestamp,omitempty"` - - // The time at which the most recent occurrence of this event was recorded. - LastTimestamp unversioned.Time `json:"lastTimestamp,omitempty"` - - // The number of times this event has occurred. - Count int32 `json:"count,omitempty"` - - // Type of this event (Normal, Warning), new types could be added in the future. - Type string `json:"type,omitempty"` -} - -// EventList is a list of events. -type EventList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - - Items []Event `json:"items"` -} - -// List holds a list of objects, which may not be known by the server. -type List struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - - Items []runtime.Object `json:"items"` -} - -// A type of object that is limited -type LimitType string - -const ( - // Limit that applies to all pods in a namespace - LimitTypePod LimitType = "Pod" - // Limit that applies to all containers in a namespace - LimitTypeContainer LimitType = "Container" -) - -// LimitRangeItem defines a min/max usage limit for any resource that matches on kind -type LimitRangeItem struct { - // Type of resource that this limit applies to - Type LimitType `json:"type,omitempty"` - // Max usage constraints on this kind by resource name - Max ResourceList `json:"max,omitempty"` - // Min usage constraints on this kind by resource name - Min ResourceList `json:"min,omitempty"` - // Default resource requirement limit value by resource name. - Default ResourceList `json:"default,omitempty"` - // DefaultRequest resource requirement request value by resource name. 
- DefaultRequest ResourceList `json:"defaultRequest,omitempty"` - // MaxLimitRequestRatio represents the max burst value for the named resource - MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty"` -} - -// LimitRangeSpec defines a min/max usage limit for resources that match on kind -type LimitRangeSpec struct { - // Limits is the list of LimitRangeItem objects that are enforced - Limits []LimitRangeItem `json:"limits"` -} - -// +genclient=true - -// LimitRange sets resource usage limits for each kind of resource in a Namespace -type LimitRange struct { - unversioned.TypeMeta `json:",inline"` - ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the limits enforced - Spec LimitRangeSpec `json:"spec,omitempty"` -} - -// LimitRangeList is a list of LimitRange items. -type LimitRangeList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - - // Items is a list of LimitRange objects - Items []LimitRange `json:"items"` -} - -// The following identify resource constants for Kubernetes object types -const ( - // Pods, number - ResourcePods ResourceName = "pods" - // Services, number - ResourceServices ResourceName = "services" - // ReplicationControllers, number - ResourceReplicationControllers ResourceName = "replicationcontrollers" - // ResourceQuotas, number - ResourceQuotas ResourceName = "resourcequotas" - // ResourceSecrets, number - ResourceSecrets ResourceName = "secrets" - // ResourceConfigMaps, number - ResourceConfigMaps ResourceName = "configmaps" - // ResourcePersistentVolumeClaims, number - ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" - // ResourceServicesNodePorts, number - ResourceServicesNodePorts ResourceName = "services.nodeports" - // ResourceServicesLoadBalancers, number - ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" - // CPU request, in cores. (500m = .5 cores) - ResourceRequestsCPU ResourceName = "requests.cpu" - // Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceRequestsMemory ResourceName = "requests.memory" - // CPU limit, in cores. (500m = .5 cores) - ResourceLimitsCPU ResourceName = "limits.cpu" - // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceLimitsMemory ResourceName = "limits.memory" -) - -// A ResourceQuotaScope defines a filter that must match each object tracked by a quota -type ResourceQuotaScope string - -const ( - // Match all pod objects where spec.activeDeadlineSeconds - ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" - // Match all pod objects where !spec.activeDeadlineSeconds - ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating" - // Match all pod objects that have best effort quality of service - ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" - // Match all pod objects that do not have best effort quality of service - ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" -) - -// ResourceQuotaSpec defines the desired hard limits to enforce for Quota -type ResourceQuotaSpec struct { - // Hard is the set of desired hard limits for each named resource - Hard ResourceList `json:"hard,omitempty"` - // A collection of filters that must match each object tracked by a quota. - // If not specified, the quota matches all objects. 
- Scopes []ResourceQuotaScope `json:"scopes,omitempty"` -} - -// ResourceQuotaStatus defines the enforced hard limits and observed use -type ResourceQuotaStatus struct { - // Hard is the set of enforced hard limits for each named resource - Hard ResourceList `json:"hard,omitempty"` - // Used is the current observed total usage of the resource in the namespace - Used ResourceList `json:"used,omitempty"` -} - -// +genclient=true - -// ResourceQuota sets aggregate quota restrictions enforced per namespace -type ResourceQuota struct { - unversioned.TypeMeta `json:",inline"` - ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the desired quota - Spec ResourceQuotaSpec `json:"spec,omitempty"` - - // Status defines the actual enforced quota and its current usage - Status ResourceQuotaStatus `json:"status,omitempty"` -} - -// ResourceQuotaList is a list of ResourceQuota items -type ResourceQuotaList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - - // Items is a list of ResourceQuota objects - Items []ResourceQuota `json:"items"` -} - -// +genclient=true - -// Secret holds secret data of a certain type. The total bytes of the values in -// the Data field must be less than MaxSecretSize bytes. -type Secret struct { - unversioned.TypeMeta `json:",inline"` - ObjectMeta `json:"metadata,omitempty"` - - // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN - // or leading dot followed by valid DNS_SUBDOMAIN. - // The serialized form of the secret data is a base64 encoded string, - // representing the arbitrary (possibly non-string) data value here. - Data map[string][]byte `json:"data,omitempty"` - - // Used to facilitate programmatic handling of secret data. - Type SecretType `json:"type,omitempty"` -} - -const MaxSecretSize = 1 * 1024 * 1024 - -type SecretType string - -const ( - // SecretTypeOpaque is the default; arbitrary user-defined data - SecretTypeOpaque SecretType = "Opaque" - - // SecretTypeServiceAccountToken contains a token that identifies a service account to the API - // - // Required fields: - // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies - // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies - // - Secret.Data["token"] - a token that identifies the service account to the API - SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token" - - // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets - ServiceAccountNameKey = "kubernetes.io/service-account.name" - // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets - ServiceAccountUIDKey = "kubernetes.io/service-account.uid" - // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets - ServiceAccountTokenKey = "token" - // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets - ServiceAccountKubeconfigKey = "kubernetes.kubeconfig" - // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets - ServiceAccountRootCAKey = "ca.crt" - // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls - ServiceAccountNamespaceKey = "namespace" - - // SecretTypeDockercfg contains a dockercfg file that follows the 
same format rules as ~/.dockercfg - // - // Required fields: - // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file - SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg" - - // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets - DockerConfigKey = ".dockercfg" - - // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json - // - // Required fields: - // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file - SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson" - - // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets - DockerConfigJsonKey = ".dockerconfigjson" - - // SecretTypeBasicAuth contains data needed for basic authentication. - // - // Required at least one of fields: - // - Secret.Data["username"] - username used for authentication - // - Secret.Data["password"] - password or token needed for authentication - SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth" - - // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets - BasicAuthUsernameKey = "username" - // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets - BasicAuthPasswordKey = "password" - - // SecretTypeSSHAuth contains data needed for SSH authetication. - // - // Required field: - // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication - SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" - - // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets - SSHAuthPrivateKey = "ssh-privatekey" - - // SecretTypeTLS contains information about a TLS client or server secret. It - // is primarily used with TLS termination of the Ingress resource, but may be - // used in other types. - // - // Required fields: - // - Secret.Data["tls.key"] - TLS private key. - // Secret.Data["tls.crt"] - TLS certificate. - // TODO: Consider supporting different formats, specifying CA/destinationCA. - SecretTypeTLS SecretType = "kubernetes.io/tls" - - // TLSCertKey is the key for tls certificates in a TLS secert. - TLSCertKey = "tls.crt" - // TLSPrivateKeyKey is the key for the private key field in a TLS secret. - TLSPrivateKeyKey = "tls.key" -) - -type SecretList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - - Items []Secret `json:"items"` -} - -// +genclient=true - -// ConfigMap holds configuration data for components or applications to consume. -type ConfigMap struct { - unversioned.TypeMeta `json:",inline"` - ObjectMeta `json:"metadata,omitempty"` - - // Data contains the configuration data. - // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot. - Data map[string]string `json:"data,omitempty"` -} - -// ConfigMapList is a resource containing a list of ConfigMap objects. -type ConfigMapList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - - // Items is the list of ConfigMaps. - Items []ConfigMap `json:"items"` -} - -// These constants are for remote command execution and port forwarding and are -// used by both the client side and server side components. -// -// This is probably not the ideal place for them, but it didn't seem worth it -// to create pkg/exec and pkg/portforward just to contain a single file with -// constants in it. Suggestions for more appropriate alternatives are -// definitely welcome! 
-const ( - // Enable stdin for remote command execution - ExecStdinParam = "input" - // Enable stdout for remote command execution - ExecStdoutParam = "output" - // Enable stderr for remote command execution - ExecStderrParam = "error" - // Enable TTY for remote command execution - ExecTTYParam = "tty" - // Command to run for remote command execution - ExecCommandParamm = "command" - - // Name of header that specifies stream type - StreamType = "streamType" - // Value for streamType header for stdin stream - StreamTypeStdin = "stdin" - // Value for streamType header for stdout stream - StreamTypeStdout = "stdout" - // Value for streamType header for stderr stream - StreamTypeStderr = "stderr" - // Value for streamType header for data stream - StreamTypeData = "data" - // Value for streamType header for error stream - StreamTypeError = "error" - - // Name of header that specifies the port being forwarded - PortHeader = "port" - // Name of header that specifies a request ID used to associate the error - // and data streams for a single forwarded connection - PortForwardRequestIDHeader = "requestID" -) - -// Similarly to above, these are constants to support HTTP PATCH utilized by -// both the client and server that didn't make sense for a whole package to be -// dedicated to. -type PatchType string - -const ( - JSONPatchType PatchType = "application/json-patch+json" - MergePatchType PatchType = "application/merge-patch+json" - StrategicMergePatchType PatchType = "application/strategic-merge-patch+json" -) - -// Type and constants for component health validation. -type ComponentConditionType string - -// These are the valid conditions for the component. -const ( - ComponentHealthy ComponentConditionType = "Healthy" -) - -type ComponentCondition struct { - Type ComponentConditionType `json:"type"` - Status ConditionStatus `json:"status"` - Message string `json:"message,omitempty"` - Error string `json:"error,omitempty"` -} - -// +genclient=true,nonNamespaced=true - -// ComponentStatus (and ComponentStatusList) holds the cluster validation info. -type ComponentStatus struct { - unversioned.TypeMeta `json:",inline"` - ObjectMeta `json:"metadata,omitempty"` - - Conditions []ComponentCondition `json:"conditions,omitempty"` -} - -type ComponentStatusList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - - Items []ComponentStatus `json:"items"` -} - -// SecurityContext holds security configuration that will be applied to a container. -// Some fields are present in both SecurityContext and PodSecurityContext. When both -// are set, the values in SecurityContext take precedence. -type SecurityContext struct { - // The capabilities to add/drop when running containers. - // Defaults to the default set of capabilities granted by the container runtime. - Capabilities *Capabilities `json:"capabilities,omitempty"` - // Run container in privileged mode. - // Processes in privileged containers are essentially equivalent to root on the host. - // Defaults to false. - Privileged *bool `json:"privileged,omitempty"` - // The SELinux context to be applied to the container. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty"` - // The UID to run the entrypoint of the container process. 
- // Defaults to user specified in image metadata if unspecified. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - RunAsUser *int64 `json:"runAsUser,omitempty"` - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"` - // The read-only root filesystem allows you to restrict the locations that an application can write - // files to, ensuring the persistent data can only be written to mounts. - ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty"` -} - -// SELinuxOptions are the labels to be applied to the container. -type SELinuxOptions struct { - // SELinux user label - User string `json:"user,omitempty"` - // SELinux role label - Role string `json:"role,omitempty"` - // SELinux type label - Type string `json:"type,omitempty"` - // SELinux level label. - Level string `json:"level,omitempty"` -} - -// RangeAllocation is an opaque API object (not exposed to end users) that can be persisted to record -// the global allocation state of the cluster. The schema of Range and Data generic, in that Range -// should be a string representation of the inputs to a range (for instance, for IP allocation it -// might be a CIDR) and Data is an opaque blob understood by an allocator which is typically a -// binary range. Consumers should use annotations to record additional information (schema version, -// data encoding hints). A range allocation should *ALWAYS* be recreatable at any time by observation -// of the cluster, thus the object is less strongly typed than most. -type RangeAllocation struct { - unversioned.TypeMeta `json:",inline"` - ObjectMeta `json:"metadata,omitempty"` - // A string representing a unique label for a range of resources, such as a CIDR "10.0.0.0/8" or - // port range "10000-30000". Range is not strongly schema'd here. The Range is expected to define - // a start and end unless there is an implicit end. - Range string `json:"range"` - // A byte array representing the serialized state of a range allocation. Additional clarifiers on - // the type or format of data should be represented with annotations. For IP allocations, this is - // represented as a bit array starting at the base IP of the CIDR in Range, with each bit representing - // a single allocated address (the fifth bit on CIDR 10.0.0.0/8 is 10.0.0.4). - Data []byte `json:"data"` -} - -const ( - // "default-scheduler" is the name of default scheduler. - DefaultSchedulerName = "default-scheduler" - - // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule - // corresponding to every RequiredDuringScheduling affinity rule. - // When the --hard-pod-affinity-weight scheduler flag is not specified, - // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. 
- DefaultHardPodAffinitySymmetricWeight int = 1 - - // When the --failure-domains scheduler flag is not specified, - // DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity. - DefaultFailureDomains string = unversioned.LabelHostname + "," + unversioned.LabelZoneFailureDomain + "," + unversioned.LabelZoneRegion -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/deep_copy_generated.go deleted file mode 100644 index ff8f1f0d6..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/deep_copy_generated.go +++ /dev/null @@ -1,314 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package unversioned - -import ( - conversion "k8s.io/kubernetes/pkg/conversion" - time "time" -) - -func DeepCopy_unversioned_APIGroup(in APIGroup, out *APIGroup, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Name = in.Name - if in.Versions != nil { - in, out := in.Versions, &out.Versions - *out = make([]GroupVersionForDiscovery, len(in)) - for i := range in { - if err := DeepCopy_unversioned_GroupVersionForDiscovery(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Versions = nil - } - if err := DeepCopy_unversioned_GroupVersionForDiscovery(in.PreferredVersion, &out.PreferredVersion, c); err != nil { - return err - } - if in.ServerAddressByClientCIDRs != nil { - in, out := in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs - *out = make([]ServerAddressByClientCIDR, len(in)) - for i := range in { - if err := DeepCopy_unversioned_ServerAddressByClientCIDR(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.ServerAddressByClientCIDRs = nil - } - return nil -} - -func DeepCopy_unversioned_APIGroupList(in APIGroupList, out *APIGroupList, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if in.Groups != nil { - in, out := in.Groups, &out.Groups - *out = make([]APIGroup, len(in)) - for i := range in { - if err := DeepCopy_unversioned_APIGroup(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Groups = nil - } - return nil -} - -func DeepCopy_unversioned_APIResource(in APIResource, out *APIResource, c *conversion.Cloner) error { - out.Name = in.Name - out.Namespaced = in.Namespaced - out.Kind = in.Kind - return nil -} - -func DeepCopy_unversioned_APIResourceList(in APIResourceList, out *APIResourceList, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.GroupVersion = in.GroupVersion - if in.APIResources != nil { - in, out := in.APIResources, &out.APIResources - *out 
= make([]APIResource, len(in)) - for i := range in { - if err := DeepCopy_unversioned_APIResource(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.APIResources = nil - } - return nil -} - -func DeepCopy_unversioned_APIVersions(in APIVersions, out *APIVersions, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if in.Versions != nil { - in, out := in.Versions, &out.Versions - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Versions = nil - } - if in.ServerAddressByClientCIDRs != nil { - in, out := in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs - *out = make([]ServerAddressByClientCIDR, len(in)) - for i := range in { - if err := DeepCopy_unversioned_ServerAddressByClientCIDR(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.ServerAddressByClientCIDRs = nil - } - return nil -} - -func DeepCopy_unversioned_Duration(in Duration, out *Duration, c *conversion.Cloner) error { - out.Duration = in.Duration - return nil -} - -func DeepCopy_unversioned_ExportOptions(in ExportOptions, out *ExportOptions, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Export = in.Export - out.Exact = in.Exact - return nil -} - -func DeepCopy_unversioned_GroupKind(in GroupKind, out *GroupKind, c *conversion.Cloner) error { - out.Group = in.Group - out.Kind = in.Kind - return nil -} - -func DeepCopy_unversioned_GroupResource(in GroupResource, out *GroupResource, c *conversion.Cloner) error { - out.Group = in.Group - out.Resource = in.Resource - return nil -} - -func DeepCopy_unversioned_GroupVersion(in GroupVersion, out *GroupVersion, c *conversion.Cloner) error { - out.Group = in.Group - out.Version = in.Version - return nil -} - -func DeepCopy_unversioned_GroupVersionForDiscovery(in GroupVersionForDiscovery, out *GroupVersionForDiscovery, c *conversion.Cloner) error { - out.GroupVersion = in.GroupVersion - out.Version = in.Version - return nil -} - -func DeepCopy_unversioned_GroupVersionKind(in GroupVersionKind, out *GroupVersionKind, c *conversion.Cloner) error { - out.Group = in.Group - out.Version = in.Version - out.Kind = in.Kind - return nil -} - -func DeepCopy_unversioned_GroupVersionResource(in GroupVersionResource, out *GroupVersionResource, c *conversion.Cloner) error { - out.Group = in.Group - out.Version = in.Version - out.Resource = in.Resource - return nil -} - -func DeepCopy_unversioned_LabelSelector(in LabelSelector, out *LabelSelector, c *conversion.Cloner) error { - if in.MatchLabels != nil { - in, out := in.MatchLabels, &out.MatchLabels - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.MatchLabels = nil - } - if in.MatchExpressions != nil { - in, out := in.MatchExpressions, &out.MatchExpressions - *out = make([]LabelSelectorRequirement, len(in)) - for i := range in { - if err := DeepCopy_unversioned_LabelSelectorRequirement(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.MatchExpressions = nil - } - return nil -} - -func DeepCopy_unversioned_LabelSelectorRequirement(in LabelSelectorRequirement, out *LabelSelectorRequirement, c *conversion.Cloner) error { - out.Key = in.Key - out.Operator = in.Operator - if in.Values != nil { - in, out := in.Values, &out.Values - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Values = nil - } - return nil -} - -func 
DeepCopy_unversioned_ListMeta(in ListMeta, out *ListMeta, c *conversion.Cloner) error { - out.SelfLink = in.SelfLink - out.ResourceVersion = in.ResourceVersion - return nil -} - -func DeepCopy_unversioned_Patch(in Patch, out *Patch, c *conversion.Cloner) error { - return nil -} - -func DeepCopy_unversioned_RootPaths(in RootPaths, out *RootPaths, c *conversion.Cloner) error { - if in.Paths != nil { - in, out := in.Paths, &out.Paths - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Paths = nil - } - return nil -} - -func DeepCopy_unversioned_ServerAddressByClientCIDR(in ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, c *conversion.Cloner) error { - out.ClientCIDR = in.ClientCIDR - out.ServerAddress = in.ServerAddress - return nil -} - -func DeepCopy_unversioned_Status(in Status, out *Status, c *conversion.Cloner) error { - if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - out.Status = in.Status - out.Message = in.Message - out.Reason = in.Reason - if in.Details != nil { - in, out := in.Details, &out.Details - *out = new(StatusDetails) - if err := DeepCopy_unversioned_StatusDetails(*in, *out, c); err != nil { - return err - } - } else { - out.Details = nil - } - out.Code = in.Code - return nil -} - -func DeepCopy_unversioned_StatusCause(in StatusCause, out *StatusCause, c *conversion.Cloner) error { - out.Type = in.Type - out.Message = in.Message - out.Field = in.Field - return nil -} - -func DeepCopy_unversioned_StatusDetails(in StatusDetails, out *StatusDetails, c *conversion.Cloner) error { - out.Name = in.Name - out.Group = in.Group - out.Kind = in.Kind - if in.Causes != nil { - in, out := in.Causes, &out.Causes - *out = make([]StatusCause, len(in)) - for i := range in { - if err := DeepCopy_unversioned_StatusCause(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Causes = nil - } - out.RetryAfterSeconds = in.RetryAfterSeconds - return nil -} - -func DeepCopy_unversioned_Time(in Time, out *Time, c *conversion.Cloner) error { - if newVal, err := c.DeepCopy(in.Time); err != nil { - return err - } else { - out.Time = newVal.(time.Time) - } - return nil -} - -func DeepCopy_unversioned_Timestamp(in Timestamp, out *Timestamp, c *conversion.Cloner) error { - out.Seconds = in.Seconds - out.Nanos = in.Nanos - return nil -} - -func DeepCopy_unversioned_TypeMeta(in TypeMeta, out *TypeMeta, c *conversion.Cloner) error { - out.Kind = in.Kind - out.APIVersion = in.APIVersion - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/duration.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/duration.go deleted file mode 100644 index cdaf25730..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/duration.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package unversioned - -import ( - "encoding/json" - "time" -) - -// Duration is a wrapper around time.Duration which supports correct -// marshaling to YAML and JSON. In particular, it marshals into strings, which -// can be used as map keys in json. -type Duration struct { - time.Duration `protobuf:"varint,1,opt,name=duration,casttype=time.Duration"` -} - -// UnmarshalJSON implements the json.Unmarshaller interface. -func (d *Duration) UnmarshalJSON(b []byte) error { - var str string - json.Unmarshal(b, &str) - - pd, err := time.ParseDuration(str) - if err != nil { - return err - } - d.Duration = pd - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (d Duration) MarshalJSON() ([]byte, error) { - return json.Marshal(d.Duration.String()) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.pb.go deleted file mode 100644 index cb9803552..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.pb.go +++ /dev/null @@ -1,4212 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/api/unversioned/generated.proto -// DO NOT EDIT! - -/* - Package unversioned is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/api/unversioned/generated.proto - - It has these top-level messages: - APIGroup - APIGroupList - APIResource - APIResourceList - APIVersions - Duration - ExportOptions - GroupKind - GroupResource - GroupVersion - GroupVersionForDiscovery - GroupVersionKind - GroupVersionResource - LabelSelector - LabelSelectorRequirement - ListMeta - RootPaths - ServerAddressByClientCIDR - Status - StatusCause - StatusDetails - Time - Timestamp - TypeMeta -*/ -package unversioned - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import time "time" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -func (m *APIGroup) Reset() { *m = APIGroup{} } -func (m *APIGroup) String() string { return proto.CompactTextString(m) } -func (*APIGroup) ProtoMessage() {} - -func (m *APIGroupList) Reset() { *m = APIGroupList{} } -func (m *APIGroupList) String() string { return proto.CompactTextString(m) } -func (*APIGroupList) ProtoMessage() {} - -func (m *APIResource) Reset() { *m = APIResource{} } -func (m *APIResource) String() string { return proto.CompactTextString(m) } -func (*APIResource) ProtoMessage() {} - -func (m *APIResourceList) Reset() { *m = APIResourceList{} } -func (m *APIResourceList) String() string { return proto.CompactTextString(m) } -func (*APIResourceList) ProtoMessage() {} - -func (m *APIVersions) Reset() { *m = APIVersions{} } -func (*APIVersions) ProtoMessage() {} - -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} - -func (m *ExportOptions) Reset() { *m = ExportOptions{} } -func (m *ExportOptions) String() string { return proto.CompactTextString(m) } -func (*ExportOptions) ProtoMessage() {} - -func (m *GroupKind) Reset() { *m = GroupKind{} } -func (*GroupKind) ProtoMessage() {} - -func (m *GroupResource) Reset() { *m = GroupResource{} } -func (*GroupResource) ProtoMessage() {} - -func (m *GroupVersion) Reset() { *m = GroupVersion{} } -func (*GroupVersion) ProtoMessage() {} - -func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } -func (m *GroupVersionForDiscovery) String() string { return proto.CompactTextString(m) } -func (*GroupVersionForDiscovery) ProtoMessage() {} - -func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } -func (*GroupVersionKind) ProtoMessage() {} - -func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } -func (*GroupVersionResource) ProtoMessage() {} - -func (m *LabelSelector) Reset() { *m = LabelSelector{} } -func (m *LabelSelector) String() string { return proto.CompactTextString(m) } -func (*LabelSelector) ProtoMessage() {} - -func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } -func (m *LabelSelectorRequirement) String() string { return proto.CompactTextString(m) } -func (*LabelSelectorRequirement) ProtoMessage() {} - -func (m *ListMeta) Reset() { *m = ListMeta{} } -func (m *ListMeta) String() string { return proto.CompactTextString(m) } -func (*ListMeta) ProtoMessage() {} - -func (m *RootPaths) Reset() { *m = RootPaths{} } -func (m *RootPaths) String() string { return proto.CompactTextString(m) } -func (*RootPaths) ProtoMessage() {} - -func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } -func (m *ServerAddressByClientCIDR) String() string { return proto.CompactTextString(m) } -func (*ServerAddressByClientCIDR) ProtoMessage() {} - -func (m *Status) Reset() { *m = Status{} } -func (m *Status) String() string { return proto.CompactTextString(m) } -func (*Status) ProtoMessage() {} - -func (m *StatusCause) Reset() { *m = StatusCause{} } -func (m *StatusCause) String() string { return proto.CompactTextString(m) } -func (*StatusCause) ProtoMessage() {} - -func (m *StatusDetails) Reset() { *m = StatusDetails{} } -func (m *StatusDetails) String() string { return proto.CompactTextString(m) } -func (*StatusDetails) ProtoMessage() {} - -func (m *Time) Reset() { *m = Time{} } -func (m *Time) String() string { return proto.CompactTextString(m) } -func (*Time) ProtoMessage() {} - -func (m 
*Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} - -func (m *TypeMeta) Reset() { *m = TypeMeta{} } -func (m *TypeMeta) String() string { return proto.CompactTextString(m) } -func (*TypeMeta) ProtoMessage() {} - -func init() { - proto.RegisterType((*APIGroup)(nil), "k8s.io.kubernetes.pkg.api.unversioned.APIGroup") - proto.RegisterType((*APIGroupList)(nil), "k8s.io.kubernetes.pkg.api.unversioned.APIGroupList") - proto.RegisterType((*APIResource)(nil), "k8s.io.kubernetes.pkg.api.unversioned.APIResource") - proto.RegisterType((*APIResourceList)(nil), "k8s.io.kubernetes.pkg.api.unversioned.APIResourceList") - proto.RegisterType((*APIVersions)(nil), "k8s.io.kubernetes.pkg.api.unversioned.APIVersions") - proto.RegisterType((*Duration)(nil), "k8s.io.kubernetes.pkg.api.unversioned.Duration") - proto.RegisterType((*ExportOptions)(nil), "k8s.io.kubernetes.pkg.api.unversioned.ExportOptions") - proto.RegisterType((*GroupKind)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupKind") - proto.RegisterType((*GroupResource)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupResource") - proto.RegisterType((*GroupVersion)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupVersion") - proto.RegisterType((*GroupVersionForDiscovery)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupVersionForDiscovery") - proto.RegisterType((*GroupVersionKind)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupVersionKind") - proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.kubernetes.pkg.api.unversioned.GroupVersionResource") - proto.RegisterType((*LabelSelector)(nil), "k8s.io.kubernetes.pkg.api.unversioned.LabelSelector") - proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.kubernetes.pkg.api.unversioned.LabelSelectorRequirement") - proto.RegisterType((*ListMeta)(nil), "k8s.io.kubernetes.pkg.api.unversioned.ListMeta") - proto.RegisterType((*RootPaths)(nil), "k8s.io.kubernetes.pkg.api.unversioned.RootPaths") - proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.kubernetes.pkg.api.unversioned.ServerAddressByClientCIDR") - proto.RegisterType((*Status)(nil), "k8s.io.kubernetes.pkg.api.unversioned.Status") - proto.RegisterType((*StatusCause)(nil), "k8s.io.kubernetes.pkg.api.unversioned.StatusCause") - proto.RegisterType((*StatusDetails)(nil), "k8s.io.kubernetes.pkg.api.unversioned.StatusDetails") - proto.RegisterType((*Time)(nil), "k8s.io.kubernetes.pkg.api.unversioned.Time") - proto.RegisterType((*Timestamp)(nil), "k8s.io.kubernetes.pkg.api.unversioned.Timestamp") - proto.RegisterType((*TypeMeta)(nil), "k8s.io.kubernetes.pkg.api.unversioned.TypeMeta") -} -func (m *APIGroup) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *APIGroup) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - if len(m.Versions) > 0 { - for _, msg := range m.Versions { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.PreferredVersion.Size())) - n1, err := m.PreferredVersion.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - if 
[removed from the vendored unversioned API package: the generated gogo-protobuf marshalling code, i.e. the Marshal()/MarshalTo(), Size(), and Unmarshal() methods for APIGroup, APIGroupList, APIResource, APIResourceList, APIVersions, Duration, ExportOptions, GroupKind, GroupResource, GroupVersion, GroupVersionForDiscovery, GroupVersionKind, GroupVersionResource, LabelSelector, LabelSelectorRequirement, ListMeta, RootPaths, ServerAddressByClientCIDR, Status, StatusCause, StatusDetails, Timestamp, and TypeMeta, together with the encodeFixed64Generated, encodeFixed32Generated, encodeVarintGenerated, sovGenerated, and sozGenerated helpers]
for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Group = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Causes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Causes = append(m.Causes, StatusCause{}) - if err := m.Causes[len(m.Causes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RetryAfterSeconds", wireType) - } - m.RetryAfterSeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.RetryAfterSeconds |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Timestamp) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= 
(uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) - } - m.Seconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Seconds |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) - } - m.Nanos = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Nanos |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TypeMeta) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + 
skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.proto b/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.proto deleted file mode 100644 index e26cbd7b3..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/generated.proto +++ /dev/null @@ -1,377 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.api.unversioned; - -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "unversioned"; - -// APIGroup contains the name, the supported versions, and the preferred version -// of a group. -message APIGroup { - // name is the name of the group. 
- optional string name = 1; - - // versions are the versions supported in this group. - repeated GroupVersionForDiscovery versions = 2; - - // preferredVersion is the version preferred by the API server, which - // probably is the storage version. - optional GroupVersionForDiscovery preferredVersion = 3; - - // a map of client CIDR to server address that is serving this group. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - // The server returns only those CIDRs that it thinks that the client can match. - // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. - // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. - repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4; -} - -// APIGroupList is a list of APIGroup, to allow clients to discover the API at -// /apis. -message APIGroupList { - // groups is a list of APIGroup. - repeated APIGroup groups = 1; -} - -// APIResource specifies the name of a resource and whether it is namespaced. -message APIResource { - // name is the name of the resource. - optional string name = 1; - - // namespaced indicates if a resource is namespaced or not. - optional bool namespaced = 2; - - // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') - optional string kind = 3; -} - -// APIResourceList is a list of APIResource, it is used to expose the name of the -// resources supported in a specific group and version, and if the resource -// is namespaced. -message APIResourceList { - // groupVersion is the group and version this APIResourceList is for. - optional string groupVersion = 1; - - // resources contains the name of the resources and if they are namespaced. - repeated APIResource resources = 2; -} - -// APIVersions lists the versions that are available, to allow clients to -// discover the API at /api, which is the root path of the legacy v1 API. -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message APIVersions { - // versions are the api versions that are available. - repeated string versions = 1; - - // a map of client CIDR to server address that is serving this group. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - // The server returns only those CIDRs that it thinks that the client can match. - // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. - // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. - repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 2; -} - -// Duration is a wrapper around time.Duration which supports correct -// marshaling to YAML and JSON. In particular, it marshals into strings, which -// can be used as map keys in json. -message Duration { - optional int64 duration = 1; -} - -// ExportOptions is the query options to the standard REST get call. -message ExportOptions { - // Should this value be exported. 
Export strips fields that a user can not specify.` - optional bool export = 1; - - // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace' - optional bool exact = 2; -} - -// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying -// concepts during lookup stages without having partially valid types -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message GroupKind { - optional string group = 1; - - optional string kind = 2; -} - -// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying -// concepts during lookup stages without having partially valid types -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message GroupResource { - optional string group = 1; - - optional string resource = 2; -} - -// GroupVersion contains the "group" and the "version", which uniquely identifies the API. -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message GroupVersion { - optional string group = 1; - - optional string version = 2; -} - -// GroupVersion contains the "group/version" and "version" string of a version. -// It is made a struct to keep extensiblity. -message GroupVersionForDiscovery { - // groupVersion specifies the API group and version in the form "group/version" - optional string groupVersion = 1; - - // version specifies the version in the form of "version". This is to save - // the clients the trouble of splitting the GroupVersion. - optional string version = 2; -} - -// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message GroupVersionKind { - optional string group = 1; - - optional string version = 2; - - optional string kind = 3; -} - -// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -message GroupVersionResource { - optional string group = 1; - - optional string version = 2; - - optional string resource = 3; -} - -// A label selector is a label query over a set of resources. The result of matchLabels and -// matchExpressions are ANDed. An empty label selector matches all objects. A null -// label selector matches no objects. -message LabelSelector { - // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - // map is equivalent to an element of matchExpressions, whose key field is "key", the - // operator is "In", and the values array contains only "value". The requirements are ANDed. - map<string, string> matchLabels = 1; - - // matchExpressions is a list of label selector requirements. The requirements are ANDed. - repeated LabelSelectorRequirement matchExpressions = 2; -} - -// A label selector requirement is a selector that contains values, a key, and an operator that -// relates the key and values. -message LabelSelectorRequirement { - // key is the label key that the selector applies to. - optional string key = 1; - - // operator represents a key's relationship to a set of values. - // Valid operators ard In, NotIn, Exists and DoesNotExist. - optional string operator = 2; - - // values is an array of string values. 
If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. This array is replaced during a strategic - // merge patch. - repeated string values = 3; -} - -// ListMeta describes metadata that synthetic resources must have, including lists and -// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. -message ListMeta { - // SelfLink is a URL representing this object. - // Populated by the system. - // Read-only. - optional string selfLink = 1; - - // String that identifies the server's internal version of this object that - // can be used by clients to determine when objects have changed. - // Value must be treated as opaque by clients and passed unmodified back to the server. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#concurrency-control-and-consistency - optional string resourceVersion = 2; -} - -// RootPaths lists the paths available at root. -// For example: "/healthz", "/apis". -message RootPaths { - // paths are the paths available at root. - repeated string paths = 1; -} - -// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. -message ServerAddressByClientCIDR { - // The CIDR with which clients can match their IP to figure out the server address that they should use. - optional string clientCIDR = 1; - - // Address of this server, suitable for a client that matches the above CIDR. - // This can be a hostname, hostname:port, IP or IP:port. - optional string serverAddress = 2; -} - -// Status is a return value for calls that don't return other objects. -message Status { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional ListMeta metadata = 1; - - // Status of the operation. - // One of: "Success" or "Failure". - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional string status = 2; - - // A human-readable description of the status of this operation. - optional string message = 3; - - // A machine-readable description of why this operation is in the - // "Failure" status. If this value is empty there - // is no information available. A Reason clarifies an HTTP status - // code but does not override it. - optional string reason = 4; - - // Extended data associated with the reason. Each reason may define its - // own extended details. This field is optional and the data returned - // is not guaranteed to conform to any schema except that defined by - // the reason type. - optional StatusDetails details = 5; - - // Suggested HTTP return code for this status, 0 if not set. - optional int32 code = 6; -} - -// StatusCause provides more information about an api.Status failure, including -// cases when multiple errors are encountered. -message StatusCause { - // A machine-readable description of the cause of the error. If this value is - // empty there is no information available. - optional string reason = 1; - - // A human-readable description of the cause of the error. This field may be - // presented as-is to a reader. - optional string message = 2; - - // The field of the resource that has caused this error, as named by its JSON - // serialization. May include dot and postfix notation for nested attributes. - // Arrays are zero-indexed. 
Fields may appear more than once in an array of - // causes due to fields having multiple errors. - // Optional. - // - // Examples: - // "name" - the field "name" on the current resource - // "items[0].name" - the field "name" on the first array entry in "items" - optional string field = 3; -} - -// StatusDetails is a set of additional properties that MAY be set by the -// server to provide additional information about a response. The Reason -// field of a Status object defines what attributes will be set. Clients -// must ignore fields that do not match the defined type of each attribute, -// and should assume that any attribute may be empty, invalid, or under -// defined. -message StatusDetails { - // The name attribute of the resource associated with the status StatusReason - // (when there is a single name which can be described). - optional string name = 1; - - // The group attribute of the resource associated with the status StatusReason. - optional string group = 2; - - // The kind attribute of the resource associated with the status StatusReason. - // On some operations may differ from the requested resource Kind. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional string kind = 3; - - // The Causes array includes more details associated with the StatusReason - // failure. Not all StatusReasons may provide detailed causes. - repeated StatusCause causes = 4; - - // If specified, the time in seconds before the operation should be retried. - optional int32 retryAfterSeconds = 5; -} - -// Time is a wrapper around time.Time which supports correct -// marshaling to YAML and JSON. Wrappers are provided for many -// of the factory methods that the time package offers. -// -// +protobuf.options.marshal=false -// +protobuf.as=Timestamp -message Time { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - optional int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. This field may be limited in precision depending on context. - optional int32 nanos = 2; -} - -// Timestamp is a struct that is equivalent to Time, but intended for -// protobuf marshalling/unmarshalling. It is generated into a serialization -// that matches Time. Do not use in Go structs. -message Timestamp { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - optional int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. This field may be limited in precision depending on context. - optional int32 nanos = 2; -} - -// TypeMeta describes an individual object in an API response or request -// with strings representing the type of the object and its API schema version. -// Structures that are versioned or persisted should inline TypeMeta. -message TypeMeta { - // Kind is a string value representing the REST resource this object represents. - // Servers may infer this from the endpoint the client submits requests to. - // Cannot be updated. - // In CamelCase. 
- // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional string kind = 1; - - // APIVersion defines the versioned schema of this representation of an object. - // Servers should convert recognized schemas to the latest internal value, and - // may reject unrecognized values. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#resources - optional string apiVersion = 2; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/group_version.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/group_version.go deleted file mode 100644 index 167002c3f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/group_version.go +++ /dev/null @@ -1,287 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "encoding/json" - "fmt" - "strings" -) - -// ParseResourceArg takes the common style of string which may be either `resource.group.com` or `resource.version.group.com` -// and parses it out into both possibilities. This code takes no responsibility for knowing which representation was intended -// but with a knowledge of all GroupVersions, calling code can take a very good guess. If there are only two segments, then -// `*GroupVersionResource` is nil. -// `resource.group.com` -> `group=com, version=group, resource=resource` and `group=group.com, resource=resource` -func ParseResourceArg(arg string) (*GroupVersionResource, GroupResource) { - var gvr *GroupVersionResource - if strings.Count(arg, ".") >= 2 { - s := strings.SplitN(arg, ".", 3) - gvr = &GroupVersionResource{Group: s[2], Version: s[1], Resource: s[0]} - } - - return gvr, ParseGroupResource(arg) -} - -// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying -// concepts during lookup stages without having partially valid types -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type GroupResource struct { - Group string `protobuf:"bytes,1,opt,name=group"` - Resource string `protobuf:"bytes,2,opt,name=resource"` -} - -func (gr GroupResource) WithVersion(version string) GroupVersionResource { - return GroupVersionResource{Group: gr.Group, Version: version, Resource: gr.Resource} -} - -func (gr GroupResource) IsEmpty() bool { - return len(gr.Group) == 0 && len(gr.Resource) == 0 -} - -func (gr *GroupResource) String() string { - if len(gr.Group) == 0 { - return gr.Resource - } - return gr.Resource + "." + gr.Group -} - -// ParseGroupResource turns "resource.group" string into a GroupResource struct. Empty strings are allowed -// for each field. -func ParseGroupResource(gr string) GroupResource { - if i := strings.Index(gr, "."); i == -1 { - return GroupResource{Resource: gr} - } else { - return GroupResource{Group: gr[i+1:], Resource: gr[:i]} - } -} - -// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. 
It doesn't use a GroupVersion to avoid custom marshalling -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type GroupVersionResource struct { - Group string `protobuf:"bytes,1,opt,name=group"` - Version string `protobuf:"bytes,2,opt,name=version"` - Resource string `protobuf:"bytes,3,opt,name=resource"` -} - -func (gvr GroupVersionResource) IsEmpty() bool { - return len(gvr.Group) == 0 && len(gvr.Version) == 0 && len(gvr.Resource) == 0 -} - -func (gvr GroupVersionResource) GroupResource() GroupResource { - return GroupResource{Group: gvr.Group, Resource: gvr.Resource} -} - -func (gvr GroupVersionResource) GroupVersion() GroupVersion { - return GroupVersion{Group: gvr.Group, Version: gvr.Version} -} - -func (gvr *GroupVersionResource) String() string { - return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") -} - -// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying -// concepts during lookup stages without having partially valid types -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type GroupKind struct { - Group string `protobuf:"bytes,1,opt,name=group"` - Kind string `protobuf:"bytes,2,opt,name=kind"` -} - -func (gk GroupKind) IsEmpty() bool { - return len(gk.Group) == 0 && len(gk.Kind) == 0 -} - -func (gk GroupKind) WithVersion(version string) GroupVersionKind { - return GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind} -} - -func (gk *GroupKind) String() string { - if len(gk.Group) == 0 { - return gk.Kind - } - return gk.Kind + "." + gk.Group -} - -// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type GroupVersionKind struct { - Group string `protobuf:"bytes,1,opt,name=group"` - Version string `protobuf:"bytes,2,opt,name=version"` - Kind string `protobuf:"bytes,3,opt,name=kind"` -} - -// IsEmpty returns true if group, version, and kind are empty -func (gvk GroupVersionKind) IsEmpty() bool { - return len(gvk.Group) == 0 && len(gvk.Version) == 0 && len(gvk.Kind) == 0 -} - -func (gvk GroupVersionKind) GroupKind() GroupKind { - return GroupKind{Group: gvk.Group, Kind: gvk.Kind} -} - -func (gvk GroupVersionKind) GroupVersion() GroupVersion { - return GroupVersion{Group: gvk.Group, Version: gvk.Version} -} - -func (gvk GroupVersionKind) String() string { - return gvk.Group + "/" + gvk.Version + ", Kind=" + gvk.Kind -} - -// GroupVersion contains the "group" and the "version", which uniquely identifies the API. -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type GroupVersion struct { - Group string `protobuf:"bytes,1,opt,name=group"` - Version string `protobuf:"bytes,2,opt,name=version"` -} - -// IsEmpty returns true if group and version are empty -func (gv GroupVersion) IsEmpty() bool { - return len(gv.Group) == 0 && len(gv.Version) == 0 -} - -// String puts "group" and "version" into a single "group/version" string. For the legacy v1 -// it returns "v1". 
-func (gv GroupVersion) String() string { - // special case the internal apiVersion for the legacy kube types - if gv.IsEmpty() { - return "" - } - - // special case of "v1" for backward compatibility - if len(gv.Group) == 0 && gv.Version == "v1" { - return gv.Version - } - if len(gv.Group) > 0 { - return gv.Group + "/" + gv.Version - } - return gv.Version -} - -// ParseGroupVersion turns "group/version" string into a GroupVersion struct. It reports error -// if it cannot parse the string. -func ParseGroupVersion(gv string) (GroupVersion, error) { - // this can be the internal version for the legacy kube types - // TODO once we've cleared the last uses as strings, this special case should be removed. - if (len(gv) == 0) || (gv == "/") { - return GroupVersion{}, nil - } - - switch strings.Count(gv, "/") { - case 0: - return GroupVersion{"", gv}, nil - case 1: - i := strings.Index(gv, "/") - return GroupVersion{gv[:i], gv[i+1:]}, nil - default: - return GroupVersion{}, fmt.Errorf("unexpected GroupVersion string: %v", gv) - } -} - -// WithKind creates a GroupVersionKind based on the method receiver's GroupVersion and the passed Kind. -func (gv GroupVersion) WithKind(kind string) GroupVersionKind { - return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind} -} - -// WithResource creates a GroupVersionResource based on the method receiver's GroupVersion and the passed Resource. -func (gv GroupVersion) WithResource(resource string) GroupVersionResource { - return GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: resource} -} - -// MarshalJSON implements the json.Marshaller interface. -func (gv GroupVersion) MarshalJSON() ([]byte, error) { - s := gv.String() - if strings.Count(s, "/") > 1 { - return []byte{}, fmt.Errorf("illegal GroupVersion %v: contains more than one /", s) - } - return json.Marshal(s) -} - -func (gv *GroupVersion) unmarshal(value []byte) error { - var s string - if err := json.Unmarshal(value, &s); err != nil { - return err - } - parsed, err := ParseGroupVersion(s) - if err != nil { - return err - } - *gv = parsed - return nil -} - -// UnmarshalJSON implements the json.Unmarshaller interface. -func (gv *GroupVersion) UnmarshalJSON(value []byte) error { - return gv.unmarshal(value) -} - -// UnmarshalTEXT implements the Ugorji's encoding.TextUnmarshaler interface. -func (gv *GroupVersion) UnmarshalText(value []byte) error { - return gv.unmarshal(value) -} - -// ToAPIVersionAndKind is a convenience method for satisfying runtime.Object on types that -// do not use TypeMeta. -func (gvk *GroupVersionKind) ToAPIVersionAndKind() (string, string) { - if gvk == nil { - return "", "" - } - return gvk.GroupVersion().String(), gvk.Kind -} - -// FromAPIVersionAndKind returns a GVK representing the provided fields for types that -// do not use TypeMeta. This method exists to support test types and legacy serializations -// that have a distinct group and kind. -// TODO: further reduce usage of this method. -func FromAPIVersionAndKind(apiVersion, kind string) GroupVersionKind { - if gv, err := ParseGroupVersion(apiVersion); err == nil { - return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind} - } - return GroupVersionKind{Kind: kind} -} - -// All objects that are serialized from a Scheme encode their type information. This interface is used -// by serialization to set type information from the Scheme onto the serialized version of an object. -// For objects that cannot be serialized or have unique requirements, this interface may be a no-op. 
-// TODO: this belongs in pkg/runtime, move unversioned.GVK into runtime. -type ObjectKind interface { - // SetGroupVersionKind sets or clears the intended serialized kind of an object. Passing kind nil - // should clear the current setting. - SetGroupVersionKind(kind GroupVersionKind) - // GroupVersionKind returns the stored group, version, and kind of an object, or nil if the object does - // not expose or provide these fields. - GroupVersionKind() GroupVersionKind -} - -// EmptyObjectKind implements the ObjectKind interface as a noop -// TODO: this belongs in pkg/runtime, move unversioned.GVK into runtime. -var EmptyObjectKind = emptyObjectKind{} - -type emptyObjectKind struct{} - -// SetGroupVersionKind implements the ObjectKind interface -func (emptyObjectKind) SetGroupVersionKind(gvk GroupVersionKind) {} - -// GroupVersionKind implements the ObjectKind interface -func (emptyObjectKind) GroupVersionKind() GroupVersionKind { return GroupVersionKind{} } diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/helpers.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/helpers.go deleted file mode 100644 index b71297ec5..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/helpers.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/util/sets" -) - -// LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements -// labels.Selector -// Note: This function should be kept in sync with the selector methods in pkg/labels/selector.go -func LabelSelectorAsSelector(ps *LabelSelector) (labels.Selector, error) { - if ps == nil { - return labels.Nothing(), nil - } - if len(ps.MatchLabels)+len(ps.MatchExpressions) == 0 { - return labels.Everything(), nil - } - selector := labels.NewSelector() - for k, v := range ps.MatchLabels { - r, err := labels.NewRequirement(k, labels.EqualsOperator, sets.NewString(v)) - if err != nil { - return nil, err - } - selector = selector.Add(*r) - } - for _, expr := range ps.MatchExpressions { - var op labels.Operator - switch expr.Operator { - case LabelSelectorOpIn: - op = labels.InOperator - case LabelSelectorOpNotIn: - op = labels.NotInOperator - case LabelSelectorOpExists: - op = labels.ExistsOperator - case LabelSelectorOpDoesNotExist: - op = labels.DoesNotExistOperator - default: - return nil, fmt.Errorf("%q is not a valid pod selector operator", expr.Operator) - } - r, err := labels.NewRequirement(expr.Key, op, sets.NewString(expr.Values...)) - if err != nil { - return nil, err - } - selector = selector.Add(*r) - } - return selector, nil -} - -// ParseToLabelSelector parses a string representing a selector into a LabelSelector object. 
-// Note: This function should be kept in sync with the parser in pkg/labels/selector.go -func ParseToLabelSelector(selector string) (*LabelSelector, error) { - reqs, err := labels.ParseToRequirements(selector) - if err != nil { - return nil, fmt.Errorf("couldn't parse the selector string \"%s\": %v", selector, err) - } - - labelSelector := &LabelSelector{ - MatchLabels: map[string]string{}, - MatchExpressions: []LabelSelectorRequirement{}, - } - for _, req := range reqs { - var op LabelSelectorOperator - switch req.Operator() { - case labels.EqualsOperator, labels.DoubleEqualsOperator: - vals := req.Values() - if vals.Len() != 1 { - return nil, fmt.Errorf("equals operator must have exactly one value") - } - val, ok := vals.PopAny() - if !ok { - return nil, fmt.Errorf("equals operator has exactly one value but it cannot be retrieved") - } - labelSelector.MatchLabels[req.Key()] = val - continue - case labels.InOperator: - op = LabelSelectorOpIn - case labels.NotInOperator: - op = LabelSelectorOpNotIn - case labels.ExistsOperator: - op = LabelSelectorOpExists - case labels.DoesNotExistOperator: - op = LabelSelectorOpDoesNotExist - case labels.GreaterThanOperator, labels.LessThanOperator: - // Adding a separate case for these operators to indicate that this is deliberate - return nil, fmt.Errorf("%q isn't supported in label selectors", req.Operator()) - default: - return nil, fmt.Errorf("%q is not a valid label selector operator", req.Operator()) - } - labelSelector.MatchExpressions = append(labelSelector.MatchExpressions, LabelSelectorRequirement{ - Key: req.Key(), - Operator: op, - Values: req.Values().List(), - }) - } - return labelSelector, nil -} - -// SetAsLabelSelector converts the labels.Set object into a LabelSelector api object. -func SetAsLabelSelector(ls labels.Set) *LabelSelector { - if ls == nil { - return nil - } - - selector := &LabelSelector{ - MatchLabels: make(map[string]string), - } - for label, value := range ls { - selector.MatchLabels[label] = value - } - - return selector -} - -// FormatLabelSelector convert labelSelector into plain string -func FormatLabelSelector(labelSelector *LabelSelector) string { - selector, err := LabelSelectorAsSelector(labelSelector) - if err != nil { - return "<error>" - } - - l := selector.String() - if len(l) == 0 { - l = "<none>" - } - return l -} - -func ExtractGroupVersions(l *APIGroupList) []string { - var groupVersions []string - for _, g := range l.Groups { - for _, gv := range g.Versions { - groupVersions = append(groupVersions, gv.GroupVersion) - } - } - return groupVersions -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/meta.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/meta.go deleted file mode 100644 index b82b1990e..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/meta.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package unversioned - -// ListMetaAccessor retrieves the list interface from an object -// TODO: move this, and TypeMeta and ListMeta, to a different package -type ListMetaAccessor interface { - GetListMeta() List -} - -// List lets you work with list metadata from any of the versioned or -// internal API objects. Attempting to set or retrieve a field on an object that does -// not support that field will be a no-op and return a default value. -// TODO: move this, and TypeMeta and ListMeta, to a different package -type List interface { - GetResourceVersion() string - SetResourceVersion(version string) - GetSelfLink() string - SetSelfLink(selfLink string) -} - -// Type exposes the type and APIVersion of versioned or internal API objects. -// TODO: move this, and TypeMeta and ListMeta, to a different package -type Type interface { - GetAPIVersion() string - SetAPIVersion(version string) - GetKind() string - SetKind(kind string) -} - -func (meta *ListMeta) GetResourceVersion() string { return meta.ResourceVersion } -func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } -func (meta *ListMeta) GetSelfLink() string { return meta.SelfLink } -func (meta *ListMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } - -func (obj *TypeMeta) GetObjectKind() ObjectKind { return obj } - -// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta -func (obj *TypeMeta) SetGroupVersionKind(gvk GroupVersionKind) { - obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() -} - -// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta -func (obj *TypeMeta) GroupVersionKind() GroupVersionKind { - return FromAPIVersionAndKind(obj.APIVersion, obj.Kind) -} - -func (obj *ListMeta) GetListMeta() List { return obj } diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/register.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/register.go deleted file mode 100644 index f8dbd8371..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/register.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = GroupVersion{Group: "", Version: ""} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/time.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/time.go deleted file mode 100644 index df94bbe72..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/time.go +++ /dev/null @@ -1,153 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "encoding/json" - "time" - - "github.com/google/gofuzz" -) - -// Time is a wrapper around time.Time which supports correct -// marshaling to YAML and JSON. Wrappers are provided for many -// of the factory methods that the time package offers. -// -// +protobuf.options.marshal=false -// +protobuf.as=Timestamp -type Time struct { - time.Time `protobuf:"-"` -} - -// NewTime returns a wrapped instance of the provided time -func NewTime(time time.Time) Time { - return Time{time} -} - -// Date returns the Time corresponding to the supplied parameters -// by wrapping time.Date. -func Date(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) Time { - return Time{time.Date(year, month, day, hour, min, sec, nsec, loc)} -} - -// Now returns the current local time. -func Now() Time { - return Time{time.Now()} -} - -// IsZero returns true if the value is nil or time is zero. -func (t *Time) IsZero() bool { - if t == nil { - return true - } - return t.Time.IsZero() -} - -// Before reports whether the time instant t is before u. -func (t Time) Before(u Time) bool { - return t.Time.Before(u.Time) -} - -// Equal reports whether the time instant t is equal to u. -func (t Time) Equal(u Time) bool { - return t.Time.Equal(u.Time) -} - -// Unix returns the local time corresponding to the given Unix time -// by wrapping time.Unix. -func Unix(sec int64, nsec int64) Time { - return Time{time.Unix(sec, nsec)} -} - -// Rfc3339Copy returns a copy of the Time at second-level precision. -func (t Time) Rfc3339Copy() Time { - copied, _ := time.Parse(time.RFC3339, t.Format(time.RFC3339)) - return Time{copied} -} - -// UnmarshalJSON implements the json.Unmarshaller interface. -func (t *Time) UnmarshalJSON(b []byte) error { - if len(b) == 4 && string(b) == "null" { - t.Time = time.Time{} - return nil - } - - var str string - json.Unmarshal(b, &str) - - pt, err := time.Parse(time.RFC3339, str) - if err != nil { - return err - } - - t.Time = pt.Local() - return nil -} - -// UnmarshalQueryParameter converts from a URL query parameter value to an object -func (t *Time) UnmarshalQueryParameter(str string) error { - if len(str) == 0 { - t.Time = time.Time{} - return nil - } - // Tolerate requests from older clients that used JSON serialization to build query params - if len(str) == 4 && str == "null" { - t.Time = time.Time{} - return nil - } - - pt, err := time.Parse(time.RFC3339, str) - if err != nil { - return err - } - - t.Time = pt.Local() - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (t Time) MarshalJSON() ([]byte, error) { - if t.IsZero() { - // Encode unset/nil objects as JSON's "null". - return []byte("null"), nil - } - - return json.Marshal(t.UTC().Format(time.RFC3339)) -} - -// MarshalQueryParameter converts to a URL query parameter value -func (t Time) MarshalQueryParameter() (string, error) { - if t.IsZero() { - // Encode unset/nil objects as an empty string - return "", nil - } - - return t.UTC().Format(time.RFC3339), nil -} - -// Fuzz satisfies fuzz.Interface. 
-func (t *Time) Fuzz(c fuzz.Continue) { - if t == nil { - return - } - // Allow for about 1000 years of randomness. Leave off nanoseconds - // because JSON doesn't represent them so they can't round-trip - // properly. - t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0) -} - -var _ fuzz.Interface = &Time{} diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/types.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/types.go deleted file mode 100644 index 11afcc2b7..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/types.go +++ /dev/null @@ -1,460 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package unversioned contains API types that are common to all versions. -// -// The package contains two categories of types: -// - external (serialized) types that lack their own version (e.g TypeMeta) -// - internal (never-serialized) types that are needed by several different -// api groups, and so live here, to avoid duplication and/or import loops -// (e.g. LabelSelector). -// In the future, we will probably move these categories of objects into -// separate packages. -package unversioned - -import "strings" - -// TypeMeta describes an individual object in an API response or request -// with strings representing the type of the object and its API schema version. -// Structures that are versioned or persisted should inline TypeMeta. -type TypeMeta struct { - // Kind is a string value representing the REST resource this object represents. - // Servers may infer this from the endpoint the client submits requests to. - // Cannot be updated. - // In CamelCase. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` - - // APIVersion defines the versioned schema of this representation of an object. - // Servers should convert recognized schemas to the latest internal value, and - // may reject unrecognized values. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#resources - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` -} - -// ListMeta describes metadata that synthetic resources must have, including lists and -// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. -type ListMeta struct { - // SelfLink is a URL representing this object. - // Populated by the system. - // Read-only. - SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,1,opt,name=selfLink"` - - // String that identifies the server's internal version of this object that - // can be used by clients to determine when objects have changed. - // Value must be treated as opaque by clients and passed unmodified back to the server. - // Populated by the system. - // Read-only. 
- // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#concurrency-control-and-consistency - ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` -} - -// ExportOptions is the query options to the standard REST get call. -type ExportOptions struct { - TypeMeta `json:",inline"` - // Should this value be exported. Export strips fields that a user can not specify.` - Export bool `json:"export" protobuf:"varint,1,opt,name=export"` - // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace' - Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"` -} - -// Status is a return value for calls that don't return other objects. -type Status struct { - TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Status of the operation. - // One of: "Success" or "Failure". - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Status string `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` - // A human-readable description of the status of this operation. - Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` - // A machine-readable description of why this operation is in the - // "Failure" status. If this value is empty there - // is no information available. A Reason clarifies an HTTP status - // code but does not override it. - Reason StatusReason `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason,casttype=StatusReason"` - // Extended data associated with the reason. Each reason may define its - // own extended details. This field is optional and the data returned - // is not guaranteed to conform to any schema except that defined by - // the reason type. - Details *StatusDetails `json:"details,omitempty" protobuf:"bytes,5,opt,name=details"` - // Suggested HTTP return code for this status, 0 if not set. - Code int32 `json:"code,omitempty" protobuf:"varint,6,opt,name=code"` -} - -// StatusDetails is a set of additional properties that MAY be set by the -// server to provide additional information about a response. The Reason -// field of a Status object defines what attributes will be set. Clients -// must ignore fields that do not match the defined type of each attribute, -// and should assume that any attribute may be empty, invalid, or under -// defined. -type StatusDetails struct { - // The name attribute of the resource associated with the status StatusReason - // (when there is a single name which can be described). - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - // The group attribute of the resource associated with the status StatusReason. - Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"` - // The kind attribute of the resource associated with the status StatusReason. - // On some operations may differ from the requested resource Kind. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` - // The Causes array includes more details associated with the StatusReason - // failure. Not all StatusReasons may provide detailed causes. 
- Causes []StatusCause `json:"causes,omitempty" protobuf:"bytes,4,rep,name=causes"` - // If specified, the time in seconds before the operation should be retried. - RetryAfterSeconds int32 `json:"retryAfterSeconds,omitempty" protobuf:"varint,5,opt,name=retryAfterSeconds"` -} - -// Values of Status.Status -const ( - StatusSuccess = "Success" - StatusFailure = "Failure" -) - -// StatusReason is an enumeration of possible failure causes. Each StatusReason -// must map to a single HTTP status code, but multiple reasons may map -// to the same HTTP status code. -// TODO: move to apiserver -type StatusReason string - -const ( - // StatusReasonUnknown means the server has declined to indicate a specific reason. - // The details field may contain other information about this error. - // Status code 500. - StatusReasonUnknown StatusReason = "" - - // StatusReasonUnauthorized means the server can be reached and understood the request, but requires - // the user to present appropriate authorization credentials (identified by the WWW-Authenticate header) - // in order for the action to be completed. If the user has specified credentials on the request, the - // server considers them insufficient. - // Status code 401 - StatusReasonUnauthorized StatusReason = "Unauthorized" - - // StatusReasonForbidden means the server can be reached and understood the request, but refuses - // to take any further action. It is the result of the server being configured to deny access for some reason - // to the requested resource by the client. - // Details (optional): - // "kind" string - the kind attribute of the forbidden resource - // on some operations may differ from the requested - // resource. - // "id" string - the identifier of the forbidden resource - // Status code 403 - StatusReasonForbidden StatusReason = "Forbidden" - - // StatusReasonNotFound means one or more resources required for this operation - // could not be found. - // Details (optional): - // "kind" string - the kind attribute of the missing resource - // on some operations may differ from the requested - // resource. - // "id" string - the identifier of the missing resource - // Status code 404 - StatusReasonNotFound StatusReason = "NotFound" - - // StatusReasonAlreadyExists means the resource you are creating already exists. - // Details (optional): - // "kind" string - the kind attribute of the conflicting resource - // "id" string - the identifier of the conflicting resource - // Status code 409 - StatusReasonAlreadyExists StatusReason = "AlreadyExists" - - // StatusReasonConflict means the requested operation cannot be completed - // due to a conflict in the operation. The client may need to alter the - // request. Each resource may define custom details that indicate the - // nature of the conflict. - // Status code 409 - StatusReasonConflict StatusReason = "Conflict" - - // StatusReasonGone means the item is no longer available at the server and no - // forwarding address is known. - // Status code 410 - StatusReasonGone StatusReason = "Gone" - - // StatusReasonInvalid means the requested create or update operation cannot be - // completed due to invalid data provided as part of the request. The client may - // need to alter the request. When set, the client may use the StatusDetails - // message field as a summary of the issues encountered. 
- // Details (optional): - // "kind" string - the kind attribute of the invalid resource - // "id" string - the identifier of the invalid resource - // "causes" - one or more StatusCause entries indicating the data in the - // provided resource that was invalid. The code, message, and - // field attributes will be set. - // Status code 422 - StatusReasonInvalid StatusReason = "Invalid" - - // StatusReasonServerTimeout means the server can be reached and understood the request, - // but cannot complete the action in a reasonable time. The client should retry the request. - // This is may be due to temporary server load or a transient communication issue with - // another server. Status code 500 is used because the HTTP spec provides no suitable - // server-requested client retry and the 5xx class represents actionable errors. - // Details (optional): - // "kind" string - the kind attribute of the resource being acted on. - // "id" string - the operation that is being attempted. - // "retryAfterSeconds" int32 - the number of seconds before the operation should be retried - // Status code 500 - StatusReasonServerTimeout StatusReason = "ServerTimeout" - - // StatusReasonTimeout means that the request could not be completed within the given time. - // Clients can get this response only when they specified a timeout param in the request, - // or if the server cannot complete the operation within a reasonable amount of time. - // The request might succeed with an increased value of timeout param. The client *should* - // wait at least the number of seconds specified by the retryAfterSeconds field. - // Details (optional): - // "retryAfterSeconds" int32 - the number of seconds before the operation should be retried - // Status code 504 - StatusReasonTimeout StatusReason = "Timeout" - - // StatusReasonBadRequest means that the request itself was invalid, because the request - // doesn't make any sense, for example deleting a read-only object. This is different than - // StatusReasonInvalid above which indicates that the API call could possibly succeed, but the - // data was invalid. API calls that return BadRequest can never succeed. - StatusReasonBadRequest StatusReason = "BadRequest" - - // StatusReasonMethodNotAllowed means that the action the client attempted to perform on the - // resource was not supported by the code - for instance, attempting to delete a resource that - // can only be created. API calls that return MethodNotAllowed can never succeed. - StatusReasonMethodNotAllowed StatusReason = "MethodNotAllowed" - - // StatusReasonInternalError indicates that an internal error occurred, it is unexpected - // and the outcome of the call is unknown. - // Details (optional): - // "causes" - The original error - // Status code 500 - StatusReasonInternalError StatusReason = "InternalError" - - // StatusReasonExpired indicates that the request is invalid because the content you are requesting - // has expired and is no longer available. It is typically associated with watches that can't be - // serviced. - // Status code 410 (gone) - StatusReasonExpired StatusReason = "Expired" - - // StatusReasonServiceUnavailable means that the request itself was valid, - // but the requested service is unavailable at this time. - // Retrying the request after some time might succeed. - // Status code 503 - StatusReasonServiceUnavailable StatusReason = "ServiceUnavailable" -) - -// StatusCause provides more information about an api.Status failure, including -// cases when multiple errors are encountered. 
-type StatusCause struct { - // A machine-readable description of the cause of the error. If this value is - // empty there is no information available. - Type CauseType `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason,casttype=CauseType"` - // A human-readable description of the cause of the error. This field may be - // presented as-is to a reader. - Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` - // The field of the resource that has caused this error, as named by its JSON - // serialization. May include dot and postfix notation for nested attributes. - // Arrays are zero-indexed. Fields may appear more than once in an array of - // causes due to fields having multiple errors. - // Optional. - // - // Examples: - // "name" - the field "name" on the current resource - // "items[0].name" - the field "name" on the first array entry in "items" - Field string `json:"field,omitempty" protobuf:"bytes,3,opt,name=field"` -} - -// CauseType is a machine readable value providing more detail about what -// occurred in a status response. An operation may have multiple causes for a -// status (whether Failure or Success). -type CauseType string - -const ( - // CauseTypeFieldValueNotFound is used to report failure to find a requested value - // (e.g. looking up an ID). - CauseTypeFieldValueNotFound CauseType = "FieldValueNotFound" - // CauseTypeFieldValueRequired is used to report required values that are not - // provided (e.g. empty strings, null values, or empty arrays). - CauseTypeFieldValueRequired CauseType = "FieldValueRequired" - // CauseTypeFieldValueDuplicate is used to report collisions of values that must be - // unique (e.g. unique IDs). - CauseTypeFieldValueDuplicate CauseType = "FieldValueDuplicate" - // CauseTypeFieldValueInvalid is used to report malformed values (e.g. failed regex - // match). - CauseTypeFieldValueInvalid CauseType = "FieldValueInvalid" - // CauseTypeFieldValueNotSupported is used to report valid (as per formatting rules) - // values that can not be handled (e.g. an enumerated string). - CauseTypeFieldValueNotSupported CauseType = "FieldValueNotSupported" - // CauseTypeUnexpectedServerResponse is used to report when the server responded to the client - // without the expected return type. The presence of this cause indicates the error may be - // due to an intervening proxy or the server software malfunctioning. - CauseTypeUnexpectedServerResponse CauseType = "UnexpectedServerResponse" -) - -// APIVersions lists the versions that are available, to allow clients to -// discover the API at /api, which is the root path of the legacy v1 API. -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type APIVersions struct { - TypeMeta `json:",inline"` - // versions are the api versions that are available. - Versions []string `json:"versions" protobuf:"bytes,1,rep,name=versions"` - // a map of client CIDR to server address that is serving this group. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - // The server returns only those CIDRs that it thinks that the client can match. - // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. - // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. 
- ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,2,rep,name=serverAddressByClientCIDRs"` -} - -// APIGroupList is a list of APIGroup, to allow clients to discover the API at -// /apis. -type APIGroupList struct { - TypeMeta `json:",inline"` - // groups is a list of APIGroup. - Groups []APIGroup `json:"groups" protobuf:"bytes,1,rep,name=groups"` -} - -// APIGroup contains the name, the supported versions, and the preferred version -// of a group. -type APIGroup struct { - TypeMeta `json:",inline"` - // name is the name of the group. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // versions are the versions supported in this group. - Versions []GroupVersionForDiscovery `json:"versions" protobuf:"bytes,2,rep,name=versions"` - // preferredVersion is the version preferred by the API server, which - // probably is the storage version. - PreferredVersion GroupVersionForDiscovery `json:"preferredVersion,omitempty" protobuf:"bytes,3,opt,name=preferredVersion"` - // a map of client CIDR to server address that is serving this group. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - // The server returns only those CIDRs that it thinks that the client can match. - // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. - // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. - ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"` -} - -// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. -type ServerAddressByClientCIDR struct { - // The CIDR with which clients can match their IP to figure out the server address that they should use. - ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"` - // Address of this server, suitable for a client that matches the above CIDR. - // This can be a hostname, hostname:port, IP or IP:port. - ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"` -} - -// GroupVersion contains the "group/version" and "version" string of a version. -// It is made a struct to keep extensiblity. -type GroupVersionForDiscovery struct { - // groupVersion specifies the API group and version in the form "group/version" - GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"` - // version specifies the version in the form of "version". This is to save - // the clients the trouble of splitting the GroupVersion. - Version string `json:"version" protobuf:"bytes,2,opt,name=version"` -} - -// APIResource specifies the name of a resource and whether it is namespaced. -type APIResource struct { - // name is the name of the resource. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // namespaced indicates if a resource is namespaced or not. - Namespaced bool `json:"namespaced" protobuf:"varint,2,opt,name=namespaced"` - // kind is the kind for the resource (e.g. 
'Foo' is the kind for a resource 'foo') - Kind string `json:"kind" protobuf:"bytes,3,opt,name=kind"` -} - -// APIResourceList is a list of APIResource, it is used to expose the name of the -// resources supported in a specific group and version, and if the resource -// is namespaced. -type APIResourceList struct { - TypeMeta `json:",inline"` - // groupVersion is the group and version this APIResourceList is for. - GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"` - // resources contains the name of the resources and if they are namespaced. - APIResources []APIResource `json:"resources" protobuf:"bytes,2,rep,name=resources"` -} - -// RootPaths lists the paths available at root. -// For example: "/healthz", "/apis". -type RootPaths struct { - // paths are the paths available at root. - Paths []string `json:"paths" protobuf:"bytes,1,rep,name=paths"` -} - -// TODO: remove me when watch is refactored -func LabelSelectorQueryParam(version string) string { - return "labelSelector" -} - -// TODO: remove me when watch is refactored -func FieldSelectorQueryParam(version string) string { - return "fieldSelector" -} - -// String returns available api versions as a human-friendly version string. -func (apiVersions APIVersions) String() string { - return strings.Join(apiVersions.Versions, ",") -} - -func (apiVersions APIVersions) GoString() string { - return apiVersions.String() -} - -// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body. -type Patch struct{} - -// Note: -// There are two different styles of label selectors used in versioned types: -// an older style which is represented as just a string in versioned types, and a -// newer style that is structured. LabelSelector is an internal representation for the -// latter style. - -// A label selector is a label query over a set of resources. The result of matchLabels and -// matchExpressions are ANDed. An empty label selector matches all objects. A null -// label selector matches no objects. -type LabelSelector struct { - // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - // map is equivalent to an element of matchExpressions, whose key field is "key", the - // operator is "In", and the values array contains only "value". The requirements are ANDed. - MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"` - // matchExpressions is a list of label selector requirements. The requirements are ANDed. - MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"` -} - -// A label selector requirement is a selector that contains values, a key, and an operator that -// relates the key and values. -type LabelSelectorRequirement struct { - // key is the label key that the selector applies to. - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` - // operator represents a key's relationship to a set of values. - // Valid operators ard In, NotIn, Exists and DoesNotExist. - Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"` - // values is an array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. This array is replaced during a strategic - // merge patch. 
- Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` -} - -// A label selector operator is the set of operators that can be used in a selector requirement. -type LabelSelectorOperator string - -const ( - LabelSelectorOpIn LabelSelectorOperator = "In" - LabelSelectorOpNotIn LabelSelectorOperator = "NotIn" - LabelSelectorOpExists LabelSelectorOperator = "Exists" - LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/types_swagger_doc_generated.go deleted file mode 100644 index 2cad1b535..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/types_swagger_doc_generated.go +++ /dev/null @@ -1,208 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_APIGroup = map[string]string{ - "": "APIGroup contains the name, the supported versions, and the preferred version of a group.", - "name": "name is the name of the group.", - "versions": "versions are the versions supported in this group.", - "preferredVersion": "preferredVersion is the version preferred by the API server, which probably is the storage version.", - "serverAddressByClientCIDRs": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. 
Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", -} - -func (APIGroup) SwaggerDoc() map[string]string { - return map_APIGroup -} - -var map_APIGroupList = map[string]string{ - "": "APIGroupList is a list of APIGroup, to allow clients to discover the API at /apis.", - "groups": "groups is a list of APIGroup.", -} - -func (APIGroupList) SwaggerDoc() map[string]string { - return map_APIGroupList -} - -var map_APIResource = map[string]string{ - "": "APIResource specifies the name of a resource and whether it is namespaced.", - "name": "name is the name of the resource.", - "namespaced": "namespaced indicates if a resource is namespaced or not.", - "kind": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", -} - -func (APIResource) SwaggerDoc() map[string]string { - return map_APIResource -} - -var map_APIResourceList = map[string]string{ - "": "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.", - "groupVersion": "groupVersion is the group and version this APIResourceList is for.", - "resources": "resources contains the name of the resources and if they are namespaced.", -} - -func (APIResourceList) SwaggerDoc() map[string]string { - return map_APIResourceList -} - -var map_APIVersions = map[string]string{ - "": "APIVersions lists the versions that are available, to allow clients to discover the API at /api, which is the root path of the legacy v1 API.", - "versions": "versions are the api versions that are available.", - "serverAddressByClientCIDRs": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", -} - -func (APIVersions) SwaggerDoc() map[string]string { - return map_APIVersions -} - -var map_ExportOptions = map[string]string{ - "": "ExportOptions is the query options to the standard REST get call.", - "export": "Should this value be exported. Export strips fields that a user can not specify.`", - "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'", -} - -func (ExportOptions) SwaggerDoc() map[string]string { - return map_ExportOptions -} - -var map_GroupVersionForDiscovery = map[string]string{ - "": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensiblity.", - "groupVersion": "groupVersion specifies the API group and version in the form \"group/version\"", - "version": "version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion.", -} - -func (GroupVersionForDiscovery) SwaggerDoc() map[string]string { - return map_GroupVersionForDiscovery -} - -var map_LabelSelector = map[string]string{ - "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. 
An empty label selector matches all objects. A null label selector matches no objects.", - "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", - "matchExpressions": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", -} - -func (LabelSelector) SwaggerDoc() map[string]string { - return map_LabelSelector -} - -var map_LabelSelectorRequirement = map[string]string{ - "": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", - "key": "key is the label key that the selector applies to.", - "operator": "operator represents a key's relationship to a set of values. Valid operators ard In, NotIn, Exists and DoesNotExist.", - "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", -} - -func (LabelSelectorRequirement) SwaggerDoc() map[string]string { - return map_LabelSelectorRequirement -} - -var map_ListMeta = map[string]string{ - "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", - "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", - "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#concurrency-control-and-consistency", -} - -func (ListMeta) SwaggerDoc() map[string]string { - return map_ListMeta -} - -var map_Patch = map[string]string{ - "": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.", -} - -func (Patch) SwaggerDoc() map[string]string { - return map_Patch -} - -var map_RootPaths = map[string]string{ - "": "RootPaths lists the paths available at root. For example: \"/healthz\", \"/apis\".", - "paths": "paths are the paths available at root.", -} - -func (RootPaths) SwaggerDoc() map[string]string { - return map_RootPaths -} - -var map_ServerAddressByClientCIDR = map[string]string{ - "": "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.", - "clientCIDR": "The CIDR with which clients can match their IP to figure out the server address that they should use.", - "serverAddress": "Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.", -} - -func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string { - return map_ServerAddressByClientCIDR -} - -var map_Status = map[string]string{ - "": "Status is a return value for calls that don't return other objects.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "status": "Status of the operation. One of: \"Success\" or \"Failure\". 
More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", - "message": "A human-readable description of the status of this operation.", - "reason": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.", - "details": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.", - "code": "Suggested HTTP return code for this status, 0 if not set.", -} - -func (Status) SwaggerDoc() map[string]string { - return map_Status -} - -var map_StatusCause = map[string]string{ - "": "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.", - "reason": "A machine-readable description of the cause of the error. If this value is empty there is no information available.", - "message": "A human-readable description of the cause of the error. This field may be presented as-is to a reader.", - "field": "The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"", -} - -func (StatusCause) SwaggerDoc() map[string]string { - return map_StatusCause -} - -var map_StatusDetails = map[string]string{ - "": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", - "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", - "group": "The group attribute of the resource associated with the status StatusReason.", - "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "causes": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", - "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried.", -} - -func (StatusDetails) SwaggerDoc() map[string]string { - return map_StatusDetails -} - -var map_TypeMeta = map[string]string{ - "": "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.", - "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#resources", -} - -func (TypeMeta) SwaggerDoc() map[string]string { - return map_TypeMeta -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/validation/validation.go deleted file mode 100644 index 47852e3e2..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/validation/validation.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/util/validation" - "k8s.io/kubernetes/pkg/util/validation/field" -) - -func ValidateLabelSelector(ps *unversioned.LabelSelector, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if ps == nil { - return allErrs - } - allErrs = append(allErrs, ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...) - for i, expr := range ps.MatchExpressions { - allErrs = append(allErrs, ValidateLabelSelectorRequirement(expr, fldPath.Child("matchExpressions").Index(i))...) - } - return allErrs -} - -func ValidateLabelSelectorRequirement(sr unversioned.LabelSelectorRequirement, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - switch sr.Operator { - case unversioned.LabelSelectorOpIn, unversioned.LabelSelectorOpNotIn: - if len(sr.Values) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'")) - } - case unversioned.LabelSelectorOpExists, unversioned.LabelSelectorOpDoesNotExist: - if len(sr.Values) > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) - } - default: - allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) - } - allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...) - return allErrs -} - -// ValidateLabelName validates that the label name is correctly defined. -func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for _, msg := range validation.IsQualifiedName(labelName) { - allErrs = append(allErrs, field.Invalid(fldPath, labelName, msg)) - } - return allErrs -} - -// ValidateLabels validates that a set of labels are correctly defined. -func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for k, v := range labels { - allErrs = append(allErrs, ValidateLabelName(k, fldPath)...) 
- for _, msg := range validation.IsValidLabelValue(v) { - allErrs = append(allErrs, field.Invalid(fldPath, v, msg)) - } - } - return allErrs -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/unversioned/well_known_labels.go b/vendor/k8s.io/kubernetes/pkg/api/unversioned/well_known_labels.go deleted file mode 100644 index 08e4f6889..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/unversioned/well_known_labels.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -const ( - // If you add a new topology domain here, also consider adding it to the set of default values - // for the scheduler's --failure-domain command-line argument. - LabelHostname = "kubernetes.io/hostname" - LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" - LabelZoneRegion = "failure-domain.beta.kubernetes.io/region" - - LabelInstanceType = "beta.kubernetes.io/instance-type" - - LabelOS = "beta.kubernetes.io/os" - LabelArch = "beta.kubernetes.io/arch" -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/util/group_version.go b/vendor/k8s.io/kubernetes/pkg/api/util/group_version.go deleted file mode 100644 index 0cee25023..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/util/group_version.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// TODO: This GetVersion/GetGroup arrangement is temporary and will be replaced -// with a GroupAndVersion type. -package util - -import "strings" - -func GetVersion(groupVersion string) string { - s := strings.Split(groupVersion, "/") - if len(s) != 2 { - // e.g. return "v1" for groupVersion="v1" - return s[len(s)-1] - } - return s[1] -} - -func GetGroup(groupVersion string) string { - s := strings.Split(groupVersion, "/") - if len(s) == 1 { - // e.g. return "" for groupVersion="v1" - return "" - } - return s[0] -} - -// GetGroupVersion returns the "group/version". It returns "version" is if group -// is empty. It returns "group/" if version is empty. -func GetGroupVersion(group, version string) string { - if len(group) == 0 { - return version - } - return group + "/" + version -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go deleted file mode 100644 index 98d8b7ed2..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go +++ /dev/null @@ -1,661 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "encoding/json" - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" -) - -const ( - // Annotation key used to identify mirror pods. - mirrorAnnotationKey = "kubernetes.io/config.mirror" - - // Value used to identify mirror pods from pre-v1.1 kubelet. - mirrorAnnotationValue_1_0 = "mirror" -) - -func addConversionFuncs(scheme *runtime.Scheme) { - // Add non-generated conversion functions - err := scheme.AddConversionFuncs( - Convert_api_Pod_To_v1_Pod, - Convert_api_PodSpec_To_v1_PodSpec, - Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, - Convert_api_ServiceSpec_To_v1_ServiceSpec, - Convert_v1_Pod_To_api_Pod, - Convert_v1_PodSpec_To_api_PodSpec, - Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec, - Convert_v1_ServiceSpec_To_api_ServiceSpec, - Convert_v1_ResourceList_To_api_ResourceList, - ) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. - panic(err) - } - - // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. - for _, kind := range []string{ - "Endpoints", - "ResourceQuota", - "PersistentVolumeClaim", - "Service", - "ServiceAccount", - "ConfigMap", - } { - err = api.Scheme.AddFieldLabelConversionFunc("v1", kind, - func(label, value string) (string, string, error) { - switch label { - case "metadata.namespace", - "metadata.name": - return label, value, nil - default: - return "", "", fmt.Errorf("field label %q not supported for %q", label, kind) - } - }) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. - panic(err) - } - } - - // Add field conversion funcs. - err = api.Scheme.AddFieldLabelConversionFunc("v1", "Pod", - func(label, value string) (string, string, error) { - switch label { - case "metadata.name", - "metadata.namespace", - "metadata.labels", - "metadata.annotations", - "status.phase", - "status.podIP", - "spec.nodeName", - "spec.restartPolicy": - return label, value, nil - // This is for backwards compatibility with old v1 clients which send spec.host - case "spec.host": - return "spec.nodeName", value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. - panic(err) - } - err = api.Scheme.AddFieldLabelConversionFunc("v1", "Node", - func(label, value string) (string, string, error) { - switch label { - case "metadata.name": - return label, value, nil - case "spec.unschedulable": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. 
- panic(err) - } - err = api.Scheme.AddFieldLabelConversionFunc("v1", "ReplicationController", - func(label, value string) (string, string, error) { - switch label { - case "metadata.name", - "metadata.namespace", - "status.replicas": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. - panic(err) - } - err = api.Scheme.AddFieldLabelConversionFunc("v1", "Event", - func(label, value string) (string, string, error) { - switch label { - case "involvedObject.kind", - "involvedObject.namespace", - "involvedObject.name", - "involvedObject.uid", - "involvedObject.apiVersion", - "involvedObject.resourceVersion", - "involvedObject.fieldPath", - "reason", - "source", - "type", - "metadata.namespace", - "metadata.name": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. - panic(err) - } - err = api.Scheme.AddFieldLabelConversionFunc("v1", "Namespace", - func(label, value string) (string, string, error) { - switch label { - case "status.phase", - "metadata.name": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. - panic(err) - } - err = api.Scheme.AddFieldLabelConversionFunc("v1", "PersistentVolume", - func(label, value string) (string, string, error) { - switch label { - case "metadata.name": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. - panic(err) - } - err = api.Scheme.AddFieldLabelConversionFunc("v1", "Secret", - func(label, value string) (string, string, error) { - switch label { - case "type", - "metadata.namespace", - "metadata.name": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. 
- panic(err) - } -} - -func Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *ReplicationControllerSpec, s conversion.Scope) error { - out.Replicas = &in.Replicas - out.Selector = in.Selector - //if in.TemplateRef != nil { - // out.TemplateRef = new(ObjectReference) - // if err := Convert_api_ObjectReference_To_v1_ObjectReference(in.TemplateRef, out.TemplateRef, s); err != nil { - // return err - // } - //} else { - // out.TemplateRef = nil - //} - if in.Template != nil { - out.Template = new(PodTemplateSpec) - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil { - return err - } - } else { - out.Template = nil - } - return nil -} - -func Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error { - out.Replicas = *in.Replicas - out.Selector = in.Selector - - //if in.TemplateRef != nil { - // out.TemplateRef = new(api.ObjectReference) - // if err := Convert_v1_ObjectReference_To_api_ObjectReference(in.TemplateRef, out.TemplateRef, s); err != nil { - // return err - // } - //} else { - // out.TemplateRef = nil - //} - if in.Template != nil { - out.Template = new(api.PodTemplateSpec) - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, out.Template, s); err != nil { - return err - } - } else { - out.Template = nil - } - return nil -} - -func Convert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error { - if err := autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in, out, s); err != nil { - return err - } - - if old := out.Annotations; old != nil { - out.Annotations = make(map[string]string, len(old)) - for k, v := range old { - out.Annotations[k] = v - } - } - if len(out.Status.InitContainerStatuses) > 0 { - if out.Annotations == nil { - out.Annotations = make(map[string]string) - } - value, err := json.Marshal(out.Status.InitContainerStatuses) - if err != nil { - return err - } - out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value) - } else { - delete(out.Annotations, PodInitContainerStatusesAnnotationKey) - } - return nil -} - -func Convert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { - // TODO: when we move init container to beta, remove these conversions - if value, ok := in.Annotations[PodInitContainerStatusesAnnotationKey]; ok { - var values []ContainerStatus - if err := json.Unmarshal([]byte(value), &values); err != nil { - return err - } - // Conversion from external to internal version exists more to - // satisfy the needs of the decoder than it does to be a general - // purpose tool. And Decode always creates an intermediate object - // to decode to. Thus the caller of UnsafeConvertToVersion is - // taking responsibility to ensure mutation of in is not exposed - // back to the caller. 
- in.Status.InitContainerStatuses = values - } - - if err := autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in, out, s); err != nil { - return err - } - if len(out.Annotations) > 0 { - old := out.Annotations - out.Annotations = make(map[string]string, len(old)) - for k, v := range old { - out.Annotations[k] = v - } - delete(out.Annotations, PodInitContainerStatusesAnnotationKey) - } - return nil -} - -func Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error { - if err := autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil { - return err - } - - // TODO: when we move init container to beta, remove these conversions - if old := out.Annotations; old != nil { - out.Annotations = make(map[string]string, len(old)) - for k, v := range old { - out.Annotations[k] = v - } - } - if len(out.Spec.InitContainers) > 0 { - if out.Annotations == nil { - out.Annotations = make(map[string]string) - } - value, err := json.Marshal(out.Spec.InitContainers) - if err != nil { - return err - } - out.Annotations[PodInitContainersAnnotationKey] = string(value) - } else { - delete(out.Annotations, PodInitContainersAnnotationKey) - } - return nil -} - -func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { - // TODO: when we move init container to beta, remove these conversions - if value, ok := in.Annotations[PodInitContainersAnnotationKey]; ok { - var values []Container - if err := json.Unmarshal([]byte(value), &values); err != nil { - return err - } - // Conversion from external to internal version exists more to - // satisfy the needs of the decoder than it does to be a general - // purpose tool. And Decode always creates an intermediate object - // to decode to. Thus the caller of UnsafeConvertToVersion is - // taking responsibility to ensure mutation of in is not exposed - // back to the caller. - in.Spec.InitContainers = values - } - - if err := autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in, out, s); err != nil { - return err - } - if len(out.Annotations) > 0 { - old := out.Annotations - out.Annotations = make(map[string]string, len(old)) - for k, v := range old { - out.Annotations[k] = v - } - delete(out.Annotations, PodInitContainersAnnotationKey) - } - return nil -} - -// The following two PodSpec conversions are done here to support ServiceAccount -// as an alias for ServiceAccountName. 
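The alias rule that the next two PodSpec conversions implement can be summarized in a few lines. This is an illustrative sketch of that rule only (serviceAccountName is authoritative, deprecatedServiceAccount is the legacy spelling), not the vendored implementation itself:

```go
// resolveServiceAccount applies the v1 alias rule sketched here for clarity:
// the new field wins when both are set; otherwise fall back to the deprecated
// spelling. Illustrative only — the real conversion functions follow below.
func resolveServiceAccount(serviceAccountName, deprecatedServiceAccount string) string {
	if serviceAccountName != "" {
		return serviceAccountName
	}
	return deprecatedServiceAccount
}
```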
-func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversion.Scope) error { - if in.Volumes != nil { - out.Volumes = make([]Volume, len(in.Volumes)) - for i := range in.Volumes { - if err := Convert_api_Volume_To_v1_Volume(&in.Volumes[i], &out.Volumes[i], s); err != nil { - return err - } - } - } else { - out.Volumes = nil - } - if in.InitContainers != nil { - out.InitContainers = make([]Container, len(in.InitContainers)) - for i := range in.InitContainers { - if err := Convert_api_Container_To_v1_Container(&in.InitContainers[i], &out.InitContainers[i], s); err != nil { - return err - } - } - } else { - out.InitContainers = nil - } - if in.Containers != nil { - out.Containers = make([]Container, len(in.Containers)) - for i := range in.Containers { - if err := Convert_api_Container_To_v1_Container(&in.Containers[i], &out.Containers[i], s); err != nil { - return err - } - } - } else { - out.Containers = nil - } - - out.RestartPolicy = RestartPolicy(in.RestartPolicy) - out.TerminationGracePeriodSeconds = in.TerminationGracePeriodSeconds - out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds - out.DNSPolicy = DNSPolicy(in.DNSPolicy) - out.NodeSelector = in.NodeSelector - - out.ServiceAccountName = in.ServiceAccountName - // DeprecatedServiceAccount is an alias for ServiceAccountName. - out.DeprecatedServiceAccount = in.ServiceAccountName - out.NodeName = in.NodeName - if in.SecurityContext != nil { - out.SecurityContext = new(PodSecurityContext) - if err := Convert_api_PodSecurityContext_To_v1_PodSecurityContext(in.SecurityContext, out.SecurityContext, s); err != nil { - return err - } - - // the host namespace fields have to be handled here for backward compatibility - // with v1.0.0 - out.HostPID = in.SecurityContext.HostPID - out.HostNetwork = in.SecurityContext.HostNetwork - out.HostIPC = in.SecurityContext.HostIPC - } - if in.ImagePullSecrets != nil { - out.ImagePullSecrets = make([]LocalObjectReference, len(in.ImagePullSecrets)) - for i := range in.ImagePullSecrets { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.ImagePullSecrets[i], &out.ImagePullSecrets[i], s); err != nil { - return err - } - } - } else { - out.ImagePullSecrets = nil - } - out.Hostname = in.Hostname - out.Subdomain = in.Subdomain - return nil -} - -func Convert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversion.Scope) error { - SetDefaults_PodSpec(in) - if in.Volumes != nil { - out.Volumes = make([]api.Volume, len(in.Volumes)) - for i := range in.Volumes { - if err := Convert_v1_Volume_To_api_Volume(&in.Volumes[i], &out.Volumes[i], s); err != nil { - return err - } - } - } else { - out.Volumes = nil - } - if in.InitContainers != nil { - out.InitContainers = make([]api.Container, len(in.InitContainers)) - for i := range in.InitContainers { - if err := Convert_v1_Container_To_api_Container(&in.InitContainers[i], &out.InitContainers[i], s); err != nil { - return err - } - } - } else { - out.InitContainers = nil - } - if in.Containers != nil { - out.Containers = make([]api.Container, len(in.Containers)) - for i := range in.Containers { - if err := Convert_v1_Container_To_api_Container(&in.Containers[i], &out.Containers[i], s); err != nil { - return err - } - } - } else { - out.Containers = nil - } - out.RestartPolicy = api.RestartPolicy(in.RestartPolicy) - out.TerminationGracePeriodSeconds = in.TerminationGracePeriodSeconds - out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds - out.DNSPolicy = api.DNSPolicy(in.DNSPolicy) - out.NodeSelector = 
in.NodeSelector - // We support DeprecatedServiceAccount as an alias for ServiceAccountName. - // If both are specified, ServiceAccountName (the new field) wins. - out.ServiceAccountName = in.ServiceAccountName - if in.ServiceAccountName == "" { - out.ServiceAccountName = in.DeprecatedServiceAccount - } - out.NodeName = in.NodeName - if in.SecurityContext != nil { - out.SecurityContext = new(api.PodSecurityContext) - if err := Convert_v1_PodSecurityContext_To_api_PodSecurityContext(in.SecurityContext, out.SecurityContext, s); err != nil { - return err - } - } - - // the host namespace fields have to be handled specially for backward compatibility - // with v1.0.0 - if out.SecurityContext == nil { - out.SecurityContext = new(api.PodSecurityContext) - } - out.SecurityContext.HostNetwork = in.HostNetwork - out.SecurityContext.HostPID = in.HostPID - out.SecurityContext.HostIPC = in.HostIPC - if in.ImagePullSecrets != nil { - out.ImagePullSecrets = make([]api.LocalObjectReference, len(in.ImagePullSecrets)) - for i := range in.ImagePullSecrets { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.ImagePullSecrets[i], &out.ImagePullSecrets[i], s); err != nil { - return err - } - } - } else { - out.ImagePullSecrets = nil - } - out.Hostname = in.Hostname - out.Subdomain = in.Subdomain - return nil -} - -func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error { - if err := autoConvert_api_Pod_To_v1_Pod(in, out, s); err != nil { - return err - } - - // TODO: when we move init container to beta, remove these conversions - if len(out.Spec.InitContainers) > 0 || len(out.Status.InitContainerStatuses) > 0 { - old := out.Annotations - out.Annotations = make(map[string]string, len(old)) - for k, v := range old { - out.Annotations[k] = v - } - delete(out.Annotations, PodInitContainersAnnotationKey) - delete(out.Annotations, PodInitContainerStatusesAnnotationKey) - } - if len(out.Spec.InitContainers) > 0 { - value, err := json.Marshal(out.Spec.InitContainers) - if err != nil { - return err - } - out.Annotations[PodInitContainersAnnotationKey] = string(value) - } - if len(out.Status.InitContainerStatuses) > 0 { - value, err := json.Marshal(out.Status.InitContainerStatuses) - if err != nil { - return err - } - out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value) - } - - // We need to reset certain fields for mirror pods from pre-v1.1 kubelet - // (#15960). - // TODO: Remove this code after we drop support for v1.0 kubelets. - if value, ok := in.Annotations[mirrorAnnotationKey]; ok && value == mirrorAnnotationValue_1_0 { - // Reset the TerminationGracePeriodSeconds. - out.Spec.TerminationGracePeriodSeconds = nil - // Reset the resource requests. - for i := range out.Spec.Containers { - out.Spec.Containers[i].Resources.Requests = nil - } - } - return nil -} - -func Convert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error { - // TODO: when we move init container to beta, remove these conversions - if value, ok := in.Annotations[PodInitContainersAnnotationKey]; ok { - var values []Container - if err := json.Unmarshal([]byte(value), &values); err != nil { - return err - } - // Conversion from external to internal version exists more to - // satisfy the needs of the decoder than it does to be a general - // purpose tool. And Decode always creates an intermediate object - // to decode to. Thus the caller of UnsafeConvertToVersion is - // taking responsibility to ensure mutation of in is not exposed - // back to the caller. 
- in.Spec.InitContainers = values - } - if value, ok := in.Annotations[PodInitContainerStatusesAnnotationKey]; ok { - var values []ContainerStatus - if err := json.Unmarshal([]byte(value), &values); err != nil { - return err - } - // Conversion from external to internal version exists more to - // satisfy the needs of the decoder than it does to be a general - // purpose tool. And Decode always creates an intermediate object - // to decode to. Thus the caller of UnsafeConvertToVersion is - // taking responsibility to ensure mutation of in is not exposed - // back to the caller. - in.Status.InitContainerStatuses = values - } - - if err := autoConvert_v1_Pod_To_api_Pod(in, out, s); err != nil { - return err - } - if len(out.Annotations) > 0 { - old := out.Annotations - out.Annotations = make(map[string]string, len(old)) - for k, v := range old { - out.Annotations[k] = v - } - delete(out.Annotations, PodInitContainersAnnotationKey) - delete(out.Annotations, PodInitContainerStatusesAnnotationKey) - } - return nil -} - -func Convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *ServiceSpec, s conversion.Scope) error { - if err := autoConvert_api_ServiceSpec_To_v1_ServiceSpec(in, out, s); err != nil { - return err - } - // Publish both externalIPs and deprecatedPublicIPs fields in v1. - out.DeprecatedPublicIPs = in.ExternalIPs - return nil -} - -func Convert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error { - if err := autoConvert_v1_ServiceSpec_To_api_ServiceSpec(in, out, s); err != nil { - return err - } - // Prefer the legacy deprecatedPublicIPs field, if provided. - if len(in.DeprecatedPublicIPs) > 0 { - out.ExternalIPs = in.DeprecatedPublicIPs - } - return nil -} - -func Convert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurityContext, out *PodSecurityContext, s conversion.Scope) error { - out.SupplementalGroups = in.SupplementalGroups - if in.SELinuxOptions != nil { - out.SELinuxOptions = new(SELinuxOptions) - if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { - return err - } - } else { - out.SELinuxOptions = nil - } - out.RunAsUser = in.RunAsUser - out.RunAsNonRoot = in.RunAsNonRoot - out.FSGroup = in.FSGroup - return nil -} - -func Convert_v1_PodSecurityContext_To_api_PodSecurityContext(in *PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error { - out.SupplementalGroups = in.SupplementalGroups - if in.SELinuxOptions != nil { - out.SELinuxOptions = new(api.SELinuxOptions) - if err := Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { - return err - } - } else { - out.SELinuxOptions = nil - } - out.RunAsUser = in.RunAsUser - out.RunAsNonRoot = in.RunAsNonRoot - out.FSGroup = in.FSGroup - return nil -} - -func Convert_v1_ResourceList_To_api_ResourceList(in *ResourceList, out *api.ResourceList, s conversion.Scope) error { - if *in == nil { - return nil - } - - if *out == nil { - *out = make(api.ResourceList, len(*in)) - } - for key, val := range *in { - // TODO(#18538): We round up resource values to milli scale to maintain API compatibility. - // In the future, we should instead reject values that need rounding. 
- const milliScale = -3 - val.RoundUp(milliScale) - - (*out)[api.ResourceName(key)] = val - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/conversion_generated.go b/vendor/k8s.io/kubernetes/pkg/api/v1/conversion_generated.go deleted file mode 100644 index d3aad1552..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/conversion_generated.go +++ /dev/null @@ -1,6739 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1 - -import ( - api "k8s.io/kubernetes/pkg/api" - resource "k8s.io/kubernetes/pkg/api/resource" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" -) - -func init() { - if err := api.Scheme.AddGeneratedConversionFuncs( - Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource, - Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource, - Convert_v1_Affinity_To_api_Affinity, - Convert_api_Affinity_To_v1_Affinity, - Convert_v1_AttachedVolume_To_api_AttachedVolume, - Convert_api_AttachedVolume_To_v1_AttachedVolume, - Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource, - Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource, - Convert_v1_Binding_To_api_Binding, - Convert_api_Binding_To_v1_Binding, - Convert_v1_Capabilities_To_api_Capabilities, - Convert_api_Capabilities_To_v1_Capabilities, - Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource, - Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource, - Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource, - Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource, - Convert_v1_ComponentCondition_To_api_ComponentCondition, - Convert_api_ComponentCondition_To_v1_ComponentCondition, - Convert_v1_ComponentStatus_To_api_ComponentStatus, - Convert_api_ComponentStatus_To_v1_ComponentStatus, - Convert_v1_ComponentStatusList_To_api_ComponentStatusList, - Convert_api_ComponentStatusList_To_v1_ComponentStatusList, - Convert_v1_ConfigMap_To_api_ConfigMap, - Convert_api_ConfigMap_To_v1_ConfigMap, - Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector, - Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector, - Convert_v1_ConfigMapList_To_api_ConfigMapList, - Convert_api_ConfigMapList_To_v1_ConfigMapList, - Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource, - Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource, - Convert_v1_Container_To_api_Container, - Convert_api_Container_To_v1_Container, - Convert_v1_ContainerImage_To_api_ContainerImage, - Convert_api_ContainerImage_To_v1_ContainerImage, - Convert_v1_ContainerPort_To_api_ContainerPort, - Convert_api_ContainerPort_To_v1_ContainerPort, - Convert_v1_ContainerState_To_api_ContainerState, - Convert_api_ContainerState_To_v1_ContainerState, - Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning, - 
Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning, - Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated, - Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated, - Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting, - Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting, - Convert_v1_ContainerStatus_To_api_ContainerStatus, - Convert_api_ContainerStatus_To_v1_ContainerStatus, - Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint, - Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint, - Convert_v1_DeleteOptions_To_api_DeleteOptions, - Convert_api_DeleteOptions_To_v1_DeleteOptions, - Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile, - Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile, - Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource, - Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource, - Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource, - Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource, - Convert_v1_EndpointAddress_To_api_EndpointAddress, - Convert_api_EndpointAddress_To_v1_EndpointAddress, - Convert_v1_EndpointPort_To_api_EndpointPort, - Convert_api_EndpointPort_To_v1_EndpointPort, - Convert_v1_EndpointSubset_To_api_EndpointSubset, - Convert_api_EndpointSubset_To_v1_EndpointSubset, - Convert_v1_Endpoints_To_api_Endpoints, - Convert_api_Endpoints_To_v1_Endpoints, - Convert_v1_EndpointsList_To_api_EndpointsList, - Convert_api_EndpointsList_To_v1_EndpointsList, - Convert_v1_EnvVar_To_api_EnvVar, - Convert_api_EnvVar_To_v1_EnvVar, - Convert_v1_EnvVarSource_To_api_EnvVarSource, - Convert_api_EnvVarSource_To_v1_EnvVarSource, - Convert_v1_Event_To_api_Event, - Convert_api_Event_To_v1_Event, - Convert_v1_EventList_To_api_EventList, - Convert_api_EventList_To_v1_EventList, - Convert_v1_EventSource_To_api_EventSource, - Convert_api_EventSource_To_v1_EventSource, - Convert_v1_ExecAction_To_api_ExecAction, - Convert_api_ExecAction_To_v1_ExecAction, - Convert_v1_ExportOptions_To_api_ExportOptions, - Convert_api_ExportOptions_To_v1_ExportOptions, - Convert_v1_FCVolumeSource_To_api_FCVolumeSource, - Convert_api_FCVolumeSource_To_v1_FCVolumeSource, - Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource, - Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource, - Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource, - Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource, - Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource, - Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource, - Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource, - Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource, - Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource, - Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource, - Convert_v1_HTTPGetAction_To_api_HTTPGetAction, - Convert_api_HTTPGetAction_To_v1_HTTPGetAction, - Convert_v1_HTTPHeader_To_api_HTTPHeader, - Convert_api_HTTPHeader_To_v1_HTTPHeader, - Convert_v1_Handler_To_api_Handler, - Convert_api_Handler_To_v1_Handler, - Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource, - Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource, - Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource, - Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource, - Convert_v1_KeyToPath_To_api_KeyToPath, - Convert_api_KeyToPath_To_v1_KeyToPath, - Convert_v1_Lifecycle_To_api_Lifecycle, - Convert_api_Lifecycle_To_v1_Lifecycle, - 
Convert_v1_LimitRange_To_api_LimitRange, - Convert_api_LimitRange_To_v1_LimitRange, - Convert_v1_LimitRangeItem_To_api_LimitRangeItem, - Convert_api_LimitRangeItem_To_v1_LimitRangeItem, - Convert_v1_LimitRangeList_To_api_LimitRangeList, - Convert_api_LimitRangeList_To_v1_LimitRangeList, - Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec, - Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec, - Convert_v1_List_To_api_List, - Convert_api_List_To_v1_List, - Convert_v1_ListOptions_To_api_ListOptions, - Convert_api_ListOptions_To_v1_ListOptions, - Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress, - Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress, - Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus, - Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus, - Convert_v1_LocalObjectReference_To_api_LocalObjectReference, - Convert_api_LocalObjectReference_To_v1_LocalObjectReference, - Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource, - Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource, - Convert_v1_Namespace_To_api_Namespace, - Convert_api_Namespace_To_v1_Namespace, - Convert_v1_NamespaceList_To_api_NamespaceList, - Convert_api_NamespaceList_To_v1_NamespaceList, - Convert_v1_NamespaceSpec_To_api_NamespaceSpec, - Convert_api_NamespaceSpec_To_v1_NamespaceSpec, - Convert_v1_NamespaceStatus_To_api_NamespaceStatus, - Convert_api_NamespaceStatus_To_v1_NamespaceStatus, - Convert_v1_Node_To_api_Node, - Convert_api_Node_To_v1_Node, - Convert_v1_NodeAddress_To_api_NodeAddress, - Convert_api_NodeAddress_To_v1_NodeAddress, - Convert_v1_NodeAffinity_To_api_NodeAffinity, - Convert_api_NodeAffinity_To_v1_NodeAffinity, - Convert_v1_NodeCondition_To_api_NodeCondition, - Convert_api_NodeCondition_To_v1_NodeCondition, - Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints, - Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints, - Convert_v1_NodeList_To_api_NodeList, - Convert_api_NodeList_To_v1_NodeList, - Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions, - Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions, - Convert_v1_NodeSelector_To_api_NodeSelector, - Convert_api_NodeSelector_To_v1_NodeSelector, - Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement, - Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement, - Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm, - Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm, - Convert_v1_NodeSpec_To_api_NodeSpec, - Convert_api_NodeSpec_To_v1_NodeSpec, - Convert_v1_NodeStatus_To_api_NodeStatus, - Convert_api_NodeStatus_To_v1_NodeStatus, - Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo, - Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo, - Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector, - Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector, - Convert_v1_ObjectMeta_To_api_ObjectMeta, - Convert_api_ObjectMeta_To_v1_ObjectMeta, - Convert_v1_ObjectReference_To_api_ObjectReference, - Convert_api_ObjectReference_To_v1_ObjectReference, - Convert_v1_OwnerReference_To_api_OwnerReference, - Convert_api_OwnerReference_To_v1_OwnerReference, - Convert_v1_PersistentVolume_To_api_PersistentVolume, - Convert_api_PersistentVolume_To_v1_PersistentVolume, - Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim, - Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim, - Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList, - Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList, - Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec, - 
Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec, - Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus, - Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus, - Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource, - Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource, - Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList, - Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList, - Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource, - Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource, - Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec, - Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec, - Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus, - Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus, - Convert_v1_Pod_To_api_Pod, - Convert_api_Pod_To_v1_Pod, - Convert_v1_PodAffinity_To_api_PodAffinity, - Convert_api_PodAffinity_To_v1_PodAffinity, - Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm, - Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm, - Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity, - Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity, - Convert_v1_PodAttachOptions_To_api_PodAttachOptions, - Convert_api_PodAttachOptions_To_v1_PodAttachOptions, - Convert_v1_PodCondition_To_api_PodCondition, - Convert_api_PodCondition_To_v1_PodCondition, - Convert_v1_PodExecOptions_To_api_PodExecOptions, - Convert_api_PodExecOptions_To_v1_PodExecOptions, - Convert_v1_PodList_To_api_PodList, - Convert_api_PodList_To_v1_PodList, - Convert_v1_PodLogOptions_To_api_PodLogOptions, - Convert_api_PodLogOptions_To_v1_PodLogOptions, - Convert_v1_PodProxyOptions_To_api_PodProxyOptions, - Convert_api_PodProxyOptions_To_v1_PodProxyOptions, - Convert_v1_PodSecurityContext_To_api_PodSecurityContext, - Convert_api_PodSecurityContext_To_v1_PodSecurityContext, - Convert_v1_PodSpec_To_api_PodSpec, - Convert_api_PodSpec_To_v1_PodSpec, - Convert_v1_PodStatus_To_api_PodStatus, - Convert_api_PodStatus_To_v1_PodStatus, - Convert_v1_PodStatusResult_To_api_PodStatusResult, - Convert_api_PodStatusResult_To_v1_PodStatusResult, - Convert_v1_PodTemplate_To_api_PodTemplate, - Convert_api_PodTemplate_To_v1_PodTemplate, - Convert_v1_PodTemplateList_To_api_PodTemplateList, - Convert_api_PodTemplateList_To_v1_PodTemplateList, - Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec, - Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec, - Convert_v1_Preconditions_To_api_Preconditions, - Convert_api_Preconditions_To_v1_Preconditions, - Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm, - Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm, - Convert_v1_Probe_To_api_Probe, - Convert_api_Probe_To_v1_Probe, - Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource, - Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource, - Convert_v1_RangeAllocation_To_api_RangeAllocation, - Convert_api_RangeAllocation_To_v1_RangeAllocation, - Convert_v1_ReplicationController_To_api_ReplicationController, - Convert_api_ReplicationController_To_v1_ReplicationController, - Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList, - Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList, - Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec, - Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, - 
Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus, - Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus, - Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector, - Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector, - Convert_v1_ResourceQuota_To_api_ResourceQuota, - Convert_api_ResourceQuota_To_v1_ResourceQuota, - Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList, - Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList, - Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec, - Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec, - Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus, - Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus, - Convert_v1_ResourceRequirements_To_api_ResourceRequirements, - Convert_api_ResourceRequirements_To_v1_ResourceRequirements, - Convert_v1_SELinuxOptions_To_api_SELinuxOptions, - Convert_api_SELinuxOptions_To_v1_SELinuxOptions, - Convert_v1_Secret_To_api_Secret, - Convert_api_Secret_To_v1_Secret, - Convert_v1_SecretKeySelector_To_api_SecretKeySelector, - Convert_api_SecretKeySelector_To_v1_SecretKeySelector, - Convert_v1_SecretList_To_api_SecretList, - Convert_api_SecretList_To_v1_SecretList, - Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource, - Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource, - Convert_v1_SecurityContext_To_api_SecurityContext, - Convert_api_SecurityContext_To_v1_SecurityContext, - Convert_v1_SerializedReference_To_api_SerializedReference, - Convert_api_SerializedReference_To_v1_SerializedReference, - Convert_v1_Service_To_api_Service, - Convert_api_Service_To_v1_Service, - Convert_v1_ServiceAccount_To_api_ServiceAccount, - Convert_api_ServiceAccount_To_v1_ServiceAccount, - Convert_v1_ServiceAccountList_To_api_ServiceAccountList, - Convert_api_ServiceAccountList_To_v1_ServiceAccountList, - Convert_v1_ServiceList_To_api_ServiceList, - Convert_api_ServiceList_To_v1_ServiceList, - Convert_v1_ServicePort_To_api_ServicePort, - Convert_api_ServicePort_To_v1_ServicePort, - Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions, - Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions, - Convert_v1_ServiceSpec_To_api_ServiceSpec, - Convert_api_ServiceSpec_To_v1_ServiceSpec, - Convert_v1_ServiceStatus_To_api_ServiceStatus, - Convert_api_ServiceStatus_To_v1_ServiceStatus, - Convert_v1_TCPSocketAction_To_api_TCPSocketAction, - Convert_api_TCPSocketAction_To_v1_TCPSocketAction, - Convert_v1_Taint_To_api_Taint, - Convert_api_Taint_To_v1_Taint, - Convert_v1_Toleration_To_api_Toleration, - Convert_api_Toleration_To_v1_Toleration, - Convert_v1_Volume_To_api_Volume, - Convert_api_Volume_To_v1_Volume, - Convert_v1_VolumeMount_To_api_VolumeMount, - Convert_api_VolumeMount_To_v1_VolumeMount, - Convert_v1_VolumeSource_To_api_VolumeSource, - Convert_api_VolumeSource_To_v1_VolumeSource, - Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource, - Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource, - Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm, - Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm, - ); err != nil { - // if one of the conversion functions is malformed, detect it immediately. 
- panic(err) - } -} - -func autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - return autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in, out, s) -} - -func autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - return autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in, out, s) -} - -func autoConvert_v1_Affinity_To_api_Affinity(in *Affinity, out *api.Affinity, s conversion.Scope) error { - if in.NodeAffinity != nil { - in, out := &in.NodeAffinity, &out.NodeAffinity - *out = new(api.NodeAffinity) - if err := Convert_v1_NodeAffinity_To_api_NodeAffinity(*in, *out, s); err != nil { - return err - } - } else { - out.NodeAffinity = nil - } - if in.PodAffinity != nil { - in, out := &in.PodAffinity, &out.PodAffinity - *out = new(api.PodAffinity) - if err := Convert_v1_PodAffinity_To_api_PodAffinity(*in, *out, s); err != nil { - return err - } - } else { - out.PodAffinity = nil - } - if in.PodAntiAffinity != nil { - in, out := &in.PodAntiAffinity, &out.PodAntiAffinity - *out = new(api.PodAntiAffinity) - if err := Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity(*in, *out, s); err != nil { - return err - } - } else { - out.PodAntiAffinity = nil - } - return nil -} - -func Convert_v1_Affinity_To_api_Affinity(in *Affinity, out *api.Affinity, s conversion.Scope) error { - return autoConvert_v1_Affinity_To_api_Affinity(in, out, s) -} - -func autoConvert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *Affinity, s conversion.Scope) error { - if in.NodeAffinity != nil { - in, out := &in.NodeAffinity, &out.NodeAffinity - *out = new(NodeAffinity) - if err := Convert_api_NodeAffinity_To_v1_NodeAffinity(*in, *out, s); err != nil { - return err - } - } else { - out.NodeAffinity = nil - } - if in.PodAffinity != nil { - in, out := &in.PodAffinity, &out.PodAffinity - *out = new(PodAffinity) - if err := Convert_api_PodAffinity_To_v1_PodAffinity(*in, *out, s); err != nil { - return err - } - } else { - out.PodAffinity = nil - } - if in.PodAntiAffinity != nil { - in, out := &in.PodAntiAffinity, &out.PodAntiAffinity - *out = new(PodAntiAffinity) - if err := Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity(*in, *out, s); err != nil { - return err - } - } else { - out.PodAntiAffinity = nil - } - return nil -} - -func Convert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *Affinity, s conversion.Scope) error { - return autoConvert_api_Affinity_To_v1_Affinity(in, out, s) -} - -func autoConvert_v1_AttachedVolume_To_api_AttachedVolume(in *AttachedVolume, out *api.AttachedVolume, s conversion.Scope) 
error { - out.Name = api.UniqueVolumeName(in.Name) - out.DevicePath = in.DevicePath - return nil -} - -func Convert_v1_AttachedVolume_To_api_AttachedVolume(in *AttachedVolume, out *api.AttachedVolume, s conversion.Scope) error { - return autoConvert_v1_AttachedVolume_To_api_AttachedVolume(in, out, s) -} - -func autoConvert_api_AttachedVolume_To_v1_AttachedVolume(in *api.AttachedVolume, out *AttachedVolume, s conversion.Scope) error { - out.Name = UniqueVolumeName(in.Name) - out.DevicePath = in.DevicePath - return nil -} - -func Convert_api_AttachedVolume_To_v1_AttachedVolume(in *api.AttachedVolume, out *AttachedVolume, s conversion.Scope) error { - return autoConvert_api_AttachedVolume_To_v1_AttachedVolume(in, out, s) -} - -func autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error { - return autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in, out, s) -} - -func autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *AzureFileVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *AzureFileVolumeSource, s conversion.Scope) error { - return autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in, out, s) -} - -func autoConvert_v1_Binding_To_api_Binding(in *Binding, out *api.Binding, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Target, &out.Target, s); err != nil { - return err - } - return nil -} - -func Convert_v1_Binding_To_api_Binding(in *Binding, out *api.Binding, s conversion.Scope) error { - return autoConvert_v1_Binding_To_api_Binding(in, out, s) -} - -func autoConvert_api_Binding_To_v1_Binding(in *api.Binding, out *Binding, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Target, &out.Target, s); err != nil { - return err - } - return nil -} - -func Convert_api_Binding_To_v1_Binding(in *api.Binding, out *Binding, s conversion.Scope) error { - return autoConvert_api_Binding_To_v1_Binding(in, out, s) -} - -func autoConvert_v1_Capabilities_To_api_Capabilities(in *Capabilities, out *api.Capabilities, s conversion.Scope) error { - if in.Add != nil { - in, out := &in.Add, &out.Add - *out = make([]api.Capability, len(*in)) - for i := range *in { - (*out)[i] = api.Capability((*in)[i]) - } - } else { - out.Add = nil - } - if in.Drop != nil { - in, out := &in.Drop, &out.Drop - *out = make([]api.Capability, len(*in)) - for i := range *in { - (*out)[i] = api.Capability((*in)[i]) - 
} - } else { - out.Drop = nil - } - return nil -} - -func Convert_v1_Capabilities_To_api_Capabilities(in *Capabilities, out *api.Capabilities, s conversion.Scope) error { - return autoConvert_v1_Capabilities_To_api_Capabilities(in, out, s) -} - -func autoConvert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *Capabilities, s conversion.Scope) error { - if in.Add != nil { - in, out := &in.Add, &out.Add - *out = make([]Capability, len(*in)) - for i := range *in { - (*out)[i] = Capability((*in)[i]) - } - } else { - out.Add = nil - } - if in.Drop != nil { - in, out := &in.Drop, &out.Drop - *out = make([]Capability, len(*in)) - for i := range *in { - (*out)[i] = Capability((*in)[i]) - } - } else { - out.Drop = nil - } - return nil -} - -func Convert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *Capabilities, s conversion.Scope) error { - return autoConvert_api_Capabilities_To_v1_Capabilities(in, out, s) -} - -func autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error { - out.Monitors = in.Monitors - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(api.LocalObjectReference) - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(*in, *out, s); err != nil { - return err - } - } else { - out.SecretRef = nil - } - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error { - return autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in, out, s) -} - -func autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *CephFSVolumeSource, s conversion.Scope) error { - out.Monitors = in.Monitors - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(*in, *out, s); err != nil { - return err - } - } else { - out.SecretRef = nil - } - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *CephFSVolumeSource, s conversion.Scope) error { - return autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in, out, s) -} - -func autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error { - return autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in, out, s) -} - -func autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *CinderVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *CinderVolumeSource, s conversion.Scope) error { - return autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s) -} - -func autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in 
*ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error { - out.Type = api.ComponentConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.Message = in.Message - out.Error = in.Error - return nil -} - -func Convert_v1_ComponentCondition_To_api_ComponentCondition(in *ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error { - return autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in, out, s) -} - -func autoConvert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *ComponentCondition, s conversion.Scope) error { - out.Type = ComponentConditionType(in.Type) - out.Status = ConditionStatus(in.Status) - out.Message = in.Message - out.Error = in.Error - return nil -} - -func Convert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *ComponentCondition, s conversion.Scope) error { - return autoConvert_api_ComponentCondition_To_v1_ComponentCondition(in, out, s) -} - -func autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in *ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]api.ComponentCondition, len(*in)) - for i := range *in { - if err := Convert_v1_ComponentCondition_To_api_ComponentCondition(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - return nil -} - -func Convert_v1_ComponentStatus_To_api_ComponentStatus(in *ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error { - return autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in, out, s) -} - -func autoConvert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, out *ComponentStatus, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ComponentCondition, len(*in)) - for i := range *in { - if err := Convert_api_ComponentCondition_To_v1_ComponentCondition(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - return nil -} - -func Convert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, out *ComponentStatus, s conversion.Scope) error { - return autoConvert_api_ComponentStatus_To_v1_ComponentStatus(in, out, s) -} - -func autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in *ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.ComponentStatus, len(*in)) - for i := range *in { - if err := Convert_v1_ComponentStatus_To_api_ComponentStatus(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - 
return nil -} - -func Convert_v1_ComponentStatusList_To_api_ComponentStatusList(in *ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error { - return autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in, out, s) -} - -func autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *ComponentStatusList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ComponentStatus, len(*in)) - for i := range *in { - if err := Convert_api_ComponentStatus_To_v1_ComponentStatus(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *ComponentStatusList, s conversion.Scope) error { - return autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in, out, s) -} - -func autoConvert_v1_ConfigMap_To_api_ConfigMap(in *ConfigMap, out *api.ConfigMap, s conversion.Scope) error { - SetDefaults_ConfigMap(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - out.Data = in.Data - return nil -} - -func Convert_v1_ConfigMap_To_api_ConfigMap(in *ConfigMap, out *api.ConfigMap, s conversion.Scope) error { - return autoConvert_v1_ConfigMap_To_api_ConfigMap(in, out, s) -} - -func autoConvert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *ConfigMap, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - out.Data = in.Data - return nil -} - -func Convert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *ConfigMap, s conversion.Scope) error { - return autoConvert_api_ConfigMap_To_v1_ConfigMap(in, out, s) -} - -func autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Key = in.Key - return nil -} - -func Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error { - return autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in, out, s) -} - -func autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *ConfigMapKeySelector, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Key = in.Key - return nil -} - -func Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *ConfigMapKeySelector, s conversion.Scope) error { - return autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in, out, 
s) -} - -func autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in *ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.ConfigMap, len(*in)) - for i := range *in { - if err := Convert_v1_ConfigMap_To_api_ConfigMap(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_ConfigMapList_To_api_ConfigMapList(in *ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error { - return autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in, out, s) -} - -func autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *ConfigMapList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ConfigMap, len(*in)) - for i := range *in { - if err := Convert_api_ConfigMap_To_v1_ConfigMap(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *ConfigMapList, s conversion.Scope) error { - return autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in, out, s) -} - -func autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.KeyToPath, len(*in)) - for i := range *in { - if err := Convert_v1_KeyToPath_To_api_KeyToPath(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error { - return autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in, out, s) -} - -func autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *ConfigMapVolumeSource, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - if err := Convert_api_KeyToPath_To_v1_KeyToPath(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *ConfigMapVolumeSource, s conversion.Scope) error { - return autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in, out, s) -} - -func autoConvert_v1_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error 
{ - SetDefaults_Container(in) - out.Name = in.Name - out.Image = in.Image - out.Command = in.Command - out.Args = in.Args - out.WorkingDir = in.WorkingDir - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]api.ContainerPort, len(*in)) - for i := range *in { - if err := Convert_v1_ContainerPort_To_api_ContainerPort(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Ports = nil - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]api.EnvVar, len(*in)) - for i := range *in { - if err := Convert_v1_EnvVar_To_api_EnvVar(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Env = nil - } - if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { - return err - } - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]api.VolumeMount, len(*in)) - for i := range *in { - if err := Convert_v1_VolumeMount_To_api_VolumeMount(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.VolumeMounts = nil - } - if in.LivenessProbe != nil { - in, out := &in.LivenessProbe, &out.LivenessProbe - *out = new(api.Probe) - if err := Convert_v1_Probe_To_api_Probe(*in, *out, s); err != nil { - return err - } - } else { - out.LivenessProbe = nil - } - if in.ReadinessProbe != nil { - in, out := &in.ReadinessProbe, &out.ReadinessProbe - *out = new(api.Probe) - if err := Convert_v1_Probe_To_api_Probe(*in, *out, s); err != nil { - return err - } - } else { - out.ReadinessProbe = nil - } - if in.Lifecycle != nil { - in, out := &in.Lifecycle, &out.Lifecycle - *out = new(api.Lifecycle) - if err := Convert_v1_Lifecycle_To_api_Lifecycle(*in, *out, s); err != nil { - return err - } - } else { - out.Lifecycle = nil - } - out.TerminationMessagePath = in.TerminationMessagePath - out.ImagePullPolicy = api.PullPolicy(in.ImagePullPolicy) - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(api.SecurityContext) - if err := Convert_v1_SecurityContext_To_api_SecurityContext(*in, *out, s); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - out.Stdin = in.Stdin - out.StdinOnce = in.StdinOnce - out.TTY = in.TTY - return nil -} - -func Convert_v1_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error { - return autoConvert_v1_Container_To_api_Container(in, out, s) -} - -func autoConvert_api_Container_To_v1_Container(in *api.Container, out *Container, s conversion.Scope) error { - out.Name = in.Name - out.Image = in.Image - out.Command = in.Command - out.Args = in.Args - out.WorkingDir = in.WorkingDir - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]ContainerPort, len(*in)) - for i := range *in { - if err := Convert_api_ContainerPort_To_v1_ContainerPort(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Ports = nil - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - for i := range *in { - if err := Convert_api_EnvVar_To_v1_EnvVar(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Env = nil - } - if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { - return err - } - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]VolumeMount, len(*in)) - for i := range *in { - if err := 
Convert_api_VolumeMount_To_v1_VolumeMount(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.VolumeMounts = nil - } - if in.LivenessProbe != nil { - in, out := &in.LivenessProbe, &out.LivenessProbe - *out = new(Probe) - if err := Convert_api_Probe_To_v1_Probe(*in, *out, s); err != nil { - return err - } - } else { - out.LivenessProbe = nil - } - if in.ReadinessProbe != nil { - in, out := &in.ReadinessProbe, &out.ReadinessProbe - *out = new(Probe) - if err := Convert_api_Probe_To_v1_Probe(*in, *out, s); err != nil { - return err - } - } else { - out.ReadinessProbe = nil - } - if in.Lifecycle != nil { - in, out := &in.Lifecycle, &out.Lifecycle - *out = new(Lifecycle) - if err := Convert_api_Lifecycle_To_v1_Lifecycle(*in, *out, s); err != nil { - return err - } - } else { - out.Lifecycle = nil - } - out.TerminationMessagePath = in.TerminationMessagePath - out.ImagePullPolicy = PullPolicy(in.ImagePullPolicy) - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(SecurityContext) - if err := Convert_api_SecurityContext_To_v1_SecurityContext(*in, *out, s); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - out.Stdin = in.Stdin - out.StdinOnce = in.StdinOnce - out.TTY = in.TTY - return nil -} - -func Convert_api_Container_To_v1_Container(in *api.Container, out *Container, s conversion.Scope) error { - return autoConvert_api_Container_To_v1_Container(in, out, s) -} - -func autoConvert_v1_ContainerImage_To_api_ContainerImage(in *ContainerImage, out *api.ContainerImage, s conversion.Scope) error { - out.Names = in.Names - out.SizeBytes = in.SizeBytes - return nil -} - -func Convert_v1_ContainerImage_To_api_ContainerImage(in *ContainerImage, out *api.ContainerImage, s conversion.Scope) error { - return autoConvert_v1_ContainerImage_To_api_ContainerImage(in, out, s) -} - -func autoConvert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out *ContainerImage, s conversion.Scope) error { - out.Names = in.Names - out.SizeBytes = in.SizeBytes - return nil -} - -func Convert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out *ContainerImage, s conversion.Scope) error { - return autoConvert_api_ContainerImage_To_v1_ContainerImage(in, out, s) -} - -func autoConvert_v1_ContainerPort_To_api_ContainerPort(in *ContainerPort, out *api.ContainerPort, s conversion.Scope) error { - SetDefaults_ContainerPort(in) - out.Name = in.Name - out.HostPort = in.HostPort - out.ContainerPort = in.ContainerPort - out.Protocol = api.Protocol(in.Protocol) - out.HostIP = in.HostIP - return nil -} - -func Convert_v1_ContainerPort_To_api_ContainerPort(in *ContainerPort, out *api.ContainerPort, s conversion.Scope) error { - return autoConvert_v1_ContainerPort_To_api_ContainerPort(in, out, s) -} - -func autoConvert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *ContainerPort, s conversion.Scope) error { - out.Name = in.Name - out.HostPort = in.HostPort - out.ContainerPort = in.ContainerPort - out.Protocol = Protocol(in.Protocol) - out.HostIP = in.HostIP - return nil -} - -func Convert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *ContainerPort, s conversion.Scope) error { - return autoConvert_api_ContainerPort_To_v1_ContainerPort(in, out, s) -} - -func autoConvert_v1_ContainerState_To_api_ContainerState(in *ContainerState, out *api.ContainerState, s conversion.Scope) error { - if in.Waiting != nil { - in, out := &in.Waiting, &out.Waiting - *out = 
new(api.ContainerStateWaiting) - if err := Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(*in, *out, s); err != nil { - return err - } - } else { - out.Waiting = nil - } - if in.Running != nil { - in, out := &in.Running, &out.Running - *out = new(api.ContainerStateRunning) - if err := Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning(*in, *out, s); err != nil { - return err - } - } else { - out.Running = nil - } - if in.Terminated != nil { - in, out := &in.Terminated, &out.Terminated - *out = new(api.ContainerStateTerminated) - if err := Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(*in, *out, s); err != nil { - return err - } - } else { - out.Terminated = nil - } - return nil -} - -func Convert_v1_ContainerState_To_api_ContainerState(in *ContainerState, out *api.ContainerState, s conversion.Scope) error { - return autoConvert_v1_ContainerState_To_api_ContainerState(in, out, s) -} - -func autoConvert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out *ContainerState, s conversion.Scope) error { - if in.Waiting != nil { - in, out := &in.Waiting, &out.Waiting - *out = new(ContainerStateWaiting) - if err := Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(*in, *out, s); err != nil { - return err - } - } else { - out.Waiting = nil - } - if in.Running != nil { - in, out := &in.Running, &out.Running - *out = new(ContainerStateRunning) - if err := Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning(*in, *out, s); err != nil { - return err - } - } else { - out.Running = nil - } - if in.Terminated != nil { - in, out := &in.Terminated, &out.Terminated - *out = new(ContainerStateTerminated) - if err := Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(*in, *out, s); err != nil { - return err - } - } else { - out.Terminated = nil - } - return nil -} - -func Convert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out *ContainerState, s conversion.Scope) error { - return autoConvert_api_ContainerState_To_v1_ContainerState(in, out, s) -} - -func autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error { - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.StartedAt, &out.StartedAt, s); err != nil { - return err - } - return nil -} - -func Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error { - return autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in, out, s) -} - -func autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.ContainerStateRunning, out *ContainerStateRunning, s conversion.Scope) error { - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.StartedAt, &out.StartedAt, s); err != nil { - return err - } - return nil -} - -func Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.ContainerStateRunning, out *ContainerStateRunning, s conversion.Scope) error { - return autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in, out, s) -} - -func autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error { - out.ExitCode = in.ExitCode - out.Signal = in.Signal - out.Reason = in.Reason - out.Message = in.Message - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.StartedAt, &out.StartedAt, s); 
err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.FinishedAt, &out.FinishedAt, s); err != nil { - return err - } - out.ContainerID = in.ContainerID - return nil -} - -func Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error { - return autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in, out, s) -} - -func autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api.ContainerStateTerminated, out *ContainerStateTerminated, s conversion.Scope) error { - out.ExitCode = in.ExitCode - out.Signal = in.Signal - out.Reason = in.Reason - out.Message = in.Message - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.StartedAt, &out.StartedAt, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.FinishedAt, &out.FinishedAt, s); err != nil { - return err - } - out.ContainerID = in.ContainerID - return nil -} - -func Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api.ContainerStateTerminated, out *ContainerStateTerminated, s conversion.Scope) error { - return autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in, out, s) -} - -func autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error { - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error { - return autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in, out, s) -} - -func autoConvert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.ContainerStateWaiting, out *ContainerStateWaiting, s conversion.Scope) error { - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.ContainerStateWaiting, out *ContainerStateWaiting, s conversion.Scope) error { - return autoConvert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in, out, s) -} - -func autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in *ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_v1_ContainerState_To_api_ContainerState(&in.State, &out.State, s); err != nil { - return err - } - if err := Convert_v1_ContainerState_To_api_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { - return err - } - out.Ready = in.Ready - out.RestartCount = in.RestartCount - out.Image = in.Image - out.ImageID = in.ImageID - out.ContainerID = in.ContainerID - return nil -} - -func Convert_v1_ContainerStatus_To_api_ContainerStatus(in *ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error { - return autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in, out, s) -} - -func autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, out *ContainerStatus, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_api_ContainerState_To_v1_ContainerState(&in.State, &out.State, s); err != nil { - return err - } - if err := Convert_api_ContainerState_To_v1_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { - return err - } - out.Ready = in.Ready - out.RestartCount = in.RestartCount - 
out.Image = in.Image - out.ImageID = in.ImageID - out.ContainerID = in.ContainerID - return nil -} - -func Convert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, out *ContainerStatus, s conversion.Scope) error { - return autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in, out, s) -} - -func autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error { - out.Port = in.Port - return nil -} - -func Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error { - return autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in, out, s) -} - -func autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *DaemonEndpoint, s conversion.Scope) error { - out.Port = in.Port - return nil -} - -func Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *DaemonEndpoint, s conversion.Scope) error { - return autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in, out, s) -} - -func autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in *DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.GracePeriodSeconds = in.GracePeriodSeconds - if in.Preconditions != nil { - in, out := &in.Preconditions, &out.Preconditions - *out = new(api.Preconditions) - if err := Convert_v1_Preconditions_To_api_Preconditions(*in, *out, s); err != nil { - return err - } - } else { - out.Preconditions = nil - } - out.OrphanDependents = in.OrphanDependents - return nil -} - -func Convert_v1_DeleteOptions_To_api_DeleteOptions(in *DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error { - return autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in, out, s) -} - -func autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *DeleteOptions, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.GracePeriodSeconds = in.GracePeriodSeconds - if in.Preconditions != nil { - in, out := &in.Preconditions, &out.Preconditions - *out = new(Preconditions) - if err := Convert_api_Preconditions_To_v1_Preconditions(*in, *out, s); err != nil { - return err - } - } else { - out.Preconditions = nil - } - out.OrphanDependents = in.OrphanDependents - return nil -} - -func Convert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *DeleteOptions, s conversion.Scope) error { - return autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in, out, s) -} - -func autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error { - out.Path = in.Path - if in.FieldRef != nil { - in, out := &in.FieldRef, &out.FieldRef - *out = new(api.ObjectFieldSelector) - if err := Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(*in, *out, s); err != nil { - return err - } - } else { - out.FieldRef = nil - } - if in.ResourceFieldRef != nil { - in, out := &in.ResourceFieldRef, &out.ResourceFieldRef - *out = new(api.ResourceFieldSelector) - if err := Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(*in, *out, s); err != nil { - return err - } - } else { - out.ResourceFieldRef = nil - } - return nil -} - -func Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in 
*DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error { - return autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in, out, s) -} - -func autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, s conversion.Scope) error { - out.Path = in.Path - if in.FieldRef != nil { - in, out := &in.FieldRef, &out.FieldRef - *out = new(ObjectFieldSelector) - if err := Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(*in, *out, s); err != nil { - return err - } - } else { - out.FieldRef = nil - } - if in.ResourceFieldRef != nil { - in, out := &in.ResourceFieldRef, &out.ResourceFieldRef - *out = new(ResourceFieldSelector) - if err := Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(*in, *out, s); err != nil { - return err - } - } else { - out.ResourceFieldRef = nil - } - return nil -} - -func Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, s conversion.Scope) error { - return autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in, out, s) -} - -func autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error { - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.DownwardAPIVolumeFile, len(*in)) - for i := range *in { - if err := Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error { - return autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in, out, s) -} - -func autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *DownwardAPIVolumeSource, s conversion.Scope) error { - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DownwardAPIVolumeFile, len(*in)) - for i := range *in { - if err := Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *DownwardAPIVolumeSource, s conversion.Scope) error { - return autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in, out, s) -} - -func autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error { - out.Medium = api.StorageMedium(in.Medium) - return nil -} - -func Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error { - return autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in, out, s) -} - -func autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *EmptyDirVolumeSource, s conversion.Scope) error { - out.Medium = StorageMedium(in.Medium) - return nil -} - -func Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *EmptyDirVolumeSource, s conversion.Scope) error { - return autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in, out, 
s) -} - -func autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in *EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error { - out.IP = in.IP - out.Hostname = in.Hostname - if in.TargetRef != nil { - in, out := &in.TargetRef, &out.TargetRef - *out = new(api.ObjectReference) - if err := Convert_v1_ObjectReference_To_api_ObjectReference(*in, *out, s); err != nil { - return err - } - } else { - out.TargetRef = nil - } - return nil -} - -func Convert_v1_EndpointAddress_To_api_EndpointAddress(in *EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error { - return autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in, out, s) -} - -func autoConvert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, out *EndpointAddress, s conversion.Scope) error { - out.IP = in.IP - out.Hostname = in.Hostname - if in.TargetRef != nil { - in, out := &in.TargetRef, &out.TargetRef - *out = new(ObjectReference) - if err := Convert_api_ObjectReference_To_v1_ObjectReference(*in, *out, s); err != nil { - return err - } - } else { - out.TargetRef = nil - } - return nil -} - -func Convert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, out *EndpointAddress, s conversion.Scope) error { - return autoConvert_api_EndpointAddress_To_v1_EndpointAddress(in, out, s) -} - -func autoConvert_v1_EndpointPort_To_api_EndpointPort(in *EndpointPort, out *api.EndpointPort, s conversion.Scope) error { - out.Name = in.Name - out.Port = in.Port - out.Protocol = api.Protocol(in.Protocol) - return nil -} - -func Convert_v1_EndpointPort_To_api_EndpointPort(in *EndpointPort, out *api.EndpointPort, s conversion.Scope) error { - return autoConvert_v1_EndpointPort_To_api_EndpointPort(in, out, s) -} - -func autoConvert_api_EndpointPort_To_v1_EndpointPort(in *api.EndpointPort, out *EndpointPort, s conversion.Scope) error { - out.Name = in.Name - out.Port = in.Port - out.Protocol = Protocol(in.Protocol) - return nil -} - -func Convert_api_EndpointPort_To_v1_EndpointPort(in *api.EndpointPort, out *EndpointPort, s conversion.Scope) error { - return autoConvert_api_EndpointPort_To_v1_EndpointPort(in, out, s) -} - -func autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in *EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error { - if in.Addresses != nil { - in, out := &in.Addresses, &out.Addresses - *out = make([]api.EndpointAddress, len(*in)) - for i := range *in { - if err := Convert_v1_EndpointAddress_To_api_EndpointAddress(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Addresses = nil - } - if in.NotReadyAddresses != nil { - in, out := &in.NotReadyAddresses, &out.NotReadyAddresses - *out = make([]api.EndpointAddress, len(*in)) - for i := range *in { - if err := Convert_v1_EndpointAddress_To_api_EndpointAddress(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.NotReadyAddresses = nil - } - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]api.EndpointPort, len(*in)) - for i := range *in { - if err := Convert_v1_EndpointPort_To_api_EndpointPort(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Ports = nil - } - return nil -} - -func Convert_v1_EndpointSubset_To_api_EndpointSubset(in *EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error { - return autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in, out, s) -} - -func autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out *EndpointSubset, s conversion.Scope) error { - if 
in.Addresses != nil { - in, out := &in.Addresses, &out.Addresses - *out = make([]EndpointAddress, len(*in)) - for i := range *in { - if err := Convert_api_EndpointAddress_To_v1_EndpointAddress(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Addresses = nil - } - if in.NotReadyAddresses != nil { - in, out := &in.NotReadyAddresses, &out.NotReadyAddresses - *out = make([]EndpointAddress, len(*in)) - for i := range *in { - if err := Convert_api_EndpointAddress_To_v1_EndpointAddress(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.NotReadyAddresses = nil - } - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]EndpointPort, len(*in)) - for i := range *in { - if err := Convert_api_EndpointPort_To_v1_EndpointPort(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Ports = nil - } - return nil -} - -func Convert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out *EndpointSubset, s conversion.Scope) error { - return autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in, out, s) -} - -func autoConvert_v1_Endpoints_To_api_Endpoints(in *Endpoints, out *api.Endpoints, s conversion.Scope) error { - SetDefaults_Endpoints(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if in.Subsets != nil { - in, out := &in.Subsets, &out.Subsets - *out = make([]api.EndpointSubset, len(*in)) - for i := range *in { - if err := Convert_v1_EndpointSubset_To_api_EndpointSubset(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Subsets = nil - } - return nil -} - -func Convert_v1_Endpoints_To_api_Endpoints(in *Endpoints, out *api.Endpoints, s conversion.Scope) error { - return autoConvert_v1_Endpoints_To_api_Endpoints(in, out, s) -} - -func autoConvert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *Endpoints, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if in.Subsets != nil { - in, out := &in.Subsets, &out.Subsets - *out = make([]EndpointSubset, len(*in)) - for i := range *in { - if err := Convert_api_EndpointSubset_To_v1_EndpointSubset(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Subsets = nil - } - return nil -} - -func Convert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *Endpoints, s conversion.Scope) error { - return autoConvert_api_Endpoints_To_v1_Endpoints(in, out, s) -} - -func autoConvert_v1_EndpointsList_To_api_EndpointsList(in *EndpointsList, out *api.EndpointsList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.Endpoints, len(*in)) - for i := range *in { - if err := Convert_v1_Endpoints_To_api_Endpoints(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_EndpointsList_To_api_EndpointsList(in 
*EndpointsList, out *api.EndpointsList, s conversion.Scope) error { - return autoConvert_v1_EndpointsList_To_api_EndpointsList(in, out, s) -} - -func autoConvert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *EndpointsList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Endpoints, len(*in)) - for i := range *in { - if err := Convert_api_Endpoints_To_v1_Endpoints(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *EndpointsList, s conversion.Scope) error { - return autoConvert_api_EndpointsList_To_v1_EndpointsList(in, out, s) -} - -func autoConvert_v1_EnvVar_To_api_EnvVar(in *EnvVar, out *api.EnvVar, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - if in.ValueFrom != nil { - in, out := &in.ValueFrom, &out.ValueFrom - *out = new(api.EnvVarSource) - if err := Convert_v1_EnvVarSource_To_api_EnvVarSource(*in, *out, s); err != nil { - return err - } - } else { - out.ValueFrom = nil - } - return nil -} - -func Convert_v1_EnvVar_To_api_EnvVar(in *EnvVar, out *api.EnvVar, s conversion.Scope) error { - return autoConvert_v1_EnvVar_To_api_EnvVar(in, out, s) -} - -func autoConvert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *EnvVar, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - if in.ValueFrom != nil { - in, out := &in.ValueFrom, &out.ValueFrom - *out = new(EnvVarSource) - if err := Convert_api_EnvVarSource_To_v1_EnvVarSource(*in, *out, s); err != nil { - return err - } - } else { - out.ValueFrom = nil - } - return nil -} - -func Convert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *EnvVar, s conversion.Scope) error { - return autoConvert_api_EnvVar_To_v1_EnvVar(in, out, s) -} - -func autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in *EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error { - if in.FieldRef != nil { - in, out := &in.FieldRef, &out.FieldRef - *out = new(api.ObjectFieldSelector) - if err := Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(*in, *out, s); err != nil { - return err - } - } else { - out.FieldRef = nil - } - if in.ResourceFieldRef != nil { - in, out := &in.ResourceFieldRef, &out.ResourceFieldRef - *out = new(api.ResourceFieldSelector) - if err := Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(*in, *out, s); err != nil { - return err - } - } else { - out.ResourceFieldRef = nil - } - if in.ConfigMapKeyRef != nil { - in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef - *out = new(api.ConfigMapKeySelector) - if err := Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(*in, *out, s); err != nil { - return err - } - } else { - out.ConfigMapKeyRef = nil - } - if in.SecretKeyRef != nil { - in, out := &in.SecretKeyRef, &out.SecretKeyRef - *out = new(api.SecretKeySelector) - if err := Convert_v1_SecretKeySelector_To_api_SecretKeySelector(*in, *out, s); err != nil { - return err - } - } else { - out.SecretKeyRef = nil - } - return nil -} - -func Convert_v1_EnvVarSource_To_api_EnvVarSource(in *EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error { - return autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in, out, 
s) -} - -func autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *EnvVarSource, s conversion.Scope) error { - if in.FieldRef != nil { - in, out := &in.FieldRef, &out.FieldRef - *out = new(ObjectFieldSelector) - if err := Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(*in, *out, s); err != nil { - return err - } - } else { - out.FieldRef = nil - } - if in.ResourceFieldRef != nil { - in, out := &in.ResourceFieldRef, &out.ResourceFieldRef - *out = new(ResourceFieldSelector) - if err := Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(*in, *out, s); err != nil { - return err - } - } else { - out.ResourceFieldRef = nil - } - if in.ConfigMapKeyRef != nil { - in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef - *out = new(ConfigMapKeySelector) - if err := Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(*in, *out, s); err != nil { - return err - } - } else { - out.ConfigMapKeyRef = nil - } - if in.SecretKeyRef != nil { - in, out := &in.SecretKeyRef, &out.SecretKeyRef - *out = new(SecretKeySelector) - if err := Convert_api_SecretKeySelector_To_v1_SecretKeySelector(*in, *out, s); err != nil { - return err - } - } else { - out.SecretKeyRef = nil - } - return nil -} - -func Convert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *EnvVarSource, s conversion.Scope) error { - return autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in, out, s) -} - -func autoConvert_v1_Event_To_api_Event(in *Event, out *api.Event, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - if err := Convert_v1_EventSource_To_api_EventSource(&in.Source, &out.Source, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.FirstTimestamp, &out.FirstTimestamp, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTimestamp, &out.LastTimestamp, s); err != nil { - return err - } - out.Count = in.Count - out.Type = in.Type - return nil -} - -func Convert_v1_Event_To_api_Event(in *Event, out *api.Event, s conversion.Scope) error { - return autoConvert_v1_Event_To_api_Event(in, out, s) -} - -func autoConvert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - if err := Convert_api_EventSource_To_v1_EventSource(&in.Source, &out.Source, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.FirstTimestamp, &out.FirstTimestamp, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTimestamp, &out.LastTimestamp, s); err != nil { - return err - } - out.Count = in.Count - out.Type = in.Type - return nil -} - -func 
Convert_api_Event_To_v1_Event(in *api.Event, out *Event, s conversion.Scope) error { - return autoConvert_api_Event_To_v1_Event(in, out, s) -} - -func autoConvert_v1_EventList_To_api_EventList(in *EventList, out *api.EventList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.Event, len(*in)) - for i := range *in { - if err := Convert_v1_Event_To_api_Event(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_EventList_To_api_EventList(in *EventList, out *api.EventList, s conversion.Scope) error { - return autoConvert_v1_EventList_To_api_EventList(in, out, s) -} - -func autoConvert_api_EventList_To_v1_EventList(in *api.EventList, out *EventList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Event, len(*in)) - for i := range *in { - if err := Convert_api_Event_To_v1_Event(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_EventList_To_v1_EventList(in *api.EventList, out *EventList, s conversion.Scope) error { - return autoConvert_api_EventList_To_v1_EventList(in, out, s) -} - -func autoConvert_v1_EventSource_To_api_EventSource(in *EventSource, out *api.EventSource, s conversion.Scope) error { - out.Component = in.Component - out.Host = in.Host - return nil -} - -func Convert_v1_EventSource_To_api_EventSource(in *EventSource, out *api.EventSource, s conversion.Scope) error { - return autoConvert_v1_EventSource_To_api_EventSource(in, out, s) -} - -func autoConvert_api_EventSource_To_v1_EventSource(in *api.EventSource, out *EventSource, s conversion.Scope) error { - out.Component = in.Component - out.Host = in.Host - return nil -} - -func Convert_api_EventSource_To_v1_EventSource(in *api.EventSource, out *EventSource, s conversion.Scope) error { - return autoConvert_api_EventSource_To_v1_EventSource(in, out, s) -} - -func autoConvert_v1_ExecAction_To_api_ExecAction(in *ExecAction, out *api.ExecAction, s conversion.Scope) error { - out.Command = in.Command - return nil -} - -func Convert_v1_ExecAction_To_api_ExecAction(in *ExecAction, out *api.ExecAction, s conversion.Scope) error { - return autoConvert_v1_ExecAction_To_api_ExecAction(in, out, s) -} - -func autoConvert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *ExecAction, s conversion.Scope) error { - out.Command = in.Command - return nil -} - -func Convert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *ExecAction, s conversion.Scope) error { - return autoConvert_api_ExecAction_To_v1_ExecAction(in, out, s) -} - -func autoConvert_v1_ExportOptions_To_api_ExportOptions(in *ExportOptions, out *api.ExportOptions, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.Export = in.Export - out.Exact = in.Exact - return nil -} - -func 
Convert_v1_ExportOptions_To_api_ExportOptions(in *ExportOptions, out *api.ExportOptions, s conversion.Scope) error { - return autoConvert_v1_ExportOptions_To_api_ExportOptions(in, out, s) -} - -func autoConvert_api_ExportOptions_To_v1_ExportOptions(in *api.ExportOptions, out *ExportOptions, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.Export = in.Export - out.Exact = in.Exact - return nil -} - -func Convert_api_ExportOptions_To_v1_ExportOptions(in *api.ExportOptions, out *ExportOptions, s conversion.Scope) error { - return autoConvert_api_ExportOptions_To_v1_ExportOptions(in, out, s) -} - -func autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in *FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error { - out.TargetWWNs = in.TargetWWNs - out.Lun = in.Lun - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_FCVolumeSource_To_api_FCVolumeSource(in *FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error { - return autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in, out, s) -} - -func autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *FCVolumeSource, s conversion.Scope) error { - out.TargetWWNs = in.TargetWWNs - out.Lun = in.Lun - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *FCVolumeSource, s conversion.Scope) error { - return autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in, out, s) -} - -func autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error { - out.Driver = in.Driver - out.FSType = in.FSType - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(api.LocalObjectReference) - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(*in, *out, s); err != nil { - return err - } - } else { - out.SecretRef = nil - } - out.ReadOnly = in.ReadOnly - out.Options = in.Options - return nil -} - -func Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error { - return autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in, out, s) -} - -func autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *FlexVolumeSource, s conversion.Scope) error { - out.Driver = in.Driver - out.FSType = in.FSType - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(*in, *out, s); err != nil { - return err - } - } else { - out.SecretRef = nil - } - out.ReadOnly = in.ReadOnly - out.Options = in.Options - return nil -} - -func Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *FlexVolumeSource, s conversion.Scope) error { - return autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in, out, s) -} - -func autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error { - out.DatasetName = in.DatasetName - return nil -} - -func Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error { - return autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in, out, s) 
-} - -func autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *FlockerVolumeSource, s conversion.Scope) error { - out.DatasetName = in.DatasetName - return nil -} - -func Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *FlockerVolumeSource, s conversion.Scope) error { - return autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in, out, s) -} - -func autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - out.PDName = in.PDName - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - return autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in, out, s) -} - -func autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, s conversion.Scope) error { - out.PDName = in.PDName - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, s conversion.Scope) error { - return autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in, out, s) -} - -func autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error { - out.Repository = in.Repository - out.Revision = in.Revision - out.Directory = in.Directory - return nil -} - -func Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error { - return autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in, out, s) -} - -func autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *GitRepoVolumeSource, s conversion.Scope) error { - out.Repository = in.Repository - out.Revision = in.Revision - out.Directory = in.Directory - return nil -} - -func Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *GitRepoVolumeSource, s conversion.Scope) error { - return autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in, out, s) -} - -func autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error { - out.EndpointsName = in.EndpointsName - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error { - return autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in, out, s) -} - -func autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *GlusterfsVolumeSource, s conversion.Scope) error { - out.EndpointsName = in.EndpointsName - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out 
*GlusterfsVolumeSource, s conversion.Scope) error { - return autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in, out, s) -} - -func autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in *HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error { - SetDefaults_HTTPGetAction(in) - out.Path = in.Path - if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil { - return err - } - out.Host = in.Host - out.Scheme = api.URIScheme(in.Scheme) - if in.HTTPHeaders != nil { - in, out := &in.HTTPHeaders, &out.HTTPHeaders - *out = make([]api.HTTPHeader, len(*in)) - for i := range *in { - if err := Convert_v1_HTTPHeader_To_api_HTTPHeader(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.HTTPHeaders = nil - } - return nil -} - -func Convert_v1_HTTPGetAction_To_api_HTTPGetAction(in *HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error { - return autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in, out, s) -} - -func autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *HTTPGetAction, s conversion.Scope) error { - out.Path = in.Path - if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil { - return err - } - out.Host = in.Host - out.Scheme = URIScheme(in.Scheme) - if in.HTTPHeaders != nil { - in, out := &in.HTTPHeaders, &out.HTTPHeaders - *out = make([]HTTPHeader, len(*in)) - for i := range *in { - if err := Convert_api_HTTPHeader_To_v1_HTTPHeader(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.HTTPHeaders = nil - } - return nil -} - -func Convert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *HTTPGetAction, s conversion.Scope) error { - return autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in, out, s) -} - -func autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in *HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -func Convert_v1_HTTPHeader_To_api_HTTPHeader(in *HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error { - return autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in, out, s) -} - -func autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *HTTPHeader, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -func Convert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *HTTPHeader, s conversion.Scope) error { - return autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in, out, s) -} - -func autoConvert_v1_Handler_To_api_Handler(in *Handler, out *api.Handler, s conversion.Scope) error { - if in.Exec != nil { - in, out := &in.Exec, &out.Exec - *out = new(api.ExecAction) - if err := Convert_v1_ExecAction_To_api_ExecAction(*in, *out, s); err != nil { - return err - } - } else { - out.Exec = nil - } - if in.HTTPGet != nil { - in, out := &in.HTTPGet, &out.HTTPGet - *out = new(api.HTTPGetAction) - if err := Convert_v1_HTTPGetAction_To_api_HTTPGetAction(*in, *out, s); err != nil { - return err - } - } else { - out.HTTPGet = nil - } - if in.TCPSocket != nil { - in, out := &in.TCPSocket, &out.TCPSocket - *out = new(api.TCPSocketAction) - if err := Convert_v1_TCPSocketAction_To_api_TCPSocketAction(*in, *out, s); err != nil { - return err - } - } else { - out.TCPSocket = nil - } - return nil -} - -func Convert_v1_Handler_To_api_Handler(in *Handler, out *api.Handler, s conversion.Scope) error { - return autoConvert_v1_Handler_To_api_Handler(in, out, s) -} - -func 
autoConvert_api_Handler_To_v1_Handler(in *api.Handler, out *Handler, s conversion.Scope) error { - if in.Exec != nil { - in, out := &in.Exec, &out.Exec - *out = new(ExecAction) - if err := Convert_api_ExecAction_To_v1_ExecAction(*in, *out, s); err != nil { - return err - } - } else { - out.Exec = nil - } - if in.HTTPGet != nil { - in, out := &in.HTTPGet, &out.HTTPGet - *out = new(HTTPGetAction) - if err := Convert_api_HTTPGetAction_To_v1_HTTPGetAction(*in, *out, s); err != nil { - return err - } - } else { - out.HTTPGet = nil - } - if in.TCPSocket != nil { - in, out := &in.TCPSocket, &out.TCPSocket - *out = new(TCPSocketAction) - if err := Convert_api_TCPSocketAction_To_v1_TCPSocketAction(*in, *out, s); err != nil { - return err - } - } else { - out.TCPSocket = nil - } - return nil -} - -func Convert_api_Handler_To_v1_Handler(in *api.Handler, out *Handler, s conversion.Scope) error { - return autoConvert_api_Handler_To_v1_Handler(in, out, s) -} - -func autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -func Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error { - return autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in, out, s) -} - -func autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *HostPathVolumeSource, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -func Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *HostPathVolumeSource, s conversion.Scope) error { - return autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in, out, s) -} - -func autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error { - SetDefaults_ISCSIVolumeSource(in) - out.TargetPortal = in.TargetPortal - out.IQN = in.IQN - out.Lun = in.Lun - out.ISCSIInterface = in.ISCSIInterface - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error { - return autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in, out, s) -} - -func autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *ISCSIVolumeSource, s conversion.Scope) error { - out.TargetPortal = in.TargetPortal - out.IQN = in.IQN - out.Lun = in.Lun - out.ISCSIInterface = in.ISCSIInterface - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *ISCSIVolumeSource, s conversion.Scope) error { - return autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in, out, s) -} - -func autoConvert_v1_KeyToPath_To_api_KeyToPath(in *KeyToPath, out *api.KeyToPath, s conversion.Scope) error { - out.Key = in.Key - out.Path = in.Path - return nil -} - -func Convert_v1_KeyToPath_To_api_KeyToPath(in *KeyToPath, out *api.KeyToPath, s conversion.Scope) error { - return autoConvert_v1_KeyToPath_To_api_KeyToPath(in, out, s) -} - -func autoConvert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *KeyToPath, s conversion.Scope) error { - out.Key = in.Key - out.Path = in.Path - return nil -} - -func Convert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out 
*KeyToPath, s conversion.Scope) error { - return autoConvert_api_KeyToPath_To_v1_KeyToPath(in, out, s) -} - -func autoConvert_v1_Lifecycle_To_api_Lifecycle(in *Lifecycle, out *api.Lifecycle, s conversion.Scope) error { - if in.PostStart != nil { - in, out := &in.PostStart, &out.PostStart - *out = new(api.Handler) - if err := Convert_v1_Handler_To_api_Handler(*in, *out, s); err != nil { - return err - } - } else { - out.PostStart = nil - } - if in.PreStop != nil { - in, out := &in.PreStop, &out.PreStop - *out = new(api.Handler) - if err := Convert_v1_Handler_To_api_Handler(*in, *out, s); err != nil { - return err - } - } else { - out.PreStop = nil - } - return nil -} - -func Convert_v1_Lifecycle_To_api_Lifecycle(in *Lifecycle, out *api.Lifecycle, s conversion.Scope) error { - return autoConvert_v1_Lifecycle_To_api_Lifecycle(in, out, s) -} - -func autoConvert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *Lifecycle, s conversion.Scope) error { - if in.PostStart != nil { - in, out := &in.PostStart, &out.PostStart - *out = new(Handler) - if err := Convert_api_Handler_To_v1_Handler(*in, *out, s); err != nil { - return err - } - } else { - out.PostStart = nil - } - if in.PreStop != nil { - in, out := &in.PreStop, &out.PreStop - *out = new(Handler) - if err := Convert_api_Handler_To_v1_Handler(*in, *out, s); err != nil { - return err - } - } else { - out.PreStop = nil - } - return nil -} - -func Convert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *Lifecycle, s conversion.Scope) error { - return autoConvert_api_Lifecycle_To_v1_Lifecycle(in, out, s) -} - -func autoConvert_v1_LimitRange_To_api_LimitRange(in *LimitRange, out *api.LimitRange, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func Convert_v1_LimitRange_To_api_LimitRange(in *LimitRange, out *api.LimitRange, s conversion.Scope) error { - return autoConvert_v1_LimitRange_To_api_LimitRange(in, out, s) -} - -func autoConvert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *LimitRange, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func Convert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *LimitRange, s conversion.Scope) error { - return autoConvert_api_LimitRange_To_v1_LimitRange(in, out, s) -} - -func autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error { - SetDefaults_LimitRangeItem(in) - out.Type = api.LimitType(in.Type) - if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Max, &out.Max, s); err != nil { - return err - } - if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Min, &out.Min, s); err != nil { - return err - } - if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Default, &out.Default, s); err != nil { - return err - } - if err := 
Convert_v1_ResourceList_To_api_ResourceList(&in.DefaultRequest, &out.DefaultRequest, s); err != nil { - return err - } - if err := Convert_v1_ResourceList_To_api_ResourceList(&in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio, s); err != nil { - return err - } - return nil -} - -func Convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error { - return autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in, out, s) -} - -func autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out *LimitRangeItem, s conversion.Scope) error { - out.Type = LimitType(in.Type) - if in.Max != nil { - in, out := &in.Max, &out.Max - *out = make(ResourceList, len(*in)) - for key, val := range *in { - newVal := new(resource.Quantity) - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { - return err - } - (*out)[ResourceName(key)] = *newVal - } - } else { - out.Max = nil - } - if in.Min != nil { - in, out := &in.Min, &out.Min - *out = make(ResourceList, len(*in)) - for key, val := range *in { - newVal := new(resource.Quantity) - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { - return err - } - (*out)[ResourceName(key)] = *newVal - } - } else { - out.Min = nil - } - if in.Default != nil { - in, out := &in.Default, &out.Default - *out = make(ResourceList, len(*in)) - for key, val := range *in { - newVal := new(resource.Quantity) - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { - return err - } - (*out)[ResourceName(key)] = *newVal - } - } else { - out.Default = nil - } - if in.DefaultRequest != nil { - in, out := &in.DefaultRequest, &out.DefaultRequest - *out = make(ResourceList, len(*in)) - for key, val := range *in { - newVal := new(resource.Quantity) - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { - return err - } - (*out)[ResourceName(key)] = *newVal - } - } else { - out.DefaultRequest = nil - } - if in.MaxLimitRequestRatio != nil { - in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio - *out = make(ResourceList, len(*in)) - for key, val := range *in { - newVal := new(resource.Quantity) - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { - return err - } - (*out)[ResourceName(key)] = *newVal - } - } else { - out.MaxLimitRequestRatio = nil - } - return nil -} - -func Convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out *LimitRangeItem, s conversion.Scope) error { - return autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem(in, out, s) -} - -func autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in *LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.LimitRange, len(*in)) - for i := range *in { - if err := Convert_v1_LimitRange_To_api_LimitRange(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_LimitRangeList_To_api_LimitRangeList(in *LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error { - return 
autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in, out, s) -} - -func autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *LimitRangeList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]LimitRange, len(*in)) - for i := range *in { - if err := Convert_api_LimitRange_To_v1_LimitRange(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *LimitRangeList, s conversion.Scope) error { - return autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in, out, s) -} - -func autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error { - if in.Limits != nil { - in, out := &in.Limits, &out.Limits - *out = make([]api.LimitRangeItem, len(*in)) - for i := range *in { - if err := Convert_v1_LimitRangeItem_To_api_LimitRangeItem(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Limits = nil - } - return nil -} - -func Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error { - return autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in, out, s) -} - -func autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *LimitRangeSpec, s conversion.Scope) error { - if in.Limits != nil { - in, out := &in.Limits, &out.Limits - *out = make([]LimitRangeItem, len(*in)) - for i := range *in { - if err := Convert_api_LimitRangeItem_To_v1_LimitRangeItem(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Limits = nil - } - return nil -} - -func Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *LimitRangeSpec, s conversion.Scope) error { - return autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in, out, s) -} - -func autoConvert_v1_List_To_api_List(in *List, out *api.List, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]runtime.Object, len(*in)) - for i := range *in { - if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_List_To_api_List(in *List, out *api.List, s conversion.Scope) error { - return autoConvert_v1_List_To_api_List(in, out, s) -} - -func autoConvert_api_List_To_v1_List(in *api.List, out *List, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]runtime.RawExtension, len(*in)) - for i := range *in { - if err := 
runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_List_To_v1_List(in *api.List, out *List, s conversion.Scope) error { - return autoConvert_api_List_To_v1_List(in, out, s) -} - -func autoConvert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { - return err - } - if err := api.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { - return err - } - out.Watch = in.Watch - out.ResourceVersion = in.ResourceVersion - out.TimeoutSeconds = in.TimeoutSeconds - return nil -} - -func Convert_v1_ListOptions_To_api_ListOptions(in *ListOptions, out *api.ListOptions, s conversion.Scope) error { - return autoConvert_v1_ListOptions_To_api_ListOptions(in, out, s) -} - -func autoConvert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *ListOptions, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { - return err - } - if err := api.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { - return err - } - out.Watch = in.Watch - out.ResourceVersion = in.ResourceVersion - out.TimeoutSeconds = in.TimeoutSeconds - return nil -} - -func Convert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *ListOptions, s conversion.Scope) error { - return autoConvert_api_ListOptions_To_v1_ListOptions(in, out, s) -} - -func autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error { - out.IP = in.IP - out.Hostname = in.Hostname - return nil -} - -func Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error { - return autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in, out, s) -} - -func autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *LoadBalancerIngress, s conversion.Scope) error { - out.IP = in.IP - out.Hostname = in.Hostname - return nil -} - -func Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *LoadBalancerIngress, s conversion.Scope) error { - return autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in, out, s) -} - -func autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error { - if in.Ingress != nil { - in, out := &in.Ingress, &out.Ingress - *out = make([]api.LoadBalancerIngress, len(*in)) - for i := range *in { - if err := Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Ingress = nil - } - return nil -} - -func Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error { - return autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in, out, s) -} - -func 
autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *LoadBalancerStatus, s conversion.Scope) error { - if in.Ingress != nil { - in, out := &in.Ingress, &out.Ingress - *out = make([]LoadBalancerIngress, len(*in)) - for i := range *in { - if err := Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Ingress = nil - } - return nil -} - -func Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *LoadBalancerStatus, s conversion.Scope) error { - return autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in, out, s) -} - -func autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in *LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error { - out.Name = in.Name - return nil -} - -func Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in *LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error { - return autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in, out, s) -} - -func autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *LocalObjectReference, s conversion.Scope) error { - out.Name = in.Name - return nil -} - -func Convert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *LocalObjectReference, s conversion.Scope) error { - return autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in, out, s) -} - -func autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error { - out.Server = in.Server - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error { - return autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in, out, s) -} - -func autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *NFSVolumeSource, s conversion.Scope) error { - out.Server = in.Server - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *NFSVolumeSource, s conversion.Scope) error { - return autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in, out, s) -} - -func autoConvert_v1_Namespace_To_api_Namespace(in *Namespace, out *api.Namespace, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_v1_NamespaceSpec_To_api_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_NamespaceStatus_To_api_NamespaceStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_Namespace_To_api_Namespace(in *Namespace, out *api.Namespace, s conversion.Scope) error { - return autoConvert_v1_Namespace_To_api_Namespace(in, out, s) -} - -func autoConvert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *Namespace, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, 
s); err != nil { - return err - } - if err := Convert_api_NamespaceSpec_To_v1_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_NamespaceStatus_To_v1_NamespaceStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *Namespace, s conversion.Scope) error { - return autoConvert_api_Namespace_To_v1_Namespace(in, out, s) -} - -func autoConvert_v1_NamespaceList_To_api_NamespaceList(in *NamespaceList, out *api.NamespaceList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.Namespace, len(*in)) - for i := range *in { - if err := Convert_v1_Namespace_To_api_Namespace(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_NamespaceList_To_api_NamespaceList(in *NamespaceList, out *api.NamespaceList, s conversion.Scope) error { - return autoConvert_v1_NamespaceList_To_api_NamespaceList(in, out, s) -} - -func autoConvert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *NamespaceList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Namespace, len(*in)) - for i := range *in { - if err := Convert_api_Namespace_To_v1_Namespace(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *NamespaceList, s conversion.Scope) error { - return autoConvert_api_NamespaceList_To_v1_NamespaceList(in, out, s) -} - -func autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in *NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error { - if in.Finalizers != nil { - in, out := &in.Finalizers, &out.Finalizers - *out = make([]api.FinalizerName, len(*in)) - for i := range *in { - (*out)[i] = api.FinalizerName((*in)[i]) - } - } else { - out.Finalizers = nil - } - return nil -} - -func Convert_v1_NamespaceSpec_To_api_NamespaceSpec(in *NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error { - return autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in, out, s) -} - -func autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *NamespaceSpec, s conversion.Scope) error { - if in.Finalizers != nil { - in, out := &in.Finalizers, &out.Finalizers - *out = make([]FinalizerName, len(*in)) - for i := range *in { - (*out)[i] = FinalizerName((*in)[i]) - } - } else { - out.Finalizers = nil - } - return nil -} - -func Convert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *NamespaceSpec, s conversion.Scope) error { - return autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in, out, s) -} - -func autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in *NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error { - SetDefaults_NamespaceStatus(in) - out.Phase = api.NamespacePhase(in.Phase) - 
return nil -} - -func Convert_v1_NamespaceStatus_To_api_NamespaceStatus(in *NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error { - return autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in, out, s) -} - -func autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, out *NamespaceStatus, s conversion.Scope) error { - out.Phase = NamespacePhase(in.Phase) - return nil -} - -func Convert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, out *NamespaceStatus, s conversion.Scope) error { - return autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus(in, out, s) -} - -func autoConvert_v1_Node_To_api_Node(in *Node, out *api.Node, s conversion.Scope) error { - SetDefaults_Node(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_v1_NodeSpec_To_api_NodeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_NodeStatus_To_api_NodeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_Node_To_api_Node(in *Node, out *api.Node, s conversion.Scope) error { - return autoConvert_v1_Node_To_api_Node(in, out, s) -} - -func autoConvert_api_Node_To_v1_Node(in *api.Node, out *Node, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_api_NodeSpec_To_v1_NodeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_NodeStatus_To_v1_NodeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_api_Node_To_v1_Node(in *api.Node, out *Node, s conversion.Scope) error { - return autoConvert_api_Node_To_v1_Node(in, out, s) -} - -func autoConvert_v1_NodeAddress_To_api_NodeAddress(in *NodeAddress, out *api.NodeAddress, s conversion.Scope) error { - out.Type = api.NodeAddressType(in.Type) - out.Address = in.Address - return nil -} - -func Convert_v1_NodeAddress_To_api_NodeAddress(in *NodeAddress, out *api.NodeAddress, s conversion.Scope) error { - return autoConvert_v1_NodeAddress_To_api_NodeAddress(in, out, s) -} - -func autoConvert_api_NodeAddress_To_v1_NodeAddress(in *api.NodeAddress, out *NodeAddress, s conversion.Scope) error { - out.Type = NodeAddressType(in.Type) - out.Address = in.Address - return nil -} - -func Convert_api_NodeAddress_To_v1_NodeAddress(in *api.NodeAddress, out *NodeAddress, s conversion.Scope) error { - return autoConvert_api_NodeAddress_To_v1_NodeAddress(in, out, s) -} - -func autoConvert_v1_NodeAffinity_To_api_NodeAffinity(in *NodeAffinity, out *api.NodeAffinity, s conversion.Scope) error { - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = new(api.NodeSelector) - if err := Convert_v1_NodeSelector_To_api_NodeSelector(*in, *out, s); err != nil { - return err - } - } else { - out.RequiredDuringSchedulingIgnoredDuringExecution = nil - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, 
&out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]api.PreferredSchedulingTerm, len(*in)) - for i := range *in { - if err := Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.PreferredDuringSchedulingIgnoredDuringExecution = nil - } - return nil -} - -func Convert_v1_NodeAffinity_To_api_NodeAffinity(in *NodeAffinity, out *api.NodeAffinity, s conversion.Scope) error { - return autoConvert_v1_NodeAffinity_To_api_NodeAffinity(in, out, s) -} - -func autoConvert_api_NodeAffinity_To_v1_NodeAffinity(in *api.NodeAffinity, out *NodeAffinity, s conversion.Scope) error { - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = new(NodeSelector) - if err := Convert_api_NodeSelector_To_v1_NodeSelector(*in, *out, s); err != nil { - return err - } - } else { - out.RequiredDuringSchedulingIgnoredDuringExecution = nil - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]PreferredSchedulingTerm, len(*in)) - for i := range *in { - if err := Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.PreferredDuringSchedulingIgnoredDuringExecution = nil - } - return nil -} - -func Convert_api_NodeAffinity_To_v1_NodeAffinity(in *api.NodeAffinity, out *NodeAffinity, s conversion.Scope) error { - return autoConvert_api_NodeAffinity_To_v1_NodeAffinity(in, out, s) -} - -func autoConvert_v1_NodeCondition_To_api_NodeCondition(in *NodeCondition, out *api.NodeCondition, s conversion.Scope) error { - out.Type = api.NodeConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastHeartbeatTime, &out.LastHeartbeatTime, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_v1_NodeCondition_To_api_NodeCondition(in *NodeCondition, out *api.NodeCondition, s conversion.Scope) error { - return autoConvert_v1_NodeCondition_To_api_NodeCondition(in, out, s) -} - -func autoConvert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *NodeCondition, s conversion.Scope) error { - out.Type = NodeConditionType(in.Type) - out.Status = ConditionStatus(in.Status) - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastHeartbeatTime, &out.LastHeartbeatTime, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *NodeCondition, s conversion.Scope) error { - return autoConvert_api_NodeCondition_To_v1_NodeCondition(in, out, s) -} - -func autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error { - if err := Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { - 
return err - } - return nil -} - -func Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error { - return autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in, out, s) -} - -func autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *api.NodeDaemonEndpoints, out *NodeDaemonEndpoints, s conversion.Scope) error { - if err := Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { - return err - } - return nil -} - -func Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *api.NodeDaemonEndpoints, out *NodeDaemonEndpoints, s conversion.Scope) error { - return autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in, out, s) -} - -func autoConvert_v1_NodeList_To_api_NodeList(in *NodeList, out *api.NodeList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.Node, len(*in)) - for i := range *in { - if err := Convert_v1_Node_To_api_Node(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_NodeList_To_api_NodeList(in *NodeList, out *api.NodeList, s conversion.Scope) error { - return autoConvert_v1_NodeList_To_api_NodeList(in, out, s) -} - -func autoConvert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *NodeList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Node, len(*in)) - for i := range *in { - if err := Convert_api_Node_To_v1_Node(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *NodeList, s conversion.Scope) error { - return autoConvert_api_NodeList_To_v1_NodeList(in, out, s) -} - -func autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *NodeProxyOptions, out *api.NodeProxyOptions, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.Path = in.Path - return nil -} - -func Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *NodeProxyOptions, out *api.NodeProxyOptions, s conversion.Scope) error { - return autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in, out, s) -} - -func autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in *api.NodeProxyOptions, out *NodeProxyOptions, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.Path = in.Path - return nil -} - -func Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in *api.NodeProxyOptions, out *NodeProxyOptions, s conversion.Scope) error { - return autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in, out, s) -} - -func autoConvert_v1_NodeSelector_To_api_NodeSelector(in *NodeSelector, out 
*api.NodeSelector, s conversion.Scope) error { - if in.NodeSelectorTerms != nil { - in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms - *out = make([]api.NodeSelectorTerm, len(*in)) - for i := range *in { - if err := Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.NodeSelectorTerms = nil - } - return nil -} - -func Convert_v1_NodeSelector_To_api_NodeSelector(in *NodeSelector, out *api.NodeSelector, s conversion.Scope) error { - return autoConvert_v1_NodeSelector_To_api_NodeSelector(in, out, s) -} - -func autoConvert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *NodeSelector, s conversion.Scope) error { - if in.NodeSelectorTerms != nil { - in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms - *out = make([]NodeSelectorTerm, len(*in)) - for i := range *in { - if err := Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.NodeSelectorTerms = nil - } - return nil -} - -func Convert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *NodeSelector, s conversion.Scope) error { - return autoConvert_api_NodeSelector_To_v1_NodeSelector(in, out, s) -} - -func autoConvert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in *NodeSelectorRequirement, out *api.NodeSelectorRequirement, s conversion.Scope) error { - out.Key = in.Key - out.Operator = api.NodeSelectorOperator(in.Operator) - out.Values = in.Values - return nil -} - -func Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in *NodeSelectorRequirement, out *api.NodeSelectorRequirement, s conversion.Scope) error { - return autoConvert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in, out, s) -} - -func autoConvert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *api.NodeSelectorRequirement, out *NodeSelectorRequirement, s conversion.Scope) error { - out.Key = in.Key - out.Operator = NodeSelectorOperator(in.Operator) - out.Values = in.Values - return nil -} - -func Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *api.NodeSelectorRequirement, out *NodeSelectorRequirement, s conversion.Scope) error { - return autoConvert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in, out, s) -} - -func autoConvert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *NodeSelectorTerm, out *api.NodeSelectorTerm, s conversion.Scope) error { - if in.MatchExpressions != nil { - in, out := &in.MatchExpressions, &out.MatchExpressions - *out = make([]api.NodeSelectorRequirement, len(*in)) - for i := range *in { - if err := Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.MatchExpressions = nil - } - return nil -} - -func Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *NodeSelectorTerm, out *api.NodeSelectorTerm, s conversion.Scope) error { - return autoConvert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in, out, s) -} - -func autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *NodeSelectorTerm, s conversion.Scope) error { - if in.MatchExpressions != nil { - in, out := &in.MatchExpressions, &out.MatchExpressions - *out = make([]NodeSelectorRequirement, len(*in)) - for i := range *in { - if err := Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.MatchExpressions = nil - } - 
return nil -} - -func Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *NodeSelectorTerm, s conversion.Scope) error { - return autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in, out, s) -} - -func autoConvert_v1_NodeSpec_To_api_NodeSpec(in *NodeSpec, out *api.NodeSpec, s conversion.Scope) error { - out.PodCIDR = in.PodCIDR - out.ExternalID = in.ExternalID - out.ProviderID = in.ProviderID - out.Unschedulable = in.Unschedulable - return nil -} - -func Convert_v1_NodeSpec_To_api_NodeSpec(in *NodeSpec, out *api.NodeSpec, s conversion.Scope) error { - return autoConvert_v1_NodeSpec_To_api_NodeSpec(in, out, s) -} - -func autoConvert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *NodeSpec, s conversion.Scope) error { - out.PodCIDR = in.PodCIDR - out.ExternalID = in.ExternalID - out.ProviderID = in.ProviderID - out.Unschedulable = in.Unschedulable - return nil -} - -func Convert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *NodeSpec, s conversion.Scope) error { - return autoConvert_api_NodeSpec_To_v1_NodeSpec(in, out, s) -} - -func autoConvert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeStatus, s conversion.Scope) error { - SetDefaults_NodeStatus(in) - if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Capacity, &out.Capacity, s); err != nil { - return err - } - if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Allocatable, &out.Allocatable, s); err != nil { - return err - } - out.Phase = api.NodePhase(in.Phase) - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]api.NodeCondition, len(*in)) - for i := range *in { - if err := Convert_v1_NodeCondition_To_api_NodeCondition(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - if in.Addresses != nil { - in, out := &in.Addresses, &out.Addresses - *out = make([]api.NodeAddress, len(*in)) - for i := range *in { - if err := Convert_v1_NodeAddress_To_api_NodeAddress(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Addresses = nil - } - if err := Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { - return err - } - if err := Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { - return err - } - if in.Images != nil { - in, out := &in.Images, &out.Images - *out = make([]api.ContainerImage, len(*in)) - for i := range *in { - if err := Convert_v1_ContainerImage_To_api_ContainerImage(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Images = nil - } - if in.VolumesInUse != nil { - in, out := &in.VolumesInUse, &out.VolumesInUse - *out = make([]api.UniqueVolumeName, len(*in)) - for i := range *in { - (*out)[i] = api.UniqueVolumeName((*in)[i]) - } - } else { - out.VolumesInUse = nil - } - if in.VolumesAttached != nil { - in, out := &in.VolumesAttached, &out.VolumesAttached - *out = make([]api.AttachedVolume, len(*in)) - for i := range *in { - if err := Convert_v1_AttachedVolume_To_api_AttachedVolume(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.VolumesAttached = nil - } - return nil -} - -func Convert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeStatus, s conversion.Scope) error { - return autoConvert_v1_NodeStatus_To_api_NodeStatus(in, out, s) -} - -func autoConvert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeStatus, s conversion.Scope) error { - if in.Capacity != nil { - 
in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList, len(*in)) - for key, val := range *in { - newVal := new(resource.Quantity) - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { - return err - } - (*out)[ResourceName(key)] = *newVal - } - } else { - out.Capacity = nil - } - if in.Allocatable != nil { - in, out := &in.Allocatable, &out.Allocatable - *out = make(ResourceList, len(*in)) - for key, val := range *in { - newVal := new(resource.Quantity) - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { - return err - } - (*out)[ResourceName(key)] = *newVal - } - } else { - out.Allocatable = nil - } - out.Phase = NodePhase(in.Phase) - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]NodeCondition, len(*in)) - for i := range *in { - if err := Convert_api_NodeCondition_To_v1_NodeCondition(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - if in.Addresses != nil { - in, out := &in.Addresses, &out.Addresses - *out = make([]NodeAddress, len(*in)) - for i := range *in { - if err := Convert_api_NodeAddress_To_v1_NodeAddress(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Addresses = nil - } - if err := Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { - return err - } - if err := Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { - return err - } - if in.Images != nil { - in, out := &in.Images, &out.Images - *out = make([]ContainerImage, len(*in)) - for i := range *in { - if err := Convert_api_ContainerImage_To_v1_ContainerImage(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Images = nil - } - if in.VolumesInUse != nil { - in, out := &in.VolumesInUse, &out.VolumesInUse - *out = make([]UniqueVolumeName, len(*in)) - for i := range *in { - (*out)[i] = UniqueVolumeName((*in)[i]) - } - } else { - out.VolumesInUse = nil - } - if in.VolumesAttached != nil { - in, out := &in.VolumesAttached, &out.VolumesAttached - *out = make([]AttachedVolume, len(*in)) - for i := range *in { - if err := Convert_api_AttachedVolume_To_v1_AttachedVolume(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.VolumesAttached = nil - } - return nil -} - -func Convert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeStatus, s conversion.Scope) error { - return autoConvert_api_NodeStatus_To_v1_NodeStatus(in, out, s) -} - -func autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *NodeSystemInfo, out *api.NodeSystemInfo, s conversion.Scope) error { - out.MachineID = in.MachineID - out.SystemUUID = in.SystemUUID - out.BootID = in.BootID - out.KernelVersion = in.KernelVersion - out.OSImage = in.OSImage - out.ContainerRuntimeVersion = in.ContainerRuntimeVersion - out.KubeletVersion = in.KubeletVersion - out.KubeProxyVersion = in.KubeProxyVersion - out.OperatingSystem = in.OperatingSystem - out.Architecture = in.Architecture - return nil -} - -func Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *NodeSystemInfo, out *api.NodeSystemInfo, s conversion.Scope) error { - return autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in, out, s) -} - -func autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *NodeSystemInfo, s conversion.Scope) error { - out.MachineID = in.MachineID - out.SystemUUID = in.SystemUUID - out.BootID 
= in.BootID - out.KernelVersion = in.KernelVersion - out.OSImage = in.OSImage - out.ContainerRuntimeVersion = in.ContainerRuntimeVersion - out.KubeletVersion = in.KubeletVersion - out.KubeProxyVersion = in.KubeProxyVersion - out.OperatingSystem = in.OperatingSystem - out.Architecture = in.Architecture - return nil -} - -func Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *NodeSystemInfo, s conversion.Scope) error { - return autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in, out, s) -} - -func autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error { - SetDefaults_ObjectFieldSelector(in) - out.APIVersion = in.APIVersion - out.FieldPath = in.FieldPath - return nil -} - -func Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error { - return autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in, out, s) -} - -func autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *ObjectFieldSelector, s conversion.Scope) error { - out.APIVersion = in.APIVersion - out.FieldPath = in.FieldPath - return nil -} - -func Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *ObjectFieldSelector, s conversion.Scope) error { - return autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in, out, s) -} - -func autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in *ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { - out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = in.Namespace - out.SelfLink = in.SelfLink - out.UID = in.UID - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.CreationTimestamp, &out.CreationTimestamp, s); err != nil { - return err - } - out.DeletionTimestamp = in.DeletionTimestamp - out.DeletionGracePeriodSeconds = in.DeletionGracePeriodSeconds - out.Labels = in.Labels - out.Annotations = in.Annotations - if in.OwnerReferences != nil { - in, out := &in.OwnerReferences, &out.OwnerReferences - *out = make([]api.OwnerReference, len(*in)) - for i := range *in { - if err := Convert_v1_OwnerReference_To_api_OwnerReference(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.OwnerReferences = nil - } - out.Finalizers = in.Finalizers - return nil -} - -func Convert_v1_ObjectMeta_To_api_ObjectMeta(in *ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { - return autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in, out, s) -} - -func autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *ObjectMeta, s conversion.Scope) error { - out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = in.Namespace - out.SelfLink = in.SelfLink - out.UID = in.UID - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.CreationTimestamp, &out.CreationTimestamp, s); err != nil { - return err - } - out.DeletionTimestamp = in.DeletionTimestamp - out.DeletionGracePeriodSeconds = in.DeletionGracePeriodSeconds - out.Labels = in.Labels - out.Annotations = in.Annotations - if in.OwnerReferences != nil { - in, out := &in.OwnerReferences, &out.OwnerReferences - *out = make([]OwnerReference, len(*in)) - for i := range *in { - if err := 
Convert_api_OwnerReference_To_v1_OwnerReference(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.OwnerReferences = nil - } - out.Finalizers = in.Finalizers - return nil -} - -func Convert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *ObjectMeta, s conversion.Scope) error { - return autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in, out, s) -} - -func autoConvert_v1_ObjectReference_To_api_ObjectReference(in *ObjectReference, out *api.ObjectReference, s conversion.Scope) error { - out.Kind = in.Kind - out.Namespace = in.Namespace - out.Name = in.Name - out.UID = in.UID - out.APIVersion = in.APIVersion - out.ResourceVersion = in.ResourceVersion - out.FieldPath = in.FieldPath - return nil -} - -func Convert_v1_ObjectReference_To_api_ObjectReference(in *ObjectReference, out *api.ObjectReference, s conversion.Scope) error { - return autoConvert_v1_ObjectReference_To_api_ObjectReference(in, out, s) -} - -func autoConvert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *ObjectReference, s conversion.Scope) error { - out.Kind = in.Kind - out.Namespace = in.Namespace - out.Name = in.Name - out.UID = in.UID - out.APIVersion = in.APIVersion - out.ResourceVersion = in.ResourceVersion - out.FieldPath = in.FieldPath - return nil -} - -func Convert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *ObjectReference, s conversion.Scope) error { - return autoConvert_api_ObjectReference_To_v1_ObjectReference(in, out, s) -} - -func autoConvert_v1_OwnerReference_To_api_OwnerReference(in *OwnerReference, out *api.OwnerReference, s conversion.Scope) error { - out.APIVersion = in.APIVersion - out.Kind = in.Kind - out.Name = in.Name - out.UID = in.UID - out.Controller = in.Controller - return nil -} - -func Convert_v1_OwnerReference_To_api_OwnerReference(in *OwnerReference, out *api.OwnerReference, s conversion.Scope) error { - return autoConvert_v1_OwnerReference_To_api_OwnerReference(in, out, s) -} - -func autoConvert_api_OwnerReference_To_v1_OwnerReference(in *api.OwnerReference, out *OwnerReference, s conversion.Scope) error { - out.APIVersion = in.APIVersion - out.Kind = in.Kind - out.Name = in.Name - out.UID = in.UID - out.Controller = in.Controller - return nil -} - -func Convert_api_OwnerReference_To_v1_OwnerReference(in *api.OwnerReference, out *OwnerReference, s conversion.Scope) error { - return autoConvert_api_OwnerReference_To_v1_OwnerReference(in, out, s) -} - -func autoConvert_v1_PersistentVolume_To_api_PersistentVolume(in *PersistentVolume, out *api.PersistentVolume, s conversion.Scope) error { - SetDefaults_PersistentVolume(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_PersistentVolume_To_api_PersistentVolume(in *PersistentVolume, out *api.PersistentVolume, s conversion.Scope) error { - return autoConvert_v1_PersistentVolume_To_api_PersistentVolume(in, out, s) -} - -func autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *PersistentVolume, s conversion.Scope) error { - if err := 
api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *PersistentVolume, s conversion.Scope) error { - return autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *PersistentVolumeClaim, out *api.PersistentVolumeClaim, s conversion.Scope) error { - SetDefaults_PersistentVolumeClaim(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *PersistentVolumeClaim, out *api.PersistentVolumeClaim, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *PersistentVolumeClaim, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *PersistentVolumeClaim, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *PersistentVolumeClaimList, out *api.PersistentVolumeClaimList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.PersistentVolumeClaim, len(*in)) - for i := range *in { - if err := Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in 
*PersistentVolumeClaimList, out *api.PersistentVolumeClaimList, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *PersistentVolumeClaimList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PersistentVolumeClaim, len(*in)) - for i := range *in { - if err := Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *PersistentVolumeClaimList, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error { - if in.AccessModes != nil { - in, out := &in.AccessModes, &out.AccessModes - *out = make([]api.PersistentVolumeAccessMode, len(*in)) - for i := range *in { - (*out)[i] = api.PersistentVolumeAccessMode((*in)[i]) - } - } else { - out.AccessModes = nil - } - out.Selector = in.Selector - if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { - return err - } - out.VolumeName = in.VolumeName - return nil -} - -func Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, s conversion.Scope) error { - if in.AccessModes != nil { - in, out := &in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(*in)) - for i := range *in { - (*out)[i] = PersistentVolumeAccessMode((*in)[i]) - } - } else { - out.AccessModes = nil - } - out.Selector = in.Selector - if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { - return err - } - out.VolumeName = in.VolumeName - return nil -} - -func Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in *PersistentVolumeClaimStatus, out *api.PersistentVolumeClaimStatus, s conversion.Scope) error { - out.Phase = api.PersistentVolumeClaimPhase(in.Phase) - if in.AccessModes != nil { - in, out := &in.AccessModes, &out.AccessModes - *out = make([]api.PersistentVolumeAccessMode, len(*in)) - for i := range *in { - (*out)[i] = api.PersistentVolumeAccessMode((*in)[i]) - } - } else { - out.AccessModes = nil - } - if err := 
Convert_v1_ResourceList_To_api_ResourceList(&in.Capacity, &out.Capacity, s); err != nil { - return err - } - return nil -} - -func Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in *PersistentVolumeClaimStatus, out *api.PersistentVolumeClaimStatus, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, s conversion.Scope) error { - out.Phase = PersistentVolumeClaimPhase(in.Phase) - if in.AccessModes != nil { - in, out := &in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(*in)) - for i := range *in { - (*out)[i] = PersistentVolumeAccessMode((*in)[i]) - } - } else { - out.AccessModes = nil - } - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList, len(*in)) - for key, val := range *in { - newVal := new(resource.Quantity) - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { - return err - } - (*out)[ResourceName(key)] = *newVal - } - } else { - out.Capacity = nil - } - return nil -} - -func Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - out.ClaimName = in.ClaimName - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - out.ClaimName = in.ClaimName - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in, out, s) -} - -func autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.PersistentVolume, len(*in)) - for i := range *in { - if err := Convert_v1_PersistentVolume_To_api_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in 
*PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in, out, s) -} - -func autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *PersistentVolumeList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PersistentVolume, len(*in)) - for i := range *in { - if err := Convert_api_PersistentVolume_To_v1_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *PersistentVolumeList, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in, out, s) -} - -func autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *PersistentVolumeSource, out *api.PersistentVolumeSource, s conversion.Scope) error { - if in.GCEPersistentDisk != nil { - in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk - *out = new(api.GCEPersistentDiskVolumeSource) - if err := Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.GCEPersistentDisk = nil - } - if in.AWSElasticBlockStore != nil { - in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore - *out = new(api.AWSElasticBlockStoreVolumeSource) - if err := Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.AWSElasticBlockStore = nil - } - if in.HostPath != nil { - in, out := &in.HostPath, &out.HostPath - *out = new(api.HostPathVolumeSource) - if err := Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.HostPath = nil - } - if in.Glusterfs != nil { - in, out := &in.Glusterfs, &out.Glusterfs - *out = new(api.GlusterfsVolumeSource) - if err := Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.Glusterfs = nil - } - if in.NFS != nil { - in, out := &in.NFS, &out.NFS - *out = new(api.NFSVolumeSource) - if err := Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.NFS = nil - } - if in.RBD != nil { - in, out := &in.RBD, &out.RBD - *out = new(api.RBDVolumeSource) - if err := Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.RBD = nil - } - if in.ISCSI != nil { - in, out := &in.ISCSI, &out.ISCSI - *out = new(api.ISCSIVolumeSource) - if err := Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.ISCSI = nil - } - if in.Cinder != nil { - in, out := &in.Cinder, &out.Cinder - *out = new(api.CinderVolumeSource) - if err := Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.Cinder = nil - } - if in.CephFS != nil { - in, out := &in.CephFS, &out.CephFS - *out = new(api.CephFSVolumeSource) - if err := 
Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.CephFS = nil - } - if in.FC != nil { - in, out := &in.FC, &out.FC - *out = new(api.FCVolumeSource) - if err := Convert_v1_FCVolumeSource_To_api_FCVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.FC = nil - } - if in.Flocker != nil { - in, out := &in.Flocker, &out.Flocker - *out = new(api.FlockerVolumeSource) - if err := Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.Flocker = nil - } - if in.FlexVolume != nil { - in, out := &in.FlexVolume, &out.FlexVolume - *out = new(api.FlexVolumeSource) - if err := Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.FlexVolume = nil - } - if in.AzureFile != nil { - in, out := &in.AzureFile, &out.AzureFile - *out = new(api.AzureFileVolumeSource) - if err := Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.AzureFile = nil - } - if in.VsphereVolume != nil { - in, out := &in.VsphereVolume, &out.VsphereVolume - *out = new(api.VsphereVirtualDiskVolumeSource) - if err := Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.VsphereVolume = nil - } - return nil -} - -func Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *PersistentVolumeSource, out *api.PersistentVolumeSource, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in, out, s) -} - -func autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *PersistentVolumeSource, s conversion.Scope) error { - if in.GCEPersistentDisk != nil { - in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk - *out = new(GCEPersistentDiskVolumeSource) - if err := Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.GCEPersistentDisk = nil - } - if in.AWSElasticBlockStore != nil { - in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore - *out = new(AWSElasticBlockStoreVolumeSource) - if err := Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.AWSElasticBlockStore = nil - } - if in.HostPath != nil { - in, out := &in.HostPath, &out.HostPath - *out = new(HostPathVolumeSource) - if err := Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.HostPath = nil - } - if in.Glusterfs != nil { - in, out := &in.Glusterfs, &out.Glusterfs - *out = new(GlusterfsVolumeSource) - if err := Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.Glusterfs = nil - } - if in.NFS != nil { - in, out := &in.NFS, &out.NFS - *out = new(NFSVolumeSource) - if err := Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.NFS = nil - } - if in.RBD != nil { - in, out := &in.RBD, &out.RBD - *out = new(RBDVolumeSource) - if err := Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.RBD = nil - } - if in.ISCSI != nil { - in, out := &in.ISCSI, &out.ISCSI - *out = 
new(ISCSIVolumeSource) - if err := Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.ISCSI = nil - } - if in.FlexVolume != nil { - in, out := &in.FlexVolume, &out.FlexVolume - *out = new(FlexVolumeSource) - if err := Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.FlexVolume = nil - } - if in.Cinder != nil { - in, out := &in.Cinder, &out.Cinder - *out = new(CinderVolumeSource) - if err := Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.Cinder = nil - } - if in.CephFS != nil { - in, out := &in.CephFS, &out.CephFS - *out = new(CephFSVolumeSource) - if err := Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.CephFS = nil - } - if in.FC != nil { - in, out := &in.FC, &out.FC - *out = new(FCVolumeSource) - if err := Convert_api_FCVolumeSource_To_v1_FCVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.FC = nil - } - if in.Flocker != nil { - in, out := &in.Flocker, &out.Flocker - *out = new(FlockerVolumeSource) - if err := Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.Flocker = nil - } - if in.AzureFile != nil { - in, out := &in.AzureFile, &out.AzureFile - *out = new(AzureFileVolumeSource) - if err := Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.AzureFile = nil - } - if in.VsphereVolume != nil { - in, out := &in.VsphereVolume, &out.VsphereVolume - *out = new(VsphereVirtualDiskVolumeSource) - if err := Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.VsphereVolume = nil - } - return nil -} - -func Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *PersistentVolumeSource, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in, out, s) -} - -func autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *PersistentVolumeSpec, out *api.PersistentVolumeSpec, s conversion.Scope) error { - if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Capacity, &out.Capacity, s); err != nil { - return err - } - if err := Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { - return err - } - if in.AccessModes != nil { - in, out := &in.AccessModes, &out.AccessModes - *out = make([]api.PersistentVolumeAccessMode, len(*in)) - for i := range *in { - (*out)[i] = api.PersistentVolumeAccessMode((*in)[i]) - } - } else { - out.AccessModes = nil - } - if in.ClaimRef != nil { - in, out := &in.ClaimRef, &out.ClaimRef - *out = new(api.ObjectReference) - if err := Convert_v1_ObjectReference_To_api_ObjectReference(*in, *out, s); err != nil { - return err - } - } else { - out.ClaimRef = nil - } - out.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) - return nil -} - -func Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *PersistentVolumeSpec, out *api.PersistentVolumeSpec, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in, out, s) -} - -func 
autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *PersistentVolumeSpec, s conversion.Scope) error { - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = make(ResourceList, len(*in)) - for key, val := range *in { - newVal := new(resource.Quantity) - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { - return err - } - (*out)[ResourceName(key)] = *newVal - } - } else { - out.Capacity = nil - } - if err := Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { - return err - } - if in.AccessModes != nil { - in, out := &in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(*in)) - for i := range *in { - (*out)[i] = PersistentVolumeAccessMode((*in)[i]) - } - } else { - out.AccessModes = nil - } - if in.ClaimRef != nil { - in, out := &in.ClaimRef, &out.ClaimRef - *out = new(ObjectReference) - if err := Convert_api_ObjectReference_To_v1_ObjectReference(*in, *out, s); err != nil { - return err - } - } else { - out.ClaimRef = nil - } - out.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) - return nil -} - -func Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *PersistentVolumeSpec, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in, out, s) -} - -func autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *PersistentVolumeStatus, out *api.PersistentVolumeStatus, s conversion.Scope) error { - out.Phase = api.PersistentVolumePhase(in.Phase) - out.Message = in.Message - out.Reason = in.Reason - return nil -} - -func Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *PersistentVolumeStatus, out *api.PersistentVolumeStatus, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in, out, s) -} - -func autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *PersistentVolumeStatus, s conversion.Scope) error { - out.Phase = PersistentVolumePhase(in.Phase) - out.Message = in.Message - out.Reason = in.Reason - return nil -} - -func Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *PersistentVolumeStatus, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in, out, s) -} - -func autoConvert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error { - SetDefaults_Pod(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func autoConvert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := 
Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1_PodAffinity_To_api_PodAffinity(in *PodAffinity, out *api.PodAffinity, s conversion.Scope) error { - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = make([]api.PodAffinityTerm, len(*in)) - for i := range *in { - if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.RequiredDuringSchedulingIgnoredDuringExecution = nil - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]api.WeightedPodAffinityTerm, len(*in)) - for i := range *in { - if err := Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.PreferredDuringSchedulingIgnoredDuringExecution = nil - } - return nil -} - -func Convert_v1_PodAffinity_To_api_PodAffinity(in *PodAffinity, out *api.PodAffinity, s conversion.Scope) error { - return autoConvert_v1_PodAffinity_To_api_PodAffinity(in, out, s) -} - -func autoConvert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *PodAffinity, s conversion.Scope) error { - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = make([]PodAffinityTerm, len(*in)) - for i := range *in { - if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.RequiredDuringSchedulingIgnoredDuringExecution = nil - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]WeightedPodAffinityTerm, len(*in)) - for i := range *in { - if err := Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.PreferredDuringSchedulingIgnoredDuringExecution = nil - } - return nil -} - -func Convert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *PodAffinity, s conversion.Scope) error { - return autoConvert_api_PodAffinity_To_v1_PodAffinity(in, out, s) -} - -func autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error { - out.LabelSelector = in.LabelSelector - out.Namespaces = in.Namespaces - out.TopologyKey = in.TopologyKey - return nil -} - -func Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error { - return autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in, out, s) -} - -func autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *PodAffinityTerm, s conversion.Scope) error { - out.LabelSelector = in.LabelSelector - out.Namespaces = in.Namespaces - out.TopologyKey = in.TopologyKey - return nil -} - -func Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *PodAffinityTerm, s 
conversion.Scope) error { - return autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in, out, s) -} - -func autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error { - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = make([]api.PodAffinityTerm, len(*in)) - for i := range *in { - if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.RequiredDuringSchedulingIgnoredDuringExecution = nil - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]api.WeightedPodAffinityTerm, len(*in)) - for i := range *in { - if err := Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.PreferredDuringSchedulingIgnoredDuringExecution = nil - } - return nil -} - -func Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error { - return autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in, out, s) -} - -func autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *PodAntiAffinity, s conversion.Scope) error { - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = make([]PodAffinityTerm, len(*in)) - for i := range *in { - if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.RequiredDuringSchedulingIgnoredDuringExecution = nil - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]WeightedPodAffinityTerm, len(*in)) - for i := range *in { - if err := Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.PreferredDuringSchedulingIgnoredDuringExecution = nil - } - return nil -} - -func Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *PodAntiAffinity, s conversion.Scope) error { - return autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in, out, s) -} - -func autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in *PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { - SetDefaults_PodAttachOptions(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - return nil -} - -func Convert_v1_PodAttachOptions_To_api_PodAttachOptions(in *PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { - return autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in, out, s) -} - -func autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *PodAttachOptions, s conversion.Scope) error { - if err := 
api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - return nil -} - -func Convert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *PodAttachOptions, s conversion.Scope) error { - return autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in, out, s) -} - -func autoConvert_v1_PodCondition_To_api_PodCondition(in *PodCondition, out *api.PodCondition, s conversion.Scope) error { - out.Type = api.PodConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_v1_PodCondition_To_api_PodCondition(in *PodCondition, out *api.PodCondition, s conversion.Scope) error { - return autoConvert_v1_PodCondition_To_api_PodCondition(in, out, s) -} - -func autoConvert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *PodCondition, s conversion.Scope) error { - out.Type = PodConditionType(in.Type) - out.Status = ConditionStatus(in.Status) - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *PodCondition, s conversion.Scope) error { - return autoConvert_api_PodCondition_To_v1_PodCondition(in, out, s) -} - -func autoConvert_v1_PodExecOptions_To_api_PodExecOptions(in *PodExecOptions, out *api.PodExecOptions, s conversion.Scope) error { - SetDefaults_PodExecOptions(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - out.Command = in.Command - return nil -} - -func Convert_v1_PodExecOptions_To_api_PodExecOptions(in *PodExecOptions, out *api.PodExecOptions, s conversion.Scope) error { - return autoConvert_v1_PodExecOptions_To_api_PodExecOptions(in, out, s) -} - -func autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *PodExecOptions, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - out.Command = in.Command - return nil -} - -func Convert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *PodExecOptions, s conversion.Scope) error { - return autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in, out, s) -} - -func autoConvert_v1_PodList_To_api_PodList(in *PodList, out *api.PodList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := 
api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.Pod, len(*in)) - for i := range *in { - if err := Convert_v1_Pod_To_api_Pod(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_PodList_To_api_PodList(in *PodList, out *api.PodList, s conversion.Scope) error { - return autoConvert_v1_PodList_To_api_PodList(in, out, s) -} - -func autoConvert_api_PodList_To_v1_PodList(in *api.PodList, out *PodList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Pod, len(*in)) - for i := range *in { - if err := Convert_api_Pod_To_v1_Pod(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_PodList_To_v1_PodList(in *api.PodList, out *PodList, s conversion.Scope) error { - return autoConvert_api_PodList_To_v1_PodList(in, out, s) -} - -func autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in *PodLogOptions, out *api.PodLogOptions, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.Container = in.Container - out.Follow = in.Follow - out.Previous = in.Previous - out.SinceSeconds = in.SinceSeconds - out.SinceTime = in.SinceTime - out.Timestamps = in.Timestamps - out.TailLines = in.TailLines - out.LimitBytes = in.LimitBytes - return nil -} - -func Convert_v1_PodLogOptions_To_api_PodLogOptions(in *PodLogOptions, out *api.PodLogOptions, s conversion.Scope) error { - return autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in, out, s) -} - -func autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *PodLogOptions, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.Container = in.Container - out.Follow = in.Follow - out.Previous = in.Previous - out.SinceSeconds = in.SinceSeconds - out.SinceTime = in.SinceTime - out.Timestamps = in.Timestamps - out.TailLines = in.TailLines - out.LimitBytes = in.LimitBytes - return nil -} - -func Convert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *PodLogOptions, s conversion.Scope) error { - return autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in, out, s) -} - -func autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions(in *PodProxyOptions, out *api.PodProxyOptions, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.Path = in.Path - return nil -} - -func Convert_v1_PodProxyOptions_To_api_PodProxyOptions(in *PodProxyOptions, out *api.PodProxyOptions, s conversion.Scope) error { - return autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions(in, out, s) -} - -func autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *PodProxyOptions, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != 
nil { - return err - } - out.Path = in.Path - return nil -} - -func Convert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *PodProxyOptions, s conversion.Scope) error { - return autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in, out, s) -} - -func autoConvert_v1_PodSecurityContext_To_api_PodSecurityContext(in *PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error { - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(api.SELinuxOptions) - if err := Convert_v1_SELinuxOptions_To_api_SELinuxOptions(*in, *out, s); err != nil { - return err - } - } else { - out.SELinuxOptions = nil - } - out.RunAsUser = in.RunAsUser - out.RunAsNonRoot = in.RunAsNonRoot - out.SupplementalGroups = in.SupplementalGroups - out.FSGroup = in.FSGroup - return nil -} - -func autoConvert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversion.Scope) error { - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]Volume, len(*in)) - for i := range *in { - if err := Convert_api_Volume_To_v1_Volume(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Volumes = nil - } - if in.InitContainers != nil { - in, out := &in.InitContainers, &out.InitContainers - *out = make([]Container, len(*in)) - for i := range *in { - if err := Convert_api_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.InitContainers = nil - } - if in.Containers != nil { - in, out := &in.Containers, &out.Containers - *out = make([]Container, len(*in)) - for i := range *in { - if err := Convert_api_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Containers = nil - } - out.RestartPolicy = RestartPolicy(in.RestartPolicy) - out.TerminationGracePeriodSeconds = in.TerminationGracePeriodSeconds - out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds - out.DNSPolicy = DNSPolicy(in.DNSPolicy) - out.NodeSelector = in.NodeSelector - out.ServiceAccountName = in.ServiceAccountName - out.NodeName = in.NodeName - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(PodSecurityContext) - if err := Convert_api_PodSecurityContext_To_v1_PodSecurityContext(*in, *out, s); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]LocalObjectReference, len(*in)) - for i := range *in { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.ImagePullSecrets = nil - } - out.Hostname = in.Hostname - out.Subdomain = in.Subdomain - return nil -} - -func autoConvert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus, s conversion.Scope) error { - out.Phase = api.PodPhase(in.Phase) - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]api.PodCondition, len(*in)) - for i := range *in { - if err := Convert_v1_PodCondition_To_api_PodCondition(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - out.Message = in.Message - out.Reason = in.Reason - out.HostIP = in.HostIP - out.PodIP = in.PodIP - out.StartTime = in.StartTime - if in.InitContainerStatuses != nil { - in, out := &in.InitContainerStatuses, &out.InitContainerStatuses - *out = make([]api.ContainerStatus, len(*in)) - for 
i := range *in { - if err := Convert_v1_ContainerStatus_To_api_ContainerStatus(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.InitContainerStatuses = nil - } - if in.ContainerStatuses != nil { - in, out := &in.ContainerStatuses, &out.ContainerStatuses - *out = make([]api.ContainerStatus, len(*in)) - for i := range *in { - if err := Convert_v1_ContainerStatus_To_api_ContainerStatus(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.ContainerStatuses = nil - } - return nil -} - -func Convert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus, s conversion.Scope) error { - return autoConvert_v1_PodStatus_To_api_PodStatus(in, out, s) -} - -func autoConvert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus, s conversion.Scope) error { - out.Phase = PodPhase(in.Phase) - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]PodCondition, len(*in)) - for i := range *in { - if err := Convert_api_PodCondition_To_v1_PodCondition(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - out.Message = in.Message - out.Reason = in.Reason - out.HostIP = in.HostIP - out.PodIP = in.PodIP - out.StartTime = in.StartTime - if in.InitContainerStatuses != nil { - in, out := &in.InitContainerStatuses, &out.InitContainerStatuses - *out = make([]ContainerStatus, len(*in)) - for i := range *in { - if err := Convert_api_ContainerStatus_To_v1_ContainerStatus(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.InitContainerStatuses = nil - } - if in.ContainerStatuses != nil { - in, out := &in.ContainerStatuses, &out.ContainerStatuses - *out = make([]ContainerStatus, len(*in)) - for i := range *in { - if err := Convert_api_ContainerStatus_To_v1_ContainerStatus(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.ContainerStatuses = nil - } - return nil -} - -func Convert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus, s conversion.Scope) error { - return autoConvert_api_PodStatus_To_v1_PodStatus(in, out, s) -} - -func autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemplate, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := 
Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemplate, s conversion.Scope) error { - return autoConvert_v1_PodTemplate_To_api_PodTemplate(in, out, s) -} - -func autoConvert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *PodTemplate, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *PodTemplate, s conversion.Scope) error { - return autoConvert_api_PodTemplate_To_v1_PodTemplate(in, out, s) -} - -func autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in *PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.PodTemplate, len(*in)) - for i := range *in { - if err := Convert_v1_PodTemplate_To_api_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_PodTemplateList_To_api_PodTemplateList(in *PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error { - return autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in, out, s) -} - -func autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *PodTemplateList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodTemplate, len(*in)) - for i := range *in { - if err := Convert_api_PodTemplate_To_v1_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *PodTemplateList, s conversion.Scope) error { - return autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in, out, s) -} - -func autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error { - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil 
-} - -func autoConvert_v1_Preconditions_To_api_Preconditions(in *Preconditions, out *api.Preconditions, s conversion.Scope) error { - out.UID = in.UID - return nil -} - -func Convert_v1_Preconditions_To_api_Preconditions(in *Preconditions, out *api.Preconditions, s conversion.Scope) error { - return autoConvert_v1_Preconditions_To_api_Preconditions(in, out, s) -} - -func autoConvert_api_Preconditions_To_v1_Preconditions(in *api.Preconditions, out *Preconditions, s conversion.Scope) error { - out.UID = in.UID - return nil -} - -func Convert_api_Preconditions_To_v1_Preconditions(in *api.Preconditions, out *Preconditions, s conversion.Scope) error { - return autoConvert_api_Preconditions_To_v1_Preconditions(in, out, s) -} - -func autoConvert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in *PreferredSchedulingTerm, out *api.PreferredSchedulingTerm, s conversion.Scope) error { - out.Weight = in.Weight - if err := Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { - return err - } - return nil -} - -func Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in *PreferredSchedulingTerm, out *api.PreferredSchedulingTerm, s conversion.Scope) error { - return autoConvert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in, out, s) -} - -func autoConvert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *api.PreferredSchedulingTerm, out *PreferredSchedulingTerm, s conversion.Scope) error { - out.Weight = in.Weight - if err := Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { - return err - } - return nil -} - -func Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *api.PreferredSchedulingTerm, out *PreferredSchedulingTerm, s conversion.Scope) error { - return autoConvert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in, out, s) -} - -func autoConvert_v1_Probe_To_api_Probe(in *Probe, out *api.Probe, s conversion.Scope) error { - SetDefaults_Probe(in) - if err := Convert_v1_Handler_To_api_Handler(&in.Handler, &out.Handler, s); err != nil { - return err - } - out.InitialDelaySeconds = in.InitialDelaySeconds - out.TimeoutSeconds = in.TimeoutSeconds - out.PeriodSeconds = in.PeriodSeconds - out.SuccessThreshold = in.SuccessThreshold - out.FailureThreshold = in.FailureThreshold - return nil -} - -func Convert_v1_Probe_To_api_Probe(in *Probe, out *api.Probe, s conversion.Scope) error { - return autoConvert_v1_Probe_To_api_Probe(in, out, s) -} - -func autoConvert_api_Probe_To_v1_Probe(in *api.Probe, out *Probe, s conversion.Scope) error { - if err := Convert_api_Handler_To_v1_Handler(&in.Handler, &out.Handler, s); err != nil { - return err - } - out.InitialDelaySeconds = in.InitialDelaySeconds - out.TimeoutSeconds = in.TimeoutSeconds - out.PeriodSeconds = in.PeriodSeconds - out.SuccessThreshold = in.SuccessThreshold - out.FailureThreshold = in.FailureThreshold - return nil -} - -func Convert_api_Probe_To_v1_Probe(in *api.Probe, out *Probe, s conversion.Scope) error { - return autoConvert_api_Probe_To_v1_Probe(in, out, s) -} - -func autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error { - SetDefaults_RBDVolumeSource(in) - out.CephMonitors = in.CephMonitors - out.RBDImage = in.RBDImage - out.FSType = in.FSType - out.RBDPool = in.RBDPool - out.RadosUser = in.RadosUser - out.Keyring = in.Keyring - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef 
- *out = new(api.LocalObjectReference) - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(*in, *out, s); err != nil { - return err - } - } else { - out.SecretRef = nil - } - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error { - return autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in, out, s) -} - -func autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *RBDVolumeSource, s conversion.Scope) error { - out.CephMonitors = in.CephMonitors - out.RBDImage = in.RBDImage - out.FSType = in.FSType - out.RBDPool = in.RBDPool - out.RadosUser = in.RadosUser - out.Keyring = in.Keyring - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(*in, *out, s); err != nil { - return err - } - } else { - out.SecretRef = nil - } - out.ReadOnly = in.ReadOnly - return nil -} - -func Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *RBDVolumeSource, s conversion.Scope) error { - return autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in, out, s) -} - -func autoConvert_v1_RangeAllocation_To_api_RangeAllocation(in *RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - out.Range = in.Range - if err := conversion.Convert_Slice_byte_To_Slice_byte(&in.Data, &out.Data, s); err != nil { - return err - } - return nil -} - -func Convert_v1_RangeAllocation_To_api_RangeAllocation(in *RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error { - return autoConvert_v1_RangeAllocation_To_api_RangeAllocation(in, out, s) -} - -func autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *RangeAllocation, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - out.Range = in.Range - if err := conversion.Convert_Slice_byte_To_Slice_byte(&in.Data, &out.Data, s); err != nil { - return err - } - return nil -} - -func Convert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *RangeAllocation, s conversion.Scope) error { - return autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in, out, s) -} - -func autoConvert_v1_ReplicationController_To_api_ReplicationController(in *ReplicationController, out *api.ReplicationController, s conversion.Scope) error { - SetDefaults_ReplicationController(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func 
Convert_v1_ReplicationController_To_api_ReplicationController(in *ReplicationController, out *api.ReplicationController, s conversion.Scope) error { - return autoConvert_v1_ReplicationController_To_api_ReplicationController(in, out, s) -} - -func autoConvert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *ReplicationController, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *ReplicationController, s conversion.Scope) error { - return autoConvert_api_ReplicationController_To_v1_ReplicationController(in, out, s) -} - -func autoConvert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in *ReplicationControllerList, out *api.ReplicationControllerList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.ReplicationController, len(*in)) - for i := range *in { - if err := Convert_v1_ReplicationController_To_api_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in *ReplicationControllerList, out *api.ReplicationControllerList, s conversion.Scope) error { - return autoConvert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in, out, s) -} - -func autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *ReplicationControllerList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ReplicationController, len(*in)) - for i := range *in { - if err := Convert_api_ReplicationController_To_v1_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *ReplicationControllerList, s conversion.Scope) error { - return autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in, out, s) -} - -func autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in *ReplicationControllerStatus, out *api.ReplicationControllerStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ObservedGeneration = 
in.ObservedGeneration - return nil -} - -func Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in *ReplicationControllerStatus, out *api.ReplicationControllerStatus, s conversion.Scope) error { - return autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in, out, s) -} - -func autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *ReplicationControllerStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ObservedGeneration = in.ObservedGeneration - return nil -} - -func Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *ReplicationControllerStatus, s conversion.Scope) error { - return autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in, out, s) -} - -func autoConvert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in *ResourceFieldSelector, out *api.ResourceFieldSelector, s conversion.Scope) error { - out.ContainerName = in.ContainerName - out.Resource = in.Resource - if err := api.Convert_resource_Quantity_To_resource_Quantity(&in.Divisor, &out.Divisor, s); err != nil { - return err - } - return nil -} - -func Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in *ResourceFieldSelector, out *api.ResourceFieldSelector, s conversion.Scope) error { - return autoConvert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in, out, s) -} - -func autoConvert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *api.ResourceFieldSelector, out *ResourceFieldSelector, s conversion.Scope) error { - out.ContainerName = in.ContainerName - out.Resource = in.Resource - if err := api.Convert_resource_Quantity_To_resource_Quantity(&in.Divisor, &out.Divisor, s); err != nil { - return err - } - return nil -} - -func Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *api.ResourceFieldSelector, out *ResourceFieldSelector, s conversion.Scope) error { - return autoConvert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in, out, s) -} - -func autoConvert_v1_ResourceQuota_To_api_ResourceQuota(in *ResourceQuota, out *api.ResourceQuota, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_ResourceQuota_To_api_ResourceQuota(in *ResourceQuota, out *api.ResourceQuota, s conversion.Scope) error { - return autoConvert_v1_ResourceQuota_To_api_ResourceQuota(in, out, s) -} - -func autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *ResourceQuota, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := 
Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *ResourceQuota, s conversion.Scope) error { - return autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in, out, s) -} - -func autoConvert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *ResourceQuotaList, out *api.ResourceQuotaList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.ResourceQuota, len(*in)) - for i := range *in { - if err := Convert_v1_ResourceQuota_To_api_ResourceQuota(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *ResourceQuotaList, out *api.ResourceQuotaList, s conversion.Scope) error { - return autoConvert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in, out, s) -} - -func autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *ResourceQuotaList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ResourceQuota, len(*in)) - for i := range *in { - if err := Convert_api_ResourceQuota_To_v1_ResourceQuota(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *ResourceQuotaList, s conversion.Scope) error { - return autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in, out, s) -} - -func autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *ResourceQuotaSpec, out *api.ResourceQuotaSpec, s conversion.Scope) error { - if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Hard, &out.Hard, s); err != nil { - return err - } - if in.Scopes != nil { - in, out := &in.Scopes, &out.Scopes - *out = make([]api.ResourceQuotaScope, len(*in)) - for i := range *in { - (*out)[i] = api.ResourceQuotaScope((*in)[i]) - } - } else { - out.Scopes = nil - } - return nil -} - -func Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *ResourceQuotaSpec, out *api.ResourceQuotaSpec, s conversion.Scope) error { - return autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in, out, s) -} - -func autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *ResourceQuotaSpec, s conversion.Scope) error { - if in.Hard != nil { - in, out := &in.Hard, &out.Hard - *out = make(ResourceList, len(*in)) - for key, val := range *in { - newVal := new(resource.Quantity) - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { - return err - } - (*out)[ResourceName(key)] = *newVal - } - } else { - out.Hard = nil - } - if in.Scopes != nil { - in, out := &in.Scopes, &out.Scopes - *out = make([]ResourceQuotaScope, len(*in)) - for i := range *in { - (*out)[i] = ResourceQuotaScope((*in)[i]) 
- } - } else { - out.Scopes = nil - } - return nil -} - -func Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *ResourceQuotaSpec, s conversion.Scope) error { - return autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in, out, s) -} - -func autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuotaStatus, out *api.ResourceQuotaStatus, s conversion.Scope) error { - if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Hard, &out.Hard, s); err != nil { - return err - } - if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Used, &out.Used, s); err != nil { - return err - } - return nil -} - -func Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuotaStatus, out *api.ResourceQuotaStatus, s conversion.Scope) error { - return autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in, out, s) -} - -func autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *ResourceQuotaStatus, s conversion.Scope) error { - if in.Hard != nil { - in, out := &in.Hard, &out.Hard - *out = make(ResourceList, len(*in)) - for key, val := range *in { - newVal := new(resource.Quantity) - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { - return err - } - (*out)[ResourceName(key)] = *newVal - } - } else { - out.Hard = nil - } - if in.Used != nil { - in, out := &in.Used, &out.Used - *out = make(ResourceList, len(*in)) - for key, val := range *in { - newVal := new(resource.Quantity) - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { - return err - } - (*out)[ResourceName(key)] = *newVal - } - } else { - out.Used = nil - } - return nil -} - -func Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *ResourceQuotaStatus, s conversion.Scope) error { - return autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in, out, s) -} - -func autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error { - if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Limits, &out.Limits, s); err != nil { - return err - } - if err := Convert_v1_ResourceList_To_api_ResourceList(&in.Requests, &out.Requests, s); err != nil { - return err - } - return nil -} - -func Convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error { - return autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in, out, s) -} - -func autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error { - if in.Limits != nil { - in, out := &in.Limits, &out.Limits - *out = make(ResourceList, len(*in)) - for key, val := range *in { - newVal := new(resource.Quantity) - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { - return err - } - (*out)[ResourceName(key)] = *newVal - } - } else { - out.Limits = nil - } - if in.Requests != nil { - in, out := &in.Requests, &out.Requests - *out = make(ResourceList, len(*in)) - for key, val := range *in { - newVal := new(resource.Quantity) - if err := api.Convert_resource_Quantity_To_resource_Quantity(&val, newVal, s); err != nil { - return err - } - (*out)[ResourceName(key)] = *newVal - } - } else { - out.Requests = nil - } - return nil -} - -func 
Convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error { - return autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in, out, s) -} - -func autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in *SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error { - out.User = in.User - out.Role = in.Role - out.Type = in.Type - out.Level = in.Level - return nil -} - -func Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in *SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error { - return autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in, out, s) -} - -func autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *SELinuxOptions, s conversion.Scope) error { - out.User = in.User - out.Role = in.Role - out.Type = in.Type - out.Level = in.Level - return nil -} - -func Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *SELinuxOptions, s conversion.Scope) error { - return autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in, out, s) -} - -func autoConvert_v1_Secret_To_api_Secret(in *Secret, out *api.Secret, s conversion.Scope) error { - SetDefaults_Secret(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - out.Data = in.Data - out.Type = api.SecretType(in.Type) - return nil -} - -func Convert_v1_Secret_To_api_Secret(in *Secret, out *api.Secret, s conversion.Scope) error { - return autoConvert_v1_Secret_To_api_Secret(in, out, s) -} - -func autoConvert_api_Secret_To_v1_Secret(in *api.Secret, out *Secret, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - out.Data = in.Data - out.Type = SecretType(in.Type) - return nil -} - -func Convert_api_Secret_To_v1_Secret(in *api.Secret, out *Secret, s conversion.Scope) error { - return autoConvert_api_Secret_To_v1_Secret(in, out, s) -} - -func autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in *SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Key = in.Key - return nil -} - -func Convert_v1_SecretKeySelector_To_api_SecretKeySelector(in *SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error { - return autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in, out, s) -} - -func autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *SecretKeySelector, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Key = in.Key - return nil -} - -func Convert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *SecretKeySelector, s conversion.Scope) error { - return autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in, out, s) -} - -func autoConvert_v1_SecretList_To_api_SecretList(in *SecretList, out *api.SecretList, s conversion.Scope) error { - if err := 
api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.Secret, len(*in)) - for i := range *in { - if err := Convert_v1_Secret_To_api_Secret(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_SecretList_To_api_SecretList(in *SecretList, out *api.SecretList, s conversion.Scope) error { - return autoConvert_v1_SecretList_To_api_SecretList(in, out, s) -} - -func autoConvert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *SecretList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Secret, len(*in)) - for i := range *in { - if err := Convert_api_Secret_To_v1_Secret(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *SecretList, s conversion.Scope) error { - return autoConvert_api_SecretList_To_v1_SecretList(in, out, s) -} - -func autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.KeyToPath, len(*in)) - for i := range *in { - if err := Convert_v1_KeyToPath_To_api_KeyToPath(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error { - return autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in, out, s) -} - -func autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *SecretVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - if err := Convert_api_KeyToPath_To_v1_KeyToPath(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *SecretVolumeSource, s conversion.Scope) error { - return autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in, out, s) -} - -func autoConvert_v1_SecurityContext_To_api_SecurityContext(in *SecurityContext, out *api.SecurityContext, s conversion.Scope) error { - if in.Capabilities != nil { - in, out := &in.Capabilities, &out.Capabilities - *out = new(api.Capabilities) - if err := Convert_v1_Capabilities_To_api_Capabilities(*in, *out, s); err != nil { - return err - } - } else { - out.Capabilities = nil - } - out.Privileged = in.Privileged - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(api.SELinuxOptions) - if err := Convert_v1_SELinuxOptions_To_api_SELinuxOptions(*in, *out, s); err != nil { - return 
err - } - } else { - out.SELinuxOptions = nil - } - out.RunAsUser = in.RunAsUser - out.RunAsNonRoot = in.RunAsNonRoot - out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem - return nil -} - -func Convert_v1_SecurityContext_To_api_SecurityContext(in *SecurityContext, out *api.SecurityContext, s conversion.Scope) error { - return autoConvert_v1_SecurityContext_To_api_SecurityContext(in, out, s) -} - -func autoConvert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *SecurityContext, s conversion.Scope) error { - if in.Capabilities != nil { - in, out := &in.Capabilities, &out.Capabilities - *out = new(Capabilities) - if err := Convert_api_Capabilities_To_v1_Capabilities(*in, *out, s); err != nil { - return err - } - } else { - out.Capabilities = nil - } - out.Privileged = in.Privileged - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(SELinuxOptions) - if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(*in, *out, s); err != nil { - return err - } - } else { - out.SELinuxOptions = nil - } - out.RunAsUser = in.RunAsUser - out.RunAsNonRoot = in.RunAsNonRoot - out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem - return nil -} - -func Convert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *SecurityContext, s conversion.Scope) error { - return autoConvert_api_SecurityContext_To_v1_SecurityContext(in, out, s) -} - -func autoConvert_v1_SerializedReference_To_api_SerializedReference(in *SerializedReference, out *api.SerializedReference, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Reference, &out.Reference, s); err != nil { - return err - } - return nil -} - -func Convert_v1_SerializedReference_To_api_SerializedReference(in *SerializedReference, out *api.SerializedReference, s conversion.Scope) error { - return autoConvert_v1_SerializedReference_To_api_SerializedReference(in, out, s) -} - -func autoConvert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *SerializedReference, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Reference, &out.Reference, s); err != nil { - return err - } - return nil -} - -func Convert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *SerializedReference, s conversion.Scope) error { - return autoConvert_api_SerializedReference_To_v1_SerializedReference(in, out, s) -} - -func autoConvert_v1_Service_To_api_Service(in *Service, out *api.Service, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_v1_ServiceSpec_To_api_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_ServiceStatus_To_api_ServiceStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_Service_To_api_Service(in *Service, out *api.Service, s conversion.Scope) error { - return autoConvert_v1_Service_To_api_Service(in, out, s) -} - -func 
autoConvert_api_Service_To_v1_Service(in *api.Service, out *Service, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if err := Convert_api_ServiceSpec_To_v1_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_ServiceStatus_To_v1_ServiceStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_api_Service_To_v1_Service(in *api.Service, out *Service, s conversion.Scope) error { - return autoConvert_api_Service_To_v1_Service(in, out, s) -} - -func autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in *ServiceAccount, out *api.ServiceAccount, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if in.Secrets != nil { - in, out := &in.Secrets, &out.Secrets - *out = make([]api.ObjectReference, len(*in)) - for i := range *in { - if err := Convert_v1_ObjectReference_To_api_ObjectReference(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Secrets = nil - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]api.LocalObjectReference, len(*in)) - for i := range *in { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.ImagePullSecrets = nil - } - return nil -} - -func Convert_v1_ServiceAccount_To_api_ServiceAccount(in *ServiceAccount, out *api.ServiceAccount, s conversion.Scope) error { - return autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in, out, s) -} - -func autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *ServiceAccount, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { - return err - } - if in.Secrets != nil { - in, out := &in.Secrets, &out.Secrets - *out = make([]ObjectReference, len(*in)) - for i := range *in { - if err := Convert_api_ObjectReference_To_v1_ObjectReference(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Secrets = nil - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]LocalObjectReference, len(*in)) - for i := range *in { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.ImagePullSecrets = nil - } - return nil -} - -func Convert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *ServiceAccount, s conversion.Scope) error { - return autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in, out, s) -} - -func autoConvert_v1_ServiceAccountList_To_api_ServiceAccountList(in *ServiceAccountList, out *api.ServiceAccountList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := 
api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.ServiceAccount, len(*in)) - for i := range *in { - if err := Convert_v1_ServiceAccount_To_api_ServiceAccount(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_ServiceAccountList_To_api_ServiceAccountList(in *ServiceAccountList, out *api.ServiceAccountList, s conversion.Scope) error { - return autoConvert_v1_ServiceAccountList_To_api_ServiceAccountList(in, out, s) -} - -func autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *ServiceAccountList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ServiceAccount, len(*in)) - for i := range *in { - if err := Convert_api_ServiceAccount_To_v1_ServiceAccount(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *ServiceAccountList, s conversion.Scope) error { - return autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in, out, s) -} - -func autoConvert_v1_ServiceList_To_api_ServiceList(in *ServiceList, out *api.ServiceList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.Service, len(*in)) - for i := range *in { - if err := Convert_v1_Service_To_api_Service(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_ServiceList_To_api_ServiceList(in *ServiceList, out *api.ServiceList, s conversion.Scope) error { - return autoConvert_v1_ServiceList_To_api_ServiceList(in, out, s) -} - -func autoConvert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *ServiceList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Service, len(*in)) - for i := range *in { - if err := Convert_api_Service_To_v1_Service(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *ServiceList, s conversion.Scope) error { - return autoConvert_api_ServiceList_To_v1_ServiceList(in, out, s) -} - -func autoConvert_v1_ServicePort_To_api_ServicePort(in *ServicePort, out *api.ServicePort, s conversion.Scope) error { - out.Name = in.Name - out.Protocol = api.Protocol(in.Protocol) - out.Port = in.Port - if err := 
api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.TargetPort, &out.TargetPort, s); err != nil { - return err - } - out.NodePort = in.NodePort - return nil -} - -func Convert_v1_ServicePort_To_api_ServicePort(in *ServicePort, out *api.ServicePort, s conversion.Scope) error { - return autoConvert_v1_ServicePort_To_api_ServicePort(in, out, s) -} - -func autoConvert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *ServicePort, s conversion.Scope) error { - out.Name = in.Name - out.Protocol = Protocol(in.Protocol) - out.Port = in.Port - if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.TargetPort, &out.TargetPort, s); err != nil { - return err - } - out.NodePort = in.NodePort - return nil -} - -func Convert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *ServicePort, s conversion.Scope) error { - return autoConvert_api_ServicePort_To_v1_ServicePort(in, out, s) -} - -func autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in *ServiceProxyOptions, out *api.ServiceProxyOptions, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.Path = in.Path - return nil -} - -func Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in *ServiceProxyOptions, out *api.ServiceProxyOptions, s conversion.Scope) error { - return autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in, out, s) -} - -func autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *ServiceProxyOptions, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.Path = in.Path - return nil -} - -func Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *ServiceProxyOptions, s conversion.Scope) error { - return autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in, out, s) -} - -func autoConvert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error { - SetDefaults_ServiceSpec(in) - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]api.ServicePort, len(*in)) - for i := range *in { - if err := Convert_v1_ServicePort_To_api_ServicePort(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Ports = nil - } - out.Selector = in.Selector - out.ClusterIP = in.ClusterIP - out.Type = api.ServiceType(in.Type) - out.ExternalIPs = in.ExternalIPs - out.SessionAffinity = api.ServiceAffinity(in.SessionAffinity) - out.LoadBalancerIP = in.LoadBalancerIP - out.LoadBalancerSourceRanges = in.LoadBalancerSourceRanges - return nil -} - -func autoConvert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *ServiceSpec, s conversion.Scope) error { - out.Type = ServiceType(in.Type) - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]ServicePort, len(*in)) - for i := range *in { - if err := Convert_api_ServicePort_To_v1_ServicePort(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Ports = nil - } - out.Selector = in.Selector - out.ClusterIP = in.ClusterIP - out.ExternalIPs = in.ExternalIPs - out.LoadBalancerIP = in.LoadBalancerIP - out.SessionAffinity = ServiceAffinity(in.SessionAffinity) - out.LoadBalancerSourceRanges = in.LoadBalancerSourceRanges - return nil -} - -func autoConvert_v1_ServiceStatus_To_api_ServiceStatus(in *ServiceStatus, out 
*api.ServiceStatus, s conversion.Scope) error { - if err := Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { - return err - } - return nil -} - -func Convert_v1_ServiceStatus_To_api_ServiceStatus(in *ServiceStatus, out *api.ServiceStatus, s conversion.Scope) error { - return autoConvert_v1_ServiceStatus_To_api_ServiceStatus(in, out, s) -} - -func autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *ServiceStatus, s conversion.Scope) error { - if err := Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { - return err - } - return nil -} - -func Convert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *ServiceStatus, s conversion.Scope) error { - return autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in, out, s) -} - -func autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in *TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error { - if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil { - return err - } - return nil -} - -func Convert_v1_TCPSocketAction_To_api_TCPSocketAction(in *TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error { - return autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in, out, s) -} - -func autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *TCPSocketAction, s conversion.Scope) error { - if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.Port, &out.Port, s); err != nil { - return err - } - return nil -} - -func Convert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *TCPSocketAction, s conversion.Scope) error { - return autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in, out, s) -} - -func autoConvert_v1_Taint_To_api_Taint(in *Taint, out *api.Taint, s conversion.Scope) error { - out.Key = in.Key - out.Value = in.Value - out.Effect = api.TaintEffect(in.Effect) - return nil -} - -func Convert_v1_Taint_To_api_Taint(in *Taint, out *api.Taint, s conversion.Scope) error { - return autoConvert_v1_Taint_To_api_Taint(in, out, s) -} - -func autoConvert_api_Taint_To_v1_Taint(in *api.Taint, out *Taint, s conversion.Scope) error { - out.Key = in.Key - out.Value = in.Value - out.Effect = TaintEffect(in.Effect) - return nil -} - -func Convert_api_Taint_To_v1_Taint(in *api.Taint, out *Taint, s conversion.Scope) error { - return autoConvert_api_Taint_To_v1_Taint(in, out, s) -} - -func autoConvert_v1_Toleration_To_api_Toleration(in *Toleration, out *api.Toleration, s conversion.Scope) error { - out.Key = in.Key - out.Operator = api.TolerationOperator(in.Operator) - out.Value = in.Value - out.Effect = api.TaintEffect(in.Effect) - return nil -} - -func Convert_v1_Toleration_To_api_Toleration(in *Toleration, out *api.Toleration, s conversion.Scope) error { - return autoConvert_v1_Toleration_To_api_Toleration(in, out, s) -} - -func autoConvert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *Toleration, s conversion.Scope) error { - out.Key = in.Key - out.Operator = TolerationOperator(in.Operator) - out.Value = in.Value - out.Effect = TaintEffect(in.Effect) - return nil -} - -func Convert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *Toleration, s conversion.Scope) error { - return autoConvert_api_Toleration_To_v1_Toleration(in, out, s) -} - -func autoConvert_v1_Volume_To_api_Volume(in *Volume, out *api.Volume, s conversion.Scope) error { - 
SetDefaults_Volume(in) - out.Name = in.Name - if err := Convert_v1_VolumeSource_To_api_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { - return err - } - return nil -} - -func Convert_v1_Volume_To_api_Volume(in *Volume, out *api.Volume, s conversion.Scope) error { - return autoConvert_v1_Volume_To_api_Volume(in, out, s) -} - -func autoConvert_api_Volume_To_v1_Volume(in *api.Volume, out *Volume, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_api_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { - return err - } - return nil -} - -func Convert_api_Volume_To_v1_Volume(in *api.Volume, out *Volume, s conversion.Scope) error { - return autoConvert_api_Volume_To_v1_Volume(in, out, s) -} - -func autoConvert_v1_VolumeMount_To_api_VolumeMount(in *VolumeMount, out *api.VolumeMount, s conversion.Scope) error { - out.Name = in.Name - out.ReadOnly = in.ReadOnly - out.MountPath = in.MountPath - out.SubPath = in.SubPath - return nil -} - -func Convert_v1_VolumeMount_To_api_VolumeMount(in *VolumeMount, out *api.VolumeMount, s conversion.Scope) error { - return autoConvert_v1_VolumeMount_To_api_VolumeMount(in, out, s) -} - -func autoConvert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *VolumeMount, s conversion.Scope) error { - out.Name = in.Name - out.ReadOnly = in.ReadOnly - out.MountPath = in.MountPath - out.SubPath = in.SubPath - return nil -} - -func Convert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *VolumeMount, s conversion.Scope) error { - return autoConvert_api_VolumeMount_To_v1_VolumeMount(in, out, s) -} - -func autoConvert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.VolumeSource, s conversion.Scope) error { - if in.HostPath != nil { - in, out := &in.HostPath, &out.HostPath - *out = new(api.HostPathVolumeSource) - if err := Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.HostPath = nil - } - if in.EmptyDir != nil { - in, out := &in.EmptyDir, &out.EmptyDir - *out = new(api.EmptyDirVolumeSource) - if err := Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.EmptyDir = nil - } - if in.GCEPersistentDisk != nil { - in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk - *out = new(api.GCEPersistentDiskVolumeSource) - if err := Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.GCEPersistentDisk = nil - } - if in.AWSElasticBlockStore != nil { - in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore - *out = new(api.AWSElasticBlockStoreVolumeSource) - if err := Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.AWSElasticBlockStore = nil - } - if in.GitRepo != nil { - in, out := &in.GitRepo, &out.GitRepo - *out = new(api.GitRepoVolumeSource) - if err := Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.GitRepo = nil - } - if in.Secret != nil { - in, out := &in.Secret, &out.Secret - *out = new(api.SecretVolumeSource) - if err := Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.Secret = nil - } - if in.NFS != nil { - in, out := &in.NFS, &out.NFS - *out = new(api.NFSVolumeSource) - if err := 
Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.NFS = nil - } - if in.ISCSI != nil { - in, out := &in.ISCSI, &out.ISCSI - *out = new(api.ISCSIVolumeSource) - if err := Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.ISCSI = nil - } - if in.Glusterfs != nil { - in, out := &in.Glusterfs, &out.Glusterfs - *out = new(api.GlusterfsVolumeSource) - if err := Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.Glusterfs = nil - } - if in.PersistentVolumeClaim != nil { - in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim - *out = new(api.PersistentVolumeClaimVolumeSource) - if err := Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.PersistentVolumeClaim = nil - } - if in.RBD != nil { - in, out := &in.RBD, &out.RBD - *out = new(api.RBDVolumeSource) - if err := Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.RBD = nil - } - if in.FlexVolume != nil { - in, out := &in.FlexVolume, &out.FlexVolume - *out = new(api.FlexVolumeSource) - if err := Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.FlexVolume = nil - } - if in.Cinder != nil { - in, out := &in.Cinder, &out.Cinder - *out = new(api.CinderVolumeSource) - if err := Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.Cinder = nil - } - if in.CephFS != nil { - in, out := &in.CephFS, &out.CephFS - *out = new(api.CephFSVolumeSource) - if err := Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.CephFS = nil - } - if in.Flocker != nil { - in, out := &in.Flocker, &out.Flocker - *out = new(api.FlockerVolumeSource) - if err := Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.Flocker = nil - } - if in.DownwardAPI != nil { - in, out := &in.DownwardAPI, &out.DownwardAPI - *out = new(api.DownwardAPIVolumeSource) - if err := Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.DownwardAPI = nil - } - if in.FC != nil { - in, out := &in.FC, &out.FC - *out = new(api.FCVolumeSource) - if err := Convert_v1_FCVolumeSource_To_api_FCVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.FC = nil - } - if in.AzureFile != nil { - in, out := &in.AzureFile, &out.AzureFile - *out = new(api.AzureFileVolumeSource) - if err := Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.AzureFile = nil - } - if in.ConfigMap != nil { - in, out := &in.ConfigMap, &out.ConfigMap - *out = new(api.ConfigMapVolumeSource) - if err := Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.ConfigMap = nil - } - if in.VsphereVolume != nil { - in, out := &in.VsphereVolume, &out.VsphereVolume - *out = new(api.VsphereVirtualDiskVolumeSource) - if err := Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.VsphereVolume = nil - } - return nil -} 
- -func Convert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.VolumeSource, s conversion.Scope) error { - return autoConvert_v1_VolumeSource_To_api_VolumeSource(in, out, s) -} - -func autoConvert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *VolumeSource, s conversion.Scope) error { - if in.HostPath != nil { - in, out := &in.HostPath, &out.HostPath - *out = new(HostPathVolumeSource) - if err := Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.HostPath = nil - } - if in.EmptyDir != nil { - in, out := &in.EmptyDir, &out.EmptyDir - *out = new(EmptyDirVolumeSource) - if err := Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.EmptyDir = nil - } - if in.GCEPersistentDisk != nil { - in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk - *out = new(GCEPersistentDiskVolumeSource) - if err := Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.GCEPersistentDisk = nil - } - if in.AWSElasticBlockStore != nil { - in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore - *out = new(AWSElasticBlockStoreVolumeSource) - if err := Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.AWSElasticBlockStore = nil - } - if in.GitRepo != nil { - in, out := &in.GitRepo, &out.GitRepo - *out = new(GitRepoVolumeSource) - if err := Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.GitRepo = nil - } - if in.Secret != nil { - in, out := &in.Secret, &out.Secret - *out = new(SecretVolumeSource) - if err := Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.Secret = nil - } - if in.NFS != nil { - in, out := &in.NFS, &out.NFS - *out = new(NFSVolumeSource) - if err := Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.NFS = nil - } - if in.ISCSI != nil { - in, out := &in.ISCSI, &out.ISCSI - *out = new(ISCSIVolumeSource) - if err := Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.ISCSI = nil - } - if in.Glusterfs != nil { - in, out := &in.Glusterfs, &out.Glusterfs - *out = new(GlusterfsVolumeSource) - if err := Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.Glusterfs = nil - } - if in.PersistentVolumeClaim != nil { - in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim - *out = new(PersistentVolumeClaimVolumeSource) - if err := Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.PersistentVolumeClaim = nil - } - if in.RBD != nil { - in, out := &in.RBD, &out.RBD - *out = new(RBDVolumeSource) - if err := Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.RBD = nil - } - if in.FlexVolume != nil { - in, out := &in.FlexVolume, &out.FlexVolume - *out = new(FlexVolumeSource) - if err := Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.FlexVolume = nil - } - if in.Cinder != nil { - in, out := 
&in.Cinder, &out.Cinder - *out = new(CinderVolumeSource) - if err := Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.Cinder = nil - } - if in.CephFS != nil { - in, out := &in.CephFS, &out.CephFS - *out = new(CephFSVolumeSource) - if err := Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.CephFS = nil - } - if in.Flocker != nil { - in, out := &in.Flocker, &out.Flocker - *out = new(FlockerVolumeSource) - if err := Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.Flocker = nil - } - if in.DownwardAPI != nil { - in, out := &in.DownwardAPI, &out.DownwardAPI - *out = new(DownwardAPIVolumeSource) - if err := Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.DownwardAPI = nil - } - if in.FC != nil { - in, out := &in.FC, &out.FC - *out = new(FCVolumeSource) - if err := Convert_api_FCVolumeSource_To_v1_FCVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.FC = nil - } - if in.AzureFile != nil { - in, out := &in.AzureFile, &out.AzureFile - *out = new(AzureFileVolumeSource) - if err := Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.AzureFile = nil - } - if in.ConfigMap != nil { - in, out := &in.ConfigMap, &out.ConfigMap - *out = new(ConfigMapVolumeSource) - if err := Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.ConfigMap = nil - } - if in.VsphereVolume != nil { - in, out := &in.VsphereVolume, &out.VsphereVolume - *out = new(VsphereVirtualDiskVolumeSource) - if err := Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(*in, *out, s); err != nil { - return err - } - } else { - out.VsphereVolume = nil - } - return nil -} - -func Convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *VolumeSource, s conversion.Scope) error { - return autoConvert_api_VolumeSource_To_v1_VolumeSource(in, out, s) -} - -func autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { - out.VolumePath = in.VolumePath - out.FSType = in.FSType - return nil -} - -func Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { - return autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in, out, s) -} - -func autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, s conversion.Scope) error { - out.VolumePath = in.VolumePath - out.FSType = in.FSType - return nil -} - -func Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, s conversion.Scope) error { - return autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in, out, s) -} - -func autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error { - out.Weight = 
int(in.Weight) - if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { - return err - } - return nil -} - -func Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error { - return autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in, out, s) -} - -func autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, s conversion.Scope) error { - out.Weight = int32(in.Weight) - if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { - return err - } - return nil -} - -func Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, s conversion.Scope) error { - return autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/api/v1/deep_copy_generated.go deleted file mode 100644 index e7d59684a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/deep_copy_generated.go +++ /dev/null @@ -1,3261 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
- -package v1 - -import ( - api "k8s.io/kubernetes/pkg/api" - resource "k8s.io/kubernetes/pkg/api/resource" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" - types "k8s.io/kubernetes/pkg/types" - intstr "k8s.io/kubernetes/pkg/util/intstr" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_v1_AWSElasticBlockStoreVolumeSource, - DeepCopy_v1_Affinity, - DeepCopy_v1_AttachedVolume, - DeepCopy_v1_AzureFileVolumeSource, - DeepCopy_v1_Binding, - DeepCopy_v1_Capabilities, - DeepCopy_v1_CephFSVolumeSource, - DeepCopy_v1_CinderVolumeSource, - DeepCopy_v1_ComponentCondition, - DeepCopy_v1_ComponentStatus, - DeepCopy_v1_ComponentStatusList, - DeepCopy_v1_ConfigMap, - DeepCopy_v1_ConfigMapKeySelector, - DeepCopy_v1_ConfigMapList, - DeepCopy_v1_ConfigMapVolumeSource, - DeepCopy_v1_Container, - DeepCopy_v1_ContainerImage, - DeepCopy_v1_ContainerPort, - DeepCopy_v1_ContainerState, - DeepCopy_v1_ContainerStateRunning, - DeepCopy_v1_ContainerStateTerminated, - DeepCopy_v1_ContainerStateWaiting, - DeepCopy_v1_ContainerStatus, - DeepCopy_v1_DaemonEndpoint, - DeepCopy_v1_DeleteOptions, - DeepCopy_v1_DownwardAPIVolumeFile, - DeepCopy_v1_DownwardAPIVolumeSource, - DeepCopy_v1_EmptyDirVolumeSource, - DeepCopy_v1_EndpointAddress, - DeepCopy_v1_EndpointPort, - DeepCopy_v1_EndpointSubset, - DeepCopy_v1_Endpoints, - DeepCopy_v1_EndpointsList, - DeepCopy_v1_EnvVar, - DeepCopy_v1_EnvVarSource, - DeepCopy_v1_Event, - DeepCopy_v1_EventList, - DeepCopy_v1_EventSource, - DeepCopy_v1_ExecAction, - DeepCopy_v1_ExportOptions, - DeepCopy_v1_FCVolumeSource, - DeepCopy_v1_FlexVolumeSource, - DeepCopy_v1_FlockerVolumeSource, - DeepCopy_v1_GCEPersistentDiskVolumeSource, - DeepCopy_v1_GitRepoVolumeSource, - DeepCopy_v1_GlusterfsVolumeSource, - DeepCopy_v1_HTTPGetAction, - DeepCopy_v1_HTTPHeader, - DeepCopy_v1_Handler, - DeepCopy_v1_HostPathVolumeSource, - DeepCopy_v1_ISCSIVolumeSource, - DeepCopy_v1_KeyToPath, - DeepCopy_v1_Lifecycle, - DeepCopy_v1_LimitRange, - DeepCopy_v1_LimitRangeItem, - DeepCopy_v1_LimitRangeList, - DeepCopy_v1_LimitRangeSpec, - DeepCopy_v1_List, - DeepCopy_v1_ListOptions, - DeepCopy_v1_LoadBalancerIngress, - DeepCopy_v1_LoadBalancerStatus, - DeepCopy_v1_LocalObjectReference, - DeepCopy_v1_NFSVolumeSource, - DeepCopy_v1_Namespace, - DeepCopy_v1_NamespaceList, - DeepCopy_v1_NamespaceSpec, - DeepCopy_v1_NamespaceStatus, - DeepCopy_v1_Node, - DeepCopy_v1_NodeAddress, - DeepCopy_v1_NodeAffinity, - DeepCopy_v1_NodeCondition, - DeepCopy_v1_NodeDaemonEndpoints, - DeepCopy_v1_NodeList, - DeepCopy_v1_NodeProxyOptions, - DeepCopy_v1_NodeSelector, - DeepCopy_v1_NodeSelectorRequirement, - DeepCopy_v1_NodeSelectorTerm, - DeepCopy_v1_NodeSpec, - DeepCopy_v1_NodeStatus, - DeepCopy_v1_NodeSystemInfo, - DeepCopy_v1_ObjectFieldSelector, - DeepCopy_v1_ObjectMeta, - DeepCopy_v1_ObjectReference, - DeepCopy_v1_OwnerReference, - DeepCopy_v1_PersistentVolume, - DeepCopy_v1_PersistentVolumeClaim, - DeepCopy_v1_PersistentVolumeClaimList, - DeepCopy_v1_PersistentVolumeClaimSpec, - DeepCopy_v1_PersistentVolumeClaimStatus, - DeepCopy_v1_PersistentVolumeClaimVolumeSource, - DeepCopy_v1_PersistentVolumeList, - DeepCopy_v1_PersistentVolumeSource, - DeepCopy_v1_PersistentVolumeSpec, - DeepCopy_v1_PersistentVolumeStatus, - DeepCopy_v1_Pod, - DeepCopy_v1_PodAffinity, - DeepCopy_v1_PodAffinityTerm, - DeepCopy_v1_PodAntiAffinity, - DeepCopy_v1_PodAttachOptions, - DeepCopy_v1_PodCondition, - DeepCopy_v1_PodExecOptions, - 
DeepCopy_v1_PodList, - DeepCopy_v1_PodLogOptions, - DeepCopy_v1_PodProxyOptions, - DeepCopy_v1_PodSecurityContext, - DeepCopy_v1_PodSpec, - DeepCopy_v1_PodStatus, - DeepCopy_v1_PodStatusResult, - DeepCopy_v1_PodTemplate, - DeepCopy_v1_PodTemplateList, - DeepCopy_v1_PodTemplateSpec, - DeepCopy_v1_Preconditions, - DeepCopy_v1_PreferredSchedulingTerm, - DeepCopy_v1_Probe, - DeepCopy_v1_RBDVolumeSource, - DeepCopy_v1_RangeAllocation, - DeepCopy_v1_ReplicationController, - DeepCopy_v1_ReplicationControllerList, - DeepCopy_v1_ReplicationControllerSpec, - DeepCopy_v1_ReplicationControllerStatus, - DeepCopy_v1_ResourceFieldSelector, - DeepCopy_v1_ResourceQuota, - DeepCopy_v1_ResourceQuotaList, - DeepCopy_v1_ResourceQuotaSpec, - DeepCopy_v1_ResourceQuotaStatus, - DeepCopy_v1_ResourceRequirements, - DeepCopy_v1_SELinuxOptions, - DeepCopy_v1_Secret, - DeepCopy_v1_SecretKeySelector, - DeepCopy_v1_SecretList, - DeepCopy_v1_SecretVolumeSource, - DeepCopy_v1_SecurityContext, - DeepCopy_v1_SerializedReference, - DeepCopy_v1_Service, - DeepCopy_v1_ServiceAccount, - DeepCopy_v1_ServiceAccountList, - DeepCopy_v1_ServiceList, - DeepCopy_v1_ServicePort, - DeepCopy_v1_ServiceProxyOptions, - DeepCopy_v1_ServiceSpec, - DeepCopy_v1_ServiceStatus, - DeepCopy_v1_TCPSocketAction, - DeepCopy_v1_Taint, - DeepCopy_v1_Toleration, - DeepCopy_v1_Volume, - DeepCopy_v1_VolumeMount, - DeepCopy_v1_VolumeSource, - DeepCopy_v1_VsphereVirtualDiskVolumeSource, - DeepCopy_v1_WeightedPodAffinityTerm, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. - panic(err) - } -} - -func DeepCopy_v1_AWSElasticBlockStoreVolumeSource(in AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, c *conversion.Cloner) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -func DeepCopy_v1_Affinity(in Affinity, out *Affinity, c *conversion.Cloner) error { - if in.NodeAffinity != nil { - in, out := in.NodeAffinity, &out.NodeAffinity - *out = new(NodeAffinity) - if err := DeepCopy_v1_NodeAffinity(*in, *out, c); err != nil { - return err - } - } else { - out.NodeAffinity = nil - } - if in.PodAffinity != nil { - in, out := in.PodAffinity, &out.PodAffinity - *out = new(PodAffinity) - if err := DeepCopy_v1_PodAffinity(*in, *out, c); err != nil { - return err - } - } else { - out.PodAffinity = nil - } - if in.PodAntiAffinity != nil { - in, out := in.PodAntiAffinity, &out.PodAntiAffinity - *out = new(PodAntiAffinity) - if err := DeepCopy_v1_PodAntiAffinity(*in, *out, c); err != nil { - return err - } - } else { - out.PodAntiAffinity = nil - } - return nil -} - -func DeepCopy_v1_AttachedVolume(in AttachedVolume, out *AttachedVolume, c *conversion.Cloner) error { - out.Name = in.Name - out.DevicePath = in.DevicePath - return nil -} - -func DeepCopy_v1_AzureFileVolumeSource(in AzureFileVolumeSource, out *AzureFileVolumeSource, c *conversion.Cloner) error { - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly - return nil -} - -func DeepCopy_v1_Binding(in Binding, out *Binding, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectReference(in.Target, &out.Target, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_Capabilities(in 
Capabilities, out *Capabilities, c *conversion.Cloner) error { - if in.Add != nil { - in, out := in.Add, &out.Add - *out = make([]Capability, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.Add = nil - } - if in.Drop != nil { - in, out := in.Drop, &out.Drop - *out = make([]Capability, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.Drop = nil - } - return nil -} - -func DeepCopy_v1_CephFSVolumeSource(in CephFSVolumeSource, out *CephFSVolumeSource, c *conversion.Cloner) error { - if in.Monitors != nil { - in, out := in.Monitors, &out.Monitors - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Monitors = nil - } - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - if in.SecretRef != nil { - in, out := in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - if err := DeepCopy_v1_LocalObjectReference(*in, *out, c); err != nil { - return err - } - } else { - out.SecretRef = nil - } - out.ReadOnly = in.ReadOnly - return nil -} - -func DeepCopy_v1_CinderVolumeSource(in CinderVolumeSource, out *CinderVolumeSource, c *conversion.Cloner) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func DeepCopy_v1_ComponentCondition(in ComponentCondition, out *ComponentCondition, c *conversion.Cloner) error { - out.Type = in.Type - out.Status = in.Status - out.Message = in.Message - out.Error = in.Error - return nil -} - -func DeepCopy_v1_ComponentStatus(in ComponentStatus, out *ComponentStatus, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if in.Conditions != nil { - in, out := in.Conditions, &out.Conditions - *out = make([]ComponentCondition, len(in)) - for i := range in { - if err := DeepCopy_v1_ComponentCondition(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - return nil -} - -func DeepCopy_v1_ComponentStatusList(in ComponentStatusList, out *ComponentStatusList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ComponentStatus, len(in)) - for i := range in { - if err := DeepCopy_v1_ComponentStatus(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1_ConfigMap(in ConfigMap, out *ConfigMap, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if in.Data != nil { - in, out := in.Data, &out.Data - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.Data = nil - } - return nil -} - -func DeepCopy_v1_ConfigMapKeySelector(in ConfigMapKeySelector, out *ConfigMapKeySelector, c *conversion.Cloner) error { - if err := DeepCopy_v1_LocalObjectReference(in.LocalObjectReference, &out.LocalObjectReference, c); err != nil { - return err - } - out.Key = in.Key - return nil -} - -func DeepCopy_v1_ConfigMapList(in ConfigMapList, out 
*ConfigMapList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ConfigMap, len(in)) - for i := range in { - if err := DeepCopy_v1_ConfigMap(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1_ConfigMapVolumeSource(in ConfigMapVolumeSource, out *ConfigMapVolumeSource, c *conversion.Cloner) error { - if err := DeepCopy_v1_LocalObjectReference(in.LocalObjectReference, &out.LocalObjectReference, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]KeyToPath, len(in)) - for i := range in { - if err := DeepCopy_v1_KeyToPath(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1_Container(in Container, out *Container, c *conversion.Cloner) error { - out.Name = in.Name - out.Image = in.Image - if in.Command != nil { - in, out := in.Command, &out.Command - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Command = nil - } - if in.Args != nil { - in, out := in.Args, &out.Args - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Args = nil - } - out.WorkingDir = in.WorkingDir - if in.Ports != nil { - in, out := in.Ports, &out.Ports - *out = make([]ContainerPort, len(in)) - for i := range in { - if err := DeepCopy_v1_ContainerPort(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Ports = nil - } - if in.Env != nil { - in, out := in.Env, &out.Env - *out = make([]EnvVar, len(in)) - for i := range in { - if err := DeepCopy_v1_EnvVar(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Env = nil - } - if err := DeepCopy_v1_ResourceRequirements(in.Resources, &out.Resources, c); err != nil { - return err - } - if in.VolumeMounts != nil { - in, out := in.VolumeMounts, &out.VolumeMounts - *out = make([]VolumeMount, len(in)) - for i := range in { - if err := DeepCopy_v1_VolumeMount(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.VolumeMounts = nil - } - if in.LivenessProbe != nil { - in, out := in.LivenessProbe, &out.LivenessProbe - *out = new(Probe) - if err := DeepCopy_v1_Probe(*in, *out, c); err != nil { - return err - } - } else { - out.LivenessProbe = nil - } - if in.ReadinessProbe != nil { - in, out := in.ReadinessProbe, &out.ReadinessProbe - *out = new(Probe) - if err := DeepCopy_v1_Probe(*in, *out, c); err != nil { - return err - } - } else { - out.ReadinessProbe = nil - } - if in.Lifecycle != nil { - in, out := in.Lifecycle, &out.Lifecycle - *out = new(Lifecycle) - if err := DeepCopy_v1_Lifecycle(*in, *out, c); err != nil { - return err - } - } else { - out.Lifecycle = nil - } - out.TerminationMessagePath = in.TerminationMessagePath - out.ImagePullPolicy = in.ImagePullPolicy - if in.SecurityContext != nil { - in, out := in.SecurityContext, &out.SecurityContext - *out = new(SecurityContext) - if err := DeepCopy_v1_SecurityContext(*in, *out, c); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - out.Stdin = in.Stdin - out.StdinOnce = in.StdinOnce - out.TTY = in.TTY - return nil -} - -func DeepCopy_v1_ContainerImage(in ContainerImage, out *ContainerImage, c *conversion.Cloner) error { - if in.Names != nil 
{ - in, out := in.Names, &out.Names - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Names = nil - } - out.SizeBytes = in.SizeBytes - return nil -} - -func DeepCopy_v1_ContainerPort(in ContainerPort, out *ContainerPort, c *conversion.Cloner) error { - out.Name = in.Name - out.HostPort = in.HostPort - out.ContainerPort = in.ContainerPort - out.Protocol = in.Protocol - out.HostIP = in.HostIP - return nil -} - -func DeepCopy_v1_ContainerState(in ContainerState, out *ContainerState, c *conversion.Cloner) error { - if in.Waiting != nil { - in, out := in.Waiting, &out.Waiting - *out = new(ContainerStateWaiting) - if err := DeepCopy_v1_ContainerStateWaiting(*in, *out, c); err != nil { - return err - } - } else { - out.Waiting = nil - } - if in.Running != nil { - in, out := in.Running, &out.Running - *out = new(ContainerStateRunning) - if err := DeepCopy_v1_ContainerStateRunning(*in, *out, c); err != nil { - return err - } - } else { - out.Running = nil - } - if in.Terminated != nil { - in, out := in.Terminated, &out.Terminated - *out = new(ContainerStateTerminated) - if err := DeepCopy_v1_ContainerStateTerminated(*in, *out, c); err != nil { - return err - } - } else { - out.Terminated = nil - } - return nil -} - -func DeepCopy_v1_ContainerStateRunning(in ContainerStateRunning, out *ContainerStateRunning, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_Time(in.StartedAt, &out.StartedAt, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_ContainerStateTerminated(in ContainerStateTerminated, out *ContainerStateTerminated, c *conversion.Cloner) error { - out.ExitCode = in.ExitCode - out.Signal = in.Signal - out.Reason = in.Reason - out.Message = in.Message - if err := unversioned.DeepCopy_unversioned_Time(in.StartedAt, &out.StartedAt, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Time(in.FinishedAt, &out.FinishedAt, c); err != nil { - return err - } - out.ContainerID = in.ContainerID - return nil -} - -func DeepCopy_v1_ContainerStateWaiting(in ContainerStateWaiting, out *ContainerStateWaiting, c *conversion.Cloner) error { - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func DeepCopy_v1_ContainerStatus(in ContainerStatus, out *ContainerStatus, c *conversion.Cloner) error { - out.Name = in.Name - if err := DeepCopy_v1_ContainerState(in.State, &out.State, c); err != nil { - return err - } - if err := DeepCopy_v1_ContainerState(in.LastTerminationState, &out.LastTerminationState, c); err != nil { - return err - } - out.Ready = in.Ready - out.RestartCount = in.RestartCount - out.Image = in.Image - out.ImageID = in.ImageID - out.ContainerID = in.ContainerID - return nil -} - -func DeepCopy_v1_DaemonEndpoint(in DaemonEndpoint, out *DaemonEndpoint, c *conversion.Cloner) error { - out.Port = in.Port - return nil -} - -func DeepCopy_v1_DeleteOptions(in DeleteOptions, out *DeleteOptions, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if in.GracePeriodSeconds != nil { - in, out := in.GracePeriodSeconds, &out.GracePeriodSeconds - *out = new(int64) - **out = *in - } else { - out.GracePeriodSeconds = nil - } - if in.Preconditions != nil { - in, out := in.Preconditions, &out.Preconditions - *out = new(Preconditions) - if err := DeepCopy_v1_Preconditions(*in, *out, c); err != nil { - return err - } - } else { - out.Preconditions = nil - } - if in.OrphanDependents != nil { - in, out := 
in.OrphanDependents, &out.OrphanDependents - *out = new(bool) - **out = *in - } else { - out.OrphanDependents = nil - } - return nil -} - -func DeepCopy_v1_DownwardAPIVolumeFile(in DownwardAPIVolumeFile, out *DownwardAPIVolumeFile, c *conversion.Cloner) error { - out.Path = in.Path - if in.FieldRef != nil { - in, out := in.FieldRef, &out.FieldRef - *out = new(ObjectFieldSelector) - if err := DeepCopy_v1_ObjectFieldSelector(*in, *out, c); err != nil { - return err - } - } else { - out.FieldRef = nil - } - if in.ResourceFieldRef != nil { - in, out := in.ResourceFieldRef, &out.ResourceFieldRef - *out = new(ResourceFieldSelector) - if err := DeepCopy_v1_ResourceFieldSelector(*in, *out, c); err != nil { - return err - } - } else { - out.ResourceFieldRef = nil - } - return nil -} - -func DeepCopy_v1_DownwardAPIVolumeSource(in DownwardAPIVolumeSource, out *DownwardAPIVolumeSource, c *conversion.Cloner) error { - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]DownwardAPIVolumeFile, len(in)) - for i := range in { - if err := DeepCopy_v1_DownwardAPIVolumeFile(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1_EmptyDirVolumeSource(in EmptyDirVolumeSource, out *EmptyDirVolumeSource, c *conversion.Cloner) error { - out.Medium = in.Medium - return nil -} - -func DeepCopy_v1_EndpointAddress(in EndpointAddress, out *EndpointAddress, c *conversion.Cloner) error { - out.IP = in.IP - out.Hostname = in.Hostname - if in.TargetRef != nil { - in, out := in.TargetRef, &out.TargetRef - *out = new(ObjectReference) - if err := DeepCopy_v1_ObjectReference(*in, *out, c); err != nil { - return err - } - } else { - out.TargetRef = nil - } - return nil -} - -func DeepCopy_v1_EndpointPort(in EndpointPort, out *EndpointPort, c *conversion.Cloner) error { - out.Name = in.Name - out.Port = in.Port - out.Protocol = in.Protocol - return nil -} - -func DeepCopy_v1_EndpointSubset(in EndpointSubset, out *EndpointSubset, c *conversion.Cloner) error { - if in.Addresses != nil { - in, out := in.Addresses, &out.Addresses - *out = make([]EndpointAddress, len(in)) - for i := range in { - if err := DeepCopy_v1_EndpointAddress(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Addresses = nil - } - if in.NotReadyAddresses != nil { - in, out := in.NotReadyAddresses, &out.NotReadyAddresses - *out = make([]EndpointAddress, len(in)) - for i := range in { - if err := DeepCopy_v1_EndpointAddress(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.NotReadyAddresses = nil - } - if in.Ports != nil { - in, out := in.Ports, &out.Ports - *out = make([]EndpointPort, len(in)) - for i := range in { - if err := DeepCopy_v1_EndpointPort(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Ports = nil - } - return nil -} - -func DeepCopy_v1_Endpoints(in Endpoints, out *Endpoints, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if in.Subsets != nil { - in, out := in.Subsets, &out.Subsets - *out = make([]EndpointSubset, len(in)) - for i := range in { - if err := DeepCopy_v1_EndpointSubset(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Subsets = nil - } - return nil -} - -func DeepCopy_v1_EndpointsList(in EndpointsList, out *EndpointsList, c *conversion.Cloner) error { - 
if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Endpoints, len(in)) - for i := range in { - if err := DeepCopy_v1_Endpoints(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1_EnvVar(in EnvVar, out *EnvVar, c *conversion.Cloner) error { - out.Name = in.Name - out.Value = in.Value - if in.ValueFrom != nil { - in, out := in.ValueFrom, &out.ValueFrom - *out = new(EnvVarSource) - if err := DeepCopy_v1_EnvVarSource(*in, *out, c); err != nil { - return err - } - } else { - out.ValueFrom = nil - } - return nil -} - -func DeepCopy_v1_EnvVarSource(in EnvVarSource, out *EnvVarSource, c *conversion.Cloner) error { - if in.FieldRef != nil { - in, out := in.FieldRef, &out.FieldRef - *out = new(ObjectFieldSelector) - if err := DeepCopy_v1_ObjectFieldSelector(*in, *out, c); err != nil { - return err - } - } else { - out.FieldRef = nil - } - if in.ResourceFieldRef != nil { - in, out := in.ResourceFieldRef, &out.ResourceFieldRef - *out = new(ResourceFieldSelector) - if err := DeepCopy_v1_ResourceFieldSelector(*in, *out, c); err != nil { - return err - } - } else { - out.ResourceFieldRef = nil - } - if in.ConfigMapKeyRef != nil { - in, out := in.ConfigMapKeyRef, &out.ConfigMapKeyRef - *out = new(ConfigMapKeySelector) - if err := DeepCopy_v1_ConfigMapKeySelector(*in, *out, c); err != nil { - return err - } - } else { - out.ConfigMapKeyRef = nil - } - if in.SecretKeyRef != nil { - in, out := in.SecretKeyRef, &out.SecretKeyRef - *out = new(SecretKeySelector) - if err := DeepCopy_v1_SecretKeySelector(*in, *out, c); err != nil { - return err - } - } else { - out.SecretKeyRef = nil - } - return nil -} - -func DeepCopy_v1_Event(in Event, out *Event, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectReference(in.InvolvedObject, &out.InvolvedObject, c); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - if err := DeepCopy_v1_EventSource(in.Source, &out.Source, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Time(in.FirstTimestamp, &out.FirstTimestamp, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Time(in.LastTimestamp, &out.LastTimestamp, c); err != nil { - return err - } - out.Count = in.Count - out.Type = in.Type - return nil -} - -func DeepCopy_v1_EventList(in EventList, out *EventList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Event, len(in)) - for i := range in { - if err := DeepCopy_v1_Event(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1_EventSource(in EventSource, out *EventSource, c *conversion.Cloner) error { - out.Component = in.Component - out.Host = in.Host - return nil -} - -func 
DeepCopy_v1_ExecAction(in ExecAction, out *ExecAction, c *conversion.Cloner) error { - if in.Command != nil { - in, out := in.Command, &out.Command - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Command = nil - } - return nil -} - -func DeepCopy_v1_ExportOptions(in ExportOptions, out *ExportOptions, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Export = in.Export - out.Exact = in.Exact - return nil -} - -func DeepCopy_v1_FCVolumeSource(in FCVolumeSource, out *FCVolumeSource, c *conversion.Cloner) error { - if in.TargetWWNs != nil { - in, out := in.TargetWWNs, &out.TargetWWNs - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.TargetWWNs = nil - } - if in.Lun != nil { - in, out := in.Lun, &out.Lun - *out = new(int32) - **out = *in - } else { - out.Lun = nil - } - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func DeepCopy_v1_FlexVolumeSource(in FlexVolumeSource, out *FlexVolumeSource, c *conversion.Cloner) error { - out.Driver = in.Driver - out.FSType = in.FSType - if in.SecretRef != nil { - in, out := in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - if err := DeepCopy_v1_LocalObjectReference(*in, *out, c); err != nil { - return err - } - } else { - out.SecretRef = nil - } - out.ReadOnly = in.ReadOnly - if in.Options != nil { - in, out := in.Options, &out.Options - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.Options = nil - } - return nil -} - -func DeepCopy_v1_FlockerVolumeSource(in FlockerVolumeSource, out *FlockerVolumeSource, c *conversion.Cloner) error { - out.DatasetName = in.DatasetName - return nil -} - -func DeepCopy_v1_GCEPersistentDiskVolumeSource(in GCEPersistentDiskVolumeSource, out *GCEPersistentDiskVolumeSource, c *conversion.Cloner) error { - out.PDName = in.PDName - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -func DeepCopy_v1_GitRepoVolumeSource(in GitRepoVolumeSource, out *GitRepoVolumeSource, c *conversion.Cloner) error { - out.Repository = in.Repository - out.Revision = in.Revision - out.Directory = in.Directory - return nil -} - -func DeepCopy_v1_GlusterfsVolumeSource(in GlusterfsVolumeSource, out *GlusterfsVolumeSource, c *conversion.Cloner) error { - out.EndpointsName = in.EndpointsName - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -func DeepCopy_v1_HTTPGetAction(in HTTPGetAction, out *HTTPGetAction, c *conversion.Cloner) error { - out.Path = in.Path - if err := intstr.DeepCopy_intstr_IntOrString(in.Port, &out.Port, c); err != nil { - return err - } - out.Host = in.Host - out.Scheme = in.Scheme - if in.HTTPHeaders != nil { - in, out := in.HTTPHeaders, &out.HTTPHeaders - *out = make([]HTTPHeader, len(in)) - for i := range in { - if err := DeepCopy_v1_HTTPHeader(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.HTTPHeaders = nil - } - return nil -} - -func DeepCopy_v1_HTTPHeader(in HTTPHeader, out *HTTPHeader, c *conversion.Cloner) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -func DeepCopy_v1_Handler(in Handler, out *Handler, c *conversion.Cloner) error { - if in.Exec != nil { - in, out := in.Exec, &out.Exec - *out = new(ExecAction) - if err := DeepCopy_v1_ExecAction(*in, *out, c); err != nil { - return err - } - } else { - out.Exec = nil - } - if in.HTTPGet != nil { - in, out := in.HTTPGet, &out.HTTPGet - 
*out = new(HTTPGetAction) - if err := DeepCopy_v1_HTTPGetAction(*in, *out, c); err != nil { - return err - } - } else { - out.HTTPGet = nil - } - if in.TCPSocket != nil { - in, out := in.TCPSocket, &out.TCPSocket - *out = new(TCPSocketAction) - if err := DeepCopy_v1_TCPSocketAction(*in, *out, c); err != nil { - return err - } - } else { - out.TCPSocket = nil - } - return nil -} - -func DeepCopy_v1_HostPathVolumeSource(in HostPathVolumeSource, out *HostPathVolumeSource, c *conversion.Cloner) error { - out.Path = in.Path - return nil -} - -func DeepCopy_v1_ISCSIVolumeSource(in ISCSIVolumeSource, out *ISCSIVolumeSource, c *conversion.Cloner) error { - out.TargetPortal = in.TargetPortal - out.IQN = in.IQN - out.Lun = in.Lun - out.ISCSIInterface = in.ISCSIInterface - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -func DeepCopy_v1_KeyToPath(in KeyToPath, out *KeyToPath, c *conversion.Cloner) error { - out.Key = in.Key - out.Path = in.Path - return nil -} - -func DeepCopy_v1_Lifecycle(in Lifecycle, out *Lifecycle, c *conversion.Cloner) error { - if in.PostStart != nil { - in, out := in.PostStart, &out.PostStart - *out = new(Handler) - if err := DeepCopy_v1_Handler(*in, *out, c); err != nil { - return err - } - } else { - out.PostStart = nil - } - if in.PreStop != nil { - in, out := in.PreStop, &out.PreStop - *out = new(Handler) - if err := DeepCopy_v1_Handler(*in, *out, c); err != nil { - return err - } - } else { - out.PreStop = nil - } - return nil -} - -func DeepCopy_v1_LimitRange(in LimitRange, out *LimitRange, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_LimitRangeSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conversion.Cloner) error { - out.Type = in.Type - if in.Max != nil { - in, out := in.Max, &out.Max - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Max = nil - } - if in.Min != nil { - in, out := in.Min, &out.Min - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Min = nil - } - if in.Default != nil { - in, out := in.Default, &out.Default - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Default = nil - } - if in.DefaultRequest != nil { - in, out := in.DefaultRequest, &out.DefaultRequest - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.DefaultRequest = nil - } - if in.MaxLimitRequestRatio != nil { - in, out := in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != 
nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.MaxLimitRequestRatio = nil - } - return nil -} - -func DeepCopy_v1_LimitRangeList(in LimitRangeList, out *LimitRangeList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]LimitRange, len(in)) - for i := range in { - if err := DeepCopy_v1_LimitRange(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1_LimitRangeSpec(in LimitRangeSpec, out *LimitRangeSpec, c *conversion.Cloner) error { - if in.Limits != nil { - in, out := in.Limits, &out.Limits - *out = make([]LimitRangeItem, len(in)) - for i := range in { - if err := DeepCopy_v1_LimitRangeItem(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Limits = nil - } - return nil -} - -func DeepCopy_v1_List(in List, out *List, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]runtime.RawExtension, len(in)) - for i := range in { - if err := runtime.DeepCopy_runtime_RawExtension(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1_ListOptions(in ListOptions, out *ListOptions, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.LabelSelector = in.LabelSelector - out.FieldSelector = in.FieldSelector - out.Watch = in.Watch - out.ResourceVersion = in.ResourceVersion - if in.TimeoutSeconds != nil { - in, out := in.TimeoutSeconds, &out.TimeoutSeconds - *out = new(int64) - **out = *in - } else { - out.TimeoutSeconds = nil - } - return nil -} - -func DeepCopy_v1_LoadBalancerIngress(in LoadBalancerIngress, out *LoadBalancerIngress, c *conversion.Cloner) error { - out.IP = in.IP - out.Hostname = in.Hostname - return nil -} - -func DeepCopy_v1_LoadBalancerStatus(in LoadBalancerStatus, out *LoadBalancerStatus, c *conversion.Cloner) error { - if in.Ingress != nil { - in, out := in.Ingress, &out.Ingress - *out = make([]LoadBalancerIngress, len(in)) - for i := range in { - if err := DeepCopy_v1_LoadBalancerIngress(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Ingress = nil - } - return nil -} - -func DeepCopy_v1_LocalObjectReference(in LocalObjectReference, out *LocalObjectReference, c *conversion.Cloner) error { - out.Name = in.Name - return nil -} - -func DeepCopy_v1_NFSVolumeSource(in NFSVolumeSource, out *NFSVolumeSource, c *conversion.Cloner) error { - out.Server = in.Server - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -func DeepCopy_v1_Namespace(in Namespace, out *Namespace, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_NamespaceSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err 
:= DeepCopy_v1_NamespaceStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_NamespaceList(in NamespaceList, out *NamespaceList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Namespace, len(in)) - for i := range in { - if err := DeepCopy_v1_Namespace(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1_NamespaceSpec(in NamespaceSpec, out *NamespaceSpec, c *conversion.Cloner) error { - if in.Finalizers != nil { - in, out := in.Finalizers, &out.Finalizers - *out = make([]FinalizerName, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.Finalizers = nil - } - return nil -} - -func DeepCopy_v1_NamespaceStatus(in NamespaceStatus, out *NamespaceStatus, c *conversion.Cloner) error { - out.Phase = in.Phase - return nil -} - -func DeepCopy_v1_Node(in Node, out *Node, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_NodeSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1_NodeStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_NodeAddress(in NodeAddress, out *NodeAddress, c *conversion.Cloner) error { - out.Type = in.Type - out.Address = in.Address - return nil -} - -func DeepCopy_v1_NodeAffinity(in NodeAffinity, out *NodeAffinity, c *conversion.Cloner) error { - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = new(NodeSelector) - if err := DeepCopy_v1_NodeSelector(*in, *out, c); err != nil { - return err - } - } else { - out.RequiredDuringSchedulingIgnoredDuringExecution = nil - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]PreferredSchedulingTerm, len(in)) - for i := range in { - if err := DeepCopy_v1_PreferredSchedulingTerm(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.PreferredDuringSchedulingIgnoredDuringExecution = nil - } - return nil -} - -func DeepCopy_v1_NodeCondition(in NodeCondition, out *NodeCondition, c *conversion.Cloner) error { - out.Type = in.Type - out.Status = in.Status - if err := unversioned.DeepCopy_unversioned_Time(in.LastHeartbeatTime, &out.LastHeartbeatTime, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func DeepCopy_v1_NodeDaemonEndpoints(in NodeDaemonEndpoints, out *NodeDaemonEndpoints, c *conversion.Cloner) error { - if err := DeepCopy_v1_DaemonEndpoint(in.KubeletEndpoint, &out.KubeletEndpoint, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_NodeList(in NodeList, out *NodeList, c *conversion.Cloner) error { - if err := 
unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Node, len(in)) - for i := range in { - if err := DeepCopy_v1_Node(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1_NodeProxyOptions(in NodeProxyOptions, out *NodeProxyOptions, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Path = in.Path - return nil -} - -func DeepCopy_v1_NodeSelector(in NodeSelector, out *NodeSelector, c *conversion.Cloner) error { - if in.NodeSelectorTerms != nil { - in, out := in.NodeSelectorTerms, &out.NodeSelectorTerms - *out = make([]NodeSelectorTerm, len(in)) - for i := range in { - if err := DeepCopy_v1_NodeSelectorTerm(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.NodeSelectorTerms = nil - } - return nil -} - -func DeepCopy_v1_NodeSelectorRequirement(in NodeSelectorRequirement, out *NodeSelectorRequirement, c *conversion.Cloner) error { - out.Key = in.Key - out.Operator = in.Operator - if in.Values != nil { - in, out := in.Values, &out.Values - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Values = nil - } - return nil -} - -func DeepCopy_v1_NodeSelectorTerm(in NodeSelectorTerm, out *NodeSelectorTerm, c *conversion.Cloner) error { - if in.MatchExpressions != nil { - in, out := in.MatchExpressions, &out.MatchExpressions - *out = make([]NodeSelectorRequirement, len(in)) - for i := range in { - if err := DeepCopy_v1_NodeSelectorRequirement(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.MatchExpressions = nil - } - return nil -} - -func DeepCopy_v1_NodeSpec(in NodeSpec, out *NodeSpec, c *conversion.Cloner) error { - out.PodCIDR = in.PodCIDR - out.ExternalID = in.ExternalID - out.ProviderID = in.ProviderID - out.Unschedulable = in.Unschedulable - return nil -} - -func DeepCopy_v1_NodeStatus(in NodeStatus, out *NodeStatus, c *conversion.Cloner) error { - if in.Capacity != nil { - in, out := in.Capacity, &out.Capacity - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Capacity = nil - } - if in.Allocatable != nil { - in, out := in.Allocatable, &out.Allocatable - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Allocatable = nil - } - out.Phase = in.Phase - if in.Conditions != nil { - in, out := in.Conditions, &out.Conditions - *out = make([]NodeCondition, len(in)) - for i := range in { - if err := DeepCopy_v1_NodeCondition(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - if in.Addresses != nil { - in, out := in.Addresses, &out.Addresses - *out = make([]NodeAddress, len(in)) - for i := range in { - if err := DeepCopy_v1_NodeAddress(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Addresses = nil - } - if err := DeepCopy_v1_NodeDaemonEndpoints(in.DaemonEndpoints, &out.DaemonEndpoints, c); err != nil { - 
return err - } - if err := DeepCopy_v1_NodeSystemInfo(in.NodeInfo, &out.NodeInfo, c); err != nil { - return err - } - if in.Images != nil { - in, out := in.Images, &out.Images - *out = make([]ContainerImage, len(in)) - for i := range in { - if err := DeepCopy_v1_ContainerImage(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Images = nil - } - if in.VolumesInUse != nil { - in, out := in.VolumesInUse, &out.VolumesInUse - *out = make([]UniqueVolumeName, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.VolumesInUse = nil - } - if in.VolumesAttached != nil { - in, out := in.VolumesAttached, &out.VolumesAttached - *out = make([]AttachedVolume, len(in)) - for i := range in { - if err := DeepCopy_v1_AttachedVolume(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.VolumesAttached = nil - } - return nil -} - -func DeepCopy_v1_NodeSystemInfo(in NodeSystemInfo, out *NodeSystemInfo, c *conversion.Cloner) error { - out.MachineID = in.MachineID - out.SystemUUID = in.SystemUUID - out.BootID = in.BootID - out.KernelVersion = in.KernelVersion - out.OSImage = in.OSImage - out.ContainerRuntimeVersion = in.ContainerRuntimeVersion - out.KubeletVersion = in.KubeletVersion - out.KubeProxyVersion = in.KubeProxyVersion - out.OperatingSystem = in.OperatingSystem - out.Architecture = in.Architecture - return nil -} - -func DeepCopy_v1_ObjectFieldSelector(in ObjectFieldSelector, out *ObjectFieldSelector, c *conversion.Cloner) error { - out.APIVersion = in.APIVersion - out.FieldPath = in.FieldPath - return nil -} - -func DeepCopy_v1_ObjectMeta(in ObjectMeta, out *ObjectMeta, c *conversion.Cloner) error { - out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = in.Namespace - out.SelfLink = in.SelfLink - out.UID = in.UID - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation - if err := unversioned.DeepCopy_unversioned_Time(in.CreationTimestamp, &out.CreationTimestamp, c); err != nil { - return err - } - if in.DeletionTimestamp != nil { - in, out := in.DeletionTimestamp, &out.DeletionTimestamp - *out = new(unversioned.Time) - if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { - return err - } - } else { - out.DeletionTimestamp = nil - } - if in.DeletionGracePeriodSeconds != nil { - in, out := in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds - *out = new(int64) - **out = *in - } else { - out.DeletionGracePeriodSeconds = nil - } - if in.Labels != nil { - in, out := in.Labels, &out.Labels - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.Labels = nil - } - if in.Annotations != nil { - in, out := in.Annotations, &out.Annotations - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.Annotations = nil - } - if in.OwnerReferences != nil { - in, out := in.OwnerReferences, &out.OwnerReferences - *out = make([]OwnerReference, len(in)) - for i := range in { - if err := DeepCopy_v1_OwnerReference(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.OwnerReferences = nil - } - if in.Finalizers != nil { - in, out := in.Finalizers, &out.Finalizers - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Finalizers = nil - } - return nil -} - -func DeepCopy_v1_ObjectReference(in ObjectReference, out *ObjectReference, c *conversion.Cloner) error { - out.Kind = in.Kind - out.Namespace = in.Namespace - out.Name = in.Name - out.UID = in.UID - out.APIVersion = 
in.APIVersion - out.ResourceVersion = in.ResourceVersion - out.FieldPath = in.FieldPath - return nil -} - -func DeepCopy_v1_OwnerReference(in OwnerReference, out *OwnerReference, c *conversion.Cloner) error { - out.APIVersion = in.APIVersion - out.Kind = in.Kind - out.Name = in.Name - out.UID = in.UID - if in.Controller != nil { - in, out := in.Controller, &out.Controller - *out = new(bool) - **out = *in - } else { - out.Controller = nil - } - return nil -} - -func DeepCopy_v1_PersistentVolume(in PersistentVolume, out *PersistentVolume, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_PersistentVolumeSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1_PersistentVolumeStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_PersistentVolumeClaim(in PersistentVolumeClaim, out *PersistentVolumeClaim, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_PersistentVolumeClaimSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1_PersistentVolumeClaimStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_PersistentVolumeClaimList(in PersistentVolumeClaimList, out *PersistentVolumeClaimList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]PersistentVolumeClaim, len(in)) - for i := range in { - if err := DeepCopy_v1_PersistentVolumeClaim(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1_PersistentVolumeClaimSpec(in PersistentVolumeClaimSpec, out *PersistentVolumeClaimSpec, c *conversion.Cloner) error { - if in.AccessModes != nil { - in, out := in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.AccessModes = nil - } - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := DeepCopy_v1_ResourceRequirements(in.Resources, &out.Resources, c); err != nil { - return err - } - out.VolumeName = in.VolumeName - return nil -} - -func DeepCopy_v1_PersistentVolumeClaimStatus(in PersistentVolumeClaimStatus, out *PersistentVolumeClaimStatus, c *conversion.Cloner) error { - out.Phase = in.Phase - if in.AccessModes != nil { - in, out := in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.AccessModes = nil - } - if in.Capacity != nil { - in, out := in.Capacity, &out.Capacity - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := 
resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Capacity = nil - } - return nil -} - -func DeepCopy_v1_PersistentVolumeClaimVolumeSource(in PersistentVolumeClaimVolumeSource, out *PersistentVolumeClaimVolumeSource, c *conversion.Cloner) error { - out.ClaimName = in.ClaimName - out.ReadOnly = in.ReadOnly - return nil -} - -func DeepCopy_v1_PersistentVolumeList(in PersistentVolumeList, out *PersistentVolumeList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]PersistentVolume, len(in)) - for i := range in { - if err := DeepCopy_v1_PersistentVolume(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1_PersistentVolumeSource(in PersistentVolumeSource, out *PersistentVolumeSource, c *conversion.Cloner) error { - if in.GCEPersistentDisk != nil { - in, out := in.GCEPersistentDisk, &out.GCEPersistentDisk - *out = new(GCEPersistentDiskVolumeSource) - if err := DeepCopy_v1_GCEPersistentDiskVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.GCEPersistentDisk = nil - } - if in.AWSElasticBlockStore != nil { - in, out := in.AWSElasticBlockStore, &out.AWSElasticBlockStore - *out = new(AWSElasticBlockStoreVolumeSource) - if err := DeepCopy_v1_AWSElasticBlockStoreVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.AWSElasticBlockStore = nil - } - if in.HostPath != nil { - in, out := in.HostPath, &out.HostPath - *out = new(HostPathVolumeSource) - if err := DeepCopy_v1_HostPathVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.HostPath = nil - } - if in.Glusterfs != nil { - in, out := in.Glusterfs, &out.Glusterfs - *out = new(GlusterfsVolumeSource) - if err := DeepCopy_v1_GlusterfsVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.Glusterfs = nil - } - if in.NFS != nil { - in, out := in.NFS, &out.NFS - *out = new(NFSVolumeSource) - if err := DeepCopy_v1_NFSVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.NFS = nil - } - if in.RBD != nil { - in, out := in.RBD, &out.RBD - *out = new(RBDVolumeSource) - if err := DeepCopy_v1_RBDVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.RBD = nil - } - if in.ISCSI != nil { - in, out := in.ISCSI, &out.ISCSI - *out = new(ISCSIVolumeSource) - if err := DeepCopy_v1_ISCSIVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.ISCSI = nil - } - if in.Cinder != nil { - in, out := in.Cinder, &out.Cinder - *out = new(CinderVolumeSource) - if err := DeepCopy_v1_CinderVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.Cinder = nil - } - if in.CephFS != nil { - in, out := in.CephFS, &out.CephFS - *out = new(CephFSVolumeSource) - if err := DeepCopy_v1_CephFSVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.CephFS = nil - } - if in.FC != nil { - in, out := in.FC, &out.FC - *out = new(FCVolumeSource) - if err := DeepCopy_v1_FCVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.FC = nil - } - if in.Flocker != nil { - in, out := in.Flocker, &out.Flocker - *out = new(FlockerVolumeSource) - if err := 
DeepCopy_v1_FlockerVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.Flocker = nil - } - if in.FlexVolume != nil { - in, out := in.FlexVolume, &out.FlexVolume - *out = new(FlexVolumeSource) - if err := DeepCopy_v1_FlexVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.FlexVolume = nil - } - if in.AzureFile != nil { - in, out := in.AzureFile, &out.AzureFile - *out = new(AzureFileVolumeSource) - if err := DeepCopy_v1_AzureFileVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.AzureFile = nil - } - if in.VsphereVolume != nil { - in, out := in.VsphereVolume, &out.VsphereVolume - *out = new(VsphereVirtualDiskVolumeSource) - if err := DeepCopy_v1_VsphereVirtualDiskVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.VsphereVolume = nil - } - return nil -} - -func DeepCopy_v1_PersistentVolumeSpec(in PersistentVolumeSpec, out *PersistentVolumeSpec, c *conversion.Cloner) error { - if in.Capacity != nil { - in, out := in.Capacity, &out.Capacity - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Capacity = nil - } - if err := DeepCopy_v1_PersistentVolumeSource(in.PersistentVolumeSource, &out.PersistentVolumeSource, c); err != nil { - return err - } - if in.AccessModes != nil { - in, out := in.AccessModes, &out.AccessModes - *out = make([]PersistentVolumeAccessMode, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.AccessModes = nil - } - if in.ClaimRef != nil { - in, out := in.ClaimRef, &out.ClaimRef - *out = new(ObjectReference) - if err := DeepCopy_v1_ObjectReference(*in, *out, c); err != nil { - return err - } - } else { - out.ClaimRef = nil - } - out.PersistentVolumeReclaimPolicy = in.PersistentVolumeReclaimPolicy - return nil -} - -func DeepCopy_v1_PersistentVolumeStatus(in PersistentVolumeStatus, out *PersistentVolumeStatus, c *conversion.Cloner) error { - out.Phase = in.Phase - out.Message = in.Message - out.Reason = in.Reason - return nil -} - -func DeepCopy_v1_Pod(in Pod, out *Pod, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_PodSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1_PodStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_PodAffinity(in PodAffinity, out *PodAffinity, c *conversion.Cloner) error { - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = make([]PodAffinityTerm, len(in)) - for i := range in { - if err := DeepCopy_v1_PodAffinityTerm(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.RequiredDuringSchedulingIgnoredDuringExecution = nil - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]WeightedPodAffinityTerm, len(in)) - for i := range in { - if err := DeepCopy_v1_WeightedPodAffinityTerm(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - 
out.PreferredDuringSchedulingIgnoredDuringExecution = nil - } - return nil -} - -func DeepCopy_v1_PodAffinityTerm(in PodAffinityTerm, out *PodAffinityTerm, c *conversion.Cloner) error { - if in.LabelSelector != nil { - in, out := in.LabelSelector, &out.LabelSelector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.LabelSelector = nil - } - if in.Namespaces != nil { - in, out := in.Namespaces, &out.Namespaces - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Namespaces = nil - } - out.TopologyKey = in.TopologyKey - return nil -} - -func DeepCopy_v1_PodAntiAffinity(in PodAntiAffinity, out *PodAntiAffinity, c *conversion.Cloner) error { - if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { - in, out := in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution - *out = make([]PodAffinityTerm, len(in)) - for i := range in { - if err := DeepCopy_v1_PodAffinityTerm(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.RequiredDuringSchedulingIgnoredDuringExecution = nil - } - if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { - in, out := in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution - *out = make([]WeightedPodAffinityTerm, len(in)) - for i := range in { - if err := DeepCopy_v1_WeightedPodAffinityTerm(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.PreferredDuringSchedulingIgnoredDuringExecution = nil - } - return nil -} - -func DeepCopy_v1_PodAttachOptions(in PodAttachOptions, out *PodAttachOptions, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - return nil -} - -func DeepCopy_v1_PodCondition(in PodCondition, out *PodCondition, c *conversion.Cloner) error { - out.Type = in.Type - out.Status = in.Status - if err := unversioned.DeepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func DeepCopy_v1_PodExecOptions(in PodExecOptions, out *PodExecOptions, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - if in.Command != nil { - in, out := in.Command, &out.Command - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Command = nil - } - return nil -} - -func DeepCopy_v1_PodList(in PodList, out *PodList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Pod, len(in)) - for i := range in { - if err := DeepCopy_v1_Pod(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func 
DeepCopy_v1_PodLogOptions(in PodLogOptions, out *PodLogOptions, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Container = in.Container - out.Follow = in.Follow - out.Previous = in.Previous - if in.SinceSeconds != nil { - in, out := in.SinceSeconds, &out.SinceSeconds - *out = new(int64) - **out = *in - } else { - out.SinceSeconds = nil - } - if in.SinceTime != nil { - in, out := in.SinceTime, &out.SinceTime - *out = new(unversioned.Time) - if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { - return err - } - } else { - out.SinceTime = nil - } - out.Timestamps = in.Timestamps - if in.TailLines != nil { - in, out := in.TailLines, &out.TailLines - *out = new(int64) - **out = *in - } else { - out.TailLines = nil - } - if in.LimitBytes != nil { - in, out := in.LimitBytes, &out.LimitBytes - *out = new(int64) - **out = *in - } else { - out.LimitBytes = nil - } - return nil -} - -func DeepCopy_v1_PodProxyOptions(in PodProxyOptions, out *PodProxyOptions, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Path = in.Path - return nil -} - -func DeepCopy_v1_PodSecurityContext(in PodSecurityContext, out *PodSecurityContext, c *conversion.Cloner) error { - if in.SELinuxOptions != nil { - in, out := in.SELinuxOptions, &out.SELinuxOptions - *out = new(SELinuxOptions) - if err := DeepCopy_v1_SELinuxOptions(*in, *out, c); err != nil { - return err - } - } else { - out.SELinuxOptions = nil - } - if in.RunAsUser != nil { - in, out := in.RunAsUser, &out.RunAsUser - *out = new(int64) - **out = *in - } else { - out.RunAsUser = nil - } - if in.RunAsNonRoot != nil { - in, out := in.RunAsNonRoot, &out.RunAsNonRoot - *out = new(bool) - **out = *in - } else { - out.RunAsNonRoot = nil - } - if in.SupplementalGroups != nil { - in, out := in.SupplementalGroups, &out.SupplementalGroups - *out = make([]int64, len(in)) - copy(*out, in) - } else { - out.SupplementalGroups = nil - } - if in.FSGroup != nil { - in, out := in.FSGroup, &out.FSGroup - *out = new(int64) - **out = *in - } else { - out.FSGroup = nil - } - return nil -} - -func DeepCopy_v1_PodSpec(in PodSpec, out *PodSpec, c *conversion.Cloner) error { - if in.Volumes != nil { - in, out := in.Volumes, &out.Volumes - *out = make([]Volume, len(in)) - for i := range in { - if err := DeepCopy_v1_Volume(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Volumes = nil - } - if in.InitContainers != nil { - in, out := in.InitContainers, &out.InitContainers - *out = make([]Container, len(in)) - for i := range in { - if err := DeepCopy_v1_Container(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.InitContainers = nil - } - if in.Containers != nil { - in, out := in.Containers, &out.Containers - *out = make([]Container, len(in)) - for i := range in { - if err := DeepCopy_v1_Container(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Containers = nil - } - out.RestartPolicy = in.RestartPolicy - if in.TerminationGracePeriodSeconds != nil { - in, out := in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds - *out = new(int64) - **out = *in - } else { - out.TerminationGracePeriodSeconds = nil - } - if in.ActiveDeadlineSeconds != nil { - in, out := in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - *out = new(int64) - **out = *in - } else { - out.ActiveDeadlineSeconds 
= nil - } - out.DNSPolicy = in.DNSPolicy - if in.NodeSelector != nil { - in, out := in.NodeSelector, &out.NodeSelector - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.NodeSelector = nil - } - out.ServiceAccountName = in.ServiceAccountName - out.DeprecatedServiceAccount = in.DeprecatedServiceAccount - out.NodeName = in.NodeName - out.HostNetwork = in.HostNetwork - out.HostPID = in.HostPID - out.HostIPC = in.HostIPC - if in.SecurityContext != nil { - in, out := in.SecurityContext, &out.SecurityContext - *out = new(PodSecurityContext) - if err := DeepCopy_v1_PodSecurityContext(*in, *out, c); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - if in.ImagePullSecrets != nil { - in, out := in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]LocalObjectReference, len(in)) - for i := range in { - if err := DeepCopy_v1_LocalObjectReference(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.ImagePullSecrets = nil - } - out.Hostname = in.Hostname - out.Subdomain = in.Subdomain - return nil -} - -func DeepCopy_v1_PodStatus(in PodStatus, out *PodStatus, c *conversion.Cloner) error { - out.Phase = in.Phase - if in.Conditions != nil { - in, out := in.Conditions, &out.Conditions - *out = make([]PodCondition, len(in)) - for i := range in { - if err := DeepCopy_v1_PodCondition(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - out.Message = in.Message - out.Reason = in.Reason - out.HostIP = in.HostIP - out.PodIP = in.PodIP - if in.StartTime != nil { - in, out := in.StartTime, &out.StartTime - *out = new(unversioned.Time) - if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { - return err - } - } else { - out.StartTime = nil - } - if in.InitContainerStatuses != nil { - in, out := in.InitContainerStatuses, &out.InitContainerStatuses - *out = make([]ContainerStatus, len(in)) - for i := range in { - if err := DeepCopy_v1_ContainerStatus(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.InitContainerStatuses = nil - } - if in.ContainerStatuses != nil { - in, out := in.ContainerStatuses, &out.ContainerStatuses - *out = make([]ContainerStatus, len(in)) - for i := range in { - if err := DeepCopy_v1_ContainerStatus(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.ContainerStatuses = nil - } - return nil -} - -func DeepCopy_v1_PodStatusResult(in PodStatusResult, out *PodStatusResult, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_PodStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_PodTemplate(in PodTemplate, out *PodTemplate, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_PodTemplateList(in PodTemplateList, out *PodTemplateList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := 
unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]PodTemplate, len(in)) - for i := range in { - if err := DeepCopy_v1_PodTemplate(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1_PodTemplateSpec(in PodTemplateSpec, out *PodTemplateSpec, c *conversion.Cloner) error { - if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_PodSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_Preconditions(in Preconditions, out *Preconditions, c *conversion.Cloner) error { - if in.UID != nil { - in, out := in.UID, &out.UID - *out = new(types.UID) - **out = *in - } else { - out.UID = nil - } - return nil -} - -func DeepCopy_v1_PreferredSchedulingTerm(in PreferredSchedulingTerm, out *PreferredSchedulingTerm, c *conversion.Cloner) error { - out.Weight = in.Weight - if err := DeepCopy_v1_NodeSelectorTerm(in.Preference, &out.Preference, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_Probe(in Probe, out *Probe, c *conversion.Cloner) error { - if err := DeepCopy_v1_Handler(in.Handler, &out.Handler, c); err != nil { - return err - } - out.InitialDelaySeconds = in.InitialDelaySeconds - out.TimeoutSeconds = in.TimeoutSeconds - out.PeriodSeconds = in.PeriodSeconds - out.SuccessThreshold = in.SuccessThreshold - out.FailureThreshold = in.FailureThreshold - return nil -} - -func DeepCopy_v1_RBDVolumeSource(in RBDVolumeSource, out *RBDVolumeSource, c *conversion.Cloner) error { - if in.CephMonitors != nil { - in, out := in.CephMonitors, &out.CephMonitors - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.CephMonitors = nil - } - out.RBDImage = in.RBDImage - out.FSType = in.FSType - out.RBDPool = in.RBDPool - out.RadosUser = in.RadosUser - out.Keyring = in.Keyring - if in.SecretRef != nil { - in, out := in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - if err := DeepCopy_v1_LocalObjectReference(*in, *out, c); err != nil { - return err - } - } else { - out.SecretRef = nil - } - out.ReadOnly = in.ReadOnly - return nil -} - -func DeepCopy_v1_RangeAllocation(in RangeAllocation, out *RangeAllocation, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - out.Range = in.Range - if in.Data != nil { - in, out := in.Data, &out.Data - *out = make([]byte, len(in)) - copy(*out, in) - } else { - out.Data = nil - } - return nil -} - -func DeepCopy_v1_ReplicationController(in ReplicationController, out *ReplicationController, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ReplicationControllerSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1_ReplicationControllerStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_ReplicationControllerList(in ReplicationControllerList, out *ReplicationControllerList, c *conversion.Cloner) error { - if err := 
unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ReplicationController, len(in)) - for i := range in { - if err := DeepCopy_v1_ReplicationController(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1_ReplicationControllerSpec(in ReplicationControllerSpec, out *ReplicationControllerSpec, c *conversion.Cloner) error { - if in.Replicas != nil { - in, out := in.Replicas, &out.Replicas - *out = new(int32) - **out = *in - } else { - out.Replicas = nil - } - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.Selector = nil - } - if in.Template != nil { - in, out := in.Template, &out.Template - *out = new(PodTemplateSpec) - if err := DeepCopy_v1_PodTemplateSpec(*in, *out, c); err != nil { - return err - } - } else { - out.Template = nil - } - return nil -} - -func DeepCopy_v1_ReplicationControllerStatus(in ReplicationControllerStatus, out *ReplicationControllerStatus, c *conversion.Cloner) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ObservedGeneration = in.ObservedGeneration - return nil -} - -func DeepCopy_v1_ResourceFieldSelector(in ResourceFieldSelector, out *ResourceFieldSelector, c *conversion.Cloner) error { - out.ContainerName = in.ContainerName - out.Resource = in.Resource - if err := resource.DeepCopy_resource_Quantity(in.Divisor, &out.Divisor, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_ResourceQuota(in ResourceQuota, out *ResourceQuota, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ResourceQuotaSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1_ResourceQuotaStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_ResourceQuotaList(in ResourceQuotaList, out *ResourceQuotaList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ResourceQuota, len(in)) - for i := range in { - if err := DeepCopy_v1_ResourceQuota(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec, c *conversion.Cloner) error { - if in.Hard != nil { - in, out := in.Hard, &out.Hard - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Hard = nil - } - if in.Scopes != nil { - in, out := in.Scopes, &out.Scopes - *out = make([]ResourceQuotaScope, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.Scopes = nil - } - return 
nil -} - -func DeepCopy_v1_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaStatus, c *conversion.Cloner) error { - if in.Hard != nil { - in, out := in.Hard, &out.Hard - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Hard = nil - } - if in.Used != nil { - in, out := in.Used, &out.Used - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Used = nil - } - return nil -} - -func DeepCopy_v1_ResourceRequirements(in ResourceRequirements, out *ResourceRequirements, c *conversion.Cloner) error { - if in.Limits != nil { - in, out := in.Limits, &out.Limits - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Limits = nil - } - if in.Requests != nil { - in, out := in.Requests, &out.Requests - *out = make(ResourceList) - for key, val := range in { - newVal := new(resource.Quantity) - if err := resource.DeepCopy_resource_Quantity(val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Requests = nil - } - return nil -} - -func DeepCopy_v1_SELinuxOptions(in SELinuxOptions, out *SELinuxOptions, c *conversion.Cloner) error { - out.User = in.User - out.Role = in.Role - out.Type = in.Type - out.Level = in.Level - return nil -} - -func DeepCopy_v1_Secret(in Secret, out *Secret, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if in.Data != nil { - in, out := in.Data, &out.Data - *out = make(map[string][]byte) - for key, val := range in { - if newVal, err := c.DeepCopy(val); err != nil { - return err - } else { - (*out)[key] = newVal.([]byte) - } - } - } else { - out.Data = nil - } - out.Type = in.Type - return nil -} - -func DeepCopy_v1_SecretKeySelector(in SecretKeySelector, out *SecretKeySelector, c *conversion.Cloner) error { - if err := DeepCopy_v1_LocalObjectReference(in.LocalObjectReference, &out.LocalObjectReference, c); err != nil { - return err - } - out.Key = in.Key - return nil -} - -func DeepCopy_v1_SecretList(in SecretList, out *SecretList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Secret, len(in)) - for i := range in { - if err := DeepCopy_v1_Secret(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1_SecretVolumeSource(in SecretVolumeSource, out *SecretVolumeSource, c *conversion.Cloner) error { - out.SecretName = in.SecretName - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]KeyToPath, len(in)) - for i := range in { - if err := DeepCopy_v1_KeyToPath(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - 
} - return nil -} - -func DeepCopy_v1_SecurityContext(in SecurityContext, out *SecurityContext, c *conversion.Cloner) error { - if in.Capabilities != nil { - in, out := in.Capabilities, &out.Capabilities - *out = new(Capabilities) - if err := DeepCopy_v1_Capabilities(*in, *out, c); err != nil { - return err - } - } else { - out.Capabilities = nil - } - if in.Privileged != nil { - in, out := in.Privileged, &out.Privileged - *out = new(bool) - **out = *in - } else { - out.Privileged = nil - } - if in.SELinuxOptions != nil { - in, out := in.SELinuxOptions, &out.SELinuxOptions - *out = new(SELinuxOptions) - if err := DeepCopy_v1_SELinuxOptions(*in, *out, c); err != nil { - return err - } - } else { - out.SELinuxOptions = nil - } - if in.RunAsUser != nil { - in, out := in.RunAsUser, &out.RunAsUser - *out = new(int64) - **out = *in - } else { - out.RunAsUser = nil - } - if in.RunAsNonRoot != nil { - in, out := in.RunAsNonRoot, &out.RunAsNonRoot - *out = new(bool) - **out = *in - } else { - out.RunAsNonRoot = nil - } - if in.ReadOnlyRootFilesystem != nil { - in, out := in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem - *out = new(bool) - **out = *in - } else { - out.ReadOnlyRootFilesystem = nil - } - return nil -} - -func DeepCopy_v1_SerializedReference(in SerializedReference, out *SerializedReference, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectReference(in.Reference, &out.Reference, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_Service(in Service, out *Service, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ServiceSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1_ServiceStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_ServiceAccount(in ServiceAccount, out *ServiceAccount, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if in.Secrets != nil { - in, out := in.Secrets, &out.Secrets - *out = make([]ObjectReference, len(in)) - for i := range in { - if err := DeepCopy_v1_ObjectReference(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Secrets = nil - } - if in.ImagePullSecrets != nil { - in, out := in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]LocalObjectReference, len(in)) - for i := range in { - if err := DeepCopy_v1_LocalObjectReference(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.ImagePullSecrets = nil - } - return nil -} - -func DeepCopy_v1_ServiceAccountList(in ServiceAccountList, out *ServiceAccountList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ServiceAccount, len(in)) - for i := range in { - if err := DeepCopy_v1_ServiceAccount(in[i], &(*out)[i], c); err != nil { - return err - } 
- } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1_ServiceList(in ServiceList, out *ServiceList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Service, len(in)) - for i := range in { - if err := DeepCopy_v1_Service(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1_ServicePort(in ServicePort, out *ServicePort, c *conversion.Cloner) error { - out.Name = in.Name - out.Protocol = in.Protocol - out.Port = in.Port - if err := intstr.DeepCopy_intstr_IntOrString(in.TargetPort, &out.TargetPort, c); err != nil { - return err - } - out.NodePort = in.NodePort - return nil -} - -func DeepCopy_v1_ServiceProxyOptions(in ServiceProxyOptions, out *ServiceProxyOptions, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Path = in.Path - return nil -} - -func DeepCopy_v1_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Cloner) error { - if in.Ports != nil { - in, out := in.Ports, &out.Ports - *out = make([]ServicePort, len(in)) - for i := range in { - if err := DeepCopy_v1_ServicePort(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Ports = nil - } - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.Selector = nil - } - out.ClusterIP = in.ClusterIP - out.Type = in.Type - if in.ExternalIPs != nil { - in, out := in.ExternalIPs, &out.ExternalIPs - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.ExternalIPs = nil - } - if in.DeprecatedPublicIPs != nil { - in, out := in.DeprecatedPublicIPs, &out.DeprecatedPublicIPs - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.DeprecatedPublicIPs = nil - } - out.SessionAffinity = in.SessionAffinity - out.LoadBalancerIP = in.LoadBalancerIP - if in.LoadBalancerSourceRanges != nil { - in, out := in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.LoadBalancerSourceRanges = nil - } - return nil -} - -func DeepCopy_v1_ServiceStatus(in ServiceStatus, out *ServiceStatus, c *conversion.Cloner) error { - if err := DeepCopy_v1_LoadBalancerStatus(in.LoadBalancer, &out.LoadBalancer, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_TCPSocketAction(in TCPSocketAction, out *TCPSocketAction, c *conversion.Cloner) error { - if err := intstr.DeepCopy_intstr_IntOrString(in.Port, &out.Port, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_Taint(in Taint, out *Taint, c *conversion.Cloner) error { - out.Key = in.Key - out.Value = in.Value - out.Effect = in.Effect - return nil -} - -func DeepCopy_v1_Toleration(in Toleration, out *Toleration, c *conversion.Cloner) error { - out.Key = in.Key - out.Operator = in.Operator - out.Value = in.Value - out.Effect = in.Effect - return nil -} - -func DeepCopy_v1_Volume(in Volume, out *Volume, c *conversion.Cloner) error { - out.Name = in.Name - if err := DeepCopy_v1_VolumeSource(in.VolumeSource, &out.VolumeSource, c); err != nil { - return err - } - return nil -} - -func 
DeepCopy_v1_VolumeMount(in VolumeMount, out *VolumeMount, c *conversion.Cloner) error { - out.Name = in.Name - out.ReadOnly = in.ReadOnly - out.MountPath = in.MountPath - out.SubPath = in.SubPath - return nil -} - -func DeepCopy_v1_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion.Cloner) error { - if in.HostPath != nil { - in, out := in.HostPath, &out.HostPath - *out = new(HostPathVolumeSource) - if err := DeepCopy_v1_HostPathVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.HostPath = nil - } - if in.EmptyDir != nil { - in, out := in.EmptyDir, &out.EmptyDir - *out = new(EmptyDirVolumeSource) - if err := DeepCopy_v1_EmptyDirVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.EmptyDir = nil - } - if in.GCEPersistentDisk != nil { - in, out := in.GCEPersistentDisk, &out.GCEPersistentDisk - *out = new(GCEPersistentDiskVolumeSource) - if err := DeepCopy_v1_GCEPersistentDiskVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.GCEPersistentDisk = nil - } - if in.AWSElasticBlockStore != nil { - in, out := in.AWSElasticBlockStore, &out.AWSElasticBlockStore - *out = new(AWSElasticBlockStoreVolumeSource) - if err := DeepCopy_v1_AWSElasticBlockStoreVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.AWSElasticBlockStore = nil - } - if in.GitRepo != nil { - in, out := in.GitRepo, &out.GitRepo - *out = new(GitRepoVolumeSource) - if err := DeepCopy_v1_GitRepoVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.GitRepo = nil - } - if in.Secret != nil { - in, out := in.Secret, &out.Secret - *out = new(SecretVolumeSource) - if err := DeepCopy_v1_SecretVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.Secret = nil - } - if in.NFS != nil { - in, out := in.NFS, &out.NFS - *out = new(NFSVolumeSource) - if err := DeepCopy_v1_NFSVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.NFS = nil - } - if in.ISCSI != nil { - in, out := in.ISCSI, &out.ISCSI - *out = new(ISCSIVolumeSource) - if err := DeepCopy_v1_ISCSIVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.ISCSI = nil - } - if in.Glusterfs != nil { - in, out := in.Glusterfs, &out.Glusterfs - *out = new(GlusterfsVolumeSource) - if err := DeepCopy_v1_GlusterfsVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.Glusterfs = nil - } - if in.PersistentVolumeClaim != nil { - in, out := in.PersistentVolumeClaim, &out.PersistentVolumeClaim - *out = new(PersistentVolumeClaimVolumeSource) - if err := DeepCopy_v1_PersistentVolumeClaimVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.PersistentVolumeClaim = nil - } - if in.RBD != nil { - in, out := in.RBD, &out.RBD - *out = new(RBDVolumeSource) - if err := DeepCopy_v1_RBDVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.RBD = nil - } - if in.FlexVolume != nil { - in, out := in.FlexVolume, &out.FlexVolume - *out = new(FlexVolumeSource) - if err := DeepCopy_v1_FlexVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.FlexVolume = nil - } - if in.Cinder != nil { - in, out := in.Cinder, &out.Cinder - *out = new(CinderVolumeSource) - if err := DeepCopy_v1_CinderVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.Cinder = nil - } - if in.CephFS != nil { - in, out := in.CephFS, &out.CephFS - *out = new(CephFSVolumeSource) - if err := DeepCopy_v1_CephFSVolumeSource(*in, *out, c); err != nil { - return err - } - } else 
{ - out.CephFS = nil - } - if in.Flocker != nil { - in, out := in.Flocker, &out.Flocker - *out = new(FlockerVolumeSource) - if err := DeepCopy_v1_FlockerVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.Flocker = nil - } - if in.DownwardAPI != nil { - in, out := in.DownwardAPI, &out.DownwardAPI - *out = new(DownwardAPIVolumeSource) - if err := DeepCopy_v1_DownwardAPIVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.DownwardAPI = nil - } - if in.FC != nil { - in, out := in.FC, &out.FC - *out = new(FCVolumeSource) - if err := DeepCopy_v1_FCVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.FC = nil - } - if in.AzureFile != nil { - in, out := in.AzureFile, &out.AzureFile - *out = new(AzureFileVolumeSource) - if err := DeepCopy_v1_AzureFileVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.AzureFile = nil - } - if in.ConfigMap != nil { - in, out := in.ConfigMap, &out.ConfigMap - *out = new(ConfigMapVolumeSource) - if err := DeepCopy_v1_ConfigMapVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.ConfigMap = nil - } - if in.VsphereVolume != nil { - in, out := in.VsphereVolume, &out.VsphereVolume - *out = new(VsphereVirtualDiskVolumeSource) - if err := DeepCopy_v1_VsphereVirtualDiskVolumeSource(*in, *out, c); err != nil { - return err - } - } else { - out.VsphereVolume = nil - } - return nil -} - -func DeepCopy_v1_VsphereVirtualDiskVolumeSource(in VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, c *conversion.Cloner) error { - out.VolumePath = in.VolumePath - out.FSType = in.FSType - return nil -} - -func DeepCopy_v1_WeightedPodAffinityTerm(in WeightedPodAffinityTerm, out *WeightedPodAffinityTerm, c *conversion.Cloner) error { - out.Weight = in.Weight - if err := DeepCopy_v1_PodAffinityTerm(in.PodAffinityTerm, &out.PodAffinityTerm, c); err != nil { - return err - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/api/v1/defaults.go deleted file mode 100644 index c6f11b4ee..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/defaults.go +++ /dev/null @@ -1,301 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" - "k8s.io/kubernetes/pkg/util/intstr" - "k8s.io/kubernetes/pkg/util/parsers" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) { - scheme.AddDefaultingFuncs( - SetDefaults_PodExecOptions, - SetDefaults_PodAttachOptions, - SetDefaults_ReplicationController, - SetDefaults_Volume, - SetDefaults_ContainerPort, - SetDefaults_Container, - SetDefaults_ServiceSpec, - SetDefaults_Pod, - SetDefaults_PodSpec, - SetDefaults_Probe, - SetDefaults_Secret, - SetDefaults_PersistentVolume, - SetDefaults_PersistentVolumeClaim, - SetDefaults_ISCSIVolumeSource, - SetDefaults_Endpoints, - SetDefaults_HTTPGetAction, - SetDefaults_NamespaceStatus, - SetDefaults_Node, - SetDefaults_NodeStatus, - SetDefaults_ObjectFieldSelector, - SetDefaults_LimitRangeItem, - SetDefaults_ConfigMap, - SetDefaults_RBDVolumeSource, - ) -} - -func SetDefaults_PodExecOptions(obj *PodExecOptions) { - obj.Stdout = true - obj.Stderr = true -} -func SetDefaults_PodAttachOptions(obj *PodAttachOptions) { - obj.Stdout = true - obj.Stderr = true -} -func SetDefaults_ReplicationController(obj *ReplicationController) { - var labels map[string]string - if obj.Spec.Template != nil { - labels = obj.Spec.Template.Labels - } - // TODO: support templates defined elsewhere when we support them in the API - if labels != nil { - if len(obj.Spec.Selector) == 0 { - obj.Spec.Selector = labels - } - if len(obj.Labels) == 0 { - obj.Labels = labels - } - } - if obj.Spec.Replicas == nil { - obj.Spec.Replicas = new(int32) - *obj.Spec.Replicas = 1 - } -} -func SetDefaults_Volume(obj *Volume) { - if util.AllPtrFieldsNil(&obj.VolumeSource) { - obj.VolumeSource = VolumeSource{ - EmptyDir: &EmptyDirVolumeSource{}, - } - } -} -func SetDefaults_ContainerPort(obj *ContainerPort) { - if obj.Protocol == "" { - obj.Protocol = ProtocolTCP - } -} -func SetDefaults_Container(obj *Container) { - if obj.ImagePullPolicy == "" { - // Ignore error and assume it has been validated elsewhere - _, tag, _, _ := parsers.ParseImageName(obj.Image) - - // Check image tag - - if tag == "latest" { - obj.ImagePullPolicy = PullAlways - } else { - obj.ImagePullPolicy = PullIfNotPresent - } - } - if obj.TerminationMessagePath == "" { - obj.TerminationMessagePath = TerminationMessagePathDefault - } -} -func SetDefaults_ServiceSpec(obj *ServiceSpec) { - if obj.SessionAffinity == "" { - obj.SessionAffinity = ServiceAffinityNone - } - if obj.Type == "" { - obj.Type = ServiceTypeClusterIP - } - for i := range obj.Ports { - sp := &obj.Ports[i] - if sp.Protocol == "" { - sp.Protocol = ProtocolTCP - } - if sp.TargetPort == intstr.FromInt(0) || sp.TargetPort == intstr.FromString("") { - sp.TargetPort = intstr.FromInt(int(sp.Port)) - } - } -} -func SetDefaults_Pod(obj *Pod) { - // If limits are specified, but requests are not, default requests to limits - // This is done here rather than a more specific defaulting pass on ResourceRequirements - // because we only want this defaulting semantic to take place on a Pod and not a PodTemplate - for i := range obj.Spec.Containers { - // set requests to limits if requests are not specified, but limits are - if obj.Spec.Containers[i].Resources.Limits != nil { - if obj.Spec.Containers[i].Resources.Requests == nil { - obj.Spec.Containers[i].Resources.Requests = make(ResourceList) - } - for key, value := range obj.Spec.Containers[i].Resources.Limits { - if _, exists := obj.Spec.Containers[i].Resources.Requests[key]; !exists { - 
obj.Spec.Containers[i].Resources.Requests[key] = *(value.Copy()) - } - } - } - } -} -func SetDefaults_PodSpec(obj *PodSpec) { - if obj.DNSPolicy == "" { - obj.DNSPolicy = DNSClusterFirst - } - if obj.RestartPolicy == "" { - obj.RestartPolicy = RestartPolicyAlways - } - if obj.HostNetwork { - defaultHostNetworkPorts(&obj.Containers) - } - if obj.SecurityContext == nil { - obj.SecurityContext = &PodSecurityContext{} - } - if obj.TerminationGracePeriodSeconds == nil { - period := int64(DefaultTerminationGracePeriodSeconds) - obj.TerminationGracePeriodSeconds = &period - } -} -func SetDefaults_Probe(obj *Probe) { - if obj.TimeoutSeconds == 0 { - obj.TimeoutSeconds = 1 - } - if obj.PeriodSeconds == 0 { - obj.PeriodSeconds = 10 - } - if obj.SuccessThreshold == 0 { - obj.SuccessThreshold = 1 - } - if obj.FailureThreshold == 0 { - obj.FailureThreshold = 3 - } -} -func SetDefaults_Secret(obj *Secret) { - if obj.Type == "" { - obj.Type = SecretTypeOpaque - } -} -func SetDefaults_PersistentVolume(obj *PersistentVolume) { - if obj.Status.Phase == "" { - obj.Status.Phase = VolumePending - } - if obj.Spec.PersistentVolumeReclaimPolicy == "" { - obj.Spec.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimRetain - } -} -func SetDefaults_PersistentVolumeClaim(obj *PersistentVolumeClaim) { - if obj.Status.Phase == "" { - obj.Status.Phase = ClaimPending - } -} -func SetDefaults_ISCSIVolumeSource(obj *ISCSIVolumeSource) { - if obj.ISCSIInterface == "" { - obj.ISCSIInterface = "default" - } -} -func SetDefaults_Endpoints(obj *Endpoints) { - for i := range obj.Subsets { - ss := &obj.Subsets[i] - for i := range ss.Ports { - ep := &ss.Ports[i] - if ep.Protocol == "" { - ep.Protocol = ProtocolTCP - } - } - } -} -func SetDefaults_HTTPGetAction(obj *HTTPGetAction) { - if obj.Path == "" { - obj.Path = "/" - } - if obj.Scheme == "" { - obj.Scheme = URISchemeHTTP - } -} -func SetDefaults_NamespaceStatus(obj *NamespaceStatus) { - if obj.Phase == "" { - obj.Phase = NamespaceActive - } -} -func SetDefaults_Node(obj *Node) { - if obj.Spec.ExternalID == "" { - obj.Spec.ExternalID = obj.Name - } -} -func SetDefaults_NodeStatus(obj *NodeStatus) { - if obj.Allocatable == nil && obj.Capacity != nil { - obj.Allocatable = make(ResourceList, len(obj.Capacity)) - for key, value := range obj.Capacity { - obj.Allocatable[key] = *(value.Copy()) - } - obj.Allocatable = obj.Capacity - } -} -func SetDefaults_ObjectFieldSelector(obj *ObjectFieldSelector) { - if obj.APIVersion == "" { - obj.APIVersion = "v1" - } -} -func SetDefaults_LimitRangeItem(obj *LimitRangeItem) { - // for container limits, we apply default values - if obj.Type == LimitTypeContainer { - - if obj.Default == nil { - obj.Default = make(ResourceList) - } - if obj.DefaultRequest == nil { - obj.DefaultRequest = make(ResourceList) - } - - // If a default limit is unspecified, but the max is specified, default the limit to the max - for key, value := range obj.Max { - if _, exists := obj.Default[key]; !exists { - obj.Default[key] = *(value.Copy()) - } - } - // If a default limit is specified, but the default request is not, default request to limit - for key, value := range obj.Default { - if _, exists := obj.DefaultRequest[key]; !exists { - obj.DefaultRequest[key] = *(value.Copy()) - } - } - // If a default request is not specified, but the min is provided, default request to the min - for key, value := range obj.Min { - if _, exists := obj.DefaultRequest[key]; !exists { - obj.DefaultRequest[key] = *(value.Copy()) - } - } - } -} -func SetDefaults_ConfigMap(obj 
*ConfigMap) { - if obj.Data == nil { - obj.Data = make(map[string]string) - } -} - -// With host networking default all container ports to host ports. -func defaultHostNetworkPorts(containers *[]Container) { - for i := range *containers { - for j := range (*containers)[i].Ports { - if (*containers)[i].Ports[j].HostPort == 0 { - (*containers)[i].Ports[j].HostPort = (*containers)[i].Ports[j].ContainerPort - } - } - } -} - -func SetDefaults_RBDVolumeSource(obj *RBDVolumeSource) { - if obj.RBDPool == "" { - obj.RBDPool = "rbd" - } - if obj.RadosUser == "" { - obj.RadosUser = "admin" - } - if obj.Keyring == "" { - obj.Keyring = "/etc/ceph/keyring" - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/api/v1/doc.go deleted file mode 100644 index bf85d77a1..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v1 is the v1 version of the API. -// +genconversion=true -package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/api/v1/generated.pb.go deleted file mode 100644 index 78477e4bf..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/generated.pb.go +++ /dev/null @@ -1,34661 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/api/v1/generated.proto -// DO NOT EDIT! - -/* - Package v1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/pkg/api/v1/generated.proto - - It has these top-level messages: - AWSElasticBlockStoreVolumeSource - Affinity - AttachedVolume - AzureFileVolumeSource - Binding - Capabilities - CephFSVolumeSource - CinderVolumeSource - ComponentCondition - ComponentStatus - ComponentStatusList - ConfigMap - ConfigMapKeySelector - ConfigMapList - ConfigMapVolumeSource - Container - ContainerImage - ContainerPort - ContainerState - ContainerStateRunning - ContainerStateTerminated - ContainerStateWaiting - ContainerStatus - DaemonEndpoint - DeleteOptions - DownwardAPIVolumeFile - DownwardAPIVolumeSource - EmptyDirVolumeSource - EndpointAddress - EndpointPort - EndpointSubset - Endpoints - EndpointsList - EnvVar - EnvVarSource - Event - EventList - EventSource - ExecAction - ExportOptions - FCVolumeSource - FlexVolumeSource - FlockerVolumeSource - GCEPersistentDiskVolumeSource - GitRepoVolumeSource - GlusterfsVolumeSource - HTTPGetAction - HTTPHeader - Handler - HostPathVolumeSource - ISCSIVolumeSource - KeyToPath - Lifecycle - LimitRange - LimitRangeItem - LimitRangeList - LimitRangeSpec - List - ListOptions - LoadBalancerIngress - LoadBalancerStatus - LocalObjectReference - NFSVolumeSource - Namespace - NamespaceList - NamespaceSpec - NamespaceStatus - Node - NodeAddress - NodeAffinity - NodeCondition - NodeDaemonEndpoints - NodeList - NodeProxyOptions - NodeSelector - NodeSelectorRequirement - NodeSelectorTerm - NodeSpec - NodeStatus - NodeSystemInfo - ObjectFieldSelector - ObjectMeta - ObjectReference - OwnerReference - PersistentVolume - PersistentVolumeClaim - PersistentVolumeClaimList - PersistentVolumeClaimSpec - PersistentVolumeClaimStatus - PersistentVolumeClaimVolumeSource - PersistentVolumeList - PersistentVolumeSource - PersistentVolumeSpec - PersistentVolumeStatus - Pod - PodAffinity - PodAffinityTerm - PodAntiAffinity - PodAttachOptions - PodCondition - PodExecOptions - PodList - PodLogOptions - PodProxyOptions - PodSecurityContext - PodSpec - PodStatus - PodStatusResult - PodTemplate - PodTemplateList - PodTemplateSpec - Preconditions - PreferredSchedulingTerm - Probe - RBDVolumeSource - RangeAllocation - ReplicationController - ReplicationControllerList - ReplicationControllerSpec - ReplicationControllerStatus - ResourceFieldSelector - ResourceQuota - ResourceQuotaList - ResourceQuotaSpec - ResourceQuotaStatus - ResourceRequirements - SELinuxOptions - Secret - SecretKeySelector - SecretList - SecretVolumeSource - SecurityContext - SerializedReference - Service - ServiceAccount - ServiceAccountList - ServiceList - ServicePort - ServiceProxyOptions - ServiceSpec - ServiceStatus - TCPSocketAction - Taint - Toleration - Volume - VolumeMount - VolumeSource - VsphereVirtualDiskVolumeSource - WeightedPodAffinityTerm -*/ -package v1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_kubernetes_pkg_api_resource "k8s.io/kubernetes/pkg/api/resource" -import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" -import k8s_io_kubernetes_pkg_runtime "k8s.io/kubernetes/pkg/runtime" - -import k8s_io_kubernetes_pkg_types "k8s.io/kubernetes/pkg/types" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -func (m *AWSElasticBlockStoreVolumeSource) Reset() { *m = AWSElasticBlockStoreVolumeSource{} } -func (m *AWSElasticBlockStoreVolumeSource) String() string { return proto.CompactTextString(m) } -func (*AWSElasticBlockStoreVolumeSource) ProtoMessage() {} - -func (m *Affinity) Reset() { *m = Affinity{} } -func (m *Affinity) String() string { return proto.CompactTextString(m) } -func (*Affinity) ProtoMessage() {} - -func (m *AttachedVolume) Reset() { *m = AttachedVolume{} } -func (m *AttachedVolume) String() string { return proto.CompactTextString(m) } -func (*AttachedVolume) ProtoMessage() {} - -func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} } -func (m *AzureFileVolumeSource) String() string { return proto.CompactTextString(m) } -func (*AzureFileVolumeSource) ProtoMessage() {} - -func (m *Binding) Reset() { *m = Binding{} } -func (m *Binding) String() string { return proto.CompactTextString(m) } -func (*Binding) ProtoMessage() {} - -func (m *Capabilities) Reset() { *m = Capabilities{} } -func (m *Capabilities) String() string { return proto.CompactTextString(m) } -func (*Capabilities) ProtoMessage() {} - -func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } -func (m *CephFSVolumeSource) String() string { return proto.CompactTextString(m) } -func (*CephFSVolumeSource) ProtoMessage() {} - -func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } -func (m *CinderVolumeSource) String() string { return proto.CompactTextString(m) } -func (*CinderVolumeSource) ProtoMessage() {} - -func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } -func (m *ComponentCondition) String() string { return proto.CompactTextString(m) } -func (*ComponentCondition) ProtoMessage() {} - -func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } -func (m *ComponentStatus) String() string { return proto.CompactTextString(m) } -func (*ComponentStatus) ProtoMessage() {} - -func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } -func (m *ComponentStatusList) String() string { return proto.CompactTextString(m) } -func (*ComponentStatusList) ProtoMessage() {} - -func (m *ConfigMap) Reset() { *m = ConfigMap{} } -func (m *ConfigMap) String() string { return proto.CompactTextString(m) } -func (*ConfigMap) ProtoMessage() {} - -func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } -func (m *ConfigMapKeySelector) String() string { return proto.CompactTextString(m) } -func (*ConfigMapKeySelector) ProtoMessage() {} - -func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } -func (m *ConfigMapList) String() string { return proto.CompactTextString(m) } -func (*ConfigMapList) ProtoMessage() {} - -func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } -func (m *ConfigMapVolumeSource) String() string { return proto.CompactTextString(m) } -func (*ConfigMapVolumeSource) ProtoMessage() {} - -func (m *Container) Reset() { *m = Container{} } -func (m *Container) String() string { return proto.CompactTextString(m) } -func (*Container) ProtoMessage() {} - -func (m *ContainerImage) Reset() { *m = ContainerImage{} } -func (m *ContainerImage) String() string { return proto.CompactTextString(m) } -func (*ContainerImage) ProtoMessage() {} - -func (m *ContainerPort) Reset() { *m = ContainerPort{} } -func (m *ContainerPort) String() string { return proto.CompactTextString(m) } -func (*ContainerPort) ProtoMessage() {} - -func (m *ContainerState) Reset() { *m = ContainerState{} } 
-func (m *ContainerState) String() string { return proto.CompactTextString(m) } -func (*ContainerState) ProtoMessage() {} - -func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } -func (m *ContainerStateRunning) String() string { return proto.CompactTextString(m) } -func (*ContainerStateRunning) ProtoMessage() {} - -func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } -func (m *ContainerStateTerminated) String() string { return proto.CompactTextString(m) } -func (*ContainerStateTerminated) ProtoMessage() {} - -func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } -func (m *ContainerStateWaiting) String() string { return proto.CompactTextString(m) } -func (*ContainerStateWaiting) ProtoMessage() {} - -func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } -func (m *ContainerStatus) String() string { return proto.CompactTextString(m) } -func (*ContainerStatus) ProtoMessage() {} - -func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } -func (m *DaemonEndpoint) String() string { return proto.CompactTextString(m) } -func (*DaemonEndpoint) ProtoMessage() {} - -func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } -func (m *DeleteOptions) String() string { return proto.CompactTextString(m) } -func (*DeleteOptions) ProtoMessage() {} - -func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } -func (m *DownwardAPIVolumeFile) String() string { return proto.CompactTextString(m) } -func (*DownwardAPIVolumeFile) ProtoMessage() {} - -func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } -func (m *DownwardAPIVolumeSource) String() string { return proto.CompactTextString(m) } -func (*DownwardAPIVolumeSource) ProtoMessage() {} - -func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } -func (m *EmptyDirVolumeSource) String() string { return proto.CompactTextString(m) } -func (*EmptyDirVolumeSource) ProtoMessage() {} - -func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } -func (m *EndpointAddress) String() string { return proto.CompactTextString(m) } -func (*EndpointAddress) ProtoMessage() {} - -func (m *EndpointPort) Reset() { *m = EndpointPort{} } -func (m *EndpointPort) String() string { return proto.CompactTextString(m) } -func (*EndpointPort) ProtoMessage() {} - -func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } -func (m *EndpointSubset) String() string { return proto.CompactTextString(m) } -func (*EndpointSubset) ProtoMessage() {} - -func (m *Endpoints) Reset() { *m = Endpoints{} } -func (m *Endpoints) String() string { return proto.CompactTextString(m) } -func (*Endpoints) ProtoMessage() {} - -func (m *EndpointsList) Reset() { *m = EndpointsList{} } -func (m *EndpointsList) String() string { return proto.CompactTextString(m) } -func (*EndpointsList) ProtoMessage() {} - -func (m *EnvVar) Reset() { *m = EnvVar{} } -func (m *EnvVar) String() string { return proto.CompactTextString(m) } -func (*EnvVar) ProtoMessage() {} - -func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } -func (m *EnvVarSource) String() string { return proto.CompactTextString(m) } -func (*EnvVarSource) ProtoMessage() {} - -func (m *Event) Reset() { *m = Event{} } -func (m *Event) String() string { return proto.CompactTextString(m) } -func (*Event) ProtoMessage() {} - -func (m *EventList) Reset() { *m = EventList{} } -func (m *EventList) String() string { return proto.CompactTextString(m) } -func (*EventList) ProtoMessage() {} - -func (m *EventSource) Reset() { *m = EventSource{} } 
-func (m *EventSource) String() string { return proto.CompactTextString(m) } -func (*EventSource) ProtoMessage() {} - -func (m *ExecAction) Reset() { *m = ExecAction{} } -func (m *ExecAction) String() string { return proto.CompactTextString(m) } -func (*ExecAction) ProtoMessage() {} - -func (m *ExportOptions) Reset() { *m = ExportOptions{} } -func (m *ExportOptions) String() string { return proto.CompactTextString(m) } -func (*ExportOptions) ProtoMessage() {} - -func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } -func (m *FCVolumeSource) String() string { return proto.CompactTextString(m) } -func (*FCVolumeSource) ProtoMessage() {} - -func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } -func (m *FlexVolumeSource) String() string { return proto.CompactTextString(m) } -func (*FlexVolumeSource) ProtoMessage() {} - -func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } -func (m *FlockerVolumeSource) String() string { return proto.CompactTextString(m) } -func (*FlockerVolumeSource) ProtoMessage() {} - -func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } -func (m *GCEPersistentDiskVolumeSource) String() string { return proto.CompactTextString(m) } -func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} - -func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } -func (m *GitRepoVolumeSource) String() string { return proto.CompactTextString(m) } -func (*GitRepoVolumeSource) ProtoMessage() {} - -func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } -func (m *GlusterfsVolumeSource) String() string { return proto.CompactTextString(m) } -func (*GlusterfsVolumeSource) ProtoMessage() {} - -func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } -func (m *HTTPGetAction) String() string { return proto.CompactTextString(m) } -func (*HTTPGetAction) ProtoMessage() {} - -func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } -func (m *HTTPHeader) String() string { return proto.CompactTextString(m) } -func (*HTTPHeader) ProtoMessage() {} - -func (m *Handler) Reset() { *m = Handler{} } -func (m *Handler) String() string { return proto.CompactTextString(m) } -func (*Handler) ProtoMessage() {} - -func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } -func (m *HostPathVolumeSource) String() string { return proto.CompactTextString(m) } -func (*HostPathVolumeSource) ProtoMessage() {} - -func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } -func (m *ISCSIVolumeSource) String() string { return proto.CompactTextString(m) } -func (*ISCSIVolumeSource) ProtoMessage() {} - -func (m *KeyToPath) Reset() { *m = KeyToPath{} } -func (m *KeyToPath) String() string { return proto.CompactTextString(m) } -func (*KeyToPath) ProtoMessage() {} - -func (m *Lifecycle) Reset() { *m = Lifecycle{} } -func (m *Lifecycle) String() string { return proto.CompactTextString(m) } -func (*Lifecycle) ProtoMessage() {} - -func (m *LimitRange) Reset() { *m = LimitRange{} } -func (m *LimitRange) String() string { return proto.CompactTextString(m) } -func (*LimitRange) ProtoMessage() {} - -func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } -func (m *LimitRangeItem) String() string { return proto.CompactTextString(m) } -func (*LimitRangeItem) ProtoMessage() {} - -func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } -func (m *LimitRangeList) String() string { return proto.CompactTextString(m) } -func (*LimitRangeList) ProtoMessage() {} - -func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } -func (m 
*LimitRangeSpec) String() string { return proto.CompactTextString(m) } -func (*LimitRangeSpec) ProtoMessage() {} - -func (m *List) Reset() { *m = List{} } -func (m *List) String() string { return proto.CompactTextString(m) } -func (*List) ProtoMessage() {} - -func (m *ListOptions) Reset() { *m = ListOptions{} } -func (m *ListOptions) String() string { return proto.CompactTextString(m) } -func (*ListOptions) ProtoMessage() {} - -func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } -func (m *LoadBalancerIngress) String() string { return proto.CompactTextString(m) } -func (*LoadBalancerIngress) ProtoMessage() {} - -func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } -func (m *LoadBalancerStatus) String() string { return proto.CompactTextString(m) } -func (*LoadBalancerStatus) ProtoMessage() {} - -func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } -func (m *LocalObjectReference) String() string { return proto.CompactTextString(m) } -func (*LocalObjectReference) ProtoMessage() {} - -func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } -func (m *NFSVolumeSource) String() string { return proto.CompactTextString(m) } -func (*NFSVolumeSource) ProtoMessage() {} - -func (m *Namespace) Reset() { *m = Namespace{} } -func (m *Namespace) String() string { return proto.CompactTextString(m) } -func (*Namespace) ProtoMessage() {} - -func (m *NamespaceList) Reset() { *m = NamespaceList{} } -func (m *NamespaceList) String() string { return proto.CompactTextString(m) } -func (*NamespaceList) ProtoMessage() {} - -func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } -func (m *NamespaceSpec) String() string { return proto.CompactTextString(m) } -func (*NamespaceSpec) ProtoMessage() {} - -func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } -func (m *NamespaceStatus) String() string { return proto.CompactTextString(m) } -func (*NamespaceStatus) ProtoMessage() {} - -func (m *Node) Reset() { *m = Node{} } -func (m *Node) String() string { return proto.CompactTextString(m) } -func (*Node) ProtoMessage() {} - -func (m *NodeAddress) Reset() { *m = NodeAddress{} } -func (m *NodeAddress) String() string { return proto.CompactTextString(m) } -func (*NodeAddress) ProtoMessage() {} - -func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } -func (m *NodeAffinity) String() string { return proto.CompactTextString(m) } -func (*NodeAffinity) ProtoMessage() {} - -func (m *NodeCondition) Reset() { *m = NodeCondition{} } -func (m *NodeCondition) String() string { return proto.CompactTextString(m) } -func (*NodeCondition) ProtoMessage() {} - -func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } -func (m *NodeDaemonEndpoints) String() string { return proto.CompactTextString(m) } -func (*NodeDaemonEndpoints) ProtoMessage() {} - -func (m *NodeList) Reset() { *m = NodeList{} } -func (m *NodeList) String() string { return proto.CompactTextString(m) } -func (*NodeList) ProtoMessage() {} - -func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } -func (m *NodeProxyOptions) String() string { return proto.CompactTextString(m) } -func (*NodeProxyOptions) ProtoMessage() {} - -func (m *NodeSelector) Reset() { *m = NodeSelector{} } -func (m *NodeSelector) String() string { return proto.CompactTextString(m) } -func (*NodeSelector) ProtoMessage() {} - -func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } -func (m *NodeSelectorRequirement) String() string { return proto.CompactTextString(m) } -func (*NodeSelectorRequirement) 
ProtoMessage() {} - -func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } -func (m *NodeSelectorTerm) String() string { return proto.CompactTextString(m) } -func (*NodeSelectorTerm) ProtoMessage() {} - -func (m *NodeSpec) Reset() { *m = NodeSpec{} } -func (m *NodeSpec) String() string { return proto.CompactTextString(m) } -func (*NodeSpec) ProtoMessage() {} - -func (m *NodeStatus) Reset() { *m = NodeStatus{} } -func (m *NodeStatus) String() string { return proto.CompactTextString(m) } -func (*NodeStatus) ProtoMessage() {} - -func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } -func (m *NodeSystemInfo) String() string { return proto.CompactTextString(m) } -func (*NodeSystemInfo) ProtoMessage() {} - -func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } -func (m *ObjectFieldSelector) String() string { return proto.CompactTextString(m) } -func (*ObjectFieldSelector) ProtoMessage() {} - -func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } -func (m *ObjectMeta) String() string { return proto.CompactTextString(m) } -func (*ObjectMeta) ProtoMessage() {} - -func (m *ObjectReference) Reset() { *m = ObjectReference{} } -func (m *ObjectReference) String() string { return proto.CompactTextString(m) } -func (*ObjectReference) ProtoMessage() {} - -func (m *OwnerReference) Reset() { *m = OwnerReference{} } -func (m *OwnerReference) String() string { return proto.CompactTextString(m) } -func (*OwnerReference) ProtoMessage() {} - -func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } -func (m *PersistentVolume) String() string { return proto.CompactTextString(m) } -func (*PersistentVolume) ProtoMessage() {} - -func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } -func (m *PersistentVolumeClaim) String() string { return proto.CompactTextString(m) } -func (*PersistentVolumeClaim) ProtoMessage() {} - -func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } -func (m *PersistentVolumeClaimList) String() string { return proto.CompactTextString(m) } -func (*PersistentVolumeClaimList) ProtoMessage() {} - -func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } -func (m *PersistentVolumeClaimSpec) String() string { return proto.CompactTextString(m) } -func (*PersistentVolumeClaimSpec) ProtoMessage() {} - -func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } -func (m *PersistentVolumeClaimStatus) String() string { return proto.CompactTextString(m) } -func (*PersistentVolumeClaimStatus) ProtoMessage() {} - -func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } -func (m *PersistentVolumeClaimVolumeSource) String() string { return proto.CompactTextString(m) } -func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} - -func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } -func (m *PersistentVolumeList) String() string { return proto.CompactTextString(m) } -func (*PersistentVolumeList) ProtoMessage() {} - -func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } -func (m *PersistentVolumeSource) String() string { return proto.CompactTextString(m) } -func (*PersistentVolumeSource) ProtoMessage() {} - -func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } -func (m *PersistentVolumeSpec) String() string { return proto.CompactTextString(m) } -func (*PersistentVolumeSpec) ProtoMessage() {} - -func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } -func (m 
*PersistentVolumeStatus) String() string { return proto.CompactTextString(m) } -func (*PersistentVolumeStatus) ProtoMessage() {} - -func (m *Pod) Reset() { *m = Pod{} } -func (m *Pod) String() string { return proto.CompactTextString(m) } -func (*Pod) ProtoMessage() {} - -func (m *PodAffinity) Reset() { *m = PodAffinity{} } -func (m *PodAffinity) String() string { return proto.CompactTextString(m) } -func (*PodAffinity) ProtoMessage() {} - -func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } -func (m *PodAffinityTerm) String() string { return proto.CompactTextString(m) } -func (*PodAffinityTerm) ProtoMessage() {} - -func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } -func (m *PodAntiAffinity) String() string { return proto.CompactTextString(m) } -func (*PodAntiAffinity) ProtoMessage() {} - -func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } -func (m *PodAttachOptions) String() string { return proto.CompactTextString(m) } -func (*PodAttachOptions) ProtoMessage() {} - -func (m *PodCondition) Reset() { *m = PodCondition{} } -func (m *PodCondition) String() string { return proto.CompactTextString(m) } -func (*PodCondition) ProtoMessage() {} - -func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } -func (m *PodExecOptions) String() string { return proto.CompactTextString(m) } -func (*PodExecOptions) ProtoMessage() {} - -func (m *PodList) Reset() { *m = PodList{} } -func (m *PodList) String() string { return proto.CompactTextString(m) } -func (*PodList) ProtoMessage() {} - -func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } -func (m *PodLogOptions) String() string { return proto.CompactTextString(m) } -func (*PodLogOptions) ProtoMessage() {} - -func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } -func (m *PodProxyOptions) String() string { return proto.CompactTextString(m) } -func (*PodProxyOptions) ProtoMessage() {} - -func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } -func (m *PodSecurityContext) String() string { return proto.CompactTextString(m) } -func (*PodSecurityContext) ProtoMessage() {} - -func (m *PodSpec) Reset() { *m = PodSpec{} } -func (m *PodSpec) String() string { return proto.CompactTextString(m) } -func (*PodSpec) ProtoMessage() {} - -func (m *PodStatus) Reset() { *m = PodStatus{} } -func (m *PodStatus) String() string { return proto.CompactTextString(m) } -func (*PodStatus) ProtoMessage() {} - -func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } -func (m *PodStatusResult) String() string { return proto.CompactTextString(m) } -func (*PodStatusResult) ProtoMessage() {} - -func (m *PodTemplate) Reset() { *m = PodTemplate{} } -func (m *PodTemplate) String() string { return proto.CompactTextString(m) } -func (*PodTemplate) ProtoMessage() {} - -func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } -func (m *PodTemplateList) String() string { return proto.CompactTextString(m) } -func (*PodTemplateList) ProtoMessage() {} - -func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } -func (m *PodTemplateSpec) String() string { return proto.CompactTextString(m) } -func (*PodTemplateSpec) ProtoMessage() {} - -func (m *Preconditions) Reset() { *m = Preconditions{} } -func (m *Preconditions) String() string { return proto.CompactTextString(m) } -func (*Preconditions) ProtoMessage() {} - -func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } -func (m *PreferredSchedulingTerm) String() string { return proto.CompactTextString(m) } -func (*PreferredSchedulingTerm) ProtoMessage() {} - 
-func (m *Probe) Reset() { *m = Probe{} } -func (m *Probe) String() string { return proto.CompactTextString(m) } -func (*Probe) ProtoMessage() {} - -func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } -func (m *RBDVolumeSource) String() string { return proto.CompactTextString(m) } -func (*RBDVolumeSource) ProtoMessage() {} - -func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } -func (m *RangeAllocation) String() string { return proto.CompactTextString(m) } -func (*RangeAllocation) ProtoMessage() {} - -func (m *ReplicationController) Reset() { *m = ReplicationController{} } -func (m *ReplicationController) String() string { return proto.CompactTextString(m) } -func (*ReplicationController) ProtoMessage() {} - -func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } -func (m *ReplicationControllerList) String() string { return proto.CompactTextString(m) } -func (*ReplicationControllerList) ProtoMessage() {} - -func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } -func (m *ReplicationControllerSpec) String() string { return proto.CompactTextString(m) } -func (*ReplicationControllerSpec) ProtoMessage() {} - -func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } -func (m *ReplicationControllerStatus) String() string { return proto.CompactTextString(m) } -func (*ReplicationControllerStatus) ProtoMessage() {} - -func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } -func (m *ResourceFieldSelector) String() string { return proto.CompactTextString(m) } -func (*ResourceFieldSelector) ProtoMessage() {} - -func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } -func (m *ResourceQuota) String() string { return proto.CompactTextString(m) } -func (*ResourceQuota) ProtoMessage() {} - -func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } -func (m *ResourceQuotaList) String() string { return proto.CompactTextString(m) } -func (*ResourceQuotaList) ProtoMessage() {} - -func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } -func (m *ResourceQuotaSpec) String() string { return proto.CompactTextString(m) } -func (*ResourceQuotaSpec) ProtoMessage() {} - -func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } -func (m *ResourceQuotaStatus) String() string { return proto.CompactTextString(m) } -func (*ResourceQuotaStatus) ProtoMessage() {} - -func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } -func (m *ResourceRequirements) String() string { return proto.CompactTextString(m) } -func (*ResourceRequirements) ProtoMessage() {} - -func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } -func (m *SELinuxOptions) String() string { return proto.CompactTextString(m) } -func (*SELinuxOptions) ProtoMessage() {} - -func (m *Secret) Reset() { *m = Secret{} } -func (m *Secret) String() string { return proto.CompactTextString(m) } -func (*Secret) ProtoMessage() {} - -func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } -func (m *SecretKeySelector) String() string { return proto.CompactTextString(m) } -func (*SecretKeySelector) ProtoMessage() {} - -func (m *SecretList) Reset() { *m = SecretList{} } -func (m *SecretList) String() string { return proto.CompactTextString(m) } -func (*SecretList) ProtoMessage() {} - -func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } -func (m *SecretVolumeSource) String() string { return proto.CompactTextString(m) } -func (*SecretVolumeSource) ProtoMessage() {} - -func (m *SecurityContext) 
Reset() { *m = SecurityContext{} } -func (m *SecurityContext) String() string { return proto.CompactTextString(m) } -func (*SecurityContext) ProtoMessage() {} - -func (m *SerializedReference) Reset() { *m = SerializedReference{} } -func (m *SerializedReference) String() string { return proto.CompactTextString(m) } -func (*SerializedReference) ProtoMessage() {} - -func (m *Service) Reset() { *m = Service{} } -func (m *Service) String() string { return proto.CompactTextString(m) } -func (*Service) ProtoMessage() {} - -func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } -func (m *ServiceAccount) String() string { return proto.CompactTextString(m) } -func (*ServiceAccount) ProtoMessage() {} - -func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } -func (m *ServiceAccountList) String() string { return proto.CompactTextString(m) } -func (*ServiceAccountList) ProtoMessage() {} - -func (m *ServiceList) Reset() { *m = ServiceList{} } -func (m *ServiceList) String() string { return proto.CompactTextString(m) } -func (*ServiceList) ProtoMessage() {} - -func (m *ServicePort) Reset() { *m = ServicePort{} } -func (m *ServicePort) String() string { return proto.CompactTextString(m) } -func (*ServicePort) ProtoMessage() {} - -func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } -func (m *ServiceProxyOptions) String() string { return proto.CompactTextString(m) } -func (*ServiceProxyOptions) ProtoMessage() {} - -func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } -func (m *ServiceSpec) String() string { return proto.CompactTextString(m) } -func (*ServiceSpec) ProtoMessage() {} - -func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } -func (m *ServiceStatus) String() string { return proto.CompactTextString(m) } -func (*ServiceStatus) ProtoMessage() {} - -func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } -func (m *TCPSocketAction) String() string { return proto.CompactTextString(m) } -func (*TCPSocketAction) ProtoMessage() {} - -func (m *Taint) Reset() { *m = Taint{} } -func (m *Taint) String() string { return proto.CompactTextString(m) } -func (*Taint) ProtoMessage() {} - -func (m *Toleration) Reset() { *m = Toleration{} } -func (m *Toleration) String() string { return proto.CompactTextString(m) } -func (*Toleration) ProtoMessage() {} - -func (m *Volume) Reset() { *m = Volume{} } -func (m *Volume) String() string { return proto.CompactTextString(m) } -func (*Volume) ProtoMessage() {} - -func (m *VolumeMount) Reset() { *m = VolumeMount{} } -func (m *VolumeMount) String() string { return proto.CompactTextString(m) } -func (*VolumeMount) ProtoMessage() {} - -func (m *VolumeSource) Reset() { *m = VolumeSource{} } -func (m *VolumeSource) String() string { return proto.CompactTextString(m) } -func (*VolumeSource) ProtoMessage() {} - -func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } -func (m *VsphereVirtualDiskVolumeSource) String() string { return proto.CompactTextString(m) } -func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} - -func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } -func (m *WeightedPodAffinityTerm) String() string { return proto.CompactTextString(m) } -func (*WeightedPodAffinityTerm) ProtoMessage() {} - -func init() { - proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.AWSElasticBlockStoreVolumeSource") - proto.RegisterType((*Affinity)(nil), "k8s.io.kubernetes.pkg.api.v1.Affinity") - proto.RegisterType((*AttachedVolume)(nil), 
"k8s.io.kubernetes.pkg.api.v1.AttachedVolume") - proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.AzureFileVolumeSource") - proto.RegisterType((*Binding)(nil), "k8s.io.kubernetes.pkg.api.v1.Binding") - proto.RegisterType((*Capabilities)(nil), "k8s.io.kubernetes.pkg.api.v1.Capabilities") - proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.CephFSVolumeSource") - proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.CinderVolumeSource") - proto.RegisterType((*ComponentCondition)(nil), "k8s.io.kubernetes.pkg.api.v1.ComponentCondition") - proto.RegisterType((*ComponentStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.ComponentStatus") - proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.kubernetes.pkg.api.v1.ComponentStatusList") - proto.RegisterType((*ConfigMap)(nil), "k8s.io.kubernetes.pkg.api.v1.ConfigMap") - proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.kubernetes.pkg.api.v1.ConfigMapKeySelector") - proto.RegisterType((*ConfigMapList)(nil), "k8s.io.kubernetes.pkg.api.v1.ConfigMapList") - proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.ConfigMapVolumeSource") - proto.RegisterType((*Container)(nil), "k8s.io.kubernetes.pkg.api.v1.Container") - proto.RegisterType((*ContainerImage)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerImage") - proto.RegisterType((*ContainerPort)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerPort") - proto.RegisterType((*ContainerState)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerState") - proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerStateRunning") - proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerStateTerminated") - proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerStateWaiting") - proto.RegisterType((*ContainerStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.ContainerStatus") - proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.kubernetes.pkg.api.v1.DaemonEndpoint") - proto.RegisterType((*DeleteOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.DeleteOptions") - proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.kubernetes.pkg.api.v1.DownwardAPIVolumeFile") - proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.DownwardAPIVolumeSource") - proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.EmptyDirVolumeSource") - proto.RegisterType((*EndpointAddress)(nil), "k8s.io.kubernetes.pkg.api.v1.EndpointAddress") - proto.RegisterType((*EndpointPort)(nil), "k8s.io.kubernetes.pkg.api.v1.EndpointPort") - proto.RegisterType((*EndpointSubset)(nil), "k8s.io.kubernetes.pkg.api.v1.EndpointSubset") - proto.RegisterType((*Endpoints)(nil), "k8s.io.kubernetes.pkg.api.v1.Endpoints") - proto.RegisterType((*EndpointsList)(nil), "k8s.io.kubernetes.pkg.api.v1.EndpointsList") - proto.RegisterType((*EnvVar)(nil), "k8s.io.kubernetes.pkg.api.v1.EnvVar") - proto.RegisterType((*EnvVarSource)(nil), "k8s.io.kubernetes.pkg.api.v1.EnvVarSource") - proto.RegisterType((*Event)(nil), "k8s.io.kubernetes.pkg.api.v1.Event") - proto.RegisterType((*EventList)(nil), "k8s.io.kubernetes.pkg.api.v1.EventList") - proto.RegisterType((*EventSource)(nil), "k8s.io.kubernetes.pkg.api.v1.EventSource") - proto.RegisterType((*ExecAction)(nil), "k8s.io.kubernetes.pkg.api.v1.ExecAction") - proto.RegisterType((*ExportOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.ExportOptions") - proto.RegisterType((*FCVolumeSource)(nil), 
"k8s.io.kubernetes.pkg.api.v1.FCVolumeSource") - proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.FlexVolumeSource") - proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.FlockerVolumeSource") - proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.GCEPersistentDiskVolumeSource") - proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.GitRepoVolumeSource") - proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.GlusterfsVolumeSource") - proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.kubernetes.pkg.api.v1.HTTPGetAction") - proto.RegisterType((*HTTPHeader)(nil), "k8s.io.kubernetes.pkg.api.v1.HTTPHeader") - proto.RegisterType((*Handler)(nil), "k8s.io.kubernetes.pkg.api.v1.Handler") - proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.HostPathVolumeSource") - proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.ISCSIVolumeSource") - proto.RegisterType((*KeyToPath)(nil), "k8s.io.kubernetes.pkg.api.v1.KeyToPath") - proto.RegisterType((*Lifecycle)(nil), "k8s.io.kubernetes.pkg.api.v1.Lifecycle") - proto.RegisterType((*LimitRange)(nil), "k8s.io.kubernetes.pkg.api.v1.LimitRange") - proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.kubernetes.pkg.api.v1.LimitRangeItem") - proto.RegisterType((*LimitRangeList)(nil), "k8s.io.kubernetes.pkg.api.v1.LimitRangeList") - proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.LimitRangeSpec") - proto.RegisterType((*List)(nil), "k8s.io.kubernetes.pkg.api.v1.List") - proto.RegisterType((*ListOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.ListOptions") - proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.kubernetes.pkg.api.v1.LoadBalancerIngress") - proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.LoadBalancerStatus") - proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.kubernetes.pkg.api.v1.LocalObjectReference") - proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.NFSVolumeSource") - proto.RegisterType((*Namespace)(nil), "k8s.io.kubernetes.pkg.api.v1.Namespace") - proto.RegisterType((*NamespaceList)(nil), "k8s.io.kubernetes.pkg.api.v1.NamespaceList") - proto.RegisterType((*NamespaceSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.NamespaceSpec") - proto.RegisterType((*NamespaceStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.NamespaceStatus") - proto.RegisterType((*Node)(nil), "k8s.io.kubernetes.pkg.api.v1.Node") - proto.RegisterType((*NodeAddress)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeAddress") - proto.RegisterType((*NodeAffinity)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeAffinity") - proto.RegisterType((*NodeCondition)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeCondition") - proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeDaemonEndpoints") - proto.RegisterType((*NodeList)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeList") - proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeProxyOptions") - proto.RegisterType((*NodeSelector)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeSelector") - proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeSelectorRequirement") - proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeSelectorTerm") - proto.RegisterType((*NodeSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeSpec") - proto.RegisterType((*NodeStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeStatus") - 
proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.kubernetes.pkg.api.v1.NodeSystemInfo") - proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.kubernetes.pkg.api.v1.ObjectFieldSelector") - proto.RegisterType((*ObjectMeta)(nil), "k8s.io.kubernetes.pkg.api.v1.ObjectMeta") - proto.RegisterType((*ObjectReference)(nil), "k8s.io.kubernetes.pkg.api.v1.ObjectReference") - proto.RegisterType((*OwnerReference)(nil), "k8s.io.kubernetes.pkg.api.v1.OwnerReference") - proto.RegisterType((*PersistentVolume)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolume") - proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaim") - proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaimList") - proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaimSpec") - proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaimStatus") - proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaimVolumeSource") - proto.RegisterType((*PersistentVolumeList)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeList") - proto.RegisterType((*PersistentVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeSource") - proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeSpec") - proto.RegisterType((*PersistentVolumeStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.PersistentVolumeStatus") - proto.RegisterType((*Pod)(nil), "k8s.io.kubernetes.pkg.api.v1.Pod") - proto.RegisterType((*PodAffinity)(nil), "k8s.io.kubernetes.pkg.api.v1.PodAffinity") - proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.kubernetes.pkg.api.v1.PodAffinityTerm") - proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.kubernetes.pkg.api.v1.PodAntiAffinity") - proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.PodAttachOptions") - proto.RegisterType((*PodCondition)(nil), "k8s.io.kubernetes.pkg.api.v1.PodCondition") - proto.RegisterType((*PodExecOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.PodExecOptions") - proto.RegisterType((*PodList)(nil), "k8s.io.kubernetes.pkg.api.v1.PodList") - proto.RegisterType((*PodLogOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.PodLogOptions") - proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.PodProxyOptions") - proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.kubernetes.pkg.api.v1.PodSecurityContext") - proto.RegisterType((*PodSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.PodSpec") - proto.RegisterType((*PodStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.PodStatus") - proto.RegisterType((*PodStatusResult)(nil), "k8s.io.kubernetes.pkg.api.v1.PodStatusResult") - proto.RegisterType((*PodTemplate)(nil), "k8s.io.kubernetes.pkg.api.v1.PodTemplate") - proto.RegisterType((*PodTemplateList)(nil), "k8s.io.kubernetes.pkg.api.v1.PodTemplateList") - proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec") - proto.RegisterType((*Preconditions)(nil), "k8s.io.kubernetes.pkg.api.v1.Preconditions") - proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.kubernetes.pkg.api.v1.PreferredSchedulingTerm") - proto.RegisterType((*Probe)(nil), "k8s.io.kubernetes.pkg.api.v1.Probe") - proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.RBDVolumeSource") - proto.RegisterType((*RangeAllocation)(nil), "k8s.io.kubernetes.pkg.api.v1.RangeAllocation") - 
proto.RegisterType((*ReplicationController)(nil), "k8s.io.kubernetes.pkg.api.v1.ReplicationController") - proto.RegisterType((*ReplicationControllerList)(nil), "k8s.io.kubernetes.pkg.api.v1.ReplicationControllerList") - proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.ReplicationControllerSpec") - proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.ReplicationControllerStatus") - proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.kubernetes.pkg.api.v1.ResourceFieldSelector") - proto.RegisterType((*ResourceQuota)(nil), "k8s.io.kubernetes.pkg.api.v1.ResourceQuota") - proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.kubernetes.pkg.api.v1.ResourceQuotaList") - proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.ResourceQuotaSpec") - proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.ResourceQuotaStatus") - proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.kubernetes.pkg.api.v1.ResourceRequirements") - proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.SELinuxOptions") - proto.RegisterType((*Secret)(nil), "k8s.io.kubernetes.pkg.api.v1.Secret") - proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.kubernetes.pkg.api.v1.SecretKeySelector") - proto.RegisterType((*SecretList)(nil), "k8s.io.kubernetes.pkg.api.v1.SecretList") - proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.SecretVolumeSource") - proto.RegisterType((*SecurityContext)(nil), "k8s.io.kubernetes.pkg.api.v1.SecurityContext") - proto.RegisterType((*SerializedReference)(nil), "k8s.io.kubernetes.pkg.api.v1.SerializedReference") - proto.RegisterType((*Service)(nil), "k8s.io.kubernetes.pkg.api.v1.Service") - proto.RegisterType((*ServiceAccount)(nil), "k8s.io.kubernetes.pkg.api.v1.ServiceAccount") - proto.RegisterType((*ServiceAccountList)(nil), "k8s.io.kubernetes.pkg.api.v1.ServiceAccountList") - proto.RegisterType((*ServiceList)(nil), "k8s.io.kubernetes.pkg.api.v1.ServiceList") - proto.RegisterType((*ServicePort)(nil), "k8s.io.kubernetes.pkg.api.v1.ServicePort") - proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.kubernetes.pkg.api.v1.ServiceProxyOptions") - proto.RegisterType((*ServiceSpec)(nil), "k8s.io.kubernetes.pkg.api.v1.ServiceSpec") - proto.RegisterType((*ServiceStatus)(nil), "k8s.io.kubernetes.pkg.api.v1.ServiceStatus") - proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.kubernetes.pkg.api.v1.TCPSocketAction") - proto.RegisterType((*Taint)(nil), "k8s.io.kubernetes.pkg.api.v1.Taint") - proto.RegisterType((*Toleration)(nil), "k8s.io.kubernetes.pkg.api.v1.Toleration") - proto.RegisterType((*Volume)(nil), "k8s.io.kubernetes.pkg.api.v1.Volume") - proto.RegisterType((*VolumeMount)(nil), "k8s.io.kubernetes.pkg.api.v1.VolumeMount") - proto.RegisterType((*VolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.VolumeSource") - proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.kubernetes.pkg.api.v1.VsphereVirtualDiskVolumeSource") - proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.kubernetes.pkg.api.v1.WeightedPodAffinityTerm") -} -func (m *AWSElasticBlockStoreVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *AWSElasticBlockStoreVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = 
encodeVarintGenerated(data, i, uint64(len(m.VolumeID))) - i += copy(data[i:], m.VolumeID) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) - i += copy(data[i:], m.FSType) - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Partition)) - data[i] = 0x20 - i++ - if m.ReadOnly { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *Affinity) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Affinity) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.NodeAffinity != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.NodeAffinity.Size())) - n1, err := m.NodeAffinity.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.PodAffinity != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.PodAffinity.Size())) - n2, err := m.PodAffinity.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.PodAntiAffinity != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.PodAntiAffinity.Size())) - n3, err := m.PodAntiAffinity.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - } - return i, nil -} - -func (m *AttachedVolume) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *AttachedVolume) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.DevicePath))) - i += copy(data[i:], m.DevicePath) - return i, nil -} - -func (m *AzureFileVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *AzureFileVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.SecretName))) - i += copy(data[i:], m.SecretName) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ShareName))) - i += copy(data[i:], m.ShareName) - data[i] = 0x18 - i++ - if m.ReadOnly { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *Binding) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Binding) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n4 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Target.Size())) - n5, err := m.Target.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n5 - return i, nil -} - -func (m *Capabilities) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Capabilities) MarshalTo(data 
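Note: the generated.pb.go marshalers removed in this file all follow the same protobuf wire-format pattern: a one-byte field tag (field number shifted left by three, OR'd with the wire type), then a varint length and the raw bytes for strings and messages, or a single 0/1 byte for booleans. The sketch below is an illustrative reconstruction of that layout; putUvarint and appendStringField are hypothetical helper names, not part of the generated code.

package main

import "fmt"

// putUvarint appends v in protobuf varint form: 7 bits per byte, with the
// high bit set on every byte except the last (the same loop the generated
// marshalers inline when writing string lengths).
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// appendStringField writes a length-delimited field: one tag byte
// (fieldNumber<<3 | wire type 2), a varint length, then the raw bytes.
func appendStringField(buf []byte, fieldNumber int, s string) []byte {
	buf = append(buf, byte(fieldNumber<<3|2))
	buf = putUvarint(buf, uint64(len(s)))
	return append(buf, s...)
}

func main() {
	// Field 1 of a message (for example AWSElasticBlockStoreVolumeSource.VolumeID)
	// gets tag 0x0a, which is the first byte the generated MarshalTo writes.
	out := appendStringField(nil, 1, "vol-123")
	fmt.Printf("% x\n", out) // 0a 07 76 6f 6c 2d 31 32 33
}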
[]byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Add) > 0 { - for _, s := range m.Add { - data[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if len(m.Drop) > 0 { - for _, s := range m.Drop { - data[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - return i, nil -} - -func (m *CephFSVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *CephFSVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Monitors) > 0 { - for _, s := range m.Monitors { - data[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.User))) - i += copy(data[i:], m.User) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.SecretFile))) - i += copy(data[i:], m.SecretFile) - if m.SecretRef != nil { - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) - n6, err := m.SecretRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n6 - } - data[i] = 0x30 - i++ - if m.ReadOnly { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *CinderVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *CinderVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.VolumeID))) - i += copy(data[i:], m.VolumeID) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) - i += copy(data[i:], m.FSType) - data[i] = 0x18 - i++ - if m.ReadOnly { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *ComponentCondition) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ComponentCondition) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i += copy(data[i:], m.Status) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Error))) - i += copy(data[i:], m.Error) - return i, nil -} - -func (m *ComponentStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ComponentStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - 
i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n7 - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ComponentStatusList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ComponentStatusList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n8, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n8 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ConfigMap) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ConfigMap) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n9 - if len(m.Data) > 0 { - for k := range m.Data { - data[i] = 0x12 - i++ - v := m.Data[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - return i, nil -} - -func (m *ConfigMapKeySelector) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ConfigMapKeySelector) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) - n10, err := m.LocalObjectReference.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n10 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Key))) - i += copy(data[i:], m.Key) - return i, nil -} - -func (m *ConfigMapList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ConfigMapList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n11 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ConfigMapVolumeSource) 
Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ConfigMapVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) - n12, err := m.LocalObjectReference.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n12 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *Container) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Container) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Image))) - i += copy(data[i:], m.Image) - if len(m.Command) > 0 { - for _, s := range m.Command { - data[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if len(m.Args) > 0 { - for _, s := range m.Args { - data[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.WorkingDir))) - i += copy(data[i:], m.WorkingDir) - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Env) > 0 { - for _, msg := range m.Env { - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - data[i] = 0x42 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Resources.Size())) - n13, err := m.Resources.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n13 - if len(m.VolumeMounts) > 0 { - for _, msg := range m.VolumeMounts { - data[i] = 0x4a - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.LivenessProbe != nil { - data[i] = 0x52 - i++ - i = encodeVarintGenerated(data, i, uint64(m.LivenessProbe.Size())) - n14, err := m.LivenessProbe.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n14 - } - if m.ReadinessProbe != nil { - data[i] = 0x5a - i++ - i = encodeVarintGenerated(data, i, uint64(m.ReadinessProbe.Size())) - n15, err := m.ReadinessProbe.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n15 - } - if m.Lifecycle != nil { - data[i] = 0x62 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Lifecycle.Size())) - n16, err := m.Lifecycle.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n16 - } - data[i] = 0x6a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.TerminationMessagePath))) - i += copy(data[i:], m.TerminationMessagePath) - data[i] = 
0x72 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ImagePullPolicy))) - i += copy(data[i:], m.ImagePullPolicy) - if m.SecurityContext != nil { - data[i] = 0x7a - i++ - i = encodeVarintGenerated(data, i, uint64(m.SecurityContext.Size())) - n17, err := m.SecurityContext.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n17 - } - data[i] = 0x80 - i++ - data[i] = 0x1 - i++ - if m.Stdin { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x88 - i++ - data[i] = 0x1 - i++ - if m.StdinOnce { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x90 - i++ - data[i] = 0x1 - i++ - if m.TTY { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *ContainerImage) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ContainerImage) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Names) > 0 { - for _, s := range m.Names { - data[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.SizeBytes)) - return i, nil -} - -func (m *ContainerPort) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ContainerPort) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.HostPort)) - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ContainerPort)) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Protocol))) - i += copy(data[i:], m.Protocol) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.HostIP))) - i += copy(data[i:], m.HostIP) - return i, nil -} - -func (m *ContainerState) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ContainerState) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Waiting != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.Waiting.Size())) - n18, err := m.Waiting.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n18 - } - if m.Running != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Running.Size())) - n19, err := m.Running.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n19 - } - if m.Terminated != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Terminated.Size())) - n20, err := m.Terminated.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n20 - } - return i, nil -} - -func (m *ContainerStateRunning) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ContainerStateRunning) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, 
uint64(m.StartedAt.Size())) - n21, err := m.StartedAt.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n21 - return i, nil -} - -func (m *ContainerStateTerminated) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ContainerStateTerminated) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ExitCode)) - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Signal)) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(m.StartedAt.Size())) - n22, err := m.StartedAt.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n22 - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(m.FinishedAt.Size())) - n23, err := m.FinishedAt.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n23 - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContainerID))) - i += copy(data[i:], m.ContainerID) - return i, nil -} - -func (m *ContainerStateWaiting) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ContainerStateWaiting) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - return i, nil -} - -func (m *ContainerStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ContainerStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.State.Size())) - n24, err := m.State.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n24 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastTerminationState.Size())) - n25, err := m.LastTerminationState.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n25 - data[i] = 0x20 - i++ - if m.Ready { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(m.RestartCount)) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Image))) - i += copy(data[i:], m.Image) - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ImageID))) - i += copy(data[i:], m.ImageID) - data[i] = 0x42 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContainerID))) - i += copy(data[i:], m.ContainerID) - return i, nil -} - -func (m *DaemonEndpoint) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *DaemonEndpoint) 
MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Port)) - return i, nil -} - -func (m *DeleteOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *DeleteOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.GracePeriodSeconds != nil { - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.GracePeriodSeconds)) - } - if m.Preconditions != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Preconditions.Size())) - n26, err := m.Preconditions.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n26 - } - if m.OrphanDependents != nil { - data[i] = 0x18 - i++ - if *m.OrphanDependents { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - } - return i, nil -} - -func (m *DownwardAPIVolumeFile) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *DownwardAPIVolumeFile) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) - if m.FieldRef != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.FieldRef.Size())) - n27, err := m.FieldRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n27 - } - if m.ResourceFieldRef != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.ResourceFieldRef.Size())) - n28, err := m.ResourceFieldRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n28 - } - return i, nil -} - -func (m *DownwardAPIVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *DownwardAPIVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *EmptyDirVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *EmptyDirVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Medium))) - i += copy(data[i:], m.Medium) - return i, nil -} - -func (m *EndpointAddress) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *EndpointAddress) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.IP))) - i += copy(data[i:], m.IP) - if m.TargetRef != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.TargetRef.Size())) - n29, err := m.TargetRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i 
+= n29 - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Hostname))) - i += copy(data[i:], m.Hostname) - return i, nil -} - -func (m *EndpointPort) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *EndpointPort) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Port)) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Protocol))) - i += copy(data[i:], m.Protocol) - return i, nil -} - -func (m *EndpointSubset) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *EndpointSubset) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Addresses) > 0 { - for _, msg := range m.Addresses { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.NotReadyAddresses) > 0 { - for _, msg := range m.NotReadyAddresses { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *Endpoints) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Endpoints) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n30, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n30 - if len(m.Subsets) > 0 { - for _, msg := range m.Subsets { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *EndpointsList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *EndpointsList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n31, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n31 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *EnvVar) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *EnvVar) MarshalTo(data []byte) (int, 
error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Value))) - i += copy(data[i:], m.Value) - if m.ValueFrom != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.ValueFrom.Size())) - n32, err := m.ValueFrom.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n32 - } - return i, nil -} - -func (m *EnvVarSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *EnvVarSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.FieldRef != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.FieldRef.Size())) - n33, err := m.FieldRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n33 - } - if m.ResourceFieldRef != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ResourceFieldRef.Size())) - n34, err := m.ResourceFieldRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n34 - } - if m.ConfigMapKeyRef != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.ConfigMapKeyRef.Size())) - n35, err := m.ConfigMapKeyRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n35 - } - if m.SecretKeyRef != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.SecretKeyRef.Size())) - n36, err := m.SecretKeyRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n36 - } - return i, nil -} - -func (m *Event) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Event) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n37, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n37 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.InvolvedObject.Size())) - n38, err := m.InvolvedObject.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n38 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Source.Size())) - n39, err := m.Source.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n39 - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(m.FirstTimestamp.Size())) - n40, err := m.FirstTimestamp.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n40 - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastTimestamp.Size())) - n41, err := m.LastTimestamp.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n41 - data[i] = 0x40 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Count)) - data[i] = 0x4a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - return i, nil -} - -func (m *EventList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil 
{ - return nil, err - } - return data[:n], nil -} - -func (m *EventList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n42, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n42 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *EventSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *EventSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Component))) - i += copy(data[i:], m.Component) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Host))) - i += copy(data[i:], m.Host) - return i, nil -} - -func (m *ExecAction) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ExecAction) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Command) > 0 { - for _, s := range m.Command { - data[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - return i, nil -} - -func (m *ExportOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ExportOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - if m.Export { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x10 - i++ - if m.Exact { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *FCVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *FCVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.TargetWWNs) > 0 { - for _, s := range m.TargetWWNs { - data[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if m.Lun != nil { - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.Lun)) - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) - i += copy(data[i:], m.FSType) - data[i] = 0x20 - i++ - if m.ReadOnly { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *FlexVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *FlexVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Driver))) - i += copy(data[i:], m.Driver) - data[i] = 0x12 - i++ - i = 
encodeVarintGenerated(data, i, uint64(len(m.FSType))) - i += copy(data[i:], m.FSType) - if m.SecretRef != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) - n43, err := m.SecretRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n43 - } - data[i] = 0x20 - i++ - if m.ReadOnly { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - if len(m.Options) > 0 { - for k := range m.Options { - data[i] = 0x2a - i++ - v := m.Options[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - return i, nil -} - -func (m *FlockerVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *FlockerVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.DatasetName))) - i += copy(data[i:], m.DatasetName) - return i, nil -} - -func (m *GCEPersistentDiskVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *GCEPersistentDiskVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.PDName))) - i += copy(data[i:], m.PDName) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) - i += copy(data[i:], m.FSType) - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Partition)) - data[i] = 0x20 - i++ - if m.ReadOnly { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *GitRepoVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *GitRepoVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Repository))) - i += copy(data[i:], m.Repository) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Revision))) - i += copy(data[i:], m.Revision) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Directory))) - i += copy(data[i:], m.Directory) - return i, nil -} - -func (m *GlusterfsVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *GlusterfsVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.EndpointsName))) - i += copy(data[i:], m.EndpointsName) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) - data[i] = 0x18 - i++ - if m.ReadOnly { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *HTTPGetAction) Marshal() (data []byte, err error) { - size := 
m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *HTTPGetAction) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Port.Size())) - n44, err := m.Port.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n44 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Host))) - i += copy(data[i:], m.Host) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Scheme))) - i += copy(data[i:], m.Scheme) - if len(m.HTTPHeaders) > 0 { - for _, msg := range m.HTTPHeaders { - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *HTTPHeader) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *HTTPHeader) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Value))) - i += copy(data[i:], m.Value) - return i, nil -} - -func (m *Handler) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Handler) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Exec != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.Exec.Size())) - n45, err := m.Exec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n45 - } - if m.HTTPGet != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.HTTPGet.Size())) - n46, err := m.HTTPGet.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n46 - } - if m.TCPSocket != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.TCPSocket.Size())) - n47, err := m.TCPSocket.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n47 - } - return i, nil -} - -func (m *HostPathVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *HostPathVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) - return i, nil -} - -func (m *ISCSIVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ISCSIVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.TargetPortal))) - i += copy(data[i:], m.TargetPortal) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.IQN))) - i += copy(data[i:], m.IQN) - data[i] = 0x18 - i++ - i = 
encodeVarintGenerated(data, i, uint64(m.Lun)) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ISCSIInterface))) - i += copy(data[i:], m.ISCSIInterface) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) - i += copy(data[i:], m.FSType) - data[i] = 0x30 - i++ - if m.ReadOnly { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *KeyToPath) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *KeyToPath) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Key))) - i += copy(data[i:], m.Key) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) - return i, nil -} - -func (m *Lifecycle) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Lifecycle) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.PostStart != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.PostStart.Size())) - n48, err := m.PostStart.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n48 - } - if m.PreStop != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.PreStop.Size())) - n49, err := m.PreStop.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n49 - } - return i, nil -} - -func (m *LimitRange) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LimitRange) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n50, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n50 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n51, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n51 - return i, nil -} - -func (m *LimitRangeItem) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LimitRangeItem) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - if len(m.Max) > 0 { - for k := range m.Max { - data[i] = 0x12 - i++ - v := m.Max[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n52, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n52 - } - } - if len(m.Min) > 0 { - for k := range m.Min { - data[i] = 0x1a - i++ - v := m.Min[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = 
encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n53, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n53 - } - } - if len(m.Default) > 0 { - for k := range m.Default { - data[i] = 0x22 - i++ - v := m.Default[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n54, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n54 - } - } - if len(m.DefaultRequest) > 0 { - for k := range m.DefaultRequest { - data[i] = 0x2a - i++ - v := m.DefaultRequest[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n55, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n55 - } - } - if len(m.MaxLimitRequestRatio) > 0 { - for k := range m.MaxLimitRequestRatio { - data[i] = 0x32 - i++ - v := m.MaxLimitRequestRatio[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n56, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n56 - } - } - return i, nil -} - -func (m *LimitRangeList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LimitRangeList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n57, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n57 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *LimitRangeSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LimitRangeSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Limits) > 0 { - for _, msg := range m.Limits { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *List) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return 
data[:n], nil -} - -func (m *List) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n58, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n58 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ListOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ListOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.LabelSelector))) - i += copy(data[i:], m.LabelSelector) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FieldSelector))) - i += copy(data[i:], m.FieldSelector) - data[i] = 0x18 - i++ - if m.Watch { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) - i += copy(data[i:], m.ResourceVersion) - if m.TimeoutSeconds != nil { - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.TimeoutSeconds)) - } - return i, nil -} - -func (m *LoadBalancerIngress) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LoadBalancerIngress) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.IP))) - i += copy(data[i:], m.IP) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Hostname))) - i += copy(data[i:], m.Hostname) - return i, nil -} - -func (m *LoadBalancerStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LoadBalancerStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Ingress) > 0 { - for _, msg := range m.Ingress { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *LocalObjectReference) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LocalObjectReference) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - return i, nil -} - -func (m *NFSVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NFSVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Server))) - i += copy(data[i:], m.Server) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, 
uint64(len(m.Path))) - i += copy(data[i:], m.Path) - data[i] = 0x18 - i++ - if m.ReadOnly { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *Namespace) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Namespace) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n59, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n59 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n60, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n60 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n61, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n61 - return i, nil -} - -func (m *NamespaceList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NamespaceList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n62, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n62 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *NamespaceSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NamespaceSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Finalizers) > 0 { - for _, s := range m.Finalizers { - data[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - return i, nil -} - -func (m *NamespaceStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NamespaceStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) - i += copy(data[i:], m.Phase) - return i, nil -} - -func (m *Node) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Node) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n63, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n63 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n64, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n64 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n65, err := m.Status.MarshalTo(data[i:]) - if 
err != nil { - return 0, err - } - i += n65 - return i, nil -} - -func (m *NodeAddress) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NodeAddress) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Address))) - i += copy(data[i:], m.Address) - return i, nil -} - -func (m *NodeAffinity) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NodeAffinity) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) - n66, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n66 - } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *NodeCondition) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NodeCondition) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i += copy(data[i:], m.Status) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastHeartbeatTime.Size())) - n67, err := m.LastHeartbeatTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n67 - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n68, err := m.LastTransitionTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n68 - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - return i, nil -} - -func (m *NodeDaemonEndpoints) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NodeDaemonEndpoints) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.KubeletEndpoint.Size())) - n69, err := m.KubeletEndpoint.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n69 - return i, nil -} - -func (m *NodeList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NodeList) MarshalTo(data []byte) 
(int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n70, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n70 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *NodeProxyOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NodeProxyOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) - return i, nil -} - -func (m *NodeSelector) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NodeSelector) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.NodeSelectorTerms) > 0 { - for _, msg := range m.NodeSelectorTerms { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *NodeSelectorRequirement) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NodeSelectorRequirement) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Key))) - i += copy(data[i:], m.Key) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) - i += copy(data[i:], m.Operator) - if len(m.Values) > 0 { - for _, s := range m.Values { - data[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - return i, nil -} - -func (m *NodeSelectorTerm) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NodeSelectorTerm) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.MatchExpressions) > 0 { - for _, msg := range m.MatchExpressions { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *NodeSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NodeSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.PodCIDR))) - i += copy(data[i:], m.PodCIDR) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ExternalID))) - i += copy(data[i:], m.ExternalID) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ProviderID))) - i += copy(data[i:], 
m.ProviderID) - data[i] = 0x20 - i++ - if m.Unschedulable { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *NodeStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NodeStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Capacity) > 0 { - for k := range m.Capacity { - data[i] = 0xa - i++ - v := m.Capacity[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n71, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n71 - } - } - if len(m.Allocatable) > 0 { - for k := range m.Allocatable { - data[i] = 0x12 - i++ - v := m.Allocatable[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n72, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n72 - } - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) - i += copy(data[i:], m.Phase) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Addresses) > 0 { - for _, msg := range m.Addresses { - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(m.DaemonEndpoints.Size())) - n73, err := m.DaemonEndpoints.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n73 - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(m.NodeInfo.Size())) - n74, err := m.NodeInfo.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n74 - if len(m.Images) > 0 { - for _, msg := range m.Images { - data[i] = 0x42 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.VolumesInUse) > 0 { - for _, s := range m.VolumesInUse { - data[i] = 0x4a - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if len(m.VolumesAttached) > 0 { - for _, msg := range m.VolumesAttached { - data[i] = 0x52 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *NodeSystemInfo) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NodeSystemInfo) MarshalTo(data []byte) (int, error) { - var i int - _ = i - 
var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.MachineID))) - i += copy(data[i:], m.MachineID) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.SystemUUID))) - i += copy(data[i:], m.SystemUUID) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.BootID))) - i += copy(data[i:], m.BootID) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.KernelVersion))) - i += copy(data[i:], m.KernelVersion) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.OSImage))) - i += copy(data[i:], m.OSImage) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContainerRuntimeVersion))) - i += copy(data[i:], m.ContainerRuntimeVersion) - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.KubeletVersion))) - i += copy(data[i:], m.KubeletVersion) - data[i] = 0x42 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.KubeProxyVersion))) - i += copy(data[i:], m.KubeProxyVersion) - data[i] = 0x4a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.OperatingSystem))) - i += copy(data[i:], m.OperatingSystem) - data[i] = 0x52 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Architecture))) - i += copy(data[i:], m.Architecture) - return i, nil -} - -func (m *ObjectFieldSelector) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ObjectFieldSelector) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) - i += copy(data[i:], m.APIVersion) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FieldPath))) - i += copy(data[i:], m.FieldPath) - return i, nil -} - -func (m *ObjectMeta) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ObjectMeta) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.GenerateName))) - i += copy(data[i:], m.GenerateName) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) - i += copy(data[i:], m.Namespace) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.SelfLink))) - i += copy(data[i:], m.SelfLink) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.UID))) - i += copy(data[i:], m.UID) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) - i += copy(data[i:], m.ResourceVersion) - data[i] = 0x38 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Generation)) - data[i] = 0x42 - i++ - i = encodeVarintGenerated(data, i, uint64(m.CreationTimestamp.Size())) - n75, err := m.CreationTimestamp.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n75 - if m.DeletionTimestamp != nil { - data[i] = 0x4a - i++ - i = encodeVarintGenerated(data, i, uint64(m.DeletionTimestamp.Size())) - n76, err := m.DeletionTimestamp.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n76 - } - if m.DeletionGracePeriodSeconds != nil { - data[i] = 0x50 - i++ - i = encodeVarintGenerated(data, i, 
uint64(*m.DeletionGracePeriodSeconds)) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - data[i] = 0x5a - i++ - v := m.Labels[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - if len(m.Annotations) > 0 { - for k := range m.Annotations { - data[i] = 0x62 - i++ - v := m.Annotations[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - if len(m.OwnerReferences) > 0 { - for _, msg := range m.OwnerReferences { - data[i] = 0x6a - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Finalizers) > 0 { - for _, s := range m.Finalizers { - data[i] = 0x72 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - return i, nil -} - -func (m *ObjectReference) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ObjectReference) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) - i += copy(data[i:], m.Namespace) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.UID))) - i += copy(data[i:], m.UID) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) - i += copy(data[i:], m.APIVersion) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) - i += copy(data[i:], m.ResourceVersion) - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FieldPath))) - i += copy(data[i:], m.FieldPath) - return i, nil -} - -func (m *OwnerReference) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *OwnerReference) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.UID))) - i += copy(data[i:], m.UID) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) - i += copy(data[i:], m.APIVersion) - if m.Controller != nil { - data[i] = 0x30 - i++ - if *m.Controller { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - } - return i, nil -} - 
-func (m *PersistentVolume) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PersistentVolume) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n77, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n77 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n78, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n78 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n79, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n79 - return i, nil -} - -func (m *PersistentVolumeClaim) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PersistentVolumeClaim) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n80, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n80 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n81, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n81 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n82, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n82 - return i, nil -} - -func (m *PersistentVolumeClaimList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PersistentVolumeClaimList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n83, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n83 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PersistentVolumeClaimSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PersistentVolumeClaimSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - data[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Resources.Size())) - n84, err := m.Resources.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n84 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.VolumeName))) - i += copy(data[i:], m.VolumeName) - if m.Selector != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n85, err := m.Selector.MarshalTo(data[i:]) - 
if err != nil { - return 0, err - } - i += n85 - } - return i, nil -} - -func (m *PersistentVolumeClaimStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PersistentVolumeClaimStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) - i += copy(data[i:], m.Phase) - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - data[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if len(m.Capacity) > 0 { - for k := range m.Capacity { - data[i] = 0x1a - i++ - v := m.Capacity[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n86, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n86 - } - } - return i, nil -} - -func (m *PersistentVolumeClaimVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PersistentVolumeClaimVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ClaimName))) - i += copy(data[i:], m.ClaimName) - data[i] = 0x10 - i++ - if m.ReadOnly { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *PersistentVolumeList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PersistentVolumeList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n87, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n87 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PersistentVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PersistentVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.GCEPersistentDisk != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.GCEPersistentDisk.Size())) - n88, err := m.GCEPersistentDisk.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n88 - } - if m.AWSElasticBlockStore != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.AWSElasticBlockStore.Size())) - n89, err := m.AWSElasticBlockStore.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n89 - } - if m.HostPath != nil { - data[i] = 0x1a - i++ - i = 
encodeVarintGenerated(data, i, uint64(m.HostPath.Size())) - n90, err := m.HostPath.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n90 - } - if m.Glusterfs != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Glusterfs.Size())) - n91, err := m.Glusterfs.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n91 - } - if m.NFS != nil { - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(m.NFS.Size())) - n92, err := m.NFS.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n92 - } - if m.RBD != nil { - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(m.RBD.Size())) - n93, err := m.RBD.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n93 - } - if m.ISCSI != nil { - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(m.ISCSI.Size())) - n94, err := m.ISCSI.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n94 - } - if m.Cinder != nil { - data[i] = 0x42 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Cinder.Size())) - n95, err := m.Cinder.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n95 - } - if m.CephFS != nil { - data[i] = 0x4a - i++ - i = encodeVarintGenerated(data, i, uint64(m.CephFS.Size())) - n96, err := m.CephFS.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n96 - } - if m.FC != nil { - data[i] = 0x52 - i++ - i = encodeVarintGenerated(data, i, uint64(m.FC.Size())) - n97, err := m.FC.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n97 - } - if m.Flocker != nil { - data[i] = 0x5a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Flocker.Size())) - n98, err := m.Flocker.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n98 - } - if m.FlexVolume != nil { - data[i] = 0x62 - i++ - i = encodeVarintGenerated(data, i, uint64(m.FlexVolume.Size())) - n99, err := m.FlexVolume.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n99 - } - if m.AzureFile != nil { - data[i] = 0x6a - i++ - i = encodeVarintGenerated(data, i, uint64(m.AzureFile.Size())) - n100, err := m.AzureFile.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n100 - } - if m.VsphereVolume != nil { - data[i] = 0x72 - i++ - i = encodeVarintGenerated(data, i, uint64(m.VsphereVolume.Size())) - n101, err := m.VsphereVolume.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n101 - } - return i, nil -} - -func (m *PersistentVolumeSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PersistentVolumeSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Capacity) > 0 { - for k := range m.Capacity { - data[i] = 0xa - i++ - v := m.Capacity[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n102, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n102 - } - } - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.PersistentVolumeSource.Size())) - n103, err := m.PersistentVolumeSource.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n103 - if 
len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - data[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if m.ClaimRef != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ClaimRef.Size())) - n104, err := m.ClaimRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n104 - } - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.PersistentVolumeReclaimPolicy))) - i += copy(data[i:], m.PersistentVolumeReclaimPolicy) - return i, nil -} - -func (m *PersistentVolumeStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PersistentVolumeStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) - i += copy(data[i:], m.Phase) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - return i, nil -} - -func (m *Pod) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Pod) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n105, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n105 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n106, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n106 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n107, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n107 - return i, nil -} - -func (m *PodAffinity) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodAffinity) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PodAffinityTerm) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodAffinityTerm) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.LabelSelector != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.LabelSelector.Size())) 
- n108, err := m.LabelSelector.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n108 - } - if len(m.Namespaces) > 0 { - for _, s := range m.Namespaces { - data[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.TopologyKey))) - i += copy(data[i:], m.TopologyKey) - return i, nil -} - -func (m *PodAntiAffinity) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodAntiAffinity) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PodAttachOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodAttachOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - if m.Stdin { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x10 - i++ - if m.Stdout { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x18 - i++ - if m.Stderr { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x20 - i++ - if m.TTY { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Container))) - i += copy(data[i:], m.Container) - return i, nil -} - -func (m *PodCondition) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodCondition) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i += copy(data[i:], m.Status) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) - n109, err := m.LastProbeTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n109 - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n110, err := m.LastTransitionTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n110 - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - return i, nil -} - -func (m *PodExecOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := 
m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodExecOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - if m.Stdin { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x10 - i++ - if m.Stdout { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x18 - i++ - if m.Stderr { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x20 - i++ - if m.TTY { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Container))) - i += copy(data[i:], m.Container) - if len(m.Command) > 0 { - for _, s := range m.Command { - data[i] = 0x32 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - return i, nil -} - -func (m *PodList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n111, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n111 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PodLogOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodLogOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Container))) - i += copy(data[i:], m.Container) - data[i] = 0x10 - i++ - if m.Follow { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x18 - i++ - if m.Previous { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - if m.SinceSeconds != nil { - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.SinceSeconds)) - } - if m.SinceTime != nil { - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(m.SinceTime.Size())) - n112, err := m.SinceTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n112 - } - data[i] = 0x30 - i++ - if m.Timestamps { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - if m.TailLines != nil { - data[i] = 0x38 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.TailLines)) - } - if m.LimitBytes != nil { - data[i] = 0x40 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.LimitBytes)) - } - return i, nil -} - -func (m *PodProxyOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodProxyOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) - return i, nil -} - -func (m *PodSecurityContext) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - 
return data[:n], nil -} - -func (m *PodSecurityContext) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.SELinuxOptions != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size())) - n113, err := m.SELinuxOptions.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n113 - } - if m.RunAsUser != nil { - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.RunAsUser)) - } - if m.RunAsNonRoot != nil { - data[i] = 0x18 - i++ - if *m.RunAsNonRoot { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - } - if len(m.SupplementalGroups) > 0 { - for _, num := range m.SupplementalGroups { - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(num)) - } - } - if m.FSGroup != nil { - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.FSGroup)) - } - return i, nil -} - -func (m *PodSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Volumes) > 0 { - for _, msg := range m.Volumes { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Containers) > 0 { - for _, msg := range m.Containers { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.RestartPolicy))) - i += copy(data[i:], m.RestartPolicy) - if m.TerminationGracePeriodSeconds != nil { - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.TerminationGracePeriodSeconds)) - } - if m.ActiveDeadlineSeconds != nil { - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.ActiveDeadlineSeconds)) - } - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.DNSPolicy))) - i += copy(data[i:], m.DNSPolicy) - if len(m.NodeSelector) > 0 { - for k := range m.NodeSelector { - data[i] = 0x3a - i++ - v := m.NodeSelector[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - data[i] = 0x42 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ServiceAccountName))) - i += copy(data[i:], m.ServiceAccountName) - data[i] = 0x4a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.DeprecatedServiceAccount))) - i += copy(data[i:], m.DeprecatedServiceAccount) - data[i] = 0x52 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.NodeName))) - i += copy(data[i:], m.NodeName) - data[i] = 0x58 - i++ - if m.HostNetwork { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x60 - i++ - if m.HostPID { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x68 - i++ - if m.HostIPC { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - if m.SecurityContext != nil { - data[i] = 0x72 - i++ - i = encodeVarintGenerated(data, i, uint64(m.SecurityContext.Size())) - n114, err := m.SecurityContext.MarshalTo(data[i:]) - if err != nil { - 
return 0, err - } - i += n114 - } - if len(m.ImagePullSecrets) > 0 { - for _, msg := range m.ImagePullSecrets { - data[i] = 0x7a - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - data[i] = 0x82 - i++ - data[i] = 0x1 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Hostname))) - i += copy(data[i:], m.Hostname) - data[i] = 0x8a - i++ - data[i] = 0x1 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Subdomain))) - i += copy(data[i:], m.Subdomain) - return i, nil -} - -func (m *PodStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Phase))) - i += copy(data[i:], m.Phase) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.HostIP))) - i += copy(data[i:], m.HostIP) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.PodIP))) - i += copy(data[i:], m.PodIP) - if m.StartTime != nil { - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size())) - n115, err := m.StartTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n115 - } - if len(m.ContainerStatuses) > 0 { - for _, msg := range m.ContainerStatuses { - data[i] = 0x42 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PodStatusResult) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodStatusResult) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n116, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n116 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n117, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n117 - return i, nil -} - -func (m *PodTemplate) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodTemplate) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n118, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n118 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n119, err := m.Template.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i 
+= n119 - return i, nil -} - -func (m *PodTemplateList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodTemplateList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n120, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n120 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PodTemplateSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodTemplateSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n121, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n121 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n122, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n122 - return i, nil -} - -func (m *Preconditions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Preconditions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.UID != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(*m.UID))) - i += copy(data[i:], *m.UID) - } - return i, nil -} - -func (m *PreferredSchedulingTerm) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PreferredSchedulingTerm) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Weight)) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Preference.Size())) - n123, err := m.Preference.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n123 - return i, nil -} - -func (m *Probe) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Probe) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.Handler.Size())) - n124, err := m.Handler.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n124 - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.InitialDelaySeconds)) - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.TimeoutSeconds)) - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(m.PeriodSeconds)) - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(m.SuccessThreshold)) - data[i] = 0x30 - i++ - i = encodeVarintGenerated(data, i, uint64(m.FailureThreshold)) - return i, nil -} - -func (m *RBDVolumeSource) Marshal() (data []byte, err error) { - size 
:= m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *RBDVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.CephMonitors) > 0 { - for _, s := range m.CephMonitors { - data[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.RBDImage))) - i += copy(data[i:], m.RBDImage) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) - i += copy(data[i:], m.FSType) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.RBDPool))) - i += copy(data[i:], m.RBDPool) - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.RadosUser))) - i += copy(data[i:], m.RadosUser) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Keyring))) - i += copy(data[i:], m.Keyring) - if m.SecretRef != nil { - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) - n125, err := m.SecretRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n125 - } - data[i] = 0x40 - i++ - if m.ReadOnly { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *RangeAllocation) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *RangeAllocation) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n126, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n126 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Range))) - i += copy(data[i:], m.Range) - if m.Data != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Data))) - i += copy(data[i:], m.Data) - } - return i, nil -} - -func (m *ReplicationController) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ReplicationController) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n127, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n127 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n128, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n128 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n129, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n129 - return i, nil -} - -func (m *ReplicationControllerList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ReplicationControllerList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n130, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, 
err - } - i += n130 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ReplicationControllerSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ReplicationControllerSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Replicas != nil { - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) - } - if len(m.Selector) > 0 { - for k := range m.Selector { - data[i] = 0x12 - i++ - v := m.Selector[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - if m.Template != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n131, err := m.Template.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n131 - } - return i, nil -} - -func (m *ReplicationControllerStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ReplicationControllerStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.FullyLabeledReplicas)) - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) - return i, nil -} - -func (m *ResourceFieldSelector) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ResourceFieldSelector) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContainerName))) - i += copy(data[i:], m.ContainerName) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Resource))) - i += copy(data[i:], m.Resource) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Divisor.Size())) - n132, err := m.Divisor.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n132 - return i, nil -} - -func (m *ResourceQuota) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ResourceQuota) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n133, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n133 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n134, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n134 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, 
uint64(m.Status.Size())) - n135, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n135 - return i, nil -} - -func (m *ResourceQuotaList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ResourceQuotaList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n136, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n136 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ResourceQuotaSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ResourceQuotaSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Hard) > 0 { - for k := range m.Hard { - data[i] = 0xa - i++ - v := m.Hard[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n137, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n137 - } - } - if len(m.Scopes) > 0 { - for _, s := range m.Scopes { - data[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - return i, nil -} - -func (m *ResourceQuotaStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ResourceQuotaStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Hard) > 0 { - for k := range m.Hard { - data[i] = 0xa - i++ - v := m.Hard[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n138, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n138 - } - } - if len(m.Used) > 0 { - for k := range m.Used { - data[i] = 0x12 - i++ - v := m.Used[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n139, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n139 - } - } - return i, nil -} - -func (m *ResourceRequirements) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, 
size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ResourceRequirements) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Limits) > 0 { - for k := range m.Limits { - data[i] = 0xa - i++ - v := m.Limits[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n140, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n140 - } - } - if len(m.Requests) > 0 { - for k := range m.Requests { - data[i] = 0x12 - i++ - v := m.Requests[k] - msgSize := (&v).Size() - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize)) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64((&v).Size())) - n141, err := (&v).MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n141 - } - } - return i, nil -} - -func (m *SELinuxOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SELinuxOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.User))) - i += copy(data[i:], m.User) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Role))) - i += copy(data[i:], m.Role) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Level))) - i += copy(data[i:], m.Level) - return i, nil -} - -func (m *Secret) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Secret) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n142, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n142 - if len(m.Data) > 0 { - for k := range m.Data { - data[i] = 0x12 - i++ - v := m.Data[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - return i, nil -} - -func (m *SecretKeySelector) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SecretKeySelector) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = 
encodeVarintGenerated(data, i, uint64(m.LocalObjectReference.Size())) - n143, err := m.LocalObjectReference.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n143 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Key))) - i += copy(data[i:], m.Key) - return i, nil -} - -func (m *SecretList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SecretList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n144, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n144 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *SecretVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SecretVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.SecretName))) - i += copy(data[i:], m.SecretName) - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *SecurityContext) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SecurityContext) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Capabilities != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.Capabilities.Size())) - n145, err := m.Capabilities.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n145 - } - if m.Privileged != nil { - data[i] = 0x10 - i++ - if *m.Privileged { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - } - if m.SELinuxOptions != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size())) - n146, err := m.SELinuxOptions.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n146 - } - if m.RunAsUser != nil { - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.RunAsUser)) - } - if m.RunAsNonRoot != nil { - data[i] = 0x28 - i++ - if *m.RunAsNonRoot { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - } - if m.ReadOnlyRootFilesystem != nil { - data[i] = 0x30 - i++ - if *m.ReadOnlyRootFilesystem { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - } - return i, nil -} - -func (m *SerializedReference) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SerializedReference) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.Reference.Size())) - n147, err := m.Reference.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n147 - return i, nil 
-} - -func (m *Service) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Service) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n148, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n148 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n149, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n149 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n150, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n150 - return i, nil -} - -func (m *ServiceAccount) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ServiceAccount) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n151, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n151 - if len(m.Secrets) > 0 { - for _, msg := range m.Secrets { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.ImagePullSecrets) > 0 { - for _, msg := range m.ImagePullSecrets { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ServiceAccountList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ServiceAccountList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n152, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n152 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ServiceList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ServiceList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n153, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n153 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ServicePort) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - 
-func (m *ServicePort) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Protocol))) - i += copy(data[i:], m.Protocol) - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Port)) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.TargetPort.Size())) - n154, err := m.TargetPort.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n154 - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(m.NodePort)) - return i, nil -} - -func (m *ServiceProxyOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ServiceProxyOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) - return i, nil -} - -func (m *ServiceSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ServiceSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Selector) > 0 { - for k := range m.Selector { - data[i] = 0x12 - i++ - v := m.Selector[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ClusterIP))) - i += copy(data[i:], m.ClusterIP) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - if len(m.ExternalIPs) > 0 { - for _, s := range m.ExternalIPs { - data[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if len(m.DeprecatedPublicIPs) > 0 { - for _, s := range m.DeprecatedPublicIPs { - data[i] = 0x32 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.SessionAffinity))) - i += copy(data[i:], m.SessionAffinity) - data[i] = 0x42 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.LoadBalancerIP))) - i += copy(data[i:], m.LoadBalancerIP) - if len(m.LoadBalancerSourceRanges) > 0 { - for _, s := range m.LoadBalancerSourceRanges { - data[i] = 0x4a - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - return i, nil -} - -func (m *ServiceStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := 
m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ServiceStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.LoadBalancer.Size())) - n155, err := m.LoadBalancer.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n155 - return i, nil -} - -func (m *TCPSocketAction) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *TCPSocketAction) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.Port.Size())) - n156, err := m.Port.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n156 - return i, nil -} - -func (m *Taint) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Taint) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Key))) - i += copy(data[i:], m.Key) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Value))) - i += copy(data[i:], m.Value) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Effect))) - i += copy(data[i:], m.Effect) - return i, nil -} - -func (m *Toleration) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Toleration) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Key))) - i += copy(data[i:], m.Key) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) - i += copy(data[i:], m.Operator) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Value))) - i += copy(data[i:], m.Value) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Effect))) - i += copy(data[i:], m.Effect) - return i, nil -} - -func (m *Volume) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Volume) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.VolumeSource.Size())) - n157, err := m.VolumeSource.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n157 - return i, nil -} - -func (m *VolumeMount) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *VolumeMount) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x10 - i++ - if m.ReadOnly { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.MountPath))) - i 
+= copy(data[i:], m.MountPath) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.SubPath))) - i += copy(data[i:], m.SubPath) - return i, nil -} - -func (m *VolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *VolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.HostPath != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.HostPath.Size())) - n158, err := m.HostPath.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n158 - } - if m.EmptyDir != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.EmptyDir.Size())) - n159, err := m.EmptyDir.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n159 - } - if m.GCEPersistentDisk != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.GCEPersistentDisk.Size())) - n160, err := m.GCEPersistentDisk.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n160 - } - if m.AWSElasticBlockStore != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.AWSElasticBlockStore.Size())) - n161, err := m.AWSElasticBlockStore.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n161 - } - if m.GitRepo != nil { - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(m.GitRepo.Size())) - n162, err := m.GitRepo.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n162 - } - if m.Secret != nil { - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Secret.Size())) - n163, err := m.Secret.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n163 - } - if m.NFS != nil { - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(m.NFS.Size())) - n164, err := m.NFS.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n164 - } - if m.ISCSI != nil { - data[i] = 0x42 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ISCSI.Size())) - n165, err := m.ISCSI.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n165 - } - if m.Glusterfs != nil { - data[i] = 0x4a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Glusterfs.Size())) - n166, err := m.Glusterfs.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n166 - } - if m.PersistentVolumeClaim != nil { - data[i] = 0x52 - i++ - i = encodeVarintGenerated(data, i, uint64(m.PersistentVolumeClaim.Size())) - n167, err := m.PersistentVolumeClaim.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n167 - } - if m.RBD != nil { - data[i] = 0x5a - i++ - i = encodeVarintGenerated(data, i, uint64(m.RBD.Size())) - n168, err := m.RBD.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n168 - } - if m.FlexVolume != nil { - data[i] = 0x62 - i++ - i = encodeVarintGenerated(data, i, uint64(m.FlexVolume.Size())) - n169, err := m.FlexVolume.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n169 - } - if m.Cinder != nil { - data[i] = 0x6a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Cinder.Size())) - n170, err := m.Cinder.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n170 - } - if m.CephFS != nil { - data[i] = 0x72 - i++ - i = encodeVarintGenerated(data, i, uint64(m.CephFS.Size())) - n171, err := m.CephFS.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n171 - } - if m.Flocker != nil { - data[i] = 0x7a - i++ - i = 
encodeVarintGenerated(data, i, uint64(m.Flocker.Size())) - n172, err := m.Flocker.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n172 - } - if m.DownwardAPI != nil { - data[i] = 0x82 - i++ - data[i] = 0x1 - i++ - i = encodeVarintGenerated(data, i, uint64(m.DownwardAPI.Size())) - n173, err := m.DownwardAPI.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n173 - } - if m.FC != nil { - data[i] = 0x8a - i++ - data[i] = 0x1 - i++ - i = encodeVarintGenerated(data, i, uint64(m.FC.Size())) - n174, err := m.FC.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n174 - } - if m.AzureFile != nil { - data[i] = 0x92 - i++ - data[i] = 0x1 - i++ - i = encodeVarintGenerated(data, i, uint64(m.AzureFile.Size())) - n175, err := m.AzureFile.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n175 - } - if m.ConfigMap != nil { - data[i] = 0x9a - i++ - data[i] = 0x1 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ConfigMap.Size())) - n176, err := m.ConfigMap.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n176 - } - if m.VsphereVolume != nil { - data[i] = 0xa2 - i++ - data[i] = 0x1 - i++ - i = encodeVarintGenerated(data, i, uint64(m.VsphereVolume.Size())) - n177, err := m.VsphereVolume.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n177 - } - return i, nil -} - -func (m *VsphereVirtualDiskVolumeSource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *VsphereVirtualDiskVolumeSource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.VolumePath))) - i += copy(data[i:], m.VolumePath) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FSType))) - i += copy(data[i:], m.FSType) - return i, nil -} - -func (m *WeightedPodAffinityTerm) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *WeightedPodAffinityTerm) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Weight)) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.PodAffinityTerm.Size())) - n178, err := m.PodAffinityTerm.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n178 - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *AWSElasticBlockStoreVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.VolumeID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + 
sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Partition)) - n += 2 - return n -} - -func (m *Affinity) Size() (n int) { - var l int - _ = l - if m.NodeAffinity != nil { - l = m.NodeAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.PodAffinity != nil { - l = m.PodAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.PodAntiAffinity != nil { - l = m.PodAntiAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *AttachedVolume) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DevicePath) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AzureFileVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ShareName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *Binding) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Target.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Capabilities) Size() (n int) { - var l int - _ = l - if len(m.Add) > 0 { - for _, s := range m.Add { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Drop) > 0 { - for _, s := range m.Drop { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *CephFSVolumeSource) Size() (n int) { - var l int - _ = l - if len(m.Monitors) > 0 { - for _, s := range m.Monitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SecretFile) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - return n -} - -func (m *CinderVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.VolumeID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *ComponentCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Error) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ComponentStatus) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ComponentStatusList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ConfigMap) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Data) > 0 { - for k, v := range m.Data { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *ConfigMapKeySelector) Size() (n int) { - var l int - _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ConfigMapList) 
Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ConfigMapVolumeSource) Size() (n int) { - var l int - _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Container) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Image) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Command) > 0 { - for _, s := range m.Command { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Args) > 0 { - for _, s := range m.Args { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.WorkingDir) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Env) > 0 { - for _, e := range m.Env { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.Resources.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.VolumeMounts) > 0 { - for _, e := range m.VolumeMounts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.LivenessProbe != nil { - l = m.LivenessProbe.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ReadinessProbe != nil { - l = m.ReadinessProbe.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Lifecycle != nil { - l = m.Lifecycle.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.TerminationMessagePath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ImagePullPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecurityContext != nil { - l = m.SecurityContext.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 3 - n += 3 - n += 3 - return n -} - -func (m *ContainerImage) Size() (n int) { - var l int - _ = l - if len(m.Names) > 0 { - for _, s := range m.Names { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 1 + sovGenerated(uint64(m.SizeBytes)) - return n -} - -func (m *ContainerPort) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.HostPort)) - n += 1 + sovGenerated(uint64(m.ContainerPort)) - l = len(m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.HostIP) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ContainerState) Size() (n int) { - var l int - _ = l - if m.Waiting != nil { - l = m.Waiting.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Running != nil { - l = m.Running.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Terminated != nil { - l = m.Terminated.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ContainerStateRunning) Size() (n int) { - var l int - _ = l - l = m.StartedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ContainerStateTerminated) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ExitCode)) - n += 1 + sovGenerated(uint64(m.Signal)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = m.StartedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FinishedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ContainerID) - n += 1 + l + sovGenerated(uint64(l)) - 
return n -} - -func (m *ContainerStateWaiting) Size() (n int) { - var l int - _ = l - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ContainerStatus) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.State.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTerminationState.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 1 + sovGenerated(uint64(m.RestartCount)) - l = len(m.Image) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ImageID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ContainerID) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DaemonEndpoint) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Port)) - return n -} - -func (m *DeleteOptions) Size() (n int) { - var l int - _ = l - if m.GracePeriodSeconds != nil { - n += 1 + sovGenerated(uint64(*m.GracePeriodSeconds)) - } - if m.Preconditions != nil { - l = m.Preconditions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.OrphanDependents != nil { - n += 2 - } - return n -} - -func (m *DownwardAPIVolumeFile) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if m.FieldRef != nil { - l = m.FieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ResourceFieldRef != nil { - l = m.ResourceFieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *DownwardAPIVolumeSource) Size() (n int) { - var l int - _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *EmptyDirVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Medium) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *EndpointAddress) Size() (n int) { - var l int - _ = l - l = len(m.IP) - n += 1 + l + sovGenerated(uint64(l)) - if m.TargetRef != nil { - l = m.TargetRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Hostname) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *EndpointPort) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Port)) - l = len(m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *EndpointSubset) Size() (n int) { - var l int - _ = l - if len(m.Addresses) > 0 { - for _, e := range m.Addresses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.NotReadyAddresses) > 0 { - for _, e := range m.NotReadyAddresses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Endpoints) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Subsets) > 0 { - for _, e := range m.Subsets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *EndpointsList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *EnvVar) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - if m.ValueFrom != nil 
{ - l = m.ValueFrom.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *EnvVarSource) Size() (n int) { - var l int - _ = l - if m.FieldRef != nil { - l = m.FieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ResourceFieldRef != nil { - l = m.ResourceFieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ConfigMapKeyRef != nil { - l = m.ConfigMapKeyRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SecretKeyRef != nil { - l = m.SecretKeyRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *Event) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.InvolvedObject.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Source.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FirstTimestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTimestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Count)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *EventList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *EventSource) Size() (n int) { - var l int - _ = l - l = len(m.Component) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ExecAction) Size() (n int) { - var l int - _ = l - if len(m.Command) > 0 { - for _, s := range m.Command { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ExportOptions) Size() (n int) { - var l int - _ = l - n += 2 - n += 2 - return n -} - -func (m *FCVolumeSource) Size() (n int) { - var l int - _ = l - if len(m.TargetWWNs) > 0 { - for _, s := range m.TargetWWNs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Lun != nil { - n += 1 + sovGenerated(uint64(*m.Lun)) - } - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *FlexVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - if len(m.Options) > 0 { - for k, v := range m.Options { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *FlockerVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.DatasetName) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *GCEPersistentDiskVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.PDName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Partition)) - n += 2 - return n -} - -func (m *GitRepoVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Repository) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Revision) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Directory) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *GlusterfsVolumeSource) Size() (n int) { - var l int 
- _ = l - l = len(m.EndpointsName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *HTTPGetAction) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Port.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Scheme) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.HTTPHeaders) > 0 { - for _, e := range m.HTTPHeaders { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HTTPHeader) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Handler) Size() (n int) { - var l int - _ = l - if m.Exec != nil { - l = m.Exec.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.HTTPGet != nil { - l = m.HTTPGet.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TCPSocket != nil { - l = m.TCPSocket.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *HostPathVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ISCSIVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.TargetPortal) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.IQN) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Lun)) - l = len(m.ISCSIInterface) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *KeyToPath) Size() (n int) { - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Lifecycle) Size() (n int) { - var l int - _ = l - if m.PostStart != nil { - l = m.PostStart.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.PreStop != nil { - l = m.PreStop.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *LimitRange) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *LimitRangeItem) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Max) > 0 { - for k, v := range m.Max { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Min) > 0 { - for k, v := range m.Min { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Default) > 0 { - for k, v := range m.Default { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.DefaultRequest) > 0 { - for k, v := range m.DefaultRequest { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.MaxLimitRequestRatio) > 0 { - for k, v := range m.MaxLimitRequestRatio { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + 
len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *LimitRangeList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *LimitRangeSpec) Size() (n int) { - var l int - _ = l - if len(m.Limits) > 0 { - for _, e := range m.Limits { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *List) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ListOptions) Size() (n int) { - var l int - _ = l - l = len(m.LabelSelector) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FieldSelector) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.ResourceVersion) - n += 1 + l + sovGenerated(uint64(l)) - if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) - } - return n -} - -func (m *LoadBalancerIngress) Size() (n int) { - var l int - _ = l - l = len(m.IP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Hostname) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *LoadBalancerStatus) Size() (n int) { - var l int - _ = l - if len(m.Ingress) > 0 { - for _, e := range m.Ingress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *LocalObjectReference) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NFSVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Server) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *Namespace) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NamespaceList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *NamespaceSpec) Size() (n int) { - var l int - _ = l - if len(m.Finalizers) > 0 { - for _, s := range m.Finalizers { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *NamespaceStatus) Size() (n int) { - var l int - _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Node) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NodeAddress) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Address) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NodeAffinity) Size() (n int) { - var l int - _ = l - if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { - l = m.RequiredDuringSchedulingIgnoredDuringExecution.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if 
len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *NodeCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastHeartbeatTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NodeDaemonEndpoints) Size() (n int) { - var l int - _ = l - l = m.KubeletEndpoint.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NodeList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *NodeProxyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NodeSelector) Size() (n int) { - var l int - _ = l - if len(m.NodeSelectorTerms) > 0 { - for _, e := range m.NodeSelectorTerms { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *NodeSelectorRequirement) Size() (n int) { - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operator) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *NodeSelectorTerm) Size() (n int) { - var l int - _ = l - if len(m.MatchExpressions) > 0 { - for _, e := range m.MatchExpressions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *NodeSpec) Size() (n int) { - var l int - _ = l - l = len(m.PodCIDR) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ExternalID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ProviderID) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *NodeStatus) Size() (n int) { - var l int - _ = l - if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Allocatable) > 0 { - for k, v := range m.Allocatable { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Addresses) > 0 { - for _, e := range m.Addresses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.DaemonEndpoints.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.NodeInfo.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Images) > 0 { - for _, e := range m.Images { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.VolumesInUse) > 0 { - for _, s := range m.VolumesInUse { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.VolumesAttached) > 0 { - for _, e := range 
m.VolumesAttached { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *NodeSystemInfo) Size() (n int) { - var l int - _ = l - l = len(m.MachineID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SystemUUID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.BootID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KernelVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.OSImage) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ContainerRuntimeVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KubeletVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KubeProxyVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.OperatingSystem) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Architecture) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ObjectFieldSelector) Size() (n int) { - var l int - _ = l - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FieldPath) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ObjectMeta) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.GenerateName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SelfLink) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ResourceVersion) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Generation)) - l = m.CreationTimestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.DeletionTimestamp != nil { - l = m.DeletionTimestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.DeletionGracePeriodSeconds != nil { - n += 1 + sovGenerated(uint64(*m.DeletionGracePeriodSeconds)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Annotations) > 0 { - for k, v := range m.Annotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.OwnerReferences) > 0 { - for _, e := range m.OwnerReferences { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Finalizers) > 0 { - for _, s := range m.Finalizers { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ObjectReference) Size() (n int) { - var l int - _ = l - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ResourceVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FieldPath) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *OwnerReference) Size() (n int) { - var l int - _ = l - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - if m.Controller != nil { - n += 2 - } - return n -} - -func (m *PersistentVolume) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PersistentVolumeClaim) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PersistentVolumeClaimList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PersistentVolumeClaimSpec) Size() (n int) { - var l int - _ = l - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.Resources.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *PersistentVolumeClaimStatus) Size() (n int) { - var l int - _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *PersistentVolumeClaimVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.ClaimName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *PersistentVolumeList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PersistentVolumeSource) Size() (n int) { - var l int - _ = l - if m.GCEPersistentDisk != nil { - l = m.GCEPersistentDisk.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AWSElasticBlockStore != nil { - l = m.AWSElasticBlockStore.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.HostPath != nil { - l = m.HostPath.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Glusterfs != nil { - l = m.Glusterfs.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NFS != nil { - l = m.NFS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RBD != nil { - l = m.RBD.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ISCSI != nil { - l = m.ISCSI.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Cinder != nil { - l = m.Cinder.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CephFS != nil { - l = m.CephFS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.FC != nil { - l = m.FC.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Flocker != nil { - l = m.Flocker.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.FlexVolume != nil { - l = m.FlexVolume.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AzureFile != nil { - l = m.AzureFile.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.VsphereVolume != nil { - l = m.VsphereVolume.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *PersistentVolumeSpec) Size() (n int) { - var l int - _ = l 
- if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = m.PersistentVolumeSource.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.ClaimRef != nil { - l = m.ClaimRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.PersistentVolumeReclaimPolicy) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PersistentVolumeStatus) Size() (n int) { - var l int - _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Pod) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodAffinity) Size() (n int) { - var l int - _ = l - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodAffinityTerm) Size() (n int) { - var l int - _ = l - if m.LabelSelector != nil { - l = m.LabelSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Namespaces) > 0 { - for _, s := range m.Namespaces { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.TopologyKey) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodAntiAffinity) Size() (n int) { - var l int - _ = l - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodAttachOptions) Size() (n int) { - var l int - _ = l - n += 2 - n += 2 - n += 2 - n += 2 - l = len(m.Container) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastProbeTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodExecOptions) Size() (n int) { - var l int - _ = l - n += 2 - n += 2 - n += 2 - n += 2 - l = len(m.Container) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Command) > 0 { - for _, s := range m.Command { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 
{ - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodLogOptions) Size() (n int) { - var l int - _ = l - l = len(m.Container) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 2 - if m.SinceSeconds != nil { - n += 1 + sovGenerated(uint64(*m.SinceSeconds)) - } - if m.SinceTime != nil { - l = m.SinceTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - if m.TailLines != nil { - n += 1 + sovGenerated(uint64(*m.TailLines)) - } - if m.LimitBytes != nil { - n += 1 + sovGenerated(uint64(*m.LimitBytes)) - } - return n -} - -func (m *PodProxyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodSecurityContext) Size() (n int) { - var l int - _ = l - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RunAsUser != nil { - n += 1 + sovGenerated(uint64(*m.RunAsUser)) - } - if m.RunAsNonRoot != nil { - n += 2 - } - if len(m.SupplementalGroups) > 0 { - for _, e := range m.SupplementalGroups { - n += 1 + sovGenerated(uint64(e)) - } - } - if m.FSGroup != nil { - n += 1 + sovGenerated(uint64(*m.FSGroup)) - } - return n -} - -func (m *PodSpec) Size() (n int) { - var l int - _ = l - if len(m.Volumes) > 0 { - for _, e := range m.Volumes { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Containers) > 0 { - for _, e := range m.Containers { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.RestartPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.TerminationGracePeriodSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds)) - } - if m.ActiveDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) - } - l = len(m.DNSPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.NodeSelector) > 0 { - for k, v := range m.NodeSelector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.ServiceAccountName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DeprecatedServiceAccount) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.NodeName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 2 - n += 2 - if m.SecurityContext != nil { - l = m.SecurityContext.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.ImagePullSecrets) > 0 { - for _, e := range m.ImagePullSecrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.Hostname) - n += 2 + l + sovGenerated(uint64(l)) - l = len(m.Subdomain) - n += 2 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodStatus) Size() (n int) { - var l int - _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.HostIP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.PodIP) - n += 1 + l + sovGenerated(uint64(l)) - if m.StartTime != nil { - l = m.StartTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.ContainerStatuses) > 0 { - for _, e := range m.ContainerStatuses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodStatusResult) Size() (n int) { - var l int - _ 
= l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodTemplate) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodTemplateList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodTemplateSpec) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Preconditions) Size() (n int) { - var l int - _ = l - if m.UID != nil { - l = len(*m.UID) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *PreferredSchedulingTerm) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Weight)) - l = m.Preference.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Probe) Size() (n int) { - var l int - _ = l - l = m.Handler.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.InitialDelaySeconds)) - n += 1 + sovGenerated(uint64(m.TimeoutSeconds)) - n += 1 + sovGenerated(uint64(m.PeriodSeconds)) - n += 1 + sovGenerated(uint64(m.SuccessThreshold)) - n += 1 + sovGenerated(uint64(m.FailureThreshold)) - return n -} - -func (m *RBDVolumeSource) Size() (n int) { - var l int - _ = l - if len(m.CephMonitors) > 0 { - for _, s := range m.CephMonitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.RBDImage) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RBDPool) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RadosUser) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Keyring) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - return n -} - -func (m *RangeAllocation) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Range) - n += 1 + l + sovGenerated(uint64(l)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ReplicationController) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ReplicationControllerList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ReplicationControllerSpec) Size() (n int) { - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.Template != nil { - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ReplicationControllerStatus) Size() (n int) 
{ - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - return n -} - -func (m *ResourceFieldSelector) Size() (n int) { - var l int - _ = l - l = len(m.ContainerName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Resource) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Divisor.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceQuota) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceQuotaList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceQuotaSpec) Size() (n int) { - var l int - _ = l - if len(m.Hard) > 0 { - for k, v := range m.Hard { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Scopes) > 0 { - for _, s := range m.Scopes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceQuotaStatus) Size() (n int) { - var l int - _ = l - if len(m.Hard) > 0 { - for k, v := range m.Hard { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Used) > 0 { - for k, v := range m.Used { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *ResourceRequirements) Size() (n int) { - var l int - _ = l - if len(m.Limits) > 0 { - for k, v := range m.Limits { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Requests) > 0 { - for k, v := range m.Requests { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *SELinuxOptions) Size() (n int) { - var l int - _ = l - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Role) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Level) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Secret) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Data) > 0 { - for k, v := range m.Data { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SecretKeySelector) Size() (n int) { - var l int - _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - 
-func (m *SecretList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *SecretVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *SecurityContext) Size() (n int) { - var l int - _ = l - if m.Capabilities != nil { - l = m.Capabilities.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Privileged != nil { - n += 2 - } - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RunAsUser != nil { - n += 1 + sovGenerated(uint64(*m.RunAsUser)) - } - if m.RunAsNonRoot != nil { - n += 2 - } - if m.ReadOnlyRootFilesystem != nil { - n += 2 - } - return n -} - -func (m *SerializedReference) Size() (n int) { - var l int - _ = l - l = m.Reference.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Service) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ServiceAccount) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Secrets) > 0 { - for _, e := range m.Secrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.ImagePullSecrets) > 0 { - for _, e := range m.ImagePullSecrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ServiceAccountList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ServiceList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ServicePort) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Port)) - l = m.TargetPort.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.NodePort)) - return n -} - -func (m *ServiceProxyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ServiceSpec) Size() (n int) { - var l int - _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.ClusterIP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.ExternalIPs) > 0 { - for _, s := range m.ExternalIPs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.DeprecatedPublicIPs) > 0 { - for _, s := range 
m.DeprecatedPublicIPs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.SessionAffinity) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.LoadBalancerIP) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.LoadBalancerSourceRanges) > 0 { - for _, s := range m.LoadBalancerSourceRanges { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ServiceStatus) Size() (n int) { - var l int - _ = l - l = m.LoadBalancer.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *TCPSocketAction) Size() (n int) { - var l int - _ = l - l = m.Port.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Taint) Size() (n int) { - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Effect) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Toleration) Size() (n int) { - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operator) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Effect) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Volume) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.VolumeSource.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *VolumeMount) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.MountPath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SubPath) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *VolumeSource) Size() (n int) { - var l int - _ = l - if m.HostPath != nil { - l = m.HostPath.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.EmptyDir != nil { - l = m.EmptyDir.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.GCEPersistentDisk != nil { - l = m.GCEPersistentDisk.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AWSElasticBlockStore != nil { - l = m.AWSElasticBlockStore.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.GitRepo != nil { - l = m.GitRepo.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Secret != nil { - l = m.Secret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NFS != nil { - l = m.NFS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ISCSI != nil { - l = m.ISCSI.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Glusterfs != nil { - l = m.Glusterfs.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.PersistentVolumeClaim != nil { - l = m.PersistentVolumeClaim.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RBD != nil { - l = m.RBD.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.FlexVolume != nil { - l = m.FlexVolume.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Cinder != nil { - l = m.Cinder.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CephFS != nil { - l = m.CephFS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Flocker != nil { - l = m.Flocker.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.DownwardAPI != nil { - l = m.DownwardAPI.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.FC != nil { - l = m.FC.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.AzureFile != nil { - l = m.AzureFile.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.ConfigMap != nil { - l = m.ConfigMap.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.VsphereVolume != nil { - l 
= m.VsphereVolume.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *VsphereVirtualDiskVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.VolumePath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *WeightedPodAffinityTerm) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Weight)) - l = m.PodAffinityTerm.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeID = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) - } - m.Partition = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Partition |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err 
:= skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Affinity) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Affinity: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Affinity: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodeAffinity == nil { - m.NodeAffinity = &NodeAffinity{} - } - if err := m.NodeAffinity.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodAffinity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PodAffinity == nil { - m.PodAffinity = &PodAffinity{} - } - if err := m.PodAffinity.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodAntiAffinity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PodAntiAffinity == nil { - m.PodAntiAffinity = &PodAntiAffinity{} - } - if err := m.PodAntiAffinity.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AttachedVolume) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AttachedVolume: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AttachedVolume: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = UniqueVolumeName(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DevicePath = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AzureFileVolumeSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AzureFileVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AzureFileVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SecretName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for 
field ShareName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ShareName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Binding) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Binding: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Binding: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Target.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Capabilities) Unmarshal(data 
[]byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Add = append(m.Add, Capability(data[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Drop = append(m.Drop, Capability(data[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CephFSVolumeSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Monitors = 
append(m.Monitors, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.User = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SecretFile = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CinderVolumeSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeID = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ComponentCondition) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 
0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = ComponentConditionType(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = ConditionStatus(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ComponentStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen 
|= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, ComponentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ComponentStatusList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComponentStatusList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ComponentStatus{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigMap) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigMap: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMap: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if 
postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - if m.Data == nil { - m.Data = make(map[string]string) - } - m.Data[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigMapKeySelector) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigMapKeySelector: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigMapList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigMapList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapList: illegal tag %d (wire type %d)", fieldNum, wire) - 
} - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ConfigMap{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigMapVolumeSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigMapVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, KeyToPath{}) - 
if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Container) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Container: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Image = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Command = append(m.Command, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.Args = append(m.Args, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.WorkingDir = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = append(m.Ports, ContainerPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = append(m.Env, EnvVar{}) - if err := m.Env[len(m.Env)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resources.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeMounts = append(m.VolumeMounts, VolumeMount{}) - if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", 
wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LivenessProbe == nil { - m.LivenessProbe = &Probe{} - } - if err := m.LivenessProbe.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ReadinessProbe == nil { - m.ReadinessProbe = &Probe{} - } - if err := m.ReadinessProbe.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Lifecycle == nil { - m.Lifecycle = &Lifecycle{} - } - if err := m.Lifecycle.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TerminationMessagePath = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ImagePullPolicy = PullPolicy(data[iNdEx:postIndex]) - iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecurityContext == nil { - m.SecurityContext = &SecurityContext{} - } - if err := m.SecurityContext.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Stdin = bool(v != 0) - case 17: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.StdinOnce = bool(v != 0) - case 18: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TTY = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ContainerImage) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Names = append(m.Names, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType) - } - m.SizeBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.SizeBytes |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ContainerPort) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) - } - m.HostPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.HostPort |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) - } - m.ContainerPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.ContainerPort |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Protocol = Protocol(data[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { 
- break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostIP = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ContainerState) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Waiting == nil { - m.Waiting = &ContainerStateWaiting{} - } - if err := m.Waiting.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Running", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Running == nil { - m.Running = &ContainerStateRunning{} - } - if err := m.Running.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminated", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Terminated == nil { - m.Terminated = &ContainerStateTerminated{} - } - if err := m.Terminated.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - 
return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ContainerStateRunning) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerStateRunning: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStateRunning: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.StartedAt.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ContainerStateTerminated) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerStateTerminated: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStateTerminated: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) - } - m.ExitCode = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.ExitCode |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) - } - m.Signal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Signal |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.StartedAt.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.FinishedAt.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ContainerStateWaiting) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) 
<< shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerStateWaiting: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStateWaiting: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ContainerStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.State.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTerminationState", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTerminationState.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Ready = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RestartCount", wireType) - } - m.RestartCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.RestartCount |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Image = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ImageID = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - 
if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonEndpoint) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonEndpoint: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonEndpoint: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Port |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteOptions) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GracePeriodSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.GracePeriodSeconds = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Preconditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Preconditions == nil { - m.Preconditions = &Preconditions{} - } - if err := 
m.Preconditions.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OrphanDependents", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.OrphanDependents = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DownwardAPIVolumeFile) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DownwardAPIVolumeFile: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIVolumeFile: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FieldRef == nil { - m.FieldRef = &ObjectFieldSelector{} - } - if err := m.FieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ResourceFieldRef == nil { - m.ResourceFieldRef = &ResourceFieldSelector{} - } - if err := m.ResourceFieldRef.Unmarshal(data[iNdEx:postIndex]); err != 
nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DownwardAPIVolumeSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, DownwardAPIVolumeFile{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EmptyDirVolumeSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EmptyDirVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EmptyDirVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Medium", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Medium = StorageMedium(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if 
skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EndpointAddress) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IP = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TargetRef == nil { - m.TargetRef = &ObjectReference{} - } - if err := m.TargetRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hostname = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EndpointPort) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break 
- } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Port |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Protocol = Protocol(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EndpointSubset) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addresses = append(m.Addresses, EndpointAddress{}) - if err := 
m.Addresses[len(m.Addresses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{}) - if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = append(m.Ports, EndpointPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Endpoints) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - 
return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subsets = append(m.Subsets, EndpointSubset{}) - if err := m.Subsets[len(m.Subsets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EndpointsList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Endpoints{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EnvVar) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", 
fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ValueFrom == nil { - m.ValueFrom = &EnvVarSource{} - } - if err := m.ValueFrom.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EnvVarSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FieldRef == nil { - m.FieldRef = 
&ObjectFieldSelector{} - } - if err := m.FieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ResourceFieldRef == nil { - m.ResourceFieldRef = &ResourceFieldSelector{} - } - if err := m.ResourceFieldRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfigMapKeyRef == nil { - m.ConfigMapKeyRef = &ConfigMapKeySelector{} - } - if err := m.ConfigMapKeyRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretKeyRef == nil { - m.SecretKeyRef = &SecretKeySelector{} - } - if err := m.SecretKeyRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Event) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Event: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InvolvedObject", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.InvolvedObject.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Source.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FirstTimestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.FirstTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field LastTimestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Count |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift 
- if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Event{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Component", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Component = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Host = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecAction) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecAction: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecAction: illegal tag %d (wire 
type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Command = append(m.Command, string(data[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExportOptions) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Export", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Export = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Exact = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FCVolumeSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FCVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FCVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field TargetWWNs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TargetWWNs = append(m.TargetWWNs, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Lun = &v - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FlexVolumeSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FlexVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FlexVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + 
intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift 
:= uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - if m.Options == nil { - m.Options = make(map[string]string) - } - m.Options[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FlockerVolumeSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FlockerVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FlockerVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DatasetName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DatasetName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GCEPersistentDiskVolumeSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PDName", wireType) - } - var 
stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PDName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) - } - m.Partition = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Partition |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GitRepoVolumeSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GitRepoVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GitRepoVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Repository = 
string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Revision = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Directory", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Directory = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GlusterfsVolumeSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GlusterfsVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GlusterfsVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndpointsName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EndpointsName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.Path = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPGetAction) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPGetAction: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPGetAction: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Port.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Host = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scheme", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { 
- return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Scheme = URIScheme(data[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTPHeaders", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HTTPHeaders = append(m.HTTPHeaders, HTTPHeader{}) - if err := m.HTTPHeaders[len(m.HTTPHeaders)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPHeader) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPHeader: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPHeader: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return 
ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Handler) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Handler: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Handler: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Exec == nil { - m.Exec = &ExecAction{} - } - if err := m.Exec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTPGet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HTTPGet == nil { - m.HTTPGet = &HTTPGetAction{} - } - if err := m.HTTPGet.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TCPSocket", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TCPSocket == nil { - m.TCPSocket = &TCPSocketAction{} - } - if err := m.TCPSocket.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HostPathVolumeSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= 
(uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HostPathVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HostPathVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ISCSIVolumeSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ISCSIVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ISCSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TargetPortal = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IQN = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) - } - m.Lun = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Lun |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ISCSIInterface = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *KeyToPath) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KeyToPath: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KeyToPath: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen 
uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Lifecycle) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Lifecycle: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Lifecycle: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PostStart", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PostStart == nil { - m.PostStart = &Handler{} - } - if err := m.PostStart.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreStop", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PreStop == nil { - m.PreStop = &Handler{} - } - if err := m.PreStop.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LimitRange) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break 
- } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LimitRange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LimitRange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LimitRangeItem) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LimitRangeItem: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LimitRangeItem: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = LimitType(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } 
- } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - if m.Max == nil { - m.Max = make(ResourceList) - } - m.Max[ResourceName(mapkey)] = *mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - if m.Min == nil { - m.Min = make(ResourceList) - } - m.Min[ResourceName(mapkey)] = *mapvalue - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - if m.Default == nil { - m.Default = make(ResourceList) - } - m.Default[ResourceName(mapkey)] = *mapvalue - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultRequest", wireType) - } - var msglen int - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - if m.DefaultRequest == nil { - m.DefaultRequest = make(ResourceList) - } - m.DefaultRequest[ResourceName(mapkey)] = *mapvalue - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxLimitRequestRatio", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + 
[... elided hunk: removal of vendored, machine-generated gogo-protobuf Unmarshal methods from a vendored Kubernetes API generated.pb.go. The removed code in this span covers the tail of LimitRangeItem (MaxLimitRequestRatio ResourceList map decoding) and the full Unmarshal implementations for LimitRangeList, LimitRangeSpec, List, ListOptions, LoadBalancerIngress, LoadBalancerStatus, LocalObjectReference, NFSVolumeSource, Namespace, NamespaceList, NamespaceSpec, NamespaceStatus, Node, NodeAddress, NodeAffinity, NodeCondition, NodeDaemonEndpoints, NodeList, NodeProxyOptions, NodeSelector, NodeSelectorRequirement, NodeSelectorTerm, NodeSpec, and NodeStatus, continuing into NodeSystemInfo (MachineID, SystemUUID, BootID, KernelVersion). All of it follows the standard generated pattern: varint tag/length decoding, wire-type checks, field-by-field assignment, and skipGenerated for unknown fields ...]
postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.KernelVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OSImage", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OSImage = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerRuntimeVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerRuntimeVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubeletVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.KubeletVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubeProxyVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.KubeProxyVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OperatingSystem", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OperatingSystem = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType) - } - var stringLen 
uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Architecture = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ObjectFieldSelector) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ObjectFieldSelector: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ObjectFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FieldPath = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ObjectMeta) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 
3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GenerateName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SelfLink = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = k8s_io_kubernetes_pkg_types.UID(data[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d 
for field ResourceVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) - } - m.Generation = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Generation |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreationTimestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.CreationTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeletionTimestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DeletionTimestamp == nil { - m.DeletionTimestamp = &k8s_io_kubernetes_pkg_api_unversioned.Time{} - } - if err := m.DeletionTimestamp.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeletionGracePeriodSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.DeletionGracePeriodSeconds = &v - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - 
} - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - if m.Labels == nil { - m.Labels = make(map[string]string) - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { 
- break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - if m.Annotations == nil { - m.Annotations = make(map[string]string) - } - m.Annotations[mapkey] = mapvalue - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OwnerReferences = append(m.OwnerReferences, OwnerReference{}) - if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Finalizers = append(m.Finalizers, string(data[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ObjectReference) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ObjectReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { 
- return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = k8s_io_kubernetes_pkg_types.UID(data[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 
0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FieldPath = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *OwnerReference) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OwnerReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OwnerReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = k8s_io_kubernetes_pkg_types.UID(data[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << 
shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Controller = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PersistentVolume) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PersistentVolume: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolume: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - 
} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PersistentVolumeClaim) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeClaim: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeClaim: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PersistentVolumeClaimList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeClaimList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeClaimList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PersistentVolumeClaim{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PersistentVolumeClaimSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeClaimSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Resources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resources.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} - } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PersistentVolumeClaimStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeClaimStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Phase = 
PersistentVolumeClaimPhase(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - if m.Capacity == nil { - m.Capacity = make(ResourceList) - } - m.Capacity[ResourceName(mapkey)] = *mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*PersistentVolumeClaimVolumeSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeClaimVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeClaimVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClaimName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClaimName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PersistentVolumeList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PersistentVolume{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PersistentVolumeSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GCEPersistentDisk", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.GCEPersistentDisk == nil { - m.GCEPersistentDisk = &GCEPersistentDiskVolumeSource{} - } - if err := m.GCEPersistentDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AWSElasticBlockStore", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AWSElasticBlockStore == nil { - m.AWSElasticBlockStore = &AWSElasticBlockStoreVolumeSource{} - } - if err := m.AWSElasticBlockStore.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l 
{ - return io.ErrUnexpectedEOF - } - if m.HostPath == nil { - m.HostPath = &HostPathVolumeSource{} - } - if err := m.HostPath.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Glusterfs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Glusterfs == nil { - m.Glusterfs = &GlusterfsVolumeSource{} - } - if err := m.Glusterfs.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NFS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NFS == nil { - m.NFS = &NFSVolumeSource{} - } - if err := m.NFS.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RBD", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RBD == nil { - m.RBD = &RBDVolumeSource{} - } - if err := m.RBD.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ISCSI", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ISCSI == nil { - m.ISCSI = &ISCSIVolumeSource{} - } - if err := m.ISCSI.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cinder", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Cinder == nil { - m.Cinder = &CinderVolumeSource{} - } - if err := m.Cinder.Unmarshal(data[iNdEx:postIndex]); err != 
nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CephFS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CephFS == nil { - m.CephFS = &CephFSVolumeSource{} - } - if err := m.CephFS.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FC", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FC == nil { - m.FC = &FCVolumeSource{} - } - if err := m.FC.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Flocker", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Flocker == nil { - m.Flocker = &FlockerVolumeSource{} - } - if err := m.Flocker.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FlexVolume", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FlexVolume == nil { - m.FlexVolume = &FlexVolumeSource{} - } - if err := m.FlexVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AzureFile", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AzureFile == nil { - m.AzureFile = &AzureFileVolumeSource{} - } - if err := m.AzureFile.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for 
field VsphereVolume", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.VsphereVolume == nil { - m.VsphereVolume = &VsphereVirtualDiskVolumeSource{} - } - if err := m.VsphereVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PersistentVolumeSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 
0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - if m.Capacity == nil { - m.Capacity = make(ResourceList) - } - m.Capacity[ResourceName(mapkey)] = *mapvalue - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeSource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.PersistentVolumeSource.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(data[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClaimRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ClaimRef == nil { - m.ClaimRef = &ObjectReference{} - } - if err := m.ClaimRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeReclaimPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if 
iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PersistentVolumeStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Phase = PersistentVolumePhase(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Pod) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Pod: wiretype end group for non-group") - } - if 
fieldNum <= 0 { - return fmt.Errorf("proto: Pod: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodAffinity) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodAffinity: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodAffinity: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) - if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) - if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodAffinityTerm) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodAffinityTerm: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LabelSelector == nil { - m.LabelSelector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} - } - if err := m.LabelSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF 
- } - m.Namespaces = append(m.Namespaces, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TopologyKey", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TopologyKey = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodAntiAffinity) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodAntiAffinity: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) - if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) - if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx 
= preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodAttachOptions) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodAttachOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodAttachOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Stdin = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Stdout = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Stderr = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TTY = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Container = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodCondition) Unmarshal(data 
[]byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = PodConditionType(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = ConditionStatus(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << 
shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodExecOptions) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodExecOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodExecOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Stdin = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Stdout = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Stderr = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TTY = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field Container", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Container = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Command = append(m.Command, string(data[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Pod{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, 
err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodLogOptions) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodLogOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodLogOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Container = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Follow = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Previous", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Previous = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SinceSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.SinceSeconds = &v - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SinceTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SinceTime == nil { - m.SinceTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} - } - if err := m.SinceTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Timestamps = bool(v != 0) - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TailLines", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TailLines = &v - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LimitBytes", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.LimitBytes = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodProxyOptions) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodProxyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSecurityContext) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 
0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSecurityContext: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SELinuxOptions == nil { - m.SELinuxOptions = &SELinuxOptions{} - } - if err := m.SELinuxOptions.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.RunAsUser = &v - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.RunAsNonRoot = &b - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.SupplementalGroups = append(m.SupplementalGroups, v) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.FSGroup = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } 
- switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Volumes = append(m.Volumes, Volume{}) - if err := m.Volumes[len(m.Volumes)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Containers = append(m.Containers, Container{}) - if err := m.Containers[len(m.Containers)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RestartPolicy = RestartPolicy(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TerminationGracePeriodSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TerminationGracePeriodSeconds = &v - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ActiveDeadlineSeconds = &v - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DNSPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DNSPolicy = DNSPolicy(data[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - if m.NodeSelector == nil { - m.NodeSelector = make(map[string]string) - } - m.NodeSelector[mapkey] = mapvalue - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceAccountName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedServiceAccount", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DeprecatedServiceAccount = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NodeName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.HostNetwork = bool(v != 0) - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.HostPID = bool(v != 0) - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.HostIPC = bool(v != 0) - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecurityContext == nil { - m.SecurityContext = &PodSecurityContext{} - } - if err := m.SecurityContext.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{}) - if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field Hostname", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hostname = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 17: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subdomain", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subdomain = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Phase = PodPhase(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, PodCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 
3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostIP = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodIP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PodIP = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.StartTime == nil { - m.StartTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} - } - if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerStatuses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerStatuses = append(m.ContainerStatuses, ContainerStatus{}) - if err := m.ContainerStatuses[len(m.ContainerStatuses)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodStatusResult) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodStatusResult: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodStatusResult: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodTemplate) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodTemplate: wiretype end group for 
non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodTemplateList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodTemplateList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
m.Items = append(m.Items, PodTemplate{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodTemplateSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodTemplateSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Preconditions) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Preconditions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Preconditions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen 
uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := k8s_io_kubernetes_pkg_types.UID(data[iNdEx:postIndex]) - m.UID = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PreferredSchedulingTerm) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PreferredSchedulingTerm: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PreferredSchedulingTerm: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) - } - m.Weight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Weight |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Preference", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Preference.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Probe) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Probe: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Probe: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch 
fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Handler", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Handler.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InitialDelaySeconds", wireType) - } - m.InitialDelaySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.InitialDelaySeconds |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) - } - m.TimeoutSeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.TimeoutSeconds |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PeriodSeconds", wireType) - } - m.PeriodSeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.PeriodSeconds |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SuccessThreshold", wireType) - } - m.SuccessThreshold = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.SuccessThreshold |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FailureThreshold", wireType) - } - m.FailureThreshold = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.FailureThreshold |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RBDVolumeSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RBDVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - 
return fmt.Errorf("proto: RBDVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CephMonitors", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CephMonitors = append(m.CephMonitors, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RBDImage", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RBDImage = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RBDPool", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RBDPool = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RadosUser", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RadosUser = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyring", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keyring = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RangeAllocation) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RangeAllocation: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RangeAllocation: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - 
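// Illustrative sketch (assumption: readLengthDelimited is a hypothetical
// helper, not taken from the vendored file). It shows how the loops above
// split each tag varint into fieldNum and wireType, then bounds-check a
// length-delimited (wire type 2) payload before slicing it out, mirroring the
// generated "postIndex := iNdEx + msglen" checks.
package main

import (
	"fmt"
	"io"
)

func readLengthDelimited(data []byte, iNdEx, msglen int) ([]byte, int, error) {
	if msglen < 0 {
		return nil, iNdEx, fmt.Errorf("invalid length %d", msglen)
	}
	postIndex := iNdEx + msglen
	if postIndex > len(data) {
		return nil, iNdEx, io.ErrUnexpectedEOF
	}
	return data[iNdEx:postIndex], postIndex, nil
}

func main() {
	// Wire bytes for: field 1, wire type 2 (tag 0x0A), length 3, "foo".
	data := []byte{0x0A, 0x03, 'f', 'o', 'o'}
	wire := uint64(data[0])      // single-byte tag varint
	fieldNum := int32(wire >> 3) // upper bits: field number
	wireType := int(wire & 0x7)  // lower 3 bits: wire type
	payload, next, err := readLengthDelimited(data, 2, int(data[1]))
	fmt.Println(fieldNum, wireType, string(payload), next, err) // 1 2 foo 5 <nil>
}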
intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Range = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicationController) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicationController: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicationController: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - 
return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicationControllerList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicationControllerList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicationControllerList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ReplicationController{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicationControllerSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicationControllerSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicationControllerSpec: 
illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Replicas = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - if m.Selector == nil { - m.Selector = make(map[string]string) - } - m.Selector[mapkey] = mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Template == nil { - m.Template = &PodTemplateSpec{} - } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - 
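// Illustrative sketch (assumption: decodeStringEntry is a hypothetical helper,
// not from the vendored file, and handles only single-byte tags and lengths).
// Map fields such as Selector above arrive as repeated entry messages whose
// field 1 is the key string and field 2 the value string; the generated code
// inlines these steps as keykey/stringLenmapkey/valuekey/stringLenmapvalue.
package main

import (
	"fmt"
	"io"
)

func decodeStringEntry(entry []byte) (key, value string, err error) {
	i := 0
	for i < len(entry) {
		if i+1 >= len(entry) {
			return "", "", io.ErrUnexpectedEOF
		}
		tag := entry[i]           // e.g. 0x0A for field 1, 0x12 for field 2
		strLen := int(entry[i+1]) // string length, assumed < 128
		i += 2
		if i+strLen > len(entry) {
			return "", "", io.ErrUnexpectedEOF
		}
		s := string(entry[i : i+strLen])
		i += strLen
		switch tag >> 3 {
		case 1:
			key = s
		case 2:
			value = s
		default:
			return "", "", fmt.Errorf("unexpected field %d in map entry", tag>>3)
		}
	}
	return key, value, nil
}

func main() {
	// One entry of a map<string,string>: {"app": "web"}.
	entry := []byte{0x0A, 0x03, 'a', 'p', 'p', 0x12, 0x03, 'w', 'e', 'b'}
	k, v, err := decodeStringEntry(entry)
	fmt.Println(k, v, err) // app web <nil>
	selector := map[string]string{k: v}
	fmt.Println(selector)
}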
default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicationControllerStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicationControllerStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicationControllerStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) - } - m.FullyLabeledReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceFieldSelector) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceFieldSelector: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resource = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Divisor", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Divisor.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceQuota) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceQuota: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceQuota: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - 
b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceQuotaList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceQuotaList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceQuotaList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ResourceQuota{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { 
- return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceQuotaSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceQuotaSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceQuotaSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - if m.Hard == nil { - m.Hard = make(ResourceList) - } - m.Hard[ResourceName(mapkey)] = *mapvalue - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - 
break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Scopes = append(m.Scopes, ResourceQuotaScope(data[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceQuotaStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceQuotaStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceQuotaStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := 
mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - if m.Hard == nil { - m.Hard = make(ResourceList) - } - m.Hard[ResourceName(mapkey)] = *mapvalue - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Used", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - if m.Used == nil { - m.Used = make(ResourceList) - } - m.Used[ResourceName(mapkey)] = *mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceRequirements) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceRequirements: wiretype end group for non-group") - } - if fieldNum <= 0 
{ - return fmt.Errorf("proto: ResourceRequirements: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - if m.Limits == nil { - m.Limits = make(ResourceList) - } - m.Limits[ResourceName(mapkey)] = *mapvalue - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - 
stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_kubernetes_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - if m.Requests == nil { - m.Requests = make(ResourceList) - } - m.Requests[ResourceName(mapkey)] = *mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SELinuxOptions) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SELinuxOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SELinuxOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.User = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Role = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Level = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Secret) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Secret: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - mapbyteLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthGenerated - } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := make([]byte, mapbyteLen) - copy(mapvalue, data[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - if m.Data == nil { - m.Data = make(map[string][]byte) - } - m.Data[mapkey] = mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = SecretType(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SecretKeySelector) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SecretKeySelector: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SecretKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LocalObjectReference.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SecretList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SecretList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SecretList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Secret{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = 
preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SecretVolumeSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SecretVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SecretVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SecretName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, KeyToPath{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SecurityContext) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SecurityContext: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Capabilities", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Capabilities == nil { - m.Capabilities = &Capabilities{} - } - if err := m.Capabilities.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Privileged = &b - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SELinuxOptions == nil { - m.SELinuxOptions = &SELinuxOptions{} - } - if err := m.SELinuxOptions.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.RunAsUser = &v - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.RunAsNonRoot = &b - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.ReadOnlyRootFilesystem = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SerializedReference) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } 
- } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SerializedReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SerializedReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Reference.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Service) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Service: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if 
msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceAccount) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceAccount: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceAccount: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Secrets = append(m.Secrets, ObjectReference{}) - if err := m.Secrets[len(m.Secrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{}) - if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += 
skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceAccountList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceAccountList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceAccountList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ServiceAccount{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - 
if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Service{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServicePort) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServicePort: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServicePort: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Protocol = Protocol(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Port |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d 
for field TargetPort", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TargetPort.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NodePort", wireType) - } - m.NodePort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.NodePort |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceProxyOptions) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceProxyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceSpec: illegal tag %d (wire type %d)", 
fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = append(m.Ports, ServicePort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - if m.Selector == nil { - m.Selector = make(map[string]string) - } - m.Selector[mapkey] = mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterIP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if 
intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClusterIP = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = ServiceType(data[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalIPs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExternalIPs = append(m.ExternalIPs, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedPublicIPs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DeprecatedPublicIPs = append(m.DeprecatedPublicIPs, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinity", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SessionAffinity = ServiceAffinity(data[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerIP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LoadBalancerIP = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(data[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LoadBalancer.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TCPSocketAction) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TCPSocketAction: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TCPSocketAction: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Port.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Taint) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Taint: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Taint: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Effect = TaintEffect(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return 
io.ErrUnexpectedEOF - } - return nil -} -func (m *Toleration) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Toleration: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Toleration: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Operator = TolerationOperator(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Effect = TaintEffect(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return 
io.ErrUnexpectedEOF - } - return nil -} -func (m *Volume) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Volume: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Volume: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeSource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.VolumeSource.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeMount) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeMount: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeMount: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = 
string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MountPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MountPath = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SubPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SubPath = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HostPath == nil { - m.HostPath = &HostPathVolumeSource{} - } - if err := m.HostPath.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EmptyDir", wireType) - } - var msglen int - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.EmptyDir == nil { - m.EmptyDir = &EmptyDirVolumeSource{} - } - if err := m.EmptyDir.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GCEPersistentDisk", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.GCEPersistentDisk == nil { - m.GCEPersistentDisk = &GCEPersistentDiskVolumeSource{} - } - if err := m.GCEPersistentDisk.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AWSElasticBlockStore", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AWSElasticBlockStore == nil { - m.AWSElasticBlockStore = &AWSElasticBlockStoreVolumeSource{} - } - if err := m.AWSElasticBlockStore.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GitRepo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.GitRepo == nil { - m.GitRepo = &GitRepoVolumeSource{} - } - if err := m.GitRepo.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Secret == nil { - m.Secret = &SecretVolumeSource{} - } - if err := m.Secret.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NFS", wireType) - } - var msglen int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NFS == nil { - m.NFS = &NFSVolumeSource{} - } - if err := m.NFS.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ISCSI", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ISCSI == nil { - m.ISCSI = &ISCSIVolumeSource{} - } - if err := m.ISCSI.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Glusterfs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Glusterfs == nil { - m.Glusterfs = &GlusterfsVolumeSource{} - } - if err := m.Glusterfs.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeClaim", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PersistentVolumeClaim == nil { - m.PersistentVolumeClaim = &PersistentVolumeClaimVolumeSource{} - } - if err := m.PersistentVolumeClaim.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RBD", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RBD == nil { - m.RBD = &RBDVolumeSource{} - } - if err := m.RBD.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FlexVolume", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FlexVolume == nil { - m.FlexVolume = &FlexVolumeSource{} - } - if err := m.FlexVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cinder", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Cinder == nil { - m.Cinder = &CinderVolumeSource{} - } - if err := m.Cinder.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CephFS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CephFS == nil { - m.CephFS = &CephFSVolumeSource{} - } - if err := m.CephFS.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Flocker", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Flocker == nil { - m.Flocker = &FlockerVolumeSource{} - } - if err := m.Flocker.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DownwardAPI", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DownwardAPI == nil { - m.DownwardAPI = &DownwardAPIVolumeSource{} - } - if err := m.DownwardAPI.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 17: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FC", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << 
shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FC == nil { - m.FC = &FCVolumeSource{} - } - if err := m.FC.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 18: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AzureFile", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AzureFile == nil { - m.AzureFile = &AzureFileVolumeSource{} - } - if err := m.AzureFile.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 19: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfigMap == nil { - m.ConfigMap = &ConfigMapVolumeSource{} - } - if err := m.ConfigMap.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VsphereVolume", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.VsphereVolume == nil { - m.VsphereVolume = &VsphereVirtualDiskVolumeSource{} - } - if err := m.VsphereVolume.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VsphereVirtualDiskVolumeSource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VsphereVirtualDiskVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VsphereVirtualDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field VolumePath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumePath = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WeightedPodAffinityTerm) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WeightedPodAffinityTerm: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WeightedPodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) - } - m.Weight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Weight |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodAffinityTerm", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.PodAffinityTerm.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx 
> l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/generated.proto b/vendor/k8s.io/kubernetes/pkg/api/v1/generated.proto deleted file mode 100644 index 4a99eec88..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/generated.proto +++ /dev/null @@ -1,2924 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.api.v1; - -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1"; - -// Represents a Persistent Disk resource in AWS. -// -// An AWS EBS disk must exist before mounting to a container. The disk -// must also be in the same AWS zone as the kubelet. 
An AWS EBS disk -// can only be mounted as read/write once. AWS EBS volumes support -// ownership management and SELinux relabeling. -message AWSElasticBlockStoreVolumeSource { - // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#awselasticblockstore - optional string volumeID = 1; - - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#awselasticblockstore - // TODO: how do we prevent errors in the filesystem from compromising the machine - optional string fsType = 2; - - // The partition in the volume that you want to mount. - // If omitted, the default is to mount by volume name. - // Examples: For volume /dev/sda1, you specify the partition as "1". - // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - optional int32 partition = 3; - - // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". - // If omitted, the default is "false". - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#awselasticblockstore - optional bool readOnly = 4; -} - -// Affinity is a group of affinity scheduling rules. -message Affinity { - // Describes node affinity scheduling rules for the pod. - optional NodeAffinity nodeAffinity = 1; - - // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). - optional PodAffinity podAffinity = 2; - - // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). - optional PodAntiAffinity podAntiAffinity = 3; -} - -// AttachedVolume describes a volume attached to a node -message AttachedVolume { - // Name of the attached volume - optional string name = 1; - - // DevicePath represents the device path where the volume should be avilable - optional string devicePath = 2; -} - -// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. -message AzureFileVolumeSource { - // the name of secret that contains Azure Storage Account Name and Key - optional string secretName = 1; - - // Share Name - optional string shareName = 2; - - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - optional bool readOnly = 3; -} - -// Binding ties one object to another. -// For example, a pod is bound to a node by a scheduler. -message Binding { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // The target object that you want to bind to the standard object. - optional ObjectReference target = 2; -} - -// Adds and removes POSIX capabilities from running containers. -message Capabilities { - // Added capabilities - repeated string add = 1; - - // Removed capabilities - repeated string drop = 2; -} - -// Represents a Ceph Filesystem mount that lasts the lifetime of a pod -// Cephfs volumes do not support ownership management or SELinux relabeling. 
-message CephFSVolumeSource { - // Required: Monitors is a collection of Ceph monitors - // More info: http://releases.k8s.io/release-1.3/examples/cephfs/README.md#how-to-use-it - repeated string monitors = 1; - - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / - optional string path = 2; - - // Optional: User is the rados user name, default is admin - // More info: http://releases.k8s.io/release-1.3/examples/cephfs/README.md#how-to-use-it - optional string user = 3; - - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: http://releases.k8s.io/release-1.3/examples/cephfs/README.md#how-to-use-it - optional string secretFile = 4; - - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: http://releases.k8s.io/release-1.3/examples/cephfs/README.md#how-to-use-it - optional LocalObjectReference secretRef = 5; - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // More info: http://releases.k8s.io/release-1.3/examples/cephfs/README.md#how-to-use-it - optional bool readOnly = 6; -} - -// Represents a cinder volume resource in Openstack. -// A Cinder volume must exist before mounting to a container. -// The volume must also be in the same region as the kubelet. -// Cinder volumes support ownership management and SELinux relabeling. -message CinderVolumeSource { - // volume id used to identify the volume in cinder - // More info: http://releases.k8s.io/release-1.3/examples/mysql-cinder-pd/README.md - optional string volumeID = 1; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.3/examples/mysql-cinder-pd/README.md - optional string fsType = 2; - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // More info: http://releases.k8s.io/release-1.3/examples/mysql-cinder-pd/README.md - optional bool readOnly = 3; -} - -// Information about the condition of a component. -message ComponentCondition { - // Type of condition for a component. - // Valid value: "Healthy" - optional string type = 1; - - // Status of the condition for a component. - // Valid values for "Healthy": "True", "False", or "Unknown". - optional string status = 2; - - // Message about the condition for a component. - // For example, information about a health check. - optional string message = 3; - - // Condition error code for a component. - // For example, a health check error code. - optional string error = 4; -} - -// ComponentStatus (and ComponentStatusList) holds the cluster validation info. -message ComponentStatus { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // List of component conditions observed - repeated ComponentCondition conditions = 2; -} - -// Status of all the conditions for the component as a list of ComponentStatus objects. -message ComponentStatusList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of ComponentStatus objects. 
- repeated ComponentStatus items = 2; -} - -// ConfigMap holds configuration data for pods to consume. -message ConfigMap { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Data contains the configuration data. - // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot. - map<string, string> data = 2; -} - -// Selects a key from a ConfigMap. -message ConfigMapKeySelector { - // The ConfigMap to select from. - optional LocalObjectReference localObjectReference = 1; - - // The key to select. - optional string key = 2; -} - -// ConfigMapList is a resource containing a list of ConfigMap objects. -message ConfigMapList { - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of ConfigMaps. - repeated ConfigMap items = 2; -} - -// Adapts a ConfigMap into a volume. -// -// The contents of the target ConfigMap's Data field will be presented in a -// volume as files using the keys in the Data field as the file names, unless -// the items element is populated with specific mappings of keys to paths. -// ConfigMap volumes support ownership management and SELinux relabeling. -message ConfigMapVolumeSource { - optional LocalObjectReference localObjectReference = 1; - - // If unspecified, each key-value pair in the Data field of the referenced - // ConfigMap will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error. Paths must be relative and may not contain - // the '..' path or start with '..'. - repeated KeyToPath items = 2; -} - -// A single application container that you want to run within a pod. -message Container { - // Name of the container specified as a DNS_LABEL. - // Each container in a pod must have a unique name (DNS_LABEL). - // Cannot be updated. - optional string name = 1; - - // Docker image name. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/images.md - optional string image = 2; - - // Entrypoint array. Not executed within a shell. - // The docker image's ENTRYPOINT is used if this is not provided. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/containers.md#containers-and-commands - repeated string command = 3; - - // Arguments to the entrypoint. - // The docker image's CMD is used if this is not provided. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. 
- // More info: http://releases.k8s.io/release-1.3/docs/user-guide/containers.md#containers-and-commands - repeated string args = 4; - - // Container's working directory. - // If not specified, the container runtime's default will be used, which - // might be configured in the container image. - // Cannot be updated. - optional string workingDir = 5; - - // List of ports to expose from the container. Exposing a port here gives - // the system additional information about the network connections a - // container uses, but is primarily informational. Not specifying a port here - // DOES NOT prevent that port from being exposed. Any port which is - // listening on the default "0.0.0.0" address inside a container will be - // accessible from the network. - // Cannot be updated. - repeated ContainerPort ports = 6; - - // List of environment variables to set in the container. - // Cannot be updated. - repeated EnvVar env = 7; - - // Compute Resources required by this container. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#resources - optional ResourceRequirements resources = 8; - - // Pod volumes to mount into the container's filesystem. - // Cannot be updated. - repeated VolumeMount volumeMounts = 9; - - // Periodic probe of container liveness. - // Container will be restarted if the probe fails. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#container-probes - optional Probe livenessProbe = 10; - - // Periodic probe of container service readiness. - // Container will be removed from service endpoints if the probe fails. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#container-probes - optional Probe readinessProbe = 11; - - // Actions that the management system should take in response to container lifecycle events. - // Cannot be updated. - optional Lifecycle lifecycle = 12; - - // Optional: Path at which the file to which the container's termination message - // will be written is mounted into the container's filesystem. - // Message written is intended to be brief final status, such as an assertion failure message. - // Defaults to /dev/termination-log. - // Cannot be updated. - optional string terminationMessagePath = 13; - - // Image pull policy. - // One of Always, Never, IfNotPresent. - // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/images.md#updating-images - optional string imagePullPolicy = 14; - - // Security options the pod should run with. - // More info: http://releases.k8s.io/release-1.3/docs/design/security_context.md - optional SecurityContext securityContext = 15; - - // Whether this container should allocate a buffer for stdin in the container runtime. If this - // is not set, reads from stdin in the container will always result in EOF. - // Default is false. - optional bool stdin = 16; - - // Whether the container runtime should close the stdin channel after it has been opened by - // a single attach. When stdin is true the stdin stream will remain open across multiple attach - // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the - // first client attaches to stdin, and then remains open and accepts data until the client disconnects, - // at which time stdin is closed and remains closed until the container is restarted. 
If this - // flag is false, a container processes that reads from stdin will never receive an EOF. - // Default is false - optional bool stdinOnce = 17; - - // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - // Default is false. - optional bool tty = 18; -} - -// Describe a container image -message ContainerImage { - // Names by which this image is known. - // e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] - repeated string names = 1; - - // The size of the image in bytes. - optional int64 sizeBytes = 2; -} - -// ContainerPort represents a network port in a single container. -message ContainerPort { - // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each - // named port in a pod must have a unique name. Name for the port that can be - // referred to by services. - optional string name = 1; - - // Number of port to expose on the host. - // If specified, this must be a valid port number, 0 < x < 65536. - // If HostNetwork is specified, this must match ContainerPort. - // Most containers do not need this. - optional int32 hostPort = 2; - - // Number of port to expose on the pod's IP address. - // This must be a valid port number, 0 < x < 65536. - optional int32 containerPort = 3; - - // Protocol for port. Must be UDP or TCP. - // Defaults to "TCP". - optional string protocol = 4; - - // What host IP to bind the external port to. - optional string hostIP = 5; -} - -// ContainerState holds a possible state of container. -// Only one of its members may be specified. -// If none of them is specified, the default one is ContainerStateWaiting. -message ContainerState { - // Details about a waiting container - optional ContainerStateWaiting waiting = 1; - - // Details about a running container - optional ContainerStateRunning running = 2; - - // Details about a terminated container - optional ContainerStateTerminated terminated = 3; -} - -// ContainerStateRunning is a running state of a container. -message ContainerStateRunning { - // Time at which the container was last (re-)started - optional k8s.io.kubernetes.pkg.api.unversioned.Time startedAt = 1; -} - -// ContainerStateTerminated is a terminated state of a container. -message ContainerStateTerminated { - // Exit status from the last termination of the container - optional int32 exitCode = 1; - - // Signal from the last termination of the container - optional int32 signal = 2; - - // (brief) reason from the last termination of the container - optional string reason = 3; - - // Message regarding the last termination of the container - optional string message = 4; - - // Time at which previous execution of the container started - optional k8s.io.kubernetes.pkg.api.unversioned.Time startedAt = 5; - - // Time at which the container last terminated - optional k8s.io.kubernetes.pkg.api.unversioned.Time finishedAt = 6; - - // Container's ID in the format 'docker://<container_id>' - optional string containerID = 7; -} - -// ContainerStateWaiting is a waiting state of a container. -message ContainerStateWaiting { - // (brief) reason the container is not yet running. - optional string reason = 1; - - // Message regarding why the container is not yet running. - optional string message = 2; -} - -// ContainerStatus contains details for the current status of this container. -message ContainerStatus { - // This must be a DNS_LABEL. Each container in a pod must have a unique name. - // Cannot be updated. 
- optional string name = 1; - - // Details about the container's current condition. - optional ContainerState state = 2; - - // Details about the container's last termination condition. - optional ContainerState lastState = 3; - - // Specifies whether the container has passed its readiness probe. - optional bool ready = 4; - - // The number of times the container has been restarted, currently based on - // the number of dead containers that have not yet been removed. - // Note that this is calculated from dead containers. But those containers are subject to - // garbage collection. This value will get capped at 5 by GC. - optional int32 restartCount = 5; - - // The image the container is running. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/images.md - // TODO(dchen1107): Which image the container is running with? - optional string image = 6; - - // ImageID of the container's image. - optional string imageID = 7; - - // Container's ID in the format 'docker://<container_id>'. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/container-environment.md#container-information - optional string containerID = 8; -} - -// DaemonEndpoint contains information about a single Daemon endpoint. -message DaemonEndpoint { - // Port number of the given endpoint. - optional int32 Port = 1; -} - -// DeleteOptions may be provided when deleting an API object -message DeleteOptions { - // The duration in seconds before the object should be deleted. Value must be non-negative integer. - // The value zero indicates delete immediately. If this value is nil, the default grace period for the - // specified type will be used. - // Defaults to a per object value if not specified. zero means delete immediately. - optional int64 gracePeriodSeconds = 1; - - // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be - // returned. - optional Preconditions preconditions = 2; - - // Should the dependent objects be orphaned. If true/false, the "orphan" - // finalizer will be added to/removed from the object's finalizers list. - optional bool orphanDependents = 3; -} - -// DownwardAPIVolumeFile represents information to create the file containing the pod field -message DownwardAPIVolumeFile { - // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' - optional string path = 1; - - // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. - optional ObjectFieldSelector fieldRef = 2; - - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - optional ResourceFieldSelector resourceFieldRef = 3; -} - -// DownwardAPIVolumeSource represents a volume containing downward API info. -// Downward API volumes support ownership management and SELinux relabeling. -message DownwardAPIVolumeSource { - // Items is a list of downward API volume file - repeated DownwardAPIVolumeFile items = 1; -} - -// Represents an empty directory for a pod. -// Empty directory volumes support ownership management and SELinux relabeling. -message EmptyDirVolumeSource { - // What type of storage medium should back this directory. - // The default is "" which means to use the node's default medium. - // Must be an empty string (default) or Memory. 
- // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#emptydir - optional string medium = 1; -} - -// EndpointAddress is a tuple that describes single IP address. -message EndpointAddress { - // The IP of this endpoint. - // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), - // or link-local multicast ((224.0.0.0/24). - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, See #4447. - optional string ip = 1; - - // The Hostname of this endpoint - optional string hostname = 3; - - // Reference to object providing the endpoint. - optional ObjectReference targetRef = 2; -} - -// EndpointPort is a tuple that describes a single port. -message EndpointPort { - // The name of this port (corresponds to ServicePort.Name). - // Must be a DNS_LABEL. - // Optional only if one port is defined. - optional string name = 1; - - // The port number of the endpoint. - optional int32 port = 2; - - // The IP protocol for this port. - // Must be UDP or TCP. - // Default is TCP. - optional string protocol = 3; -} - -// EndpointSubset is a group of addresses with a common set of ports. The -// expanded set of endpoints is the Cartesian product of Addresses x Ports. -// For example, given: -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// } -// The resulting set of endpoints can be viewed as: -// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], -// b: [ 10.10.1.1:309, 10.10.2.2:309 ] -message EndpointSubset { - // IP addresses which offer the related ports that are marked as ready. These endpoints - // should be considered safe for load balancers and clients to utilize. - repeated EndpointAddress addresses = 1; - - // IP addresses which offer the related ports but are not currently marked as ready - // because they have not yet finished starting, have recently failed a readiness check, - // or have recently failed a liveness check. - repeated EndpointAddress notReadyAddresses = 2; - - // Port numbers available on the related IP addresses. - repeated EndpointPort ports = 3; -} - -// Endpoints is a collection of endpoints that implement the actual service. Example: -// Name: "mysvc", -// Subsets: [ -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// }, -// { -// Addresses: [{"ip": "10.10.3.3"}], -// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] -// }, -// ] -message Endpoints { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // The set of all endpoints is the union of all subsets. Addresses are placed into - // subsets according to the IPs they share. A single address with multiple ports, - // some of which are ready and some of which are not (because they come from - // different containers) will result in the address being displayed in different - // subsets for the different ports. No address will appear in both Addresses and - // NotReadyAddresses in the same subset. - // Sets of addresses and ports that comprise a service. - repeated EndpointSubset subsets = 2; -} - -// EndpointsList is a list of endpoints. -message EndpointsList { - // Standard list metadata. 
- // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of endpoints. - repeated Endpoints items = 2; -} - -// EnvVar represents an environment variable present in a Container. -message EnvVar { - // Name of the environment variable. Must be a C_IDENTIFIER. - optional string name = 1; - - // Variable references $(VAR_NAME) are expanded - // using the previous defined environment variables in the container and - // any service environment variables. If a variable cannot be resolved, - // the reference in the input string will be unchanged. The $(VAR_NAME) - // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped - // references will never be expanded, regardless of whether the variable - // exists or not. - // Defaults to "". - optional string value = 2; - - // Source for the environment variable's value. Cannot be used if value is not empty. - optional EnvVarSource valueFrom = 3; -} - -// EnvVarSource represents a source for the value of an EnvVar. -message EnvVarSource { - // Selects a field of the pod; only name and namespace are supported. - optional ObjectFieldSelector fieldRef = 1; - - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - optional ResourceFieldSelector resourceFieldRef = 2; - - // Selects a key of a ConfigMap. - optional ConfigMapKeySelector configMapKeyRef = 3; - - // Selects a key of a secret in the pod's namespace - optional SecretKeySelector secretKeyRef = 4; -} - -// Event is a report of an event somewhere in the cluster. -// TODO: Decide whether to store these separately or with the object they apply to. -message Event { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // The object that this event is about. - optional ObjectReference involvedObject = 2; - - // This should be a short, machine understandable string that gives the reason - // for the transition into the object's current status. - // TODO: provide exact specification for format. - optional string reason = 3; - - // A human-readable description of the status of this operation. - // TODO: decide on maximum length. - optional string message = 4; - - // The component reporting this event. Should be a short machine understandable string. - optional EventSource source = 5; - - // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) - optional k8s.io.kubernetes.pkg.api.unversioned.Time firstTimestamp = 6; - - // The time at which the most recent occurrence of this event was recorded. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTimestamp = 7; - - // The number of times this event has occurred. - optional int32 count = 8; - - // Type of this event (Normal, Warning), new types could be added in the future - optional string type = 9; -} - -// EventList is a list of events. -message EventList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of events - repeated Event items = 2; -} - -// EventSource contains information for an event. -message EventSource { - // Component from which the event is generated. 
- optional string component = 1; - - // Host name on which the event is generated. - optional string host = 2; -} - -// ExecAction describes a "run in container" action. -message ExecAction { - // Command is the command line to execute inside the container, the working directory for the - // command is root ('/') in the container's filesystem. The command is simply exec'd, it is - // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - // a shell, you need to explicitly call out to that shell. - // Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - repeated string command = 1; -} - -// ExportOptions is the query options to the standard REST get call. -message ExportOptions { - // Should this value be exported. Export strips fields that a user can not specify. - optional bool export = 1; - - // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace' - optional bool exact = 2; -} - -// Represents a Fibre Channel volume. -// Fibre Channel volumes can only be mounted as read/write once. -// Fibre Channel volumes support ownership management and SELinux relabeling. -message FCVolumeSource { - // Required: FC target world wide names (WWNs) - repeated string targetWWNs = 1; - - // Required: FC target lun number - optional int32 lun = 2; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - optional string fsType = 3; - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - optional bool readOnly = 4; -} - -// FlexVolume represents a generic volume resource that is -// provisioned/attached using a exec based plugin. This is an alpha feature and may change in future. -message FlexVolumeSource { - // Driver is the name of the driver to use for this volume. - optional string driver = 1; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - optional string fsType = 2; - - // Optional: SecretRef is reference to the secret object containing - // sensitive information to pass to the plugin scripts. This may be - // empty if no secret object is specified. If the secret object - // contains more than one secret, all secrets are passed to the plugin - // scripts. - optional LocalObjectReference secretRef = 3; - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - optional bool readOnly = 4; - - // Optional: Extra command options if any. - map<string, string> options = 5; -} - -// Represents a Flocker volume mounted by the Flocker agent. -// Flocker volumes do not support ownership management or SELinux relabeling. -message FlockerVolumeSource { - // Required: the volume name. This is going to be store on metadata -> name on the payload for Flocker - optional string datasetName = 1; -} - -// Represents a Persistent Disk resource in Google Compute Engine. -// -// A GCE PD must exist before mounting to a container. The disk must -// also be in the same GCE project and zone as the kubelet. A GCE PD -// can only be mounted as read/write once or read-only many times. GCE -// PDs support ownership management and SELinux relabeling. 
-message GCEPersistentDiskVolumeSource { - // Unique name of the PD resource in GCE. Used to identify the disk in GCE. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#gcepersistentdisk - optional string pdName = 1; - - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#gcepersistentdisk - // TODO: how do we prevent errors in the filesystem from compromising the machine - optional string fsType = 2; - - // The partition in the volume that you want to mount. - // If omitted, the default is to mount by volume name. - // Examples: For volume /dev/sda1, you specify the partition as "1". - // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#gcepersistentdisk - optional int32 partition = 3; - - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#gcepersistentdisk - optional bool readOnly = 4; -} - -// Represents a volume that is populated with the contents of a git repository. -// Git repo volumes do not support ownership management. -// Git repo volumes support SELinux relabeling. -message GitRepoVolumeSource { - // Repository URL - optional string repository = 1; - - // Commit hash for the specified revision. - optional string revision = 2; - - // Target directory name. - // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the - // git repository. Otherwise, if specified, the volume will contain the git repository in - // the subdirectory with the given name. - optional string directory = 3; -} - -// Represents a Glusterfs mount that lasts the lifetime of a pod. -// Glusterfs volumes do not support ownership management or SELinux relabeling. -message GlusterfsVolumeSource { - // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: http://releases.k8s.io/release-1.3/examples/glusterfs/README.md#create-a-pod - optional string endpoints = 1; - - // Path is the Glusterfs volume path. - // More info: http://releases.k8s.io/release-1.3/examples/glusterfs/README.md#create-a-pod - optional string path = 2; - - // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. - // Defaults to false. - // More info: http://releases.k8s.io/release-1.3/examples/glusterfs/README.md#create-a-pod - optional bool readOnly = 3; -} - -// HTTPGetAction describes an action based on HTTP Get requests. -message HTTPGetAction { - // Path to access on the HTTP server. - optional string path = 1; - - // Name or number of the port to access on the container. - // Number must be in the range 1 to 65535. - // Name must be an IANA_SVC_NAME. - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString port = 2; - - // Host name to connect to, defaults to the pod IP. You probably want to set - // "Host" in httpHeaders instead. - optional string host = 3; - - // Scheme to use for connecting to the host. - // Defaults to HTTP. - optional string scheme = 4; - - // Custom headers to set in the request. HTTP allows repeated headers. 
- repeated HTTPHeader httpHeaders = 5; -} - -// HTTPHeader describes a custom header to be used in HTTP probes -message HTTPHeader { - // The header field name - optional string name = 1; - - // The header field value - optional string value = 2; -} - -// Handler defines a specific action that should be taken -// TODO: pass structured data to these actions, and document that data here. -message Handler { - // One and only one of the following should be specified. - // Exec specifies the action to take. - optional ExecAction exec = 1; - - // HTTPGet specifies the http request to perform. - optional HTTPGetAction httpGet = 2; - - // TCPSocket specifies an action involving a TCP port. - // TCP hooks not yet supported - // TODO: implement a realistic TCP lifecycle hook - optional TCPSocketAction tcpSocket = 3; -} - -// Represents a host path mapped into a pod. -// Host path volumes do not support ownership management or SELinux relabeling. -message HostPathVolumeSource { - // Path of the directory on the host. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#hostpath - optional string path = 1; -} - -// Represents an ISCSI disk. -// ISCSI volumes can only be mounted as read/write once. -// ISCSI volumes support ownership management and SELinux relabeling. -message ISCSIVolumeSource { - // iSCSI target portal. The portal is either an IP or ip_addr:port if the port - // is other than default (typically TCP ports 860 and 3260). - optional string targetPortal = 1; - - // Target iSCSI Qualified Name. - optional string iqn = 2; - - // iSCSI target lun number. - optional int32 lun = 3; - - // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. - optional string iscsiInterface = 4; - - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#iscsi - // TODO: how do we prevent errors in the filesystem from compromising the machine - optional string fsType = 5; - - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. - optional bool readOnly = 6; -} - -// Maps a string key to a path within a volume. -message KeyToPath { - // The key to project. - optional string key = 1; - - // The relative path of the file to map the key to. - // May not be an absolute path. - // May not contain the path element '..'. - // May not start with the string '..'. - optional string path = 2; -} - -// Lifecycle describes actions that the management system should take in response to container lifecycle -// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks -// until the action is complete, unless the container process fails, in which case the handler is aborted. -message Lifecycle { - // PostStart is called immediately after a container is created. If the handler fails, - // the container is terminated and restarted according to its restart policy. - // Other management of the container blocks until the hook completes. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/container-environment.md#hook-details - optional Handler postStart = 1; - - // PreStop is called immediately before a container is terminated. - // The container is terminated after the handler completes. 
- // The reason for termination is passed to the handler. - // Regardless of the outcome of the handler, the container is eventually terminated. - // Other management of the container blocks until the hook completes. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/container-environment.md#hook-details - optional Handler preStop = 2; -} - -// LimitRange sets resource usage limits for each kind of resource in a Namespace. -message LimitRange { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines the limits enforced. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional LimitRangeSpec spec = 2; -} - -// LimitRangeItem defines a min/max usage limit for any resource that matches on kind. -message LimitRangeItem { - // Type of resource that this limit applies to. - optional string type = 1; - - // Max usage constraints on this kind by resource name. - map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> max = 2; - - // Min usage constraints on this kind by resource name. - map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> min = 3; - - // Default resource requirement limit value by resource name if resource limit is omitted. - map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> default = 4; - - // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. - map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> defaultRequest = 5; - - // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource. - map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> maxLimitRequestRatio = 6; -} - -// LimitRangeList is a list of LimitRange items. -message LimitRangeList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of LimitRange objects. - // More info: http://releases.k8s.io/release-1.3/docs/design/admission_control_limit_range.md - repeated LimitRange items = 2; -} - -// LimitRangeSpec defines a min/max usage limit for resources that match on kind. -message LimitRangeSpec { - // Limits is the list of LimitRangeItem objects that are enforced. - repeated LimitRangeItem limits = 1; -} - -// List holds a list of objects, which may not be known by the server. -message List { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of objects - repeated k8s.io.kubernetes.pkg.runtime.RawExtension items = 2; -} - -// ListOptions is the query options to a standard REST list call. -message ListOptions { - // A selector to restrict the list of returned objects by their labels. - // Defaults to everything. - optional string labelSelector = 1; - - // A selector to restrict the list of returned objects by their fields. - // Defaults to everything. - optional string fieldSelector = 2; - - // Watch for changes to the described resources and return them as a stream of - // add, update, and remove notifications. Specify resourceVersion. 
- optional bool watch = 3; - - // When specified with a watch call, shows changes that occur after that particular version of a resource. - // Defaults to changes from the beginning of history. - optional string resourceVersion = 4; - - // Timeout for the list/watch call. - optional int64 timeoutSeconds = 5; -} - -// LoadBalancerIngress represents the status of a load-balancer ingress point: -// traffic intended for the service should be sent to an ingress point. -message LoadBalancerIngress { - // IP is set for load-balancer ingress points that are IP based - // (typically GCE or OpenStack load-balancers) - optional string ip = 1; - - // Hostname is set for load-balancer ingress points that are DNS based - // (typically AWS load-balancers) - optional string hostname = 2; -} - -// LoadBalancerStatus represents the status of a load-balancer. -message LoadBalancerStatus { - // Ingress is a list containing ingress points for the load-balancer. - // Traffic intended for the service should be sent to these ingress points. - repeated LoadBalancerIngress ingress = 1; -} - -// LocalObjectReference contains enough information to let you locate the -// referenced object inside the same namespace. -message LocalObjectReference { - // Name of the referent. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names - // TODO: Add other useful fields. apiVersion, kind, uid? - optional string name = 1; -} - -// Represents an NFS mount that lasts the lifetime of a pod. -// NFS volumes do not support ownership management or SELinux relabeling. -message NFSVolumeSource { - // Server is the hostname or IP address of the NFS server. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#nfs - optional string server = 1; - - // Path that is exported by the NFS server. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#nfs - optional string path = 2; - - // ReadOnly here will force - // the NFS export to be mounted with read-only permissions. - // Defaults to false. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#nfs - optional bool readOnly = 3; -} - -// Namespace provides a scope for Names. -// Use of multiple namespaces is optional. -message Namespace { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines the behavior of the Namespace. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional NamespaceSpec spec = 2; - - // Status describes the current status of a Namespace. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional NamespaceStatus status = 3; -} - -// NamespaceList is a list of Namespaces. -message NamespaceList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of Namespace objects in the list. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/namespaces.md - repeated Namespace items = 2; -} - -// NamespaceSpec describes the attributes on a Namespace. -message NamespaceSpec { - // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. 
- // More info: http://releases.k8s.io/release-1.3/docs/design/namespaces.md#finalizers - repeated string finalizers = 1; -} - -// NamespaceStatus is information about the current status of a Namespace. -message NamespaceStatus { - // Phase is the current lifecycle phase of the namespace. - // More info: http://releases.k8s.io/release-1.3/docs/design/namespaces.md#phases - optional string phase = 1; -} - -// Node is a worker node in Kubernetes, formerly known as minion. -// Each node will have a unique identifier in the cache (i.e. in etcd). -message Node { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines the behavior of a node. - // http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional NodeSpec spec = 2; - - // Most recently observed status of the node. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional NodeStatus status = 3; -} - -// NodeAddress contains information for the node's address. -message NodeAddress { - // Node address type, one of Hostname, ExternalIP or InternalIP. - optional string type = 1; - - // The node address. - optional string address = 2; -} - -// Node affinity is a group of node affinity scheduling rules. -message NodeAffinity { - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to an update), the system - // may or may not try to eventually evict the pod from its node. - optional NodeSelector requiredDuringSchedulingIgnoredDuringExecution = 1; - - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node matches the corresponding matchExpressions; the - // node(s) with the highest sum are the most preferred. - repeated PreferredSchedulingTerm preferredDuringSchedulingIgnoredDuringExecution = 2; -} - -// NodeCondition contains condition information for a node. -message NodeCondition { - // Type of node condition. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time we got an update on a given condition. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastHeartbeatTime = 3; - - // Last time the condition transitioned from one status to another. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; - - // (brief) reason for the condition's last transition. - optional string reason = 5; - - // Human readable message indicating details about last transition. - optional string message = 6; -} - -// NodeDaemonEndpoints lists ports opened by daemons running on the Node. -message NodeDaemonEndpoints { - // Endpoint on which Kubelet is listening.
- optional DaemonEndpoint kubeletEndpoint = 1; -} - -// NodeList is the whole list of all Nodes which have been registered with master. -message NodeList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of nodes - repeated Node items = 2; -} - -// NodeProxyOptions is the query options to a Node's proxy call. -message NodeProxyOptions { - // Path is the URL path to use for the current proxy request to node. - optional string path = 1; -} - -// A node selector represents the union of the results of one or more label queries -// over a set of nodes; that is, it represents the OR of the selectors represented -// by the node selector terms. -message NodeSelector { - // Required. A list of node selector terms. The terms are ORed. - repeated NodeSelectorTerm nodeSelectorTerms = 1; -} - -// A node selector requirement is a selector that contains values, a key, and an operator -// that relates the key and values. -message NodeSelectorRequirement { - // The label key that the selector applies to. - optional string key = 1; - - // Represents a key's relationship to a set of values. - // Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt. - optional string operator = 2; - - // An array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. If the operator is Gt or Lt, the values - // array must have a single element, which will be interpreted as an integer. - // This array is replaced during a strategic merge patch. - repeated string values = 3; -} - -// A null or empty node selector term matches no objects. -message NodeSelectorTerm { - // Required. A list of node selector requirements. The requirements are ANDed. - repeated NodeSelectorRequirement matchExpressions = 1; -} - -// NodeSpec describes the attributes that a node is created with. -message NodeSpec { - // PodCIDR represents the pod IP range assigned to the node. - optional string podCIDR = 1; - - // External ID of the node assigned by some machine database (e.g. a cloud provider). - // Deprecated. - optional string externalID = 2; - - // ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID> - optional string providerID = 3; - - // Unschedulable controls node schedulability of new pods. By default, node is schedulable. - // More info: http://releases.k8s.io/release-1.3/docs/admin/node.md#manual-node-administration - optional bool unschedulable = 4; -} - -// NodeStatus is information about the current status of a node. -message NodeStatus { - // Capacity represents the total resources of a node. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#capacity for more details. - map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> capacity = 1; - - // Allocatable represents the resources of a node that are available for scheduling. - // Defaults to Capacity. - map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> allocatable = 2; - - // NodePhase is the recently observed lifecycle phase of the node. - // More info: http://releases.k8s.io/release-1.3/docs/admin/node.md#node-phase - optional string phase = 3; - - // Conditions is an array of current observed node conditions.
- // More info: http://releases.k8s.io/release-1.3/docs/admin/node.md#node-condition - repeated NodeCondition conditions = 4; - - // List of addresses reachable to the node. - // Queried from cloud provider, if available. - // More info: http://releases.k8s.io/release-1.3/docs/admin/node.md#node-addresses - repeated NodeAddress addresses = 5; - - // Endpoints of daemons running on the Node. - optional NodeDaemonEndpoints daemonEndpoints = 6; - - // Set of ids/uuids to uniquely identify the node. - // More info: http://releases.k8s.io/release-1.3/docs/admin/node.md#node-info - optional NodeSystemInfo nodeInfo = 7; - - // List of container images on this node - repeated ContainerImage images = 8; - - // List of attachable volumes in use (mounted) by the node. - repeated string volumesInUse = 9; - - // List of volumes that are attached to the node. - repeated AttachedVolume volumesAttached = 10; -} - -// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. -message NodeSystemInfo { - // Machine ID reported by the node. - optional string machineID = 1; - - // System UUID reported by the node. - optional string systemUUID = 2; - - // Boot ID reported by the node. - optional string bootID = 3; - - // Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64). - optional string kernelVersion = 4; - - // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). - optional string osImage = 5; - - // ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0). - optional string containerRuntimeVersion = 6; - - // Kubelet Version reported by the node. - optional string kubeletVersion = 7; - - // KubeProxy Version reported by the node. - optional string kubeProxyVersion = 8; - - // The Operating System reported by the node - optional string operatingSystem = 9; - - // The Architecture reported by the node - optional string architecture = 10; -} - -// ObjectFieldSelector selects an APIVersioned field of an object. -message ObjectFieldSelector { - // Version of the schema the FieldPath is written in terms of, defaults to "v1". - optional string apiVersion = 1; - - // Path of the field to select in the specified API version. - optional string fieldPath = 2; -} - -// ObjectMeta is metadata that all persisted resources must have, which includes all objects -// users must create. -message ObjectMeta { - // Name must be unique within a namespace. Is required when creating resources, although - // some resources may allow a client to request the generation of an appropriate name - // automatically. Name is primarily intended for creation idempotence and configuration - // definition. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names - optional string name = 1; - - // GenerateName is an optional prefix, used by the server, to generate a unique - // name ONLY IF the Name field has not been provided. - // If this field is used, the name returned to the client will be different - // than the name passed. This value will also be combined with a unique suffix. - // The provided value has the same validation rules as the Name field, - // and may be truncated by the length of the suffix required to make the value - // unique on the server. 
- // - // If this field is specified and the generated name exists, the server will - // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason - // ServerTimeout indicating a unique name could not be found in the time allotted, and the client - // should retry (optionally after the time indicated in the Retry-After header). - // - // Applied only if Name is not specified. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#idempotency - optional string generateName = 2; - - // Namespace defines the space within each name must be unique. An empty namespace is - // equivalent to the "default" namespace, but "default" is the canonical representation. - // Not all objects are required to be scoped to a namespace - the value of this field for - // those objects will be empty. - // - // Must be a DNS_LABEL. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/namespaces.md - optional string namespace = 3; - - // SelfLink is a URL representing this object. - // Populated by the system. - // Read-only. - optional string selfLink = 4; - - // UID is the unique in time and space value for this object. It is typically generated by - // the server on successful creation of a resource and is not allowed to change on PUT - // operations. - // - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#uids - optional string uid = 5; - - // An opaque value that represents the internal version of this object that can - // be used by clients to determine when objects have changed. May be used for optimistic - // concurrency, change detection, and the watch operation on a resource or set of resources. - // Clients must treat these values as opaque and passed unmodified back to the server. - // They may only be valid for a particular resource or set of resources. - // - // Populated by the system. - // Read-only. - // Value must be treated as opaque by clients and . - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#concurrency-control-and-consistency - optional string resourceVersion = 6; - - // A sequence number representing a specific generation of the desired state. - // Populated by the system. Read-only. - optional int64 generation = 7; - - // CreationTimestamp is a timestamp representing the server time when this object was - // created. It is not guaranteed to be set in happens-before order across separate operations. - // Clients may not set this value. It is represented in RFC3339 form and is in UTC. - // - // Populated by the system. - // Read-only. - // Null for lists. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.Time creationTimestamp = 8; - - // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This - // field is set by the server when a graceful deletion is requested by the user, and is not - // directly settable by a client. The resource will be deleted (no longer visible from - // resource lists, and not reachable by name) after the time in this field. Once set, this - // value may not be unset or be set further into the future, although it may be shortened - // or the resource may be deleted prior to this time. For example, a user may request that - // a pod is deleted in 30 seconds. 
The Kubelet will react by sending a graceful termination - // signal to the containers in the pod. Once the resource is deleted in the API, the Kubelet - // will send a hard termination signal to the container. - // If not set, graceful deletion of the object has not been requested. - // - // Populated by the system when a graceful deletion is requested. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.Time deletionTimestamp = 9; - - // Number of seconds allowed for this object to gracefully terminate before - // it will be removed from the system. Only set when deletionTimestamp is also set. - // May only be shortened. - // Read-only. - optional int64 deletionGracePeriodSeconds = 10; - - // Map of string keys and values that can be used to organize and categorize - // (scope and select) objects. May match selectors of replication controllers - // and services. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md - // TODO: replace map[string]string with labels.LabelSet type - map<string, string> labels = 11; - - // Annotations is an unstructured key value map stored with a resource that may be - // set by external tools to store and retrieve arbitrary metadata. They are not - // queryable and should be preserved when modifying objects. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/annotations.md - map<string, string> annotations = 12; - - // List of objects depended by this object. If ALL objects in the list have - // been deleted, this object will be garbage collected. If this object is managed by a controller, - // then an entry in this list will point to this controller, with the controller field set to true. - // There cannot be more than one managing controller. - repeated OwnerReference ownerReferences = 13; - - // Must be empty before the object is deleted from the registry. Each entry - // is an identifier for the responsible component that will remove the entry - // from the list. If the deletionTimestamp of the object is non-nil, entries - // in this list can only be removed. - repeated string finalizers = 14; -} - -// ObjectReference contains enough information to let you inspect or modify the referred object. -message ObjectReference { - // Kind of the referent. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional string kind = 1; - - // Namespace of the referent. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/namespaces.md - optional string namespace = 2; - - // Name of the referent. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names - optional string name = 3; - - // UID of the referent. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#uids - optional string uid = 4; - - // API version of the referent. - optional string apiVersion = 5; - - // Specific resourceVersion to which this reference is made, if any. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#concurrency-control-and-consistency - optional string resourceVersion = 6; - - // If referring to a piece of an object instead of an entire object, this string - // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
- // For example, if the object reference is to a container within a pod, this would take on a value like: - // "spec.containers{name}" (where "name" refers to the name of the container that triggered - // the event) or if no container name is specified "spec.containers[2]" (container with - // index 2 in this pod). This syntax is chosen only to have some well-defined way of - // referencing a part of an object. - // TODO: this design is not final and this field is subject to change in the future. - optional string fieldPath = 7; -} - -// OwnerReference contains enough information to let you identify an owning -// object. Currently, an owning object must be in the same namespace, so there -// is no namespace field. -message OwnerReference { - // API version of the referent. - optional string apiVersion = 5; - - // Kind of the referent. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional string kind = 1; - - // Name of the referent. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names - optional string name = 3; - - // UID of the referent. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#uids - optional string uid = 4; - - // If true, this reference points to the managing controller. - optional bool controller = 6; -} - -// PersistentVolume (PV) is a storage resource provisioned by an administrator. -// It is analogous to a node. -// More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md -message PersistentVolume { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines a specification of a persistent volume owned by the cluster. - // Provisioned by an administrator. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#persistent-volumes - optional PersistentVolumeSpec spec = 2; - - // Status represents the current information/status for the persistent volume. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#persistent-volumes - optional PersistentVolumeStatus status = 3; -} - -// PersistentVolumeClaim is a user's request for and claim to a persistent volume -message PersistentVolumeClaim { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines the desired characteristics of a volume requested by a pod author. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - optional PersistentVolumeClaimSpec spec = 2; - - // Status represents the current information/status of a persistent volume claim. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - optional PersistentVolumeClaimStatus status = 3; -} - -// PersistentVolumeClaimList is a list of PersistentVolumeClaim items. -message PersistentVolumeClaimList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // A list of persistent volume claims. 
- // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - repeated PersistentVolumeClaim items = 2; -} - -// PersistentVolumeClaimSpec describes the common attributes of storage devices -// and allows a Source for provider-specific attributes -message PersistentVolumeClaimSpec { - // AccessModes contains the desired access modes the volume should have. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#access-modes-1 - repeated string accessModes = 1; - - // A label query over volumes to consider for binding. - optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 4; - - // Resources represents the minimum resources the volume should have. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#resources - optional ResourceRequirements resources = 2; - - // VolumeName is the binding reference to the PersistentVolume backing this claim. - optional string volumeName = 3; -} - -// PersistentVolumeClaimStatus is the current status of a persistent volume claim. -message PersistentVolumeClaimStatus { - // Phase represents the current phase of PersistentVolumeClaim. - optional string phase = 1; - - // AccessModes contains the actual access modes the volume backing the PVC has. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#access-modes-1 - repeated string accessModes = 2; - - // Represents the actual resources of the underlying volume. - map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> capacity = 3; -} - -// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. -// This volume finds the bound PV and mounts that volume for the pod. A -// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another -// type of volume that is owned by someone else (the system). -message PersistentVolumeClaimVolumeSource { - // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - optional string claimName = 1; - - // Will force the ReadOnly setting in VolumeMounts. - // Default false. - optional bool readOnly = 2; -} - -// PersistentVolumeList is a list of PersistentVolume items. -message PersistentVolumeList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of persistent volumes. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md - repeated PersistentVolume items = 2; -} - -// PersistentVolumeSource is similar to VolumeSource but meant for the -// administrator who creates PVs. Exactly one of its members must be set. -message PersistentVolumeSource { - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. Provisioned by an admin. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#gcepersistentdisk - optional GCEPersistentDiskVolumeSource gcePersistentDisk = 1; - - // AWSElasticBlockStore represents an AWS Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. 
- // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#awselasticblockstore - optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 2; - - // HostPath represents a directory on the host. - // Provisioned by a developer or tester. - // This is useful for single-node development and testing only! - // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#hostpath - optional HostPathVolumeSource hostPath = 3; - - // Glusterfs represents a Glusterfs volume that is attached to a host and - // exposed to the pod. Provisioned by an admin. - // More info: http://releases.k8s.io/release-1.3/examples/glusterfs/README.md - optional GlusterfsVolumeSource glusterfs = 4; - - // NFS represents an NFS mount on the host. Provisioned by an admin. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#nfs - optional NFSVolumeSource nfs = 5; - - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md - optional RBDVolumeSource rbd = 6; - - // ISCSI represents an ISCSI Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. Provisioned by an admin. - optional ISCSIVolumeSource iscsi = 7; - - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: http://releases.k8s.io/release-1.3/examples/mysql-cinder-pd/README.md - optional CinderVolumeSource cinder = 8; - - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime - optional CephFSVolumeSource cephfs = 9; - - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - optional FCVolumeSource fc = 10; - - // Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running - optional FlockerVolumeSource flocker = 11; - - // FlexVolume represents a generic volume resource that is - // provisioned/attached using a exec based plugin. This is an - // alpha feature and may change in future. - optional FlexVolumeSource flexVolume = 12; - - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - optional AzureFileVolumeSource azureFile = 13; - - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - optional VsphereVirtualDiskVolumeSource vsphereVolume = 14; -} - -// PersistentVolumeSpec is the specification of a persistent volume. -message PersistentVolumeSpec { - // A description of the persistent volume's resources and capacity. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#capacity - map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> capacity = 1; - - // The actual volume backing the persistent volume. - optional PersistentVolumeSource persistentVolumeSource = 2; - - // AccessModes contains all ways the volume can be mounted. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#access-modes - repeated string accessModes = 3; - - // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. - // Expected to be non-nil when bound. - // claim.VolumeName is the authoritative bind between PV and PVC. 
- // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#binding - optional ObjectReference claimRef = 4; - - // What happens to a persistent volume when released from its claim. - // Valid options are Retain (default) and Recycle. - // Recycling must be supported by the volume plugin underlying this persistent volume. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#recycling-policy - optional string persistentVolumeReclaimPolicy = 5; -} - -// PersistentVolumeStatus is the current status of a persistent volume. -message PersistentVolumeStatus { - // Phase indicates if a volume is available, bound to a claim, or released by a claim. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#phase - optional string phase = 1; - - // A human-readable message indicating details about why the volume is in this state. - optional string message = 2; - - // Reason is a brief CamelCase string that describes any failure and is meant - // for machine parsing and tidy display in the CLI. - optional string reason = 3; -} - -// Pod is a collection of containers that can run on a host. This resource is created -// by clients and scheduled onto hosts. -message Pod { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Specification of the desired behavior of the pod. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional PodSpec spec = 2; - - // Most recently observed status of the pod. - // This data may not be up to date. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional PodStatus status = 3; -} - -// Pod affinity is a group of inter pod affinity scheduling rules. -message PodAffinity { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1; - - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions.
The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2; -} - -// Defines a set of pods (namely those matching the labelSelector -// relative to the given namespace(s)) that this pod should be -// co-located (affinity) or not co-located (anti-affinity) with, -// where co-located is defined as running on a node whose value of -// the label with key <topologyKey> tches that of any node on which -// a pod of the set of pods is running -message PodAffinityTerm { - // A label query over a set of resources, in this case pods. - optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector labelSelector = 1; - - // namespaces specifies which namespaces the labelSelector applies to (matches against); - // nil list means "this pod's namespace," empty list means "all namespaces" - // The json tag here is not "omitempty" since we need to distinguish nil and empty. - // See https://golang.org/pkg/encoding/json/#Marshal for more details. - repeated string namespaces = 2; - - // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - // the labelSelector in the specified namespaces, where co-located is defined as running on a node - // whose value of the label with key topologyKey matches that of any node on which any of the - // selected pods is running. - // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" - // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); - // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. - optional string topologyKey = 3; -} - -// Pod anti affinity is a group of inter pod anti affinity scheduling rules. -message PodAntiAffinity { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1; - - // The scheduler will prefer to schedule pods to nodes that satisfy - // the anti-affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling anti-affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2; -} - -// PodAttachOptions is the query options to a Pod's remote attach call. -// --- -// TODO: merge w/ PodExecOptions below for stdin, stdout, etc -// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY -message PodAttachOptions { - // Stdin if true, redirects the standard input stream of the pod for this call. - // Defaults to false. - optional bool stdin = 1; - - // Stdout if true indicates that stdout is to be redirected for the attach call. - // Defaults to true. - optional bool stdout = 2; - - // Stderr if true indicates that stderr is to be redirected for the attach call. - // Defaults to true. - optional bool stderr = 3; - - // TTY if true indicates that a tty will be allocated for the attach call. - // This is passed through the container runtime so the tty - // is allocated on the worker node by the container runtime. - // Defaults to false. - optional bool tty = 4; - - // The container in which to execute the command. - // Defaults to only container if there is only one container in the pod. - optional string container = 5; -} - -// PodCondition contains details for the current condition of this pod. -message PodCondition { - // Type is the type of the condition. - // Currently only Ready. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#pod-conditions - optional string type = 1; - - // Status is the status of the condition. - // Can be True, False, Unknown. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#pod-conditions - optional string status = 2; - - // Last time we probed the condition. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; - - // Last time the condition transitioned from one status to another. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; - - // Unique, one-word, CamelCase reason for the condition's last transition. - optional string reason = 5; - - // Human-readable message indicating details about last transition. - optional string message = 6; -} - -// PodExecOptions is the query options to a Pod's remote exec call. -// --- -// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging -// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY -message PodExecOptions { - // Redirect the standard input stream of the pod for this call. - // Defaults to false. - optional bool stdin = 1; - - // Redirect the standard output stream of the pod for this call. - // Defaults to true.
- optional bool stdout = 2; - - // Redirect the standard error stream of the pod for this call. - // Defaults to true. - optional bool stderr = 3; - - // TTY if true indicates that a tty will be allocated for the exec call. - // Defaults to false. - optional bool tty = 4; - - // Container in which to execute the command. - // Defaults to only container if there is only one container in the pod. - optional string container = 5; - - // Command is the remote command to execute. argv array. Not executed within a shell. - repeated string command = 6; -} - -// PodList is a list of Pods. -message PodList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of pods. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pods.md - repeated Pod items = 2; -} - -// PodLogOptions is the query options for a Pod's logs REST call. -message PodLogOptions { - // The container for which to stream logs. Defaults to only container if there is one container in the pod. - optional string container = 1; - - // Follow the log stream of the pod. Defaults to false. - optional bool follow = 2; - - // Return previous terminated container logs. Defaults to false. - optional bool previous = 3; - - // A relative time in seconds before the current time from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - optional int64 sinceSeconds = 4; - - // An RFC3339 timestamp from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - optional k8s.io.kubernetes.pkg.api.unversioned.Time sinceTime = 5; - - // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line - // of log output. Defaults to false. - optional bool timestamps = 6; - - // If set, the number of lines from the end of the logs to show. If not specified, - // logs are shown from the creation of the container or sinceSeconds or sinceTime - optional int64 tailLines = 7; - - // If set, the number of bytes to read from the server before terminating the - // log output. This may not display a complete final line of logging, and may return - // slightly more or slightly less than the specified limit. - optional int64 limitBytes = 8; -} - -// PodProxyOptions is the query options to a Pod's proxy call. -message PodProxyOptions { - // Path is the URL path to use for the current proxy request to pod. - optional string path = 1; -} - -// PodSecurityContext holds pod-level security attributes and common container settings. -// Some fields are also present in container.securityContext. Field values of -// container.securityContext take precedence over field values of PodSecurityContext. -message PodSecurityContext { - // The SELinux context to be applied to all containers. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in SecurityContext. If set in - // both SecurityContext and PodSecurityContext, the value specified in SecurityContext - // takes precedence for that container. 
- optional SELinuxOptions seLinuxOptions = 1; - - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence - // for that container. - optional int64 runAsUser = 2; - - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - optional bool runAsNonRoot = 3; - - // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID. If unspecified, no groups will be added to - // any container. - repeated int64 supplementalGroups = 4; - - // A special supplemental group that applies to all containers in a pod. - // Some volume types allow the Kubelet to change the ownership of that volume - // to be owned by the pod: - // - // 1. The owning GID will be the FSGroup - // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) - // 3. The permission bits are OR'd with rw-rw---- - // - // If unset, the Kubelet will not modify the ownership and permissions of any volume. - optional int64 fsGroup = 5; -} - -// PodSpec is a description of a pod. -message PodSpec { - // List of volumes that can be mounted by containers belonging to the pod. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md - repeated Volume volumes = 1; - - // List of containers belonging to the pod. - // Containers cannot currently be added or removed. - // There must be at least one container in a Pod. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/containers.md - repeated Container containers = 2; - - // Restart policy for all containers within the pod. - // One of Always, OnFailure, Never. - // Default to Always. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#restartpolicy - optional string restartPolicy = 3; - - // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - // Value must be non-negative integer. The value zero indicates delete immediately. - // If this value is nil, the default grace period will be used instead. - // The grace period is the duration in seconds after the processes running in the pod are sent - // a termination signal and the time when the processes are forcibly halted with a kill signal. - // Set this value longer than the expected cleanup time for your process. - // Defaults to 30 seconds. - optional int64 terminationGracePeriodSeconds = 4; - - // Optional duration in seconds the pod may be active on the node relative to - // StartTime before the system will actively try to mark it failed and kill associated containers. - // Value must be a positive integer. - optional int64 activeDeadlineSeconds = 5; - - // Set DNS policy for containers within the pod. - // One of 'ClusterFirst' or 'Default'. - // Defaults to "ClusterFirst". - optional string dnsPolicy = 6; - - // NodeSelector is a selector which must be true for the pod to fit on a node. 
- // Selector which must match a node's labels for the pod to be scheduled on that node. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/node-selection/README.md - map<string, string> nodeSelector = 7; - - // ServiceAccountName is the name of the ServiceAccount to use to run this pod. - // More info: http://releases.k8s.io/release-1.3/docs/design/service_accounts.md - optional string serviceAccountName = 8; - - // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. - // Deprecated: Use serviceAccountName instead. - optional string serviceAccount = 9; - - // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. - optional string nodeName = 10; - - // Host networking requested for this pod. Use the host's network namespace. - // If this option is set, the ports that will be used must be specified. - // Default to false. - optional bool hostNetwork = 11; - - // Use the host's pid namespace. - // Optional: Default to false. - optional bool hostPID = 12; - - // Use the host's ipc namespace. - // Optional: Default to false. - optional bool hostIPC = 13; - - // SecurityContext holds pod-level security attributes and common container settings. - // Optional: Defaults to empty. See type description for default values of each field. - optional PodSecurityContext securityContext = 14; - - // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - // If specified, these secrets will be passed to individual puller implementations for them to use. For example, - // in the case of docker, only DockerConfig type secrets are honored. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/images.md#specifying-imagepullsecrets-on-a-pod - repeated LocalObjectReference imagePullSecrets = 15; - - // Specifies the hostname of the Pod - // If not specified, the pod's hostname will be set to a system-defined value. - optional string hostname = 16; - - // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". - // If not specified, the pod will not have a domainname at all. - optional string subdomain = 17; -} - -// PodStatus represents information about the status of a pod. Status may trail the actual -// state of a system. -message PodStatus { - // Current condition of the pod. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#pod-phase - optional string phase = 1; - - // Current service state of pod. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#pod-conditions - repeated PodCondition conditions = 2; - - // A human readable message indicating details about why the pod is in this condition. - optional string message = 3; - - // A brief CamelCase message indicating details about why the pod is in this state. - // e.g. 'OutOfDisk' - optional string reason = 4; - - // IP address of the host to which the pod is assigned. Empty if not yet scheduled. - optional string hostIP = 5; - - // IP address allocated to the pod. Routable at least within the cluster. - // Empty if not yet allocated. - optional string podIP = 6; - - // RFC 3339 date and time at which the object was acknowledged by the Kubelet. - // This is before the Kubelet pulled the container image(s) for the pod. 
- optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 7; - - // The list has one entry per container in the manifest. Each entry is currently the output - // of `docker inspect`. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#container-statuses - repeated ContainerStatus containerStatuses = 8; -} - -// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded -message PodStatusResult { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Most recently observed status of the pod. - // This data may not be up to date. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional PodStatus status = 2; -} - -// PodTemplate describes a template for creating copies of a predefined pod. -message PodTemplate { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Template defines the pods that will be created from this pod template. - // http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional PodTemplateSpec template = 2; -} - -// PodTemplateList is a list of PodTemplates. -message PodTemplateList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of pod templates - repeated PodTemplate items = 2; -} - -// PodTemplateSpec describes the data a pod should have when created from a template -message PodTemplateSpec { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Specification of the desired behavior of the pod. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional PodSpec spec = 2; -} - -// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. -message Preconditions { - // Specifies the target UID. - optional string uid = 1; -} - -// An empty preferred scheduling term matches all objects with implicit weight 0 -// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). -message PreferredSchedulingTerm { - // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - optional int32 weight = 1; - - // A node selector term, associated with the corresponding weight. - optional NodeSelectorTerm preference = 2; -} - -// Probe describes a health check to be performed against a container to determine whether it is -// alive or ready to receive traffic. -message Probe { - // The action taken to determine the health of a container - optional Handler handler = 1; - - // Number of seconds after the container has started before liveness probes are initiated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#container-probes - optional int32 initialDelaySeconds = 2; - - // Number of seconds after which the probe times out. - // Defaults to 1 second. Minimum value is 1. 
- // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#container-probes - optional int32 timeoutSeconds = 3; - - // How often (in seconds) to perform the probe. - // Default to 10 seconds. Minimum value is 1. - optional int32 periodSeconds = 4; - - // Minimum consecutive successes for the probe to be considered successful after having failed. - // Defaults to 1. Must be 1 for liveness. Minimum value is 1. - optional int32 successThreshold = 5; - - // Minimum consecutive failures for the probe to be considered failed after having succeeded. - // Defaults to 3. Minimum value is 1. - optional int32 failureThreshold = 6; -} - -// Represents a Rados Block Device mount that lasts the lifetime of a pod. -// RBD volumes support ownership management and SELinux relabeling. -message RBDVolumeSource { - // A collection of Ceph monitors. - // More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md#how-to-use-it - repeated string monitors = 1; - - // The rados image name. - // More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md#how-to-use-it - optional string image = 2; - - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#rbd - // TODO: how do we prevent errors in the filesystem from compromising the machine - optional string fsType = 3; - - // The rados pool name. - // Default is rbd. - // More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md#how-to-use-it. - optional string pool = 4; - - // The rados user name. - // Default is admin. - // More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md#how-to-use-it - optional string user = 5; - - // Keyring is the path to key ring for RBDUser. - // Default is /etc/ceph/keyring. - // More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md#how-to-use-it - optional string keyring = 6; - - // SecretRef is name of the authentication secret for RBDUser. If provided - // overrides keyring. - // Default is nil. - // More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md#how-to-use-it - optional LocalObjectReference secretRef = 7; - - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. - // More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md#how-to-use-it - optional bool readOnly = 8; -} - -// RangeAllocation is not a public type. -message RangeAllocation { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Range is string that identifies the range represented by 'data'. - optional string range = 2; - - // Data is a bit array containing all allocated addresses in the previous segment. - optional bytes data = 3; -} - -// ReplicationController represents the configuration of a replication controller. -message ReplicationController { - // If the Labels of a ReplicationController are empty, they are defaulted to - // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines the specification of the desired behavior of the replication controller. 
- // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional ReplicationControllerSpec spec = 2; - - // Status is the most recently observed status of the replication controller. - // This data may be out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional ReplicationControllerStatus status = 3; -} - -// ReplicationControllerList is a collection of replication controllers. -message ReplicationControllerList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of replication controllers. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md - repeated ReplicationController items = 2; -} - -// ReplicationControllerSpec is the specification of a replication controller. -message ReplicationControllerSpec { - // Replicas is the number of desired replicas. - // This is a pointer to distinguish between explicit zero and unspecified. - // Defaults to 1. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#what-is-a-replication-controller - optional int32 replicas = 1; - - // Selector is a label query over pods that should match the Replicas count. - // If Selector is empty, it is defaulted to the labels present on the Pod template. - // Label keys and values that must match in order to be controlled by this replication - // controller, if empty defaulted to labels on Pod template. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - map<string, string> selector = 2; - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. This takes precedence over a TemplateRef. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#pod-template - optional PodTemplateSpec template = 3; -} - -// ReplicationControllerStatus represents the current status of a replication -// controller. -message ReplicationControllerStatus { - // Replicas is the most recently oberved number of replicas. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#what-is-a-replication-controller - optional int32 replicas = 1; - - // The number of pods that have labels matching the labels of the pod template of the replication controller. - optional int32 fullyLabeledReplicas = 2; - - // ObservedGeneration reflects the generation of the most recently observed replication controller. - optional int64 observedGeneration = 3; -} - -// ResourceFieldSelector represents container resources (cpu, memory) and their output format -message ResourceFieldSelector { - // Container name: required for volumes, optional for env vars - optional string containerName = 1; - - // Required: resource to select - optional string resource = 2; - - // Specifies the output format of the exposed resources, defaults to "1" - optional k8s.io.kubernetes.pkg.api.resource.Quantity divisor = 3; -} - -// ResourceQuota sets aggregate quota restrictions enforced per namespace -message ResourceQuota { - // Standard object's metadata. 
- // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines the desired quota. - // http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional ResourceQuotaSpec spec = 2; - - // Status defines the actual enforced quota and its current usage. - // http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional ResourceQuotaStatus status = 3; -} - -// ResourceQuotaList is a list of ResourceQuota items. -message ResourceQuotaList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of ResourceQuota objects. - // More info: http://releases.k8s.io/release-1.3/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota - repeated ResourceQuota items = 2; -} - -// ResourceQuotaSpec defines the desired hard limits to enforce for Quota. -message ResourceQuotaSpec { - // Hard is the set of desired hard limits for each named resource. - // More info: http://releases.k8s.io/release-1.3/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota - map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> hard = 1; - - // A collection of filters that must match each object tracked by a quota. - // If not specified, the quota matches all objects. - repeated string scopes = 2; -} - -// ResourceQuotaStatus defines the enforced hard limits and observed use. -message ResourceQuotaStatus { - // Hard is the set of enforced hard limits for each named resource. - // More info: http://releases.k8s.io/release-1.3/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota - map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> hard = 1; - - // Used is the current observed total usage of the resource in the namespace. - map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> used = 2; -} - -// ResourceRequirements describes the compute resource requirements. -message ResourceRequirements { - // Limits describes the maximum amount of compute resources allowed. - // More info: http://releases.k8s.io/release-1.3/docs/design/resources.md#resource-specifications - map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> limits = 1; - - // Requests describes the minimum amount of compute resources required. - // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value. - // More info: http://releases.k8s.io/release-1.3/docs/design/resources.md#resource-specifications - map<string, k8s.io.kubernetes.pkg.api.resource.Quantity> requests = 2; -} - -// SELinuxOptions are the labels to be applied to the container -message SELinuxOptions { - // User is a SELinux user label that applies to the container. - optional string user = 1; - - // Role is a SELinux role label that applies to the container. - optional string role = 2; - - // Type is a SELinux type label that applies to the container. - optional string type = 3; - - // Level is SELinux level label that applies to the container. - optional string level = 4; -} - -// Secret holds secret data of a certain type. The total bytes of the values in -// the Data field must be less than MaxSecretSize bytes. -message Secret { - // Standard object's metadata. 
- // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN - // or leading dot followed by valid DNS_SUBDOMAIN. - // The serialized form of the secret data is a base64 encoded string, - // representing the arbitrary (possibly non-string) data value here. - // Described in https://tools.ietf.org/html/rfc4648#section-4 - map<string, bytes> data = 2; - - // Used to facilitate programmatic handling of secret data. - optional string type = 3; -} - -// SecretKeySelector selects a key of a Secret. -message SecretKeySelector { - // The name of the secret in the pod's namespace to select from. - optional LocalObjectReference localObjectReference = 1; - - // The key of the secret to select from. Must be a valid secret key. - optional string key = 2; -} - -// SecretList is a list of Secret. -message SecretList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of secret objects. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/secrets.md - repeated Secret items = 2; -} - -// Adapts a Secret into a volume. -// -// The contents of the target Secret's Data field will be presented in a volume -// as files using the keys in the Data field as the file names. -// Secret volumes support ownership management and SELinux relabeling. -message SecretVolumeSource { - // Name of the secret in the pod's namespace to use. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#secrets - optional string secretName = 1; - - // If unspecified, each key-value pair in the Data field of the referenced - // Secret will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the Secret, - // the volume setup will error. Paths must be relative and may not contain - // the '..' path or start with '..'. - repeated KeyToPath items = 2; -} - -// SecurityContext holds security configuration that will be applied to a container. -// Some fields are present in both SecurityContext and PodSecurityContext. When both -// are set, the values in SecurityContext take precedence. -message SecurityContext { - // The capabilities to add/drop when running containers. - // Defaults to the default set of capabilities granted by the container runtime. - optional Capabilities capabilities = 1; - - // Run container in privileged mode. - // Processes in privileged containers are essentially equivalent to root on the host. - // Defaults to false. - optional bool privileged = 2; - - // The SELinux context to be applied to the container. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - optional SELinuxOptions seLinuxOptions = 3; - - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in PodSecurityContext. 
If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - optional int64 runAsUser = 4; - - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - optional bool runAsNonRoot = 5; - - // Whether this container has a read-only root filesystem. - // Default is false. - optional bool readOnlyRootFilesystem = 6; -} - -// SerializedReference is a reference to serialized object. -message SerializedReference { - // The reference to an object in the system. - optional ObjectReference reference = 1; -} - -// Service is a named abstraction of software service (for example, mysql) consisting of local port -// (for example 3306) that the proxy listens on, and the selector that determines which pods -// will answer requests sent through the proxy. -message Service { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Spec defines the behavior of a service. - // http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional ServiceSpec spec = 2; - - // Most recently observed status of the service. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional ServiceStatus status = 3; -} - -// ServiceAccount binds together: -// * a name, understood by users, and perhaps by peripheral systems, for an identity -// * a principal that can be authenticated and authorized -// * a set of secrets -message ServiceAccount { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional ObjectMeta metadata = 1; - - // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/secrets.md - repeated ObjectReference secrets = 2; - - // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images - // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets - // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret - repeated LocalObjectReference imagePullSecrets = 3; -} - -// ServiceAccountList is a list of ServiceAccount objects -message ServiceAccountList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of ServiceAccounts. - // More info: http://releases.k8s.io/release-1.3/docs/design/service_accounts.md#service-accounts - repeated ServiceAccount items = 2; -} - -// ServiceList holds a list of services. -message ServiceList { - // Standard list metadata. 
- // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of services - repeated Service items = 2; -} - -// ServicePort contains information on service's port. -message ServicePort { - // The name of this port within the service. This must be a DNS_LABEL. - // All ports within a ServiceSpec must have unique names. This maps to - // the 'Name' field in EndpointPort objects. - // Optional if only one ServicePort is defined on this service. - optional string name = 1; - - // The IP protocol for this port. Supports "TCP" and "UDP". - // Default is TCP. - optional string protocol = 2; - - // The port that will be exposed by this service. - optional int32 port = 3; - - // Number or name of the port to access on the pods targeted by the service. - // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - // If this is a string, it will be looked up as a named port in the - // target Pod's container ports. If this is not specified, the value - // of the 'port' field is used (an identity map). - // This field is ignored for services with clusterIP=None, and should be - // omitted or set equal to the 'port' field. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/services.md#defining-a-service - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString targetPort = 4; - - // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. - // Usually assigned by the system. If specified, it will be allocated to the service - // if unused or else creation of the service will fail. - // Default is to auto-allocate a port if the ServiceType of this Service requires one. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/services.md#type--nodeport - optional int32 nodePort = 5; -} - -// ServiceProxyOptions is the query options to a Service's proxy call. -message ServiceProxyOptions { - // Path is the part of URLs that include service endpoints, suffixes, - // and parameters to use for the current proxy request to service. - // For example, the whole request URL is - // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. - // Path is _search?q=user:kimchy. - optional string path = 1; -} - -// ServiceSpec describes the attributes that a user creates on a service. -message ServiceSpec { - // The list of ports that are exposed by this service. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/services.md#virtual-ips-and-service-proxies - repeated ServicePort ports = 1; - - // This service will route traffic to pods having labels matching this selector. - // Label keys and values that must match in order to receive traffic for this service. - // If empty, all pods are selected, if not specified, endpoints must be manually specified. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/services.md#overview - map<string, string> selector = 2; - - // ClusterIP is usually assigned by the master and is the IP address of the service. - // If specified, it will be allocated to the service if it is unused - // or else creation of the service will fail. - // Valid values are None, empty string (""), or a valid IP address. - // 'None' can be specified for a headless service when proxying is not required. - // Cannot be updated. 
- // More info: http://releases.k8s.io/release-1.3/docs/user-guide/services.md#virtual-ips-and-service-proxies - optional string clusterIP = 3; - - // Type of exposed service. Must be ClusterIP, NodePort, or LoadBalancer. - // Defaults to ClusterIP. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/services.md#external-services - optional string type = 4; - - // externalIPs is a list of IP addresses for which nodes in the cluster - // will also accept traffic for this service. These IPs are not managed by - // Kubernetes. The user is responsible for ensuring that traffic arrives - // at a node with this IP. A common example is external load-balancers - // that are not part of the Kubernetes system. A previous form of this - // functionality exists as the deprecatedPublicIPs field. When using this - // field, callers should also clear the deprecatedPublicIPs field. - repeated string externalIPs = 5; - - // deprecatedPublicIPs is deprecated and replaced by the externalIPs field - // with almost the exact same semantics. This field is retained in the v1 - // API for compatibility until at least 8/20/2016. It will be removed from - // any new API revisions. If both deprecatedPublicIPs *and* externalIPs are - // set, deprecatedPublicIPs is used. - // +genconversion=false - repeated string deprecatedPublicIPs = 6; - - // Supports "ClientIP" and "None". Used to maintain session affinity. - // Enable client IP based session affinity. - // Must be ClientIP or None. - // Defaults to None. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/services.md#virtual-ips-and-service-proxies - optional string sessionAffinity = 7; - - // Only applies to Service Type: LoadBalancer - // LoadBalancer will get created with the IP specified in this field. - // This feature depends on whether the underlying cloud-provider supports specifying - // the loadBalancerIP when a load balancer is created. - // This field will be ignored if the cloud-provider does not support the feature. - optional string loadBalancerIP = 8; - - // If specified and supported by the platform, this will restrict traffic through the cloud-provider - // load-balancer will be restricted to the specified client IPs. This field will be ignored if the - // cloud-provider does not support the feature." - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/services-firewalls.md - repeated string loadBalancerSourceRanges = 9; -} - -// ServiceStatus represents the current status of a service. -message ServiceStatus { - // LoadBalancer contains the current status of the load-balancer, - // if one is present. - optional LoadBalancerStatus loadBalancer = 1; -} - -// TCPSocketAction describes an action based on opening a socket -message TCPSocketAction { - // Number or name of the port to access on the container. - // Number must be in the range 1 to 65535. - // Name must be an IANA_SVC_NAME. - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString port = 1; -} - -// The node this Taint is attached to has the effect "effect" on -// any pod that that does not tolerate the Taint. -message Taint { - // Required. The taint key to be applied to a node. - optional string key = 1; - - // Required. The taint value corresponding to the taint key. - optional string value = 2; - - // Required. The effect of the taint on pods - // that do not tolerate the taint. - // Valid effects are NoSchedule and PreferNoSchedule. 
- optional string effect = 3; -} - -// The pod this Toleration is attached to tolerates any taint that matches -// the triple <key,value,effect> using the matching operator <operator>. -message Toleration { - // Required. Key is the taint key that the toleration applies to. - optional string key = 1; - - // operator represents a key's relationship to the value. - // Valid operators are Exists and Equal. Defaults to Equal. - // Exists is equivalent to wildcard for value, so that a pod can - // tolerate all taints of a particular category. - optional string operator = 2; - - // Value is the taint value the toleration matches to. - // If the operator is Exists, the value should be empty, otherwise just a regular string. - optional string value = 3; - - // Effect indicates the taint effect to match. Empty means match all taint effects. - // When specified, allowed values are NoSchedule and PreferNoSchedule. - optional string effect = 4; -} - -// Volume represents a named volume in a pod that may be accessed by any container in the pod. -message Volume { - // Volume's name. - // Must be a DNS_LABEL and unique within the pod. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names - optional string name = 1; - - // VolumeSource represents the location and type of the mounted volume. - // If not specified, the Volume is implied to be an EmptyDir. - // This implied behavior is deprecated and will be removed in a future version. - optional VolumeSource volumeSource = 2; -} - -// VolumeMount describes a mounting of a Volume within a container. -message VolumeMount { - // This must match the Name of a Volume. - optional string name = 1; - - // Mounted read-only if true, read-write otherwise (false or unspecified). - // Defaults to false. - optional bool readOnly = 2; - - // Path within the container at which the volume should be mounted. Must - // not contain ':'. - optional string mountPath = 3; - - // Path within the volume from which the container's volume should be mounted. - // Defaults to "" (volume's root). - optional string subPath = 4; -} - -// Represents the source of a volume to mount. -// Only one of its members may be specified. -message VolumeSource { - // HostPath represents a pre-existing file or directory on the host - // machine that is directly exposed to the container. This is generally - // used for system agents or other privileged things that are allowed - // to see the host machine. Most containers will NOT need this. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#hostpath - // --- - // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not - // mount host directories as read/write. - optional HostPathVolumeSource hostPath = 1; - - // EmptyDir represents a temporary directory that shares a pod's lifetime. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#emptydir - optional EmptyDirVolumeSource emptyDir = 2; - - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#gcepersistentdisk - optional GCEPersistentDiskVolumeSource gcePersistentDisk = 3; - - // AWSElasticBlockStore represents an AWS Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. 
- // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#awselasticblockstore - optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4; - - // GitRepo represents a git repository at a particular revision. - optional GitRepoVolumeSource gitRepo = 5; - - // Secret represents a secret that should populate this volume. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#secrets - optional SecretVolumeSource secret = 6; - - // NFS represents an NFS mount on the host that shares a pod's lifetime - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#nfs - optional NFSVolumeSource nfs = 7; - - // ISCSI represents an ISCSI Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/release-1.3/examples/iscsi/README.md - optional ISCSIVolumeSource iscsi = 8; - - // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/release-1.3/examples/glusterfs/README.md - optional GlusterfsVolumeSource glusterfs = 9; - - // PersistentVolumeClaimVolumeSource represents a reference to a - // PersistentVolumeClaim in the same namespace. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10; - - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md - optional RBDVolumeSource rbd = 11; - - // FlexVolume represents a generic volume resource that is - // provisioned/attached using a exec based plugin. This is an - // alpha feature and may change in future. - optional FlexVolumeSource flexVolume = 12; - - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: http://releases.k8s.io/release-1.3/examples/mysql-cinder-pd/README.md - optional CinderVolumeSource cinder = 13; - - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime - optional CephFSVolumeSource cephfs = 14; - - // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running - optional FlockerVolumeSource flocker = 15; - - // DownwardAPI represents downward API about the pod that should populate this volume - optional DownwardAPIVolumeSource downwardAPI = 16; - - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - optional FCVolumeSource fc = 17; - - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - optional AzureFileVolumeSource azureFile = 18; - - // ConfigMap represents a configMap that should populate this volume - optional ConfigMapVolumeSource configMap = 19; - - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - optional VsphereVirtualDiskVolumeSource vsphereVolume = 20; -} - -// Represents a vSphere volume resource. -message VsphereVirtualDiskVolumeSource { - // Path that identifies vSphere volume vmdk - optional string volumePath = 1; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
- optional string fsType = 2; -} - -// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) -message WeightedPodAffinityTerm { - // weight associated with matching the corresponding podAffinityTerm, - // in the range 1-100. - optional int32 weight = 1; - - // Required. A pod affinity term, associated with the corresponding weight. - optional PodAffinityTerm podAffinityTerm = 2; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/meta.go b/vendor/k8s.io/kubernetes/pkg/api/v1/meta.go deleted file mode 100644 index 935bd973b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/meta.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/meta/metatypes" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/types" -) - -func (obj *ObjectMeta) GetObjectMeta() meta.Object { return obj } - -// Namespace implements meta.Object for any object with an ObjectMeta typed field. Allows -// fast, direct access to metadata fields for API objects. -func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace } -func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace } -func (meta *ObjectMeta) GetName() string { return meta.Name } -func (meta *ObjectMeta) SetName(name string) { meta.Name = name } -func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName } -func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName } -func (meta *ObjectMeta) GetUID() types.UID { return meta.UID } -func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid } -func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion } -func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } -func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink } -func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } -func (meta *ObjectMeta) GetCreationTimestamp() unversioned.Time { return meta.CreationTimestamp } -func (meta *ObjectMeta) SetCreationTimestamp(creationTimestamp unversioned.Time) { - meta.CreationTimestamp = creationTimestamp -} -func (meta *ObjectMeta) GetDeletionTimestamp() *unversioned.Time { return meta.DeletionTimestamp } -func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *unversioned.Time) { - meta.DeletionTimestamp = deletionTimestamp -} -func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels } -func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels } -func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations } -func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations } -func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } -func (meta *ObjectMeta) 
SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } - -func (meta *ObjectMeta) GetOwnerReferences() []metatypes.OwnerReference { - ret := make([]metatypes.OwnerReference, len(meta.OwnerReferences)) - for i := 0; i < len(meta.OwnerReferences); i++ { - ret[i].Kind = meta.OwnerReferences[i].Kind - ret[i].Name = meta.OwnerReferences[i].Name - ret[i].UID = meta.OwnerReferences[i].UID - ret[i].APIVersion = meta.OwnerReferences[i].APIVersion - if meta.OwnerReferences[i].Controller != nil { - value := *meta.OwnerReferences[i].Controller - ret[i].Controller = &value - } - } - return ret -} - -func (meta *ObjectMeta) SetOwnerReferences(references []metatypes.OwnerReference) { - newReferences := make([]OwnerReference, len(references)) - for i := 0; i < len(references); i++ { - newReferences[i].Kind = references[i].Kind - newReferences[i].Name = references[i].Name - newReferences[i].UID = references[i].UID - newReferences[i].APIVersion = references[i].APIVersion - if references[i].Controller != nil { - value := *references[i].Controller - newReferences[i].Controller = &value - } - } - meta.OwnerReferences = newReferences -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/register.go b/vendor/k8s.io/kubernetes/pkg/api/v1/register.go deleted file mode 100644 index bf07189ee..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/register.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" -) - -// GroupName is the group name use in this package -const GroupName = "" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1"} - -func AddToScheme(scheme *runtime.Scheme) { - // Add the API to Scheme. - addKnownTypes(scheme) - addConversionFuncs(scheme) - addDefaultingFuncs(scheme) -} - -// Adds the list of known types to api.Scheme. 
-func addKnownTypes(scheme *runtime.Scheme) { - scheme.AddKnownTypes(SchemeGroupVersion, - &Pod{}, - &PodList{}, - &PodStatusResult{}, - &PodTemplate{}, - &PodTemplateList{}, - &ReplicationController{}, - &ReplicationControllerList{}, - &Service{}, - &ServiceProxyOptions{}, - &ServiceList{}, - &Endpoints{}, - &EndpointsList{}, - &Node{}, - &NodeList{}, - &NodeProxyOptions{}, - &Binding{}, - &Event{}, - &EventList{}, - &List{}, - &LimitRange{}, - &LimitRangeList{}, - &ResourceQuota{}, - &ResourceQuotaList{}, - &Namespace{}, - &NamespaceList{}, - &Secret{}, - &SecretList{}, - &ServiceAccount{}, - &ServiceAccountList{}, - &PersistentVolume{}, - &PersistentVolumeList{}, - &PersistentVolumeClaim{}, - &PersistentVolumeClaimList{}, - &DeleteOptions{}, - &ExportOptions{}, - &ListOptions{}, - &PodAttachOptions{}, - &PodLogOptions{}, - &PodExecOptions{}, - &PodProxyOptions{}, - &ComponentStatus{}, - &ComponentStatusList{}, - &SerializedReference{}, - &RangeAllocation{}, - &ConfigMap{}, - &ConfigMapList{}, - ) - - // Add common types - scheme.AddKnownTypes(SchemeGroupVersion, &unversioned.Status{}) - - // Add the watch version that applies - versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/api/v1/types.generated.go deleted file mode 100644 index 1b6c236db..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/types.generated.go +++ /dev/null @@ -1,59933 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg3_resource "k8s.io/kubernetes/pkg/api/resource" - pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg5_runtime "k8s.io/kubernetes/pkg/runtime" - pkg1_types "k8s.io/kubernetes/pkg/types" - pkg4_intstr "k8s.io/kubernetes/pkg/util/intstr" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg3_resource.Quantity - var v1 pkg2_unversioned.Time - var v2 pkg5_runtime.RawExtension - var v3 pkg1_types.UID - var v4 pkg4_intstr.IntOrString - var v5 time.Time - _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 - } -} - -func (x *ObjectMeta) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [14]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Name != "" - yyq2[1] = x.GenerateName != "" - yyq2[2] = x.Namespace != "" - yyq2[3] = x.SelfLink != "" - yyq2[4] = x.UID != "" - yyq2[5] = x.ResourceVersion != "" - yyq2[6] = x.Generation != 0 - yyq2[7] = true - yyq2[8] = x.DeletionTimestamp != nil - yyq2[9] = x.DeletionGracePeriodSeconds != nil - yyq2[10] = len(x.Labels) != 0 - yyq2[11] = len(x.Annotations) != 0 - yyq2[12] = len(x.OwnerReferences) != 0 - yyq2[13] = len(x.Finalizers) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(14) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("generateName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.GenerateName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) - } - 
} else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selfLink")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SelfLink)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeInt(int64(x.Generation)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("generation")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeInt(int64(x.Generation)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yy25 := &x.CreationTimestamp - yym26 := z.EncBinary() - _ = yym26 - if false { - } else if z.HasExtensions() && z.EncExt(yy25) { - } else if yym26 { - z.EncBinaryMarshal(yy25) - } else if !yym26 && z.IsJSONHandle() { - z.EncJSONMarshal(yy25) - } else { - z.EncFallback(yy25) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("creationTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy27 := &x.CreationTimestamp - yym28 := z.EncBinary() - _ = yym28 - if false { - } else if z.HasExtensions() && z.EncExt(yy27) { - } else if yym28 { - z.EncBinaryMarshal(yy27) - } else if !yym28 && z.IsJSONHandle() { - z.EncJSONMarshal(yy27) - } else { - z.EncFallback(yy27) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.DeletionTimestamp == nil { - r.EncodeNil() - } else { - yym30 := z.EncBinary() - _ = yym30 - if false { - } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { - } else if yym30 { - z.EncBinaryMarshal(x.DeletionTimestamp) - 
} else if !yym30 && z.IsJSONHandle() { - z.EncJSONMarshal(x.DeletionTimestamp) - } else { - z.EncFallback(x.DeletionTimestamp) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletionTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DeletionTimestamp == nil { - r.EncodeNil() - } else { - yym31 := z.EncBinary() - _ = yym31 - if false { - } else if z.HasExtensions() && z.EncExt(x.DeletionTimestamp) { - } else if yym31 { - z.EncBinaryMarshal(x.DeletionTimestamp) - } else if !yym31 && z.IsJSONHandle() { - z.EncJSONMarshal(x.DeletionTimestamp) - } else { - z.EncFallback(x.DeletionTimestamp) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.DeletionGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy33 := *x.DeletionGracePeriodSeconds - yym34 := z.EncBinary() - _ = yym34 - if false { - } else { - r.EncodeInt(int64(yy33)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletionGracePeriodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DeletionGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy35 := *x.DeletionGracePeriodSeconds - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - r.EncodeInt(int64(yy35)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.Labels == nil { - r.EncodeNil() - } else { - yym38 := z.EncBinary() - _ = yym38 - if false { - } else { - z.F.EncMapStringStringV(x.Labels, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("labels")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Labels == nil { - r.EncodeNil() - } else { - yym39 := z.EncBinary() - _ = yym39 - if false { - } else { - z.F.EncMapStringStringV(x.Labels, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.Annotations == nil { - r.EncodeNil() - } else { - yym41 := z.EncBinary() - _ = yym41 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("annotations")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Annotations == nil { - r.EncodeNil() - } else { - yym42 := z.EncBinary() - _ = yym42 - if false { - } else { - z.F.EncMapStringStringV(x.Annotations, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - if x.OwnerReferences == nil { - r.EncodeNil() - } else { - yym44 := z.EncBinary() - _ = yym44 - if false { - } else { - h.encSliceOwnerReference(([]OwnerReference)(x.OwnerReferences), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ownerReferences")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.OwnerReferences == nil { - r.EncodeNil() - } else { - yym45 := z.EncBinary() - _ = yym45 - 
if false { - } else { - h.encSliceOwnerReference(([]OwnerReference)(x.OwnerReferences), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym47 := z.EncBinary() - _ = yym47 - if false { - } else { - z.F.EncSliceStringV(x.Finalizers, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("finalizers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym48 := z.EncBinary() - _ = yym48 - if false { - } else { - z.F.EncSliceStringV(x.Finalizers, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ObjectMeta) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ObjectMeta) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "generateName": - if r.TryDecodeAsNil() { - x.GenerateName = "" - } else { - x.GenerateName = string(r.DecodeString()) - } - case "namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - case "selfLink": - if r.TryDecodeAsNil() { - x.SelfLink = "" - } else { - x.SelfLink = string(r.DecodeString()) - } - case "uid": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg1_types.UID(r.DecodeString()) - } - case "resourceVersion": - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - case "generation": - if r.TryDecodeAsNil() { - x.Generation = 0 - } else { - x.Generation = int64(r.DecodeInt(64)) - } - case "creationTimestamp": - if r.TryDecodeAsNil() { - x.CreationTimestamp = pkg2_unversioned.Time{} - } else { - yyv11 := &x.CreationTimestamp - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else if yym12 { - z.DecBinaryUnmarshal(yyv11) - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv11) - } else 
{ - z.DecFallback(yyv11, false) - } - } - case "deletionTimestamp": - if r.TryDecodeAsNil() { - if x.DeletionTimestamp != nil { - x.DeletionTimestamp = nil - } - } else { - if x.DeletionTimestamp == nil { - x.DeletionTimestamp = new(pkg2_unversioned.Time) - } - yym14 := z.DecBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym14 { - z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym14 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.DeletionTimestamp) - } else { - z.DecFallback(x.DeletionTimestamp, false) - } - } - case "deletionGracePeriodSeconds": - if r.TryDecodeAsNil() { - if x.DeletionGracePeriodSeconds != nil { - x.DeletionGracePeriodSeconds = nil - } - } else { - if x.DeletionGracePeriodSeconds == nil { - x.DeletionGracePeriodSeconds = new(int64) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - case "labels": - if r.TryDecodeAsNil() { - x.Labels = nil - } else { - yyv17 := &x.Labels - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - z.F.DecMapStringStringX(yyv17, false, d) - } - } - case "annotations": - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv19 := &x.Annotations - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - z.F.DecMapStringStringX(yyv19, false, d) - } - } - case "ownerReferences": - if r.TryDecodeAsNil() { - x.OwnerReferences = nil - } else { - yyv21 := &x.OwnerReferences - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - h.decSliceOwnerReference((*[]OwnerReference)(yyv21), d) - } - } - case "finalizers": - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv23 := &x.Finalizers - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - z.F.DecSliceStringX(yyv23, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ObjectMeta) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj25 int - var yyb25 bool - var yyhl25 bool = l >= 0 - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.GenerateName = "" - } else { - x.GenerateName = string(r.DecodeString()) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SelfLink = "" - } else { - x.SelfLink = 
string(r.DecodeString()) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg1_types.UID(r.DecodeString()) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Generation = 0 - } else { - x.Generation = int64(r.DecodeInt(64)) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CreationTimestamp = pkg2_unversioned.Time{} - } else { - yyv33 := &x.CreationTimestamp - yym34 := z.DecBinary() - _ = yym34 - if false { - } else if z.HasExtensions() && z.DecExt(yyv33) { - } else if yym34 { - z.DecBinaryUnmarshal(yyv33) - } else if !yym34 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv33) - } else { - z.DecFallback(yyv33, false) - } - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DeletionTimestamp != nil { - x.DeletionTimestamp = nil - } - } else { - if x.DeletionTimestamp == nil { - x.DeletionTimestamp = new(pkg2_unversioned.Time) - } - yym36 := z.DecBinary() - _ = yym36 - if false { - } else if z.HasExtensions() && z.DecExt(x.DeletionTimestamp) { - } else if yym36 { - z.DecBinaryUnmarshal(x.DeletionTimestamp) - } else if !yym36 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.DeletionTimestamp) - } else { - z.DecFallback(x.DeletionTimestamp, false) - } - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DeletionGracePeriodSeconds != nil { - x.DeletionGracePeriodSeconds = nil - } - } else { - if x.DeletionGracePeriodSeconds == nil { - x.DeletionGracePeriodSeconds = new(int64) - } - yym38 := z.DecBinary() - _ = yym38 - if false { - } else { - *((*int64)(x.DeletionGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Labels = nil - } else { - yyv39 := &x.Labels - yym40 := z.DecBinary() - _ = yym40 - if false { - } else { - z.F.DecMapStringStringX(yyv39, false, d) - } - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Annotations = nil - } else { - yyv41 := &x.Annotations - yym42 := z.DecBinary() - _ = yym42 - if false { - } else { - z.F.DecMapStringStringX(yyv41, false, d) - } - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.OwnerReferences = nil - } else { - yyv43 := &x.OwnerReferences - yym44 := z.DecBinary() - _ = yym44 - if false { - } else { - h.decSliceOwnerReference((*[]OwnerReference)(yyv43), d) - } - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv45 := &x.Finalizers - yym46 := z.DecBinary() - _ = yym46 - if false { - } else { - z.F.DecSliceStringX(yyv45, false, d) - } - } - for { - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj25-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Volume) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [21]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.VolumeSource.HostPath != nil && x.HostPath != nil - yyq2[2] = x.VolumeSource.EmptyDir != nil && x.EmptyDir != nil - yyq2[3] = x.VolumeSource.GCEPersistentDisk != nil && x.GCEPersistentDisk != nil - yyq2[4] = x.VolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil - yyq2[5] = x.VolumeSource.GitRepo != nil && x.GitRepo != nil - yyq2[6] = x.VolumeSource.Secret != nil && x.Secret != nil - yyq2[7] = x.VolumeSource.NFS != nil && x.NFS != nil - yyq2[8] = x.VolumeSource.ISCSI != nil && x.ISCSI != nil - yyq2[9] = x.VolumeSource.Glusterfs != nil && x.Glusterfs != nil - yyq2[10] = x.VolumeSource.PersistentVolumeClaim != nil && x.PersistentVolumeClaim != nil - yyq2[11] = x.VolumeSource.RBD != nil && x.RBD != nil - yyq2[12] = x.VolumeSource.FlexVolume != nil && x.FlexVolume != nil - yyq2[13] = x.VolumeSource.Cinder != nil && x.Cinder != nil - yyq2[14] = x.VolumeSource.CephFS != nil && x.CephFS != nil - yyq2[15] = x.VolumeSource.Flocker != nil && x.Flocker != nil - yyq2[16] = x.VolumeSource.DownwardAPI != nil && x.DownwardAPI != nil - yyq2[17] = x.VolumeSource.FC != nil && x.FC != nil - yyq2[18] = x.VolumeSource.AzureFile != nil && x.AzureFile != nil - yyq2[19] = x.VolumeSource.ConfigMap != nil && x.ConfigMap != nil - yyq2[20] = x.VolumeSource.VsphereVolume != nil && x.VsphereVolume != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(21) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := 
z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - var yyn6 bool - if x.VolumeSource.HostPath == nil { - yyn6 = true - goto LABEL6 - } - LABEL6: - if yyr2 || yy2arr2 { - if yyn6 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn6 { - r.EncodeNil() - } else { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } - } - } - var yyn9 bool - if x.VolumeSource.EmptyDir == nil { - yyn9 = true - goto LABEL9 - } - LABEL9: - if yyr2 || yy2arr2 { - if yyn9 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.EmptyDir == nil { - r.EncodeNil() - } else { - x.EmptyDir.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("emptyDir")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn9 { - r.EncodeNil() - } else { - if x.EmptyDir == nil { - r.EncodeNil() - } else { - x.EmptyDir.CodecEncodeSelf(e) - } - } - } - } - var yyn12 bool - if x.VolumeSource.GCEPersistentDisk == nil { - yyn12 = true - goto LABEL12 - } - LABEL12: - if yyr2 || yy2arr2 { - if yyn12 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn12 { - r.EncodeNil() - } else { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } - } - } - var yyn15 bool - if x.VolumeSource.AWSElasticBlockStore == nil { - yyn15 = true - goto LABEL15 - } - LABEL15: - if yyr2 || yy2arr2 { - if yyn15 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn15 { - r.EncodeNil() - } else { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } - } - } - var yyn18 bool - if x.VolumeSource.GitRepo == nil { - yyn18 = true - goto LABEL18 - } - LABEL18: - if yyr2 || yy2arr2 { - if yyn18 { - r.EncodeNil() - } else { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.GitRepo == nil { - r.EncodeNil() - } else { - x.GitRepo.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gitRepo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn18 { - r.EncodeNil() - } else { - if x.GitRepo == nil { - r.EncodeNil() - } else { - x.GitRepo.CodecEncodeSelf(e) - } - } - } - } - var yyn21 bool - if x.VolumeSource.Secret == nil { - yyn21 = true - goto LABEL21 - } - LABEL21: - if yyr2 || yy2arr2 { - if yyn21 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.Secret == nil { - r.EncodeNil() - } else { - x.Secret.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secret")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn21 { - r.EncodeNil() - } else { - if x.Secret == nil { - r.EncodeNil() - } else { - x.Secret.CodecEncodeSelf(e) - } - } - } - } - var yyn24 bool - if x.VolumeSource.NFS == nil { - yyn24 = true - goto LABEL24 - } - LABEL24: - if yyr2 || yy2arr2 { - if yyn24 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn24 { - r.EncodeNil() - } else { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } - } - } - var yyn27 bool - if x.VolumeSource.ISCSI == nil { - yyn27 = true - goto LABEL27 - } - LABEL27: - if yyr2 || yy2arr2 { - if yyn27 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iscsi")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn27 { - r.EncodeNil() - } else { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } - } - } - var yyn30 bool - if x.VolumeSource.Glusterfs == nil { - yyn30 = true - goto LABEL30 - } - LABEL30: - if yyr2 || yy2arr2 { - if yyn30 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn30 { - r.EncodeNil() - } else { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } - } - } - var yyn33 bool - if x.VolumeSource.PersistentVolumeClaim == nil { - yyn33 = true - goto LABEL33 - } - LABEL33: - if yyr2 || yy2arr2 { - if yyn33 { - r.EncodeNil() - } else { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.PersistentVolumeClaim == nil { - r.EncodeNil() - } else { - x.PersistentVolumeClaim.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeClaim")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn33 { - r.EncodeNil() - } else { - if x.PersistentVolumeClaim == nil { - r.EncodeNil() - } else { - x.PersistentVolumeClaim.CodecEncodeSelf(e) - } - } - } - } - var yyn36 bool - if x.VolumeSource.RBD == nil { - yyn36 = true - goto LABEL36 - } - LABEL36: - if yyr2 || yy2arr2 { - if yyn36 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rbd")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn36 { - r.EncodeNil() - } else { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } - } - } - var yyn39 bool - if x.VolumeSource.FlexVolume == nil { - yyn39 = true - goto LABEL39 - } - LABEL39: - if yyr2 || yy2arr2 { - if yyn39 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn39 { - r.EncodeNil() - } else { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } - } - } - var yyn42 bool - if x.VolumeSource.Cinder == nil { - yyn42 = true - goto LABEL42 - } - LABEL42: - if yyr2 || yy2arr2 { - if yyn42 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cinder")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn42 { - r.EncodeNil() - } else { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } - } - } - var yyn45 bool - if x.VolumeSource.CephFS == nil { - yyn45 = true - goto LABEL45 - } - LABEL45: - if yyr2 || yy2arr2 { - if yyn45 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[14] { - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cephfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn45 { - r.EncodeNil() - } else { - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } - } - } - var yyn48 bool - if x.VolumeSource.Flocker == nil { - yyn48 = true - goto LABEL48 - } - LABEL48: - if yyr2 || yy2arr2 { - if yyn48 { - 
r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[15] { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flocker")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn48 { - r.EncodeNil() - } else { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } - } - } - var yyn51 bool - if x.VolumeSource.DownwardAPI == nil { - yyn51 = true - goto LABEL51 - } - LABEL51: - if yyr2 || yy2arr2 { - if yyn51 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[16] { - if x.DownwardAPI == nil { - r.EncodeNil() - } else { - x.DownwardAPI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("downwardAPI")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn51 { - r.EncodeNil() - } else { - if x.DownwardAPI == nil { - r.EncodeNil() - } else { - x.DownwardAPI.CodecEncodeSelf(e) - } - } - } - } - var yyn54 bool - if x.VolumeSource.FC == nil { - yyn54 = true - goto LABEL54 - } - LABEL54: - if yyr2 || yy2arr2 { - if yyn54 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[17] { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[17] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fc")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn54 { - r.EncodeNil() - } else { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } - } - } - var yyn57 bool - if x.VolumeSource.AzureFile == nil { - yyn57 = true - goto LABEL57 - } - LABEL57: - if yyr2 || yy2arr2 { - if yyn57 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[18] { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[18] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn57 { - r.EncodeNil() - } else { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } - } - } - var yyn60 bool - if x.VolumeSource.ConfigMap == nil { - yyn60 = true - goto LABEL60 - } - LABEL60: - if yyr2 || yy2arr2 { - if yyn60 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[19] { - if x.ConfigMap == nil { - r.EncodeNil() - } else { - x.ConfigMap.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[19] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("configMap")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn60 { - r.EncodeNil() - } else { - if x.ConfigMap == nil { - r.EncodeNil() - } else { - x.ConfigMap.CodecEncodeSelf(e) - } - } - } - } - var yyn63 bool - if x.VolumeSource.VsphereVolume == nil { - yyn63 = true - goto LABEL63 - } - LABEL63: - if yyr2 || yy2arr2 { - if yyn63 { - 
r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[20] { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[20] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn63 { - r.EncodeNil() - } else { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Volume) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Volume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "hostPath": - if x.VolumeSource.HostPath == nil { - x.VolumeSource.HostPath = new(HostPathVolumeSource) - } - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - case "emptyDir": - if x.VolumeSource.EmptyDir == nil { - x.VolumeSource.EmptyDir = new(EmptyDirVolumeSource) - } - if r.TryDecodeAsNil() { - if x.EmptyDir != nil { - x.EmptyDir = nil - } - } else { - if x.EmptyDir == nil { - x.EmptyDir = new(EmptyDirVolumeSource) - } - x.EmptyDir.CodecDecodeSelf(d) - } - case "gcePersistentDisk": - if x.VolumeSource.GCEPersistentDisk == nil { - x.VolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - case "awsElasticBlockStore": - if x.VolumeSource.AWSElasticBlockStore == nil { - x.VolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - 
x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - case "gitRepo": - if x.VolumeSource.GitRepo == nil { - x.VolumeSource.GitRepo = new(GitRepoVolumeSource) - } - if r.TryDecodeAsNil() { - if x.GitRepo != nil { - x.GitRepo = nil - } - } else { - if x.GitRepo == nil { - x.GitRepo = new(GitRepoVolumeSource) - } - x.GitRepo.CodecDecodeSelf(d) - } - case "secret": - if x.VolumeSource.Secret == nil { - x.VolumeSource.Secret = new(SecretVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Secret != nil { - x.Secret = nil - } - } else { - if x.Secret == nil { - x.Secret = new(SecretVolumeSource) - } - x.Secret.CodecDecodeSelf(d) - } - case "nfs": - if x.VolumeSource.NFS == nil { - x.VolumeSource.NFS = new(NFSVolumeSource) - } - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - case "iscsi": - if x.VolumeSource.ISCSI == nil { - x.VolumeSource.ISCSI = new(ISCSIVolumeSource) - } - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - case "glusterfs": - if x.VolumeSource.Glusterfs == nil { - x.VolumeSource.Glusterfs = new(GlusterfsVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - case "persistentVolumeClaim": - if x.VolumeSource.PersistentVolumeClaim == nil { - x.VolumeSource.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - if r.TryDecodeAsNil() { - if x.PersistentVolumeClaim != nil { - x.PersistentVolumeClaim = nil - } - } else { - if x.PersistentVolumeClaim == nil { - x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - x.PersistentVolumeClaim.CodecDecodeSelf(d) - } - case "rbd": - if x.VolumeSource.RBD == nil { - x.VolumeSource.RBD = new(RBDVolumeSource) - } - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - case "flexVolume": - if x.VolumeSource.FlexVolume == nil { - x.VolumeSource.FlexVolume = new(FlexVolumeSource) - } - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - case "cinder": - if x.VolumeSource.Cinder == nil { - x.VolumeSource.Cinder = new(CinderVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - case "cephfs": - if x.VolumeSource.CephFS == nil { - x.VolumeSource.CephFS = new(CephFSVolumeSource) - } - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - case "flocker": - if x.VolumeSource.Flocker == nil { - x.VolumeSource.Flocker = new(FlockerVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - case "downwardAPI": - if x.VolumeSource.DownwardAPI == nil { - x.VolumeSource.DownwardAPI = new(DownwardAPIVolumeSource) - } - if 
r.TryDecodeAsNil() { - if x.DownwardAPI != nil { - x.DownwardAPI = nil - } - } else { - if x.DownwardAPI == nil { - x.DownwardAPI = new(DownwardAPIVolumeSource) - } - x.DownwardAPI.CodecDecodeSelf(d) - } - case "fc": - if x.VolumeSource.FC == nil { - x.VolumeSource.FC = new(FCVolumeSource) - } - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - case "azureFile": - if x.VolumeSource.AzureFile == nil { - x.VolumeSource.AzureFile = new(AzureFileVolumeSource) - } - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - case "configMap": - if x.VolumeSource.ConfigMap == nil { - x.VolumeSource.ConfigMap = new(ConfigMapVolumeSource) - } - if r.TryDecodeAsNil() { - if x.ConfigMap != nil { - x.ConfigMap = nil - } - } else { - if x.ConfigMap == nil { - x.ConfigMap = new(ConfigMapVolumeSource) - } - x.ConfigMap.CodecDecodeSelf(d) - } - case "vsphereVolume": - if x.VolumeSource.VsphereVolume == nil { - x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Volume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj25 int - var yyb25 bool - var yyhl25 bool = l >= 0 - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - if x.VolumeSource.HostPath == nil { - x.VolumeSource.HostPath = new(HostPathVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - if x.VolumeSource.EmptyDir == nil { - x.VolumeSource.EmptyDir = new(EmptyDirVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.EmptyDir != nil { - x.EmptyDir = nil - } - } else { - if x.EmptyDir == nil { - x.EmptyDir = new(EmptyDirVolumeSource) - } - x.EmptyDir.CodecDecodeSelf(d) - } - if x.VolumeSource.GCEPersistentDisk == nil { - x.VolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - if x.VolumeSource.AWSElasticBlockStore == nil { - x.VolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - if x.VolumeSource.GitRepo == nil { - x.VolumeSource.GitRepo = new(GitRepoVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GitRepo != nil { - x.GitRepo = nil - } - } else { - if x.GitRepo == nil { - x.GitRepo = new(GitRepoVolumeSource) - } - x.GitRepo.CodecDecodeSelf(d) - } - if x.VolumeSource.Secret == nil { - x.VolumeSource.Secret = new(SecretVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Secret != nil { - x.Secret = nil - } - } else { - if x.Secret == nil { - x.Secret = new(SecretVolumeSource) - } - x.Secret.CodecDecodeSelf(d) - } - if x.VolumeSource.NFS == nil { - x.VolumeSource.NFS = new(NFSVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - if x.VolumeSource.ISCSI == nil { - x.VolumeSource.ISCSI = new(ISCSIVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - if x.VolumeSource.Glusterfs == nil { - x.VolumeSource.Glusterfs = new(GlusterfsVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - if x.VolumeSource.PersistentVolumeClaim == nil { - x.VolumeSource.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > 
l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PersistentVolumeClaim != nil { - x.PersistentVolumeClaim = nil - } - } else { - if x.PersistentVolumeClaim == nil { - x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - x.PersistentVolumeClaim.CodecDecodeSelf(d) - } - if x.VolumeSource.RBD == nil { - x.VolumeSource.RBD = new(RBDVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - if x.VolumeSource.FlexVolume == nil { - x.VolumeSource.FlexVolume = new(FlexVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - if x.VolumeSource.Cinder == nil { - x.VolumeSource.Cinder = new(CinderVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - if x.VolumeSource.CephFS == nil { - x.VolumeSource.CephFS = new(CephFSVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - if x.VolumeSource.Flocker == nil { - x.VolumeSource.Flocker = new(FlockerVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - if x.VolumeSource.DownwardAPI == nil { - x.VolumeSource.DownwardAPI = new(DownwardAPIVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DownwardAPI != nil { - x.DownwardAPI = nil - } - } else { - if x.DownwardAPI == nil { - x.DownwardAPI = new(DownwardAPIVolumeSource) - } - x.DownwardAPI.CodecDecodeSelf(d) - } - if x.VolumeSource.FC == nil { 
- x.VolumeSource.FC = new(FCVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - if x.VolumeSource.AzureFile == nil { - x.VolumeSource.AzureFile = new(AzureFileVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - if x.VolumeSource.ConfigMap == nil { - x.VolumeSource.ConfigMap = new(ConfigMapVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ConfigMap != nil { - x.ConfigMap = nil - } - } else { - if x.ConfigMap == nil { - x.ConfigMap = new(ConfigMapVolumeSource) - } - x.ConfigMap.CodecDecodeSelf(d) - } - if x.VolumeSource.VsphereVolume == nil { - x.VolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - for { - yyj25++ - if yyhl25 { - yyb25 = yyj25 > l - } else { - yyb25 = r.CheckBreak() - } - if yyb25 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj25-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *VolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [20]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.HostPath != nil - yyq2[1] = x.EmptyDir != nil - yyq2[2] = x.GCEPersistentDisk != nil - yyq2[3] = x.AWSElasticBlockStore != nil - yyq2[4] = x.GitRepo != nil - yyq2[5] = x.Secret != nil - yyq2[6] = x.NFS != nil - yyq2[7] = x.ISCSI != nil - yyq2[8] = x.Glusterfs != nil - yyq2[9] = x.PersistentVolumeClaim != nil - yyq2[10] = x.RBD != nil - yyq2[11] = x.FlexVolume != nil - yyq2[12] = x.Cinder != nil - yyq2[13] = x.CephFS != nil - yyq2[14] = x.Flocker != nil - yyq2[15] = x.DownwardAPI != nil - yyq2[16] = x.FC != nil - yyq2[17] = x.AzureFile != nil - yyq2[18] = x.ConfigMap != nil - yyq2[19] = x.VsphereVolume != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(20) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if 
b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.EmptyDir == nil { - r.EncodeNil() - } else { - x.EmptyDir.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("emptyDir")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.EmptyDir == nil { - r.EncodeNil() - } else { - x.EmptyDir.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.GitRepo == nil { - r.EncodeNil() - } else { - x.GitRepo.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gitRepo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.GitRepo == nil { - r.EncodeNil() - } else { - x.GitRepo.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.Secret == nil { - r.EncodeNil() - } else { - x.Secret.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secret")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Secret == nil { - r.EncodeNil() - } else { - x.Secret.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iscsi")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.PersistentVolumeClaim == nil { - r.EncodeNil() - } else { - x.PersistentVolumeClaim.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeClaim")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PersistentVolumeClaim == nil { - r.EncodeNil() - } else { - x.PersistentVolumeClaim.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rbd")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cinder")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - if x.CephFS == nil { - 
r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cephfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[14] { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flocker")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[15] { - if x.DownwardAPI == nil { - r.EncodeNil() - } else { - x.DownwardAPI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("downwardAPI")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DownwardAPI == nil { - r.EncodeNil() - } else { - x.DownwardAPI.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[16] { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fc")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[17] { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[17] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[18] { - if x.ConfigMap == nil { - r.EncodeNil() - } else { - x.ConfigMap.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[18] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("configMap")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ConfigMap == nil { - r.EncodeNil() - } else { - x.ConfigMap.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[19] { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[19] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } - } - if yyr2 || 
yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *VolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *VolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "hostPath": - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - case "emptyDir": - if r.TryDecodeAsNil() { - if x.EmptyDir != nil { - x.EmptyDir = nil - } - } else { - if x.EmptyDir == nil { - x.EmptyDir = new(EmptyDirVolumeSource) - } - x.EmptyDir.CodecDecodeSelf(d) - } - case "gcePersistentDisk": - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - case "awsElasticBlockStore": - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - case "gitRepo": - if r.TryDecodeAsNil() { - if x.GitRepo != nil { - x.GitRepo = nil - } - } else { - if x.GitRepo == nil { - x.GitRepo = new(GitRepoVolumeSource) - } - x.GitRepo.CodecDecodeSelf(d) - } - case "secret": - if r.TryDecodeAsNil() { - if x.Secret != nil { - x.Secret = nil - } - } else { - if x.Secret == nil { - x.Secret = new(SecretVolumeSource) - } - x.Secret.CodecDecodeSelf(d) - } - case "nfs": - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - case "iscsi": - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - case "glusterfs": - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - case "persistentVolumeClaim": - if r.TryDecodeAsNil() 
{ - if x.PersistentVolumeClaim != nil { - x.PersistentVolumeClaim = nil - } - } else { - if x.PersistentVolumeClaim == nil { - x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - x.PersistentVolumeClaim.CodecDecodeSelf(d) - } - case "rbd": - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - case "flexVolume": - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - case "cinder": - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - case "cephfs": - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - case "flocker": - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - case "downwardAPI": - if r.TryDecodeAsNil() { - if x.DownwardAPI != nil { - x.DownwardAPI = nil - } - } else { - if x.DownwardAPI == nil { - x.DownwardAPI = new(DownwardAPIVolumeSource) - } - x.DownwardAPI.CodecDecodeSelf(d) - } - case "fc": - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - case "azureFile": - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - case "configMap": - if r.TryDecodeAsNil() { - if x.ConfigMap != nil { - x.ConfigMap = nil - } - } else { - if x.ConfigMap == nil { - x.ConfigMap = new(ConfigMapVolumeSource) - } - x.ConfigMap.CodecDecodeSelf(d) - } - case "vsphereVolume": - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *VolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj24 int - var yyb24 bool - var yyhl24 bool = l >= 0 - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.EmptyDir != nil { - x.EmptyDir = nil - } - } else { - if x.EmptyDir == nil { - x.EmptyDir = new(EmptyDirVolumeSource) - } - x.EmptyDir.CodecDecodeSelf(d) - } - yyj24++ - 
if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GitRepo != nil { - x.GitRepo = nil - } - } else { - if x.GitRepo == nil { - x.GitRepo = new(GitRepoVolumeSource) - } - x.GitRepo.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Secret != nil { - x.Secret = nil - } - } else { - if x.Secret == nil { - x.Secret = new(SecretVolumeSource) - } - x.Secret.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PersistentVolumeClaim != nil { - x.PersistentVolumeClaim = nil - } - } else { - if x.PersistentVolumeClaim == nil { - x.PersistentVolumeClaim = new(PersistentVolumeClaimVolumeSource) - } - x.PersistentVolumeClaim.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - 
yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.DownwardAPI != nil { - x.DownwardAPI = nil - } - } else { - if x.DownwardAPI == nil { - x.DownwardAPI = new(DownwardAPIVolumeSource) - } - x.DownwardAPI.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - if x.ConfigMap != nil { - x.ConfigMap = nil - } - } else { - if x.ConfigMap == nil { - x.ConfigMap = new(ConfigMapVolumeSource) - } - x.ConfigMap.CodecDecodeSelf(d) - } - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - for { - yyj24++ - if yyhl24 { - yyb24 = yyj24 > l - } else { - yyb24 = r.CheckBreak() - } - if yyb24 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj24-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeClaimVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClaimName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("claimName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClaimName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeClaimVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "claimName": - if r.TryDecodeAsNil() { - x.ClaimName = "" - } else { - x.ClaimName = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeClaimVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClaimName = "" - } else { - x.ClaimName = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [14]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.GCEPersistentDisk != nil - yyq2[1] = x.AWSElasticBlockStore != nil - yyq2[2] = x.HostPath != nil - yyq2[3] = x.Glusterfs != nil - yyq2[4] = x.NFS != nil - yyq2[5] = x.RBD != nil - yyq2[6] = x.ISCSI != nil - yyq2[7] = x.Cinder != nil - yyq2[8] = x.CephFS != nil - yyq2[9] = x.FC != nil - yyq2[10] = x.Flocker != nil - yyq2[11] = x.FlexVolume != nil - yyq2[12] = x.AzureFile != nil - yyq2[13] = x.VsphereVolume != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(14) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - 
x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rbd")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iscsi")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cinder")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cephfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fc")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flocker")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VsphereVolume == nil { - r.EncodeNil() 
- } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "gcePersistentDisk": - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - case "awsElasticBlockStore": - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - case "hostPath": - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - case "glusterfs": - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - case "nfs": - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - case "rbd": - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - case "iscsi": - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - case "cinder": - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - case "cephfs": - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - 
case "fc": - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - case "flocker": - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - case "flexVolume": - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - case "azureFile": - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - case "vsphereVolume": - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj18 int - var yyb18 bool - var yyhl18 bool = l >= 0 - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NFS != nil { - 
x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - for { - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj18-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolume) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - 
r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolume) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolume) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PersistentVolumeSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = PersistentVolumeStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.Spec = PersistentVolumeSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = PersistentVolumeStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [18]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Capacity) != 0 - yyq2[1] = len(x.AccessModes) != 0 - yyq2[2] = x.ClaimRef != nil - yyq2[3] = x.PersistentVolumeReclaimPolicy != "" - yyq2[4] = x.PersistentVolumeSource.GCEPersistentDisk != nil && x.GCEPersistentDisk != nil - yyq2[5] = x.PersistentVolumeSource.AWSElasticBlockStore != nil && x.AWSElasticBlockStore != nil - yyq2[6] = x.PersistentVolumeSource.HostPath != nil && x.HostPath != nil - yyq2[7] = x.PersistentVolumeSource.Glusterfs != nil && x.Glusterfs != nil - yyq2[8] = x.PersistentVolumeSource.NFS != nil && x.NFS != nil - yyq2[9] = x.PersistentVolumeSource.RBD != nil && x.RBD != nil - yyq2[10] = x.PersistentVolumeSource.ISCSI != nil && x.ISCSI != nil - yyq2[11] = x.PersistentVolumeSource.Cinder != nil && x.Cinder != nil - yyq2[12] = x.PersistentVolumeSource.CephFS != nil && x.CephFS != nil - yyq2[13] = x.PersistentVolumeSource.FC != nil && x.FC != nil - yyq2[14] = x.PersistentVolumeSource.Flocker != nil && x.Flocker != nil - yyq2[15] = x.PersistentVolumeSource.FlexVolume != nil && x.FlexVolume != nil - yyq2[16] = x.PersistentVolumeSource.AzureFile != nil && x.AzureFile != nil - yyq2[17] = x.PersistentVolumeSource.VsphereVolume != nil && x.VsphereVolume != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(18) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("capacity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("accessModes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.ClaimRef == nil { - r.EncodeNil() - } else { - x.ClaimRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("claimRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ClaimRef == nil { - r.EncodeNil() - } else { - x.ClaimRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - x.PersistentVolumeReclaimPolicy.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("persistentVolumeReclaimPolicy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.PersistentVolumeReclaimPolicy.CodecEncodeSelf(e) - } - } - var yyn15 bool - if x.PersistentVolumeSource.GCEPersistentDisk == nil { - yyn15 = true - goto LABEL15 - } - LABEL15: - if yyr2 || yy2arr2 { - if yyn15 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gcePersistentDisk")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn15 { - r.EncodeNil() - } else { - if x.GCEPersistentDisk == nil { - r.EncodeNil() - } else { - x.GCEPersistentDisk.CodecEncodeSelf(e) - } - } - } - } - var yyn18 bool - if x.PersistentVolumeSource.AWSElasticBlockStore == nil { - yyn18 = true - goto LABEL18 - } - LABEL18: - if yyr2 || yy2arr2 { - if yyn18 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("awsElasticBlockStore")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn18 { - r.EncodeNil() - } else { - if x.AWSElasticBlockStore == nil { - r.EncodeNil() - } else { - x.AWSElasticBlockStore.CodecEncodeSelf(e) 
- } - } - } - } - var yyn21 bool - if x.PersistentVolumeSource.HostPath == nil { - yyn21 = true - goto LABEL21 - } - LABEL21: - if yyr2 || yy2arr2 { - if yyn21 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn21 { - r.EncodeNil() - } else { - if x.HostPath == nil { - r.EncodeNil() - } else { - x.HostPath.CodecEncodeSelf(e) - } - } - } - } - var yyn24 bool - if x.PersistentVolumeSource.Glusterfs == nil { - yyn24 = true - goto LABEL24 - } - LABEL24: - if yyr2 || yy2arr2 { - if yyn24 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("glusterfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn24 { - r.EncodeNil() - } else { - if x.Glusterfs == nil { - r.EncodeNil() - } else { - x.Glusterfs.CodecEncodeSelf(e) - } - } - } - } - var yyn27 bool - if x.PersistentVolumeSource.NFS == nil { - yyn27 = true - goto LABEL27 - } - LABEL27: - if yyr2 || yy2arr2 { - if yyn27 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn27 { - r.EncodeNil() - } else { - if x.NFS == nil { - r.EncodeNil() - } else { - x.NFS.CodecEncodeSelf(e) - } - } - } - } - var yyn30 bool - if x.PersistentVolumeSource.RBD == nil { - yyn30 = true - goto LABEL30 - } - LABEL30: - if yyr2 || yy2arr2 { - if yyn30 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rbd")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn30 { - r.EncodeNil() - } else { - if x.RBD == nil { - r.EncodeNil() - } else { - x.RBD.CodecEncodeSelf(e) - } - } - } - } - var yyn33 bool - if x.PersistentVolumeSource.ISCSI == nil { - yyn33 = true - goto LABEL33 - } - LABEL33: - if yyr2 || yy2arr2 { - if yyn33 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iscsi")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn33 { - r.EncodeNil() - } else { - if x.ISCSI == nil { - r.EncodeNil() - } else { - x.ISCSI.CodecEncodeSelf(e) - } - } - } - } - var yyn36 
bool - if x.PersistentVolumeSource.Cinder == nil { - yyn36 = true - goto LABEL36 - } - LABEL36: - if yyr2 || yy2arr2 { - if yyn36 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cinder")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn36 { - r.EncodeNil() - } else { - if x.Cinder == nil { - r.EncodeNil() - } else { - x.Cinder.CodecEncodeSelf(e) - } - } - } - } - var yyn39 bool - if x.PersistentVolumeSource.CephFS == nil { - yyn39 = true - goto LABEL39 - } - LABEL39: - if yyr2 || yy2arr2 { - if yyn39 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cephfs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn39 { - r.EncodeNil() - } else { - if x.CephFS == nil { - r.EncodeNil() - } else { - x.CephFS.CodecEncodeSelf(e) - } - } - } - } - var yyn42 bool - if x.PersistentVolumeSource.FC == nil { - yyn42 = true - goto LABEL42 - } - LABEL42: - if yyr2 || yy2arr2 { - if yyn42 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fc")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn42 { - r.EncodeNil() - } else { - if x.FC == nil { - r.EncodeNil() - } else { - x.FC.CodecEncodeSelf(e) - } - } - } - } - var yyn45 bool - if x.PersistentVolumeSource.Flocker == nil { - yyn45 = true - goto LABEL45 - } - LABEL45: - if yyr2 || yy2arr2 { - if yyn45 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[14] { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flocker")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn45 { - r.EncodeNil() - } else { - if x.Flocker == nil { - r.EncodeNil() - } else { - x.Flocker.CodecEncodeSelf(e) - } - } - } - } - var yyn48 bool - if x.PersistentVolumeSource.FlexVolume == nil { - yyn48 = true - goto LABEL48 - } - LABEL48: - if yyr2 || yy2arr2 { - if yyn48 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[15] { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flexVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn48 { - r.EncodeNil() - } else { - if x.FlexVolume == nil { - r.EncodeNil() - } else { - x.FlexVolume.CodecEncodeSelf(e) - } - } - } - } - var yyn51 
bool - if x.PersistentVolumeSource.AzureFile == nil { - yyn51 = true - goto LABEL51 - } - LABEL51: - if yyr2 || yy2arr2 { - if yyn51 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[16] { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("azureFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn51 { - r.EncodeNil() - } else { - if x.AzureFile == nil { - r.EncodeNil() - } else { - x.AzureFile.CodecEncodeSelf(e) - } - } - } - } - var yyn54 bool - if x.PersistentVolumeSource.VsphereVolume == nil { - yyn54 = true - goto LABEL54 - } - LABEL54: - if yyr2 || yy2arr2 { - if yyn54 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[17] { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[17] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("vsphereVolume")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn54 { - r.EncodeNil() - } else { - if x.VsphereVolume == nil { - r.EncodeNil() - } else { - x.VsphereVolume.CodecEncodeSelf(e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "capacity": - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv4 := &x.Capacity - yyv4.CodecDecodeSelf(d) - } - case "accessModes": - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv5 := &x.AccessModes - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv5), d) - } - } - case "claimRef": - if r.TryDecodeAsNil() { - if x.ClaimRef != nil { - x.ClaimRef = nil - } - } else { - if x.ClaimRef == nil { - x.ClaimRef = 
new(ObjectReference) - } - x.ClaimRef.CodecDecodeSelf(d) - } - case "persistentVolumeReclaimPolicy": - if r.TryDecodeAsNil() { - x.PersistentVolumeReclaimPolicy = "" - } else { - x.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(r.DecodeString()) - } - case "gcePersistentDisk": - if x.PersistentVolumeSource.GCEPersistentDisk == nil { - x.PersistentVolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - case "awsElasticBlockStore": - if x.PersistentVolumeSource.AWSElasticBlockStore == nil { - x.PersistentVolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - case "hostPath": - if x.PersistentVolumeSource.HostPath == nil { - x.PersistentVolumeSource.HostPath = new(HostPathVolumeSource) - } - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - case "glusterfs": - if x.PersistentVolumeSource.Glusterfs == nil { - x.PersistentVolumeSource.Glusterfs = new(GlusterfsVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - case "nfs": - if x.PersistentVolumeSource.NFS == nil { - x.PersistentVolumeSource.NFS = new(NFSVolumeSource) - } - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - case "rbd": - if x.PersistentVolumeSource.RBD == nil { - x.PersistentVolumeSource.RBD = new(RBDVolumeSource) - } - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - case "iscsi": - if x.PersistentVolumeSource.ISCSI == nil { - x.PersistentVolumeSource.ISCSI = new(ISCSIVolumeSource) - } - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI = nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - case "cinder": - if x.PersistentVolumeSource.Cinder == nil { - x.PersistentVolumeSource.Cinder = new(CinderVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - case "cephfs": - if x.PersistentVolumeSource.CephFS == nil { - x.PersistentVolumeSource.CephFS = new(CephFSVolumeSource) - } - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - case "fc": - if x.PersistentVolumeSource.FC == nil { - x.PersistentVolumeSource.FC = new(FCVolumeSource) - } - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - case "flocker": - if 
x.PersistentVolumeSource.Flocker == nil { - x.PersistentVolumeSource.Flocker = new(FlockerVolumeSource) - } - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - case "flexVolume": - if x.PersistentVolumeSource.FlexVolume == nil { - x.PersistentVolumeSource.FlexVolume = new(FlexVolumeSource) - } - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - case "azureFile": - if x.PersistentVolumeSource.AzureFile == nil { - x.PersistentVolumeSource.AzureFile = new(AzureFileVolumeSource) - } - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - case "vsphereVolume": - if x.PersistentVolumeSource.VsphereVolume == nil { - x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj23 int - var yyb23 bool - var yyhl23 bool = l >= 0 - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv24 := &x.Capacity - yyv24.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv25 := &x.AccessModes - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv25), d) - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ClaimRef != nil { - x.ClaimRef = nil - } - } else { - if x.ClaimRef == nil { - x.ClaimRef = new(ObjectReference) - } - x.ClaimRef.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PersistentVolumeReclaimPolicy = "" - } else { - x.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(r.DecodeString()) - } - if x.PersistentVolumeSource.GCEPersistentDisk == nil { - x.PersistentVolumeSource.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } 
else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GCEPersistentDisk != nil { - x.GCEPersistentDisk = nil - } - } else { - if x.GCEPersistentDisk == nil { - x.GCEPersistentDisk = new(GCEPersistentDiskVolumeSource) - } - x.GCEPersistentDisk.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.AWSElasticBlockStore == nil { - x.PersistentVolumeSource.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AWSElasticBlockStore != nil { - x.AWSElasticBlockStore = nil - } - } else { - if x.AWSElasticBlockStore == nil { - x.AWSElasticBlockStore = new(AWSElasticBlockStoreVolumeSource) - } - x.AWSElasticBlockStore.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.HostPath == nil { - x.PersistentVolumeSource.HostPath = new(HostPathVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HostPath != nil { - x.HostPath = nil - } - } else { - if x.HostPath == nil { - x.HostPath = new(HostPathVolumeSource) - } - x.HostPath.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.Glusterfs == nil { - x.PersistentVolumeSource.Glusterfs = new(GlusterfsVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Glusterfs != nil { - x.Glusterfs = nil - } - } else { - if x.Glusterfs == nil { - x.Glusterfs = new(GlusterfsVolumeSource) - } - x.Glusterfs.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.NFS == nil { - x.PersistentVolumeSource.NFS = new(NFSVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NFS != nil { - x.NFS = nil - } - } else { - if x.NFS == nil { - x.NFS = new(NFSVolumeSource) - } - x.NFS.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.RBD == nil { - x.PersistentVolumeSource.RBD = new(RBDVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RBD != nil { - x.RBD = nil - } - } else { - if x.RBD == nil { - x.RBD = new(RBDVolumeSource) - } - x.RBD.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.ISCSI == nil { - x.PersistentVolumeSource.ISCSI = new(ISCSIVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ISCSI != nil { - x.ISCSI 
= nil - } - } else { - if x.ISCSI == nil { - x.ISCSI = new(ISCSIVolumeSource) - } - x.ISCSI.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.Cinder == nil { - x.PersistentVolumeSource.Cinder = new(CinderVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Cinder != nil { - x.Cinder = nil - } - } else { - if x.Cinder == nil { - x.Cinder = new(CinderVolumeSource) - } - x.Cinder.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.CephFS == nil { - x.PersistentVolumeSource.CephFS = new(CephFSVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CephFS != nil { - x.CephFS = nil - } - } else { - if x.CephFS == nil { - x.CephFS = new(CephFSVolumeSource) - } - x.CephFS.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.FC == nil { - x.PersistentVolumeSource.FC = new(FCVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FC != nil { - x.FC = nil - } - } else { - if x.FC == nil { - x.FC = new(FCVolumeSource) - } - x.FC.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.Flocker == nil { - x.PersistentVolumeSource.Flocker = new(FlockerVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Flocker != nil { - x.Flocker = nil - } - } else { - if x.Flocker == nil { - x.Flocker = new(FlockerVolumeSource) - } - x.Flocker.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.FlexVolume == nil { - x.PersistentVolumeSource.FlexVolume = new(FlexVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FlexVolume != nil { - x.FlexVolume = nil - } - } else { - if x.FlexVolume == nil { - x.FlexVolume = new(FlexVolumeSource) - } - x.FlexVolume.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.AzureFile == nil { - x.PersistentVolumeSource.AzureFile = new(AzureFileVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AzureFile != nil { - x.AzureFile = nil - } - } else { - if x.AzureFile == nil { - x.AzureFile = new(AzureFileVolumeSource) - } - x.AzureFile.CodecDecodeSelf(d) - } - if x.PersistentVolumeSource.VsphereVolume == nil { - x.PersistentVolumeSource.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.VsphereVolume != nil { - x.VsphereVolume = nil - } - } else { - if x.VsphereVolume == nil { - x.VsphereVolume = new(VsphereVirtualDiskVolumeSource) - } - x.VsphereVolume.CodecDecodeSelf(d) - } - for { - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj23-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x PersistentVolumeReclaimPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PersistentVolumeReclaimPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *PersistentVolumeStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Phase != "" - yyq2[1] = x.Message != "" - yyq2[2] = x.Reason != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Phase.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("phase")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Phase.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - 
yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "phase": - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = PersistentVolumePhase(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = PersistentVolumePhase(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSlicePersistentVolume(([]PersistentVolume)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSlicePersistentVolume(([]PersistentVolume)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSlicePersistentVolume((*[]PersistentVolume)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSlicePersistentVolume((*[]PersistentVolume)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = 
r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeClaim) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 
|| yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeClaim) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeClaim) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PersistentVolumeClaimSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = PersistentVolumeClaimStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeClaim) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - 
yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PersistentVolumeClaimSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = PersistentVolumeClaimStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeClaimList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSlicePersistentVolumeClaim(([]PersistentVolumeClaim)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - 
yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSlicePersistentVolumeClaim(([]PersistentVolumeClaim)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeClaimList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeClaimList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - 
case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeClaimList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSlicePersistentVolumeClaim((*[]PersistentVolumeClaim)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeClaimSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.AccessModes) != 0 - yyq2[1] = x.Selector != nil - yyq2[2] = true - yyq2[3] = x.VolumeName != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("accessModes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.Resources - yy10.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resources")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.Resources - yy12.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeClaimSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if 
r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "accessModes": - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv4 := &x.AccessModes - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv4), d) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg2_unversioned.LabelSelector) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "resources": - if r.TryDecodeAsNil() { - x.Resources = ResourceRequirements{} - } else { - yyv8 := &x.Resources - yyv8.CodecDecodeSelf(d) - } - case "volumeName": - if r.TryDecodeAsNil() { - x.VolumeName = "" - } else { - x.VolumeName = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeClaimSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv11 := &x.AccessModes - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv11), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg2_unversioned.LabelSelector) - } - yym14 := z.DecBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Resources = ResourceRequirements{} - } else { - yyv15 := &x.Resources - yyv15.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeName = "" - } else { - x.VolumeName = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x 
*PersistentVolumeClaimStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Phase != "" - yyq2[1] = len(x.AccessModes) != 0 - yyq2[2] = len(x.Capacity) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Phase.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("phase")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Phase.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("accessModes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AccessModes == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSlicePersistentVolumeAccessMode(([]PersistentVolumeAccessMode)(x.AccessModes), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("capacity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeClaimStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "phase": - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = PersistentVolumeClaimPhase(r.DecodeString()) - } - case "accessModes": - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv5 := &x.AccessModes - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv5), d) - } - } - case "capacity": - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv7 := &x.Capacity - yyv7.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeClaimStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = PersistentVolumeClaimPhase(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AccessModes = nil - } else { - yyv10 := &x.AccessModes - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - h.decSlicePersistentVolumeAccessMode((*[]PersistentVolumeAccessMode)(yyv10), d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv12 := &x.Capacity - yyv12.CodecDecodeSelf(d) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x PersistentVolumeAccessMode) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PersistentVolumeAccessMode) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x PersistentVolumePhase) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r 
:= codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PersistentVolumePhase) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x PersistentVolumeClaimPhase) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PersistentVolumeClaimPhase) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *HostPathVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HostPathVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HostPathVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 
- for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HostPathVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EmptyDirVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Medium != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Medium.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("medium")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Medium.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EmptyDirVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EmptyDirVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { 
- var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "medium": - if r.TryDecodeAsNil() { - x.Medium = "" - } else { - x.Medium = StorageMedium(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EmptyDirVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Medium = "" - } else { - x.Medium = StorageMedium(r.DecodeString()) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *GlusterfsVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EndpointsName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("endpoints")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EndpointsName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - 
} - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *GlusterfsVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *GlusterfsVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "endpoints": - if r.TryDecodeAsNil() { - x.EndpointsName = "" - } else { - x.EndpointsName = string(r.DecodeString()) - } - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *GlusterfsVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EndpointsName = "" - } else { - x.EndpointsName = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() 
{ - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RBDVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.FSType != "" - yyq2[3] = x.RBDPool != "" - yyq2[4] = x.RadosUser != "" - yyq2[5] = x.Keyring != "" - yyq2[6] = x.SecretRef != nil - yyq2[7] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.CephMonitors == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.CephMonitors, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("monitors")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CephMonitors == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.CephMonitors, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RBDImage)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("image")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RBDImage)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RBDPool)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("pool")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RBDPool)) - } - } - } - if yyr2 
|| yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RadosUser)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("user")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RadosUser)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Keyring)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("keyring")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Keyring)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RBDVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RBDVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { 
- if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "monitors": - if r.TryDecodeAsNil() { - x.CephMonitors = nil - } else { - yyv4 := &x.CephMonitors - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecSliceStringX(yyv4, false, d) - } - } - case "image": - if r.TryDecodeAsNil() { - x.RBDImage = "" - } else { - x.RBDImage = string(r.DecodeString()) - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - case "pool": - if r.TryDecodeAsNil() { - x.RBDPool = "" - } else { - x.RBDPool = string(r.DecodeString()) - } - case "user": - if r.TryDecodeAsNil() { - x.RadosUser = "" - } else { - x.RadosUser = string(r.DecodeString()) - } - case "keyring": - if r.TryDecodeAsNil() { - x.Keyring = "" - } else { - x.Keyring = string(r.DecodeString()) - } - case "secretRef": - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RBDVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CephMonitors = nil - } else { - yyv14 := &x.CephMonitors - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - z.F.DecSliceStringX(yyv14, false, d) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RBDImage = "" - } else { - x.RBDImage = string(r.DecodeString()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RBDPool = "" - } else { - x.RBDPool = string(r.DecodeString()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RadosUser = "" - } else { - x.RadosUser = string(r.DecodeString()) - } - 
yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Keyring = "" - } else { - x.Keyring = string(r.DecodeString()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CinderVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FSType != "" - yyq2[2] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CinderVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CinderVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "volumeID": - if r.TryDecodeAsNil() { - x.VolumeID = "" - } else { - x.VolumeID = string(r.DecodeString()) - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CinderVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeID = "" - } else { - x.VolumeID = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = 
r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CephFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Path != "" - yyq2[2] = x.User != "" - yyq2[3] = x.SecretFile != "" - yyq2[4] = x.SecretRef != nil - yyq2[5] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Monitors == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.Monitors, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("monitors")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Monitors == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.Monitors, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("user")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretFile)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretFile)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if 
x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CephFSVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CephFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "monitors": - if r.TryDecodeAsNil() { - x.Monitors = nil - } else { - yyv4 := &x.Monitors - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecSliceStringX(yyv4, false, d) - } - } - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "user": - if r.TryDecodeAsNil() { - x.User = "" - } else { - x.User = string(r.DecodeString()) - } - case "secretFile": - if r.TryDecodeAsNil() { - x.SecretFile = "" - } else { - x.SecretFile = string(r.DecodeString()) - } - case "secretRef": - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - 
-func (x *CephFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Monitors = nil - } else { - yyv12 := &x.Monitors - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - z.F.DecSliceStringX(yyv12, false, d) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.User = "" - } else { - x.User = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SecretFile = "" - } else { - x.SecretFile = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *FlockerVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DatasetName)) - } - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("datasetName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DatasetName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *FlockerVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *FlockerVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "datasetName": - if r.TryDecodeAsNil() { - x.DatasetName = "" - } else { - x.DatasetName = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *FlockerVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DatasetName = "" - } else { - x.DatasetName = string(r.DecodeString()) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x StorageMedium) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *StorageMedium) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } 
else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x Protocol) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *Protocol) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *GCEPersistentDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FSType != "" - yyq2[2] = x.Partition != 0 - yyq2[3] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PDName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("pdName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PDName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.Partition)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("partition")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.Partition)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if 
false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *GCEPersistentDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "pdName": - if r.TryDecodeAsNil() { - x.PDName = "" - } else { - x.PDName = string(r.DecodeString()) - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - case "partition": - if r.TryDecodeAsNil() { - x.Partition = 0 - } else { - x.Partition = int32(r.DecodeInt(32)) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *GCEPersistentDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PDName = "" - } else { - x.PDName = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Partition = 0 - } else { - x.Partition = int32(r.DecodeInt(32)) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { 
- z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *FlexVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FSType != "" - yyq2[2] = x.SecretRef != nil - yyq2[3] = x.ReadOnly != false - yyq2[4] = len(x.Options) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Driver)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("driver")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Driver)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.Options == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - z.F.EncMapStringStringV(x.Options, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("options")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Options == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - z.F.EncMapStringStringV(x.Options, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *FlexVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *FlexVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "driver": - if r.TryDecodeAsNil() { - x.Driver = "" - } else { - x.Driver = string(r.DecodeString()) - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - case "secretRef": - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - case "options": - if r.TryDecodeAsNil() { - x.Options = nil - } else { - yyv8 := &x.Options - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - z.F.DecMapStringStringX(yyv8, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *FlexVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Driver = "" - } else { - x.Driver = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Options = nil - } else { - yyv15 := &x.Options - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - z.F.DecMapStringStringX(yyv15, false, d) - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *AWSElasticBlockStoreVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FSType != "" - yyq2[2] = x.Partition != 0 - yyq2[3] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumeID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.Partition)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("partition")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.Partition)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *AWSElasticBlockStoreVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "volumeID": - if r.TryDecodeAsNil() { - x.VolumeID = "" - } else { - x.VolumeID = string(r.DecodeString()) - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - case "partition": - if r.TryDecodeAsNil() { - x.Partition = 0 - } else { - x.Partition = int32(r.DecodeInt(32)) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *AWSElasticBlockStoreVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeID = "" - } else { - x.VolumeID = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Partition = 0 - } else { - x.Partition = int32(r.DecodeInt(32)) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *GitRepoVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Revision != "" - yyq2[2] = x.Directory != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Repository)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("repository")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Repository)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Revision)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("revision")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Revision)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Directory)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("directory")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Directory)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *GitRepoVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *GitRepoVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "repository": - if r.TryDecodeAsNil() { - x.Repository = "" - } else { - x.Repository = string(r.DecodeString()) - } - case "revision": - if r.TryDecodeAsNil() { - x.Revision = "" - } else { - x.Revision = string(r.DecodeString()) - } - case "directory": - if r.TryDecodeAsNil() { - x.Directory = "" - } else { - x.Directory = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *GitRepoVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Repository = "" - } else { - x.Repository = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Revision = "" - } else { - x.Revision = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Directory = "" - } else { - x.Directory = string(r.DecodeString()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SecretVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.SecretName != "" - yyq2[1] = len(x.Items) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Items == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SecretVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - 
yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SecretVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "secretName": - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - x.SecretName = string(r.DecodeString()) - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv5 := &x.Items - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceKeyToPath((*[]KeyToPath)(yyv5), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SecretVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - x.SecretName = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv9 := &x.Items - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceKeyToPath((*[]KeyToPath)(yyv9), d) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NFSVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Server)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("server")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Server)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NFSVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NFSVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "server": - if r.TryDecodeAsNil() { - x.Server = "" - } else { - x.Server = string(r.DecodeString()) - } - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NFSVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 
bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Server = "" - } else { - x.Server = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ISCSIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[3] = x.ISCSIInterface != "" - yyq2[4] = x.FSType != "" - yyq2[5] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TargetPortal)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetPortal")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TargetPortal)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IQN)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iqn")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IQN)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.Lun)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lun")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.Lun)) - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ISCSIInterface)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iscsiInterface")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ISCSIInterface)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ISCSIVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ISCSIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "targetPortal": - if r.TryDecodeAsNil() { - x.TargetPortal = "" - } else { - x.TargetPortal = string(r.DecodeString()) - } - case "iqn": - if r.TryDecodeAsNil() { - x.IQN = "" - } else { - x.IQN = 
string(r.DecodeString()) - } - case "lun": - if r.TryDecodeAsNil() { - x.Lun = 0 - } else { - x.Lun = int32(r.DecodeInt(32)) - } - case "iscsiInterface": - if r.TryDecodeAsNil() { - x.ISCSIInterface = "" - } else { - x.ISCSIInterface = string(r.DecodeString()) - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ISCSIVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetPortal = "" - } else { - x.TargetPortal = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.IQN = "" - } else { - x.IQN = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Lun = 0 - } else { - x.Lun = int32(r.DecodeInt(32)) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ISCSIInterface = "" - } else { - x.ISCSIInterface = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *FCVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = 
yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.FSType != "" - yyq2[3] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.TargetWWNs == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.TargetWWNs, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetWWNs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TargetWWNs == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.TargetWWNs, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Lun == nil { - r.EncodeNil() - } else { - yy7 := *x.Lun - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(yy7)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lun")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Lun == nil { - r.EncodeNil() - } else { - yy9 := *x.Lun - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *FCVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *FCVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "targetWWNs": - if r.TryDecodeAsNil() { - x.TargetWWNs = nil - } else { - yyv4 := &x.TargetWWNs - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecSliceStringX(yyv4, false, d) - } - } - case "lun": - if r.TryDecodeAsNil() { - if x.Lun != nil { - x.Lun = nil - } - } else { - if x.Lun == nil { - x.Lun = new(int32) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int32)(x.Lun)) = int32(r.DecodeInt(32)) - } - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *FCVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetWWNs = nil - } else { - yyv11 := &x.TargetWWNs - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - z.F.DecSliceStringX(yyv11, false, d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Lun != nil { - x.Lun = nil - } - } else { - if x.Lun == nil { - x.Lun = new(int32) - } - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*int32)(x.Lun)) = int32(r.DecodeInt(32)) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *AzureFileVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.ReadOnly != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ShareName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("shareName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ShareName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *AzureFileVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *AzureFileVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - 
var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "secretName": - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - x.SecretName = string(r.DecodeString()) - } - case "shareName": - if r.TryDecodeAsNil() { - x.ShareName = "" - } else { - x.ShareName = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *AzureFileVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - x.SecretName = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ShareName = "" - } else { - x.ShareName = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *VsphereVirtualDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FSType != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumePath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumePath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, 
string(x.VolumePath)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FSType)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *VsphereVirtualDiskVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "volumePath": - if r.TryDecodeAsNil() { - x.VolumePath = "" - } else { - x.VolumePath = string(r.DecodeString()) - } - case "fsType": - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *VsphereVirtualDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumePath = "" - } else { - x.VolumePath = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSType = "" - } else { - x.FSType = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - 
} else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ConfigMapVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Items) != 0 - yyq2[1] = x.Name != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Items == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceKeyToPath(([]KeyToPath)(x.Items), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ConfigMapVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ConfigMapVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } 
- } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4 := &x.Items - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceKeyToPath((*[]KeyToPath)(yyv4), d) - } - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ConfigMapVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv8 := &x.Items - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - h.decSliceKeyToPath((*[]KeyToPath)(yyv8), d) - } - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *KeyToPath) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, 
string(x.Path)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *KeyToPath) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *KeyToPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *KeyToPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerPort) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Name != "" - yyq2[1] = 
x.HostPort != 0 - yyq2[3] = x.Protocol != "" - yyq2[4] = x.HostIP != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.HostPort)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.HostPort)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.ContainerPort)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerPort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.ContainerPort)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - x.Protocol.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("protocol")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Protocol.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostIP")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerPort) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else 
{ - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "hostPort": - if r.TryDecodeAsNil() { - x.HostPort = 0 - } else { - x.HostPort = int32(r.DecodeInt(32)) - } - case "containerPort": - if r.TryDecodeAsNil() { - x.ContainerPort = 0 - } else { - x.ContainerPort = int32(r.DecodeInt(32)) - } - case "protocol": - if r.TryDecodeAsNil() { - x.Protocol = "" - } else { - x.Protocol = Protocol(r.DecodeString()) - } - case "hostIP": - if r.TryDecodeAsNil() { - x.HostIP = "" - } else { - x.HostIP = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostPort = 0 - } else { - x.HostPort = int32(r.DecodeInt(32)) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerPort = 0 - } else { - x.ContainerPort = int32(r.DecodeInt(32)) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Protocol = "" - } else { - x.Protocol = Protocol(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostIP = "" - } else { - x.HostIP = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - 
yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *VolumeMount) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.ReadOnly != false - yyq2[3] = x.SubPath != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnly")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.ReadOnly)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MountPath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("mountPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MountPath)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SubPath)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SubPath)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *VolumeMount) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && 
z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *VolumeMount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "readOnly": - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - case "mountPath": - if r.TryDecodeAsNil() { - x.MountPath = "" - } else { - x.MountPath = string(r.DecodeString()) - } - case "subPath": - if r.TryDecodeAsNil() { - x.SubPath = "" - } else { - x.SubPath = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *VolumeMount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnly = false - } else { - x.ReadOnly = bool(r.DecodeBool()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MountPath = "" - } else { - x.MountPath = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SubPath = "" - } else { - x.SubPath = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - 
-func (x *EnvVar) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Value != "" - yyq2[2] = x.ValueFrom != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.ValueFrom == nil { - r.EncodeNil() - } else { - x.ValueFrom.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("valueFrom")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ValueFrom == nil { - r.EncodeNil() - } else { - x.ValueFrom.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EnvVar) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EnvVar) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; 
yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - case "valueFrom": - if r.TryDecodeAsNil() { - if x.ValueFrom != nil { - x.ValueFrom = nil - } - } else { - if x.ValueFrom == nil { - x.ValueFrom = new(EnvVarSource) - } - x.ValueFrom.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EnvVar) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ValueFrom != nil { - x.ValueFrom = nil - } - } else { - if x.ValueFrom == nil { - x.ValueFrom = new(EnvVarSource) - } - x.ValueFrom.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EnvVarSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.FieldRef != nil - yyq2[1] = x.ResourceFieldRef != nil - yyq2[2] = x.ConfigMapKeyRef != nil - yyq2[3] = x.SecretKeyRef != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.FieldRef == nil { - r.EncodeNil() - } else { - x.FieldRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("fieldRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FieldRef == nil { - r.EncodeNil() - } else { - x.FieldRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.ResourceFieldRef == nil { - r.EncodeNil() - } else { - x.ResourceFieldRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ResourceFieldRef == nil { - r.EncodeNil() - } else { - x.ResourceFieldRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.ConfigMapKeyRef == nil { - r.EncodeNil() - } else { - x.ConfigMapKeyRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("configMapKeyRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ConfigMapKeyRef == nil { - r.EncodeNil() - } else { - x.ConfigMapKeyRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.SecretKeyRef == nil { - r.EncodeNil() - } else { - x.SecretKeyRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretKeyRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretKeyRef == nil { - r.EncodeNil() - } else { - x.SecretKeyRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EnvVarSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EnvVarSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "fieldRef": - if r.TryDecodeAsNil() { - if x.FieldRef != nil { - x.FieldRef = nil - } - } else { - if x.FieldRef == nil { - x.FieldRef = new(ObjectFieldSelector) - } - 
x.FieldRef.CodecDecodeSelf(d) - } - case "resourceFieldRef": - if r.TryDecodeAsNil() { - if x.ResourceFieldRef != nil { - x.ResourceFieldRef = nil - } - } else { - if x.ResourceFieldRef == nil { - x.ResourceFieldRef = new(ResourceFieldSelector) - } - x.ResourceFieldRef.CodecDecodeSelf(d) - } - case "configMapKeyRef": - if r.TryDecodeAsNil() { - if x.ConfigMapKeyRef != nil { - x.ConfigMapKeyRef = nil - } - } else { - if x.ConfigMapKeyRef == nil { - x.ConfigMapKeyRef = new(ConfigMapKeySelector) - } - x.ConfigMapKeyRef.CodecDecodeSelf(d) - } - case "secretKeyRef": - if r.TryDecodeAsNil() { - if x.SecretKeyRef != nil { - x.SecretKeyRef = nil - } - } else { - if x.SecretKeyRef == nil { - x.SecretKeyRef = new(SecretKeySelector) - } - x.SecretKeyRef.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EnvVarSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FieldRef != nil { - x.FieldRef = nil - } - } else { - if x.FieldRef == nil { - x.FieldRef = new(ObjectFieldSelector) - } - x.FieldRef.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ResourceFieldRef != nil { - x.ResourceFieldRef = nil - } - } else { - if x.ResourceFieldRef == nil { - x.ResourceFieldRef = new(ResourceFieldSelector) - } - x.ResourceFieldRef.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ConfigMapKeyRef != nil { - x.ConfigMapKeyRef = nil - } - } else { - if x.ConfigMapKeyRef == nil { - x.ConfigMapKeyRef = new(ConfigMapKeySelector) - } - x.ConfigMapKeyRef.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretKeyRef != nil { - x.SecretKeyRef = nil - } - } else { - if x.SecretKeyRef == nil { - x.SecretKeyRef = new(SecretKeySelector) - } - x.SecretKeyRef.CodecDecodeSelf(d) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ObjectFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := 
z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fieldPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ObjectFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ObjectFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "fieldPath": - if r.TryDecodeAsNil() { - x.FieldPath = "" - } else { - x.FieldPath = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ObjectFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r 
:= codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FieldPath = "" - } else { - x.FieldPath = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceFieldSelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.ContainerName != "" - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resource")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.Divisor - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(yy10) - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("divisor")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.Divisor - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if 
z.HasExtensions() && z.EncExt(yy12) { - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceFieldSelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceFieldSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "containerName": - if r.TryDecodeAsNil() { - x.ContainerName = "" - } else { - x.ContainerName = string(r.DecodeString()) - } - case "resource": - if r.TryDecodeAsNil() { - x.Resource = "" - } else { - x.Resource = string(r.DecodeString()) - } - case "divisor": - if r.TryDecodeAsNil() { - x.Divisor = pkg3_resource.Quantity{} - } else { - yyv6 := &x.Divisor - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceFieldSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerName = "" - } else { - x.ContainerName = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Resource = "" - } else { - x.Resource = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Divisor = pkg3_resource.Quantity{} - } else { - yyv11 := &x.Divisor - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv11) - } else { - z.DecFallback(yyv11, false) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ConfigMapKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Name != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ConfigMapKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ConfigMapKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ 
= h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ConfigMapKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SecretKeySelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Name != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SecretKeySelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SecretKeySelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SecretKeySelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HTTPHeader) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := 
!z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HTTPHeader) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HTTPHeader) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HTTPHeader) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = 
r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HTTPGetAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Path != "" - yyq2[2] = x.Host != "" - yyq2[3] = x.Scheme != "" - yyq2[4] = len(x.HTTPHeaders) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.Port - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) - } else { - z.EncFallback(yy7) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.Port - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("host")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - x.Scheme.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("scheme")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Scheme.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.HTTPHeaders == nil { - r.EncodeNil() - } else { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - h.encSliceHTTPHeader(([]HTTPHeader)(x.HTTPHeaders), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("httpHeaders")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HTTPHeaders == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - h.encSliceHTTPHeader(([]HTTPHeader)(x.HTTPHeaders), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HTTPGetAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HTTPGetAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "port": - if r.TryDecodeAsNil() { - x.Port = pkg4_intstr.IntOrString{} - } else { - yyv5 := &x.Port - yym6 := z.DecBinary() - _ = yym6 - if false { - } else if z.HasExtensions() && z.DecExt(yyv5) { - } else if !yym6 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv5) - } else { - z.DecFallback(yyv5, false) - } - } - case "host": - if r.TryDecodeAsNil() { - x.Host = "" - } else { - x.Host = string(r.DecodeString()) - } - case "scheme": - if r.TryDecodeAsNil() { - x.Scheme = "" - } else { - x.Scheme = URIScheme(r.DecodeString()) - } - case "httpHeaders": - if r.TryDecodeAsNil() { - x.HTTPHeaders = nil - } else { - yyv9 := &x.HTTPHeaders - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceHTTPHeader((*[]HTTPHeader)(yyv9), d) - } - } - default: - 
z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HTTPGetAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = pkg4_intstr.IntOrString{} - } else { - yyv13 := &x.Port - yym14 := z.DecBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.DecExt(yyv13) { - } else if !yym14 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv13) - } else { - z.DecFallback(yyv13, false) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Host = "" - } else { - x.Host = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Scheme = "" - } else { - x.Scheme = URIScheme(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HTTPHeaders = nil - } else { - yyv17 := &x.HTTPHeaders - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - h.decSliceHTTPHeader((*[]HTTPHeader)(yyv17), d) - } - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x URIScheme) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *URIScheme) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *TCPSocketAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - 
yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.Port - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else if !yym5 && z.IsJSONHandle() { - z.EncJSONMarshal(yy4) - } else { - z.EncFallback(yy4) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.Port - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(yy6) - } else { - z.EncFallback(yy6) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *TCPSocketAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *TCPSocketAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "port": - if r.TryDecodeAsNil() { - x.Port = pkg4_intstr.IntOrString{} - } else { - yyv4 := &x.Port - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else if !yym5 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4) - } else { - z.DecFallback(yyv4, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *TCPSocketAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = pkg4_intstr.IntOrString{} - } 
else { - yyv7 := &x.Port - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(yyv7) { - } else if !yym8 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv7) - } else { - z.DecFallback(yyv7, false) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ExecAction) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Command) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Command == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("command")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Command == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ExecAction) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ExecAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "command": - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv4 := &x.Command - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - 
z.F.DecSliceStringX(yyv4, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ExecAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv7 := &x.Command - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - z.F.DecSliceStringX(yyv7, false, d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Probe) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.InitialDelaySeconds != 0 - yyq2[1] = x.TimeoutSeconds != 0 - yyq2[2] = x.PeriodSeconds != 0 - yyq2[3] = x.SuccessThreshold != 0 - yyq2[4] = x.FailureThreshold != 0 - yyq2[5] = x.Handler.Exec != nil && x.Exec != nil - yyq2[6] = x.Handler.HTTPGet != nil && x.HTTPGet != nil - yyq2[7] = x.Handler.TCPSocket != nil && x.TCPSocket != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.InitialDelaySeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("initialDelaySeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.InitialDelaySeconds)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.TimeoutSeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("timeoutSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.TimeoutSeconds)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.PeriodSeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("periodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.PeriodSeconds)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.SuccessThreshold)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("successThreshold")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.SuccessThreshold)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.FailureThreshold)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("failureThreshold")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(x.FailureThreshold)) - } - } - } - var yyn18 bool - if x.Handler.Exec == nil { - yyn18 = true - goto LABEL18 - } - LABEL18: - if yyr2 || yy2arr2 { - if yyn18 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.Exec == nil { - r.EncodeNil() - } else { - x.Exec.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("exec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn18 { - r.EncodeNil() - } else { - if x.Exec == nil { - r.EncodeNil() - } else { - x.Exec.CodecEncodeSelf(e) - } - } - } - } - var yyn21 bool - if x.Handler.HTTPGet == nil { - yyn21 = true - goto LABEL21 - } - LABEL21: - if yyr2 || yy2arr2 { - if yyn21 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.HTTPGet == nil { - r.EncodeNil() - } else { - x.HTTPGet.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("httpGet")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn21 { - r.EncodeNil() - } else { - if x.HTTPGet == nil { - r.EncodeNil() - } else { - x.HTTPGet.CodecEncodeSelf(e) - } - } - } - } - var yyn24 bool - if x.Handler.TCPSocket == nil { - yyn24 = true - goto LABEL24 - } - LABEL24: - if yyr2 || yy2arr2 { - if yyn24 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.TCPSocket == nil { - r.EncodeNil() - } else { - x.TCPSocket.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tcpSocket")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn24 { - r.EncodeNil() - } else { - if x.TCPSocket == nil { - r.EncodeNil() - } else { - x.TCPSocket.CodecEncodeSelf(e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Probe) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Probe) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "initialDelaySeconds": - if r.TryDecodeAsNil() { - x.InitialDelaySeconds = 0 - } else { - x.InitialDelaySeconds = int32(r.DecodeInt(32)) - } - case "timeoutSeconds": - if r.TryDecodeAsNil() { - x.TimeoutSeconds = 0 - } else { - x.TimeoutSeconds = int32(r.DecodeInt(32)) - } - case "periodSeconds": - if r.TryDecodeAsNil() { - x.PeriodSeconds = 0 - } else { - x.PeriodSeconds = int32(r.DecodeInt(32)) - } - case "successThreshold": - if r.TryDecodeAsNil() { - x.SuccessThreshold = 0 - } else { - x.SuccessThreshold = int32(r.DecodeInt(32)) - } - case "failureThreshold": - if r.TryDecodeAsNil() { - x.FailureThreshold = 0 - } else { - x.FailureThreshold = int32(r.DecodeInt(32)) - } - case "exec": - if x.Handler.Exec == nil { - x.Handler.Exec = new(ExecAction) - } - if r.TryDecodeAsNil() { - if x.Exec != nil { - x.Exec = nil - } - } else { - if x.Exec == nil { - x.Exec = new(ExecAction) - } - x.Exec.CodecDecodeSelf(d) - } - case "httpGet": - if x.Handler.HTTPGet == nil { - x.Handler.HTTPGet = new(HTTPGetAction) - } - if r.TryDecodeAsNil() { - if x.HTTPGet != nil { - x.HTTPGet = nil - } - } else { - if x.HTTPGet == nil { - x.HTTPGet = new(HTTPGetAction) - } - x.HTTPGet.CodecDecodeSelf(d) - } - case "tcpSocket": - if x.Handler.TCPSocket == nil { - x.Handler.TCPSocket = new(TCPSocketAction) - } - if r.TryDecodeAsNil() { - if x.TCPSocket != nil { - x.TCPSocket = nil - } - } else { - if x.TCPSocket == nil { - x.TCPSocket = new(TCPSocketAction) - } - x.TCPSocket.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Probe) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.InitialDelaySeconds = 0 - } else { - x.InitialDelaySeconds = int32(r.DecodeInt(32)) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TimeoutSeconds = 0 - } else { - x.TimeoutSeconds = int32(r.DecodeInt(32)) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PeriodSeconds = 0 - } else { - x.PeriodSeconds = int32(r.DecodeInt(32)) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SuccessThreshold = 0 - } else { - x.SuccessThreshold = int32(r.DecodeInt(32)) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FailureThreshold = 0 - } else { - x.FailureThreshold = int32(r.DecodeInt(32)) - } - if x.Handler.Exec == nil { - x.Handler.Exec = new(ExecAction) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Exec != nil { - x.Exec = nil - } - } else { - if x.Exec == nil { - x.Exec = new(ExecAction) - } - x.Exec.CodecDecodeSelf(d) - } - if x.Handler.HTTPGet == nil { - x.Handler.HTTPGet = new(HTTPGetAction) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HTTPGet != nil { - x.HTTPGet = nil - } - } else { - if x.HTTPGet == nil { - x.HTTPGet = new(HTTPGetAction) - } - x.HTTPGet.CodecDecodeSelf(d) - } - if x.Handler.TCPSocket == nil { - x.Handler.TCPSocket = new(TCPSocketAction) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TCPSocket != nil { - x.TCPSocket = nil - } - } else { - if x.TCPSocket == nil { - x.TCPSocket = new(TCPSocketAction) - } - x.TCPSocket.CodecDecodeSelf(d) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x PullPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x 
*PullPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x Capability) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *Capability) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *Capabilities) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Add) != 0 - yyq2[1] = len(x.Drop) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Add == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceCapability(([]Capability)(x.Add), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("add")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Add == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceCapability(([]Capability)(x.Add), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Drop == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceCapability(([]Capability)(x.Drop), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("drop")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Drop == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceCapability(([]Capability)(x.Drop), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Capabilities) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - 
x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Capabilities) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "add": - if r.TryDecodeAsNil() { - x.Add = nil - } else { - yyv4 := &x.Add - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceCapability((*[]Capability)(yyv4), d) - } - } - case "drop": - if r.TryDecodeAsNil() { - x.Drop = nil - } else { - yyv6 := &x.Drop - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceCapability((*[]Capability)(yyv6), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Capabilities) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Add = nil - } else { - yyv9 := &x.Add - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceCapability((*[]Capability)(yyv9), d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Drop = nil - } else { - yyv11 := &x.Drop - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceCapability((*[]Capability)(yyv11), d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceRequirements) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Limits) != 0 - yyq2[1] = len(x.Requests) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Limits == nil { - r.EncodeNil() - } else { - x.Limits.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("limits")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Limits == nil { - r.EncodeNil() - } else { - x.Limits.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Requests == nil { - r.EncodeNil() - } else { - x.Requests.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("requests")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Requests == nil { - r.EncodeNil() - } else { - x.Requests.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceRequirements) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceRequirements) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "limits": - if r.TryDecodeAsNil() { - x.Limits = nil - } else { - yyv4 := &x.Limits - yyv4.CodecDecodeSelf(d) - } - case "requests": - if r.TryDecodeAsNil() { - x.Requests = nil - } else { - yyv5 := &x.Requests - yyv5.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceRequirements) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Limits = nil - } else { - yyv7 := &x.Limits - yyv7.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - 
yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Requests = nil - } else { - yyv8 := &x.Requests - yyv8.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Container) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [18]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Image != "" - yyq2[2] = len(x.Command) != 0 - yyq2[3] = len(x.Args) != 0 - yyq2[4] = x.WorkingDir != "" - yyq2[5] = len(x.Ports) != 0 - yyq2[6] = len(x.Env) != 0 - yyq2[7] = true - yyq2[8] = len(x.VolumeMounts) != 0 - yyq2[9] = x.LivenessProbe != nil - yyq2[10] = x.ReadinessProbe != nil - yyq2[11] = x.Lifecycle != nil - yyq2[12] = x.TerminationMessagePath != "" - yyq2[13] = x.ImagePullPolicy != "" - yyq2[14] = x.SecurityContext != nil - yyq2[15] = x.Stdin != false - yyq2[16] = x.StdinOnce != false - yyq2[17] = x.TTY != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(18) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Image)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("image")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Image)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Command == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("command")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Command == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } 
- } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Args == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - z.F.EncSliceStringV(x.Args, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("args")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Args == nil { - r.EncodeNil() - } else { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - z.F.EncSliceStringV(x.Args, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.WorkingDir)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("workingDir")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.WorkingDir)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.Ports == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - h.encSliceContainerPort(([]ContainerPort)(x.Ports), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ports")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ports == nil { - r.EncodeNil() - } else { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - h.encSliceContainerPort(([]ContainerPort)(x.Ports), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.Env == nil { - r.EncodeNil() - } else { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - h.encSliceEnvVar(([]EnvVar)(x.Env), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("env")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Env == nil { - r.EncodeNil() - } else { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - h.encSliceEnvVar(([]EnvVar)(x.Env), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yy25 := &x.Resources - yy25.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resources")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy27 := &x.Resources - yy27.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.VolumeMounts == nil { - r.EncodeNil() - } else { - yym30 := z.EncBinary() - _ = yym30 - if false { - } else { - h.encSliceVolumeMount(([]VolumeMount)(x.VolumeMounts), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeMounts")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VolumeMounts == nil { - r.EncodeNil() - } else { - yym31 := z.EncBinary() - _ = yym31 - if false { - } else { - h.encSliceVolumeMount(([]VolumeMount)(x.VolumeMounts), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.LivenessProbe == nil { - r.EncodeNil() - } else { - x.LivenessProbe.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("livenessProbe")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LivenessProbe == nil { - r.EncodeNil() - } else { - x.LivenessProbe.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - if x.ReadinessProbe == nil { - r.EncodeNil() - } else { - x.ReadinessProbe.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readinessProbe")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ReadinessProbe == nil { - r.EncodeNil() - } else { - x.ReadinessProbe.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - if x.Lifecycle == nil { - r.EncodeNil() - } else { - x.Lifecycle.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lifecycle")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Lifecycle == nil { - r.EncodeNil() - } else { - x.Lifecycle.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - yym42 := z.EncBinary() - _ = yym42 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TerminationMessagePath)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("terminationMessagePath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym43 := z.EncBinary() - _ = yym43 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TerminationMessagePath)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - x.ImagePullPolicy.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("imagePullPolicy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.ImagePullPolicy.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[14] { - if x.SecurityContext == nil { - r.EncodeNil() - } else { - x.SecurityContext.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("securityContext")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecurityContext == nil { - r.EncodeNil() - } else { - x.SecurityContext.CodecEncodeSelf(e) - 
} - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[15] { - yym51 := z.EncBinary() - _ = yym51 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stdin")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym52 := z.EncBinary() - _ = yym52 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[16] { - yym54 := z.EncBinary() - _ = yym54 - if false { - } else { - r.EncodeBool(bool(x.StdinOnce)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stdinOnce")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym55 := z.EncBinary() - _ = yym55 - if false { - } else { - r.EncodeBool(bool(x.StdinOnce)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[17] { - yym57 := z.EncBinary() - _ = yym57 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[17] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tty")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym58 := z.EncBinary() - _ = yym58 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Container) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Container) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "image": - if r.TryDecodeAsNil() { - x.Image = "" - } else { - x.Image = string(r.DecodeString()) - } - case "command": - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv6 := &x.Command - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - 
z.F.DecSliceStringX(yyv6, false, d) - } - } - case "args": - if r.TryDecodeAsNil() { - x.Args = nil - } else { - yyv8 := &x.Args - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - z.F.DecSliceStringX(yyv8, false, d) - } - } - case "workingDir": - if r.TryDecodeAsNil() { - x.WorkingDir = "" - } else { - x.WorkingDir = string(r.DecodeString()) - } - case "ports": - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv11 := &x.Ports - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceContainerPort((*[]ContainerPort)(yyv11), d) - } - } - case "env": - if r.TryDecodeAsNil() { - x.Env = nil - } else { - yyv13 := &x.Env - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceEnvVar((*[]EnvVar)(yyv13), d) - } - } - case "resources": - if r.TryDecodeAsNil() { - x.Resources = ResourceRequirements{} - } else { - yyv15 := &x.Resources - yyv15.CodecDecodeSelf(d) - } - case "volumeMounts": - if r.TryDecodeAsNil() { - x.VolumeMounts = nil - } else { - yyv16 := &x.VolumeMounts - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - h.decSliceVolumeMount((*[]VolumeMount)(yyv16), d) - } - } - case "livenessProbe": - if r.TryDecodeAsNil() { - if x.LivenessProbe != nil { - x.LivenessProbe = nil - } - } else { - if x.LivenessProbe == nil { - x.LivenessProbe = new(Probe) - } - x.LivenessProbe.CodecDecodeSelf(d) - } - case "readinessProbe": - if r.TryDecodeAsNil() { - if x.ReadinessProbe != nil { - x.ReadinessProbe = nil - } - } else { - if x.ReadinessProbe == nil { - x.ReadinessProbe = new(Probe) - } - x.ReadinessProbe.CodecDecodeSelf(d) - } - case "lifecycle": - if r.TryDecodeAsNil() { - if x.Lifecycle != nil { - x.Lifecycle = nil - } - } else { - if x.Lifecycle == nil { - x.Lifecycle = new(Lifecycle) - } - x.Lifecycle.CodecDecodeSelf(d) - } - case "terminationMessagePath": - if r.TryDecodeAsNil() { - x.TerminationMessagePath = "" - } else { - x.TerminationMessagePath = string(r.DecodeString()) - } - case "imagePullPolicy": - if r.TryDecodeAsNil() { - x.ImagePullPolicy = "" - } else { - x.ImagePullPolicy = PullPolicy(r.DecodeString()) - } - case "securityContext": - if r.TryDecodeAsNil() { - if x.SecurityContext != nil { - x.SecurityContext = nil - } - } else { - if x.SecurityContext == nil { - x.SecurityContext = new(SecurityContext) - } - x.SecurityContext.CodecDecodeSelf(d) - } - case "stdin": - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - x.Stdin = bool(r.DecodeBool()) - } - case "stdinOnce": - if r.TryDecodeAsNil() { - x.StdinOnce = false - } else { - x.StdinOnce = bool(r.DecodeBool()) - } - case "tty": - if r.TryDecodeAsNil() { - x.TTY = false - } else { - x.TTY = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Container) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj27 int - var yyb27 bool - var yyhl27 bool = l >= 0 - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - 
return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Image = "" - } else { - x.Image = string(r.DecodeString()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv30 := &x.Command - yym31 := z.DecBinary() - _ = yym31 - if false { - } else { - z.F.DecSliceStringX(yyv30, false, d) - } - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Args = nil - } else { - yyv32 := &x.Args - yym33 := z.DecBinary() - _ = yym33 - if false { - } else { - z.F.DecSliceStringX(yyv32, false, d) - } - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.WorkingDir = "" - } else { - x.WorkingDir = string(r.DecodeString()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv35 := &x.Ports - yym36 := z.DecBinary() - _ = yym36 - if false { - } else { - h.decSliceContainerPort((*[]ContainerPort)(yyv35), d) - } - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Env = nil - } else { - yyv37 := &x.Env - yym38 := z.DecBinary() - _ = yym38 - if false { - } else { - h.decSliceEnvVar((*[]EnvVar)(yyv37), d) - } - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Resources = ResourceRequirements{} - } else { - yyv39 := &x.Resources - yyv39.CodecDecodeSelf(d) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeMounts = nil - } else { - yyv40 := &x.VolumeMounts - yym41 := z.DecBinary() - _ = yym41 - if false { - } else { - h.decSliceVolumeMount((*[]VolumeMount)(yyv40), d) - } - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LivenessProbe != nil { - x.LivenessProbe = nil - } - } else { - if x.LivenessProbe == nil { - x.LivenessProbe = new(Probe) - } - x.LivenessProbe.CodecDecodeSelf(d) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ReadinessProbe != nil { - x.ReadinessProbe = nil - } - } else { - if x.ReadinessProbe == nil { - x.ReadinessProbe = new(Probe) - } - x.ReadinessProbe.CodecDecodeSelf(d) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Lifecycle != nil { - x.Lifecycle = nil - } - } else { - if x.Lifecycle == nil { - x.Lifecycle = new(Lifecycle) - } - x.Lifecycle.CodecDecodeSelf(d) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TerminationMessagePath = "" - } else { - x.TerminationMessagePath = string(r.DecodeString()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ImagePullPolicy = "" - } else { - x.ImagePullPolicy = PullPolicy(r.DecodeString()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecurityContext != nil { - x.SecurityContext = nil - } - } else { - if x.SecurityContext == nil { - x.SecurityContext = new(SecurityContext) - } - x.SecurityContext.CodecDecodeSelf(d) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - x.Stdin = bool(r.DecodeBool()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.StdinOnce = false - } else { - x.StdinOnce = bool(r.DecodeBool()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TTY = false - } else { - x.TTY = bool(r.DecodeBool()) - } - for { - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj27-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Handler) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray 
- var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Exec != nil - yyq2[1] = x.HTTPGet != nil - yyq2[2] = x.TCPSocket != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Exec == nil { - r.EncodeNil() - } else { - x.Exec.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("exec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Exec == nil { - r.EncodeNil() - } else { - x.Exec.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.HTTPGet == nil { - r.EncodeNil() - } else { - x.HTTPGet.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("httpGet")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HTTPGet == nil { - r.EncodeNil() - } else { - x.HTTPGet.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.TCPSocket == nil { - r.EncodeNil() - } else { - x.TCPSocket.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tcpSocket")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TCPSocket == nil { - r.EncodeNil() - } else { - x.TCPSocket.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Handler) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Handler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "exec": - if r.TryDecodeAsNil() { - if x.Exec != nil { - x.Exec = nil - } - } else { - if x.Exec == nil { - x.Exec = 
new(ExecAction) - } - x.Exec.CodecDecodeSelf(d) - } - case "httpGet": - if r.TryDecodeAsNil() { - if x.HTTPGet != nil { - x.HTTPGet = nil - } - } else { - if x.HTTPGet == nil { - x.HTTPGet = new(HTTPGetAction) - } - x.HTTPGet.CodecDecodeSelf(d) - } - case "tcpSocket": - if r.TryDecodeAsNil() { - if x.TCPSocket != nil { - x.TCPSocket = nil - } - } else { - if x.TCPSocket == nil { - x.TCPSocket = new(TCPSocketAction) - } - x.TCPSocket.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Handler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Exec != nil { - x.Exec = nil - } - } else { - if x.Exec == nil { - x.Exec = new(ExecAction) - } - x.Exec.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HTTPGet != nil { - x.HTTPGet = nil - } - } else { - if x.HTTPGet == nil { - x.HTTPGet = new(HTTPGetAction) - } - x.HTTPGet.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TCPSocket != nil { - x.TCPSocket = nil - } - } else { - if x.TCPSocket == nil { - x.TCPSocket = new(TCPSocketAction) - } - x.TCPSocket.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Lifecycle) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.PostStart != nil - yyq2[1] = x.PreStop != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.PostStart == nil { - r.EncodeNil() - } else { - x.PostStart.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("postStart")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PostStart == nil { - r.EncodeNil() - } else { - x.PostStart.CodecEncodeSelf(e) - } - } - } - if yyr2 || 
yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.PreStop == nil { - r.EncodeNil() - } else { - x.PreStop.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preStop")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PreStop == nil { - r.EncodeNil() - } else { - x.PreStop.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Lifecycle) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Lifecycle) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "postStart": - if r.TryDecodeAsNil() { - if x.PostStart != nil { - x.PostStart = nil - } - } else { - if x.PostStart == nil { - x.PostStart = new(Handler) - } - x.PostStart.CodecDecodeSelf(d) - } - case "preStop": - if r.TryDecodeAsNil() { - if x.PreStop != nil { - x.PreStop = nil - } - } else { - if x.PreStop == nil { - x.PreStop = new(Handler) - } - x.PreStop.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Lifecycle) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PostStart != nil { - x.PostStart = nil - } - } else { - if x.PostStart == nil { - x.PostStart = new(Handler) - } - x.PostStart.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PreStop != nil { - x.PreStop = nil - } - } else { - if x.PreStop == nil { - 
x.PreStop = new(Handler) - } - x.PreStop.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ConditionStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ConditionStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ContainerStateWaiting) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Reason != "" - yyq2[1] = x.Message != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerStateWaiting) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } 
else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerStateWaiting) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerStateWaiting) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerStateRunning) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.StartedAt - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else if yym5 { - z.EncBinaryMarshal(yy4) - } else if !yym5 && z.IsJSONHandle() { - z.EncJSONMarshal(yy4) - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startedAt")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.StartedAt - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else if yym7 { - z.EncBinaryMarshal(yy6) - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(yy6) - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerStateRunning) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerStateRunning) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "startedAt": - if r.TryDecodeAsNil() { - x.StartedAt = pkg2_unversioned.Time{} - } else { - yyv4 := &x.StartedAt - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else if yym5 { - z.DecBinaryUnmarshal(yyv4) - } else if !yym5 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4) - } else { - z.DecFallback(yyv4, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerStateRunning) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.StartedAt = pkg2_unversioned.Time{} - } else { - yyv7 := &x.StartedAt - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(yyv7) { - } else if yym8 { - z.DecBinaryUnmarshal(yyv7) - } else if !yym8 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv7) - } else { - z.DecFallback(yyv7, false) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - 
z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerStateTerminated) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Signal != 0 - yyq2[2] = x.Reason != "" - yyq2[3] = x.Message != "" - yyq2[4] = true - yyq2[5] = true - yyq2[6] = x.ContainerID != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.ExitCode)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("exitCode")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.ExitCode)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.Signal)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("signal")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.Signal)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy16 := &x.StartedAt - yym17 := z.EncBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.EncExt(yy16) { - } else if yym17 { - z.EncBinaryMarshal(yy16) - } else if !yym17 && z.IsJSONHandle() { - z.EncJSONMarshal(yy16) - } else { - z.EncFallback(yy16) - } - } else { - r.EncodeNil() - } - } else { - 
if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startedAt")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy18 := &x.StartedAt - yym19 := z.EncBinary() - _ = yym19 - if false { - } else if z.HasExtensions() && z.EncExt(yy18) { - } else if yym19 { - z.EncBinaryMarshal(yy18) - } else if !yym19 && z.IsJSONHandle() { - z.EncJSONMarshal(yy18) - } else { - z.EncFallback(yy18) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yy21 := &x.FinishedAt - yym22 := z.EncBinary() - _ = yym22 - if false { - } else if z.HasExtensions() && z.EncExt(yy21) { - } else if yym22 { - z.EncBinaryMarshal(yy21) - } else if !yym22 && z.IsJSONHandle() { - z.EncJSONMarshal(yy21) - } else { - z.EncFallback(yy21) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("finishedAt")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy23 := &x.FinishedAt - yym24 := z.EncBinary() - _ = yym24 - if false { - } else if z.HasExtensions() && z.EncExt(yy23) { - } else if yym24 { - z.EncBinaryMarshal(yy23) - } else if !yym24 && z.IsJSONHandle() { - z.EncJSONMarshal(yy23) - } else { - z.EncFallback(yy23) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym27 := z.EncBinary() - _ = yym27 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerStateTerminated) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerStateTerminated) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) 
- switch yys3 { - case "exitCode": - if r.TryDecodeAsNil() { - x.ExitCode = 0 - } else { - x.ExitCode = int32(r.DecodeInt(32)) - } - case "signal": - if r.TryDecodeAsNil() { - x.Signal = 0 - } else { - x.Signal = int32(r.DecodeInt(32)) - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - case "startedAt": - if r.TryDecodeAsNil() { - x.StartedAt = pkg2_unversioned.Time{} - } else { - yyv8 := &x.StartedAt - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else if yym9 { - z.DecBinaryUnmarshal(yyv8) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv8) - } else { - z.DecFallback(yyv8, false) - } - } - case "finishedAt": - if r.TryDecodeAsNil() { - x.FinishedAt = pkg2_unversioned.Time{} - } else { - yyv10 := &x.FinishedAt - yym11 := z.DecBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.DecExt(yyv10) { - } else if yym11 { - z.DecBinaryUnmarshal(yyv10) - } else if !yym11 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv10) - } else { - z.DecFallback(yyv10, false) - } - } - case "containerID": - if r.TryDecodeAsNil() { - x.ContainerID = "" - } else { - x.ContainerID = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerStateTerminated) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExitCode = 0 - } else { - x.ExitCode = int32(r.DecodeInt(32)) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Signal = 0 - } else { - x.Signal = int32(r.DecodeInt(32)) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.StartedAt = pkg2_unversioned.Time{} - } else { - yyv18 := &x.StartedAt - yym19 := z.DecBinary() - _ = yym19 - if false { - } else if z.HasExtensions() && z.DecExt(yyv18) { - } else if yym19 { - 
z.DecBinaryUnmarshal(yyv18) - } else if !yym19 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv18) - } else { - z.DecFallback(yyv18, false) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FinishedAt = pkg2_unversioned.Time{} - } else { - yyv20 := &x.FinishedAt - yym21 := z.DecBinary() - _ = yym21 - if false { - } else if z.HasExtensions() && z.DecExt(yyv20) { - } else if yym21 { - z.DecBinaryUnmarshal(yyv20) - } else if !yym21 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv20) - } else { - z.DecFallback(yyv20, false) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerID = "" - } else { - x.ContainerID = string(r.DecodeString()) - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerState) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Waiting != nil - yyq2[1] = x.Running != nil - yyq2[2] = x.Terminated != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Waiting == nil { - r.EncodeNil() - } else { - x.Waiting.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("waiting")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Waiting == nil { - r.EncodeNil() - } else { - x.Waiting.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Running == nil { - r.EncodeNil() - } else { - x.Running.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("running")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Running == nil { - r.EncodeNil() - } else { - x.Running.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Terminated == nil { - r.EncodeNil() - } else { - x.Terminated.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("terminated")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Terminated == nil { - r.EncodeNil() - } else { - x.Terminated.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerState) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerState) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "waiting": - if r.TryDecodeAsNil() { - if x.Waiting != nil { - x.Waiting = nil - } - } else { - if x.Waiting == nil { - x.Waiting = new(ContainerStateWaiting) - } - x.Waiting.CodecDecodeSelf(d) - } - case "running": - if r.TryDecodeAsNil() { - if x.Running != nil { - x.Running = nil - } - } else { - if x.Running == nil { - x.Running = new(ContainerStateRunning) - } - x.Running.CodecDecodeSelf(d) - } - case "terminated": - if r.TryDecodeAsNil() { - if x.Terminated != nil { - x.Terminated = nil - } - } else { - if x.Terminated == nil { - x.Terminated = new(ContainerStateTerminated) - } - x.Terminated.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerState) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Waiting != nil { - x.Waiting = nil - } - } else { - if x.Waiting == nil { - x.Waiting = new(ContainerStateWaiting) - } - x.Waiting.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Running != nil { - x.Running = nil - } - } else { - if x.Running == nil { - x.Running = new(ContainerStateRunning) - } - x.Running.CodecDecodeSelf(d) - } - 
yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Terminated != nil { - x.Terminated = nil - } - } else { - if x.Terminated == nil { - x.Terminated = new(ContainerStateTerminated) - } - x.Terminated.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = true - yyq2[2] = true - yyq2[7] = x.ContainerID != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 5 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy7 := &x.State - yy7.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("state")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.State - yy9.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy12 := &x.LastTerminationState - yy12.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastState")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.LastTerminationState - yy14.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeBool(bool(x.Ready)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ready")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeBool(bool(x.Ready)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(x.RestartCount)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("restartCount")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeInt(int64(x.RestartCount)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Image)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("image")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Image)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ImageID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("imageID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym27 := z.EncBinary() - _ = yym27 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ImageID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym30 := z.EncBinary() - _ = yym30 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = 
string(r.DecodeString()) - } - case "state": - if r.TryDecodeAsNil() { - x.State = ContainerState{} - } else { - yyv5 := &x.State - yyv5.CodecDecodeSelf(d) - } - case "lastState": - if r.TryDecodeAsNil() { - x.LastTerminationState = ContainerState{} - } else { - yyv6 := &x.LastTerminationState - yyv6.CodecDecodeSelf(d) - } - case "ready": - if r.TryDecodeAsNil() { - x.Ready = false - } else { - x.Ready = bool(r.DecodeBool()) - } - case "restartCount": - if r.TryDecodeAsNil() { - x.RestartCount = 0 - } else { - x.RestartCount = int32(r.DecodeInt(32)) - } - case "image": - if r.TryDecodeAsNil() { - x.Image = "" - } else { - x.Image = string(r.DecodeString()) - } - case "imageID": - if r.TryDecodeAsNil() { - x.ImageID = "" - } else { - x.ImageID = string(r.DecodeString()) - } - case "containerID": - if r.TryDecodeAsNil() { - x.ContainerID = "" - } else { - x.ContainerID = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.State = ContainerState{} - } else { - yyv14 := &x.State - yyv14.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTerminationState = ContainerState{} - } else { - yyv15 := &x.LastTerminationState - yyv15.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ready = false - } else { - x.Ready = bool(r.DecodeBool()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RestartCount = 0 - } else { - x.RestartCount = int32(r.DecodeInt(32)) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Image = "" - } else { - x.Image = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ImageID = "" - } else { - x.ImageID = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerID = "" - } else { - x.ContainerID = string(r.DecodeString()) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x PodPhase) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PodPhase) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x PodConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *PodConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *PodCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = true - yyq2[3] = true - yyq2[4] = x.Reason != "" - yyq2[5] = x.Message != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Status.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Status.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.LastProbeTime - yym11 := z.EncBinary() - 
_ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else if yym11 { - z.EncBinaryMarshal(yy10) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(yy10) - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.LastProbeTime - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if yym13 { - z.EncBinaryMarshal(yy12) - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.LastTransitionTime - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(yy15) { - } else if yym16 { - z.EncBinaryMarshal(yy15) - } else if !yym16 && z.IsJSONHandle() { - z.EncJSONMarshal(yy15) - } else { - z.EncFallback(yy15) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.LastTransitionTime - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(yy17) { - } else if yym18 { - z.EncBinaryMarshal(yy17) - } else if !yym18 && z.IsJSONHandle() { - z.EncJSONMarshal(yy17) - } else { - z.EncFallback(yy17) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) 
- } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = PodConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = ConditionStatus(r.DecodeString()) - } - case "lastProbeTime": - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_unversioned.Time{} - } else { - yyv6 := &x.LastProbeTime - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if yym7 { - z.DecBinaryUnmarshal(yyv6) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv8 := &x.LastTransitionTime - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else if yym9 { - z.DecBinaryUnmarshal(yyv8) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv8) - } else { - z.DecFallback(yyv8, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = PodConditionType(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = ConditionStatus(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_unversioned.Time{} - } else { - yyv15 := &x.LastProbeTime - 
yym16 := z.DecBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.DecExt(yyv15) { - } else if yym16 { - z.DecBinaryUnmarshal(yyv15) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv15) - } else { - z.DecFallback(yyv15, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv17 := &x.LastTransitionTime - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else if yym18 { - z.DecBinaryUnmarshal(yyv17) - } else if !yym18 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv17) - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x RestartPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *RestartPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x DNSPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *DNSPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *NodeSelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - 
r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.NodeSelectorTerms == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceNodeSelectorTerm(([]NodeSelectorTerm)(x.NodeSelectorTerms), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeSelectorTerms")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NodeSelectorTerms == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceNodeSelectorTerm(([]NodeSelectorTerm)(x.NodeSelectorTerms), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeSelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "nodeSelectorTerms": - if r.TryDecodeAsNil() { - x.NodeSelectorTerms = nil - } else { - yyv4 := &x.NodeSelectorTerms - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeSelectorTerms = nil - } else { - yyv7 := &x.NodeSelectorTerms - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceNodeSelectorTerm((*[]NodeSelectorTerm)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if 
yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeSelectorTerm) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.MatchExpressions == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceNodeSelectorRequirement(([]NodeSelectorRequirement)(x.MatchExpressions), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("matchExpressions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MatchExpressions == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceNodeSelectorRequirement(([]NodeSelectorRequirement)(x.MatchExpressions), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeSelectorTerm) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeSelectorTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "matchExpressions": - if r.TryDecodeAsNil() { - x.MatchExpressions = nil - } else { - yyv4 := &x.MatchExpressions - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeSelectorTerm) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MatchExpressions = nil - } else { - yyv7 := &x.MatchExpressions - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceNodeSelectorRequirement((*[]NodeSelectorRequirement)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeSelectorRequirement) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = len(x.Values) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Operator.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("operator")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Operator.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Values == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - z.F.EncSliceStringV(x.Values, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("values")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Values == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - z.F.EncSliceStringV(x.Values, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeSelectorRequirement) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && 
z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeSelectorRequirement) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - case "operator": - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - x.Operator = NodeSelectorOperator(r.DecodeString()) - } - case "values": - if r.TryDecodeAsNil() { - x.Values = nil - } else { - yyv6 := &x.Values - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecSliceStringX(yyv6, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeSelectorRequirement) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - x.Operator = NodeSelectorOperator(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Values = nil - } else { - yyv11 := &x.Values - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - z.F.DecSliceStringX(yyv11, false, d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x NodeSelectorOperator) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() 
&& z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *NodeSelectorOperator) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *Affinity) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.NodeAffinity != nil - yyq2[1] = x.PodAffinity != nil - yyq2[2] = x.PodAntiAffinity != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.NodeAffinity == nil { - r.EncodeNil() - } else { - x.NodeAffinity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeAffinity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NodeAffinity == nil { - r.EncodeNil() - } else { - x.NodeAffinity.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.PodAffinity == nil { - r.EncodeNil() - } else { - x.PodAffinity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podAffinity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PodAffinity == nil { - r.EncodeNil() - } else { - x.PodAffinity.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.PodAntiAffinity == nil { - r.EncodeNil() - } else { - x.PodAntiAffinity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podAntiAffinity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PodAntiAffinity == nil { - r.EncodeNil() - } else { - x.PodAntiAffinity.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Affinity) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Affinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "nodeAffinity": - if r.TryDecodeAsNil() { - if x.NodeAffinity != nil { - x.NodeAffinity = nil - } - } else { - if x.NodeAffinity == nil { - x.NodeAffinity = new(NodeAffinity) - } - x.NodeAffinity.CodecDecodeSelf(d) - } - case "podAffinity": - if r.TryDecodeAsNil() { - if x.PodAffinity != nil { - x.PodAffinity = nil - } - } else { - if x.PodAffinity == nil { - x.PodAffinity = new(PodAffinity) - } - x.PodAffinity.CodecDecodeSelf(d) - } - case "podAntiAffinity": - if r.TryDecodeAsNil() { - if x.PodAntiAffinity != nil { - x.PodAntiAffinity = nil - } - } else { - if x.PodAntiAffinity == nil { - x.PodAntiAffinity = new(PodAntiAffinity) - } - x.PodAntiAffinity.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Affinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NodeAffinity != nil { - x.NodeAffinity = nil - } - } else { - if x.NodeAffinity == nil { - x.NodeAffinity = new(NodeAffinity) - } - x.NodeAffinity.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PodAffinity != nil { - x.PodAffinity = nil - } - } else { - if x.PodAffinity == nil { - x.PodAffinity = new(PodAffinity) - } - x.PodAffinity.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PodAntiAffinity != nil { - x.PodAntiAffinity = nil - } - } else { - if x.PodAntiAffinity == nil { - x.PodAntiAffinity = new(PodAntiAffinity) - } - x.PodAntiAffinity.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodAffinity) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 - yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodAffinity) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "requiredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) - } - } - case "preferredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodAntiAffinity) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.RequiredDuringSchedulingIgnoredDuringExecution) != 0 - yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - 
r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSlicePodAffinityTerm(([]PodAffinityTerm)(x.RequiredDuringSchedulingIgnoredDuringExecution), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceWeightedPodAffinityTerm(([]WeightedPodAffinityTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodAntiAffinity) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodAntiAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case 
"requiredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv4 := &x.RequiredDuringSchedulingIgnoredDuringExecution - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv4), d) - } - } - case "preferredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv6 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv6), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodAntiAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv9 := &x.RequiredDuringSchedulingIgnoredDuringExecution - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSlicePodAffinityTerm((*[]PodAffinityTerm)(yyv9), d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv11 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceWeightedPodAffinityTerm((*[]WeightedPodAffinityTerm)(yyv11), d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *WeightedPodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Weight)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("weight")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Weight)) - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.PodAffinityTerm - yy7.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podAffinityTerm")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.PodAffinityTerm - yy9.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *WeightedPodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *WeightedPodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "weight": - if r.TryDecodeAsNil() { - x.Weight = 0 - } else { - x.Weight = int32(r.DecodeInt(32)) - } - case "podAffinityTerm": - if r.TryDecodeAsNil() { - x.PodAffinityTerm = PodAffinityTerm{} - } else { - yyv5 := &x.PodAffinityTerm - yyv5.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *WeightedPodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Weight = 0 - } else { - x.Weight = int32(r.DecodeInt(32)) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodAffinityTerm = PodAffinityTerm{} - } else { - yyv8 := &x.PodAffinityTerm - yyv8.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} 
- -func (x *PodAffinityTerm) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.LabelSelector != nil - yyq2[2] = x.TopologyKey != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.LabelSelector == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { - } else { - z.EncFallback(x.LabelSelector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("labelSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LabelSelector == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(x.LabelSelector) { - } else { - z.EncFallback(x.LabelSelector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Namespaces == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - z.F.EncSliceStringV(x.Namespaces, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespaces")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Namespaces == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - z.F.EncSliceStringV(x.Namespaces, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("topologyKey")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TopologyKey)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodAffinityTerm) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - 
} else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodAffinityTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "labelSelector": - if r.TryDecodeAsNil() { - if x.LabelSelector != nil { - x.LabelSelector = nil - } - } else { - if x.LabelSelector == nil { - x.LabelSelector = new(pkg2_unversioned.LabelSelector) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { - } else { - z.DecFallback(x.LabelSelector, false) - } - } - case "namespaces": - if r.TryDecodeAsNil() { - x.Namespaces = nil - } else { - yyv6 := &x.Namespaces - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecSliceStringX(yyv6, false, d) - } - } - case "topologyKey": - if r.TryDecodeAsNil() { - x.TopologyKey = "" - } else { - x.TopologyKey = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodAffinityTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LabelSelector != nil { - x.LabelSelector = nil - } - } else { - if x.LabelSelector == nil { - x.LabelSelector = new(pkg2_unversioned.LabelSelector) - } - yym11 := z.DecBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.DecExt(x.LabelSelector) { - } else { - z.DecFallback(x.LabelSelector, false) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespaces = nil - } else { - yyv12 := &x.Namespaces - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - z.F.DecSliceStringX(yyv12, false, d) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TopologyKey = "" - } else { - x.TopologyKey = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeAffinity) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.RequiredDuringSchedulingIgnoredDuringExecution != nil - yyq2[1] = len(x.PreferredDuringSchedulingIgnoredDuringExecution) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("requiredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preferredDuringSchedulingIgnoredDuringExecution")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PreferredDuringSchedulingIgnoredDuringExecution == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSlicePreferredSchedulingTerm(([]PreferredSchedulingTerm)(x.PreferredDuringSchedulingIgnoredDuringExecution), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeAffinity) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeAffinity) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; 
; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "requiredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - if x.RequiredDuringSchedulingIgnoredDuringExecution != nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } - } else { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) - } - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) - } - case "preferredDuringSchedulingIgnoredDuringExecution": - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv5 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv5), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeAffinity) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RequiredDuringSchedulingIgnoredDuringExecution != nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = nil - } - } else { - if x.RequiredDuringSchedulingIgnoredDuringExecution == nil { - x.RequiredDuringSchedulingIgnoredDuringExecution = new(NodeSelector) - } - x.RequiredDuringSchedulingIgnoredDuringExecution.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PreferredDuringSchedulingIgnoredDuringExecution = nil - } else { - yyv9 := &x.PreferredDuringSchedulingIgnoredDuringExecution - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSlicePreferredSchedulingTerm((*[]PreferredSchedulingTerm)(yyv9), d) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PreferredSchedulingTerm) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Weight)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("weight")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Weight)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.Preference - yy7.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preference")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.Preference - yy9.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PreferredSchedulingTerm) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PreferredSchedulingTerm) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "weight": - if r.TryDecodeAsNil() { - x.Weight = 0 - } else { - x.Weight = int32(r.DecodeInt(32)) - } - case "preference": - if r.TryDecodeAsNil() { - x.Preference = NodeSelectorTerm{} - } else { - yyv5 := &x.Preference - yyv5.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PreferredSchedulingTerm) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Weight = 0 - } else { - x.Weight = int32(r.DecodeInt(32)) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Preference = NodeSelectorTerm{} - } else { - yyv8 := &x.Preference - yyv8.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Taint) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Value != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Effect.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("effect")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Effect.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Taint) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Taint) 
codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - case "effect": - if r.TryDecodeAsNil() { - x.Effect = "" - } else { - x.Effect = TaintEffect(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Taint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Effect = "" - } else { - x.Effect = TaintEffect(r.DecodeString()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x TaintEffect) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *TaintEffect) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *Toleration) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - 
const yyr2 bool = false - yyq2[0] = x.Key != "" - yyq2[1] = x.Operator != "" - yyq2[2] = x.Value != "" - yyq2[3] = x.Effect != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - x.Operator.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("operator")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Operator.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Value)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - x.Effect.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("effect")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Effect.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Toleration) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Toleration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice 
to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - case "operator": - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - x.Operator = TolerationOperator(r.DecodeString()) - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - case "effect": - if r.TryDecodeAsNil() { - x.Effect = "" - } else { - x.Effect = TaintEffect(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Toleration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - x.Operator = TolerationOperator(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Effect = "" - } else { - x.Effect = TaintEffect(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x TolerationOperator) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *TolerationOperator) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *PodSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, 
_ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [17]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Volumes) != 0 - yyq2[2] = x.RestartPolicy != "" - yyq2[3] = x.TerminationGracePeriodSeconds != nil - yyq2[4] = x.ActiveDeadlineSeconds != nil - yyq2[5] = x.DNSPolicy != "" - yyq2[6] = len(x.NodeSelector) != 0 - yyq2[7] = x.ServiceAccountName != "" - yyq2[8] = x.DeprecatedServiceAccount != "" - yyq2[9] = x.NodeName != "" - yyq2[10] = x.HostNetwork != false - yyq2[11] = x.HostPID != false - yyq2[12] = x.HostIPC != false - yyq2[13] = x.SecurityContext != nil - yyq2[14] = len(x.ImagePullSecrets) != 0 - yyq2[15] = x.Hostname != "" - yyq2[16] = x.Subdomain != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(17) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Volumes == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceVolume(([]Volume)(x.Volumes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Volumes == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceVolume(([]Volume)(x.Volumes), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Containers == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceContainer(([]Container)(x.Containers), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Containers == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceContainer(([]Container)(x.Containers), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - x.RestartPolicy.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("restartPolicy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.RestartPolicy.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.TerminationGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy13 := *x.TerminationGracePeriodSeconds - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(yy13)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("terminationGracePeriodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TerminationGracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy15 := *x.TerminationGracePeriodSeconds - yym16 := z.EncBinary() - _ = yym16 
- if false { - } else { - r.EncodeInt(int64(yy15)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy18 := *x.ActiveDeadlineSeconds - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(yy18)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy20 := *x.ActiveDeadlineSeconds - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeInt(int64(yy20)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - x.DNSPolicy.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("dnsPolicy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.DNSPolicy.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.NodeSelector == nil { - r.EncodeNil() - } else { - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - z.F.EncMapStringStringV(x.NodeSelector, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NodeSelector == nil { - r.EncodeNil() - } else { - yym27 := z.EncBinary() - _ = yym27 - if false { - } else { - z.F.EncMapStringStringV(x.NodeSelector, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceAccountName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym30 := z.EncBinary() - _ = yym30 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - yym32 := z.EncBinary() - _ = yym32 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DeprecatedServiceAccount)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceAccount")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym33 := z.EncBinary() - _ = yym33 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DeprecatedServiceAccount)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NodeName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") 
- } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NodeName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - yym38 := z.EncBinary() - _ = yym38 - if false { - } else { - r.EncodeBool(bool(x.HostNetwork)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostNetwork")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym39 := z.EncBinary() - _ = yym39 - if false { - } else { - r.EncodeBool(bool(x.HostNetwork)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[11] { - yym41 := z.EncBinary() - _ = yym41 - if false { - } else { - r.EncodeBool(bool(x.HostPID)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[11] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym42 := z.EncBinary() - _ = yym42 - if false { - } else { - r.EncodeBool(bool(x.HostPID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - yym44 := z.EncBinary() - _ = yym44 - if false { - } else { - r.EncodeBool(bool(x.HostIPC)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostIPC")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym45 := z.EncBinary() - _ = yym45 - if false { - } else { - r.EncodeBool(bool(x.HostIPC)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - if x.SecurityContext == nil { - r.EncodeNil() - } else { - x.SecurityContext.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("securityContext")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecurityContext == nil { - r.EncodeNil() - } else { - x.SecurityContext.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[14] { - if x.ImagePullSecrets == nil { - r.EncodeNil() - } else { - yym50 := z.EncBinary() - _ = yym50 - if false { - } else { - h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[14] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("imagePullSecrets")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ImagePullSecrets == nil { - r.EncodeNil() - } else { - yym51 := z.EncBinary() - _ = yym51 - if false { - } else { - h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[15] { - yym53 := z.EncBinary() - _ = yym53 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } 
- } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[15] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostname")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym54 := z.EncBinary() - _ = yym54 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[16] { - yym56 := z.EncBinary() - _ = yym56 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[16] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subdomain")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym57 := z.EncBinary() - _ = yym57 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subdomain)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "volumes": - if r.TryDecodeAsNil() { - x.Volumes = nil - } else { - yyv4 := &x.Volumes - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceVolume((*[]Volume)(yyv4), d) - } - } - case "containers": - if r.TryDecodeAsNil() { - x.Containers = nil - } else { - yyv6 := &x.Containers - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceContainer((*[]Container)(yyv6), d) - } - } - case "restartPolicy": - if r.TryDecodeAsNil() { - x.RestartPolicy = "" - } else { - x.RestartPolicy = RestartPolicy(r.DecodeString()) - } - case "terminationGracePeriodSeconds": - if r.TryDecodeAsNil() { - if x.TerminationGracePeriodSeconds != nil { - x.TerminationGracePeriodSeconds = nil - } - } else { - if x.TerminationGracePeriodSeconds == nil { - x.TerminationGracePeriodSeconds = new(int64) - } - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - case 
"activeDeadlineSeconds": - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - case "dnsPolicy": - if r.TryDecodeAsNil() { - x.DNSPolicy = "" - } else { - x.DNSPolicy = DNSPolicy(r.DecodeString()) - } - case "nodeSelector": - if r.TryDecodeAsNil() { - x.NodeSelector = nil - } else { - yyv14 := &x.NodeSelector - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - z.F.DecMapStringStringX(yyv14, false, d) - } - } - case "serviceAccountName": - if r.TryDecodeAsNil() { - x.ServiceAccountName = "" - } else { - x.ServiceAccountName = string(r.DecodeString()) - } - case "serviceAccount": - if r.TryDecodeAsNil() { - x.DeprecatedServiceAccount = "" - } else { - x.DeprecatedServiceAccount = string(r.DecodeString()) - } - case "nodeName": - if r.TryDecodeAsNil() { - x.NodeName = "" - } else { - x.NodeName = string(r.DecodeString()) - } - case "hostNetwork": - if r.TryDecodeAsNil() { - x.HostNetwork = false - } else { - x.HostNetwork = bool(r.DecodeBool()) - } - case "hostPID": - if r.TryDecodeAsNil() { - x.HostPID = false - } else { - x.HostPID = bool(r.DecodeBool()) - } - case "hostIPC": - if r.TryDecodeAsNil() { - x.HostIPC = false - } else { - x.HostIPC = bool(r.DecodeBool()) - } - case "securityContext": - if r.TryDecodeAsNil() { - if x.SecurityContext != nil { - x.SecurityContext = nil - } - } else { - if x.SecurityContext == nil { - x.SecurityContext = new(PodSecurityContext) - } - x.SecurityContext.CodecDecodeSelf(d) - } - case "imagePullSecrets": - if r.TryDecodeAsNil() { - x.ImagePullSecrets = nil - } else { - yyv23 := &x.ImagePullSecrets - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv23), d) - } - } - case "hostname": - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - x.Hostname = string(r.DecodeString()) - } - case "subdomain": - if r.TryDecodeAsNil() { - x.Subdomain = "" - } else { - x.Subdomain = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj27 int - var yyb27 bool - var yyhl27 bool = l >= 0 - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Volumes = nil - } else { - yyv28 := &x.Volumes - yym29 := z.DecBinary() - _ = yym29 - if false { - } else { - h.decSliceVolume((*[]Volume)(yyv28), d) - } - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Containers = nil - } else { - yyv30 := &x.Containers - yym31 := z.DecBinary() - _ = yym31 - if false { - } else { - h.decSliceContainer((*[]Container)(yyv30), d) - } - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RestartPolicy = "" - } else { - x.RestartPolicy = RestartPolicy(r.DecodeString()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TerminationGracePeriodSeconds != nil { - x.TerminationGracePeriodSeconds = nil - } - } else { - if x.TerminationGracePeriodSeconds == nil { - x.TerminationGracePeriodSeconds = new(int64) - } - yym34 := z.DecBinary() - _ = yym34 - if false { - } else { - *((*int64)(x.TerminationGracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym36 := z.DecBinary() - _ = yym36 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DNSPolicy = "" - } else { - x.DNSPolicy = DNSPolicy(r.DecodeString()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeSelector = nil - } else { - yyv38 := &x.NodeSelector - yym39 := z.DecBinary() - _ = yym39 - if false { - } else { - z.F.DecMapStringStringX(yyv38, false, d) - } - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServiceAccountName = "" - } else { - x.ServiceAccountName = string(r.DecodeString()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DeprecatedServiceAccount = "" - } else { - x.DeprecatedServiceAccount = string(r.DecodeString()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeName = "" - } else { - x.NodeName = string(r.DecodeString()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostNetwork = false - } else { - x.HostNetwork = 
bool(r.DecodeBool()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostPID = false - } else { - x.HostPID = bool(r.DecodeBool()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostIPC = false - } else { - x.HostIPC = bool(r.DecodeBool()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecurityContext != nil { - x.SecurityContext = nil - } - } else { - if x.SecurityContext == nil { - x.SecurityContext = new(PodSecurityContext) - } - x.SecurityContext.CodecDecodeSelf(d) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ImagePullSecrets = nil - } else { - yyv47 := &x.ImagePullSecrets - yym48 := z.DecBinary() - _ = yym48 - if false { - } else { - h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv47), d) - } - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - x.Hostname = string(r.DecodeString()) - } - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Subdomain = "" - } else { - x.Subdomain = string(r.DecodeString()) - } - for { - yyj27++ - if yyhl27 { - yyb27 = yyj27 > l - } else { - yyb27 = r.CheckBreak() - } - if yyb27 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj27-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodSecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.SELinuxOptions != nil - yyq2[1] = x.RunAsUser != nil - yyq2[2] = x.RunAsNonRoot != nil - yyq2[3] = len(x.SupplementalGroups) != 0 - yyq2[4] = x.FSGroup != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - 
x.SELinuxOptions.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.RunAsUser == nil { - r.EncodeNil() - } else { - yy7 := *x.RunAsUser - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(yy7)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RunAsUser == nil { - r.EncodeNil() - } else { - yy9 := *x.RunAsUser - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.RunAsNonRoot == nil { - r.EncodeNil() - } else { - yy12 := *x.RunAsNonRoot - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(yy12)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsNonRoot")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RunAsNonRoot == nil { - r.EncodeNil() - } else { - yy14 := *x.RunAsNonRoot - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeBool(bool(yy14)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.SupplementalGroups == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - z.F.EncSliceInt64V(x.SupplementalGroups, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("supplementalGroups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SupplementalGroups == nil { - r.EncodeNil() - } else { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - z.F.EncSliceInt64V(x.SupplementalGroups, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.FSGroup == nil { - r.EncodeNil() - } else { - yy20 := *x.FSGroup - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeInt(int64(yy20)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsGroup")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FSGroup == nil { - r.EncodeNil() - } else { - yy22 := *x.FSGroup - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeInt(int64(yy22)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSecurityContext) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false 
{ - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "seLinuxOptions": - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - case "runAsUser": - if r.TryDecodeAsNil() { - if x.RunAsUser != nil { - x.RunAsUser = nil - } - } else { - if x.RunAsUser == nil { - x.RunAsUser = new(int64) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) - } - } - case "runAsNonRoot": - if r.TryDecodeAsNil() { - if x.RunAsNonRoot != nil { - x.RunAsNonRoot = nil - } - } else { - if x.RunAsNonRoot == nil { - x.RunAsNonRoot = new(bool) - } - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() - } - } - case "supplementalGroups": - if r.TryDecodeAsNil() { - x.SupplementalGroups = nil - } else { - yyv9 := &x.SupplementalGroups - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - z.F.DecSliceInt64X(yyv9, false, d) - } - } - case "fsGroup": - if r.TryDecodeAsNil() { - if x.FSGroup != nil { - x.FSGroup = nil - } - } else { - if x.FSGroup == nil { - x.FSGroup = new(int64) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*int64)(x.FSGroup)) = int64(r.DecodeInt(64)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RunAsUser != nil { - x.RunAsUser = nil - } - } else { - if x.RunAsUser == nil { - x.RunAsUser = new(int64) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RunAsNonRoot != nil { - x.RunAsNonRoot = nil - } - } else { - if x.RunAsNonRoot == nil { - x.RunAsNonRoot = new(bool) - } - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SupplementalGroups = nil - } else { - yyv19 := &x.SupplementalGroups - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - z.F.DecSliceInt64X(yyv19, false, d) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FSGroup != nil { - x.FSGroup = nil - } - } else { - if x.FSGroup == nil { - x.FSGroup = new(int64) - } - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*int64)(x.FSGroup)) = int64(r.DecodeInt(64)) - } - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Phase != "" - yyq2[1] = len(x.Conditions) != 0 - yyq2[2] = x.Message != "" - yyq2[3] = x.Reason != "" - yyq2[4] = x.HostIP != "" - yyq2[5] = x.PodIP != "" - yyq2[6] = x.StartTime != nil - yyq2[7] = len(x.ContainerStatuses) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Phase.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("phase")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Phase.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { 
- h.encSlicePodCondition(([]PodCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSlicePodCondition(([]PodCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostIP")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostIP)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodIP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podIP")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodIP)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.StartTime == nil { - r.EncodeNil() - } else { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym22 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym22 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("startTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.StartTime == nil { - r.EncodeNil() - } else { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym23 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym23 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.ContainerStatuses == nil { - r.EncodeNil() - } else { - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - h.encSliceContainerStatus(([]ContainerStatus)(x.ContainerStatuses), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerStatuses")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ContainerStatuses == nil { - r.EncodeNil() - } else { - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - h.encSliceContainerStatus(([]ContainerStatus)(x.ContainerStatuses), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "phase": - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = PodPhase(r.DecodeString()) - } - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv5 := &x.Conditions - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSlicePodCondition((*[]PodCondition)(yyv5), d) - } - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "hostIP": - if r.TryDecodeAsNil() { - x.HostIP = "" - } else { - x.HostIP = string(r.DecodeString()) - } - case "podIP": - if r.TryDecodeAsNil() { - x.PodIP = "" - } else { - x.PodIP = string(r.DecodeString()) - } - case "startTime": - if r.TryDecodeAsNil() { - 
if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg2_unversioned.Time) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym12 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - case "containerStatuses": - if r.TryDecodeAsNil() { - x.ContainerStatuses = nil - } else { - yyv13 := &x.ContainerStatuses - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceContainerStatus((*[]ContainerStatus)(yyv13), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj15 int - var yyb15 bool - var yyhl15 bool = l >= 0 - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = PodPhase(r.DecodeString()) - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv17 := &x.Conditions - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - h.decSlicePodCondition((*[]PodCondition)(yyv17), d) - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostIP = "" - } else { - x.HostIP = string(r.DecodeString()) - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodIP = "" - } else { - x.PodIP = string(r.DecodeString()) - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg2_unversioned.Time) - } - yym24 := z.DecBinary() - 
_ = yym24 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym24 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym24 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerStatuses = nil - } else { - yyv25 := &x.ContainerStatuses - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - h.decSliceContainerStatus((*[]ContainerStatus)(yyv25), d) - } - } - for { - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj15-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodStatusResult) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Status - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Status - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodStatusResult) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodStatusResult) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = PodStatus{} - } else { - yyv5 := &x.Status - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodStatusResult) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv9 := &x.ObjectMeta - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = PodStatus{} - } else { - yyv10 := &x.Status - yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Pod) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Pod) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Pod) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PodSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = PodStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Pod) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if 
yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PodSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = PodStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSlicePod(([]Pod)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSlicePod(([]Pod)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if 
yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSlicePod((*[]Pod)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodList) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSlicePod((*[]Pod)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - 
yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PodSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv7 := &x.ObjectMeta - yyv7.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PodSpec{} - } else { - yyv8 := &x.Spec - yyv8.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodTemplate) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray 
- var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Template - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Template - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodTemplate) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "template": - if r.TryDecodeAsNil() { - x.Template = PodTemplateSpec{} - } else { - yyv5 := &x.Template - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv9 := &x.ObjectMeta - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = PodTemplateSpec{} - } else { - yyv10 := &x.Template - yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodTemplateList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 
|| yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSlicePodTemplate(([]PodTemplate)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSlicePodTemplate(([]PodTemplate)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodTemplateList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodTemplateList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSlicePodTemplate((*[]PodTemplate)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodTemplateList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSlicePodTemplate((*[]PodTemplate)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicationControllerSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Replicas != nil - yyq2[1] = len(x.Selector) != 0 - yyq2[2] = x.Template != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Replicas == nil { - r.EncodeNil() - } else { - yy4 := *x.Replicas - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Replicas == nil { - r.EncodeNil() - } else { - yy6 := *x.Replicas - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - z.F.EncMapStringStringV(x.Selector, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - z.F.EncMapStringStringV(x.Selector, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Template == nil { - r.EncodeNil() - } else { - x.Template.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Template == nil { - r.EncodeNil() - } else { - x.Template.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicationControllerSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) 
- } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicationControllerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } - } else { - if x.Replicas == nil { - x.Replicas = new(int32) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) - } - } - case "selector": - if r.TryDecodeAsNil() { - x.Selector = nil - } else { - yyv6 := &x.Selector - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecMapStringStringX(yyv6, false, d) - } - } - case "template": - if r.TryDecodeAsNil() { - if x.Template != nil { - x.Template = nil - } - } else { - if x.Template == nil { - x.Template = new(PodTemplateSpec) - } - x.Template.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicationControllerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } - } else { - if x.Replicas == nil { - x.Replicas = new(int32) - } - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Selector = nil - } else { - yyv12 := &x.Selector - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - z.F.DecMapStringStringX(yyv12, false, d) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Template != nil { - x.Template = nil - } - } else { - if x.Template == nil { - x.Template = new(PodTemplateSpec) - } - x.Template.CodecDecodeSelf(d) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicationControllerStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - 
r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FullyLabeledReplicas != 0 - yyq2[2] = x.ObservedGeneration != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.FullyLabeledReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.FullyLabeledReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicationControllerStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicationControllerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "fullyLabeledReplicas": - if r.TryDecodeAsNil() { - x.FullyLabeledReplicas = 0 - } else { - x.FullyLabeledReplicas = int32(r.DecodeInt(32)) - } - case "observedGeneration": - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicationControllerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FullyLabeledReplicas = 0 - } else { - x.FullyLabeledReplicas = int32(r.DecodeInt(32)) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicationController) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicationController) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicationController) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := 
string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ReplicationControllerSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ReplicationControllerStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicationController) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ReplicationControllerSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ReplicationControllerStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicationControllerList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, 
yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceReplicationController(([]ReplicationController)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceReplicationController(([]ReplicationController)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicationControllerList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 
{ - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicationControllerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceReplicationController((*[]ReplicationController)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicationControllerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceReplicationController((*[]ReplicationController)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = 
string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ServiceAffinity) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ServiceAffinity) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x ServiceType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ServiceType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ServiceStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.LoadBalancer - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("loadBalancer")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.LoadBalancer - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServiceStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServiceStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "loadBalancer": - if r.TryDecodeAsNil() { - x.LoadBalancer = LoadBalancerStatus{} - } else { - yyv4 := &x.LoadBalancer - yyv4.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServiceStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LoadBalancer = LoadBalancerStatus{} - } else { - yyv6 := &x.LoadBalancer - yyv6.CodecDecodeSelf(d) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LoadBalancerStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Ingress) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Ingress == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceLoadBalancerIngress(([]LoadBalancerIngress)(x.Ingress), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ingress")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ingress == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceLoadBalancerIngress(([]LoadBalancerIngress)(x.Ingress), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LoadBalancerStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, 
z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LoadBalancerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "ingress": - if r.TryDecodeAsNil() { - x.Ingress = nil - } else { - yyv4 := &x.Ingress - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceLoadBalancerIngress((*[]LoadBalancerIngress)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LoadBalancerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ingress = nil - } else { - yyv7 := &x.Ingress - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceLoadBalancerIngress((*[]LoadBalancerIngress)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LoadBalancerIngress) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.IP != "" - yyq2[1] = x.Hostname != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] 
{ - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ip")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IP)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostname")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LoadBalancerIngress) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LoadBalancerIngress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "ip": - if r.TryDecodeAsNil() { - x.IP = "" - } else { - x.IP = string(r.DecodeString()) - } - case "hostname": - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - x.Hostname = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LoadBalancerIngress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.IP = "" - } else { - x.IP = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return 
- } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - x.Hostname = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ServiceSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [9]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = len(x.Selector) != 0 - yyq2[2] = x.ClusterIP != "" - yyq2[3] = x.Type != "" - yyq2[4] = len(x.ExternalIPs) != 0 - yyq2[5] = len(x.DeprecatedPublicIPs) != 0 - yyq2[6] = x.SessionAffinity != "" - yyq2[7] = x.LoadBalancerIP != "" - yyq2[8] = len(x.LoadBalancerSourceRanges) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(9) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Ports == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceServicePort(([]ServicePort)(x.Ports), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ports")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ports == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceServicePort(([]ServicePort)(x.Ports), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - z.F.EncMapStringStringV(x.Selector, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - z.F.EncMapStringStringV(x.Selector, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterIP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterIP")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterIP)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - x.Type.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.ExternalIPs == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - z.F.EncSliceStringV(x.ExternalIPs, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("externalIPs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ExternalIPs == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - z.F.EncSliceStringV(x.ExternalIPs, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.DeprecatedPublicIPs == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - z.F.EncSliceStringV(x.DeprecatedPublicIPs, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deprecatedPublicIPs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DeprecatedPublicIPs == nil { - r.EncodeNil() - } else { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - z.F.EncSliceStringV(x.DeprecatedPublicIPs, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - x.SessionAffinity.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("sessionAffinity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.SessionAffinity.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.LoadBalancerIP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("loadBalancerIP")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.LoadBalancerIP)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.LoadBalancerSourceRanges == nil { - r.EncodeNil() - } else { - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("loadBalancerSourceRanges")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LoadBalancerSourceRanges == nil { - r.EncodeNil() - } else { - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - z.F.EncSliceStringV(x.LoadBalancerSourceRanges, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServiceSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServiceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "ports": - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv4 := &x.Ports - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceServicePort((*[]ServicePort)(yyv4), d) - } - } - case "selector": - if r.TryDecodeAsNil() { - x.Selector = nil - } else { - yyv6 := &x.Selector - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecMapStringStringX(yyv6, false, d) - } - } - case "clusterIP": - if r.TryDecodeAsNil() { - x.ClusterIP = "" - } else { - x.ClusterIP = string(r.DecodeString()) - } - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ServiceType(r.DecodeString()) - } - case "externalIPs": - if r.TryDecodeAsNil() { - x.ExternalIPs = nil - } else { - yyv10 := &x.ExternalIPs - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - z.F.DecSliceStringX(yyv10, false, d) - } - } - case "deprecatedPublicIPs": - if r.TryDecodeAsNil() { - x.DeprecatedPublicIPs = nil - } else { - yyv12 := &x.DeprecatedPublicIPs - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - z.F.DecSliceStringX(yyv12, false, d) - } - } - case "sessionAffinity": - if r.TryDecodeAsNil() { - x.SessionAffinity = "" - } else { - x.SessionAffinity = ServiceAffinity(r.DecodeString()) - } - case "loadBalancerIP": - if r.TryDecodeAsNil() { - x.LoadBalancerIP = "" - } else { - x.LoadBalancerIP = string(r.DecodeString()) - } - case "loadBalancerSourceRanges": - if r.TryDecodeAsNil() { - x.LoadBalancerSourceRanges = nil - } else { - yyv16 := &x.LoadBalancerSourceRanges - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - z.F.DecSliceStringX(yyv16, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServiceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj18 int - var yyb18 bool - var yyhl18 bool = l >= 0 - yyj18++ - if yyhl18 { 
- yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv19 := &x.Ports - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - h.decSliceServicePort((*[]ServicePort)(yyv19), d) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Selector = nil - } else { - yyv21 := &x.Selector - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - z.F.DecMapStringStringX(yyv21, false, d) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterIP = "" - } else { - x.ClusterIP = string(r.DecodeString()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ServiceType(r.DecodeString()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExternalIPs = nil - } else { - yyv25 := &x.ExternalIPs - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - z.F.DecSliceStringX(yyv25, false, d) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DeprecatedPublicIPs = nil - } else { - yyv27 := &x.DeprecatedPublicIPs - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - z.F.DecSliceStringX(yyv27, false, d) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SessionAffinity = "" - } else { - x.SessionAffinity = ServiceAffinity(r.DecodeString()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LoadBalancerIP = "" - } else { - x.LoadBalancerIP = string(r.DecodeString()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LoadBalancerSourceRanges = nil - } else { - yyv31 := &x.LoadBalancerSourceRanges - yym32 := z.DecBinary() - _ = yym32 - if false { - } else { - z.F.DecSliceStringX(yyv31, false, d) - } - } - for { - yyj18++ - if yyhl18 { - yyb18 = 
yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj18-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ServicePort) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Name != "" - yyq2[1] = x.Protocol != "" - yyq2[3] = true - yyq2[4] = x.NodePort != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - x.Protocol.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("protocol")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Protocol.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy13 := &x.TargetPort - yym14 := z.EncBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.EncExt(yy13) { - } else if !yym14 && z.IsJSONHandle() { - z.EncJSONMarshal(yy13) - } else { - z.EncFallback(yy13) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetPort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy15 := &x.TargetPort - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(yy15) { - } else if !yym16 && z.IsJSONHandle() { - z.EncJSONMarshal(yy15) - } else { - z.EncFallback(yy15) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeInt(int64(x.NodePort)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodePort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(x.NodePort)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServicePort) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServicePort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "protocol": - if r.TryDecodeAsNil() { - x.Protocol = "" - } else { - x.Protocol = Protocol(r.DecodeString()) - } - case "port": - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - case "targetPort": - if r.TryDecodeAsNil() { - x.TargetPort = pkg4_intstr.IntOrString{} - } else { - yyv7 := &x.TargetPort - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(yyv7) { - } else if !yym8 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv7) - } else { - z.DecFallback(yyv7, false) - } - } - case "nodePort": - if r.TryDecodeAsNil() { - x.NodePort = 0 - } else { - x.NodePort = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServicePort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.Protocol = "" - } else { - x.Protocol = Protocol(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetPort = pkg4_intstr.IntOrString{} - } else { - yyv14 := &x.TargetPort - yym15 := z.DecBinary() - _ = yym15 - if false { - } else if z.HasExtensions() && z.DecExt(yyv14) { - } else if !yym15 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv14) - } else { - z.DecFallback(yyv14, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodePort = 0 - } else { - x.NodePort = int32(r.DecodeInt(32)) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Service) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Service) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Service) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ServiceSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ServiceStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - 
z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Service) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ServiceSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ServiceStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ServiceList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := 
z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceService(([]Service)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceService(([]Service)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServiceList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServiceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if 
r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceService((*[]Service)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServiceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceService((*[]Service)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ServiceAccount) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = len(x.Secrets) != 0 - yyq2[2] = len(x.ImagePullSecrets) != 0 - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - 
r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Secrets == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceObjectReference(([]ObjectReference)(x.Secrets), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secrets")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Secrets == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceObjectReference(([]ObjectReference)(x.Secrets), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.ImagePullSecrets == nil { - r.EncodeNil() - } else { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("imagePullSecrets")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ImagePullSecrets == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - h.encSliceLocalObjectReference(([]LocalObjectReference)(x.ImagePullSecrets), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServiceAccount) CodecDecodeSelf(d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServiceAccount) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "secrets": - if r.TryDecodeAsNil() { - x.Secrets = nil - } else { - yyv5 := &x.Secrets - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceObjectReference((*[]ObjectReference)(yyv5), d) - } - } - case "imagePullSecrets": - if r.TryDecodeAsNil() { - x.ImagePullSecrets = nil - } else { - yyv7 := &x.ImagePullSecrets - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv7), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServiceAccount) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv12 := &x.ObjectMeta - yyv12.CodecDecodeSelf(d) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Secrets = nil - } else { - yyv13 := &x.Secrets - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceObjectReference((*[]ObjectReference)(yyv13), d) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ImagePullSecrets = nil - } else { - yyv15 := &x.ImagePullSecrets - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - h.decSliceLocalObjectReference((*[]LocalObjectReference)(yyv15), d) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ServiceAccountList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceServiceAccount(([]ServiceAccount)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceServiceAccount(([]ServiceAccount)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServiceAccountList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServiceAccountList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceServiceAccount((*[]ServiceAccount)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServiceAccountList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool 
- var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceServiceAccount((*[]ServiceAccount)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Endpoints) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Subsets == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceEndpointSubset(([]EndpointSubset)(x.Subsets), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subsets")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Subsets == nil { - r.EncodeNil() - } else { - yym10 := 
z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceEndpointSubset(([]EndpointSubset)(x.Subsets), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Endpoints) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Endpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "subsets": - if r.TryDecodeAsNil() { - x.Subsets = nil - } else { - yyv5 := &x.Subsets - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceEndpointSubset((*[]EndpointSubset)(yyv5), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch 
yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Endpoints) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Subsets = nil - } else { - yyv11 := &x.Subsets - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceEndpointSubset((*[]EndpointSubset)(yyv11), d) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EndpointSubset) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Addresses) != 0 - yyq2[1] = len(x.NotReadyAddresses) != 0 - yyq2[2] = len(x.Ports) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Addresses == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceEndpointAddress(([]EndpointAddress)(x.Addresses), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("addresses")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Addresses == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceEndpointAddress(([]EndpointAddress)(x.Addresses), e) - } - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.NotReadyAddresses == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceEndpointAddress(([]EndpointAddress)(x.NotReadyAddresses), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("notReadyAddresses")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NotReadyAddresses == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceEndpointAddress(([]EndpointAddress)(x.NotReadyAddresses), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Ports == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceEndpointPort(([]EndpointPort)(x.Ports), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ports")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ports == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - h.encSliceEndpointPort(([]EndpointPort)(x.Ports), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EndpointSubset) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EndpointSubset) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "addresses": - if r.TryDecodeAsNil() { - x.Addresses = nil - } else { - yyv4 := &x.Addresses - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceEndpointAddress((*[]EndpointAddress)(yyv4), d) - } - } - case "notReadyAddresses": - if r.TryDecodeAsNil() { - x.NotReadyAddresses = nil - } else { - yyv6 := &x.NotReadyAddresses - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceEndpointAddress((*[]EndpointAddress)(yyv6), d) - } - } - case "ports": - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv8 := &x.Ports - yym9 := z.DecBinary() 
- _ = yym9 - if false { - } else { - h.decSliceEndpointPort((*[]EndpointPort)(yyv8), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EndpointSubset) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Addresses = nil - } else { - yyv11 := &x.Addresses - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceEndpointAddress((*[]EndpointAddress)(yyv11), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NotReadyAddresses = nil - } else { - yyv13 := &x.NotReadyAddresses - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceEndpointAddress((*[]EndpointAddress)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv15 := &x.Ports - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - h.decSliceEndpointPort((*[]EndpointPort)(yyv15), d) - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EndpointAddress) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Hostname != "" - yyq2[2] = x.TargetRef != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IP)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ip")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.IP)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } - } else { - 
r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostname")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Hostname)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.TargetRef == nil { - r.EncodeNil() - } else { - x.TargetRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TargetRef == nil { - r.EncodeNil() - } else { - x.TargetRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EndpointAddress) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EndpointAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "ip": - if r.TryDecodeAsNil() { - x.IP = "" - } else { - x.IP = string(r.DecodeString()) - } - case "hostname": - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - x.Hostname = string(r.DecodeString()) - } - case "targetRef": - if r.TryDecodeAsNil() { - if x.TargetRef != nil { - x.TargetRef = nil - } - } else { - if x.TargetRef == nil { - x.TargetRef = new(ObjectReference) - } - x.TargetRef.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EndpointAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.IP = "" - } else { - x.IP = 
string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hostname = "" - } else { - x.Hostname = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TargetRef != nil { - x.TargetRef = nil - } - } else { - if x.TargetRef == nil { - x.TargetRef = new(ObjectReference) - } - x.TargetRef.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EndpointPort) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Name != "" - yyq2[2] = x.Protocol != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - x.Protocol.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("protocol")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Protocol.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EndpointPort) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = 
h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EndpointPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "port": - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - case "protocol": - if r.TryDecodeAsNil() { - x.Protocol = "" - } else { - x.Protocol = Protocol(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EndpointPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Protocol = "" - } else { - x.Protocol = Protocol(r.DecodeString()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EndpointsList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - 
var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceEndpoints(([]Endpoints)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceEndpoints(([]Endpoints)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EndpointsList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := 
r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EndpointsList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceEndpoints((*[]Endpoints)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EndpointsList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceEndpoints((*[]Endpoints)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = 
r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.PodCIDR != "" - yyq2[1] = x.ExternalID != "" - yyq2[2] = x.ProviderID != "" - yyq2[3] = x.Unschedulable != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podCIDR")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ExternalID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("externalID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ExternalID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ProviderID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("providerID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ProviderID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.Unschedulable)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("unschedulable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.Unschedulable)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "podCIDR": - if r.TryDecodeAsNil() { - x.PodCIDR = "" - } else { - x.PodCIDR = string(r.DecodeString()) - } - case "externalID": - if r.TryDecodeAsNil() { - x.ExternalID = "" - } else { - x.ExternalID = string(r.DecodeString()) - } - case "providerID": - if r.TryDecodeAsNil() { - x.ProviderID = "" - } else { - x.ProviderID = string(r.DecodeString()) - } - case "unschedulable": - if r.TryDecodeAsNil() { - x.Unschedulable = false - } else { - x.Unschedulable = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodCIDR = "" - } else { - x.PodCIDR = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExternalID = "" - } else { - x.ExternalID = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ProviderID = "" - } else { - x.ProviderID = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - 
x.Unschedulable = false - } else { - x.Unschedulable = bool(r.DecodeBool()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonEndpoint) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonEndpoint) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonEndpoint) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "Port": - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DaemonEndpoint) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = 
r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeDaemonEndpoints) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.KubeletEndpoint - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeletEndpoint")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.KubeletEndpoint - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeDaemonEndpoints) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeDaemonEndpoints) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kubeletEndpoint": - if r.TryDecodeAsNil() { - x.KubeletEndpoint = DaemonEndpoint{} - } else { - yyv4 := &x.KubeletEndpoint - yyv4.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeDaemonEndpoints) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeletEndpoint = DaemonEndpoint{} - } else { - yyv6 := &x.KubeletEndpoint - yyv6.CodecDecodeSelf(d) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeSystemInfo) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [10]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(10) - } else { - yynn2 = 10 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MachineID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("machineID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MachineID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SystemUUID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("systemUUID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SystemUUID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.BootID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("bootID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.BootID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KernelVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kernelVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.KernelVersion)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.OSImage)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("osImage")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.OSImage)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntimeVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerRuntimeVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntimeVersion)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeletVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeletVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeletVersion)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeProxyVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeProxyVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeProxyVersion)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("operatingSystem")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.OperatingSystem)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym31 := z.EncBinary() - _ = yym31 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Architecture)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("architecture")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym32 := z.EncBinary() - _ = yym32 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Architecture)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x 
*NodeSystemInfo) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeSystemInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "machineID": - if r.TryDecodeAsNil() { - x.MachineID = "" - } else { - x.MachineID = string(r.DecodeString()) - } - case "systemUUID": - if r.TryDecodeAsNil() { - x.SystemUUID = "" - } else { - x.SystemUUID = string(r.DecodeString()) - } - case "bootID": - if r.TryDecodeAsNil() { - x.BootID = "" - } else { - x.BootID = string(r.DecodeString()) - } - case "kernelVersion": - if r.TryDecodeAsNil() { - x.KernelVersion = "" - } else { - x.KernelVersion = string(r.DecodeString()) - } - case "osImage": - if r.TryDecodeAsNil() { - x.OSImage = "" - } else { - x.OSImage = string(r.DecodeString()) - } - case "containerRuntimeVersion": - if r.TryDecodeAsNil() { - x.ContainerRuntimeVersion = "" - } else { - x.ContainerRuntimeVersion = string(r.DecodeString()) - } - case "kubeletVersion": - if r.TryDecodeAsNil() { - x.KubeletVersion = "" - } else { - x.KubeletVersion = string(r.DecodeString()) - } - case "kubeProxyVersion": - if r.TryDecodeAsNil() { - x.KubeProxyVersion = "" - } else { - x.KubeProxyVersion = string(r.DecodeString()) - } - case "operatingSystem": - if r.TryDecodeAsNil() { - x.OperatingSystem = "" - } else { - x.OperatingSystem = string(r.DecodeString()) - } - case "architecture": - if r.TryDecodeAsNil() { - x.Architecture = "" - } else { - x.Architecture = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeSystemInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MachineID = "" - } else { - x.MachineID = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SystemUUID = "" - } else { - x.SystemUUID = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.BootID = "" - } else { - x.BootID = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KernelVersion = "" - } else { - x.KernelVersion = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.OSImage = "" - } else { - x.OSImage = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerRuntimeVersion = "" - } else { - x.ContainerRuntimeVersion = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeletVersion = "" - } else { - x.KubeletVersion = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeProxyVersion = "" - } else { - x.KubeProxyVersion = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.OperatingSystem = "" - } else { - x.OperatingSystem = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Architecture = "" - } else { - x.Architecture = string(r.DecodeString()) - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } 
else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [10]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Capacity) != 0 - yyq2[1] = len(x.Allocatable) != 0 - yyq2[2] = x.Phase != "" - yyq2[3] = len(x.Conditions) != 0 - yyq2[4] = len(x.Addresses) != 0 - yyq2[5] = true - yyq2[6] = true - yyq2[7] = len(x.Images) != 0 - yyq2[8] = len(x.VolumesInUse) != 0 - yyq2[9] = len(x.VolumesAttached) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(10) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("capacity")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Capacity == nil { - r.EncodeNil() - } else { - x.Capacity.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Allocatable == nil { - r.EncodeNil() - } else { - x.Allocatable.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("allocatable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Allocatable == nil { - r.EncodeNil() - } else { - x.Allocatable.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - x.Phase.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("phase")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Phase.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - h.encSliceNodeCondition(([]NodeCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - h.encSliceNodeCondition(([]NodeCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.Addresses == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceNodeAddress(([]NodeAddress)(x.Addresses), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("addresses")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Addresses == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - h.encSliceNodeAddress(([]NodeAddress)(x.Addresses), e) - } - } - } - } 
- if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yy19 := &x.DaemonEndpoints - yy19.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("daemonEndpoints")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy21 := &x.DaemonEndpoints - yy21.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yy24 := &x.NodeInfo - yy24.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeInfo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy26 := &x.NodeInfo - yy26.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.Images == nil { - r.EncodeNil() - } else { - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - h.encSliceContainerImage(([]ContainerImage)(x.Images), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("images")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Images == nil { - r.EncodeNil() - } else { - yym30 := z.EncBinary() - _ = yym30 - if false { - } else { - h.encSliceContainerImage(([]ContainerImage)(x.Images), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - if x.VolumesInUse == nil { - r.EncodeNil() - } else { - yym32 := z.EncBinary() - _ = yym32 - if false { - } else { - h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumesInUse")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VolumesInUse == nil { - r.EncodeNil() - } else { - yym33 := z.EncBinary() - _ = yym33 - if false { - } else { - h.encSliceUniqueVolumeName(([]UniqueVolumeName)(x.VolumesInUse), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - if x.VolumesAttached == nil { - r.EncodeNil() - } else { - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumesAttached")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VolumesAttached == nil { - r.EncodeNil() - } else { - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - h.encSliceAttachedVolume(([]AttachedVolume)(x.VolumesAttached), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == 
codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "capacity": - if r.TryDecodeAsNil() { - x.Capacity = nil - } else { - yyv4 := &x.Capacity - yyv4.CodecDecodeSelf(d) - } - case "allocatable": - if r.TryDecodeAsNil() { - x.Allocatable = nil - } else { - yyv5 := &x.Allocatable - yyv5.CodecDecodeSelf(d) - } - case "phase": - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = NodePhase(r.DecodeString()) - } - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv7 := &x.Conditions - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceNodeCondition((*[]NodeCondition)(yyv7), d) - } - } - case "addresses": - if r.TryDecodeAsNil() { - x.Addresses = nil - } else { - yyv9 := &x.Addresses - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceNodeAddress((*[]NodeAddress)(yyv9), d) - } - } - case "daemonEndpoints": - if r.TryDecodeAsNil() { - x.DaemonEndpoints = NodeDaemonEndpoints{} - } else { - yyv11 := &x.DaemonEndpoints - yyv11.CodecDecodeSelf(d) - } - case "nodeInfo": - if r.TryDecodeAsNil() { - x.NodeInfo = NodeSystemInfo{} - } else { - yyv12 := &x.NodeInfo - yyv12.CodecDecodeSelf(d) - } - case "images": - if r.TryDecodeAsNil() { - x.Images = nil - } else { - yyv13 := &x.Images - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceContainerImage((*[]ContainerImage)(yyv13), d) - } - } - case "volumesInUse": - if r.TryDecodeAsNil() { - x.VolumesInUse = nil - } else { - yyv15 := &x.VolumesInUse - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv15), d) - } - } - case "volumesAttached": - if r.TryDecodeAsNil() { - x.VolumesAttached = nil - } else { - yyv17 := &x.VolumesAttached - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - h.decSliceAttachedVolume((*[]AttachedVolume)(yyv17), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj19 int - var yyb19 bool - var yyhl19 bool = l >= 0 - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - 
x.Capacity = nil - } else { - yyv20 := &x.Capacity - yyv20.CodecDecodeSelf(d) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Allocatable = nil - } else { - yyv21 := &x.Allocatable - yyv21.CodecDecodeSelf(d) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = NodePhase(r.DecodeString()) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv23 := &x.Conditions - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - h.decSliceNodeCondition((*[]NodeCondition)(yyv23), d) - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Addresses = nil - } else { - yyv25 := &x.Addresses - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - h.decSliceNodeAddress((*[]NodeAddress)(yyv25), d) - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DaemonEndpoints = NodeDaemonEndpoints{} - } else { - yyv27 := &x.DaemonEndpoints - yyv27.CodecDecodeSelf(d) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeInfo = NodeSystemInfo{} - } else { - yyv28 := &x.NodeInfo - yyv28.CodecDecodeSelf(d) - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Images = nil - } else { - yyv29 := &x.Images - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - h.decSliceContainerImage((*[]ContainerImage)(yyv29), d) - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumesInUse = nil - } else { - yyv31 := &x.VolumesInUse - yym32 := z.DecBinary() - _ = yym32 - if false { - } else { - h.decSliceUniqueVolumeName((*[]UniqueVolumeName)(yyv31), d) - } - } - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumesAttached = nil - } else { - 
yyv33 := &x.VolumesAttached - yym34 := z.DecBinary() - _ = yym34 - if false { - } else { - h.decSliceAttachedVolume((*[]AttachedVolume)(yyv33), d) - } - } - for { - yyj19++ - if yyhl19 { - yyb19 = yyj19 > l - } else { - yyb19 = r.CheckBreak() - } - if yyb19 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj19-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x UniqueVolumeName) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *UniqueVolumeName) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *AttachedVolume) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Name.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Name.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("devicePath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DevicePath)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *AttachedVolume) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *AttachedVolume) 
codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = UniqueVolumeName(r.DecodeString()) - } - case "devicePath": - if r.TryDecodeAsNil() { - x.DevicePath = "" - } else { - x.DevicePath = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *AttachedVolume) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = UniqueVolumeName(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DevicePath = "" - } else { - x.DevicePath = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ContainerImage) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.SizeBytes != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Names == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.Names, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("names")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Names == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.Names, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - 
r.EncodeInt(int64(x.SizeBytes)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("sizeBytes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.SizeBytes)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ContainerImage) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ContainerImage) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "names": - if r.TryDecodeAsNil() { - x.Names = nil - } else { - yyv4 := &x.Names - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecSliceStringX(yyv4, false, d) - } - } - case "sizeBytes": - if r.TryDecodeAsNil() { - x.SizeBytes = 0 - } else { - x.SizeBytes = int64(r.DecodeInt(64)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ContainerImage) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Names = nil - } else { - yyv8 := &x.Names - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - z.F.DecSliceStringX(yyv8, false, d) - } - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SizeBytes = 0 - } else { - x.SizeBytes = int64(r.DecodeInt(64)) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, 
"") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x NodePhase) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *NodePhase) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x NodeConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *NodeConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *NodeCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = true - yyq2[3] = true - yyq2[4] = x.Reason != "" - yyq2[5] = x.Message != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Status.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Status.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.LastHeartbeatTime - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else if yym11 { - z.EncBinaryMarshal(yy10) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(yy10) - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastHeartbeatTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.LastHeartbeatTime - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if yym13 { - z.EncBinaryMarshal(yy12) - } else if !yym13 && z.IsJSONHandle() { - 
z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.LastTransitionTime - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(yy15) { - } else if yym16 { - z.EncBinaryMarshal(yy15) - } else if !yym16 && z.IsJSONHandle() { - z.EncJSONMarshal(yy15) - } else { - z.EncFallback(yy15) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.LastTransitionTime - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(yy17) { - } else if yym18 { - z.EncBinaryMarshal(yy17) - } else if !yym18 && z.IsJSONHandle() { - z.EncJSONMarshal(yy17) - } else { - z.EncFallback(yy17) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - 
} - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = NodeConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = ConditionStatus(r.DecodeString()) - } - case "lastHeartbeatTime": - if r.TryDecodeAsNil() { - x.LastHeartbeatTime = pkg2_unversioned.Time{} - } else { - yyv6 := &x.LastHeartbeatTime - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if yym7 { - z.DecBinaryUnmarshal(yyv6) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv8 := &x.LastTransitionTime - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else if yym9 { - z.DecBinaryUnmarshal(yyv8) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv8) - } else { - z.DecFallback(yyv8, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = NodeConditionType(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = ConditionStatus(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastHeartbeatTime = pkg2_unversioned.Time{} - } else { - yyv15 := &x.LastHeartbeatTime - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.DecExt(yyv15) { - } else if yym16 { - z.DecBinaryUnmarshal(yyv15) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv15) - } else { - z.DecFallback(yyv15, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv17 := &x.LastTransitionTime - yym18 := z.DecBinary() - _ = yym18 - 
if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else if yym18 { - z.DecBinaryUnmarshal(yyv17) - } else if !yym18 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv17) - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x NodeAddressType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *NodeAddressType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *NodeAddress) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Address)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("address")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Address)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeAddress) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeAddress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = NodeAddressType(r.DecodeString()) - } - case "address": - if r.TryDecodeAsNil() { - x.Address = "" - } else { - x.Address = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeAddress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = NodeAddressType(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Address = "" - } else { - x.Address = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ResourceName) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ResourceName) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x ResourceList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - 
_, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - h.encResourceList((ResourceList)(x), e) - } - } -} - -func (x *ResourceList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - h.decResourceList((*ResourceList)(x), d) - } -} - -func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = NodeSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = NodeStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = NodeSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = NodeStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceNode(([]Node)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceNode(([]Node)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceNode((*[]Node)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceNode((*[]Node)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x FinalizerName) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *FinalizerName) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *NamespaceSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Finalizers) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Finalizers == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceFinalizerName(([]FinalizerName)(x.Finalizers), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("finalizers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - 
if x.Finalizers == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceFinalizerName(([]FinalizerName)(x.Finalizers), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NamespaceSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NamespaceSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "finalizers": - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv4 := &x.Finalizers - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceFinalizerName((*[]FinalizerName)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NamespaceSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Finalizers = nil - } else { - yyv7 := &x.Finalizers - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceFinalizerName((*[]FinalizerName)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NamespaceStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Phase != "" - var yynn2 int - if yyr2 
|| yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Phase.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("phase")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Phase.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NamespaceStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NamespaceStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "phase": - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = NamespacePhase(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NamespaceStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Phase = "" - } else { - x.Phase = NamespacePhase(r.DecodeString()) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x NamespacePhase) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - 
-func (x *NamespacePhase) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *Namespace) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, 
string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Namespace) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Namespace) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = NamespaceSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = NamespaceStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Namespace) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = NamespaceSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = NamespaceStatus{} - 
} else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NamespaceList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceNamespace(([]Namespace)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceNamespace(([]Namespace)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, 
string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NamespaceList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NamespaceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceNamespace((*[]Namespace)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NamespaceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta 
= pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceNamespace((*[]Namespace)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Binding) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy9 := &x.Target - yy9.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("target")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Target - yy11.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Binding) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Binding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "target": - if r.TryDecodeAsNil() { - x.Target = ObjectReference{} - } else { - yyv5 := &x.Target - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Binding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv9 := &x.ObjectMeta - 
yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Target = ObjectReference{} - } else { - yyv10 := &x.Target - yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Preconditions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.UID != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.UID == nil { - r.EncodeNil() - } else { - yy4 := *x.UID - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.UID == nil { - r.EncodeNil() - } else { - yy6 := *x.UID - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Preconditions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Preconditions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "uid": - if r.TryDecodeAsNil() { - if x.UID != nil { - x.UID = nil - } - } else { - if x.UID == nil { - x.UID = new(pkg1_types.UID) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(x.UID) { - } else { - *((*string)(x.UID)) = r.DecodeString() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Preconditions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.UID != nil { - x.UID = nil - } - } else { - if x.UID == nil { - x.UID = new(pkg1_types.UID) - } - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(x.UID) { - } else { - *((*string)(x.UID)) = r.DecodeString() - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.GracePeriodSeconds != nil - yyq2[1] = x.Preconditions != nil - yyq2[2] = x.OrphanDependents != nil - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.GracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy4 := *x.GracePeriodSeconds - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("gracePeriodSeconds")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.GracePeriodSeconds == nil { - r.EncodeNil() - } else { - yy6 := *x.GracePeriodSeconds - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Preconditions == nil { - r.EncodeNil() - } else { - x.Preconditions.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("preconditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Preconditions == nil { - r.EncodeNil() - } else { - x.Preconditions.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.OrphanDependents == nil { - r.EncodeNil() - } else { - yy12 := *x.OrphanDependents - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(yy12)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("orphanDependents")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.OrphanDependents == nil { - r.EncodeNil() - } else { - yy14 := *x.OrphanDependents - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeBool(bool(yy14)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeleteOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "gracePeriodSeconds": - if r.TryDecodeAsNil() { - if x.GracePeriodSeconds != nil { - x.GracePeriodSeconds = nil - } - } else { - if x.GracePeriodSeconds == nil { - x.GracePeriodSeconds = new(int64) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - case "preconditions": - if r.TryDecodeAsNil() { - if x.Preconditions != nil { - x.Preconditions = nil - } - } else { - if x.Preconditions == nil { - x.Preconditions = new(Preconditions) - } - x.Preconditions.CodecDecodeSelf(d) - } - case "orphanDependents": - if r.TryDecodeAsNil() { - if x.OrphanDependents != nil { - x.OrphanDependents = nil - } - } else { - if x.OrphanDependents == nil { - x.OrphanDependents = new(bool) - } - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*bool)(x.OrphanDependents)) = r.DecodeBool() - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.GracePeriodSeconds != nil { - x.GracePeriodSeconds = nil - } - } else { - if x.GracePeriodSeconds == nil { - x.GracePeriodSeconds = new(int64) - } - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*int64)(x.GracePeriodSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Preconditions != nil { - x.Preconditions = nil - } - } else { - if x.Preconditions == nil { - x.Preconditions = new(Preconditions) - } - x.Preconditions.CodecDecodeSelf(d) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.OrphanDependents 
!= nil { - x.OrphanDependents = nil - } - } else { - if x.OrphanDependents == nil { - x.OrphanDependents = new(bool) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*bool)(x.OrphanDependents)) = r.DecodeBool() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ExportOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.Export)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("export")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.Export)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.Exact)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("exact")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.Exact)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 
- if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ExportOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ExportOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "export": - if r.TryDecodeAsNil() { - x.Export = false - } else { - x.Export = bool(r.DecodeBool()) - } - case "exact": - if r.TryDecodeAsNil() { - x.Exact = false - } else { - x.Exact = bool(r.DecodeBool()) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ExportOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Export = false - } else { - x.Export = bool(r.DecodeBool()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Exact = false - } else { - x.Exact = bool(r.DecodeBool()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { 
- yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ListOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.LabelSelector != "" - yyq2[1] = x.FieldSelector != "" - yyq2[2] = x.Watch != false - yyq2[3] = x.ResourceVersion != "" - yyq2[4] = x.TimeoutSeconds != nil - yyq2[5] = x.Kind != "" - yyq2[6] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.LabelSelector)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("labelSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.LabelSelector)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldSelector)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fieldSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldSelector)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.Watch)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("watch")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.Watch)) - } - } - } - if yyr2 || 
yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.TimeoutSeconds == nil { - r.EncodeNil() - } else { - yy16 := *x.TimeoutSeconds - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(yy16)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("timeoutSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TimeoutSeconds == nil { - r.EncodeNil() - } else { - yy18 := *x.TimeoutSeconds - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(yy18)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ListOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ListOptions) 
codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "labelSelector": - if r.TryDecodeAsNil() { - x.LabelSelector = "" - } else { - x.LabelSelector = string(r.DecodeString()) - } - case "fieldSelector": - if r.TryDecodeAsNil() { - x.FieldSelector = "" - } else { - x.FieldSelector = string(r.DecodeString()) - } - case "watch": - if r.TryDecodeAsNil() { - x.Watch = false - } else { - x.Watch = bool(r.DecodeBool()) - } - case "resourceVersion": - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - case "timeoutSeconds": - if r.TryDecodeAsNil() { - if x.TimeoutSeconds != nil { - x.TimeoutSeconds = nil - } - } else { - if x.TimeoutSeconds == nil { - x.TimeoutSeconds = new(int64) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64)) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ListOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LabelSelector = "" - } else { - x.LabelSelector = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FieldSelector = "" - } else { - x.FieldSelector = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Watch = false - } else { - x.Watch = bool(r.DecodeBool()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TimeoutSeconds != nil { - x.TimeoutSeconds = nil - } - } else { - if x.TimeoutSeconds == nil { - x.TimeoutSeconds = new(int64) - } - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodLogOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [10]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Container != "" - yyq2[1] = x.Follow != false - yyq2[2] = x.Previous != false - yyq2[3] = x.SinceSeconds != nil - yyq2[4] = x.SinceTime != nil - yyq2[5] = x.Timestamps != false - yyq2[6] = x.TailLines != nil - yyq2[7] = x.LimitBytes != nil - yyq2[8] = x.Kind != "" - yyq2[9] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(10) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("container")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.Follow)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("follow")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.Follow)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - 
} else { - r.EncodeBool(bool(x.Previous)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("previous")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.Previous)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.SinceSeconds == nil { - r.EncodeNil() - } else { - yy13 := *x.SinceSeconds - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(yy13)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("sinceSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SinceSeconds == nil { - r.EncodeNil() - } else { - yy15 := *x.SinceSeconds - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(yy15)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.SinceTime == nil { - r.EncodeNil() - } else { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(x.SinceTime) { - } else if yym18 { - z.EncBinaryMarshal(x.SinceTime) - } else if !yym18 && z.IsJSONHandle() { - z.EncJSONMarshal(x.SinceTime) - } else { - z.EncFallback(x.SinceTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("sinceTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SinceTime == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else if z.HasExtensions() && z.EncExt(x.SinceTime) { - } else if yym19 { - z.EncBinaryMarshal(x.SinceTime) - } else if !yym19 && z.IsJSONHandle() { - z.EncJSONMarshal(x.SinceTime) - } else { - z.EncFallback(x.SinceTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeBool(bool(x.Timestamps)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("timestamps")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeBool(bool(x.Timestamps)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.TailLines == nil { - r.EncodeNil() - } else { - yy24 := *x.TailLines - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeInt(int64(yy24)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tailLines")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TailLines == nil { - r.EncodeNil() - } else { - yy26 := *x.TailLines - yym27 := z.EncBinary() - _ = yym27 - if false { - } else { - r.EncodeInt(int64(yy26)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.LimitBytes == nil { - r.EncodeNil() - } else { - yy29 := *x.LimitBytes - yym30 := 
z.EncBinary() - _ = yym30 - if false { - } else { - r.EncodeInt(int64(yy29)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("limitBytes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LimitBytes == nil { - r.EncodeNil() - } else { - yy31 := *x.LimitBytes - yym32 := z.EncBinary() - _ = yym32 - if false { - } else { - r.EncodeInt(int64(yy31)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - yym34 := z.EncBinary() - _ = yym34 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - yym37 := z.EncBinary() - _ = yym37 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym38 := z.EncBinary() - _ = yym38 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodLogOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodLogOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "container": - if r.TryDecodeAsNil() { - x.Container = "" - } else { - x.Container = string(r.DecodeString()) - } - case "follow": - if r.TryDecodeAsNil() { - x.Follow = false - } else { - x.Follow = bool(r.DecodeBool()) - } - case "previous": - if 
r.TryDecodeAsNil() { - x.Previous = false - } else { - x.Previous = bool(r.DecodeBool()) - } - case "sinceSeconds": - if r.TryDecodeAsNil() { - if x.SinceSeconds != nil { - x.SinceSeconds = nil - } - } else { - if x.SinceSeconds == nil { - x.SinceSeconds = new(int64) - } - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - *((*int64)(x.SinceSeconds)) = int64(r.DecodeInt(64)) - } - } - case "sinceTime": - if r.TryDecodeAsNil() { - if x.SinceTime != nil { - x.SinceTime = nil - } - } else { - if x.SinceTime == nil { - x.SinceTime = new(pkg2_unversioned.Time) - } - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(x.SinceTime) { - } else if yym10 { - z.DecBinaryUnmarshal(x.SinceTime) - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.SinceTime) - } else { - z.DecFallback(x.SinceTime, false) - } - } - case "timestamps": - if r.TryDecodeAsNil() { - x.Timestamps = false - } else { - x.Timestamps = bool(r.DecodeBool()) - } - case "tailLines": - if r.TryDecodeAsNil() { - if x.TailLines != nil { - x.TailLines = nil - } - } else { - if x.TailLines == nil { - x.TailLines = new(int64) - } - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*int64)(x.TailLines)) = int64(r.DecodeInt(64)) - } - } - case "limitBytes": - if r.TryDecodeAsNil() { - if x.LimitBytes != nil { - x.LimitBytes = nil - } - } else { - if x.LimitBytes == nil { - x.LimitBytes = new(int64) - } - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - *((*int64)(x.LimitBytes)) = int64(r.DecodeInt(64)) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodLogOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj18 int - var yyb18 bool - var yyhl18 bool = l >= 0 - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Container = "" - } else { - x.Container = string(r.DecodeString()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Follow = false - } else { - x.Follow = bool(r.DecodeBool()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Previous = false - } else { - x.Previous = bool(r.DecodeBool()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SinceSeconds != nil { - x.SinceSeconds = nil - } - } else { - if x.SinceSeconds == nil { - x.SinceSeconds = new(int64) - 
} - yym23 := z.DecBinary() - _ = yym23 - if false { - } else { - *((*int64)(x.SinceSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SinceTime != nil { - x.SinceTime = nil - } - } else { - if x.SinceTime == nil { - x.SinceTime = new(pkg2_unversioned.Time) - } - yym25 := z.DecBinary() - _ = yym25 - if false { - } else if z.HasExtensions() && z.DecExt(x.SinceTime) { - } else if yym25 { - z.DecBinaryUnmarshal(x.SinceTime) - } else if !yym25 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.SinceTime) - } else { - z.DecFallback(x.SinceTime, false) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Timestamps = false - } else { - x.Timestamps = bool(r.DecodeBool()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TailLines != nil { - x.TailLines = nil - } - } else { - if x.TailLines == nil { - x.TailLines = new(int64) - } - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - *((*int64)(x.TailLines)) = int64(r.DecodeInt(64)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LimitBytes != nil { - x.LimitBytes = nil - } - } else { - if x.LimitBytes == nil { - x.LimitBytes = new(int64) - } - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - *((*int64)(x.LimitBytes)) = int64(r.DecodeInt(64)) - } - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj18-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodAttachOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - 
yyq2[0] = x.Stdin != false - yyq2[1] = x.Stdout != false - yyq2[2] = x.Stderr != false - yyq2[3] = x.TTY != false - yyq2[4] = x.Container != "" - yyq2[5] = x.Kind != "" - yyq2[6] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stdin")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.Stdout)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stdout")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.Stdout)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.Stderr)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stderr")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.Stderr)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tty")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("container")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodAttachOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodAttachOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "stdin": - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - x.Stdin = bool(r.DecodeBool()) - } - case "stdout": - if r.TryDecodeAsNil() { - x.Stdout = false - } else { - x.Stdout = bool(r.DecodeBool()) - } - case "stderr": - if r.TryDecodeAsNil() { - x.Stderr = false - } else { - x.Stderr = bool(r.DecodeBool()) - } - case "tty": - if r.TryDecodeAsNil() { - x.TTY = false - } else { - x.TTY = bool(r.DecodeBool()) - } - case "container": - if r.TryDecodeAsNil() { - x.Container = "" - } else { - x.Container = string(r.DecodeString()) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodAttachOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var 
yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - x.Stdin = bool(r.DecodeBool()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stdout = false - } else { - x.Stdout = bool(r.DecodeBool()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stderr = false - } else { - x.Stderr = bool(r.DecodeBool()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TTY = false - } else { - x.TTY = bool(r.DecodeBool()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Container = "" - } else { - x.Container = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodExecOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Stdin != false - yyq2[1] = x.Stdout != false - yyq2[2] = x.Stderr != false - yyq2[3] = x.TTY != false - yyq2[4] = x.Container != "" - yyq2[6] = x.Kind != "" - yyq2[7] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] 
{ - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stdin")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.Stdin)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.Stdout)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stdout")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.Stdout)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.Stderr)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("stderr")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.Stderr)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tty")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeBool(bool(x.TTY)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("container")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Container)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Command == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("command")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Command == nil { - r.EncodeNil() - } else { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - z.F.EncSliceStringV(x.Command, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - 
} - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodExecOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodExecOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "stdin": - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - x.Stdin = bool(r.DecodeBool()) - } - case "stdout": - if r.TryDecodeAsNil() { - x.Stdout = false - } else { - x.Stdout = bool(r.DecodeBool()) - } - case "stderr": - if r.TryDecodeAsNil() { - x.Stderr = false - } else { - x.Stderr = bool(r.DecodeBool()) - } - case "tty": - if r.TryDecodeAsNil() { - x.TTY = false - } else { - x.TTY = bool(r.DecodeBool()) - } - case "container": - if r.TryDecodeAsNil() { - x.Container = "" - } else { - x.Container = string(r.DecodeString()) - } - case "command": - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv9 := &x.Command - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - z.F.DecSliceStringX(yyv9, false, d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end 
switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodExecOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stdin = false - } else { - x.Stdin = bool(r.DecodeBool()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stdout = false - } else { - x.Stdout = bool(r.DecodeBool()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Stderr = false - } else { - x.Stderr = bool(r.DecodeBool()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TTY = false - } else { - x.TTY = bool(r.DecodeBool()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Container = "" - } else { - x.Container = string(r.DecodeString()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Command = nil - } else { - yyv19 := &x.Command - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - z.F.DecSliceStringX(yyv19, false, d) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - 
if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Path != "" - yyq2[1] = x.Kind != "" - yyq2[2] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for 
yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NodeProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Path != "" - yyq2[1] = x.Kind != "" - yyq2[2] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NodeProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NodeProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NodeProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var 
yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ServiceProxyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Path != "" - yyq2[1] = x.Kind != "" - yyq2[2] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServiceProxyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServiceProxyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServiceProxyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) 
- } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *OwnerReference) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[4] = x.Controller != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 4 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.Controller == nil { - r.EncodeNil() - } else { - yy16 := *x.Controller - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeBool(bool(yy16)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("controller")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Controller == nil { - r.EncodeNil() - } else { - yy18 := *x.Controller - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeBool(bool(yy18)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *OwnerReference) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *OwnerReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "uid": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg1_types.UID(r.DecodeString()) - } - case "controller": - if r.TryDecodeAsNil() { - if x.Controller != nil { - x.Controller = nil - } - } else { - if x.Controller == nil { - x.Controller = new(bool) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*bool)(x.Controller)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *OwnerReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - 
x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg1_types.UID(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Controller != nil { - x.Controller = nil - } - } else { - if x.Controller == nil { - x.Controller = new(bool) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*bool)(x.Controller)) = r.DecodeBool() - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.Namespace != "" - yyq2[2] = x.Name != "" - yyq2[3] = x.UID != "" - yyq2[4] = x.APIVersion != "" - yyq2[5] = x.ResourceVersion != "" - yyq2[6] = x.FieldPath != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } 
- } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.EncExt(x.UID) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fieldPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldPath)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - 
yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "uid": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg1_types.UID(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "resourceVersion": - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - case "fieldPath": - if r.TryDecodeAsNil() { - x.FieldPath = "" - } else { - x.FieldPath = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = pkg1_types.UID(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FieldPath = "" - } else { - x.FieldPath = string(r.DecodeString()) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LocalObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Name != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LocalObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if 
yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LocalObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LocalObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SerializedReference) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = x.Kind != "" - yyq2[2] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.Reference - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reference")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.Reference - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym10 := 
z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SerializedReference) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SerializedReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "reference": - if r.TryDecodeAsNil() { - x.Reference = ObjectReference{} - } else { - yyv4 := &x.Reference - yyv4.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SerializedReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reference = ObjectReference{} - } else { - yyv8 := &x.Reference - yyv8.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EventSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Component != "" - yyq2[1] = x.Host != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Component)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("component")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Component)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("host")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EventSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EventSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "component": - if r.TryDecodeAsNil() { - x.Component = "" - } else { - x.Component = string(r.DecodeString()) - } - case "host": - if r.TryDecodeAsNil() { - x.Host = "" - } else { - x.Host = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EventSource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Component = "" - } else { - x.Component = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Host = "" - } else { - x.Host = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Event) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [11]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.Reason != "" - yyq2[3] = x.Message != "" - yyq2[4] = true - yyq2[5] = true - yyq2[6] = true - yyq2[7] = x.Count != 0 - yyq2[8] = x.Type != "" - yyq2[9] = x.Kind != "" - yyq2[10] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(11) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - if yyr2 || 
yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy9 := &x.InvolvedObject - yy9.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("involvedObject")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.InvolvedObject - yy11.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yy20 := &x.Source - yy20.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("source")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.Source - yy22.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yy25 := &x.FirstTimestamp - yym26 := z.EncBinary() - _ = yym26 - if false { - } else if z.HasExtensions() && z.EncExt(yy25) { - } else if yym26 { - z.EncBinaryMarshal(yy25) - } else if !yym26 && z.IsJSONHandle() { - z.EncJSONMarshal(yy25) - } else { - z.EncFallback(yy25) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("firstTimestamp")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy27 := &x.FirstTimestamp - yym28 := z.EncBinary() - _ = yym28 - if false { - } else if z.HasExtensions() && z.EncExt(yy27) { - } else if yym28 { - z.EncBinaryMarshal(yy27) - } else if !yym28 && z.IsJSONHandle() { - z.EncJSONMarshal(yy27) - } else { - z.EncFallback(yy27) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yy30 := &x.LastTimestamp - yym31 := z.EncBinary() - _ = yym31 - if false { - } else if z.HasExtensions() && z.EncExt(yy30) { - } else if yym31 { - z.EncBinaryMarshal(yy30) - } else if !yym31 && z.IsJSONHandle() { - z.EncJSONMarshal(yy30) - } else { - z.EncFallback(yy30) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTimestamp")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy32 := &x.LastTimestamp - yym33 := z.EncBinary() - _ = yym33 - if false { - } else if z.HasExtensions() && z.EncExt(yy32) { - } else if yym33 { - z.EncBinaryMarshal(yy32) - } else if !yym33 && z.IsJSONHandle() { - z.EncJSONMarshal(yy32) - } else { - z.EncFallback(yy32) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - r.EncodeInt(int64(x.Count)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("count")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - r.EncodeInt(int64(x.Count)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - yym38 := z.EncBinary() - _ = yym38 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Type)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym39 := z.EncBinary() - _ = yym39 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Type)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[9] { - yym41 := z.EncBinary() - _ = yym41 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[9] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym42 := z.EncBinary() - _ = yym42 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[10] { - yym44 := z.EncBinary() - _ = yym44 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[10] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym45 := z.EncBinary() - _ = yym45 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Event) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Event) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "involvedObject": - if r.TryDecodeAsNil() { - x.InvolvedObject = ObjectReference{} - } else { - yyv5 := &x.InvolvedObject - yyv5.CodecDecodeSelf(d) - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - case "source": - if r.TryDecodeAsNil() { - x.Source = EventSource{} - } else { - yyv8 := &x.Source - yyv8.CodecDecodeSelf(d) - } - case "firstTimestamp": - if r.TryDecodeAsNil() { - x.FirstTimestamp = pkg2_unversioned.Time{} - } else { - yyv9 := &x.FirstTimestamp - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { - } else if yym10 { - z.DecBinaryUnmarshal(yyv9) - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv9) - } else { - z.DecFallback(yyv9, false) - } - } - case "lastTimestamp": - if r.TryDecodeAsNil() { - x.LastTimestamp = pkg2_unversioned.Time{} - } else { - yyv11 := &x.LastTimestamp - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else if yym12 { - z.DecBinaryUnmarshal(yyv11) - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv11) - } else { - z.DecFallback(yyv11, false) - } - } - case "count": - if r.TryDecodeAsNil() { - x.Count = 0 - } else { - x.Count = int32(r.DecodeInt(32)) - } - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = string(r.DecodeString()) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Event) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj17 int - var yyb17 bool - var yyhl17 bool = l >= 0 - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv18 := &x.ObjectMeta - yyv18.CodecDecodeSelf(d) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.InvolvedObject = ObjectReference{} - } else { - yyv19 := &x.InvolvedObject - yyv19.CodecDecodeSelf(d) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Source = EventSource{} - } else { - yyv22 := &x.Source - yyv22.CodecDecodeSelf(d) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FirstTimestamp = pkg2_unversioned.Time{} - } else { - yyv23 := &x.FirstTimestamp - yym24 := z.DecBinary() - _ = yym24 - if false { - } else if z.HasExtensions() && z.DecExt(yyv23) { - } else if yym24 { - z.DecBinaryUnmarshal(yyv23) - } else if !yym24 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv23) - } else { - z.DecFallback(yyv23, false) - } - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTimestamp = pkg2_unversioned.Time{} - } else { - yyv25 := &x.LastTimestamp - yym26 := z.DecBinary() - _ = yym26 - if false { - } else if z.HasExtensions() && z.DecExt(yyv25) { - } else if yym26 { - z.DecBinaryUnmarshal(yyv25) - } else if !yym26 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv25) - } else { - z.DecFallback(yyv25, false) - } - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Count = 0 - } else { - x.Count = int32(r.DecodeInt(32)) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = string(r.DecodeString()) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj17++ - if yyhl17 { - yyb17 = yyj17 > l - } else { - yyb17 = r.CheckBreak() - } - if yyb17 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj17-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *EventList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceEvent(([]Event)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceEvent(([]Event)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *EventList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *EventList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceEvent((*[]Event)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *EventList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items 
= nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceEvent((*[]Event)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *List) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceruntime_RawExtension(([]pkg5_runtime.RawExtension)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceruntime_RawExtension(([]pkg5_runtime.RawExtension)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *List) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *List) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceruntime_RawExtension((*[]pkg5_runtime.RawExtension)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *List) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceruntime_RawExtension((*[]pkg5_runtime.RawExtension)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x LimitType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *LimitType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *LimitRangeItem) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Type != "" - yyq2[1] = len(x.Max) != 0 - yyq2[2] = len(x.Min) != 0 - yyq2[3] = len(x.Default) != 0 - yyq2[4] = len(x.DefaultRequest) != 0 - yyq2[5] = len(x.MaxLimitRequestRatio) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Type.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Max == nil { - r.EncodeNil() - } else { - x.Max.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("max")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Max == nil { - r.EncodeNil() - } else { - x.Max.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Min == nil { - r.EncodeNil() - } else { - x.Min.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("min")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Min == nil { - r.EncodeNil() - } else { - x.Min.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Default == nil { - r.EncodeNil() - } else { - x.Default.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("default")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Default == nil { - r.EncodeNil() - } else { - x.Default.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.DefaultRequest == nil { - r.EncodeNil() - } else { - x.DefaultRequest.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("defaultRequest")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DefaultRequest == nil { - r.EncodeNil() - } else { - x.DefaultRequest.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.MaxLimitRequestRatio == nil { - r.EncodeNil() - } else { - x.MaxLimitRequestRatio.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxLimitRequestRatio")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MaxLimitRequestRatio == nil { - r.EncodeNil() - } else { - x.MaxLimitRequestRatio.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LimitRangeItem) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } 
else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LimitRangeItem) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = LimitType(r.DecodeString()) - } - case "max": - if r.TryDecodeAsNil() { - x.Max = nil - } else { - yyv5 := &x.Max - yyv5.CodecDecodeSelf(d) - } - case "min": - if r.TryDecodeAsNil() { - x.Min = nil - } else { - yyv6 := &x.Min - yyv6.CodecDecodeSelf(d) - } - case "default": - if r.TryDecodeAsNil() { - x.Default = nil - } else { - yyv7 := &x.Default - yyv7.CodecDecodeSelf(d) - } - case "defaultRequest": - if r.TryDecodeAsNil() { - x.DefaultRequest = nil - } else { - yyv8 := &x.DefaultRequest - yyv8.CodecDecodeSelf(d) - } - case "maxLimitRequestRatio": - if r.TryDecodeAsNil() { - x.MaxLimitRequestRatio = nil - } else { - yyv9 := &x.MaxLimitRequestRatio - yyv9.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LimitRangeItem) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = LimitType(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Max = nil - } else { - yyv12 := &x.Max - yyv12.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Min = nil - } else { - yyv13 := &x.Min - yyv13.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Default = nil - } else { - yyv14 := &x.Default - yyv14.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DefaultRequest = nil - } else { - yyv15 := &x.DefaultRequest - 
yyv15.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxLimitRequestRatio = nil - } else { - yyv16 := &x.MaxLimitRequestRatio - yyv16.CodecDecodeSelf(d) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LimitRangeSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Limits == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceLimitRangeItem(([]LimitRangeItem)(x.Limits), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("limits")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Limits == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceLimitRangeItem(([]LimitRangeItem)(x.Limits), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LimitRangeSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LimitRangeSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "limits": - if r.TryDecodeAsNil() { - x.Limits = 
nil - } else { - yyv4 := &x.Limits - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LimitRangeSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Limits = nil - } else { - yyv7 := &x.Limits - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceLimitRangeItem((*[]LimitRangeItem)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LimitRange) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := 
z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LimitRange) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LimitRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = LimitRangeSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LimitRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv9 := &x.ObjectMeta - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = 
LimitRangeSpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LimitRangeList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceLimitRange(([]LimitRange)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceLimitRange(([]LimitRange)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LimitRangeList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LimitRangeList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceLimitRange((*[]LimitRange)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LimitRangeList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceLimitRange((*[]LimitRange)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ResourceQuotaScope) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ResourceQuotaScope) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ResourceQuotaSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Hard) != 0 - yyq2[1] = len(x.Scopes) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Hard == nil { - r.EncodeNil() - } else { - x.Hard.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hard")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Hard == nil { - r.EncodeNil() - } else { - x.Hard.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Scopes == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("scopes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Scopes == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceResourceQuotaScope(([]ResourceQuotaScope)(x.Scopes), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceQuotaSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceQuotaSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "hard": - if r.TryDecodeAsNil() { - x.Hard = nil - } else { - yyv4 := &x.Hard - yyv4.CodecDecodeSelf(d) - } - case "scopes": - if r.TryDecodeAsNil() { - x.Scopes = nil - } else { - yyv5 := &x.Scopes - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv5), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceQuotaSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hard = nil - } else { - yyv8 := &x.Hard - yyv8.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Scopes = 
nil - } else { - yyv9 := &x.Scopes - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceResourceQuotaScope((*[]ResourceQuotaScope)(yyv9), d) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceQuotaStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Hard) != 0 - yyq2[1] = len(x.Used) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Hard == nil { - r.EncodeNil() - } else { - x.Hard.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hard")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Hard == nil { - r.EncodeNil() - } else { - x.Hard.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Used == nil { - r.EncodeNil() - } else { - x.Used.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("used")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Used == nil { - r.EncodeNil() - } else { - x.Used.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceQuotaStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceQuotaStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "hard": - if r.TryDecodeAsNil() { - x.Hard = nil - } else { - yyv4 := &x.Hard - yyv4.CodecDecodeSelf(d) - } - case "used": - if r.TryDecodeAsNil() { - x.Used = nil - } else { - yyv5 := &x.Used - yyv5.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceQuotaStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hard = nil - } else { - yyv7 := &x.Hard - yyv7.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Used = nil - } else { - yyv8 := &x.Used - yyv8.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceQuota) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - 
if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceQuota) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceQuota) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ResourceQuotaSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ResourceQuotaStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case 
"apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceQuota) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ResourceQuotaSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ResourceQuotaStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceQuotaList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceResourceQuota(([]ResourceQuota)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceResourceQuota(([]ResourceQuota)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceQuotaList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceQuotaList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - 
yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceResourceQuota((*[]ResourceQuota)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceQuotaList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceResourceQuota((*[]ResourceQuota)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Secret) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - 
yyq2[0] = true - yyq2[1] = len(x.Data) != 0 - yyq2[2] = x.Type != "" - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Data == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encMapstringSliceuint8((map[string][]uint8)(x.Data), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("data")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Data == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encMapstringSliceuint8((map[string][]uint8)(x.Data), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - x.Type.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Secret) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() 
- if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Secret) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "data": - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv5 := &x.Data - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decMapstringSliceuint8((*map[string][]uint8)(yyv5), d) - } - } - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = SecretType(r.DecodeString()) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Secret) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv11 := &x.ObjectMeta - yyv11.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv12 := &x.Data - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - h.decMapstringSliceuint8((*map[string][]uint8)(yyv12), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = SecretType(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - 
x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x SecretType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *SecretType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *SecretList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceSecret(([]Secret)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceSecret(([]Secret)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SecretList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SecretList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceSecret((*[]Secret)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SecretList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - 
if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceSecret((*[]Secret)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ConfigMap) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = len(x.Data) != 0 - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Data == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - z.F.EncMapStringStringV(x.Data, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("data")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Data == nil { - r.EncodeNil() - } else { - 
yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - z.F.EncMapStringStringV(x.Data, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ConfigMap) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ConfigMap) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "data": - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv5 := &x.Data - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - z.F.DecMapStringStringX(yyv5, false, d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ConfigMap) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv11 := &x.Data - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - z.F.DecMapStringStringX(yyv11, false, d) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ConfigMapList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - 
h.encSliceConfigMap(([]ConfigMap)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceConfigMap(([]ConfigMap)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ConfigMapList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ConfigMapList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items 
- yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceConfigMap((*[]ConfigMap)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ConfigMapList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceConfigMap((*[]ConfigMap)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ComponentConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ComponentConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ComponentCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 
:= z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.Message != "" - yyq2[3] = x.Error != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Status.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Status.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Error)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("error")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Error)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ComponentCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ComponentCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - 
} else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ComponentConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = ConditionStatus(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - case "error": - if r.TryDecodeAsNil() { - x.Error = "" - } else { - x.Error = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ComponentCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ComponentConditionType(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = ConditionStatus(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Error = "" - } else { - x.Error = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ComponentStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = len(x.Conditions) != 0 - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := 
&x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceComponentCondition(([]ComponentCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceComponentCondition(([]ComponentCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ComponentStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ComponentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ 
{ - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv5 := &x.Conditions - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceComponentCondition((*[]ComponentCondition)(yyv5), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ComponentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv11 := &x.Conditions - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceComponentCondition((*[]ComponentCondition)(yyv11), d) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ComponentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 
int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceComponentStatus(([]ComponentStatus)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceComponentStatus(([]ComponentStatus)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ComponentStatusList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ComponentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceComponentStatus((*[]ComponentStatus)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ComponentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceComponentStatus((*[]ComponentStatus)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DownwardAPIVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Items) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Items == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceDownwardAPIVolumeFile(([]DownwardAPIVolumeFile)(x.Items), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DownwardAPIVolumeSource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DownwardAPIVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4 := &x.Items - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DownwardAPIVolumeSource) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv7 := &x.Items - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceDownwardAPIVolumeFile((*[]DownwardAPIVolumeFile)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DownwardAPIVolumeFile) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FieldRef != nil - yyq2[2] = x.ResourceFieldRef != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.FieldRef == nil { - r.EncodeNil() - } else { - x.FieldRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fieldRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.FieldRef == nil { - r.EncodeNil() - } else { - x.FieldRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.ResourceFieldRef == nil { - r.EncodeNil() - } else { - x.ResourceFieldRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceFieldRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ResourceFieldRef == nil { - r.EncodeNil() - } else { - x.ResourceFieldRef.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DownwardAPIVolumeFile) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 
:= z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DownwardAPIVolumeFile) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "fieldRef": - if r.TryDecodeAsNil() { - if x.FieldRef != nil { - x.FieldRef = nil - } - } else { - if x.FieldRef == nil { - x.FieldRef = new(ObjectFieldSelector) - } - x.FieldRef.CodecDecodeSelf(d) - } - case "resourceFieldRef": - if r.TryDecodeAsNil() { - if x.ResourceFieldRef != nil { - x.ResourceFieldRef = nil - } - } else { - if x.ResourceFieldRef == nil { - x.ResourceFieldRef = new(ResourceFieldSelector) - } - x.ResourceFieldRef.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DownwardAPIVolumeFile) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.FieldRef != nil { - x.FieldRef = nil - } - } else { - if x.FieldRef == nil { - x.FieldRef = new(ObjectFieldSelector) - } - x.FieldRef.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ResourceFieldRef != nil { - x.ResourceFieldRef = nil - } - } else { - if x.ResourceFieldRef == nil { - x.ResourceFieldRef = new(ResourceFieldSelector) - } - x.ResourceFieldRef.CodecDecodeSelf(d) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - 
z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SecurityContext) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Capabilities != nil - yyq2[1] = x.Privileged != nil - yyq2[2] = x.SELinuxOptions != nil - yyq2[3] = x.RunAsUser != nil - yyq2[4] = x.RunAsNonRoot != nil - yyq2[5] = x.ReadOnlyRootFilesystem != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Capabilities == nil { - r.EncodeNil() - } else { - x.Capabilities.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("capabilities")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Capabilities == nil { - r.EncodeNil() - } else { - x.Capabilities.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Privileged == nil { - r.EncodeNil() - } else { - yy7 := *x.Privileged - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(yy7)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("privileged")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Privileged == nil { - r.EncodeNil() - } else { - yy9 := *x.Privileged - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(yy9)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.RunAsUser == nil { - r.EncodeNil() - } else { - yy15 := *x.RunAsUser - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(yy15)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RunAsUser == nil { - r.EncodeNil() - } else { - yy17 := *x.RunAsUser - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeInt(int64(yy17)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.RunAsNonRoot == nil { - 
r.EncodeNil() - } else { - yy20 := *x.RunAsNonRoot - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeBool(bool(yy20)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsNonRoot")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RunAsNonRoot == nil { - r.EncodeNil() - } else { - yy22 := *x.RunAsNonRoot - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeBool(bool(yy22)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.ReadOnlyRootFilesystem == nil { - r.EncodeNil() - } else { - yy25 := *x.ReadOnlyRootFilesystem - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeBool(bool(yy25)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnlyRootFilesystem")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ReadOnlyRootFilesystem == nil { - r.EncodeNil() - } else { - yy27 := *x.ReadOnlyRootFilesystem - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeBool(bool(yy27)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SecurityContext) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SecurityContext) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "capabilities": - if r.TryDecodeAsNil() { - if x.Capabilities != nil { - x.Capabilities = nil - } - } else { - if x.Capabilities == nil { - x.Capabilities = new(Capabilities) - } - x.Capabilities.CodecDecodeSelf(d) - } - case "privileged": - if r.TryDecodeAsNil() { - if x.Privileged != nil { - x.Privileged = nil - } - } else { - if x.Privileged == nil { - x.Privileged = new(bool) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*bool)(x.Privileged)) = r.DecodeBool() - } - } - case "seLinuxOptions": - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - 
x.SELinuxOptions = new(SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - case "runAsUser": - if r.TryDecodeAsNil() { - if x.RunAsUser != nil { - x.RunAsUser = nil - } - } else { - if x.RunAsUser == nil { - x.RunAsUser = new(int64) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) - } - } - case "runAsNonRoot": - if r.TryDecodeAsNil() { - if x.RunAsNonRoot != nil { - x.RunAsNonRoot = nil - } - } else { - if x.RunAsNonRoot == nil { - x.RunAsNonRoot = new(bool) - } - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() - } - } - case "readOnlyRootFilesystem": - if r.TryDecodeAsNil() { - if x.ReadOnlyRootFilesystem != nil { - x.ReadOnlyRootFilesystem = nil - } - } else { - if x.ReadOnlyRootFilesystem == nil { - x.ReadOnlyRootFilesystem = new(bool) - } - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*bool)(x.ReadOnlyRootFilesystem)) = r.DecodeBool() - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SecurityContext) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Capabilities != nil { - x.Capabilities = nil - } - } else { - if x.Capabilities == nil { - x.Capabilities = new(Capabilities) - } - x.Capabilities.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Privileged != nil { - x.Privileged = nil - } - } else { - if x.Privileged == nil { - x.Privileged = new(bool) - } - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*bool)(x.Privileged)) = r.DecodeBool() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RunAsUser != nil { - x.RunAsUser = nil - } - } else { - if x.RunAsUser == nil { - x.RunAsUser = new(int64) - } - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*int64)(x.RunAsUser)) = int64(r.DecodeInt(64)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RunAsNonRoot != 
nil { - x.RunAsNonRoot = nil - } - } else { - if x.RunAsNonRoot == nil { - x.RunAsNonRoot = new(bool) - } - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*bool)(x.RunAsNonRoot)) = r.DecodeBool() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ReadOnlyRootFilesystem != nil { - x.ReadOnlyRootFilesystem = nil - } - } else { - if x.ReadOnlyRootFilesystem == nil { - x.ReadOnlyRootFilesystem = new(bool) - } - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - *((*bool)(x.ReadOnlyRootFilesystem)) = r.DecodeBool() - } - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SELinuxOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.User != "" - yyq2[1] = x.Role != "" - yyq2[2] = x.Type != "" - yyq2[3] = x.Level != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("user")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Role)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("role")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Role)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Type)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { 
- r.EncodeString(codecSelferC_UTF81234, string(x.Type)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Level)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("level")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Level)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SELinuxOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SELinuxOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "user": - if r.TryDecodeAsNil() { - x.User = "" - } else { - x.User = string(r.DecodeString()) - } - case "role": - if r.TryDecodeAsNil() { - x.Role = "" - } else { - x.Role = string(r.DecodeString()) - } - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = string(r.DecodeString()) - } - case "level": - if r.TryDecodeAsNil() { - x.Level = "" - } else { - x.Level = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SELinuxOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.User = "" - } else { - x.User = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Role = "" - } else { - x.Role = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Level = "" - } else { - x.Level = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RangeAllocation) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Range)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("range")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Range)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Data == nil { - r.EncodeNil() - } else { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("data")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Data == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RangeAllocation) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RangeAllocation) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "range": - if r.TryDecodeAsNil() { - x.Range = "" - } else { - x.Range = string(r.DecodeString()) - } - case "data": - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv6 := &x.Data - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *yyv6 = r.DecodeBytes(*(*[]byte)(yyv6), false, false) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RangeAllocation) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - 
var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = ObjectMeta{} - } else { - yyv11 := &x.ObjectMeta - yyv11.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Range = "" - } else { - x.Range = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv13 := &x.Data - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *yyv13 = r.DecodeBytes(*(*[]byte)(yyv13), false, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceOwnerReference(v []OwnerReference, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceOwnerReference(v *[]OwnerReference, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []OwnerReference{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]OwnerReference, yyrl1) - } - } else { - yyv1 = make([]OwnerReference, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = 
yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = OwnerReference{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, OwnerReference{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = OwnerReference{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, OwnerReference{}) // var yyz1 OwnerReference - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = OwnerReference{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []OwnerReference{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePersistentVolumeAccessMode(v []PersistentVolumeAccessMode, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePersistentVolumeAccessMode(v *[]PersistentVolumeAccessMode, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PersistentVolumeAccessMode{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PersistentVolumeAccessMode, yyrl1) - } - } else { - yyv1 = make([]PersistentVolumeAccessMode, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = PersistentVolumeAccessMode(r.DecodeString()) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = PersistentVolumeAccessMode(r.DecodeString()) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 PersistentVolumeAccessMode - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = PersistentVolumeAccessMode(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PersistentVolumeAccessMode{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePersistentVolume(v []PersistentVolume, e *codec1978.Encoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePersistentVolume(v *[]PersistentVolume, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PersistentVolume{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 456) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PersistentVolume, yyrl1) - } - } else { - yyv1 = make([]PersistentVolume, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PersistentVolume{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PersistentVolume{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PersistentVolume{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PersistentVolume{}) // var yyz1 PersistentVolume - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PersistentVolume{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PersistentVolume{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePersistentVolumeClaim(v []PersistentVolumeClaim, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePersistentVolumeClaim(v *[]PersistentVolumeClaim, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PersistentVolumeClaim{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 352) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PersistentVolumeClaim, yyrl1) - } - } else { 
- yyv1 = make([]PersistentVolumeClaim, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PersistentVolumeClaim{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PersistentVolumeClaim{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PersistentVolumeClaim{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PersistentVolumeClaim{}) // var yyz1 PersistentVolumeClaim - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PersistentVolumeClaim{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PersistentVolumeClaim{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceKeyToPath(v []KeyToPath, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceKeyToPath(v *[]KeyToPath, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []KeyToPath{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]KeyToPath, yyrl1) - } - } else { - yyv1 = make([]KeyToPath, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = KeyToPath{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, KeyToPath{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = KeyToPath{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, KeyToPath{}) // var yyz1 KeyToPath - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = KeyToPath{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []KeyToPath{} - yyc1 = true - } - 
} - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceHTTPHeader(v []HTTPHeader, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceHTTPHeader(v *[]HTTPHeader, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []HTTPHeader{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]HTTPHeader, yyrl1) - } - } else { - yyv1 = make([]HTTPHeader, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPHeader{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, HTTPHeader{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPHeader{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, HTTPHeader{}) // var yyz1 HTTPHeader - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPHeader{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []HTTPHeader{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceCapability(v []Capability, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCapability(v *[]Capability, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Capability{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Capability, yyrl1) - } - } else { - yyv1 = make([]Capability, yyrl1) - } - yyc1 = true - 
yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = Capability(r.DecodeString()) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = Capability(r.DecodeString()) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 Capability - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = Capability(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Capability{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceContainerPort(v []ContainerPort, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceContainerPort(v *[]ContainerPort, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ContainerPort{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ContainerPort, yyrl1) - } - } else { - yyv1 = make([]ContainerPort, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerPort{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ContainerPort{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerPort{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ContainerPort{}) // var yyz1 ContainerPort - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerPort{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ContainerPort{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceEnvVar(v []EnvVar, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEnvVar(v *[]EnvVar, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []EnvVar{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]EnvVar, yyrl1) - } - } else { - yyv1 = make([]EnvVar, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EnvVar{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, EnvVar{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EnvVar{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, EnvVar{}) // var yyz1 EnvVar - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = EnvVar{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []EnvVar{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceVolumeMount(v []VolumeMount, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceVolumeMount(v *[]VolumeMount, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []VolumeMount{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]VolumeMount, yyrl1) - } - } else { - yyv1 = make([]VolumeMount, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < 
yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = VolumeMount{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, VolumeMount{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = VolumeMount{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, VolumeMount{}) // var yyz1 VolumeMount - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = VolumeMount{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []VolumeMount{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNodeSelectorTerm(v []NodeSelectorTerm, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNodeSelectorTerm(v *[]NodeSelectorTerm, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NodeSelectorTerm{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NodeSelectorTerm, yyrl1) - } - } else { - yyv1 = make([]NodeSelectorTerm, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeSelectorTerm{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NodeSelectorTerm{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeSelectorTerm{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NodeSelectorTerm{}) // var yyz1 NodeSelectorTerm - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeSelectorTerm{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NodeSelectorTerm{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNodeSelectorRequirement(v []NodeSelectorRequirement, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r 
:= codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNodeSelectorRequirement(v *[]NodeSelectorRequirement, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NodeSelectorRequirement{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NodeSelectorRequirement, yyrl1) - } - } else { - yyv1 = make([]NodeSelectorRequirement, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeSelectorRequirement{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NodeSelectorRequirement{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeSelectorRequirement{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NodeSelectorRequirement{}) // var yyz1 NodeSelectorRequirement - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeSelectorRequirement{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NodeSelectorRequirement{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePodAffinityTerm(v []PodAffinityTerm, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePodAffinityTerm(v *[]PodAffinityTerm, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PodAffinityTerm{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PodAffinityTerm, 
yyrl1) - } - } else { - yyv1 = make([]PodAffinityTerm, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodAffinityTerm{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PodAffinityTerm{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodAffinityTerm{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PodAffinityTerm{}) // var yyz1 PodAffinityTerm - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodAffinityTerm{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PodAffinityTerm{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceWeightedPodAffinityTerm(v []WeightedPodAffinityTerm, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceWeightedPodAffinityTerm(v *[]WeightedPodAffinityTerm, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []WeightedPodAffinityTerm{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]WeightedPodAffinityTerm, yyrl1) - } - } else { - yyv1 = make([]WeightedPodAffinityTerm, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = WeightedPodAffinityTerm{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, WeightedPodAffinityTerm{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = WeightedPodAffinityTerm{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, WeightedPodAffinityTerm{}) // var yyz1 WeightedPodAffinityTerm - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = WeightedPodAffinityTerm{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - 
z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []WeightedPodAffinityTerm{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePreferredSchedulingTerm(v []PreferredSchedulingTerm, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePreferredSchedulingTerm(v *[]PreferredSchedulingTerm, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PreferredSchedulingTerm{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PreferredSchedulingTerm, yyrl1) - } - } else { - yyv1 = make([]PreferredSchedulingTerm, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PreferredSchedulingTerm{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PreferredSchedulingTerm{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PreferredSchedulingTerm{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PreferredSchedulingTerm{}) // var yyz1 PreferredSchedulingTerm - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PreferredSchedulingTerm{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PreferredSchedulingTerm{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceVolume(v []Volume, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceVolume(v *[]Volume, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Volume{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var 
yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 176) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Volume, yyrl1) - } - } else { - yyv1 = make([]Volume, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Volume{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Volume{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Volume{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Volume{}) // var yyz1 Volume - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Volume{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Volume{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceContainer(v []Container, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceContainer(v *[]Container, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Container{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 256) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Container, yyrl1) - } - } else { - yyv1 = make([]Container, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Container{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Container{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Container{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Container{}) // var yyz1 Container - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Container{} - } else { - yyv4 := 
&yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Container{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceLocalObjectReference(v []LocalObjectReference, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceLocalObjectReference(v *[]LocalObjectReference, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []LocalObjectReference{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]LocalObjectReference, yyrl1) - } - } else { - yyv1 = make([]LocalObjectReference, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LocalObjectReference{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, LocalObjectReference{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LocalObjectReference{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, LocalObjectReference{}) // var yyz1 LocalObjectReference - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = LocalObjectReference{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []LocalObjectReference{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePodCondition(v []PodCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePodCondition(v *[]PodCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PodCondition{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } 
- } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PodCondition, yyrl1) - } - } else { - yyv1 = make([]PodCondition, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodCondition{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PodCondition{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodCondition{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PodCondition{}) // var yyz1 PodCondition - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodCondition{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PodCondition{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceContainerStatus(v []ContainerStatus, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceContainerStatus(v *[]ContainerStatus, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ContainerStatus{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 120) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ContainerStatus, yyrl1) - } - } else { - yyv1 = make([]ContainerStatus, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerStatus{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ContainerStatus{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerStatus{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ContainerStatus{}) // var yyz1 
ContainerStatus - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerStatus{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ContainerStatus{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePod(v []Pod, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePod(v *[]Pod, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Pod{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 648) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Pod, yyrl1) - } - } else { - yyv1 = make([]Pod, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Pod{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Pod{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Pod{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Pod{}) // var yyz1 Pod - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Pod{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Pod{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePodTemplate(v []PodTemplate, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePodTemplate(v *[]PodTemplate, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PodTemplate{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - 
_, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 696) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PodTemplate, yyrl1) - } - } else { - yyv1 = make([]PodTemplate, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodTemplate{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PodTemplate{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodTemplate{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PodTemplate{}) // var yyz1 PodTemplate - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodTemplate{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PodTemplate{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceReplicationController(v []ReplicationController, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceReplicationController(v *[]ReplicationController, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ReplicationController{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ReplicationController, yyrl1) - } - } else { - yyv1 = make([]ReplicationController, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicationController{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ReplicationController{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicationController{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ReplicationController{}) // var yyz1 
ReplicationController - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicationController{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ReplicationController{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceLoadBalancerIngress(v []LoadBalancerIngress, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceLoadBalancerIngress(v *[]LoadBalancerIngress, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []LoadBalancerIngress{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]LoadBalancerIngress, yyrl1) - } - } else { - yyv1 = make([]LoadBalancerIngress, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LoadBalancerIngress{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, LoadBalancerIngress{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LoadBalancerIngress{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, LoadBalancerIngress{}) // var yyz1 LoadBalancerIngress - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = LoadBalancerIngress{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []LoadBalancerIngress{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceServicePort(v []ServicePort, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceServicePort(v *[]ServicePort, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := 
z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ServicePort{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 80) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ServicePort, yyrl1) - } - } else { - yyv1 = make([]ServicePort, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServicePort{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ServicePort{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServicePort{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ServicePort{}) // var yyz1 ServicePort - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServicePort{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ServicePort{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceService(v []Service, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceService(v *[]Service, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Service{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 432) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Service, yyrl1) - } - } else { - yyv1 = make([]Service, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Service{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Service{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Service{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; 
!r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Service{}) // var yyz1 Service - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Service{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Service{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceObjectReference(v []ObjectReference, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceObjectReference(v *[]ObjectReference, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ObjectReference{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ObjectReference, yyrl1) - } - } else { - yyv1 = make([]ObjectReference, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ObjectReference{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ObjectReference{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ObjectReference{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ObjectReference{}) // var yyz1 ObjectReference - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ObjectReference{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ObjectReference{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceServiceAccount(v []ServiceAccount, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceServiceAccount(v *[]ServiceAccount, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := 
z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ServiceAccount{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 288) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ServiceAccount, yyrl1) - } - } else { - yyv1 = make([]ServiceAccount, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServiceAccount{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ServiceAccount{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServiceAccount{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ServiceAccount{}) // var yyz1 ServiceAccount - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ServiceAccount{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ServiceAccount{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceEndpointSubset(v []EndpointSubset, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEndpointSubset(v *[]EndpointSubset, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []EndpointSubset{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]EndpointSubset, yyrl1) - } - } else { - yyv1 = make([]EndpointSubset, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointSubset{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, EndpointSubset{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointSubset{} - } else { - yyv3 := 
&yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, EndpointSubset{}) // var yyz1 EndpointSubset - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointSubset{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []EndpointSubset{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceEndpointAddress(v []EndpointAddress, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEndpointAddress(v *[]EndpointAddress, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []EndpointAddress{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]EndpointAddress, yyrl1) - } - } else { - yyv1 = make([]EndpointAddress, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointAddress{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, EndpointAddress{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointAddress{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, EndpointAddress{}) // var yyz1 EndpointAddress - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointAddress{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []EndpointAddress{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceEndpointPort(v []EndpointPort, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEndpointPort(v *[]EndpointPort, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []EndpointPort{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]EndpointPort, yyrl1) - } - } else { - yyv1 = make([]EndpointPort, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointPort{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, EndpointPort{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointPort{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, EndpointPort{}) // var yyz1 EndpointPort - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = EndpointPort{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []EndpointPort{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceEndpoints(v []Endpoints, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEndpoints(v *[]Endpoints, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Endpoints{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Endpoints, yyrl1) - } - } else { - yyv1 = make([]Endpoints, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Endpoints{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Endpoints{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - 
yyv1[yyj1] = Endpoints{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Endpoints{}) // var yyz1 Endpoints - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Endpoints{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Endpoints{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNodeCondition(v []NodeCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNodeCondition(v *[]NodeCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NodeCondition{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NodeCondition, yyrl1) - } - } else { - yyv1 = make([]NodeCondition, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeCondition{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NodeCondition{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeCondition{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NodeCondition{}) // var yyz1 NodeCondition - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeCondition{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NodeCondition{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNodeAddress(v []NodeAddress, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNodeAddress(v *[]NodeAddress, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NodeAddress{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NodeAddress, yyrl1) - } - } else { - yyv1 = make([]NodeAddress, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeAddress{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NodeAddress{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeAddress{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NodeAddress{}) // var yyz1 NodeAddress - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NodeAddress{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NodeAddress{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceContainerImage(v []ContainerImage, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceContainerImage(v *[]ContainerImage, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ContainerImage{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ContainerImage, yyrl1) - } - } else { - yyv1 = make([]ContainerImage, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerImage{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ContainerImage{}) - yyh1.ElemContainerState(yyj1) - 
if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerImage{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ContainerImage{}) // var yyz1 ContainerImage - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ContainerImage{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ContainerImage{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceUniqueVolumeName(v []UniqueVolumeName, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceUniqueVolumeName(v *[]UniqueVolumeName, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []UniqueVolumeName{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]UniqueVolumeName, yyrl1) - } - } else { - yyv1 = make([]UniqueVolumeName, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = UniqueVolumeName(r.DecodeString()) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = UniqueVolumeName(r.DecodeString()) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 UniqueVolumeName - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = UniqueVolumeName(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []UniqueVolumeName{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceAttachedVolume(v []AttachedVolume, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceAttachedVolume(v *[]AttachedVolume, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, 
r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []AttachedVolume{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]AttachedVolume, yyrl1) - } - } else { - yyv1 = make([]AttachedVolume, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = AttachedVolume{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, AttachedVolume{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = AttachedVolume{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, AttachedVolume{}) // var yyz1 AttachedVolume - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = AttachedVolume{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []AttachedVolume{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encResourceList(v ResourceList, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk1, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yyk1.CodecEncodeSelf(e) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy3 := &yyv1 - yym4 := z.EncBinary() - _ = yym4 - if false { - } else if z.HasExtensions() && z.EncExt(yy3) { - } else if !yym4 && z.IsJSONHandle() { - z.EncJSONMarshal(yy3) - } else { - z.EncFallback(yy3) - } - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decResourceList(v *ResourceList, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyl1 := r.ReadMapStart() - yybh1 := z.DecBasicHandle() - if yyv1 == nil { - yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 72) - yyv1 = make(map[ResourceName]pkg3_resource.Quantity, yyrl1) - *v = yyv1 - } - var yymk1 ResourceName - var yymv1 pkg3_resource.Quantity - var yymg1 bool - if yybh1.MapValueReset { - yymg1 = true - } - if yyl1 > 0 { - for yyj1 := 0; yyj1 < yyl1; yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yymk1 = ResourceName(r.DecodeString()) - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = pkg3_resource.Quantity{} - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = pkg3_resource.Quantity{} - } else { - yyv3 := &yymv1 - yym4 := z.DecBinary() - _ = yym4 - if false { - } else if z.HasExtensions() && z.DecExt(yyv3) { - } else if !yym4 && 
z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv3) - } else { - z.DecFallback(yyv3, false) - } - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } else if yyl1 < 0 { - for yyj1 := 0; !r.CheckBreak(); yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yymk1 = ResourceName(r.DecodeString()) - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = pkg3_resource.Quantity{} - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = pkg3_resource.Quantity{} - } else { - yyv6 := &yymv1 - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } // else len==0: TODO: Should we clear map entries? - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) encSliceNode(v []Node, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNode(v *[]Node, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Node{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 616) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Node, yyrl1) - } - } else { - yyv1 = make([]Node, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Node{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Node{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Node{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Node{}) // var yyz1 Node - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Node{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Node{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceFinalizerName(v []FinalizerName, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - 
yyv1.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceFinalizerName(v *[]FinalizerName, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []FinalizerName{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]FinalizerName, yyrl1) - } - } else { - yyv1 = make([]FinalizerName, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = FinalizerName(r.DecodeString()) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = FinalizerName(r.DecodeString()) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 FinalizerName - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = FinalizerName(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []FinalizerName{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNamespace(v []Namespace, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNamespace(v *[]Namespace, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Namespace{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Namespace, yyrl1) - } - } else { - yyv1 = make([]Namespace, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Namespace{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Namespace{}) - 
yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Namespace{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Namespace{}) // var yyz1 Namespace - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Namespace{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Namespace{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceEvent(v []Event, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceEvent(v *[]Event, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Event{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 488) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Event, yyrl1) - } - } else { - yyv1 = make([]Event, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Event{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Event{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Event{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Event{}) // var yyz1 Event - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Event{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Event{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceruntime_RawExtension(v []pkg5_runtime.RawExtension, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yym3 := z.EncBinary() - _ = yym3 - if false { - } else if z.HasExtensions() && z.EncExt(yy2) { - } else if !yym3 && z.IsJSONHandle() { - z.EncJSONMarshal(yy2) - } else { - z.EncFallback(yy2) - } - } - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceruntime_RawExtension(v *[]pkg5_runtime.RawExtension, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []pkg5_runtime.RawExtension{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]pkg5_runtime.RawExtension, yyrl1) - } - } else { - yyv1 = make([]pkg5_runtime.RawExtension, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg5_runtime.RawExtension{} - } else { - yyv2 := &yyv1[yyj1] - yym3 := z.DecBinary() - _ = yym3 - if false { - } else if z.HasExtensions() && z.DecExt(yyv2) { - } else if !yym3 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv2) - } else { - z.DecFallback(yyv2, false) - } - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, pkg5_runtime.RawExtension{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg5_runtime.RawExtension{} - } else { - yyv4 := &yyv1[yyj1] - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else if !yym5 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4) - } else { - z.DecFallback(yyv4, false) - } - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, pkg5_runtime.RawExtension{}) // var yyz1 pkg5_runtime.RawExtension - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg5_runtime.RawExtension{} - } else { - yyv6 := &yyv1[yyj1] - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []pkg5_runtime.RawExtension{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceLimitRangeItem(v []LimitRangeItem, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceLimitRangeItem(v *[]LimitRangeItem, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []LimitRangeItem{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else 
if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]LimitRangeItem, yyrl1) - } - } else { - yyv1 = make([]LimitRangeItem, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LimitRangeItem{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, LimitRangeItem{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LimitRangeItem{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, LimitRangeItem{}) // var yyz1 LimitRangeItem - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = LimitRangeItem{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []LimitRangeItem{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceLimitRange(v []LimitRange, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceLimitRange(v *[]LimitRange, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []LimitRange{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]LimitRange, yyrl1) - } - } else { - yyv1 = make([]LimitRange, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LimitRange{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, LimitRange{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LimitRange{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, LimitRange{}) // var yyz1 LimitRange - yyc1 = true - } - 
yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = LimitRange{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []LimitRange{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceResourceQuotaScope(v []ResourceQuotaScope, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceResourceQuotaScope(v *[]ResourceQuotaScope, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ResourceQuotaScope{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ResourceQuotaScope, yyrl1) - } - } else { - yyv1 = make([]ResourceQuotaScope, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = ResourceQuotaScope(r.DecodeString()) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = ResourceQuotaScope(r.DecodeString()) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 ResourceQuotaScope - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = ResourceQuotaScope(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ResourceQuotaScope{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceResourceQuota(v []ResourceQuota, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceResourceQuota(v *[]ResourceQuota, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ResourceQuota{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool 
- _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 288) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ResourceQuota, yyrl1) - } - } else { - yyv1 = make([]ResourceQuota, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ResourceQuota{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ResourceQuota{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ResourceQuota{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ResourceQuota{}) // var yyz1 ResourceQuota - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ResourceQuota{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ResourceQuota{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encMapstringSliceuint8(v map[string][]uint8, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk1, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyk1)) - } - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyv1 == nil { - r.EncodeNil() - } else { - yym3 := z.EncBinary() - _ = yym3 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(yyv1)) - } - } - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decMapstringSliceuint8(v *map[string][]uint8, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyl1 := r.ReadMapStart() - yybh1 := z.DecBasicHandle() - if yyv1 == nil { - yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) - yyv1 = make(map[string][]uint8, yyrl1) - *v = yyv1 - } - var yymk1 string - var yymv1 []uint8 - var yymg1 bool - if yybh1.MapValueReset { - yymg1 = true - } - if yyl1 > 0 { - for yyj1 := 0; yyj1 < yyl1; yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yymk1 = string(r.DecodeString()) - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv3 := &yymv1 - yym4 := z.DecBinary() - _ = yym4 - if false { - } else { - *yyv3 = r.DecodeBytes(*(*[]byte)(yyv3), false, false) - } - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } else if yyl1 < 0 { - for yyj1 := 0; !r.CheckBreak(); yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yymk1 = string(r.DecodeString()) - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { 
- yymv1 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv6 := &yymv1 - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *yyv6 = r.DecodeBytes(*(*[]byte)(yyv6), false, false) - } - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } // else len==0: TODO: Should we clear map entries? - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) encSliceuint8(v []uint8, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - r.EncodeUint(uint64(yyv1)) - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceuint8(v *[]uint8, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []uint8{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 1) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]uint8, yyrl1) - } - } else { - yyv1 = make([]uint8, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = 0 - } else { - yyv1[yyj1] = uint8(r.DecodeUint(8)) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, 0) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = 0 - } else { - yyv1[yyj1] = uint8(r.DecodeUint(8)) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, 0) // var yyz1 uint8 - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = 0 - } else { - yyv1[yyj1] = uint8(r.DecodeUint(8)) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []uint8{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceSecret(v []Secret, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceSecret(v *[]Secret, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Secret{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := 
len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Secret, yyrl1) - } - } else { - yyv1 = make([]Secret, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Secret{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Secret{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Secret{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Secret{}) // var yyz1 Secret - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Secret{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Secret{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceConfigMap(v []ConfigMap, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceConfigMap(v *[]ConfigMap, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ConfigMap{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 248) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ConfigMap, yyrl1) - } - } else { - yyv1 = make([]ConfigMap, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ConfigMap{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ConfigMap{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ConfigMap{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ConfigMap{}) // var yyz1 ConfigMap - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ConfigMap{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = 
yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ConfigMap{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceComponentCondition(v []ComponentCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceComponentCondition(v *[]ComponentCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ComponentCondition{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ComponentCondition, yyrl1) - } - } else { - yyv1 = make([]ComponentCondition, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ComponentCondition{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ComponentCondition{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ComponentCondition{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ComponentCondition{}) // var yyz1 ComponentCondition - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ComponentCondition{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ComponentCondition{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceComponentStatus(v []ComponentStatus, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceComponentStatus(v *[]ComponentStatus, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ComponentStatus{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > 
cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ComponentStatus, yyrl1) - } - } else { - yyv1 = make([]ComponentStatus, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ComponentStatus{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ComponentStatus{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ComponentStatus{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ComponentStatus{}) // var yyz1 ComponentStatus - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ComponentStatus{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ComponentStatus{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceDownwardAPIVolumeFile(v []DownwardAPIVolumeFile, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceDownwardAPIVolumeFile(v *[]DownwardAPIVolumeFile, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []DownwardAPIVolumeFile{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]DownwardAPIVolumeFile, yyrl1) - } - } else { - yyv1 = make([]DownwardAPIVolumeFile, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = DownwardAPIVolumeFile{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, DownwardAPIVolumeFile{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = DownwardAPIVolumeFile{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, DownwardAPIVolumeFile{}) // var yyz1 DownwardAPIVolumeFile - yyc1 = true - } - 
yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = DownwardAPIVolumeFile{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []DownwardAPIVolumeFile{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/types.go b/vendor/k8s.io/kubernetes/pkg/api/v1/types.go deleted file mode 100644 index 34b9ac471..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/types.go +++ /dev/null @@ -1,3314 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util/intstr" -) - -// The comments for the structs and fields can be used from go-resful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored and not exported to the SwaggerAPI. -// -// The aforementioned methods can be generated by hack/update-generated-swagger-docs.sh - -// Common string formats -// --------------------- -// Many fields in this API have formatting requirements. The commonly used -// formats are defined here. -// -// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier" -// in the C language. This is captured by the following regex: -// [A-Za-z_][A-Za-z0-9_]* -// This defines the format, but not the length restriction, which should be -// specified at the definition of any field of this type. -// -// DNS_LABEL: This is a string, no more than 63 characters long, that conforms -// to the definition of a "label" in RFCs 1035 and 1123. This is captured -// by the following regex: -// [a-z0-9]([-a-z0-9]*[a-z0-9])? -// -// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms -// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured -// by the following regex: -// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* -// or more simply: -// DNS_LABEL(\.DNS_LABEL)* -// -// IANA_SVC_NAME: This is a string, no more than 15 characters long, that -// conforms to the definition of IANA service name in RFC 6335. -// It must contains at least one letter [a-z] and it must contains only [a-z0-9-]. -// Hypens ('-') cannot be leading or trailing character of the string -// and cannot be adjacent to other hyphens. - -// ObjectMeta is metadata that all persisted resources must have, which includes all objects -// users must create. 
-type ObjectMeta struct { - // Name must be unique within a namespace. Is required when creating resources, although - // some resources may allow a client to request the generation of an appropriate name - // automatically. Name is primarily intended for creation idempotence and configuration - // definition. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - - // GenerateName is an optional prefix, used by the server, to generate a unique - // name ONLY IF the Name field has not been provided. - // If this field is used, the name returned to the client will be different - // than the name passed. This value will also be combined with a unique suffix. - // The provided value has the same validation rules as the Name field, - // and may be truncated by the length of the suffix required to make the value - // unique on the server. - // - // If this field is specified and the generated name exists, the server will - // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason - // ServerTimeout indicating a unique name could not be found in the time allotted, and the client - // should retry (optionally after the time indicated in the Retry-After header). - // - // Applied only if Name is not specified. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#idempotency - GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` - - // Namespace defines the space within each name must be unique. An empty namespace is - // equivalent to the "default" namespace, but "default" is the canonical representation. - // Not all objects are required to be scoped to a namespace - the value of this field for - // those objects will be empty. - // - // Must be a DNS_LABEL. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/namespaces.md - Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` - - // SelfLink is a URL representing this object. - // Populated by the system. - // Read-only. - SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"` - - // UID is the unique in time and space value for this object. It is typically generated by - // the server on successful creation of a resource and is not allowed to change on PUT - // operations. - // - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#uids - UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` - - // An opaque value that represents the internal version of this object that can - // be used by clients to determine when objects have changed. May be used for optimistic - // concurrency, change detection, and the watch operation on a resource or set of resources. - // Clients must treat these values as opaque and passed unmodified back to the server. - // They may only be valid for a particular resource or set of resources. - // - // Populated by the system. - // Read-only. - // Value must be treated as opaque by clients and . 
- // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#concurrency-control-and-consistency - ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` - - // A sequence number representing a specific generation of the desired state. - // Populated by the system. Read-only. - Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"` - - // CreationTimestamp is a timestamp representing the server time when this object was - // created. It is not guaranteed to be set in happens-before order across separate operations. - // Clients may not set this value. It is represented in RFC3339 form and is in UTC. - // - // Populated by the system. - // Read-only. - // Null for lists. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - CreationTimestamp unversioned.Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"` - - // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This - // field is set by the server when a graceful deletion is requested by the user, and is not - // directly settable by a client. The resource will be deleted (no longer visible from - // resource lists, and not reachable by name) after the time in this field. Once set, this - // value may not be unset or be set further into the future, although it may be shortened - // or the resource may be deleted prior to this time. For example, a user may request that - // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination - // signal to the containers in the pod. Once the resource is deleted in the API, the Kubelet - // will send a hard termination signal to the container. - // If not set, graceful deletion of the object has not been requested. - // - // Populated by the system when a graceful deletion is requested. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - DeletionTimestamp *unversioned.Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` - - // Number of seconds allowed for this object to gracefully terminate before - // it will be removed from the system. Only set when deletionTimestamp is also set. - // May only be shortened. - // Read-only. - DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"` - - // Map of string keys and values that can be used to organize and categorize - // (scope and select) objects. May match selectors of replication controllers - // and services. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md - // TODO: replace map[string]string with labels.LabelSet type - Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` - - // Annotations is an unstructured key value map stored with a resource that may be - // set by external tools to store and retrieve arbitrary metadata. They are not - // queryable and should be preserved when modifying objects. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/annotations.md - Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` - - // List of objects depended by this object. If ALL objects in the list have - // been deleted, this object will be garbage collected. 
If this object is managed by a controller, - // then an entry in this list will point to this controller, with the controller field set to true. - // There cannot be more than one managing controller. - OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` - - // Must be empty before the object is deleted from the registry. Each entry - // is an identifier for the responsible component that will remove the entry - // from the list. If the deletionTimestamp of the object is non-nil, entries - // in this list can only be removed. - Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"` -} - -const ( - // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients - NamespaceDefault string = "default" - // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces - NamespaceAll string = "" -) - -// Volume represents a named volume in a pod that may be accessed by any container in the pod. -type Volume struct { - // Volume's name. - // Must be a DNS_LABEL and unique within the pod. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // VolumeSource represents the location and type of the mounted volume. - // If not specified, the Volume is implied to be an EmptyDir. - // This implied behavior is deprecated and will be removed in a future version. - VolumeSource `json:",inline" protobuf:"bytes,2,opt,name=volumeSource"` -} - -// Represents the source of a volume to mount. -// Only one of its members may be specified. -type VolumeSource struct { - // HostPath represents a pre-existing file or directory on the host - // machine that is directly exposed to the container. This is generally - // used for system agents or other privileged things that are allowed - // to see the host machine. Most containers will NOT need this. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#hostpath - // --- - // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not - // mount host directories as read/write. - HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"` - // EmptyDir represents a temporary directory that shares a pod's lifetime. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#emptydir - EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"` - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#gcepersistentdisk - GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"` - // AWSElasticBlockStore represents an AWS Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#awselasticblockstore - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"` - // GitRepo represents a git repository at a particular revision. 
- GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"` - // Secret represents a secret that should populate this volume. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#secrets - Secret *SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"` - // NFS represents an NFS mount on the host that shares a pod's lifetime - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#nfs - NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"` - // ISCSI represents an ISCSI Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/release-1.3/examples/iscsi/README.md - ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"` - // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/release-1.3/examples/glusterfs/README.md - Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"` - // PersistentVolumeClaimVolumeSource represents a reference to a - // PersistentVolumeClaim in the same namespace. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"` - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md - RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"` - // FlexVolume represents a generic volume resource that is - // provisioned/attached using a exec based plugin. This is an - // alpha feature and may change in future. - FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: http://releases.k8s.io/release-1.3/examples/mysql-cinder-pd/README.md - Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"` - - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime - CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"` - - // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running - Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"` - - // DownwardAPI represents downward API about the pod that should populate this volume - DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty" protobuf:"bytes,16,opt,name=downwardAPI"` - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"` - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. 
- AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"` - // ConfigMap represents a configMap that should populate this volume - ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"` - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"` -} - -// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. -// This volume finds the bound PV and mounts that volume for the pod. A -// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another -// type of volume that is owned by someone else (the system). -type PersistentVolumeClaimVolumeSource struct { - // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - ClaimName string `json:"claimName" protobuf:"bytes,1,opt,name=claimName"` - // Will force the ReadOnly setting in VolumeMounts. - // Default false. - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` -} - -// PersistentVolumeSource is similar to VolumeSource but meant for the -// administrator who creates PVs. Exactly one of its members must be set. -type PersistentVolumeSource struct { - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. Provisioned by an admin. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#gcepersistentdisk - GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"` - // AWSElasticBlockStore represents an AWS Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#awselasticblockstore - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"` - // HostPath represents a directory on the host. - // Provisioned by a developer or tester. - // This is useful for single-node development and testing only! - // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#hostpath - HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"` - // Glusterfs represents a Glusterfs volume that is attached to a host and - // exposed to the pod. Provisioned by an admin. - // More info: http://releases.k8s.io/release-1.3/examples/glusterfs/README.md - Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` - // NFS represents an NFS mount on the host. Provisioned by an admin. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#nfs - NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"` - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. 
- // More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md - RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"` - // ISCSI represents an ISCSI Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. Provisioned by an admin. - ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: http://releases.k8s.io/release-1.3/examples/mysql-cinder-pd/README.md - Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"` - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime - CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"` - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"` - // Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running - Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"` - // FlexVolume represents a generic volume resource that is - // provisioned/attached using a exec based plugin. This is an - // alpha feature and may change in future. - FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"` - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"` -} - -// +genclient=true,nonNamespaced=true - -// PersistentVolume (PV) is a storage resource provisioned by an administrator. -// It is analogous to a node. -// More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md -type PersistentVolume struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines a specification of a persistent volume owned by the cluster. - // Provisioned by an administrator. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#persistent-volumes - Spec PersistentVolumeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status represents the current information/status for the persistent volume. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#persistent-volumes - Status PersistentVolumeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// PersistentVolumeSpec is the specification of a persistent volume. -type PersistentVolumeSpec struct { - // A description of the persistent volume's resources and capacity. 
- // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#capacity - Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` - // The actual volume backing the persistent volume. - PersistentVolumeSource `json:",inline" protobuf:"bytes,2,opt,name=persistentVolumeSource"` - // AccessModes contains all ways the volume can be mounted. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#access-modes - AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` - // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. - // Expected to be non-nil when bound. - // claim.VolumeName is the authoritative bind between PV and PVC. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#binding - ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"` - // What happens to a persistent volume when released from its claim. - // Valid options are Retain (default) and Recycle. - // Recyling must be supported by the volume plugin underlying this persistent volume. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#recycling-policy - PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"` -} - -// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes. -type PersistentVolumeReclaimPolicy string - -const ( - // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim. - // The volume plugin must support Recycling. - PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle" - // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. - // The volume plugin must support Deletion. - PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" - // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. - // The default policy is Retain. - PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" -) - -// PersistentVolumeStatus is the current status of a persistent volume. -type PersistentVolumeStatus struct { - // Phase indicates if a volume is available, bound to a claim, or released by a claim. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#phase - Phase PersistentVolumePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumePhase"` - // A human-readable message indicating details about why the volume is in this state. - Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` - // Reason is a brief CamelCase string that describes any failure and is meant - // for machine parsing and tidy display in the CLI. - Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` -} - -// PersistentVolumeList is a list of PersistentVolume items. -type PersistentVolumeList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. 
- // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // List of persistent volumes. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md - Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// PersistentVolumeClaim is a user's request for and claim to a persistent volume -type PersistentVolumeClaim struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the desired characteristics of a volume requested by a pod author. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - Spec PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status represents the current information/status of a persistent volume claim. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - Status PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// PersistentVolumeClaimList is a list of PersistentVolumeClaim items. -type PersistentVolumeClaimList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // A list of persistent volume claims. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// PersistentVolumeClaimSpec describes the common attributes of storage devices -// and allows a Source for provider-specific attributes -type PersistentVolumeClaimSpec struct { - // AccessModes contains the desired access modes the volume should have. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#access-modes-1 - AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` - // A label query over volumes to consider for binding. - Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` - // Resources represents the minimum resources the volume should have. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#resources - Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"` - // VolumeName is the binding reference to the PersistentVolume backing this claim. - VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"` -} - -// PersistentVolumeClaimStatus is the current status of a persistent volume claim. -type PersistentVolumeClaimStatus struct { - // Phase represents the current phase of PersistentVolumeClaim. - Phase PersistentVolumeClaimPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumeClaimPhase"` - // AccessModes contains the actual access modes the volume backing the PVC has. 
- // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#access-modes-1 - AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` - // Represents the actual resources of the underlying volume. - Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` -} - -type PersistentVolumeAccessMode string - -const ( - // can be mounted read/write mode to exactly 1 host - ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" - // can be mounted in read-only mode to many hosts - ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" - // can be mounted in read/write mode to many hosts - ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany" -) - -type PersistentVolumePhase string - -const ( - // used for PersistentVolumes that are not available - VolumePending PersistentVolumePhase = "Pending" - // used for PersistentVolumes that are not yet bound - // Available volumes are held by the binder and matched to PersistentVolumeClaims - VolumeAvailable PersistentVolumePhase = "Available" - // used for PersistentVolumes that are bound - VolumeBound PersistentVolumePhase = "Bound" - // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted - // released volumes must be recycled before becoming available again - // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource - VolumeReleased PersistentVolumePhase = "Released" - // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim - VolumeFailed PersistentVolumePhase = "Failed" -) - -type PersistentVolumeClaimPhase string - -const ( - // used for PersistentVolumeClaims that are not yet bound - ClaimPending PersistentVolumeClaimPhase = "Pending" - // used for PersistentVolumeClaims that are bound - ClaimBound PersistentVolumeClaimPhase = "Bound" - // used for PersistentVolumeClaims that lost their underlying - // PersistentVolume. The claim was bound to a PersistentVolume and this - // volume does not exist any longer and all data on it was lost. - ClaimLost PersistentVolumeClaimPhase = "Lost" -) - -// Represents a host path mapped into a pod. -// Host path volumes do not support ownership management or SELinux relabeling. -type HostPathVolumeSource struct { - // Path of the directory on the host. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#hostpath - Path string `json:"path" protobuf:"bytes,1,opt,name=path"` -} - -// Represents an empty directory for a pod. -// Empty directory volumes support ownership management and SELinux relabeling. -type EmptyDirVolumeSource struct { - // What type of storage medium should back this directory. - // The default is "" which means to use the node's default medium. - // Must be an empty string (default) or Memory. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#emptydir - Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"` -} - -// Represents a Glusterfs mount that lasts the lifetime of a pod. -// Glusterfs volumes do not support ownership management or SELinux relabeling. -type GlusterfsVolumeSource struct { - // EndpointsName is the endpoint name that details Glusterfs topology. 
- // More info: http://releases.k8s.io/release-1.3/examples/glusterfs/README.md#create-a-pod - EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` - - // Path is the Glusterfs volume path. - // More info: http://releases.k8s.io/release-1.3/examples/glusterfs/README.md#create-a-pod - Path string `json:"path" protobuf:"bytes,2,opt,name=path"` - - // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. - // Defaults to false. - // More info: http://releases.k8s.io/release-1.3/examples/glusterfs/README.md#create-a-pod - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` -} - -// Represents a Rados Block Device mount that lasts the lifetime of a pod. -// RBD volumes support ownership management and SELinux relabeling. -type RBDVolumeSource struct { - // A collection of Ceph monitors. - // More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md#how-to-use-it - CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` - // The rados image name. - // More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md#how-to-use-it - RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#rbd - // TODO: how do we prevent errors in the filesystem from compromising the machine - FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` - // The rados pool name. - // Default is rbd. - // More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md#how-to-use-it. - RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` - // The rados user name. - // Default is admin. - // More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md#how-to-use-it - RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` - // Keyring is the path to key ring for RBDUser. - // Default is /etc/ceph/keyring. - // More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md#how-to-use-it - Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` - // SecretRef is name of the authentication secret for RBDUser. If provided - // overrides keyring. - // Default is nil. - // More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md#how-to-use-it - SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. - // More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md#how-to-use-it - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` -} - -// Represents a cinder volume resource in Openstack. -// A Cinder volume must exist before mounting to a container. -// The volume must also be in the same region as the kubelet. -// Cinder volumes support ownership management and SELinux relabeling. -type CinderVolumeSource struct { - // volume id used to identify the volume in cinder - // More info: http://releases.k8s.io/release-1.3/examples/mysql-cinder-pd/README.md - VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` - // Filesystem type to mount. 
- // Must be a filesystem type supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.3/examples/mysql-cinder-pd/README.md - FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // More info: http://releases.k8s.io/release-1.3/examples/mysql-cinder-pd/README.md - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` -} - -// Represents a Ceph Filesystem mount that lasts the lifetime of a pod -// Cephfs volumes do not support ownership management or SELinux relabeling. -type CephFSVolumeSource struct { - // Required: Monitors is a collection of Ceph monitors - // More info: http://releases.k8s.io/release-1.3/examples/cephfs/README.md#how-to-use-it - Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / - Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` - // Optional: User is the rados user name, default is admin - // More info: http://releases.k8s.io/release-1.3/examples/cephfs/README.md#how-to-use-it - User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: http://releases.k8s.io/release-1.3/examples/cephfs/README.md#how-to-use-it - SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: http://releases.k8s.io/release-1.3/examples/cephfs/README.md#how-to-use-it - SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // More info: http://releases.k8s.io/release-1.3/examples/cephfs/README.md#how-to-use-it - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` -} - -// Represents a Flocker volume mounted by the Flocker agent. -// Flocker volumes do not support ownership management or SELinux relabeling. -type FlockerVolumeSource struct { - // Required: the volume name. This is going to be store on metadata -> name on the payload for Flocker - DatasetName string `json:"datasetName" protobuf:"bytes,1,opt,name=datasetName"` -} - -// StorageMedium defines ways that storage can be allocated to a volume. -type StorageMedium string - -const ( - StorageMediumDefault StorageMedium = "" // use whatever the default is for the node - StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) -) - -// Protocol defines network protocols supported for things like container ports. -type Protocol string - -const ( - // ProtocolTCP is the TCP protocol. - ProtocolTCP Protocol = "TCP" - // ProtocolUDP is the UDP protocol. - ProtocolUDP Protocol = "UDP" -) - -// Represents a Persistent Disk resource in Google Compute Engine. -// -// A GCE PD must exist before mounting to a container. The disk must -// also be in the same GCE project and zone as the kubelet. A GCE PD -// can only be mounted as read/write once or read-only many times. GCE -// PDs support ownership management and SELinux relabeling. 
-type GCEPersistentDiskVolumeSource struct { - // Unique name of the PD resource in GCE. Used to identify the disk in GCE. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#gcepersistentdisk - PDName string `json:"pdName" protobuf:"bytes,1,opt,name=pdName"` - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#gcepersistentdisk - // TODO: how do we prevent errors in the filesystem from compromising the machine - FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` - // The partition in the volume that you want to mount. - // If omitted, the default is to mount by volume name. - // Examples: For volume /dev/sda1, you specify the partition as "1". - // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#gcepersistentdisk - Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"` - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#gcepersistentdisk - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` -} - -// FlexVolume represents a generic volume resource that is -// provisioned/attached using a exec based plugin. This is an alpha feature and may change in future. -type FlexVolumeSource struct { - // Driver is the name of the driver to use for this volume. - Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` - // Optional: SecretRef is reference to the secret object containing - // sensitive information to pass to the plugin scripts. This may be - // empty if no secret object is specified. If the secret object - // contains more than one secret, all secrets are passed to the plugin - // scripts. - SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"` - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` - // Optional: Extra command options if any. - Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"` -} - -// Represents a Persistent Disk resource in AWS. -// -// An AWS EBS disk must exist before mounting to a container. The disk -// must also be in the same AWS zone as the kubelet. An AWS EBS disk -// can only be mounted as read/write once. AWS EBS volumes support -// ownership management and SELinux relabeling. -type AWSElasticBlockStoreVolumeSource struct { - // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#awselasticblockstore - VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` - // Filesystem type of the volume that you want to mount. 
- // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#awselasticblockstore - // TODO: how do we prevent errors in the filesystem from compromising the machine - FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` - // The partition in the volume that you want to mount. - // If omitted, the default is to mount by volume name. - // Examples: For volume /dev/sda1, you specify the partition as "1". - // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"` - // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". - // If omitted, the default is "false". - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#awselasticblockstore - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` -} - -// Represents a volume that is populated with the contents of a git repository. -// Git repo volumes do not support ownership management. -// Git repo volumes support SELinux relabeling. -type GitRepoVolumeSource struct { - // Repository URL - Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"` - // Commit hash for the specified revision. - Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"` - // Target directory name. - // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the - // git repository. Otherwise, if specified, the volume will contain the git repository in - // the subdirectory with the given name. - Directory string `json:"directory,omitempty" protobuf:"bytes,3,opt,name=directory"` -} - -// Adapts a Secret into a volume. -// -// The contents of the target Secret's Data field will be presented in a volume -// as files using the keys in the Data field as the file names. -// Secret volumes support ownership management and SELinux relabeling. -type SecretVolumeSource struct { - // Name of the secret in the pod's namespace to use. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#secrets - SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"` - // If unspecified, each key-value pair in the Data field of the referenced - // Secret will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the Secret, - // the volume setup will error. Paths must be relative and may not contain - // the '..' path or start with '..'. - Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` -} - -// Represents an NFS mount that lasts the lifetime of a pod. -// NFS volumes do not support ownership management or SELinux relabeling. -type NFSVolumeSource struct { - // Server is the hostname or IP address of the NFS server. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#nfs - Server string `json:"server" protobuf:"bytes,1,opt,name=server"` - - // Path that is exported by the NFS server. 
- // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#nfs - Path string `json:"path" protobuf:"bytes,2,opt,name=path"` - - // ReadOnly here will force - // the NFS export to be mounted with read-only permissions. - // Defaults to false. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#nfs - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` -} - -// Represents an ISCSI disk. -// ISCSI volumes can only be mounted as read/write once. -// ISCSI volumes support ownership management and SELinux relabeling. -type ISCSIVolumeSource struct { - // iSCSI target portal. The portal is either an IP or ip_addr:port if the port - // is other than default (typically TCP ports 860 and 3260). - TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"` - // Target iSCSI Qualified Name. - IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"` - // iSCSI target lun number. - Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"` - // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. - ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#iscsi - // TODO: how do we prevent errors in the filesystem from compromising the machine - FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"` - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` -} - -// Represents a Fibre Channel volume. -// Fibre Channel volumes can only be mounted as read/write once. -// Fibre Channel volumes support ownership management and SELinux relabeling. -type FCVolumeSource struct { - // Required: FC target world wide names (WWNs) - TargetWWNs []string `json:"targetWWNs" protobuf:"bytes,1,rep,name=targetWWNs"` - // Required: FC target lun number - Lun *int32 `json:"lun" protobuf:"varint,2,opt,name=lun"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` -} - -// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. -type AzureFileVolumeSource struct { - // the name of secret that contains Azure Storage Account Name and Key - SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"` - // Share Name - ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"` - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` -} - -// Represents a vSphere volume resource. 
-type VsphereVirtualDiskVolumeSource struct { - // Path that identifies vSphere volume vmdk - VolumePath string `json:"volumePath" protobuf:"bytes,1,opt,name=volumePath"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` -} - -// Adapts a ConfigMap into a volume. -// -// The contents of the target ConfigMap's Data field will be presented in a -// volume as files using the keys in the Data field as the file names, unless -// the items element is populated with specific mappings of keys to paths. -// ConfigMap volumes support ownership management and SELinux relabeling. -type ConfigMapVolumeSource struct { - LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` - // If unspecified, each key-value pair in the Data field of the referenced - // ConfigMap will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error. Paths must be relative and may not contain - // the '..' path or start with '..'. - Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` -} - -// Maps a string key to a path within a volume. -type KeyToPath struct { - // The key to project. - Key string `json:"key" protobuf:"bytes,1,opt,name=key"` - - // The relative path of the file to map the key to. - // May not be an absolute path. - // May not contain the path element '..'. - // May not start with the string '..'. - Path string `json:"path" protobuf:"bytes,2,opt,name=path"` -} - -// ContainerPort represents a network port in a single container. -type ContainerPort struct { - // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each - // named port in a pod must have a unique name. Name for the port that can be - // referred to by services. - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - // Number of port to expose on the host. - // If specified, this must be a valid port number, 0 < x < 65536. - // If HostNetwork is specified, this must match ContainerPort. - // Most containers do not need this. - HostPort int32 `json:"hostPort,omitempty" protobuf:"varint,2,opt,name=hostPort"` - // Number of port to expose on the pod's IP address. - // This must be a valid port number, 0 < x < 65536. - ContainerPort int32 `json:"containerPort" protobuf:"varint,3,opt,name=containerPort"` - // Protocol for port. Must be UDP or TCP. - // Defaults to "TCP". - Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"` - // What host IP to bind the external port to. - HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"` -} - -// VolumeMount describes a mounting of a Volume within a container. -type VolumeMount struct { - // This must match the Name of a Volume. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Mounted read-only if true, read-write otherwise (false or unspecified). - // Defaults to false. - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` - // Path within the container at which the volume should be mounted. Must - // not contain ':'. 
- MountPath string `json:"mountPath" protobuf:"bytes,3,opt,name=mountPath"` - // Path within the volume from which the container's volume should be mounted. - // Defaults to "" (volume's root). - SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"` -} - -// EnvVar represents an environment variable present in a Container. -type EnvVar struct { - // Name of the environment variable. Must be a C_IDENTIFIER. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - - // Optional: no more than one of the following may be specified. - - // Variable references $(VAR_NAME) are expanded - // using the previous defined environment variables in the container and - // any service environment variables. If a variable cannot be resolved, - // the reference in the input string will be unchanged. The $(VAR_NAME) - // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped - // references will never be expanded, regardless of whether the variable - // exists or not. - // Defaults to "". - Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` - // Source for the environment variable's value. Cannot be used if value is not empty. - ValueFrom *EnvVarSource `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"` -} - -// EnvVarSource represents a source for the value of an EnvVar. -type EnvVarSource struct { - // Selects a field of the pod; only name and namespace are supported. - FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"` - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,2,opt,name=resourceFieldRef"` - // Selects a key of a ConfigMap. - ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,3,opt,name=configMapKeyRef"` - // Selects a key of a secret in the pod's namespace - SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"` -} - -// ObjectFieldSelector selects an APIVersioned field of an object. -type ObjectFieldSelector struct { - // Version of the schema the FieldPath is written in terms of, defaults to "v1". - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"` - // Path of the field to select in the specified API version. - FieldPath string `json:"fieldPath" protobuf:"bytes,2,opt,name=fieldPath"` -} - -// ResourceFieldSelector represents container resources (cpu, memory) and their output format -type ResourceFieldSelector struct { - // Container name: required for volumes, optional for env vars - ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"` - // Required: resource to select - Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"` - // Specifies the output format of the exposed resources, defaults to "1" - Divisor resource.Quantity `json:"divisor,omitempty" protobuf:"bytes,3,opt,name=divisor"` -} - -// Selects a key from a ConfigMap. -type ConfigMapKeySelector struct { - // The ConfigMap to select from. - LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` - // The key to select. - Key string `json:"key" protobuf:"bytes,2,opt,name=key"` -} - -// SecretKeySelector selects a key of a Secret. 
-type SecretKeySelector struct { - // The name of the secret in the pod's namespace to select from. - LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` - // The key of the secret to select from. Must be a valid secret key. - Key string `json:"key" protobuf:"bytes,2,opt,name=key"` -} - -// HTTPHeader describes a custom header to be used in HTTP probes -type HTTPHeader struct { - // The header field name - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // The header field value - Value string `json:"value" protobuf:"bytes,2,opt,name=value"` -} - -// HTTPGetAction describes an action based on HTTP Get requests. -type HTTPGetAction struct { - // Path to access on the HTTP server. - Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - // Name or number of the port to access on the container. - // Number must be in the range 1 to 65535. - // Name must be an IANA_SVC_NAME. - Port intstr.IntOrString `json:"port" protobuf:"bytes,2,opt,name=port"` - // Host name to connect to, defaults to the pod IP. You probably want to set - // "Host" in httpHeaders instead. - Host string `json:"host,omitempty" protobuf:"bytes,3,opt,name=host"` - // Scheme to use for connecting to the host. - // Defaults to HTTP. - Scheme URIScheme `json:"scheme,omitempty" protobuf:"bytes,4,opt,name=scheme,casttype=URIScheme"` - // Custom headers to set in the request. HTTP allows repeated headers. - HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty" protobuf:"bytes,5,rep,name=httpHeaders"` -} - -// URIScheme identifies the scheme used for connection to a host for Get actions -type URIScheme string - -const ( - // URISchemeHTTP means that the scheme used will be http:// - URISchemeHTTP URIScheme = "HTTP" - // URISchemeHTTPS means that the scheme used will be https:// - URISchemeHTTPS URIScheme = "HTTPS" -) - -// TCPSocketAction describes an action based on opening a socket -type TCPSocketAction struct { - // Number or name of the port to access on the container. - // Number must be in the range 1 to 65535. - // Name must be an IANA_SVC_NAME. - Port intstr.IntOrString `json:"port" protobuf:"bytes,1,opt,name=port"` -} - -// ExecAction describes a "run in container" action. -type ExecAction struct { - // Command is the command line to execute inside the container, the working directory for the - // command is root ('/') in the container's filesystem. The command is simply exec'd, it is - // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - // a shell, you need to explicitly call out to that shell. - // Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"` -} - -// Probe describes a health check to be performed against a container to determine whether it is -// alive or ready to receive traffic. -type Probe struct { - // The action taken to determine the health of a container - Handler `json:",inline" protobuf:"bytes,1,opt,name=handler"` - // Number of seconds after the container has started before liveness probes are initiated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#container-probes - InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty" protobuf:"varint,2,opt,name=initialDelaySeconds"` - // Number of seconds after which the probe times out. - // Defaults to 1 second. Minimum value is 1. 
- // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#container-probes - TimeoutSeconds int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"` - // How often (in seconds) to perform the probe. - // Default to 10 seconds. Minimum value is 1. - PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"` - // Minimum consecutive successes for the probe to be considered successful after having failed. - // Defaults to 1. Must be 1 for liveness. Minimum value is 1. - SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"` - // Minimum consecutive failures for the probe to be considered failed after having succeeded. - // Defaults to 3. Minimum value is 1. - FailureThreshold int32 `json:"failureThreshold,omitempty" protobuf:"varint,6,opt,name=failureThreshold"` -} - -// PullPolicy describes a policy for if/when to pull a container image -type PullPolicy string - -const ( - // PullAlways means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. - PullAlways PullPolicy = "Always" - // PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present - PullNever PullPolicy = "Never" - // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. - PullIfNotPresent PullPolicy = "IfNotPresent" -) - -// Capability represent POSIX capabilities type -type Capability string - -// Adds and removes POSIX capabilities from running containers. -type Capabilities struct { - // Added capabilities - Add []Capability `json:"add,omitempty" protobuf:"bytes,1,rep,name=add,casttype=Capability"` - // Removed capabilities - Drop []Capability `json:"drop,omitempty" protobuf:"bytes,2,rep,name=drop,casttype=Capability"` -} - -// ResourceRequirements describes the compute resource requirements. -type ResourceRequirements struct { - // Limits describes the maximum amount of compute resources allowed. - // More info: http://releases.k8s.io/release-1.3/docs/design/resources.md#resource-specifications - Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"` - // Requests describes the minimum amount of compute resources required. - // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value. - // More info: http://releases.k8s.io/release-1.3/docs/design/resources.md#resource-specifications - Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"` -} - -const ( - // TerminationMessagePathDefault means the default path to capture the application termination message running in a container - TerminationMessagePathDefault string = "/dev/termination-log" -) - -// A single application container that you want to run within a pod. -type Container struct { - // Name of the container specified as a DNS_LABEL. - // Each container in a pod must have a unique name (DNS_LABEL). - // Cannot be updated. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Docker image name. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/images.md - Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` - // Entrypoint array. Not executed within a shell. 
- // The docker image's ENTRYPOINT is used if this is not provided. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/containers.md#containers-and-commands - Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` - // Arguments to the entrypoint. - // The docker image's CMD is used if this is not provided. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/containers.md#containers-and-commands - Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` - // Container's working directory. - // If not specified, the container runtime's default will be used, which - // might be configured in the container image. - // Cannot be updated. - WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` - // List of ports to expose from the container. Exposing a port here gives - // the system additional information about the network connections a - // container uses, but is primarily informational. Not specifying a port here - // DOES NOT prevent that port from being exposed. Any port which is - // listening on the default "0.0.0.0" address inside a container will be - // accessible from the network. - // Cannot be updated. - Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` - // List of environment variables to set in the container. - // Cannot be updated. - Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` - // Compute Resources required by this container. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#resources - Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` - // Pod volumes to mount into the container's filesystem. - // Cannot be updated. - VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,9,rep,name=volumeMounts"` - // Periodic probe of container liveness. - // Container will be restarted if the probe fails. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#container-probes - LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` - // Periodic probe of container service readiness. - // Container will be removed from service endpoints if the probe fails. - // Cannot be updated. 
- // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#container-probes - ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` - // Actions that the management system should take in response to container lifecycle events. - // Cannot be updated. - Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` - // Optional: Path at which the file to which the container's termination message - // will be written is mounted into the container's filesystem. - // Message written is intended to be brief final status, such as an assertion failure message. - // Defaults to /dev/termination-log. - // Cannot be updated. - TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` - // Image pull policy. - // One of Always, Never, IfNotPresent. - // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/images.md#updating-images - ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` - // Security options the pod should run with. - // More info: http://releases.k8s.io/release-1.3/docs/design/security_context.md - SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` - - // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) - // and shouldn't be used for general purpose containers. - - // Whether this container should allocate a buffer for stdin in the container runtime. If this - // is not set, reads from stdin in the container will always result in EOF. - // Default is false. - Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` - // Whether the container runtime should close the stdin channel after it has been opened by - // a single attach. When stdin is true the stdin stream will remain open across multiple attach - // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the - // first client attaches to stdin, and then remains open and accepts data until the client disconnects, - // at which time stdin is closed and remains closed until the container is restarted. If this - // flag is false, a container processes that reads from stdin will never receive an EOF. - // Default is false - StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` - // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - // Default is false. - TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` -} - -// Handler defines a specific action that should be taken -// TODO: pass structured data to these actions, and document that data here. -type Handler struct { - // One and only one of the following should be specified. - // Exec specifies the action to take. - Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"` - // HTTPGet specifies the http request to perform. - HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"` - // TCPSocket specifies an action involving a TCP port. 
- // TCP hooks not yet supported - // TODO: implement a realistic TCP lifecycle hook - TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` -} - -// Lifecycle describes actions that the management system should take in response to container lifecycle -// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks -// until the action is complete, unless the container process fails, in which case the handler is aborted. -type Lifecycle struct { - // PostStart is called immediately after a container is created. If the handler fails, - // the container is terminated and restarted according to its restart policy. - // Other management of the container blocks until the hook completes. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/container-environment.md#hook-details - PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"` - // PreStop is called immediately before a container is terminated. - // The container is terminated after the handler completes. - // The reason for termination is passed to the handler. - // Regardless of the outcome of the handler, the container is eventually terminated. - // Other management of the container blocks until the hook completes. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/container-environment.md#hook-details - PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"` -} - -type ConditionStatus string - -// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. -// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes -// can't decide if a resource is in the condition or not. In the future, we could add other -// intermediate conditions, e.g. ConditionDegraded. -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -// ContainerStateWaiting is a waiting state of a container. -type ContainerStateWaiting struct { - // (brief) reason the container is not yet running. - Reason string `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason"` - // Message regarding why the container is not yet running. - Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` -} - -// ContainerStateRunning is a running state of a container. -type ContainerStateRunning struct { - // Time at which the container was last (re-)started - StartedAt unversioned.Time `json:"startedAt,omitempty" protobuf:"bytes,1,opt,name=startedAt"` -} - -// ContainerStateTerminated is a terminated state of a container. 
-type ContainerStateTerminated struct { - // Exit status from the last termination of the container - ExitCode int32 `json:"exitCode" protobuf:"varint,1,opt,name=exitCode"` - // Signal from the last termination of the container - Signal int32 `json:"signal,omitempty" protobuf:"varint,2,opt,name=signal"` - // (brief) reason from the last termination of the container - Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` - // Message regarding the last termination of the container - Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` - // Time at which previous execution of the container started - StartedAt unversioned.Time `json:"startedAt,omitempty" protobuf:"bytes,5,opt,name=startedAt"` - // Time at which the container last terminated - FinishedAt unversioned.Time `json:"finishedAt,omitempty" protobuf:"bytes,6,opt,name=finishedAt"` - // Container's ID in the format 'docker://<container_id>' - ContainerID string `json:"containerID,omitempty" protobuf:"bytes,7,opt,name=containerID"` -} - -// ContainerState holds a possible state of container. -// Only one of its members may be specified. -// If none of them is specified, the default one is ContainerStateWaiting. -type ContainerState struct { - // Details about a waiting container - Waiting *ContainerStateWaiting `json:"waiting,omitempty" protobuf:"bytes,1,opt,name=waiting"` - // Details about a running container - Running *ContainerStateRunning `json:"running,omitempty" protobuf:"bytes,2,opt,name=running"` - // Details about a terminated container - Terminated *ContainerStateTerminated `json:"terminated,omitempty" protobuf:"bytes,3,opt,name=terminated"` -} - -// ContainerStatus contains details for the current status of this container. -type ContainerStatus struct { - // This must be a DNS_LABEL. Each container in a pod must have a unique name. - // Cannot be updated. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Details about the container's current condition. - State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"` - // Details about the container's last termination condition. - LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"` - // Specifies whether the container has passed its readiness probe. - Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"` - // The number of times the container has been restarted, currently based on - // the number of dead containers that have not yet been removed. - // Note that this is calculated from dead containers. But those containers are subject to - // garbage collection. This value will get capped at 5 by GC. - RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"` - // The image the container is running. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/images.md - // TODO(dchen1107): Which image the container is running with? - Image string `json:"image" protobuf:"bytes,6,opt,name=image"` - // ImageID of the container's image. - ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"` - // Container's ID in the format 'docker://<container_id>'. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/container-environment.md#container-information - ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"` -} - -// PodPhase is a label for the condition of a pod at the current time. -type PodPhase string - -// These are the valid statuses of pods. 
-const ( - // PodPending means the pod has been accepted by the system, but one or more of the containers - // has not been started. This includes time before being bound to a node, as well as time spent - // pulling images onto the host. - PodPending PodPhase = "Pending" - // PodRunning means the pod has been bound to a node and all of the containers have been started. - // At least one container is still running or is in the process of being restarted. - PodRunning PodPhase = "Running" - // PodSucceeded means that all containers in the pod have voluntarily terminated - // with a container exit code of 0, and the system is not going to restart any of these containers. - PodSucceeded PodPhase = "Succeeded" - // PodFailed means that all containers in the pod have terminated, and at least one container has - // terminated in a failure (exited with a non-zero exit code or was stopped by the system). - PodFailed PodPhase = "Failed" - // PodUnknown means that for some reason the state of the pod could not be obtained, typically due - // to an error in communicating with the host of the pod. - PodUnknown PodPhase = "Unknown" -) - -// PodConditionType is a valid value for PodCondition.Type -type PodConditionType string - -// These are valid conditions of pod. -const ( - // PodScheduled represents status of the scheduling process for this pod. - PodScheduled PodConditionType = "PodScheduled" - // PodReady means the pod is able to service requests and should be added to the - // load balancing pools of all matching services. - PodReady PodConditionType = "Ready" -) - -// PodCondition contains details for the current condition of this pod. -type PodCondition struct { - // Type is the type of the condition. - // Currently only Ready. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#pod-conditions - Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"` - // Status is the status of the condition. - // Can be True, False, Unknown. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#pod-conditions - Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` - // Last time we probed the condition. - LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` - // Last time the condition transitioned from one status to another. - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` - // Unique, one-word, CamelCase reason for the condition's last transition. - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - // Human-readable message indicating details about last transition. - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` -} - -// RestartPolicy describes how the container should be restarted. -// Only one of the following restart policies may be specified. -// If none of the following policies is specified, the default one -// is RestartPolicyAlways. -type RestartPolicy string - -const ( - RestartPolicyAlways RestartPolicy = "Always" - RestartPolicyOnFailure RestartPolicy = "OnFailure" - RestartPolicyNever RestartPolicy = "Never" -) - -// DNSPolicy defines how a pod's DNS will be configured. -type DNSPolicy string - -const ( - // DNSClusterFirst indicates that the pod should use cluster DNS - // first, if it is available, then fall back on the default (as - // determined by kubelet) DNS settings. 
- DNSClusterFirst DNSPolicy = "ClusterFirst" - - // DNSDefault indicates that the pod should use the default (as - // determined by kubelet) DNS settings. - DNSDefault DNSPolicy = "Default" - - DefaultTerminationGracePeriodSeconds = 30 -) - -// A node selector represents the union of the results of one or more label queries -// over a set of nodes; that is, it represents the OR of the selectors represented -// by the node selector terms. -type NodeSelector struct { - //Required. A list of node selector terms. The terms are ORed. - NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"` -} - -// A null or empty node selector term matches no objects. -type NodeSelectorTerm struct { - //Required. A list of node selector requirements. The requirements are ANDed. - MatchExpressions []NodeSelectorRequirement `json:"matchExpressions" protobuf:"bytes,1,rep,name=matchExpressions"` -} - -// A node selector requirement is a selector that contains values, a key, and an operator -// that relates the key and values. -type NodeSelectorRequirement struct { - // The label key that the selector applies to. - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` - // Represents a key's relationship to a set of values. - // Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - Operator NodeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=NodeSelectorOperator"` - // An array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. If the operator is Gt or Lt, the values - // array must have a single element, which will be interpreted as an integer. - // This array is replaced during a strategic merge patch. - Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` -} - -// A node selector operator is the set of operators that can be used in -// a node selector requirement. -type NodeSelectorOperator string - -const ( - NodeSelectorOpIn NodeSelectorOperator = "In" - NodeSelectorOpNotIn NodeSelectorOperator = "NotIn" - NodeSelectorOpExists NodeSelectorOperator = "Exists" - NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist" - NodeSelectorOpGt NodeSelectorOperator = "Gt" - NodeSelectorOpLt NodeSelectorOperator = "Lt" -) - -// Affinity is a group of affinity scheduling rules. -type Affinity struct { - // Describes node affinity scheduling rules for the pod. - NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAffinity"` - // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). - PodAffinity *PodAffinity `json:"podAffinity,omitempty" protobuf:"bytes,2,opt,name=podAffinity"` - // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). - PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty" protobuf:"bytes,3,opt,name=podAntiAffinity"` -} - -// Pod affinity is a group of inter pod affinity scheduling rules. -type PodAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. 
- // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"` - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` -} - -// Pod anti affinity is a group of inter pod anti affinity scheduling rules. -type PodAntiAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"` - // The scheduler will prefer to schedule pods to nodes that satisfy - // the anti-affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling anti-affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` -} - -// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) -type WeightedPodAffinityTerm struct { - // weight associated with matching the corresponding podAffinityTerm, - // in the range 1-100. - Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"` - // Required. A pod affinity term, associated with the corresponding weight. - PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm" protobuf:"bytes,2,opt,name=podAffinityTerm"` -} - -// Defines a set of pods (namely those matching the labelSelector - // relative to the given namespace(s)) that this pod should be - // co-located (affinity) or not co-located (anti-affinity) with, - // where co-located is defined as running on a node whose value of - // the label with key <topologyKey> matches that of any node on which - // a pod of the set of pods is running -type PodAffinityTerm struct { - // A label query over a set of resources, in this case pods. - LabelSelector *unversioned.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` - // namespaces specifies which namespaces the labelSelector applies to (matches against); - // nil list means "this pod's namespace," empty list means "all namespaces" - // The json tag here is not "omitempty" since we need to distinguish nil and empty. - // See https://golang.org/pkg/encoding/json/#Marshal for more details. - Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"` - // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - // the labelSelector in the specified namespaces, where co-located is defined as running on a node - // whose value of the label with key topologyKey matches that of any node on which any of the - // selected pods is running. - // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" - // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); - // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. - TopologyKey string `json:"topologyKey,omitempty" protobuf:"bytes,3,opt,name=topologyKey"` -} - -// Node affinity is a group of node affinity scheduling rules. -type NodeAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. 
- // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to an update), the system - // will try to eventually evict the pod from its node. - // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` - - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to an update), the system - // may or may not try to eventually evict the pod from its node. - RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,opt,name=requiredDuringSchedulingIgnoredDuringExecution"` - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node matches the corresponding matchExpressions; the - // node(s) with the highest sum are the most preferred. - PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` -} - -// An empty preferred scheduling term matches all objects with implicit weight 0 -// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). -type PreferredSchedulingTerm struct { - // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"` - // A node selector term, associated with the corresponding weight. - Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"` -} - -// The node this Taint is attached to has the effect "effect" on -// any pod that that does not tolerate the Taint. -type Taint struct { - // Required. The taint key to be applied to a node. - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` - // Required. The taint value corresponding to the taint key. - Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` - // Required. The effect of the taint on pods - // that do not tolerate the taint. - // Valid effects are NoSchedule and PreferNoSchedule. - Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"` -} - -type TaintEffect string - -const ( - // Do not allow new pods to schedule onto the node unless they tolerate the taint, - // but allow all pods submitted to Kubelet without going through the scheduler - // to start, and allow all already-running pods to continue running. - // Enforced by the scheduler. 
- TaintEffectNoSchedule TaintEffect = "NoSchedule" - // Like TaintEffectNoSchedule, but the scheduler tries not to schedule - // new pods onto the node, rather than prohibiting new pods from scheduling - // onto the node entirely. Enforced by the scheduler. - TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule" - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // Do not allow new pods to schedule onto the node unless they tolerate the taint, - // do not allow pods to start on Kubelet unless they tolerate the taint, - // but allow all already-running pods to continue running. - // Enforced by the scheduler and Kubelet. - // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit" - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // Do not allow new pods to schedule onto the node unless they tolerate the taint, - // do not allow pods to start on Kubelet unless they tolerate the taint, - // and evict any already-running pods that do not tolerate the taint. - // Enforced by the scheduler and Kubelet. - // TaintEffectNoScheduleNoAdmitNoExecute = "NoScheduleNoAdmitNoExecute" -) - -// The pod this Toleration is attached to tolerates any taint that matches -// the triple <key,value,effect> using the matching operator <operator>. -type Toleration struct { - // Required. Key is the taint key that the toleration applies to. - Key string `json:"key,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` - // operator represents a key's relationship to the value. - // Valid operators are Exists and Equal. Defaults to Equal. - // Exists is equivalent to wildcard for value, so that a pod can - // tolerate all taints of a particular category. - Operator TolerationOperator `json:"operator,omitempty" protobuf:"bytes,2,opt,name=operator,casttype=TolerationOperator"` - // Value is the taint value the toleration matches to. - // If the operator is Exists, the value should be empty, otherwise just a regular string. - Value string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"` - // Effect indicates the taint effect to match. Empty means match all taint effects. - // When specified, allowed values are NoSchedule and PreferNoSchedule. - Effect TaintEffect `json:"effect,omitempty" protobuf:"bytes,4,opt,name=effect,casttype=TaintEffect"` - // TODO: For forgiveness (#1574), we'd eventually add at least a grace period - // here, and possibly an occurrence threshold and period. -} - -// A toleration operator is the set of operators that can be used in a toleration. -type TolerationOperator string - -const ( - TolerationOpExists TolerationOperator = "Exists" - TolerationOpEqual TolerationOperator = "Equal" -) - -const ( - // This annotation key will be used to contain an array of v1 JSON encoded Containers - // for init containers. The annotation will be placed into the internal type and cleared. - PodInitContainersAnnotationKey = "pod.alpha.kubernetes.io/init-containers" - // This annotation key will be used to contain an array of v1 JSON encoded - // ContainerStatuses for init containers. The annotation will be placed into the internal - // type and cleared. - PodInitContainerStatusesAnnotationKey = "pod.alpha.kubernetes.io/init-container-statuses" -) - -// PodSpec is a description of a pod. -type PodSpec struct { - // List of volumes that can be mounted by containers belonging to the pod. 
- // More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md - Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"` - // List of initialization containers belonging to the pod. - // Init containers are executed in order prior to containers being started. If any - // init container fails, the pod is considered to have failed and is handled according - // to its restartPolicy. The name for an init container or normal container must be - // unique among all containers. - // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. - // The resourceRequirements of an init container are taken into account during scheduling - // by finding the highest request/limit for each resource type, and then using the max of - // of that value or the sum of the normal containers. Limits are applied to init containers - // in a similar fashion. - // Init containers cannot currently be added or removed. - // Init containers are in alpha state and may change without notice. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/containers.md - InitContainers []Container `json:"-" patchStrategy:"merge" patchMergeKey:"name"` - // List of containers belonging to the pod. - // Containers cannot currently be added or removed. - // There must be at least one container in a Pod. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/containers.md - Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"` - // Restart policy for all containers within the pod. - // One of Always, OnFailure, Never. - // Default to Always. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#restartpolicy - RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"` - // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - // Value must be non-negative integer. The value zero indicates delete immediately. - // If this value is nil, the default grace period will be used instead. - // The grace period is the duration in seconds after the processes running in the pod are sent - // a termination signal and the time when the processes are forcibly halted with a kill signal. - // Set this value longer than the expected cleanup time for your process. - // Defaults to 30 seconds. - TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"` - // Optional duration in seconds the pod may be active on the node relative to - // StartTime before the system will actively try to mark it failed and kill associated containers. - // Value must be a positive integer. - ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"` - // Set DNS policy for containers within the pod. - // One of 'ClusterFirst' or 'Default'. - // Defaults to "ClusterFirst". - DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"` - // NodeSelector is a selector which must be true for the pod to fit on a node. - // Selector which must match a node's labels for the pod to be scheduled on that node. 
- // More info: http://releases.k8s.io/release-1.3/docs/user-guide/node-selection/README.md - NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"` - - // ServiceAccountName is the name of the ServiceAccount to use to run this pod. - // More info: http://releases.k8s.io/release-1.3/docs/design/service_accounts.md - ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"` - // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. - // Deprecated: Use serviceAccountName instead. - DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"` - - // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. - NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"` - // Host networking requested for this pod. Use the host's network namespace. - // If this option is set, the ports that will be used must be specified. - // Default to false. - HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"` - // Use the host's pid namespace. - // Optional: Default to false. - HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"` - // Use the host's ipc namespace. - // Optional: Default to false. - HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"` - // SecurityContext holds pod-level security attributes and common container settings. - // Optional: Defaults to empty. See type description for default values of each field. - SecurityContext *PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"` - // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - // If specified, these secrets will be passed to individual puller implementations for them to use. For example, - // in the case of docker, only DockerConfig type secrets are honored. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/images.md#specifying-imagepullsecrets-on-a-pod - ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"` - // Specifies the hostname of the Pod - // If not specified, the pod's hostname will be set to a system-defined value. - Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"` - // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". - // If not specified, the pod will not have a domainname at all. - Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"` -} - -// PodSecurityContext holds pod-level security attributes and common container settings. -// Some fields are also present in container.securityContext. Field values of -// container.securityContext take precedence over field values of PodSecurityContext. -type PodSecurityContext struct { - // The SELinux context to be applied to all containers. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in SecurityContext. 
If set in - // both SecurityContext and PodSecurityContext, the value specified in SecurityContext - // takes precedence for that container. - SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"` - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence - // for that container. - RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,2,opt,name=runAsUser"` - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"` - // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID. If unspecified, no groups will be added to - // any container. - SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"` - // A special supplemental group that applies to all containers in a pod. - // Some volume types allow the Kubelet to change the ownership of that volume - // to be owned by the pod: - // - // 1. The owning GID will be the FSGroup - // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) - // 3. The permission bits are OR'd with rw-rw---- - // - // If unset, the Kubelet will not modify the ownership and permissions of any volume. - FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"` -} - -// PodStatus represents information about the status of a pod. Status may trail the actual -// state of a system. -type PodStatus struct { - // Current condition of the pod. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#pod-phase - Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"` - // Current service state of pod. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#pod-conditions - Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` - // A human readable message indicating details about why the pod is in this condition. - Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` - // A brief CamelCase message indicating details about why the pod is in this state. - // e.g. 'OutOfDisk' - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - - // IP address of the host to which the pod is assigned. Empty if not yet scheduled. - HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"` - // IP address allocated to the pod. Routable at least within the cluster. - // Empty if not yet allocated. - PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"` - - // RFC 3339 date and time at which the object was acknowledged by the Kubelet. - // This is before the Kubelet pulled the container image(s) for the pod. 
- StartTime *unversioned.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"` - - // The list has one entry per init container in the manifest. The most recent successful - // init container will have ready = true, the most recently started container will have - // startTime set. - // Init containers are in alpha state and may change without notice. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#container-statuses - InitContainerStatuses []ContainerStatus `json:"-"` - // The list has one entry per container in the manifest. Each entry is currently the output - // of `docker inspect`. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#container-statuses - ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"` -} - -// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded -type PodStatusResult struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Most recently observed status of the pod. - // This data may not be up to date. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` -} - -// +genclient=true - -// Pod is a collection of containers that can run on a host. This resource is created -// by clients and scheduled onto hosts. -type Pod struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired behavior of the pod. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Most recently observed status of the pod. - // This data may not be up to date. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// PodList is a list of Pods. -type PodList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of pods. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/pods.md - Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// PodTemplateSpec describes the data a pod should have when created from a template -type PodTemplateSpec struct { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired behavior of the pod. 
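A minimal sketch of constructing a Pod with the types above, assuming the vendored package is importable as k8s.io/kubernetes/pkg/api/v1. Note that TerminationGracePeriodSeconds is a pointer so that "unset" can be told apart from an explicit zero.

// podsketch.go: minimal sketch, assuming the vendored import path below.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1" // assumed vendored import path
)

func main() {
	grace := int64(30) // pointer field: distinguishes "unset" from an explicit 0
	pod := v1.Pod{
		ObjectMeta: v1.ObjectMeta{Name: "demo", Namespace: "default"},
		Spec: v1.PodSpec{
			RestartPolicy:                 v1.RestartPolicyAlways,
			TerminationGracePeriodSeconds: &grace,
			Containers: []v1.Container{
				{Name: "web", Image: "nginx:1.11"},
			},
		},
	}
	fmt.Printf("%s/%s restartPolicy=%s\n",
		pod.ObjectMeta.Namespace, pod.ObjectMeta.Name, pod.Spec.RestartPolicy)
}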
- // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// +genclient=true - -// PodTemplate describes a template for creating copies of a predefined pod. -type PodTemplate struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Template defines the pods that will be created from this pod template. - // http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` -} - -// PodTemplateList is a list of PodTemplates. -type PodTemplateList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of pod templates - Items []PodTemplate `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// ReplicationControllerSpec is the specification of a replication controller. -type ReplicationControllerSpec struct { - // Replicas is the number of desired replicas. - // This is a pointer to distinguish between explicit zero and unspecified. - // Defaults to 1. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#what-is-a-replication-controller - Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` - - // Selector is a label query over pods that should match the Replicas count. - // If Selector is empty, it is defaulted to the labels present on the Pod template. - // Label keys and values that must match in order to be controlled by this replication - // controller, if empty defaulted to labels on Pod template. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` - - // TemplateRef is a reference to an object that describes the pod that will be created if - // insufficient replicas are detected. - // Reference to an object that describes the pod that will be created if insufficient replicas are detected. - // TemplateRef *ObjectReference `json:"templateRef,omitempty"` - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. This takes precedence over a TemplateRef. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#pod-template - Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` -} - -// ReplicationControllerStatus represents the current status of a replication -// controller. -type ReplicationControllerStatus struct { - // Replicas is the most recently oberved number of replicas. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#what-is-a-replication-controller - Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - - // The number of pods that have labels matching the labels of the pod template of the replication controller. 
- FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` - - // ObservedGeneration reflects the generation of the most recently observed replication controller. - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` -} - -// +genclient=true - -// ReplicationController represents the configuration of a replication controller. -type ReplicationController struct { - unversioned.TypeMeta `json:",inline"` - - // If the Labels of a ReplicationController are empty, they are defaulted to - // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the specification of the desired behavior of the replication controller. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is the most recently observed status of the replication controller. - // This data may be out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// ReplicationControllerList is a collection of replication controllers. -type ReplicationControllerList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of replication controllers. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md - Items []ReplicationController `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// Session Affinity Type string -type ServiceAffinity string - -const ( - // ServiceAffinityClientIP is the Client IP based. - ServiceAffinityClientIP ServiceAffinity = "ClientIP" - - // ServiceAffinityNone - no session affinity. - ServiceAffinityNone ServiceAffinity = "None" -) - -// Service Type string describes ingress methods for a service -type ServiceType string - -const ( - // ServiceTypeClusterIP means a service will only be accessible inside the - // cluster, via the cluster IP. - ServiceTypeClusterIP ServiceType = "ClusterIP" - - // ServiceTypeNodePort means a service will be exposed on one port of - // every node, in addition to 'ClusterIP' type. - ServiceTypeNodePort ServiceType = "NodePort" - - // ServiceTypeLoadBalancer means a service will be exposed via an - // external load balancer (if the cloud provider supports it), in addition - // to 'NodePort' type. - ServiceTypeLoadBalancer ServiceType = "LoadBalancer" -) - -// ServiceStatus represents the current status of a service. -type ServiceStatus struct { - // LoadBalancer contains the current status of the load-balancer, - // if one is present. - LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` -} - -// LoadBalancerStatus represents the status of a load-balancer. -type LoadBalancerStatus struct { - // Ingress is a list containing ingress points for the load-balancer. 
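The Replicas field above is a *int32 precisely so that an explicit zero (scale to nothing) can be distinguished from an unspecified value (which defaults to 1). A short sketch, assuming the vendored import path k8s.io/kubernetes/pkg/api/v1:

// rcsketch.go: why ReplicationControllerSpec.Replicas is a pointer.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1" // assumed vendored import path
)

func describeReplicas(spec v1.ReplicationControllerSpec) string {
	if spec.Replicas == nil {
		return "unspecified (defaults to 1)"
	}
	return fmt.Sprintf("explicitly %d", *spec.Replicas)
}

func main() {
	zero := int32(0)
	fmt.Println(describeReplicas(v1.ReplicationControllerSpec{}))                // unspecified (defaults to 1)
	fmt.Println(describeReplicas(v1.ReplicationControllerSpec{Replicas: &zero})) // explicitly 0
}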
- // Traffic intended for the service should be sent to these ingress points. - Ingress []LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` -} - -// LoadBalancerIngress represents the status of a load-balancer ingress point: -// traffic intended for the service should be sent to an ingress point. -type LoadBalancerIngress struct { - // IP is set for load-balancer ingress points that are IP based - // (typically GCE or OpenStack load-balancers) - IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` - - // Hostname is set for load-balancer ingress points that are DNS based - // (typically AWS load-balancers) - Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` -} - -// ServiceSpec describes the attributes that a user creates on a service. -type ServiceSpec struct { - // The list of ports that are exposed by this service. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/services.md#virtual-ips-and-service-proxies - Ports []ServicePort `json:"ports" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"` - - // This service will route traffic to pods having labels matching this selector. - // Label keys and values that must match in order to receive traffic for this service. - // If empty, all pods are selected, if not specified, endpoints must be manually specified. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/services.md#overview - Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` - - // ClusterIP is usually assigned by the master and is the IP address of the service. - // If specified, it will be allocated to the service if it is unused - // or else creation of the service will fail. - // Valid values are None, empty string (""), or a valid IP address. - // 'None' can be specified for a headless service when proxying is not required. - // Cannot be updated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/services.md#virtual-ips-and-service-proxies - ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"` - - // Type of exposed service. Must be ClusterIP, NodePort, or LoadBalancer. - // Defaults to ClusterIP. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/services.md#external-services - Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"` - - // externalIPs is a list of IP addresses for which nodes in the cluster - // will also accept traffic for this service. These IPs are not managed by - // Kubernetes. The user is responsible for ensuring that traffic arrives - // at a node with this IP. A common example is external load-balancers - // that are not part of the Kubernetes system. A previous form of this - // functionality exists as the deprecatedPublicIPs field. When using this - // field, callers should also clear the deprecatedPublicIPs field. - ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"` - - // deprecatedPublicIPs is deprecated and replaced by the externalIPs field - // with almost the exact same semantics. This field is retained in the v1 - // API for compatibility until at least 8/20/2016. It will be removed from - // any new API revisions. If both deprecatedPublicIPs *and* externalIPs are - // set, deprecatedPublicIPs is used. 
- // +genconversion=false
- DeprecatedPublicIPs []string `json:"deprecatedPublicIPs,omitempty" protobuf:"bytes,6,rep,name=deprecatedPublicIPs"`
-
- // Supports "ClientIP" and "None". Used to maintain session affinity.
- // Enable client IP based session affinity.
- // Must be ClientIP or None.
- // Defaults to None.
- // More info: http://releases.k8s.io/release-1.3/docs/user-guide/services.md#virtual-ips-and-service-proxies
- SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" protobuf:"bytes,7,opt,name=sessionAffinity,casttype=ServiceAffinity"`
-
- // Only applies to Service Type: LoadBalancer
- // LoadBalancer will get created with the IP specified in this field.
- // This feature depends on whether the underlying cloud-provider supports specifying
- // the loadBalancerIP when a load balancer is created.
- // This field will be ignored if the cloud-provider does not support the feature.
- LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"`
-
- // If specified and supported by the platform, traffic through the cloud-provider
- // load-balancer will be restricted to the specified client IPs. This field will be
- // ignored if the cloud-provider does not support the feature.
- // More info: http://releases.k8s.io/release-1.3/docs/user-guide/services-firewalls.md
- LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"`
-}
-
-// ServicePort contains information on service's port.
-type ServicePort struct {
- // The name of this port within the service. This must be a DNS_LABEL.
- // All ports within a ServiceSpec must have unique names. This maps to
- // the 'Name' field in EndpointPort objects.
- // Optional if only one ServicePort is defined on this service.
- Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
-
- // The IP protocol for this port. Supports "TCP" and "UDP".
- // Default is TCP.
- Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"`
-
- // The port that will be exposed by this service.
- Port int32 `json:"port" protobuf:"varint,3,opt,name=port"`
-
- // Number or name of the port to access on the pods targeted by the service.
- // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
- // If this is a string, it will be looked up as a named port in the
- // target Pod's container ports. If this is not specified, the value
- // of the 'port' field is used (an identity map).
- // This field is ignored for services with clusterIP=None, and should be
- // omitted or set equal to the 'port' field.
- // More info: http://releases.k8s.io/release-1.3/docs/user-guide/services.md#defining-a-service
- TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"`
-
- // The port on each node on which this service is exposed when type=NodePort or LoadBalancer.
- // Usually assigned by the system. If specified, it will be allocated to the service
- // if unused or else creation of the service will fail.
- // Default is to auto-allocate a port if the ServiceType of this Service requires one.
- // More info: http://releases.k8s.io/release-1.3/docs/user-guide/services.md#type--nodeport - NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"` -} - -// +genclient=true - -// Service is a named abstraction of software service (for example, mysql) consisting of local port -// (for example 3306) that the proxy listens on, and the selector that determines which pods -// will answer requests sent through the proxy. -type Service struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the behavior of a service. - // http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Most recently observed status of the service. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -const ( - // ClusterIPNone - do not assign a cluster IP - // no proxying required and no environment variables should be created for pods - ClusterIPNone = "None" -) - -// ServiceList holds a list of services. -type ServiceList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of services - Items []Service `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient=true - -// ServiceAccount binds together: -// * a name, understood by users, and perhaps by peripheral systems, for an identity -// * a principal that can be authenticated and authorized -// * a set of secrets -type ServiceAccount struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/secrets.md - Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"` - - // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images - // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets - // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret - ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"` -} - -// ServiceAccountList is a list of ServiceAccount objects -type ServiceAccountList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of ServiceAccounts. 
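A sketch of a Service whose targetPort is a named port, illustrating the intstr.IntOrString field described above. The vendored import paths for the v1 and intstr packages are assumptions.

// svcsketch.go: Service with a named targetPort (assumed vendored imports).
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"      // assumed vendored import path
	"k8s.io/kubernetes/pkg/util/intstr" // assumed vendored import path
)

func main() {
	svc := v1.Service{
		ObjectMeta: v1.ObjectMeta{Name: "web", Namespace: "default"},
		Spec: v1.ServiceSpec{
			Type:     v1.ServiceTypeClusterIP,
			Selector: map[string]string{"app": "web"},
			Ports: []v1.ServicePort{{
				Name:       "http",
				Protocol:   v1.ProtocolTCP,
				Port:       80,
				TargetPort: intstr.FromString("http"), // looked up as a named port on the target pods
			}},
		},
	}
	p := svc.Spec.Ports[0]
	fmt.Printf("%s: port %d -> targetPort %s\n", svc.ObjectMeta.Name, p.Port, p.TargetPort.String())
}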
- // More info: http://releases.k8s.io/release-1.3/docs/design/service_accounts.md#service-accounts - Items []ServiceAccount `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient=true - -// Endpoints is a collection of endpoints that implement the actual service. Example: -// Name: "mysvc", -// Subsets: [ -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// }, -// { -// Addresses: [{"ip": "10.10.3.3"}], -// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] -// }, -// ] -type Endpoints struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // The set of all endpoints is the union of all subsets. Addresses are placed into - // subsets according to the IPs they share. A single address with multiple ports, - // some of which are ready and some of which are not (because they come from - // different containers) will result in the address being displayed in different - // subsets for the different ports. No address will appear in both Addresses and - // NotReadyAddresses in the same subset. - // Sets of addresses and ports that comprise a service. - Subsets []EndpointSubset `json:"subsets" protobuf:"bytes,2,rep,name=subsets"` -} - -// EndpointSubset is a group of addresses with a common set of ports. The -// expanded set of endpoints is the Cartesian product of Addresses x Ports. -// For example, given: -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// } -// The resulting set of endpoints can be viewed as: -// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], -// b: [ 10.10.1.1:309, 10.10.2.2:309 ] -type EndpointSubset struct { - // IP addresses which offer the related ports that are marked as ready. These endpoints - // should be considered safe for load balancers and clients to utilize. - Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"` - // IP addresses which offer the related ports but are not currently marked as ready - // because they have not yet finished starting, have recently failed a readiness check, - // or have recently failed a liveness check. - NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty" protobuf:"bytes,2,rep,name=notReadyAddresses"` - // Port numbers available on the related IP addresses. - Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"` -} - -// EndpointAddress is a tuple that describes single IP address. -type EndpointAddress struct { - // The IP of this endpoint. - // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), - // or link-local multicast ((224.0.0.0/24). - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, See #4447. - IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` - // The Hostname of this endpoint - Hostname string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` - // Reference to object providing the endpoint. - TargetRef *ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,2,opt,name=targetRef"` -} - -// EndpointPort is a tuple that describes a single port. 
-type EndpointPort struct { - // The name of this port (corresponds to ServicePort.Name). - // Must be a DNS_LABEL. - // Optional only if one port is defined. - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - - // The port number of the endpoint. - Port int32 `json:"port" protobuf:"varint,2,opt,name=port"` - - // The IP protocol for this port. - // Must be UDP or TCP. - // Default is TCP. - Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"` -} - -// EndpointsList is a list of endpoints. -type EndpointsList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of endpoints. - Items []Endpoints `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// NodeSpec describes the attributes that a node is created with. -type NodeSpec struct { - // PodCIDR represents the pod IP range assigned to the node. - PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"` - // External ID of the node assigned by some machine database (e.g. a cloud provider). - // Deprecated. - ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"` - // ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID> - ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"` - // Unschedulable controls node schedulability of new pods. By default, node is schedulable. - // More info: http://releases.k8s.io/release-1.3/docs/admin/node.md#manual-node-administration"` - Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"` -} - -// DaemonEndpoint contains information about a single Daemon endpoint. -type DaemonEndpoint struct { - /* - The port tag was not properly in quotes in earlier releases, so it must be - uppercased for backwards compat (since it was falling back to var name of - 'Port'). - */ - - // Port number of the given endpoint. - Port int32 `json:"Port" protobuf:"varint,1,opt,name=Port"` -} - -// NodeDaemonEndpoints lists ports opened by daemons running on the Node. -type NodeDaemonEndpoints struct { - // Endpoint on which Kubelet is listening. - KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"` -} - -// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. -type NodeSystemInfo struct { - // Machine ID reported by the node. - MachineID string `json:"machineID" protobuf:"bytes,1,opt,name=machineID"` - // System UUID reported by the node. - SystemUUID string `json:"systemUUID" protobuf:"bytes,2,opt,name=systemUUID"` - // Boot ID reported by the node. - BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"` - // Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64). - KernelVersion string `json:"kernelVersion" protobuf:"bytes,4,opt,name=kernelVersion"` - // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). - OSImage string `json:"osImage" protobuf:"bytes,5,opt,name=osImage"` - // ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0). - ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"` - // Kubelet Version reported by the node. 
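The EndpointSubset comment above describes the expansion as the Cartesian product of Addresses x Ports. A hypothetical helper that performs that expansion, assuming the vendored import path k8s.io/kubernetes/pkg/api/v1:

// endpoints_expand.go: Cartesian expansion of an EndpointSubset (sketch).
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1" // assumed vendored import path
)

// expand returns one "ip:port" string per (address, port) pair in the subset.
func expand(s v1.EndpointSubset) []string {
	var out []string
	for _, a := range s.Addresses {
		for _, p := range s.Ports {
			out = append(out, fmt.Sprintf("%s:%d (%s)", a.IP, p.Port, p.Name))
		}
	}
	return out
}

func main() {
	subset := v1.EndpointSubset{
		Addresses: []v1.EndpointAddress{{IP: "10.10.1.1"}, {IP: "10.10.2.2"}},
		Ports:     []v1.EndpointPort{{Name: "a", Port: 8675}, {Name: "b", Port: 309}},
	}
	for _, e := range expand(subset) {
		fmt.Println(e) // 10.10.1.1:8675 (a), 10.10.1.1:309 (b), 10.10.2.2:8675 (a), 10.10.2.2:309 (b)
	}
}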
- KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"` - // KubeProxy Version reported by the node. - KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"` - // The Operating System reported by the node - OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"` - // The Architecture reported by the node - Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"` -} - -// NodeStatus is information about the current status of a node. -type NodeStatus struct { - // Capacity represents the total resources of a node. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#capacity for more details. - Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` - // Allocatable represents the resources of a node that are available for scheduling. - // Defaults to Capacity. - Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,2,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"` - // NodePhase is the recently observed lifecycle phase of the node. - // More info: http://releases.k8s.io/release-1.3/docs/admin/node.md#node-phase - Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"` - // Conditions is an array of current observed node conditions. - // More info: http://releases.k8s.io/release-1.3/docs/admin/node.md#node-condition - Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` - // List of addresses reachable to the node. - // Queried from cloud provider, if available. - // More info: http://releases.k8s.io/release-1.3/docs/admin/node.md#node-addresses - Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"` - // Endpoints of daemons running on the Node. - DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"` - // Set of ids/uuids to uniquely identify the node. - // More info: http://releases.k8s.io/release-1.3/docs/admin/node.md#node-info - NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"` - // List of container images on this node - Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"` - // List of attachable volumes in use (mounted) by the node. - VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"` - // List of volumes that are attached to the node. - VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"` -} - -type UniqueVolumeName string - -// AttachedVolume describes a volume attached to a node -type AttachedVolume struct { - // Name of the attached volume - Name UniqueVolumeName `json:"name" protobuf:"bytes,1,rep,name=name"` - - // DevicePath represents the device path where the volume should be avilable - DevicePath string `json:"devicePath" protobuf:"bytes,2,rep,name=devicePath"` -} - -// Describe a container image -type ContainerImage struct { - // Names by which this image is known. - // e.g. 
["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] - Names []string `json:"names" protobuf:"bytes,1,rep,name=names"` - // The size of the image in bytes. - SizeBytes int64 `json:"sizeBytes,omitempty" protobuf:"varint,2,opt,name=sizeBytes"` -} - -type NodePhase string - -// These are the valid phases of node. -const ( - // NodePending means the node has been created/added by the system, but not configured. - NodePending NodePhase = "Pending" - // NodeRunning means the node has been configured and has Kubernetes components running. - NodeRunning NodePhase = "Running" - // NodeTerminated means the node has been removed from the cluster. - NodeTerminated NodePhase = "Terminated" -) - -type NodeConditionType string - -// These are valid conditions of node. Currently, we don't have enough information to decide -// node condition. In the future, we will add more. The proposed set of conditions are: -// NodeReachable, NodeLive, NodeReady, NodeSchedulable, NodeRunnable. -const ( - // NodeReady means kubelet is healthy and ready to accept pods. - NodeReady NodeConditionType = "Ready" - // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk - // space on the node. - NodeOutOfDisk NodeConditionType = "OutOfDisk" - // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. - NodeMemoryPressure NodeConditionType = "MemoryPressure" - // NodeNetworkUnavailable means that network for the node is not correctly configured. - NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" -) - -// NodeCondition contains condition infromation for a node. -type NodeCondition struct { - // Type of node condition. - Type NodeConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeConditionType"` - // Status of the condition, one of True, False, Unknown. - Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` - // Last time we got an update on a given condition. - LastHeartbeatTime unversioned.Time `json:"lastHeartbeatTime,omitempty" protobuf:"bytes,3,opt,name=lastHeartbeatTime"` - // Last time the condition transit from one status to another. - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` - // (brief) reason for the condition's last transition. - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - // Human readable message indicating details about last transition. - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` -} - -type NodeAddressType string - -// These are valid address type of node. -const ( - NodeHostName NodeAddressType = "Hostname" - NodeExternalIP NodeAddressType = "ExternalIP" - NodeInternalIP NodeAddressType = "InternalIP" -) - -// NodeAddress contains information for the node's address. -type NodeAddress struct { - // Node address type, one of Hostname, ExternalIP or InternalIP. - Type NodeAddressType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeAddressType"` - // The node address. - Address string `json:"address" protobuf:"bytes,2,opt,name=address"` -} - -// ResourceName is the name identifying various resources in a ResourceList. -type ResourceName string - -// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters, -// with the -, _, and . characters allowed anywhere, except the first or last character. 
-// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than -// camel case, separating compound words. -// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name. -const ( - // CPU, in cores. (500m = .5 cores) - ResourceCPU ResourceName = "cpu" - // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceMemory ResourceName = "memory" - // Volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) - ResourceStorage ResourceName = "storage" - // NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned. - ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu" - // Number of Pods that may be running on this Node: see ResourcePods -) - -// ResourceList is a set of (resource name, quantity) pairs. -type ResourceList map[ResourceName]resource.Quantity - -// +genclient=true,nonNamespaced=true - -// Node is a worker node in Kubernetes, formerly known as minion. -// Each node will have a unique identifier in the cache (i.e. in etcd). -type Node struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the behavior of a node. - // http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Most recently observed status of the node. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// NodeList is the whole list of all Nodes which have been registered with master. -type NodeList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of nodes - Items []Node `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -type FinalizerName string - -// These are internal finalizer values to Kubernetes, must be qualified name unless defined here -const ( - FinalizerKubernetes FinalizerName = "kubernetes" -) - -// NamespaceSpec describes the attributes on a Namespace. -type NamespaceSpec struct { - // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. - // More info: http://releases.k8s.io/release-1.3/docs/design/namespaces.md#finalizers - Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"` -} - -// NamespaceStatus is information about the current status of a Namespace. -type NamespaceStatus struct { - // Phase is the current lifecycle phase of the namespace. - // More info: http://releases.k8s.io/release-1.3/docs/design/namespaces.md#phases - Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"` -} - -type NamespacePhase string - -// These are the valid phases of a namespace. 
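A small sketch of building a ResourceList with the unit conventions noted above (500m is half a core, 1Gi is 2^30 bytes), assuming the vendored import paths k8s.io/kubernetes/pkg/api/v1 and k8s.io/kubernetes/pkg/api/resource:

// resourcelist.go: ResourceList units (sketch, assumed vendored imports).
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource" // assumed vendored import path
	"k8s.io/kubernetes/pkg/api/v1"       // assumed vendored import path
)

func main() {
	capacity := v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("500m"), // half a core
		v1.ResourceMemory: resource.MustParse("1Gi"),  // 1 * 1024 * 1024 * 1024 bytes
	}
	cpu := capacity[v1.ResourceCPU]
	mem := capacity[v1.ResourceMemory]
	fmt.Printf("cpu=%dm memory=%d bytes\n", cpu.MilliValue(), mem.Value())
}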
-const ( - // NamespaceActive means the namespace is available for use in the system - NamespaceActive NamespacePhase = "Active" - // NamespaceTerminating means the namespace is undergoing graceful termination - NamespaceTerminating NamespacePhase = "Terminating" -) - -// +genclient=true,nonNamespaced=true - -// Namespace provides a scope for Names. -// Use of multiple namespaces is optional. -type Namespace struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the behavior of the Namespace. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status describes the current status of a Namespace. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// NamespaceList is a list of Namespaces. -type NamespaceList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of Namespace objects in the list. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/namespaces.md - Items []Namespace `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// Binding ties one object to another. -// For example, a pod is bound to a node by a scheduler. -type Binding struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // The target object that you want to bind to the standard object. - Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"` -} - -// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. -type Preconditions struct { - // Specifies the target UID. - UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` -} - -// DeleteOptions may be provided when deleting an API object -type DeleteOptions struct { - unversioned.TypeMeta `json:",inline"` - - // The duration in seconds before the object should be deleted. Value must be non-negative integer. - // The value zero indicates delete immediately. If this value is nil, the default grace period for the - // specified type will be used. - // Defaults to a per object value if not specified. zero means delete immediately. - GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=gracePeriodSeconds"` - - // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be - // returned. - Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"` - - // Should the dependent objects be orphaned. If true/false, the "orphan" - // finalizer will be added to/removed from the object's finalizers list. 
- OrphanDependents *bool `json:"orphanDependents,omitempty" protobuf:"varint,3,opt,name=orphanDependents"` -} - -// ExportOptions is the query options to the standard REST get call. -type ExportOptions struct { - unversioned.TypeMeta `json:",inline"` - - // Should this value be exported. Export strips fields that a user can not specify. - Export bool `json:"export" protobuf:"varint,1,opt,name=export"` - // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace' - Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"` -} - -// ListOptions is the query options to a standard REST list call. -type ListOptions struct { - unversioned.TypeMeta `json:",inline"` - - // A selector to restrict the list of returned objects by their labels. - // Defaults to everything. - LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` - // A selector to restrict the list of returned objects by their fields. - // Defaults to everything. - FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"` - // Watch for changes to the described resources and return them as a stream of - // add, update, and remove notifications. Specify resourceVersion. - Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"` - // When specified with a watch call, shows changes that occur after that particular version of a resource. - // Defaults to changes from the beginning of history. - ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"` - // Timeout for the list/watch call. - TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"` -} - -// PodLogOptions is the query options for a Pod's logs REST call. -type PodLogOptions struct { - unversioned.TypeMeta `json:",inline"` - - // The container for which to stream logs. Defaults to only container if there is one container in the pod. - Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"` - // Follow the log stream of the pod. Defaults to false. - Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"` - // Return previous terminated container logs. Defaults to false. - Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"` - // A relative time in seconds before the current time from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"` - // An RFC3339 timestamp from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - SinceTime *unversioned.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"` - // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line - // of log output. Defaults to false. - Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"` - // If set, the number of lines from the end of the logs to show. 
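A sketch of the option types defined above: a label-filtered watch via ListOptions (the pattern a watcher such as kubewatch relies on) and an immediate delete via DeleteOptions, where a nil grace period means "use the default" and zero means "delete now". The structs are only constructed and printed here; passing them to a client is out of scope, and the vendored import path is an assumption.

// options_sketch.go: ListOptions and DeleteOptions usage (sketch).
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1" // assumed vendored import path
)

func main() {
	watchOpts := v1.ListOptions{
		LabelSelector:   "app=web",
		Watch:           true,
		ResourceVersion: "0", // resume the change stream from this version
	}

	grace := int64(0) // zero means delete immediately; nil would mean "use the default"
	delOpts := v1.DeleteOptions{GracePeriodSeconds: &grace}

	fmt.Printf("watch: %+v\n", watchOpts)
	fmt.Printf("delete grace period: %d\n", *delOpts.GracePeriodSeconds)
}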
If not specified, - // logs are shown from the creation of the container or sinceSeconds or sinceTime - TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"` - // If set, the number of bytes to read from the server before terminating the - // log output. This may not display a complete final line of logging, and may return - // slightly more or slightly less than the specified limit. - LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"` -} - -// PodAttachOptions is the query options to a Pod's remote attach call. -// --- -// TODO: merge w/ PodExecOptions below for stdin, stdout, etc -// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY -type PodAttachOptions struct { - unversioned.TypeMeta `json:",inline"` - - // Stdin if true, redirects the standard input stream of the pod for this call. - // Defaults to false. - Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"` - - // Stdout if true indicates that stdout is to be redirected for the attach call. - // Defaults to true. - Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"` - - // Stderr if true indicates that stderr is to be redirected for the attach call. - // Defaults to true. - Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"` - - // TTY if true indicates that a tty will be allocated for the attach call. - // This is passed through the container runtime so the tty - // is allocated on the worker node by the container runtime. - // Defaults to false. - TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"` - - // The container in which to execute the command. - // Defaults to only container if there is only one container in the pod. - Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"` -} - -// PodExecOptions is the query options to a Pod's remote exec call. -// --- -// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging -// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY -type PodExecOptions struct { - unversioned.TypeMeta `json:",inline"` - - // Redirect the standard input stream of the pod for this call. - // Defaults to false. - Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"` - - // Redirect the standard output stream of the pod for this call. - // Defaults to true. - Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"` - - // Redirect the standard error stream of the pod for this call. - // Defaults to true. - Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"` - - // TTY if true indicates that a tty will be allocated for the exec call. - // Defaults to false. - TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"` - - // Container in which to execute the command. - // Defaults to only container if there is only one container in the pod. - Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"` - - // Command is the remote command to execute. argv array. Not executed within a shell. - Command []string `json:"command" protobuf:"bytes,6,rep,name=command"` -} - -// PodProxyOptions is the query options to a Pod's proxy call. -type PodProxyOptions struct { - unversioned.TypeMeta `json:",inline"` - - // Path is the URL path to use for the current proxy request to pod. 
- Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` -} - -// NodeProxyOptions is the query options to a Node's proxy call. -type NodeProxyOptions struct { - unversioned.TypeMeta `json:",inline"` - - // Path is the URL path to use for the current proxy request to node. - Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` -} - -// ServiceProxyOptions is the query options to a Service's proxy call. -type ServiceProxyOptions struct { - unversioned.TypeMeta `json:",inline"` - - // Path is the part of URLs that include service endpoints, suffixes, - // and parameters to use for the current proxy request to service. - // For example, the whole request URL is - // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. - // Path is _search?q=user:kimchy. - Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` -} - -// OwnerReference contains enough information to let you identify an owning -// object. Currently, an owning object must be in the same namespace, so there -// is no namespace field. -type OwnerReference struct { - // API version of the referent. - APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` - // Kind of the referent. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names - Name string `json:"name" protobuf:"bytes,3,opt,name=name"` - // UID of the referent. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#uids - UID types.UID `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` - // If true, this reference points to the managing controller. - Controller *bool `json:"controller,omitempty" protobuf:"varint,6,opt,name=controller"` -} - -// ObjectReference contains enough information to let you inspect or modify the referred object. -type ObjectReference struct { - // Kind of the referent. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` - // Namespace of the referent. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/namespaces.md - Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` - // Name of the referent. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names - Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"` - // UID of the referent. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#uids - UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` - // API version of the referent. - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"` - // Specific resourceVersion to which this reference is made, if any. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#concurrency-control-and-consistency - ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` - - // If referring to a piece of an object instead of an entire object, this string - // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
- // For example, if the object reference is to a container within a pod, this would take on a value like: - // "spec.containers{name}" (where "name" refers to the name of the container that triggered - // the event) or if no container name is specified "spec.containers[2]" (container with - // index 2 in this pod). This syntax is chosen only to have some well-defined way of - // referencing a part of an object. - // TODO: this design is not final and this field is subject to change in the future. - FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,7,opt,name=fieldPath"` -} - -// LocalObjectReference contains enough information to let you locate the -// referenced object inside the same namespace. -type LocalObjectReference struct { - // Name of the referent. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names - // TODO: Add other useful fields. apiVersion, kind, uid? - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` -} - -// SerializedReference is a reference to serialized object. -type SerializedReference struct { - unversioned.TypeMeta `json:",inline"` - // The reference to an object in the system. - Reference ObjectReference `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"` -} - -// EventSource contains information for an event. -type EventSource struct { - // Component from which the event is generated. - Component string `json:"component,omitempty" protobuf:"bytes,1,opt,name=component"` - // Host name on which the event is generated. - Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"` -} - -// Valid values for event types (new types could be added in future) -const ( - // Information only and will not cause any problems - EventTypeNormal string = "Normal" - // These events are to warn that something might go wrong - EventTypeWarning string = "Warning" -) - -// +genclient=true - -// Event is a report of an event somewhere in the cluster. -// TODO: Decide whether to store these separately or with the object they apply to. -type Event struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - - // The object that this event is about. - InvolvedObject ObjectReference `json:"involvedObject" protobuf:"bytes,2,opt,name=involvedObject"` - - // This should be a short, machine understandable string that gives the reason - // for the transition into the object's current status. - // TODO: provide exact specification for format. - Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` - - // A human-readable description of the status of this operation. - // TODO: decide on maximum length. - Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` - - // The component reporting this event. Should be a short machine understandable string. - Source EventSource `json:"source,omitempty" protobuf:"bytes,5,opt,name=source"` - - // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) - FirstTimestamp unversioned.Time `json:"firstTimestamp,omitempty" protobuf:"bytes,6,opt,name=firstTimestamp"` - - // The time at which the most recent occurrence of this event was recorded. - LastTimestamp unversioned.Time `json:"lastTimestamp,omitempty" protobuf:"bytes,7,opt,name=lastTimestamp"` - - // The number of times this event has occurred. 
- Count int32 `json:"count,omitempty" protobuf:"varint,8,opt,name=count"` - - // Type of this event (Normal, Warning), new types could be added in the future - Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"` -} - -// EventList is a list of events. -type EventList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of events - Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// List holds a list of objects, which may not be known by the server. -type List struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of objects - Items []runtime.RawExtension `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// LimitType is a type of object that is limited -type LimitType string - -const ( - // Limit that applies to all pods in a namespace - LimitTypePod LimitType = "Pod" - // Limit that applies to all containers in a namespace - LimitTypeContainer LimitType = "Container" -) - -// LimitRangeItem defines a min/max usage limit for any resource that matches on kind. -type LimitRangeItem struct { - // Type of resource that this limit applies to. - Type LimitType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=LimitType"` - // Max usage constraints on this kind by resource name. - Max ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max,casttype=ResourceList,castkey=ResourceName"` - // Min usage constraints on this kind by resource name. - Min ResourceList `json:"min,omitempty" protobuf:"bytes,3,rep,name=min,casttype=ResourceList,castkey=ResourceName"` - // Default resource requirement limit value by resource name if resource limit is omitted. - Default ResourceList `json:"default,omitempty" protobuf:"bytes,4,rep,name=default,casttype=ResourceList,castkey=ResourceName"` - // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. - DefaultRequest ResourceList `json:"defaultRequest,omitempty" protobuf:"bytes,5,rep,name=defaultRequest,casttype=ResourceList,castkey=ResourceName"` - // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource. - MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty" protobuf:"bytes,6,rep,name=maxLimitRequestRatio,casttype=ResourceList,castkey=ResourceName"` -} - -// LimitRangeSpec defines a min/max usage limit for resources that match on kind. -type LimitRangeSpec struct { - // Limits is the list of LimitRangeItem objects that are enforced. - Limits []LimitRangeItem `json:"limits" protobuf:"bytes,1,rep,name=limits"` -} - -// +genclient=true - -// LimitRange sets resource usage limits for each kind of resource in a Namespace. -type LimitRange struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. 
- // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the limits enforced. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// LimitRangeList is a list of LimitRange items. -type LimitRangeList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of LimitRange objects. - // More info: http://releases.k8s.io/release-1.3/docs/design/admission_control_limit_range.md - Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// The following identify resource constants for Kubernetes object types -const ( - // Pods, number - ResourcePods ResourceName = "pods" - // Services, number - ResourceServices ResourceName = "services" - // ReplicationControllers, number - ResourceReplicationControllers ResourceName = "replicationcontrollers" - // ResourceQuotas, number - ResourceQuotas ResourceName = "resourcequotas" - // ResourceSecrets, number - ResourceSecrets ResourceName = "secrets" - // ResourceConfigMaps, number - ResourceConfigMaps ResourceName = "configmaps" - // ResourcePersistentVolumeClaims, number - ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" - // ResourceServicesNodePorts, number - ResourceServicesNodePorts ResourceName = "services.nodeports" - // ResourceServicesLoadBalancers, number - ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" - // CPU request, in cores. (500m = .5 cores) - ResourceRequestsCPU ResourceName = "requests.cpu" - // Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceRequestsMemory ResourceName = "requests.memory" - // CPU limit, in cores. (500m = .5 cores) - ResourceLimitsCPU ResourceName = "limits.cpu" - // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceLimitsMemory ResourceName = "limits.memory" -) - -// A ResourceQuotaScope defines a filter that must match each object tracked by a quota -type ResourceQuotaScope string - -const ( - // Match all pod objects where spec.activeDeadlineSeconds - ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" - // Match all pod objects where !spec.activeDeadlineSeconds - ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating" - // Match all pod objects that have best effort quality of service - ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" - // Match all pod objects that do not have best effort quality of service - ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" -) - -// ResourceQuotaSpec defines the desired hard limits to enforce for Quota. -type ResourceQuotaSpec struct { - // Hard is the set of desired hard limits for each named resource. - // More info: http://releases.k8s.io/release-1.3/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota - Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` - // A collection of filters that must match each object tracked by a quota. - // If not specified, the quota matches all objects. 
- Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"` -} - -// ResourceQuotaStatus defines the enforced hard limits and observed use. -type ResourceQuotaStatus struct { - // Hard is the set of enforced hard limits for each named resource. - // More info: http://releases.k8s.io/release-1.3/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota - Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` - // Used is the current observed total usage of the resource in the namespace. - Used ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"` -} - -// +genclient=true - -// ResourceQuota sets aggregate quota restrictions enforced per namespace -type ResourceQuota struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the desired quota. - // http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status defines the actual enforced quota and its current usage. - // http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// ResourceQuotaList is a list of ResourceQuota items. -type ResourceQuotaList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of ResourceQuota objects. - // More info: http://releases.k8s.io/release-1.3/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota - Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient=true - -// Secret holds secret data of a certain type. The total bytes of the values in -// the Data field must be less than MaxSecretSize bytes. -type Secret struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN - // or leading dot followed by valid DNS_SUBDOMAIN. - // The serialized form of the secret data is a base64 encoded string, - // representing the arbitrary (possibly non-string) data value here. - // Described in https://tools.ietf.org/html/rfc4648#section-4 - Data map[string][]byte `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"` - - // Used to facilitate programmatic handling of secret data. - Type SecretType `json:"type,omitempty" protobuf:"bytes,3,opt,name=type,casttype=SecretType"` -} - -const MaxSecretSize = 1 * 1024 * 1024 - -type SecretType string - -const ( - // SecretTypeOpaque is the default. 
Arbitrary user-defined data - SecretTypeOpaque SecretType = "Opaque" - - // SecretTypeServiceAccountToken contains a token that identifies a service account to the API - // - // Required fields: - // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies - // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies - // - Secret.Data["token"] - a token that identifies the service account to the API - SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token" - - // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets - ServiceAccountNameKey = "kubernetes.io/service-account.name" - // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets - ServiceAccountUIDKey = "kubernetes.io/service-account.uid" - // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets - ServiceAccountTokenKey = "token" - // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets - ServiceAccountKubeconfigKey = "kubernetes.kubeconfig" - // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets - ServiceAccountRootCAKey = "ca.crt" - // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls - ServiceAccountNamespaceKey = "namespace" - - // SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg - // - // Required fields: - // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file - SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg" - - // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets - DockerConfigKey = ".dockercfg" - - // SecretTypeTLS contains information about a TLS client or server secret. It - // is primarily used with TLS termination of the Ingress resource, but may be - // used in other types. - // - // Required fields: - // - Secret.Data["tls.key"] - TLS private key. - // Secret.Data["tls.crt"] - TLS certificate. - // TODO: Consider supporting different formats, specifying CA/destinationCA. - SecretTypeTLS SecretType = "kubernetes.io/tls" - - // TLSCertKey is the key for tls certificates in a TLS secert. - TLSCertKey = "tls.crt" - // TLSPrivateKeyKey is the key for the private key field in a TLS secret. - TLSPrivateKeyKey = "tls.key" -) - -// SecretList is a list of Secret. -type SecretList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of secret objects. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/secrets.md - Items []Secret `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient=true - -// ConfigMap holds configuration data for pods to consume. -type ConfigMap struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Data contains the configuration data. 
- // Each key must be a valid DNS_SUBDOMAIN with an optional leading dot. - Data map[string]string `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"` -} - -// ConfigMapList is a resource containing a list of ConfigMap objects. -type ConfigMapList struct { - unversioned.TypeMeta `json:",inline"` - - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of ConfigMaps. - Items []ConfigMap `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// Type and constants for component health validation. -type ComponentConditionType string - -// These are the valid conditions for the component. -const ( - ComponentHealthy ComponentConditionType = "Healthy" -) - -// Information about the condition of a component. -type ComponentCondition struct { - // Type of condition for a component. - // Valid value: "Healthy" - Type ComponentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ComponentConditionType"` - // Status of the condition for a component. - // Valid values for "Healthy": "True", "False", or "Unknown". - Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` - // Message about the condition for a component. - // For example, information about a health check. - Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` - // Condition error code for a component. - // For example, a health check error code. - Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"` -} - -// +genclient=true,nonNamespaced=true - -// ComponentStatus (and ComponentStatusList) holds the cluster validation info. -type ComponentStatus struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of component conditions observed - Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` -} - -// Status of all the conditions for the component as a list of ComponentStatus objects. -type ComponentStatusList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of ComponentStatus objects. - Items []ComponentStatus `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// DownwardAPIVolumeSource represents a volume containing downward API info. -// Downward API volumes support ownership management and SELinux relabeling. -type DownwardAPIVolumeSource struct { - // Items is a list of downward API volume file - Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"` -} - -// DownwardAPIVolumeFile represents information to create the file containing the pod field -type DownwardAPIVolumeFile struct { - // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' 
- Path string `json:"path" protobuf:"bytes,1,opt,name=path"` - // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. - FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"` - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,3,opt,name=resourceFieldRef"` -} - -// SecurityContext holds security configuration that will be applied to a container. -// Some fields are present in both SecurityContext and PodSecurityContext. When both -// are set, the values in SecurityContext take precedence. -type SecurityContext struct { - // The capabilities to add/drop when running containers. - // Defaults to the default set of capabilities granted by the container runtime. - Capabilities *Capabilities `json:"capabilities,omitempty" protobuf:"bytes,1,opt,name=capabilities"` - // Run container in privileged mode. - // Processes in privileged containers are essentially equivalent to root on the host. - // Defaults to false. - Privileged *bool `json:"privileged,omitempty" protobuf:"varint,2,opt,name=privileged"` - // The SELinux context to be applied to the container. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"` - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,4,opt,name=runAsUser"` - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,5,opt,name=runAsNonRoot"` - // Whether this container has a read-only root filesystem. - // Default is false. - ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"` -} - -// SELinuxOptions are the labels to be applied to the container -type SELinuxOptions struct { - // User is a SELinux user label that applies to the container. - User string `json:"user,omitempty" protobuf:"bytes,1,opt,name=user"` - // Role is a SELinux role label that applies to the container. - Role string `json:"role,omitempty" protobuf:"bytes,2,opt,name=role"` - // Type is a SELinux type label that applies to the container. - Type string `json:"type,omitempty" protobuf:"bytes,3,opt,name=type"` - // Level is SELinux level label that applies to the container. - Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"` -} - -// RangeAllocation is not a public type. 
-type RangeAllocation struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Range is string that identifies the range represented by 'data'. - Range string `json:"range" protobuf:"bytes,2,opt,name=range"` - // Data is a bit array containing all allocated addresses in the previous segment. - Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"` -} - -const ( - // "default-scheduler" is the name of default scheduler. - DefaultSchedulerName = "default-scheduler" -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/api/v1/types_swagger_doc_generated.go deleted file mode 100644 index 7587654aa..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/types_swagger_doc_generated.go +++ /dev/null @@ -1,1741 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_AWSElasticBlockStoreVolumeSource = map[string]string{ - "": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.", - "volumeID": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#awselasticblockstore", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#awselasticblockstore", - "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", - "readOnly": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". 
More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#awselasticblockstore", -} - -func (AWSElasticBlockStoreVolumeSource) SwaggerDoc() map[string]string { - return map_AWSElasticBlockStoreVolumeSource -} - -var map_Affinity = map[string]string{ - "": "Affinity is a group of affinity scheduling rules.", - "nodeAffinity": "Describes node affinity scheduling rules for the pod.", - "podAffinity": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).", - "podAntiAffinity": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).", -} - -func (Affinity) SwaggerDoc() map[string]string { - return map_Affinity -} - -var map_AttachedVolume = map[string]string{ - "": "AttachedVolume describes a volume attached to a node", - "name": "Name of the attached volume", - "devicePath": "DevicePath represents the device path where the volume should be avilable", -} - -func (AttachedVolume) SwaggerDoc() map[string]string { - return map_AttachedVolume -} - -var map_AzureFileVolumeSource = map[string]string{ - "": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - "secretName": "the name of secret that contains Azure Storage Account Name and Key", - "shareName": "Share Name", - "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", -} - -func (AzureFileVolumeSource) SwaggerDoc() map[string]string { - return map_AzureFileVolumeSource -} - -var map_Binding = map[string]string{ - "": "Binding ties one object to another. For example, a pod is bound to a node by a scheduler.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "target": "The target object that you want to bind to the standard object.", -} - -func (Binding) SwaggerDoc() map[string]string { - return map_Binding -} - -var map_Capabilities = map[string]string{ - "": "Adds and removes POSIX capabilities from running containers.", - "add": "Added capabilities", - "drop": "Removed capabilities", -} - -func (Capabilities) SwaggerDoc() map[string]string { - return map_Capabilities -} - -var map_CephFSVolumeSource = map[string]string{ - "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - "monitors": "Required: Monitors is a collection of Ceph monitors More info: http://releases.k8s.io/release-1.3/examples/cephfs/README.md#how-to-use-it", - "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - "user": "Optional: User is the rados user name, default is admin More info: http://releases.k8s.io/release-1.3/examples/cephfs/README.md#how-to-use-it", - "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: http://releases.k8s.io/release-1.3/examples/cephfs/README.md#how-to-use-it", - "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: http://releases.k8s.io/release-1.3/examples/cephfs/README.md#how-to-use-it", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: http://releases.k8s.io/release-1.3/examples/cephfs/README.md#how-to-use-it", -} - -func (CephFSVolumeSource) SwaggerDoc() map[string]string { - return map_CephFSVolumeSource -} - -var map_CinderVolumeSource = map[string]string{ - "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", - "volumeID": "volume id used to identify the volume in cinder More info: http://releases.k8s.io/release-1.3/examples/mysql-cinder-pd/README.md", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/release-1.3/examples/mysql-cinder-pd/README.md", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/release-1.3/examples/mysql-cinder-pd/README.md", -} - -func (CinderVolumeSource) SwaggerDoc() map[string]string { - return map_CinderVolumeSource -} - -var map_ComponentCondition = map[string]string{ - "": "Information about the condition of a component.", - "type": "Type of condition for a component. Valid value: \"Healthy\"", - "status": "Status of the condition for a component. Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\".", - "message": "Message about the condition for a component. For example, information about a health check.", - "error": "Condition error code for a component. For example, a health check error code.", -} - -func (ComponentCondition) SwaggerDoc() map[string]string { - return map_ComponentCondition -} - -var map_ComponentStatus = map[string]string{ - "": "ComponentStatus (and ComponentStatusList) holds the cluster validation info.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "conditions": "List of component conditions observed", -} - -func (ComponentStatus) SwaggerDoc() map[string]string { - return map_ComponentStatus -} - -var map_ComponentStatusList = map[string]string{ - "": "Status of all the conditions for the component as a list of ComponentStatus objects.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "items": "List of ComponentStatus objects.", -} - -func (ComponentStatusList) SwaggerDoc() map[string]string { - return map_ComponentStatusList -} - -var map_ConfigMap = map[string]string{ - "": "ConfigMap holds configuration data for pods to consume.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "data": "Data contains the configuration data. 
Each key must be a valid DNS_SUBDOMAIN with an optional leading dot.", -} - -func (ConfigMap) SwaggerDoc() map[string]string { - return map_ConfigMap -} - -var map_ConfigMapKeySelector = map[string]string{ - "": "Selects a key from a ConfigMap.", - "key": "The key to select.", -} - -func (ConfigMapKeySelector) SwaggerDoc() map[string]string { - return map_ConfigMapKeySelector -} - -var map_ConfigMapList = map[string]string{ - "": "ConfigMapList is a resource containing a list of ConfigMap objects.", - "metadata": "More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "items": "Items is the list of ConfigMaps.", -} - -func (ConfigMapList) SwaggerDoc() map[string]string { - return map_ConfigMapList -} - -var map_ConfigMapVolumeSource = map[string]string{ - "": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.", - "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error. Paths must be relative and may not contain the '..' path or start with '..'.", -} - -func (ConfigMapVolumeSource) SwaggerDoc() map[string]string { - return map_ConfigMapVolumeSource -} - -var map_Container = map[string]string{ - "": "A single application container that you want to run within a pod.", - "name": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", - "image": "Docker image name. More info: http://releases.k8s.io/release-1.3/docs/user-guide/images.md", - "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://releases.k8s.io/release-1.3/docs/user-guide/containers.md#containers-and-commands", - "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://releases.k8s.io/release-1.3/docs/user-guide/containers.md#containers-and-commands", - "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", - "ports": "List of ports to expose from the container. 
Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", - "env": "List of environment variables to set in the container. Cannot be updated.", - "resources": "Compute Resources required by this container. Cannot be updated. More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#resources", - "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", - "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#container-probes", - "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#container-probes", - "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", - "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Defaults to /dev/termination-log. Cannot be updated.", - "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: http://releases.k8s.io/release-1.3/docs/user-guide/images.md#updating-images", - "securityContext": "Security options the pod should run with. More info: http://releases.k8s.io/release-1.3/docs/design/security_context.md", - "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", - "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", - "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", -} - -func (Container) SwaggerDoc() map[string]string { - return map_Container -} - -var map_ContainerImage = map[string]string{ - "": "Describe a container image", - "names": "Names by which this image is known. e.g. 
[\"gcr.io/google_containers/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]", - "sizeBytes": "The size of the image in bytes.", -} - -func (ContainerImage) SwaggerDoc() map[string]string { - return map_ContainerImage -} - -var map_ContainerPort = map[string]string{ - "": "ContainerPort represents a network port in a single container.", - "name": "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.", - "hostPort": "Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.", - "containerPort": "Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.", - "protocol": "Protocol for port. Must be UDP or TCP. Defaults to \"TCP\".", - "hostIP": "What host IP to bind the external port to.", -} - -func (ContainerPort) SwaggerDoc() map[string]string { - return map_ContainerPort -} - -var map_ContainerState = map[string]string{ - "": "ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.", - "waiting": "Details about a waiting container", - "running": "Details about a running container", - "terminated": "Details about a terminated container", -} - -func (ContainerState) SwaggerDoc() map[string]string { - return map_ContainerState -} - -var map_ContainerStateRunning = map[string]string{ - "": "ContainerStateRunning is a running state of a container.", - "startedAt": "Time at which the container was last (re-)started", -} - -func (ContainerStateRunning) SwaggerDoc() map[string]string { - return map_ContainerStateRunning -} - -var map_ContainerStateTerminated = map[string]string{ - "": "ContainerStateTerminated is a terminated state of a container.", - "exitCode": "Exit status from the last termination of the container", - "signal": "Signal from the last termination of the container", - "reason": "(brief) reason from the last termination of the container", - "message": "Message regarding the last termination of the container", - "startedAt": "Time at which previous execution of the container started", - "finishedAt": "Time at which the container last terminated", - "containerID": "Container's ID in the format 'docker://<container_id>'", -} - -func (ContainerStateTerminated) SwaggerDoc() map[string]string { - return map_ContainerStateTerminated -} - -var map_ContainerStateWaiting = map[string]string{ - "": "ContainerStateWaiting is a waiting state of a container.", - "reason": "(brief) reason the container is not yet running.", - "message": "Message regarding why the container is not yet running.", -} - -func (ContainerStateWaiting) SwaggerDoc() map[string]string { - return map_ContainerStateWaiting -} - -var map_ContainerStatus = map[string]string{ - "": "ContainerStatus contains details for the current status of this container.", - "name": "This must be a DNS_LABEL. Each container in a pod must have a unique name. 
Cannot be updated.", - "state": "Details about the container's current condition.", - "lastState": "Details about the container's last termination condition.", - "ready": "Specifies whether the container has passed its readiness probe.", - "restartCount": "The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.", - "image": "The image the container is running. More info: http://releases.k8s.io/release-1.3/docs/user-guide/images.md", - "imageID": "ImageID of the container's image.", - "containerID": "Container's ID in the format 'docker://<container_id>'. More info: http://releases.k8s.io/release-1.3/docs/user-guide/container-environment.md#container-information", -} - -func (ContainerStatus) SwaggerDoc() map[string]string { - return map_ContainerStatus -} - -var map_DaemonEndpoint = map[string]string{ - "": "DaemonEndpoint contains information about a single Daemon endpoint.", - "Port": "Port number of the given endpoint.", -} - -func (DaemonEndpoint) SwaggerDoc() map[string]string { - return map_DaemonEndpoint -} - -var map_DeleteOptions = map[string]string{ - "": "DeleteOptions may be provided when deleting an API object", - "gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", - "preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", - "orphanDependents": "Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list.", -} - -func (DeleteOptions) SwaggerDoc() map[string]string { - return map_DeleteOptions -} - -var map_DownwardAPIVolumeFile = map[string]string{ - "": "DownwardAPIVolumeFile represents information to create the file containing the pod field", - "path": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'", - "fieldRef": "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.", - "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", -} - -func (DownwardAPIVolumeFile) SwaggerDoc() map[string]string { - return map_DownwardAPIVolumeFile -} - -var map_DownwardAPIVolumeSource = map[string]string{ - "": "DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.", - "items": "Items is a list of downward API volume file", -} - -func (DownwardAPIVolumeSource) SwaggerDoc() map[string]string { - return map_DownwardAPIVolumeSource -} - -var map_EmptyDirVolumeSource = map[string]string{ - "": "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.", - "medium": "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. 
Must be an empty string (default) or Memory. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#emptydir", -} - -func (EmptyDirVolumeSource) SwaggerDoc() map[string]string { - return map_EmptyDirVolumeSource -} - -var map_EndpointAddress = map[string]string{ - "": "EndpointAddress is a tuple that describes single IP address.", - "ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready.", - "hostname": "The Hostname of this endpoint", - "targetRef": "Reference to object providing the endpoint.", -} - -func (EndpointAddress) SwaggerDoc() map[string]string { - return map_EndpointAddress -} - -var map_EndpointPort = map[string]string{ - "": "EndpointPort is a tuple that describes a single port.", - "name": "The name of this port (corresponds to ServicePort.Name). Must be a DNS_LABEL. Optional only if one port is defined.", - "port": "The port number of the endpoint.", - "protocol": "The IP protocol for this port. Must be UDP or TCP. Default is TCP.", -} - -func (EndpointPort) SwaggerDoc() map[string]string { - return map_EndpointPort -} - -var map_EndpointSubset = map[string]string{ - "": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n }\nThe resulting set of endpoints can be viewed as:\n a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n b: [ 10.10.1.1:309, 10.10.2.2:309 ]", - "addresses": "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.", - "notReadyAddresses": "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.", - "ports": "Port numbers available on the related IP addresses.", -} - -func (EndpointSubset) SwaggerDoc() map[string]string { - return map_EndpointSubset -} - -var map_Endpoints = map[string]string{ - "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "subsets": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. 
Sets of addresses and ports that comprise a service.", -} - -func (Endpoints) SwaggerDoc() map[string]string { - return map_Endpoints -} - -var map_EndpointsList = map[string]string{ - "": "EndpointsList is a list of endpoints.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "items": "List of endpoints.", -} - -func (EndpointsList) SwaggerDoc() map[string]string { - return map_EndpointsList -} - -var map_EnvVar = map[string]string{ - "": "EnvVar represents an environment variable present in a Container.", - "name": "Name of the environment variable. Must be a C_IDENTIFIER.", - "value": "Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".", - "valueFrom": "Source for the environment variable's value. Cannot be used if value is not empty.", -} - -func (EnvVar) SwaggerDoc() map[string]string { - return map_EnvVar -} - -var map_EnvVarSource = map[string]string{ - "": "EnvVarSource represents a source for the value of an EnvVar.", - "fieldRef": "Selects a field of the pod; only name and namespace are supported.", - "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", - "configMapKeyRef": "Selects a key of a ConfigMap.", - "secretKeyRef": "Selects a key of a secret in the pod's namespace", -} - -func (EnvVarSource) SwaggerDoc() map[string]string { - return map_EnvVarSource -} - -var map_Event = map[string]string{ - "": "Event is a report of an event somewhere in the cluster.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "involvedObject": "The object that this event is about.", - "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", - "message": "A human-readable description of the status of this operation.", - "source": "The component reporting this event. Should be a short machine understandable string.", - "firstTimestamp": "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)", - "lastTimestamp": "The time at which the most recent occurrence of this event was recorded.", - "count": "The number of times this event has occurred.", - "type": "Type of this event (Normal, Warning), new types could be added in the future", -} - -func (Event) SwaggerDoc() map[string]string { - return map_Event -} - -var map_EventList = map[string]string{ - "": "EventList is a list of events.", - "metadata": "Standard list metadata. 
More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "items": "List of events", -} - -func (EventList) SwaggerDoc() map[string]string { - return map_EventList -} - -var map_EventSource = map[string]string{ - "": "EventSource contains information for an event.", - "component": "Component from which the event is generated.", - "host": "Host name on which the event is generated.", -} - -func (EventSource) SwaggerDoc() map[string]string { - return map_EventSource -} - -var map_ExecAction = map[string]string{ - "": "ExecAction describes a \"run in container\" action.", - "command": "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.", -} - -func (ExecAction) SwaggerDoc() map[string]string { - return map_ExecAction -} - -var map_ExportOptions = map[string]string{ - "": "ExportOptions is the query options to the standard REST get call.", - "export": "Should this value be exported. Export strips fields that a user can not specify.", - "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'", -} - -func (ExportOptions) SwaggerDoc() map[string]string { - return map_ExportOptions -} - -var map_FCVolumeSource = map[string]string{ - "": "Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.", - "targetWWNs": "Required: FC target world wide names (WWNs)", - "lun": "Required: FC target lun number", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", -} - -func (FCVolumeSource) SwaggerDoc() map[string]string { - return map_FCVolumeSource -} - -var map_FlexVolumeSource = map[string]string{ - "": "FlexVolume represents a generic volume resource that is provisioned/attached using a exec based plugin. This is an alpha feature and may change in future.", - "driver": "Driver is the name of the driver to use for this volume.", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", - "secretRef": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - "options": "Optional: Extra command options if any.", -} - -func (FlexVolumeSource) SwaggerDoc() map[string]string { - return map_FlexVolumeSource -} - -var map_FlockerVolumeSource = map[string]string{ - "": "Represents a Flocker volume mounted by the Flocker agent. Flocker volumes do not support ownership management or SELinux relabeling.", - "datasetName": "Required: the volume name. 
This is going to be store on metadata -> name on the payload for Flocker", -} - -func (FlockerVolumeSource) SwaggerDoc() map[string]string { - return map_FlockerVolumeSource -} - -var map_GCEPersistentDiskVolumeSource = map[string]string{ - "": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.", - "pdName": "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#gcepersistentdisk", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#gcepersistentdisk", - "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#gcepersistentdisk", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#gcepersistentdisk", -} - -func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string { - return map_GCEPersistentDiskVolumeSource -} - -var map_GitRepoVolumeSource = map[string]string{ - "": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.", - "repository": "Repository URL", - "revision": "Commit hash for the specified revision.", - "directory": "Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.", -} - -func (GitRepoVolumeSource) SwaggerDoc() map[string]string { - return map_GitRepoVolumeSource -} - -var map_GlusterfsVolumeSource = map[string]string{ - "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", - "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: http://releases.k8s.io/release-1.3/examples/glusterfs/README.md#create-a-pod", - "path": "Path is the Glusterfs volume path. More info: http://releases.k8s.io/release-1.3/examples/glusterfs/README.md#create-a-pod", - "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: http://releases.k8s.io/release-1.3/examples/glusterfs/README.md#create-a-pod", -} - -func (GlusterfsVolumeSource) SwaggerDoc() map[string]string { - return map_GlusterfsVolumeSource -} - -var map_HTTPGetAction = map[string]string{ - "": "HTTPGetAction describes an action based on HTTP Get requests.", - "path": "Path to access on the HTTP server.", - "port": "Name or number of the port to access on the container. Number must be in the range 1 to 65535. 
Name must be an IANA_SVC_NAME.", - "host": "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.", - "scheme": "Scheme to use for connecting to the host. Defaults to HTTP.", - "httpHeaders": "Custom headers to set in the request. HTTP allows repeated headers.", -} - -func (HTTPGetAction) SwaggerDoc() map[string]string { - return map_HTTPGetAction -} - -var map_HTTPHeader = map[string]string{ - "": "HTTPHeader describes a custom header to be used in HTTP probes", - "name": "The header field name", - "value": "The header field value", -} - -func (HTTPHeader) SwaggerDoc() map[string]string { - return map_HTTPHeader -} - -var map_Handler = map[string]string{ - "": "Handler defines a specific action that should be taken", - "exec": "One and only one of the following should be specified. Exec specifies the action to take.", - "httpGet": "HTTPGet specifies the http request to perform.", - "tcpSocket": "TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported", -} - -func (Handler) SwaggerDoc() map[string]string { - return map_Handler -} - -var map_HostPathVolumeSource = map[string]string{ - "": "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.", - "path": "Path of the directory on the host. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#hostpath", -} - -func (HostPathVolumeSource) SwaggerDoc() map[string]string { - return map_HostPathVolumeSource -} - -var map_ISCSIVolumeSource = map[string]string{ - "": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", - "targetPortal": "iSCSI target portal. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", - "iqn": "Target iSCSI Qualified Name.", - "lun": "iSCSI target lun number.", - "iscsiInterface": "Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#iscsi", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", -} - -func (ISCSIVolumeSource) SwaggerDoc() map[string]string { - return map_ISCSIVolumeSource -} - -var map_KeyToPath = map[string]string{ - "": "Maps a string key to a path within a volume.", - "key": "The key to project.", - "path": "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.", -} - -func (KeyToPath) SwaggerDoc() map[string]string { - return map_KeyToPath -} - -var map_Lifecycle = map[string]string{ - "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", - "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. 
Other management of the container blocks until the hook completes. More info: http://releases.k8s.io/release-1.3/docs/user-guide/container-environment.md#hook-details", - "preStop": "PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: http://releases.k8s.io/release-1.3/docs/user-guide/container-environment.md#hook-details", -} - -func (Lifecycle) SwaggerDoc() map[string]string { - return map_Lifecycle -} - -var map_LimitRange = map[string]string{ - "": "LimitRange sets resource usage limits for each kind of resource in a Namespace.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the limits enforced. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", -} - -func (LimitRange) SwaggerDoc() map[string]string { - return map_LimitRange -} - -var map_LimitRangeItem = map[string]string{ - "": "LimitRangeItem defines a min/max usage limit for any resource that matches on kind.", - "type": "Type of resource that this limit applies to.", - "max": "Max usage constraints on this kind by resource name.", - "min": "Min usage constraints on this kind by resource name.", - "default": "Default resource requirement limit value by resource name if resource limit is omitted.", - "defaultRequest": "DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.", - "maxLimitRequestRatio": "MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.", -} - -func (LimitRangeItem) SwaggerDoc() map[string]string { - return map_LimitRangeItem -} - -var map_LimitRangeList = map[string]string{ - "": "LimitRangeList is a list of LimitRange items.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "items": "Items is a list of LimitRange objects. More info: http://releases.k8s.io/release-1.3/docs/design/admission_control_limit_range.md", -} - -func (LimitRangeList) SwaggerDoc() map[string]string { - return map_LimitRangeList -} - -var map_LimitRangeSpec = map[string]string{ - "": "LimitRangeSpec defines a min/max usage limit for resources that match on kind.", - "limits": "Limits is the list of LimitRangeItem objects that are enforced.", -} - -func (LimitRangeSpec) SwaggerDoc() map[string]string { - return map_LimitRangeSpec -} - -var map_List = map[string]string{ - "": "List holds a list of objects, which may not be known by the server.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "items": "List of objects", -} - -func (List) SwaggerDoc() map[string]string { - return map_List -} - -var map_ListOptions = map[string]string{ - "": "ListOptions is the query options to a standard REST list call.", - "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "fieldSelector": "A selector to restrict the list of returned objects by their fields. 
Defaults to everything.", - "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.", - "timeoutSeconds": "Timeout for the list/watch call.", -} - -func (ListOptions) SwaggerDoc() map[string]string { - return map_ListOptions -} - -var map_LoadBalancerIngress = map[string]string{ - "": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", - "ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)", - "hostname": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)", -} - -func (LoadBalancerIngress) SwaggerDoc() map[string]string { - return map_LoadBalancerIngress -} - -var map_LoadBalancerStatus = map[string]string{ - "": "LoadBalancerStatus represents the status of a load-balancer.", - "ingress": "Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points.", -} - -func (LoadBalancerStatus) SwaggerDoc() map[string]string { - return map_LoadBalancerStatus -} - -var map_LocalObjectReference = map[string]string{ - "": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", - "name": "Name of the referent. More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names", -} - -func (LocalObjectReference) SwaggerDoc() map[string]string { - return map_LocalObjectReference -} - -var map_NFSVolumeSource = map[string]string{ - "": "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.", - "server": "Server is the hostname or IP address of the NFS server. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#nfs", - "path": "Path that is exported by the NFS server. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#nfs", - "readOnly": "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#nfs", -} - -func (NFSVolumeSource) SwaggerDoc() map[string]string { - return map_NFSVolumeSource -} - -var map_Namespace = map[string]string{ - "": "Namespace provides a scope for Names. Use of multiple namespaces is optional.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of the Namespace. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", - "status": "Status describes the current status of a Namespace. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", -} - -func (Namespace) SwaggerDoc() map[string]string { - return map_Namespace -} - -var map_NamespaceList = map[string]string{ - "": "NamespaceList is a list of Namespaces.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "items": "Items is the list of Namespace objects in the list. 
More info: http://releases.k8s.io/release-1.3/docs/user-guide/namespaces.md", -} - -func (NamespaceList) SwaggerDoc() map[string]string { - return map_NamespaceList -} - -var map_NamespaceSpec = map[string]string{ - "": "NamespaceSpec describes the attributes on a Namespace.", - "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: http://releases.k8s.io/release-1.3/docs/design/namespaces.md#finalizers", -} - -func (NamespaceSpec) SwaggerDoc() map[string]string { - return map_NamespaceSpec -} - -var map_NamespaceStatus = map[string]string{ - "": "NamespaceStatus is information about the current status of a Namespace.", - "phase": "Phase is the current lifecycle phase of the namespace. More info: http://releases.k8s.io/release-1.3/docs/design/namespaces.md#phases", -} - -func (NamespaceStatus) SwaggerDoc() map[string]string { - return map_NamespaceStatus -} - -var map_Node = map[string]string{ - "": "Node is a worker node in Kubernetes, formerly known as minion. Each node will have a unique identifier in the cache (i.e. in etcd).", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of a node. http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the node. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", -} - -func (Node) SwaggerDoc() map[string]string { - return map_Node -} - -var map_NodeAddress = map[string]string{ - "": "NodeAddress contains information for the node's address.", - "type": "Node address type, one of Hostname, ExternalIP or InternalIP.", - "address": "The node address.", -} - -func (NodeAddress) SwaggerDoc() map[string]string { - return map_NodeAddress -} - -var map_NodeAffinity = map[string]string{ - "": "Node affinity is a group of node affinity scheduling rules.", - "requiredDuringSchedulingIgnoredDuringExecution": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.", - "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.", -} - -func (NodeAffinity) SwaggerDoc() map[string]string { - return map_NodeAffinity -} - -var map_NodeCondition = map[string]string{ - "": "NodeCondition contains condition information for a node.", - "type": "Type of node condition.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastHeartbeatTime": "Last time we got an update on a given condition.", - "lastTransitionTime": "Last time the condition transitioned from one status to another.", - "reason": "(brief) reason for the condition's last transition.", - "message": "Human readable message indicating details about last transition.", -} - -func (NodeCondition) SwaggerDoc() map[string]string { - return map_NodeCondition -} - -var map_NodeDaemonEndpoints = map[string]string{ - "": "NodeDaemonEndpoints lists ports opened by daemons running on the Node.", - "kubeletEndpoint": "Endpoint on which Kubelet is listening.", -} - -func (NodeDaemonEndpoints) SwaggerDoc() map[string]string { - return map_NodeDaemonEndpoints -} - -var map_NodeList = map[string]string{ - "": "NodeList is the whole list of all Nodes which have been registered with master.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "items": "List of nodes", -} - -func (NodeList) SwaggerDoc() map[string]string { - return map_NodeList -} - -var map_NodeProxyOptions = map[string]string{ - "": "NodeProxyOptions is the query options to a Node's proxy call.", - "path": "Path is the URL path to use for the current proxy request to node.", -} - -func (NodeProxyOptions) SwaggerDoc() map[string]string { - return map_NodeProxyOptions -} - -var map_NodeSelector = map[string]string{ - "": "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.", - "nodeSelectorTerms": "Required. A list of node selector terms. The terms are ORed.", -} - -func (NodeSelector) SwaggerDoc() map[string]string { - return map_NodeSelector -} - -var map_NodeSelectorRequirement = map[string]string{ - "": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", - "key": "The label key that the selector applies to.", - "operator": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.", - "values": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.", -} - -func (NodeSelectorRequirement) SwaggerDoc() map[string]string { - return map_NodeSelectorRequirement -} - -var map_NodeSelectorTerm = map[string]string{ - "": "A null or empty node selector term matches no objects.", - "matchExpressions": "Required. A list of node selector requirements. 
The requirements are ANDed.", -} - -func (NodeSelectorTerm) SwaggerDoc() map[string]string { - return map_NodeSelectorTerm -} - -var map_NodeSpec = map[string]string{ - "": "NodeSpec describes the attributes that a node is created with.", - "podCIDR": "PodCIDR represents the pod IP range assigned to the node.", - "externalID": "External ID of the node assigned by some machine database (e.g. a cloud provider). Deprecated.", - "providerID": "ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>", - "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: http://releases.k8s.io/release-1.3/docs/admin/node.md#manual-node-administration", -} - -func (NodeSpec) SwaggerDoc() map[string]string { - return map_NodeSpec -} - -var map_NodeStatus = map[string]string{ - "": "NodeStatus is information about the current status of a node.", - "capacity": "Capacity represents the total resources of a node. More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#capacity for more details.", - "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", - "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: http://releases.k8s.io/release-1.3/docs/admin/node.md#node-phase", - "conditions": "Conditions is an array of current observed node conditions. More info: http://releases.k8s.io/release-1.3/docs/admin/node.md#node-condition", - "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: http://releases.k8s.io/release-1.3/docs/admin/node.md#node-addresses", - "daemonEndpoints": "Endpoints of daemons running on the Node.", - "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: http://releases.k8s.io/release-1.3/docs/admin/node.md#node-info", - "images": "List of container images on this node", - "volumesInUse": "List of attachable volumes in use (mounted) by the node.", - "volumesAttached": "List of volumes that are attached to the node.", -} - -func (NodeStatus) SwaggerDoc() map[string]string { - return map_NodeStatus -} - -var map_NodeSystemInfo = map[string]string{ - "": "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.", - "machineID": "Machine ID reported by the node.", - "systemUUID": "System UUID reported by the node.", - "bootID": "Boot ID reported by the node.", - "kernelVersion": "Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).", - "osImage": "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).", - "containerRuntimeVersion": "ContainerRuntime Version reported by the node through runtime remote API (e.g. 
docker://1.5.0).", - "kubeletVersion": "Kubelet Version reported by the node.", - "kubeProxyVersion": "KubeProxy Version reported by the node.", - "operatingSystem": "The Operating System reported by the node", - "architecture": "The Architecture reported by the node", -} - -func (NodeSystemInfo) SwaggerDoc() map[string]string { - return map_NodeSystemInfo -} - -var map_ObjectFieldSelector = map[string]string{ - "": "ObjectFieldSelector selects an APIVersioned field of an object.", - "apiVersion": "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".", - "fieldPath": "Path of the field to select in the specified API version.", -} - -func (ObjectFieldSelector) SwaggerDoc() map[string]string { - return map_ObjectFieldSelector -} - -var map_ObjectMeta = map[string]string{ - "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", - "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names", - "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#idempotency", - "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://releases.k8s.io/release-1.3/docs/user-guide/namespaces.md", - "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", - "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#uids", - "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. 
Value must be treated as opaque by clients and . More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#concurrency-control-and-consistency", - "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", - "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource will be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. Once the resource is deleted in the API, the Kubelet will send a hard termination signal to the container. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", - "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md", - "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://releases.k8s.io/release-1.3/docs/user-guide/annotations.md", - "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", - "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", -} - -func (ObjectMeta) SwaggerDoc() map[string]string { - return map_ObjectMeta -} - -var map_ObjectReference = map[string]string{ - "": "ObjectReference contains enough information to let you inspect or modify the referred object.", - "kind": "Kind of the referent. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "namespace": "Namespace of the referent. 
More info: http://releases.k8s.io/release-1.3/docs/user-guide/namespaces.md", - "name": "Name of the referent. More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names", - "uid": "UID of the referent. More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#uids", - "apiVersion": "API version of the referent.", - "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#concurrency-control-and-consistency", - "fieldPath": "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.", -} - -func (ObjectReference) SwaggerDoc() map[string]string { - return map_ObjectReference -} - -var map_OwnerReference = map[string]string{ - "": "OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field.", - "apiVersion": "API version of the referent.", - "kind": "Kind of the referent. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "name": "Name of the referent. More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names", - "uid": "UID of the referent. More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#uids", - "controller": "If true, this reference points to the managing controller.", -} - -func (OwnerReference) SwaggerDoc() map[string]string { - return map_OwnerReference -} - -var map_PersistentVolume = map[string]string{ - "": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#persistent-volumes", - "status": "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#persistent-volumes", -} - -func (PersistentVolume) SwaggerDoc() map[string]string { - return map_PersistentVolume -} - -var map_PersistentVolumeClaim = map[string]string{ - "": "PersistentVolumeClaim is a user's request for and claim to a persistent volume", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the desired characteristics of a volume requested by a pod author. More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", - "status": "Status represents the current information/status of a persistent volume claim. Read-only. 
More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", -} - -func (PersistentVolumeClaim) SwaggerDoc() map[string]string { - return map_PersistentVolumeClaim -} - -var map_PersistentVolumeClaimList = map[string]string{ - "": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "items": "A list of persistent volume claims. More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", -} - -func (PersistentVolumeClaimList) SwaggerDoc() map[string]string { - return map_PersistentVolumeClaimList -} - -var map_PersistentVolumeClaimSpec = map[string]string{ - "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", - "accessModes": "AccessModes contains the desired access modes the volume should have. More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#access-modes-1", - "selector": "A label query over volumes to consider for binding.", - "resources": "Resources represents the minimum resources the volume should have. More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#resources", - "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", -} - -func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { - return map_PersistentVolumeClaimSpec -} - -var map_PersistentVolumeClaimStatus = map[string]string{ - "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", - "phase": "Phase represents the current phase of PersistentVolumeClaim.", - "accessModes": "AccessModes contains the actual access modes the volume backing the PVC has. More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#access-modes-1", - "capacity": "Represents the actual resources of the underlying volume.", -} - -func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string { - return map_PersistentVolumeClaimStatus -} - -var map_PersistentVolumeClaimVolumeSource = map[string]string{ - "": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).", - "claimName": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", - "readOnly": "Will force the ReadOnly setting in VolumeMounts. Default false.", -} - -func (PersistentVolumeClaimVolumeSource) SwaggerDoc() map[string]string { - return map_PersistentVolumeClaimVolumeSource -} - -var map_PersistentVolumeList = map[string]string{ - "": "PersistentVolumeList is a list of PersistentVolume items.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "items": "List of persistent volumes. 
More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md", -} - -func (PersistentVolumeList) SwaggerDoc() map[string]string { - return map_PersistentVolumeList -} - -var map_PersistentVolumeSource = map[string]string{ - "": "PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. Exactly one of its members must be set.", - "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#gcepersistentdisk", - "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#awselasticblockstore", - "hostPath": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#hostpath", - "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/release-1.3/examples/glusterfs/README.md", - "nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#nfs", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md", - "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/release-1.3/examples/mysql-cinder-pd/README.md", - "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", - "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", - "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running", - "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using a exec based plugin. This is an alpha feature and may change in future.", - "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", -} - -func (PersistentVolumeSource) SwaggerDoc() map[string]string { - return map_PersistentVolumeSource -} - -var map_PersistentVolumeSpec = map[string]string{ - "": "PersistentVolumeSpec is the specification of a persistent volume.", - "capacity": "A description of the persistent volume's resources and capacity. More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#capacity", - "accessModes": "AccessModes contains all ways the volume can be mounted. 
More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#access-modes", - "claimRef": "ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#binding", - "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#recycling-policy", -} - -func (PersistentVolumeSpec) SwaggerDoc() map[string]string { - return map_PersistentVolumeSpec -} - -var map_PersistentVolumeStatus = map[string]string{ - "": "PersistentVolumeStatus is the current status of a persistent volume.", - "phase": "Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#phase", - "message": "A human-readable message indicating details about why the volume is in this state.", - "reason": "Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", -} - -func (PersistentVolumeStatus) SwaggerDoc() map[string]string { - return map_PersistentVolumeStatus -} - -var map_Pod = map[string]string{ - "": "Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the pod. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", -} - -func (Pod) SwaggerDoc() map[string]string { - return map_Pod -} - -var map_PodAffinity = map[string]string{ - "": "Pod affinity is a group of inter pod affinity scheduling rules.", - "requiredDuringSchedulingIgnoredDuringExecution": "NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system will try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:\"requiredDuringSchedulingRequiredDuringExecution,omitempty\"` If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. 
all terms must be satisfied.", - "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", -} - -func (PodAffinity) SwaggerDoc() map[string]string { - return map_PodAffinity -} - -var map_PodAffinityTerm = map[string]string{ - "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running", - "labelSelector": "A label query over a set of resources, in this case pods.", - "namespaces": "namespaces specifies which namespaces the labelSelector applies to (matches against); nil list means \"this pod's namespace,\" empty list means \"all namespaces\" The json tag here is not \"omitempty\" since we need to distinguish nil and empty. See https://golang.org/pkg/encoding/json/#Marshal for more details.", - "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as \"all topologies\" (\"all topologies\" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.", -} - -func (PodAffinityTerm) SwaggerDoc() map[string]string { - return map_PodAffinityTerm -} - -var map_PodAntiAffinity = map[string]string{ - "": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.", - "requiredDuringSchedulingIgnoredDuringExecution": "NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system will try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:\"requiredDuringSchedulingRequiredDuringExecution,omitempty\"` If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. 
due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", - "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", -} - -func (PodAntiAffinity) SwaggerDoc() map[string]string { - return map_PodAntiAffinity -} - -var map_PodAttachOptions = map[string]string{ - "": "PodAttachOptions is the query options to a Pod's remote attach call.", - "stdin": "Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.", - "stdout": "Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true.", - "stderr": "Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true.", - "tty": "TTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false.", - "container": "The container in which to execute the command. Defaults to only container if there is only one container in the pod.", -} - -func (PodAttachOptions) SwaggerDoc() map[string]string { - return map_PodAttachOptions -} - -var map_PodCondition = map[string]string{ - "": "PodCondition contains details for the current condition of this pod.", - "type": "Type is the type of the condition. Currently only Ready. More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#pod-conditions", - "status": "Status is the status of the condition. Can be True, False, Unknown. More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#pod-conditions", - "lastProbeTime": "Last time we probed the condition.", - "lastTransitionTime": "Last time the condition transitioned from one status to another.", - "reason": "Unique, one-word, CamelCase reason for the condition's last transition.", - "message": "Human-readable message indicating details about last transition.", -} - -func (PodCondition) SwaggerDoc() map[string]string { - return map_PodCondition -} - -var map_PodExecOptions = map[string]string{ - "": "PodExecOptions is the query options to a Pod's remote exec call.", - "stdin": "Redirect the standard input stream of the pod for this call. Defaults to false.", - "stdout": "Redirect the standard output stream of the pod for this call. Defaults to true.", - "stderr": "Redirect the standard error stream of the pod for this call. Defaults to true.", - "tty": "TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.", - "container": "Container in which to execute the command. Defaults to only container if there is only one container in the pod.", - "command": "Command is the remote command to execute. argv array. 
Not executed within a shell.", -} - -func (PodExecOptions) SwaggerDoc() map[string]string { - return map_PodExecOptions -} - -var map_PodList = map[string]string{ - "": "PodList is a list of Pods.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "items": "List of pods. More info: http://releases.k8s.io/release-1.3/docs/user-guide/pods.md", -} - -func (PodList) SwaggerDoc() map[string]string { - return map_PodList -} - -var map_PodLogOptions = map[string]string{ - "": "PodLogOptions is the query options for a Pod's logs REST call.", - "container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.", - "follow": "Follow the log stream of the pod. Defaults to false.", - "previous": "Return previous terminated container logs. Defaults to false.", - "sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", - "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", - "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", - "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", - "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", -} - -func (PodLogOptions) SwaggerDoc() map[string]string { - return map_PodLogOptions -} - -var map_PodProxyOptions = map[string]string{ - "": "PodProxyOptions is the query options to a Pod's proxy call.", - "path": "Path is the URL path to use for the current proxy request to pod.", -} - -func (PodProxyOptions) SwaggerDoc() map[string]string { - return map_PodProxyOptions -} - -var map_PodSecurityContext = map[string]string{ - "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", - "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - "runAsNonRoot": "Indicates that the container must run as a non-root user. 
If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.", - "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", -} - -func (PodSecurityContext) SwaggerDoc() map[string]string { - return map_PodSecurityContext -} - -var map_PodSpec = map[string]string{ - "": "PodSpec is a description of a pod.", - "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md", - "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. More info: http://releases.k8s.io/release-1.3/docs/user-guide/containers.md", - "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#restartpolicy", - "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", - "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", - "dnsPolicy": "Set DNS policy for containers within the pod. One of 'ClusterFirst' or 'Default'. Defaults to \"ClusterFirst\".", - "nodeSelector": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: http://releases.k8s.io/release-1.3/docs/user-guide/node-selection/README.md", - "serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: http://releases.k8s.io/release-1.3/docs/design/service_accounts.md", - "serviceAccount": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", - "nodeName": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.", - "hostNetwork": "Host networking requested for this pod. Use the host's network namespace. 
If this option is set, the ports that will be used must be specified. Default to false.", - "hostPID": "Use the host's pid namespace. Optional: Default to false.", - "hostIPC": "Use the host's ipc namespace. Optional: Default to false.", - "securityContext": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", - "imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: http://releases.k8s.io/release-1.3/docs/user-guide/images.md#specifying-imagepullsecrets-on-a-pod", - "hostname": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.", - "subdomain": "If specified, the fully qualified Pod hostname will be \"<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>\". If not specified, the pod will not have a domainname at all.", -} - -func (PodSpec) SwaggerDoc() map[string]string { - return map_PodSpec -} - -var map_PodStatus = map[string]string{ - "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system.", - "phase": "Current condition of the pod. More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#pod-phase", - "conditions": "Current service state of pod. More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#pod-conditions", - "message": "A human readable message indicating details about why the pod is in this condition.", - "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'", - "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", - "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", - "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", - "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#container-statuses", -} - -func (PodStatus) SwaggerDoc() map[string]string { - return map_PodStatus -} - -var map_PodStatusResult = map[string]string{ - "": "PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", -} - -func (PodStatusResult) SwaggerDoc() map[string]string { - return map_PodStatusResult -} - -var map_PodTemplate = map[string]string{ - "": "PodTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "template": "Template defines the pods that will be created from this pod template. 
http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", -} - -func (PodTemplate) SwaggerDoc() map[string]string { - return map_PodTemplate -} - -var map_PodTemplateList = map[string]string{ - "": "PodTemplateList is a list of PodTemplates.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "items": "List of pod templates", -} - -func (PodTemplateList) SwaggerDoc() map[string]string { - return map_PodTemplateList -} - -var map_PodTemplateSpec = map[string]string{ - "": "PodTemplateSpec describes the data a pod should have when created from a template", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the pod. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", -} - -func (PodTemplateSpec) SwaggerDoc() map[string]string { - return map_PodTemplateSpec -} - -var map_Preconditions = map[string]string{ - "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", - "uid": "Specifies the target UID.", -} - -func (Preconditions) SwaggerDoc() map[string]string { - return map_Preconditions -} - -var map_PreferredSchedulingTerm = map[string]string{ - "": "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).", - "weight": "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.", - "preference": "A node selector term, associated with the corresponding weight.", -} - -func (PreferredSchedulingTerm) SwaggerDoc() map[string]string { - return map_PreferredSchedulingTerm -} - -var map_Probe = map[string]string{ - "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", - "initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#container-probes", - "timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: http://releases.k8s.io/release-1.3/docs/user-guide/pod-states.md#container-probes", - "periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", - "successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.", - "failureThreshold": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", -} - -func (Probe) SwaggerDoc() map[string]string { - return map_Probe -} - -var map_RBDVolumeSource = map[string]string{ - "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", - "monitors": "A collection of Ceph monitors. More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md#how-to-use-it", - "image": "The rados image name. More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md#how-to-use-it", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. 
Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#rbd", - "pool": "The rados pool name. Default is rbd. More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md#how-to-use-it.", - "user": "The rados user name. Default is admin. More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md#how-to-use-it", - "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md#how-to-use-it", - "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md#how-to-use-it", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md#how-to-use-it", -} - -func (RBDVolumeSource) SwaggerDoc() map[string]string { - return map_RBDVolumeSource -} - -var map_RangeAllocation = map[string]string{ - "": "RangeAllocation is not a public type.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "range": "Range is string that identifies the range represented by 'data'.", - "data": "Data is a bit array containing all allocated addresses in the previous segment.", -} - -func (RangeAllocation) SwaggerDoc() map[string]string { - return map_RangeAllocation -} - -var map_ReplicationController = map[string]string{ - "": "ReplicationController represents the configuration of a replication controller.", - "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the replication controller. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", -} - -func (ReplicationController) SwaggerDoc() map[string]string { - return map_ReplicationController -} - -var map_ReplicationControllerList = map[string]string{ - "": "ReplicationControllerList is a collection of replication controllers.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "items": "List of replication controllers. More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md", -} - -func (ReplicationControllerList) SwaggerDoc() map[string]string { - return map_ReplicationControllerList -} - -var map_ReplicationControllerSpec = map[string]string{ - "": "ReplicationControllerSpec is the specification of a replication controller.", - "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. 
More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#what-is-a-replication-controller", - "selector": "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors", - "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#pod-template", -} - -func (ReplicationControllerSpec) SwaggerDoc() map[string]string { - return map_ReplicationControllerSpec -} - -var map_ReplicationControllerStatus = map[string]string{ - "": "ReplicationControllerStatus represents the current status of a replication controller.", - "replicas": "Replicas is the most recently observed number of replicas. More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#what-is-a-replication-controller", - "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replication controller.", - "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed replication controller.", -} - -func (ReplicationControllerStatus) SwaggerDoc() map[string]string { - return map_ReplicationControllerStatus -} - -var map_ResourceFieldSelector = map[string]string{ - "": "ResourceFieldSelector represents container resources (cpu, memory) and their output format", - "containerName": "Container name: required for volumes, optional for env vars", - "resource": "Required: resource to select", - "divisor": "Specifies the output format of the exposed resources, defaults to \"1\"", -} - -func (ResourceFieldSelector) SwaggerDoc() map[string]string { - return map_ResourceFieldSelector -} - -var map_ResourceQuota = map[string]string{ - "": "ResourceQuota sets aggregate quota restrictions enforced per namespace", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the desired quota. http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", - "status": "Status defines the actual enforced quota and its current usage. http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", -} - -func (ResourceQuota) SwaggerDoc() map[string]string { - return map_ResourceQuota -} - -var map_ResourceQuotaList = map[string]string{ - "": "ResourceQuotaList is a list of ResourceQuota items.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "items": "Items is a list of ResourceQuota objects. More info: http://releases.k8s.io/release-1.3/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota", -} - -func (ResourceQuotaList) SwaggerDoc() map[string]string { - return map_ResourceQuotaList -} - -var map_ResourceQuotaSpec = map[string]string{ - "": "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.", - "hard": "Hard is the set of desired hard limits for each named resource. 
More info: http://releases.k8s.io/release-1.3/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota", - "scopes": "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.", -} - -func (ResourceQuotaSpec) SwaggerDoc() map[string]string { - return map_ResourceQuotaSpec -} - -var map_ResourceQuotaStatus = map[string]string{ - "": "ResourceQuotaStatus defines the enforced hard limits and observed use.", - "hard": "Hard is the set of enforced hard limits for each named resource. More info: http://releases.k8s.io/release-1.3/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota", - "used": "Used is the current observed total usage of the resource in the namespace.", -} - -func (ResourceQuotaStatus) SwaggerDoc() map[string]string { - return map_ResourceQuotaStatus -} - -var map_ResourceRequirements = map[string]string{ - "": "ResourceRequirements describes the compute resource requirements.", - "limits": "Limits describes the maximum amount of compute resources allowed. More info: http://releases.k8s.io/release-1.3/docs/design/resources.md#resource-specifications", - "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: http://releases.k8s.io/release-1.3/docs/design/resources.md#resource-specifications", -} - -func (ResourceRequirements) SwaggerDoc() map[string]string { - return map_ResourceRequirements -} - -var map_SELinuxOptions = map[string]string{ - "": "SELinuxOptions are the labels to be applied to the container", - "user": "User is a SELinux user label that applies to the container.", - "role": "Role is a SELinux role label that applies to the container.", - "type": "Type is a SELinux type label that applies to the container.", - "level": "Level is SELinux level label that applies to the container.", -} - -func (SELinuxOptions) SwaggerDoc() map[string]string { - return map_SELinuxOptions -} - -var map_Secret = map[string]string{ - "": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "data": "Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN or leading dot followed by valid DNS_SUBDOMAIN. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4", - "type": "Used to facilitate programmatic handling of secret data.", -} - -func (Secret) SwaggerDoc() map[string]string { - return map_Secret -} - -var map_SecretKeySelector = map[string]string{ - "": "SecretKeySelector selects a key of a Secret.", - "key": "The key of the secret to select from. Must be a valid secret key.", -} - -func (SecretKeySelector) SwaggerDoc() map[string]string { - return map_SecretKeySelector -} - -var map_SecretList = map[string]string{ - "": "SecretList is a list of Secret.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "items": "Items is a list of secret objects. 
More info: http://releases.k8s.io/release-1.3/docs/user-guide/secrets.md", -} - -func (SecretList) SwaggerDoc() map[string]string { - return map_SecretList -} - -var map_SecretVolumeSource = map[string]string{ - "": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.", - "secretName": "Name of the secret in the pod's namespace to use. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#secrets", - "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error. Paths must be relative and may not contain the '..' path or start with '..'.", -} - -func (SecretVolumeSource) SwaggerDoc() map[string]string { - return map_SecretVolumeSource -} - -var map_SecurityContext = map[string]string{ - "": "SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.", - "capabilities": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.", - "privileged": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.", - "seLinuxOptions": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. 
Default is false.", -} - -func (SecurityContext) SwaggerDoc() map[string]string { - return map_SecurityContext -} - -var map_SerializedReference = map[string]string{ - "": "SerializedReference is a reference to serialized object.", - "reference": "The reference to an object in the system.", -} - -func (SerializedReference) SwaggerDoc() map[string]string { - return map_SerializedReference -} - -var map_Service = map[string]string{ - "": "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of a service. http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", -} - -func (Service) SwaggerDoc() map[string]string { - return map_Service -} - -var map_ServiceAccount = map[string]string{ - "": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: http://releases.k8s.io/release-1.3/docs/user-guide/secrets.md", - "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: http://releases.k8s.io/release-1.3/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret", -} - -func (ServiceAccount) SwaggerDoc() map[string]string { - return map_ServiceAccount -} - -var map_ServiceAccountList = map[string]string{ - "": "ServiceAccountList is a list of ServiceAccount objects", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "items": "List of ServiceAccounts. More info: http://releases.k8s.io/release-1.3/docs/design/service_accounts.md#service-accounts", -} - -func (ServiceAccountList) SwaggerDoc() map[string]string { - return map_ServiceAccountList -} - -var map_ServiceList = map[string]string{ - "": "ServiceList holds a list of services.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "items": "List of services", -} - -func (ServiceList) SwaggerDoc() map[string]string { - return map_ServiceList -} - -var map_ServicePort = map[string]string{ - "": "ServicePort contains information on service's port.", - "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.", - "protocol": "The IP protocol for this port. Supports \"TCP\" and \"UDP\". 
Default is TCP.", - "port": "The port that will be exposed by this service.", - "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: http://releases.k8s.io/release-1.3/docs/user-guide/services.md#defining-a-service", - "nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: http://releases.k8s.io/release-1.3/docs/user-guide/services.md#type--nodeport", -} - -func (ServicePort) SwaggerDoc() map[string]string { - return map_ServicePort -} - -var map_ServiceProxyOptions = map[string]string{ - "": "ServiceProxyOptions is the query options to a Service's proxy call.", - "path": "Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.", -} - -func (ServiceProxyOptions) SwaggerDoc() map[string]string { - return map_ServiceProxyOptions -} - -var map_ServiceSpec = map[string]string{ - "": "ServiceSpec describes the attributes that a user creates on a service.", - "ports": "The list of ports that are exposed by this service. More info: http://releases.k8s.io/release-1.3/docs/user-guide/services.md#virtual-ips-and-service-proxies", - "selector": "This service will route traffic to pods having labels matching this selector. Label keys and values that must match in order to receive traffic for this service. If empty, all pods are selected, if not specified, endpoints must be manually specified. More info: http://releases.k8s.io/release-1.3/docs/user-guide/services.md#overview", - "clusterIP": "ClusterIP is usually assigned by the master and is the IP address of the service. If specified, it will be allocated to the service if it is unused or else creation of the service will fail. Valid values are None, empty string (\"\"), or a valid IP address. 'None' can be specified for a headless service when proxying is not required. Cannot be updated. More info: http://releases.k8s.io/release-1.3/docs/user-guide/services.md#virtual-ips-and-service-proxies", - "type": "Type of exposed service. Must be ClusterIP, NodePort, or LoadBalancer. Defaults to ClusterIP. More info: http://releases.k8s.io/release-1.3/docs/user-guide/services.md#external-services", - "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system. A previous form of this functionality exists as the deprecatedPublicIPs field. 
When using this field, callers should also clear the deprecatedPublicIPs field.", - "deprecatedPublicIPs": "deprecatedPublicIPs is deprecated and replaced by the externalIPs field with almost the exact same semantics. This field is retained in the v1 API for compatibility until at least 8/20/2016. It will be removed from any new API revisions. If both deprecatedPublicIPs *and* externalIPs are set, deprecatedPublicIPs is used.", - "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: http://releases.k8s.io/release-1.3/docs/user-guide/services.md#virtual-ips-and-service-proxies", - "loadBalancerIP": "Only applies to Service Type: LoadBalancer. LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", - "loadBalancerSourceRanges": "If specified and supported by the platform, traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature. More info: http://releases.k8s.io/release-1.3/docs/user-guide/services-firewalls.md", -} - -func (ServiceSpec) SwaggerDoc() map[string]string { - return map_ServiceSpec -} - -var map_ServiceStatus = map[string]string{ - "": "ServiceStatus represents the current status of a service.", - "loadBalancer": "LoadBalancer contains the current status of the load-balancer, if one is present.", -} - -func (ServiceStatus) SwaggerDoc() map[string]string { - return map_ServiceStatus -} - -var map_TCPSocketAction = map[string]string{ - "": "TCPSocketAction describes an action based on opening a socket", - "port": "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.", -} - -func (TCPSocketAction) SwaggerDoc() map[string]string { - return map_TCPSocketAction -} - -var map_Taint = map[string]string{ - "": "The node this Taint is attached to has the effect \"effect\" on any pod that does not tolerate the Taint.", - "key": "Required. The taint key to be applied to a node.", - "value": "Required. The taint value corresponding to the taint key.", - "effect": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule and PreferNoSchedule.", -} - -func (Taint) SwaggerDoc() map[string]string { - return map_Taint -} - -var map_Toleration = map[string]string{ - "": "The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.", - "key": "Required. Key is the taint key that the toleration applies to.", - "operator": "operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", - "value": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.", - "effect": "Effect indicates the taint effect to match. Empty means match all taint effects. 
When specified, allowed values are NoSchedule and PreferNoSchedule.", -} - -func (Toleration) SwaggerDoc() map[string]string { - return map_Toleration -} - -var map_Volume = map[string]string{ - "": "Volume represents a named volume in a pod that may be accessed by any container in the pod.", - "name": "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names", -} - -func (Volume) SwaggerDoc() map[string]string { - return map_Volume -} - -var map_VolumeMount = map[string]string{ - "": "VolumeMount describes a mounting of a Volume within a container.", - "name": "This must match the Name of a Volume.", - "readOnly": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", - "mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.", - "subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", -} - -func (VolumeMount) SwaggerDoc() map[string]string { - return map_VolumeMount -} - -var map_VolumeSource = map[string]string{ - "": "Represents the source of a volume to mount. Only one of its members may be specified.", - "hostPath": "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#hostpath", - "emptyDir": "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#emptydir", - "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#gcepersistentdisk", - "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#awselasticblockstore", - "gitRepo": "GitRepo represents a git repository at a particular revision.", - "secret": "Secret represents a secret that should populate this volume. More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#secrets", - "nfs": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: http://releases.k8s.io/release-1.3/docs/user-guide/volumes.md#nfs", - "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/release-1.3/examples/iscsi/README.md", - "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/release-1.3/examples/glusterfs/README.md", - "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://releases.k8s.io/release-1.3/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. 
More info: http://releases.k8s.io/release-1.3/examples/rbd/README.md", - "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelet's host machine. More info: http://releases.k8s.io/release-1.3/examples/mysql-cinder-pd/README.md", - "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", - "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", - "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", - "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", - "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - "configMap": "ConfigMap represents a configMap that should populate this volume", - "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelet's host machine", -} - -func (VolumeSource) SwaggerDoc() map[string]string { - return map_VolumeSource -} - -var map_VsphereVirtualDiskVolumeSource = map[string]string{ - "": "Represents a vSphere volume resource.", - "volumePath": "Path that identifies vSphere volume vmdk", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", -} - -func (VsphereVirtualDiskVolumeSource) SwaggerDoc() map[string]string { - return map_VsphereVirtualDiskVolumeSource -} - -var map_WeightedPodAffinityTerm = map[string]string{ - "": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", - "weight": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", - "podAffinityTerm": "Required. A pod affinity term, associated with the corresponding weight.", -} - -func (WeightedPodAffinityTerm) SwaggerDoc() map[string]string { - return map_WeightedPodAffinityTerm -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go b/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go deleted file mode 100644 index 1b7051073..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package validation has functions for validating the correctness of api -// objects and explaining what is wrong with them when they aren't valid. 
-package validation diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/events.go b/vendor/k8s.io/kubernetes/pkg/api/validation/events.go deleted file mode 100644 index 0a5de5443..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/events.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/util/validation" - "k8s.io/kubernetes/pkg/util/validation/field" -) - -// ValidateEvent makes sure that the event makes sense. -func ValidateEvent(event *api.Event) field.ErrorList { - allErrs := field.ErrorList{} - // There is no namespace required for node or persistent volume. - // However, older client code accidentally sets event.Namespace - // to api.NamespaceDefault, so we accept that too, but "" is preferred. - if (event.InvolvedObject.Kind == "Node" || event.InvolvedObject.Kind == "PersistentVolume") && - event.Namespace != api.NamespaceDefault && - event.Namespace != "" { - allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "not allowed for node")) - } - if event.InvolvedObject.Kind != "Node" && - event.InvolvedObject.Kind != "PersistentVolume" && - event.Namespace != event.InvolvedObject.Namespace { - allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match involvedObject")) - } - for _, msg := range validation.IsDNS1123Subdomain(event.Namespace) { - allErrs = append(allErrs, field.Invalid(field.NewPath("namespace"), event.Namespace, msg)) - } - return allErrs -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/name.go b/vendor/k8s.io/kubernetes/pkg/api/validation/name.go deleted file mode 100644 index cf2eb8bb2..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/name.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package validation - -import ( - "fmt" - "strings" -) - -// NameMayNotBe specifies strings that cannot be used as names specified as path segments (like the REST API or etcd store) -var NameMayNotBe = []string{".", ".."} - -// NameMayNotContain specifies substrings that cannot be used in names specified as path segments (like the REST API or etcd store) -var NameMayNotContain = []string{"/", "%"} - -// IsValidPathSegmentName validates the name can be safely encoded as a path segment -func IsValidPathSegmentName(name string) []string { - for _, illegalName := range NameMayNotBe { - if name == illegalName { - return []string{fmt.Sprintf(`may not be '%s'`, illegalName)} - } - } - - for _, illegalContent := range NameMayNotContain { - if strings.Contains(name, illegalContent) { - return []string{fmt.Sprintf(`may not contain '%s'`, illegalContent)} - } - } - - return nil -} - -// IsValidPathSegmentPrefix validates the name can be used as a prefix for a name which will be encoded as a path segment -// It does not check for exact matches with disallowed names, since an arbitrary suffix might make the name valid -func IsValidPathSegmentPrefix(name string) []string { - for _, illegalContent := range NameMayNotContain { - if strings.Contains(name, illegalContent) { - return []string{fmt.Sprintf(`may not contain '%s'`, illegalContent)} - } - } - - return nil -} - -// ValidatePathSegmentName validates the name can be safely encoded as a path segment -func ValidatePathSegmentName(name string, prefix bool) []string { - if prefix { - return IsValidPathSegmentPrefix(name) - } else { - return IsValidPathSegmentName(name) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/schema.go b/vendor/k8s.io/kubernetes/pkg/api/validation/schema.go deleted file mode 100644 index 345666c97..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/schema.go +++ /dev/null @@ -1,367 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package validation - -import ( - "encoding/json" - "fmt" - "reflect" - "regexp" - "strings" - - "github.com/emicklei/go-restful/swagger" - "github.com/golang/glog" - apiutil "k8s.io/kubernetes/pkg/api/util" - "k8s.io/kubernetes/pkg/runtime" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/yaml" -) - -type InvalidTypeError struct { - ExpectedKind reflect.Kind - ObservedKind reflect.Kind - FieldName string -} - -func (i *InvalidTypeError) Error() string { - return fmt.Sprintf("expected type %s, for field %s, got %s", i.ExpectedKind.String(), i.FieldName, i.ObservedKind.String()) -} - -func NewInvalidTypeError(expected reflect.Kind, observed reflect.Kind, fieldName string) error { - return &InvalidTypeError{expected, observed, fieldName} -} - -// TypeNotFoundError is returned when specified type -// can not found in schema -type TypeNotFoundError string - -func (tnfe TypeNotFoundError) Error() string { - return fmt.Sprintf("couldn't find type: %s", string(tnfe)) -} - -// Schema is an interface that knows how to validate an API object serialized to a byte array. -type Schema interface { - ValidateBytes(data []byte) error -} - -type NullSchema struct{} - -func (NullSchema) ValidateBytes(data []byte) error { return nil } - -type SwaggerSchema struct { - api swagger.ApiDeclaration - delegate Schema // For delegating to other api groups -} - -func NewSwaggerSchemaFromBytes(data []byte, factory Schema) (Schema, error) { - schema := &SwaggerSchema{} - err := json.Unmarshal(data, &schema.api) - if err != nil { - return nil, err - } - schema.delegate = factory - return schema, nil -} - -// validateList unpacks a list and validate every item in the list. -// It return nil if every item is ok. -// Otherwise it return an error list contain errors of every item. -func (s *SwaggerSchema) validateList(obj map[string]interface{}) []error { - items, exists := obj["items"] - if !exists { - return []error{fmt.Errorf("no items field in %#v", obj)} - } - return s.validateItems(items) -} - -func (s *SwaggerSchema) validateItems(items interface{}) []error { - allErrs := []error{} - itemList, ok := items.([]interface{}) - if !ok { - return append(allErrs, fmt.Errorf("items isn't a slice")) - } - for i, item := range itemList { - fields, ok := item.(map[string]interface{}) - if !ok { - allErrs = append(allErrs, fmt.Errorf("items[%d] isn't a map[string]interface{}", i)) - continue - } - groupVersion := fields["apiVersion"] - if groupVersion == nil { - allErrs = append(allErrs, fmt.Errorf("items[%d].apiVersion not set", i)) - continue - } - itemVersion, ok := groupVersion.(string) - if !ok { - allErrs = append(allErrs, fmt.Errorf("items[%d].apiVersion isn't string type", i)) - continue - } - if len(itemVersion) == 0 { - allErrs = append(allErrs, fmt.Errorf("items[%d].apiVersion is empty", i)) - } - kind := fields["kind"] - if kind == nil { - allErrs = append(allErrs, fmt.Errorf("items[%d].kind not set", i)) - continue - } - itemKind, ok := kind.(string) - if !ok { - allErrs = append(allErrs, fmt.Errorf("items[%d].kind isn't string type", i)) - continue - } - if len(itemKind) == 0 { - allErrs = append(allErrs, fmt.Errorf("items[%d].kind is empty", i)) - } - version := apiutil.GetVersion(itemVersion) - errs := s.ValidateObject(item, "", version+"."+itemKind) - if len(errs) >= 1 { - allErrs = append(allErrs, errs...) 
- } - } - - return allErrs -} - -func (s *SwaggerSchema) ValidateBytes(data []byte) error { - var obj interface{} - out, err := yaml.ToJSON(data) - if err != nil { - return err - } - data = out - if err := json.Unmarshal(data, &obj); err != nil { - return err - } - fields, ok := obj.(map[string]interface{}) - if !ok { - return fmt.Errorf("error in unmarshaling data %s", string(data)) - } - groupVersion := fields["apiVersion"] - if groupVersion == nil { - return fmt.Errorf("apiVersion not set") - } - if _, ok := groupVersion.(string); !ok { - return fmt.Errorf("apiVersion isn't string type") - } - kind := fields["kind"] - if kind == nil { - return fmt.Errorf("kind not set") - } - if _, ok := kind.(string); !ok { - return fmt.Errorf("kind isn't string type") - } - if strings.HasSuffix(kind.(string), "List") { - return utilerrors.NewAggregate(s.validateList(fields)) - } - version := apiutil.GetVersion(groupVersion.(string)) - allErrs := s.ValidateObject(obj, "", version+"."+kind.(string)) - if len(allErrs) == 1 { - return allErrs[0] - } - return utilerrors.NewAggregate(allErrs) -} - -func (s *SwaggerSchema) ValidateObject(obj interface{}, fieldName, typeName string) []error { - allErrs := []error{} - models := s.api.Models - model, ok := models.At(typeName) - - // Verify the api version matches. This is required for nested types with differing api versions because - // s.api only has schema for 1 api version (the parent object type's version). - // e.g. an extensions/v1beta1 Template embedding a /v1 Service requires the schema for the extensions/v1beta1 - // api to delegate to the schema for the /v1 api. - // Only do this for !ok objects so that cross ApiVersion vendored types take precedence. - if !ok && s.delegate != nil { - fields, mapOk := obj.(map[string]interface{}) - if !mapOk { - return append(allErrs, fmt.Errorf("field %s: expected object of type map[string]interface{}, but the actual type is %T", fieldName, obj)) - } - if delegated, err := s.delegateIfDifferentApiVersion(runtime.Unstructured{Object: fields}); delegated { - if err != nil { - allErrs = append(allErrs, err) - } - return allErrs - } - } - - if !ok { - return append(allErrs, TypeNotFoundError(typeName)) - } - properties := model.Properties - if len(properties.List) == 0 { - // The object does not have any sub-fields. - return nil - } - fields, ok := obj.(map[string]interface{}) - if !ok { - return append(allErrs, fmt.Errorf("field %s: expected object of type map[string]interface{}, but the actual type is %T", fieldName, obj)) - } - if len(fieldName) > 0 { - fieldName = fieldName + "." - } - // handle required fields - for _, requiredKey := range model.Required { - if _, ok := fields[requiredKey]; !ok { - allErrs = append(allErrs, fmt.Errorf("field %s: is required", requiredKey)) - } - } - for key, value := range fields { - details, ok := properties.At(key) - - // Special case for runtime.RawExtension and runtime.Objects because they always fail to validate - // This is because the actual values will be of some sub-type (e.g. Deployment) not the expected - // super-type (RawExtension) - if s.isGenericArray(details) { - errs := s.validateItems(value) - if len(errs) > 0 { - allErrs = append(allErrs, errs...) 
- } - continue - } - if !ok { - allErrs = append(allErrs, fmt.Errorf("found invalid field %s for %s", key, typeName)) - continue - } - if details.Type == nil && details.Ref == nil { - allErrs = append(allErrs, fmt.Errorf("could not find the type of %s from object: %v", key, details)) - } - var fieldType string - if details.Type != nil { - fieldType = *details.Type - } else { - fieldType = *details.Ref - } - if value == nil { - glog.V(2).Infof("Skipping nil field: %s", key) - continue - } - errs := s.validateField(value, fieldName+key, fieldType, &details) - if len(errs) > 0 { - allErrs = append(allErrs, errs...) - } - } - return allErrs -} - -// delegateIfDifferentApiVersion delegates the validation of an object if its ApiGroup does not match the -// current SwaggerSchema. -// First return value is true if the validation was delegated (by a different ApiGroup SwaggerSchema) -// Second return value is the result of the delegated validation if performed. -func (s *SwaggerSchema) delegateIfDifferentApiVersion(obj runtime.Unstructured) (bool, error) { - // Never delegate objects in the same ApiVersion or we will get infinite recursion - if !s.isDifferentApiVersion(obj) { - return false, nil - } - - // Convert the object back into bytes so that we can pass it to the ValidateBytes function - m, err := json.Marshal(obj.Object) - if err != nil { - return true, err - } - - // Delegate validation of this object to the correct SwaggerSchema for its ApiGroup - return true, s.delegate.ValidateBytes(m) -} - -// isDifferentApiVersion Returns true if obj lives in a different ApiVersion than the SwaggerSchema does. -// The SwaggerSchema will not be able to process objects in different ApiVersions unless they are vendored. -func (s *SwaggerSchema) isDifferentApiVersion(obj runtime.Unstructured) bool { - groupVersion := obj.GetAPIVersion() - return len(groupVersion) > 0 && s.api.ApiVersion != groupVersion -} - -// isGenericArray Returns true if p is an array of generic Objects - either RawExtension or Object. -func (s *SwaggerSchema) isGenericArray(p swagger.ModelProperty) bool { - return p.DataTypeFields.Type != nil && - *p.DataTypeFields.Type == "array" && - p.Items != nil && - p.Items.Ref != nil && - (*p.Items.Ref == "runtime.RawExtension" || *p.Items.Ref == "runtime.Object") -} - -// This matches type name in the swagger spec, such as "v1.Binding". -var versionRegexp = regexp.MustCompile(`^(v.+|unversioned)\..*`) - -func (s *SwaggerSchema) validateField(value interface{}, fieldName, fieldType string, fieldDetails *swagger.ModelProperty) []error { - // TODO: caesarxuchao: because we have multiple group/versions and objects - // may reference objects in other group, the commented out way of checking - // if a filedType is a type defined by us is outdated. We use a hacky way - // for now. - // TODO: the type name in the swagger spec is something like "v1.Binding", - // and the "v1" is generated from the package name, not the groupVersion of - // the type. We need to fix go-restful to embed the group name in the type - // name, otherwise we couldn't handle identically named types in different - // groups correctly. 
- if versionRegexp.MatchString(fieldType) { - // if strings.HasPrefix(fieldType, apiVersion) { - return s.ValidateObject(value, fieldName, fieldType) - } - allErrs := []error{} - switch fieldType { - case "string": - // Be loose about what we accept for 'string' since we use IntOrString in a couple of places - _, isString := value.(string) - _, isNumber := value.(float64) - _, isInteger := value.(int) - if !isString && !isNumber && !isInteger { - return append(allErrs, NewInvalidTypeError(reflect.String, reflect.TypeOf(value).Kind(), fieldName)) - } - case "array": - arr, ok := value.([]interface{}) - if !ok { - return append(allErrs, NewInvalidTypeError(reflect.Array, reflect.TypeOf(value).Kind(), fieldName)) - } - var arrType string - if fieldDetails.Items.Ref == nil && fieldDetails.Items.Type == nil { - return append(allErrs, NewInvalidTypeError(reflect.Array, reflect.TypeOf(value).Kind(), fieldName)) - } - if fieldDetails.Items.Ref != nil { - arrType = *fieldDetails.Items.Ref - } else { - arrType = *fieldDetails.Items.Type - } - for ix := range arr { - errs := s.validateField(arr[ix], fmt.Sprintf("%s[%d]", fieldName, ix), arrType, nil) - if len(errs) > 0 { - allErrs = append(allErrs, errs...) - } - } - case "uint64": - case "int64": - case "integer": - _, isNumber := value.(float64) - _, isInteger := value.(int) - if !isNumber && !isInteger { - return append(allErrs, NewInvalidTypeError(reflect.Int, reflect.TypeOf(value).Kind(), fieldName)) - } - case "float64": - if _, ok := value.(float64); !ok { - return append(allErrs, NewInvalidTypeError(reflect.Float64, reflect.TypeOf(value).Kind(), fieldName)) - } - case "boolean": - if _, ok := value.(bool); !ok { - return append(allErrs, NewInvalidTypeError(reflect.Bool, reflect.TypeOf(value).Kind(), fieldName)) - } - // API servers before release 1.3 produce swagger spec with `type: "any"` as the fallback type, while newer servers produce spec with `type: "object"`. - // We have both here so that kubectl can work with both old and new api servers. - case "object": - case "any": - default: - return append(allErrs, fmt.Errorf("unexpected type: %v", fieldType)) - } - return allErrs -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go deleted file mode 100644 index 47213e406..000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go +++ /dev/null @@ -1,3169 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package validation - -import ( - "encoding/json" - "fmt" - "math" - "net" - "os" - "path" - "reflect" - "regexp" - "strings" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/endpoints" - utilpod "k8s.io/kubernetes/pkg/api/pod" - "k8s.io/kubernetes/pkg/api/resource" - apiservice "k8s.io/kubernetes/pkg/api/service" - "k8s.io/kubernetes/pkg/api/unversioned" - unversionedvalidation "k8s.io/kubernetes/pkg/api/unversioned/validation" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/capabilities" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/util/intstr" - "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/util/validation" - "k8s.io/kubernetes/pkg/util/validation/field" -) - -// TODO: delete this global variable when we enable the validation of common -// fields by default. -var RepairMalformedUpdates bool = true - -const isNegativeErrorMsg string = `must be greater than or equal to 0` -const isInvalidQuotaResource string = `must be a standard resource for quota` -const fieldImmutableErrorMsg string = `field is immutable` -const cIdentifierErrorMsg string = `must be a C identifier (matching regex ` + validation.CIdentifierFmt + `): e.g. "my_name" or "MyName"` -const isNotIntegerErrorMsg string = `must be an integer` - -func InclusiveRangeErrorMsg(lo, hi int) string { - return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi) -} - -var pdPartitionErrorMsg string = InclusiveRangeErrorMsg(1, 255) -var PortRangeErrorMsg string = InclusiveRangeErrorMsg(1, 65535) -var IdRangeErrorMsg string = InclusiveRangeErrorMsg(0, math.MaxInt32) -var PortNameErrorMsg string = fmt.Sprintf(`must be an IANA_SVC_NAME (at most 15 characters, matching regex %s, it must contain at least one letter [a-z], and hyphens cannot be adjacent to other hyphens): e.g. "http"`, validation.IdentifierNoHyphensBeginEndFmt) - -const totalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB - -// BannedOwners is a black list of object that are not allowed to be owners. -var BannedOwners = map[unversioned.GroupVersionKind]struct{}{ - v1.SchemeGroupVersion.WithKind("Event"): {}, -} - -// ValidateHasLabel requires that api.ObjectMeta has a Label with key and expectedValue -func ValidateHasLabel(meta api.ObjectMeta, fldPath *field.Path, key, expectedValue string) field.ErrorList { - allErrs := field.ErrorList{} - actualValue, found := meta.Labels[key] - if !found { - allErrs = append(allErrs, field.Required(fldPath.Child("labels"), key+"="+expectedValue)) - return allErrs - } - if actualValue != expectedValue { - allErrs = append(allErrs, field.Invalid(fldPath.Child("labels"), meta.Labels, "expected "+key+"="+expectedValue)) - } - return allErrs -} - -// ValidateAnnotations validates that a set of annotations are correctly defined. 
-func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - var totalSize int64 - for k, v := range annotations { - for _, msg := range validation.IsQualifiedName(strings.ToLower(k)) { - allErrs = append(allErrs, field.Invalid(fldPath, k, msg)) - } - totalSize += (int64)(len(k)) + (int64)(len(v)) - } - if totalSize > (int64)(totalAnnotationSizeLimitB) { - allErrs = append(allErrs, field.TooLong(fldPath, "", totalAnnotationSizeLimitB)) - } - return allErrs -} - -func ValidatePodSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if annotations[api.AffinityAnnotationKey] != "" { - allErrs = append(allErrs, ValidateAffinityInPodAnnotations(annotations, fldPath)...) - } - - if annotations[api.TolerationsAnnotationKey] != "" { - allErrs = append(allErrs, ValidateTolerationsInPodAnnotations(annotations, fldPath)...) - } - - if hostname, exists := annotations[utilpod.PodHostnameAnnotation]; exists { - for _, msg := range validation.IsDNS1123Label(hostname) { - allErrs = append(allErrs, field.Invalid(fldPath, utilpod.PodHostnameAnnotation, msg)) - } - } - - if subdomain, exists := annotations[utilpod.PodSubdomainAnnotation]; exists { - for _, msg := range validation.IsDNS1123Label(subdomain) { - allErrs = append(allErrs, field.Invalid(fldPath, utilpod.PodSubdomainAnnotation, msg)) - } - } - - allErrs = append(allErrs, ValidateSeccompPodAnnotations(annotations, fldPath)...) - - return allErrs -} - -func ValidateEndpointsSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - hostnamesMap, exists := annotations[endpoints.PodHostnamesAnnotation] - if exists && !isValidHostnamesMap(hostnamesMap) { - allErrs = append(allErrs, field.Invalid(fldPath, endpoints.PodHostnamesAnnotation, - `must be a valid json representation of map[string(IP)][HostRecord] e.g. "{"10.245.1.6":{"HostName":"my-webserver"}}"`)) - } - - return allErrs -} - -func validateOwnerReference(ownerReference api.OwnerReference, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - gvk := unversioned.FromAPIVersionAndKind(ownerReference.APIVersion, ownerReference.Kind) - // gvk.Group is empty for the legacy group. - if len(gvk.Version) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("apiVersion"), ownerReference.APIVersion, "version must not be empty")) - } - if len(gvk.Kind) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("kind"), ownerReference.Kind, "kind must not be empty")) - } - if len(ownerReference.Name) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), ownerReference.Name, "name must not be empty")) - } - if len(ownerReference.UID) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), ownerReference.UID, "uid must not be empty")) - } - if _, ok := BannedOwners[gvk]; ok { - allErrs = append(allErrs, field.Invalid(fldPath, ownerReference, fmt.Sprintf("%s is disallowed from being an owner", gvk))) - } - return allErrs -} - -func ValidateOwnerReferences(ownerReferences []api.OwnerReference, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - controllerName := "" - for _, ref := range ownerReferences { - allErrs = append(allErrs, validateOwnerReference(ref, fldPath)...) 
- if ref.Controller != nil && *ref.Controller { - if controllerName != "" { - allErrs = append(allErrs, field.Invalid(fldPath, ownerReferences, - fmt.Sprintf("Only one reference can have Controller set to true. Found \"true\" in references for %v and %v", controllerName, ref.Name))) - } else { - controllerName = ref.Name - } - } - } - return allErrs -} - -// ValidateNameFunc validates that the provided name is valid for a given resource type. -// Not all resources have the same validation rules for names. Prefix is true -// if the name will have a value appended to it. If the name is not valid, -// this returns a list of descriptions of individual characteristics of the -// value that were not valid. Otherwise this returns an empty list or nil. -type ValidateNameFunc func(name string, prefix bool) []string - -// maskTrailingDash replaces the final character of a string with a subdomain safe -// value if is a dash. -func maskTrailingDash(name string) string { - if strings.HasSuffix(name, "-") { - return name[:len(name)-2] + "a" - } - return name -} - -// ValidatePodName can be used to check whether the given pod name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidatePodName = NameIsDNSSubdomain - -// ValidateReplicationControllerName can be used to check whether the given replication -// controller name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateReplicationControllerName = NameIsDNSSubdomain - -// ValidateServiceName can be used to check whether the given service name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateServiceName = NameIsDNS952Label - -// ValidateNodeName can be used to check whether the given node name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateNodeName = NameIsDNSSubdomain - -// ValidateNamespaceName can be used to check whether the given namespace name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateNamespaceName = NameIsDNSLabel - -// ValidateLimitRangeName can be used to check whether the given limit range name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateLimitRangeName = NameIsDNSSubdomain - -// ValidateResourceQuotaName can be used to check whether the given -// resource quota name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateResourceQuotaName = NameIsDNSSubdomain - -// ValidateSecretName can be used to check whether the given secret name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateSecretName = NameIsDNSSubdomain - -// ValidateServiceAccountName can be used to check whether the given service account name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateServiceAccountName = NameIsDNSSubdomain - -// ValidateEndpointsName can be used to check whether the given endpoints name is valid. 
-// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateEndpointsName = NameIsDNSSubdomain - -// NameIsDNSSubdomain is a ValidateNameFunc for names that must be a DNS subdomain. -func NameIsDNSSubdomain(name string, prefix bool) []string { - if prefix { - name = maskTrailingDash(name) - } - return validation.IsDNS1123Subdomain(name) -} - -// NameIsDNSLabel is a ValidateNameFunc for names that must be a DNS 1123 label. -func NameIsDNSLabel(name string, prefix bool) []string { - if prefix { - name = maskTrailingDash(name) - } - return validation.IsDNS1123Label(name) -} - -// NameIsDNS952Label is a ValidateNameFunc for names that must be a DNS 952 label. -func NameIsDNS952Label(name string, prefix bool) []string { - if prefix { - name = maskTrailingDash(name) - } - return validation.IsDNS952Label(name) -} - -// Validates that given value is not negative. -func ValidateNonnegativeField(value int64, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if value < 0 { - allErrs = append(allErrs, field.Invalid(fldPath, value, isNegativeErrorMsg)) - } - return allErrs -} - -// Validates that a Quantity is not negative -func ValidateNonnegativeQuantity(value resource.Quantity, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if value.Cmp(resource.Quantity{}) < 0 { - allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNegativeErrorMsg)) - } - return allErrs -} - -func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if !api.Semantic.DeepEqual(oldVal, newVal) { - allErrs = append(allErrs, field.Invalid(fldPath, newVal, fieldImmutableErrorMsg)) - } - return allErrs -} - -// ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already -// been performed. -// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before. -// TODO: Remove calls to this method scattered in validations of specific resources, e.g., ValidatePodUpdate. -func ValidateObjectMeta(meta *api.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if len(meta.GenerateName) != 0 { - for _, msg := range nameFn(meta.GenerateName, true) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("generateName"), meta.GenerateName, msg)) - } - } - // If the generated name validates, but the calculated value does not, it's a problem with generation, and we - // report it here. This may confuse users, but indicates a programming bug and still must be validated. 
- // If there are multiple fields out of which one is required then add a or as a separator - if len(meta.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("name"), "name or generateName is required")) - } else { - for _, msg := range nameFn(meta.Name, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), meta.Name, msg)) - } - } - if requiresNamespace { - if len(meta.Namespace) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), "")) - } else { - for _, msg := range ValidateNamespaceName(meta.Namespace, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), meta.Namespace, msg)) - } - } - } else { - if len(meta.Namespace) != 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("namespace"), "not allowed on this type")) - } - } - allErrs = append(allErrs, ValidateNonnegativeField(meta.Generation, fldPath.Child("generation"))...) - allErrs = append(allErrs, unversionedvalidation.ValidateLabels(meta.Labels, fldPath.Child("labels"))...) - allErrs = append(allErrs, ValidateAnnotations(meta.Annotations, fldPath.Child("annotations"))...) - allErrs = append(allErrs, ValidateOwnerReferences(meta.OwnerReferences, fldPath.Child("ownerReferences"))...) - for _, finalizer := range meta.Finalizers { - allErrs = append(allErrs, validateFinalizerName(finalizer, fldPath.Child("finalizers"))...) - } - return allErrs -} - -// ValidateObjectMetaUpdate validates an object's metadata when updated -func ValidateObjectMetaUpdate(newMeta, oldMeta *api.ObjectMeta, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if !RepairMalformedUpdates && newMeta.UID != oldMeta.UID { - allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), newMeta.UID, "field is immutable")) - } - // in the event it is left empty, set it, to allow clients more flexibility - // TODO: remove the following code that repairs the update request when we retire the clients that modify the immutable fields. - // Please do not copy this pattern elsewhere; validation functions should not be modifying the objects they are passed! - if RepairMalformedUpdates { - if len(newMeta.UID) == 0 { - newMeta.UID = oldMeta.UID - } - // ignore changes to timestamp - if oldMeta.CreationTimestamp.IsZero() { - oldMeta.CreationTimestamp = newMeta.CreationTimestamp - } else { - newMeta.CreationTimestamp = oldMeta.CreationTimestamp - } - // an object can never remove a deletion timestamp or clear/change grace period seconds - if !oldMeta.DeletionTimestamp.IsZero() { - newMeta.DeletionTimestamp = oldMeta.DeletionTimestamp - } - if oldMeta.DeletionGracePeriodSeconds != nil && newMeta.DeletionGracePeriodSeconds == nil { - newMeta.DeletionGracePeriodSeconds = oldMeta.DeletionGracePeriodSeconds - } - } - - // TODO: needs to check if newMeta==nil && oldMeta !=nil after the repair logic is removed. 
- if newMeta.DeletionGracePeriodSeconds != nil && (oldMeta.DeletionGracePeriodSeconds == nil || *newMeta.DeletionGracePeriodSeconds != *oldMeta.DeletionGracePeriodSeconds) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("deletionGracePeriodSeconds"), newMeta.DeletionGracePeriodSeconds, "field is immutable; may only be changed via deletion")) - } - if newMeta.DeletionTimestamp != nil && (oldMeta.DeletionTimestamp == nil || !newMeta.DeletionTimestamp.Equal(*oldMeta.DeletionTimestamp)) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("deletionTimestamp"), newMeta.DeletionTimestamp, "field is immutable; may only be changed via deletion")) - } - - // Reject updates that don't specify a resource version - if len(newMeta.ResourceVersion) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceVersion"), newMeta.ResourceVersion, "must be specified for an update")) - } - - allErrs = append(allErrs, ValidateImmutableField(newMeta.Name, oldMeta.Name, fldPath.Child("name"))...) - allErrs = append(allErrs, ValidateImmutableField(newMeta.Namespace, oldMeta.Namespace, fldPath.Child("namespace"))...) - allErrs = append(allErrs, ValidateImmutableField(newMeta.UID, oldMeta.UID, fldPath.Child("uid"))...) - allErrs = append(allErrs, ValidateImmutableField(newMeta.CreationTimestamp, oldMeta.CreationTimestamp, fldPath.Child("creationTimestamp"))...) - - allErrs = append(allErrs, unversionedvalidation.ValidateLabels(newMeta.Labels, fldPath.Child("labels"))...) - allErrs = append(allErrs, ValidateAnnotations(newMeta.Annotations, fldPath.Child("annotations"))...) - allErrs = append(allErrs, ValidateOwnerReferences(newMeta.OwnerReferences, fldPath.Child("ownerReferences"))...) - - return allErrs -} - -func validateVolumes(volumes []api.Volume, fldPath *field.Path) (sets.String, field.ErrorList) { - allErrs := field.ErrorList{} - - allNames := sets.String{} - for i, vol := range volumes { - idxPath := fldPath.Index(i) - el := validateVolumeSource(&vol.VolumeSource, idxPath) - if len(vol.Name) == 0 { - el = append(el, field.Required(idxPath.Child("name"), "")) - } else if msgs := validation.IsDNS1123Label(vol.Name); len(msgs) != 0 { - for i := range msgs { - el = append(el, field.Invalid(idxPath.Child("name"), vol.Name, msgs[i])) - } - } else if allNames.Has(vol.Name) { - el = append(el, field.Duplicate(idxPath.Child("name"), vol.Name)) - } - if len(el) == 0 { - allNames.Insert(vol.Name) - } else { - allErrs = append(allErrs, el...) - } - - } - return allNames, allErrs -} - -func validateVolumeSource(source *api.VolumeSource, fldPath *field.Path) field.ErrorList { - numVolumes := 0 - allErrs := field.ErrorList{} - if source.EmptyDir != nil { - numVolumes++ - // EmptyDirs have nothing to validate - } - if source.HostPath != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostPath"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateHostPathVolumeSource(source.HostPath, fldPath.Child("hostPath"))...) - } - } - if source.GitRepo != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("gitRepo"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateGitRepoVolumeSource(source.GitRepo, fldPath.Child("gitRepo"))...) 
- } - } - if source.GCEPersistentDisk != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("gcePersistentDisk"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateGCEPersistentDiskVolumeSource(source.GCEPersistentDisk, fldPath.Child("persistentDisk"))...) - } - } - if source.AWSElasticBlockStore != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("awsElasticBlockStore"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateAWSElasticBlockStoreVolumeSource(source.AWSElasticBlockStore, fldPath.Child("awsElasticBlockStore"))...) - } - } - if source.Secret != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("secret"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateSecretVolumeSource(source.Secret, fldPath.Child("secret"))...) - } - } - if source.NFS != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("nfs"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateNFSVolumeSource(source.NFS, fldPath.Child("nfs"))...) - } - } - if source.ISCSI != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("iscsi"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateISCSIVolumeSource(source.ISCSI, fldPath.Child("iscsi"))...) - } - } - if source.Glusterfs != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("glusterfs"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateGlusterfs(source.Glusterfs, fldPath.Child("glusterfs"))...) - } - } - if source.Flocker != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("flocker"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateFlockerVolumeSource(source.Flocker, fldPath.Child("flocker"))...) - } - } - if source.PersistentVolumeClaim != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("persistentVolumeClaim"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validatePersistentClaimVolumeSource(source.PersistentVolumeClaim, fldPath.Child("persistentVolumeClaim"))...) - } - } - if source.RBD != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("rbd"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateRBDVolumeSource(source.RBD, fldPath.Child("rbd"))...) - } - } - if source.Cinder != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("cinder"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateCinderVolumeSource(source.Cinder, fldPath.Child("cinder"))...) - } - } - if source.CephFS != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("cephFS"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateCephFSVolumeSource(source.CephFS, fldPath.Child("cephfs"))...) 
- } - } - if source.DownwardAPI != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("downwarAPI"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateDownwardAPIVolumeSource(source.DownwardAPI, fldPath.Child("downwardAPI"))...) - } - } - if source.FC != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("fc"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateFCVolumeSource(source.FC, fldPath.Child("fc"))...) - } - } - if source.FlexVolume != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("flexVolume"), "may not specifiy more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateFlexVolumeSource(source.FlexVolume, fldPath.Child("flexVolume"))...) - } - } - if source.ConfigMap != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("configMap"), "may not specifiy more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateConfigMapVolumeSource(source.ConfigMap, fldPath.Child("configMap"))...) - } - } - if source.AzureFile != nil { - numVolumes++ - allErrs = append(allErrs, validateAzureFile(source.AzureFile, fldPath.Child("azureFile"))...) - } - if source.VsphereVolume != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("vsphereVolume"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateVsphereVolumeSource(source.VsphereVolume, fldPath.Child("vsphereVolume"))...) - } - } - if numVolumes == 0 { - allErrs = append(allErrs, field.Required(fldPath, "must specify a volume type")) - } - - return allErrs -} - -func validateHostPathVolumeSource(hostPath *api.HostPathVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(hostPath.Path) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) - } - return allErrs -} - -func validateGitRepoVolumeSource(gitRepo *api.GitRepoVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(gitRepo.Repository) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("repository"), "")) - } - - pathErrs := validateVolumeSourcePath(gitRepo.Directory, fldPath.Child("directory")) - allErrs = append(allErrs, pathErrs...) 
- return allErrs -} - -func validateISCSIVolumeSource(iscsi *api.ISCSIVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(iscsi.TargetPortal) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("targetPortal"), "")) - } - if len(iscsi.IQN) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("iqn"), "")) - } - if iscsi.Lun < 0 || iscsi.Lun > 255 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), iscsi.Lun, InclusiveRangeErrorMsg(0, 255))) - } - return allErrs -} - -func validateFCVolumeSource(fc *api.FCVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(fc.TargetWWNs) < 1 { - allErrs = append(allErrs, field.Required(fldPath.Child("targetWWNs"), "")) - } - - if fc.Lun == nil { - allErrs = append(allErrs, field.Required(fldPath.Child("lun"), "")) - } else { - if *fc.Lun < 0 || *fc.Lun > 255 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), fc.Lun, InclusiveRangeErrorMsg(0, 255))) - } - } - return allErrs -} - -func validateGCEPersistentDiskVolumeSource(pd *api.GCEPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(pd.PDName) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("pdName"), "")) - } - if pd.Partition < 0 || pd.Partition > 255 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("partition"), pd.Partition, pdPartitionErrorMsg)) - } - return allErrs -} - -func validateAWSElasticBlockStoreVolumeSource(PD *api.AWSElasticBlockStoreVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(PD.VolumeID) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), "")) - } - if PD.Partition < 0 || PD.Partition > 255 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("partition"), PD.Partition, pdPartitionErrorMsg)) - } - return allErrs -} - -func validateSecretVolumeSource(secretSource *api.SecretVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(secretSource.SecretName) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), "")) - } - return allErrs -} - -func validateConfigMapVolumeSource(configMapSource *api.ConfigMapVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(configMapSource.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) - } - return allErrs -} - -func validatePersistentClaimVolumeSource(claim *api.PersistentVolumeClaimVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(claim.ClaimName) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("claimName"), "")) - } - return allErrs -} - -func validateNFSVolumeSource(nfs *api.NFSVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(nfs.Server) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("server"), "")) - } - if len(nfs.Path) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) - } - if !path.IsAbs(nfs.Path) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("path"), nfs.Path, "must be an absolute path")) - } - return allErrs -} - -func validateGlusterfs(glusterfs *api.GlusterfsVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(glusterfs.EndpointsName) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("endpoints"), "")) - } - if 
len(glusterfs.Path) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) - } - return allErrs -} - -func validateFlockerVolumeSource(flocker *api.FlockerVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(flocker.DatasetName) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("datasetName"), "")) - } - if strings.Contains(flocker.DatasetName, "/") { - allErrs = append(allErrs, field.Invalid(fldPath.Child("datasetName"), flocker.DatasetName, "must not contain '/'")) - } - return allErrs -} - -var validDownwardAPIFieldPathExpressions = sets.NewString("metadata.name", "metadata.namespace", "metadata.labels", "metadata.annotations") - -func validateDownwardAPIVolumeSource(downwardAPIVolume *api.DownwardAPIVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for _, downwardAPIVolumeFile := range downwardAPIVolume.Items { - if len(downwardAPIVolumeFile.Path) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) - } - allErrs = append(allErrs, validateVolumeSourcePath(downwardAPIVolumeFile.Path, fldPath.Child("path"))...) - if downwardAPIVolumeFile.FieldRef != nil { - allErrs = append(allErrs, validateObjectFieldSelector(downwardAPIVolumeFile.FieldRef, &validDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...) - if downwardAPIVolumeFile.ResourceFieldRef != nil { - allErrs = append(allErrs, field.Invalid(fldPath, "resource", "fieldRef and resourceFieldRef can not be specified simultaneously")) - } - } else if downwardAPIVolumeFile.ResourceFieldRef != nil { - allErrs = append(allErrs, validateContainerResourceFieldSelector(downwardAPIVolumeFile.ResourceFieldRef, &validContainerResourceFieldPathExpressions, fldPath.Child("resourceFieldRef"), true)...) - } else { - allErrs = append(allErrs, field.Required(fldPath, "one of fieldRef and resourceFieldRef is required")) - } - } - return allErrs -} - -// This validate will make sure targetPath: -// 1. is not abs path -// 2. does not start with '../' -// 3. does not contain '/../' -// 4. does not end with '/..' -func validateSubPath(targetPath string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if path.IsAbs(targetPath) { - allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must be a relative path")) - } - if strings.HasPrefix(targetPath, "../") { - allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not start with '../'")) - } - if strings.Contains(targetPath, "/../") { - allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not contain '/../'")) - } - if strings.HasSuffix(targetPath, "/..") { - allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not end with '/..'")) - } - return allErrs -} - -// This validate will make sure targetPath: -// 1. is not abs path -// 2. does not contain '..' -// 3. does not start with '..' -func validateVolumeSourcePath(targetPath string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if path.IsAbs(targetPath) { - allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must be a relative path")) - } - // TODO assume OS of api server & nodes are the same for now - items := strings.Split(targetPath, string(os.PathSeparator)) - - for _, item := range items { - if item == ".." 
{ - allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not contain '..'")) - } - } - if strings.HasPrefix(items[0], "..") && len(items[0]) > 2 { - allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not start with '..'")) - } - return allErrs -} - -func validateRBDVolumeSource(rbd *api.RBDVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(rbd.CephMonitors) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) - } - if len(rbd.RBDImage) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("image"), "")) - } - return allErrs -} - -func validateCinderVolumeSource(cd *api.CinderVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(cd.VolumeID) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), "")) - } - return allErrs -} - -func validateCephFSVolumeSource(cephfs *api.CephFSVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(cephfs.Monitors) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) - } - return allErrs -} - -func validateFlexVolumeSource(fv *api.FlexVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(fv.Driver) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("driver"), "")) - } - return allErrs -} - -func validateAzureFile(azure *api.AzureFileVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if azure.SecretName == "" { - allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), "")) - } - if azure.ShareName == "" { - allErrs = append(allErrs, field.Required(fldPath.Child("shareName"), "")) - } - return allErrs -} - -func validateVsphereVolumeSource(cd *api.VsphereVirtualDiskVolumeSource, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(cd.VolumePath) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("volumePath"), "")) - } - return allErrs -} - -// ValidatePersistentVolumeName checks that a name is appropriate for a -// PersistentVolumeName object. -var ValidatePersistentVolumeName = NameIsDNSSubdomain - -var supportedAccessModes = sets.NewString(string(api.ReadWriteOnce), string(api.ReadOnlyMany), string(api.ReadWriteMany)) - -func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList { - allErrs := ValidateObjectMeta(&pv.ObjectMeta, false, ValidatePersistentVolumeName, field.NewPath("metadata")) - - specPath := field.NewPath("spec") - if len(pv.Spec.AccessModes) == 0 { - allErrs = append(allErrs, field.Required(specPath.Child("accessModes"), "")) - } - for _, mode := range pv.Spec.AccessModes { - if !supportedAccessModes.Has(string(mode)) { - allErrs = append(allErrs, field.NotSupported(specPath.Child("accessModes"), mode, supportedAccessModes.List())) - } - } - - if len(pv.Spec.Capacity) == 0 { - allErrs = append(allErrs, field.Required(specPath.Child("capacity"), "")) - } - - if _, ok := pv.Spec.Capacity[api.ResourceStorage]; !ok || len(pv.Spec.Capacity) > 1 { - allErrs = append(allErrs, field.NotSupported(specPath.Child("capacity"), pv.Spec.Capacity, []string{string(api.ResourceStorage)})) - } - capPath := specPath.Child("capacity") - for r, qty := range pv.Spec.Capacity { - allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...) 
- } - - numVolumes := 0 - if pv.Spec.HostPath != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("hostPath"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateHostPathVolumeSource(pv.Spec.HostPath, specPath.Child("hostPath"))...) - } - } - if pv.Spec.GCEPersistentDisk != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("gcePersistentDisk"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateGCEPersistentDiskVolumeSource(pv.Spec.GCEPersistentDisk, specPath.Child("persistentDisk"))...) - } - } - if pv.Spec.AWSElasticBlockStore != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("awsElasticBlockStore"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateAWSElasticBlockStoreVolumeSource(pv.Spec.AWSElasticBlockStore, specPath.Child("awsElasticBlockStore"))...) - } - } - if pv.Spec.Glusterfs != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("glusterfs"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateGlusterfs(pv.Spec.Glusterfs, specPath.Child("glusterfs"))...) - } - } - if pv.Spec.Flocker != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("flocker"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateFlockerVolumeSource(pv.Spec.Flocker, specPath.Child("flocker"))...) - } - } - if pv.Spec.NFS != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("nfs"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateNFSVolumeSource(pv.Spec.NFS, specPath.Child("nfs"))...) - } - } - if pv.Spec.RBD != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("rbd"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateRBDVolumeSource(pv.Spec.RBD, specPath.Child("rbd"))...) - } - } - if pv.Spec.CephFS != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("cephFS"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateCephFSVolumeSource(pv.Spec.CephFS, specPath.Child("cephfs"))...) - } - } - if pv.Spec.ISCSI != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("iscsi"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateISCSIVolumeSource(pv.Spec.ISCSI, specPath.Child("iscsi"))...) - } - } - if pv.Spec.Cinder != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("cinder"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateCinderVolumeSource(pv.Spec.Cinder, specPath.Child("cinder"))...) - } - } - if pv.Spec.FC != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("fc"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateFCVolumeSource(pv.Spec.FC, specPath.Child("fc"))...) 
- } - } - if pv.Spec.FlexVolume != nil { - numVolumes++ - allErrs = append(allErrs, validateFlexVolumeSource(pv.Spec.FlexVolume, specPath.Child("flexVolume"))...) - } - if pv.Spec.AzureFile != nil { - numVolumes++ - allErrs = append(allErrs, validateAzureFile(pv.Spec.AzureFile, specPath.Child("azureFile"))...) - } - if pv.Spec.VsphereVolume != nil { - if numVolumes > 0 { - allErrs = append(allErrs, field.Forbidden(specPath.Child("vsphereVolume"), "may not specify more than 1 volume type")) - } else { - numVolumes++ - allErrs = append(allErrs, validateVsphereVolumeSource(pv.Spec.VsphereVolume, specPath.Child("vsphereVolume"))...) - } - } - if numVolumes == 0 { - allErrs = append(allErrs, field.Required(specPath, "must specify a volume type")) - } - return allErrs -} - -// ValidatePersistentVolumeUpdate tests to see if the update is legal for an end user to make. -// newPv is updated with fields that cannot be changed. -func ValidatePersistentVolumeUpdate(newPv, oldPv *api.PersistentVolume) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = ValidatePersistentVolume(newPv) - newPv.Status = oldPv.Status - return allErrs -} - -// ValidatePersistentVolumeStatusUpdate tests to see if the status update is legal for an end user to make. -// newPv is updated with fields that cannot be changed. -func ValidatePersistentVolumeStatusUpdate(newPv, oldPv *api.PersistentVolume) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newPv.ObjectMeta, &oldPv.ObjectMeta, field.NewPath("metadata")) - if len(newPv.ResourceVersion) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), "")) - } - newPv.Spec = oldPv.Spec - return allErrs -} - -func ValidatePersistentVolumeClaim(pvc *api.PersistentVolumeClaim) field.ErrorList { - allErrs := ValidateObjectMeta(&pvc.ObjectMeta, true, ValidatePersistentVolumeName, field.NewPath("metadata")) - specPath := field.NewPath("spec") - if len(pvc.Spec.AccessModes) == 0 { - allErrs = append(allErrs, field.Required(specPath.Child("accessModes"), "at least 1 accessMode is required")) - } - if pvc.Spec.Selector != nil { - allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(pvc.Spec.Selector, specPath.Child("selector"))...) - } - for _, mode := range pvc.Spec.AccessModes { - if mode != api.ReadWriteOnce && mode != api.ReadOnlyMany && mode != api.ReadWriteMany { - allErrs = append(allErrs, field.NotSupported(specPath.Child("accessModes"), mode, supportedAccessModes.List())) - } - } - if _, ok := pvc.Spec.Resources.Requests[api.ResourceStorage]; !ok { - allErrs = append(allErrs, field.Required(specPath.Child("resources").Key(string(api.ResourceStorage)), "")) - } - return allErrs -} - -func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *api.PersistentVolumeClaim) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidatePersistentVolumeClaim(newPvc)...) 
- // if a pvc had a bound volume, we should not allow updates to resources or access modes - if len(oldPvc.Spec.VolumeName) != 0 { - if !api.Semantic.DeepEqual(newPvc.Spec, oldPvc.Spec) { - allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "spec is immutable once a claim has been bound to a volume")) - } - } - newPvc.Status = oldPvc.Status - return allErrs -} - -func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *api.PersistentVolumeClaim) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata")) - if len(newPvc.ResourceVersion) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), "")) - } - if len(newPvc.Spec.AccessModes) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("Spec", "accessModes"), "")) - } - capPath := field.NewPath("status", "capacity") - for r, qty := range newPvc.Status.Capacity { - allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...) - } - newPvc.Spec = oldPvc.Spec - return allErrs -} - -var supportedPortProtocols = sets.NewString(string(api.ProtocolTCP), string(api.ProtocolUDP)) - -func validateContainerPorts(ports []api.ContainerPort, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - allNames := sets.String{} - for i, port := range ports { - idxPath := fldPath.Index(i) - if len(port.Name) > 0 { - if !validation.IsValidPortName(port.Name) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), port.Name, PortNameErrorMsg)) - } else if allNames.Has(port.Name) { - allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), port.Name)) - } else { - allNames.Insert(port.Name) - } - } - if port.ContainerPort == 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, PortRangeErrorMsg)) - } else if !validation.IsValidPortNum(int(port.ContainerPort)) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, PortRangeErrorMsg)) - } - if port.HostPort != 0 && !validation.IsValidPortNum(int(port.HostPort)) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("hostPort"), port.HostPort, PortRangeErrorMsg)) - } - if len(port.Protocol) == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("protocol"), "")) - } else if !supportedPortProtocols.Has(string(port.Protocol)) { - allErrs = append(allErrs, field.NotSupported(idxPath.Child("protocol"), port.Protocol, supportedPortProtocols.List())) - } - } - return allErrs -} - -func validateEnv(vars []api.EnvVar, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - for i, ev := range vars { - idxPath := fldPath.Index(i) - if len(ev.Name) == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("name"), "")) - } else if !validation.IsCIdentifier(ev.Name) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), ev.Name, cIdentifierErrorMsg)) - } - allErrs = append(allErrs, validateEnvVarValueFrom(ev, idxPath.Child("valueFrom"))...) 
- } - return allErrs -} - -var validFieldPathExpressionsEnv = sets.NewString("metadata.name", "metadata.namespace", "status.podIP") -var validContainerResourceFieldPathExpressions = sets.NewString("limits.cpu", "limits.memory", "requests.cpu", "requests.memory") - -func validateEnvVarValueFrom(ev api.EnvVar, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if ev.ValueFrom == nil { - return allErrs - } - - numSources := 0 - - if ev.ValueFrom.FieldRef != nil { - numSources++ - allErrs = append(allErrs, validateObjectFieldSelector(ev.ValueFrom.FieldRef, &validFieldPathExpressionsEnv, fldPath.Child("fieldRef"))...) - } - if ev.ValueFrom.ResourceFieldRef != nil { - numSources++ - allErrs = append(allErrs, validateContainerResourceFieldSelector(ev.ValueFrom.ResourceFieldRef, &validContainerResourceFieldPathExpressions, fldPath.Child("resourceFieldRef"), false)...) - } - if ev.ValueFrom.ConfigMapKeyRef != nil { - numSources++ - allErrs = append(allErrs, validateConfigMapKeySelector(ev.ValueFrom.ConfigMapKeyRef, fldPath.Child("configMapKeyRef"))...) - } - if ev.ValueFrom.SecretKeyRef != nil { - numSources++ - allErrs = append(allErrs, validateSecretKeySelector(ev.ValueFrom.SecretKeyRef, fldPath.Child("secretKeyRef"))...) - } - - if len(ev.Value) != 0 { - if numSources != 0 { - allErrs = append(allErrs, field.Invalid(fldPath, "", "may not be specified when `value` is not empty")) - } - } else if numSources != 1 { - allErrs = append(allErrs, field.Invalid(fldPath, "", "may not have more than one field specified at a time")) - } - - return allErrs -} - -func validateObjectFieldSelector(fs *api.ObjectFieldSelector, expressions *sets.String, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if len(fs.APIVersion) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("apiVersion"), "")) - } else if len(fs.FieldPath) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("fieldPath"), "")) - } else { - internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "") - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldPath"), fs.FieldPath, fmt.Sprintf("error converting fieldPath: %v", err))) - } else if !expressions.Has(internalFieldPath) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("fieldPath"), internalFieldPath, expressions.List())) - } - } - - return allErrs -} - -func validateContainerResourceFieldSelector(fs *api.ResourceFieldSelector, expressions *sets.String, fldPath *field.Path, volume bool) field.ErrorList { - allErrs := field.ErrorList{} - - if volume && len(fs.ContainerName) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("containerName"), "")) - } else if len(fs.Resource) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("resource"), "")) - } else if !expressions.Has(fs.Resource) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("resource"), fs.Resource, expressions.List())) - } - allErrs = append(allErrs, validateContainerResourceDivisor(fs.Resource, fs.Divisor, fldPath)...) 
- return allErrs -} - -var validContainerResourceDivisorForCPU = sets.NewString("1m", "1") -var validContainerResourceDivisorForMemory = sets.NewString("1", "1k", "1M", "1G", "1T", "1P", "1E", "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei") - -func validateContainerResourceDivisor(rName string, divisor resource.Quantity, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - unsetDivisor := resource.Quantity{} - if unsetDivisor.Cmp(divisor) == 0 { - return allErrs - } - switch rName { - case "limits.cpu", "requests.cpu": - if !validContainerResourceDivisorForCPU.Has(divisor.String()) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1m and 1 are supported with the cpu resource")) - } - case "limits.memory", "requests.memory": - if !validContainerResourceDivisorForMemory.Has(divisor.String()) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1, 1k, 1M, 1G, 1T, 1P, 1E, 1Ki, 1Mi, 1Gi, 1Ti, 1Pi, 1Ei are supported with the memory resource")) - } - } - return allErrs -} - -func validateConfigMapKeySelector(s *api.ConfigMapKeySelector, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if len(s.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) - } - if len(s.Key) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("key"), "")) - } else if !IsSecretKey(s.Key) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, fmt.Sprintf("must have at most %d characters and match regex %s", validation.DNS1123SubdomainMaxLength, SecretKeyFmt))) - } - - return allErrs -} - -func validateSecretKeySelector(s *api.SecretKeySelector, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if len(s.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) - } - if len(s.Key) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("key"), "")) - } else if !IsSecretKey(s.Key) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, fmt.Sprintf("must have at most %d characters and match regex %s", validation.DNS1123SubdomainMaxLength, SecretKeyFmt))) - } - - return allErrs -} - -func validateVolumeMounts(mounts []api.VolumeMount, volumes sets.String, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - mountpoints := sets.NewString() - - for i, mnt := range mounts { - idxPath := fldPath.Index(i) - if len(mnt.Name) == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("name"), "")) - } else if !volumes.Has(mnt.Name) { - allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), mnt.Name)) - } - if len(mnt.MountPath) == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("mountPath"), "")) - } else if strings.Contains(mnt.MountPath, ":") { - allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must not contain ':'")) - } - if mountpoints.Has(mnt.MountPath) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must be unique")) - } - mountpoints.Insert(mnt.MountPath) - if len(mnt.SubPath) > 0 { - allErrs = append(allErrs, validateSubPath(mnt.SubPath, fldPath.Child("subPath"))...) - } - } - return allErrs -} - -func validateProbe(probe *api.Probe, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if probe == nil { - return allErrs - } - allErrs = append(allErrs, validateHandler(&probe.Handler, fldPath)...) 
- - allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.InitialDelaySeconds), fldPath.Child("initialDelaySeconds"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.TimeoutSeconds), fldPath.Child("timeoutSeconds"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.PeriodSeconds), fldPath.Child("periodSeconds"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.SuccessThreshold), fldPath.Child("successThreshold"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.FailureThreshold), fldPath.Child("failureThreshold"))...) - return allErrs -} - -// AccumulateUniqueHostPorts extracts each HostPort of each Container, -// accumulating the results and returning an error if any ports conflict. -func AccumulateUniqueHostPorts(containers []api.Container, accumulator *sets.String, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - for ci, ctr := range containers { - idxPath := fldPath.Index(ci) - portsPath := idxPath.Child("ports") - for pi := range ctr.Ports { - idxPath := portsPath.Index(pi) - port := ctr.Ports[pi].HostPort - if port == 0 { - continue - } - str := fmt.Sprintf("%d/%s", port, ctr.Ports[pi].Protocol) - if accumulator.Has(str) { - allErrs = append(allErrs, field.Duplicate(idxPath.Child("hostPort"), str)) - } else { - accumulator.Insert(str) - } - } - } - return allErrs -} - -// checkHostPortConflicts checks for colliding Port.HostPort values across -// a slice of containers. -func checkHostPortConflicts(containers []api.Container, fldPath *field.Path) field.ErrorList { - allPorts := sets.String{} - return AccumulateUniqueHostPorts(containers, &allPorts, fldPath) -} - -func validateExecAction(exec *api.ExecAction, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - if len(exec.Command) == 0 { - allErrors = append(allErrors, field.Required(fldPath.Child("command"), "")) - } - return allErrors -} - -func validateHTTPGetAction(http *api.HTTPGetAction, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - if len(http.Path) == 0 { - allErrors = append(allErrors, field.Required(fldPath.Child("path"), "")) - } - if http.Port.Type == intstr.Int && !validation.IsValidPortNum(http.Port.IntValue()) { - allErrors = append(allErrors, field.Invalid(fldPath.Child("port"), http.Port, PortRangeErrorMsg)) - } else if http.Port.Type == intstr.String && !validation.IsValidPortName(http.Port.StrVal) { - allErrors = append(allErrors, field.Invalid(fldPath.Child("port"), http.Port.StrVal, PortNameErrorMsg)) - } - supportedSchemes := sets.NewString(string(api.URISchemeHTTP), string(api.URISchemeHTTPS)) - if !supportedSchemes.Has(string(http.Scheme)) { - allErrors = append(allErrors, field.Invalid(fldPath.Child("scheme"), http.Scheme, fmt.Sprintf("must be one of %v", supportedSchemes.List()))) - } - for _, header := range http.HTTPHeaders { - if !validation.IsHTTPHeaderName(header.Name) { - allErrors = append(allErrors, field.Invalid(fldPath.Child("httpHeaders"), header.Name, fmt.Sprintf("name must match %s", validation.HTTPHeaderNameFmt))) - } - } - return allErrors -} - -func validateTCPSocketAction(tcp *api.TCPSocketAction, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - if tcp.Port.Type == intstr.Int && !validation.IsValidPortNum(tcp.Port.IntValue()) { - allErrors = append(allErrors, field.Invalid(fldPath.Child("port"), tcp.Port, PortRangeErrorMsg)) - } else if tcp.Port.Type == intstr.String && 
!validation.IsValidPortName(tcp.Port.StrVal) { - allErrors = append(allErrors, field.Invalid(fldPath.Child("port"), tcp.Port.StrVal, PortNameErrorMsg)) - } - return allErrors -} - -func validateHandler(handler *api.Handler, fldPath *field.Path) field.ErrorList { - numHandlers := 0 - allErrors := field.ErrorList{} - if handler.Exec != nil { - if numHandlers > 0 { - allErrors = append(allErrors, field.Forbidden(fldPath.Child("exec"), "may not specify more than 1 handler type")) - } else { - numHandlers++ - allErrors = append(allErrors, validateExecAction(handler.Exec, fldPath.Child("exec"))...) - } - } - if handler.HTTPGet != nil { - if numHandlers > 0 { - allErrors = append(allErrors, field.Forbidden(fldPath.Child("httpGet"), "may not specify more than 1 handler type")) - } else { - numHandlers++ - allErrors = append(allErrors, validateHTTPGetAction(handler.HTTPGet, fldPath.Child("httpGet"))...) - } - } - if handler.TCPSocket != nil { - if numHandlers > 0 { - allErrors = append(allErrors, field.Forbidden(fldPath.Child("tcpSocket"), "may not specify more than 1 handler type")) - } else { - numHandlers++ - allErrors = append(allErrors, validateTCPSocketAction(handler.TCPSocket, fldPath.Child("tcpSocket"))...) - } - } - if numHandlers == 0 { - allErrors = append(allErrors, field.Required(fldPath, "must specify a handler type")) - } - return allErrors -} - -func validateLifecycle(lifecycle *api.Lifecycle, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if lifecycle.PostStart != nil { - allErrs = append(allErrs, validateHandler(lifecycle.PostStart, fldPath.Child("postStart"))...) - } - if lifecycle.PreStop != nil { - allErrs = append(allErrs, validateHandler(lifecycle.PreStop, fldPath.Child("preStop"))...) - } - return allErrs -} - -var supportedPullPolicies = sets.NewString(string(api.PullAlways), string(api.PullIfNotPresent), string(api.PullNever)) - -func validatePullPolicy(policy api.PullPolicy, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - - switch policy { - case api.PullAlways, api.PullIfNotPresent, api.PullNever: - break - case "": - allErrors = append(allErrors, field.Required(fldPath, "")) - default: - allErrors = append(allErrors, field.NotSupported(fldPath, policy, supportedPullPolicies.List())) - } - - return allErrors -} - -func validateInitContainers(containers, otherContainers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - if len(containers) > 0 { - allErrs = append(allErrs, validateContainers(containers, volumes, fldPath)...) 
- } - - allNames := sets.String{} - for _, ctr := range otherContainers { - allNames.Insert(ctr.Name) - } - for i, ctr := range containers { - idxPath := fldPath.Index(i) - if allNames.Has(ctr.Name) { - allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), ctr.Name)) - } - if len(ctr.Name) > 0 { - allNames.Insert(ctr.Name) - } - if ctr.Lifecycle != nil { - allErrs = append(allErrs, field.Invalid(idxPath.Child("lifecycle"), ctr.Lifecycle, "must not be set for init containers")) - } - if ctr.LivenessProbe != nil { - allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe"), ctr.LivenessProbe, "must not be set for init containers")) - } - if ctr.ReadinessProbe != nil { - allErrs = append(allErrs, field.Invalid(idxPath.Child("readinessProbe"), ctr.ReadinessProbe, "must not be set for init containers")) - } - } - return allErrs -} - -func validateContainers(containers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if len(containers) == 0 { - return append(allErrs, field.Required(fldPath, "")) - } - - allNames := sets.String{} - for i, ctr := range containers { - idxPath := fldPath.Index(i) - if len(ctr.Name) == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("name"), "")) - } else if msgs := validation.IsDNS1123Label(ctr.Name); len(msgs) != 0 { - for i := range msgs { - allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), ctr.Name, msgs[i])) - } - } else if allNames.Has(ctr.Name) { - allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), ctr.Name)) - } else { - allNames.Insert(ctr.Name) - } - if len(ctr.Image) == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("image"), "")) - } - if ctr.Lifecycle != nil { - allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, idxPath.Child("lifecycle"))...) - } - allErrs = append(allErrs, validateProbe(ctr.LivenessProbe, idxPath.Child("livenessProbe"))...) - // Liveness-specific validation - if ctr.LivenessProbe != nil && ctr.LivenessProbe.SuccessThreshold != 1 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe", "successThreshold"), ctr.LivenessProbe.SuccessThreshold, "must be 1")) - } - - allErrs = append(allErrs, validateProbe(ctr.ReadinessProbe, idxPath.Child("readinessProbe"))...) - allErrs = append(allErrs, validateContainerPorts(ctr.Ports, idxPath.Child("ports"))...) - allErrs = append(allErrs, validateEnv(ctr.Env, idxPath.Child("env"))...) - allErrs = append(allErrs, validateVolumeMounts(ctr.VolumeMounts, volumes, idxPath.Child("volumeMounts"))...) - allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, idxPath.Child("imagePullPolicy"))...) - allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, idxPath.Child("resources"))...) - allErrs = append(allErrs, ValidateSecurityContext(ctr.SecurityContext, idxPath.Child("securityContext"))...) - } - // Check for colliding ports across all containers. - allErrs = append(allErrs, checkHostPortConflicts(containers, fldPath)...) 
- - return allErrs -} - -func validateRestartPolicy(restartPolicy *api.RestartPolicy, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - switch *restartPolicy { - case api.RestartPolicyAlways, api.RestartPolicyOnFailure, api.RestartPolicyNever: - break - case "": - allErrors = append(allErrors, field.Required(fldPath, "")) - default: - validValues := []string{string(api.RestartPolicyAlways), string(api.RestartPolicyOnFailure), string(api.RestartPolicyNever)} - allErrors = append(allErrors, field.NotSupported(fldPath, *restartPolicy, validValues)) - } - - return allErrors -} - -func validateDNSPolicy(dnsPolicy *api.DNSPolicy, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - switch *dnsPolicy { - case api.DNSClusterFirst, api.DNSDefault: - break - case "": - allErrors = append(allErrors, field.Required(fldPath, "")) - default: - validValues := []string{string(api.DNSClusterFirst), string(api.DNSDefault)} - allErrors = append(allErrors, field.NotSupported(fldPath, dnsPolicy, validValues)) - } - return allErrors -} - -func validateHostNetwork(hostNetwork bool, containers []api.Container, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - if hostNetwork { - for i, container := range containers { - portsPath := fldPath.Index(i).Child("ports") - for i, port := range container.Ports { - idxPath := portsPath.Index(i) - if port.HostPort != port.ContainerPort { - allErrors = append(allErrors, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, "must match `hostPort` when `hostNetwork` is true")) - } - } - } - } - return allErrors -} - -// validateImagePullSecrets checks to make sure the pull secrets are well -// formed. Right now, we only expect name to be set (it's the only field). If -// this ever changes and someone decides to set those fields, we'd like to -// know. -func validateImagePullSecrets(imagePullSecrets []api.LocalObjectReference, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - for i, currPullSecret := range imagePullSecrets { - idxPath := fldPath.Index(i) - strippedRef := api.LocalObjectReference{Name: currPullSecret.Name} - if !reflect.DeepEqual(strippedRef, currPullSecret) { - allErrors = append(allErrors, field.Invalid(idxPath, currPullSecret, "only name may be set")) - } - } - return allErrors -} - -func validateTaintEffect(effect *api.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList { - if !allowEmpty && len(*effect) == 0 { - return field.ErrorList{field.Required(fldPath, "")} - } - - allErrors := field.ErrorList{} - switch *effect { - // TODO: Replace next line with subsequent commented-out line when implement TaintEffectNoScheduleNoAdmit, TaintEffectNoScheduleNoAdmitNoExecute. - case api.TaintEffectNoSchedule, api.TaintEffectPreferNoSchedule: - // case api.TaintEffectNoSchedule, api.TaintEffectPreferNoSchedule, api.TaintEffectNoScheduleNoAdmit, api.TaintEffectNoScheduleNoAdmitNoExecute: - default: - validValues := []string{ - string(api.TaintEffectNoSchedule), - string(api.TaintEffectPreferNoSchedule), - // TODO: Uncomment this block when implement TaintEffectNoScheduleNoAdmit, TaintEffectNoScheduleNoAdmitNoExecute. - // string(api.TaintEffectNoScheduleNoAdmit), - // string(api.TaintEffectNoScheduleNoAdmitNoExecute), - } - allErrors = append(allErrors, field.NotSupported(fldPath, effect, validValues)) - } - return allErrors -} - -// validateTolerations tests if given tolerations have valid data. 
-func validateTolerations(tolerations []api.Toleration, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - for i, toleration := range tolerations { - idxPath := fldPath.Index(i) - // validate the toleration key - allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(toleration.Key, idxPath.Child("key"))...) - - // validate toleration operator and value - switch toleration.Operator { - case api.TolerationOpEqual, "": - if errs := validation.IsValidLabelValue(toleration.Value); len(errs) != 0 { - allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Value, strings.Join(errs, ";"))) - } - case api.TolerationOpExists: - if len(toleration.Value) > 0 { - allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration, "value must be empty when `operator` is 'Exists'")) - } - default: - validValues := []string{string(api.TolerationOpEqual), string(api.TolerationOpExists)} - allErrors = append(allErrors, field.NotSupported(idxPath.Child("operator"), toleration.Operator, validValues)) - } - - // validate toleration effect - if len(toleration.Effect) > 0 { - allErrors = append(allErrors, validateTaintEffect(&toleration.Effect, true, idxPath.Child("effect"))...) - } - } - return allErrors -} - -// ValidatePod tests if required fields in the pod are set. -func ValidatePod(pod *api.Pod) field.ErrorList { - fldPath := field.NewPath("metadata") - allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, fldPath) - allErrs = append(allErrs, ValidatePodSpecificAnnotations(pod.ObjectMeta.Annotations, fldPath.Child("annotations"))...) - allErrs = append(allErrs, ValidatePodSpec(&pod.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidatePodSpec tests that the specified PodSpec has valid data. -// This includes checking formatting and uniqueness. It also canonicalizes the -// structure by setting default values and implementing any backwards-compatibility -// tricks. -func ValidatePodSpec(spec *api.PodSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - allVolumes, vErrs := validateVolumes(spec.Volumes, fldPath.Child("volumes")) - allErrs = append(allErrs, vErrs...) - allErrs = append(allErrs, validateContainers(spec.Containers, allVolumes, fldPath.Child("containers"))...) - allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, allVolumes, fldPath.Child("initContainers"))...) - allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...) - allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...) - allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...) - allErrs = append(allErrs, ValidatePodSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"))...) - allErrs = append(allErrs, validateImagePullSecrets(spec.ImagePullSecrets, fldPath.Child("imagePullSecrets"))...) 
- if len(spec.ServiceAccountName) > 0 { - for _, msg := range ValidateServiceAccountName(spec.ServiceAccountName, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceAccountName"), spec.ServiceAccountName, msg)) - } - } - - if len(spec.NodeName) > 0 { - for _, msg := range ValidateNodeName(spec.NodeName, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), spec.NodeName, msg)) - } - } - - if spec.ActiveDeadlineSeconds != nil { - if *spec.ActiveDeadlineSeconds <= 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("activeDeadlineSeconds"), spec.ActiveDeadlineSeconds, "must be greater than 0")) - } - } - - if len(spec.Hostname) > 0 { - for _, msg := range validation.IsDNS1123Label(spec.Hostname) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("hostname"), spec.Hostname, msg)) - } - } - - if len(spec.Subdomain) > 0 { - for _, msg := range validation.IsDNS1123Label(spec.Subdomain) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("subdomain"), spec.Subdomain, msg)) - } - } - - return allErrs -} - -// ValidateNodeSelectorRequirement tests that the specified NodeSelectorRequirement fields has valid data -func ValidateNodeSelectorRequirement(rq api.NodeSelectorRequirement, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - switch rq.Operator { - case api.NodeSelectorOpIn, api.NodeSelectorOpNotIn: - if len(rq.Values) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'")) - } - case api.NodeSelectorOpExists, api.NodeSelectorOpDoesNotExist: - if len(rq.Values) > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) - } - - case api.NodeSelectorOpGt, api.NodeSelectorOpLt: - if len(rq.Values) != 1 { - allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified single value when `operator` is 'Lt' or 'Gt'")) - } - default: - allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), rq.Operator, "not a valid selector operator")) - } - allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(rq.Key, fldPath.Child("key"))...) - return allErrs -} - -// ValidateNodeSelectorTerm tests that the specified node selector term has valid data -func ValidateNodeSelectorTerm(term api.NodeSelectorTerm, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if len(term.MatchExpressions) == 0 { - return append(allErrs, field.Required(fldPath.Child("matchExpressions"), "must have at least one node selector requirement")) - } - for j, req := range term.MatchExpressions { - allErrs = append(allErrs, ValidateNodeSelectorRequirement(req, fldPath.Child("matchExpressions").Index(j))...) - } - return allErrs -} - -// ValidateNodeSelector tests that the specified nodeSelector fields has valid data -func ValidateNodeSelector(nodeSelector *api.NodeSelector, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - termFldPath := fldPath.Child("nodeSelectorTerms") - if len(nodeSelector.NodeSelectorTerms) == 0 { - return append(allErrs, field.Required(termFldPath, "must have at least one node selector term")) - } - - for i, term := range nodeSelector.NodeSelectorTerms { - allErrs = append(allErrs, ValidateNodeSelectorTerm(term, termFldPath.Index(i))...) 
- } - - return allErrs -} - -// ValidatePreferredSchedulingTerms tests that the specified SoftNodeAffinity fields has valid data -func ValidatePreferredSchedulingTerms(terms []api.PreferredSchedulingTerm, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - for i, term := range terms { - if term.Weight <= 0 || term.Weight > 100 { - allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("weight"), term.Weight, "must be in the range 1-100")) - } - - allErrs = append(allErrs, ValidateNodeSelectorTerm(term.Preference, fldPath.Index(i).Child("preference"))...) - } - return allErrs -} - -// validatePodAffinityTerm tests that the specified podAffinityTerm fields have valid data -func validatePodAffinityTerm(podAffinityTerm api.PodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, fldPath.Child("matchExpressions"))...) - for _, name := range podAffinityTerm.Namespaces { - for _, msg := range ValidateNamespaceName(name, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), name, msg)) - } - } - if !allowEmptyTopologyKey && len(podAffinityTerm.TopologyKey) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("topologyKey"), "can only be empty for PreferredDuringScheduling pod anti affinity")) - } - if len(podAffinityTerm.TopologyKey) != 0 { - allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(podAffinityTerm.TopologyKey, fldPath.Child("topologyKey"))...) - } - return allErrs -} - -// validatePodAffinityTerms tests that the specified podAffinityTerms fields have valid data -func validatePodAffinityTerms(podAffinityTerms []api.PodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for i, podAffinityTerm := range podAffinityTerms { - allErrs = append(allErrs, validatePodAffinityTerm(podAffinityTerm, allowEmptyTopologyKey, fldPath.Index(i))...) - } - return allErrs -} - -// validateWeightedPodAffinityTerms tests that the specified weightedPodAffinityTerms fields have valid data -func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []api.WeightedPodAffinityTerm, allowEmptyTopologyKey bool, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for j, weightedTerm := range weightedPodAffinityTerms { - if weightedTerm.Weight <= 0 || weightedTerm.Weight > 100 { - allErrs = append(allErrs, field.Invalid(fldPath.Index(j).Child("weight"), weightedTerm.Weight, "must be in the range 1-100")) - } - allErrs = append(allErrs, validatePodAffinityTerm(weightedTerm.PodAffinityTerm, allowEmptyTopologyKey, fldPath.Index(j).Child("podAffinityTerm"))...) - } - return allErrs -} - -// validatePodAntiAffinity tests that the specified podAntiAffinity fields have valid data -func validatePodAntiAffinity(podAntiAffinity *api.PodAntiAffinity, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - // TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented. - // if podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil { - // allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution, false, - // fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) 
- //} - if podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { - // empty topologyKey is not allowed for hard pod anti-affinity - allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, false, - fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) - } - if podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil { - // empty topologyKey is allowed for soft pod anti-affinity - allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, true, - fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) - } - return allErrs -} - -// validatePodAffinity tests that the specified podAffinity fields have valid data -func validatePodAffinity(podAffinity *api.PodAffinity, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - // TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented. - // if podAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil { - // allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingRequiredDuringExecution, false, - // fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) - //} - if podAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { - // empty topologyKey is not allowed for hard pod affinity - allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution, false, - fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) - } - if podAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil { - // empty topologyKey is not allowed for soft pod affinity - allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAffinity.PreferredDuringSchedulingIgnoredDuringExecution, false, - fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) - } - return allErrs -} - -// ValidateAffinityInPodAnnotations tests that the serialized Affinity in Pod.Annotations has valid data -func ValidateAffinityInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - affinity, err := api.GetAffinityFromPodAnnotations(annotations) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath, api.AffinityAnnotationKey, err.Error())) - return allErrs - } - - affinityFldPath := fldPath.Child(api.AffinityAnnotationKey) - if affinity.NodeAffinity != nil { - na := affinity.NodeAffinity - naFldPath := affinityFldPath.Child("nodeAffinity") - // TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented. - // if na.RequiredDuringSchedulingRequiredDuringExecution != nil { - // allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, naFldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) - // } - - if na.RequiredDuringSchedulingIgnoredDuringExecution != nil { - allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, naFldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) - } - - if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, naFldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) 
- } - } - if affinity.PodAffinity != nil { - allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, affinityFldPath.Child("podAffinity"))...) - } - if affinity.PodAntiAffinity != nil { - allErrs = append(allErrs, validatePodAntiAffinity(affinity.PodAntiAffinity, affinityFldPath.Child("podAntiAffinity"))...) - } - - return allErrs -} - -// ValidateTolerationsInPodAnnotations tests that the serialized tolerations in Pod.Annotations has valid data -func ValidateTolerationsInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - tolerations, err := api.GetTolerationsFromPodAnnotations(annotations) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath, api.TolerationsAnnotationKey, err.Error())) - return allErrs - } - if len(tolerations) > 0 { - allErrs = append(allErrs, validateTolerations(tolerations, fldPath.Child(api.TolerationsAnnotationKey))...) - } - - return allErrs -} - -func validateSeccompProfile(p string, fldPath *field.Path) field.ErrorList { - if p == "docker/default" { - return nil - } - if p == "unconfined" { - return nil - } - if strings.HasPrefix(p, "localhost/") { - return validateSubPath(strings.TrimPrefix(p, "localhost/"), fldPath) - } - return field.ErrorList{field.Invalid(fldPath, p, "must be a valid seccomp profile")} -} - -func ValidateSeccompPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if p, exists := annotations[api.SeccompPodAnnotationKey]; exists { - allErrs = append(allErrs, validateSeccompProfile(p, fldPath.Child(api.SeccompPodAnnotationKey))...) - } - for k, p := range annotations { - if strings.HasPrefix(k, api.SeccompContainerAnnotationKeyPrefix) { - allErrs = append(allErrs, validateSeccompProfile(p, fldPath.Child(k))...) - } - } - - return allErrs -} - -// ValidatePodSecurityContext test that the specified PodSecurityContext has valid data. -func ValidatePodSecurityContext(securityContext *api.PodSecurityContext, spec *api.PodSpec, specPath, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if securityContext != nil { - allErrs = append(allErrs, validateHostNetwork(securityContext.HostNetwork, spec.Containers, specPath.Child("containers"))...) - if securityContext.FSGroup != nil && !validation.IsValidGroupId(*securityContext.FSGroup) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("fsGroup"), *(securityContext.FSGroup), IdRangeErrorMsg)) - } - if securityContext.RunAsUser != nil && !validation.IsValidUserId(*securityContext.RunAsUser) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *(securityContext.RunAsUser), IdRangeErrorMsg)) - } - for i, gid := range securityContext.SupplementalGroups { - if !validation.IsValidGroupId(gid) { - supplementalGroup := fmt.Sprintf(`supplementalGroups[%d]`, i) - allErrs = append(allErrs, field.Invalid(fldPath.Child(supplementalGroup), gid, IdRangeErrorMsg)) - } - } - } - - return allErrs -} - -// ValidatePodUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields -// that cannot be changed. -func ValidatePodUpdate(newPod, oldPod *api.Pod) field.ErrorList { - fldPath := field.NewPath("metadata") - allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath) - allErrs = append(allErrs, ValidatePodSpecificAnnotations(newPod.ObjectMeta.Annotations, fldPath.Child("annotations"))...) 
- specPath := field.NewPath("spec") - if len(newPod.Spec.Containers) != len(oldPod.Spec.Containers) { - //TODO: Pinpoint the specific container that causes the invalid error after we have strategic merge diff - allErrs = append(allErrs, field.Forbidden(specPath.Child("containers"), "pod updates may not add or remove containers")) - return allErrs - } - - // validate updateable fields: - // 1. containers[*].image - // 2. spec.activeDeadlineSeconds - - // validate updated container images - for i, ctr := range newPod.Spec.Containers { - if len(ctr.Image) == 0 { - allErrs = append(allErrs, field.Required(specPath.Child("containers").Index(i).Child("image"), "")) - } - } - - // validate updated spec.activeDeadlineSeconds. two types of updates are allowed: - // 1. from nil to a positive value - // 2. from a positive value to a lesser, non-negative value - if newPod.Spec.ActiveDeadlineSeconds != nil { - newActiveDeadlineSeconds := *newPod.Spec.ActiveDeadlineSeconds - if newActiveDeadlineSeconds < 0 { - allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newActiveDeadlineSeconds, isNegativeErrorMsg)) - return allErrs - } - if oldPod.Spec.ActiveDeadlineSeconds != nil { - oldActiveDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds - if oldActiveDeadlineSeconds < newActiveDeadlineSeconds { - allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newActiveDeadlineSeconds, "must be less than or equal to previous value")) - return allErrs - } - } - } else if oldPod.Spec.ActiveDeadlineSeconds != nil { - allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newPod.Spec.ActiveDeadlineSeconds, "must not update from a positive integer to nil value")) - } - - // handle updateable fields by munging those fields prior to deep equal comparison. - mungedPod := *newPod - // munge containers[*].image - var newContainers []api.Container - for ix, container := range mungedPod.Spec.Containers { - container.Image = oldPod.Spec.Containers[ix].Image - newContainers = append(newContainers, container) - } - mungedPod.Spec.Containers = newContainers - // munge spec.activeDeadlineSeconds - mungedPod.Spec.ActiveDeadlineSeconds = nil - if oldPod.Spec.ActiveDeadlineSeconds != nil { - activeDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds - mungedPod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds - } - if !api.Semantic.DeepEqual(mungedPod.Spec, oldPod.Spec) { - //TODO: Pinpoint the specific field that causes the invalid error after we have strategic merge diff - allErrs = append(allErrs, field.Forbidden(specPath, "pod updates may not change fields other than `containers[*].image` or `spec.activeDeadlineSeconds`")) - } - - return allErrs -} - -// ValidatePodStatusUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields -// that cannot be changed. -func ValidatePodStatusUpdate(newPod, oldPod *api.Pod) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, field.NewPath("metadata")) - - // TODO: allow change when bindings are properly decoupled from pods - if newPod.Spec.NodeName != oldPod.Spec.NodeName { - allErrs = append(allErrs, field.Forbidden(field.NewPath("status", "nodeName"), "may not be changed directly")) - } - - // For status update we ignore changes to pod spec. - newPod.Spec = oldPod.Spec - - return allErrs -} - -// ValidatePodBinding tests if required fields in the pod binding are legal. 
-func ValidatePodBinding(binding *api.Binding) field.ErrorList { - allErrs := field.ErrorList{} - - if len(binding.Target.Kind) != 0 && binding.Target.Kind != "Node" { - // TODO: When validation becomes versioned, this gets more complicated. - allErrs = append(allErrs, field.NotSupported(field.NewPath("target", "kind"), binding.Target.Kind, []string{"Node", "<empty>"})) - } - if len(binding.Target.Name) == 0 { - // TODO: When validation becomes versioned, this gets more complicated. - allErrs = append(allErrs, field.Required(field.NewPath("target", "name"), "")) - } - - return allErrs -} - -// ValidatePodTemplate tests if required fields in the pod template are set. -func ValidatePodTemplate(pod *api.PodTemplate) field.ErrorList { - allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidatePodTemplateSpec(&pod.Template, field.NewPath("template"))...) - return allErrs -} - -// ValidatePodTemplateUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields -// that cannot be changed. -func ValidatePodTemplateUpdate(newPod, oldPod *api.PodTemplate) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&oldPod.ObjectMeta, &newPod.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidatePodTemplateSpec(&newPod.Template, field.NewPath("template"))...) - return allErrs -} - -var supportedSessionAffinityType = sets.NewString(string(api.ServiceAffinityClientIP), string(api.ServiceAffinityNone)) -var supportedServiceType = sets.NewString(string(api.ServiceTypeClusterIP), string(api.ServiceTypeNodePort), - string(api.ServiceTypeLoadBalancer)) - -// ValidateService tests if required fields in the service are set. -func ValidateService(service *api.Service) field.ErrorList { - allErrs := ValidateObjectMeta(&service.ObjectMeta, true, ValidateServiceName, field.NewPath("metadata")) - - specPath := field.NewPath("spec") - if len(service.Spec.Ports) == 0 && service.Spec.ClusterIP != api.ClusterIPNone { - allErrs = append(allErrs, field.Required(specPath.Child("ports"), "")) - } - if service.Spec.Type == api.ServiceTypeLoadBalancer { - for ix := range service.Spec.Ports { - port := &service.Spec.Ports[ix] - // This is a workaround for broken cloud environments that - // over-open firewalls. Hopefully it can go away when more clouds - // understand containers better. - if port.Port == 10250 { - portPath := specPath.Child("ports").Index(ix) - allErrs = append(allErrs, field.Invalid(portPath, port.Port, "may not expose port 10250 externally since it is used by kubelet")) - } - } - } - - isHeadlessService := service.Spec.ClusterIP == api.ClusterIPNone - allPortNames := sets.String{} - portsPath := specPath.Child("ports") - for i := range service.Spec.Ports { - portPath := portsPath.Index(i) - allErrs = append(allErrs, validateServicePort(&service.Spec.Ports[i], len(service.Spec.Ports) > 1, isHeadlessService, &allPortNames, portPath)...) - } - - if service.Spec.Selector != nil { - allErrs = append(allErrs, unversionedvalidation.ValidateLabels(service.Spec.Selector, specPath.Child("selector"))...) 
- } - - if len(service.Spec.SessionAffinity) == 0 { - allErrs = append(allErrs, field.Required(specPath.Child("sessionAffinity"), "")) - } else if !supportedSessionAffinityType.Has(string(service.Spec.SessionAffinity)) { - allErrs = append(allErrs, field.NotSupported(specPath.Child("sessionAffinity"), service.Spec.SessionAffinity, supportedSessionAffinityType.List())) - } - - if api.IsServiceIPSet(service) { - if ip := net.ParseIP(service.Spec.ClusterIP); ip == nil { - allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "must be empty, 'None', or a valid IP address")) - } - } - - ipPath := specPath.Child("externalIPs") - for i, ip := range service.Spec.ExternalIPs { - idxPath := ipPath.Index(i) - if ip == "0.0.0.0" { - allErrs = append(allErrs, field.Invalid(idxPath, ip, "must be a valid IP address")) - } - allErrs = append(allErrs, validateIpIsNotLinkLocalOrLoopback(ip, idxPath)...) - } - - if len(service.Spec.Type) == 0 { - allErrs = append(allErrs, field.Required(specPath.Child("type"), "")) - } else if !supportedServiceType.Has(string(service.Spec.Type)) { - allErrs = append(allErrs, field.NotSupported(specPath.Child("type"), service.Spec.Type, supportedServiceType.List())) - } - - if service.Spec.Type == api.ServiceTypeLoadBalancer { - portsPath := specPath.Child("ports") - includeProtocols := sets.NewString() - for i := range service.Spec.Ports { - portPath := portsPath.Index(i) - if !supportedPortProtocols.Has(string(service.Spec.Ports[i].Protocol)) { - allErrs = append(allErrs, field.Invalid(portPath.Child("protocol"), service.Spec.Ports[i].Protocol, "cannot create an external load balancer with non-TCP/UDP ports")) - } else { - includeProtocols.Insert(string(service.Spec.Ports[i].Protocol)) - } - } - if includeProtocols.Len() > 1 { - allErrs = append(allErrs, field.Invalid(portsPath, service.Spec.Ports, "cannot create an external load balancer with mix protocols")) - } - } - - if service.Spec.Type == api.ServiceTypeClusterIP { - portsPath := specPath.Child("ports") - for i := range service.Spec.Ports { - portPath := portsPath.Index(i) - if service.Spec.Ports[i].NodePort != 0 { - allErrs = append(allErrs, field.Invalid(portPath.Child("nodePort"), service.Spec.Ports[i].NodePort, "may not be used when `type` is 'ClusterIP'")) - } - } - } - - // Check for duplicate NodePorts, considering (protocol,port) pairs - portsPath = specPath.Child("ports") - nodePorts := make(map[api.ServicePort]bool) - for i := range service.Spec.Ports { - port := &service.Spec.Ports[i] - if port.NodePort == 0 { - continue - } - portPath := portsPath.Index(i) - var key api.ServicePort - key.Protocol = port.Protocol - key.NodePort = port.NodePort - _, found := nodePorts[key] - if found { - allErrs = append(allErrs, field.Duplicate(portPath.Child("nodePort"), port.NodePort)) - } - nodePorts[key] = true - } - - // Validate SourceRange field and annotation - _, ok := service.Annotations[apiservice.AnnotationLoadBalancerSourceRangesKey] - if len(service.Spec.LoadBalancerSourceRanges) > 0 || ok { - var fieldPath *field.Path - var val string - if len(service.Spec.LoadBalancerSourceRanges) > 0 { - fieldPath = specPath.Child("LoadBalancerSourceRanges") - val = fmt.Sprintf("%v", service.Spec.LoadBalancerSourceRanges) - } else { - fieldPath = field.NewPath("metadata", "annotations").Key(apiservice.AnnotationLoadBalancerSourceRangesKey) - val = service.Annotations[apiservice.AnnotationLoadBalancerSourceRangesKey] - } - if service.Spec.Type != api.ServiceTypeLoadBalancer { - allErrs 
= append(allErrs, field.Invalid(fieldPath, "", "may only be used when `type` is 'LoadBalancer'")) - } - _, err := apiservice.GetLoadBalancerSourceRanges(service) - if err != nil { - allErrs = append(allErrs, field.Invalid(fieldPath, val, "must be a list of IP ranges. For example, 10.240.0.0/24,10.250.0.0/24 ")) - } - } - return allErrs -} - -func validateServicePort(sp *api.ServicePort, requireName, isHeadlessService bool, allNames *sets.String, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - if requireName && len(sp.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) - } else if len(sp.Name) != 0 { - if msgs := validation.IsDNS1123Label(sp.Name); len(msgs) != 0 { - for i := range msgs { - allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), sp.Name, msgs[i])) - } - } else if allNames.Has(sp.Name) { - allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), sp.Name)) - } else { - allNames.Insert(sp.Name) - } - } - - if !validation.IsValidPortNum(int(sp.Port)) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), sp.Port, PortRangeErrorMsg)) - } - - if len(sp.Protocol) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), "")) - } else if !supportedPortProtocols.Has(string(sp.Protocol)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), sp.Protocol, supportedPortProtocols.List())) - } - - if sp.TargetPort.Type == intstr.Int && !validation.IsValidPortNum(sp.TargetPort.IntValue()) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("targetPort"), sp.TargetPort, PortRangeErrorMsg)) - } - if sp.TargetPort.Type == intstr.String && !validation.IsValidPortName(sp.TargetPort.StrVal) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("targetPort"), sp.TargetPort, PortNameErrorMsg)) - } - - // in the v1 API, targetPorts on headless services were tolerated. - // once we have version-specific validation, we can reject this on newer API versions, but until then, we have to tolerate it for compatibility. - // - // if isHeadlessService { - // if sp.TargetPort.Type == intstr.String || (sp.TargetPort.Type == intstr.Int && sp.Port != sp.TargetPort.IntValue()) { - // allErrs = append(allErrs, field.Invalid(fldPath.Child("targetPort"), sp.TargetPort, "must be equal to the value of 'port' when clusterIP = None")) - // } - // } - - return allErrs -} - -// ValidateServiceUpdate tests if required fields in the service are set during an update -func ValidateServiceUpdate(service, oldService *api.Service) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata")) - - if api.IsServiceIPSet(oldService) { - allErrs = append(allErrs, ValidateImmutableField(service.Spec.ClusterIP, oldService.Spec.ClusterIP, field.NewPath("spec", "clusterIP"))...) - } - - allErrs = append(allErrs, ValidateService(service)...) - return allErrs -} - -// ValidateServiceStatusUpdate tests if required fields in the Service are set when updating status. -func ValidateServiceStatusUpdate(service, oldService *api.Service) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateLoadBalancerStatus(&service.Status.LoadBalancer, field.NewPath("status", "loadBalancer"))...) - return allErrs -} - -// ValidateReplicationController tests if required fields in the replication controller are set. 
-func ValidateReplicationController(controller *api.ReplicationController) field.ErrorList { - allErrs := ValidateObjectMeta(&controller.ObjectMeta, true, ValidateReplicationControllerName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidateReplicationControllerUpdate tests if required fields in the replication controller are set. -func ValidateReplicationControllerUpdate(controller, oldController *api.ReplicationController) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidateReplicationControllerStatusUpdate tests if required fields in the replication controller are set. -func ValidateReplicationControllerStatusUpdate(controller, oldController *api.ReplicationController) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata")) - statusPath := field.NewPath("status") - allErrs = append(allErrs, ValidateNonnegativeField(int64(controller.Status.Replicas), statusPath.Child("replicas"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(controller.Status.FullyLabeledReplicas), statusPath.Child("fullyLabeledReplicas"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(controller.Status.ObservedGeneration), statusPath.Child("observedGeneration"))...) - return allErrs -} - -// Validates that the given selector is non-empty. -func ValidateNonEmptySelector(selectorMap map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - selector := labels.Set(selectorMap).AsSelector() - if selector.Empty() { - allErrs = append(allErrs, field.Required(fldPath, "")) - } - return allErrs -} - -// Validates the given template and ensures that it is in accordance with the desrired selector and replicas. -func ValidatePodTemplateSpecForRC(template *api.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if template == nil { - allErrs = append(allErrs, field.Required(fldPath, "")) - } else { - selector := labels.Set(selectorMap).AsSelector() - if !selector.Empty() { - // Verify that the RC selector matches the labels in template. - labels := labels.Set(template.Labels) - if !selector.Matches(labels) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`")) - } - } - allErrs = append(allErrs, ValidatePodTemplateSpec(template, fldPath)...) - if replicas > 1 { - allErrs = append(allErrs, ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...) - } - // RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec(). - if template.Spec.RestartPolicy != api.RestartPolicyAlways { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)})) - } - } - return allErrs -} - -// ValidateReplicationControllerSpec tests if required fields in the replication controller spec are set. 
-func ValidateReplicationControllerSpec(spec *api.ReplicationControllerSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, ValidateNonEmptySelector(spec.Selector, fldPath.Child("selector"))...) - allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...) - allErrs = append(allErrs, ValidatePodTemplateSpecForRC(spec.Template, spec.Selector, spec.Replicas, fldPath.Child("template"))...) - return allErrs -} - -// ValidatePodTemplateSpec validates the spec of a pod template -func ValidatePodTemplateSpec(spec *api.PodTemplateSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.Labels, fldPath.Child("labels"))...) - allErrs = append(allErrs, ValidateAnnotations(spec.Annotations, fldPath.Child("annotations"))...) - allErrs = append(allErrs, ValidatePodSpecificAnnotations(spec.Annotations, fldPath.Child("annotations"))...) - allErrs = append(allErrs, ValidatePodSpec(&spec.Spec, fldPath.Child("spec"))...) - return allErrs -} - -func ValidateReadOnlyPersistentDisks(volumes []api.Volume, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for i := range volumes { - vol := &volumes[i] - idxPath := fldPath.Index(i) - if vol.GCEPersistentDisk != nil { - if vol.GCEPersistentDisk.ReadOnly == false { - allErrs = append(allErrs, field.Invalid(idxPath.Child("gcePersistentDisk", "readOnly"), false, "must be true for replicated pods > 1; GCE PD can only be mounted on multiple machines if it is read-only")) - } - } - // TODO: What to do for AWS? It doesn't support replicas - } - return allErrs -} - -// validateTaints tests if given taints have valid data. -func validateTaints(taints []api.Taint, fldPath *field.Path) field.ErrorList { - allErrors := field.ErrorList{} - for i, currTaint := range taints { - idxPath := fldPath.Index(i) - // validate the taint key - allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(currTaint.Key, idxPath.Child("key"))...) - // validate the taint value - if errs := validation.IsValidLabelValue(currTaint.Value); len(errs) != 0 { - allErrors = append(allErrors, field.Invalid(idxPath.Child("value"), currTaint.Value, strings.Join(errs, ";"))) - } - // validate the taint effect - allErrors = append(allErrors, validateTaintEffect(&currTaint.Effect, false, idxPath.Child("effect"))...) - } - return allErrors -} - -// ValidateTaintsInNodeAnnotations tests that the serialized taints in Node.Annotations has valid data -func ValidateTaintsInNodeAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - taints, err := api.GetTaintsFromNodeAnnotations(annotations) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath, api.TaintsAnnotationKey, err.Error())) - return allErrs - } - if len(taints) > 0 { - allErrs = append(allErrs, validateTaints(taints, fldPath.Child(api.TaintsAnnotationKey))...) - } - - return allErrs -} - -func ValidateNodeSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - if annotations[api.TaintsAnnotationKey] != "" { - return ValidateTaintsInNodeAnnotations(annotations, fldPath) - } - return field.ErrorList{} -} - -// ValidateNode tests if required fields in the node are set. 
-func ValidateNode(node *api.Node) field.ErrorList { - fldPath := field.NewPath("metadata") - allErrs := ValidateObjectMeta(&node.ObjectMeta, false, ValidateNodeName, fldPath) - allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...) - - // Only validate spec. All status fields are optional and can be updated later. - - // external ID is required. - if len(node.Spec.ExternalID) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("spec", "externalID"), "")) - } - - // TODO(rjnagal): Ignore PodCIDR till its completely implemented. - return allErrs -} - -// ValidateNodeUpdate tests to make sure a node update can be applied. Modifies oldNode. -func ValidateNodeUpdate(node, oldNode *api.Node) field.ErrorList { - fldPath := field.NewPath("metadata") - allErrs := ValidateObjectMetaUpdate(&node.ObjectMeta, &oldNode.ObjectMeta, fldPath) - allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...) - - // TODO: Enable the code once we have better api object.status update model. Currently, - // anyone can update node status. - // if !api.Semantic.DeepEqual(node.Status, api.NodeStatus{}) { - // allErrs = append(allErrs, field.Invalid("status", node.Status, "must be empty")) - // } - - // Validte no duplicate addresses in node status. - addresses := make(map[api.NodeAddress]bool) - for i, address := range node.Status.Addresses { - if _, ok := addresses[address]; ok { - allErrs = append(allErrs, field.Duplicate(field.NewPath("status", "addresses").Index(i), address)) - } - addresses[address] = true - } - - if len(oldNode.Spec.PodCIDR) == 0 { - // Allow the controller manager to assign a CIDR to a node if it doesn't have one. - oldNode.Spec.PodCIDR = node.Spec.PodCIDR - } else { - if oldNode.Spec.PodCIDR != node.Spec.PodCIDR { - allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "podCIDR"), "node updates may not change podCIDR except from \"\" to valid")) - } - } - // TODO: move reset function to its own location - // Ignore metadata changes now that they have been tested - oldNode.ObjectMeta = node.ObjectMeta - // Allow users to update capacity - oldNode.Status.Capacity = node.Status.Capacity - // Allow users to unschedule node - oldNode.Spec.Unschedulable = node.Spec.Unschedulable - // Clear status - oldNode.Status = node.Status - - // TODO: Add a 'real' error type for this error and provide print actual diffs. - if !api.Semantic.DeepEqual(oldNode, node) { - glog.V(4).Infof("Update failed validation %#v vs %#v", oldNode, node) - allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "node updates may only change labels or capacity")) - } - - return allErrs -} - -// Validate compute resource typename. -// Refer to docs/design/resources.md for more details. -func validateResourceName(value string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for _, msg := range validation.IsQualifiedName(value) { - allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) - } - if len(allErrs) != 0 { - return allErrs - } - - if len(strings.Split(value, "/")) == 1 { - if !api.IsStandardResourceName(value) { - return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource type or fully qualified")) - } - } - - return field.ErrorList{} -} - -// Validate container resource name -// Refer to docs/design/resources.md for more details. 
-func validateContainerResourceName(value string, fldPath *field.Path) field.ErrorList { - allErrs := validateResourceName(value, fldPath) - if len(strings.Split(value, "/")) == 1 { - if !api.IsStandardContainerResourceName(value) { - return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource for containers")) - } - } - return field.ErrorList{} -} - -// Validate resource names that can go in a resource quota -// Refer to docs/design/resources.md for more details. -func validateResourceQuotaResourceName(value string, fldPath *field.Path) field.ErrorList { - allErrs := validateResourceName(value, fldPath) - if len(strings.Split(value, "/")) == 1 { - if !api.IsStandardQuotaResourceName(value) { - return append(allErrs, field.Invalid(fldPath, value, isInvalidQuotaResource)) - } - } - return field.ErrorList{} -} - -// Validate limit range types -func validateLimitRangeTypeName(value string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for _, msg := range validation.IsQualifiedName(value) { - allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) - } - if len(allErrs) != 0 { - return allErrs - } - - if len(strings.Split(value, "/")) == 1 { - if !api.IsStandardLimitRangeType(value) { - return append(allErrs, field.Invalid(fldPath, value, "must be a standard limit type or fully qualified")) - } - } - - return allErrs -} - -// Validate limit range resource name -// limit types (other than Pod/Container) could contain storage not just cpu or memory -func validateLimitRangeResourceName(limitType api.LimitType, value string, fldPath *field.Path) field.ErrorList { - switch limitType { - case api.LimitTypePod, api.LimitTypeContainer: - return validateContainerResourceName(value, fldPath) - default: - return validateResourceName(value, fldPath) - } -} - -// ValidateLimitRange tests if required fields in the LimitRange are set. -func ValidateLimitRange(limitRange *api.LimitRange) field.ErrorList { - allErrs := ValidateObjectMeta(&limitRange.ObjectMeta, true, ValidateLimitRangeName, field.NewPath("metadata")) - - // ensure resource names are properly qualified per docs/design/resources.md - limitTypeSet := map[api.LimitType]bool{} - fldPath := field.NewPath("spec", "limits") - for i := range limitRange.Spec.Limits { - idxPath := fldPath.Index(i) - limit := &limitRange.Spec.Limits[i] - allErrs = append(allErrs, validateLimitRangeTypeName(string(limit.Type), idxPath.Child("type"))...) - - _, found := limitTypeSet[limit.Type] - if found { - allErrs = append(allErrs, field.Duplicate(idxPath.Child("type"), limit.Type)) - } - limitTypeSet[limit.Type] = true - - keys := sets.String{} - min := map[string]resource.Quantity{} - max := map[string]resource.Quantity{} - defaults := map[string]resource.Quantity{} - defaultRequests := map[string]resource.Quantity{} - maxLimitRequestRatios := map[string]resource.Quantity{} - - for k, q := range limit.Max { - allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("max").Key(string(k)))...) - keys.Insert(string(k)) - max[string(k)] = q - } - for k, q := range limit.Min { - allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("min").Key(string(k)))...) 
- keys.Insert(string(k)) - min[string(k)] = q - } - - if limit.Type == api.LimitTypePod { - if len(limit.Default) > 0 { - allErrs = append(allErrs, field.Forbidden(idxPath.Child("default"), "may not be specified when `type` is 'Pod'")) - } - if len(limit.DefaultRequest) > 0 { - allErrs = append(allErrs, field.Forbidden(idxPath.Child("defaultRequest"), "may not be specified when `type` is 'Pod'")) - } - } else { - for k, q := range limit.Default { - allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("default").Key(string(k)))...) - keys.Insert(string(k)) - defaults[string(k)] = q - } - for k, q := range limit.DefaultRequest { - allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("defaultRequest").Key(string(k)))...) - keys.Insert(string(k)) - defaultRequests[string(k)] = q - } - } - - for k, q := range limit.MaxLimitRequestRatio { - allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("maxLimitRequestRatio").Key(string(k)))...) - keys.Insert(string(k)) - maxLimitRequestRatios[string(k)] = q - } - - for k := range keys { - minQuantity, minQuantityFound := min[k] - maxQuantity, maxQuantityFound := max[k] - defaultQuantity, defaultQuantityFound := defaults[k] - defaultRequestQuantity, defaultRequestQuantityFound := defaultRequests[k] - maxRatio, maxRatioFound := maxLimitRequestRatios[k] - - if minQuantityFound && maxQuantityFound && minQuantity.Cmp(maxQuantity) > 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("min").Key(string(k)), minQuantity, fmt.Sprintf("min value %s is greater than max value %s", minQuantity.String(), maxQuantity.String()))) - } - - if defaultRequestQuantityFound && minQuantityFound && minQuantity.Cmp(defaultRequestQuantity) > 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("min value %s is greater than default request value %s", minQuantity.String(), defaultRequestQuantity.String()))) - } - - if defaultRequestQuantityFound && maxQuantityFound && defaultRequestQuantity.Cmp(maxQuantity) > 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than max value %s", defaultRequestQuantity.String(), maxQuantity.String()))) - } - - if defaultRequestQuantityFound && defaultQuantityFound && defaultRequestQuantity.Cmp(defaultQuantity) > 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than default limit value %s", defaultRequestQuantity.String(), defaultQuantity.String()))) - } - - if defaultQuantityFound && minQuantityFound && minQuantity.Cmp(defaultQuantity) > 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("default").Key(string(k)), minQuantity, fmt.Sprintf("min value %s is greater than default value %s", minQuantity.String(), defaultQuantity.String()))) - } - - if defaultQuantityFound && maxQuantityFound && defaultQuantity.Cmp(maxQuantity) > 0 { - allErrs = append(allErrs, field.Invalid(idxPath.Child("default").Key(string(k)), maxQuantity, fmt.Sprintf("default value %s is greater than max value %s", defaultQuantity.String(), maxQuantity.String()))) - } - if maxRatioFound && maxRatio.Cmp(*resource.NewQuantity(1, resource.DecimalSI)) < 0 { - allErrs = append(allErrs, 
field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is less than 1", maxRatio.String()))) - } - if maxRatioFound && minQuantityFound && maxQuantityFound { - maxRatioValue := float64(maxRatio.Value()) - minQuantityValue := minQuantity.Value() - maxQuantityValue := maxQuantity.Value() - if maxRatio.Value() < resource.MaxMilliValue && minQuantityValue < resource.MaxMilliValue && maxQuantityValue < resource.MaxMilliValue { - maxRatioValue = float64(maxRatio.MilliValue()) / 1000 - minQuantityValue = minQuantity.MilliValue() - maxQuantityValue = maxQuantity.MilliValue() - } - maxRatioLimit := float64(maxQuantityValue) / float64(minQuantityValue) - if maxRatioValue > maxRatioLimit { - allErrs = append(allErrs, field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is greater than max/min = %f", maxRatio.String(), maxRatioLimit))) - } - } - } - } - - return allErrs -} - -// ValidateServiceAccount tests if required fields in the ServiceAccount are set. -func ValidateServiceAccount(serviceAccount *api.ServiceAccount) field.ErrorList { - allErrs := ValidateObjectMeta(&serviceAccount.ObjectMeta, true, ValidateServiceAccountName, field.NewPath("metadata")) - return allErrs -} - -// ValidateServiceAccountUpdate tests if required fields in the ServiceAccount are set. -func ValidateServiceAccountUpdate(newServiceAccount, oldServiceAccount *api.ServiceAccount) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newServiceAccount.ObjectMeta, &oldServiceAccount.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateServiceAccount(newServiceAccount)...) - return allErrs -} - -const SecretKeyFmt string = "\\.?" + validation.DNS1123LabelFmt + "(\\." + validation.DNS1123LabelFmt + ")*" - -var secretKeyRegexp = regexp.MustCompile("^" + SecretKeyFmt + "$") - -// IsSecretKey tests for a string that conforms to the definition of a -// subdomain in DNS (RFC 1123), except that a leading dot is allowed -func IsSecretKey(value string) bool { - return len(value) <= validation.DNS1123SubdomainMaxLength && secretKeyRegexp.MatchString(value) -} - -// ValidateSecret tests if required fields in the Secret are set. 
-func ValidateSecret(secret *api.Secret) field.ErrorList { - allErrs := ValidateObjectMeta(&secret.ObjectMeta, true, ValidateSecretName, field.NewPath("metadata")) - - dataPath := field.NewPath("data") - totalSize := 0 - for key, value := range secret.Data { - if !IsSecretKey(key) { - allErrs = append(allErrs, field.Invalid(dataPath.Key(key), key, fmt.Sprintf("must have at most %d characters and match regex %s", validation.DNS1123SubdomainMaxLength, SecretKeyFmt))) - } - totalSize += len(value) - } - if totalSize > api.MaxSecretSize { - allErrs = append(allErrs, field.TooLong(dataPath, "", api.MaxSecretSize)) - } - - switch secret.Type { - case api.SecretTypeServiceAccountToken: - // Only require Annotations[kubernetes.io/service-account.name] - // Additional fields (like Annotations[kubernetes.io/service-account.uid] and Data[token]) might be contributed later by a controller loop - if value := secret.Annotations[api.ServiceAccountNameKey]; len(value) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("metadata", "annotations").Key(api.ServiceAccountNameKey), "")) - } - case api.SecretTypeOpaque, "": - // no-op - case api.SecretTypeDockercfg: - dockercfgBytes, exists := secret.Data[api.DockerConfigKey] - if !exists { - allErrs = append(allErrs, field.Required(dataPath.Key(api.DockerConfigKey), "")) - break - } - - // make sure that the content is well-formed json. - if err := json.Unmarshal(dockercfgBytes, &map[string]interface{}{}); err != nil { - allErrs = append(allErrs, field.Invalid(dataPath.Key(api.DockerConfigKey), "<secret contents redacted>", err.Error())) - } - case api.SecretTypeDockerConfigJson: - dockerConfigJsonBytes, exists := secret.Data[api.DockerConfigJsonKey] - if !exists { - allErrs = append(allErrs, field.Required(dataPath.Key(api.DockerConfigJsonKey), "")) - break - } - - // make sure that the content is well-formed json. - if err := json.Unmarshal(dockerConfigJsonBytes, &map[string]interface{}{}); err != nil { - allErrs = append(allErrs, field.Invalid(dataPath.Key(api.DockerConfigJsonKey), "<secret contents redacted>", err.Error())) - } - case api.SecretTypeBasicAuth: - _, usernameFieldExists := secret.Data[api.BasicAuthUsernameKey] - _, passwordFieldExists := secret.Data[api.BasicAuthPasswordKey] - - // username or password might be empty, but the field must be present - if !usernameFieldExists && !passwordFieldExists { - allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(api.BasicAuthUsernameKey), "")) - allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(api.BasicAuthPasswordKey), "")) - break - } - case api.SecretTypeSSHAuth: - if len(secret.Data[api.SSHAuthPrivateKey]) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(api.SSHAuthPrivateKey), "")) - break - } - - case api.SecretTypeTLS: - if _, exists := secret.Data[api.TLSCertKey]; !exists { - allErrs = append(allErrs, field.Required(dataPath.Key(api.TLSCertKey), "")) - } - if _, exists := secret.Data[api.TLSPrivateKeyKey]; !exists { - allErrs = append(allErrs, field.Required(dataPath.Key(api.TLSPrivateKeyKey), "")) - } - // TODO: Verify that the key matches the cert. - default: - // no-op - } - - return allErrs -} - -// ValidateSecretUpdate tests if required fields in the Secret are set. 
-func ValidateSecretUpdate(newSecret, oldSecret *api.Secret) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newSecret.ObjectMeta, &oldSecret.ObjectMeta, field.NewPath("metadata")) - - if len(newSecret.Type) == 0 { - newSecret.Type = oldSecret.Type - } - - allErrs = append(allErrs, ValidateImmutableField(newSecret.Type, oldSecret.Type, field.NewPath("type"))...) - - allErrs = append(allErrs, ValidateSecret(newSecret)...) - return allErrs -} - -// ValidateConfigMapName can be used to check whether the given ConfigMap name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateConfigMapName = NameIsDNSSubdomain - -// ValidateConfigMap tests whether required fields in the ConfigMap are set. -func ValidateConfigMap(cfg *api.ConfigMap) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, ValidateObjectMeta(&cfg.ObjectMeta, true, ValidateConfigMapName, field.NewPath("metadata"))...) - - totalSize := 0 - - for key, value := range cfg.Data { - if !IsSecretKey(key) { - allErrs = append(allErrs, field.Invalid(field.NewPath("data").Key(key), key, fmt.Sprintf("must have at most %d characters and match regex %s", validation.DNS1123SubdomainMaxLength, SecretKeyFmt))) - } - totalSize += len(value) - } - if totalSize > api.MaxSecretSize { - allErrs = append(allErrs, field.TooLong(field.NewPath("data"), "", api.MaxSecretSize)) - } - - return allErrs -} - -// ValidateConfigMapUpdate tests if required fields in the ConfigMap are set. -func ValidateConfigMapUpdate(newCfg, oldCfg *api.ConfigMap) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, ValidateObjectMetaUpdate(&newCfg.ObjectMeta, &oldCfg.ObjectMeta, field.NewPath("metadata"))...) - allErrs = append(allErrs, ValidateConfigMap(newCfg)...) - - return allErrs -} - -func validateBasicResource(quantity resource.Quantity, fldPath *field.Path) field.ErrorList { - if quantity.Value() < 0 { - return field.ErrorList{field.Invalid(fldPath, quantity.Value(), "must be a valid resource quantity")} - } - return field.ErrorList{} -} - -// Validates resource requirement spec. -func ValidateResourceRequirements(requirements *api.ResourceRequirements, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - limPath := fldPath.Child("limits") - reqPath := fldPath.Child("requests") - for resourceName, quantity := range requirements.Limits { - fldPath := limPath.Key(string(resourceName)) - // Validate resource name. - allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...) - if api.IsStandardResourceName(string(resourceName)) { - allErrs = append(allErrs, validateBasicResource(quantity, fldPath.Key(string(resourceName)))...) - } - // Check that request <= limit. - requestQuantity, exists := requirements.Requests[resourceName] - if exists { - // For GPUs, not only requests can't exceed limits, they also can't be lower, i.e. must be equal. 
- if resourceName == api.ResourceNvidiaGPU && quantity.Cmp(requestQuantity) != 0 { - allErrs = append(allErrs, field.Invalid(reqPath, requestQuantity.String(), fmt.Sprintf("must be equal to %s limit", api.ResourceNvidiaGPU))) - } else if quantity.Cmp(requestQuantity) < 0 { - allErrs = append(allErrs, field.Invalid(limPath, quantity.String(), fmt.Sprintf("must be greater than or equal to %s request", resourceName))) - } - } - } - for resourceName, quantity := range requirements.Requests { - fldPath := reqPath.Key(string(resourceName)) - // Validate resource name. - allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...) - if api.IsStandardResourceName(string(resourceName)) { - allErrs = append(allErrs, validateBasicResource(quantity, fldPath.Key(string(resourceName)))...) - } - } - return allErrs -} - -// validateResourceQuotaScopes ensures that each enumerated hard resource constraint is valid for set of scopes -func validateResourceQuotaScopes(resourceQuota *api.ResourceQuota) field.ErrorList { - allErrs := field.ErrorList{} - if len(resourceQuota.Spec.Scopes) == 0 { - return allErrs - } - hardLimits := sets.NewString() - for k := range resourceQuota.Spec.Hard { - hardLimits.Insert(string(k)) - } - fldPath := field.NewPath("spec", "scopes") - scopeSet := sets.NewString() - for _, scope := range resourceQuota.Spec.Scopes { - if !api.IsStandardResourceQuotaScope(string(scope)) { - allErrs = append(allErrs, field.Invalid(fldPath, resourceQuota.Spec.Scopes, "unsupported scope")) - } - for _, k := range hardLimits.List() { - if api.IsStandardQuotaResourceName(k) && !api.IsResourceQuotaScopeValidForResource(scope, k) { - allErrs = append(allErrs, field.Invalid(fldPath, resourceQuota.Spec.Scopes, "unsupported scope applied to resource")) - } - } - scopeSet.Insert(string(scope)) - } - invalidScopePairs := []sets.String{ - sets.NewString(string(api.ResourceQuotaScopeBestEffort), string(api.ResourceQuotaScopeNotBestEffort)), - sets.NewString(string(api.ResourceQuotaScopeTerminating), string(api.ResourceQuotaScopeNotTerminating)), - } - for _, invalidScopePair := range invalidScopePairs { - if scopeSet.HasAll(invalidScopePair.List()...) { - allErrs = append(allErrs, field.Invalid(fldPath, resourceQuota.Spec.Scopes, "conflicting scopes")) - } - } - return allErrs -} - -// ValidateResourceQuota tests if required fields in the ResourceQuota are set. -func ValidateResourceQuota(resourceQuota *api.ResourceQuota) field.ErrorList { - allErrs := ValidateObjectMeta(&resourceQuota.ObjectMeta, true, ValidateResourceQuotaName, field.NewPath("metadata")) - - fldPath := field.NewPath("spec", "hard") - for k, v := range resourceQuota.Spec.Hard { - resPath := fldPath.Key(string(k)) - allErrs = append(allErrs, validateResourceQuotaResourceName(string(k), resPath)...) - allErrs = append(allErrs, validateResourceQuantityValue(string(k), v, resPath)...) - } - allErrs = append(allErrs, validateResourceQuotaScopes(resourceQuota)...) - - fldPath = field.NewPath("status", "hard") - for k, v := range resourceQuota.Status.Hard { - resPath := fldPath.Key(string(k)) - allErrs = append(allErrs, validateResourceQuotaResourceName(string(k), resPath)...) - allErrs = append(allErrs, validateResourceQuantityValue(string(k), v, resPath)...) - } - fldPath = field.NewPath("status", "used") - for k, v := range resourceQuota.Status.Used { - resPath := fldPath.Key(string(k)) - allErrs = append(allErrs, validateResourceQuotaResourceName(string(k), resPath)...) 
- allErrs = append(allErrs, validateResourceQuantityValue(string(k), v, resPath)...) - } - - return allErrs -} - -// validateResourceQuantityValue enforces that specified quantity is valid for specified resource -func validateResourceQuantityValue(resource string, value resource.Quantity, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, ValidateNonnegativeQuantity(value, fldPath)...) - if api.IsIntegerResourceName(resource) { - if value.MilliValue()%int64(1000) != int64(0) { - allErrs = append(allErrs, field.Invalid(fldPath, value, isNotIntegerErrorMsg)) - } - } - return allErrs -} - -// ValidateResourceQuotaUpdate tests to see if the update is legal for an end user to make. -// newResourceQuota is updated with fields that cannot be changed. -func ValidateResourceQuotaUpdate(newResourceQuota, oldResourceQuota *api.ResourceQuota) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata")) - fldPath := field.NewPath("spec", "hard") - for k, v := range newResourceQuota.Spec.Hard { - resPath := fldPath.Key(string(k)) - allErrs = append(allErrs, validateResourceQuotaResourceName(string(k), resPath)...) - allErrs = append(allErrs, validateResourceQuantityValue(string(k), v, resPath)...) - } - - // ensure scopes cannot change, and that resources are still valid for scope - fldPath = field.NewPath("spec", "scopes") - oldScopes := sets.NewString() - newScopes := sets.NewString() - for _, scope := range newResourceQuota.Spec.Scopes { - newScopes.Insert(string(scope)) - } - for _, scope := range oldResourceQuota.Spec.Scopes { - oldScopes.Insert(string(scope)) - } - if !oldScopes.Equal(newScopes) { - allErrs = append(allErrs, field.Invalid(fldPath, newResourceQuota.Spec.Scopes, "field is immutable")) - } - allErrs = append(allErrs, validateResourceQuotaScopes(newResourceQuota)...) - - newResourceQuota.Status = oldResourceQuota.Status - return allErrs -} - -// ValidateResourceQuotaStatusUpdate tests to see if the status update is legal for an end user to make. -// newResourceQuota is updated with fields that cannot be changed. -func ValidateResourceQuotaStatusUpdate(newResourceQuota, oldResourceQuota *api.ResourceQuota) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata")) - if len(newResourceQuota.ResourceVersion) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), "")) - } - fldPath := field.NewPath("status", "hard") - for k, v := range newResourceQuota.Status.Hard { - resPath := fldPath.Key(string(k)) - allErrs = append(allErrs, validateResourceQuotaResourceName(string(k), resPath)...) - allErrs = append(allErrs, validateResourceQuantityValue(string(k), v, resPath)...) - } - fldPath = field.NewPath("status", "used") - for k, v := range newResourceQuota.Status.Used { - resPath := fldPath.Key(string(k)) - allErrs = append(allErrs, validateResourceQuotaResourceName(string(k), resPath)...) - allErrs = append(allErrs, validateResourceQuantityValue(string(k), v, resPath)...) - } - newResourceQuota.Spec = oldResourceQuota.Spec - return allErrs -} - -// ValidateNamespace tests if required fields are set. 
-func ValidateNamespace(namespace *api.Namespace) field.ErrorList { - allErrs := ValidateObjectMeta(&namespace.ObjectMeta, false, ValidateNamespaceName, field.NewPath("metadata")) - for i := range namespace.Spec.Finalizers { - allErrs = append(allErrs, validateFinalizerName(string(namespace.Spec.Finalizers[i]), field.NewPath("spec", "finalizers"))...) - } - return allErrs -} - -// Validate finalizer names -func validateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for _, msg := range validation.IsQualifiedName(stringValue) { - allErrs = append(allErrs, field.Invalid(fldPath, stringValue, msg)) - } - if len(allErrs) != 0 { - return allErrs - } - - if len(strings.Split(stringValue, "/")) == 1 { - if !api.IsStandardFinalizerName(stringValue) { - return append(allErrs, field.Invalid(fldPath, stringValue, "name is neither a standard finalizer name nor is it fully qualified")) - } - } - - return field.ErrorList{} -} - -// ValidateNamespaceUpdate tests to make sure a namespace update can be applied. -// newNamespace is updated with fields that cannot be changed -func ValidateNamespaceUpdate(newNamespace *api.Namespace, oldNamespace *api.Namespace) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) - newNamespace.Spec.Finalizers = oldNamespace.Spec.Finalizers - newNamespace.Status = oldNamespace.Status - return allErrs -} - -// ValidateNamespaceStatusUpdate tests to see if the update is legal for an end user to make. newNamespace is updated with fields -// that cannot be changed. -func ValidateNamespaceStatusUpdate(newNamespace, oldNamespace *api.Namespace) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) - newNamespace.Spec = oldNamespace.Spec - if newNamespace.DeletionTimestamp.IsZero() { - if newNamespace.Status.Phase != api.NamespaceActive { - allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Active' if `deletionTimestamp` is empty")) - } - } else { - if newNamespace.Status.Phase != api.NamespaceTerminating { - allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Terminating' if `deletionTimestamp` is not empty")) - } - } - return allErrs -} - -// ValidateNamespaceFinalizeUpdate tests to see if the update is legal for an end user to make. -// newNamespace is updated with fields that cannot be changed. -func ValidateNamespaceFinalizeUpdate(newNamespace, oldNamespace *api.Namespace) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) - - fldPath := field.NewPath("spec", "finalizers") - for i := range newNamespace.Spec.Finalizers { - idxPath := fldPath.Index(i) - allErrs = append(allErrs, validateFinalizerName(string(newNamespace.Spec.Finalizers[i]), idxPath)...) - } - newNamespace.Status = oldNamespace.Status - return allErrs -} - -// ValidateEndpoints tests if required fields are set. -func ValidateEndpoints(endpoints *api.Endpoints) field.ErrorList { - allErrs := ValidateObjectMeta(&endpoints.ObjectMeta, true, ValidateEndpointsName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(endpoints.Annotations, field.NewPath("annotations"))...) 
- allErrs = append(allErrs, validateEndpointSubsets(endpoints.Subsets, field.NewPath("subsets"))...) - return allErrs -} - -func validateEndpointSubsets(subsets []api.EndpointSubset, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - for i := range subsets { - ss := &subsets[i] - idxPath := fldPath.Index(i) - - if len(ss.Addresses) == 0 && len(ss.NotReadyAddresses) == 0 { - //TODO: consider adding a RequiredOneOf() error for this and similar cases - allErrs = append(allErrs, field.Required(idxPath, "must specify `addresses` or `notReadyAddresses`")) - } - if len(ss.Ports) == 0 { - allErrs = append(allErrs, field.Required(idxPath.Child("ports"), "")) - } - for addr := range ss.Addresses { - allErrs = append(allErrs, validateEndpointAddress(&ss.Addresses[addr], idxPath.Child("addresses").Index(addr))...) - } - for addr := range ss.NotReadyAddresses { - allErrs = append(allErrs, validateEndpointAddress(&ss.NotReadyAddresses[addr], idxPath.Child("notReadyAddresses").Index(addr))...) - } - for port := range ss.Ports { - allErrs = append(allErrs, validateEndpointPort(&ss.Ports[port], len(ss.Ports) > 1, idxPath.Child("ports").Index(port))...) - } - } - - return allErrs -} - -func validateEndpointAddress(address *api.EndpointAddress, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if !validation.IsValidIP(address.IP) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), address.IP, "must be a valid IP address")) - } - if len(address.Hostname) > 0 { - for _, msg := range validation.IsDNS1123Label(address.Hostname) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("hostname"), address.Hostname, msg)) - } - } - if len(allErrs) > 0 { - return allErrs - } - return validateIpIsNotLinkLocalOrLoopback(address.IP, fldPath.Child("ip")) -} - -func validateIpIsNotLinkLocalOrLoopback(ipAddress string, fldPath *field.Path) field.ErrorList { - // We disallow some IPs as endpoints or external-ips. Specifically, loopback addresses are - // nonsensical and link-local addresses tend to be used for node-centric purposes (e.g. metadata service). 
- allErrs := field.ErrorList{} - ip := net.ParseIP(ipAddress) - if ip == nil { - allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "must be a valid IP address")) - return allErrs - } - if ip.IsLoopback() { - allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the loopback range (127.0.0.0/8)")) - } - if ip.IsLinkLocalUnicast() { - allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the link-local range (169.254.0.0/16)")) - } - if ip.IsLinkLocalMulticast() { - allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the link-local multicast range (224.0.0.0/24)")) - } - return allErrs -} - -func validateEndpointPort(port *api.EndpointPort, requireName bool, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if requireName && len(port.Name) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) - } else if len(port.Name) != 0 { - for _, msg := range validation.IsDNS1123Label(port.Name) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), port.Name, msg)) - } - } - if !validation.IsValidPortNum(int(port.Port)) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), port.Port, PortRangeErrorMsg)) - } - if len(port.Protocol) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), "")) - } else if !supportedPortProtocols.Has(string(port.Protocol)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), port.Protocol, supportedPortProtocols.List())) - } - return allErrs -} - -// ValidateEndpointsUpdate tests to make sure an endpoints update can be applied. -func ValidateEndpointsUpdate(newEndpoints, oldEndpoints *api.Endpoints) field.ErrorList { - allErrs := ValidateObjectMetaUpdate(&newEndpoints.ObjectMeta, &oldEndpoints.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, validateEndpointSubsets(newEndpoints.Subsets, field.NewPath("subsets"))...) - allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(newEndpoints.Annotations, field.NewPath("annotations"))...) 
- return allErrs -} - -// ValidateSecurityContext ensure the security context contains valid settings -func ValidateSecurityContext(sc *api.SecurityContext, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - //this should only be true for testing since SecurityContext is defaulted by the api - if sc == nil { - return allErrs - } - - if sc.Privileged != nil { - if *sc.Privileged && !capabilities.Get().AllowPrivileged { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("privileged"), "disallowed by policy")) - } - } - - if sc.RunAsUser != nil { - if *sc.RunAsUser < 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *sc.RunAsUser, isNegativeErrorMsg)) - } - } - return allErrs -} - -func ValidatePodLogOptions(opts *api.PodLogOptions) field.ErrorList { - allErrs := field.ErrorList{} - if opts.TailLines != nil && *opts.TailLines < 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("tailLines"), *opts.TailLines, isNegativeErrorMsg)) - } - if opts.LimitBytes != nil && *opts.LimitBytes < 1 { - allErrs = append(allErrs, field.Invalid(field.NewPath("limitBytes"), *opts.LimitBytes, "must be greater than 0")) - } - switch { - case opts.SinceSeconds != nil && opts.SinceTime != nil: - allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "at most one of `sinceTime` or `sinceSeconds` may be specified")) - case opts.SinceSeconds != nil: - if *opts.SinceSeconds < 1 { - allErrs = append(allErrs, field.Invalid(field.NewPath("sinceSeconds"), *opts.SinceSeconds, "must be greater than 0")) - } - } - return allErrs -} - -// ValidateLoadBalancerStatus validates required fields on a LoadBalancerStatus -func ValidateLoadBalancerStatus(status *api.LoadBalancerStatus, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for i, ingress := range status.Ingress { - idxPath := fldPath.Child("ingress").Index(i) - if len(ingress.IP) > 0 { - if isIP := (net.ParseIP(ingress.IP) != nil); !isIP { - allErrs = append(allErrs, field.Invalid(idxPath.Child("ip"), ingress.IP, "must be a valid IP address")) - } - } - if len(ingress.Hostname) > 0 { - for _, msg := range validation.IsDNS1123Subdomain(ingress.Hostname) { - allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, msg)) - } - if isIP := (net.ParseIP(ingress.Hostname) != nil); isIP { - allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, "must be a DNS name, not an IP address")) - } - } - } - return allErrs -} - -func isValidHostnamesMap(serializedPodHostNames string) bool { - if len(serializedPodHostNames) == 0 { - return false - } - podHostNames := map[string]endpoints.HostRecord{} - err := json.Unmarshal([]byte(serializedPodHostNames), &podHostNames) - if err != nil { - return false - } - - for ip, hostRecord := range podHostNames { - if len(validation.IsDNS1123Label(hostRecord.HostName)) != 0 { - return false - } - if net.ParseIP(ip) == nil { - return false - } - } - return true -} diff --git a/vendor/k8s.io/kubernetes/pkg/apimachinery/doc.go b/vendor/k8s.io/kubernetes/pkg/apimachinery/doc.go deleted file mode 100644 index 7c5a261b4..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apimachinery/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package apimachinery contains the generic API machinery code that -// is common to both server and clients. -// This package should never import specific API objects. -package apimachinery diff --git a/vendor/k8s.io/kubernetes/pkg/apimachinery/registered/registered.go b/vendor/k8s.io/kubernetes/pkg/apimachinery/registered/registered.go deleted file mode 100644 index c418de3b0..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apimachinery/registered/registered.go +++ /dev/null @@ -1,346 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package to keep track of API Versions that can be registered and are enabled in api.Scheme. -package registered - -import ( - "fmt" - "os" - "sort" - "strings" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery" - "k8s.io/kubernetes/pkg/util/sets" -) - -var ( - // registeredGroupVersions stores all API group versions for which RegisterGroup is called. - registeredVersions = map[unversioned.GroupVersion]struct{}{} - - // thirdPartyGroupVersions are API versions which are dynamically - // registered (and unregistered) via API calls to the apiserver - thirdPartyGroupVersions []unversioned.GroupVersion - - // enabledVersions represents all enabled API versions. It should be a - // subset of registeredVersions. Please call EnableVersions() to add - // enabled versions. - enabledVersions = map[unversioned.GroupVersion]struct{}{} - - // map of group meta for all groups. - groupMetaMap = map[string]*apimachinery.GroupMeta{} - - // envRequestedVersions represents the versions requested via the - // KUBE_API_VERSIONS environment variable. The install package of each group - // checks this list before add their versions to the latest package and - // Scheme. This list is small and order matters, so represent as a slice - envRequestedVersions = []unversioned.GroupVersion{} -) - -func init() { - // Env var KUBE_API_VERSIONS is a comma separated list of API versions that - // should be registered in the scheme. - kubeAPIVersions := os.Getenv("KUBE_API_VERSIONS") - if len(kubeAPIVersions) != 0 { - for _, version := range strings.Split(kubeAPIVersions, ",") { - gv, err := unversioned.ParseGroupVersion(version) - if err != nil { - glog.Fatalf("invalid api version: %s in KUBE_API_VERSIONS: %s.", - version, os.Getenv("KUBE_API_VERSIONS")) - } - envRequestedVersions = append(envRequestedVersions, gv) - } - } -} - -// RegisterVersions adds the given group versions to the list of registered group versions. 
-func RegisterVersions(availableVersions []unversioned.GroupVersion) {
-	for _, v := range availableVersions {
-		registeredVersions[v] = struct{}{}
-	}
-}
-
-// RegisterGroup adds the given group to the list of registered groups.
-func RegisterGroup(groupMeta apimachinery.GroupMeta) error {
-	groupName := groupMeta.GroupVersion.Group
-	if _, found := groupMetaMap[groupName]; found {
-		return fmt.Errorf("group %v is already registered", groupName)
-	}
-	groupMetaMap[groupName] = &groupMeta
-	return nil
-}
-
-// EnableVersions adds the versions for the given group to the list of enabled versions.
-// Note that the caller should call RegisterGroup before calling this method.
-// The caller of this function is responsible for adding the versions to the scheme and RESTMapper.
-func EnableVersions(versions ...unversioned.GroupVersion) error {
-	var unregisteredVersions []unversioned.GroupVersion
-	for _, v := range versions {
-		if _, found := registeredVersions[v]; !found {
-			unregisteredVersions = append(unregisteredVersions, v)
-		}
-		enabledVersions[v] = struct{}{}
-	}
-	if len(unregisteredVersions) != 0 {
-		return fmt.Errorf("Please register versions before enabling them: %v", unregisteredVersions)
-	}
-	return nil
-}
-
-// IsAllowedVersion returns if the version is allowed by the KUBE_API_VERSIONS
-// environment variable. If the environment variable is empty, then it always
-// returns true.
-func IsAllowedVersion(v unversioned.GroupVersion) bool {
-	if len(envRequestedVersions) == 0 {
-		return true
-	}
-	for _, envGV := range envRequestedVersions {
-		if v == envGV {
-			return true
-		}
-	}
-	return false
-}
-
-// IsEnabledVersion returns if a version is enabled.
-func IsEnabledVersion(v unversioned.GroupVersion) bool {
-	_, found := enabledVersions[v]
-	return found
-}
-
-// EnabledVersions returns all enabled versions. Groups are randomly ordered, but versions within groups
-// are in priority order from best to worst.
-func EnabledVersions() []unversioned.GroupVersion {
-	ret := []unversioned.GroupVersion{}
-	for _, groupMeta := range groupMetaMap {
-		ret = append(ret, groupMeta.GroupVersions...)
-	}
-	return ret
-}
-
-// EnabledVersionsForGroup returns all enabled versions for a group in order of best to worst.
-func EnabledVersionsForGroup(group string) []unversioned.GroupVersion {
-	groupMeta, ok := groupMetaMap[group]
-	if !ok {
-		return []unversioned.GroupVersion{}
-	}
-
-	return append([]unversioned.GroupVersion{}, groupMeta.GroupVersions...)
-}
-
-// Group returns the metadata of a group if the group is registered, otherwise
-// an error is returned.
-func Group(group string) (*apimachinery.GroupMeta, error) {
-	groupMeta, found := groupMetaMap[group]
-	if !found {
-		return nil, fmt.Errorf("group %v has not been registered", group)
-	}
-	groupMetaCopy := *groupMeta
-	return &groupMetaCopy, nil
-}
-
-// IsRegistered takes a string and determines if it's one of the registered groups.
-func IsRegistered(group string) bool {
-	_, found := groupMetaMap[group]
-	return found
-}
-
-// IsRegisteredVersion returns if a version is registered.
-func IsRegisteredVersion(v unversioned.GroupVersion) bool {
-	_, found := registeredVersions[v]
-	return found
-}
-
-// RegisteredGroupVersions returns all registered group versions.
-func RegisteredGroupVersions() []unversioned.GroupVersion {
-	ret := []unversioned.GroupVersion{}
-	for groupVersion := range registeredVersions {
-		ret = append(ret, groupVersion)
-	}
-	return ret
-}
-
-// IsThirdPartyAPIGroupVersion returns true if the api version is a user-registered group/version.
-func IsThirdPartyAPIGroupVersion(gv unversioned.GroupVersion) bool {
-	for ix := range thirdPartyGroupVersions {
-		if thirdPartyGroupVersions[ix] == gv {
-			return true
-		}
-	}
-	return false
-}
-
-// AddThirdPartyAPIGroupVersions sets the list of third party versions,
-// registers them in the API machinery and enables them.
-// Skips GroupVersions that are already registered.
-// Returns the list of GroupVersions that were skipped.
-func AddThirdPartyAPIGroupVersions(gvs ...unversioned.GroupVersion) []unversioned.GroupVersion {
-	filteredGVs := []unversioned.GroupVersion{}
-	skippedGVs := []unversioned.GroupVersion{}
-	for ix := range gvs {
-		if !IsRegisteredVersion(gvs[ix]) {
-			filteredGVs = append(filteredGVs, gvs[ix])
-		} else {
-			glog.V(3).Infof("Skipping %s, because it's already registered", gvs[ix].String())
-			skippedGVs = append(skippedGVs, gvs[ix])
-		}
-	}
-	if len(filteredGVs) == 0 {
-		return skippedGVs
-	}
-	RegisterVersions(filteredGVs)
-	EnableVersions(filteredGVs...)
-	next := make([]unversioned.GroupVersion, len(filteredGVs))
-	for ix := range filteredGVs {
-		next[ix] = filteredGVs[ix]
-	}
-	thirdPartyGroupVersions = next
-
-	return skippedGVs
-}
-
-// TODO: This is an expedient function, because we don't check if a Group is
-// supported throughout the code base. We will abandon this function in favor of
-// checking the error returned by the Group() function.
-func GroupOrDie(group string) *apimachinery.GroupMeta {
-	groupMeta, found := groupMetaMap[group]
-	if !found {
-		if group == "" {
-			panic("The legacy v1 API is not registered.")
-		} else {
-			panic(fmt.Sprintf("Group %s is not registered.", group))
-		}
-	}
-	groupMetaCopy := *groupMeta
-	return &groupMetaCopy
-}
-
-// RESTMapper returns a union RESTMapper of all known types with priorities chosen in the following order:
-// 1. if KUBE_API_VERSIONS is specified, then KUBE_API_VERSIONS in order, OR
-// 1. legacy kube group preferred version, extensions preferred version, metrics preferred version, legacy
-//    kube any version, extensions any version, metrics any version, all other groups alphabetical preferred version,
-//    all other groups alphabetical.
-func RESTMapper(versionPatterns ...unversioned.GroupVersion) meta.RESTMapper { - unionMapper := meta.MultiRESTMapper{} - unionedGroups := sets.NewString() - for enabledVersion := range enabledVersions { - if !unionedGroups.Has(enabledVersion.Group) { - unionedGroups.Insert(enabledVersion.Group) - groupMeta := groupMetaMap[enabledVersion.Group] - unionMapper = append(unionMapper, groupMeta.RESTMapper) - } - } - - if len(versionPatterns) != 0 { - resourcePriority := []unversioned.GroupVersionResource{} - kindPriority := []unversioned.GroupVersionKind{} - for _, versionPriority := range versionPatterns { - resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource)) - kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind)) - } - - return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} - } - - if len(envRequestedVersions) != 0 { - resourcePriority := []unversioned.GroupVersionResource{} - kindPriority := []unversioned.GroupVersionKind{} - - for _, versionPriority := range envRequestedVersions { - resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource)) - kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind)) - } - - return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} - } - - prioritizedGroups := []string{"", "extensions", "metrics"} - resourcePriority, kindPriority := prioritiesForGroups(prioritizedGroups...) - - prioritizedGroupsSet := sets.NewString(prioritizedGroups...) - remainingGroups := sets.String{} - for enabledVersion := range enabledVersions { - if !prioritizedGroupsSet.Has(enabledVersion.Group) { - remainingGroups.Insert(enabledVersion.Group) - } - } - - remainingResourcePriority, remainingKindPriority := prioritiesForGroups(remainingGroups.List()...) - resourcePriority = append(resourcePriority, remainingResourcePriority...) - kindPriority = append(kindPriority, remainingKindPriority...) - - return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} -} - -// prioritiesForGroups returns the resource and kind priorities for a PriorityRESTMapper, preferring the preferred version of each group first, -// then any non-preferred version of the group second. -func prioritiesForGroups(groups ...string) ([]unversioned.GroupVersionResource, []unversioned.GroupVersionKind) { - resourcePriority := []unversioned.GroupVersionResource{} - kindPriority := []unversioned.GroupVersionKind{} - - for _, group := range groups { - availableVersions := EnabledVersionsForGroup(group) - if len(availableVersions) > 0 { - resourcePriority = append(resourcePriority, availableVersions[0].WithResource(meta.AnyResource)) - kindPriority = append(kindPriority, availableVersions[0].WithKind(meta.AnyKind)) - } - } - for _, group := range groups { - resourcePriority = append(resourcePriority, unversioned.GroupVersionResource{Group: group, Version: meta.AnyVersion, Resource: meta.AnyResource}) - kindPriority = append(kindPriority, unversioned.GroupVersionKind{Group: group, Version: meta.AnyVersion, Kind: meta.AnyKind}) - } - - return resourcePriority, kindPriority -} - -// AllPreferredGroupVersions returns the preferred versions of all registered -// groups in the form of "group1/version1,group2/version2,..." 
-func AllPreferredGroupVersions() string { - if len(groupMetaMap) == 0 { - return "" - } - var defaults []string - for _, groupMeta := range groupMetaMap { - defaults = append(defaults, groupMeta.GroupVersion.String()) - } - sort.Strings(defaults) - return strings.Join(defaults, ",") -} - -// ValidateEnvRequestedVersions returns a list of versions that are requested in -// the KUBE_API_VERSIONS environment variable, but not enabled. -func ValidateEnvRequestedVersions() []unversioned.GroupVersion { - var missingVersions []unversioned.GroupVersion - for _, v := range envRequestedVersions { - if _, found := enabledVersions[v]; !found { - missingVersions = append(missingVersions, v) - } - } - return missingVersions -} - -// Resets the state. -// Should not be used by anyone else than tests. -func reset() { - registeredVersions = map[unversioned.GroupVersion]struct{}{} - enabledVersions = map[unversioned.GroupVersion]struct{}{} - groupMetaMap = map[string]*apimachinery.GroupMeta{} - -} diff --git a/vendor/k8s.io/kubernetes/pkg/apimachinery/types.go b/vendor/k8s.io/kubernetes/pkg/apimachinery/types.go deleted file mode 100644 index 3e86921ce..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apimachinery/types.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apimachinery - -import ( - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// GroupMeta stores the metadata of a group. -type GroupMeta struct { - // GroupVersion represents the preferred version of the group. - GroupVersion unversioned.GroupVersion - - // GroupVersions is Group + all versions in that group. - GroupVersions []unversioned.GroupVersion - - // Codec is the default codec for serializing output that should use - // the preferred version. Use this Codec when writing to - // disk, a data store that is not dynamically versioned, or in tests. - // This codec can decode any object that the schema is aware of. - Codec runtime.Codec - - // SelfLinker can set or get the SelfLink field of all API types. - // TODO: when versioning changes, make this part of each API definition. - // TODO(lavalamp): Combine SelfLinker & ResourceVersioner interfaces, force all uses - // to go through the InterfacesFor method below. - SelfLinker runtime.SelfLinker - - // RESTMapper provides the default mapping between REST paths and the objects declared in api.Scheme and all known - // versions. - RESTMapper meta.RESTMapper - - // InterfacesFor returns the default Codec and ResourceVersioner for a given version - // or an error if the version is not known. 
- InterfacesFor func(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/deep_copy_generated.go deleted file mode 100644 index 5a2135c6a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/deep_copy_generated.go +++ /dev/null @@ -1,117 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package apps - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_apps_PetSet, - DeepCopy_apps_PetSetList, - DeepCopy_apps_PetSetSpec, - DeepCopy_apps_PetSetStatus, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. - panic(err) - } -} - -func DeepCopy_apps_PetSet(in PetSet, out *PetSet, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_apps_PetSetSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_apps_PetSetStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_apps_PetSetList(in PetSetList, out *PetSetList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]PetSet, len(in)) - for i := range in { - if err := DeepCopy_apps_PetSet(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_apps_PetSetSpec(in PetSetSpec, out *PetSetSpec, c *conversion.Cloner) error { - out.Replicas = in.Replicas - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := api.DeepCopy_api_PodTemplateSpec(in.Template, &out.Template, c); err != nil { - return err - } - if in.VolumeClaimTemplates != nil { - in, out := in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]api.PersistentVolumeClaim, len(in)) - for i := range in { - if err := api.DeepCopy_api_PersistentVolumeClaim(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.VolumeClaimTemplates = nil - } - out.ServiceName = in.ServiceName - return nil -} - -func DeepCopy_apps_PetSetStatus(in PetSetStatus, 
out *PetSetStatus, c *conversion.Cloner) error { - if in.ObservedGeneration != nil { - in, out := in.ObservedGeneration, &out.ObservedGeneration - *out = new(int64) - **out = *in - } else { - out.ObservedGeneration = nil - } - out.Replicas = in.Replicas - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/install/install.go deleted file mode 100644 index b4d9011d3..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/install/install.go +++ /dev/null @@ -1,125 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the apps API group, making it available as -// an option to all of the API encoding/decoding machinery. -package install - -import ( - "fmt" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/apps/v1alpha1" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/sets" -) - -const importPrefix = "k8s.io/kubernetes/pkg/apis/apps" - -var accessor = meta.NewAccessor() - -// availableVersions lists all known external versions for this group from most preferred to least preferred -var availableVersions = []unversioned.GroupVersion{v1alpha1.SchemeGroupVersion} - -func init() { - registered.RegisterVersions(availableVersions) - externalVersions := []unversioned.GroupVersion{} - for _, v := range availableVersions { - if registered.IsAllowedVersion(v) { - externalVersions = append(externalVersions, v) - } - } - if len(externalVersions) == 0 { - glog.V(4).Infof("No version is registered for group %v", apps.GroupName) - return - } - - if err := registered.EnableVersions(externalVersions...); err != nil { - glog.V(4).Infof("%v", err) - return - } - if err := enableVersions(externalVersions); err != nil { - glog.V(4).Infof("%v", err) - return - } -} - -func enableVersions(externalVersions []unversioned.GroupVersion) error { - addVersionsToScheme(externalVersions...) 
- preferredExternalVersion := externalVersions[0] - - groupMeta := apimachinery.GroupMeta{ - GroupVersion: preferredExternalVersion, - GroupVersions: externalVersions, - RESTMapper: newRESTMapper(externalVersions), - SelfLinker: runtime.SelfLinker(accessor), - InterfacesFor: interfacesFor, - } - - if err := registered.RegisterGroup(groupMeta); err != nil { - return err - } - api.RegisterRESTMapper(groupMeta.RESTMapper) - return nil -} - -func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { - // the list of kinds that are scoped at the root of the api hierarchy - // if a kind is not enumerated here, it is assumed to have a namespace scope - rootScoped := sets.NewString() - - ignoredKinds := sets.NewString() - - return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) -} - -// interfacesFor returns the default Codec and ResourceVersioner for a given version -// string, or an error if the version is not known. -func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { - switch version { - case v1alpha1.SchemeGroupVersion: - return &meta.VersionInterfaces{ - ObjectConvertor: api.Scheme, - MetadataAccessor: accessor, - }, nil - default: - g, _ := registered.Group(apps.GroupName) - return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) - } -} - -func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { - // add the internal version to Scheme - apps.AddToScheme(api.Scheme) - // add the enabled external versions to Scheme - for _, v := range externalVersions { - if !registered.IsEnabledVersion(v) { - glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) - continue - } - switch v { - case v1alpha1.SchemeGroupVersion: - v1alpha1.AddToScheme(api.Scheme) - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go deleted file mode 100644 index dd5b9a900..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apps - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -func AddToScheme(scheme *runtime.Scheme) { - // Add the API to Scheme. 
- addKnownTypes(scheme) -} - -// GroupName is the group name use in this package -const GroupName = "apps" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns back a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) { - // TODO this will get cleaned up with the scheme types are fixed - scheme.AddKnownTypes(SchemeGroupVersion, - &PetSet{}, - &PetSetList{}, - &api.ListOptions{}, - ) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/types.generated.go deleted file mode 100644 index a08450922..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/types.generated.go +++ /dev/null @@ -1,1634 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package apps - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg2_api "k8s.io/kubernetes/pkg/api" - pkg4_resource "k8s.io/kubernetes/pkg/api/resource" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg3_types "k8s.io/kubernetes/pkg/types" - pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg2_api.ObjectMeta - var v1 pkg4_resource.Quantity - var v2 pkg1_unversioned.TypeMeta - var v3 pkg3_types.UID - var v4 pkg5_intstr.IntOrString - var v5 time.Time - _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 - } -} - -func (x *PetSet) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - 
if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PetSet) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PetSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PetSetSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = PetSetStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PetSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PetSetSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = PetSetStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PetSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Replicas != 0 - yyq2[1] = x.Selector != nil - yyq2[3] = len(x.VolumeClaimTemplates) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy10 := &x.Template - yy10.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.Template - yy12.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.VolumeClaimTemplates == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - h.encSliceapi_PersistentVolumeClaim(([]pkg2_api.PersistentVolumeClaim)(x.VolumeClaimTemplates), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeClaimTemplates")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VolumeClaimTemplates == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceapi_PersistentVolumeClaim(([]pkg2_api.PersistentVolumeClaim)(x.VolumeClaimTemplates), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PetSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PetSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv7 := &x.Template - yyv7.CodecDecodeSelf(d) - } - case "volumeClaimTemplates": - if 
r.TryDecodeAsNil() { - x.VolumeClaimTemplates = nil - } else { - yyv8 := &x.VolumeClaimTemplates - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - h.decSliceapi_PersistentVolumeClaim((*[]pkg2_api.PersistentVolumeClaim)(yyv8), d) - } - } - case "serviceName": - if r.TryDecodeAsNil() { - x.ServiceName = "" - } else { - x.ServiceName = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PetSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym14 := z.DecBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv15 := &x.Template - yyv15.CodecDecodeSelf(d) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeClaimTemplates = nil - } else { - yyv16 := &x.VolumeClaimTemplates - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - h.decSliceapi_PersistentVolumeClaim((*[]pkg2_api.PersistentVolumeClaim)(yyv16), d) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServiceName = "" - } else { - x.ServiceName = string(r.DecodeString()) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PetSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, 
yy2arr2 - const yyr2 bool = false - yyq2[0] = x.ObservedGeneration != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.ObservedGeneration == nil { - r.EncodeNil() - } else { - yy4 := *x.ObservedGeneration - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ObservedGeneration == nil { - r.EncodeNil() - } else { - yy6 := *x.ObservedGeneration - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PetSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PetSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "observedGeneration": - if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } - } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } - } - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end 
for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PetSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } - } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int(r.DecodeInt(codecSelferBitsize1234)) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PetSetList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSlicePetSet(([]PetSet)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSlicePetSet(([]PetSet)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { 
- } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PetSetList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PetSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSlicePetSet((*[]PetSet)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PetSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) 
{ - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSlicePetSet((*[]PetSet)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceapi_PersistentVolumeClaim(v []pkg2_api.PersistentVolumeClaim, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceapi_PersistentVolumeClaim(v *[]pkg2_api.PersistentVolumeClaim, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []pkg2_api.PersistentVolumeClaim{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 352) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]pkg2_api.PersistentVolumeClaim, yyrl1) - } - } else { - yyv1 = make([]pkg2_api.PersistentVolumeClaim, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if 
r.TryDecodeAsNil() { - yyv1[yyj1] = pkg2_api.PersistentVolumeClaim{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, pkg2_api.PersistentVolumeClaim{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg2_api.PersistentVolumeClaim{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, pkg2_api.PersistentVolumeClaim{}) // var yyz1 pkg2_api.PersistentVolumeClaim - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg2_api.PersistentVolumeClaim{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []pkg2_api.PersistentVolumeClaim{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePetSet(v []PetSet, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePetSet(v *[]PetSet, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PetSet{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 744) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PetSet, yyrl1) - } - } else { - yyv1 = make([]PetSet, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PetSet{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PetSet{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PetSet{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PetSet{}) // var yyz1 PetSet - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PetSet{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PetSet{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go deleted file mode 100644 index 87019c456..000000000 --- 
a/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apps - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" -) - -// PetSet represents a set of pods with consistent identities. -// Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. -// The PetSet guarantees that a given network identity will always -// map to the same storage identity. PetSet is currently in alpha and -// and subject to change without notice. -type PetSet struct { - unversioned.TypeMeta `json:",inline"` - api.ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the desired identities of pets in this set. - Spec PetSetSpec `json:"spec,omitempty"` - - // Status is the current status of Pets in this PetSet. This data - // may be out of date by some window of time. - Status PetSetStatus `json:"status,omitempty"` -} - -// A PetSetSpec is the specification of a PetSet. -type PetSetSpec struct { - // Replicas is the desired number of replicas of the given Template. - // These are replicas in the sense that they are instantiations of the - // same Template, but individual replicas also have a consistent identity. - // If unspecified, defaults to 1. - // TODO: Consider a rename of this field. - Replicas int `json:"replicas,omitempty"` - - // Selector is a label query over pods that should match the replica count. - // If empty, defaulted to labels on the pod template. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - Selector *unversioned.LabelSelector `json:"selector,omitempty"` - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. Each pod stamped out by the PetSet - // will fulfill this Template, but have a unique identity from the rest - // of the PetSet. - Template api.PodTemplateSpec `json:"template"` - - // VolumeClaimTemplates is a list of claims that pets are allowed to reference. - // The PetSet controller is responsible for mapping network identities to - // claims in a way that maintains the identity of a pet. Every claim in - // this list must have at least one matching (by name) volumeMount in one - // container in the template. A claim in this list takes precedence over - // any volumes in the template, with the same name. - // TODO: Define the behavior if a claim already exists with the same name. - VolumeClaimTemplates []api.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty"` - - // ServiceName is the name of the service that governs this PetSet. - // This service must exist before the PetSet, and is responsible for - // the network identity of the set. Pets get DNS/hostnames that follow the - // pattern: pet-specific-string.serviceName.default.svc.cluster.local - // where "pet-specific-string" is managed by the PetSet controller. 
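The serviceName documentation above only sketches the DNS shape pets receive. As a rough illustration of that pattern, here is a minimal, self-contained Go sketch; the exact pet-specific string is chosen by the PetSet controller, so the "<set>-<ordinal>" form used below is an assumption for illustration, not something this file guarantees.

    package main

    import "fmt"

    // petHostname sketches the documented pattern
    // pet-specific-string.serviceName.namespace.svc.cluster.local,
    // assuming (hypothetically) that the controller names pets "<set>-<ordinal>".
    func petHostname(setName string, ordinal int, serviceName, namespace string) string {
    	petSpecific := fmt.Sprintf("%s-%d", setName, ordinal) // assumed naming scheme
    	return fmt.Sprintf("%s.%s.%s.svc.cluster.local", petSpecific, serviceName, namespace)
    }

    func main() {
    	// Prints "web-0.nginx.default.svc.cluster.local" for a set "web"
    	// governed by service "nginx" in the "default" namespace.
    	fmt.Println(petHostname("web", 0, "nginx", "default"))
    }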
- ServiceName string `json:"serviceName"` -} - -// PetSetStatus represents the current state of a PetSet. -type PetSetStatus struct { - // most recent generation observed by this autoscaler. - ObservedGeneration *int64 `json:"observedGeneration,omitempty"` - - // Replicas is the number of actual replicas. - Replicas int `json:"replicas"` -} - -// PetSetList is a collection of PetSets. -type PetSetList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - Items []PetSet `json:"items"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion.go deleted file mode 100644 index 48f1f2b4d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" -) - -func addConversionFuncs(scheme *runtime.Scheme) { - // Add non-generated conversion functions to handle the *int32 -> int - // conversion. A pointer is useful in the versioned type so we can default - // it, but a plain int32 is more convenient in the internal type. These - // functions are the same as the autogenerated ones in every other way. - err := scheme.AddConversionFuncs( - Convert_v1alpha1_PetSetSpec_To_apps_PetSetSpec, - Convert_apps_PetSetSpec_To_v1alpha1_PetSetSpec, - ) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. - panic(err) - } - - err = api.Scheme.AddFieldLabelConversionFunc("apps/v1alpha1", "PetSet", - func(label, value string) (string, string, error) { - switch label { - case "metadata.name", "metadata.namespace", "status.successful": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. 
- panic(err) - } -} - -func Convert_v1alpha1_PetSetSpec_To_apps_PetSetSpec(in *PetSetSpec, out *apps.PetSetSpec, s conversion.Scope) error { - if in.Replicas != nil { - out.Replicas = int(*in.Replicas) - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := s.Convert(*in, *out, 0); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := s.Convert(&in.Template, &out.Template, 0); err != nil { - return err - } - if in.VolumeClaimTemplates != nil { - in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]api.PersistentVolumeClaim, len(*in)) - for i := range *in { - if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { - return err - } - } - } else { - out.VolumeClaimTemplates = nil - } - out.ServiceName = in.ServiceName - return nil -} - -func Convert_apps_PetSetSpec_To_v1alpha1_PetSetSpec(in *apps.PetSetSpec, out *PetSetSpec, s conversion.Scope) error { - out.Replicas = new(int32) - *out.Replicas = int32(in.Replicas) - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := s.Convert(*in, *out, 0); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := s.Convert(&in.Template, &out.Template, 0); err != nil { - return err - } - if in.VolumeClaimTemplates != nil { - in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]v1.PersistentVolumeClaim, len(*in)) - for i := range *in { - if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { - return err - } - } - } else { - out.VolumeClaimTemplates = nil - } - out.ServiceName = in.ServiceName - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion_generated.go deleted file mode 100644 index cfd6ce44b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/conversion_generated.go +++ /dev/null @@ -1,156 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1alpha1 - -import ( - api "k8s.io/kubernetes/pkg/api" - apps "k8s.io/kubernetes/pkg/apis/apps" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedConversionFuncs( - Convert_v1alpha1_PetSet_To_apps_PetSet, - Convert_apps_PetSet_To_v1alpha1_PetSet, - Convert_v1alpha1_PetSetList_To_apps_PetSetList, - Convert_apps_PetSetList_To_v1alpha1_PetSetList, - Convert_v1alpha1_PetSetSpec_To_apps_PetSetSpec, - Convert_apps_PetSetSpec_To_v1alpha1_PetSetSpec, - Convert_v1alpha1_PetSetStatus_To_apps_PetSetStatus, - Convert_apps_PetSetStatus_To_v1alpha1_PetSetStatus, - ); err != nil { - // if one of the conversion functions is malformed, detect it immediately. 
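The hand-written conversions above exist mainly to bridge the versioned *int32 Replicas (a pointer so it can be defaulted) and the internal plain int. A minimal sketch of that pointer-to-value round trip, using hypothetical local stand-in types rather than the real api/apps packages:

    package main

    import "fmt"

    // Hypothetical stand-ins for the versioned (v1alpha1) and internal (apps)
    // specs; only the Replicas field is modelled here.
    type versionedSpec struct{ Replicas *int32 }
    type internalSpec struct{ Replicas int }

    // toInternal mirrors the hand-written conversion: a nil pointer is left at
    // the internal zero value (defaulting is handled elsewhere).
    func toInternal(in versionedSpec) internalSpec {
    	out := internalSpec{}
    	if in.Replicas != nil {
    		out.Replicas = int(*in.Replicas)
    	}
    	return out
    }

    // toVersioned always allocates the pointer, as the conversion above does.
    func toVersioned(in internalSpec) versionedSpec {
    	r := int32(in.Replicas)
    	return versionedSpec{Replicas: &r}
    }

    func main() {
    	three := int32(3)
    	fmt.Println(toInternal(versionedSpec{Replicas: &three}).Replicas) // 3
    	fmt.Println(*toVersioned(internalSpec{Replicas: 3}).Replicas)     // 3
    }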
- panic(err) - } -} - -func autoConvert_v1alpha1_PetSet_To_apps_PetSet(in *PetSet, out *apps.PetSet, s conversion.Scope) error { - SetDefaults_PetSet(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v1alpha1_PetSetSpec_To_apps_PetSetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha1_PetSetStatus_To_apps_PetSetStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1alpha1_PetSet_To_apps_PetSet(in *PetSet, out *apps.PetSet, s conversion.Scope) error { - return autoConvert_v1alpha1_PetSet_To_apps_PetSet(in, out, s) -} - -func autoConvert_apps_PetSet_To_v1alpha1_PetSet(in *apps.PetSet, out *PetSet, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_apps_PetSetSpec_To_v1alpha1_PetSetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_apps_PetSetStatus_To_v1alpha1_PetSetStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_apps_PetSet_To_v1alpha1_PetSet(in *apps.PetSet, out *PetSet, s conversion.Scope) error { - return autoConvert_apps_PetSet_To_v1alpha1_PetSet(in, out, s) -} - -func autoConvert_v1alpha1_PetSetList_To_apps_PetSetList(in *PetSetList, out *apps.PetSetList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]apps.PetSet, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_PetSet_To_apps_PetSet(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1alpha1_PetSetList_To_apps_PetSetList(in *PetSetList, out *apps.PetSetList, s conversion.Scope) error { - return autoConvert_v1alpha1_PetSetList_To_apps_PetSetList(in, out, s) -} - -func autoConvert_apps_PetSetList_To_v1alpha1_PetSetList(in *apps.PetSetList, out *PetSetList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PetSet, len(*in)) - for i := range *in { - if err := Convert_apps_PetSet_To_v1alpha1_PetSet(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_apps_PetSetList_To_v1alpha1_PetSetList(in *apps.PetSetList, out *PetSetList, s conversion.Scope) error { - return autoConvert_apps_PetSetList_To_v1alpha1_PetSetList(in, out, s) -} - -func autoConvert_v1alpha1_PetSetStatus_To_apps_PetSetStatus(in *PetSetStatus, out *apps.PetSetStatus, s conversion.Scope) error { - out.ObservedGeneration = 
in.ObservedGeneration - out.Replicas = int(in.Replicas) - return nil -} - -func Convert_v1alpha1_PetSetStatus_To_apps_PetSetStatus(in *PetSetStatus, out *apps.PetSetStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_PetSetStatus_To_apps_PetSetStatus(in, out, s) -} - -func autoConvert_apps_PetSetStatus_To_v1alpha1_PetSetStatus(in *apps.PetSetStatus, out *PetSetStatus, s conversion.Scope) error { - out.ObservedGeneration = in.ObservedGeneration - out.Replicas = int32(in.Replicas) - return nil -} - -func Convert_apps_PetSetStatus_To_v1alpha1_PetSetStatus(in *apps.PetSetStatus, out *PetSetStatus, s conversion.Scope) error { - return autoConvert_apps_PetSetStatus_To_v1alpha1_PetSetStatus(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/deep_copy_generated.go deleted file mode 100644 index 6e51cacbc..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/deep_copy_generated.go +++ /dev/null @@ -1,124 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1alpha1 - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_v1alpha1_PetSet, - DeepCopy_v1alpha1_PetSetList, - DeepCopy_v1alpha1_PetSetSpec, - DeepCopy_v1alpha1_PetSetStatus, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. 
- panic(err) - } -} - -func DeepCopy_v1alpha1_PetSet(in PetSet, out *PetSet, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1alpha1_PetSetSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1alpha1_PetSetStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1alpha1_PetSetList(in PetSetList, out *PetSetList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]PetSet, len(in)) - for i := range in { - if err := DeepCopy_v1alpha1_PetSet(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1alpha1_PetSetSpec(in PetSetSpec, out *PetSetSpec, c *conversion.Cloner) error { - if in.Replicas != nil { - in, out := in.Replicas, &out.Replicas - *out = new(int32) - **out = *in - } else { - out.Replicas = nil - } - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { - return err - } - if in.VolumeClaimTemplates != nil { - in, out := in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]v1.PersistentVolumeClaim, len(in)) - for i := range in { - if err := v1.DeepCopy_v1_PersistentVolumeClaim(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.VolumeClaimTemplates = nil - } - out.ServiceName = in.ServiceName - return nil -} - -func DeepCopy_v1alpha1_PetSetStatus(in PetSetStatus, out *PetSetStatus, c *conversion.Cloner) error { - if in.ObservedGeneration != nil { - in, out := in.ObservedGeneration, &out.ObservedGeneration - *out = new(int64) - **out = *in - } else { - out.ObservedGeneration = nil - } - out.Replicas = in.Replicas - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/defaults.go deleted file mode 100644 index e41028138..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/defaults.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
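The generated deep-copy functions above re-allocate every pointer and slice so the copy never aliases the original. A small self-contained sketch of the same idea, using a local stand-in for PetSetStatus rather than the real type:

    package main

    import "fmt"

    // status is a local stand-in for PetSetStatus: one pointer field, one value field.
    type status struct {
    	ObservedGeneration *int64
    	Replicas           int
    }

    // deepCopyStatus mirrors DeepCopy_v1alpha1_PetSetStatus above: the pointer is
    // re-allocated so the copy does not share memory with the original.
    func deepCopyStatus(in status) status {
    	out := status{Replicas: in.Replicas}
    	if in.ObservedGeneration != nil {
    		gen := *in.ObservedGeneration
    		out.ObservedGeneration = &gen
    	}
    	return out
    }

    func main() {
    	gen := int64(7)
    	a := status{ObservedGeneration: &gen, Replicas: 3}
    	b := deepCopyStatus(a)
    	*a.ObservedGeneration = 8                      // mutate the original
    	fmt.Println(*b.ObservedGeneration, b.Replicas) // still "7 3": the copy is independent
    }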
-*/ - -package v1alpha1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) { - scheme.AddDefaultingFuncs( - SetDefaults_PetSet, - ) -} - -func SetDefaults_PetSet(obj *PetSet) { - labels := obj.Spec.Template.Labels - if labels != nil { - if obj.Spec.Selector == nil { - obj.Spec.Selector = &unversioned.LabelSelector{ - MatchLabels: labels, - } - } - if len(obj.Labels) == 0 { - obj.Labels = labels - } - } - if obj.Spec.Replicas == nil { - obj.Spec.Replicas = new(int32) - *obj.Spec.Replicas = 1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/doc.go deleted file mode 100644 index 65a03a209..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +genconversion=true -package v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.pb.go deleted file mode 100644 index 88f1bcd40..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.pb.go +++ /dev/null @@ -1,969 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto -// DO NOT EDIT! - -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto - - It has these top-level messages: - PetSet - PetSetList - PetSetSpec - PetSetStatus -*/ -package v1alpha1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" -import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. 
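SetDefaults_PetSet above fills in a missing selector from the pod template's labels, copies those labels onto the object when it has none, and defaults replicas to 1. A rough, self-contained sketch of that defaulting behaviour, using simplified local fields instead of the real versioned types:

    package main

    import "fmt"

    // petSetSketch is a hypothetical, flattened stand-in for the fields that
    // SetDefaults_PetSet touches.
    type petSetSketch struct {
    	Labels         map[string]string
    	TemplateLabels map[string]string
    	SelectorLabels map[string]string // nil means "no selector set"
    	Replicas       *int32
    }

    // applyDefaults mirrors the shape of SetDefaults_PetSet: selector and object
    // labels fall back to the pod template's labels, replicas defaults to 1.
    func applyDefaults(obj *petSetSketch) {
    	if labels := obj.TemplateLabels; labels != nil {
    		if obj.SelectorLabels == nil {
    			obj.SelectorLabels = labels
    		}
    		if len(obj.Labels) == 0 {
    			obj.Labels = labels
    		}
    	}
    	if obj.Replicas == nil {
    		one := int32(1)
    		obj.Replicas = &one
    	}
    }

    func main() {
    	p := petSetSketch{TemplateLabels: map[string]string{"app": "web"}}
    	applyDefaults(&p)
    	fmt.Println(p.SelectorLabels["app"], *p.Replicas) // web 1
    }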
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -func (m *PetSet) Reset() { *m = PetSet{} } -func (m *PetSet) String() string { return proto.CompactTextString(m) } -func (*PetSet) ProtoMessage() {} - -func (m *PetSetList) Reset() { *m = PetSetList{} } -func (m *PetSetList) String() string { return proto.CompactTextString(m) } -func (*PetSetList) ProtoMessage() {} - -func (m *PetSetSpec) Reset() { *m = PetSetSpec{} } -func (m *PetSetSpec) String() string { return proto.CompactTextString(m) } -func (*PetSetSpec) ProtoMessage() {} - -func (m *PetSetStatus) Reset() { *m = PetSetStatus{} } -func (m *PetSetStatus) String() string { return proto.CompactTextString(m) } -func (*PetSetStatus) ProtoMessage() {} - -func init() { - proto.RegisterType((*PetSet)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1alpha1.PetSet") - proto.RegisterType((*PetSetList)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1alpha1.PetSetList") - proto.RegisterType((*PetSetSpec)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1alpha1.PetSetSpec") - proto.RegisterType((*PetSetStatus)(nil), "k8s.io.kubernetes.pkg.apis.apps.v1alpha1.PetSetStatus") -} -func (m *PetSet) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PetSet) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *PetSetList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PetSetList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n4 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PetSetSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PetSetSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Replicas != nil { - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n5, err := m.Selector.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n5 - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n6, err := m.Template.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.VolumeClaimTemplates) > 0 { - for _, msg := 
range m.VolumeClaimTemplates { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ServiceName))) - i += copy(data[i:], m.ServiceName) - return i, nil -} - -func (m *PetSetStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PetSetStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ObservedGeneration != nil { - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration)) - } - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *PetSet) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PetSetList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PetSetSpec) Size() (n int) { - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.VolumeClaimTemplates) > 0 { - for _, e := range m.VolumeClaimTemplates { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.ServiceName) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PetSetStatus) Size() (n int) { - var l int - _ = l - if m.ObservedGeneration != nil { - n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) - } - n += 1 + sovGenerated(uint64(m.Replicas)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *PetSet) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 
0x7) - if wireType == 4 { - return fmt.Errorf("proto: PetSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PetSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PetSetList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PetSetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PetSetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - 
if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PetSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PetSetSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PetSetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PetSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Replicas = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} - } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = 
postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_kubernetes_pkg_api_v1.PersistentVolumeClaim{}) - if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PetSetStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PetSetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PetSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ObservedGeneration = &v - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return 
nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto deleted file mode 100644 index 3bf7e6936..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/generated.proto +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.apps.v1alpha1; - -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// PetSet represents a set of pods with consistent identities. -// Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. 
-// The PetSet guarantees that a given network identity will always -// map to the same storage identity. PetSet is currently in alpha -// and subject to change without notice. -message PetSet { - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec defines the desired identities of pets in this set. - optional PetSetSpec spec = 2; - - // Status is the current status of Pets in this PetSet. This data - // may be out of date by some window of time. - optional PetSetStatus status = 3; -} - -// PetSetList is a collection of PetSets. -message PetSetList { - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - repeated PetSet items = 2; -} - -// A PetSetSpec is the specification of a PetSet. -message PetSetSpec { - // Replicas is the desired number of replicas of the given Template. - // These are replicas in the sense that they are instantiations of the - // same Template, but individual replicas also have a consistent identity. - // If unspecified, defaults to 1. - // TODO: Consider a rename of this field. - optional int32 replicas = 1; - - // Selector is a label query over pods that should match the replica count. - // If empty, defaulted to labels on the pod template. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 2; - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. Each pod stamped out by the PetSet - // will fulfill this Template, but have a unique identity from the rest - // of the PetSet. - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; - - // VolumeClaimTemplates is a list of claims that pets are allowed to reference. - // The PetSet controller is responsible for mapping network identities to - // claims in a way that maintains the identity of a pet. Every claim in - // this list must have at least one matching (by name) volumeMount in one - // container in the template. A claim in this list takes precedence over - // any volumes in the template, with the same name. - // TODO: Define the behavior if a claim already exists with the same name. - repeated k8s.io.kubernetes.pkg.api.v1.PersistentVolumeClaim volumeClaimTemplates = 4; - - // ServiceName is the name of the service that governs this PetSet. - // This service must exist before the PetSet, and is responsible for - // the network identity of the set. Pets get DNS/hostnames that follow the - // pattern: pet-specific-string.serviceName.default.svc.cluster.local - // where "pet-specific-string" is managed by the PetSet controller. - optional string serviceName = 5; -} - -// PetSetStatus represents the current state of a PetSet. -message PetSetStatus { - // most recent generation observed by this autoscaler. - optional int64 observedGeneration = 1; - - // Replicas is the number of actual replicas. - optional int32 replicas = 2; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/register.go deleted file mode 100644 index 9ab37dfb0..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/register.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" -) - -// GroupName is the group name use in this package -const GroupName = "apps" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1alpha1"} - -func AddToScheme(scheme *runtime.Scheme) { - addKnownTypes(scheme) - addDefaultingFuncs(scheme) - addConversionFuncs(scheme) -} - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) { - scheme.AddKnownTypes(SchemeGroupVersion, - &PetSet{}, - &PetSetList{}, - &v1.ListOptions{}, - &v1.DeleteOptions{}, - ) - versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) -} - -func (obj *PetSet) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *PetSetList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.generated.go deleted file mode 100644 index a544c4e6a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.generated.go +++ /dev/null @@ -1,1664 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. 
-// ************************************************************ - -package v1alpha1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg4_resource "k8s.io/kubernetes/pkg/api/resource" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg2_v1 "k8s.io/kubernetes/pkg/api/v1" - pkg3_types "k8s.io/kubernetes/pkg/types" - pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg4_resource.Quantity - var v1 pkg1_unversioned.TypeMeta - var v2 pkg2_v1.ObjectMeta - var v3 pkg3_types.UID - var v4 pkg5_intstr.IntOrString - var v5 time.Time - _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 - } -} - -func (x *PetSet) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PetSet) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PetSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PetSetSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = PetSetStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - 
z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PetSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PetSetSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = PetSetStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PetSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Replicas != nil - yyq2[1] = x.Selector != nil - yyq2[3] = len(x.VolumeClaimTemplates) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Replicas == nil { - r.EncodeNil() - } else { - yy4 := *x.Replicas - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Replicas == nil { - r.EncodeNil() - } else { - yy6 := *x.Replicas - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy12 := &x.Template - yy12.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.Template - yy14.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.VolumeClaimTemplates == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - h.encSlicev1_PersistentVolumeClaim(([]pkg2_v1.PersistentVolumeClaim)(x.VolumeClaimTemplates), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeClaimTemplates")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.VolumeClaimTemplates == nil { - r.EncodeNil() - } else { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - h.encSlicev1_PersistentVolumeClaim(([]pkg2_v1.PersistentVolumeClaim)(x.VolumeClaimTemplates), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PetSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PetSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } - } else { - if x.Replicas == nil { - x.Replicas = new(int32) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} - } else { - yyv8 := &x.Template - yyv8.CodecDecodeSelf(d) - } - case "volumeClaimTemplates": - if r.TryDecodeAsNil() { - x.VolumeClaimTemplates = nil - } else { - yyv9 := &x.VolumeClaimTemplates - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSlicev1_PersistentVolumeClaim((*[]pkg2_v1.PersistentVolumeClaim)(yyv9), d) - } - } - case "serviceName": - if r.TryDecodeAsNil() { - x.ServiceName = "" - } else { - x.ServiceName = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PetSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } - } else { - if x.Replicas == nil { - x.Replicas = new(int32) - } - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} - } else { - yyv17 := &x.Template - yyv17.CodecDecodeSelf(d) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeClaimTemplates = nil - } else { - yyv18 := &x.VolumeClaimTemplates - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - h.decSlicev1_PersistentVolumeClaim((*[]pkg2_v1.PersistentVolumeClaim)(yyv18), d) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServiceName = "" - } else { - x.ServiceName = string(r.DecodeString()) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PetSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.ObservedGeneration != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.ObservedGeneration == nil { - r.EncodeNil() - } else { - yy4 := *x.ObservedGeneration - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ObservedGeneration == nil { - r.EncodeNil() - } else { - yy6 := *x.ObservedGeneration - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PetSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else 
if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PetSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "observedGeneration": - if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } - } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } - } - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PetSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } - } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PetSetList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, 
yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSlicePetSet(([]PetSet)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSlicePetSet(([]PetSet)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PetSetList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PetSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSlicePetSet((*[]PetSet)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PetSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSlicePetSet((*[]PetSet)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSlicev1_PersistentVolumeClaim(v []pkg2_v1.PersistentVolumeClaim, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicev1_PersistentVolumeClaim(v *[]pkg2_v1.PersistentVolumeClaim, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []pkg2_v1.PersistentVolumeClaim{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 352) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]pkg2_v1.PersistentVolumeClaim, yyrl1) - } - } else { - yyv1 = make([]pkg2_v1.PersistentVolumeClaim, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg2_v1.PersistentVolumeClaim{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, pkg2_v1.PersistentVolumeClaim{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg2_v1.PersistentVolumeClaim{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, pkg2_v1.PersistentVolumeClaim{}) // var yyz1 pkg2_v1.PersistentVolumeClaim - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg2_v1.PersistentVolumeClaim{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []pkg2_v1.PersistentVolumeClaim{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePetSet(v []PetSet, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePetSet(v *[]PetSet, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PetSet{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if 
yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PetSet, yyrl1) - } - } else { - yyv1 = make([]PetSet, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PetSet{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PetSet{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PetSet{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PetSet{}) // var yyz1 PetSet - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PetSet{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PetSet{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.go deleted file mode 100644 index fb0aa48a1..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" -) - -// PetSet represents a set of pods with consistent identities. -// Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. -// The PetSet guarantees that a given network identity will always -// map to the same storage identity. PetSet is currently in alpha -// and subject to change without notice. -type PetSet struct { - unversioned.TypeMeta `json:",inline"` - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the desired identities of pets in this set. - Spec PetSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is the current status of Pets in this PetSet. This data - // may be out of date by some window of time. - Status PetSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// A PetSetSpec is the specification of a PetSet. -type PetSetSpec struct { - // Replicas is the desired number of replicas of the given Template. 
- // These are replicas in the sense that they are instantiations of the - // same Template, but individual replicas also have a consistent identity. - // If unspecified, defaults to 1. - // TODO: Consider a rename of this field. - Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` - - // Selector is a label query over pods that should match the replica count. - // If empty, defaulted to labels on the pod template. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. Each pod stamped out by the PetSet - // will fulfill this Template, but have a unique identity from the rest - // of the PetSet. - Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` - - // VolumeClaimTemplates is a list of claims that pets are allowed to reference. - // The PetSet controller is responsible for mapping network identities to - // claims in a way that maintains the identity of a pet. Every claim in - // this list must have at least one matching (by name) volumeMount in one - // container in the template. A claim in this list takes precedence over - // any volumes in the template, with the same name. - // TODO: Define the behavior if a claim already exists with the same name. - VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` - - // ServiceName is the name of the service that governs this PetSet. - // This service must exist before the PetSet, and is responsible for - // the network identity of the set. Pets get DNS/hostnames that follow the - // pattern: pet-specific-string.serviceName.default.svc.cluster.local - // where "pet-specific-string" is managed by the PetSet controller. - ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"` -} - -// PetSetStatus represents the current state of a PetSet. -type PetSetStatus struct { - // most recent generation observed by this autoscaler. - ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - - // Replicas is the number of actual replicas. - Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` -} - -// PetSetList is a collection of PetSets. -type PetSetList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - Items []PetSet `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index 8ce682b4b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/apps/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_PetSet = map[string]string{ - "": "PetSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe PetSet guarantees that a given network identity will always map to the same storage identity. PetSet is currently in alpha and subject to change without notice.", - "spec": "Spec defines the desired identities of pets in this set.", - "status": "Status is the current status of Pets in this PetSet. This data may be out of date by some window of time.", -} - -func (PetSet) SwaggerDoc() map[string]string { - return map_PetSet -} - -var map_PetSetList = map[string]string{ - "": "PetSetList is a collection of PetSets.", -} - -func (PetSetList) SwaggerDoc() map[string]string { - return map_PetSetList -} - -var map_PetSetSpec = map[string]string{ - "": "A PetSetSpec is the specification of a PetSet.", - "replicas": "Replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", - "selector": "Selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors", - "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the PetSet will fulfill this Template, but have a unique identity from the rest of the PetSet.", - "volumeClaimTemplates": "VolumeClaimTemplates is a list of claims that pets are allowed to reference. The PetSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pet. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", - "serviceName": "ServiceName is the name of the service that governs this PetSet. This service must exist before the PetSet, and is responsible for the network identity of the set. 
Pets get DNS/hostnames that follow the pattern: pet-specific-string.serviceName.default.svc.cluster.local where \"pet-specific-string\" is managed by the PetSet controller.", -} - -func (PetSetSpec) SwaggerDoc() map[string]string { - return map_PetSetSpec -} - -var map_PetSetStatus = map[string]string{ - "": "PetSetStatus represents the current state of a PetSet.", - "observedGeneration": "most recent generation observed by this autoscaler.", - "replicas": "Replicas is the number of actual replicas.", -} - -func (PetSetStatus) SwaggerDoc() map[string]string { - return map_PetSetStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/deep_copy_generated.go deleted file mode 100644 index 75ac7281e..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/deep_copy_generated.go +++ /dev/null @@ -1,91 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package authentication - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_authenticationk8sio_TokenReview, - DeepCopy_authenticationk8sio_TokenReviewSpec, - DeepCopy_authenticationk8sio_TokenReviewStatus, - DeepCopy_authenticationk8sio_UserInfo, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. 
- panic(err) - } -} - -func DeepCopy_authenticationk8sio_TokenReview(in TokenReview, out *TokenReview, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_authenticationk8sio_TokenReviewSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_authenticationk8sio_TokenReviewStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_authenticationk8sio_TokenReviewSpec(in TokenReviewSpec, out *TokenReviewSpec, c *conversion.Cloner) error { - out.Token = in.Token - return nil -} - -func DeepCopy_authenticationk8sio_TokenReviewStatus(in TokenReviewStatus, out *TokenReviewStatus, c *conversion.Cloner) error { - out.Authenticated = in.Authenticated - if err := DeepCopy_authenticationk8sio_UserInfo(in.User, &out.User, c); err != nil { - return err - } - return nil -} - -func DeepCopy_authenticationk8sio_UserInfo(in UserInfo, out *UserInfo, c *conversion.Cloner) error { - out.Username = in.Username - out.UID = in.UID - if in.Groups != nil { - in, out := in.Groups, &out.Groups - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Groups = nil - } - if in.Extra != nil { - in, out := in.Extra, &out.Extra - *out = make(map[string][]string) - for key, val := range in { - if newVal, err := c.DeepCopy(val); err != nil { - return err - } else { - (*out)[key] = newVal.([]string) - } - } - } else { - out.Extra = nil - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install/install.go deleted file mode 100644 index 29447d217..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install/install.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the experimental API group, making it available as -// an option to all of the API encoding/decoding machinery. 
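The generated deep-copy helpers above copy UserInfo.Groups and UserInfo.Extra element by element rather than aliasing the original slice and map. A standalone sketch of the same pattern, using a hypothetical local userInfo type purely for illustration:

    package main

    import "fmt"

    // userInfo is a hypothetical local mirror of the deleted UserInfo type.
    type userInfo struct {
        Username string
        UID      string
        Groups   []string
        Extra    map[string][]string
    }

    // deepCopyUserInfo copies Groups and Extra element by element so the copy
    // shares no slices or maps with the original, as the generated helpers do.
    func deepCopyUserInfo(in userInfo) userInfo {
        out := userInfo{Username: in.Username, UID: in.UID}
        if in.Groups != nil {
            out.Groups = make([]string, len(in.Groups))
            copy(out.Groups, in.Groups)
        }
        if in.Extra != nil {
            out.Extra = make(map[string][]string, len(in.Extra))
            for k, v := range in.Extra {
                vals := make([]string, len(v))
                copy(vals, v)
                out.Extra[k] = vals
            }
        }
        return out
    }

    func main() {
        orig := userInfo{Username: "jane", Groups: []string{"dev"}, Extra: map[string][]string{"scopes": {"read"}}}
        cp := deepCopyUserInfo(orig)
        cp.Groups[0] = "ops"
        fmt.Println(orig.Groups[0], cp.Groups[0]) // "dev ops": the original is untouched
    }
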
-package install - -import ( - "fmt" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/authentication.k8s.io" - "k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/sets" -) - -const importPrefix = "k8s.io/kubernetes/pkg/apis/authentication.k8s.io" - -var accessor = meta.NewAccessor() - -// availableVersions lists all known external versions for this group from most preferred to least preferred -var availableVersions = []unversioned.GroupVersion{v1beta1.SchemeGroupVersion} - -func init() { - registered.RegisterVersions(availableVersions) - externalVersions := []unversioned.GroupVersion{} - for _, v := range availableVersions { - if registered.IsAllowedVersion(v) { - externalVersions = append(externalVersions, v) - } - } - if len(externalVersions) == 0 { - glog.V(4).Infof("No version is registered for group %v", authentication.GroupName) - return - } - - if err := registered.EnableVersions(externalVersions...); err != nil { - glog.V(4).Infof("%v", err) - return - } - if err := enableVersions(externalVersions); err != nil { - glog.V(4).Infof("%v", err) - return - } -} - -// TODO: enableVersions should be centralized rather than spread in each API -// group. -// We can combine registered.RegisterVersions, registered.EnableVersions and -// registered.RegisterGroup once we have moved enableVersions there. -func enableVersions(externalVersions []unversioned.GroupVersion) error { - addVersionsToScheme(externalVersions...) - preferredExternalVersion := externalVersions[0] - - groupMeta := apimachinery.GroupMeta{ - GroupVersion: preferredExternalVersion, - GroupVersions: externalVersions, - RESTMapper: newRESTMapper(externalVersions), - SelfLinker: runtime.SelfLinker(accessor), - InterfacesFor: interfacesFor, - } - - if err := registered.RegisterGroup(groupMeta); err != nil { - return err - } - api.RegisterRESTMapper(groupMeta.RESTMapper) - return nil -} - -func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { - // add the internal version to Scheme - authentication.AddToScheme(api.Scheme) - // add the enabled external versions to Scheme - for _, v := range externalVersions { - if !registered.IsEnabledVersion(v) { - glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) - continue - } - switch v { - case v1beta1.SchemeGroupVersion: - v1beta1.AddToScheme(api.Scheme) - } - } -} - -func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { - rootScoped := sets.NewString("TokenReview") - ignoredKinds := sets.NewString() - return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) -} - -func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { - switch version { - case v1beta1.SchemeGroupVersion: - return &meta.VersionInterfaces{ - ObjectConvertor: api.Scheme, - MetadataAccessor: accessor, - }, nil - default: - g, _ := registered.Group(authentication.GroupName) - return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/register.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/register.go deleted file mode 100644 index 4dda3140f..000000000 --- 
a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/register.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package authentication - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// GroupName is the group name use in this package -const GroupName = "authentication.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns back a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -func AddToScheme(scheme *runtime.Scheme) { - addKnownTypes(scheme) -} - -func addKnownTypes(scheme *runtime.Scheme) { - scheme.AddKnownTypes(SchemeGroupVersion, - &TokenReview{}, - ) -} - -func (obj *TokenReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.generated.go deleted file mode 100644 index b3b72d653..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.generated.go +++ /dev/null @@ -1,1265 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. 
-// ************************************************************ - -package authentication - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - "reflect" - "runtime" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_unversioned.TypeMeta - _ = v0 - } -} - -func (x *TokenReview) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.Spec - yy4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.Spec - yy6.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy9 := &x.Status - yy9.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Status - yy11.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - 
r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *TokenReview) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *TokenReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "Spec": - if r.TryDecodeAsNil() { - x.Spec = TokenReviewSpec{} - } else { - yyv4 := &x.Spec - yyv4.CodecDecodeSelf(d) - } - case "Status": - if r.TryDecodeAsNil() { - x.Status = TokenReviewStatus{} - } else { - yyv5 := &x.Status - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *TokenReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = TokenReviewSpec{} - } else { - yyv9 := &x.Spec - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = TokenReviewStatus{} - } else { - yyv10 := &x.Status - yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = 
r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *TokenReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Token)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Token")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Token)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *TokenReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *TokenReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) 
- switch yys3 { - case "Token": - if r.TryDecodeAsNil() { - x.Token = "" - } else { - x.Token = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *TokenReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Token = "" - } else { - x.Token = string(r.DecodeString()) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *TokenReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.Authenticated)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Authenticated")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.Authenticated)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.User - yy7.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("User")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.User - yy9.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *TokenReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x 
*TokenReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "Authenticated": - if r.TryDecodeAsNil() { - x.Authenticated = false - } else { - x.Authenticated = bool(r.DecodeBool()) - } - case "User": - if r.TryDecodeAsNil() { - x.User = UserInfo{} - } else { - yyv5 := &x.User - yyv5.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *TokenReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Authenticated = false - } else { - x.Authenticated = bool(r.DecodeBool()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.User = UserInfo{} - } else { - yyv8 := &x.User - yyv8.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *UserInfo) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 4 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Username)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Username")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Username)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("UID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Groups == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Groups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Groups == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Extra == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - h.encMapstringSlicestring((map[string][]string)(x.Extra), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Extra")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Extra == nil { - r.EncodeNil() - } else { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - h.encMapstringSlicestring((map[string][]string)(x.Extra), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *UserInfo) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *UserInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "Username": - if r.TryDecodeAsNil() { - x.Username = "" - } else { - x.Username = string(r.DecodeString()) - } - case "UID": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = string(r.DecodeString()) - } - case "Groups": - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv6 := &x.Groups - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecSliceStringX(yyv6, false, d) - } - } - case "Extra": - if r.TryDecodeAsNil() { - x.Extra = nil - } else 
{ - yyv8 := &x.Extra - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - h.decMapstringSlicestring((*map[string][]string)(yyv8), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *UserInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Username = "" - } else { - x.Username = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv13 := &x.Groups - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - z.F.DecSliceStringX(yyv13, false, d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Extra = nil - } else { - yyv15 := &x.Extra - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - h.decMapstringSlicestring((*map[string][]string)(yyv15), d) - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encMapstringSlicestring(v map[string][]string, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk1, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyk1)) - } - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyv1 == nil { - r.EncodeNil() - } else { - yym3 := z.EncBinary() - _ = yym3 - if false { - } else { - z.F.EncSliceStringV(yyv1, false, e) - } - } - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decMapstringSlicestring(v *map[string][]string, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyl1 := r.ReadMapStart() - yybh1 := z.DecBasicHandle() - if yyv1 == nil { - yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) - yyv1 = make(map[string][]string, yyrl1) - *v = yyv1 - } - var yymk1 string - var yymv1 []string - var yymg1 bool - if yybh1.MapValueReset { - yymg1 = true - } - if yyl1 > 0 { - for yyj1 := 0; yyj1 < yyl1; 
yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yymk1 = string(r.DecodeString()) - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv3 := &yymv1 - yym4 := z.DecBinary() - _ = yym4 - if false { - } else { - z.F.DecSliceStringX(yyv3, false, d) - } - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } else if yyl1 < 0 { - for yyj1 := 0; !r.CheckBreak(); yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yymk1 = string(r.DecodeString()) - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv6 := &yymv1 - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecSliceStringX(yyv6, false, d) - } - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } // else len==0: TODO: Should we clear map entries? - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) encSlicestring(v []string, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyv1)) - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicestring(v *[]string, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []string{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]string, yyrl1) - } - } else { - yyv1 = make([]string, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = string(r.DecodeString()) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = string(r.DecodeString()) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 string - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = string(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []string{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.go 
b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.go deleted file mode 100644 index 02ec0d2b2..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/types.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package authentication - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" -) - -// TokenReview attempts to authenticate a token to a known user. -type TokenReview struct { - unversioned.TypeMeta - - // Spec holds information about the request being evaluated - Spec TokenReviewSpec - - // Status is filled in by the server and indicates whether the request can be authenticated. - Status TokenReviewStatus -} - -// TokenReviewSpec is a description of the token authentication request. -type TokenReviewSpec struct { - // Token is the opaque bearer token. - Token string -} - -// TokenReviewStatus is the result of the token authentication request. -type TokenReviewStatus struct { - // Authenticated indicates that the token was associated with a known user. - Authenticated bool - // User is the UserInfo associated with the provided token. - User UserInfo -} - -// UserInfo holds the information about the user needed to implement the -// user.Info interface. -type UserInfo struct { - // The name that uniquely identifies this user among all active users. - Username string - // A unique value that identifies this user across time. If this user is - // deleted and another user by the same name is added, they will have - // different UIDs. - UID string - // The names of groups this user is a part of. - Groups []string - // Any additional information provided by the authenticator. - Extra map[string][]string -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion.go deleted file mode 100644 index 6a8545d13..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/kubernetes/pkg/runtime" -) - -func addConversionFuncs(scheme *runtime.Scheme) { - // Add non-generated conversion functions - err := scheme.AddConversionFuncs() - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. 
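The deleted types.go above describes TokenReview as a request/response pair: a client supplies the opaque bearer token in Spec, and the server fills in Status with the authentication result and the associated UserInfo. A minimal sketch of that shape, using local mirror types rather than the vendored package this change removes:

    package main

    import "fmt"

    // Local mirrors of the deleted internal types, kept here only to show the
    // request/response shape; the real definitions lived in the vendored package.
    type TokenReviewSpec struct{ Token string }

    type UserInfo struct {
        Username string
        UID      string
        Groups   []string
        Extra    map[string][]string
    }

    type TokenReviewStatus struct {
        Authenticated bool
        User          UserInfo
    }

    type TokenReview struct {
        Spec   TokenReviewSpec
        Status TokenReviewStatus
    }

    func main() {
        // A client fills in Spec with the opaque bearer token.
        review := TokenReview{Spec: TokenReviewSpec{Token: "opaque-bearer-token"}}

        // Hypothetical result a server might fill in after evaluating the token.
        review.Status = TokenReviewStatus{
            Authenticated: true,
            User:          UserInfo{Username: "jane", UID: "42", Groups: []string{"system:authenticated"}},
        }

        fmt.Println(review.Status.Authenticated, review.Status.User.Username)
    }
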
- panic(err) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion_generated.go deleted file mode 100644 index 9972f82ed..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/conversion_generated.go +++ /dev/null @@ -1,143 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1beta1 - -import ( - api "k8s.io/kubernetes/pkg/api" - authentication_k8s_io "k8s.io/kubernetes/pkg/apis/authentication.k8s.io" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedConversionFuncs( - Convert_v1beta1_TokenReview_To_authenticationk8sio_TokenReview, - Convert_authenticationk8sio_TokenReview_To_v1beta1_TokenReview, - Convert_v1beta1_TokenReviewSpec_To_authenticationk8sio_TokenReviewSpec, - Convert_authenticationk8sio_TokenReviewSpec_To_v1beta1_TokenReviewSpec, - Convert_v1beta1_TokenReviewStatus_To_authenticationk8sio_TokenReviewStatus, - Convert_authenticationk8sio_TokenReviewStatus_To_v1beta1_TokenReviewStatus, - Convert_v1beta1_UserInfo_To_authenticationk8sio_UserInfo, - Convert_authenticationk8sio_UserInfo_To_v1beta1_UserInfo, - ); err != nil { - // if one of the conversion functions is malformed, detect it immediately. 
- panic(err) - } -} - -func autoConvert_v1beta1_TokenReview_To_authenticationk8sio_TokenReview(in *TokenReview, out *authentication_k8s_io.TokenReview, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1beta1_TokenReviewSpec_To_authenticationk8sio_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_TokenReviewStatus_To_authenticationk8sio_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_TokenReview_To_authenticationk8sio_TokenReview(in *TokenReview, out *authentication_k8s_io.TokenReview, s conversion.Scope) error { - return autoConvert_v1beta1_TokenReview_To_authenticationk8sio_TokenReview(in, out, s) -} - -func autoConvert_authenticationk8sio_TokenReview_To_v1beta1_TokenReview(in *authentication_k8s_io.TokenReview, out *TokenReview, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_authenticationk8sio_TokenReviewSpec_To_v1beta1_TokenReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_authenticationk8sio_TokenReviewStatus_To_v1beta1_TokenReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_authenticationk8sio_TokenReview_To_v1beta1_TokenReview(in *authentication_k8s_io.TokenReview, out *TokenReview, s conversion.Scope) error { - return autoConvert_authenticationk8sio_TokenReview_To_v1beta1_TokenReview(in, out, s) -} - -func autoConvert_v1beta1_TokenReviewSpec_To_authenticationk8sio_TokenReviewSpec(in *TokenReviewSpec, out *authentication_k8s_io.TokenReviewSpec, s conversion.Scope) error { - out.Token = in.Token - return nil -} - -func Convert_v1beta1_TokenReviewSpec_To_authenticationk8sio_TokenReviewSpec(in *TokenReviewSpec, out *authentication_k8s_io.TokenReviewSpec, s conversion.Scope) error { - return autoConvert_v1beta1_TokenReviewSpec_To_authenticationk8sio_TokenReviewSpec(in, out, s) -} - -func autoConvert_authenticationk8sio_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in *authentication_k8s_io.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { - out.Token = in.Token - return nil -} - -func Convert_authenticationk8sio_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in *authentication_k8s_io.TokenReviewSpec, out *TokenReviewSpec, s conversion.Scope) error { - return autoConvert_authenticationk8sio_TokenReviewSpec_To_v1beta1_TokenReviewSpec(in, out, s) -} - -func autoConvert_v1beta1_TokenReviewStatus_To_authenticationk8sio_TokenReviewStatus(in *TokenReviewStatus, out *authentication_k8s_io.TokenReviewStatus, s conversion.Scope) error { - out.Authenticated = in.Authenticated - if err := Convert_v1beta1_UserInfo_To_authenticationk8sio_UserInfo(&in.User, &out.User, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_TokenReviewStatus_To_authenticationk8sio_TokenReviewStatus(in *TokenReviewStatus, out *authentication_k8s_io.TokenReviewStatus, s conversion.Scope) error { - return autoConvert_v1beta1_TokenReviewStatus_To_authenticationk8sio_TokenReviewStatus(in, out, s) -} - -func autoConvert_authenticationk8sio_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in *authentication_k8s_io.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { - out.Authenticated = in.Authenticated - if err := 
Convert_authenticationk8sio_UserInfo_To_v1beta1_UserInfo(&in.User, &out.User, s); err != nil { - return err - } - return nil -} - -func Convert_authenticationk8sio_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in *authentication_k8s_io.TokenReviewStatus, out *TokenReviewStatus, s conversion.Scope) error { - return autoConvert_authenticationk8sio_TokenReviewStatus_To_v1beta1_TokenReviewStatus(in, out, s) -} - -func autoConvert_v1beta1_UserInfo_To_authenticationk8sio_UserInfo(in *UserInfo, out *authentication_k8s_io.UserInfo, s conversion.Scope) error { - out.Username = in.Username - out.UID = in.UID - out.Groups = in.Groups - out.Extra = in.Extra - return nil -} - -func Convert_v1beta1_UserInfo_To_authenticationk8sio_UserInfo(in *UserInfo, out *authentication_k8s_io.UserInfo, s conversion.Scope) error { - return autoConvert_v1beta1_UserInfo_To_authenticationk8sio_UserInfo(in, out, s) -} - -func autoConvert_authenticationk8sio_UserInfo_To_v1beta1_UserInfo(in *authentication_k8s_io.UserInfo, out *UserInfo, s conversion.Scope) error { - out.Username = in.Username - out.UID = in.UID - out.Groups = in.Groups - out.Extra = in.Extra - return nil -} - -func Convert_authenticationk8sio_UserInfo_To_v1beta1_UserInfo(in *authentication_k8s_io.UserInfo, out *UserInfo, s conversion.Scope) error { - return autoConvert_authenticationk8sio_UserInfo_To_v1beta1_UserInfo(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/deep_copy_generated.go deleted file mode 100644 index e44dfc86b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/deep_copy_generated.go +++ /dev/null @@ -1,91 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1beta1 - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_v1beta1_TokenReview, - DeepCopy_v1beta1_TokenReviewSpec, - DeepCopy_v1beta1_TokenReviewStatus, - DeepCopy_v1beta1_UserInfo, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. 
- panic(err) - } -} - -func DeepCopy_v1beta1_TokenReview(in TokenReview, out *TokenReview, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_TokenReviewSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_TokenReviewStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_TokenReviewSpec(in TokenReviewSpec, out *TokenReviewSpec, c *conversion.Cloner) error { - out.Token = in.Token - return nil -} - -func DeepCopy_v1beta1_TokenReviewStatus(in TokenReviewStatus, out *TokenReviewStatus, c *conversion.Cloner) error { - out.Authenticated = in.Authenticated - if err := DeepCopy_v1beta1_UserInfo(in.User, &out.User, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_UserInfo(in UserInfo, out *UserInfo, c *conversion.Cloner) error { - out.Username = in.Username - out.UID = in.UID - if in.Groups != nil { - in, out := in.Groups, &out.Groups - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Groups = nil - } - if in.Extra != nil { - in, out := in.Extra, &out.Extra - *out = make(map[string][]string) - for key, val := range in { - if newVal, err := c.DeepCopy(val); err != nil { - return err - } else { - (*out)[key] = newVal.([]string) - } - } - } else { - out.Extra = nil - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/defaults.go deleted file mode 100644 index 0f3732e36..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/defaults.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/kubernetes/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) { - scheme.AddDefaultingFuncs() -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/doc.go deleted file mode 100644 index cfdb87c53..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// +genconversion=true -package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/register.go deleted file mode 100644 index e183299c0..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/register.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// GroupName is the group name use in this package -const GroupName = "authentication.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1beta1"} - -func AddToScheme(scheme *runtime.Scheme) { - // Add the API to Scheme. - addKnownTypes(scheme) - addDefaultingFuncs(scheme) - addConversionFuncs(scheme) -} - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) { - scheme.AddKnownTypes(SchemeGroupVersion, - &TokenReview{}, - ) -} - -func (obj *TokenReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.generated.go deleted file mode 100644 index 62d287ff5..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.generated.go +++ /dev/null @@ -1,1321 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. 
-// ************************************************************ - -package v1beta1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - "reflect" - "runtime" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_unversioned.TypeMeta - _ = v0 - } -} - -func (x *TokenReview) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.Spec - yy4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.Spec - yy6.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Status - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Status - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *TokenReview) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *TokenReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "spec": - if r.TryDecodeAsNil() { - x.Spec = TokenReviewSpec{} - } else { - yyv4 := &x.Spec - yyv4.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = TokenReviewStatus{} - } else { - yyv5 := &x.Status - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *TokenReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = TokenReviewSpec{} - } else { - yyv9 := &x.Spec - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = TokenReviewStatus{} - } else { - yyv10 := &x.Status - 
yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *TokenReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Token != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Token)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("token")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Token)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *TokenReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *TokenReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { 
- break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "token": - if r.TryDecodeAsNil() { - x.Token = "" - } else { - x.Token = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *TokenReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Token = "" - } else { - x.Token = string(r.DecodeString()) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *TokenReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Authenticated != false - yyq2[1] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.Authenticated)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("authenticated")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.Authenticated)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy7 := &x.User - yy7.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("user")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.User - yy9.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *TokenReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *TokenReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "authenticated": - if r.TryDecodeAsNil() { - x.Authenticated = false - } else { - x.Authenticated = bool(r.DecodeBool()) - } - case "user": - if r.TryDecodeAsNil() { - x.User = UserInfo{} - } else { - yyv5 := &x.User - yyv5.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *TokenReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Authenticated = false - } else { - x.Authenticated = bool(r.DecodeBool()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.User = UserInfo{} - } else { - yyv8 := &x.User - yyv8.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *UserInfo) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Username != "" - yyq2[1] = x.UID != "" - yyq2[2] = len(x.Groups) != 0 - yyq2[3] = len(x.Extra) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Username)) - } 
- } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("username")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Username)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("uid")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.UID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Groups == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("groups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Groups == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Extra == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - h.encMapstringSlicestring((map[string][]string)(x.Extra), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("extra")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Extra == nil { - r.EncodeNil() - } else { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - h.encMapstringSlicestring((map[string][]string)(x.Extra), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *UserInfo) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *UserInfo) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - 
var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "username": - if r.TryDecodeAsNil() { - x.Username = "" - } else { - x.Username = string(r.DecodeString()) - } - case "uid": - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = string(r.DecodeString()) - } - case "groups": - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv6 := &x.Groups - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecSliceStringX(yyv6, false, d) - } - } - case "extra": - if r.TryDecodeAsNil() { - x.Extra = nil - } else { - yyv8 := &x.Extra - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - h.decMapstringSlicestring((*map[string][]string)(yyv8), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *UserInfo) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Username = "" - } else { - x.Username = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UID = "" - } else { - x.UID = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv13 := &x.Groups - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - z.F.DecSliceStringX(yyv13, false, d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Extra = nil - } else { - yyv15 := &x.Extra - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - h.decMapstringSlicestring((*map[string][]string)(yyv15), d) - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encMapstringSlicestring(v map[string][]string, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk1, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, 
string(yyk1)) - } - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyv1 == nil { - r.EncodeNil() - } else { - yym3 := z.EncBinary() - _ = yym3 - if false { - } else { - z.F.EncSliceStringV(yyv1, false, e) - } - } - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decMapstringSlicestring(v *map[string][]string, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyl1 := r.ReadMapStart() - yybh1 := z.DecBasicHandle() - if yyv1 == nil { - yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) - yyv1 = make(map[string][]string, yyrl1) - *v = yyv1 - } - var yymk1 string - var yymv1 []string - var yymg1 bool - if yybh1.MapValueReset { - yymg1 = true - } - if yyl1 > 0 { - for yyj1 := 0; yyj1 < yyl1; yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yymk1 = string(r.DecodeString()) - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv3 := &yymv1 - yym4 := z.DecBinary() - _ = yym4 - if false { - } else { - z.F.DecSliceStringX(yyv3, false, d) - } - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } else if yyl1 < 0 { - for yyj1 := 0; !r.CheckBreak(); yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yymk1 = string(r.DecodeString()) - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv6 := &yymv1 - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecSliceStringX(yyv6, false, d) - } - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } // else len==0: TODO: Should we clear map entries? 
- z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) encSlicestring(v []string, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyv1)) - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicestring(v *[]string, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []string{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]string, yyrl1) - } - } else { - yyv1 = make([]string, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = string(r.DecodeString()) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = string(r.DecodeString()) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 string - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = string(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []string{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.go deleted file mode 100644 index fc136877a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authentication.k8s.io/v1beta1/types.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" -) - -// TokenReview attempts to authenticate a token to a known user. -// Note: TokenReview requests may be cached by the webhook token authenticator -// plugin in the kube-apiserver. 
-type TokenReview struct { - unversioned.TypeMeta `json:",inline"` - - // Spec holds information about the request being evaluated - Spec TokenReviewSpec `json:"spec"` - - // Status is filled in by the server and indicates whether the request can be authenticated. - Status TokenReviewStatus `json:"status,omitempty"` -} - -// TokenReviewSpec is a description of the token authentication request. -type TokenReviewSpec struct { - // Token is the opaque bearer token. - Token string `json:"token,omitempty"` -} - -// TokenReviewStatus is the result of the token authentication request. -type TokenReviewStatus struct { - // Authenticated indicates that the token was associated with a known user. - Authenticated bool `json:"authenticated,omitempty"` - // User is the UserInfo associated with the provided token. - User UserInfo `json:"user,omitempty"` -} - -// UserInfo holds the information about the user needed to implement the -// user.Info interface. -type UserInfo struct { - // The name that uniquely identifies this user among all active users. - Username string `json:"username,omitempty"` - // A unique value that identifies this user across time. If this user is - // deleted and another user by the same name is added, they will have - // different UIDs. - UID string `json:"uid,omitempty"` - // The names of groups this user is a part of. - Groups []string `json:"groups,omitempty"` - // Any additional information provided by the authenticator. - Extra map[string][]string `json:"extra,omitempty"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/deep_copy_generated.go deleted file mode 100644 index bc40fb33e..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/deep_copy_generated.go +++ /dev/null @@ -1,170 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package authorization - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_authorization_LocalSubjectAccessReview, - DeepCopy_authorization_NonResourceAttributes, - DeepCopy_authorization_ResourceAttributes, - DeepCopy_authorization_SelfSubjectAccessReview, - DeepCopy_authorization_SelfSubjectAccessReviewSpec, - DeepCopy_authorization_SubjectAccessReview, - DeepCopy_authorization_SubjectAccessReviewSpec, - DeepCopy_authorization_SubjectAccessReviewStatus, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. 
- panic(err) - } -} - -func DeepCopy_authorization_LocalSubjectAccessReview(in LocalSubjectAccessReview, out *LocalSubjectAccessReview, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_authorization_SubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_authorization_SubjectAccessReviewStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_authorization_NonResourceAttributes(in NonResourceAttributes, out *NonResourceAttributes, c *conversion.Cloner) error { - out.Path = in.Path - out.Verb = in.Verb - return nil -} - -func DeepCopy_authorization_ResourceAttributes(in ResourceAttributes, out *ResourceAttributes, c *conversion.Cloner) error { - out.Namespace = in.Namespace - out.Verb = in.Verb - out.Group = in.Group - out.Version = in.Version - out.Resource = in.Resource - out.Subresource = in.Subresource - out.Name = in.Name - return nil -} - -func DeepCopy_authorization_SelfSubjectAccessReview(in SelfSubjectAccessReview, out *SelfSubjectAccessReview, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_authorization_SelfSubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_authorization_SubjectAccessReviewStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_authorization_SelfSubjectAccessReviewSpec(in SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, c *conversion.Cloner) error { - if in.ResourceAttributes != nil { - in, out := in.ResourceAttributes, &out.ResourceAttributes - *out = new(ResourceAttributes) - if err := DeepCopy_authorization_ResourceAttributes(*in, *out, c); err != nil { - return err - } - } else { - out.ResourceAttributes = nil - } - if in.NonResourceAttributes != nil { - in, out := in.NonResourceAttributes, &out.NonResourceAttributes - *out = new(NonResourceAttributes) - if err := DeepCopy_authorization_NonResourceAttributes(*in, *out, c); err != nil { - return err - } - } else { - out.NonResourceAttributes = nil - } - return nil -} - -func DeepCopy_authorization_SubjectAccessReview(in SubjectAccessReview, out *SubjectAccessReview, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_authorization_SubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_authorization_SubjectAccessReviewStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_authorization_SubjectAccessReviewSpec(in SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, c *conversion.Cloner) error { - if in.ResourceAttributes != nil { - in, out := in.ResourceAttributes, &out.ResourceAttributes - *out = new(ResourceAttributes) - if err := DeepCopy_authorization_ResourceAttributes(*in, *out, c); err != nil { - return err - } - } else { - out.ResourceAttributes = nil - } - if in.NonResourceAttributes != nil { - in, out := in.NonResourceAttributes, &out.NonResourceAttributes - *out = new(NonResourceAttributes) - if err := DeepCopy_authorization_NonResourceAttributes(*in, *out, c); err != nil { - return err - } - } else { - out.NonResourceAttributes = nil - } - out.User = in.User - if in.Groups != nil { - 
in, out := in.Groups, &out.Groups - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Groups = nil - } - if in.Extra != nil { - in, out := in.Extra, &out.Extra - *out = make(map[string][]string) - for key, val := range in { - if newVal, err := c.DeepCopy(val); err != nil { - return err - } else { - (*out)[key] = newVal.([]string) - } - } - } else { - out.Extra = nil - } - return nil -} - -func DeepCopy_authorization_SubjectAccessReviewStatus(in SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, c *conversion.Cloner) error { - out.Allowed = in.Allowed - out.Reason = in.Reason - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/install.go deleted file mode 100644 index bf8814dd5..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/install/install.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the experimental API group, making it available as -// an option to all of the API encoding/decoding machinery. -package install - -import ( - "fmt" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/authorization" - "k8s.io/kubernetes/pkg/apis/authorization/v1beta1" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/sets" -) - -const importPrefix = "k8s.io/kubernetes/pkg/apis/authorization" - -var accessor = meta.NewAccessor() - -// availableVersions lists all known external versions for this group from most preferred to least preferred -var availableVersions = []unversioned.GroupVersion{v1beta1.SchemeGroupVersion} - -func init() { - registered.RegisterVersions(availableVersions) - externalVersions := []unversioned.GroupVersion{} - for _, v := range availableVersions { - if registered.IsAllowedVersion(v) { - externalVersions = append(externalVersions, v) - } - } - if len(externalVersions) == 0 { - glog.V(4).Infof("No version is registered for group %v", authorization.GroupName) - return - } - - if err := registered.EnableVersions(externalVersions...); err != nil { - glog.V(4).Infof("%v", err) - return - } - if err := enableVersions(externalVersions); err != nil { - glog.V(4).Infof("%v", err) - return - } -} - -// TODO: enableVersions should be centralized rather than spread in each API -// group. -// We can combine registered.RegisterVersions, registered.EnableVersions and -// registered.RegisterGroup once we have moved enableVersions there. -func enableVersions(externalVersions []unversioned.GroupVersion) error { - addVersionsToScheme(externalVersions...) 
- preferredExternalVersion := externalVersions[0] - - groupMeta := apimachinery.GroupMeta{ - GroupVersion: preferredExternalVersion, - GroupVersions: externalVersions, - RESTMapper: newRESTMapper(externalVersions), - SelfLinker: runtime.SelfLinker(accessor), - InterfacesFor: interfacesFor, - } - - if err := registered.RegisterGroup(groupMeta); err != nil { - return err - } - api.RegisterRESTMapper(groupMeta.RESTMapper) - return nil -} - -func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { - // add the internal version to Scheme - authorization.AddToScheme(api.Scheme) - // add the enabled external versions to Scheme - for _, v := range externalVersions { - if !registered.IsEnabledVersion(v) { - glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) - continue - } - switch v { - case v1beta1.SchemeGroupVersion: - v1beta1.AddToScheme(api.Scheme) - } - } -} - -func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { - worstToBestGroupVersions := []unversioned.GroupVersion{} - for i := len(externalVersions) - 1; i >= 0; i-- { - worstToBestGroupVersions = append(worstToBestGroupVersions, externalVersions[i]) - } - - rootScoped := sets.NewString("SubjectAccessReview", "SelfSubjectAccessReview") - ignoredKinds := sets.NewString() - return api.NewDefaultRESTMapper(worstToBestGroupVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) -} - -func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { - switch version { - case v1beta1.SchemeGroupVersion: - return &meta.VersionInterfaces{ - ObjectConvertor: api.Scheme, - MetadataAccessor: accessor, - }, nil - default: - g, _ := registered.Group(authorization.GroupName) - return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/register.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/register.go deleted file mode 100644 index fdb6c4f4b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/register.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package authorization - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// GroupName is the group name use in this package -const GroupName = "authorization.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns back a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -func AddToScheme(scheme *runtime.Scheme) { - addKnownTypes(scheme) -} - -func addKnownTypes(scheme *runtime.Scheme) { - scheme.AddKnownTypes(SchemeGroupVersion, - &SelfSubjectAccessReview{}, - &SubjectAccessReview{}, - &LocalSubjectAccessReview{}, - ) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.generated.go deleted file mode 100644 index 1c071376c..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.generated.go +++ /dev/null @@ -1,2570 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package authorization - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - "reflect" - "runtime" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_unversioned.TypeMeta - _ = v0 - } -} - -func (x *SubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.Spec - yy4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.Spec - yy6.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy9 := &x.Status - yy9.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Status - yy11.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == 
codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "Spec": - if r.TryDecodeAsNil() { - x.Spec = SubjectAccessReviewSpec{} - } else { - yyv4 := &x.Spec - yyv4.CodecDecodeSelf(d) - } - case "Status": - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv5 := &x.Status - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = SubjectAccessReviewSpec{} - } else { - yyv9 := &x.Spec - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv10 := &x.Status - yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SelfSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, 
_, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.Spec - yy4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.Spec - yy6.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy9 := &x.Status - yy9.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Status - yy11.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SelfSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SelfSubjectAccessReview) codecDecodeSelfFromMap(l int, d 
*codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "Spec": - if r.TryDecodeAsNil() { - x.Spec = SelfSubjectAccessReviewSpec{} - } else { - yyv4 := &x.Spec - yyv4.CodecDecodeSelf(d) - } - case "Status": - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv5 := &x.Status - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SelfSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = SelfSubjectAccessReviewSpec{} - } else { - yyv9 := &x.Spec - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv10 := &x.Status - yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LocalSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - 
yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.Spec - yy4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.Spec - yy6.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy9 := &x.Status - yy9.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Status - yy11.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LocalSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LocalSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break 
- } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "Spec": - if r.TryDecodeAsNil() { - x.Spec = SubjectAccessReviewSpec{} - } else { - yyv4 := &x.Spec - yyv4.CodecDecodeSelf(d) - } - case "Status": - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv5 := &x.Status - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LocalSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = SubjectAccessReviewSpec{} - } else { - yyv9 := &x.Spec - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv10 := &x.Status - yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 7 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Namespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Verb")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Group)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Group")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Group)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Version)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Version")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Version)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Resource")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Subresource")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { 
- z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "Namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - case "Verb": - if r.TryDecodeAsNil() { - x.Verb = "" - } else { - x.Verb = string(r.DecodeString()) - } - case "Group": - if r.TryDecodeAsNil() { - x.Group = "" - } else { - x.Group = string(r.DecodeString()) - } - case "Version": - if r.TryDecodeAsNil() { - x.Version = "" - } else { - x.Version = string(r.DecodeString()) - } - case "Resource": - if r.TryDecodeAsNil() { - x.Resource = "" - } else { - x.Resource = string(r.DecodeString()) - } - case "Subresource": - if r.TryDecodeAsNil() { - x.Subresource = "" - } else { - x.Subresource = string(r.DecodeString()) - } - case "Name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Verb = "" - } else { - x.Verb = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Group = "" - } else { - x.Group = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Version = "" - } else { - x.Version = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Resource = "" - } else { - x.Resource = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Subresource = "" - } else { - x.Subresource = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NonResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Verb")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NonResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NonResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "Path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "Verb": - if r.TryDecodeAsNil() { - x.Verb = "" - } else { - x.Verb = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NonResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Verb = "" - } else { - x.Verb = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 5 - for _, b := range yyq2 { - 
if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.ResourceAttributes == nil { - r.EncodeNil() - } else { - x.ResourceAttributes.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ResourceAttributes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ResourceAttributes == nil { - r.EncodeNil() - } else { - x.ResourceAttributes.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.NonResourceAttributes == nil { - r.EncodeNil() - } else { - x.NonResourceAttributes.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("NonResourceAttributes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NonResourceAttributes == nil { - r.EncodeNil() - } else { - x.NonResourceAttributes.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("User")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Groups == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Groups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Groups == nil { - r.EncodeNil() - } else { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Extra == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encMapstringSlicestring((map[string][]string)(x.Extra), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Extra")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Extra == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - h.encMapstringSlicestring((map[string][]string)(x.Extra), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == 
codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "ResourceAttributes": - if r.TryDecodeAsNil() { - if x.ResourceAttributes != nil { - x.ResourceAttributes = nil - } - } else { - if x.ResourceAttributes == nil { - x.ResourceAttributes = new(ResourceAttributes) - } - x.ResourceAttributes.CodecDecodeSelf(d) - } - case "NonResourceAttributes": - if r.TryDecodeAsNil() { - if x.NonResourceAttributes != nil { - x.NonResourceAttributes = nil - } - } else { - if x.NonResourceAttributes == nil { - x.NonResourceAttributes = new(NonResourceAttributes) - } - x.NonResourceAttributes.CodecDecodeSelf(d) - } - case "User": - if r.TryDecodeAsNil() { - x.User = "" - } else { - x.User = string(r.DecodeString()) - } - case "Groups": - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv7 := &x.Groups - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - z.F.DecSliceStringX(yyv7, false, d) - } - } - case "Extra": - if r.TryDecodeAsNil() { - x.Extra = nil - } else { - yyv9 := &x.Extra - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decMapstringSlicestring((*map[string][]string)(yyv9), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ResourceAttributes != nil { - x.ResourceAttributes = nil - } - } else { - if x.ResourceAttributes == nil { - x.ResourceAttributes = new(ResourceAttributes) - } - x.ResourceAttributes.CodecDecodeSelf(d) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NonResourceAttributes != nil { - x.NonResourceAttributes = nil - } - } else { - if x.NonResourceAttributes == nil { - x.NonResourceAttributes = new(NonResourceAttributes) - } - x.NonResourceAttributes.CodecDecodeSelf(d) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.User = "" - } else { - x.User = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv15 := &x.Groups - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - z.F.DecSliceStringX(yyv15, false, d) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Extra = nil - } else { - yyv17 := &x.Extra - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - h.decMapstringSlicestring((*map[string][]string)(yyv17), d) - } - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SelfSubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.ResourceAttributes == nil { - r.EncodeNil() - } else { - x.ResourceAttributes.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ResourceAttributes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ResourceAttributes == nil { - r.EncodeNil() - } else { - x.ResourceAttributes.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.NonResourceAttributes == nil { - r.EncodeNil() - } else { - x.NonResourceAttributes.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("NonResourceAttributes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NonResourceAttributes == nil { - r.EncodeNil() - } else { - x.NonResourceAttributes.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SelfSubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - 
x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "ResourceAttributes": - if r.TryDecodeAsNil() { - if x.ResourceAttributes != nil { - x.ResourceAttributes = nil - } - } else { - if x.ResourceAttributes == nil { - x.ResourceAttributes = new(ResourceAttributes) - } - x.ResourceAttributes.CodecDecodeSelf(d) - } - case "NonResourceAttributes": - if r.TryDecodeAsNil() { - if x.NonResourceAttributes != nil { - x.NonResourceAttributes = nil - } - } else { - if x.NonResourceAttributes == nil { - x.NonResourceAttributes = new(NonResourceAttributes) - } - x.NonResourceAttributes.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ResourceAttributes != nil { - x.ResourceAttributes = nil - } - } else { - if x.ResourceAttributes == nil { - x.ResourceAttributes = new(ResourceAttributes) - } - x.ResourceAttributes.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NonResourceAttributes != nil { - x.NonResourceAttributes = nil - } - } else { - if x.NonResourceAttributes == nil { - x.NonResourceAttributes = new(NonResourceAttributes) - } - x.NonResourceAttributes.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SubjectAccessReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, 
yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.Allowed)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Allowed")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.Allowed)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SubjectAccessReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SubjectAccessReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "Allowed": - if r.TryDecodeAsNil() { - x.Allowed = false - } else { - x.Allowed = bool(r.DecodeBool()) - } - case "Reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SubjectAccessReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } 
- z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Allowed = false - } else { - x.Allowed = bool(r.DecodeBool()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encMapstringSlicestring(v map[string][]string, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk1, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyk1)) - } - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyv1 == nil { - r.EncodeNil() - } else { - yym3 := z.EncBinary() - _ = yym3 - if false { - } else { - z.F.EncSliceStringV(yyv1, false, e) - } - } - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decMapstringSlicestring(v *map[string][]string, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyl1 := r.ReadMapStart() - yybh1 := z.DecBasicHandle() - if yyv1 == nil { - yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) - yyv1 = make(map[string][]string, yyrl1) - *v = yyv1 - } - var yymk1 string - var yymv1 []string - var yymg1 bool - if yybh1.MapValueReset { - yymg1 = true - } - if yyl1 > 0 { - for yyj1 := 0; yyj1 < yyl1; yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yymk1 = string(r.DecodeString()) - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv3 := &yymv1 - yym4 := z.DecBinary() - _ = yym4 - if false { - } else { - z.F.DecSliceStringX(yyv3, false, d) - } - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } else if yyl1 < 0 { - for yyj1 := 0; !r.CheckBreak(); yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yymk1 = string(r.DecodeString()) - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv6 := &yymv1 - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecSliceStringX(yyv6, false, d) - } - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } // else len==0: TODO: Should we clear map entries? 
- z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) encSlicestring(v []string, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyv1)) - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicestring(v *[]string, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []string{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]string, yyrl1) - } - } else { - yyv1 = make([]string, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = string(r.DecodeString()) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = string(r.DecodeString()) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 string - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = string(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []string{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.go deleted file mode 100644 index 8cfdfbe97..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/types.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package authorization - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" -) - -// SubjectAccessReview checks whether or not a user or group can perform an action. Not filling in a -// spec.namespace means "in all namespaces". 
-type SubjectAccessReview struct { - unversioned.TypeMeta - - // Spec holds information about the request being evaluated - Spec SubjectAccessReviewSpec - - // Status is filled in by the server and indicates whether the request is allowed or not - Status SubjectAccessReviewStatus -} - -// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a -// spec.namespace means "in all namespaces". Self is a special case, because users should always be able -// to check whether they can perform an action -type SelfSubjectAccessReview struct { - unversioned.TypeMeta - - // Spec holds information about the request being evaluated. - Spec SelfSubjectAccessReviewSpec - - // Status is filled in by the server and indicates whether the request is allowed or not - Status SubjectAccessReviewStatus -} - -// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. -// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions -// checking. -type LocalSubjectAccessReview struct { - unversioned.TypeMeta - - // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace - // you made the request against. If empty, it is defaulted. - Spec SubjectAccessReviewSpec - - // Status is filled in by the server and indicates whether the request is allowed or not - Status SubjectAccessReviewStatus -} - -// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface -type ResourceAttributes struct { - // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces - // "" (empty) is defaulted for LocalSubjectAccessReviews - // "" (empty) is empty for cluster-scoped resources - // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview - Namespace string - // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. - Verb string - // Group is the API Group of the Resource. "*" means all. - Group string - // Version is the API Version of the Resource. "*" means all. - Version string - // Resource is one of the existing resource types. "*" means all. - Resource string - // Subresource is one of the existing resource types. "" means none. - Subresource string - // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. - Name string -} - -// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface -type NonResourceAttributes struct { - // Path is the URL path of the request - Path string - // Verb is the standard HTTP verb - Verb string -} - -// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAttributes -// and NonResourceAttributes must be set -type SubjectAccessReviewSpec struct { - // ResourceAttributes describes information for a resource access request - ResourceAttributes *ResourceAttributes - // NonResourceAttributes describes information for a non-resource access request - NonResourceAttributes *NonResourceAttributes - - // User is the user you're testing for. - // If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups - User string - // Groups is the groups you're testing for. 
- Groups []string - // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer - // it needs a reflection here. - Extra map[string][]string -} - -// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAttributes -// and NonResourceAttributes must be set -type SelfSubjectAccessReviewSpec struct { - // ResourceAttributes describes information for a resource access request - ResourceAttributes *ResourceAttributes - // NonResourceAttributes describes information for a non-resource access request - NonResourceAttributes *NonResourceAttributes -} - -// SubjectAccessReviewStatus -type SubjectAccessReviewStatus struct { - // Allowed is required. True if the action would be allowed, false otherwise. - Allowed bool - // Reason is optional. It indicates why a request was allowed or denied. - Reason string -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion.go deleted file mode 100644 index 0b45ed5fb..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/kubernetes/pkg/runtime" -) - -func addConversionFuncs(scheme *runtime.Scheme) { - // Add non-generated conversion functions - err := scheme.AddConversionFuncs() - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. - panic(err) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion_generated.go deleted file mode 100644 index a475c0fd0..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/conversion_generated.go +++ /dev/null @@ -1,333 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! 
- -package v1beta1 - -import ( - api "k8s.io/kubernetes/pkg/api" - authorization "k8s.io/kubernetes/pkg/apis/authorization" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedConversionFuncs( - Convert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview, - Convert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview, - Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes, - Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes, - Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes, - Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes, - Convert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview, - Convert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview, - Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec, - Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec, - Convert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview, - Convert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview, - Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec, - Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec, - Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus, - Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus, - ); err != nil { - // if one of the conversion functions is malformed, detect it immediately. - panic(err) - } -} - -func autoConvert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in *LocalSubjectAccessReview, out *authorization.LocalSubjectAccessReview, s conversion.Scope) error { - return autoConvert_v1beta1_LocalSubjectAccessReview_To_authorization_LocalSubjectAccessReview(in, out, s) -} - -func autoConvert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in *authorization.LocalSubjectAccessReview, out *LocalSubjectAccessReview, s conversion.Scope) error { - return 
autoConvert_authorization_LocalSubjectAccessReview_To_v1beta1_LocalSubjectAccessReview(in, out, s) -} - -func autoConvert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error { - out.Path = in.Path - out.Verb = in.Verb - return nil -} - -func Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in *NonResourceAttributes, out *authorization.NonResourceAttributes, s conversion.Scope) error { - return autoConvert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(in, out, s) -} - -func autoConvert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error { - out.Path = in.Path - out.Verb = in.Verb - return nil -} - -func Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in *authorization.NonResourceAttributes, out *NonResourceAttributes, s conversion.Scope) error { - return autoConvert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(in, out, s) -} - -func autoConvert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error { - out.Namespace = in.Namespace - out.Verb = in.Verb - out.Group = in.Group - out.Version = in.Version - out.Resource = in.Resource - out.Subresource = in.Subresource - out.Name = in.Name - return nil -} - -func Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in *ResourceAttributes, out *authorization.ResourceAttributes, s conversion.Scope) error { - return autoConvert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(in, out, s) -} - -func autoConvert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s conversion.Scope) error { - out.Namespace = in.Namespace - out.Verb = in.Verb - out.Group = in.Group - out.Version = in.Version - out.Resource = in.Resource - out.Subresource = in.Subresource - out.Name = in.Name - return nil -} - -func Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in *authorization.ResourceAttributes, out *ResourceAttributes, s conversion.Scope) error { - return autoConvert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(in, out, s) -} - -func autoConvert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in *SelfSubjectAccessReview, out *authorization.SelfSubjectAccessReview, s conversion.Scope) error { - return autoConvert_v1beta1_SelfSubjectAccessReview_To_authorization_SelfSubjectAccessReview(in, out, s) -} - -func autoConvert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out 
*SelfSubjectAccessReview, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in *authorization.SelfSubjectAccessReview, out *SelfSubjectAccessReview, s conversion.Scope) error { - return autoConvert_authorization_SelfSubjectAccessReview_To_v1beta1_SelfSubjectAccessReview(in, out, s) -} - -func autoConvert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error { - if in.ResourceAttributes != nil { - in, out := &in.ResourceAttributes, &out.ResourceAttributes - *out = new(authorization.ResourceAttributes) - if err := Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(*in, *out, s); err != nil { - return err - } - } else { - out.ResourceAttributes = nil - } - if in.NonResourceAttributes != nil { - in, out := &in.NonResourceAttributes, &out.NonResourceAttributes - *out = new(authorization.NonResourceAttributes) - if err := Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(*in, *out, s); err != nil { - return err - } - } else { - out.NonResourceAttributes = nil - } - return nil -} - -func Convert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in *SelfSubjectAccessReviewSpec, out *authorization.SelfSubjectAccessReviewSpec, s conversion.Scope) error { - return autoConvert_v1beta1_SelfSubjectAccessReviewSpec_To_authorization_SelfSubjectAccessReviewSpec(in, out, s) -} - -func autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error { - if in.ResourceAttributes != nil { - in, out := &in.ResourceAttributes, &out.ResourceAttributes - *out = new(ResourceAttributes) - if err := Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(*in, *out, s); err != nil { - return err - } - } else { - out.ResourceAttributes = nil - } - if in.NonResourceAttributes != nil { - in, out := &in.NonResourceAttributes, &out.NonResourceAttributes - *out = new(NonResourceAttributes) - if err := Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(*in, *out, s); err != nil { - return err - } - } else { - out.NonResourceAttributes = nil - } - return nil -} - -func Convert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in *authorization.SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, s conversion.Scope) error { - return autoConvert_authorization_SelfSubjectAccessReviewSpec_To_v1beta1_SelfSubjectAccessReviewSpec(in, out, s) -} - -func autoConvert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := 
Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in *SubjectAccessReview, out *authorization.SubjectAccessReview, s conversion.Scope) error { - return autoConvert_v1beta1_SubjectAccessReview_To_authorization_SubjectAccessReview(in, out, s) -} - -func autoConvert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in *authorization.SubjectAccessReview, out *SubjectAccessReview, s conversion.Scope) error { - return autoConvert_authorization_SubjectAccessReview_To_v1beta1_SubjectAccessReview(in, out, s) -} - -func autoConvert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error { - if in.ResourceAttributes != nil { - in, out := &in.ResourceAttributes, &out.ResourceAttributes - *out = new(authorization.ResourceAttributes) - if err := Convert_v1beta1_ResourceAttributes_To_authorization_ResourceAttributes(*in, *out, s); err != nil { - return err - } - } else { - out.ResourceAttributes = nil - } - if in.NonResourceAttributes != nil { - in, out := &in.NonResourceAttributes, &out.NonResourceAttributes - *out = new(authorization.NonResourceAttributes) - if err := Convert_v1beta1_NonResourceAttributes_To_authorization_NonResourceAttributes(*in, *out, s); err != nil { - return err - } - } else { - out.NonResourceAttributes = nil - } - out.User = in.User - out.Groups = in.Groups - out.Extra = in.Extra - return nil -} - -func Convert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in *SubjectAccessReviewSpec, out *authorization.SubjectAccessReviewSpec, s conversion.Scope) error { - return autoConvert_v1beta1_SubjectAccessReviewSpec_To_authorization_SubjectAccessReviewSpec(in, out, s) -} - -func autoConvert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error { - if in.ResourceAttributes != nil { - in, out := &in.ResourceAttributes, &out.ResourceAttributes - *out = new(ResourceAttributes) - if err := Convert_authorization_ResourceAttributes_To_v1beta1_ResourceAttributes(*in, *out, s); err != nil { - return err - } - } else { - out.ResourceAttributes = nil - } - if in.NonResourceAttributes != nil { - in, out := &in.NonResourceAttributes, &out.NonResourceAttributes - *out = new(NonResourceAttributes) - if err := Convert_authorization_NonResourceAttributes_To_v1beta1_NonResourceAttributes(*in, *out, s); err != nil { - return err - } - } else { - out.NonResourceAttributes = nil - } - 
out.User = in.User - out.Groups = in.Groups - out.Extra = in.Extra - return nil -} - -func Convert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in *authorization.SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, s conversion.Scope) error { - return autoConvert_authorization_SubjectAccessReviewSpec_To_v1beta1_SubjectAccessReviewSpec(in, out, s) -} - -func autoConvert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error { - out.Allowed = in.Allowed - out.Reason = in.Reason - return nil -} - -func Convert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in *SubjectAccessReviewStatus, out *authorization.SubjectAccessReviewStatus, s conversion.Scope) error { - return autoConvert_v1beta1_SubjectAccessReviewStatus_To_authorization_SubjectAccessReviewStatus(in, out, s) -} - -func autoConvert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error { - out.Allowed = in.Allowed - out.Reason = in.Reason - return nil -} - -func Convert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in *authorization.SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, s conversion.Scope) error { - return autoConvert_authorization_SubjectAccessReviewStatus_To_v1beta1_SubjectAccessReviewStatus(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/deep_copy_generated.go deleted file mode 100644 index 94a35650c..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/deep_copy_generated.go +++ /dev/null @@ -1,170 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1beta1 - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_v1beta1_LocalSubjectAccessReview, - DeepCopy_v1beta1_NonResourceAttributes, - DeepCopy_v1beta1_ResourceAttributes, - DeepCopy_v1beta1_SelfSubjectAccessReview, - DeepCopy_v1beta1_SelfSubjectAccessReviewSpec, - DeepCopy_v1beta1_SubjectAccessReview, - DeepCopy_v1beta1_SubjectAccessReviewSpec, - DeepCopy_v1beta1_SubjectAccessReviewStatus, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. 
- panic(err) - } -} - -func DeepCopy_v1beta1_LocalSubjectAccessReview(in LocalSubjectAccessReview, out *LocalSubjectAccessReview, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_SubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_SubjectAccessReviewStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_NonResourceAttributes(in NonResourceAttributes, out *NonResourceAttributes, c *conversion.Cloner) error { - out.Path = in.Path - out.Verb = in.Verb - return nil -} - -func DeepCopy_v1beta1_ResourceAttributes(in ResourceAttributes, out *ResourceAttributes, c *conversion.Cloner) error { - out.Namespace = in.Namespace - out.Verb = in.Verb - out.Group = in.Group - out.Version = in.Version - out.Resource = in.Resource - out.Subresource = in.Subresource - out.Name = in.Name - return nil -} - -func DeepCopy_v1beta1_SelfSubjectAccessReview(in SelfSubjectAccessReview, out *SelfSubjectAccessReview, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_SelfSubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_SubjectAccessReviewStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_SelfSubjectAccessReviewSpec(in SelfSubjectAccessReviewSpec, out *SelfSubjectAccessReviewSpec, c *conversion.Cloner) error { - if in.ResourceAttributes != nil { - in, out := in.ResourceAttributes, &out.ResourceAttributes - *out = new(ResourceAttributes) - if err := DeepCopy_v1beta1_ResourceAttributes(*in, *out, c); err != nil { - return err - } - } else { - out.ResourceAttributes = nil - } - if in.NonResourceAttributes != nil { - in, out := in.NonResourceAttributes, &out.NonResourceAttributes - *out = new(NonResourceAttributes) - if err := DeepCopy_v1beta1_NonResourceAttributes(*in, *out, c); err != nil { - return err - } - } else { - out.NonResourceAttributes = nil - } - return nil -} - -func DeepCopy_v1beta1_SubjectAccessReview(in SubjectAccessReview, out *SubjectAccessReview, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_SubjectAccessReviewSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_SubjectAccessReviewStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_SubjectAccessReviewSpec(in SubjectAccessReviewSpec, out *SubjectAccessReviewSpec, c *conversion.Cloner) error { - if in.ResourceAttributes != nil { - in, out := in.ResourceAttributes, &out.ResourceAttributes - *out = new(ResourceAttributes) - if err := DeepCopy_v1beta1_ResourceAttributes(*in, *out, c); err != nil { - return err - } - } else { - out.ResourceAttributes = nil - } - if in.NonResourceAttributes != nil { - in, out := in.NonResourceAttributes, &out.NonResourceAttributes - *out = new(NonResourceAttributes) - if err := DeepCopy_v1beta1_NonResourceAttributes(*in, *out, c); err != nil { - return err - } - } else { - out.NonResourceAttributes = nil - } - out.User = in.User - if in.Groups != nil { - in, out := in.Groups, &out.Groups - *out = make([]string, len(in)) - copy(*out, in) - } else { - 
out.Groups = nil - } - if in.Extra != nil { - in, out := in.Extra, &out.Extra - *out = make(map[string][]string) - for key, val := range in { - if newVal, err := c.DeepCopy(val); err != nil { - return err - } else { - (*out)[key] = newVal.([]string) - } - } - } else { - out.Extra = nil - } - return nil -} - -func DeepCopy_v1beta1_SubjectAccessReviewStatus(in SubjectAccessReviewStatus, out *SubjectAccessReviewStatus, c *conversion.Cloner) error { - out.Allowed = in.Allowed - out.Reason = in.Reason - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/defaults.go deleted file mode 100644 index 340f80755..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/defaults.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/kubernetes/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) { - scheme.AddDefaultingFuncs() -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go deleted file mode 100644 index cfdb87c53..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +genconversion=true -package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/register.go deleted file mode 100644 index d9e33ed5a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/register.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// GroupName is the group name use in this package -const GroupName = "authorization.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1beta1"} - -func AddToScheme(scheme *runtime.Scheme) { - // Add the API to Scheme. - addKnownTypes(scheme) - addDefaultingFuncs(scheme) - addConversionFuncs(scheme) -} - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) { - scheme.AddKnownTypes(SchemeGroupVersion, - &SelfSubjectAccessReview{}, - &SubjectAccessReview{}, - &LocalSubjectAccessReview{}, - ) -} - -func (obj *LocalSubjectAccessReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *SubjectAccessReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *SelfSubjectAccessReview) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.generated.go deleted file mode 100644 index 3b5e4fbe9..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.generated.go +++ /dev/null @@ -1,2710 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v1beta1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - "reflect" - "runtime" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_unversioned.TypeMeta - _ = v0 - } -} - -func (x *SubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.Spec - yy4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.Spec - yy6.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Status - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Status - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) 
- } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "spec": - if r.TryDecodeAsNil() { - x.Spec = SubjectAccessReviewSpec{} - } else { - yyv4 := &x.Spec - yyv4.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv5 := &x.Status - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = SubjectAccessReviewSpec{} - } else { - yyv9 := &x.Spec - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv10 := &x.Status - yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SelfSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) 
{ - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.Spec - yy4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.Spec - yy6.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Status - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Status - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SelfSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } 
else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SelfSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "spec": - if r.TryDecodeAsNil() { - x.Spec = SelfSubjectAccessReviewSpec{} - } else { - yyv4 := &x.Spec - yyv4.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv5 := &x.Status - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SelfSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = SelfSubjectAccessReviewSpec{} - } else { - yyv9 := &x.Spec - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv10 := &x.Status - yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LocalSubjectAccessReview) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := 
!z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.Spec - yy4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.Spec - yy6.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Status - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Status - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LocalSubjectAccessReview) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LocalSubjectAccessReview) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, 
z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "spec": - if r.TryDecodeAsNil() { - x.Spec = SubjectAccessReviewSpec{} - } else { - yyv4 := &x.Spec - yyv4.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv5 := &x.Status - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LocalSubjectAccessReview) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = SubjectAccessReviewSpec{} - } else { - yyv9 := &x.Spec - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = SubjectAccessReviewStatus{} - } else { - yyv10 := &x.Status - yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Namespace != "" - yyq2[1] = x.Verb != "" - yyq2[2] = x.Group != "" - yyq2[3] = x.Version != "" - yyq2[4] = 
x.Resource != "" - yyq2[5] = x.Subresource != "" - yyq2[6] = x.Name != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("verb")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Group)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("group")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Group)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Version)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("version")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Version)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resource")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Resource)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, 
string(x.Subresource)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subresource")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - case "verb": - if r.TryDecodeAsNil() { - x.Verb = "" - } else { - x.Verb = string(r.DecodeString()) - } - case "group": - if r.TryDecodeAsNil() { - x.Group = "" - } else { - x.Group = string(r.DecodeString()) - } - case "version": - if r.TryDecodeAsNil() { - x.Version = "" - } else { - x.Version = string(r.DecodeString()) - } - case "resource": - if r.TryDecodeAsNil() { - x.Resource = "" - } else { - x.Resource = string(r.DecodeString()) - } - case "subresource": - if r.TryDecodeAsNil() { - x.Subresource = "" - } else { - x.Subresource = string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - 
-func (x *ResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Verb = "" - } else { - x.Verb = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Group = "" - } else { - x.Group = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Version = "" - } else { - x.Version = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Resource = "" - } else { - x.Resource = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Subresource = "" - } else { - x.Subresource = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NonResourceAttributes) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Path != "" - yyq2[1] = x.Verb != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("verb")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Verb)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NonResourceAttributes) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NonResourceAttributes) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "verb": - if r.TryDecodeAsNil() { - x.Verb = "" - } else { - x.Verb = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NonResourceAttributes) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Verb = "" - } else { - x.Verb = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.ResourceAttributes != nil - yyq2[1] = x.NonResourceAttributes != nil - yyq2[2] = x.User != "" - yyq2[3] = len(x.Groups) != 0 - yyq2[4] = len(x.Extra) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.ResourceAttributes == nil { - r.EncodeNil() - } else { - x.ResourceAttributes.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceAttributes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ResourceAttributes == nil { - r.EncodeNil() - } else { - x.ResourceAttributes.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.NonResourceAttributes == nil { - r.EncodeNil() - } else { - x.NonResourceAttributes.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nonResourceAttributes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NonResourceAttributes == nil { - r.EncodeNil() - } else { - x.NonResourceAttributes.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("user")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.User)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Groups == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - 
z.F.EncSliceStringV(x.Groups, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("group")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Groups == nil { - r.EncodeNil() - } else { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - z.F.EncSliceStringV(x.Groups, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.Extra == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encMapstringSlicestring((map[string][]string)(x.Extra), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("extra")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Extra == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - h.encMapstringSlicestring((map[string][]string)(x.Extra), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "resourceAttributes": - if r.TryDecodeAsNil() { - if x.ResourceAttributes != nil { - x.ResourceAttributes = nil - } - } else { - if x.ResourceAttributes == nil { - x.ResourceAttributes = new(ResourceAttributes) - } - x.ResourceAttributes.CodecDecodeSelf(d) - } - case "nonResourceAttributes": - if r.TryDecodeAsNil() { - if x.NonResourceAttributes != nil { - x.NonResourceAttributes = nil - } - } else { - if x.NonResourceAttributes == nil { - x.NonResourceAttributes = new(NonResourceAttributes) - } - x.NonResourceAttributes.CodecDecodeSelf(d) - } - case "user": - if r.TryDecodeAsNil() { - x.User = "" - } else { - x.User = string(r.DecodeString()) - } - case "group": - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv7 := &x.Groups - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - 
z.F.DecSliceStringX(yyv7, false, d) - } - } - case "extra": - if r.TryDecodeAsNil() { - x.Extra = nil - } else { - yyv9 := &x.Extra - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decMapstringSlicestring((*map[string][]string)(yyv9), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ResourceAttributes != nil { - x.ResourceAttributes = nil - } - } else { - if x.ResourceAttributes == nil { - x.ResourceAttributes = new(ResourceAttributes) - } - x.ResourceAttributes.CodecDecodeSelf(d) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NonResourceAttributes != nil { - x.NonResourceAttributes = nil - } - } else { - if x.NonResourceAttributes == nil { - x.NonResourceAttributes = new(NonResourceAttributes) - } - x.NonResourceAttributes.CodecDecodeSelf(d) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.User = "" - } else { - x.User = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Groups = nil - } else { - yyv15 := &x.Groups - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - z.F.DecSliceStringX(yyv15, false, d) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Extra = nil - } else { - yyv17 := &x.Extra - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - h.decMapstringSlicestring((*map[string][]string)(yyv17), d) - } - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SelfSubjectAccessReviewSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] 
= x.ResourceAttributes != nil - yyq2[1] = x.NonResourceAttributes != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.ResourceAttributes == nil { - r.EncodeNil() - } else { - x.ResourceAttributes.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceAttributes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ResourceAttributes == nil { - r.EncodeNil() - } else { - x.ResourceAttributes.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.NonResourceAttributes == nil { - r.EncodeNil() - } else { - x.NonResourceAttributes.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nonResourceAttributes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NonResourceAttributes == nil { - r.EncodeNil() - } else { - x.NonResourceAttributes.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SelfSubjectAccessReviewSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "resourceAttributes": - if r.TryDecodeAsNil() { - if x.ResourceAttributes != nil { - x.ResourceAttributes = nil - } - } else { - if x.ResourceAttributes == nil { - x.ResourceAttributes = new(ResourceAttributes) - } - x.ResourceAttributes.CodecDecodeSelf(d) - } - case "nonResourceAttributes": - if r.TryDecodeAsNil() { - if x.NonResourceAttributes != nil { - x.NonResourceAttributes = nil - } - } else { - if x.NonResourceAttributes == nil { - x.NonResourceAttributes = new(NonResourceAttributes) - } - x.NonResourceAttributes.CodecDecodeSelf(d) - } - default: 
- z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SelfSubjectAccessReviewSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ResourceAttributes != nil { - x.ResourceAttributes = nil - } - } else { - if x.ResourceAttributes == nil { - x.ResourceAttributes = new(ResourceAttributes) - } - x.ResourceAttributes.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NonResourceAttributes != nil { - x.NonResourceAttributes = nil - } - } else { - if x.NonResourceAttributes == nil { - x.NonResourceAttributes = new(NonResourceAttributes) - } - x.NonResourceAttributes.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SubjectAccessReviewStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Reason != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.Allowed)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("allowed")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.Allowed)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x 
*SubjectAccessReviewStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SubjectAccessReviewStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "allowed": - if r.TryDecodeAsNil() { - x.Allowed = false - } else { - x.Allowed = bool(r.DecodeBool()) - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SubjectAccessReviewStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Allowed = false - } else { - x.Allowed = bool(r.DecodeBool()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encMapstringSlicestring(v map[string][]string, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk1, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyk1)) - } - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyv1 == nil { - r.EncodeNil() - } else { - yym3 := z.EncBinary() - _ = yym3 - if false { - } else { - z.F.EncSliceStringV(yyv1, false, e) - } 
- } - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decMapstringSlicestring(v *map[string][]string, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyl1 := r.ReadMapStart() - yybh1 := z.DecBasicHandle() - if yyv1 == nil { - yyrl1, _ := z.DecInferLen(yyl1, yybh1.MaxInitLen, 40) - yyv1 = make(map[string][]string, yyrl1) - *v = yyv1 - } - var yymk1 string - var yymv1 []string - var yymg1 bool - if yybh1.MapValueReset { - yymg1 = true - } - if yyl1 > 0 { - for yyj1 := 0; yyj1 < yyl1; yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yymk1 = string(r.DecodeString()) - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv3 := &yymv1 - yym4 := z.DecBinary() - _ = yym4 - if false { - } else { - z.F.DecSliceStringX(yyv3, false, d) - } - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } else if yyl1 < 0 { - for yyj1 := 0; !r.CheckBreak(); yyj1++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk1 = "" - } else { - yymk1 = string(r.DecodeString()) - } - - if yymg1 { - yymv1 = yyv1[yymk1] - } else { - yymv1 = nil - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv1 = nil - } else { - yyv6 := &yymv1 - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecSliceStringX(yyv6, false, d) - } - } - - if yyv1 != nil { - yyv1[yymk1] = yymv1 - } - } - } // else len==0: TODO: Should we clear map entries? - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) encSlicestring(v []string, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym2 := z.EncBinary() - _ = yym2 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyv1)) - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicestring(v *[]string, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []string{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]string, yyrl1) - } - } else { - yyv1 = make([]string, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = string(r.DecodeString()) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = string(r.DecodeString()) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); 
yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 string - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = string(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []string{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.go deleted file mode 100644 index 27078e9fc..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" -) - -// SubjectAccessReview checks whether or not a user or group can perform an action. -type SubjectAccessReview struct { - unversioned.TypeMeta `json:",inline"` - - // Spec holds information about the request being evaluated - Spec SubjectAccessReviewSpec `json:"spec"` - - // Status is filled in by the server and indicates whether the request is allowed or not - Status SubjectAccessReviewStatus `json:"status,omitempty"` -} - -// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a -// spec.namespace means "in all namespaces". Self is a special case, because users should always be able -// to check whether they can perform an action -type SelfSubjectAccessReview struct { - unversioned.TypeMeta `json:",inline"` - - // Spec holds information about the request being evaluated. user and groups must be empty - Spec SelfSubjectAccessReviewSpec `json:"spec"` - - // Status is filled in by the server and indicates whether the request is allowed or not - Status SubjectAccessReviewStatus `json:"status,omitempty"` -} - -// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. -// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions -// checking. -type LocalSubjectAccessReview struct { - unversioned.TypeMeta `json:",inline"` - - // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace - // you made the request against. If empty, it is defaulted. - Spec SubjectAccessReviewSpec `json:"spec"` - - // Status is filled in by the server and indicates whether the request is allowed or not - Status SubjectAccessReviewStatus `json:"status,omitempty"` -} - -// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface -type ResourceAttributes struct { - // Namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces - // "" (empty) is defaulted for LocalSubjectAccessReviews - // "" (empty) is empty for cluster-scoped resources - // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview - Namespace string `json:"namespace,omitempty"` - // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. - Verb string `json:"verb,omitempty"` - // Group is the API Group of the Resource. "*" means all. - Group string `json:"group,omitempty"` - // Version is the API Version of the Resource. "*" means all. - Version string `json:"version,omitempty"` - // Resource is one of the existing resource types. "*" means all. - Resource string `json:"resource,omitempty"` - // Subresource is one of the existing resource types. "" means none. - Subresource string `json:"subresource,omitempty"` - // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. - Name string `json:"name,omitempty"` -} - -// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface -type NonResourceAttributes struct { - // Path is the URL path of the request - Path string `json:"path,omitempty"` - // Verb is the standard HTTP verb - Verb string `json:"verb,omitempty"` -} - -// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes -// and NonResourceAuthorizationAttributes must be set -type SubjectAccessReviewSpec struct { - // ResourceAuthorizationAttributes describes information for a resource access request - ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty"` - // NonResourceAttributes describes information for a non-resource access request - NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty"` - - // User is the user you're testing for. - // If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups - User string `json:"user,omitempty"` - // Groups is the groups you're testing for. - Groups []string `json:"group,omitempty"` - // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer - // it needs a reflection here. - Extra map[string][]string `json:"extra,omitempty"` -} - -// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes -// and NonResourceAuthorizationAttributes must be set -type SelfSubjectAccessReviewSpec struct { - // ResourceAuthorizationAttributes describes information for a resource access request - ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty"` - // NonResourceAttributes describes information for a non-resource access request - NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty"` -} - -// SubjectAccessReviewStatus -type SubjectAccessReviewStatus struct { - // Allowed is required. True if the action would be allowed, false otherwise. - Allowed bool `json:"allowed"` - // Reason is optional. It indicates why a request was allowed or denied. 
- Reason string `json:"reason,omitempty"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index d4c337db7..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/authorization/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_LocalSubjectAccessReview = map[string]string{ - "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", - "spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. If empty, it is defaulted.", - "status": "Status is filled in by the server and indicates whether the request is allowed or not", -} - -func (LocalSubjectAccessReview) SwaggerDoc() map[string]string { - return map_LocalSubjectAccessReview -} - -var map_NonResourceAttributes = map[string]string{ - "": "NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface", - "path": "Path is the URL path of the request", - "verb": "Verb is the standard HTTP verb", -} - -func (NonResourceAttributes) SwaggerDoc() map[string]string { - return map_NonResourceAttributes -} - -var map_ResourceAttributes = map[string]string{ - "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", - "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", - "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", - "group": "Group is the API Group of the Resource. \"*\" means all.", - "version": "Version is the API Version of the Resource. 
\"*\" means all.", - "resource": "Resource is one of the existing resource types. \"*\" means all.", - "subresource": "Subresource is one of the existing resource types. \"\" means none.", - "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", -} - -func (ResourceAttributes) SwaggerDoc() map[string]string { - return map_ResourceAttributes -} - -var map_SelfSubjectAccessReview = map[string]string{ - "": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action", - "spec": "Spec holds information about the request being evaluated. user and groups must be empty", - "status": "Status is filled in by the server and indicates whether the request is allowed or not", -} - -func (SelfSubjectAccessReview) SwaggerDoc() map[string]string { - return map_SelfSubjectAccessReview -} - -var map_SelfSubjectAccessReviewSpec = map[string]string{ - "": "SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", - "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", - "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", -} - -func (SelfSubjectAccessReviewSpec) SwaggerDoc() map[string]string { - return map_SelfSubjectAccessReviewSpec -} - -var map_SubjectAccessReview = map[string]string{ - "": "SubjectAccessReview checks whether or not a user or group can perform an action.", - "spec": "Spec holds information about the request being evaluated", - "status": "Status is filled in by the server and indicates whether the request is allowed or not", -} - -func (SubjectAccessReview) SwaggerDoc() map[string]string { - return map_SubjectAccessReview -} - -var map_SubjectAccessReviewSpec = map[string]string{ - "": "SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", - "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", - "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", - "user": "User is the user you're testing for. If you specify \"User\" but not \"Group\", then is it interpreted as \"What if User were not a member of any groups", - "group": "Groups is the groups you're testing for.", - "extra": "Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.", -} - -func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { - return map_SubjectAccessReviewSpec -} - -var map_SubjectAccessReviewStatus = map[string]string{ - "": "SubjectAccessReviewStatus", - "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", - "reason": "Reason is optional. 
It indicates why a request was allowed or denied.", -} - -func (SubjectAccessReviewStatus) SwaggerDoc() map[string]string { - return map_SubjectAccessReviewStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/deep_copy_generated.go deleted file mode 100644 index d78bad7cb..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/deep_copy_generated.go +++ /dev/null @@ -1,165 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package autoscaling - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_autoscaling_CrossVersionObjectReference, - DeepCopy_autoscaling_HorizontalPodAutoscaler, - DeepCopy_autoscaling_HorizontalPodAutoscalerList, - DeepCopy_autoscaling_HorizontalPodAutoscalerSpec, - DeepCopy_autoscaling_HorizontalPodAutoscalerStatus, - DeepCopy_autoscaling_Scale, - DeepCopy_autoscaling_ScaleSpec, - DeepCopy_autoscaling_ScaleStatus, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. 
- panic(err) - } -} - -func DeepCopy_autoscaling_CrossVersionObjectReference(in CrossVersionObjectReference, out *CrossVersionObjectReference, c *conversion.Cloner) error { - out.Kind = in.Kind - out.Name = in.Name - out.APIVersion = in.APIVersion - return nil -} - -func DeepCopy_autoscaling_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_autoscaling_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_autoscaling_HorizontalPodAutoscalerStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_autoscaling_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]HorizontalPodAutoscaler, len(in)) - for i := range in { - if err := DeepCopy_autoscaling_HorizontalPodAutoscaler(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_autoscaling_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error { - if err := DeepCopy_autoscaling_CrossVersionObjectReference(in.ScaleTargetRef, &out.ScaleTargetRef, c); err != nil { - return err - } - if in.MinReplicas != nil { - in, out := in.MinReplicas, &out.MinReplicas - *out = new(int32) - **out = *in - } else { - out.MinReplicas = nil - } - out.MaxReplicas = in.MaxReplicas - if in.TargetCPUUtilizationPercentage != nil { - in, out := in.TargetCPUUtilizationPercentage, &out.TargetCPUUtilizationPercentage - *out = new(int32) - **out = *in - } else { - out.TargetCPUUtilizationPercentage = nil - } - return nil -} - -func DeepCopy_autoscaling_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error { - if in.ObservedGeneration != nil { - in, out := in.ObservedGeneration, &out.ObservedGeneration - *out = new(int64) - **out = *in - } else { - out.ObservedGeneration = nil - } - if in.LastScaleTime != nil { - in, out := in.LastScaleTime, &out.LastScaleTime - *out = new(unversioned.Time) - if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { - return err - } - } else { - out.LastScaleTime = nil - } - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas - if in.CurrentCPUUtilizationPercentage != nil { - in, out := in.CurrentCPUUtilizationPercentage, &out.CurrentCPUUtilizationPercentage - *out = new(int32) - **out = *in - } else { - out.CurrentCPUUtilizationPercentage = nil - } - return nil -} - -func DeepCopy_autoscaling_Scale(in Scale, out *Scale, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_autoscaling_ScaleSpec(in.Spec, &out.Spec, 
c); err != nil { - return err - } - if err := DeepCopy_autoscaling_ScaleStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_autoscaling_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error { - out.Replicas = in.Replicas - return nil -} - -func DeepCopy_autoscaling_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error { - out.Replicas = in.Replicas - out.Selector = in.Selector - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/install.go deleted file mode 100644 index 6e226a066..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/install/install.go +++ /dev/null @@ -1,129 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the experimental API group, making it available as -// an option to all of the API encoding/decoding machinery. -package install - -import ( - "fmt" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/autoscaling" - "k8s.io/kubernetes/pkg/apis/autoscaling/v1" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/sets" -) - -const importPrefix = "k8s.io/kubernetes/pkg/apis/autoscaling" - -var accessor = meta.NewAccessor() - -// availableVersions lists all known external versions for this group from most preferred to least preferred -var availableVersions = []unversioned.GroupVersion{v1.SchemeGroupVersion} - -func init() { - registered.RegisterVersions(availableVersions) - externalVersions := []unversioned.GroupVersion{} - for _, v := range availableVersions { - if registered.IsAllowedVersion(v) { - externalVersions = append(externalVersions, v) - } - } - if len(externalVersions) == 0 { - glog.V(4).Infof("No version is registered for group %v", autoscaling.GroupName) - return - } - - if err := registered.EnableVersions(externalVersions...); err != nil { - glog.V(4).Infof("%v", err) - return - } - if err := enableVersions(externalVersions); err != nil { - glog.V(4).Infof("%v", err) - return - } -} - -// TODO: enableVersions should be centralized rather than spread in each API -// group. -// We can combine registered.RegisterVersions, registered.EnableVersions and -// registered.RegisterGroup once we have moved enableVersions there. -func enableVersions(externalVersions []unversioned.GroupVersion) error { - addVersionsToScheme(externalVersions...) 
- preferredExternalVersion := externalVersions[0] - - groupMeta := apimachinery.GroupMeta{ - GroupVersion: preferredExternalVersion, - GroupVersions: externalVersions, - RESTMapper: newRESTMapper(externalVersions), - SelfLinker: runtime.SelfLinker(accessor), - InterfacesFor: interfacesFor, - } - - if err := registered.RegisterGroup(groupMeta); err != nil { - return err - } - api.RegisterRESTMapper(groupMeta.RESTMapper) - return nil -} - -func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { - // the list of kinds that are scoped at the root of the api hierarchy - // if a kind is not enumerated here, it is assumed to have a namespace scope - rootScoped := sets.NewString() - - ignoredKinds := sets.NewString() - - return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) -} - -// interfacesFor returns the default Codec and ResourceVersioner for a given version -// string, or an error if the version is not known. -func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { - switch version { - case v1.SchemeGroupVersion: - return &meta.VersionInterfaces{ - ObjectConvertor: api.Scheme, - MetadataAccessor: accessor, - }, nil - default: - g, _ := registered.Group(autoscaling.GroupName) - return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) - } -} - -func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { - // add the internal version to Scheme - autoscaling.AddToScheme(api.Scheme) - // add the enabled external versions to Scheme - for _, v := range externalVersions { - if !registered.IsEnabledVersion(v) { - glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) - continue - } - switch v { - case v1.SchemeGroupVersion: - v1.AddToScheme(api.Scheme) - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go deleted file mode 100644 index 6a4fb747b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package autoscaling - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// GroupName is the group name use in this package -const GroupName = "autoscaling" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns back a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -func AddToScheme(scheme *runtime.Scheme) { - // Add the API to Scheme. - addKnownTypes(scheme) -} - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) { - scheme.AddKnownTypes(SchemeGroupVersion, - &Scale{}, - &HorizontalPodAutoscaler{}, - &HorizontalPodAutoscalerList{}, - &api.ListOptions{}, - ) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.generated.go deleted file mode 100644 index fdd059190..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.generated.go +++ /dev/null @@ -1,2659 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package autoscaling - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg2_api "k8s.io/kubernetes/pkg/api" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg3_types "k8s.io/kubernetes/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg2_api.ObjectMeta - var v1 pkg1_unversioned.TypeMeta - var v2 pkg3_types.UID - var v3 time.Time - _, _, _, _ = v0, v1, v2, v3 - } -} - -func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, 
string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ScaleSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ScaleStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ScaleSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ScaleStatus{} - } else { - 
yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Replicas != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Selector != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Selector)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Selector)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 
:= r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "selector": - if r.TryDecodeAsNil() { - x.Selector = "" - } else { - x.Selector = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Selector = "" - } else { - x.Selector = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CrossVersionObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CrossVersionObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CrossVersionObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CrossVersionObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.MinReplicas != nil - yyq2[3] = x.TargetCPUUtilizationPercentage != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.ScaleTargetRef - yy4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("scaleTargetRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ScaleTargetRef - yy6.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.MinReplicas == nil { - r.EncodeNil() - } else { - yy9 := *x.MinReplicas - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MinReplicas == nil { - r.EncodeNil() - } else { - yy11 := *x.MinReplicas - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeInt(int64(yy11)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.MaxReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxReplicas")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeInt(int64(x.MaxReplicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.TargetCPUUtilizationPercentage == nil { - r.EncodeNil() - } else { - yy17 := *x.TargetCPUUtilizationPercentage - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeInt(int64(yy17)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetCPUUtilizationPercentage")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TargetCPUUtilizationPercentage == nil { - r.EncodeNil() - } else { - yy19 := *x.TargetCPUUtilizationPercentage - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(yy19)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscalerSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "scaleTargetRef": - if r.TryDecodeAsNil() { - x.ScaleTargetRef = CrossVersionObjectReference{} - } else { - yyv4 := &x.ScaleTargetRef - yyv4.CodecDecodeSelf(d) - } - case "minReplicas": - if r.TryDecodeAsNil() { - if x.MinReplicas != nil { - x.MinReplicas = nil - } - } else { - if x.MinReplicas == nil { - x.MinReplicas = new(int32) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) - } - } - case "maxReplicas": - if r.TryDecodeAsNil() { - x.MaxReplicas = 0 - } else { - x.MaxReplicas = int32(r.DecodeInt(32)) - } - case "targetCPUUtilizationPercentage": - if r.TryDecodeAsNil() { - if x.TargetCPUUtilizationPercentage != nil { - x.TargetCPUUtilizationPercentage = nil - } - } else { - if x.TargetCPUUtilizationPercentage == nil { - x.TargetCPUUtilizationPercentage = new(int32) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) - } - } - default: - 
z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ScaleTargetRef = CrossVersionObjectReference{} - } else { - yyv11 := &x.ScaleTargetRef - yyv11.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.MinReplicas != nil { - x.MinReplicas = nil - } - } else { - if x.MinReplicas == nil { - x.MinReplicas = new(int32) - } - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxReplicas = 0 - } else { - x.MaxReplicas = int32(r.DecodeInt(32)) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TargetCPUUtilizationPercentage != nil { - x.TargetCPUUtilizationPercentage = nil - } - } else { - if x.TargetCPUUtilizationPercentage == nil { - x.TargetCPUUtilizationPercentage = new(int32) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.ObservedGeneration != nil - yyq2[1] = x.LastScaleTime != nil - yyq2[4] = x.CurrentCPUUtilizationPercentage != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.ObservedGeneration == nil { - r.EncodeNil() - } else { - yy4 := *x.ObservedGeneration - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } 
- } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ObservedGeneration == nil { - r.EncodeNil() - } else { - yy6 := *x.ObservedGeneration - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.LastScaleTime == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { - } else if yym9 { - z.EncBinaryMarshal(x.LastScaleTime) - } else if !yym9 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScaleTime) - } else { - z.EncFallback(x.LastScaleTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastScaleTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LastScaleTime == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { - } else if yym10 { - z.EncBinaryMarshal(x.LastScaleTime) - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScaleTime) - } else { - z.EncFallback(x.LastScaleTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeInt(int64(x.CurrentReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.CurrentReplicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeInt(int64(x.DesiredReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("desiredReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.DesiredReplicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.CurrentCPUUtilizationPercentage == nil { - r.EncodeNil() - } else { - yy18 := *x.CurrentCPUUtilizationPercentage - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(yy18)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentCPUUtilizationPercentage")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CurrentCPUUtilizationPercentage == nil { - r.EncodeNil() - } else { - yy20 := *x.CurrentCPUUtilizationPercentage - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeInt(int64(yy20)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscalerStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "observedGeneration": - if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } - } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } - } - case "lastScaleTime": - if r.TryDecodeAsNil() { - if x.LastScaleTime != nil { - x.LastScaleTime = nil - } - } else { - if x.LastScaleTime == nil { - x.LastScaleTime = new(pkg1_unversioned.Time) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { - } else if yym7 { - z.DecBinaryUnmarshal(x.LastScaleTime) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScaleTime) - } else { - z.DecFallback(x.LastScaleTime, false) - } - } - case "currentReplicas": - if r.TryDecodeAsNil() { - x.CurrentReplicas = 0 - } else { - x.CurrentReplicas = int32(r.DecodeInt(32)) - } - case "desiredReplicas": - if r.TryDecodeAsNil() { - x.DesiredReplicas = 0 - } else { - x.DesiredReplicas = int32(r.DecodeInt(32)) - } - case "currentCPUUtilizationPercentage": - if r.TryDecodeAsNil() { - if x.CurrentCPUUtilizationPercentage != nil { - x.CurrentCPUUtilizationPercentage = nil - } - } else { - if x.CurrentCPUUtilizationPercentage == nil { - x.CurrentCPUUtilizationPercentage = new(int32) - } - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - 
x.ObservedGeneration = nil - } - } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LastScaleTime != nil { - x.LastScaleTime = nil - } - } else { - if x.LastScaleTime == nil { - x.LastScaleTime = new(pkg1_unversioned.Time) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { - } else if yym16 { - z.DecBinaryUnmarshal(x.LastScaleTime) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScaleTime) - } else { - z.DecFallback(x.LastScaleTime, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentReplicas = 0 - } else { - x.CurrentReplicas = int32(r.DecodeInt(32)) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DesiredReplicas = 0 - } else { - x.DesiredReplicas = int32(r.DecodeInt(32)) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CurrentCPUUtilizationPercentage != nil { - x.CurrentCPUUtilizationPercentage = nil - } - } else { - if x.CurrentCPUUtilizationPercentage == nil { - x.CurrentCPUUtilizationPercentage = new(int32) - } - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscaler) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscaler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var 
yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = HorizontalPodAutoscalerSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = HorizontalPodAutoscalerStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = HorizontalPodAutoscalerSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = HorizontalPodAutoscalerStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - 
_, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscalerList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && 
z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - 
if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutoscaler, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutoscaler, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []HorizontalPodAutoscaler{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 344) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]HorizontalPodAutoscaler, yyrl1) - } - } else { - yyv1 = make([]HorizontalPodAutoscaler, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HorizontalPodAutoscaler{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, HorizontalPodAutoscaler{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HorizontalPodAutoscaler{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, HorizontalPodAutoscaler{}) // var yyz1 HorizontalPodAutoscaler - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = HorizontalPodAutoscaler{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []HorizontalPodAutoscaler{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go deleted file mode 100644 index caafcde30..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package autoscaling - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" -) - -// Scale represents a scaling request for a resource. -type Scale struct { - unversioned.TypeMeta `json:",inline"` - // Standard object metadata; More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata. - api.ObjectMeta `json:"metadata,omitempty"` - - // defines the behavior of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. - Spec ScaleSpec `json:"spec,omitempty"` - - // current status of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. Read-only. - Status ScaleStatus `json:"status,omitempty"` -} - -// ScaleSpec describes the attributes of a scale subresource. -type ScaleSpec struct { - // desired number of instances for the scaled object. - Replicas int32 `json:"replicas,omitempty"` -} - -// ScaleStatus represents the current status of a scale subresource. -type ScaleStatus struct { - // actual number of observed instances of the scaled object. - Replicas int32 `json:"replicas"` - - // label query over pods that should match the replicas count. This is same - // as the label selector but in the string format to avoid introspection - // by clients. The string will be in the same format as the query-param syntax. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - Selector string `json:"selector,omitempty"` -} - -// CrossVersionObjectReference contains enough information to let you identify the referred resource. -type CrossVersionObjectReference struct { - // Kind of the referent; More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds" - Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names - Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // API version of the referent - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` -} - -// specification of a horizontal pod autoscaler. -type HorizontalPodAutoscalerSpec struct { - // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption - // and will set the desired number of pods by using its Scale subresource. - ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef"` - // lower limit for the number of pods that can be set by the autoscaler, default 1. - MinReplicas *int32 `json:"minReplicas,omitempty"` - // upper limit for the number of pods that can be set by the autoscaler. It cannot be smaller than MinReplicas. - MaxReplicas int32 `json:"maxReplicas"` - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; - // if not specified the default autoscaling policy will be used. 
- TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty"` -} - -// current status of a horizontal pod autoscaler -type HorizontalPodAutoscalerStatus struct { - // most recent generation observed by this autoscaler. - ObservedGeneration *int64 `json:"observedGeneration,omitempty"` - - // last time the HorizontalPodAutoscaler scaled the number of pods; - // used by the autoscaler to control how often the number of pods is changed. - LastScaleTime *unversioned.Time `json:"lastScaleTime,omitempty"` - - // current number of replicas of pods managed by this autoscaler. - CurrentReplicas int32 `json:"currentReplicas"` - - // desired number of replicas of pods managed by this autoscaler. - DesiredReplicas int32 `json:"desiredReplicas"` - - // current average CPU utilization over all pods, represented as a percentage of requested CPU, - // e.g. 70 means that an average pod is using now 70% of its requested CPU. - CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty"` -} - -// +genclient=true - -// configuration of a horizontal pod autoscaler. -type HorizontalPodAutoscaler struct { - unversioned.TypeMeta `json:",inline"` - api.ObjectMeta `json:"metadata,omitempty"` - - // behaviour of autoscaler. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. - Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty"` - - // current information about the autoscaler. - Status HorizontalPodAutoscalerStatus `json:"status,omitempty"` -} - -// list of horizontal pod autoscaler objects. -type HorizontalPodAutoscalerList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - - // list of horizontal pod autoscaler objects. - Items []HorizontalPodAutoscaler `json:"items"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion_generated.go deleted file mode 100644 index 11ca6a056..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/conversion_generated.go +++ /dev/null @@ -1,300 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! 
- -package v1 - -import ( - api "k8s.io/kubernetes/pkg/api" - autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedConversionFuncs( - Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference, - Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference, - Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler, - Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler, - Convert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList, - Convert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList, - Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec, - Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec, - Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus, - Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus, - Convert_v1_Scale_To_autoscaling_Scale, - Convert_autoscaling_Scale_To_v1_Scale, - Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec, - Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec, - Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus, - Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus, - ); err != nil { - // if one of the conversion functions is malformed, detect it immediately. - panic(err) - } -} - -func autoConvert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { - out.Kind = in.Kind - out.Name = in.Name - out.APIVersion = in.APIVersion - return nil -} - -func Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in *CrossVersionObjectReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { - return autoConvert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(in, out, s) -} - -func autoConvert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *CrossVersionObjectReference, s conversion.Scope) error { - out.Kind = in.Kind - out.Name = in.Name - out.APIVersion = in.APIVersion - return nil -} - -func Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in *autoscaling.CrossVersionObjectReference, out *CrossVersionObjectReference, s conversion.Scope) error { - return autoConvert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(in, out, s) -} - -func autoConvert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { - SetDefaults_HorizontalPodAutoscaler(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { - return autoConvert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s) -} - -func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { - return autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(in, out, s) -} - -func autoConvert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]autoscaling.HorizontalPodAutoscaler, len(*in)) - for i := range *in { - if err := Convert_v1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { - return autoConvert_v1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in, out, s) -} - -func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]HorizontalPodAutoscaler, len(*in)) - for i := range *in { - if err := 
Convert_autoscaling_HorizontalPodAutoscaler_To_v1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { - return autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1_HorizontalPodAutoscalerList(in, out, s) -} - -func autoConvert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { - if err := Convert_v1_CrossVersionObjectReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { - return err - } - out.MinReplicas = in.MinReplicas - out.MaxReplicas = in.MaxReplicas - out.TargetCPUUtilizationPercentage = in.TargetCPUUtilizationPercentage - return nil -} - -func Convert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { - return autoConvert_v1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in, out, s) -} - -func autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { - if err := Convert_autoscaling_CrossVersionObjectReference_To_v1_CrossVersionObjectReference(&in.ScaleTargetRef, &out.ScaleTargetRef, s); err != nil { - return err - } - out.MinReplicas = in.MinReplicas - out.MaxReplicas = in.MaxReplicas - out.TargetCPUUtilizationPercentage = in.TargetCPUUtilizationPercentage - return nil -} - -func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { - return autoConvert_autoscaling_HorizontalPodAutoscalerSpec_To_v1_HorizontalPodAutoscalerSpec(in, out, s) -} - -func autoConvert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { - out.ObservedGeneration = in.ObservedGeneration - out.LastScaleTime = in.LastScaleTime - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas - out.CurrentCPUUtilizationPercentage = in.CurrentCPUUtilizationPercentage - return nil -} - -func Convert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { - return autoConvert_v1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in, out, s) -} - -func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { - out.ObservedGeneration = in.ObservedGeneration - out.LastScaleTime = in.LastScaleTime - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas - out.CurrentCPUUtilizationPercentage = in.CurrentCPUUtilizationPercentage - return nil -} - -func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in 
*autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { - return autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1_HorizontalPodAutoscalerStatus(in, out, s) -} - -func autoConvert_v1_Scale_To_autoscaling_Scale(in *Scale, out *autoscaling.Scale, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_Scale_To_autoscaling_Scale(in *Scale, out *autoscaling.Scale, s conversion.Scope) error { - return autoConvert_v1_Scale_To_autoscaling_Scale(in, out, s) -} - -func autoConvert_autoscaling_Scale_To_v1_Scale(in *autoscaling.Scale, out *Scale, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_autoscaling_Scale_To_v1_Scale(in *autoscaling.Scale, out *Scale, s conversion.Scope) error { - return autoConvert_autoscaling_Scale_To_v1_Scale(in, out, s) -} - -func autoConvert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in *ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error { - out.Replicas = in.Replicas - return nil -} - -func Convert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in *ScaleSpec, out *autoscaling.ScaleSpec, s conversion.Scope) error { - return autoConvert_v1_ScaleSpec_To_autoscaling_ScaleSpec(in, out, s) -} - -func autoConvert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in *autoscaling.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { - out.Replicas = in.Replicas - return nil -} - -func Convert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in *autoscaling.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { - return autoConvert_autoscaling_ScaleSpec_To_v1_ScaleSpec(in, out, s) -} - -func autoConvert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in *ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.Selector = in.Selector - return nil -} - -func Convert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in *ScaleStatus, out *autoscaling.ScaleStatus, s conversion.Scope) error { - return autoConvert_v1_ScaleStatus_To_autoscaling_ScaleStatus(in, out, s) -} - -func autoConvert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in *autoscaling.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.Selector = in.Selector - return nil -} - -func Convert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in *autoscaling.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { - return autoConvert_autoscaling_ScaleStatus_To_v1_ScaleStatus(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/deep_copy_generated.go 
b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/deep_copy_generated.go deleted file mode 100644 index 6932ba638..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/deep_copy_generated.go +++ /dev/null @@ -1,166 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1 - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - api_v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_v1_CrossVersionObjectReference, - DeepCopy_v1_HorizontalPodAutoscaler, - DeepCopy_v1_HorizontalPodAutoscalerList, - DeepCopy_v1_HorizontalPodAutoscalerSpec, - DeepCopy_v1_HorizontalPodAutoscalerStatus, - DeepCopy_v1_Scale, - DeepCopy_v1_ScaleSpec, - DeepCopy_v1_ScaleStatus, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. - panic(err) - } -} - -func DeepCopy_v1_CrossVersionObjectReference(in CrossVersionObjectReference, out *CrossVersionObjectReference, c *conversion.Cloner) error { - out.Kind = in.Kind - out.Name = in.Name - out.APIVersion = in.APIVersion - return nil -} - -func DeepCopy_v1_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api_v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1_HorizontalPodAutoscalerStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]HorizontalPodAutoscaler, len(in)) - for i := range in { - if err := DeepCopy_v1_HorizontalPodAutoscaler(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error { - if err := DeepCopy_v1_CrossVersionObjectReference(in.ScaleTargetRef, &out.ScaleTargetRef, c); err != nil { - return err - } - if in.MinReplicas != nil { - in, out := in.MinReplicas, &out.MinReplicas - *out = new(int32) - **out = *in - } else { - out.MinReplicas = nil - } - out.MaxReplicas = 
in.MaxReplicas - if in.TargetCPUUtilizationPercentage != nil { - in, out := in.TargetCPUUtilizationPercentage, &out.TargetCPUUtilizationPercentage - *out = new(int32) - **out = *in - } else { - out.TargetCPUUtilizationPercentage = nil - } - return nil -} - -func DeepCopy_v1_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error { - if in.ObservedGeneration != nil { - in, out := in.ObservedGeneration, &out.ObservedGeneration - *out = new(int64) - **out = *in - } else { - out.ObservedGeneration = nil - } - if in.LastScaleTime != nil { - in, out := in.LastScaleTime, &out.LastScaleTime - *out = new(unversioned.Time) - if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { - return err - } - } else { - out.LastScaleTime = nil - } - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas - if in.CurrentCPUUtilizationPercentage != nil { - in, out := in.CurrentCPUUtilizationPercentage, &out.CurrentCPUUtilizationPercentage - *out = new(int32) - **out = *in - } else { - out.CurrentCPUUtilizationPercentage = nil - } - return nil -} - -func DeepCopy_v1_Scale(in Scale, out *Scale, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api_v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_ScaleSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1_ScaleStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error { - out.Replicas = in.Replicas - return nil -} - -func DeepCopy_v1_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error { - out.Replicas = in.Replicas - out.Selector = in.Selector - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/defaults.go deleted file mode 100644 index 3fb24c46b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/defaults.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "k8s.io/kubernetes/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) { - scheme.AddDefaultingFuncs( - SetDefaults_HorizontalPodAutoscaler, - ) -} - -func SetDefaults_HorizontalPodAutoscaler(obj *HorizontalPodAutoscaler) { - if obj.Spec.MinReplicas == nil { - minReplicas := int32(1) - obj.Spec.MinReplicas = &minReplicas - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go deleted file mode 100644 index 1c67cc3a9..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +genconversion=true -package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.pb.go deleted file mode 100644 index e90dd5d62..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.pb.go +++ /dev/null @@ -1,1612 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto -// DO NOT EDIT! - -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto - - It has these top-level messages: - CrossVersionObjectReference - HorizontalPodAutoscaler - HorizontalPodAutoscalerList - HorizontalPodAutoscalerSpec - HorizontalPodAutoscalerStatus - Scale - ScaleSpec - ScaleStatus -*/ -package v1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } -func (m *CrossVersionObjectReference) String() string { return proto.CompactTextString(m) } -func (*CrossVersionObjectReference) ProtoMessage() {} - -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (m *HorizontalPodAutoscaler) String() string { return proto.CompactTextString(m) } -func (*HorizontalPodAutoscaler) ProtoMessage() {} - -func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } -func (m *HorizontalPodAutoscalerList) String() string { return proto.CompactTextString(m) } -func (*HorizontalPodAutoscalerList) ProtoMessage() {} - -func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } -func (m *HorizontalPodAutoscalerSpec) String() string { return proto.CompactTextString(m) } -func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} - -func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } -func (m *HorizontalPodAutoscalerStatus) String() string { return proto.CompactTextString(m) } -func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} - -func (m *Scale) Reset() { *m = Scale{} } -func (m *Scale) String() string { return proto.CompactTextString(m) } -func (*Scale) ProtoMessage() {} - -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (m *ScaleSpec) String() string { return proto.CompactTextString(m) } -func (*ScaleSpec) ProtoMessage() {} - -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (m *ScaleStatus) String() string { return proto.CompactTextString(m) } -func (*ScaleStatus) ProtoMessage() {} - -func init() { - proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.CrossVersionObjectReference") - proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscaler") - proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerList") - proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerSpec") - proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.HorizontalPodAutoscalerStatus") - proto.RegisterType((*Scale)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.Scale") - proto.RegisterType((*ScaleSpec)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.ScaleSpec") - proto.RegisterType((*ScaleStatus)(nil), "k8s.io.kubernetes.pkg.apis.autoscaling.v1.ScaleStatus") -} -func (m *CrossVersionObjectReference) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *CrossVersionObjectReference) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) - i += copy(data[i:], m.APIVersion) - return i, nil -} - -func (m *HorizontalPodAutoscaler) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, 
err - } - return data[:n], nil -} - -func (m *HorizontalPodAutoscaler) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *HorizontalPodAutoscalerList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *HorizontalPodAutoscalerList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n4 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *HorizontalPodAutoscalerSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *HorizontalPodAutoscalerSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ScaleTargetRef.Size())) - n5, err := m.ScaleTargetRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.MinReplicas != nil { - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.MinReplicas)) - } - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.MaxReplicas)) - if m.TargetCPUUtilizationPercentage != nil { - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.TargetCPUUtilizationPercentage)) - } - return i, nil -} - -func (m *HorizontalPodAutoscalerStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *HorizontalPodAutoscalerStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ObservedGeneration != nil { - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastScaleTime.Size())) - n6, err := m.LastScaleTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n6 - } - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.CurrentReplicas)) - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(m.DesiredReplicas)) - if m.CurrentCPUUtilizationPercentage != nil { - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.CurrentCPUUtilizationPercentage)) - } - return i, nil -} - -func (m *Scale) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != 
nil { - return nil, err - } - return data[:n], nil -} - -func (m *Scale) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n7 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n8, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n8 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n9, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n9 - return i, nil -} - -func (m *ScaleSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ScaleSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) - return i, nil -} - -func (m *ScaleStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ScaleStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Selector))) - i += copy(data[i:], m.Selector) - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *CrossVersionObjectReference) Size() (n int) { - var l int - _ = l - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *HorizontalPodAutoscaler) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *HorizontalPodAutoscalerList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HorizontalPodAutoscalerSpec) Size() (n int) { - var l int - _ = l - l = m.ScaleTargetRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.MinReplicas != nil { - n += 1 + sovGenerated(uint64(*m.MinReplicas)) - } - n += 1 + sovGenerated(uint64(m.MaxReplicas)) - if m.TargetCPUUtilizationPercentage != nil { - n += 1 + 
sovGenerated(uint64(*m.TargetCPUUtilizationPercentage)) - } - return n -} - -func (m *HorizontalPodAutoscalerStatus) Size() (n int) { - var l int - _ = l - if m.ObservedGeneration != nil { - n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - l = m.LastScaleTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 1 + sovGenerated(uint64(m.CurrentReplicas)) - n += 1 + sovGenerated(uint64(m.DesiredReplicas)) - if m.CurrentCPUUtilizationPercentage != nil { - n += 1 + sovGenerated(uint64(*m.CurrentCPUUtilizationPercentage)) - } - return n -} - -func (m *Scale) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ScaleSpec) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - return n -} - -func (m *ScaleStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - l = len(m.Selector) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *CrossVersionObjectReference) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscaler) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, HorizontalPodAutoscaler{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << 
shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ScaleTargetRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.MinReplicas = &v - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) - } - m.MaxReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.MaxReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetCPUUtilizationPercentage", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TargetCPUUtilizationPercentage = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ObservedGeneration = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LastScaleTime == nil { - m.LastScaleTime = 
&k8s_io_kubernetes_pkg_api_unversioned.Time{} - } - if err := m.LastScaleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) - } - m.CurrentReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) - } - m.DesiredReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.DesiredReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentCPUUtilizationPercentage", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.CurrentCPUUtilizationPercentage = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Scale) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Scale: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := 
m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScaleSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScaleStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScaleStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for 
field Selector", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Selector = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto deleted file mode 100644 index b41980743..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/generated.proto +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.autoscaling.v1; - -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1"; - -// CrossVersionObjectReference contains enough information to let you identify the referred resource. -message CrossVersionObjectReference { - // Kind of the referent; More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds" - optional string kind = 1; - - // Name of the referent; More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names - optional string name = 2; - - // API version of the referent - optional string apiVersion = 3; -} - -// configuration of a horizontal pod autoscaler. -message HorizontalPodAutoscaler { - // Standard object metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // behaviour of autoscaler. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. - optional HorizontalPodAutoscalerSpec spec = 2; - - // current information about the autoscaler. - optional HorizontalPodAutoscalerStatus status = 3; -} - -// list of horizontal pod autoscaler objects. -message HorizontalPodAutoscalerList { - // Standard list metadata. - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // list of horizontal pod autoscaler objects. - repeated HorizontalPodAutoscaler items = 2; -} - -// specification of a horizontal pod autoscaler. -message HorizontalPodAutoscalerSpec { - // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption - // and will set the desired number of pods by using its Scale subresource. - optional CrossVersionObjectReference scaleTargetRef = 1; - - // lower limit for the number of pods that can be set by the autoscaler, default 1. - optional int32 minReplicas = 2; - - // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. - optional int32 maxReplicas = 3; - - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; - // if not specified the default autoscaling policy will be used. - optional int32 targetCPUUtilizationPercentage = 4; -} - -// current status of a horizontal pod autoscaler -message HorizontalPodAutoscalerStatus { - // most recent generation observed by this autoscaler. - optional int64 observedGeneration = 1; - - // last time the HorizontalPodAutoscaler scaled the number of pods; - // used by the autoscaler to control how often the number of pods is changed. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastScaleTime = 2; - - // current number of replicas of pods managed by this autoscaler. 
- optional int32 currentReplicas = 3; - - // desired number of replicas of pods managed by this autoscaler. - optional int32 desiredReplicas = 4; - - // current average CPU utilization over all pods, represented as a percentage of requested CPU, - // e.g. 70 means that an average pod is using now 70% of its requested CPU. - optional int32 currentCPUUtilizationPercentage = 5; -} - -// Scale represents a scaling request for a resource. -message Scale { - // Standard object metadata; More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // defines the behavior of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. - optional ScaleSpec spec = 2; - - // current status of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. Read-only. - optional ScaleStatus status = 3; -} - -// ScaleSpec describes the attributes of a scale subresource. -message ScaleSpec { - // desired number of instances for the scaled object. - optional int32 replicas = 1; -} - -// ScaleStatus represents the current status of a scale subresource. -message ScaleStatus { - // actual number of observed instances of the scaled object. - optional int32 replicas = 1; - - // label query over pods that should match the replicas count. This is same - // as the label selector but in the string format to avoid introspection - // by clients. The string will be in the same format as the query-param syntax. - // More info about label selectors: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - optional string selector = 2; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go deleted file mode 100644 index fed2cdf48..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/register.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" -) - -// GroupName is the group name use in this package -const GroupName = "autoscaling" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1"} - -func AddToScheme(scheme *runtime.Scheme) { - addKnownTypes(scheme) - addDefaultingFuncs(scheme) -} - -// Adds the list of known types to api.Scheme. 
-func addKnownTypes(scheme *runtime.Scheme) { - scheme.AddKnownTypes(SchemeGroupVersion, - &HorizontalPodAutoscaler{}, - &HorizontalPodAutoscalerList{}, - &Scale{}, - &v1.ListOptions{}, - &v1.DeleteOptions{}, - ) - versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.generated.go deleted file mode 100644 index d8401bb8b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.generated.go +++ /dev/null @@ -1,2659 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg2_v1 "k8s.io/kubernetes/pkg/api/v1" - pkg3_types "k8s.io/kubernetes/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_unversioned.Time - var v1 pkg2_v1.ObjectMeta - var v2 pkg3_types.UID - var v3 time.Time - _, _, _, _ = v0, v1, v2, v3 - } -} - -func (x *CrossVersionObjectReference) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CrossVersionObjectReference) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CrossVersionObjectReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CrossVersionObjectReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.MinReplicas != nil - yyq2[3] = x.TargetCPUUtilizationPercentage != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.ScaleTargetRef - yy4.CodecEncodeSelf(e) - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("scaleTargetRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ScaleTargetRef - yy6.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.MinReplicas == nil { - r.EncodeNil() - } else { - yy9 := *x.MinReplicas - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MinReplicas == nil { - r.EncodeNil() - } else { - yy11 := *x.MinReplicas - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeInt(int64(yy11)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.MaxReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeInt(int64(x.MaxReplicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.TargetCPUUtilizationPercentage == nil { - r.EncodeNil() - } else { - yy17 := *x.TargetCPUUtilizationPercentage - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeInt(int64(yy17)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetCPUUtilizationPercentage")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TargetCPUUtilizationPercentage == nil { - r.EncodeNil() - } else { - yy19 := *x.TargetCPUUtilizationPercentage - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(yy19)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscalerSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if 
r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "scaleTargetRef": - if r.TryDecodeAsNil() { - x.ScaleTargetRef = CrossVersionObjectReference{} - } else { - yyv4 := &x.ScaleTargetRef - yyv4.CodecDecodeSelf(d) - } - case "minReplicas": - if r.TryDecodeAsNil() { - if x.MinReplicas != nil { - x.MinReplicas = nil - } - } else { - if x.MinReplicas == nil { - x.MinReplicas = new(int32) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) - } - } - case "maxReplicas": - if r.TryDecodeAsNil() { - x.MaxReplicas = 0 - } else { - x.MaxReplicas = int32(r.DecodeInt(32)) - } - case "targetCPUUtilizationPercentage": - if r.TryDecodeAsNil() { - if x.TargetCPUUtilizationPercentage != nil { - x.TargetCPUUtilizationPercentage = nil - } - } else { - if x.TargetCPUUtilizationPercentage == nil { - x.TargetCPUUtilizationPercentage = new(int32) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ScaleTargetRef = CrossVersionObjectReference{} - } else { - yyv11 := &x.ScaleTargetRef - yyv11.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.MinReplicas != nil { - x.MinReplicas = nil - } - } else { - if x.MinReplicas == nil { - x.MinReplicas = new(int32) - } - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxReplicas = 0 - } else { - x.MaxReplicas = int32(r.DecodeInt(32)) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TargetCPUUtilizationPercentage != nil { - x.TargetCPUUtilizationPercentage = nil - } - } else { - if x.TargetCPUUtilizationPercentage == nil { - x.TargetCPUUtilizationPercentage = new(int32) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*int32)(x.TargetCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) - } - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } 
- if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.ObservedGeneration != nil - yyq2[1] = x.LastScaleTime != nil - yyq2[4] = x.CurrentCPUUtilizationPercentage != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.ObservedGeneration == nil { - r.EncodeNil() - } else { - yy4 := *x.ObservedGeneration - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ObservedGeneration == nil { - r.EncodeNil() - } else { - yy6 := *x.ObservedGeneration - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.LastScaleTime == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { - } else if yym9 { - z.EncBinaryMarshal(x.LastScaleTime) - } else if !yym9 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScaleTime) - } else { - z.EncFallback(x.LastScaleTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastScaleTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LastScaleTime == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { - } else if yym10 { - z.EncBinaryMarshal(x.LastScaleTime) - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScaleTime) - } else { - z.EncFallback(x.LastScaleTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeInt(int64(x.CurrentReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.CurrentReplicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeInt(int64(x.DesiredReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("desiredReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.DesiredReplicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.CurrentCPUUtilizationPercentage == nil { - r.EncodeNil() - } else { - yy18 := *x.CurrentCPUUtilizationPercentage - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(yy18)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentCPUUtilizationPercentage")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CurrentCPUUtilizationPercentage == nil { - r.EncodeNil() - } else { - yy20 := *x.CurrentCPUUtilizationPercentage - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeInt(int64(yy20)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscalerStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "observedGeneration": - if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } - } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } - } - case "lastScaleTime": - if r.TryDecodeAsNil() { - if x.LastScaleTime != nil { - x.LastScaleTime = nil - } - } else { - if x.LastScaleTime == nil { - x.LastScaleTime = new(pkg1_unversioned.Time) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { - } else if yym7 { - z.DecBinaryUnmarshal(x.LastScaleTime) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScaleTime) - } else { - z.DecFallback(x.LastScaleTime, false) - } - } - case "currentReplicas": - if r.TryDecodeAsNil() { - x.CurrentReplicas = 0 - } else { - x.CurrentReplicas = int32(r.DecodeInt(32)) - } - 
case "desiredReplicas": - if r.TryDecodeAsNil() { - x.DesiredReplicas = 0 - } else { - x.DesiredReplicas = int32(r.DecodeInt(32)) - } - case "currentCPUUtilizationPercentage": - if r.TryDecodeAsNil() { - if x.CurrentCPUUtilizationPercentage != nil { - x.CurrentCPUUtilizationPercentage = nil - } - } else { - if x.CurrentCPUUtilizationPercentage == nil { - x.CurrentCPUUtilizationPercentage = new(int32) - } - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } - } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LastScaleTime != nil { - x.LastScaleTime = nil - } - } else { - if x.LastScaleTime == nil { - x.LastScaleTime = new(pkg1_unversioned.Time) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { - } else if yym16 { - z.DecBinaryUnmarshal(x.LastScaleTime) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScaleTime) - } else { - z.DecFallback(x.LastScaleTime, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentReplicas = 0 - } else { - x.CurrentReplicas = int32(r.DecodeInt(32)) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DesiredReplicas = 0 - } else { - x.DesiredReplicas = int32(r.DecodeInt(32)) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CurrentCPUUtilizationPercentage != nil { - x.CurrentCPUUtilizationPercentage = nil - } - } else { - if x.CurrentCPUUtilizationPercentage == nil { - x.CurrentCPUUtilizationPercentage = new(int32) - } - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l 
- } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscaler) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscaler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = HorizontalPodAutoscalerSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = HorizontalPodAutoscalerStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = HorizontalPodAutoscalerSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.Status = HorizontalPodAutoscalerStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscalerList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > 
l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := 
&x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ScaleSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ScaleStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - 
} else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ScaleSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ScaleStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Replicas != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Selector != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := 
z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Selector)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Selector)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "selector": - if r.TryDecodeAsNil() { - x.Selector = "" - } else { - x.Selector = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if 
yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Selector = "" - } else { - x.Selector = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutoscaler, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutoscaler, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []HorizontalPodAutoscaler{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 344) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]HorizontalPodAutoscaler, yyrl1) - } - } else { - yyv1 = make([]HorizontalPodAutoscaler, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HorizontalPodAutoscaler{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, HorizontalPodAutoscaler{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HorizontalPodAutoscaler{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, HorizontalPodAutoscaler{}) // var yyz1 HorizontalPodAutoscaler - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = HorizontalPodAutoscaler{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []HorizontalPodAutoscaler{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.go deleted file mode 100644 index 7acb52720..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types.go +++ /dev/null @@ -1,122 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" -) - -// CrossVersionObjectReference contains enough information to let you identify the referred resource. -type CrossVersionObjectReference struct { - // Kind of the referent; More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds" - Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names - Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // API version of the referent - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` -} - -// specification of a horizontal pod autoscaler. -type HorizontalPodAutoscalerSpec struct { - // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption - // and will set the desired number of pods by using its Scale subresource. - ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` - // lower limit for the number of pods that can be set by the autoscaler, default 1. - MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` - // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. - MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; - // if not specified the default autoscaling policy will be used. - TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty" protobuf:"varint,4,opt,name=targetCPUUtilizationPercentage"` -} - -// current status of a horizontal pod autoscaler -type HorizontalPodAutoscalerStatus struct { - // most recent generation observed by this autoscaler. - ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - - // last time the HorizontalPodAutoscaler scaled the number of pods; - // used by the autoscaler to control how often the number of pods is changed. - LastScaleTime *unversioned.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` - - // current number of replicas of pods managed by this autoscaler. - CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` - - // desired number of replicas of pods managed by this autoscaler. - DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` - - // current average CPU utilization over all pods, represented as a percentage of requested CPU, - // e.g. 70 means that an average pod is using now 70% of its requested CPU. - CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty" protobuf:"varint,5,opt,name=currentCPUUtilizationPercentage"` -} - -// +genclient=true - -// configuration of a horizontal pod autoscaler. 
-type HorizontalPodAutoscaler struct { - unversioned.TypeMeta `json:",inline"` - // Standard object metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // behaviour of autoscaler. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. - Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // current information about the autoscaler. - Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// list of horizontal pod autoscaler objects. -type HorizontalPodAutoscalerList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // list of horizontal pod autoscaler objects. - Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// Scale represents a scaling request for a resource. -type Scale struct { - unversioned.TypeMeta `json:",inline"` - // Standard object metadata; More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata. - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // defines the behavior of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. - Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // current status of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. Read-only. - Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// ScaleSpec describes the attributes of a scale subresource. -type ScaleSpec struct { - // desired number of instances for the scaled object. - Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` -} - -// ScaleStatus represents the current status of a scale subresource. -type ScaleStatus struct { - // actual number of observed instances of the scaled object. - Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - - // label query over pods that should match the replicas count. This is same - // as the label selector but in the string format to avoid introspection - // by clients. The string will be in the same format as the query-param syntax. - // More info about label selectors: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - Selector string `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go deleted file mode 100644 index 904e36a6a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_CrossVersionObjectReference = map[string]string{ - "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds\"", - "name": "Name of the referent; More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names", - "apiVersion": "API version of the referent", -} - -func (CrossVersionObjectReference) SwaggerDoc() map[string]string { - return map_CrossVersionObjectReference -} - -var map_HorizontalPodAutoscaler = map[string]string{ - "": "configuration of a horizontal pod autoscaler.", - "metadata": "Standard object metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "behaviour of autoscaler. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status.", - "status": "current information about the autoscaler.", -} - -func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscaler -} - -var map_HorizontalPodAutoscalerList = map[string]string{ - "": "list of horizontal pod autoscaler objects.", - "metadata": "Standard list metadata.", - "items": "list of horizontal pod autoscaler objects.", -} - -func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerList -} - -var map_HorizontalPodAutoscalerSpec = map[string]string{ - "": "specification of a horizontal pod autoscaler.", - "scaleTargetRef": "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.", - "minReplicas": "lower limit for the number of pods that can be set by the autoscaler, default 1.", - "maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", - "targetCPUUtilizationPercentage": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", -} - -func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerSpec -} - -var map_HorizontalPodAutoscalerStatus = map[string]string{ - "": "current status of a horizontal pod autoscaler", - "observedGeneration": "most recent generation observed by this autoscaler.", - "lastScaleTime": "last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.", - "currentReplicas": "current number of replicas of pods managed by this autoscaler.", - "desiredReplicas": "desired number of replicas of pods managed by this autoscaler.", - 
"currentCPUUtilizationPercentage": "current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.", -} - -func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerStatus -} - -var map_Scale = map[string]string{ - "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. Read-only.", -} - -func (Scale) SwaggerDoc() map[string]string { - return map_Scale -} - -var map_ScaleSpec = map[string]string{ - "": "ScaleSpec describes the attributes of a scale subresource.", - "replicas": "desired number of instances for the scaled object.", -} - -func (ScaleSpec) SwaggerDoc() map[string]string { - return map_ScaleSpec -} - -var map_ScaleStatus = map[string]string{ - "": "ScaleStatus represents the current status of a scale subresource.", - "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. More info about label selectors: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors", -} - -func (ScaleStatus) SwaggerDoc() map[string]string { - return map_ScaleStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/deep_copy_generated.go deleted file mode 100644 index 6e08ec410..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/deep_copy_generated.go +++ /dev/null @@ -1,284 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package batch - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_batch_Job, - DeepCopy_batch_JobCondition, - DeepCopy_batch_JobList, - DeepCopy_batch_JobSpec, - DeepCopy_batch_JobStatus, - DeepCopy_batch_JobTemplate, - DeepCopy_batch_JobTemplateSpec, - DeepCopy_batch_ScheduledJob, - DeepCopy_batch_ScheduledJobList, - DeepCopy_batch_ScheduledJobSpec, - DeepCopy_batch_ScheduledJobStatus, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. 
- panic(err) - } -} - -func DeepCopy_batch_Job(in Job, out *Job, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_batch_JobSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_batch_JobStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_batch_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error { - out.Type = in.Type - out.Status = in.Status - if err := unversioned.DeepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func DeepCopy_batch_JobList(in JobList, out *JobList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Job, len(in)) - for i := range in { - if err := DeepCopy_batch_Job(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_batch_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error { - if in.Parallelism != nil { - in, out := in.Parallelism, &out.Parallelism - *out = new(int32) - **out = *in - } else { - out.Parallelism = nil - } - if in.Completions != nil { - in, out := in.Completions, &out.Completions - *out = new(int32) - **out = *in - } else { - out.Completions = nil - } - if in.ActiveDeadlineSeconds != nil { - in, out := in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - *out = new(int64) - **out = *in - } else { - out.ActiveDeadlineSeconds = nil - } - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.Selector = nil - } - if in.ManualSelector != nil { - in, out := in.ManualSelector, &out.ManualSelector - *out = new(bool) - **out = *in - } else { - out.ManualSelector = nil - } - if err := api.DeepCopy_api_PodTemplateSpec(in.Template, &out.Template, c); err != nil { - return err - } - return nil -} - -func DeepCopy_batch_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error { - if in.Conditions != nil { - in, out := in.Conditions, &out.Conditions - *out = make([]JobCondition, len(in)) - for i := range in { - if err := DeepCopy_batch_JobCondition(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - if in.StartTime != nil { - in, out := in.StartTime, &out.StartTime - *out = new(unversioned.Time) - if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { - return err - } - } else { - out.StartTime = nil - } - if in.CompletionTime != nil { - in, out := in.CompletionTime, &out.CompletionTime - *out = new(unversioned.Time) - if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { - return err - } - } else { - out.CompletionTime = nil - } - out.Active = in.Active - out.Succeeded = in.Succeeded - 
out.Failed = in.Failed - return nil -} - -func DeepCopy_batch_JobTemplate(in JobTemplate, out *JobTemplate, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_batch_JobTemplateSpec(in.Template, &out.Template, c); err != nil { - return err - } - return nil -} - -func DeepCopy_batch_JobTemplateSpec(in JobTemplateSpec, out *JobTemplateSpec, c *conversion.Cloner) error { - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_batch_JobSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - return nil -} - -func DeepCopy_batch_ScheduledJob(in ScheduledJob, out *ScheduledJob, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_batch_ScheduledJobSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_batch_ScheduledJobStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_batch_ScheduledJobList(in ScheduledJobList, out *ScheduledJobList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ScheduledJob, len(in)) - for i := range in { - if err := DeepCopy_batch_ScheduledJob(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_batch_ScheduledJobSpec(in ScheduledJobSpec, out *ScheduledJobSpec, c *conversion.Cloner) error { - out.Schedule = in.Schedule - if in.StartingDeadlineSeconds != nil { - in, out := in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds - *out = new(int64) - **out = *in - } else { - out.StartingDeadlineSeconds = nil - } - out.ConcurrencyPolicy = in.ConcurrencyPolicy - if in.Suspend != nil { - in, out := in.Suspend, &out.Suspend - *out = new(bool) - **out = *in - } else { - out.Suspend = nil - } - if err := DeepCopy_batch_JobTemplateSpec(in.JobTemplate, &out.JobTemplate, c); err != nil { - return err - } - return nil -} - -func DeepCopy_batch_ScheduledJobStatus(in ScheduledJobStatus, out *ScheduledJobStatus, c *conversion.Cloner) error { - if in.Active != nil { - in, out := in.Active, &out.Active - *out = make([]api.ObjectReference, len(in)) - for i := range in { - if err := api.DeepCopy_api_ObjectReference(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Active = nil - } - if in.LastScheduleTime != nil { - in, out := in.LastScheduleTime, &out.LastScheduleTime - *out = new(unversioned.Time) - if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { - return err - } - } else { - out.LastScheduleTime = nil - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/install/install.go deleted file mode 100644 index 9d1a88603..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/install/install.go +++ /dev/null @@ -1,137 +0,0 @@ -/* 
-Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the batch API group, making it available as -// an option to all of the API encoding/decoding machinery. -package install - -import ( - "fmt" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/batch/v1" - "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/sets" -) - -const importPrefix = "k8s.io/kubernetes/pkg/apis/batch" - -var accessor = meta.NewAccessor() - -// availableVersions lists all known external versions for this group from most preferred to least preferred -var availableVersions = []unversioned.GroupVersion{v1.SchemeGroupVersion, v2alpha1.SchemeGroupVersion} - -func init() { - registered.RegisterVersions(availableVersions) - externalVersions := []unversioned.GroupVersion{} - for _, v := range availableVersions { - if registered.IsAllowedVersion(v) { - externalVersions = append(externalVersions, v) - } - } - if len(externalVersions) == 0 { - glog.V(4).Infof("No version is registered for group %v", batch.GroupName) - return - } - - if err := registered.EnableVersions(externalVersions...); err != nil { - glog.V(4).Infof("%v", err) - return - } - if err := enableVersions(externalVersions); err != nil { - glog.V(4).Infof("%v", err) - return - } -} - -// TODO: enableVersions should be centralized rather than spread in each API -// group. -// We can combine registered.RegisterVersions, registered.EnableVersions and -// registered.RegisterGroup once we have moved enableVersions there. -func enableVersions(externalVersions []unversioned.GroupVersion) error { - addVersionsToScheme(externalVersions...) - preferredExternalVersion := externalVersions[0] - - groupMeta := apimachinery.GroupMeta{ - GroupVersion: preferredExternalVersion, - GroupVersions: externalVersions, - RESTMapper: newRESTMapper(externalVersions), - SelfLinker: runtime.SelfLinker(accessor), - InterfacesFor: interfacesFor, - } - - if err := registered.RegisterGroup(groupMeta); err != nil { - return err - } - api.RegisterRESTMapper(groupMeta.RESTMapper) - return nil -} - -func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { - // the list of kinds that are scoped at the root of the api hierarchy - // if a kind is not enumerated here, it is assumed to have a namespace scope - rootScoped := sets.NewString() - - ignoredKinds := sets.NewString() - - return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) -} - -// interfacesFor returns the default Codec and ResourceVersioner for a given version -// string, or an error if the version is not known. 
-func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { - switch version { - case v1.SchemeGroupVersion: - return &meta.VersionInterfaces{ - ObjectConvertor: api.Scheme, - MetadataAccessor: accessor, - }, nil - case v2alpha1.SchemeGroupVersion: - return &meta.VersionInterfaces{ - ObjectConvertor: api.Scheme, - MetadataAccessor: accessor, - }, nil - default: - g, _ := registered.Group(batch.GroupName) - return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) - } -} - -func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { - // add the internal version to Scheme - batch.AddToScheme(api.Scheme) - // add the enabled external versions to Scheme - for _, v := range externalVersions { - if !registered.IsEnabledVersion(v) { - glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) - continue - } - switch v { - case v1.SchemeGroupVersion: - v1.AddToScheme(api.Scheme) - case v2alpha1.SchemeGroupVersion: - v2alpha1.AddToScheme(api.Scheme) - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/register.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/register.go deleted file mode 100644 index cafa3fd89..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/register.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package batch - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// GroupName is the group name use in this package -const GroupName = "batch" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns back a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -func AddToScheme(scheme *runtime.Scheme) { - // Add the API to Scheme. - addKnownTypes(scheme) -} - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) { - scheme.AddKnownTypes(SchemeGroupVersion, - &Job{}, - &JobList{}, - &JobTemplate{}, - &ScheduledJob{}, - &ScheduledJobList{}, - &api.ListOptions{}, - ) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/types.generated.go deleted file mode 100644 index 14c44a0f8..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/types.generated.go +++ /dev/null @@ -1,4671 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package batch - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg2_api "k8s.io/kubernetes/pkg/api" - pkg4_resource "k8s.io/kubernetes/pkg/api/resource" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg3_types "k8s.io/kubernetes/pkg/types" - pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg2_api.ObjectMeta - var v1 pkg4_resource.Quantity - var v2 pkg1_unversioned.TypeMeta - var v3 pkg3_types.UID - var v4 pkg5_intstr.IntOrString - var v5 time.Time - _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 - } -} - -func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if 
false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Job) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = JobStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.Status = JobStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceJob(([]Job)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceJob(([]Job)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceJob((*[]Job)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - 
x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceJob((*[]Job)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobTemplate) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Template - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Template - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobTemplate) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "template": - if r.TryDecodeAsNil() { - x.Template = JobTemplateSpec{} - } else { - yyv5 := &x.Template - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv9 := &x.ObjectMeta - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = JobTemplateSpec{} - } else { - yyv10 := &x.Template - yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else 
{ - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv7 := &x.ObjectMeta - yyv7.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv8 := &x.Spec - yyv8.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Parallelism != nil - yyq2[1] = x.Completions != nil - yyq2[2] = x.ActiveDeadlineSeconds != nil - yyq2[3] = x.Selector != nil - yyq2[4] = x.ManualSelector != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Parallelism == nil { - r.EncodeNil() - } else { - yy4 := *x.Parallelism - yym5 := 
z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("parallelism")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Parallelism == nil { - r.EncodeNil() - } else { - yy6 := *x.Parallelism - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Completions == nil { - r.EncodeNil() - } else { - yy9 := *x.Completions - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("completions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Completions == nil { - r.EncodeNil() - } else { - yy11 := *x.Completions - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeInt(int64(yy11)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy14 := *x.ActiveDeadlineSeconds - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeInt(int64(yy14)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy16 := *x.ActiveDeadlineSeconds - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(yy16)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.ManualSelector == nil { - r.EncodeNil() - } else { - yy22 := *x.ManualSelector - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeBool(bool(yy22)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("manualSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ManualSelector == nil { - r.EncodeNil() - } else { - yy24 := *x.ManualSelector - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeBool(bool(yy24)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy27 := &x.Template - yy27.CodecEncodeSelf(e) 
- } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy29 := &x.Template - yy29.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "parallelism": - if r.TryDecodeAsNil() { - if x.Parallelism != nil { - x.Parallelism = nil - } - } else { - if x.Parallelism == nil { - x.Parallelism = new(int32) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) - } - } - case "completions": - if r.TryDecodeAsNil() { - if x.Completions != nil { - x.Completions = nil - } - } else { - if x.Completions == nil { - x.Completions = new(int32) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) - } - } - case "activeDeadlineSeconds": - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym11 := z.DecBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "manualSelector": - if r.TryDecodeAsNil() { - if x.ManualSelector != nil { - x.ManualSelector = nil - } - } else { - if x.ManualSelector == nil { - x.ManualSelector = new(bool) - } - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - *((*bool)(x.ManualSelector)) = r.DecodeBool() - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv14 := &x.Template - yyv14.CodecDecodeSelf(d) - } - default: - 
z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj15 int - var yyb15 bool - var yyhl15 bool = l >= 0 - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Parallelism != nil { - x.Parallelism = nil - } - } else { - if x.Parallelism == nil { - x.Parallelism = new(int32) - } - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Completions != nil { - x.Completions = nil - } - } else { - if x.Completions == nil { - x.Completions = new(int32) - } - yym19 := z.DecBinary() - _ = yym19 - if false { - } else { - *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym21 := z.DecBinary() - _ = yym21 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym23 := z.DecBinary() - _ = yym23 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ManualSelector != nil { - x.ManualSelector = nil - } - } else { - if x.ManualSelector == nil { - x.ManualSelector = new(bool) - } - yym25 := z.DecBinary() - _ = yym25 - if false { - } else { - *((*bool)(x.ManualSelector)) = r.DecodeBool() - } - } - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv26 := &x.Template - yyv26.CodecDecodeSelf(d) - } - for { - yyj15++ - if yyhl15 { - yyb15 = yyj15 > l - } else { - yyb15 = r.CheckBreak() - } - if yyb15 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - 
z.DecStructFieldNotFound(yyj15-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Conditions) != 0 - yyq2[1] = x.StartTime != nil - yyq2[2] = x.CompletionTime != nil - yyq2[3] = x.Active != 0 - yyq2[4] = x.Succeeded != 0 - yyq2[5] = x.Failed != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.StartTime == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym7 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.StartTime == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym8 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.CompletionTime == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { - } else if yym10 { - z.EncBinaryMarshal(x.CompletionTime) - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(x.CompletionTime) - } else { - z.EncFallback(x.CompletionTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("completionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CompletionTime == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { - } else if yym11 { - 
z.EncBinaryMarshal(x.CompletionTime) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(x.CompletionTime) - } else { - z.EncFallback(x.CompletionTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.Active)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("active")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.Active)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.Succeeded)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("succeeded")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(x.Succeeded)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(x.Failed)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("failed")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(x.Failed)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv4 := &x.Conditions - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceJobCondition((*[]JobCondition)(yyv4), d) - } - } 
- case "startTime": - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg1_unversioned.Time) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym7 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - case "completionTime": - if r.TryDecodeAsNil() { - if x.CompletionTime != nil { - x.CompletionTime = nil - } - } else { - if x.CompletionTime == nil { - x.CompletionTime = new(pkg1_unversioned.Time) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { - } else if yym9 { - z.DecBinaryUnmarshal(x.CompletionTime) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.CompletionTime) - } else { - z.DecFallback(x.CompletionTime, false) - } - } - case "active": - if r.TryDecodeAsNil() { - x.Active = 0 - } else { - x.Active = int32(r.DecodeInt(32)) - } - case "succeeded": - if r.TryDecodeAsNil() { - x.Succeeded = 0 - } else { - x.Succeeded = int32(r.DecodeInt(32)) - } - case "failed": - if r.TryDecodeAsNil() { - x.Failed = 0 - } else { - x.Failed = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv14 := &x.Conditions - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - h.decSliceJobCondition((*[]JobCondition)(yyv14), d) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg1_unversioned.Time) - } - yym17 := z.DecBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym17 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym17 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CompletionTime != nil { - x.CompletionTime = nil - } - } else { - if x.CompletionTime == nil { - x.CompletionTime = new(pkg1_unversioned.Time) - } - yym19 := z.DecBinary() - _ = yym19 - if false { - } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { - } else if yym19 { - z.DecBinaryUnmarshal(x.CompletionTime) - } else if !yym19 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.CompletionTime) - } else { - z.DecFallback(x.CompletionTime, false) - } - } - 
yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Active = 0 - } else { - x.Active = int32(r.DecodeInt(32)) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Succeeded = 0 - } else { - x.Succeeded = int32(r.DecodeInt(32)) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Failed = 0 - } else { - x.Failed = int32(r.DecodeInt(32)) - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x JobConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *JobConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = true - yyq2[3] = true - yyq2[4] = x.Reason != "" - yyq2[5] = x.Message != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yysf7 := &x.Status - yysf7.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yysf8 := &x.Status - yysf8.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.LastProbeTime - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else if 
yym11 { - z.EncBinaryMarshal(yy10) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(yy10) - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.LastProbeTime - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if yym13 { - z.EncBinaryMarshal(yy12) - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.LastTransitionTime - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(yy15) { - } else if yym16 { - z.EncBinaryMarshal(yy15) - } else if !yym16 && z.IsJSONHandle() { - z.EncJSONMarshal(yy15) - } else { - z.EncFallback(yy15) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.LastTransitionTime - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(yy17) { - } else if yym18 { - z.EncBinaryMarshal(yy17) - } else if !yym18 && z.IsJSONHandle() { - z.EncJSONMarshal(yy17) - } else { - z.EncFallback(yy17) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() 
- if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = JobConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg2_api.ConditionStatus(r.DecodeString()) - } - case "lastProbeTime": - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg1_unversioned.Time{} - } else { - yyv6 := &x.LastProbeTime - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if yym7 { - z.DecBinaryUnmarshal(yyv6) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} - } else { - yyv8 := &x.LastTransitionTime - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else if yym9 { - z.DecBinaryUnmarshal(yyv8) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv8) - } else { - z.DecFallback(yyv8, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = JobConditionType(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg2_api.ConditionStatus(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg1_unversioned.Time{} - } else { - yyv15 := &x.LastProbeTime - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if 
z.HasExtensions() && z.DecExt(yyv15) { - } else if yym16 { - z.DecBinaryUnmarshal(yyv15) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv15) - } else { - z.DecFallback(yyv15, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} - } else { - yyv17 := &x.LastTransitionTime - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else if yym18 { - z.DecBinaryUnmarshal(yyv17) - } else if !yym18 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv17) - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ScheduledJob) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() 
- } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScheduledJob) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScheduledJob) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ScheduledJobSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ScheduledJobStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = 
string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScheduledJob) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ScheduledJobSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ScheduledJobStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ScheduledJobList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceScheduledJob(([]ScheduledJob)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceScheduledJob(([]ScheduledJob)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScheduledJobList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScheduledJobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - 
z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceScheduledJob((*[]ScheduledJob)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScheduledJobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceScheduledJob((*[]ScheduledJob)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ScheduledJobSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - 
_, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.StartingDeadlineSeconds != nil - yyq2[2] = x.ConcurrencyPolicy != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Schedule)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("schedule")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Schedule)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.StartingDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy7 := *x.StartingDeadlineSeconds - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(yy7)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startingDeadlineSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.StartingDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy9 := *x.StartingDeadlineSeconds - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - x.ConcurrencyPolicy.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrencyPolicy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.ConcurrencyPolicy.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Suspend == nil { - r.EncodeNil() - } else { - yy15 := *x.Suspend - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeBool(bool(yy15)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("suspend")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Suspend == nil { - r.EncodeNil() - } else { - yy17 := *x.Suspend - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeBool(bool(yy17)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy20 := &x.JobTemplate - yy20.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("jobTemplate")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.JobTemplate - yy22.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScheduledJobSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == 
codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScheduledJobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "schedule": - if r.TryDecodeAsNil() { - x.Schedule = "" - } else { - x.Schedule = string(r.DecodeString()) - } - case "startingDeadlineSeconds": - if r.TryDecodeAsNil() { - if x.StartingDeadlineSeconds != nil { - x.StartingDeadlineSeconds = nil - } - } else { - if x.StartingDeadlineSeconds == nil { - x.StartingDeadlineSeconds = new(int64) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - case "concurrencyPolicy": - if r.TryDecodeAsNil() { - x.ConcurrencyPolicy = "" - } else { - x.ConcurrencyPolicy = ConcurrencyPolicy(r.DecodeString()) - } - case "suspend": - if r.TryDecodeAsNil() { - if x.Suspend != nil { - x.Suspend = nil - } - } else { - if x.Suspend == nil { - x.Suspend = new(bool) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*bool)(x.Suspend)) = r.DecodeBool() - } - } - case "jobTemplate": - if r.TryDecodeAsNil() { - x.JobTemplate = JobTemplateSpec{} - } else { - yyv10 := &x.JobTemplate - yyv10.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScheduledJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Schedule = "" - } else { - x.Schedule = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.StartingDeadlineSeconds != nil { - x.StartingDeadlineSeconds = nil - } - } else { - if x.StartingDeadlineSeconds == nil { - x.StartingDeadlineSeconds = new(int64) - } - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrencyPolicy = "" - } else { - x.ConcurrencyPolicy = ConcurrencyPolicy(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Suspend != nil { - x.Suspend = nil - } - } else { - if x.Suspend == nil { - x.Suspend = new(bool) - } - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*bool)(x.Suspend)) = r.DecodeBool() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.JobTemplate = JobTemplateSpec{} - } else { - yyv18 := &x.JobTemplate - yyv18.CodecDecodeSelf(d) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ConcurrencyPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ConcurrencyPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ScheduledJobStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Active) != 0 - yyq2[1] = x.LastScheduleTime != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Active == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceapi_ObjectReference(([]pkg2_api.ObjectReference)(x.Active), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("active")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Active == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceapi_ObjectReference(([]pkg2_api.ObjectReference)(x.Active), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if 
x.LastScheduleTime == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) { - } else if yym7 { - z.EncBinaryMarshal(x.LastScheduleTime) - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScheduleTime) - } else { - z.EncFallback(x.LastScheduleTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastScheduleTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LastScheduleTime == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) { - } else if yym8 { - z.EncBinaryMarshal(x.LastScheduleTime) - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScheduleTime) - } else { - z.EncFallback(x.LastScheduleTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScheduledJobStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScheduledJobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "active": - if r.TryDecodeAsNil() { - x.Active = nil - } else { - yyv4 := &x.Active - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceapi_ObjectReference((*[]pkg2_api.ObjectReference)(yyv4), d) - } - } - case "lastScheduleTime": - if r.TryDecodeAsNil() { - if x.LastScheduleTime != nil { - x.LastScheduleTime = nil - } - } else { - if x.LastScheduleTime == nil { - x.LastScheduleTime = new(pkg1_unversioned.Time) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) { - } else if yym7 { - z.DecBinaryUnmarshal(x.LastScheduleTime) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScheduleTime) - } else { - z.DecFallback(x.LastScheduleTime, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScheduledJobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Active = nil - } else { - yyv9 := &x.Active - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceapi_ObjectReference((*[]pkg2_api.ObjectReference)(yyv9), d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LastScheduleTime != nil { - x.LastScheduleTime = nil - } - } else { - if x.LastScheduleTime == nil { - x.LastScheduleTime = new(pkg1_unversioned.Time) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) { - } else if yym12 { - z.DecBinaryUnmarshal(x.LastScheduleTime) - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScheduleTime) - } else { - z.DecFallback(x.LastScheduleTime, false) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Job{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Job, yyrl1) - } - } else { - yyv1 = make([]Job, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Job{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Job{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Job{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Job{}) // var yyz1 Job - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Job{} 
- } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Job{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []JobCondition{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]JobCondition, yyrl1) - } - } else { - yyv1 = make([]JobCondition, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = JobCondition{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, JobCondition{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = JobCondition{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, JobCondition{}) // var yyz1 JobCondition - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = JobCondition{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []JobCondition{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceScheduledJob(v []ScheduledJob, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceScheduledJob(v *[]ScheduledJob, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ScheduledJob{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = 
yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 1000) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ScheduledJob, yyrl1) - } - } else { - yyv1 = make([]ScheduledJob, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ScheduledJob{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ScheduledJob{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ScheduledJob{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ScheduledJob{}) // var yyz1 ScheduledJob - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ScheduledJob{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ScheduledJob{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceapi_ObjectReference(v []pkg2_api.ObjectReference, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceapi_ObjectReference(v *[]pkg2_api.ObjectReference, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []pkg2_api.ObjectReference{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]pkg2_api.ObjectReference, yyrl1) - } - } else { - yyv1 = make([]pkg2_api.ObjectReference, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg2_api.ObjectReference{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, pkg2_api.ObjectReference{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg2_api.ObjectReference{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, pkg2_api.ObjectReference{}) // var yyz1 
pkg2_api.ObjectReference - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg2_api.ObjectReference{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []pkg2_api.ObjectReference{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go deleted file mode 100644 index 1ad469914..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/types.go +++ /dev/null @@ -1,244 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package batch - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" -) - -// +genclient=true - -// Job represents the configuration of a single job. -type Job struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - api.ObjectMeta `json:"metadata,omitempty"` - - // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Spec JobSpec `json:"spec,omitempty"` - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Status JobStatus `json:"status,omitempty"` -} - -// JobList is a collection of jobs. -type JobList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty"` - - // Items is the list of Job. - Items []Job `json:"items"` -} - -// JobTemplate describes a template for creating copies of a predefined pod. -type JobTemplate struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - api.ObjectMeta `json:"metadata,omitempty"` - - // Template defines jobs that will be created from this template - // http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Template JobTemplateSpec `json:"template,omitempty"` -} - -// JobTemplateSpec describes the data a Job should have when created from a template -type JobTemplateSpec struct { - // Standard object's metadata of the jobs created from this template. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - api.ObjectMeta `json:"metadata,omitempty"` - - // Specification of the desired behavior of the job. 
- // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Spec JobSpec `json:"spec,omitempty"` -} - -// JobSpec describes how the job execution will look like. -type JobSpec struct { - - // Parallelism specifies the maximum desired number of pods the job should - // run at any given time. The actual number of pods running in steady state will - // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), - // i.e. when the work left to do is less than max parallelism. - Parallelism *int32 `json:"parallelism,omitempty"` - - // Completions specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any - // pod signals the success of all pods, and allows parallelism to have any positive - // value. Setting to 1 means that parallelism is limited to 1 and the success of that - // pod signals the success of the job. - Completions *int32 `json:"completions,omitempty"` - - // Optional duration in seconds relative to the startTime that the job may be active - // before the system tries to terminate it; value must be positive integer - ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"` - - // Selector is a label query over pods that should match the pod count. - // Normally, the system sets this field for you. - Selector *unversioned.LabelSelector `json:"selector,omitempty"` - - // ManualSelector controls generation of pod labels and pod selectors. - // Leave `manualSelector` unset unless you are certain what you are doing. - // When false or unset, the system pick labels unique to this job - // and appends those labels to the pod template. When true, - // the user is responsible for picking unique labels and specifying - // the selector. Failure to pick a unique label may cause this - // and other jobs to not function correctly. However, You may see - // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` - // API. - ManualSelector *bool `json:"manualSelector,omitempty"` - - // Template is the object that describes the pod that will be created when - // executing a job. - Template api.PodTemplateSpec `json:"template"` -} - -// JobStatus represents the current state of a Job. -type JobStatus struct { - - // Conditions represent the latest available observations of an object's current state. - Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` - - // StartTime represents time when the job was acknowledged by the Job Manager. - // It is not guaranteed to be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - StartTime *unversioned.Time `json:"startTime,omitempty"` - - // CompletionTime represents time when the job was completed. It is not guaranteed to - // be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - CompletionTime *unversioned.Time `json:"completionTime,omitempty"` - - // Active is the number of actively running pods. - Active int32 `json:"active,omitempty"` - - // Succeeded is the number of pods which reached Phase Succeeded. - Succeeded int32 `json:"succeeded,omitempty"` - - // Failed is the number of pods which reached Phase Failed. - Failed int32 `json:"failed,omitempty"` -} - -type JobConditionType string - -// These are valid conditions of a job. -const ( - // JobComplete means the job has completed its execution. 
- JobComplete JobConditionType = "Complete" - // JobFailed means the job has failed its execution. - JobFailed JobConditionType = "Failed" -) - -// JobCondition describes current state of a job. -type JobCondition struct { - // Type of job condition, Complete or Failed. - Type JobConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status api.ConditionStatus `json:"status"` - // Last time the condition was checked. - LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty"` - // Last time the condition transit from one status to another. - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"` - // (brief) reason for the condition's last transition. - Reason string `json:"reason,omitempty"` - // Human readable message indicating details about last transition. - Message string `json:"message,omitempty"` -} - -// +genclient=true - -// ScheduledJob represents the configuration of a single scheduled job. -type ScheduledJob struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - api.ObjectMeta `json:"metadata,omitempty"` - - // Spec is a structure defining the expected behavior of a job, including the schedule. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Spec ScheduledJobSpec `json:"spec,omitempty"` - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Status ScheduledJobStatus `json:"status,omitempty"` -} - -// ScheduledJobList is a collection of scheduled jobs. -type ScheduledJobList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty"` - - // Items is the list of ScheduledJob. - Items []ScheduledJob `json:"items"` -} - -// ScheduledJobSpec describes how the job execution will look like and when it will actually run. -type ScheduledJobSpec struct { - - // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - Schedule string `json:"schedule"` - - // Optional deadline in seconds for starting the job if it misses scheduled - // time for any reason. Missed jobs executions will be counted as failed ones. - StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty"` - - // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. - ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` - - // Suspend flag tells the controller to suspend subsequent executions, it does - // not apply to already started executions. Defaults to false. - Suspend *bool `json:"suspend"` - - // JobTemplate is the object that describes the job that will be created when - // executing a ScheduledJob. - JobTemplate JobTemplateSpec `json:"jobTemplate"` -} - -// ConcurrencyPolicy describes how the job will be handled. -// Only one of the following concurrent policies may be specified. -// If none of the following policies is specified, the default one -// is AllowConcurrent. -type ConcurrencyPolicy string - -const ( - // AllowConcurrent allows ScheduledJobs to run concurrently. - AllowConcurrent ConcurrencyPolicy = "Allow" - - // ForbidConcurrent forbids concurrent runs, skipping next run if previous - // hasn't finished yet. 
- ForbidConcurrent ConcurrencyPolicy = "Forbid" - - // ReplaceConcurrent cancels currently running job and replaces it with a new one. - ReplaceConcurrent ConcurrencyPolicy = "Replace" -) - -// ScheduledJobStatus represents the current state of a Job. -type ScheduledJobStatus struct { - // Active holds pointers to currently running jobs. - Active []api.ObjectReference `json:"active,omitempty"` - - // LastScheduleTime keeps information of when was the last time the job was successfully scheduled. - LastScheduleTime *unversioned.Time `json:"lastScheduleTime,omitempty"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go deleted file mode 100644 index 2d163c6e9..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" -) - -func addConversionFuncs(scheme *runtime.Scheme) { - // Add non-generated conversion functions - err := scheme.AddConversionFuncs( - Convert_batch_JobSpec_To_v1_JobSpec, - Convert_v1_JobSpec_To_batch_JobSpec, - ) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. - panic(err) - } - - err = api.Scheme.AddFieldLabelConversionFunc("batch/v1", "Job", - func(label, value string) (string, string, error) { - switch label { - case "metadata.name", "metadata.namespace", "status.successful": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. 
- panic(err) - } -} - -func Convert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { - out.Parallelism = in.Parallelism - out.Completions = in.Completions - out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds - // unable to generate simple pointer conversion for unversioned.LabelSelector -> v1.LabelSelector - if in.Selector != nil { - out.Selector = new(LabelSelector) - if err := Convert_unversioned_LabelSelector_To_v1_LabelSelector(in.Selector, out.Selector, s); err != nil { - return err - } - } else { - out.Selector = nil - } - if in.ManualSelector != nil { - out.ManualSelector = new(bool) - *out.ManualSelector = *in.ManualSelector - } else { - out.ManualSelector = nil - } - - if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_v1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { - out.Parallelism = in.Parallelism - out.Completions = in.Completions - out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds - // unable to generate simple pointer conversion for v1.LabelSelector -> unversioned.LabelSelector - if in.Selector != nil { - out.Selector = new(unversioned.LabelSelector) - if err := Convert_v1_LabelSelector_To_unversioned_LabelSelector(in.Selector, out.Selector, s); err != nil { - return err - } - } else { - out.Selector = nil - } - if in.ManualSelector != nil { - out.ManualSelector = new(bool) - *out.ManualSelector = *in.ManualSelector - } else { - out.ManualSelector = nil - } - - if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion_generated.go deleted file mode 100644 index 4bb13c498..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/conversion_generated.go +++ /dev/null @@ -1,330 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! 
- -package v1 - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - api_v1 "k8s.io/kubernetes/pkg/api/v1" - batch "k8s.io/kubernetes/pkg/apis/batch" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedConversionFuncs( - Convert_v1_Job_To_batch_Job, - Convert_batch_Job_To_v1_Job, - Convert_v1_JobCondition_To_batch_JobCondition, - Convert_batch_JobCondition_To_v1_JobCondition, - Convert_v1_JobList_To_batch_JobList, - Convert_batch_JobList_To_v1_JobList, - Convert_v1_JobSpec_To_batch_JobSpec, - Convert_batch_JobSpec_To_v1_JobSpec, - Convert_v1_JobStatus_To_batch_JobStatus, - Convert_batch_JobStatus_To_v1_JobStatus, - Convert_v1_LabelSelector_To_unversioned_LabelSelector, - Convert_unversioned_LabelSelector_To_v1_LabelSelector, - Convert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement, - Convert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement, - ); err != nil { - // if one of the conversion functions is malformed, detect it immediately. - panic(err) - } -} - -func autoConvert_v1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { - SetDefaults_Job(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_JobStatus_To_batch_JobStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { - return autoConvert_v1_Job_To_batch_Job(in, out, s) -} - -func autoConvert_batch_Job_To_v1_Job(in *batch.Job, out *Job, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_batch_JobSpec_To_v1_JobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_batch_JobStatus_To_v1_JobStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_batch_Job_To_v1_Job(in *batch.Job, out *Job, s conversion.Scope) error { - return autoConvert_batch_Job_To_v1_Job(in, out, s) -} - -func autoConvert_v1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { - out.Type = batch.JobConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_v1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { - return autoConvert_v1_JobCondition_To_batch_JobCondition(in, out, s) -} - -func autoConvert_batch_JobCondition_To_v1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { - out.Type = JobConditionType(in.Type) - out.Status = api_v1.ConditionStatus(in.Status) - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_batch_JobCondition_To_v1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { - return autoConvert_batch_JobCondition_To_v1_JobCondition(in, out, s) -} - -func autoConvert_v1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]batch.Job, len(*in)) - for i := range *in { - if err := Convert_v1_Job_To_batch_Job(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { - return autoConvert_v1_JobList_To_batch_JobList(in, out, s) -} - -func autoConvert_batch_JobList_To_v1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Job, len(*in)) - for i := range *in { - if err := Convert_batch_Job_To_v1_Job(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_batch_JobList_To_v1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { - return 
autoConvert_batch_JobList_To_v1_JobList(in, out, s) -} - -func autoConvert_v1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { - out.Parallelism = in.Parallelism - out.Completions = in.Completions - out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := Convert_v1_LabelSelector_To_unversioned_LabelSelector(*in, *out, s); err != nil { - return err - } - } else { - out.Selector = nil - } - out.ManualSelector = in.ManualSelector - if err := api_v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func autoConvert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { - out.Parallelism = in.Parallelism - out.Completions = in.Completions - out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(LabelSelector) - if err := Convert_unversioned_LabelSelector_To_v1_LabelSelector(*in, *out, s); err != nil { - return err - } - } else { - out.Selector = nil - } - out.ManualSelector = in.ManualSelector - if err := api_v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]batch.JobCondition, len(*in)) - for i := range *in { - if err := Convert_v1_JobCondition_To_batch_JobCondition(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - out.StartTime = in.StartTime - out.CompletionTime = in.CompletionTime - out.Active = in.Active - out.Succeeded = in.Succeeded - out.Failed = in.Failed - return nil -} - -func Convert_v1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { - return autoConvert_v1_JobStatus_To_batch_JobStatus(in, out, s) -} - -func autoConvert_batch_JobStatus_To_v1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]JobCondition, len(*in)) - for i := range *in { - if err := Convert_batch_JobCondition_To_v1_JobCondition(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - out.StartTime = in.StartTime - out.CompletionTime = in.CompletionTime - out.Active = in.Active - out.Succeeded = in.Succeeded - out.Failed = in.Failed - return nil -} - -func Convert_batch_JobStatus_To_v1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { - return autoConvert_batch_JobStatus_To_v1_JobStatus(in, out, s) -} - -func autoConvert_v1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error { - out.MatchLabels = in.MatchLabels - if in.MatchExpressions != nil { - in, out := &in.MatchExpressions, &out.MatchExpressions - *out = make([]unversioned.LabelSelectorRequirement, len(*in)) - for i := range *in { - if err := Convert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.MatchExpressions = nil - } - return nil -} - -func Convert_v1_LabelSelector_To_unversioned_LabelSelector(in 
*LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error { - return autoConvert_v1_LabelSelector_To_unversioned_LabelSelector(in, out, s) -} - -func autoConvert_unversioned_LabelSelector_To_v1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error { - out.MatchLabels = in.MatchLabels - if in.MatchExpressions != nil { - in, out := &in.MatchExpressions, &out.MatchExpressions - *out = make([]LabelSelectorRequirement, len(*in)) - for i := range *in { - if err := Convert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.MatchExpressions = nil - } - return nil -} - -func Convert_unversioned_LabelSelector_To_v1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error { - return autoConvert_unversioned_LabelSelector_To_v1_LabelSelector(in, out, s) -} - -func autoConvert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error { - out.Key = in.Key - out.Operator = unversioned.LabelSelectorOperator(in.Operator) - out.Values = in.Values - return nil -} - -func Convert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error { - return autoConvert_v1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in, out, s) -} - -func autoConvert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error { - out.Key = in.Key - out.Operator = LabelSelectorOperator(in.Operator) - out.Values = in.Values - return nil -} - -func Convert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error { - return autoConvert_unversioned_LabelSelectorRequirement_To_v1_LabelSelectorRequirement(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/deep_copy_generated.go deleted file mode 100644 index c2a50b4ee..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/deep_copy_generated.go +++ /dev/null @@ -1,211 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
- -package v1 - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - api_v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_v1_Job, - DeepCopy_v1_JobCondition, - DeepCopy_v1_JobList, - DeepCopy_v1_JobSpec, - DeepCopy_v1_JobStatus, - DeepCopy_v1_LabelSelector, - DeepCopy_v1_LabelSelectorRequirement, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. - panic(err) - } -} - -func DeepCopy_v1_Job(in Job, out *Job, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api_v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1_JobSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1_JobStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error { - out.Type = in.Type - out.Status = in.Status - if err := unversioned.DeepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func DeepCopy_v1_JobList(in JobList, out *JobList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Job, len(in)) - for i := range in { - if err := DeepCopy_v1_Job(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error { - if in.Parallelism != nil { - in, out := in.Parallelism, &out.Parallelism - *out = new(int32) - **out = *in - } else { - out.Parallelism = nil - } - if in.Completions != nil { - in, out := in.Completions, &out.Completions - *out = new(int32) - **out = *in - } else { - out.Completions = nil - } - if in.ActiveDeadlineSeconds != nil { - in, out := in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - *out = new(int64) - **out = *in - } else { - out.ActiveDeadlineSeconds = nil - } - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = new(LabelSelector) - if err := DeepCopy_v1_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.Selector = nil - } - if in.ManualSelector != nil { - in, out := in.ManualSelector, &out.ManualSelector - *out = new(bool) - **out = *in - } else { - out.ManualSelector = nil - } - if err := api_v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error { - if in.Conditions != nil { - in, out := in.Conditions, &out.Conditions - *out = make([]JobCondition, len(in)) - for i := range in { - if err := DeepCopy_v1_JobCondition(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - if in.StartTime != nil { - in, out := 
in.StartTime, &out.StartTime - *out = new(unversioned.Time) - if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { - return err - } - } else { - out.StartTime = nil - } - if in.CompletionTime != nil { - in, out := in.CompletionTime, &out.CompletionTime - *out = new(unversioned.Time) - if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { - return err - } - } else { - out.CompletionTime = nil - } - out.Active = in.Active - out.Succeeded = in.Succeeded - out.Failed = in.Failed - return nil -} - -func DeepCopy_v1_LabelSelector(in LabelSelector, out *LabelSelector, c *conversion.Cloner) error { - if in.MatchLabels != nil { - in, out := in.MatchLabels, &out.MatchLabels - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.MatchLabels = nil - } - if in.MatchExpressions != nil { - in, out := in.MatchExpressions, &out.MatchExpressions - *out = make([]LabelSelectorRequirement, len(in)) - for i := range in { - if err := DeepCopy_v1_LabelSelectorRequirement(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.MatchExpressions = nil - } - return nil -} - -func DeepCopy_v1_LabelSelectorRequirement(in LabelSelectorRequirement, out *LabelSelectorRequirement, c *conversion.Cloner) error { - out.Key = in.Key - out.Operator = in.Operator - if in.Values != nil { - in, out := in.Values, &out.Values - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Values = nil - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/defaults.go deleted file mode 100644 index 81aa90c1d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/defaults.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/kubernetes/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) { - scheme.AddDefaultingFuncs( - SetDefaults_Job, - ) -} - -func SetDefaults_Job(obj *Job) { - // For a non-parallel job, you can leave both `.spec.completions` and - // `.spec.parallelism` unset. When both are unset, both are defaulted to 1. - if obj.Spec.Completions == nil && obj.Spec.Parallelism == nil { - obj.Spec.Completions = new(int32) - *obj.Spec.Completions = 1 - obj.Spec.Parallelism = new(int32) - *obj.Spec.Parallelism = 1 - } - if obj.Spec.Parallelism == nil { - obj.Spec.Parallelism = new(int32) - *obj.Spec.Parallelism = 1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go deleted file mode 100644 index 1c67cc3a9..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +genconversion=true -package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.pb.go deleted file mode 100644 index 95646919d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.pb.go +++ /dev/null @@ -1,1901 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto -// DO NOT EDIT! - -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto - - It has these top-level messages: - Job - JobCondition - JobList - JobSpec - JobStatus - LabelSelector - LabelSelectorRequirement -*/ -package v1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" -import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -func (m *Job) Reset() { *m = Job{} } -func (m *Job) String() string { return proto.CompactTextString(m) } -func (*Job) ProtoMessage() {} - -func (m *JobCondition) Reset() { *m = JobCondition{} } -func (m *JobCondition) String() string { return proto.CompactTextString(m) } -func (*JobCondition) ProtoMessage() {} - -func (m *JobList) Reset() { *m = JobList{} } -func (m *JobList) String() string { return proto.CompactTextString(m) } -func (*JobList) ProtoMessage() {} - -func (m *JobSpec) Reset() { *m = JobSpec{} } -func (m *JobSpec) String() string { return proto.CompactTextString(m) } -func (*JobSpec) ProtoMessage() {} - -func (m *JobStatus) Reset() { *m = JobStatus{} } -func (m *JobStatus) String() string { return proto.CompactTextString(m) } -func (*JobStatus) ProtoMessage() {} - -func (m *LabelSelector) Reset() { *m = LabelSelector{} } -func (m *LabelSelector) String() string { return proto.CompactTextString(m) } -func (*LabelSelector) ProtoMessage() {} - -func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } -func (m *LabelSelectorRequirement) String() string { return proto.CompactTextString(m) } -func (*LabelSelectorRequirement) ProtoMessage() {} - -func init() { - proto.RegisterType((*Job)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.Job") - proto.RegisterType((*JobCondition)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.JobCondition") - proto.RegisterType((*JobList)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.JobList") - proto.RegisterType((*JobSpec)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.JobSpec") - proto.RegisterType((*JobStatus)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.JobStatus") - proto.RegisterType((*LabelSelector)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.LabelSelector") - proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.kubernetes.pkg.apis.batch.v1.LabelSelectorRequirement") -} -func (m *Job) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Job) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *JobCondition) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *JobCondition) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i += copy(data[i:], m.Status) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) - n4, err := m.LastProbeTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n4 - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, 
uint64(m.LastTransitionTime.Size())) - n5, err := m.LastTransitionTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n5 - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - return i, nil -} - -func (m *JobList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *JobList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *JobSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *JobSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Parallelism != nil { - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.Parallelism)) - } - if m.Completions != nil { - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.Completions)) - } - if m.ActiveDeadlineSeconds != nil { - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.ActiveDeadlineSeconds)) - } - if m.Selector != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n7, err := m.Selector.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.ManualSelector != nil { - data[i] = 0x28 - i++ - if *m.ManualSelector { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - } - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n8 - return i, nil -} - -func (m *JobStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *JobStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.StartTime != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size())) - n9, err := m.StartTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n9 - } - if m.CompletionTime != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.CompletionTime.Size())) - n10, err := m.CompletionTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n10 - } - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Active)) - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Succeeded)) - data[i] = 0x30 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Failed)) - return i, nil -} 
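The generated Marshal, Size, and Unmarshal functions being removed here all build on protobuf base-128 varints: every tag and length prefix is written seven bits per byte, with the high bit set on each byte except the last (this is exactly what the removed encodeVarintGenerated and sovGenerated helpers do). A minimal, standalone Go sketch of that encoding, using hypothetical names encodeVarint and sizeVarint rather than the vendored helpers, for readers skimming this generated code:

package main

import "fmt"

// encodeVarint writes v into data starting at offset using the protobuf
// base-128 varint scheme: seven payload bits per byte, high bit set on
// every byte except the last. It returns the offset just past the value.
func encodeVarint(data []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		data[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	data[offset] = uint8(v)
	return offset + 1
}

// sizeVarint reports how many bytes encodeVarint would emit for v,
// mirroring what sovGenerated computes in the removed file.
func sizeVarint(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

func main() {
	v := uint64(300) // 300 = 0b1_0010_1100, so it needs two varint bytes
	buf := make([]byte, sizeVarint(v))
	encodeVarint(buf, 0, v)
	fmt.Printf("% x\n", buf) // prints "ac 02"
}

The same scheme explains the key bytes scattered through the removed code: a constant such as 0x1a is itself a one-byte varint holding (field_number << 3) | wire_type, here field 3 with wire type 2 (length-delimited).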
- -func (m *LabelSelector) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LabelSelector) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.MatchLabels) > 0 { - for k := range m.MatchLabels { - data[i] = 0xa - i++ - v := m.MatchLabels[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - if len(m.MatchExpressions) > 0 { - for _, msg := range m.MatchExpressions { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *LabelSelectorRequirement) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LabelSelectorRequirement) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Key))) - i += copy(data[i:], m.Key) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) - i += copy(data[i:], m.Operator) - if len(m.Values) > 0 { - for _, s := range m.Values { - data[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *Job) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *JobCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastProbeTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *JobList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - } - } - return n -} - -func (m *JobSpec) Size() (n int) { - var l int - _ = l - if m.Parallelism != nil { - n += 1 + sovGenerated(uint64(*m.Parallelism)) - } - if m.Completions != nil { - n += 1 + sovGenerated(uint64(*m.Completions)) - } - if m.ActiveDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ManualSelector != nil { - n += 2 - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *JobStatus) Size() (n int) { - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.StartTime != nil { - l = m.StartTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CompletionTime != nil { - l = m.CompletionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 1 + sovGenerated(uint64(m.Active)) - n += 1 + sovGenerated(uint64(m.Succeeded)) - n += 1 + sovGenerated(uint64(m.Failed)) - return n -} - -func (m *LabelSelector) Size() (n int) { - var l int - _ = l - if len(m.MatchLabels) > 0 { - for k, v := range m.MatchLabels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.MatchExpressions) > 0 { - for _, e := range m.MatchExpressions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *LabelSelectorRequirement) Size() (n int) { - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operator) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Job) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Job: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Job: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobCondition) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = JobConditionType(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) - } - var msglen int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 
{ - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Job{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Parallelism = &v - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Completions", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Completions = &v - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ActiveDeadlineSeconds = &v - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l 
{ - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &LabelSelector{} - } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ManualSelector", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.ManualSelector = &b - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, JobCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } 
- if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.StartTime == nil { - m.StartTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} - } - if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CompletionTime == nil { - m.CompletionTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} - } - if err := m.CompletionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) - } - m.Active = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Active |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) - } - m.Succeeded = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Succeeded |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) - } - m.Failed = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Failed |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelSelector) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 
0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - if m.MatchLabels == nil { - m.MatchLabels = make(map[string]string) - } - m.MatchLabels[mapkey] = mapvalue - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{}) - if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelSelectorRequirement) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - 
fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Operator = LabelSelectorOperator(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Values = append(m.Values, string(data[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - 
iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto deleted file mode 100644 index 8d50f7f72..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/generated.proto +++ /dev/null @@ -1,177 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.batch.v1; - -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1"; - -// Job represents the configuration of a single job. -message Job { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional JobSpec spec = 2; - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional JobStatus status = 3; -} - -// JobCondition describes current state of a job. -message JobCondition { - // Type of job condition, Complete or Failed. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time the condition was checked. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; - - // Last time the condition transit from one status to another. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; - - // (brief) reason for the condition's last transition. 
- optional string reason = 5; - - // Human readable message indicating details about last transition. - optional string message = 6; -} - -// JobList is a collection of jobs. -message JobList { - // Standard list metadata - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of Job. - repeated Job items = 2; -} - -// JobSpec describes how the job execution will look like. -message JobSpec { - // Parallelism specifies the maximum desired number of pods the job should - // run at any given time. The actual number of pods running in steady state will - // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), - // i.e. when the work left to do is less than max parallelism. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - optional int32 parallelism = 1; - - // Completions specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any - // pod signals the success of all pods, and allows parallelism to have any positive - // value. Setting to 1 means that parallelism is limited to 1 and the success of that - // pod signals the success of the job. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - optional int32 completions = 2; - - // Optional duration in seconds relative to the startTime that the job may be active - // before the system tries to terminate it; value must be positive integer - optional int64 activeDeadlineSeconds = 3; - - // Selector is a label query over pods that should match the pod count. - // Normally, the system sets this field for you. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - optional LabelSelector selector = 4; - - // ManualSelector controls generation of pod labels and pod selectors. - // Leave `manualSelector` unset unless you are certain what you are doing. - // When false or unset, the system pick labels unique to this job - // and appends those labels to the pod template. When true, - // the user is responsible for picking unique labels and specifying - // the selector. Failure to pick a unique label may cause this - // and other jobs to not function correctly. However, You may see - // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` - // API. - // More info: http://releases.k8s.io/release-1.3/docs/design/selector-generation.md - optional bool manualSelector = 5; - - // Template is the object that describes the pod that will be created when - // executing a job. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6; -} - -// JobStatus represents the current state of a Job. -message JobStatus { - // Conditions represent the latest available observations of an object's current state. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - repeated JobCondition conditions = 1; - - // StartTime represents time when the job was acknowledged by the Job Manager. - // It is not guaranteed to be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 2; - - // CompletionTime represents time when the job was completed. 
It is not guaranteed to - // be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - optional k8s.io.kubernetes.pkg.api.unversioned.Time completionTime = 3; - - // Active is the number of actively running pods. - optional int32 active = 4; - - // Succeeded is the number of pods which reached Phase Succeeded. - optional int32 succeeded = 5; - - // Failed is the number of pods which reached Phase Failed. - optional int32 failed = 6; -} - -// A label selector is a label query over a set of resources. The result of matchLabels and -// matchExpressions are ANDed. An empty label selector matches all objects. A null -// label selector matches no objects. -message LabelSelector { - // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - // map is equivalent to an element of matchExpressions, whose key field is "key", the - // operator is "In", and the values array contains only "value". The requirements are ANDed. - map<string, string> matchLabels = 1; - - // matchExpressions is a list of label selector requirements. The requirements are ANDed. - repeated LabelSelectorRequirement matchExpressions = 2; -} - -// A label selector requirement is a selector that contains values, a key, and an operator that -// relates the key and values. -message LabelSelectorRequirement { - // key is the label key that the selector applies to. - optional string key = 1; - - // operator represents a key's relationship to a set of values. - // Valid operators ard In, NotIn, Exists and DoesNotExist. - optional string operator = 2; - - // values is an array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. This array is replaced during a strategic - // merge patch. - repeated string values = 3; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/register.go deleted file mode 100644 index d8c087f1b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/register.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" -) - -// GroupName is the group name use in this package -const GroupName = "batch" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1"} - -func AddToScheme(scheme *runtime.Scheme) { - addKnownTypes(scheme) - addDefaultingFuncs(scheme) - addConversionFuncs(scheme) -} - -// Adds the list of known types to api.Scheme. 
-func addKnownTypes(scheme *runtime.Scheme) { - scheme.AddKnownTypes(SchemeGroupVersion, - &Job{}, - &JobList{}, - &v1.ListOptions{}, - &v1.DeleteOptions{}, - ) - versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.generated.go deleted file mode 100644 index 58d5f6f54..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.generated.go +++ /dev/null @@ -1,3184 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg4_resource "k8s.io/kubernetes/pkg/api/resource" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg2_v1 "k8s.io/kubernetes/pkg/api/v1" - pkg3_types "k8s.io/kubernetes/pkg/types" - pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg4_resource.Quantity - var v1 pkg1_unversioned.TypeMeta - var v2 pkg2_v1.ObjectMeta - var v3 pkg3_types.UID - var v4 pkg5_intstr.IntOrString - var v5 time.Time - _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 - } -} - -func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if 
false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Job) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = JobStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.Status = JobStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceJob(([]Job)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceJob(([]Job)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceJob((*[]Job)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - 
x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceJob((*[]Job)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Parallelism != nil - yyq2[1] = x.Completions != nil - yyq2[2] = x.ActiveDeadlineSeconds != nil - yyq2[3] = x.Selector != nil - yyq2[4] = x.ManualSelector != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Parallelism == nil { - r.EncodeNil() - } else { - yy4 := *x.Parallelism - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("parallelism")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Parallelism == nil { - r.EncodeNil() - } else { - yy6 := *x.Parallelism - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Completions == nil { - r.EncodeNil() - } else { - yy9 := *x.Completions - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("completions")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Completions == nil { - r.EncodeNil() - } else { - yy11 := *x.Completions - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeInt(int64(yy11)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy14 := *x.ActiveDeadlineSeconds - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeInt(int64(yy14)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy16 := *x.ActiveDeadlineSeconds - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(yy16)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Selector == nil { - r.EncodeNil() - } else { - x.Selector.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - x.Selector.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.ManualSelector == nil { - r.EncodeNil() - } else { - yy22 := *x.ManualSelector - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeBool(bool(yy22)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("manualSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ManualSelector == nil { - r.EncodeNil() - } else { - yy24 := *x.ManualSelector - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeBool(bool(yy24)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy27 := &x.Template - yy27.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy29 := &x.Template - yy29.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x 
*JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "parallelism": - if r.TryDecodeAsNil() { - if x.Parallelism != nil { - x.Parallelism = nil - } - } else { - if x.Parallelism == nil { - x.Parallelism = new(int32) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) - } - } - case "completions": - if r.TryDecodeAsNil() { - if x.Completions != nil { - x.Completions = nil - } - } else { - if x.Completions == nil { - x.Completions = new(int32) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) - } - } - case "activeDeadlineSeconds": - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(LabelSelector) - } - x.Selector.CodecDecodeSelf(d) - } - case "manualSelector": - if r.TryDecodeAsNil() { - if x.ManualSelector != nil { - x.ManualSelector = nil - } - } else { - if x.ManualSelector == nil { - x.ManualSelector = new(bool) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*bool)(x.ManualSelector)) = r.DecodeBool() - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} - } else { - yyv13 := &x.Template - yyv13.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Parallelism != nil { - x.Parallelism = nil - } - } else { - if x.Parallelism == nil { - x.Parallelism = new(int32) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Completions != nil { - x.Completions = nil - } - } else { - if x.Completions == nil { - x.Completions = new(int32) - } - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*int32)(x.Completions)) = 
int32(r.DecodeInt(32)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(LabelSelector) - } - x.Selector.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ManualSelector != nil { - x.ManualSelector = nil - } - } else { - if x.ManualSelector == nil { - x.ManualSelector = new(bool) - } - yym23 := z.DecBinary() - _ = yym23 - if false { - } else { - *((*bool)(x.ManualSelector)) = r.DecodeBool() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} - } else { - yyv24 := &x.Template - yyv24.CodecDecodeSelf(d) - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Conditions) != 0 - yyq2[1] = x.StartTime != nil - yyq2[2] = x.CompletionTime != nil - yyq2[3] = x.Active != 0 - yyq2[4] = x.Succeeded != 0 - yyq2[5] = x.Failed != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } 
else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.StartTime == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym7 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.StartTime == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym8 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.CompletionTime == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { - } else if yym10 { - z.EncBinaryMarshal(x.CompletionTime) - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(x.CompletionTime) - } else { - z.EncFallback(x.CompletionTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("completionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CompletionTime == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { - } else if yym11 { - z.EncBinaryMarshal(x.CompletionTime) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(x.CompletionTime) - } else { - z.EncFallback(x.CompletionTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.Active)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("active")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.Active)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.Succeeded)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("succeeded")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(x.Succeeded)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(x.Failed)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[5] 
{ - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("failed")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(x.Failed)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv4 := &x.Conditions - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceJobCondition((*[]JobCondition)(yyv4), d) - } - } - case "startTime": - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg1_unversioned.Time) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym7 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - case "completionTime": - if r.TryDecodeAsNil() { - if x.CompletionTime != nil { - x.CompletionTime = nil - } - } else { - if x.CompletionTime == nil { - x.CompletionTime = new(pkg1_unversioned.Time) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { - } else if yym9 { - z.DecBinaryUnmarshal(x.CompletionTime) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.CompletionTime) - } else { - z.DecFallback(x.CompletionTime, false) - } - } - case "active": - if r.TryDecodeAsNil() { - x.Active = 0 - } else { - x.Active = int32(r.DecodeInt(32)) - } - case "succeeded": - if r.TryDecodeAsNil() { - x.Succeeded = 0 - } else { - x.Succeeded = int32(r.DecodeInt(32)) - } - case "failed": - if r.TryDecodeAsNil() { - x.Failed = 0 - } else { - x.Failed = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobStatus) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv14 := &x.Conditions - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - h.decSliceJobCondition((*[]JobCondition)(yyv14), d) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg1_unversioned.Time) - } - yym17 := z.DecBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym17 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym17 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CompletionTime != nil { - x.CompletionTime = nil - } - } else { - if x.CompletionTime == nil { - x.CompletionTime = new(pkg1_unversioned.Time) - } - yym19 := z.DecBinary() - _ = yym19 - if false { - } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { - } else if yym19 { - z.DecBinaryUnmarshal(x.CompletionTime) - } else if !yym19 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.CompletionTime) - } else { - z.DecFallback(x.CompletionTime, false) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Active = 0 - } else { - x.Active = int32(r.DecodeInt(32)) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Succeeded = 0 - } else { - x.Succeeded = int32(r.DecodeInt(32)) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Failed = 0 - } else { - x.Failed = int32(r.DecodeInt(32)) - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x JobConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && 
z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *JobConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = true - yyq2[3] = true - yyq2[4] = x.Reason != "" - yyq2[5] = x.Message != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yysf7 := &x.Status - yysf7.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yysf8 := &x.Status - yysf8.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.LastProbeTime - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else if yym11 { - z.EncBinaryMarshal(yy10) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(yy10) - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.LastProbeTime - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if yym13 { - z.EncBinaryMarshal(yy12) - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.LastTransitionTime - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(yy15) { - } else if yym16 { - z.EncBinaryMarshal(yy15) - } else if !yym16 && z.IsJSONHandle() { - z.EncJSONMarshal(yy15) - } else { - z.EncFallback(yy15) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.LastTransitionTime - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(yy17) { - } else if yym18 { - z.EncBinaryMarshal(yy17) - } else if !yym18 && 
z.IsJSONHandle() { - z.EncJSONMarshal(yy17) - } else { - z.EncFallback(yy17) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = JobConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg2_v1.ConditionStatus(r.DecodeString()) - } - case "lastProbeTime": - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg1_unversioned.Time{} - } else { - yyv6 := &x.LastProbeTime - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if yym7 { - z.DecBinaryUnmarshal(yyv6) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - case "lastTransitionTime": - if 
r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} - } else { - yyv8 := &x.LastTransitionTime - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else if yym9 { - z.DecBinaryUnmarshal(yyv8) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv8) - } else { - z.DecFallback(yyv8, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = JobConditionType(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg2_v1.ConditionStatus(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg1_unversioned.Time{} - } else { - yyv15 := &x.LastProbeTime - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.DecExt(yyv15) { - } else if yym16 { - z.DecBinaryUnmarshal(yyv15) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv15) - } else { - z.DecFallback(yyv15, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} - } else { - yyv17 := &x.LastTransitionTime - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else if yym18 { - z.DecBinaryUnmarshal(yyv17) - } else if !yym18 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv17) - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for 
{ - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LabelSelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.MatchLabels) != 0 - yyq2[1] = len(x.MatchExpressions) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.MatchLabels == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncMapStringStringV(x.MatchLabels, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("matchLabels")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MatchLabels == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncMapStringStringV(x.MatchLabels, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.MatchExpressions == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceLabelSelectorRequirement(([]LabelSelectorRequirement)(x.MatchExpressions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("matchExpressions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MatchExpressions == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceLabelSelectorRequirement(([]LabelSelectorRequirement)(x.MatchExpressions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LabelSelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LabelSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "matchLabels": - if r.TryDecodeAsNil() { - x.MatchLabels = nil - } else { - yyv4 := &x.MatchLabels - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecMapStringStringX(yyv4, false, d) - } - } - case "matchExpressions": - if r.TryDecodeAsNil() { - x.MatchExpressions = nil - } else { - yyv6 := &x.MatchExpressions - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceLabelSelectorRequirement((*[]LabelSelectorRequirement)(yyv6), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LabelSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MatchLabels = nil - } else { - yyv9 := &x.MatchLabels - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - z.F.DecMapStringStringX(yyv9, false, d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MatchExpressions = nil - } else { - yyv11 := &x.MatchExpressions - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceLabelSelectorRequirement((*[]LabelSelectorRequirement)(yyv11), d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LabelSelectorRequirement) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = len(x.Values) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Operator.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("operator")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Operator.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Values == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - z.F.EncSliceStringV(x.Values, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("values")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Values == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - z.F.EncSliceStringV(x.Values, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LabelSelectorRequirement) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LabelSelectorRequirement) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - case "operator": - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - x.Operator = LabelSelectorOperator(r.DecodeString()) - } - case "values": - if r.TryDecodeAsNil() { - x.Values = nil - } else { - yyv6 := &x.Values - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecSliceStringX(yyv6, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LabelSelectorRequirement) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - 
var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - x.Operator = LabelSelectorOperator(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Values = nil - } else { - yyv11 := &x.Values - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - z.F.DecSliceStringX(yyv11, false, d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x LabelSelectorOperator) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *LabelSelectorOperator) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Job{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 792) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Job, yyrl1) - } - } else { - yyv1 = make([]Job, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Job{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, 
Job{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Job{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Job{}) // var yyz1 Job - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Job{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Job{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []JobCondition{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]JobCondition, yyrl1) - } - } else { - yyv1 = make([]JobCondition, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = JobCondition{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, JobCondition{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = JobCondition{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, JobCondition{}) // var yyz1 JobCondition - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = JobCondition{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []JobCondition{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceLabelSelectorRequirement(v []LabelSelectorRequirement, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) 
decSliceLabelSelectorRequirement(v *[]LabelSelectorRequirement, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []LabelSelectorRequirement{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]LabelSelectorRequirement, yyrl1) - } - } else { - yyv1 = make([]LabelSelectorRequirement, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LabelSelectorRequirement{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, LabelSelectorRequirement{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LabelSelectorRequirement{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, LabelSelectorRequirement{}) // var yyz1 LabelSelectorRequirement - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = LabelSelectorRequirement{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []LabelSelectorRequirement{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.go deleted file mode 100644 index 35ec05206..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types.go +++ /dev/null @@ -1,186 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" -) - -// +genclient=true - -// Job represents the configuration of a single job. -type Job struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec is a structure defining the expected behavior of a job. 
- // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// JobList is a collection of jobs. -type JobList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of Job. - Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// JobSpec describes how the job execution will look like. -type JobSpec struct { - - // Parallelism specifies the maximum desired number of pods the job should - // run at any given time. The actual number of pods running in steady state will - // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), - // i.e. when the work left to do is less than max parallelism. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` - - // Completions specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any - // pod signals the success of all pods, and allows parallelism to have any positive - // value. Setting to 1 means that parallelism is limited to 1 and the success of that - // pod signals the success of the job. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"` - - // Optional duration in seconds relative to the startTime that the job may be active - // before the system tries to terminate it; value must be positive integer - ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"` - - // Selector is a label query over pods that should match the pod count. - // Normally, the system sets this field for you. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` - - // ManualSelector controls generation of pod labels and pod selectors. - // Leave `manualSelector` unset unless you are certain what you are doing. - // When false or unset, the system pick labels unique to this job - // and appends those labels to the pod template. When true, - // the user is responsible for picking unique labels and specifying - // the selector. Failure to pick a unique label may cause this - // and other jobs to not function correctly. However, You may see - // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` - // API. - // More info: http://releases.k8s.io/release-1.3/docs/design/selector-generation.md - ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` - - // Template is the object that describes the pod that will be created when - // executing a job. 
- // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` -} - -// JobStatus represents the current state of a Job. -type JobStatus struct { - - // Conditions represent the latest available observations of an object's current state. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` - - // StartTime represents time when the job was acknowledged by the Job Manager. - // It is not guaranteed to be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - StartTime *unversioned.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` - - // CompletionTime represents time when the job was completed. It is not guaranteed to - // be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - CompletionTime *unversioned.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` - - // Active is the number of actively running pods. - Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` - - // Succeeded is the number of pods which reached Phase Succeeded. - Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"` - - // Failed is the number of pods which reached Phase Failed. - Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` -} - -type JobConditionType string - -// These are valid conditions of a job. -const ( - // JobComplete means the job has completed its execution. - JobComplete JobConditionType = "Complete" - // JobFailed means the job has failed its execution. - JobFailed JobConditionType = "Failed" -) - -// JobCondition describes current state of a job. -type JobCondition struct { - // Type of job condition, Complete or Failed. - Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` - // Last time the condition was checked. - LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` - // Last time the condition transit from one status to another. - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` - // (brief) reason for the condition's last transition. - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - // Human readable message indicating details about last transition. - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` -} - -// A label selector is a label query over a set of resources. The result of matchLabels and -// matchExpressions are ANDed. An empty label selector matches all objects. A null -// label selector matches no objects. -type LabelSelector struct { - // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - // map is equivalent to an element of matchExpressions, whose key field is "key", the - // operator is "In", and the values array contains only "value". The requirements are ANDed. 
- MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"` - // matchExpressions is a list of label selector requirements. The requirements are ANDed. - MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"` -} - -// A label selector requirement is a selector that contains values, a key, and an operator that -// relates the key and values. -type LabelSelectorRequirement struct { - // key is the label key that the selector applies to. - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` - // operator represents a key's relationship to a set of values. - // Valid operators ard In, NotIn, Exists and DoesNotExist. - Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"` - // values is an array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. This array is replaced during a strategic - // merge patch. - Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` -} - -// A label selector operator is the set of operators that can be used in a selector requirement. -type LabelSelectorOperator string - -const ( - LabelSelectorOpIn LabelSelectorOperator = "In" - LabelSelectorOpNotIn LabelSelectorOperator = "NotIn" - LabelSelectorOpExists LabelSelectorOperator = "Exists" - LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types_swagger_doc_generated.go deleted file mode 100644 index 21c621abf..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v1/types_swagger_doc_generated.go +++ /dev/null @@ -1,114 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_Job = map[string]string{ - "": "Job represents the configuration of a single job.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "Spec is a structure defining the expected behavior of a job. 
More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", -} - -func (Job) SwaggerDoc() map[string]string { - return map_Job -} - -var map_JobCondition = map[string]string{ - "": "JobCondition describes current state of a job.", - "type": "Type of job condition, Complete or Failed.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastProbeTime": "Last time the condition was checked.", - "lastTransitionTime": "Last time the condition transit from one status to another.", - "reason": "(brief) reason for the condition's last transition.", - "message": "Human readable message indicating details about last transition.", -} - -func (JobCondition) SwaggerDoc() map[string]string { - return map_JobCondition -} - -var map_JobList = map[string]string{ - "": "JobList is a collection of jobs.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "items": "Items is the list of Job.", -} - -func (JobList) SwaggerDoc() map[string]string { - return map_JobList -} - -var map_JobSpec = map[string]string{ - "": "JobSpec describes how the job execution will look like.", - "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md", - "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md", - "activeDeadlineSeconds": "Optional duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer", - "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors", - "manualSelector": "ManualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: http://releases.k8s.io/release-1.3/docs/design/selector-generation.md", - "template": "Template is the object that describes the pod that will be created when executing a job. 
More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md", -} - -func (JobSpec) SwaggerDoc() map[string]string { - return map_JobSpec -} - -var map_JobStatus = map[string]string{ - "": "JobStatus represents the current state of a Job.", - "conditions": "Conditions represent the latest available observations of an object's current state. More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md", - "startTime": "StartTime represents time when the job was acknowledged by the Job Manager. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", - "completionTime": "CompletionTime represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", - "active": "Active is the number of actively running pods.", - "succeeded": "Succeeded is the number of pods which reached Phase Succeeded.", - "failed": "Failed is the number of pods which reached Phase Failed.", -} - -func (JobStatus) SwaggerDoc() map[string]string { - return map_JobStatus -} - -var map_LabelSelector = map[string]string{ - "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", - "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", - "matchExpressions": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", -} - -func (LabelSelector) SwaggerDoc() map[string]string { - return map_LabelSelector -} - -var map_LabelSelectorRequirement = map[string]string{ - "": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", - "key": "key is the label key that the selector applies to.", - "operator": "operator represents a key's relationship to a set of values. Valid operators ard In, NotIn, Exists and DoesNotExist.", - "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", -} - -func (LabelSelectorRequirement) SwaggerDoc() map[string]string { - return map_LabelSelectorRequirement -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion.go deleted file mode 100644 index 4714fda0f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2alpha1 - -import ( - "fmt" - "reflect" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" -) - -func addConversionFuncs(scheme *runtime.Scheme) { - // Add non-generated conversion functions - err := scheme.AddConversionFuncs( - Convert_batch_JobSpec_To_v2alpha1_JobSpec, - Convert_v2alpha1_JobSpec_To_batch_JobSpec, - ) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. - panic(err) - } - - // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. - for _, kind := range []string{"Job", "JobTemplate", "ScheduledJob"} { - err = api.Scheme.AddFieldLabelConversionFunc("batch/v2alpha1", kind, - func(label, value string) (string, string, error) { - switch label { - case "metadata.name", "metadata.namespace", "status.successful": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }) - } - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. - panic(err) - } -} - -func Convert_batch_JobSpec_To_v2alpha1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*batch.JobSpec))(in) - } - out.Parallelism = in.Parallelism - out.Completions = in.Completions - out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds - // unable to generate simple pointer conversion for unversioned.LabelSelector -> v1.LabelSelector - if in.Selector != nil { - out.Selector = new(LabelSelector) - if err := Convert_unversioned_LabelSelector_To_v2alpha1_LabelSelector(in.Selector, out.Selector, s); err != nil { - return err - } - } else { - out.Selector = nil - } - if in.ManualSelector != nil { - out.ManualSelector = new(bool) - *out.ManualSelector = *in.ManualSelector - } else { - out.ManualSelector = nil - } - - if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_v2alpha1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*JobSpec))(in) - } - out.Parallelism = in.Parallelism - out.Completions = in.Completions - out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds - // unable to generate simple pointer conversion for unversioned.LabelSelector -> v1.LabelSelector - if in.Selector != nil { - out.Selector = new(unversioned.LabelSelector) - if err := Convert_v2alpha1_LabelSelector_To_unversioned_LabelSelector(in.Selector, out.Selector, s); err != nil { - return err - } - } else { - out.Selector = nil - } - if in.ManualSelector != nil { - out.ManualSelector = new(bool) - *out.ManualSelector = *in.ManualSelector - } else { - out.ManualSelector = nil - } - - if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion_generated.go deleted file mode 100644 index c411875e3..000000000 --- 
a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/conversion_generated.go +++ /dev/null @@ -1,573 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v2alpha1 - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" - batch "k8s.io/kubernetes/pkg/apis/batch" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedConversionFuncs( - Convert_v2alpha1_Job_To_batch_Job, - Convert_batch_Job_To_v2alpha1_Job, - Convert_v2alpha1_JobCondition_To_batch_JobCondition, - Convert_batch_JobCondition_To_v2alpha1_JobCondition, - Convert_v2alpha1_JobList_To_batch_JobList, - Convert_batch_JobList_To_v2alpha1_JobList, - Convert_v2alpha1_JobSpec_To_batch_JobSpec, - Convert_batch_JobSpec_To_v2alpha1_JobSpec, - Convert_v2alpha1_JobStatus_To_batch_JobStatus, - Convert_batch_JobStatus_To_v2alpha1_JobStatus, - Convert_v2alpha1_JobTemplate_To_batch_JobTemplate, - Convert_batch_JobTemplate_To_v2alpha1_JobTemplate, - Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec, - Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec, - Convert_v2alpha1_LabelSelector_To_unversioned_LabelSelector, - Convert_unversioned_LabelSelector_To_v2alpha1_LabelSelector, - Convert_v2alpha1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement, - Convert_unversioned_LabelSelectorRequirement_To_v2alpha1_LabelSelectorRequirement, - Convert_v2alpha1_ScheduledJob_To_batch_ScheduledJob, - Convert_batch_ScheduledJob_To_v2alpha1_ScheduledJob, - Convert_v2alpha1_ScheduledJobList_To_batch_ScheduledJobList, - Convert_batch_ScheduledJobList_To_v2alpha1_ScheduledJobList, - Convert_v2alpha1_ScheduledJobSpec_To_batch_ScheduledJobSpec, - Convert_batch_ScheduledJobSpec_To_v2alpha1_ScheduledJobSpec, - Convert_v2alpha1_ScheduledJobStatus_To_batch_ScheduledJobStatus, - Convert_batch_ScheduledJobStatus_To_v2alpha1_ScheduledJobStatus, - ); err != nil { - // if one of the conversion functions is malformed, detect it immediately. - panic(err) - } -} - -func autoConvert_v2alpha1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { - SetDefaults_Job(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v2alpha1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v2alpha1_JobStatus_To_batch_JobStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v2alpha1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { - return autoConvert_v2alpha1_Job_To_batch_Job(in, out, s) -} - -func autoConvert_batch_Job_To_v2alpha1_Job(in *batch.Job, out *Job, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_batch_JobSpec_To_v2alpha1_JobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_batch_JobStatus_To_v2alpha1_JobStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_batch_Job_To_v2alpha1_Job(in *batch.Job, out *Job, s conversion.Scope) error { - return autoConvert_batch_Job_To_v2alpha1_Job(in, out, s) -} - -func autoConvert_v2alpha1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { - out.Type = batch.JobConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_v2alpha1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { - return autoConvert_v2alpha1_JobCondition_To_batch_JobCondition(in, out, s) -} - -func autoConvert_batch_JobCondition_To_v2alpha1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { - out.Type = JobConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_batch_JobCondition_To_v2alpha1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { - return autoConvert_batch_JobCondition_To_v2alpha1_JobCondition(in, out, s) -} - -func autoConvert_v2alpha1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]batch.Job, len(*in)) - for i := range *in { - if err := Convert_v2alpha1_Job_To_batch_Job(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v2alpha1_JobList_To_batch_JobList(in *JobList, out 
*batch.JobList, s conversion.Scope) error { - return autoConvert_v2alpha1_JobList_To_batch_JobList(in, out, s) -} - -func autoConvert_batch_JobList_To_v2alpha1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Job, len(*in)) - for i := range *in { - if err := Convert_batch_Job_To_v2alpha1_Job(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_batch_JobList_To_v2alpha1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { - return autoConvert_batch_JobList_To_v2alpha1_JobList(in, out, s) -} - -func autoConvert_v2alpha1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { - out.Parallelism = in.Parallelism - out.Completions = in.Completions - out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := Convert_v2alpha1_LabelSelector_To_unversioned_LabelSelector(*in, *out, s); err != nil { - return err - } - } else { - out.Selector = nil - } - out.ManualSelector = in.ManualSelector - if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func autoConvert_batch_JobSpec_To_v2alpha1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { - out.Parallelism = in.Parallelism - out.Completions = in.Completions - out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(LabelSelector) - if err := Convert_unversioned_LabelSelector_To_v2alpha1_LabelSelector(*in, *out, s); err != nil { - return err - } - } else { - out.Selector = nil - } - out.ManualSelector = in.ManualSelector - if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func autoConvert_v2alpha1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]batch.JobCondition, len(*in)) - for i := range *in { - if err := Convert_v2alpha1_JobCondition_To_batch_JobCondition(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - out.StartTime = in.StartTime - out.CompletionTime = in.CompletionTime - out.Active = in.Active - out.Succeeded = in.Succeeded - out.Failed = in.Failed - return nil -} - -func Convert_v2alpha1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { - return autoConvert_v2alpha1_JobStatus_To_batch_JobStatus(in, out, s) -} - -func autoConvert_batch_JobStatus_To_v2alpha1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]JobCondition, len(*in)) - for i := range *in { - if err := Convert_batch_JobCondition_To_v2alpha1_JobCondition(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - out.StartTime = in.StartTime - 
out.CompletionTime = in.CompletionTime - out.Active = in.Active - out.Succeeded = in.Succeeded - out.Failed = in.Failed - return nil -} - -func Convert_batch_JobStatus_To_v2alpha1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { - return autoConvert_batch_JobStatus_To_v2alpha1_JobStatus(in, out, s) -} - -func autoConvert_v2alpha1_JobTemplate_To_batch_JobTemplate(in *JobTemplate, out *batch.JobTemplate, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_v2alpha1_JobTemplate_To_batch_JobTemplate(in *JobTemplate, out *batch.JobTemplate, s conversion.Scope) error { - return autoConvert_v2alpha1_JobTemplate_To_batch_JobTemplate(in, out, s) -} - -func autoConvert_batch_JobTemplate_To_v2alpha1_JobTemplate(in *batch.JobTemplate, out *JobTemplate, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_batch_JobTemplate_To_v2alpha1_JobTemplate(in *batch.JobTemplate, out *JobTemplate, s conversion.Scope) error { - return autoConvert_batch_JobTemplate_To_v2alpha1_JobTemplate(in, out, s) -} - -func autoConvert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in *JobTemplateSpec, out *batch.JobTemplateSpec, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v2alpha1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in *JobTemplateSpec, out *batch.JobTemplateSpec, s conversion.Scope) error { - return autoConvert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(in, out, s) -} - -func autoConvert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in *batch.JobTemplateSpec, out *JobTemplateSpec, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_batch_JobSpec_To_v2alpha1_JobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in *batch.JobTemplateSpec, out *JobTemplateSpec, s conversion.Scope) error { - return autoConvert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(in, out, s) -} - -func autoConvert_v2alpha1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error { - out.MatchLabels = in.MatchLabels - if in.MatchExpressions != nil { - in, out := &in.MatchExpressions, &out.MatchExpressions - *out = make([]unversioned.LabelSelectorRequirement, len(*in)) - for i := range *in { - if err := Convert_v2alpha1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.MatchExpressions = nil - } - return nil -} - -func Convert_v2alpha1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error { - return autoConvert_v2alpha1_LabelSelector_To_unversioned_LabelSelector(in, out, s) -} - -func autoConvert_unversioned_LabelSelector_To_v2alpha1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error { - out.MatchLabels = in.MatchLabels - if in.MatchExpressions != nil { - in, out := &in.MatchExpressions, &out.MatchExpressions - *out = make([]LabelSelectorRequirement, len(*in)) - for i := range *in { - if err := Convert_unversioned_LabelSelectorRequirement_To_v2alpha1_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.MatchExpressions = nil - } - return nil -} - -func Convert_unversioned_LabelSelector_To_v2alpha1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error { - return autoConvert_unversioned_LabelSelector_To_v2alpha1_LabelSelector(in, out, s) -} - -func autoConvert_v2alpha1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error { - out.Key = in.Key - out.Operator = unversioned.LabelSelectorOperator(in.Operator) - out.Values = in.Values - return nil -} - -func Convert_v2alpha1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error { - return autoConvert_v2alpha1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in, out, s) -} - -func autoConvert_unversioned_LabelSelectorRequirement_To_v2alpha1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error { - out.Key = in.Key - out.Operator = LabelSelectorOperator(in.Operator) - out.Values = in.Values - return nil -} - -func Convert_unversioned_LabelSelectorRequirement_To_v2alpha1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error { - return autoConvert_unversioned_LabelSelectorRequirement_To_v2alpha1_LabelSelectorRequirement(in, out, s) -} - -func autoConvert_v2alpha1_ScheduledJob_To_batch_ScheduledJob(in *ScheduledJob, out *batch.ScheduledJob, s conversion.Scope) error { - SetDefaults_ScheduledJob(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); 
err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v2alpha1_ScheduledJobSpec_To_batch_ScheduledJobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v2alpha1_ScheduledJobStatus_To_batch_ScheduledJobStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v2alpha1_ScheduledJob_To_batch_ScheduledJob(in *ScheduledJob, out *batch.ScheduledJob, s conversion.Scope) error { - return autoConvert_v2alpha1_ScheduledJob_To_batch_ScheduledJob(in, out, s) -} - -func autoConvert_batch_ScheduledJob_To_v2alpha1_ScheduledJob(in *batch.ScheduledJob, out *ScheduledJob, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_batch_ScheduledJobSpec_To_v2alpha1_ScheduledJobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_batch_ScheduledJobStatus_To_v2alpha1_ScheduledJobStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_batch_ScheduledJob_To_v2alpha1_ScheduledJob(in *batch.ScheduledJob, out *ScheduledJob, s conversion.Scope) error { - return autoConvert_batch_ScheduledJob_To_v2alpha1_ScheduledJob(in, out, s) -} - -func autoConvert_v2alpha1_ScheduledJobList_To_batch_ScheduledJobList(in *ScheduledJobList, out *batch.ScheduledJobList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]batch.ScheduledJob, len(*in)) - for i := range *in { - if err := Convert_v2alpha1_ScheduledJob_To_batch_ScheduledJob(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v2alpha1_ScheduledJobList_To_batch_ScheduledJobList(in *ScheduledJobList, out *batch.ScheduledJobList, s conversion.Scope) error { - return autoConvert_v2alpha1_ScheduledJobList_To_batch_ScheduledJobList(in, out, s) -} - -func autoConvert_batch_ScheduledJobList_To_v2alpha1_ScheduledJobList(in *batch.ScheduledJobList, out *ScheduledJobList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ScheduledJob, len(*in)) - for i := range *in { - if err := Convert_batch_ScheduledJob_To_v2alpha1_ScheduledJob(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_batch_ScheduledJobList_To_v2alpha1_ScheduledJobList(in *batch.ScheduledJobList, out *ScheduledJobList, s conversion.Scope) error { - return autoConvert_batch_ScheduledJobList_To_v2alpha1_ScheduledJobList(in, out, s) -} - -func autoConvert_v2alpha1_ScheduledJobSpec_To_batch_ScheduledJobSpec(in *ScheduledJobSpec, 
out *batch.ScheduledJobSpec, s conversion.Scope) error { - out.Schedule = in.Schedule - out.StartingDeadlineSeconds = in.StartingDeadlineSeconds - out.ConcurrencyPolicy = batch.ConcurrencyPolicy(in.ConcurrencyPolicy) - out.Suspend = in.Suspend - if err := Convert_v2alpha1_JobTemplateSpec_To_batch_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil { - return err - } - return nil -} - -func Convert_v2alpha1_ScheduledJobSpec_To_batch_ScheduledJobSpec(in *ScheduledJobSpec, out *batch.ScheduledJobSpec, s conversion.Scope) error { - return autoConvert_v2alpha1_ScheduledJobSpec_To_batch_ScheduledJobSpec(in, out, s) -} - -func autoConvert_batch_ScheduledJobSpec_To_v2alpha1_ScheduledJobSpec(in *batch.ScheduledJobSpec, out *ScheduledJobSpec, s conversion.Scope) error { - out.Schedule = in.Schedule - out.StartingDeadlineSeconds = in.StartingDeadlineSeconds - out.ConcurrencyPolicy = ConcurrencyPolicy(in.ConcurrencyPolicy) - out.Suspend = in.Suspend - if err := Convert_batch_JobTemplateSpec_To_v2alpha1_JobTemplateSpec(&in.JobTemplate, &out.JobTemplate, s); err != nil { - return err - } - return nil -} - -func Convert_batch_ScheduledJobSpec_To_v2alpha1_ScheduledJobSpec(in *batch.ScheduledJobSpec, out *ScheduledJobSpec, s conversion.Scope) error { - return autoConvert_batch_ScheduledJobSpec_To_v2alpha1_ScheduledJobSpec(in, out, s) -} - -func autoConvert_v2alpha1_ScheduledJobStatus_To_batch_ScheduledJobStatus(in *ScheduledJobStatus, out *batch.ScheduledJobStatus, s conversion.Scope) error { - if in.Active != nil { - in, out := &in.Active, &out.Active - *out = make([]api.ObjectReference, len(*in)) - for i := range *in { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { - return err - } - } - } else { - out.Active = nil - } - out.LastScheduleTime = in.LastScheduleTime - return nil -} - -func Convert_v2alpha1_ScheduledJobStatus_To_batch_ScheduledJobStatus(in *ScheduledJobStatus, out *batch.ScheduledJobStatus, s conversion.Scope) error { - return autoConvert_v2alpha1_ScheduledJobStatus_To_batch_ScheduledJobStatus(in, out, s) -} - -func autoConvert_batch_ScheduledJobStatus_To_v2alpha1_ScheduledJobStatus(in *batch.ScheduledJobStatus, out *ScheduledJobStatus, s conversion.Scope) error { - if in.Active != nil { - in, out := &in.Active, &out.Active - *out = make([]v1.ObjectReference, len(*in)) - for i := range *in { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&(*in)[i], &(*out)[i], 0); err != nil { - return err - } - } - } else { - out.Active = nil - } - out.LastScheduleTime = in.LastScheduleTime - return nil -} - -func Convert_batch_ScheduledJobStatus_To_v2alpha1_ScheduledJobStatus(in *batch.ScheduledJobStatus, out *ScheduledJobStatus, s conversion.Scope) error { - return autoConvert_batch_ScheduledJobStatus_To_v2alpha1_ScheduledJobStatus(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/deep_copy_generated.go deleted file mode 100644 index 92cb71ea0..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/deep_copy_generated.go +++ /dev/null @@ -1,324 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v2alpha1 - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_v2alpha1_Job, - DeepCopy_v2alpha1_JobCondition, - DeepCopy_v2alpha1_JobList, - DeepCopy_v2alpha1_JobSpec, - DeepCopy_v2alpha1_JobStatus, - DeepCopy_v2alpha1_JobTemplate, - DeepCopy_v2alpha1_JobTemplateSpec, - DeepCopy_v2alpha1_LabelSelector, - DeepCopy_v2alpha1_LabelSelectorRequirement, - DeepCopy_v2alpha1_ScheduledJob, - DeepCopy_v2alpha1_ScheduledJobList, - DeepCopy_v2alpha1_ScheduledJobSpec, - DeepCopy_v2alpha1_ScheduledJobStatus, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. - panic(err) - } -} - -func DeepCopy_v2alpha1_Job(in Job, out *Job, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v2alpha1_JobSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v2alpha1_JobStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v2alpha1_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error { - out.Type = in.Type - out.Status = in.Status - if err := unversioned.DeepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func DeepCopy_v2alpha1_JobList(in JobList, out *JobList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Job, len(in)) - for i := range in { - if err := DeepCopy_v2alpha1_Job(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v2alpha1_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error { - if in.Parallelism != nil { - in, out := in.Parallelism, &out.Parallelism - *out = new(int32) - **out = *in - } else { - out.Parallelism = nil - } - if in.Completions != nil { - in, out := in.Completions, &out.Completions - *out = new(int32) - **out = *in - } else { - out.Completions = nil - } - if in.ActiveDeadlineSeconds != nil { - in, out := in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - *out = new(int64) - **out = *in - } else { - out.ActiveDeadlineSeconds = nil - } - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = new(LabelSelector) - if err := 
DeepCopy_v2alpha1_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.Selector = nil - } - if in.ManualSelector != nil { - in, out := in.ManualSelector, &out.ManualSelector - *out = new(bool) - **out = *in - } else { - out.ManualSelector = nil - } - if err := v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v2alpha1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error { - if in.Conditions != nil { - in, out := in.Conditions, &out.Conditions - *out = make([]JobCondition, len(in)) - for i := range in { - if err := DeepCopy_v2alpha1_JobCondition(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - if in.StartTime != nil { - in, out := in.StartTime, &out.StartTime - *out = new(unversioned.Time) - if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { - return err - } - } else { - out.StartTime = nil - } - if in.CompletionTime != nil { - in, out := in.CompletionTime, &out.CompletionTime - *out = new(unversioned.Time) - if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { - return err - } - } else { - out.CompletionTime = nil - } - out.Active = in.Active - out.Succeeded = in.Succeeded - out.Failed = in.Failed - return nil -} - -func DeepCopy_v2alpha1_JobTemplate(in JobTemplate, out *JobTemplate, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v2alpha1_JobTemplateSpec(in.Template, &out.Template, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v2alpha1_JobTemplateSpec(in JobTemplateSpec, out *JobTemplateSpec, c *conversion.Cloner) error { - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v2alpha1_JobSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v2alpha1_LabelSelector(in LabelSelector, out *LabelSelector, c *conversion.Cloner) error { - if in.MatchLabels != nil { - in, out := in.MatchLabels, &out.MatchLabels - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.MatchLabels = nil - } - if in.MatchExpressions != nil { - in, out := in.MatchExpressions, &out.MatchExpressions - *out = make([]LabelSelectorRequirement, len(in)) - for i := range in { - if err := DeepCopy_v2alpha1_LabelSelectorRequirement(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.MatchExpressions = nil - } - return nil -} - -func DeepCopy_v2alpha1_LabelSelectorRequirement(in LabelSelectorRequirement, out *LabelSelectorRequirement, c *conversion.Cloner) error { - out.Key = in.Key - out.Operator = in.Operator - if in.Values != nil { - in, out := in.Values, &out.Values - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Values = nil - } - return nil -} - -func DeepCopy_v2alpha1_ScheduledJob(in ScheduledJob, out *ScheduledJob, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v2alpha1_ScheduledJobSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := 
DeepCopy_v2alpha1_ScheduledJobStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v2alpha1_ScheduledJobList(in ScheduledJobList, out *ScheduledJobList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ScheduledJob, len(in)) - for i := range in { - if err := DeepCopy_v2alpha1_ScheduledJob(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v2alpha1_ScheduledJobSpec(in ScheduledJobSpec, out *ScheduledJobSpec, c *conversion.Cloner) error { - out.Schedule = in.Schedule - if in.StartingDeadlineSeconds != nil { - in, out := in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds - *out = new(int64) - **out = *in - } else { - out.StartingDeadlineSeconds = nil - } - out.ConcurrencyPolicy = in.ConcurrencyPolicy - if in.Suspend != nil { - in, out := in.Suspend, &out.Suspend - *out = new(bool) - **out = *in - } else { - out.Suspend = nil - } - if err := DeepCopy_v2alpha1_JobTemplateSpec(in.JobTemplate, &out.JobTemplate, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v2alpha1_ScheduledJobStatus(in ScheduledJobStatus, out *ScheduledJobStatus, c *conversion.Cloner) error { - if in.Active != nil { - in, out := in.Active, &out.Active - *out = make([]v1.ObjectReference, len(in)) - for i := range in { - if err := v1.DeepCopy_v1_ObjectReference(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Active = nil - } - if in.LastScheduleTime != nil { - in, out := in.LastScheduleTime, &out.LastScheduleTime - *out = new(unversioned.Time) - if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { - return err - } - } else { - out.LastScheduleTime = nil - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults.go deleted file mode 100644 index 72da797c7..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/defaults.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2alpha1 - -import ( - "k8s.io/kubernetes/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) { - scheme.AddDefaultingFuncs( - SetDefaults_Job, - SetDefaults_ScheduledJob, - ) -} - -func SetDefaults_Job(obj *Job) { - // For a non-parallel job, you can leave both `.spec.completions` and - // `.spec.parallelism` unset. When both are unset, both are defaulted to 1. 
- if obj.Spec.Completions == nil && obj.Spec.Parallelism == nil { - obj.Spec.Completions = new(int32) - *obj.Spec.Completions = 1 - obj.Spec.Parallelism = new(int32) - *obj.Spec.Parallelism = 1 - } - if obj.Spec.Parallelism == nil { - obj.Spec.Parallelism = new(int32) - *obj.Spec.Parallelism = 1 - } -} - -func SetDefaults_ScheduledJob(obj *ScheduledJob) { - if obj.Spec.ConcurrencyPolicy == "" { - obj.Spec.ConcurrencyPolicy = AllowConcurrent - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go deleted file mode 100644 index 0e6b67b58..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +genconversion=true -package v2alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.pb.go deleted file mode 100644 index 17192c01c..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.pb.go +++ /dev/null @@ -1,3018 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto -// DO NOT EDIT! - -/* - Package v2alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto - - It has these top-level messages: - Job - JobCondition - JobList - JobSpec - JobStatus - JobTemplate - JobTemplateSpec - LabelSelector - LabelSelectorRequirement - ScheduledJob - ScheduledJobList - ScheduledJobSpec - ScheduledJobStatus -*/ -package v2alpha1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" -import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -func (m *Job) Reset() { *m = Job{} } -func (m *Job) String() string { return proto.CompactTextString(m) } -func (*Job) ProtoMessage() {} - -func (m *JobCondition) Reset() { *m = JobCondition{} } -func (m *JobCondition) String() string { return proto.CompactTextString(m) } -func (*JobCondition) ProtoMessage() {} - -func (m *JobList) Reset() { *m = JobList{} } -func (m *JobList) String() string { return proto.CompactTextString(m) } -func (*JobList) ProtoMessage() {} - -func (m *JobSpec) Reset() { *m = JobSpec{} } -func (m *JobSpec) String() string { return proto.CompactTextString(m) } -func (*JobSpec) ProtoMessage() {} - -func (m *JobStatus) Reset() { *m = JobStatus{} } -func (m *JobStatus) String() string { return proto.CompactTextString(m) } -func (*JobStatus) ProtoMessage() {} - -func (m *JobTemplate) Reset() { *m = JobTemplate{} } -func (m *JobTemplate) String() string { return proto.CompactTextString(m) } -func (*JobTemplate) ProtoMessage() {} - -func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } -func (m *JobTemplateSpec) String() string { return proto.CompactTextString(m) } -func (*JobTemplateSpec) ProtoMessage() {} - -func (m *LabelSelector) Reset() { *m = LabelSelector{} } -func (m *LabelSelector) String() string { return proto.CompactTextString(m) } -func (*LabelSelector) ProtoMessage() {} - -func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } -func (m *LabelSelectorRequirement) String() string { return proto.CompactTextString(m) } -func (*LabelSelectorRequirement) ProtoMessage() {} - -func (m *ScheduledJob) Reset() { *m = ScheduledJob{} } -func (m *ScheduledJob) String() string { return proto.CompactTextString(m) } -func (*ScheduledJob) ProtoMessage() {} - -func (m *ScheduledJobList) Reset() { *m = ScheduledJobList{} } -func (m *ScheduledJobList) String() string { return proto.CompactTextString(m) } -func (*ScheduledJobList) ProtoMessage() {} - -func (m *ScheduledJobSpec) Reset() { *m = ScheduledJobSpec{} } -func (m *ScheduledJobSpec) String() string { return proto.CompactTextString(m) } -func (*ScheduledJobSpec) ProtoMessage() {} - -func (m *ScheduledJobStatus) Reset() { *m = ScheduledJobStatus{} } -func (m *ScheduledJobStatus) String() string { return proto.CompactTextString(m) } -func (*ScheduledJobStatus) ProtoMessage() {} - -func init() { - proto.RegisterType((*Job)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.Job") - proto.RegisterType((*JobCondition)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobCondition") - proto.RegisterType((*JobList)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobList") - proto.RegisterType((*JobSpec)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobSpec") - proto.RegisterType((*JobStatus)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobStatus") - proto.RegisterType((*JobTemplate)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobTemplate") - proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.JobTemplateSpec") - proto.RegisterType((*LabelSelector)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.LabelSelector") - proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.LabelSelectorRequirement") - proto.RegisterType((*ScheduledJob)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.ScheduledJob") - proto.RegisterType((*ScheduledJobList)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.ScheduledJobList") - proto.RegisterType((*ScheduledJobSpec)(nil), 
"k8s.io.kubernetes.pkg.apis.batch.v2alpha1.ScheduledJobSpec") - proto.RegisterType((*ScheduledJobStatus)(nil), "k8s.io.kubernetes.pkg.apis.batch.v2alpha1.ScheduledJobStatus") -} -func (m *Job) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Job) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *JobCondition) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *JobCondition) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i += copy(data[i:], m.Status) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) - n4, err := m.LastProbeTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n4 - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n5, err := m.LastTransitionTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n5 - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - return i, nil -} - -func (m *JobList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *JobList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *JobSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *JobSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Parallelism != nil { - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.Parallelism)) - } - if m.Completions != nil { - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.Completions)) - } - if m.ActiveDeadlineSeconds != nil { - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, 
uint64(*m.ActiveDeadlineSeconds)) - } - if m.Selector != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n7, err := m.Selector.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.ManualSelector != nil { - data[i] = 0x28 - i++ - if *m.ManualSelector { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - } - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n8 - return i, nil -} - -func (m *JobStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *JobStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.StartTime != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size())) - n9, err := m.StartTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n9 - } - if m.CompletionTime != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.CompletionTime.Size())) - n10, err := m.CompletionTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n10 - } - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Active)) - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Succeeded)) - data[i] = 0x30 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Failed)) - return i, nil -} - -func (m *JobTemplate) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *JobTemplate) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n11, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n11 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n12, err := m.Template.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n12 - return i, nil -} - -func (m *JobTemplateSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *JobTemplateSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n13, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n13 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n14, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n14 - return i, nil -} - -func (m *LabelSelector) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LabelSelector) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.MatchLabels) > 0 { - for k := range 
m.MatchLabels { - data[i] = 0xa - i++ - v := m.MatchLabels[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - if len(m.MatchExpressions) > 0 { - for _, msg := range m.MatchExpressions { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *LabelSelectorRequirement) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LabelSelectorRequirement) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Key))) - i += copy(data[i:], m.Key) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) - i += copy(data[i:], m.Operator) - if len(m.Values) > 0 { - for _, s := range m.Values { - data[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - return i, nil -} - -func (m *ScheduledJob) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ScheduledJob) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n15, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n15 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n16, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n16 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n17, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n17 - return i, nil -} - -func (m *ScheduledJobList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ScheduledJobList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n18, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n18 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ScheduledJobSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ScheduledJobSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Schedule))) - i += copy(data[i:], m.Schedule) - if 
m.StartingDeadlineSeconds != nil { - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.StartingDeadlineSeconds)) - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ConcurrencyPolicy))) - i += copy(data[i:], m.ConcurrencyPolicy) - if m.Suspend != nil { - data[i] = 0x20 - i++ - if *m.Suspend { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - } - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(m.JobTemplate.Size())) - n19, err := m.JobTemplate.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n19 - return i, nil -} - -func (m *ScheduledJobStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ScheduledJobStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Active) > 0 { - for _, msg := range m.Active { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.LastScheduleTime != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastScheduleTime.Size())) - n20, err := m.LastScheduleTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n20 - } - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *Job) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *JobCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastProbeTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *JobList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *JobSpec) Size() (n int) { - var l int - _ = l - if m.Parallelism != nil { - n += 1 + sovGenerated(uint64(*m.Parallelism)) - } - if m.Completions != nil { - n += 1 + sovGenerated(uint64(*m.Completions)) - } - if m.ActiveDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if 
m.ManualSelector != nil { - n += 2 - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *JobStatus) Size() (n int) { - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.StartTime != nil { - l = m.StartTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CompletionTime != nil { - l = m.CompletionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 1 + sovGenerated(uint64(m.Active)) - n += 1 + sovGenerated(uint64(m.Succeeded)) - n += 1 + sovGenerated(uint64(m.Failed)) - return n -} - -func (m *JobTemplate) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *JobTemplateSpec) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *LabelSelector) Size() (n int) { - var l int - _ = l - if len(m.MatchLabels) > 0 { - for k, v := range m.MatchLabels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.MatchExpressions) > 0 { - for _, e := range m.MatchExpressions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *LabelSelectorRequirement) Size() (n int) { - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operator) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ScheduledJob) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ScheduledJobList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ScheduledJobSpec) Size() (n int) { - var l int - _ = l - l = len(m.Schedule) - n += 1 + l + sovGenerated(uint64(l)) - if m.StartingDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.StartingDeadlineSeconds)) - } - l = len(m.ConcurrencyPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.Suspend != nil { - n += 2 - } - l = m.JobTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ScheduledJobStatus) Size() (n int) { - var l int - _ = l - if len(m.Active) > 0 { - for _, e := range m.Active { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.LastScheduleTime != nil { - l = m.LastScheduleTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Job) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l 
{ - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Job: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Job: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobCondition) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = JobConditionType(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = 
string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Job{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Parallelism = &v - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Completions", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Completions = &v - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ActiveDeadlineSeconds = &v - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &LabelSelector{} - } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ManualSelector", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.ManualSelector = &b - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobStatus: wiretype end group for non-group") - } 
- if fieldNum <= 0 { - return fmt.Errorf("proto: JobStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, JobCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.StartTime == nil { - m.StartTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} - } - if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CompletionTime == nil { - m.CompletionTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} - } - if err := m.CompletionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) - } - m.Active = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Active |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) - } - m.Succeeded = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Succeeded |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) - } - m.Failed = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Failed |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - 
return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobTemplate) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobTemplateSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobTemplateSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelSelector) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - if m.MatchLabels == nil { - m.MatchLabels = make(map[string]string) - } - m.MatchLabels[mapkey] = mapvalue - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{}) - if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelSelectorRequirement) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if 
intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Operator = LabelSelectorOperator(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Values = append(m.Values, string(data[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScheduledJob) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScheduledJob: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScheduledJob: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated 
- } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScheduledJobList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScheduledJobList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScheduledJobList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ScheduledJob{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScheduledJobSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScheduledJobSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScheduledJobSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType 
!= 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schedule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Schedule = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartingDeadlineSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.StartingDeadlineSeconds = &v - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConcurrencyPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConcurrencyPolicy = ConcurrencyPolicy(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Suspend = &b - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JobTemplate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.JobTemplate.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScheduledJobStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: 
ScheduledJobStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScheduledJobStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Active = append(m.Active, k8s_io_kubernetes_pkg_api_v1.ObjectReference{}) - if err := m.Active[len(m.Active)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastScheduleTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LastScheduleTime == nil { - m.LastScheduleTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} - } - if err := m.LastScheduleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + 
next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto deleted file mode 100644 index 5098f5166..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/generated.proto +++ /dev/null @@ -1,254 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.batch.v2alpha1; - -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v2alpha1"; - -// Job represents the configuration of a single job. -message Job { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional JobSpec spec = 2; - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional JobStatus status = 3; -} - -// JobCondition describes current state of a job. -message JobCondition { - // Type of job condition, Complete or Failed. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time the condition was checked. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; - - // Last time the condition transit from one status to another. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; - - // (brief) reason for the condition's last transition. - optional string reason = 5; - - // Human readable message indicating details about last transition. - optional string message = 6; -} - -// JobList is a collection of jobs. -message JobList { - // Standard list metadata - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of Job. - repeated Job items = 2; -} - -// JobSpec describes how the job execution will look like. 
-message JobSpec { - // Parallelism specifies the maximum desired number of pods the job should - // run at any given time. The actual number of pods running in steady state will - // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), - // i.e. when the work left to do is less than max parallelism. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - optional int32 parallelism = 1; - - // Completions specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any - // pod signals the success of all pods, and allows parallelism to have any positive - // value. Setting to 1 means that parallelism is limited to 1 and the success of that - // pod signals the success of the job. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - optional int32 completions = 2; - - // Optional duration in seconds relative to the startTime that the job may be active - // before the system tries to terminate it; value must be positive integer - optional int64 activeDeadlineSeconds = 3; - - // Selector is a label query over pods that should match the pod count. - // Normally, the system sets this field for you. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - optional LabelSelector selector = 4; - - // ManualSelector controls generation of pod labels and pod selectors. - // Leave `manualSelector` unset unless you are certain what you are doing. - // When false or unset, the system pick labels unique to this job - // and appends those labels to the pod template. When true, - // the user is responsible for picking unique labels and specifying - // the selector. Failure to pick a unique label may cause this - // and other jobs to not function correctly. However, You may see - // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` - // API. - // More info: http://releases.k8s.io/release-1.3/docs/design/selector-generation.md - optional bool manualSelector = 5; - - // Template is the object that describes the pod that will be created when - // executing a job. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6; -} - -// JobStatus represents the current state of a Job. -message JobStatus { - // Conditions represent the latest available observations of an object's current state. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - repeated JobCondition conditions = 1; - - // StartTime represents time when the job was acknowledged by the Job Manager. - // It is not guaranteed to be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 2; - - // CompletionTime represents time when the job was completed. It is not guaranteed to - // be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - optional k8s.io.kubernetes.pkg.api.unversioned.Time completionTime = 3; - - // Active is the number of actively running pods. - optional int32 active = 4; - - // Succeeded is the number of pods which reached Phase Succeeded. - optional int32 succeeded = 5; - - // Failed is the number of pods which reached Phase Failed. 
- optional int32 failed = 6; -} - -// JobTemplate describes a template for creating copies of a predefined pod. -message JobTemplate { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Template defines jobs that will be created from this template - // http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional JobTemplateSpec template = 2; -} - -// JobTemplateSpec describes the data a Job should have when created from a template -message JobTemplateSpec { - // Standard object's metadata of the jobs created from this template. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior of the job. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional JobSpec spec = 2; -} - -// A label selector is a label query over a set of resources. The result of matchLabels and -// matchExpressions are ANDed. An empty label selector matches all objects. A null -// label selector matches no objects. -message LabelSelector { - // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - // map is equivalent to an element of matchExpressions, whose key field is "key", the - // operator is "In", and the values array contains only "value". The requirements are ANDed. - map<string, string> matchLabels = 1; - - // matchExpressions is a list of label selector requirements. The requirements are ANDed. - repeated LabelSelectorRequirement matchExpressions = 2; -} - -// A label selector requirement is a selector that contains values, a key, and an operator that -// relates the key and values. -message LabelSelectorRequirement { - // key is the label key that the selector applies to. - optional string key = 1; - - // operator represents a key's relationship to a set of values. - // Valid operators ard In, NotIn, Exists and DoesNotExist. - optional string operator = 2; - - // values is an array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. This array is replaced during a strategic - // merge patch. - repeated string values = 3; -} - -// ScheduledJob represents the configuration of a single scheduled job. -message ScheduledJob { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec is a structure defining the expected behavior of a job, including the schedule. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional ScheduledJobSpec spec = 2; - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional ScheduledJobStatus status = 3; -} - -// ScheduledJobList is a collection of scheduled jobs. -message ScheduledJobList { - // Standard list metadata - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of ScheduledJob. 
- repeated ScheduledJob items = 2; -} - -// ScheduledJobSpec describes how the job execution will look like and when it will actually run. -message ScheduledJobSpec { - // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - optional string schedule = 1; - - // Optional deadline in seconds for starting the job if it misses scheduled - // time for any reason. Missed jobs executions will be counted as failed ones. - optional int64 startingDeadlineSeconds = 2; - - // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. - optional string concurrencyPolicy = 3; - - // Suspend flag tells the controller to suspend subsequent executions, it does - // not apply to already started executions. Defaults to false. - optional bool suspend = 4; - - // JobTemplate is the object that describes the job that will be created when - // executing a ScheduledJob. - optional JobTemplateSpec jobTemplate = 5; -} - -// ScheduledJobStatus represents the current state of a Job. -message ScheduledJobStatus { - // Active holds pointers to currently running jobs. - repeated k8s.io.kubernetes.pkg.api.v1.ObjectReference active = 1; - - // LastScheduleTime keeps information of when was the last time the job was successfully scheduled. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastScheduleTime = 4; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/register.go deleted file mode 100644 index 00142f018..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/register.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2alpha1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" -) - -// GroupName is the group name use in this package -const GroupName = "batch" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v2alpha1"} - -func AddToScheme(scheme *runtime.Scheme) { - addKnownTypes(scheme) - addDefaultingFuncs(scheme) - addConversionFuncs(scheme) -} - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) { - scheme.AddKnownTypes(SchemeGroupVersion, - &Job{}, - &JobList{}, - &JobTemplate{}, - &ScheduledJob{}, - &ScheduledJobList{}, - &v1.ListOptions{}, - &v1.DeleteOptions{}, - ) - versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.generated.go deleted file mode 100644 index 341d1d217..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.generated.go +++ /dev/null @@ -1,5310 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v2alpha1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg4_resource "k8s.io/kubernetes/pkg/api/resource" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg2_v1 "k8s.io/kubernetes/pkg/api/v1" - pkg3_types "k8s.io/kubernetes/pkg/types" - pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg4_resource.Quantity - var v1 pkg1_unversioned.TypeMeta - var v2 pkg2_v1.ObjectMeta - var v3 pkg3_types.UID - var v4 pkg5_intstr.IntOrString - var v5 time.Time - _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 - } -} - -func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if 
false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Job) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = JobStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.Status = JobStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceJob(([]Job)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceJob(([]Job)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceJob((*[]Job)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - 
x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceJob((*[]Job)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobTemplate) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Template - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Template - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobTemplate) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobTemplate) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "template": - if r.TryDecodeAsNil() { - x.Template = JobTemplateSpec{} - } else { - yyv5 := &x.Template - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobTemplate) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv9 := &x.ObjectMeta - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = JobTemplateSpec{} - } else { - yyv10 := &x.Template - yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobTemplateSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobTemplateSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else 
{ - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobTemplateSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobTemplateSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv7 := &x.ObjectMeta - yyv7.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv8 := &x.Spec - yyv8.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Parallelism != nil - yyq2[1] = x.Completions != nil - yyq2[2] = x.ActiveDeadlineSeconds != nil - yyq2[3] = x.Selector != nil - yyq2[4] = x.ManualSelector != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Parallelism == nil { - r.EncodeNil() - } else { - yy4 := *x.Parallelism - yym5 := 
z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("parallelism")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Parallelism == nil { - r.EncodeNil() - } else { - yy6 := *x.Parallelism - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Completions == nil { - r.EncodeNil() - } else { - yy9 := *x.Completions - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("completions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Completions == nil { - r.EncodeNil() - } else { - yy11 := *x.Completions - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeInt(int64(yy11)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy14 := *x.ActiveDeadlineSeconds - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeInt(int64(yy14)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy16 := *x.ActiveDeadlineSeconds - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(yy16)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Selector == nil { - r.EncodeNil() - } else { - x.Selector.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - x.Selector.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.ManualSelector == nil { - r.EncodeNil() - } else { - yy22 := *x.ManualSelector - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeBool(bool(yy22)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("manualSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ManualSelector == nil { - r.EncodeNil() - } else { - yy24 := *x.ManualSelector - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeBool(bool(yy24)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy27 := &x.Template - yy27.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy29 := &x.Template - 
yy29.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "parallelism": - if r.TryDecodeAsNil() { - if x.Parallelism != nil { - x.Parallelism = nil - } - } else { - if x.Parallelism == nil { - x.Parallelism = new(int32) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) - } - } - case "completions": - if r.TryDecodeAsNil() { - if x.Completions != nil { - x.Completions = nil - } - } else { - if x.Completions == nil { - x.Completions = new(int32) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) - } - } - case "activeDeadlineSeconds": - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(LabelSelector) - } - x.Selector.CodecDecodeSelf(d) - } - case "manualSelector": - if r.TryDecodeAsNil() { - if x.ManualSelector != nil { - x.ManualSelector = nil - } - } else { - if x.ManualSelector == nil { - x.ManualSelector = new(bool) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*bool)(x.ManualSelector)) = r.DecodeBool() - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} - } else { - yyv13 := &x.Template - yyv13.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if 
yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Parallelism != nil { - x.Parallelism = nil - } - } else { - if x.Parallelism == nil { - x.Parallelism = new(int32) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Completions != nil { - x.Completions = nil - } - } else { - if x.Completions == nil { - x.Completions = new(int32) - } - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(LabelSelector) - } - x.Selector.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ManualSelector != nil { - x.ManualSelector = nil - } - } else { - if x.ManualSelector == nil { - x.ManualSelector = new(bool) - } - yym23 := z.DecBinary() - _ = yym23 - if false { - } else { - *((*bool)(x.ManualSelector)) = r.DecodeBool() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} - } else { - yyv24 := &x.Template - yyv24.CodecDecodeSelf(d) - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, 
yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Conditions) != 0 - yyq2[1] = x.StartTime != nil - yyq2[2] = x.CompletionTime != nil - yyq2[3] = x.Active != 0 - yyq2[4] = x.Succeeded != 0 - yyq2[5] = x.Failed != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.StartTime == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym7 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.StartTime == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym8 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.CompletionTime == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { - } else if yym10 { - z.EncBinaryMarshal(x.CompletionTime) - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(x.CompletionTime) - } else { - z.EncFallback(x.CompletionTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("completionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CompletionTime == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { - } else if yym11 { - z.EncBinaryMarshal(x.CompletionTime) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(x.CompletionTime) - } else { - z.EncFallback(x.CompletionTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.Active)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("active")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.Active)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.Succeeded)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("succeeded")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(x.Succeeded)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(x.Failed)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("failed")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(x.Failed)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv4 := &x.Conditions - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceJobCondition((*[]JobCondition)(yyv4), d) - } - } - case "startTime": - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg1_unversioned.Time) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym7 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - case "completionTime": - if r.TryDecodeAsNil() { - if 
x.CompletionTime != nil { - x.CompletionTime = nil - } - } else { - if x.CompletionTime == nil { - x.CompletionTime = new(pkg1_unversioned.Time) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { - } else if yym9 { - z.DecBinaryUnmarshal(x.CompletionTime) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.CompletionTime) - } else { - z.DecFallback(x.CompletionTime, false) - } - } - case "active": - if r.TryDecodeAsNil() { - x.Active = 0 - } else { - x.Active = int32(r.DecodeInt(32)) - } - case "succeeded": - if r.TryDecodeAsNil() { - x.Succeeded = 0 - } else { - x.Succeeded = int32(r.DecodeInt(32)) - } - case "failed": - if r.TryDecodeAsNil() { - x.Failed = 0 - } else { - x.Failed = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv14 := &x.Conditions - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - h.decSliceJobCondition((*[]JobCondition)(yyv14), d) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg1_unversioned.Time) - } - yym17 := z.DecBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym17 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym17 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CompletionTime != nil { - x.CompletionTime = nil - } - } else { - if x.CompletionTime == nil { - x.CompletionTime = new(pkg1_unversioned.Time) - } - yym19 := z.DecBinary() - _ = yym19 - if false { - } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { - } else if yym19 { - z.DecBinaryUnmarshal(x.CompletionTime) - } else if !yym19 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.CompletionTime) - } else { - z.DecFallback(x.CompletionTime, false) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Active = 0 - } else { - x.Active = int32(r.DecodeInt(32)) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Succeeded = 0 - } else { - x.Succeeded = int32(r.DecodeInt(32)) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Failed = 0 - } else { - x.Failed = int32(r.DecodeInt(32)) - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x JobConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *JobConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = true - yyq2[3] = true - yyq2[4] = x.Reason != "" - yyq2[5] = x.Message != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yysf7 := &x.Status - yysf7.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yysf8 := &x.Status - yysf8.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.LastProbeTime - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else if yym11 { - z.EncBinaryMarshal(yy10) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(yy10) - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.LastProbeTime - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && 
z.EncExt(yy12) { - } else if yym13 { - z.EncBinaryMarshal(yy12) - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.LastTransitionTime - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(yy15) { - } else if yym16 { - z.EncBinaryMarshal(yy15) - } else if !yym16 && z.IsJSONHandle() { - z.EncJSONMarshal(yy15) - } else { - z.EncFallback(yy15) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.LastTransitionTime - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(yy17) { - } else if yym18 { - z.EncBinaryMarshal(yy17) - } else if !yym18 && z.IsJSONHandle() { - z.EncJSONMarshal(yy17) - } else { - z.EncFallback(yy17) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for 
yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = JobConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg2_v1.ConditionStatus(r.DecodeString()) - } - case "lastProbeTime": - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg1_unversioned.Time{} - } else { - yyv6 := &x.LastProbeTime - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if yym7 { - z.DecBinaryUnmarshal(yyv6) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} - } else { - yyv8 := &x.LastTransitionTime - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else if yym9 { - z.DecBinaryUnmarshal(yyv8) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv8) - } else { - z.DecFallback(yyv8, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = JobConditionType(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg2_v1.ConditionStatus(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg1_unversioned.Time{} - } else { - yyv15 := &x.LastProbeTime - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.DecExt(yyv15) { - } else if yym16 { - z.DecBinaryUnmarshal(yyv15) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv15) - } else { - z.DecFallback(yyv15, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = 
pkg1_unversioned.Time{} - } else { - yyv17 := &x.LastTransitionTime - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else if yym18 { - z.DecBinaryUnmarshal(yyv17) - } else if !yym18 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv17) - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ScheduledJob) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else 
{ - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScheduledJob) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScheduledJob) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ScheduledJobSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ScheduledJobStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScheduledJob) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - 
var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ScheduledJobSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ScheduledJobStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ScheduledJobList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } 
else { - h.encSliceScheduledJob(([]ScheduledJob)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceScheduledJob(([]ScheduledJob)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScheduledJobList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScheduledJobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - 
} else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceScheduledJob((*[]ScheduledJob)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScheduledJobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceScheduledJob((*[]ScheduledJob)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ScheduledJobSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.StartingDeadlineSeconds != nil - yyq2[2] = x.ConcurrencyPolicy != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Schedule)) - } - } 
else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("schedule")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Schedule)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.StartingDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy7 := *x.StartingDeadlineSeconds - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(yy7)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startingDeadlineSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.StartingDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy9 := *x.StartingDeadlineSeconds - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - x.ConcurrencyPolicy.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrencyPolicy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.ConcurrencyPolicy.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Suspend == nil { - r.EncodeNil() - } else { - yy15 := *x.Suspend - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeBool(bool(yy15)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("suspend")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Suspend == nil { - r.EncodeNil() - } else { - yy17 := *x.Suspend - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeBool(bool(yy17)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy20 := &x.JobTemplate - yy20.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("jobTemplate")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy22 := &x.JobTemplate - yy22.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScheduledJobSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScheduledJobSpec) codecDecodeSelfFromMap(l int, d 
*codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "schedule": - if r.TryDecodeAsNil() { - x.Schedule = "" - } else { - x.Schedule = string(r.DecodeString()) - } - case "startingDeadlineSeconds": - if r.TryDecodeAsNil() { - if x.StartingDeadlineSeconds != nil { - x.StartingDeadlineSeconds = nil - } - } else { - if x.StartingDeadlineSeconds == nil { - x.StartingDeadlineSeconds = new(int64) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - case "concurrencyPolicy": - if r.TryDecodeAsNil() { - x.ConcurrencyPolicy = "" - } else { - x.ConcurrencyPolicy = ConcurrencyPolicy(r.DecodeString()) - } - case "suspend": - if r.TryDecodeAsNil() { - if x.Suspend != nil { - x.Suspend = nil - } - } else { - if x.Suspend == nil { - x.Suspend = new(bool) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*bool)(x.Suspend)) = r.DecodeBool() - } - } - case "jobTemplate": - if r.TryDecodeAsNil() { - x.JobTemplate = JobTemplateSpec{} - } else { - yyv10 := &x.JobTemplate - yyv10.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScheduledJobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Schedule = "" - } else { - x.Schedule = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.StartingDeadlineSeconds != nil { - x.StartingDeadlineSeconds = nil - } - } else { - if x.StartingDeadlineSeconds == nil { - x.StartingDeadlineSeconds = new(int64) - } - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*int64)(x.StartingDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrencyPolicy = "" - } else { - x.ConcurrencyPolicy = ConcurrencyPolicy(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Suspend != nil { - x.Suspend = nil - } - } else 
{ - if x.Suspend == nil { - x.Suspend = new(bool) - } - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*bool)(x.Suspend)) = r.DecodeBool() - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.JobTemplate = JobTemplateSpec{} - } else { - yyv18 := &x.JobTemplate - yyv18.CodecDecodeSelf(d) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ConcurrencyPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ConcurrencyPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ScheduledJobStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Active) != 0 - yyq2[1] = x.LastScheduleTime != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Active == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSlicev1_ObjectReference(([]pkg2_v1.ObjectReference)(x.Active), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("active")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Active == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSlicev1_ObjectReference(([]pkg2_v1.ObjectReference)(x.Active), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.LastScheduleTime == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) { - } else if yym7 { - z.EncBinaryMarshal(x.LastScheduleTime) - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScheduleTime) - } else { - z.EncFallback(x.LastScheduleTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastScheduleTime")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LastScheduleTime == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScheduleTime) { - } else if yym8 { - z.EncBinaryMarshal(x.LastScheduleTime) - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScheduleTime) - } else { - z.EncFallback(x.LastScheduleTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScheduledJobStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScheduledJobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "active": - if r.TryDecodeAsNil() { - x.Active = nil - } else { - yyv4 := &x.Active - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSlicev1_ObjectReference((*[]pkg2_v1.ObjectReference)(yyv4), d) - } - } - case "lastScheduleTime": - if r.TryDecodeAsNil() { - if x.LastScheduleTime != nil { - x.LastScheduleTime = nil - } - } else { - if x.LastScheduleTime == nil { - x.LastScheduleTime = new(pkg1_unversioned.Time) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) { - } else if yym7 { - z.DecBinaryUnmarshal(x.LastScheduleTime) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScheduleTime) - } else { - z.DecFallback(x.LastScheduleTime, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScheduledJobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Active = nil - } else { - yyv9 := &x.Active - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - 
h.decSlicev1_ObjectReference((*[]pkg2_v1.ObjectReference)(yyv9), d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LastScheduleTime != nil { - x.LastScheduleTime = nil - } - } else { - if x.LastScheduleTime == nil { - x.LastScheduleTime = new(pkg1_unversioned.Time) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(x.LastScheduleTime) { - } else if yym12 { - z.DecBinaryUnmarshal(x.LastScheduleTime) - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScheduleTime) - } else { - z.DecFallback(x.LastScheduleTime, false) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LabelSelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.MatchLabels) != 0 - yyq2[1] = len(x.MatchExpressions) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.MatchLabels == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncMapStringStringV(x.MatchLabels, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("matchLabels")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MatchLabels == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncMapStringStringV(x.MatchLabels, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.MatchExpressions == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceLabelSelectorRequirement(([]LabelSelectorRequirement)(x.MatchExpressions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("matchExpressions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MatchExpressions == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceLabelSelectorRequirement(([]LabelSelectorRequirement)(x.MatchExpressions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LabelSelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LabelSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "matchLabels": - if r.TryDecodeAsNil() { - x.MatchLabels = nil - } else { - yyv4 := &x.MatchLabels - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecMapStringStringX(yyv4, false, d) - } - } - case "matchExpressions": - if r.TryDecodeAsNil() { - x.MatchExpressions = nil - } else { - yyv6 := &x.MatchExpressions - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceLabelSelectorRequirement((*[]LabelSelectorRequirement)(yyv6), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LabelSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MatchLabels = nil - } else { - yyv9 := &x.MatchLabels - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - z.F.DecMapStringStringX(yyv9, false, d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MatchExpressions = nil - } else { - yyv11 := &x.MatchExpressions - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceLabelSelectorRequirement((*[]LabelSelectorRequirement)(yyv11), d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LabelSelectorRequirement) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false 
{ - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = len(x.Values) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Operator.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("operator")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Operator.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Values == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - z.F.EncSliceStringV(x.Values, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("values")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Values == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - z.F.EncSliceStringV(x.Values, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LabelSelectorRequirement) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LabelSelectorRequirement) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "key": - if r.TryDecodeAsNil() { - 
x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - case "operator": - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - x.Operator = LabelSelectorOperator(r.DecodeString()) - } - case "values": - if r.TryDecodeAsNil() { - x.Values = nil - } else { - yyv6 := &x.Values - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecSliceStringX(yyv6, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LabelSelectorRequirement) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - x.Operator = LabelSelectorOperator(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Values = nil - } else { - yyv11 := &x.Values - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - z.F.DecSliceStringX(yyv11, false, d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x LabelSelectorOperator) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *LabelSelectorOperator) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Job{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 
int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 792) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Job, yyrl1) - } - } else { - yyv1 = make([]Job, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Job{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Job{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Job{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Job{}) // var yyz1 Job - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Job{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Job{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []JobCondition{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]JobCondition, yyrl1) - } - } else { - yyv1 = make([]JobCondition, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = JobCondition{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, JobCondition{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = JobCondition{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, JobCondition{}) // var yyz1 JobCondition - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = JobCondition{} - } else { - yyv4 := 
&yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []JobCondition{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceScheduledJob(v []ScheduledJob, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceScheduledJob(v *[]ScheduledJob, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ScheduledJob{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 1024) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ScheduledJob, yyrl1) - } - } else { - yyv1 = make([]ScheduledJob, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ScheduledJob{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ScheduledJob{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ScheduledJob{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ScheduledJob{}) // var yyz1 ScheduledJob - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ScheduledJob{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ScheduledJob{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicev1_ObjectReference(v []pkg2_v1.ObjectReference, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicev1_ObjectReference(v *[]pkg2_v1.ObjectReference, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []pkg2_v1.ObjectReference{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool 
- _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]pkg2_v1.ObjectReference, yyrl1) - } - } else { - yyv1 = make([]pkg2_v1.ObjectReference, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg2_v1.ObjectReference{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, pkg2_v1.ObjectReference{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg2_v1.ObjectReference{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, pkg2_v1.ObjectReference{}) // var yyz1 pkg2_v1.ObjectReference - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = pkg2_v1.ObjectReference{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []pkg2_v1.ObjectReference{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceLabelSelectorRequirement(v []LabelSelectorRequirement, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceLabelSelectorRequirement(v *[]LabelSelectorRequirement, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []LabelSelectorRequirement{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]LabelSelectorRequirement, yyrl1) - } - } else { - yyv1 = make([]LabelSelectorRequirement, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LabelSelectorRequirement{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, LabelSelectorRequirement{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LabelSelectorRequirement{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 
0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, LabelSelectorRequirement{}) // var yyz1 LabelSelectorRequirement - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = LabelSelectorRequirement{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []LabelSelectorRequirement{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go deleted file mode 100644 index 568f51712..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types.go +++ /dev/null @@ -1,283 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2alpha1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" -) - -// Job represents the configuration of a single job. -type Job struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// JobList is a collection of jobs. -type JobList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of Job. - Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// JobTemplate describes a template for creating copies of a predefined pod. -type JobTemplate struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. 
- // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Template defines jobs that will be created from this template - // http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` -} - -// JobTemplateSpec describes the data a Job should have when created from a template -type JobTemplateSpec struct { - // Standard object's metadata of the jobs created from this template. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired behavior of the job. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// JobSpec describes how the job execution will look like. -type JobSpec struct { - - // Parallelism specifies the maximum desired number of pods the job should - // run at any given time. The actual number of pods running in steady state will - // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), - // i.e. when the work left to do is less than max parallelism. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` - - // Completions specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any - // pod signals the success of all pods, and allows parallelism to have any positive - // value. Setting to 1 means that parallelism is limited to 1 and the success of that - // pod signals the success of the job. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"` - - // Optional duration in seconds relative to the startTime that the job may be active - // before the system tries to terminate it; value must be positive integer - ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"` - - // Selector is a label query over pods that should match the pod count. - // Normally, the system sets this field for you. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` - - // ManualSelector controls generation of pod labels and pod selectors. - // Leave `manualSelector` unset unless you are certain what you are doing. - // When false or unset, the system pick labels unique to this job - // and appends those labels to the pod template. When true, - // the user is responsible for picking unique labels and specifying - // the selector. Failure to pick a unique label may cause this - // and other jobs to not function correctly. However, You may see - // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` - // API. 
- // More info: http://releases.k8s.io/release-1.3/docs/design/selector-generation.md - ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` - - // Template is the object that describes the pod that will be created when - // executing a job. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` -} - -// JobStatus represents the current state of a Job. -type JobStatus struct { - - // Conditions represent the latest available observations of an object's current state. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` - - // StartTime represents time when the job was acknowledged by the Job Manager. - // It is not guaranteed to be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - StartTime *unversioned.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` - - // CompletionTime represents time when the job was completed. It is not guaranteed to - // be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - CompletionTime *unversioned.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` - - // Active is the number of actively running pods. - Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` - - // Succeeded is the number of pods which reached Phase Succeeded. - Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"` - - // Failed is the number of pods which reached Phase Failed. - Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` -} - -type JobConditionType string - -// These are valid conditions of a job. -const ( - // JobComplete means the job has completed its execution. - JobComplete JobConditionType = "Complete" - // JobFailed means the job has failed its execution. - JobFailed JobConditionType = "Failed" -) - -// JobCondition describes current state of a job. -type JobCondition struct { - // Type of job condition, Complete or Failed. - Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` - // Last time the condition was checked. - LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` - // Last time the condition transit from one status to another. - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` - // (brief) reason for the condition's last transition. - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - // Human readable message indicating details about last transition. - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` -} - -// ScheduledJob represents the configuration of a single scheduled job. -type ScheduledJob struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. 
- // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec is a structure defining the expected behavior of a job, including the schedule. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Spec ScheduledJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Status ScheduledJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// ScheduledJobList is a collection of scheduled jobs. -type ScheduledJobList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of ScheduledJob. - Items []ScheduledJob `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// ScheduledJobSpec describes how the job execution will look like and when it will actually run. -type ScheduledJobSpec struct { - - // Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"` - - // Optional deadline in seconds for starting the job if it misses scheduled - // time for any reason. Missed jobs executions will be counted as failed ones. - StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"` - - // ConcurrencyPolicy specifies how to treat concurrent executions of a Job. - ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` - - // Suspend flag tells the controller to suspend subsequent executions, it does - // not apply to already started executions. Defaults to false. - Suspend *bool `json:"suspend" protobuf:"varint,4,opt,name=suspend"` - - // JobTemplate is the object that describes the job that will be created when - // executing a ScheduledJob. - JobTemplate JobTemplateSpec `json:"jobTemplate" protobuf:"bytes,5,opt,name=jobTemplate"` -} - -// ConcurrencyPolicy describes how the job will be handled. -// Only one of the following concurrent policies may be specified. -// If none of the following policies is specified, the default one -// is AllowConcurrent. -type ConcurrencyPolicy string - -const ( - // AllowConcurrent allows ScheduledJobs to run concurrently. - AllowConcurrent ConcurrencyPolicy = "Allow" - - // ForbidConcurrent forbids concurrent runs, skipping next run if previous - // hasn't finished yet. - ForbidConcurrent ConcurrencyPolicy = "Forbid" - - // ReplaceConcurrent cancels currently running job and replaces it with a new one. - ReplaceConcurrent ConcurrencyPolicy = "Replace" -) - -// ScheduledJobStatus represents the current state of a Job. -type ScheduledJobStatus struct { - // Active holds pointers to currently running jobs. - Active []v1.ObjectReference `json:"active,omitempty" protobuf:"bytes,1,rep,name=active"` - - // LastScheduleTime keeps information of when was the last time the job was successfully scheduled. 
- LastScheduleTime *unversioned.Time `json:"lastScheduleTime,omitempty" protobuf:"bytes,4,opt,name=lastScheduleTime"` -} - -// A label selector is a label query over a set of resources. The result of matchLabels and -// matchExpressions are ANDed. An empty label selector matches all objects. A null -// label selector matches no objects. -type LabelSelector struct { - // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - // map is equivalent to an element of matchExpressions, whose key field is "key", the - // operator is "In", and the values array contains only "value". The requirements are ANDed. - MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"` - // matchExpressions is a list of label selector requirements. The requirements are ANDed. - MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"` -} - -// A label selector requirement is a selector that contains values, a key, and an operator that -// relates the key and values. -type LabelSelectorRequirement struct { - // key is the label key that the selector applies to. - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` - // operator represents a key's relationship to a set of values. - // Valid operators ard In, NotIn, Exists and DoesNotExist. - Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"` - // values is an array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. This array is replaced during a strategic - // merge patch. - Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` -} - -// A label selector operator is the set of operators that can be used in a selector requirement. -type LabelSelectorOperator string - -const ( - LabelSelectorOpIn LabelSelectorOperator = "In" - LabelSelectorOpNotIn LabelSelectorOperator = "NotIn" - LabelSelectorOpExists LabelSelectorOperator = "Exists" - LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go deleted file mode 100644 index 95b86d0de..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... 
|| TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_Job = map[string]string{ - "": "Job represents the configuration of a single job.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", -} - -func (Job) SwaggerDoc() map[string]string { - return map_Job -} - -var map_JobCondition = map[string]string{ - "": "JobCondition describes current state of a job.", - "type": "Type of job condition, Complete or Failed.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastProbeTime": "Last time the condition was checked.", - "lastTransitionTime": "Last time the condition transit from one status to another.", - "reason": "(brief) reason for the condition's last transition.", - "message": "Human readable message indicating details about last transition.", -} - -func (JobCondition) SwaggerDoc() map[string]string { - return map_JobCondition -} - -var map_JobList = map[string]string{ - "": "JobList is a collection of jobs.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "items": "Items is the list of Job.", -} - -func (JobList) SwaggerDoc() map[string]string { - return map_JobList -} - -var map_JobSpec = map[string]string{ - "": "JobSpec describes how the job execution will look like.", - "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md", - "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md", - "activeDeadlineSeconds": "Optional duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer", - "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors", - "manualSelector": "ManualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. 
Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: http://releases.k8s.io/release-1.3/docs/design/selector-generation.md", - "template": "Template is the object that describes the pod that will be created when executing a job. More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md", -} - -func (JobSpec) SwaggerDoc() map[string]string { - return map_JobSpec -} - -var map_JobStatus = map[string]string{ - "": "JobStatus represents the current state of a Job.", - "conditions": "Conditions represent the latest available observations of an object's current state. More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md", - "startTime": "StartTime represents time when the job was acknowledged by the Job Manager. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", - "completionTime": "CompletionTime represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", - "active": "Active is the number of actively running pods.", - "succeeded": "Succeeded is the number of pods which reached Phase Succeeded.", - "failed": "Failed is the number of pods which reached Phase Failed.", -} - -func (JobStatus) SwaggerDoc() map[string]string { - return map_JobStatus -} - -var map_JobTemplate = map[string]string{ - "": "JobTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "template": "Template defines jobs that will be created from this template http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", -} - -func (JobTemplate) SwaggerDoc() map[string]string { - return map_JobTemplate -} - -var map_JobTemplateSpec = map[string]string{ - "": "JobTemplateSpec describes the data a Job should have when created from a template", - "metadata": "Standard object's metadata of the jobs created from this template. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the job. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", -} - -func (JobTemplateSpec) SwaggerDoc() map[string]string { - return map_JobTemplateSpec -} - -var map_LabelSelector = map[string]string{ - "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", - "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", - "matchExpressions": "matchExpressions is a list of label selector requirements. 
The requirements are ANDed.", -} - -func (LabelSelector) SwaggerDoc() map[string]string { - return map_LabelSelector -} - -var map_LabelSelectorRequirement = map[string]string{ - "": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", - "key": "key is the label key that the selector applies to.", - "operator": "operator represents a key's relationship to a set of values. Valid operators ard In, NotIn, Exists and DoesNotExist.", - "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", -} - -func (LabelSelectorRequirement) SwaggerDoc() map[string]string { - return map_LabelSelectorRequirement -} - -var map_ScheduledJob = map[string]string{ - "": "ScheduledJob represents the configuration of a single scheduled job.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "Spec is a structure defining the expected behavior of a job, including the schedule. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", -} - -func (ScheduledJob) SwaggerDoc() map[string]string { - return map_ScheduledJob -} - -var map_ScheduledJobList = map[string]string{ - "": "ScheduledJobList is a collection of scheduled jobs.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "items": "Items is the list of ScheduledJob.", -} - -func (ScheduledJobList) SwaggerDoc() map[string]string { - return map_ScheduledJobList -} - -var map_ScheduledJobSpec = map[string]string{ - "": "ScheduledJobSpec describes how the job execution will look like and when it will actually run.", - "schedule": "Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", - "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "ConcurrencyPolicy specifies how to treat concurrent executions of a Job.", - "suspend": "Suspend flag tells the controller to suspend subsequent executions, it does not apply to already started executions. 
Defaults to false.", - "jobTemplate": "JobTemplate is the object that describes the job that will be created when executing a ScheduledJob.", -} - -func (ScheduledJobSpec) SwaggerDoc() map[string]string { - return map_ScheduledJobSpec -} - -var map_ScheduledJobStatus = map[string]string{ - "": "ScheduledJobStatus represents the current state of a Job.", - "active": "Active holds pointers to currently running jobs.", - "lastScheduleTime": "LastScheduleTime keeps information of when was the last time the job was successfully scheduled.", -} - -func (ScheduledJobStatus) SwaggerDoc() map[string]string { - return map_ScheduledJobStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/deep_copy_generated.go deleted file mode 100644 index f80c22e9b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/deep_copy_generated.go +++ /dev/null @@ -1,374 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package componentconfig - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_componentconfig_IPVar, - DeepCopy_componentconfig_KubeControllerManagerConfiguration, - DeepCopy_componentconfig_KubeProxyConfiguration, - DeepCopy_componentconfig_KubeSchedulerConfiguration, - DeepCopy_componentconfig_KubeletConfiguration, - DeepCopy_componentconfig_LeaderElectionConfiguration, - DeepCopy_componentconfig_PersistentVolumeRecyclerConfiguration, - DeepCopy_componentconfig_PortRangeVar, - DeepCopy_componentconfig_VolumeConfiguration, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. 
- panic(err) - } -} - -func DeepCopy_componentconfig_IPVar(in IPVar, out *IPVar, c *conversion.Cloner) error { - if in.Val != nil { - in, out := in.Val, &out.Val - *out = new(string) - **out = *in - } else { - out.Val = nil - } - return nil -} - -func DeepCopy_componentconfig_KubeControllerManagerConfiguration(in KubeControllerManagerConfiguration, out *KubeControllerManagerConfiguration, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Port = in.Port - out.Address = in.Address - out.CloudProvider = in.CloudProvider - out.CloudConfigFile = in.CloudConfigFile - out.ConcurrentEndpointSyncs = in.ConcurrentEndpointSyncs - out.ConcurrentRSSyncs = in.ConcurrentRSSyncs - out.ConcurrentRCSyncs = in.ConcurrentRCSyncs - out.ConcurrentResourceQuotaSyncs = in.ConcurrentResourceQuotaSyncs - out.ConcurrentDeploymentSyncs = in.ConcurrentDeploymentSyncs - out.ConcurrentDaemonSetSyncs = in.ConcurrentDaemonSetSyncs - out.ConcurrentJobSyncs = in.ConcurrentJobSyncs - out.ConcurrentNamespaceSyncs = in.ConcurrentNamespaceSyncs - out.LookupCacheSizeForRC = in.LookupCacheSizeForRC - out.LookupCacheSizeForRS = in.LookupCacheSizeForRS - out.LookupCacheSizeForDaemonSet = in.LookupCacheSizeForDaemonSet - if err := unversioned.DeepCopy_unversioned_Duration(in.ServiceSyncPeriod, &out.ServiceSyncPeriod, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Duration(in.NodeSyncPeriod, &out.NodeSyncPeriod, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Duration(in.ResourceQuotaSyncPeriod, &out.ResourceQuotaSyncPeriod, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Duration(in.NamespaceSyncPeriod, &out.NamespaceSyncPeriod, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Duration(in.PVClaimBinderSyncPeriod, &out.PVClaimBinderSyncPeriod, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Duration(in.MinResyncPeriod, &out.MinResyncPeriod, c); err != nil { - return err - } - out.TerminatedPodGCThreshold = in.TerminatedPodGCThreshold - if err := unversioned.DeepCopy_unversioned_Duration(in.HorizontalPodAutoscalerSyncPeriod, &out.HorizontalPodAutoscalerSyncPeriod, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Duration(in.DeploymentControllerSyncPeriod, &out.DeploymentControllerSyncPeriod, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Duration(in.PodEvictionTimeout, &out.PodEvictionTimeout, c); err != nil { - return err - } - out.DeletingPodsQps = in.DeletingPodsQps - out.DeletingPodsBurst = in.DeletingPodsBurst - if err := unversioned.DeepCopy_unversioned_Duration(in.NodeMonitorGracePeriod, &out.NodeMonitorGracePeriod, c); err != nil { - return err - } - out.RegisterRetryCount = in.RegisterRetryCount - if err := unversioned.DeepCopy_unversioned_Duration(in.NodeStartupGracePeriod, &out.NodeStartupGracePeriod, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Duration(in.NodeMonitorPeriod, &out.NodeMonitorPeriod, c); err != nil { - return err - } - out.ServiceAccountKeyFile = in.ServiceAccountKeyFile - out.EnableProfiling = in.EnableProfiling - out.ClusterName = in.ClusterName - out.ClusterCIDR = in.ClusterCIDR - out.ServiceCIDR = in.ServiceCIDR - out.NodeCIDRMaskSize = in.NodeCIDRMaskSize - out.AllocateNodeCIDRs = in.AllocateNodeCIDRs - out.ConfigureCloudRoutes = in.ConfigureCloudRoutes - 
out.RootCAFile = in.RootCAFile - out.ContentType = in.ContentType - out.KubeAPIQPS = in.KubeAPIQPS - out.KubeAPIBurst = in.KubeAPIBurst - if err := DeepCopy_componentconfig_LeaderElectionConfiguration(in.LeaderElection, &out.LeaderElection, c); err != nil { - return err - } - if err := DeepCopy_componentconfig_VolumeConfiguration(in.VolumeConfiguration, &out.VolumeConfiguration, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Duration(in.ControllerStartInterval, &out.ControllerStartInterval, c); err != nil { - return err - } - out.EnableGarbageCollector = in.EnableGarbageCollector - return nil -} - -func DeepCopy_componentconfig_KubeProxyConfiguration(in KubeProxyConfiguration, out *KubeProxyConfiguration, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.BindAddress = in.BindAddress - out.ClusterCIDR = in.ClusterCIDR - out.HealthzBindAddress = in.HealthzBindAddress - out.HealthzPort = in.HealthzPort - out.HostnameOverride = in.HostnameOverride - if in.IPTablesMasqueradeBit != nil { - in, out := in.IPTablesMasqueradeBit, &out.IPTablesMasqueradeBit - *out = new(int32) - **out = *in - } else { - out.IPTablesMasqueradeBit = nil - } - if err := unversioned.DeepCopy_unversioned_Duration(in.IPTablesSyncPeriod, &out.IPTablesSyncPeriod, c); err != nil { - return err - } - out.KubeconfigPath = in.KubeconfigPath - out.MasqueradeAll = in.MasqueradeAll - out.Master = in.Master - if in.OOMScoreAdj != nil { - in, out := in.OOMScoreAdj, &out.OOMScoreAdj - *out = new(int32) - **out = *in - } else { - out.OOMScoreAdj = nil - } - out.Mode = in.Mode - out.PortRange = in.PortRange - out.ResourceContainer = in.ResourceContainer - if err := unversioned.DeepCopy_unversioned_Duration(in.UDPIdleTimeout, &out.UDPIdleTimeout, c); err != nil { - return err - } - out.ConntrackMax = in.ConntrackMax - out.ConntrackMaxPerCore = in.ConntrackMaxPerCore - if err := unversioned.DeepCopy_unversioned_Duration(in.ConntrackTCPEstablishedTimeout, &out.ConntrackTCPEstablishedTimeout, c); err != nil { - return err - } - return nil -} - -func DeepCopy_componentconfig_KubeSchedulerConfiguration(in KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Port = in.Port - out.Address = in.Address - out.AlgorithmProvider = in.AlgorithmProvider - out.PolicyConfigFile = in.PolicyConfigFile - out.EnableProfiling = in.EnableProfiling - out.ContentType = in.ContentType - out.KubeAPIQPS = in.KubeAPIQPS - out.KubeAPIBurst = in.KubeAPIBurst - out.SchedulerName = in.SchedulerName - out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight - out.FailureDomains = in.FailureDomains - if err := DeepCopy_componentconfig_LeaderElectionConfiguration(in.LeaderElection, &out.LeaderElection, c); err != nil { - return err - } - return nil -} - -func DeepCopy_componentconfig_KubeletConfiguration(in KubeletConfiguration, out *KubeletConfiguration, c *conversion.Cloner) error { - out.Config = in.Config - if err := unversioned.DeepCopy_unversioned_Duration(in.SyncFrequency, &out.SyncFrequency, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Duration(in.FileCheckFrequency, &out.FileCheckFrequency, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Duration(in.HTTPCheckFrequency, &out.HTTPCheckFrequency, c); 
err != nil { - return err - } - out.ManifestURL = in.ManifestURL - out.ManifestURLHeader = in.ManifestURLHeader - out.EnableServer = in.EnableServer - out.Address = in.Address - out.Port = in.Port - out.ReadOnlyPort = in.ReadOnlyPort - out.TLSCertFile = in.TLSCertFile - out.TLSPrivateKeyFile = in.TLSPrivateKeyFile - out.CertDirectory = in.CertDirectory - out.HostnameOverride = in.HostnameOverride - out.PodInfraContainerImage = in.PodInfraContainerImage - out.DockerEndpoint = in.DockerEndpoint - out.RootDirectory = in.RootDirectory - out.SeccompProfileRoot = in.SeccompProfileRoot - out.AllowPrivileged = in.AllowPrivileged - out.HostNetworkSources = in.HostNetworkSources - out.HostPIDSources = in.HostPIDSources - out.HostIPCSources = in.HostIPCSources - out.RegistryPullQPS = in.RegistryPullQPS - out.RegistryBurst = in.RegistryBurst - out.EventRecordQPS = in.EventRecordQPS - out.EventBurst = in.EventBurst - out.EnableDebuggingHandlers = in.EnableDebuggingHandlers - if err := unversioned.DeepCopy_unversioned_Duration(in.MinimumGCAge, &out.MinimumGCAge, c); err != nil { - return err - } - out.MaxPerPodContainerCount = in.MaxPerPodContainerCount - out.MaxContainerCount = in.MaxContainerCount - out.CAdvisorPort = in.CAdvisorPort - out.HealthzPort = in.HealthzPort - out.HealthzBindAddress = in.HealthzBindAddress - out.OOMScoreAdj = in.OOMScoreAdj - out.RegisterNode = in.RegisterNode - out.ClusterDomain = in.ClusterDomain - out.MasterServiceNamespace = in.MasterServiceNamespace - out.ClusterDNS = in.ClusterDNS - if err := unversioned.DeepCopy_unversioned_Duration(in.StreamingConnectionIdleTimeout, &out.StreamingConnectionIdleTimeout, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Duration(in.NodeStatusUpdateFrequency, &out.NodeStatusUpdateFrequency, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Duration(in.ImageMinimumGCAge, &out.ImageMinimumGCAge, c); err != nil { - return err - } - out.ImageGCHighThresholdPercent = in.ImageGCHighThresholdPercent - out.ImageGCLowThresholdPercent = in.ImageGCLowThresholdPercent - out.LowDiskSpaceThresholdMB = in.LowDiskSpaceThresholdMB - if err := unversioned.DeepCopy_unversioned_Duration(in.VolumeStatsAggPeriod, &out.VolumeStatsAggPeriod, c); err != nil { - return err - } - out.NetworkPluginName = in.NetworkPluginName - out.NetworkPluginDir = in.NetworkPluginDir - out.VolumePluginDir = in.VolumePluginDir - out.CloudProvider = in.CloudProvider - out.CloudConfigFile = in.CloudConfigFile - out.KubeletCgroups = in.KubeletCgroups - out.RuntimeCgroups = in.RuntimeCgroups - out.SystemCgroups = in.SystemCgroups - out.CgroupRoot = in.CgroupRoot - out.ContainerRuntime = in.ContainerRuntime - if err := unversioned.DeepCopy_unversioned_Duration(in.RuntimeRequestTimeout, &out.RuntimeRequestTimeout, c); err != nil { - return err - } - out.RktPath = in.RktPath - out.RktAPIEndpoint = in.RktAPIEndpoint - out.RktStage1Image = in.RktStage1Image - out.LockFilePath = in.LockFilePath - out.ExitOnLockContention = in.ExitOnLockContention - out.ConfigureCBR0 = in.ConfigureCBR0 - out.HairpinMode = in.HairpinMode - out.BabysitDaemons = in.BabysitDaemons - out.MaxPods = in.MaxPods - out.NvidiaGPUs = in.NvidiaGPUs - out.DockerExecHandlerName = in.DockerExecHandlerName - out.PodCIDR = in.PodCIDR - out.ResolverConfig = in.ResolverConfig - out.CPUCFSQuota = in.CPUCFSQuota - out.Containerized = in.Containerized - out.MaxOpenFiles = in.MaxOpenFiles - out.ReconcileCIDR = in.ReconcileCIDR - out.RegisterSchedulable = 
in.RegisterSchedulable - out.ContentType = in.ContentType - out.KubeAPIQPS = in.KubeAPIQPS - out.KubeAPIBurst = in.KubeAPIBurst - out.SerializeImagePulls = in.SerializeImagePulls - out.ExperimentalFlannelOverlay = in.ExperimentalFlannelOverlay - if err := unversioned.DeepCopy_unversioned_Duration(in.OutOfDiskTransitionFrequency, &out.OutOfDiskTransitionFrequency, c); err != nil { - return err - } - out.NodeIP = in.NodeIP - if in.NodeLabels != nil { - in, out := in.NodeLabels, &out.NodeLabels - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.NodeLabels = nil - } - out.NonMasqueradeCIDR = in.NonMasqueradeCIDR - out.EnableCustomMetrics = in.EnableCustomMetrics - out.EvictionHard = in.EvictionHard - out.EvictionSoft = in.EvictionSoft - out.EvictionSoftGracePeriod = in.EvictionSoftGracePeriod - if err := unversioned.DeepCopy_unversioned_Duration(in.EvictionPressureTransitionPeriod, &out.EvictionPressureTransitionPeriod, c); err != nil { - return err - } - out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod - out.PodsPerCore = in.PodsPerCore - out.EnableControllerAttachDetach = in.EnableControllerAttachDetach - return nil -} - -func DeepCopy_componentconfig_LeaderElectionConfiguration(in LeaderElectionConfiguration, out *LeaderElectionConfiguration, c *conversion.Cloner) error { - out.LeaderElect = in.LeaderElect - if err := unversioned.DeepCopy_unversioned_Duration(in.LeaseDuration, &out.LeaseDuration, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Duration(in.RenewDeadline, &out.RenewDeadline, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Duration(in.RetryPeriod, &out.RetryPeriod, c); err != nil { - return err - } - return nil -} - -func DeepCopy_componentconfig_PersistentVolumeRecyclerConfiguration(in PersistentVolumeRecyclerConfiguration, out *PersistentVolumeRecyclerConfiguration, c *conversion.Cloner) error { - out.MaximumRetry = in.MaximumRetry - out.MinimumTimeoutNFS = in.MinimumTimeoutNFS - out.PodTemplateFilePathNFS = in.PodTemplateFilePathNFS - out.IncrementTimeoutNFS = in.IncrementTimeoutNFS - out.PodTemplateFilePathHostPath = in.PodTemplateFilePathHostPath - out.MinimumTimeoutHostPath = in.MinimumTimeoutHostPath - out.IncrementTimeoutHostPath = in.IncrementTimeoutHostPath - return nil -} - -func DeepCopy_componentconfig_PortRangeVar(in PortRangeVar, out *PortRangeVar, c *conversion.Cloner) error { - if in.Val != nil { - in, out := in.Val, &out.Val - *out = new(string) - **out = *in - } else { - out.Val = nil - } - return nil -} - -func DeepCopy_componentconfig_VolumeConfiguration(in VolumeConfiguration, out *VolumeConfiguration, c *conversion.Cloner) error { - out.EnableHostPathProvisioning = in.EnableHostPathProvisioning - out.EnableDynamicProvisioning = in.EnableDynamicProvisioning - if err := DeepCopy_componentconfig_PersistentVolumeRecyclerConfiguration(in.PersistentVolumeRecyclerConfiguration, &out.PersistentVolumeRecyclerConfiguration, c); err != nil { - return err - } - out.FlexVolumePluginDir = in.FlexVolumePluginDir - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/helpers.go deleted file mode 100644 index edd9c7975..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/helpers.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. 
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package componentconfig
-
-import (
-	"fmt"
-	"net"
-
-	utilnet "k8s.io/kubernetes/pkg/util/net"
-)
-
-// used for validating command line opts
-// TODO(mikedanese): remove these when we remove command line flags
-
-type IPVar struct {
-	Val *string
-}
-
-func (v IPVar) Set(s string) error {
-	if net.ParseIP(s) == nil {
-		return fmt.Errorf("%q is not a valid IP address", s)
-	}
-	if v.Val == nil {
-		// it's okay to panic here since this is programmer error
-		panic("the string pointer passed into IPVar should not be nil")
-	}
-	*v.Val = s
-	return nil
-}
-
-func (v IPVar) String() string {
-	if v.Val == nil {
-		return ""
-	}
-	return *v.Val
-}
-
-func (v IPVar) Type() string {
-	return "ip"
-}
-
-func (m *ProxyMode) Set(s string) error {
-	*m = ProxyMode(s)
-	return nil
-}
-
-func (m *ProxyMode) String() string {
-	if m != nil {
-		return string(*m)
-	}
-	return ""
-}
-
-func (m *ProxyMode) Type() string {
-	return "ProxyMode"
-}
-
-type PortRangeVar struct {
-	Val *string
-}
-
-func (v PortRangeVar) Set(s string) error {
-	if _, err := utilnet.ParsePortRange(s); err != nil {
-		return fmt.Errorf("%q is not a valid port range: %v", s, err)
-	}
-	if v.Val == nil {
-		// it's okay to panic here since this is programmer error
-		panic("the string pointer passed into PortRangeVar should not be nil")
-	}
-	*v.Val = s
-	return nil
-}
-
-func (v PortRangeVar) String() string {
-	if v.Val == nil {
-		return ""
-	}
-	return *v.Val
-}
-
-func (v PortRangeVar) Type() string {
-	return "port-range"
-}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/install.go
deleted file mode 100644
index ec5554273..000000000
--- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/install/install.go
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package install installs the experimental API group, making it available as
-// an option to all of the API encoding/decoding machinery.
-package install - -import ( - "fmt" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/componentconfig" - "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/sets" -) - -const importPrefix = "k8s.io/kubernetes/pkg/apis/componentconfig" - -var accessor = meta.NewAccessor() - -// availableVersions lists all known external versions for this group from most preferred to least preferred -var availableVersions = []unversioned.GroupVersion{v1alpha1.SchemeGroupVersion} - -func init() { - registered.RegisterVersions(availableVersions) - externalVersions := []unversioned.GroupVersion{} - for _, v := range availableVersions { - if registered.IsAllowedVersion(v) { - externalVersions = append(externalVersions, v) - } - } - if len(externalVersions) == 0 { - glog.V(4).Infof("No version is registered for group %v", componentconfig.GroupName) - return - } - - if err := registered.EnableVersions(externalVersions...); err != nil { - glog.V(4).Infof("%v", err) - return - } - if err := enableVersions(externalVersions); err != nil { - glog.V(4).Infof("%v", err) - return - } -} - -// TODO: enableVersions should be centralized rather than spread in each API -// group. -// We can combine registered.RegisterVersions, registered.EnableVersions and -// registered.RegisterGroup once we have moved enableVersions there. -func enableVersions(externalVersions []unversioned.GroupVersion) error { - addVersionsToScheme(externalVersions...) - preferredExternalVersion := externalVersions[0] - - groupMeta := apimachinery.GroupMeta{ - GroupVersion: preferredExternalVersion, - GroupVersions: externalVersions, - RESTMapper: newRESTMapper(externalVersions), - SelfLinker: runtime.SelfLinker(accessor), - InterfacesFor: interfacesFor, - } - - if err := registered.RegisterGroup(groupMeta); err != nil { - return err - } - api.RegisterRESTMapper(groupMeta.RESTMapper) - return nil -} - -func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { - // the list of kinds that are scoped at the root of the api hierarchy - // if a kind is not enumerated here, it is assumed to have a namespace scope - rootScoped := sets.NewString() - - ignoredKinds := sets.NewString() - - return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) -} - -// interfacesFor returns the default Codec and ResourceVersioner for a given version -// string, or an error if the version is not known. 
-func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { - switch version { - case v1alpha1.SchemeGroupVersion: - return &meta.VersionInterfaces{ - ObjectConvertor: api.Scheme, - MetadataAccessor: accessor, - }, nil - default: - g, _ := registered.Group(componentconfig.GroupName) - return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) - } -} - -func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { - // add the internal version to Scheme - componentconfig.AddToScheme(api.Scheme) - // add the enabled external versions to Scheme - for _, v := range externalVersions { - if !registered.IsEnabledVersion(v) { - glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) - continue - } - switch v { - case v1alpha1.SchemeGroupVersion: - v1alpha1.AddToScheme(api.Scheme) - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/register.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/register.go deleted file mode 100644 index 0666c5431..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/register.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package componentconfig - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -func AddToScheme(scheme *runtime.Scheme) { - addKnownTypes(scheme) -} - -// GroupName is the group name use in this package -const GroupName = "componentconfig" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns back a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -func addKnownTypes(scheme *runtime.Scheme) { - // TODO this will get cleaned up with the scheme types are fixed - scheme.AddKnownTypes(SchemeGroupVersion, - &KubeProxyConfiguration{}, - &KubeSchedulerConfiguration{}, - ) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.generated.go deleted file mode 100644 index 029f24fd5..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.generated.go +++ /dev/null @@ -1,9703 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package componentconfig - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_unversioned.TypeMeta - var v1 time.Duration - _, _ = v0, v1 - } -} - -func (x *KubeProxyConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [20]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[18] = x.Kind != "" - yyq2[19] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(20) - } else { - yynn2 = 18 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.BindAddress)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("bindAddress")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.BindAddress)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterCIDR)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterCIDR")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { 
- r.EncodeString(codecSelferC_UTF81234, string(x.ClusterCIDR)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HealthzBindAddress)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("healthzBindAddress")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HealthzBindAddress)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.HealthzPort)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("healthzPort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.HealthzPort)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostnameOverride)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostnameOverride")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostnameOverride)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.IPTablesMasqueradeBit == nil { - r.EncodeNil() - } else { - yy19 := *x.IPTablesMasqueradeBit - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(yy19)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iptablesMasqueradeBit")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.IPTablesMasqueradeBit == nil { - r.EncodeNil() - } else { - yy21 := *x.IPTablesMasqueradeBit - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeInt(int64(yy21)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy24 := &x.IPTablesSyncPeriod - yym25 := z.EncBinary() - _ = yym25 - if false { - } else if z.HasExtensions() && z.EncExt(yy24) { - } else if !yym25 && z.IsJSONHandle() { - z.EncJSONMarshal(yy24) - } else { - z.EncFallback(yy24) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("iptablesSyncPeriodSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy26 := &x.IPTablesSyncPeriod - yym27 := z.EncBinary() - _ = yym27 - if false { - } else if z.HasExtensions() && z.EncExt(yy26) { - } else if !yym27 && z.IsJSONHandle() { - z.EncJSONMarshal(yy26) - } else { - z.EncFallback(yy26) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeconfigPath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeconfigPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym30 := 
z.EncBinary() - _ = yym30 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeconfigPath)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym32 := z.EncBinary() - _ = yym32 - if false { - } else { - r.EncodeBool(bool(x.MasqueradeAll)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("masqueradeAll")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym33 := z.EncBinary() - _ = yym33 - if false { - } else { - r.EncodeBool(bool(x.MasqueradeAll)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Master)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("master")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym36 := z.EncBinary() - _ = yym36 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Master)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.OOMScoreAdj == nil { - r.EncodeNil() - } else { - yy38 := *x.OOMScoreAdj - yym39 := z.EncBinary() - _ = yym39 - if false { - } else { - r.EncodeInt(int64(yy38)) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("oomScoreAdj")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.OOMScoreAdj == nil { - r.EncodeNil() - } else { - yy40 := *x.OOMScoreAdj - yym41 := z.EncBinary() - _ = yym41 - if false { - } else { - r.EncodeInt(int64(yy40)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Mode.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("mode")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Mode.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym46 := z.EncBinary() - _ = yym46 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PortRange)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("portRange")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym47 := z.EncBinary() - _ = yym47 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PortRange)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym49 := z.EncBinary() - _ = yym49 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceContainer)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeletCgroups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym50 := z.EncBinary() - _ = yym50 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceContainer)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy52 := &x.UDPIdleTimeout - yym53 := z.EncBinary() - _ = yym53 - if false { - } else if z.HasExtensions() && z.EncExt(yy52) { - } else if !yym53 && z.IsJSONHandle() { - z.EncJSONMarshal(yy52) - } else { - z.EncFallback(yy52) - } - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("udpTimeoutMilliseconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy54 := &x.UDPIdleTimeout - yym55 := z.EncBinary() - _ = yym55 - if false { - } else if z.HasExtensions() && z.EncExt(yy54) { - } else if !yym55 && z.IsJSONHandle() { - z.EncJSONMarshal(yy54) - } else { - z.EncFallback(yy54) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym57 := z.EncBinary() - _ = yym57 - if false { - } else { - r.EncodeInt(int64(x.ConntrackMax)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conntrackMax")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym58 := z.EncBinary() - _ = yym58 - if false { - } else { - r.EncodeInt(int64(x.ConntrackMax)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym60 := z.EncBinary() - _ = yym60 - if false { - } else { - r.EncodeInt(int64(x.ConntrackMaxPerCore)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conntrackMaxPerCore")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym61 := z.EncBinary() - _ = yym61 - if false { - } else { - r.EncodeInt(int64(x.ConntrackMaxPerCore)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy63 := &x.ConntrackTCPEstablishedTimeout - yym64 := z.EncBinary() - _ = yym64 - if false { - } else if z.HasExtensions() && z.EncExt(yy63) { - } else if !yym64 && z.IsJSONHandle() { - z.EncJSONMarshal(yy63) - } else { - z.EncFallback(yy63) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conntrackTCPEstablishedTimeout")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy65 := &x.ConntrackTCPEstablishedTimeout - yym66 := z.EncBinary() - _ = yym66 - if false { - } else if z.HasExtensions() && z.EncExt(yy65) { - } else if !yym66 && z.IsJSONHandle() { - z.EncJSONMarshal(yy65) - } else { - z.EncFallback(yy65) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[18] { - yym68 := z.EncBinary() - _ = yym68 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[18] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym69 := z.EncBinary() - _ = yym69 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[19] { - yym71 := z.EncBinary() - _ = yym71 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[19] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym72 := z.EncBinary() - _ = yym72 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } 
else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *KubeProxyConfiguration) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *KubeProxyConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "bindAddress": - if r.TryDecodeAsNil() { - x.BindAddress = "" - } else { - x.BindAddress = string(r.DecodeString()) - } - case "clusterCIDR": - if r.TryDecodeAsNil() { - x.ClusterCIDR = "" - } else { - x.ClusterCIDR = string(r.DecodeString()) - } - case "healthzBindAddress": - if r.TryDecodeAsNil() { - x.HealthzBindAddress = "" - } else { - x.HealthzBindAddress = string(r.DecodeString()) - } - case "healthzPort": - if r.TryDecodeAsNil() { - x.HealthzPort = 0 - } else { - x.HealthzPort = int32(r.DecodeInt(32)) - } - case "hostnameOverride": - if r.TryDecodeAsNil() { - x.HostnameOverride = "" - } else { - x.HostnameOverride = string(r.DecodeString()) - } - case "iptablesMasqueradeBit": - if r.TryDecodeAsNil() { - if x.IPTablesMasqueradeBit != nil { - x.IPTablesMasqueradeBit = nil - } - } else { - if x.IPTablesMasqueradeBit == nil { - x.IPTablesMasqueradeBit = new(int32) - } - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*int32)(x.IPTablesMasqueradeBit)) = int32(r.DecodeInt(32)) - } - } - case "iptablesSyncPeriodSeconds": - if r.TryDecodeAsNil() { - x.IPTablesSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv11 := &x.IPTablesSyncPeriod - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv11) - } else { - z.DecFallback(yyv11, false) - } - } - case "kubeconfigPath": - if r.TryDecodeAsNil() { - x.KubeconfigPath = "" - } else { - x.KubeconfigPath = string(r.DecodeString()) - } - case "masqueradeAll": - if r.TryDecodeAsNil() { - x.MasqueradeAll = false - } else { - x.MasqueradeAll = bool(r.DecodeBool()) - } - case "master": - if r.TryDecodeAsNil() { - x.Master = "" - } else { - x.Master = string(r.DecodeString()) - } - case "oomScoreAdj": - if r.TryDecodeAsNil() { - if x.OOMScoreAdj != nil { - x.OOMScoreAdj = nil - } - } else { - if x.OOMScoreAdj == nil { - x.OOMScoreAdj = new(int32) - } - yym17 := z.DecBinary() - _ = yym17 - if false { - } else { - *((*int32)(x.OOMScoreAdj)) = int32(r.DecodeInt(32)) - } - 
} - case "mode": - if r.TryDecodeAsNil() { - x.Mode = "" - } else { - x.Mode = ProxyMode(r.DecodeString()) - } - case "portRange": - if r.TryDecodeAsNil() { - x.PortRange = "" - } else { - x.PortRange = string(r.DecodeString()) - } - case "kubeletCgroups": - if r.TryDecodeAsNil() { - x.ResourceContainer = "" - } else { - x.ResourceContainer = string(r.DecodeString()) - } - case "udpTimeoutMilliseconds": - if r.TryDecodeAsNil() { - x.UDPIdleTimeout = pkg1_unversioned.Duration{} - } else { - yyv21 := &x.UDPIdleTimeout - yym22 := z.DecBinary() - _ = yym22 - if false { - } else if z.HasExtensions() && z.DecExt(yyv21) { - } else if !yym22 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv21) - } else { - z.DecFallback(yyv21, false) - } - } - case "conntrackMax": - if r.TryDecodeAsNil() { - x.ConntrackMax = 0 - } else { - x.ConntrackMax = int32(r.DecodeInt(32)) - } - case "conntrackMaxPerCore": - if r.TryDecodeAsNil() { - x.ConntrackMaxPerCore = 0 - } else { - x.ConntrackMaxPerCore = int32(r.DecodeInt(32)) - } - case "conntrackTCPEstablishedTimeout": - if r.TryDecodeAsNil() { - x.ConntrackTCPEstablishedTimeout = pkg1_unversioned.Duration{} - } else { - yyv25 := &x.ConntrackTCPEstablishedTimeout - yym26 := z.DecBinary() - _ = yym26 - if false { - } else if z.HasExtensions() && z.DecExt(yyv25) { - } else if !yym26 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv25) - } else { - z.DecFallback(yyv25, false) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *KubeProxyConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj29 int - var yyb29 bool - var yyhl29 bool = l >= 0 - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.BindAddress = "" - } else { - x.BindAddress = string(r.DecodeString()) - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterCIDR = "" - } else { - x.ClusterCIDR = string(r.DecodeString()) - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HealthzBindAddress = "" - } else { - x.HealthzBindAddress = string(r.DecodeString()) - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HealthzPort = 0 - } else { - x.HealthzPort = int32(r.DecodeInt(32)) - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostnameOverride = "" - } else { - x.HostnameOverride = string(r.DecodeString()) - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.IPTablesMasqueradeBit != nil { - x.IPTablesMasqueradeBit = nil - } - } else { - if x.IPTablesMasqueradeBit == nil { - x.IPTablesMasqueradeBit = new(int32) - } - yym36 := z.DecBinary() - _ = yym36 - if false { - } else { - *((*int32)(x.IPTablesMasqueradeBit)) = int32(r.DecodeInt(32)) - } - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.IPTablesSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv37 := &x.IPTablesSyncPeriod - yym38 := z.DecBinary() - _ = yym38 - if false { - } else if z.HasExtensions() && z.DecExt(yyv37) { - } else if !yym38 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv37) - } else { - z.DecFallback(yyv37, false) - } - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeconfigPath = "" - } else { - x.KubeconfigPath = string(r.DecodeString()) - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MasqueradeAll = false - } else { - x.MasqueradeAll = bool(r.DecodeBool()) - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Master = "" - } else { - x.Master = string(r.DecodeString()) - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.OOMScoreAdj != nil { - x.OOMScoreAdj = nil - } - } else { - if x.OOMScoreAdj == nil { - x.OOMScoreAdj = new(int32) - } - yym43 := z.DecBinary() - _ = yym43 - if false { - } else { - *((*int32)(x.OOMScoreAdj)) = int32(r.DecodeInt(32)) - } - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Mode = "" - } else { - x.Mode = ProxyMode(r.DecodeString()) - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PortRange = "" - } else { - x.PortRange = 
string(r.DecodeString()) - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceContainer = "" - } else { - x.ResourceContainer = string(r.DecodeString()) - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UDPIdleTimeout = pkg1_unversioned.Duration{} - } else { - yyv47 := &x.UDPIdleTimeout - yym48 := z.DecBinary() - _ = yym48 - if false { - } else if z.HasExtensions() && z.DecExt(yyv47) { - } else if !yym48 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv47) - } else { - z.DecFallback(yyv47, false) - } - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConntrackMax = 0 - } else { - x.ConntrackMax = int32(r.DecodeInt(32)) - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConntrackMaxPerCore = 0 - } else { - x.ConntrackMaxPerCore = int32(r.DecodeInt(32)) - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConntrackTCPEstablishedTimeout = pkg1_unversioned.Duration{} - } else { - yyv51 := &x.ConntrackTCPEstablishedTimeout - yym52 := z.DecBinary() - _ = yym52 - if false { - } else if z.HasExtensions() && z.DecExt(yyv51) { - } else if !yym52 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv51) - } else { - z.DecFallback(yyv51, false) - } - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj29-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ProxyMode) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ProxyMode) CodecDecodeSelf(d *codec1978.Decoder) { - 
var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x HairpinMode) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *HairpinMode) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [91]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[48] = x.CloudProvider != "" - yyq2[49] = x.CloudConfigFile != "" - yyq2[50] = x.KubeletCgroups != "" - yyq2[51] = x.RuntimeCgroups != "" - yyq2[52] = x.SystemCgroups != "" - yyq2[53] = x.CgroupRoot != "" - yyq2[55] = true - yyq2[56] = x.RktPath != "" - yyq2[57] = x.RktAPIEndpoint != "" - yyq2[58] = x.RktStage1Image != "" - yyq2[79] = true - yyq2[80] = x.NodeIP != "" - yyq2[84] = x.EvictionHard != "" - yyq2[85] = x.EvictionSoft != "" - yyq2[86] = x.EvictionSoftGracePeriod != "" - yyq2[87] = true - yyq2[88] = x.EvictionMaxPodGracePeriod != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(91) - } else { - yynn2 = 74 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Config)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("config")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Config)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.SyncFrequency - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) - } else { - z.EncFallback(yy7) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("syncFrequency")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.SyncFrequency - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy12 := &x.FileCheckFrequency - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if !yym13 && z.IsJSONHandle() { - 
z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fileCheckFrequency")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.FileCheckFrequency - yym15 := z.EncBinary() - _ = yym15 - if false { - } else if z.HasExtensions() && z.EncExt(yy14) { - } else if !yym15 && z.IsJSONHandle() { - z.EncJSONMarshal(yy14) - } else { - z.EncFallback(yy14) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy17 := &x.HTTPCheckFrequency - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(yy17) { - } else if !yym18 && z.IsJSONHandle() { - z.EncJSONMarshal(yy17) - } else { - z.EncFallback(yy17) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("httpCheckFrequency")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy19 := &x.HTTPCheckFrequency - yym20 := z.EncBinary() - _ = yym20 - if false { - } else if z.HasExtensions() && z.EncExt(yy19) { - } else if !yym20 && z.IsJSONHandle() { - z.EncJSONMarshal(yy19) - } else { - z.EncFallback(yy19) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ManifestURL)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("manifestURL")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ManifestURL)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ManifestURLHeader)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("manifestURLHeader")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ManifestURLHeader)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeBool(bool(x.EnableServer)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enableServer")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - r.EncodeBool(bool(x.EnableServer)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym31 := z.EncBinary() - _ = yym31 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Address)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("address")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym32 := z.EncBinary() - _ = yym32 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Address)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym34 := z.EncBinary() - _ = yym34 - if false { - } else { - r.EncodeUint(uint64(x.Port)) - } - 
} else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - r.EncodeUint(uint64(x.Port)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym37 := z.EncBinary() - _ = yym37 - if false { - } else { - r.EncodeUint(uint64(x.ReadOnlyPort)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnlyPort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym38 := z.EncBinary() - _ = yym38 - if false { - } else { - r.EncodeUint(uint64(x.ReadOnlyPort)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym40 := z.EncBinary() - _ = yym40 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TLSCertFile)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tLSCertFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym41 := z.EncBinary() - _ = yym41 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TLSCertFile)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym43 := z.EncBinary() - _ = yym43 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TLSPrivateKeyFile)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tLSPrivateKeyFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym44 := z.EncBinary() - _ = yym44 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TLSPrivateKeyFile)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym46 := z.EncBinary() - _ = yym46 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CertDirectory)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("certDirectory")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym47 := z.EncBinary() - _ = yym47 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CertDirectory)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym49 := z.EncBinary() - _ = yym49 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostnameOverride)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostnameOverride")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym50 := z.EncBinary() - _ = yym50 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostnameOverride)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym52 := z.EncBinary() - _ = yym52 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodInfraContainerImage)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podInfraContainerImage")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym53 := z.EncBinary() - _ = yym53 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodInfraContainerImage)) - } - } - if yyr2 
|| yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym55 := z.EncBinary() - _ = yym55 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DockerEndpoint)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("dockerEndpoint")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym56 := z.EncBinary() - _ = yym56 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DockerEndpoint)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym58 := z.EncBinary() - _ = yym58 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RootDirectory)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rootDirectory")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym59 := z.EncBinary() - _ = yym59 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RootDirectory)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym61 := z.EncBinary() - _ = yym61 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SeccompProfileRoot)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("seccompProfileRoot")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym62 := z.EncBinary() - _ = yym62 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SeccompProfileRoot)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym64 := z.EncBinary() - _ = yym64 - if false { - } else { - r.EncodeBool(bool(x.AllowPrivileged)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("allowPrivileged")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym65 := z.EncBinary() - _ = yym65 - if false { - } else { - r.EncodeBool(bool(x.AllowPrivileged)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym67 := z.EncBinary() - _ = yym67 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostNetworkSources)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostNetworkSources")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym68 := z.EncBinary() - _ = yym68 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostNetworkSources)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym70 := z.EncBinary() - _ = yym70 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostPIDSources)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPIDSources")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym71 := z.EncBinary() - _ = yym71 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostPIDSources)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym73 := z.EncBinary() - _ = yym73 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostIPCSources)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("hostIPCSources")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym74 := z.EncBinary() - _ = yym74 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HostIPCSources)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym76 := z.EncBinary() - _ = yym76 - if false { - } else { - r.EncodeFloat64(float64(x.RegistryPullQPS)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("registryPullQPS")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym77 := z.EncBinary() - _ = yym77 - if false { - } else { - r.EncodeFloat64(float64(x.RegistryPullQPS)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym79 := z.EncBinary() - _ = yym79 - if false { - } else { - r.EncodeInt(int64(x.RegistryBurst)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("registryBurst")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym80 := z.EncBinary() - _ = yym80 - if false { - } else { - r.EncodeInt(int64(x.RegistryBurst)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym82 := z.EncBinary() - _ = yym82 - if false { - } else { - r.EncodeFloat32(float32(x.EventRecordQPS)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("eventRecordQPS")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym83 := z.EncBinary() - _ = yym83 - if false { - } else { - r.EncodeFloat32(float32(x.EventRecordQPS)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym85 := z.EncBinary() - _ = yym85 - if false { - } else { - r.EncodeInt(int64(x.EventBurst)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("eventBurst")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym86 := z.EncBinary() - _ = yym86 - if false { - } else { - r.EncodeInt(int64(x.EventBurst)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym88 := z.EncBinary() - _ = yym88 - if false { - } else { - r.EncodeBool(bool(x.EnableDebuggingHandlers)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enableDebuggingHandlers")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym89 := z.EncBinary() - _ = yym89 - if false { - } else { - r.EncodeBool(bool(x.EnableDebuggingHandlers)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy91 := &x.MinimumGCAge - yym92 := z.EncBinary() - _ = yym92 - if false { - } else if z.HasExtensions() && z.EncExt(yy91) { - } else if !yym92 && z.IsJSONHandle() { - z.EncJSONMarshal(yy91) - } else { - z.EncFallback(yy91) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minimumGCAge")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy93 := &x.MinimumGCAge - yym94 := z.EncBinary() - _ = yym94 - if false { - } else if z.HasExtensions() && z.EncExt(yy93) { - } else if !yym94 && z.IsJSONHandle() { - z.EncJSONMarshal(yy93) - } else { - z.EncFallback(yy93) - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym96 := z.EncBinary() - _ = yym96 - if false { - } else { - r.EncodeInt(int64(x.MaxPerPodContainerCount)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxPerPodContainerCount")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym97 := z.EncBinary() - _ = yym97 - if false { - } else { - r.EncodeInt(int64(x.MaxPerPodContainerCount)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym99 := z.EncBinary() - _ = yym99 - if false { - } else { - r.EncodeInt(int64(x.MaxContainerCount)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxContainerCount")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym100 := z.EncBinary() - _ = yym100 - if false { - } else { - r.EncodeInt(int64(x.MaxContainerCount)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym102 := z.EncBinary() - _ = yym102 - if false { - } else { - r.EncodeUint(uint64(x.CAdvisorPort)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cAdvisorPort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym103 := z.EncBinary() - _ = yym103 - if false { - } else { - r.EncodeUint(uint64(x.CAdvisorPort)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym105 := z.EncBinary() - _ = yym105 - if false { - } else { - r.EncodeInt(int64(x.HealthzPort)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("healthzPort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym106 := z.EncBinary() - _ = yym106 - if false { - } else { - r.EncodeInt(int64(x.HealthzPort)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym108 := z.EncBinary() - _ = yym108 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HealthzBindAddress)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("healthzBindAddress")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym109 := z.EncBinary() - _ = yym109 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HealthzBindAddress)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym111 := z.EncBinary() - _ = yym111 - if false { - } else { - r.EncodeInt(int64(x.OOMScoreAdj)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("oomScoreAdj")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym112 := z.EncBinary() - _ = yym112 - if false { - } else { - r.EncodeInt(int64(x.OOMScoreAdj)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym114 := z.EncBinary() - _ = yym114 - if false { - } else { - r.EncodeBool(bool(x.RegisterNode)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("registerNode")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym115 := z.EncBinary() - _ = yym115 - if false { - } else { - r.EncodeBool(bool(x.RegisterNode)) - } - } - if yyr2 || yy2arr2 { 
- z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym117 := z.EncBinary() - _ = yym117 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterDomain)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterDomain")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym118 := z.EncBinary() - _ = yym118 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterDomain)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym120 := z.EncBinary() - _ = yym120 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MasterServiceNamespace)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("masterServiceNamespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym121 := z.EncBinary() - _ = yym121 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.MasterServiceNamespace)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym123 := z.EncBinary() - _ = yym123 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterDNS)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterDNS")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym124 := z.EncBinary() - _ = yym124 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterDNS)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy126 := &x.StreamingConnectionIdleTimeout - yym127 := z.EncBinary() - _ = yym127 - if false { - } else if z.HasExtensions() && z.EncExt(yy126) { - } else if !yym127 && z.IsJSONHandle() { - z.EncJSONMarshal(yy126) - } else { - z.EncFallback(yy126) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("streamingConnectionIdleTimeout")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy128 := &x.StreamingConnectionIdleTimeout - yym129 := z.EncBinary() - _ = yym129 - if false { - } else if z.HasExtensions() && z.EncExt(yy128) { - } else if !yym129 && z.IsJSONHandle() { - z.EncJSONMarshal(yy128) - } else { - z.EncFallback(yy128) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy131 := &x.NodeStatusUpdateFrequency - yym132 := z.EncBinary() - _ = yym132 - if false { - } else if z.HasExtensions() && z.EncExt(yy131) { - } else if !yym132 && z.IsJSONHandle() { - z.EncJSONMarshal(yy131) - } else { - z.EncFallback(yy131) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeStatusUpdateFrequency")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy133 := &x.NodeStatusUpdateFrequency - yym134 := z.EncBinary() - _ = yym134 - if false { - } else if z.HasExtensions() && z.EncExt(yy133) { - } else if !yym134 && z.IsJSONHandle() { - z.EncJSONMarshal(yy133) - } else { - z.EncFallback(yy133) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy136 := &x.ImageMinimumGCAge - yym137 := z.EncBinary() - _ = yym137 - if false { - } else if z.HasExtensions() && z.EncExt(yy136) { - } else if !yym137 && z.IsJSONHandle() { - z.EncJSONMarshal(yy136) - } else { - 
z.EncFallback(yy136) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("imageMinimumGCAge")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy138 := &x.ImageMinimumGCAge - yym139 := z.EncBinary() - _ = yym139 - if false { - } else if z.HasExtensions() && z.EncExt(yy138) { - } else if !yym139 && z.IsJSONHandle() { - z.EncJSONMarshal(yy138) - } else { - z.EncFallback(yy138) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym141 := z.EncBinary() - _ = yym141 - if false { - } else { - r.EncodeInt(int64(x.ImageGCHighThresholdPercent)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("imageGCHighThresholdPercent")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym142 := z.EncBinary() - _ = yym142 - if false { - } else { - r.EncodeInt(int64(x.ImageGCHighThresholdPercent)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym144 := z.EncBinary() - _ = yym144 - if false { - } else { - r.EncodeInt(int64(x.ImageGCLowThresholdPercent)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("imageGCLowThresholdPercent")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym145 := z.EncBinary() - _ = yym145 - if false { - } else { - r.EncodeInt(int64(x.ImageGCLowThresholdPercent)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym147 := z.EncBinary() - _ = yym147 - if false { - } else { - r.EncodeInt(int64(x.LowDiskSpaceThresholdMB)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lowDiskSpaceThresholdMB")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym148 := z.EncBinary() - _ = yym148 - if false { - } else { - r.EncodeInt(int64(x.LowDiskSpaceThresholdMB)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy150 := &x.VolumeStatsAggPeriod - yym151 := z.EncBinary() - _ = yym151 - if false { - } else if z.HasExtensions() && z.EncExt(yy150) { - } else if !yym151 && z.IsJSONHandle() { - z.EncJSONMarshal(yy150) - } else { - z.EncFallback(yy150) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumeStatsAggPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy152 := &x.VolumeStatsAggPeriod - yym153 := z.EncBinary() - _ = yym153 - if false { - } else if z.HasExtensions() && z.EncExt(yy152) { - } else if !yym153 && z.IsJSONHandle() { - z.EncJSONMarshal(yy152) - } else { - z.EncFallback(yy152) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym155 := z.EncBinary() - _ = yym155 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NetworkPluginName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("networkPluginName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym156 := z.EncBinary() - _ = yym156 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NetworkPluginName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym158 := z.EncBinary() - _ = yym158 - if false { - } else { 
- r.EncodeString(codecSelferC_UTF81234, string(x.NetworkPluginDir)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("networkPluginDir")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym159 := z.EncBinary() - _ = yym159 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NetworkPluginDir)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym161 := z.EncBinary() - _ = yym161 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumePluginDir)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumePluginDir")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym162 := z.EncBinary() - _ = yym162 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumePluginDir)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[48] { - yym164 := z.EncBinary() - _ = yym164 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CloudProvider)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[48] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cloudProvider")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym165 := z.EncBinary() - _ = yym165 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CloudProvider)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[49] { - yym167 := z.EncBinary() - _ = yym167 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CloudConfigFile)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[49] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cloudConfigFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym168 := z.EncBinary() - _ = yym168 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CloudConfigFile)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[50] { - yym170 := z.EncBinary() - _ = yym170 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeletCgroups)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[50] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeletCgroups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym171 := z.EncBinary() - _ = yym171 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.KubeletCgroups)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[51] { - yym173 := z.EncBinary() - _ = yym173 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RuntimeCgroups)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[51] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runtimeCgroups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym174 := z.EncBinary() - _ = yym174 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RuntimeCgroups)) - } - } - } - 
if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[52] { - yym176 := z.EncBinary() - _ = yym176 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SystemCgroups)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[52] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("systemContainer")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym177 := z.EncBinary() - _ = yym177 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SystemCgroups)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[53] { - yym179 := z.EncBinary() - _ = yym179 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CgroupRoot)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[53] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cgroupRoot")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym180 := z.EncBinary() - _ = yym180 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CgroupRoot)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym182 := z.EncBinary() - _ = yym182 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntime)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerRuntime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym183 := z.EncBinary() - _ = yym183 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntime)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[55] { - yy185 := &x.RuntimeRequestTimeout - yym186 := z.EncBinary() - _ = yym186 - if false { - } else if z.HasExtensions() && z.EncExt(yy185) { - } else if !yym186 && z.IsJSONHandle() { - z.EncJSONMarshal(yy185) - } else { - z.EncFallback(yy185) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[55] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runtimeRequestTimeout")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy187 := &x.RuntimeRequestTimeout - yym188 := z.EncBinary() - _ = yym188 - if false { - } else if z.HasExtensions() && z.EncExt(yy187) { - } else if !yym188 && z.IsJSONHandle() { - z.EncJSONMarshal(yy187) - } else { - z.EncFallback(yy187) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[56] { - yym190 := z.EncBinary() - _ = yym190 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RktPath)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[56] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rktPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym191 := z.EncBinary() - _ = yym191 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RktPath)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[57] { - yym193 := z.EncBinary() - _ = yym193 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RktAPIEndpoint)) - } - } 
else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[57] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rktAPIEndpoint")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym194 := z.EncBinary() - _ = yym194 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RktAPIEndpoint)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[58] { - yym196 := z.EncBinary() - _ = yym196 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RktStage1Image)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[58] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rktStage1Image")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym197 := z.EncBinary() - _ = yym197 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RktStage1Image)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym199 := z.EncBinary() - _ = yym199 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.LockFilePath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lockFilePath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym200 := z.EncBinary() - _ = yym200 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.LockFilePath)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym202 := z.EncBinary() - _ = yym202 - if false { - } else { - r.EncodeBool(bool(x.ExitOnLockContention)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("exitOnLockContention")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym203 := z.EncBinary() - _ = yym203 - if false { - } else { - r.EncodeBool(bool(x.ExitOnLockContention)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym205 := z.EncBinary() - _ = yym205 - if false { - } else { - r.EncodeBool(bool(x.ConfigureCBR0)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("configureCbr0")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym206 := z.EncBinary() - _ = yym206 - if false { - } else { - r.EncodeBool(bool(x.ConfigureCBR0)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym208 := z.EncBinary() - _ = yym208 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HairpinMode)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hairpinMode")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym209 := z.EncBinary() - _ = yym209 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.HairpinMode)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym211 := z.EncBinary() - _ = yym211 - if false { - } else { - r.EncodeBool(bool(x.BabysitDaemons)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("babysitDaemons")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym212 
:= z.EncBinary() - _ = yym212 - if false { - } else { - r.EncodeBool(bool(x.BabysitDaemons)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym214 := z.EncBinary() - _ = yym214 - if false { - } else { - r.EncodeInt(int64(x.MaxPods)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxPods")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym215 := z.EncBinary() - _ = yym215 - if false { - } else { - r.EncodeInt(int64(x.MaxPods)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym217 := z.EncBinary() - _ = yym217 - if false { - } else { - r.EncodeInt(int64(x.NvidiaGPUs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nvidiaGPUs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym218 := z.EncBinary() - _ = yym218 - if false { - } else { - r.EncodeInt(int64(x.NvidiaGPUs)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym220 := z.EncBinary() - _ = yym220 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DockerExecHandlerName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("dockerExecHandlerName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym221 := z.EncBinary() - _ = yym221 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.DockerExecHandlerName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym223 := z.EncBinary() - _ = yym223 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podCIDR")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym224 := z.EncBinary() - _ = yym224 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym226 := z.EncBinary() - _ = yym226 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResolverConfig)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resolvConf")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym227 := z.EncBinary() - _ = yym227 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResolverConfig)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym229 := z.EncBinary() - _ = yym229 - if false { - } else { - r.EncodeBool(bool(x.CPUCFSQuota)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cpuCFSQuota")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym230 := z.EncBinary() - _ = yym230 - if false { - } else { - r.EncodeBool(bool(x.CPUCFSQuota)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym232 := z.EncBinary() - _ = yym232 - if false { - } else { - r.EncodeBool(bool(x.Containerized)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("containerized")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym233 := z.EncBinary() - _ = yym233 - if false { - } else { - r.EncodeBool(bool(x.Containerized)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym235 := z.EncBinary() - _ = yym235 - if false { - } else { - r.EncodeUint(uint64(x.MaxOpenFiles)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxOpenFiles")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym236 := z.EncBinary() - _ = yym236 - if false { - } else { - r.EncodeUint(uint64(x.MaxOpenFiles)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym238 := z.EncBinary() - _ = yym238 - if false { - } else { - r.EncodeBool(bool(x.ReconcileCIDR)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reconcileCIDR")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym239 := z.EncBinary() - _ = yym239 - if false { - } else { - r.EncodeBool(bool(x.ReconcileCIDR)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym241 := z.EncBinary() - _ = yym241 - if false { - } else { - r.EncodeBool(bool(x.RegisterSchedulable)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("registerSchedulable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym242 := z.EncBinary() - _ = yym242 - if false { - } else { - r.EncodeBool(bool(x.RegisterSchedulable)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym244 := z.EncBinary() - _ = yym244 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("contentType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym245 := z.EncBinary() - _ = yym245 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym247 := z.EncBinary() - _ = yym247 - if false { - } else { - r.EncodeFloat32(float32(x.KubeAPIQPS)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeAPIQPS")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym248 := z.EncBinary() - _ = yym248 - if false { - } else { - r.EncodeFloat32(float32(x.KubeAPIQPS)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym250 := z.EncBinary() - _ = yym250 - if false { - } else { - r.EncodeInt(int64(x.KubeAPIBurst)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeAPIBurst")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym251 := z.EncBinary() - _ = yym251 - if false { - } else { - r.EncodeInt(int64(x.KubeAPIBurst)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym253 := z.EncBinary() - _ = yym253 - if false { - } else { - r.EncodeBool(bool(x.SerializeImagePulls)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serializeImagePulls")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym254 := z.EncBinary() - _ = yym254 - if false { - } else { - r.EncodeBool(bool(x.SerializeImagePulls)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym256 := z.EncBinary() - _ = yym256 - if false { - } else { - r.EncodeBool(bool(x.ExperimentalFlannelOverlay)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("experimentalFlannelOverlay")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym257 := z.EncBinary() - _ = yym257 - if false { - } else { - r.EncodeBool(bool(x.ExperimentalFlannelOverlay)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[79] { - yy259 := &x.OutOfDiskTransitionFrequency - yym260 := z.EncBinary() - _ = yym260 - if false { - } else if z.HasExtensions() && z.EncExt(yy259) { - } else if !yym260 && z.IsJSONHandle() { - z.EncJSONMarshal(yy259) - } else { - z.EncFallback(yy259) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[79] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("outOfDiskTransitionFrequency")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy261 := &x.OutOfDiskTransitionFrequency - yym262 := z.EncBinary() - _ = yym262 - if false { - } else if z.HasExtensions() && z.EncExt(yy261) { - } else if !yym262 && z.IsJSONHandle() { - z.EncJSONMarshal(yy261) - } else { - z.EncFallback(yy261) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[80] { - yym264 := z.EncBinary() - _ = yym264 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NodeIP)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[80] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeIP")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym265 := z.EncBinary() - _ = yym265 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NodeIP)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.NodeLabels == nil { - r.EncodeNil() - } else { - yym267 := z.EncBinary() - _ = yym267 - if false { - } else { - z.F.EncMapStringStringV(x.NodeLabels, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeLabels")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NodeLabels == nil { - r.EncodeNil() - } else { - yym268 := z.EncBinary() - _ = yym268 - if false { - } else { - z.F.EncMapStringStringV(x.NodeLabels, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym270 := z.EncBinary() - _ = yym270 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NonMasqueradeCIDR)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nonMasqueradeCIDR")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym271 := z.EncBinary() - _ = yym271 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.NonMasqueradeCIDR)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym273 := z.EncBinary() - _ = yym273 - if false { - } else { - 
r.EncodeBool(bool(x.EnableCustomMetrics)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enableCustomMetrics")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym274 := z.EncBinary() - _ = yym274 - if false { - } else { - r.EncodeBool(bool(x.EnableCustomMetrics)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[84] { - yym276 := z.EncBinary() - _ = yym276 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EvictionHard)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[84] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("evictionHard")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym277 := z.EncBinary() - _ = yym277 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EvictionHard)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[85] { - yym279 := z.EncBinary() - _ = yym279 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EvictionSoft)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[85] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("evictionSoft")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym280 := z.EncBinary() - _ = yym280 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EvictionSoft)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[86] { - yym282 := z.EncBinary() - _ = yym282 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EvictionSoftGracePeriod)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[86] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("evictionSoftGracePeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym283 := z.EncBinary() - _ = yym283 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.EvictionSoftGracePeriod)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[87] { - yy285 := &x.EvictionPressureTransitionPeriod - yym286 := z.EncBinary() - _ = yym286 - if false { - } else if z.HasExtensions() && z.EncExt(yy285) { - } else if !yym286 && z.IsJSONHandle() { - z.EncJSONMarshal(yy285) - } else { - z.EncFallback(yy285) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[87] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("evictionPressureTransitionPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy287 := &x.EvictionPressureTransitionPeriod - yym288 := z.EncBinary() - _ = yym288 - if false { - } else if z.HasExtensions() && z.EncExt(yy287) { - } else if !yym288 && z.IsJSONHandle() { - z.EncJSONMarshal(yy287) - } else { - z.EncFallback(yy287) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[88] { - yym290 := z.EncBinary() - _ = yym290 - if false { - } else { - r.EncodeInt(int64(x.EvictionMaxPodGracePeriod)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[88] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("evictionMaxPodGracePeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym291 := z.EncBinary() - _ = yym291 - if false { - } else { - r.EncodeInt(int64(x.EvictionMaxPodGracePeriod)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym293 := z.EncBinary() - _ = yym293 - if false { - } else { - r.EncodeInt(int64(x.PodsPerCore)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podsPerCore")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym294 := z.EncBinary() - _ = yym294 - if false { - } else { - r.EncodeInt(int64(x.PodsPerCore)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym296 := z.EncBinary() - _ = yym296 - if false { - } else { - r.EncodeBool(bool(x.EnableControllerAttachDetach)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enableControllerAttachDetach")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym297 := z.EncBinary() - _ = yym297 - if false { - } else { - r.EncodeBool(bool(x.EnableControllerAttachDetach)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *KubeletConfiguration) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "config": - if r.TryDecodeAsNil() { - x.Config = "" - } else { - x.Config = string(r.DecodeString()) - } - case "syncFrequency": - if r.TryDecodeAsNil() { - x.SyncFrequency = pkg1_unversioned.Duration{} - } else { - yyv5 := &x.SyncFrequency - yym6 := z.DecBinary() - _ = yym6 - if false { - } else if z.HasExtensions() && z.DecExt(yyv5) { - } else if !yym6 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv5) - } else { - z.DecFallback(yyv5, false) - } - } - case "fileCheckFrequency": - if r.TryDecodeAsNil() { - x.FileCheckFrequency = pkg1_unversioned.Duration{} - } else { - yyv7 := &x.FileCheckFrequency - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && 
z.DecExt(yyv7) { - } else if !yym8 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv7) - } else { - z.DecFallback(yyv7, false) - } - } - case "httpCheckFrequency": - if r.TryDecodeAsNil() { - x.HTTPCheckFrequency = pkg1_unversioned.Duration{} - } else { - yyv9 := &x.HTTPCheckFrequency - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv9) - } else { - z.DecFallback(yyv9, false) - } - } - case "manifestURL": - if r.TryDecodeAsNil() { - x.ManifestURL = "" - } else { - x.ManifestURL = string(r.DecodeString()) - } - case "manifestURLHeader": - if r.TryDecodeAsNil() { - x.ManifestURLHeader = "" - } else { - x.ManifestURLHeader = string(r.DecodeString()) - } - case "enableServer": - if r.TryDecodeAsNil() { - x.EnableServer = false - } else { - x.EnableServer = bool(r.DecodeBool()) - } - case "address": - if r.TryDecodeAsNil() { - x.Address = "" - } else { - x.Address = string(r.DecodeString()) - } - case "port": - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = uint(r.DecodeUint(codecSelferBitsize1234)) - } - case "readOnlyPort": - if r.TryDecodeAsNil() { - x.ReadOnlyPort = 0 - } else { - x.ReadOnlyPort = uint(r.DecodeUint(codecSelferBitsize1234)) - } - case "tLSCertFile": - if r.TryDecodeAsNil() { - x.TLSCertFile = "" - } else { - x.TLSCertFile = string(r.DecodeString()) - } - case "tLSPrivateKeyFile": - if r.TryDecodeAsNil() { - x.TLSPrivateKeyFile = "" - } else { - x.TLSPrivateKeyFile = string(r.DecodeString()) - } - case "certDirectory": - if r.TryDecodeAsNil() { - x.CertDirectory = "" - } else { - x.CertDirectory = string(r.DecodeString()) - } - case "hostnameOverride": - if r.TryDecodeAsNil() { - x.HostnameOverride = "" - } else { - x.HostnameOverride = string(r.DecodeString()) - } - case "podInfraContainerImage": - if r.TryDecodeAsNil() { - x.PodInfraContainerImage = "" - } else { - x.PodInfraContainerImage = string(r.DecodeString()) - } - case "dockerEndpoint": - if r.TryDecodeAsNil() { - x.DockerEndpoint = "" - } else { - x.DockerEndpoint = string(r.DecodeString()) - } - case "rootDirectory": - if r.TryDecodeAsNil() { - x.RootDirectory = "" - } else { - x.RootDirectory = string(r.DecodeString()) - } - case "seccompProfileRoot": - if r.TryDecodeAsNil() { - x.SeccompProfileRoot = "" - } else { - x.SeccompProfileRoot = string(r.DecodeString()) - } - case "allowPrivileged": - if r.TryDecodeAsNil() { - x.AllowPrivileged = false - } else { - x.AllowPrivileged = bool(r.DecodeBool()) - } - case "hostNetworkSources": - if r.TryDecodeAsNil() { - x.HostNetworkSources = "" - } else { - x.HostNetworkSources = string(r.DecodeString()) - } - case "hostPIDSources": - if r.TryDecodeAsNil() { - x.HostPIDSources = "" - } else { - x.HostPIDSources = string(r.DecodeString()) - } - case "hostIPCSources": - if r.TryDecodeAsNil() { - x.HostIPCSources = "" - } else { - x.HostIPCSources = string(r.DecodeString()) - } - case "registryPullQPS": - if r.TryDecodeAsNil() { - x.RegistryPullQPS = 0 - } else { - x.RegistryPullQPS = float64(r.DecodeFloat(false)) - } - case "registryBurst": - if r.TryDecodeAsNil() { - x.RegistryBurst = 0 - } else { - x.RegistryBurst = int32(r.DecodeInt(32)) - } - case "eventRecordQPS": - if r.TryDecodeAsNil() { - x.EventRecordQPS = 0 - } else { - x.EventRecordQPS = float32(r.DecodeFloat(true)) - } - case "eventBurst": - if r.TryDecodeAsNil() { - x.EventBurst = 0 - } else { - x.EventBurst = int32(r.DecodeInt(32)) - } - case "enableDebuggingHandlers": - if 
r.TryDecodeAsNil() { - x.EnableDebuggingHandlers = false - } else { - x.EnableDebuggingHandlers = bool(r.DecodeBool()) - } - case "minimumGCAge": - if r.TryDecodeAsNil() { - x.MinimumGCAge = pkg1_unversioned.Duration{} - } else { - yyv34 := &x.MinimumGCAge - yym35 := z.DecBinary() - _ = yym35 - if false { - } else if z.HasExtensions() && z.DecExt(yyv34) { - } else if !yym35 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv34) - } else { - z.DecFallback(yyv34, false) - } - } - case "maxPerPodContainerCount": - if r.TryDecodeAsNil() { - x.MaxPerPodContainerCount = 0 - } else { - x.MaxPerPodContainerCount = int32(r.DecodeInt(32)) - } - case "maxContainerCount": - if r.TryDecodeAsNil() { - x.MaxContainerCount = 0 - } else { - x.MaxContainerCount = int32(r.DecodeInt(32)) - } - case "cAdvisorPort": - if r.TryDecodeAsNil() { - x.CAdvisorPort = 0 - } else { - x.CAdvisorPort = uint(r.DecodeUint(codecSelferBitsize1234)) - } - case "healthzPort": - if r.TryDecodeAsNil() { - x.HealthzPort = 0 - } else { - x.HealthzPort = int32(r.DecodeInt(32)) - } - case "healthzBindAddress": - if r.TryDecodeAsNil() { - x.HealthzBindAddress = "" - } else { - x.HealthzBindAddress = string(r.DecodeString()) - } - case "oomScoreAdj": - if r.TryDecodeAsNil() { - x.OOMScoreAdj = 0 - } else { - x.OOMScoreAdj = int32(r.DecodeInt(32)) - } - case "registerNode": - if r.TryDecodeAsNil() { - x.RegisterNode = false - } else { - x.RegisterNode = bool(r.DecodeBool()) - } - case "clusterDomain": - if r.TryDecodeAsNil() { - x.ClusterDomain = "" - } else { - x.ClusterDomain = string(r.DecodeString()) - } - case "masterServiceNamespace": - if r.TryDecodeAsNil() { - x.MasterServiceNamespace = "" - } else { - x.MasterServiceNamespace = string(r.DecodeString()) - } - case "clusterDNS": - if r.TryDecodeAsNil() { - x.ClusterDNS = "" - } else { - x.ClusterDNS = string(r.DecodeString()) - } - case "streamingConnectionIdleTimeout": - if r.TryDecodeAsNil() { - x.StreamingConnectionIdleTimeout = pkg1_unversioned.Duration{} - } else { - yyv46 := &x.StreamingConnectionIdleTimeout - yym47 := z.DecBinary() - _ = yym47 - if false { - } else if z.HasExtensions() && z.DecExt(yyv46) { - } else if !yym47 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv46) - } else { - z.DecFallback(yyv46, false) - } - } - case "nodeStatusUpdateFrequency": - if r.TryDecodeAsNil() { - x.NodeStatusUpdateFrequency = pkg1_unversioned.Duration{} - } else { - yyv48 := &x.NodeStatusUpdateFrequency - yym49 := z.DecBinary() - _ = yym49 - if false { - } else if z.HasExtensions() && z.DecExt(yyv48) { - } else if !yym49 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv48) - } else { - z.DecFallback(yyv48, false) - } - } - case "imageMinimumGCAge": - if r.TryDecodeAsNil() { - x.ImageMinimumGCAge = pkg1_unversioned.Duration{} - } else { - yyv50 := &x.ImageMinimumGCAge - yym51 := z.DecBinary() - _ = yym51 - if false { - } else if z.HasExtensions() && z.DecExt(yyv50) { - } else if !yym51 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv50) - } else { - z.DecFallback(yyv50, false) - } - } - case "imageGCHighThresholdPercent": - if r.TryDecodeAsNil() { - x.ImageGCHighThresholdPercent = 0 - } else { - x.ImageGCHighThresholdPercent = int32(r.DecodeInt(32)) - } - case "imageGCLowThresholdPercent": - if r.TryDecodeAsNil() { - x.ImageGCLowThresholdPercent = 0 - } else { - x.ImageGCLowThresholdPercent = int32(r.DecodeInt(32)) - } - case "lowDiskSpaceThresholdMB": - if r.TryDecodeAsNil() { - x.LowDiskSpaceThresholdMB = 0 - } else { - x.LowDiskSpaceThresholdMB = int32(r.DecodeInt(32)) - } - case 
"volumeStatsAggPeriod": - if r.TryDecodeAsNil() { - x.VolumeStatsAggPeriod = pkg1_unversioned.Duration{} - } else { - yyv55 := &x.VolumeStatsAggPeriod - yym56 := z.DecBinary() - _ = yym56 - if false { - } else if z.HasExtensions() && z.DecExt(yyv55) { - } else if !yym56 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv55) - } else { - z.DecFallback(yyv55, false) - } - } - case "networkPluginName": - if r.TryDecodeAsNil() { - x.NetworkPluginName = "" - } else { - x.NetworkPluginName = string(r.DecodeString()) - } - case "networkPluginDir": - if r.TryDecodeAsNil() { - x.NetworkPluginDir = "" - } else { - x.NetworkPluginDir = string(r.DecodeString()) - } - case "volumePluginDir": - if r.TryDecodeAsNil() { - x.VolumePluginDir = "" - } else { - x.VolumePluginDir = string(r.DecodeString()) - } - case "cloudProvider": - if r.TryDecodeAsNil() { - x.CloudProvider = "" - } else { - x.CloudProvider = string(r.DecodeString()) - } - case "cloudConfigFile": - if r.TryDecodeAsNil() { - x.CloudConfigFile = "" - } else { - x.CloudConfigFile = string(r.DecodeString()) - } - case "kubeletCgroups": - if r.TryDecodeAsNil() { - x.KubeletCgroups = "" - } else { - x.KubeletCgroups = string(r.DecodeString()) - } - case "runtimeCgroups": - if r.TryDecodeAsNil() { - x.RuntimeCgroups = "" - } else { - x.RuntimeCgroups = string(r.DecodeString()) - } - case "systemContainer": - if r.TryDecodeAsNil() { - x.SystemCgroups = "" - } else { - x.SystemCgroups = string(r.DecodeString()) - } - case "cgroupRoot": - if r.TryDecodeAsNil() { - x.CgroupRoot = "" - } else { - x.CgroupRoot = string(r.DecodeString()) - } - case "containerRuntime": - if r.TryDecodeAsNil() { - x.ContainerRuntime = "" - } else { - x.ContainerRuntime = string(r.DecodeString()) - } - case "runtimeRequestTimeout": - if r.TryDecodeAsNil() { - x.RuntimeRequestTimeout = pkg1_unversioned.Duration{} - } else { - yyv67 := &x.RuntimeRequestTimeout - yym68 := z.DecBinary() - _ = yym68 - if false { - } else if z.HasExtensions() && z.DecExt(yyv67) { - } else if !yym68 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv67) - } else { - z.DecFallback(yyv67, false) - } - } - case "rktPath": - if r.TryDecodeAsNil() { - x.RktPath = "" - } else { - x.RktPath = string(r.DecodeString()) - } - case "rktAPIEndpoint": - if r.TryDecodeAsNil() { - x.RktAPIEndpoint = "" - } else { - x.RktAPIEndpoint = string(r.DecodeString()) - } - case "rktStage1Image": - if r.TryDecodeAsNil() { - x.RktStage1Image = "" - } else { - x.RktStage1Image = string(r.DecodeString()) - } - case "lockFilePath": - if r.TryDecodeAsNil() { - x.LockFilePath = "" - } else { - x.LockFilePath = string(r.DecodeString()) - } - case "exitOnLockContention": - if r.TryDecodeAsNil() { - x.ExitOnLockContention = false - } else { - x.ExitOnLockContention = bool(r.DecodeBool()) - } - case "configureCbr0": - if r.TryDecodeAsNil() { - x.ConfigureCBR0 = false - } else { - x.ConfigureCBR0 = bool(r.DecodeBool()) - } - case "hairpinMode": - if r.TryDecodeAsNil() { - x.HairpinMode = "" - } else { - x.HairpinMode = string(r.DecodeString()) - } - case "babysitDaemons": - if r.TryDecodeAsNil() { - x.BabysitDaemons = false - } else { - x.BabysitDaemons = bool(r.DecodeBool()) - } - case "maxPods": - if r.TryDecodeAsNil() { - x.MaxPods = 0 - } else { - x.MaxPods = int32(r.DecodeInt(32)) - } - case "nvidiaGPUs": - if r.TryDecodeAsNil() { - x.NvidiaGPUs = 0 - } else { - x.NvidiaGPUs = int32(r.DecodeInt(32)) - } - case "dockerExecHandlerName": - if r.TryDecodeAsNil() { - x.DockerExecHandlerName = "" - } else { - x.DockerExecHandlerName = 
string(r.DecodeString()) - } - case "podCIDR": - if r.TryDecodeAsNil() { - x.PodCIDR = "" - } else { - x.PodCIDR = string(r.DecodeString()) - } - case "resolvConf": - if r.TryDecodeAsNil() { - x.ResolverConfig = "" - } else { - x.ResolverConfig = string(r.DecodeString()) - } - case "cpuCFSQuota": - if r.TryDecodeAsNil() { - x.CPUCFSQuota = false - } else { - x.CPUCFSQuota = bool(r.DecodeBool()) - } - case "containerized": - if r.TryDecodeAsNil() { - x.Containerized = false - } else { - x.Containerized = bool(r.DecodeBool()) - } - case "maxOpenFiles": - if r.TryDecodeAsNil() { - x.MaxOpenFiles = 0 - } else { - x.MaxOpenFiles = uint64(r.DecodeUint(64)) - } - case "reconcileCIDR": - if r.TryDecodeAsNil() { - x.ReconcileCIDR = false - } else { - x.ReconcileCIDR = bool(r.DecodeBool()) - } - case "registerSchedulable": - if r.TryDecodeAsNil() { - x.RegisterSchedulable = false - } else { - x.RegisterSchedulable = bool(r.DecodeBool()) - } - case "contentType": - if r.TryDecodeAsNil() { - x.ContentType = "" - } else { - x.ContentType = string(r.DecodeString()) - } - case "kubeAPIQPS": - if r.TryDecodeAsNil() { - x.KubeAPIQPS = 0 - } else { - x.KubeAPIQPS = float32(r.DecodeFloat(true)) - } - case "kubeAPIBurst": - if r.TryDecodeAsNil() { - x.KubeAPIBurst = 0 - } else { - x.KubeAPIBurst = int32(r.DecodeInt(32)) - } - case "serializeImagePulls": - if r.TryDecodeAsNil() { - x.SerializeImagePulls = false - } else { - x.SerializeImagePulls = bool(r.DecodeBool()) - } - case "experimentalFlannelOverlay": - if r.TryDecodeAsNil() { - x.ExperimentalFlannelOverlay = false - } else { - x.ExperimentalFlannelOverlay = bool(r.DecodeBool()) - } - case "outOfDiskTransitionFrequency": - if r.TryDecodeAsNil() { - x.OutOfDiskTransitionFrequency = pkg1_unversioned.Duration{} - } else { - yyv92 := &x.OutOfDiskTransitionFrequency - yym93 := z.DecBinary() - _ = yym93 - if false { - } else if z.HasExtensions() && z.DecExt(yyv92) { - } else if !yym93 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv92) - } else { - z.DecFallback(yyv92, false) - } - } - case "nodeIP": - if r.TryDecodeAsNil() { - x.NodeIP = "" - } else { - x.NodeIP = string(r.DecodeString()) - } - case "nodeLabels": - if r.TryDecodeAsNil() { - x.NodeLabels = nil - } else { - yyv95 := &x.NodeLabels - yym96 := z.DecBinary() - _ = yym96 - if false { - } else { - z.F.DecMapStringStringX(yyv95, false, d) - } - } - case "nonMasqueradeCIDR": - if r.TryDecodeAsNil() { - x.NonMasqueradeCIDR = "" - } else { - x.NonMasqueradeCIDR = string(r.DecodeString()) - } - case "enableCustomMetrics": - if r.TryDecodeAsNil() { - x.EnableCustomMetrics = false - } else { - x.EnableCustomMetrics = bool(r.DecodeBool()) - } - case "evictionHard": - if r.TryDecodeAsNil() { - x.EvictionHard = "" - } else { - x.EvictionHard = string(r.DecodeString()) - } - case "evictionSoft": - if r.TryDecodeAsNil() { - x.EvictionSoft = "" - } else { - x.EvictionSoft = string(r.DecodeString()) - } - case "evictionSoftGracePeriod": - if r.TryDecodeAsNil() { - x.EvictionSoftGracePeriod = "" - } else { - x.EvictionSoftGracePeriod = string(r.DecodeString()) - } - case "evictionPressureTransitionPeriod": - if r.TryDecodeAsNil() { - x.EvictionPressureTransitionPeriod = pkg1_unversioned.Duration{} - } else { - yyv102 := &x.EvictionPressureTransitionPeriod - yym103 := z.DecBinary() - _ = yym103 - if false { - } else if z.HasExtensions() && z.DecExt(yyv102) { - } else if !yym103 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv102) - } else { - z.DecFallback(yyv102, false) - } - } - case "evictionMaxPodGracePeriod": - if 
r.TryDecodeAsNil() { - x.EvictionMaxPodGracePeriod = 0 - } else { - x.EvictionMaxPodGracePeriod = int32(r.DecodeInt(32)) - } - case "podsPerCore": - if r.TryDecodeAsNil() { - x.PodsPerCore = 0 - } else { - x.PodsPerCore = int32(r.DecodeInt(32)) - } - case "enableControllerAttachDetach": - if r.TryDecodeAsNil() { - x.EnableControllerAttachDetach = false - } else { - x.EnableControllerAttachDetach = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj107 int - var yyb107 bool - var yyhl107 bool = l >= 0 - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Config = "" - } else { - x.Config = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SyncFrequency = pkg1_unversioned.Duration{} - } else { - yyv109 := &x.SyncFrequency - yym110 := z.DecBinary() - _ = yym110 - if false { - } else if z.HasExtensions() && z.DecExt(yyv109) { - } else if !yym110 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv109) - } else { - z.DecFallback(yyv109, false) - } - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FileCheckFrequency = pkg1_unversioned.Duration{} - } else { - yyv111 := &x.FileCheckFrequency - yym112 := z.DecBinary() - _ = yym112 - if false { - } else if z.HasExtensions() && z.DecExt(yyv111) { - } else if !yym112 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv111) - } else { - z.DecFallback(yyv111, false) - } - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HTTPCheckFrequency = pkg1_unversioned.Duration{} - } else { - yyv113 := &x.HTTPCheckFrequency - yym114 := z.DecBinary() - _ = yym114 - if false { - } else if z.HasExtensions() && z.DecExt(yyv113) { - } else if !yym114 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv113) - } else { - z.DecFallback(yyv113, false) - } - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ManifestURL = "" - } else { - x.ManifestURL = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ManifestURLHeader = "" - } else { - 
x.ManifestURLHeader = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EnableServer = false - } else { - x.EnableServer = bool(r.DecodeBool()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Address = "" - } else { - x.Address = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = uint(r.DecodeUint(codecSelferBitsize1234)) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnlyPort = 0 - } else { - x.ReadOnlyPort = uint(r.DecodeUint(codecSelferBitsize1234)) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TLSCertFile = "" - } else { - x.TLSCertFile = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TLSPrivateKeyFile = "" - } else { - x.TLSPrivateKeyFile = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CertDirectory = "" - } else { - x.CertDirectory = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostnameOverride = "" - } else { - x.HostnameOverride = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodInfraContainerImage = "" - } else { - x.PodInfraContainerImage = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DockerEndpoint = "" - } else { - x.DockerEndpoint = 
string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RootDirectory = "" - } else { - x.RootDirectory = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SeccompProfileRoot = "" - } else { - x.SeccompProfileRoot = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AllowPrivileged = false - } else { - x.AllowPrivileged = bool(r.DecodeBool()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostNetworkSources = "" - } else { - x.HostNetworkSources = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostPIDSources = "" - } else { - x.HostPIDSources = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostIPCSources = "" - } else { - x.HostIPCSources = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RegistryPullQPS = 0 - } else { - x.RegistryPullQPS = float64(r.DecodeFloat(false)) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RegistryBurst = 0 - } else { - x.RegistryBurst = int32(r.DecodeInt(32)) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EventRecordQPS = 0 - } else { - x.EventRecordQPS = float32(r.DecodeFloat(true)) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EventBurst = 0 - } else { - x.EventBurst = int32(r.DecodeInt(32)) - } - yyj107++ - if 
yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EnableDebuggingHandlers = false - } else { - x.EnableDebuggingHandlers = bool(r.DecodeBool()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinimumGCAge = pkg1_unversioned.Duration{} - } else { - yyv138 := &x.MinimumGCAge - yym139 := z.DecBinary() - _ = yym139 - if false { - } else if z.HasExtensions() && z.DecExt(yyv138) { - } else if !yym139 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv138) - } else { - z.DecFallback(yyv138, false) - } - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxPerPodContainerCount = 0 - } else { - x.MaxPerPodContainerCount = int32(r.DecodeInt(32)) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxContainerCount = 0 - } else { - x.MaxContainerCount = int32(r.DecodeInt(32)) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CAdvisorPort = 0 - } else { - x.CAdvisorPort = uint(r.DecodeUint(codecSelferBitsize1234)) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HealthzPort = 0 - } else { - x.HealthzPort = int32(r.DecodeInt(32)) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HealthzBindAddress = "" - } else { - x.HealthzBindAddress = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.OOMScoreAdj = 0 - } else { - x.OOMScoreAdj = int32(r.DecodeInt(32)) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RegisterNode = false - } else { - x.RegisterNode = bool(r.DecodeBool()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterDomain = "" - } else { - x.ClusterDomain = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MasterServiceNamespace = "" - } else { - x.MasterServiceNamespace = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterDNS = "" - } else { - x.ClusterDNS = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.StreamingConnectionIdleTimeout = pkg1_unversioned.Duration{} - } else { - yyv150 := &x.StreamingConnectionIdleTimeout - yym151 := z.DecBinary() - _ = yym151 - if false { - } else if z.HasExtensions() && z.DecExt(yyv150) { - } else if !yym151 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv150) - } else { - z.DecFallback(yyv150, false) - } - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeStatusUpdateFrequency = pkg1_unversioned.Duration{} - } else { - yyv152 := &x.NodeStatusUpdateFrequency - yym153 := z.DecBinary() - _ = yym153 - if false { - } else if z.HasExtensions() && z.DecExt(yyv152) { - } else if !yym153 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv152) - } else { - z.DecFallback(yyv152, false) - } - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ImageMinimumGCAge = pkg1_unversioned.Duration{} - } else { - yyv154 := &x.ImageMinimumGCAge - yym155 := z.DecBinary() - _ = yym155 - if false { - } else if z.HasExtensions() && z.DecExt(yyv154) { - } else if !yym155 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv154) - } else { - z.DecFallback(yyv154, false) - } - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ImageGCHighThresholdPercent = 0 - } else { - x.ImageGCHighThresholdPercent = int32(r.DecodeInt(32)) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ImageGCLowThresholdPercent = 0 - } else { - x.ImageGCLowThresholdPercent = int32(r.DecodeInt(32)) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { 
- yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LowDiskSpaceThresholdMB = 0 - } else { - x.LowDiskSpaceThresholdMB = int32(r.DecodeInt(32)) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeStatsAggPeriod = pkg1_unversioned.Duration{} - } else { - yyv159 := &x.VolumeStatsAggPeriod - yym160 := z.DecBinary() - _ = yym160 - if false { - } else if z.HasExtensions() && z.DecExt(yyv159) { - } else if !yym160 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv159) - } else { - z.DecFallback(yyv159, false) - } - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NetworkPluginName = "" - } else { - x.NetworkPluginName = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NetworkPluginDir = "" - } else { - x.NetworkPluginDir = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumePluginDir = "" - } else { - x.VolumePluginDir = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CloudProvider = "" - } else { - x.CloudProvider = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CloudConfigFile = "" - } else { - x.CloudConfigFile = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeletCgroups = "" - } else { - x.KubeletCgroups = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RuntimeCgroups = "" - } else { - x.RuntimeCgroups = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SystemCgroups = "" - } else { - x.SystemCgroups = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CgroupRoot = "" - } else { - x.CgroupRoot = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContainerRuntime = "" - } else { - x.ContainerRuntime = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RuntimeRequestTimeout = pkg1_unversioned.Duration{} - } else { - yyv171 := &x.RuntimeRequestTimeout - yym172 := z.DecBinary() - _ = yym172 - if false { - } else if z.HasExtensions() && z.DecExt(yyv171) { - } else if !yym172 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv171) - } else { - z.DecFallback(yyv171, false) - } - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RktPath = "" - } else { - x.RktPath = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RktAPIEndpoint = "" - } else { - x.RktAPIEndpoint = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RktStage1Image = "" - } else { - x.RktStage1Image = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LockFilePath = "" - } else { - x.LockFilePath = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExitOnLockContention = false - } else { - x.ExitOnLockContention = bool(r.DecodeBool()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConfigureCBR0 = false - } else { - x.ConfigureCBR0 = bool(r.DecodeBool()) - } - 
yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HairpinMode = "" - } else { - x.HairpinMode = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.BabysitDaemons = false - } else { - x.BabysitDaemons = bool(r.DecodeBool()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxPods = 0 - } else { - x.MaxPods = int32(r.DecodeInt(32)) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NvidiaGPUs = 0 - } else { - x.NvidiaGPUs = int32(r.DecodeInt(32)) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DockerExecHandlerName = "" - } else { - x.DockerExecHandlerName = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodCIDR = "" - } else { - x.PodCIDR = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResolverConfig = "" - } else { - x.ResolverConfig = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CPUCFSQuota = false - } else { - x.CPUCFSQuota = bool(r.DecodeBool()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Containerized = false - } else { - x.Containerized = bool(r.DecodeBool()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxOpenFiles = 0 - } else { - x.MaxOpenFiles = uint64(r.DecodeUint(64)) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReconcileCIDR = false - } else { - x.ReconcileCIDR = bool(r.DecodeBool()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RegisterSchedulable = false - } else { - x.RegisterSchedulable = bool(r.DecodeBool()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContentType = "" - } else { - x.ContentType = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeAPIQPS = 0 - } else { - x.KubeAPIQPS = float32(r.DecodeFloat(true)) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeAPIBurst = 0 - } else { - x.KubeAPIBurst = int32(r.DecodeInt(32)) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SerializeImagePulls = false - } else { - x.SerializeImagePulls = bool(r.DecodeBool()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExperimentalFlannelOverlay = false - } else { - x.ExperimentalFlannelOverlay = bool(r.DecodeBool()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.OutOfDiskTransitionFrequency = pkg1_unversioned.Duration{} - } else { - yyv196 := &x.OutOfDiskTransitionFrequency - yym197 := z.DecBinary() - _ = yym197 - if false { - } else if z.HasExtensions() && z.DecExt(yyv196) { - } else if !yym197 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv196) - } else { - z.DecFallback(yyv196, false) - } - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeIP = "" - } else { - x.NodeIP = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.NodeLabels = nil - } else { - yyv199 := &x.NodeLabels - yym200 := z.DecBinary() - _ = yym200 - if false { - } else { - z.F.DecMapStringStringX(yyv199, false, d) - } - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NonMasqueradeCIDR = "" - } else { - x.NonMasqueradeCIDR = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EnableCustomMetrics = false - } else { - x.EnableCustomMetrics = bool(r.DecodeBool()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EvictionHard = "" - } else { - x.EvictionHard = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EvictionSoft = "" - } else { - x.EvictionSoft = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EvictionSoftGracePeriod = "" - } else { - x.EvictionSoftGracePeriod = string(r.DecodeString()) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EvictionPressureTransitionPeriod = pkg1_unversioned.Duration{} - } else { - yyv206 := &x.EvictionPressureTransitionPeriod - yym207 := z.DecBinary() - _ = yym207 - if false { - } else if z.HasExtensions() && z.DecExt(yyv206) { - } else if !yym207 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv206) - } else { - z.DecFallback(yyv206, false) - } - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EvictionMaxPodGracePeriod = 0 - } else { - x.EvictionMaxPodGracePeriod = int32(r.DecodeInt(32)) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodsPerCore = 0 - } else { - x.PodsPerCore = int32(r.DecodeInt(32)) - } - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.EnableControllerAttachDetach = false - } else { - x.EnableControllerAttachDetach = bool(r.DecodeBool()) - } - for { - yyj107++ - if yyhl107 { - yyb107 = yyj107 > l - } else { - yyb107 = r.CheckBreak() - } - if yyb107 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj107-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [14]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[12] = x.Kind != "" - yyq2[13] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(14) - } else { - yynn2 = 12 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Address)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("address")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Address)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.AlgorithmProvider)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("algorithmProvider")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.AlgorithmProvider)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PolicyConfigFile)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("policyConfigFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PolicyConfigFile)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeBool(bool(x.EnableProfiling)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("enableProfiling")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeBool(bool(x.EnableProfiling)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("contentType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeFloat32(float32(x.KubeAPIQPS)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeAPIQPS")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeFloat32(float32(x.KubeAPIQPS)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeInt(int64(x.KubeAPIBurst)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeAPIBurst")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeInt(int64(x.KubeAPIBurst)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("schedulerName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym31 := z.EncBinary() - _ = yym31 - if false { - } else { - r.EncodeInt(int64(x.HardPodAffinitySymmetricWeight)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hardPodAffinitySymmetricWeight")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym32 := z.EncBinary() - _ = yym32 - if false { - } else { - r.EncodeInt(int64(x.HardPodAffinitySymmetricWeight)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym34 := z.EncBinary() - _ = yym34 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FailureDomains)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("failureDomains")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FailureDomains)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy37 := &x.LeaderElection - yy37.CodecEncodeSelf(e) - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("leaderElection")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy39 := &x.LeaderElection - yy39.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[12] { - yym42 := z.EncBinary() - _ = yym42 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[12] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym43 := z.EncBinary() - _ = yym43 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - yym45 := z.EncBinary() - _ = yym45 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym46 := z.EncBinary() - _ = yym46 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *KubeSchedulerConfiguration) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *KubeSchedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "port": - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - case "address": - if r.TryDecodeAsNil() { - x.Address = "" - } else { - x.Address = string(r.DecodeString()) - } - case "algorithmProvider": - if r.TryDecodeAsNil() { - x.AlgorithmProvider = "" - } else { - x.AlgorithmProvider = string(r.DecodeString()) - } - case "policyConfigFile": - if r.TryDecodeAsNil() { - x.PolicyConfigFile = "" - } else { - x.PolicyConfigFile = 
string(r.DecodeString()) - } - case "enableProfiling": - if r.TryDecodeAsNil() { - x.EnableProfiling = false - } else { - x.EnableProfiling = bool(r.DecodeBool()) - } - case "contentType": - if r.TryDecodeAsNil() { - x.ContentType = "" - } else { - x.ContentType = string(r.DecodeString()) - } - case "kubeAPIQPS": - if r.TryDecodeAsNil() { - x.KubeAPIQPS = 0 - } else { - x.KubeAPIQPS = float32(r.DecodeFloat(true)) - } - case "kubeAPIBurst": - if r.TryDecodeAsNil() { - x.KubeAPIBurst = 0 - } else { - x.KubeAPIBurst = int32(r.DecodeInt(32)) - } - case "schedulerName": - if r.TryDecodeAsNil() { - x.SchedulerName = "" - } else { - x.SchedulerName = string(r.DecodeString()) - } - case "hardPodAffinitySymmetricWeight": - if r.TryDecodeAsNil() { - x.HardPodAffinitySymmetricWeight = 0 - } else { - x.HardPodAffinitySymmetricWeight = int(r.DecodeInt(codecSelferBitsize1234)) - } - case "failureDomains": - if r.TryDecodeAsNil() { - x.FailureDomains = "" - } else { - x.FailureDomains = string(r.DecodeString()) - } - case "leaderElection": - if r.TryDecodeAsNil() { - x.LeaderElection = LeaderElectionConfiguration{} - } else { - yyv15 := &x.LeaderElection - yyv15.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj18 int - var yyb18 bool - var yyhl18 bool = l >= 0 - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Address = "" - } else { - x.Address = string(r.DecodeString()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AlgorithmProvider = "" - } else { - x.AlgorithmProvider = string(r.DecodeString()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PolicyConfigFile = "" - } else { - x.PolicyConfigFile = string(r.DecodeString()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EnableProfiling = false - } else { - x.EnableProfiling = bool(r.DecodeBool()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } 
else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContentType = "" - } else { - x.ContentType = string(r.DecodeString()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeAPIQPS = 0 - } else { - x.KubeAPIQPS = float32(r.DecodeFloat(true)) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeAPIBurst = 0 - } else { - x.KubeAPIBurst = int32(r.DecodeInt(32)) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SchedulerName = "" - } else { - x.SchedulerName = string(r.DecodeString()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HardPodAffinitySymmetricWeight = 0 - } else { - x.HardPodAffinitySymmetricWeight = int(r.DecodeInt(codecSelferBitsize1234)) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FailureDomains = "" - } else { - x.FailureDomains = string(r.DecodeString()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LeaderElection = LeaderElectionConfiguration{} - } else { - yyv30 := &x.LeaderElection - yyv30.CodecDecodeSelf(d) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj18++ - if yyhl18 { - yyb18 = yyj18 > l - } else { - yyb18 = r.CheckBreak() - } - if yyb18 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj18-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LeaderElectionConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - 
r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 4 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.LeaderElect)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("leaderElect")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.LeaderElect)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.LeaseDuration - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) - } else { - z.EncFallback(yy7) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("leaseDuration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.LeaseDuration - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy12 := &x.RenewDeadline - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("renewDeadline")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.RenewDeadline - yym15 := z.EncBinary() - _ = yym15 - if false { - } else if z.HasExtensions() && z.EncExt(yy14) { - } else if !yym15 && z.IsJSONHandle() { - z.EncJSONMarshal(yy14) - } else { - z.EncFallback(yy14) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy17 := &x.RetryPeriod - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(yy17) { - } else if !yym18 && z.IsJSONHandle() { - z.EncJSONMarshal(yy17) - } else { - z.EncFallback(yy17) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("retryPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy19 := &x.RetryPeriod - yym20 := z.EncBinary() - _ = yym20 - if false { - } else if z.HasExtensions() && z.EncExt(yy19) { - } else if !yym20 && z.IsJSONHandle() { - z.EncJSONMarshal(yy19) - } else { - z.EncFallback(yy19) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LeaderElectionConfiguration) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else 
if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LeaderElectionConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "leaderElect": - if r.TryDecodeAsNil() { - x.LeaderElect = false - } else { - x.LeaderElect = bool(r.DecodeBool()) - } - case "leaseDuration": - if r.TryDecodeAsNil() { - x.LeaseDuration = pkg1_unversioned.Duration{} - } else { - yyv5 := &x.LeaseDuration - yym6 := z.DecBinary() - _ = yym6 - if false { - } else if z.HasExtensions() && z.DecExt(yyv5) { - } else if !yym6 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv5) - } else { - z.DecFallback(yyv5, false) - } - } - case "renewDeadline": - if r.TryDecodeAsNil() { - x.RenewDeadline = pkg1_unversioned.Duration{} - } else { - yyv7 := &x.RenewDeadline - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(yyv7) { - } else if !yym8 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv7) - } else { - z.DecFallback(yyv7, false) - } - } - case "retryPeriod": - if r.TryDecodeAsNil() { - x.RetryPeriod = pkg1_unversioned.Duration{} - } else { - yyv9 := &x.RetryPeriod - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv9) - } else { - z.DecFallback(yyv9, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LeaderElectionConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LeaderElect = false - } else { - x.LeaderElect = bool(r.DecodeBool()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LeaseDuration = pkg1_unversioned.Duration{} - } else { - yyv13 := &x.LeaseDuration - yym14 := z.DecBinary() - _ = yym14 - if false { - } else if z.HasExtensions() && z.DecExt(yyv13) { - } else if !yym14 && 
z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv13) - } else { - z.DecFallback(yyv13, false) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RenewDeadline = pkg1_unversioned.Duration{} - } else { - yyv15 := &x.RenewDeadline - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.DecExt(yyv15) { - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv15) - } else { - z.DecFallback(yyv15, false) - } - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RetryPeriod = pkg1_unversioned.Duration{} - } else { - yyv17 := &x.RetryPeriod - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else if !yym18 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv17) - } else { - z.DecFallback(yyv17, false) - } - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [49]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[47] = x.Kind != "" - yyq2[48] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(49) - } else { - yynn2 = 47 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Port)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Address)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("address")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Address)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CloudProvider)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("cloudProvider")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CloudProvider)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CloudConfigFile)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cloudConfigFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.CloudConfigFile)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentEndpointSyncs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrentEndpointSyncs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentEndpointSyncs)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentRSSyncs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrentRSSyncs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentRSSyncs)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentRCSyncs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrentRCSyncs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentRCSyncs)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentResourceQuotaSyncs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrentResourceQuotaSyncs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentResourceQuotaSyncs)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentDeploymentSyncs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrentDeploymentSyncs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentDeploymentSyncs)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym31 := z.EncBinary() - _ = yym31 - if false { - } else { - 
r.EncodeInt(int64(x.ConcurrentDaemonSetSyncs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrentDaemonSetSyncs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym32 := z.EncBinary() - _ = yym32 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentDaemonSetSyncs)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym34 := z.EncBinary() - _ = yym34 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentJobSyncs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrentJobSyncs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentJobSyncs)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym37 := z.EncBinary() - _ = yym37 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentNamespaceSyncs)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("concurrentNamespaceSyncs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym38 := z.EncBinary() - _ = yym38 - if false { - } else { - r.EncodeInt(int64(x.ConcurrentNamespaceSyncs)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym40 := z.EncBinary() - _ = yym40 - if false { - } else { - r.EncodeInt(int64(x.LookupCacheSizeForRC)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lookupCacheSizeForRC")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym41 := z.EncBinary() - _ = yym41 - if false { - } else { - r.EncodeInt(int64(x.LookupCacheSizeForRC)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym43 := z.EncBinary() - _ = yym43 - if false { - } else { - r.EncodeInt(int64(x.LookupCacheSizeForRS)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lookupCacheSizeForRS")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym44 := z.EncBinary() - _ = yym44 - if false { - } else { - r.EncodeInt(int64(x.LookupCacheSizeForRS)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym46 := z.EncBinary() - _ = yym46 - if false { - } else { - r.EncodeInt(int64(x.LookupCacheSizeForDaemonSet)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lookupCacheSizeForDaemonSet")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym47 := z.EncBinary() - _ = yym47 - if false { - } else { - r.EncodeInt(int64(x.LookupCacheSizeForDaemonSet)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy49 := &x.ServiceSyncPeriod - yym50 := z.EncBinary() - _ = yym50 - if false { - } else if z.HasExtensions() && z.EncExt(yy49) { - } else if !yym50 && z.IsJSONHandle() { - z.EncJSONMarshal(yy49) - } else { - z.EncFallback(yy49) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceSyncPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy51 := &x.ServiceSyncPeriod - yym52 := z.EncBinary() - _ = yym52 - if 
false { - } else if z.HasExtensions() && z.EncExt(yy51) { - } else if !yym52 && z.IsJSONHandle() { - z.EncJSONMarshal(yy51) - } else { - z.EncFallback(yy51) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy54 := &x.NodeSyncPeriod - yym55 := z.EncBinary() - _ = yym55 - if false { - } else if z.HasExtensions() && z.EncExt(yy54) { - } else if !yym55 && z.IsJSONHandle() { - z.EncJSONMarshal(yy54) - } else { - z.EncFallback(yy54) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeSyncPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy56 := &x.NodeSyncPeriod - yym57 := z.EncBinary() - _ = yym57 - if false { - } else if z.HasExtensions() && z.EncExt(yy56) { - } else if !yym57 && z.IsJSONHandle() { - z.EncJSONMarshal(yy56) - } else { - z.EncFallback(yy56) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy59 := &x.ResourceQuotaSyncPeriod - yym60 := z.EncBinary() - _ = yym60 - if false { - } else if z.HasExtensions() && z.EncExt(yy59) { - } else if !yym60 && z.IsJSONHandle() { - z.EncJSONMarshal(yy59) - } else { - z.EncFallback(yy59) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceQuotaSyncPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy61 := &x.ResourceQuotaSyncPeriod - yym62 := z.EncBinary() - _ = yym62 - if false { - } else if z.HasExtensions() && z.EncExt(yy61) { - } else if !yym62 && z.IsJSONHandle() { - z.EncJSONMarshal(yy61) - } else { - z.EncFallback(yy61) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy64 := &x.NamespaceSyncPeriod - yym65 := z.EncBinary() - _ = yym65 - if false { - } else if z.HasExtensions() && z.EncExt(yy64) { - } else if !yym65 && z.IsJSONHandle() { - z.EncJSONMarshal(yy64) - } else { - z.EncFallback(yy64) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespaceSyncPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy66 := &x.NamespaceSyncPeriod - yym67 := z.EncBinary() - _ = yym67 - if false { - } else if z.HasExtensions() && z.EncExt(yy66) { - } else if !yym67 && z.IsJSONHandle() { - z.EncJSONMarshal(yy66) - } else { - z.EncFallback(yy66) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy69 := &x.PVClaimBinderSyncPeriod - yym70 := z.EncBinary() - _ = yym70 - if false { - } else if z.HasExtensions() && z.EncExt(yy69) { - } else if !yym70 && z.IsJSONHandle() { - z.EncJSONMarshal(yy69) - } else { - z.EncFallback(yy69) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("pvClaimBinderSyncPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy71 := &x.PVClaimBinderSyncPeriod - yym72 := z.EncBinary() - _ = yym72 - if false { - } else if z.HasExtensions() && z.EncExt(yy71) { - } else if !yym72 && z.IsJSONHandle() { - z.EncJSONMarshal(yy71) - } else { - z.EncFallback(yy71) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy74 := &x.MinResyncPeriod - yym75 := z.EncBinary() - _ = yym75 - if false { - } else if z.HasExtensions() && z.EncExt(yy74) { - } else if !yym75 && z.IsJSONHandle() { - z.EncJSONMarshal(yy74) - } else { - z.EncFallback(yy74) - } - } else { 
- z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minResyncPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy76 := &x.MinResyncPeriod - yym77 := z.EncBinary() - _ = yym77 - if false { - } else if z.HasExtensions() && z.EncExt(yy76) { - } else if !yym77 && z.IsJSONHandle() { - z.EncJSONMarshal(yy76) - } else { - z.EncFallback(yy76) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym79 := z.EncBinary() - _ = yym79 - if false { - } else { - r.EncodeInt(int64(x.TerminatedPodGCThreshold)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("terminatedPodGCThreshold")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym80 := z.EncBinary() - _ = yym80 - if false { - } else { - r.EncodeInt(int64(x.TerminatedPodGCThreshold)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy82 := &x.HorizontalPodAutoscalerSyncPeriod - yym83 := z.EncBinary() - _ = yym83 - if false { - } else if z.HasExtensions() && z.EncExt(yy82) { - } else if !yym83 && z.IsJSONHandle() { - z.EncJSONMarshal(yy82) - } else { - z.EncFallback(yy82) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("horizontalPodAutoscalerSyncPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy84 := &x.HorizontalPodAutoscalerSyncPeriod - yym85 := z.EncBinary() - _ = yym85 - if false { - } else if z.HasExtensions() && z.EncExt(yy84) { - } else if !yym85 && z.IsJSONHandle() { - z.EncJSONMarshal(yy84) - } else { - z.EncFallback(yy84) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy87 := &x.DeploymentControllerSyncPeriod - yym88 := z.EncBinary() - _ = yym88 - if false { - } else if z.HasExtensions() && z.EncExt(yy87) { - } else if !yym88 && z.IsJSONHandle() { - z.EncJSONMarshal(yy87) - } else { - z.EncFallback(yy87) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deploymentControllerSyncPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy89 := &x.DeploymentControllerSyncPeriod - yym90 := z.EncBinary() - _ = yym90 - if false { - } else if z.HasExtensions() && z.EncExt(yy89) { - } else if !yym90 && z.IsJSONHandle() { - z.EncJSONMarshal(yy89) - } else { - z.EncFallback(yy89) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy92 := &x.PodEvictionTimeout - yym93 := z.EncBinary() - _ = yym93 - if false { - } else if z.HasExtensions() && z.EncExt(yy92) { - } else if !yym93 && z.IsJSONHandle() { - z.EncJSONMarshal(yy92) - } else { - z.EncFallback(yy92) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podEvictionTimeout")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy94 := &x.PodEvictionTimeout - yym95 := z.EncBinary() - _ = yym95 - if false { - } else if z.HasExtensions() && z.EncExt(yy94) { - } else if !yym95 && z.IsJSONHandle() { - z.EncJSONMarshal(yy94) - } else { - z.EncFallback(yy94) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym97 := z.EncBinary() - _ = yym97 - if false { - } else { - r.EncodeFloat32(float32(x.DeletingPodsQps)) - } - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletingPodsQps")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym98 := z.EncBinary() - _ = yym98 - if false { - } else { - r.EncodeFloat32(float32(x.DeletingPodsQps)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym100 := z.EncBinary() - _ = yym100 - if false { - } else { - r.EncodeInt(int64(x.DeletingPodsBurst)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deletingPodsBurst")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym101 := z.EncBinary() - _ = yym101 - if false { - } else { - r.EncodeInt(int64(x.DeletingPodsBurst)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy103 := &x.NodeMonitorGracePeriod - yym104 := z.EncBinary() - _ = yym104 - if false { - } else if z.HasExtensions() && z.EncExt(yy103) { - } else if !yym104 && z.IsJSONHandle() { - z.EncJSONMarshal(yy103) - } else { - z.EncFallback(yy103) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeMonitorGracePeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy105 := &x.NodeMonitorGracePeriod - yym106 := z.EncBinary() - _ = yym106 - if false { - } else if z.HasExtensions() && z.EncExt(yy105) { - } else if !yym106 && z.IsJSONHandle() { - z.EncJSONMarshal(yy105) - } else { - z.EncFallback(yy105) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym108 := z.EncBinary() - _ = yym108 - if false { - } else { - r.EncodeInt(int64(x.RegisterRetryCount)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("registerRetryCount")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym109 := z.EncBinary() - _ = yym109 - if false { - } else { - r.EncodeInt(int64(x.RegisterRetryCount)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy111 := &x.NodeStartupGracePeriod - yym112 := z.EncBinary() - _ = yym112 - if false { - } else if z.HasExtensions() && z.EncExt(yy111) { - } else if !yym112 && z.IsJSONHandle() { - z.EncJSONMarshal(yy111) - } else { - z.EncFallback(yy111) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeStartupGracePeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy113 := &x.NodeStartupGracePeriod - yym114 := z.EncBinary() - _ = yym114 - if false { - } else if z.HasExtensions() && z.EncExt(yy113) { - } else if !yym114 && z.IsJSONHandle() { - z.EncJSONMarshal(yy113) - } else { - z.EncFallback(yy113) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy116 := &x.NodeMonitorPeriod - yym117 := z.EncBinary() - _ = yym117 - if false { - } else if z.HasExtensions() && z.EncExt(yy116) { - } else if !yym117 && z.IsJSONHandle() { - z.EncJSONMarshal(yy116) - } else { - z.EncFallback(yy116) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeMonitorPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy118 := &x.NodeMonitorPeriod - yym119 := z.EncBinary() - _ = yym119 - if false { - } else if z.HasExtensions() && z.EncExt(yy118) { - } 
else if !yym119 && z.IsJSONHandle() { - z.EncJSONMarshal(yy118) - } else { - z.EncFallback(yy118) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym121 := z.EncBinary() - _ = yym121 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountKeyFile)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceAccountKeyFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym122 := z.EncBinary() - _ = yym122 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountKeyFile)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym124 := z.EncBinary() - _ = yym124 - if false { - } else { - r.EncodeBool(bool(x.EnableProfiling)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enableProfiling")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym125 := z.EncBinary() - _ = yym125 - if false { - } else { - r.EncodeBool(bool(x.EnableProfiling)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym127 := z.EncBinary() - _ = yym127 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym128 := z.EncBinary() - _ = yym128 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym130 := z.EncBinary() - _ = yym130 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterCIDR)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusterCIDR")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym131 := z.EncBinary() - _ = yym131 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClusterCIDR)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym133 := z.EncBinary() - _ = yym133 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceCIDR)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceCIDR")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym134 := z.EncBinary() - _ = yym134 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceCIDR)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym136 := z.EncBinary() - _ = yym136 - if false { - } else { - r.EncodeInt(int64(x.NodeCIDRMaskSize)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeCIDRMaskSize")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym137 := z.EncBinary() - _ = yym137 - if false { - } else { - r.EncodeInt(int64(x.NodeCIDRMaskSize)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym139 := z.EncBinary() - _ = yym139 - if false { - } else { - r.EncodeBool(bool(x.AllocateNodeCIDRs)) - } - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("allocateNodeCIDRs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym140 := z.EncBinary() - _ = yym140 - if false { - } else { - r.EncodeBool(bool(x.AllocateNodeCIDRs)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym142 := z.EncBinary() - _ = yym142 - if false { - } else { - r.EncodeBool(bool(x.ConfigureCloudRoutes)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("configureCloudRoutes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym143 := z.EncBinary() - _ = yym143 - if false { - } else { - r.EncodeBool(bool(x.ConfigureCloudRoutes)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym145 := z.EncBinary() - _ = yym145 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RootCAFile)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rootCAFile")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym146 := z.EncBinary() - _ = yym146 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.RootCAFile)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym148 := z.EncBinary() - _ = yym148 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("contentType")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym149 := z.EncBinary() - _ = yym149 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym151 := z.EncBinary() - _ = yym151 - if false { - } else { - r.EncodeFloat32(float32(x.KubeAPIQPS)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeAPIQPS")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym152 := z.EncBinary() - _ = yym152 - if false { - } else { - r.EncodeFloat32(float32(x.KubeAPIQPS)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym154 := z.EncBinary() - _ = yym154 - if false { - } else { - r.EncodeInt(int64(x.KubeAPIBurst)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kubeAPIBurst")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym155 := z.EncBinary() - _ = yym155 - if false { - } else { - r.EncodeInt(int64(x.KubeAPIBurst)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy157 := &x.LeaderElection - yy157.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("leaderElection")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy159 := &x.LeaderElection - yy159.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy162 := &x.VolumeConfiguration - yy162.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("volumeConfiguration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy164 := &x.VolumeConfiguration - yy164.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy167 := &x.ControllerStartInterval - yym168 := z.EncBinary() - _ = yym168 - if false { - } else if z.HasExtensions() && z.EncExt(yy167) { - } else if !yym168 && z.IsJSONHandle() { - z.EncJSONMarshal(yy167) - } else { - z.EncFallback(yy167) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("controllerStartInterval")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy169 := &x.ControllerStartInterval - yym170 := z.EncBinary() - _ = yym170 - if false { - } else if z.HasExtensions() && z.EncExt(yy169) { - } else if !yym170 && z.IsJSONHandle() { - z.EncJSONMarshal(yy169) - } else { - z.EncFallback(yy169) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym172 := z.EncBinary() - _ = yym172 - if false { - } else { - r.EncodeBool(bool(x.EnableGarbageCollector)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enableGarbageCollector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym173 := z.EncBinary() - _ = yym173 - if false { - } else { - r.EncodeBool(bool(x.EnableGarbageCollector)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[47] { - yym175 := z.EncBinary() - _ = yym175 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[47] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym176 := z.EncBinary() - _ = yym176 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[48] { - yym178 := z.EncBinary() - _ = yym178 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[48] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym179 := z.EncBinary() - _ = yym179 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *KubeControllerManagerConfiguration) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) 
- } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "port": - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - case "address": - if r.TryDecodeAsNil() { - x.Address = "" - } else { - x.Address = string(r.DecodeString()) - } - case "cloudProvider": - if r.TryDecodeAsNil() { - x.CloudProvider = "" - } else { - x.CloudProvider = string(r.DecodeString()) - } - case "cloudConfigFile": - if r.TryDecodeAsNil() { - x.CloudConfigFile = "" - } else { - x.CloudConfigFile = string(r.DecodeString()) - } - case "concurrentEndpointSyncs": - if r.TryDecodeAsNil() { - x.ConcurrentEndpointSyncs = 0 - } else { - x.ConcurrentEndpointSyncs = int32(r.DecodeInt(32)) - } - case "concurrentRSSyncs": - if r.TryDecodeAsNil() { - x.ConcurrentRSSyncs = 0 - } else { - x.ConcurrentRSSyncs = int32(r.DecodeInt(32)) - } - case "concurrentRCSyncs": - if r.TryDecodeAsNil() { - x.ConcurrentRCSyncs = 0 - } else { - x.ConcurrentRCSyncs = int32(r.DecodeInt(32)) - } - case "concurrentResourceQuotaSyncs": - if r.TryDecodeAsNil() { - x.ConcurrentResourceQuotaSyncs = 0 - } else { - x.ConcurrentResourceQuotaSyncs = int32(r.DecodeInt(32)) - } - case "concurrentDeploymentSyncs": - if r.TryDecodeAsNil() { - x.ConcurrentDeploymentSyncs = 0 - } else { - x.ConcurrentDeploymentSyncs = int32(r.DecodeInt(32)) - } - case "concurrentDaemonSetSyncs": - if r.TryDecodeAsNil() { - x.ConcurrentDaemonSetSyncs = 0 - } else { - x.ConcurrentDaemonSetSyncs = int32(r.DecodeInt(32)) - } - case "concurrentJobSyncs": - if r.TryDecodeAsNil() { - x.ConcurrentJobSyncs = 0 - } else { - x.ConcurrentJobSyncs = int32(r.DecodeInt(32)) - } - case "concurrentNamespaceSyncs": - if r.TryDecodeAsNil() { - x.ConcurrentNamespaceSyncs = 0 - } else { - x.ConcurrentNamespaceSyncs = int32(r.DecodeInt(32)) - } - case "lookupCacheSizeForRC": - if r.TryDecodeAsNil() { - x.LookupCacheSizeForRC = 0 - } else { - x.LookupCacheSizeForRC = int32(r.DecodeInt(32)) - } - case "lookupCacheSizeForRS": - if r.TryDecodeAsNil() { - x.LookupCacheSizeForRS = 0 - } else { - x.LookupCacheSizeForRS = int32(r.DecodeInt(32)) - } - case "lookupCacheSizeForDaemonSet": - if r.TryDecodeAsNil() { - x.LookupCacheSizeForDaemonSet = 0 - } else { - x.LookupCacheSizeForDaemonSet = int32(r.DecodeInt(32)) - } - case "serviceSyncPeriod": - if r.TryDecodeAsNil() { - x.ServiceSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv19 := &x.ServiceSyncPeriod - yym20 := z.DecBinary() - _ = yym20 - if false { - } else if z.HasExtensions() && z.DecExt(yyv19) { - } else if !yym20 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv19) - } else { - z.DecFallback(yyv19, false) - } - } - case "nodeSyncPeriod": - if r.TryDecodeAsNil() { - x.NodeSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv21 := &x.NodeSyncPeriod - yym22 := z.DecBinary() - _ = yym22 - if false { - } else if 
z.HasExtensions() && z.DecExt(yyv21) { - } else if !yym22 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv21) - } else { - z.DecFallback(yyv21, false) - } - } - case "resourceQuotaSyncPeriod": - if r.TryDecodeAsNil() { - x.ResourceQuotaSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv23 := &x.ResourceQuotaSyncPeriod - yym24 := z.DecBinary() - _ = yym24 - if false { - } else if z.HasExtensions() && z.DecExt(yyv23) { - } else if !yym24 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv23) - } else { - z.DecFallback(yyv23, false) - } - } - case "namespaceSyncPeriod": - if r.TryDecodeAsNil() { - x.NamespaceSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv25 := &x.NamespaceSyncPeriod - yym26 := z.DecBinary() - _ = yym26 - if false { - } else if z.HasExtensions() && z.DecExt(yyv25) { - } else if !yym26 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv25) - } else { - z.DecFallback(yyv25, false) - } - } - case "pvClaimBinderSyncPeriod": - if r.TryDecodeAsNil() { - x.PVClaimBinderSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv27 := &x.PVClaimBinderSyncPeriod - yym28 := z.DecBinary() - _ = yym28 - if false { - } else if z.HasExtensions() && z.DecExt(yyv27) { - } else if !yym28 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv27) - } else { - z.DecFallback(yyv27, false) - } - } - case "minResyncPeriod": - if r.TryDecodeAsNil() { - x.MinResyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv29 := &x.MinResyncPeriod - yym30 := z.DecBinary() - _ = yym30 - if false { - } else if z.HasExtensions() && z.DecExt(yyv29) { - } else if !yym30 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv29) - } else { - z.DecFallback(yyv29, false) - } - } - case "terminatedPodGCThreshold": - if r.TryDecodeAsNil() { - x.TerminatedPodGCThreshold = 0 - } else { - x.TerminatedPodGCThreshold = int32(r.DecodeInt(32)) - } - case "horizontalPodAutoscalerSyncPeriod": - if r.TryDecodeAsNil() { - x.HorizontalPodAutoscalerSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv32 := &x.HorizontalPodAutoscalerSyncPeriod - yym33 := z.DecBinary() - _ = yym33 - if false { - } else if z.HasExtensions() && z.DecExt(yyv32) { - } else if !yym33 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv32) - } else { - z.DecFallback(yyv32, false) - } - } - case "deploymentControllerSyncPeriod": - if r.TryDecodeAsNil() { - x.DeploymentControllerSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv34 := &x.DeploymentControllerSyncPeriod - yym35 := z.DecBinary() - _ = yym35 - if false { - } else if z.HasExtensions() && z.DecExt(yyv34) { - } else if !yym35 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv34) - } else { - z.DecFallback(yyv34, false) - } - } - case "podEvictionTimeout": - if r.TryDecodeAsNil() { - x.PodEvictionTimeout = pkg1_unversioned.Duration{} - } else { - yyv36 := &x.PodEvictionTimeout - yym37 := z.DecBinary() - _ = yym37 - if false { - } else if z.HasExtensions() && z.DecExt(yyv36) { - } else if !yym37 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv36) - } else { - z.DecFallback(yyv36, false) - } - } - case "deletingPodsQps": - if r.TryDecodeAsNil() { - x.DeletingPodsQps = 0 - } else { - x.DeletingPodsQps = float32(r.DecodeFloat(true)) - } - case "deletingPodsBurst": - if r.TryDecodeAsNil() { - x.DeletingPodsBurst = 0 - } else { - x.DeletingPodsBurst = int32(r.DecodeInt(32)) - } - case "nodeMonitorGracePeriod": - if r.TryDecodeAsNil() { - x.NodeMonitorGracePeriod = pkg1_unversioned.Duration{} - } else { - yyv40 := &x.NodeMonitorGracePeriod - yym41 := z.DecBinary() - _ = yym41 - if false { - } else if z.HasExtensions() && 
z.DecExt(yyv40) { - } else if !yym41 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv40) - } else { - z.DecFallback(yyv40, false) - } - } - case "registerRetryCount": - if r.TryDecodeAsNil() { - x.RegisterRetryCount = 0 - } else { - x.RegisterRetryCount = int32(r.DecodeInt(32)) - } - case "nodeStartupGracePeriod": - if r.TryDecodeAsNil() { - x.NodeStartupGracePeriod = pkg1_unversioned.Duration{} - } else { - yyv43 := &x.NodeStartupGracePeriod - yym44 := z.DecBinary() - _ = yym44 - if false { - } else if z.HasExtensions() && z.DecExt(yyv43) { - } else if !yym44 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv43) - } else { - z.DecFallback(yyv43, false) - } - } - case "nodeMonitorPeriod": - if r.TryDecodeAsNil() { - x.NodeMonitorPeriod = pkg1_unversioned.Duration{} - } else { - yyv45 := &x.NodeMonitorPeriod - yym46 := z.DecBinary() - _ = yym46 - if false { - } else if z.HasExtensions() && z.DecExt(yyv45) { - } else if !yym46 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv45) - } else { - z.DecFallback(yyv45, false) - } - } - case "serviceAccountKeyFile": - if r.TryDecodeAsNil() { - x.ServiceAccountKeyFile = "" - } else { - x.ServiceAccountKeyFile = string(r.DecodeString()) - } - case "enableProfiling": - if r.TryDecodeAsNil() { - x.EnableProfiling = false - } else { - x.EnableProfiling = bool(r.DecodeBool()) - } - case "clusterName": - if r.TryDecodeAsNil() { - x.ClusterName = "" - } else { - x.ClusterName = string(r.DecodeString()) - } - case "clusterCIDR": - if r.TryDecodeAsNil() { - x.ClusterCIDR = "" - } else { - x.ClusterCIDR = string(r.DecodeString()) - } - case "serviceCIDR": - if r.TryDecodeAsNil() { - x.ServiceCIDR = "" - } else { - x.ServiceCIDR = string(r.DecodeString()) - } - case "nodeCIDRMaskSize": - if r.TryDecodeAsNil() { - x.NodeCIDRMaskSize = 0 - } else { - x.NodeCIDRMaskSize = int32(r.DecodeInt(32)) - } - case "allocateNodeCIDRs": - if r.TryDecodeAsNil() { - x.AllocateNodeCIDRs = false - } else { - x.AllocateNodeCIDRs = bool(r.DecodeBool()) - } - case "configureCloudRoutes": - if r.TryDecodeAsNil() { - x.ConfigureCloudRoutes = false - } else { - x.ConfigureCloudRoutes = bool(r.DecodeBool()) - } - case "rootCAFile": - if r.TryDecodeAsNil() { - x.RootCAFile = "" - } else { - x.RootCAFile = string(r.DecodeString()) - } - case "contentType": - if r.TryDecodeAsNil() { - x.ContentType = "" - } else { - x.ContentType = string(r.DecodeString()) - } - case "kubeAPIQPS": - if r.TryDecodeAsNil() { - x.KubeAPIQPS = 0 - } else { - x.KubeAPIQPS = float32(r.DecodeFloat(true)) - } - case "kubeAPIBurst": - if r.TryDecodeAsNil() { - x.KubeAPIBurst = 0 - } else { - x.KubeAPIBurst = int32(r.DecodeInt(32)) - } - case "leaderElection": - if r.TryDecodeAsNil() { - x.LeaderElection = LeaderElectionConfiguration{} - } else { - yyv59 := &x.LeaderElection - yyv59.CodecDecodeSelf(d) - } - case "volumeConfiguration": - if r.TryDecodeAsNil() { - x.VolumeConfiguration = VolumeConfiguration{} - } else { - yyv60 := &x.VolumeConfiguration - yyv60.CodecDecodeSelf(d) - } - case "controllerStartInterval": - if r.TryDecodeAsNil() { - x.ControllerStartInterval = pkg1_unversioned.Duration{} - } else { - yyv61 := &x.ControllerStartInterval - yym62 := z.DecBinary() - _ = yym62 - if false { - } else if z.HasExtensions() && z.DecExt(yyv61) { - } else if !yym62 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv61) - } else { - z.DecFallback(yyv61, false) - } - } - case "enableGarbageCollector": - if r.TryDecodeAsNil() { - x.EnableGarbageCollector = false - } else { - x.EnableGarbageCollector = bool(r.DecodeBool()) - } - case 
"kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj66 int - var yyb66 bool - var yyhl66 bool = l >= 0 - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Port = 0 - } else { - x.Port = int32(r.DecodeInt(32)) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Address = "" - } else { - x.Address = string(r.DecodeString()) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CloudProvider = "" - } else { - x.CloudProvider = string(r.DecodeString()) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CloudConfigFile = "" - } else { - x.CloudConfigFile = string(r.DecodeString()) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrentEndpointSyncs = 0 - } else { - x.ConcurrentEndpointSyncs = int32(r.DecodeInt(32)) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrentRSSyncs = 0 - } else { - x.ConcurrentRSSyncs = int32(r.DecodeInt(32)) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrentRCSyncs = 0 - } else { - x.ConcurrentRCSyncs = int32(r.DecodeInt(32)) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrentResourceQuotaSyncs = 0 - } else { - x.ConcurrentResourceQuotaSyncs = int32(r.DecodeInt(32)) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - 
} - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrentDeploymentSyncs = 0 - } else { - x.ConcurrentDeploymentSyncs = int32(r.DecodeInt(32)) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrentDaemonSetSyncs = 0 - } else { - x.ConcurrentDaemonSetSyncs = int32(r.DecodeInt(32)) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrentJobSyncs = 0 - } else { - x.ConcurrentJobSyncs = int32(r.DecodeInt(32)) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConcurrentNamespaceSyncs = 0 - } else { - x.ConcurrentNamespaceSyncs = int32(r.DecodeInt(32)) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LookupCacheSizeForRC = 0 - } else { - x.LookupCacheSizeForRC = int32(r.DecodeInt(32)) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LookupCacheSizeForRS = 0 - } else { - x.LookupCacheSizeForRS = int32(r.DecodeInt(32)) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LookupCacheSizeForDaemonSet = 0 - } else { - x.LookupCacheSizeForDaemonSet = int32(r.DecodeInt(32)) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServiceSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv82 := &x.ServiceSyncPeriod - yym83 := z.DecBinary() - _ = yym83 - if false { - } else if z.HasExtensions() && z.DecExt(yyv82) { - } else if !yym83 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv82) - } else { - z.DecFallback(yyv82, false) - } - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv84 := &x.NodeSyncPeriod - yym85 := z.DecBinary() - _ = yym85 - if false { - } else if z.HasExtensions() && z.DecExt(yyv84) { - } else if !yym85 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv84) - } else { - z.DecFallback(yyv84, false) - } - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = 
r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceQuotaSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv86 := &x.ResourceQuotaSyncPeriod - yym87 := z.DecBinary() - _ = yym87 - if false { - } else if z.HasExtensions() && z.DecExt(yyv86) { - } else if !yym87 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv86) - } else { - z.DecFallback(yyv86, false) - } - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NamespaceSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv88 := &x.NamespaceSyncPeriod - yym89 := z.DecBinary() - _ = yym89 - if false { - } else if z.HasExtensions() && z.DecExt(yyv88) { - } else if !yym89 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv88) - } else { - z.DecFallback(yyv88, false) - } - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PVClaimBinderSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv90 := &x.PVClaimBinderSyncPeriod - yym91 := z.DecBinary() - _ = yym91 - if false { - } else if z.HasExtensions() && z.DecExt(yyv90) { - } else if !yym91 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv90) - } else { - z.DecFallback(yyv90, false) - } - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinResyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv92 := &x.MinResyncPeriod - yym93 := z.DecBinary() - _ = yym93 - if false { - } else if z.HasExtensions() && z.DecExt(yyv92) { - } else if !yym93 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv92) - } else { - z.DecFallback(yyv92, false) - } - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TerminatedPodGCThreshold = 0 - } else { - x.TerminatedPodGCThreshold = int32(r.DecodeInt(32)) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HorizontalPodAutoscalerSyncPeriod = pkg1_unversioned.Duration{} - } else { - yyv95 := &x.HorizontalPodAutoscalerSyncPeriod - yym96 := z.DecBinary() - _ = yym96 - if false { - } else if z.HasExtensions() && z.DecExt(yyv95) { - } else if !yym96 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv95) - } else { - z.DecFallback(yyv95, false) - } - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DeploymentControllerSyncPeriod = pkg1_unversioned.Duration{} - } else { - 
yyv97 := &x.DeploymentControllerSyncPeriod - yym98 := z.DecBinary() - _ = yym98 - if false { - } else if z.HasExtensions() && z.DecExt(yyv97) { - } else if !yym98 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv97) - } else { - z.DecFallback(yyv97, false) - } - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodEvictionTimeout = pkg1_unversioned.Duration{} - } else { - yyv99 := &x.PodEvictionTimeout - yym100 := z.DecBinary() - _ = yym100 - if false { - } else if z.HasExtensions() && z.DecExt(yyv99) { - } else if !yym100 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv99) - } else { - z.DecFallback(yyv99, false) - } - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DeletingPodsQps = 0 - } else { - x.DeletingPodsQps = float32(r.DecodeFloat(true)) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DeletingPodsBurst = 0 - } else { - x.DeletingPodsBurst = int32(r.DecodeInt(32)) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeMonitorGracePeriod = pkg1_unversioned.Duration{} - } else { - yyv103 := &x.NodeMonitorGracePeriod - yym104 := z.DecBinary() - _ = yym104 - if false { - } else if z.HasExtensions() && z.DecExt(yyv103) { - } else if !yym104 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv103) - } else { - z.DecFallback(yyv103, false) - } - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RegisterRetryCount = 0 - } else { - x.RegisterRetryCount = int32(r.DecodeInt(32)) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeStartupGracePeriod = pkg1_unversioned.Duration{} - } else { - yyv106 := &x.NodeStartupGracePeriod - yym107 := z.DecBinary() - _ = yym107 - if false { - } else if z.HasExtensions() && z.DecExt(yyv106) { - } else if !yym107 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv106) - } else { - z.DecFallback(yyv106, false) - } - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeMonitorPeriod = pkg1_unversioned.Duration{} - } else { - yyv108 := &x.NodeMonitorPeriod - yym109 := z.DecBinary() - _ = yym109 - if false { - } else if z.HasExtensions() && z.DecExt(yyv108) { - } else if !yym109 && 
z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv108) - } else { - z.DecFallback(yyv108, false) - } - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServiceAccountKeyFile = "" - } else { - x.ServiceAccountKeyFile = string(r.DecodeString()) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EnableProfiling = false - } else { - x.EnableProfiling = bool(r.DecodeBool()) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterName = "" - } else { - x.ClusterName = string(r.DecodeString()) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClusterCIDR = "" - } else { - x.ClusterCIDR = string(r.DecodeString()) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServiceCIDR = "" - } else { - x.ServiceCIDR = string(r.DecodeString()) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NodeCIDRMaskSize = 0 - } else { - x.NodeCIDRMaskSize = int32(r.DecodeInt(32)) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AllocateNodeCIDRs = false - } else { - x.AllocateNodeCIDRs = bool(r.DecodeBool()) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ConfigureCloudRoutes = false - } else { - x.ConfigureCloudRoutes = bool(r.DecodeBool()) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RootCAFile = "" - } else { - x.RootCAFile = string(r.DecodeString()) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ContentType = "" - } else { - x.ContentType = string(r.DecodeString()) - } - yyj66++ - if 
yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeAPIQPS = 0 - } else { - x.KubeAPIQPS = float32(r.DecodeFloat(true)) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.KubeAPIBurst = 0 - } else { - x.KubeAPIBurst = int32(r.DecodeInt(32)) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LeaderElection = LeaderElectionConfiguration{} - } else { - yyv122 := &x.LeaderElection - yyv122.CodecDecodeSelf(d) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.VolumeConfiguration = VolumeConfiguration{} - } else { - yyv123 := &x.VolumeConfiguration - yyv123.CodecDecodeSelf(d) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ControllerStartInterval = pkg1_unversioned.Duration{} - } else { - yyv124 := &x.ControllerStartInterval - yym125 := z.DecBinary() - _ = yym125 - if false { - } else if z.HasExtensions() && z.DecExt(yyv124) { - } else if !yym125 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv124) - } else { - z.DecFallback(yyv124, false) - } - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EnableGarbageCollector = false - } else { - x.EnableGarbageCollector = bool(r.DecodeBool()) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj66++ - if yyhl66 { - yyb66 = yyj66 > l - } else { - yyb66 = r.CheckBreak() - } - if yyb66 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj66-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *VolumeConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ 
= yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 4 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.EnableHostPathProvisioning)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enableHostPathProvisioning")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.EnableHostPathProvisioning)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.EnableDynamicProvisioning)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("enableDynamicProvisioning")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.EnableDynamicProvisioning)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy10 := &x.PersistentVolumeRecyclerConfiguration - yy10.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("persitentVolumeRecyclerConfiguration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.PersistentVolumeRecyclerConfiguration - yy12.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FlexVolumePluginDir)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("flexVolumePluginDir")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FlexVolumePluginDir)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *VolumeConfiguration) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *VolumeConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r 
:= codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "enableHostPathProvisioning": - if r.TryDecodeAsNil() { - x.EnableHostPathProvisioning = false - } else { - x.EnableHostPathProvisioning = bool(r.DecodeBool()) - } - case "enableDynamicProvisioning": - if r.TryDecodeAsNil() { - x.EnableDynamicProvisioning = false - } else { - x.EnableDynamicProvisioning = bool(r.DecodeBool()) - } - case "persitentVolumeRecyclerConfiguration": - if r.TryDecodeAsNil() { - x.PersistentVolumeRecyclerConfiguration = PersistentVolumeRecyclerConfiguration{} - } else { - yyv6 := &x.PersistentVolumeRecyclerConfiguration - yyv6.CodecDecodeSelf(d) - } - case "flexVolumePluginDir": - if r.TryDecodeAsNil() { - x.FlexVolumePluginDir = "" - } else { - x.FlexVolumePluginDir = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *VolumeConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EnableHostPathProvisioning = false - } else { - x.EnableHostPathProvisioning = bool(r.DecodeBool()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.EnableDynamicProvisioning = false - } else { - x.EnableDynamicProvisioning = bool(r.DecodeBool()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PersistentVolumeRecyclerConfiguration = PersistentVolumeRecyclerConfiguration{} - } else { - yyv11 := &x.PersistentVolumeRecyclerConfiguration - yyv11.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FlexVolumePluginDir = "" - } else { - x.FlexVolumePluginDir = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PersistentVolumeRecyclerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, 
_ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 7 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.MaximumRetry)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maximumRetry")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.MaximumRetry)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.MinimumTimeoutNFS)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minimumTimeoutNFS")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.MinimumTimeoutNFS)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodTemplateFilePathNFS)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podTemplateFilePathNFS")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodTemplateFilePathNFS)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.IncrementTimeoutNFS)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("incrementTimeoutNFS")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.IncrementTimeoutNFS)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodTemplateFilePathHostPath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podTemplateFilePathHostPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.PodTemplateFilePathHostPath)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(x.MinimumTimeoutHostPath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minimumTimeoutHostPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - 
yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(x.MinimumTimeoutHostPath)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeInt(int64(x.IncrementTimeoutHostPath)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("incrementTimeoutHostPath")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeInt(int64(x.IncrementTimeoutHostPath)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PersistentVolumeRecyclerConfiguration) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "maximumRetry": - if r.TryDecodeAsNil() { - x.MaximumRetry = 0 - } else { - x.MaximumRetry = int32(r.DecodeInt(32)) - } - case "minimumTimeoutNFS": - if r.TryDecodeAsNil() { - x.MinimumTimeoutNFS = 0 - } else { - x.MinimumTimeoutNFS = int32(r.DecodeInt(32)) - } - case "podTemplateFilePathNFS": - if r.TryDecodeAsNil() { - x.PodTemplateFilePathNFS = "" - } else { - x.PodTemplateFilePathNFS = string(r.DecodeString()) - } - case "incrementTimeoutNFS": - if r.TryDecodeAsNil() { - x.IncrementTimeoutNFS = 0 - } else { - x.IncrementTimeoutNFS = int32(r.DecodeInt(32)) - } - case "podTemplateFilePathHostPath": - if r.TryDecodeAsNil() { - x.PodTemplateFilePathHostPath = "" - } else { - x.PodTemplateFilePathHostPath = string(r.DecodeString()) - } - case "minimumTimeoutHostPath": - if r.TryDecodeAsNil() { - x.MinimumTimeoutHostPath = 0 - } else { - x.MinimumTimeoutHostPath = int32(r.DecodeInt(32)) - } - case "incrementTimeoutHostPath": - if r.TryDecodeAsNil() { - x.IncrementTimeoutHostPath = 0 - } else { - x.IncrementTimeoutHostPath = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromArray(l int, d 
*codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj11 int - var yyb11 bool - var yyhl11 bool = l >= 0 - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaximumRetry = 0 - } else { - x.MaximumRetry = int32(r.DecodeInt(32)) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinimumTimeoutNFS = 0 - } else { - x.MinimumTimeoutNFS = int32(r.DecodeInt(32)) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodTemplateFilePathNFS = "" - } else { - x.PodTemplateFilePathNFS = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.IncrementTimeoutNFS = 0 - } else { - x.IncrementTimeoutNFS = int32(r.DecodeInt(32)) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodTemplateFilePathHostPath = "" - } else { - x.PodTemplateFilePathHostPath = string(r.DecodeString()) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinimumTimeoutHostPath = 0 - } else { - x.MinimumTimeoutHostPath = int32(r.DecodeInt(32)) - } - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.IncrementTimeoutHostPath = 0 - } else { - x.IncrementTimeoutHostPath = int32(r.DecodeInt(32)) - } - for { - yyj11++ - if yyhl11 { - yyb11 = yyj11 > l - } else { - yyb11 = r.CheckBreak() - } - if yyb11 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj11-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go deleted file mode 100644 index 070ac0e24..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go +++ /dev/null @@ -1,623 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package componentconfig - -import "k8s.io/kubernetes/pkg/api/unversioned" - -type KubeProxyConfiguration struct { - unversioned.TypeMeta - - // bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 - // for all interfaces) - BindAddress string `json:"bindAddress"` - // clusterCIDR is the CIDR range of the pods in the cluster. It is used to - // bridge traffic coming from outside of the cluster. If not provided, - // no off-cluster bridging will be performed. - ClusterCIDR string `json:"clusterCIDR"` - // healthzBindAddress is the IP address for the health check server to serve on, - // defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces) - HealthzBindAddress string `json:"healthzBindAddress"` - // healthzPort is the port to bind the health check server. Use 0 to disable. - HealthzPort int32 `json:"healthzPort"` - // hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname. - HostnameOverride string `json:"hostnameOverride"` - // iptablesMasqueradeBit is the bit of the iptables fwmark space to use for SNAT if using - // the pure iptables proxy mode. Values must be within the range [0, 31]. - IPTablesMasqueradeBit *int32 `json:"iptablesMasqueradeBit"` - // iptablesSyncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m', - // '2h22m'). Must be greater than 0. - IPTablesSyncPeriod unversioned.Duration `json:"iptablesSyncPeriodSeconds"` - // kubeconfigPath is the path to the kubeconfig file with authorization information (the - // master location is set by the master flag). - KubeconfigPath string `json:"kubeconfigPath"` - // masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. - MasqueradeAll bool `json:"masqueradeAll"` - // master is the address of the Kubernetes API server (overrides any value in kubeconfig) - Master string `json:"master"` - // oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within - // the range [-1000, 1000] - OOMScoreAdj *int32 `json:"oomScoreAdj"` - // mode specifies which proxy mode to use. - Mode ProxyMode `json:"mode"` - // portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed - // in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen. - PortRange string `json:"portRange"` - // resourceContainer is the absolute name of the resource-only container to create and run - // the Kube-proxy in (Default: /kube-proxy). - ResourceContainer string `json:"kubeletCgroups"` - // udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s'). - // Must be greater than 0. Only applicable for proxyMode=userspace. - UDPIdleTimeout unversioned.Duration `json:"udpTimeoutMilliseconds"` - // conntrackMax is the maximum number of NAT connections to track (0 to - // leave as-is). This takes precendence over conntrackMaxPerCore. - ConntrackMax int32 `json:"conntrackMax"` - // conntrackMaxPerCore is the maximum number of NAT connections to track - // per CPU core (0 to leave as-is). This value is only considered if - // conntrackMax == 0. 
- ConntrackMaxPerCore int32 `json:"conntrackMaxPerCore"` - // conntrackTCPEstablishedTimeout is how long an idle TCP connection will be kept open - // (e.g. '250ms', '2s'). Must be greater than 0. - ConntrackTCPEstablishedTimeout unversioned.Duration `json:"conntrackTCPEstablishedTimeout"` -} - -// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables' -// (newer, faster). If blank, look at the Node object on the Kubernetes API and respect the -// 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the -// best-available proxy (currently iptables, but may change in future versions). If the -// iptables proxy is selected, regardless of how, but the system's kernel or iptables -// versions are insufficient, this always falls back to the userspace proxy. -type ProxyMode string - -const ( - ProxyModeUserspace ProxyMode = "userspace" - ProxyModeIPTables ProxyMode = "iptables" -) - -// HairpinMode denotes how the kubelet should configure networking to handle -// hairpin packets. -type HairpinMode string - -// Enum settings for different ways to handle hairpin packets. -const ( - // Set the hairpin flag on the veth of containers in the respective - // container runtime. - HairpinVeth = "hairpin-veth" - // Make the container bridge promiscuous. This will force it to accept - // hairpin packets, even if the flag isn't set on ports of the bridge. - PromiscuousBridge = "promiscuous-bridge" - // Neither of the above. If the kubelet is started in this hairpin mode - // and kube-proxy is running in iptables mode, hairpin packets will be - // dropped by the container bridge. - HairpinNone = "none" -) - -// TODO: curate the ordering and structure of this config object -type KubeletConfiguration struct { - // config is the path to the config file or directory of files - Config string `json:"config"` - // syncFrequency is the max period between synchronizing running - // containers and config - SyncFrequency unversioned.Duration `json:"syncFrequency"` - // fileCheckFrequency is the duration between checking config files for - // new data - FileCheckFrequency unversioned.Duration `json:"fileCheckFrequency"` - // httpCheckFrequency is the duration between checking http for new data - HTTPCheckFrequency unversioned.Duration `json:"httpCheckFrequency"` - // manifestURL is the URL for accessing the container manifest - ManifestURL string `json:"manifestURL"` - // manifestURLHeader is the HTTP header to use when accessing the manifest - // URL, with the key separated from the value with a ':', as in 'key:value' - ManifestURLHeader string `json:"manifestURLHeader"` - // enableServer enables the Kubelet's server - EnableServer bool `json:"enableServer"` - // address is the IP address for the Kubelet to serve on (set to 0.0.0.0 - // for all interfaces) - Address string `json:"address"` - // port is the port for the Kubelet to serve on. - Port uint `json:"port"` - // readOnlyPort is the read-only port for the Kubelet to serve on with - // no authentication/authorization (set to 0 to disable) - ReadOnlyPort uint `json:"readOnlyPort"` - // tLSCertFile is the file containing x509 Certificate for HTTPS. (CA cert, - // if any, concatenated after server cert). If tlsCertFile and - // tlsPrivateKeyFile are not provided, a self-signed certificate - // and key are generated for the public address and saved to the directory - // passed to certDir. 
- TLSCertFile string `json:"tLSCertFile"` - // tLSPrivateKeyFile is the ile containing x509 private key matching - // tlsCertFile. - TLSPrivateKeyFile string `json:"tLSPrivateKeyFile"` - // certDirectory is the directory where the TLS certs are located (by - // default /var/run/kubernetes). If tlsCertFile and tlsPrivateKeyFile - // are provided, this flag will be ignored. - CertDirectory string `json:"certDirectory"` - // hostnameOverride is the hostname used to identify the kubelet instead - // of the actual hostname. - HostnameOverride string `json:"hostnameOverride"` - // podInfraContainerImage is the image whose network/ipc namespaces - // containers in each pod will use. - PodInfraContainerImage string `json:"podInfraContainerImage"` - // dockerEndpoint is the path to the docker endpoint to communicate with. - DockerEndpoint string `json:"dockerEndpoint"` - // rootDirectory is the directory path to place kubelet files (volume - // mounts,etc). - RootDirectory string `json:"rootDirectory"` - // seccompProfileRoot is the directory path for seccomp profiles. - SeccompProfileRoot string `json:"seccompProfileRoot"` - // allowPrivileged enables containers to request privileged mode. - // Defaults to false. - AllowPrivileged bool `json:"allowPrivileged"` - // hostNetworkSources is a comma-separated list of sources from which the - // Kubelet allows pods to use of host network. Defaults to "*". - HostNetworkSources string `json:"hostNetworkSources"` - // hostPIDSources is a comma-separated list of sources from which the - // Kubelet allows pods to use the host pid namespace. Defaults to "*". - HostPIDSources string `json:"hostPIDSources"` - // hostIPCSources is a comma-separated list of sources from which the - // Kubelet allows pods to use the host ipc namespace. Defaults to "*". - HostIPCSources string `json:"hostIPCSources"` - // registryPullQPS is the limit of registry pulls per second. If 0, - // unlimited. Set to 0 for no limit. Defaults to 5.0. - RegistryPullQPS float64 `json:"registryPullQPS"` - // registryBurst is the maximum size of a bursty pulls, temporarily allows - // pulls to burst to this number, while still not exceeding registryQps. - // Only used if registryQps > 0. - RegistryBurst int32 `json:"registryBurst"` - // eventRecordQPS is the maximum event creations per second. If 0, there - // is no limit enforced. - EventRecordQPS float32 `json:"eventRecordQPS"` - // eventBurst is the maximum size of a bursty event records, temporarily - // allows event records to burst to this number, while still not exceeding - // event-qps. Only used if eventQps > 0 - EventBurst int32 `json:"eventBurst"` - // enableDebuggingHandlers enables server endpoints for log collection - // and local running of containers and commands - EnableDebuggingHandlers bool `json:"enableDebuggingHandlers"` - // minimumGCAge is the minimum age for a finished container before it is - // garbage collected. - MinimumGCAge unversioned.Duration `json:"minimumGCAge"` - // maxPerPodContainerCount is the maximum number of old instances to - // retain per container. Each container takes up some disk space. - MaxPerPodContainerCount int32 `json:"maxPerPodContainerCount"` - // maxContainerCount is the maximum number of old instances of containers - // to retain globally. Each container takes up some disk space. 
- MaxContainerCount int32 `json:"maxContainerCount"` - // cAdvisorPort is the port of the localhost cAdvisor endpoint - CAdvisorPort uint `json:"cAdvisorPort"` - // healthzPort is the port of the localhost healthz endpoint - HealthzPort int32 `json:"healthzPort"` - // healthzBindAddress is the IP address for the healthz server to serve - // on. - HealthzBindAddress string `json:"healthzBindAddress"` - // oomScoreAdj is The oom-score-adj value for kubelet process. Values - // must be within the range [-1000, 1000]. - OOMScoreAdj int32 `json:"oomScoreAdj"` - // registerNode enables automatic registration with the apiserver. - RegisterNode bool `json:"registerNode"` - // clusterDomain is the DNS domain for this cluster. If set, kubelet will - // configure all containers to search this domain in addition to the - // host's search domains. - ClusterDomain string `json:"clusterDomain"` - // masterServiceNamespace is The namespace from which the kubernetes - // master services should be injected into pods. - MasterServiceNamespace string `json:"masterServiceNamespace"` - // clusterDNS is the IP address for a cluster DNS server. If set, kubelet - // will configure all containers to use this for DNS resolution in - // addition to the host's DNS servers - ClusterDNS string `json:"clusterDNS"` - // streamingConnectionIdleTimeout is the maximum time a streaming connection - // can be idle before the connection is automatically closed. - StreamingConnectionIdleTimeout unversioned.Duration `json:"streamingConnectionIdleTimeout"` - // nodeStatusUpdateFrequency is the frequency that kubelet posts node - // status to master. Note: be cautious when changing the constant, it - // must work with nodeMonitorGracePeriod in nodecontroller. - NodeStatusUpdateFrequency unversioned.Duration `json:"nodeStatusUpdateFrequency"` - // minimumGCAge is the minimum age for a unused image before it is - // garbage collected. - ImageMinimumGCAge unversioned.Duration `json:"imageMinimumGCAge"` - // imageGCHighThresholdPercent is the percent of disk usage after which - // image garbage collection is always run. - ImageGCHighThresholdPercent int32 `json:"imageGCHighThresholdPercent"` - // imageGCLowThresholdPercent is the percent of disk usage before which - // image garbage collection is never run. Lowest disk usage to garbage - // collect to. - ImageGCLowThresholdPercent int32 `json:"imageGCLowThresholdPercent"` - // lowDiskSpaceThresholdMB is the absolute free disk space, in MB, to - // maintain. When disk space falls below this threshold, new pods would - // be rejected. - LowDiskSpaceThresholdMB int32 `json:"lowDiskSpaceThresholdMB"` - // How frequently to calculate and cache volume disk usage for all pods - VolumeStatsAggPeriod unversioned.Duration `json:"volumeStatsAggPeriod"` - // networkPluginName is the name of the network plugin to be invoked for - // various events in kubelet/pod lifecycle - NetworkPluginName string `json:"networkPluginName"` - // networkPluginDir is the full path of the directory in which to search - // for network plugins - NetworkPluginDir string `json:"networkPluginDir"` - // volumePluginDir is the full path of the directory in which to search - // for additional third party volume plugins - VolumePluginDir string `json:"volumePluginDir"` - // cloudProvider is the provider for cloud services. - CloudProvider string `json:"cloudProvider,omitempty"` - // cloudConfigFile is the path to the cloud provider configuration file. 
- CloudConfigFile string `json:"cloudConfigFile,omitempty"` - // KubeletCgroups is the absolute name of cgroups to isolate the kubelet in. - KubeletCgroups string `json:"kubeletCgroups,omitempty"` - // Cgroups that container runtime is expected to be isolated in. - RuntimeCgroups string `json:"runtimeCgroups,omitempty"` - // SystemCgroups is absolute name of cgroups in which to place - // all non-kernel processes that are not already in a container. Empty - // for no container. Rolling back the flag requires a reboot. - SystemCgroups string `json:"systemContainer,omitempty"` - // cgroupRoot is the root cgroup to use for pods. This is handled by the - // container runtime on a best effort basis. - CgroupRoot string `json:"cgroupRoot,omitempty"` - // containerRuntime is the container runtime to use. - ContainerRuntime string `json:"containerRuntime"` - // runtimeRequestTimeout is the timeout for all runtime requests except long running - // requests - pull, logs, exec and attach. - RuntimeRequestTimeout unversioned.Duration `json:"runtimeRequestTimeout,omitempty"` - // rktPath is the path of rkt binary. Leave empty to use the first rkt in - // $PATH. - RktPath string `json:"rktPath,omitempty"` - // rktApiEndpoint is the endpoint of the rkt API service to communicate with. - RktAPIEndpoint string `json:"rktAPIEndpoint,omitempty"` - // rktStage1Image is the image to use as stage1. Local paths and - // http/https URLs are supported. - RktStage1Image string `json:"rktStage1Image,omitempty"` - // lockFilePath is the path that kubelet will use to as a lock file. - // It uses this file as a lock to synchronize with other kubelet processes - // that may be running. - LockFilePath string `json:"lockFilePath"` - // ExitOnLockContention is a flag that signifies to the kubelet that it is running - // in "bootstrap" mode. This requires that 'LockFilePath' has been set. - // This will cause the kubelet to listen to inotify events on the lock file, - // releasing it and exiting when another process tries to open that file. - ExitOnLockContention bool `json:"exitOnLockContention"` - // configureCBR0 enables the kublet to configure cbr0 based on - // Node.Spec.PodCIDR. - ConfigureCBR0 bool `json:"configureCbr0"` - // How should the kubelet configure the container bridge for hairpin packets. - // Setting this flag allows endpoints in a Service to loadbalance back to - // themselves if they should try to access their own Service. Values: - // "promiscuous-bridge": make the container bridge promiscuous. - // "hairpin-veth": set the hairpin flag on container veth interfaces. - // "none": do nothing. - // Setting --configure-cbr0 to false implies that to achieve hairpin NAT - // one must set --hairpin-mode=veth-flag, because bridge assumes the - // existence of a container bridge named cbr0. - HairpinMode string `json:"hairpinMode"` - // The node has babysitter process monitoring docker and kubelet. - BabysitDaemons bool `json:"babysitDaemons"` - // maxPods is the number of pods that can run on this Kubelet. - MaxPods int32 `json:"maxPods"` - // nvidiaGPUs is the number of NVIDIA GPU devices on this node. - NvidiaGPUs int32 `json:"nvidiaGPUs"` - // dockerExecHandlerName is the handler to use when executing a command - // in a container. Valid values are 'native' and 'nsenter'. Defaults to - // 'native'. - DockerExecHandlerName string `json:"dockerExecHandlerName"` - // The CIDR to use for pod IP addresses, only used in standalone mode. - // In cluster mode, this is obtained from the master. 
- PodCIDR string `json:"podCIDR"` - // ResolverConfig is the resolver configuration file used as the basis - // for the container DNS resolution configuration."), [] - ResolverConfig string `json:"resolvConf"` - // cpuCFSQuota is Enable CPU CFS quota enforcement for containers that - // specify CPU limits - CPUCFSQuota bool `json:"cpuCFSQuota"` - // containerized should be set to true if kubelet is running in a container. - Containerized bool `json:"containerized"` - // maxOpenFiles is Number of files that can be opened by Kubelet process. - MaxOpenFiles uint64 `json:"maxOpenFiles"` - // reconcileCIDR is Reconcile node CIDR with the CIDR specified by the - // API server. No-op if register-node or configure-cbr0 is false. - ReconcileCIDR bool `json:"reconcileCIDR"` - // registerSchedulable tells the kubelet to register the node as - // schedulable. No-op if register-node is false. - RegisterSchedulable bool `json:"registerSchedulable"` - // contentType is contentType of requests sent to apiserver. - ContentType string `json:"contentType"` - // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver - KubeAPIQPS float32 `json:"kubeAPIQPS"` - // kubeAPIBurst is the burst to allow while talking with kubernetes - // apiserver - KubeAPIBurst int32 `json:"kubeAPIBurst"` - // serializeImagePulls when enabled, tells the Kubelet to pull images one - // at a time. We recommend *not* changing the default value on nodes that - // run docker daemon with version < 1.9 or an Aufs storage backend. - // Issue #10959 has more details. - SerializeImagePulls bool `json:"serializeImagePulls"` - // experimentalFlannelOverlay enables experimental support for starting the - // kubelet with the default overlay network (flannel). Assumes flanneld - // is already running in client mode. - ExperimentalFlannelOverlay bool `json:"experimentalFlannelOverlay"` - // outOfDiskTransitionFrequency is duration for which the kubelet has to - // wait before transitioning out of out-of-disk node condition status. - OutOfDiskTransitionFrequency unversioned.Duration `json:"outOfDiskTransitionFrequency,omitempty"` - // nodeIP is IP address of the node. If set, kubelet will use this IP - // address for the node. - NodeIP string `json:"nodeIP,omitempty"` - // nodeLabels to add when registering the node in the cluster. - NodeLabels map[string]string `json:"nodeLabels"` - // nonMasqueradeCIDR configures masquerading: traffic to IPs outside this range will use IP masquerade. - NonMasqueradeCIDR string `json:"nonMasqueradeCIDR"` - // enable gathering custom metrics. - EnableCustomMetrics bool `json:"enableCustomMetrics"` - // Comma-delimited list of hard eviction expressions. For example, 'memory.available<300Mi'. - EvictionHard string `json:"evictionHard,omitempty"` - // Comma-delimited list of soft eviction expressions. For example, 'memory.available<300Mi'. - EvictionSoft string `json:"evictionSoft,omitempty"` - // Comma-delimeted list of grace periods for each soft eviction signal. For example, 'memory.available=30s'. - EvictionSoftGracePeriod string `json:"evictionSoftGracePeriod,omitempty"` - // Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition. - EvictionPressureTransitionPeriod unversioned.Duration `json:"evictionPressureTransitionPeriod,omitempty"` - // Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. 
- EvictionMaxPodGracePeriod int32 `json:"evictionMaxPodGracePeriod,omitempty"` - // Maximum number of pods per core. Cannot exceed MaxPods - PodsPerCore int32 `json:"podsPerCore"` - // enableControllerAttachDetach enables the Attach/Detach controller to - // manage attachment/detachment of volumes scheduled to this node, and - // disables kubelet from executing any attach/detach operations - EnableControllerAttachDetach bool `json:"enableControllerAttachDetach"` -} - -type KubeSchedulerConfiguration struct { - unversioned.TypeMeta - - // port is the port that the scheduler's http service runs on. - Port int32 `json:"port"` - // address is the IP address to serve on. - Address string `json:"address"` - // algorithmProvider is the scheduling algorithm provider to use. - AlgorithmProvider string `json:"algorithmProvider"` - // policyConfigFile is the filepath to the scheduler policy configuration. - PolicyConfigFile string `json:"policyConfigFile"` - // enableProfiling enables profiling via web interface. - EnableProfiling bool `json:"enableProfiling"` - // contentType is contentType of requests sent to apiserver. - ContentType string `json:"contentType"` - // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver. - KubeAPIQPS float32 `json:"kubeAPIQPS"` - // kubeAPIBurst is the QPS burst to use while talking with kubernetes apiserver. - KubeAPIBurst int32 `json:"kubeAPIBurst"` - // schedulerName is name of the scheduler, used to select which pods - // will be processed by this scheduler, based on pod's annotation with - // key 'scheduler.alpha.kubernetes.io/name'. - SchedulerName string `json:"schedulerName"` - // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule - // corresponding to every RequiredDuringScheduling affinity rule. - // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100. - HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"` - // Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity. - FailureDomains string `json:"failureDomains"` - // leaderElection defines the configuration of leader election client. - LeaderElection LeaderElectionConfiguration `json:"leaderElection"` -} - -// LeaderElectionConfiguration defines the configuration of leader election -// clients for components that can run with leader election enabled. -type LeaderElectionConfiguration struct { - // leaderElect enables a leader election client to gain leadership - // before executing the main loop. Enable this when running replicated - // components for high availability. - LeaderElect bool `json:"leaderElect"` - // leaseDuration is the duration that non-leader candidates will wait - // after observing a leadership renewal until attempting to acquire - // leadership of a led but unrenewed leader slot. This is effectively the - // maximum duration that a leader can be stopped before it is replaced - // by another candidate. This is only applicable if leader election is - // enabled. - LeaseDuration unversioned.Duration `json:"leaseDuration"` - // renewDeadline is the interval between attempts by the acting master to - // renew a leadership slot before it stops leading. This must be less - // than or equal to the lease duration. This is only applicable if leader - // election is enabled. 
- RenewDeadline unversioned.Duration `json:"renewDeadline"` - // retryPeriod is the duration the clients should wait between attempting - // acquisition and renewal of a leadership. This is only applicable if - // leader election is enabled. - RetryPeriod unversioned.Duration `json:"retryPeriod"` -} - -type KubeControllerManagerConfiguration struct { - unversioned.TypeMeta - - // port is the port that the controller-manager's http service runs on. - Port int32 `json:"port"` - // address is the IP address to serve on (set to 0.0.0.0 for all interfaces). - Address string `json:"address"` - // cloudProvider is the provider for cloud services. - CloudProvider string `json:"cloudProvider"` - // cloudConfigFile is the path to the cloud provider configuration file. - CloudConfigFile string `json:"cloudConfigFile"` - // concurrentEndpointSyncs is the number of endpoint syncing operations - // that will be done concurrently. Larger number = faster endpoint updating, - // but more CPU (and network) load. - ConcurrentEndpointSyncs int32 `json:"concurrentEndpointSyncs"` - // concurrentRSSyncs is the number of replica sets that are allowed to sync - // concurrently. Larger number = more responsive replica management, but more - // CPU (and network) load. - ConcurrentRSSyncs int32 `json:"concurrentRSSyncs"` - // concurrentRCSyncs is the number of replication controllers that are - // allowed to sync concurrently. Larger number = more responsive replica - // management, but more CPU (and network) load. - ConcurrentRCSyncs int32 `json:"concurrentRCSyncs"` - // concurrentResourceQuotaSyncs is the number of resource quotas that are - // allowed to sync concurrently. Larger number = more responsive quota - // management, but more CPU (and network) load. - ConcurrentResourceQuotaSyncs int32 `json:"concurrentResourceQuotaSyncs"` - // concurrentDeploymentSyncs is the number of deployment objects that are - // allowed to sync concurrently. Larger number = more responsive deployments, - // but more CPU (and network) load. - ConcurrentDeploymentSyncs int32 `json:"concurrentDeploymentSyncs"` - // concurrentDaemonSetSyncs is the number of daemonset objects that are - // allowed to sync concurrently. Larger number = more responsive daemonset, - // but more CPU (and network) load. - ConcurrentDaemonSetSyncs int32 `json:"concurrentDaemonSetSyncs"` - // concurrentJobSyncs is the number of job objects that are - // allowed to sync concurrently. Larger number = more responsive jobs, - // but more CPU (and network) load. - ConcurrentJobSyncs int32 `json:"concurrentJobSyncs"` - // concurrentNamespaceSyncs is the number of namespace objects that are - // allowed to sync concurrently. - ConcurrentNamespaceSyncs int32 `json:"concurrentNamespaceSyncs"` - // lookupCacheSizeForRC is the size of lookup cache for replication controllers. - // Larger number = more responsive replica management, but more MEM load. - LookupCacheSizeForRC int32 `json:"lookupCacheSizeForRC"` - // lookupCacheSizeForRS is the size of lookup cache for replicatsets. - // Larger number = more responsive replica management, but more MEM load. - LookupCacheSizeForRS int32 `json:"lookupCacheSizeForRS"` - // lookupCacheSizeForDaemonSet is the size of lookup cache for daemonsets. - // Larger number = more responsive daemonset, but more MEM load. - LookupCacheSizeForDaemonSet int32 `json:"lookupCacheSizeForDaemonSet"` - // serviceSyncPeriod is the period for syncing services with their external - // load balancers. 
- ServiceSyncPeriod unversioned.Duration `json:"serviceSyncPeriod"` - // nodeSyncPeriod is the period for syncing nodes from cloudprovider. Longer - // periods will result in fewer calls to cloud provider, but may delay addition - // of new nodes to cluster. - NodeSyncPeriod unversioned.Duration `json:"nodeSyncPeriod"` - // resourceQuotaSyncPeriod is the period for syncing quota usage status - // in the system. - ResourceQuotaSyncPeriod unversioned.Duration `json:"resourceQuotaSyncPeriod"` - // namespaceSyncPeriod is the period for syncing namespace life-cycle - // updates. - NamespaceSyncPeriod unversioned.Duration `json:"namespaceSyncPeriod"` - // pvClaimBinderSyncPeriod is the period for syncing persistent volumes - // and persistent volume claims. - PVClaimBinderSyncPeriod unversioned.Duration `json:"pvClaimBinderSyncPeriod"` - // minResyncPeriod is the resync period in reflectors; will be random between - // minResyncPeriod and 2*minResyncPeriod. - MinResyncPeriod unversioned.Duration `json:"minResyncPeriod"` - // terminatedPodGCThreshold is the number of terminated pods that can exist - // before the terminated pod garbage collector starts deleting terminated pods. - // If <= 0, the terminated pod garbage collector is disabled. - TerminatedPodGCThreshold int32 `json:"terminatedPodGCThreshold"` - // horizontalPodAutoscalerSyncPeriod is the period for syncing the number of - // pods in horizontal pod autoscaler. - HorizontalPodAutoscalerSyncPeriod unversioned.Duration `json:"horizontalPodAutoscalerSyncPeriod"` - // deploymentControllerSyncPeriod is the period for syncing the deployments. - DeploymentControllerSyncPeriod unversioned.Duration `json:"deploymentControllerSyncPeriod"` - // podEvictionTimeout is the grace period for deleting pods on failed nodes. - PodEvictionTimeout unversioned.Duration `json:"podEvictionTimeout"` - // deletingPodsQps is the number of nodes per second on which pods are deleted in - // case of node failure. - DeletingPodsQps float32 `json:"deletingPodsQps"` - // deletingPodsBurst is the number of nodes on which pods are bursty deleted in - // case of node failure. For more details look into RateLimiter. - DeletingPodsBurst int32 `json:"deletingPodsBurst"` - // nodeMontiorGracePeriod is the amount of time which we allow a running node to be - // unresponsive before marking it unhealty. Must be N times more than kubelet's - // nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet - // to post node status. - NodeMonitorGracePeriod unversioned.Duration `json:"nodeMonitorGracePeriod"` - // registerRetryCount is the number of retries for initial node registration. - // Retry interval equals node-sync-period. - RegisterRetryCount int32 `json:"registerRetryCount"` - // nodeStartupGracePeriod is the amount of time which we allow starting a node to - // be unresponsive before marking it unhealty. - NodeStartupGracePeriod unversioned.Duration `json:"nodeStartupGracePeriod"` - // nodeMonitorPeriod is the period for syncing NodeStatus in NodeController. - NodeMonitorPeriod unversioned.Duration `json:"nodeMonitorPeriod"` - // serviceAccountKeyFile is the filename containing a PEM-encoded private RSA key - // used to sign service account tokens. - ServiceAccountKeyFile string `json:"serviceAccountKeyFile"` - // enableProfiling enables profiling via web interface host:port/debug/pprof/ - EnableProfiling bool `json:"enableProfiling"` - // clusterName is the instance prefix for the cluster. 
- ClusterName string `json:"clusterName"` - // clusterCIDR is CIDR Range for Pods in cluster. - ClusterCIDR string `json:"clusterCIDR"` - // serviceCIDR is CIDR Range for Services in cluster. - ServiceCIDR string `json:"serviceCIDR"` - // NodeCIDRMaskSize is the mask size for node cidr in cluster. - NodeCIDRMaskSize int32 `json:"nodeCIDRMaskSize"` - // allocateNodeCIDRs enables CIDRs for Pods to be allocated and, if - // ConfigureCloudRoutes is true, to be set on the cloud provider. - AllocateNodeCIDRs bool `json:"allocateNodeCIDRs"` - // configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs - // to be configured on the cloud provider. - ConfigureCloudRoutes bool `json:"configureCloudRoutes"` - // rootCAFile is the root certificate authority will be included in service - // account's token secret. This must be a valid PEM-encoded CA bundle. - RootCAFile string `json:"rootCAFile"` - // contentType is contentType of requests sent to apiserver. - ContentType string `json:"contentType"` - // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver. - KubeAPIQPS float32 `json:"kubeAPIQPS"` - // kubeAPIBurst is the burst to use while talking with kubernetes apiserver. - KubeAPIBurst int32 `json:"kubeAPIBurst"` - // leaderElection defines the configuration of leader election client. - LeaderElection LeaderElectionConfiguration `json:"leaderElection"` - // volumeConfiguration holds configuration for volume related features. - VolumeConfiguration VolumeConfiguration `json:"volumeConfiguration"` - // How long to wait between starting controller managers - ControllerStartInterval unversioned.Duration `json:"controllerStartInterval"` - // enables the generic garbage collector. MUST be synced with the - // corresponding flag of the kube-apiserver. WARNING: the generic garbage - // collector is an alpha feature. - EnableGarbageCollector bool `json:"enableGarbageCollector"` -} - -// VolumeConfiguration contains *all* enumerated flags meant to configure all volume -// plugins. From this config, the controller-manager binary will create many instances of -// volume.VolumeConfig, each containing only the configuration needed for that plugin which -// are then passed to the appropriate plugin. The ControllerManager binary is the only part -// of the code which knows what plugins are supported and which flags correspond to each plugin. -type VolumeConfiguration struct { - // enableHostPathProvisioning enables HostPath PV provisioning when running without a - // cloud provider. This allows testing and development of provisioning features. HostPath - // provisioning is not supported in any way, won't work in a multi-node cluster, and - // should not be used for anything other than testing or development. - EnableHostPathProvisioning bool `json:"enableHostPathProvisioning"` - // enableDynamicProvisioning enables the provisioning of volumes when running within an environment - // that supports dynamic provisioning. Defaults to true. - EnableDynamicProvisioning bool `json:"enableDynamicProvisioning"` - // persistentVolumeRecyclerConfiguration holds configuration for persistent volume plugins. 
- PersistentVolumeRecyclerConfiguration PersistentVolumeRecyclerConfiguration `json:"persitentVolumeRecyclerConfiguration"` - // volumePluginDir is the full path of the directory in which the flex - // volume plugin should search for additional third party volume plugins - FlexVolumePluginDir string `json:"flexVolumePluginDir"` -} - -type PersistentVolumeRecyclerConfiguration struct { - // maximumRetry is number of retries the PV recycler will execute on failure to recycle - // PV. - MaximumRetry int32 `json:"maximumRetry"` - // minimumTimeoutNFS is the minimum ActiveDeadlineSeconds to use for an NFS Recycler - // pod. - MinimumTimeoutNFS int32 `json:"minimumTimeoutNFS"` - // podTemplateFilePathNFS is the file path to a pod definition used as a template for - // NFS persistent volume recycling - PodTemplateFilePathNFS string `json:"podTemplateFilePathNFS"` - // incrementTimeoutNFS is the increment of time added per Gi to ActiveDeadlineSeconds - // for an NFS scrubber pod. - IncrementTimeoutNFS int32 `json:"incrementTimeoutNFS"` - // podTemplateFilePathHostPath is the file path to a pod definition used as a template for - // HostPath persistent volume recycling. This is for development and testing only and - // will not work in a multi-node cluster. - PodTemplateFilePathHostPath string `json:"podTemplateFilePathHostPath"` - // minimumTimeoutHostPath is the minimum ActiveDeadlineSeconds to use for a HostPath - // Recycler pod. This is for development and testing only and will not work in a multi-node - // cluster. - MinimumTimeoutHostPath int32 `json:"minimumTimeoutHostPath"` - // incrementTimeoutHostPath is the increment of time added per Gi to ActiveDeadlineSeconds - // for a HostPath scrubber pod. This is for development and testing only and will not work - // in a multi-node cluster. - IncrementTimeoutHostPath int32 `json:"incrementTimeoutHostPath"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/conversion_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/conversion_generated.go deleted file mode 100644 index b49ff2af0..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/conversion_generated.go +++ /dev/null @@ -1,184 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! 
- -package v1alpha1 - -import ( - api "k8s.io/kubernetes/pkg/api" - componentconfig "k8s.io/kubernetes/pkg/apis/componentconfig" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedConversionFuncs( - Convert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration, - Convert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration, - Convert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration, - Convert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration, - Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration, - Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration, - ); err != nil { - // if one of the conversion functions is malformed, detect it immediately. - panic(err) - } -} - -func autoConvert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *componentconfig.KubeProxyConfiguration, s conversion.Scope) error { - SetDefaults_KubeProxyConfiguration(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.BindAddress = in.BindAddress - out.ClusterCIDR = in.ClusterCIDR - out.HealthzBindAddress = in.HealthzBindAddress - out.HealthzPort = in.HealthzPort - out.HostnameOverride = in.HostnameOverride - out.IPTablesMasqueradeBit = in.IPTablesMasqueradeBit - out.IPTablesSyncPeriod = in.IPTablesSyncPeriod - out.KubeconfigPath = in.KubeconfigPath - out.MasqueradeAll = in.MasqueradeAll - out.Master = in.Master - out.OOMScoreAdj = in.OOMScoreAdj - out.Mode = componentconfig.ProxyMode(in.Mode) - out.PortRange = in.PortRange - out.ResourceContainer = in.ResourceContainer - out.UDPIdleTimeout = in.UDPIdleTimeout - out.ConntrackMax = in.ConntrackMax - out.ConntrackMaxPerCore = in.ConntrackMaxPerCore - out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout - return nil -} - -func Convert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration(in *KubeProxyConfiguration, out *componentconfig.KubeProxyConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeProxyConfiguration_To_componentconfig_KubeProxyConfiguration(in, out, s) -} - -func autoConvert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *componentconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.BindAddress = in.BindAddress - out.ClusterCIDR = in.ClusterCIDR - out.HealthzBindAddress = in.HealthzBindAddress - out.HealthzPort = in.HealthzPort - out.HostnameOverride = in.HostnameOverride - out.IPTablesMasqueradeBit = in.IPTablesMasqueradeBit - out.IPTablesSyncPeriod = in.IPTablesSyncPeriod - out.KubeconfigPath = in.KubeconfigPath - out.MasqueradeAll = in.MasqueradeAll - out.Master = in.Master - out.OOMScoreAdj = in.OOMScoreAdj - out.Mode = ProxyMode(in.Mode) - out.PortRange = in.PortRange - out.ResourceContainer = in.ResourceContainer - out.UDPIdleTimeout = in.UDPIdleTimeout - out.ConntrackMax = in.ConntrackMax - out.ConntrackMaxPerCore = in.ConntrackMaxPerCore - out.ConntrackTCPEstablishedTimeout = in.ConntrackTCPEstablishedTimeout - return nil -} - -func 
Convert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in *componentconfig.KubeProxyConfiguration, out *KubeProxyConfiguration, s conversion.Scope) error { - return autoConvert_componentconfig_KubeProxyConfiguration_To_v1alpha1_KubeProxyConfiguration(in, out, s) -} - -func autoConvert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration(in *KubeSchedulerConfiguration, out *componentconfig.KubeSchedulerConfiguration, s conversion.Scope) error { - SetDefaults_KubeSchedulerConfiguration(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.Port = int32(in.Port) - out.Address = in.Address - out.AlgorithmProvider = in.AlgorithmProvider - out.PolicyConfigFile = in.PolicyConfigFile - if err := api.Convert_Pointer_bool_To_bool(&in.EnableProfiling, &out.EnableProfiling, s); err != nil { - return err - } - out.ContentType = in.ContentType - out.KubeAPIQPS = in.KubeAPIQPS - out.KubeAPIBurst = int32(in.KubeAPIBurst) - out.SchedulerName = in.SchedulerName - out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight - out.FailureDomains = in.FailureDomains - if err := Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { - return err - } - return nil -} - -func Convert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration(in *KubeSchedulerConfiguration, out *componentconfig.KubeSchedulerConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_KubeSchedulerConfiguration_To_componentconfig_KubeSchedulerConfiguration(in, out, s) -} - -func autoConvert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in *componentconfig.KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.Port = int(in.Port) - out.Address = in.Address - out.AlgorithmProvider = in.AlgorithmProvider - out.PolicyConfigFile = in.PolicyConfigFile - if err := api.Convert_bool_To_Pointer_bool(&in.EnableProfiling, &out.EnableProfiling, s); err != nil { - return err - } - out.ContentType = in.ContentType - out.KubeAPIQPS = in.KubeAPIQPS - out.KubeAPIBurst = int(in.KubeAPIBurst) - out.SchedulerName = in.SchedulerName - out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight - out.FailureDomains = in.FailureDomains - if err := Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { - return err - } - return nil -} - -func Convert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in *componentconfig.KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, s conversion.Scope) error { - return autoConvert_componentconfig_KubeSchedulerConfiguration_To_v1alpha1_KubeSchedulerConfiguration(in, out, s) -} - -func autoConvert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(in *LeaderElectionConfiguration, out *componentconfig.LeaderElectionConfiguration, s conversion.Scope) error { - SetDefaults_LeaderElectionConfiguration(in) - if err := api.Convert_Pointer_bool_To_bool(&in.LeaderElect, &out.LeaderElect, s); err != nil { - return err - } - out.LeaseDuration = in.LeaseDuration - 
out.RenewDeadline = in.RenewDeadline - out.RetryPeriod = in.RetryPeriod - return nil -} - -func Convert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(in *LeaderElectionConfiguration, out *componentconfig.LeaderElectionConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha1_LeaderElectionConfiguration_To_componentconfig_LeaderElectionConfiguration(in, out, s) -} - -func autoConvert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in *componentconfig.LeaderElectionConfiguration, out *LeaderElectionConfiguration, s conversion.Scope) error { - if err := api.Convert_bool_To_Pointer_bool(&in.LeaderElect, &out.LeaderElect, s); err != nil { - return err - } - out.LeaseDuration = in.LeaseDuration - out.RenewDeadline = in.RenewDeadline - out.RetryPeriod = in.RetryPeriod - return nil -} - -func Convert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in *componentconfig.LeaderElectionConfiguration, out *LeaderElectionConfiguration, s conversion.Scope) error { - return autoConvert_componentconfig_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/deep_copy_generated.go deleted file mode 100644 index d1543037d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/deep_copy_generated.go +++ /dev/null @@ -1,128 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1alpha1 - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_v1alpha1_KubeProxyConfiguration, - DeepCopy_v1alpha1_KubeSchedulerConfiguration, - DeepCopy_v1alpha1_LeaderElectionConfiguration, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. 
- panic(err) - } -} - -func DeepCopy_v1alpha1_KubeProxyConfiguration(in KubeProxyConfiguration, out *KubeProxyConfiguration, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.BindAddress = in.BindAddress - out.ClusterCIDR = in.ClusterCIDR - out.HealthzBindAddress = in.HealthzBindAddress - out.HealthzPort = in.HealthzPort - out.HostnameOverride = in.HostnameOverride - if in.IPTablesMasqueradeBit != nil { - in, out := in.IPTablesMasqueradeBit, &out.IPTablesMasqueradeBit - *out = new(int32) - **out = *in - } else { - out.IPTablesMasqueradeBit = nil - } - if err := unversioned.DeepCopy_unversioned_Duration(in.IPTablesSyncPeriod, &out.IPTablesSyncPeriod, c); err != nil { - return err - } - out.KubeconfigPath = in.KubeconfigPath - out.MasqueradeAll = in.MasqueradeAll - out.Master = in.Master - if in.OOMScoreAdj != nil { - in, out := in.OOMScoreAdj, &out.OOMScoreAdj - *out = new(int32) - **out = *in - } else { - out.OOMScoreAdj = nil - } - out.Mode = in.Mode - out.PortRange = in.PortRange - out.ResourceContainer = in.ResourceContainer - if err := unversioned.DeepCopy_unversioned_Duration(in.UDPIdleTimeout, &out.UDPIdleTimeout, c); err != nil { - return err - } - out.ConntrackMax = in.ConntrackMax - out.ConntrackMaxPerCore = in.ConntrackMaxPerCore - if err := unversioned.DeepCopy_unversioned_Duration(in.ConntrackTCPEstablishedTimeout, &out.ConntrackTCPEstablishedTimeout, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1alpha1_KubeSchedulerConfiguration(in KubeSchedulerConfiguration, out *KubeSchedulerConfiguration, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Port = in.Port - out.Address = in.Address - out.AlgorithmProvider = in.AlgorithmProvider - out.PolicyConfigFile = in.PolicyConfigFile - if in.EnableProfiling != nil { - in, out := in.EnableProfiling, &out.EnableProfiling - *out = new(bool) - **out = *in - } else { - out.EnableProfiling = nil - } - out.ContentType = in.ContentType - out.KubeAPIQPS = in.KubeAPIQPS - out.KubeAPIBurst = in.KubeAPIBurst - out.SchedulerName = in.SchedulerName - out.HardPodAffinitySymmetricWeight = in.HardPodAffinitySymmetricWeight - out.FailureDomains = in.FailureDomains - if err := DeepCopy_v1alpha1_LeaderElectionConfiguration(in.LeaderElection, &out.LeaderElection, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1alpha1_LeaderElectionConfiguration(in LeaderElectionConfiguration, out *LeaderElectionConfiguration, c *conversion.Cloner) error { - if in.LeaderElect != nil { - in, out := in.LeaderElect, &out.LeaderElect - *out = new(bool) - **out = *in - } else { - out.LeaderElect = nil - } - if err := unversioned.DeepCopy_unversioned_Duration(in.LeaseDuration, &out.LeaseDuration, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Duration(in.RenewDeadline, &out.RenewDeadline, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Duration(in.RetryPeriod, &out.RetryPeriod, c); err != nil { - return err - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go deleted file mode 100644 index d90e2205b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2015 The Kubernetes 
Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/kubelet/qos" - "k8s.io/kubernetes/pkg/master/ports" - "k8s.io/kubernetes/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) { - scheme.AddDefaultingFuncs( - SetDefaults_KubeProxyConfiguration, - SetDefaults_KubeSchedulerConfiguration, - SetDefaults_LeaderElectionConfiguration, - ) -} - -func SetDefaults_KubeProxyConfiguration(obj *KubeProxyConfiguration) { - if obj.BindAddress == "" { - obj.BindAddress = "0.0.0.0" - } - if obj.HealthzPort == 0 { - obj.HealthzPort = 10249 - } - if obj.HealthzBindAddress == "" { - obj.HealthzBindAddress = "127.0.0.1" - } - if obj.OOMScoreAdj == nil { - temp := int32(qos.KubeProxyOOMScoreAdj) - obj.OOMScoreAdj = &temp - } - if obj.ResourceContainer == "" { - obj.ResourceContainer = "/kube-proxy" - } - if obj.IPTablesSyncPeriod.Duration == 0 { - obj.IPTablesSyncPeriod = unversioned.Duration{Duration: 30 * time.Second} - } - zero := unversioned.Duration{} - if obj.UDPIdleTimeout == zero { - obj.UDPIdleTimeout = unversioned.Duration{Duration: 250 * time.Millisecond} - } - // If ConntrackMax is set, respect it. - if obj.ConntrackMax == 0 { - // If ConntrackMax is *not* set, use per-core scaling. 
- if obj.ConntrackMaxPerCore == 0 { - obj.ConntrackMaxPerCore = 32 * 1024 - } - } - if obj.IPTablesMasqueradeBit == nil { - temp := int32(14) - obj.IPTablesMasqueradeBit = &temp - } - if obj.ConntrackTCPEstablishedTimeout == zero { - obj.ConntrackTCPEstablishedTimeout = unversioned.Duration{Duration: 24 * time.Hour} // 1 day (1/5 default) - } -} - -func SetDefaults_KubeSchedulerConfiguration(obj *KubeSchedulerConfiguration) { - if obj.Port == 0 { - obj.Port = ports.SchedulerPort - } - if obj.Address == "" { - obj.Address = "0.0.0.0" - } - if obj.AlgorithmProvider == "" { - obj.AlgorithmProvider = "DefaultProvider" - } - if obj.ContentType == "" { - obj.ContentType = "application/vnd.kubernetes.protobuf" - } - if obj.KubeAPIQPS == 0 { - obj.KubeAPIQPS = 50.0 - } - if obj.KubeAPIBurst == 0 { - obj.KubeAPIBurst = 100 - } - if obj.SchedulerName == "" { - obj.SchedulerName = api.DefaultSchedulerName - } - if obj.HardPodAffinitySymmetricWeight == 0 { - obj.HardPodAffinitySymmetricWeight = api.DefaultHardPodAffinitySymmetricWeight - } - if obj.FailureDomains == "" { - obj.FailureDomains = api.DefaultFailureDomains - } -} - -func SetDefaults_LeaderElectionConfiguration(obj *LeaderElectionConfiguration) { - zero := unversioned.Duration{} - if obj.LeaseDuration == zero { - obj.LeaseDuration = unversioned.Duration{Duration: 15 * time.Second} - } - if obj.RenewDeadline == zero { - obj.RenewDeadline = unversioned.Duration{Duration: 10 * time.Second} - } - if obj.RetryPeriod == zero { - obj.RetryPeriod = unversioned.Duration{Duration: 2 * time.Second} - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go deleted file mode 100644 index 65a03a209..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +genconversion=true -package v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/register.go deleted file mode 100644 index d74effb7d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/register.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// GroupName is the group name use in this package -const GroupName = "componentconfig" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1alpha1"} - -func AddToScheme(scheme *runtime.Scheme) { - addKnownTypes(scheme) - addDefaultingFuncs(scheme) -} - -func addKnownTypes(scheme *runtime.Scheme) { - scheme.AddKnownTypes(SchemeGroupVersion, - &KubeProxyConfiguration{}, - &KubeSchedulerConfiguration{}, - ) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go deleted file mode 100644 index 95c3aee55..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go +++ /dev/null @@ -1,146 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import "k8s.io/kubernetes/pkg/api/unversioned" - -type KubeProxyConfiguration struct { - unversioned.TypeMeta - - // bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 - // for all interfaces) - BindAddress string `json:"bindAddress"` - // clusterCIDR is the CIDR range of the pods in the cluster. It is used to - // bridge traffic coming from outside of the cluster. If not provided, - // no off-cluster bridging will be performed. - ClusterCIDR string `json:"clusterCIDR"` - // healthzBindAddress is the IP address for the health check server to serve on, - // defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces) - HealthzBindAddress string `json:"healthzBindAddress"` - // healthzPort is the port to bind the health check server. Use 0 to disable. - HealthzPort int32 `json:"healthzPort"` - // hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname. - HostnameOverride string `json:"hostnameOverride"` - // iptablesMasqueradeBit is the bit of the iptables fwmark space to use for SNAT if using - // the pure iptables proxy mode. Values must be within the range [0, 31]. - IPTablesMasqueradeBit *int32 `json:"iptablesMasqueradeBit"` - // iptablesSyncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m', - // '2h22m'). Must be greater than 0. - IPTablesSyncPeriod unversioned.Duration `json:"iptablesSyncPeriodSeconds"` - // kubeconfigPath is the path to the kubeconfig file with authorization information (the - // master location is set by the master flag). - KubeconfigPath string `json:"kubeconfigPath"` - // masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. - MasqueradeAll bool `json:"masqueradeAll"` - // master is the address of the Kubernetes API server (overrides any value in kubeconfig) - Master string `json:"master"` - // oomScoreAdj is the oom-score-adj value for kube-proxy process. 
Values must be within - // the range [-1000, 1000] - OOMScoreAdj *int32 `json:"oomScoreAdj"` - // mode specifies which proxy mode to use. - Mode ProxyMode `json:"mode"` - // portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed - // in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen. - PortRange string `json:"portRange"` - // resourceContainer is the bsolute name of the resource-only container to create and run - // the Kube-proxy in (Default: /kube-proxy). - ResourceContainer string `json:"resourceContainer"` - // udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s'). - // Must be greater than 0. Only applicable for proxyMode=userspace. - UDPIdleTimeout unversioned.Duration `json:"udpTimeoutMilliseconds"` - // conntrackMax is the maximum number of NAT connections to track (0 to - // leave as-is). This takes precendence over conntrackMaxPerCore. - ConntrackMax int32 `json:"conntrackMax"` - // conntrackMaxPerCore is the maximum number of NAT connections to track - // per CPU core (0 to leave as-is). This value is only considered if - // conntrackMax == 0. - ConntrackMaxPerCore int32 `json:"conntrackMaxPerCore"` - // conntrackTCPEstablishedTimeout is how long an idle TCP connection will be kept open - // (e.g. '250ms', '2s'). Must be greater than 0. - ConntrackTCPEstablishedTimeout unversioned.Duration `json:"conntrackTCPEstablishedTimeout"` -} - -// Currently two modes of proxying are available: 'userspace' (older, stable) or 'iptables' -// (experimental). If blank, look at the Node object on the Kubernetes API and respect the -// 'net.experimental.kubernetes.io/proxy-mode' annotation if provided. Otherwise use the -// best-available proxy (currently userspace, but may change in future versions). If the -// iptables proxy is selected, regardless of how, but the system's kernel or iptables -// versions are insufficient, this always falls back to the userspace proxy. -type ProxyMode string - -const ( - ProxyModeUserspace ProxyMode = "userspace" - ProxyModeIPTables ProxyMode = "iptables" -) - -type KubeSchedulerConfiguration struct { - unversioned.TypeMeta - - // port is the port that the scheduler's http service runs on. - Port int `json:"port"` - // address is the IP address to serve on. - Address string `json:"address"` - // algorithmProvider is the scheduling algorithm provider to use. - AlgorithmProvider string `json:"algorithmProvider"` - // policyConfigFile is the filepath to the scheduler policy configuration. - PolicyConfigFile string `json:"policyConfigFile"` - // enableProfiling enables profiling via web interface. - EnableProfiling *bool `json:"enableProfiling"` - // contentType is contentType of requests sent to apiserver. - ContentType string `json:"contentType"` - // kubeAPIQPS is the QPS to use while talking with kubernetes apiserver. - KubeAPIQPS float32 `json:"kubeAPIQPS"` - // kubeAPIBurst is the QPS burst to use while talking with kubernetes apiserver. - KubeAPIBurst int `json:"kubeAPIBurst"` - // schedulerName is name of the scheduler, used to select which pods - // will be processed by this scheduler, based on pod's annotation with - // key 'scheduler.alpha.kubernetes.io/name'. - SchedulerName string `json:"schedulerName"` - // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule - // corresponding to every RequiredDuringScheduling affinity rule. 
- // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100. - HardPodAffinitySymmetricWeight int `json:"hardPodAffinitySymmetricWeight"` - // Indicate the "all topologies" set for empty topologyKey when it's used for PreferredDuringScheduling pod anti-affinity. - FailureDomains string `json:"failureDomains"` - // leaderElection defines the configuration of leader election client. - LeaderElection LeaderElectionConfiguration `json:"leaderElection"` -} - -// LeaderElectionConfiguration defines the configuration of leader election -// clients for components that can run with leader election enabled. -type LeaderElectionConfiguration struct { - // leaderElect enables a leader election client to gain leadership - // before executing the main loop. Enable this when running replicated - // components for high availability. - LeaderElect *bool `json:"leaderElect"` - // leaseDuration is the duration that non-leader candidates will wait - // after observing a leadership renewal until attempting to acquire - // leadership of a led but unrenewed leader slot. This is effectively the - // maximum duration that a leader can be stopped before it is replaced - // by another candidate. This is only applicable if leader election is - // enabled. - LeaseDuration unversioned.Duration `json:"leaseDuration"` - // renewDeadline is the interval between attempts by the acting master to - // renew a leadership slot before it stops leading. This must be less - // than or equal to the lease duration. This is only applicable if leader - // election is enabled. - RenewDeadline unversioned.Duration `json:"renewDeadline"` - // retryPeriod is the duration the clients should wait between attempting - // acquisition and renewal of a leadership. This is only applicable if - // leader election is enabled. - RetryPeriod unversioned.Duration `json:"retryPeriod"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/deep_copy_generated.go deleted file mode 100644 index 118d478c2..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/deep_copy_generated.go +++ /dev/null @@ -1,958 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
- -package extensions - -import ( - api "k8s.io/kubernetes/pkg/api" - resource "k8s.io/kubernetes/pkg/api/resource" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" - intstr "k8s.io/kubernetes/pkg/util/intstr" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_extensions_APIVersion, - DeepCopy_extensions_CustomMetricCurrentStatus, - DeepCopy_extensions_CustomMetricCurrentStatusList, - DeepCopy_extensions_CustomMetricTarget, - DeepCopy_extensions_CustomMetricTargetList, - DeepCopy_extensions_DaemonSet, - DeepCopy_extensions_DaemonSetList, - DeepCopy_extensions_DaemonSetSpec, - DeepCopy_extensions_DaemonSetStatus, - DeepCopy_extensions_Deployment, - DeepCopy_extensions_DeploymentList, - DeepCopy_extensions_DeploymentRollback, - DeepCopy_extensions_DeploymentSpec, - DeepCopy_extensions_DeploymentStatus, - DeepCopy_extensions_DeploymentStrategy, - DeepCopy_extensions_FSGroupStrategyOptions, - DeepCopy_extensions_HTTPIngressPath, - DeepCopy_extensions_HTTPIngressRuleValue, - DeepCopy_extensions_HostPortRange, - DeepCopy_extensions_IDRange, - DeepCopy_extensions_Ingress, - DeepCopy_extensions_IngressBackend, - DeepCopy_extensions_IngressList, - DeepCopy_extensions_IngressRule, - DeepCopy_extensions_IngressRuleValue, - DeepCopy_extensions_IngressSpec, - DeepCopy_extensions_IngressStatus, - DeepCopy_extensions_IngressTLS, - DeepCopy_extensions_NetworkPolicy, - DeepCopy_extensions_NetworkPolicyIngressRule, - DeepCopy_extensions_NetworkPolicyList, - DeepCopy_extensions_NetworkPolicyPeer, - DeepCopy_extensions_NetworkPolicyPort, - DeepCopy_extensions_NetworkPolicySpec, - DeepCopy_extensions_PodSecurityPolicy, - DeepCopy_extensions_PodSecurityPolicyList, - DeepCopy_extensions_PodSecurityPolicySpec, - DeepCopy_extensions_ReplicaSet, - DeepCopy_extensions_ReplicaSetList, - DeepCopy_extensions_ReplicaSetSpec, - DeepCopy_extensions_ReplicaSetStatus, - DeepCopy_extensions_ReplicationControllerDummy, - DeepCopy_extensions_RollbackConfig, - DeepCopy_extensions_RollingUpdateDeployment, - DeepCopy_extensions_RunAsUserStrategyOptions, - DeepCopy_extensions_SELinuxStrategyOptions, - DeepCopy_extensions_Scale, - DeepCopy_extensions_ScaleSpec, - DeepCopy_extensions_ScaleStatus, - DeepCopy_extensions_SupplementalGroupsStrategyOptions, - DeepCopy_extensions_ThirdPartyResource, - DeepCopy_extensions_ThirdPartyResourceData, - DeepCopy_extensions_ThirdPartyResourceDataList, - DeepCopy_extensions_ThirdPartyResourceList, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. 
- panic(err) - } -} - -func DeepCopy_extensions_APIVersion(in APIVersion, out *APIVersion, c *conversion.Cloner) error { - out.Name = in.Name - return nil -} - -func DeepCopy_extensions_CustomMetricCurrentStatus(in CustomMetricCurrentStatus, out *CustomMetricCurrentStatus, c *conversion.Cloner) error { - out.Name = in.Name - if err := resource.DeepCopy_resource_Quantity(in.CurrentValue, &out.CurrentValue, c); err != nil { - return err - } - return nil -} - -func DeepCopy_extensions_CustomMetricCurrentStatusList(in CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, c *conversion.Cloner) error { - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]CustomMetricCurrentStatus, len(in)) - for i := range in { - if err := DeepCopy_extensions_CustomMetricCurrentStatus(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_extensions_CustomMetricTarget(in CustomMetricTarget, out *CustomMetricTarget, c *conversion.Cloner) error { - out.Name = in.Name - if err := resource.DeepCopy_resource_Quantity(in.TargetValue, &out.TargetValue, c); err != nil { - return err - } - return nil -} - -func DeepCopy_extensions_CustomMetricTargetList(in CustomMetricTargetList, out *CustomMetricTargetList, c *conversion.Cloner) error { - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]CustomMetricTarget, len(in)) - for i := range in { - if err := DeepCopy_extensions_CustomMetricTarget(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_extensions_DaemonSet(in DaemonSet, out *DaemonSet, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_extensions_DaemonSetSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_extensions_DaemonSetStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_extensions_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]DaemonSet, len(in)) - for i := range in { - if err := DeepCopy_extensions_DaemonSet(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_extensions_DaemonSetSpec(in DaemonSetSpec, out *DaemonSetSpec, c *conversion.Cloner) error { - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := api.DeepCopy_api_PodTemplateSpec(in.Template, &out.Template, c); err != nil { - return err - } - return nil -} - -func DeepCopy_extensions_DaemonSetStatus(in DaemonSetStatus, out *DaemonSetStatus, c *conversion.Cloner) error { - out.CurrentNumberScheduled = in.CurrentNumberScheduled - out.NumberMisscheduled = in.NumberMisscheduled - out.DesiredNumberScheduled = in.DesiredNumberScheduled - return nil -} - -func 
DeepCopy_extensions_Deployment(in Deployment, out *Deployment, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_extensions_DeploymentSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_extensions_DeploymentStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_extensions_DeploymentList(in DeploymentList, out *DeploymentList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Deployment, len(in)) - for i := range in { - if err := DeepCopy_extensions_Deployment(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_extensions_DeploymentRollback(in DeploymentRollback, out *DeploymentRollback, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Name = in.Name - if in.UpdatedAnnotations != nil { - in, out := in.UpdatedAnnotations, &out.UpdatedAnnotations - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.UpdatedAnnotations = nil - } - if err := DeepCopy_extensions_RollbackConfig(in.RollbackTo, &out.RollbackTo, c); err != nil { - return err - } - return nil -} - -func DeepCopy_extensions_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *conversion.Cloner) error { - out.Replicas = in.Replicas - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := api.DeepCopy_api_PodTemplateSpec(in.Template, &out.Template, c); err != nil { - return err - } - if err := DeepCopy_extensions_DeploymentStrategy(in.Strategy, &out.Strategy, c); err != nil { - return err - } - out.MinReadySeconds = in.MinReadySeconds - if in.RevisionHistoryLimit != nil { - in, out := in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = *in - } else { - out.RevisionHistoryLimit = nil - } - out.Paused = in.Paused - if in.RollbackTo != nil { - in, out := in.RollbackTo, &out.RollbackTo - *out = new(RollbackConfig) - if err := DeepCopy_extensions_RollbackConfig(*in, *out, c); err != nil { - return err - } - } else { - out.RollbackTo = nil - } - return nil -} - -func DeepCopy_extensions_DeploymentStatus(in DeploymentStatus, out *DeploymentStatus, c *conversion.Cloner) error { - out.ObservedGeneration = in.ObservedGeneration - out.Replicas = in.Replicas - out.UpdatedReplicas = in.UpdatedReplicas - out.AvailableReplicas = in.AvailableReplicas - out.UnavailableReplicas = in.UnavailableReplicas - return nil -} - -func DeepCopy_extensions_DeploymentStrategy(in DeploymentStrategy, out *DeploymentStrategy, c *conversion.Cloner) error { - out.Type = in.Type - if in.RollingUpdate != nil { - in, out := in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateDeployment) - if err := DeepCopy_extensions_RollingUpdateDeployment(*in, 
*out, c); err != nil { - return err - } - } else { - out.RollingUpdate = nil - } - return nil -} - -func DeepCopy_extensions_FSGroupStrategyOptions(in FSGroupStrategyOptions, out *FSGroupStrategyOptions, c *conversion.Cloner) error { - out.Rule = in.Rule - if in.Ranges != nil { - in, out := in.Ranges, &out.Ranges - *out = make([]IDRange, len(in)) - for i := range in { - if err := DeepCopy_extensions_IDRange(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Ranges = nil - } - return nil -} - -func DeepCopy_extensions_HTTPIngressPath(in HTTPIngressPath, out *HTTPIngressPath, c *conversion.Cloner) error { - out.Path = in.Path - if err := DeepCopy_extensions_IngressBackend(in.Backend, &out.Backend, c); err != nil { - return err - } - return nil -} - -func DeepCopy_extensions_HTTPIngressRuleValue(in HTTPIngressRuleValue, out *HTTPIngressRuleValue, c *conversion.Cloner) error { - if in.Paths != nil { - in, out := in.Paths, &out.Paths - *out = make([]HTTPIngressPath, len(in)) - for i := range in { - if err := DeepCopy_extensions_HTTPIngressPath(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Paths = nil - } - return nil -} - -func DeepCopy_extensions_HostPortRange(in HostPortRange, out *HostPortRange, c *conversion.Cloner) error { - out.Min = in.Min - out.Max = in.Max - return nil -} - -func DeepCopy_extensions_IDRange(in IDRange, out *IDRange, c *conversion.Cloner) error { - out.Min = in.Min - out.Max = in.Max - return nil -} - -func DeepCopy_extensions_Ingress(in Ingress, out *Ingress, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_extensions_IngressSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_extensions_IngressStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_extensions_IngressBackend(in IngressBackend, out *IngressBackend, c *conversion.Cloner) error { - out.ServiceName = in.ServiceName - if err := intstr.DeepCopy_intstr_IntOrString(in.ServicePort, &out.ServicePort, c); err != nil { - return err - } - return nil -} - -func DeepCopy_extensions_IngressList(in IngressList, out *IngressList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Ingress, len(in)) - for i := range in { - if err := DeepCopy_extensions_Ingress(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_extensions_IngressRule(in IngressRule, out *IngressRule, c *conversion.Cloner) error { - out.Host = in.Host - if err := DeepCopy_extensions_IngressRuleValue(in.IngressRuleValue, &out.IngressRuleValue, c); err != nil { - return err - } - return nil -} - -func DeepCopy_extensions_IngressRuleValue(in IngressRuleValue, out *IngressRuleValue, c *conversion.Cloner) error { - if in.HTTP != nil { - in, out := in.HTTP, &out.HTTP - *out = new(HTTPIngressRuleValue) - if err := DeepCopy_extensions_HTTPIngressRuleValue(*in, *out, c); err != nil { - return err - } - } else { - out.HTTP = nil - } - return nil -} - -func 
DeepCopy_extensions_IngressSpec(in IngressSpec, out *IngressSpec, c *conversion.Cloner) error { - if in.Backend != nil { - in, out := in.Backend, &out.Backend - *out = new(IngressBackend) - if err := DeepCopy_extensions_IngressBackend(*in, *out, c); err != nil { - return err - } - } else { - out.Backend = nil - } - if in.TLS != nil { - in, out := in.TLS, &out.TLS - *out = make([]IngressTLS, len(in)) - for i := range in { - if err := DeepCopy_extensions_IngressTLS(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.TLS = nil - } - if in.Rules != nil { - in, out := in.Rules, &out.Rules - *out = make([]IngressRule, len(in)) - for i := range in { - if err := DeepCopy_extensions_IngressRule(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Rules = nil - } - return nil -} - -func DeepCopy_extensions_IngressStatus(in IngressStatus, out *IngressStatus, c *conversion.Cloner) error { - if err := api.DeepCopy_api_LoadBalancerStatus(in.LoadBalancer, &out.LoadBalancer, c); err != nil { - return err - } - return nil -} - -func DeepCopy_extensions_IngressTLS(in IngressTLS, out *IngressTLS, c *conversion.Cloner) error { - if in.Hosts != nil { - in, out := in.Hosts, &out.Hosts - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Hosts = nil - } - out.SecretName = in.SecretName - return nil -} - -func DeepCopy_extensions_NetworkPolicy(in NetworkPolicy, out *NetworkPolicy, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_extensions_NetworkPolicySpec(in.Spec, &out.Spec, c); err != nil { - return err - } - return nil -} - -func DeepCopy_extensions_NetworkPolicyIngressRule(in NetworkPolicyIngressRule, out *NetworkPolicyIngressRule, c *conversion.Cloner) error { - if in.Ports != nil { - in, out := in.Ports, &out.Ports - *out = make([]NetworkPolicyPort, len(in)) - for i := range in { - if err := DeepCopy_extensions_NetworkPolicyPort(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Ports = nil - } - if in.From != nil { - in, out := in.From, &out.From - *out = make([]NetworkPolicyPeer, len(in)) - for i := range in { - if err := DeepCopy_extensions_NetworkPolicyPeer(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.From = nil - } - return nil -} - -func DeepCopy_extensions_NetworkPolicyList(in NetworkPolicyList, out *NetworkPolicyList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]NetworkPolicy, len(in)) - for i := range in { - if err := DeepCopy_extensions_NetworkPolicy(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_extensions_NetworkPolicyPeer(in NetworkPolicyPeer, out *NetworkPolicyPeer, c *conversion.Cloner) error { - if in.PodSelector != nil { - in, out := in.PodSelector, &out.PodSelector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.PodSelector = nil - } - if in.NamespaceSelector != nil { - in, out := 
in.NamespaceSelector, &out.NamespaceSelector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.NamespaceSelector = nil - } - return nil -} - -func DeepCopy_extensions_NetworkPolicyPort(in NetworkPolicyPort, out *NetworkPolicyPort, c *conversion.Cloner) error { - if in.Protocol != nil { - in, out := in.Protocol, &out.Protocol - *out = new(api.Protocol) - **out = *in - } else { - out.Protocol = nil - } - if in.Port != nil { - in, out := in.Port, &out.Port - *out = new(intstr.IntOrString) - if err := intstr.DeepCopy_intstr_IntOrString(*in, *out, c); err != nil { - return err - } - } else { - out.Port = nil - } - return nil -} - -func DeepCopy_extensions_NetworkPolicySpec(in NetworkPolicySpec, out *NetworkPolicySpec, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_LabelSelector(in.PodSelector, &out.PodSelector, c); err != nil { - return err - } - if in.Ingress != nil { - in, out := in.Ingress, &out.Ingress - *out = make([]NetworkPolicyIngressRule, len(in)) - for i := range in { - if err := DeepCopy_extensions_NetworkPolicyIngressRule(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Ingress = nil - } - return nil -} - -func DeepCopy_extensions_PodSecurityPolicy(in PodSecurityPolicy, out *PodSecurityPolicy, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_extensions_PodSecurityPolicySpec(in.Spec, &out.Spec, c); err != nil { - return err - } - return nil -} - -func DeepCopy_extensions_PodSecurityPolicyList(in PodSecurityPolicyList, out *PodSecurityPolicyList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]PodSecurityPolicy, len(in)) - for i := range in { - if err := DeepCopy_extensions_PodSecurityPolicy(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_extensions_PodSecurityPolicySpec(in PodSecurityPolicySpec, out *PodSecurityPolicySpec, c *conversion.Cloner) error { - out.Privileged = in.Privileged - if in.DefaultAddCapabilities != nil { - in, out := in.DefaultAddCapabilities, &out.DefaultAddCapabilities - *out = make([]api.Capability, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.DefaultAddCapabilities = nil - } - if in.RequiredDropCapabilities != nil { - in, out := in.RequiredDropCapabilities, &out.RequiredDropCapabilities - *out = make([]api.Capability, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.RequiredDropCapabilities = nil - } - if in.AllowedCapabilities != nil { - in, out := in.AllowedCapabilities, &out.AllowedCapabilities - *out = make([]api.Capability, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.AllowedCapabilities = nil - } - if in.Volumes != nil { - in, out := in.Volumes, &out.Volumes - *out = make([]FSType, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.Volumes = nil - } - out.HostNetwork = in.HostNetwork - if in.HostPorts != nil { - in, out := 
in.HostPorts, &out.HostPorts - *out = make([]HostPortRange, len(in)) - for i := range in { - if err := DeepCopy_extensions_HostPortRange(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.HostPorts = nil - } - out.HostPID = in.HostPID - out.HostIPC = in.HostIPC - if err := DeepCopy_extensions_SELinuxStrategyOptions(in.SELinux, &out.SELinux, c); err != nil { - return err - } - if err := DeepCopy_extensions_RunAsUserStrategyOptions(in.RunAsUser, &out.RunAsUser, c); err != nil { - return err - } - if err := DeepCopy_extensions_SupplementalGroupsStrategyOptions(in.SupplementalGroups, &out.SupplementalGroups, c); err != nil { - return err - } - if err := DeepCopy_extensions_FSGroupStrategyOptions(in.FSGroup, &out.FSGroup, c); err != nil { - return err - } - out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem - return nil -} - -func DeepCopy_extensions_ReplicaSet(in ReplicaSet, out *ReplicaSet, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_extensions_ReplicaSetSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_extensions_ReplicaSetStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_extensions_ReplicaSetList(in ReplicaSetList, out *ReplicaSetList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ReplicaSet, len(in)) - for i := range in { - if err := DeepCopy_extensions_ReplicaSet(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_extensions_ReplicaSetSpec(in ReplicaSetSpec, out *ReplicaSetSpec, c *conversion.Cloner) error { - out.Replicas = in.Replicas - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := api.DeepCopy_api_PodTemplateSpec(in.Template, &out.Template, c); err != nil { - return err - } - return nil -} - -func DeepCopy_extensions_ReplicaSetStatus(in ReplicaSetStatus, out *ReplicaSetStatus, c *conversion.Cloner) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ObservedGeneration = in.ObservedGeneration - return nil -} - -func DeepCopy_extensions_ReplicationControllerDummy(in ReplicationControllerDummy, out *ReplicationControllerDummy, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - return nil -} - -func DeepCopy_extensions_RollbackConfig(in RollbackConfig, out *RollbackConfig, c *conversion.Cloner) error { - out.Revision = in.Revision - return nil -} - -func DeepCopy_extensions_RollingUpdateDeployment(in RollingUpdateDeployment, out *RollingUpdateDeployment, c *conversion.Cloner) error { - if err := intstr.DeepCopy_intstr_IntOrString(in.MaxUnavailable, &out.MaxUnavailable, c); err != nil { - return err - } - if err := intstr.DeepCopy_intstr_IntOrString(in.MaxSurge, 
&out.MaxSurge, c); err != nil { - return err - } - return nil -} - -func DeepCopy_extensions_RunAsUserStrategyOptions(in RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, c *conversion.Cloner) error { - out.Rule = in.Rule - if in.Ranges != nil { - in, out := in.Ranges, &out.Ranges - *out = make([]IDRange, len(in)) - for i := range in { - if err := DeepCopy_extensions_IDRange(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Ranges = nil - } - return nil -} - -func DeepCopy_extensions_SELinuxStrategyOptions(in SELinuxStrategyOptions, out *SELinuxStrategyOptions, c *conversion.Cloner) error { - out.Rule = in.Rule - if in.SELinuxOptions != nil { - in, out := in.SELinuxOptions, &out.SELinuxOptions - *out = new(api.SELinuxOptions) - if err := api.DeepCopy_api_SELinuxOptions(*in, *out, c); err != nil { - return err - } - } else { - out.SELinuxOptions = nil - } - return nil -} - -func DeepCopy_extensions_Scale(in Scale, out *Scale, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_extensions_ScaleSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_extensions_ScaleStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_extensions_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error { - out.Replicas = in.Replicas - return nil -} - -func DeepCopy_extensions_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error { - out.Replicas = in.Replicas - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.Selector = nil - } - return nil -} - -func DeepCopy_extensions_SupplementalGroupsStrategyOptions(in SupplementalGroupsStrategyOptions, out *SupplementalGroupsStrategyOptions, c *conversion.Cloner) error { - out.Rule = in.Rule - if in.Ranges != nil { - in, out := in.Ranges, &out.Ranges - *out = make([]IDRange, len(in)) - for i := range in { - if err := DeepCopy_extensions_IDRange(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Ranges = nil - } - return nil -} - -func DeepCopy_extensions_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyResource, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - out.Description = in.Description - if in.Versions != nil { - in, out := in.Versions, &out.Versions - *out = make([]APIVersion, len(in)) - for i := range in { - if err := DeepCopy_extensions_APIVersion(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Versions = nil - } - return nil -} - -func DeepCopy_extensions_ThirdPartyResourceData(in ThirdPartyResourceData, out *ThirdPartyResourceData, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if in.Data != nil { - in, out := in.Data, &out.Data - *out = make([]byte, len(in)) - copy(*out, in) - } 
else { - out.Data = nil - } - return nil -} - -func DeepCopy_extensions_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ThirdPartyResourceData, len(in)) - for i := range in { - if err := DeepCopy_extensions_ThirdPartyResourceData(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_extensions_ThirdPartyResourceList(in ThirdPartyResourceList, out *ThirdPartyResourceList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ThirdPartyResource, len(in)) - for i := range in { - if err := DeepCopy_extensions_ThirdPartyResource(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/install.go deleted file mode 100644 index 449127084..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/install/install.go +++ /dev/null @@ -1,132 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the experimental API group, making it available as -// an option to all of the API encoding/decoding machinery. 
-package install - -import ( - "fmt" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/sets" -) - -const importPrefix = "k8s.io/kubernetes/pkg/apis/extensions" - -var accessor = meta.NewAccessor() - -// availableVersions lists all known external versions for this group from most preferred to least preferred -var availableVersions = []unversioned.GroupVersion{v1beta1.SchemeGroupVersion} - -func init() { - registered.RegisterVersions(availableVersions) - externalVersions := []unversioned.GroupVersion{} - for _, v := range availableVersions { - if registered.IsAllowedVersion(v) { - externalVersions = append(externalVersions, v) - } - } - if len(externalVersions) == 0 { - glog.V(4).Infof("No version is registered for group %v", extensions.GroupName) - return - } - - if err := registered.EnableVersions(externalVersions...); err != nil { - glog.V(4).Infof("%v", err) - return - } - if err := enableVersions(externalVersions); err != nil { - glog.V(4).Infof("%v", err) - return - } -} - -// TODO: enableVersions should be centralized rather than spread in each API -// group. -// We can combine registered.RegisterVersions, registered.EnableVersions and -// registered.RegisterGroup once we have moved enableVersions there. -func enableVersions(externalVersions []unversioned.GroupVersion) error { - addVersionsToScheme(externalVersions...) - preferredExternalVersion := externalVersions[0] - - groupMeta := apimachinery.GroupMeta{ - GroupVersion: preferredExternalVersion, - GroupVersions: externalVersions, - RESTMapper: newRESTMapper(externalVersions), - SelfLinker: runtime.SelfLinker(accessor), - InterfacesFor: interfacesFor, - } - - if err := registered.RegisterGroup(groupMeta); err != nil { - return err - } - api.RegisterRESTMapper(groupMeta.RESTMapper) - return nil -} - -func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { - // the list of kinds that are scoped at the root of the api hierarchy - // if a kind is not enumerated here, it is assumed to have a namespace scope - rootScoped := sets.NewString( - "PodSecurityPolicy", - "ThirdPartyResource", - ) - - ignoredKinds := sets.NewString() - - return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) -} - -// interfacesFor returns the default Codec and ResourceVersioner for a given version -// string, or an error if the version is not known. 
-func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { - switch version { - case v1beta1.SchemeGroupVersion: - return &meta.VersionInterfaces{ - ObjectConvertor: api.Scheme, - MetadataAccessor: accessor, - }, nil - default: - g, _ := registered.Group(extensions.GroupName) - return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) - } -} - -func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { - // add the internal version to Scheme - extensions.AddToScheme(api.Scheme) - // add the enabled external versions to Scheme - for _, v := range externalVersions { - if !registered.IsEnabledVersion(v) { - glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) - continue - } - switch v { - case v1beta1.SchemeGroupVersion: - v1beta1.AddToScheme(api.Scheme) - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go deleted file mode 100644 index 1c5f6ba10..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package extensions - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/autoscaling" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/runtime" -) - -// GroupName is the group name use in this package -const GroupName = "extensions" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns back a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -func AddToScheme(scheme *runtime.Scheme) { - // Add the API to Scheme. - addKnownTypes(scheme) -} - -// Adds the list of known types to api.Scheme. 
-func addKnownTypes(scheme *runtime.Scheme) { - // TODO this gets cleaned up when the types are fixed - scheme.AddKnownTypes(SchemeGroupVersion, - &Deployment{}, - &DeploymentList{}, - &DeploymentRollback{}, - &autoscaling.HorizontalPodAutoscaler{}, - &autoscaling.HorizontalPodAutoscalerList{}, - &batch.Job{}, - &batch.JobList{}, - &batch.JobTemplate{}, - &ReplicationControllerDummy{}, - &Scale{}, - &ThirdPartyResource{}, - &ThirdPartyResourceList{}, - &DaemonSetList{}, - &DaemonSet{}, - &ThirdPartyResourceData{}, - &ThirdPartyResourceDataList{}, - &Ingress{}, - &IngressList{}, - &api.ListOptions{}, - &ReplicaSet{}, - &ReplicaSetList{}, - &api.ExportOptions{}, - &PodSecurityPolicy{}, - &PodSecurityPolicyList{}, - &NetworkPolicy{}, - &NetworkPolicyList{}, - ) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.generated.go deleted file mode 100644 index eec90fb0a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.generated.go +++ /dev/null @@ -1,17991 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package extensions - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg2_api "k8s.io/kubernetes/pkg/api" - pkg4_resource "k8s.io/kubernetes/pkg/api/resource" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg3_types "k8s.io/kubernetes/pkg/types" - pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg2_api.ObjectMeta - var v1 pkg4_resource.Quantity - var v2 pkg1_unversioned.LabelSelector - var v3 pkg3_types.UID - var v4 pkg5_intstr.IntOrString - var v5 time.Time - _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 - } -} - -func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Replicas != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 
int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Selector != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { 
- var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - 
} - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ScaleSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ScaleStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ScaleSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ScaleStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicationControllerDummy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := 
z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicationControllerDummy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicationControllerDummy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x 
*ReplicationControllerDummy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CustomMetricTarget) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.TargetValue - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) - } else { - z.EncFallback(yy7) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.TargetValue - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CustomMetricTarget) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := 
r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CustomMetricTarget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "value": - if r.TryDecodeAsNil() { - x.TargetValue = pkg4_resource.Quantity{} - } else { - yyv5 := &x.TargetValue - yym6 := z.DecBinary() - _ = yym6 - if false { - } else if z.HasExtensions() && z.DecExt(yyv5) { - } else if !yym6 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv5) - } else { - z.DecFallback(yyv5, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CustomMetricTarget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetValue = pkg4_resource.Quantity{} - } else { - yyv9 := &x.TargetValue - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv9) - } else { - z.DecFallback(yyv9, false) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CustomMetricTargetList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 
1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CustomMetricTargetList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CustomMetricTargetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4 := &x.Items - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CustomMetricTargetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv7 := &x.Items - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") 
- } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CustomMetricCurrentStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.CurrentValue - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) - } else { - z.EncFallback(yy7) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.CurrentValue - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CustomMetricCurrentStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CustomMetricCurrentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if 
r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "value": - if r.TryDecodeAsNil() { - x.CurrentValue = pkg4_resource.Quantity{} - } else { - yyv5 := &x.CurrentValue - yym6 := z.DecBinary() - _ = yym6 - if false { - } else if z.HasExtensions() && z.DecExt(yyv5) { - } else if !yym6 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv5) - } else { - z.DecFallback(yyv5, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CustomMetricCurrentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentValue = pkg4_resource.Quantity{} - } else { - yyv9 := &x.CurrentValue - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv9) - } else { - z.DecFallback(yyv9, false) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CustomMetricCurrentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CustomMetricCurrentStatusList) CodecDecodeSelf(d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4 := &x.Items - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv7 := &x.Items - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ThirdPartyResource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = x.Description != "" - yyq2[2] = len(x.Versions) != 0 - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 
:= &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Description)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("description")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Description)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Versions == nil { - r.EncodeNil() - } else { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - h.encSliceAPIVersion(([]APIVersion)(x.Versions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("versions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Versions == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - h.encSliceAPIVersion(([]APIVersion)(x.Versions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ThirdPartyResource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) 
- } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ThirdPartyResource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "description": - if r.TryDecodeAsNil() { - x.Description = "" - } else { - x.Description = string(r.DecodeString()) - } - case "versions": - if r.TryDecodeAsNil() { - x.Versions = nil - } else { - yyv6 := &x.Versions - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceAPIVersion((*[]APIVersion)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ThirdPartyResource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv11 := &x.ObjectMeta - yyv11.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Description = "" - } else { - x.Description = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Versions = nil - } else { - yyv13 := &x.Versions - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceAPIVersion((*[]APIVersion)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if 
yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ThirdPartyResourceList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ThirdPartyResourceList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ThirdPartyResourceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ThirdPartyResourceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *APIVersion) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Name != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *APIVersion) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *APIVersion) codecDecodeSelfFromMap(l int, d 
*codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *APIVersion) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ThirdPartyResourceData) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = len(x.Data) != 0 - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Data == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("data")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Data == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) - } - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ThirdPartyResourceData) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ThirdPartyResourceData) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "data": - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv5 := &x.Data - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *yyv5 = r.DecodeBytes(*(*[]byte)(yyv5), false, false) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ThirdPartyResourceData) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv11 := &x.Data - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *yyv11 = r.DecodeBytes(*(*[]byte)(yyv11), false, false) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Deployment) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Deployment) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Deployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = DeploymentSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = DeploymentStatus{} - } else { - 
yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = DeploymentSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = DeploymentStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Replicas != 0 - yyq2[1] = x.Selector != nil - yyq2[3] = true - yyq2[4] = x.MinReadySeconds != 0 - yyq2[5] = x.RevisionHistoryLimit != nil - yyq2[6] = x.Paused != false - yyq2[7] = x.RollbackTo != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy10 := &x.Template - yy10.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.Template - yy12.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.Strategy - yy15.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("strategy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.Strategy - yy17.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(x.MinReadySeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeInt(int64(x.MinReadySeconds)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.RevisionHistoryLimit == nil { - r.EncodeNil() - } else { - yy23 := *x.RevisionHistoryLimit - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeInt(int64(yy23)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("revisionHistoryLimit")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RevisionHistoryLimit == nil { - r.EncodeNil() - } else { - yy25 := *x.RevisionHistoryLimit - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeInt(int64(yy25)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - 
r.EncodeBool(bool(x.Paused)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("paused")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - r.EncodeBool(bool(x.Paused)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.RollbackTo == nil { - r.EncodeNil() - } else { - x.RollbackTo.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RollbackTo == nil { - r.EncodeNil() - } else { - x.RollbackTo.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv7 := &x.Template - yyv7.CodecDecodeSelf(d) - } - case "strategy": - if r.TryDecodeAsNil() { - x.Strategy = DeploymentStrategy{} - } else { - yyv8 := &x.Strategy - yyv8.CodecDecodeSelf(d) - } - case "minReadySeconds": - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - x.MinReadySeconds = int32(r.DecodeInt(32)) - } - case "revisionHistoryLimit": - if r.TryDecodeAsNil() { - if x.RevisionHistoryLimit != nil { - x.RevisionHistoryLimit = nil - } - } else { - if x.RevisionHistoryLimit == nil { - x.RevisionHistoryLimit = 
new(int32) - } - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) - } - } - case "paused": - if r.TryDecodeAsNil() { - x.Paused = false - } else { - x.Paused = bool(r.DecodeBool()) - } - case "rollbackTo": - if r.TryDecodeAsNil() { - if x.RollbackTo != nil { - x.RollbackTo = nil - } - } else { - if x.RollbackTo == nil { - x.RollbackTo = new(RollbackConfig) - } - x.RollbackTo.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym17 := z.DecBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv18 := &x.Template - yyv18.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Strategy = DeploymentStrategy{} - } else { - yyv19 := &x.Strategy - yyv19.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - x.MinReadySeconds = int32(r.DecodeInt(32)) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RevisionHistoryLimit != nil { - x.RevisionHistoryLimit = nil - } - } else { - if x.RevisionHistoryLimit == nil { - x.RevisionHistoryLimit = new(int32) - } - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Paused = false - } else { - x.Paused = bool(r.DecodeBool()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RollbackTo != nil { - x.RollbackTo = nil - } - } else { - if x.RollbackTo == nil { - x.RollbackTo = new(RollbackConfig) - } - x.RollbackTo.CodecDecodeSelf(d) - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentRollback) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = len(x.UpdatedAnnotations) != 0 - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.UpdatedAnnotations == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("updatedAnnotations")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.UpdatedAnnotations == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy10 := &x.RollbackTo - yy10.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.RollbackTo - yy12.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentRollback) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentRollback) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "updatedAnnotations": - if r.TryDecodeAsNil() { - x.UpdatedAnnotations = nil - } else { - yyv5 := &x.UpdatedAnnotations - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - z.F.DecMapStringStringX(yyv5, false, d) - } - } - case "rollbackTo": - if r.TryDecodeAsNil() { - x.RollbackTo = RollbackConfig{} - } else { - yyv7 := &x.RollbackTo - yyv7.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentRollback) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var 
yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UpdatedAnnotations = nil - } else { - yyv12 := &x.UpdatedAnnotations - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - z.F.DecMapStringStringX(yyv12, false, d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RollbackTo = RollbackConfig{} - } else { - yyv14 := &x.RollbackTo - yyv14.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RollbackConfig) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Revision != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Revision)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("revision")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Revision)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RollbackConfig) CodecDecodeSelf(d *codec1978.Decoder) { - 
var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RollbackConfig) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "revision": - if r.TryDecodeAsNil() { - x.Revision = 0 - } else { - x.Revision = int64(r.DecodeInt(64)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RollbackConfig) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Revision = 0 - } else { - x.Revision = int64(r.DecodeInt(64)) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentStrategy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Type != "" - yyq2[1] = x.RollingUpdate != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Type.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.RollingUpdate == nil { - r.EncodeNil() - } else { - x.RollingUpdate.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rollingUpdate")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RollingUpdate == nil { - r.EncodeNil() - } else { - x.RollingUpdate.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentStrategy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = DeploymentStrategyType(r.DecodeString()) - } - case "rollingUpdate": - if r.TryDecodeAsNil() { - if x.RollingUpdate != nil { - x.RollingUpdate = nil - } - } else { - if x.RollingUpdate == nil { - x.RollingUpdate = new(RollingUpdateDeployment) - } - x.RollingUpdate.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentStrategy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = DeploymentStrategyType(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RollingUpdate != nil { - x.RollingUpdate = nil - } - } else { - if x.RollingUpdate == nil { - x.RollingUpdate = 
new(RollingUpdateDeployment) - } - x.RollingUpdate.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x DeploymentStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *DeploymentStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *RollingUpdateDeployment) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.MaxUnavailable - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else if !yym5 && z.IsJSONHandle() { - z.EncJSONMarshal(yy4) - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxUnavailable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.MaxUnavailable - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(yy6) - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.MaxSurge - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxSurge")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.MaxSurge - yym12 := z.EncBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.EncExt(yy11) { - } else if !yym12 && z.IsJSONHandle() { - z.EncJSONMarshal(yy11) - } else { - z.EncFallback(yy11) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RollingUpdateDeployment) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RollingUpdateDeployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "maxUnavailable": - if r.TryDecodeAsNil() { - x.MaxUnavailable = pkg5_intstr.IntOrString{} - } else { - yyv4 := &x.MaxUnavailable - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else if !yym5 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4) - } else { - z.DecFallback(yyv4, false) - } - } - case "maxSurge": - if r.TryDecodeAsNil() { - x.MaxSurge = pkg5_intstr.IntOrString{} - } else { - yyv6 := &x.MaxSurge - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RollingUpdateDeployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxUnavailable = pkg5_intstr.IntOrString{} - } else { - yyv9 := &x.MaxUnavailable - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv9) - } else { - z.DecFallback(yyv9, false) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxSurge = pkg5_intstr.IntOrString{} - } else { - yyv11 := &x.MaxSurge - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv11) - } else { - z.DecFallback(yyv11, false) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 
{ - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.ObservedGeneration != 0 - yyq2[1] = x.Replicas != 0 - yyq2[2] = x.UpdatedReplicas != 0 - yyq2[3] = x.AvailableReplicas != 0 - yyq2[4] = x.UnavailableReplicas != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.UpdatedReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("updatedReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.UpdatedReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.AvailableReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.AvailableReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.UnavailableReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("unavailableReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(x.UnavailableReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "observedGeneration": - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) - } - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "updatedReplicas": - if r.TryDecodeAsNil() { - x.UpdatedReplicas = 0 - } else { - x.UpdatedReplicas = int32(r.DecodeInt(32)) - } - case "availableReplicas": - if r.TryDecodeAsNil() { - x.AvailableReplicas = 0 - } else { - x.AvailableReplicas = int32(r.DecodeInt(32)) - } - case "unavailableReplicas": - if r.TryDecodeAsNil() { - x.UnavailableReplicas = 0 - } else { - x.UnavailableReplicas = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - 
x.Replicas = int32(r.DecodeInt(32)) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UpdatedReplicas = 0 - } else { - x.UpdatedReplicas = int32(r.DecodeInt(32)) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AvailableReplicas = 0 - } else { - x.AvailableReplicas = int32(r.DecodeInt(32)) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UnavailableReplicas = 0 - } else { - x.UnavailableReplicas = int32(r.DecodeInt(32)) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceDeployment(([]Deployment)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceDeployment(([]Deployment)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - 
r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceDeployment((*[]Deployment)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceDeployment((*[]Deployment)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Selector != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.Template - yy7.CodecEncodeSelf(e) - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.Template - yy9.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv6 := &x.Template - yyv6.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DaemonSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv10 := &x.Template - yyv10.CodecDecodeSelf(d) - } - 
for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.CurrentNumberScheduled)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentNumberScheduled")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.CurrentNumberScheduled)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.NumberMisscheduled)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("numberMisscheduled")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.NumberMisscheduled)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.DesiredNumberScheduled)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("desiredNumberScheduled")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.DesiredNumberScheduled)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = 
h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "currentNumberScheduled": - if r.TryDecodeAsNil() { - x.CurrentNumberScheduled = 0 - } else { - x.CurrentNumberScheduled = int32(r.DecodeInt(32)) - } - case "numberMisscheduled": - if r.TryDecodeAsNil() { - x.NumberMisscheduled = 0 - } else { - x.NumberMisscheduled = int32(r.DecodeInt(32)) - } - case "desiredNumberScheduled": - if r.TryDecodeAsNil() { - x.DesiredNumberScheduled = 0 - } else { - x.DesiredNumberScheduled = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DaemonSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentNumberScheduled = 0 - } else { - x.CurrentNumberScheduled = int32(r.DecodeInt(32)) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NumberMisscheduled = 0 - } else { - x.NumberMisscheduled = int32(r.DecodeInt(32)) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DesiredNumberScheduled = 0 - } else { - x.DesiredNumberScheduled = int32(r.DecodeInt(32)) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonSet) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonSet) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for 
yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = DaemonSetSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = DaemonSetStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DaemonSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = DaemonSetSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = DaemonSetStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonSetList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - 
} else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceDaemonSet(([]DaemonSet)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceDaemonSet(([]DaemonSet)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonSetList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceDaemonSet((*[]DaemonSet)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DaemonSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceDaemonSet((*[]DaemonSet)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if 
r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ThirdPartyResourceDataList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = 
yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ThirdPartyResourceDataList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ThirdPartyResourceDataList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ThirdPartyResourceDataList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil 
- } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Ingress) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Ingress) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Ingress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = IngressSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = IngressStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Ingress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = IngressSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = IngressStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceIngress(([]Ingress)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceIngress(([]Ingress)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceIngress((*[]Ingress)(yyv6), d) - } - } - case "kind": - if 
r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceIngress((*[]Ingress)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Backend != nil - yyq2[1] = len(x.TLS) != 0 - yyq2[2] = len(x.Rules) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Backend == nil { - r.EncodeNil() - } else { - x.Backend.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("backend")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Backend == nil { - r.EncodeNil() - } else { - x.Backend.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.TLS == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tls")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TLS == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Rules == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceIngressRule(([]IngressRule)(x.Rules), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rules")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Rules == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - h.encSliceIngressRule(([]IngressRule)(x.Rules), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "backend": - if r.TryDecodeAsNil() { - if x.Backend != nil { - x.Backend = nil - } - } else { - if x.Backend == nil { - x.Backend = new(IngressBackend) - } - x.Backend.CodecDecodeSelf(d) - } - case "tls": - if r.TryDecodeAsNil() { - x.TLS = nil - } else { - yyv5 := &x.TLS - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceIngressTLS((*[]IngressTLS)(yyv5), d) - } - } - case "rules": - if r.TryDecodeAsNil() { - x.Rules = nil - } else { - yyv7 := &x.Rules - yym8 := z.DecBinary() - 
_ = yym8 - if false { - } else { - h.decSliceIngressRule((*[]IngressRule)(yyv7), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Backend != nil { - x.Backend = nil - } - } else { - if x.Backend == nil { - x.Backend = new(IngressBackend) - } - x.Backend.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TLS = nil - } else { - yyv11 := &x.TLS - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceIngressTLS((*[]IngressTLS)(yyv11), d) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rules = nil - } else { - yyv13 := &x.Rules - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceIngressRule((*[]IngressRule)(yyv13), d) - } - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressTLS) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Hosts) != 0 - yyq2[1] = x.SecretName != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Hosts == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.Hosts, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hosts")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Hosts == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.Hosts, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, 
string(x.SecretName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressTLS) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressTLS) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "hosts": - if r.TryDecodeAsNil() { - x.Hosts = nil - } else { - yyv4 := &x.Hosts - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecSliceStringX(yyv4, false, d) - } - } - case "secretName": - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - x.SecretName = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressTLS) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hosts = nil - } else { - yyv8 := &x.Hosts - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - z.F.DecSliceStringX(yyv8, false, d) - } - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - x.SecretName = string(r.DecodeString()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.LoadBalancer - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("loadBalancer")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.LoadBalancer - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "loadBalancer": - if r.TryDecodeAsNil() { - x.LoadBalancer = pkg2_api.LoadBalancerStatus{} - } else { - yyv4 := &x.LoadBalancer - yyv4.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LoadBalancer = pkg2_api.LoadBalancerStatus{} - } else { - yyv6 := &x.LoadBalancer - yyv6.CodecDecodeSelf(d) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressRule) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Host != "" - yyq2[1] = x.IngressRuleValue.HTTP != nil && x.HTTP != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("host")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } - } - var yyn6 bool - if x.IngressRuleValue.HTTP == nil { - yyn6 = true - goto LABEL6 - } - LABEL6: - if yyr2 || yy2arr2 { - if yyn6 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.HTTP == nil { - r.EncodeNil() - } else { - x.HTTP.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("http")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn6 { - r.EncodeNil() - } else { - if x.HTTP == nil { - r.EncodeNil() - } else { - x.HTTP.CodecEncodeSelf(e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressRule) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "host": - if r.TryDecodeAsNil() { - x.Host = "" - } else { - x.Host = string(r.DecodeString()) - } - case "http": - if x.IngressRuleValue.HTTP == nil { - x.IngressRuleValue.HTTP = new(HTTPIngressRuleValue) - } - if r.TryDecodeAsNil() { - if x.HTTP != nil { - x.HTTP = nil - } - } else { - if x.HTTP == nil { - x.HTTP = new(HTTPIngressRuleValue) - } - x.HTTP.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Host = "" - } else { - x.Host = string(r.DecodeString()) - } - if x.IngressRuleValue.HTTP == nil { - x.IngressRuleValue.HTTP = new(HTTPIngressRuleValue) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HTTP != nil { - x.HTTP = nil - } - } else { - if x.HTTP == nil { - x.HTTP = new(HTTPIngressRuleValue) - } - x.HTTP.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.HTTP != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.HTTP == nil { - r.EncodeNil() - } else { - x.HTTP.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("http")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HTTP == nil { - r.EncodeNil() - } else { - x.HTTP.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "http": - if r.TryDecodeAsNil() { - if x.HTTP != nil { - x.HTTP = nil - } - } else { - if x.HTTP == nil { - x.HTTP = new(HTTPIngressRuleValue) - } - x.HTTP.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HTTP != nil { - x.HTTP = nil - } - } else { - if x.HTTP == nil { - x.HTTP = new(HTTPIngressRuleValue) - } - x.HTTP.CodecDecodeSelf(d) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HTTPIngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Paths == nil { - r.EncodeNil() - } else { - yym4 
:= z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("paths")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Paths == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HTTPIngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HTTPIngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "paths": - if r.TryDecodeAsNil() { - x.Paths = nil - } else { - yyv4 := &x.Paths - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HTTPIngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Paths = nil - } else { - yyv7 := &x.Paths - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HTTPIngressPath) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - 
yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Path != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.Backend - yy7.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("backend")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.Backend - yy9.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HTTPIngressPath) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HTTPIngressPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "backend": - if r.TryDecodeAsNil() { - x.Backend = IngressBackend{} - } else { - yyv5 := &x.Backend - yyv5.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HTTPIngressPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r 
- var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Backend = IngressBackend{} - } else { - yyv8 := &x.Backend - yyv8.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressBackend) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.ServicePort - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) - } else { - z.EncFallback(yy7) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("servicePort")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.ServicePort - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressBackend) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - 
} else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressBackend) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "serviceName": - if r.TryDecodeAsNil() { - x.ServiceName = "" - } else { - x.ServiceName = string(r.DecodeString()) - } - case "servicePort": - if r.TryDecodeAsNil() { - x.ServicePort = pkg5_intstr.IntOrString{} - } else { - yyv5 := &x.ServicePort - yym6 := z.DecBinary() - _ = yym6 - if false { - } else if z.HasExtensions() && z.DecExt(yyv5) { - } else if !yym6 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv5) - } else { - z.DecFallback(yyv5, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressBackend) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServiceName = "" - } else { - x.ServiceName = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServicePort = pkg5_intstr.IntOrString{} - } else { - yyv9 := &x.ServicePort - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv9) - } else { - z.DecFallback(yyv9, false) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicaSet) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := 
range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicaSet) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) 
- } - } -} - -func (x *ReplicaSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ReplicaSetSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ReplicaSetStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicaSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ReplicaSetSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ReplicaSetStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicaSetList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicaSetList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicaSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceReplicaSet((*[]ReplicaSet)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicaSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceReplicaSet((*[]ReplicaSet)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { 
- x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicaSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.Selector != nil - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.Template - yy10.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.Template - yy12.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicaSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicaSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv7 := &x.Template - yyv7.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicaSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg1_unversioned.LabelSelector) - } - yym11 := z.DecBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_api.PodTemplateSpec{} - } else { - yyv12 := &x.Template - yyv12.CodecDecodeSelf(d) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicaSetStatus) CodecEncodeSelf(e 
*codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FullyLabeledReplicas != 0 - yyq2[2] = x.ObservedGeneration != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.FullyLabeledReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.FullyLabeledReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicaSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicaSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if 
yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "fullyLabeledReplicas": - if r.TryDecodeAsNil() { - x.FullyLabeledReplicas = 0 - } else { - x.FullyLabeledReplicas = int32(r.DecodeInt(32)) - } - case "observedGeneration": - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FullyLabeledReplicas = 0 - } else { - x.FullyLabeledReplicas = int32(r.DecodeInt(32)) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodSecurityPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || 
yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSecurityPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSecurityPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PodSecurityPolicySpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } 
else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSecurityPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv9 := &x.ObjectMeta - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PodSecurityPolicySpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [14]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Privileged != false - yyq2[1] = len(x.DefaultAddCapabilities) != 0 - yyq2[2] = len(x.RequiredDropCapabilities) != 0 - yyq2[3] = len(x.AllowedCapabilities) != 0 - yyq2[4] = len(x.Volumes) != 0 - yyq2[5] = x.HostNetwork != false - yyq2[6] = len(x.HostPorts) != 0 - yyq2[7] = x.HostPID != false - yyq2[8] = x.HostIPC != false - yyq2[13] = x.ReadOnlyRootFilesystem != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(14) - } else { - yynn2 = 4 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.Privileged)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("privileged")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.Privileged)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.DefaultAddCapabilities == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceapi_Capability(([]pkg2_api.Capability)(x.DefaultAddCapabilities), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("defaultAddCapabilities")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DefaultAddCapabilities == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceapi_Capability(([]pkg2_api.Capability)(x.DefaultAddCapabilities), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.RequiredDropCapabilities == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceapi_Capability(([]pkg2_api.Capability)(x.RequiredDropCapabilities), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("requiredDropCapabilities")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RequiredDropCapabilities == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - h.encSliceapi_Capability(([]pkg2_api.Capability)(x.RequiredDropCapabilities), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.AllowedCapabilities == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - h.encSliceapi_Capability(([]pkg2_api.Capability)(x.AllowedCapabilities), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("allowedCapabilities")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AllowedCapabilities == nil { - r.EncodeNil() - } else { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - h.encSliceapi_Capability(([]pkg2_api.Capability)(x.AllowedCapabilities), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.Volumes == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceFSType(([]FSType)(x.Volumes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Volumes == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - h.encSliceFSType(([]FSType)(x.Volumes), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeBool(bool(x.HostNetwork)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[5] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostNetwork")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeBool(bool(x.HostNetwork)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.HostPorts == nil { - r.EncodeNil() - } else { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPorts")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HostPorts == nil { - r.EncodeNil() - } else { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeBool(bool(x.HostPID)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeBool(bool(x.HostPID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeBool(bool(x.HostIPC)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostIPC")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - r.EncodeBool(bool(x.HostIPC)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy31 := &x.SELinux - yy31.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("seLinux")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy33 := &x.SELinux - yy33.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy36 := &x.RunAsUser - yy36.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy38 := &x.RunAsUser - yy38.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy41 := &x.SupplementalGroups - yy41.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("supplementalGroups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy43 := &x.SupplementalGroups - yy43.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy46 := &x.FSGroup - yy46.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsGroup")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy48 := &x.FSGroup - yy48.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - yym51 := z.EncBinary() - _ = yym51 - if false { - } else { - r.EncodeBool(bool(x.ReadOnlyRootFilesystem)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnlyRootFilesystem")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym52 := z.EncBinary() - _ = yym52 - if false { - } else { - r.EncodeBool(bool(x.ReadOnlyRootFilesystem)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSecurityPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSecurityPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "privileged": - if r.TryDecodeAsNil() { - x.Privileged = false - } else { - x.Privileged = bool(r.DecodeBool()) - } - case "defaultAddCapabilities": - if r.TryDecodeAsNil() { - x.DefaultAddCapabilities = nil - } else { - yyv5 := &x.DefaultAddCapabilities - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv5), d) - } - } - case "requiredDropCapabilities": - if r.TryDecodeAsNil() { - x.RequiredDropCapabilities = nil - } else { - yyv7 := &x.RequiredDropCapabilities - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv7), d) - } - } - case "allowedCapabilities": - if r.TryDecodeAsNil() { - x.AllowedCapabilities = nil - } else { - yyv9 := &x.AllowedCapabilities - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv9), d) - } - } - case "volumes": - if r.TryDecodeAsNil() { - x.Volumes = nil - } else { - yyv11 := &x.Volumes - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceFSType((*[]FSType)(yyv11), d) - } - } - case "hostNetwork": - if r.TryDecodeAsNil() { - x.HostNetwork = false - } else { - x.HostNetwork = bool(r.DecodeBool()) - } - case "hostPorts": - if 
r.TryDecodeAsNil() { - x.HostPorts = nil - } else { - yyv14 := &x.HostPorts - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - h.decSliceHostPortRange((*[]HostPortRange)(yyv14), d) - } - } - case "hostPID": - if r.TryDecodeAsNil() { - x.HostPID = false - } else { - x.HostPID = bool(r.DecodeBool()) - } - case "hostIPC": - if r.TryDecodeAsNil() { - x.HostIPC = false - } else { - x.HostIPC = bool(r.DecodeBool()) - } - case "seLinux": - if r.TryDecodeAsNil() { - x.SELinux = SELinuxStrategyOptions{} - } else { - yyv18 := &x.SELinux - yyv18.CodecDecodeSelf(d) - } - case "runAsUser": - if r.TryDecodeAsNil() { - x.RunAsUser = RunAsUserStrategyOptions{} - } else { - yyv19 := &x.RunAsUser - yyv19.CodecDecodeSelf(d) - } - case "supplementalGroups": - if r.TryDecodeAsNil() { - x.SupplementalGroups = SupplementalGroupsStrategyOptions{} - } else { - yyv20 := &x.SupplementalGroups - yyv20.CodecDecodeSelf(d) - } - case "fsGroup": - if r.TryDecodeAsNil() { - x.FSGroup = FSGroupStrategyOptions{} - } else { - yyv21 := &x.FSGroup - yyv21.CodecDecodeSelf(d) - } - case "readOnlyRootFilesystem": - if r.TryDecodeAsNil() { - x.ReadOnlyRootFilesystem = false - } else { - x.ReadOnlyRootFilesystem = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj23 int - var yyb23 bool - var yyhl23 bool = l >= 0 - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Privileged = false - } else { - x.Privileged = bool(r.DecodeBool()) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DefaultAddCapabilities = nil - } else { - yyv25 := &x.DefaultAddCapabilities - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv25), d) - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RequiredDropCapabilities = nil - } else { - yyv27 := &x.RequiredDropCapabilities - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv27), d) - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AllowedCapabilities = nil - } else { - yyv29 := &x.AllowedCapabilities - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - h.decSliceapi_Capability((*[]pkg2_api.Capability)(yyv29), d) - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Volumes = nil - } else { - yyv31 := &x.Volumes - yym32 := z.DecBinary() - _ = yym32 - if false { - } else { - h.decSliceFSType((*[]FSType)(yyv31), d) - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostNetwork = false - } else { - x.HostNetwork = bool(r.DecodeBool()) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostPorts = nil - } else { - yyv34 := &x.HostPorts - yym35 := z.DecBinary() - _ = yym35 - if false { - } else { - h.decSliceHostPortRange((*[]HostPortRange)(yyv34), d) - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostPID = false - } else { - x.HostPID = bool(r.DecodeBool()) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostIPC = false - } else { - x.HostIPC = bool(r.DecodeBool()) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SELinux = SELinuxStrategyOptions{} - } else { - yyv38 := &x.SELinux - yyv38.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RunAsUser = RunAsUserStrategyOptions{} - } else { - yyv39 := &x.RunAsUser - yyv39.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SupplementalGroups = SupplementalGroupsStrategyOptions{} - } else { - yyv40 := &x.SupplementalGroups - yyv40.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSGroup = FSGroupStrategyOptions{} - } else { - yyv41 := &x.FSGroup - yyv41.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnlyRootFilesystem = false - } else { - x.ReadOnlyRootFilesystem = bool(r.DecodeBool()) - } - for { - yyj23++ - if 
yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj23-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HostPortRange) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Min)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("min")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Min)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.Max)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("max")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.Max)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HostPortRange) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HostPortRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "min": - if r.TryDecodeAsNil() { - x.Min = 0 - } else { - x.Min = int(r.DecodeInt(codecSelferBitsize1234)) - } - case "max": - if r.TryDecodeAsNil() { - x.Max = 0 - } else { - x.Max = 
int(r.DecodeInt(codecSelferBitsize1234)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HostPortRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Min = 0 - } else { - x.Min = int(r.DecodeInt(codecSelferBitsize1234)) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Max = 0 - } else { - x.Max = int(r.DecodeInt(codecSelferBitsize1234)) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x FSType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *FSType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *SELinuxStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.SELinuxOptions != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Rule.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rule")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Rule.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SELinuxStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SELinuxStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "rule": - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = SELinuxStrategy(r.DecodeString()) - } - case "seLinuxOptions": - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(pkg2_api.SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SELinuxStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = SELinuxStrategy(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(pkg2_api.SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x SELinuxStrategy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } 
else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *SELinuxStrategy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *RunAsUserStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = len(x.Ranges) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Rule.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rule")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Rule.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Ranges == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ranges")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ranges == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RunAsUserStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RunAsUserStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, 
true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "rule": - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = RunAsUserStrategy(r.DecodeString()) - } - case "ranges": - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv5 := &x.Ranges - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv5), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RunAsUserStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = RunAsUserStrategy(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv9 := &x.Ranges - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv9), d) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IDRange) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Min)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("min")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Min)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.Max)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("max")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.Max)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - 
-func (x *IDRange) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IDRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "min": - if r.TryDecodeAsNil() { - x.Min = 0 - } else { - x.Min = int64(r.DecodeInt(64)) - } - case "max": - if r.TryDecodeAsNil() { - x.Max = 0 - } else { - x.Max = int64(r.DecodeInt(64)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IDRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Min = 0 - } else { - x.Min = int64(r.DecodeInt(64)) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Max = 0 - } else { - x.Max = int64(r.DecodeInt(64)) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x RunAsUserStrategy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *RunAsUserStrategy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *FSGroupStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Rule != "" - yyq2[1] = len(x.Ranges) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Rule.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rule")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Rule.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Ranges == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ranges")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ranges == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *FSGroupStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *FSGroupStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "rule": - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = FSGroupStrategyType(r.DecodeString()) - } - case "ranges": - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv5 := &x.Ranges - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - 
h.decSliceIDRange((*[]IDRange)(yyv5), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *FSGroupStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = FSGroupStrategyType(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv9 := &x.Ranges - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv9), d) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x FSGroupStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *FSGroupStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *SupplementalGroupsStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Rule != "" - yyq2[1] = len(x.Ranges) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Rule.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rule")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Rule.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Ranges == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ranges")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ranges == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SupplementalGroupsStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "rule": - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = SupplementalGroupsStrategyType(r.DecodeString()) - } - case "ranges": - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv5 := &x.Ranges - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv5), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = SupplementalGroupsStrategyType(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv9 := &x.Ranges - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv9), d) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x SupplementalGroupsStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *SupplementalGroupsStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *PodSecurityPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := 
z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSecurityPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSecurityPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSecurityPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = 
yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = NetworkPolicySpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - 
yyv9 := &x.ObjectMeta - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = NetworkPolicySpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = len(x.Ingress) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.PodSelector - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.PodSelector - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Ingress == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ingress")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ingress == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicySpec) CodecDecodeSelf(d 
*codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "podSelector": - if r.TryDecodeAsNil() { - x.PodSelector = pkg1_unversioned.LabelSelector{} - } else { - yyv4 := &x.PodSelector - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "ingress": - if r.TryDecodeAsNil() { - x.Ingress = nil - } else { - yyv6 := &x.Ingress - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv6), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodSelector = pkg1_unversioned.LabelSelector{} - } else { - yyv9 := &x.PodSelector - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { - } else { - z.DecFallback(yyv9, false) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ingress = nil - } else { - yyv11 := &x.Ingress - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv11), d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicyIngressRule) CodecEncodeSelf(e *codec1978.Encoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Ports) != 0 - yyq2[1] = len(x.From) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Ports == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ports")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ports == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.From == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("from")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.From == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicyIngressRule) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicyIngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "ports": - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv4 := &x.Ports - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv4), d) - } - } - case "from": - if r.TryDecodeAsNil() { - x.From = nil - } else { - yyv6 := &x.From - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv6), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicyIngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv9 := &x.Ports - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv9), d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.From = nil - } else { - yyv11 := &x.From - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv11), d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicyPort) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Protocol != nil - yyq2[1] = x.Port != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Protocol == nil { - r.EncodeNil() - } else { - yy4 := *x.Protocol - yysf5 := &yy4 - yysf5.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("protocol")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Protocol == nil { - r.EncodeNil() - } else { - yy6 := *x.Protocol - yysf7 := &yy6 - yysf7.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Port == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && 
z.EncExt(x.Port) { - } else if !yym9 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Port) - } else { - z.EncFallback(x.Port) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Port == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.Port) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Port) - } else { - z.EncFallback(x.Port) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicyPort) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicyPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "protocol": - if r.TryDecodeAsNil() { - if x.Protocol != nil { - x.Protocol = nil - } - } else { - if x.Protocol == nil { - x.Protocol = new(pkg2_api.Protocol) - } - x.Protocol.CodecDecodeSelf(d) - } - case "port": - if r.TryDecodeAsNil() { - if x.Port != nil { - x.Port = nil - } - } else { - if x.Port == nil { - x.Port = new(pkg5_intstr.IntOrString) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else if z.HasExtensions() && z.DecExt(x.Port) { - } else if !yym6 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Port) - } else { - z.DecFallback(x.Port, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicyPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Protocol != nil { - x.Protocol = nil - } - } else { - if x.Protocol == nil { - x.Protocol = new(pkg2_api.Protocol) - } - 
x.Protocol.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Port != nil { - x.Port = nil - } - } else { - if x.Port == nil { - x.Port = new(pkg5_intstr.IntOrString) - } - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(x.Port) { - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Port) - } else { - z.DecFallback(x.Port, false) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicyPeer) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.PodSelector != nil - yyq2[1] = x.NamespaceSelector != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.PodSelector == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else if z.HasExtensions() && z.EncExt(x.PodSelector) { - } else { - z.EncFallback(x.PodSelector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PodSelector == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(x.PodSelector) { - } else { - z.EncFallback(x.PodSelector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.NamespaceSelector == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.NamespaceSelector) { - } else { - z.EncFallback(x.NamespaceSelector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespaceSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NamespaceSelector == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.NamespaceSelector) { - } else { - z.EncFallback(x.NamespaceSelector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicyPeer) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = 
yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicyPeer) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "podSelector": - if r.TryDecodeAsNil() { - if x.PodSelector != nil { - x.PodSelector = nil - } - } else { - if x.PodSelector == nil { - x.PodSelector = new(pkg1_unversioned.LabelSelector) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(x.PodSelector) { - } else { - z.DecFallback(x.PodSelector, false) - } - } - case "namespaceSelector": - if r.TryDecodeAsNil() { - if x.NamespaceSelector != nil { - x.NamespaceSelector = nil - } - } else { - if x.NamespaceSelector == nil { - x.NamespaceSelector = new(pkg1_unversioned.LabelSelector) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.NamespaceSelector) { - } else { - z.DecFallback(x.NamespaceSelector, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicyPeer) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PodSelector != nil { - x.PodSelector = nil - } - } else { - if x.PodSelector == nil { - x.PodSelector = new(pkg1_unversioned.LabelSelector) - } - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(x.PodSelector) { - } else { - z.DecFallback(x.PodSelector, false) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NamespaceSelector != nil { - x.NamespaceSelector = nil - } - } else { - if x.NamespaceSelector == nil { - x.NamespaceSelector = new(pkg1_unversioned.LabelSelector) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(x.NamespaceSelector) { - } else { - z.DecFallback(x.NamespaceSelector, false) - } - } - for { - yyj8++ - if yyhl8 { 
- yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv13), d) - } - } - yyj10++ - if 
yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceCustomMetricTarget(v []CustomMetricTarget, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []CustomMetricTarget{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]CustomMetricTarget, yyrl1) - } - } else { - yyv1 = make([]CustomMetricTarget, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricTarget{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, CustomMetricTarget{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricTarget{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, CustomMetricTarget{}) // var yyz1 CustomMetricTarget - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricTarget{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []CustomMetricTarget{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceCustomMetricCurrentStatus(v []CustomMetricCurrentStatus, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurrentStatus, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []CustomMetricCurrentStatus{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]CustomMetricCurrentStatus, yyrl1) - } - } else { - yyv1 = make([]CustomMetricCurrentStatus, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricCurrentStatus{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, CustomMetricCurrentStatus{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricCurrentStatus{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, CustomMetricCurrentStatus{}) // var yyz1 CustomMetricCurrentStatus - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricCurrentStatus{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []CustomMetricCurrentStatus{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceAPIVersion(v []APIVersion, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []APIVersion{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]APIVersion, yyrl1) - 
} - } else { - yyv1 = make([]APIVersion, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = APIVersion{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, APIVersion{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = APIVersion{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, APIVersion{}) // var yyz1 APIVersion - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = APIVersion{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []APIVersion{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceThirdPartyResource(v []ThirdPartyResource, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ThirdPartyResource{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ThirdPartyResource, yyrl1) - } - } else { - yyv1 = make([]ThirdPartyResource, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResource{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ThirdPartyResource{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResource{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ThirdPartyResource{}) // var yyz1 ThirdPartyResource - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResource{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { 
- yyv1 = []ThirdPartyResource{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceDeployment(v []Deployment, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Deployment{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Deployment, yyrl1) - } - } else { - yyv1 = make([]Deployment, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Deployment{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Deployment{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Deployment{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Deployment{}) // var yyz1 Deployment - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Deployment{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Deployment{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceDaemonSet(v []DaemonSet, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []DaemonSet{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 696) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { 
- yyv1 = make([]DaemonSet, yyrl1) - } - } else { - yyv1 = make([]DaemonSet, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = DaemonSet{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, DaemonSet{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = DaemonSet{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, DaemonSet{}) // var yyz1 DaemonSet - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = DaemonSet{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []DaemonSet{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceThirdPartyResourceData(v []ThirdPartyResourceData, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceData, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ThirdPartyResourceData{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ThirdPartyResourceData, yyrl1) - } - } else { - yyv1 = make([]ThirdPartyResourceData, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResourceData{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ThirdPartyResourceData{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResourceData{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ThirdPartyResourceData{}) // var yyz1 ThirdPartyResourceData - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResourceData{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < 
len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ThirdPartyResourceData{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceIngress(v []Ingress, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Ingress{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 320) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Ingress, yyrl1) - } - } else { - yyv1 = make([]Ingress, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Ingress{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Ingress{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Ingress{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Ingress{}) // var yyz1 Ingress - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Ingress{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Ingress{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceIngressTLS(v []IngressTLS, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []IngressTLS{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) - if yyrt1 { - if 
yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]IngressTLS, yyrl1) - } - } else { - yyv1 = make([]IngressTLS, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressTLS{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, IngressTLS{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressTLS{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, IngressTLS{}) // var yyz1 IngressTLS - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressTLS{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []IngressTLS{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceIngressRule(v []IngressRule, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []IngressRule{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]IngressRule, yyrl1) - } - } else { - yyv1 = make([]IngressRule, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressRule{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, IngressRule{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressRule{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, IngressRule{}) // var yyz1 IngressRule - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressRule{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - 
yyv1 = []IngressRule{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceHTTPIngressPath(v []HTTPIngressPath, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []HTTPIngressPath{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]HTTPIngressPath, yyrl1) - } - } else { - yyv1 = make([]HTTPIngressPath, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPIngressPath{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, HTTPIngressPath{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPIngressPath{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, HTTPIngressPath{}) // var yyz1 HTTPIngressPath - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPIngressPath{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []HTTPIngressPath{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceReplicaSet(v []ReplicaSet, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ReplicaSet{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 704) - if 
yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ReplicaSet, yyrl1) - } - } else { - yyv1 = make([]ReplicaSet, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicaSet{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ReplicaSet{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicaSet{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ReplicaSet{}) // var yyz1 ReplicaSet - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicaSet{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ReplicaSet{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceapi_Capability(v []pkg2_api.Capability, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yysf2 := &yyv1 - yysf2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceapi_Capability(v *[]pkg2_api.Capability, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []pkg2_api.Capability{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]pkg2_api.Capability, yyrl1) - } - } else { - yyv1 = make([]pkg2_api.Capability, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = pkg2_api.Capability(r.DecodeString()) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = pkg2_api.Capability(r.DecodeString()) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 pkg2_api.Capability - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = pkg2_api.Capability(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []pkg2_api.Capability{} - yyc1 = true - 
} - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceFSType(v []FSType, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []FSType{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]FSType, yyrl1) - } - } else { - yyv1 = make([]FSType, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = FSType(r.DecodeString()) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = FSType(r.DecodeString()) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 FSType - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = FSType(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []FSType{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceHostPortRange(v []HostPortRange, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []HostPortRange{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]HostPortRange, yyrl1) - } - } else { - yyv1 = make([]HostPortRange, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true 
- } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HostPortRange{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, HostPortRange{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HostPortRange{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, HostPortRange{}) // var yyz1 HostPortRange - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = HostPortRange{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []HostPortRange{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceIDRange(v []IDRange, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []IDRange{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]IDRange, yyrl1) - } - } else { - yyv1 = make([]IDRange, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = IDRange{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, IDRange{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = IDRange{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, IDRange{}) // var yyz1 IDRange - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = IDRange{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []IDRange{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePodSecurityPolicy(v []PodSecurityPolicy, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, 
yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PodSecurityPolicy{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 536) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PodSecurityPolicy, yyrl1) - } - } else { - yyv1 = make([]PodSecurityPolicy, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodSecurityPolicy{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PodSecurityPolicy{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodSecurityPolicy{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PodSecurityPolicy{}) // var yyz1 PodSecurityPolicy - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodSecurityPolicy{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PodSecurityPolicy{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNetworkPolicyIngressRule(v []NetworkPolicyIngressRule, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNetworkPolicyIngressRule(v *[]NetworkPolicyIngressRule, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NetworkPolicyIngressRule{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NetworkPolicyIngressRule, yyrl1) - } - } else { - yyv1 = make([]NetworkPolicyIngressRule, yyrl1) - } - yyc1 = true - yyrr1 = 
len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyIngressRule{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NetworkPolicyIngressRule{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyIngressRule{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NetworkPolicyIngressRule{}) // var yyz1 NetworkPolicyIngressRule - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyIngressRule{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NetworkPolicyIngressRule{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNetworkPolicyPort(v []NetworkPolicyPort, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNetworkPolicyPort(v *[]NetworkPolicyPort, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NetworkPolicyPort{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NetworkPolicyPort, yyrl1) - } - } else { - yyv1 = make([]NetworkPolicyPort, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyPort{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NetworkPolicyPort{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyPort{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NetworkPolicyPort{}) // var yyz1 NetworkPolicyPort - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyPort{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 
== nil { - yyv1 = []NetworkPolicyPort{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNetworkPolicyPeer(v []NetworkPolicyPeer, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNetworkPolicyPeer(v *[]NetworkPolicyPeer, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NetworkPolicyPeer{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NetworkPolicyPeer, yyrl1) - } - } else { - yyv1 = make([]NetworkPolicyPeer, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyPeer{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NetworkPolicyPeer{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyPeer{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NetworkPolicyPeer{}) // var yyz1 NetworkPolicyPeer - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyPeer{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NetworkPolicyPeer{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNetworkPolicy(v []NetworkPolicy, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNetworkPolicy(v *[]NetworkPolicy, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NetworkPolicy{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = 
z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NetworkPolicy, yyrl1) - } - } else { - yyv1 = make([]NetworkPolicy, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicy{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NetworkPolicy{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicy{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NetworkPolicy{}) // var yyz1 NetworkPolicy - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicy{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NetworkPolicy{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go deleted file mode 100644 index 9db03ab7c..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go +++ /dev/null @@ -1,898 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -This file (together with pkg/apis/extensions/v1beta1/types.go) contain the experimental -types in kubernetes. These API objects are experimental, meaning that the -APIs may be broken at any time by the kubernetes team. - -DISCLAIMER: The implementation of the experimental API group itself is -a temporary one meant as a stopgap solution until kubernetes has proper -support for multiple API groups. The transition may require changes -beyond registration differences. In other words, experimental API group -support is experimental. -*/ - -package extensions - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/util/intstr" -) - -// describes the attributes of a scale subresource -type ScaleSpec struct { - // desired number of instances for the scaled object. - Replicas int32 `json:"replicas,omitempty"` -} - -// represents the current status of a scale subresource. -type ScaleStatus struct { - // actual number of observed instances of the scaled object. - Replicas int32 `json:"replicas"` - - // label query over pods that should match the replicas count. 
- // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - Selector *unversioned.LabelSelector `json:"selector,omitempty"` -} - -// +genclient=true,noMethods=true - -// represents a scaling request for a resource. -type Scale struct { - unversioned.TypeMeta `json:",inline"` - // Standard object metadata; More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata. - api.ObjectMeta `json:"metadata,omitempty"` - - // defines the behavior of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. - Spec ScaleSpec `json:"spec,omitempty"` - - // current status of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. Read-only. - Status ScaleStatus `json:"status,omitempty"` -} - -// Dummy definition -type ReplicationControllerDummy struct { - unversioned.TypeMeta `json:",inline"` -} - -// Alpha-level support for Custom Metrics in HPA (as annotations). -type CustomMetricTarget struct { - // Custom Metric name. - Name string `json:"name"` - // Custom Metric value (average). - TargetValue resource.Quantity `json:"value"` -} - -type CustomMetricTargetList struct { - Items []CustomMetricTarget `json:"items"` -} - -type CustomMetricCurrentStatus struct { - // Custom Metric name. - Name string `json:"name"` - // Custom Metric value (average). - CurrentValue resource.Quantity `json:"value"` -} - -type CustomMetricCurrentStatusList struct { - Items []CustomMetricCurrentStatus `json:"items"` -} - -// +genclient=true,nonNamespaced=true - -// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource -// types to the API. It consists of one or more Versions of the api. -type ThirdPartyResource struct { - unversioned.TypeMeta `json:",inline"` - - // Standard object metadata - api.ObjectMeta `json:"metadata,omitempty"` - - // Description is the description of this object. - Description string `json:"description,omitempty"` - - // Versions are versions for this third party object - Versions []APIVersion `json:"versions,omitempty"` -} - -type ThirdPartyResourceList struct { - unversioned.TypeMeta `json:",inline"` - - // Standard list metadata. - unversioned.ListMeta `json:"metadata,omitempty"` - - // Items is the list of horizontal pod autoscalers. - Items []ThirdPartyResource `json:"items"` -} - -// An APIVersion represents a single concrete version of an object model. -// TODO: we should consider merge this struct with GroupVersion in unversioned.go -type APIVersion struct { - // Name of this version (e.g. 'v1'). - Name string `json:"name,omitempty"` -} - -// An internal object, used for versioned storage in etcd. Not exposed to the end user. -type ThirdPartyResourceData struct { - unversioned.TypeMeta `json:",inline"` - // Standard object metadata. - api.ObjectMeta `json:"metadata,omitempty"` - - // Data is the raw JSON data for this data. - Data []byte `json:"data,omitempty"` -} - -// +genclient=true - -type Deployment struct { - unversioned.TypeMeta `json:",inline"` - api.ObjectMeta `json:"metadata,omitempty"` - - // Specification of the desired behavior of the Deployment. - Spec DeploymentSpec `json:"spec,omitempty"` - - // Most recently observed status of the Deployment. - Status DeploymentStatus `json:"status,omitempty"` -} - -type DeploymentSpec struct { - // Number of desired pods. This is a pointer to distinguish between explicit - // zero and not specified. Defaults to 1. 
- Replicas int32 `json:"replicas,omitempty"` - - // Label selector for pods. Existing ReplicaSets whose pods are - // selected by this will be the ones affected by this deployment. - Selector *unversioned.LabelSelector `json:"selector,omitempty"` - - // Template describes the pods that will be created. - Template api.PodTemplateSpec `json:"template"` - - // The deployment strategy to use to replace existing pods with new ones. - Strategy DeploymentStrategy `json:"strategy,omitempty"` - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - MinReadySeconds int32 `json:"minReadySeconds,omitempty"` - - // The number of old ReplicaSets to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"` - - // Indicates that the deployment is paused and will not be processed by the - // deployment controller. - Paused bool `json:"paused,omitempty"` - // The config this deployment is rolling back to. Will be cleared after rollback is done. - RollbackTo *RollbackConfig `json:"rollbackTo,omitempty"` -} - -// DeploymentRollback stores the information required to rollback a deployment. -type DeploymentRollback struct { - unversioned.TypeMeta `json:",inline"` - // Required: This must match the Name of a deployment. - Name string `json:"name"` - // The annotations to be updated to a deployment - UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty"` - // The config of this deployment rollback. - RollbackTo RollbackConfig `json:"rollbackTo"` -} - -type RollbackConfig struct { - // The revision to rollback to. If set to 0, rollbck to the last revision. - Revision int64 `json:"revision,omitempty"` -} - -const ( - // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added - // to existing RCs (and label key that is added to its pods) to prevent the existing RCs - // to select new pods (and old pods being select by new RC). - DefaultDeploymentUniqueLabelKey string = "pod-template-hash" -) - -type DeploymentStrategy struct { - // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. - Type DeploymentStrategyType `json:"type,omitempty"` - - // Rolling update config params. Present only if DeploymentStrategyType = - // RollingUpdate. - //--- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. - RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty"` -} - -type DeploymentStrategyType string - -const ( - // Kill all existing pods before creating new ones. - RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" - - // Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one. - RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" -) - -// Spec to control the desired behavior of rolling update. -type RollingUpdateDeployment struct { - // The maximum number of pods that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). - // Absolute number is calculated from percentage by rounding up. - // This can not be 0 if MaxSurge is 0. - // By default, a fixed value of 1 is used. 
- // Example: when this is set to 30%, the old RC can be scaled down by 30% - // immediately when the rolling update starts. Once new pods are ready, old RC - // can be scaled down further, followed by scaling up the new RC, ensuring - // that at least 70% of original number of pods are available at all times - // during the update. - MaxUnavailable intstr.IntOrString `json:"maxUnavailable,omitempty"` - - // The maximum number of pods that can be scheduled above the original number of - // pods. - // Value can be an absolute number (ex: 5) or a percentage of total pods at - // the start of the update (ex: 10%). This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // By default, a value of 1 is used. - // Example: when this is set to 30%, the new RC can be scaled up by 30% - // immediately when the rolling update starts. Once old pods have been killed, - // new RC can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of original pods. - MaxSurge intstr.IntOrString `json:"maxSurge,omitempty"` -} - -type DeploymentStatus struct { - // The generation observed by the deployment controller. - ObservedGeneration int64 `json:"observedGeneration,omitempty"` - - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). - Replicas int32 `json:"replicas,omitempty"` - - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. - UpdatedReplicas int32 `json:"updatedReplicas,omitempty"` - - // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. - AvailableReplicas int32 `json:"availableReplicas,omitempty"` - - // Total number of unavailable pods targeted by this deployment. - UnavailableReplicas int32 `json:"unavailableReplicas,omitempty"` -} - -type DeploymentList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - - // Items is the list of deployments. - Items []Deployment `json:"items"` -} - -// TODO(madhusudancs): Uncomment while implementing DaemonSet updates. -/* Commenting out for v1.2. We are planning to bring these types back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting the types out. -type DaemonSetUpdateStrategy struct { - // Type of daemon set update. Only "RollingUpdate" is supported at this time. Default is RollingUpdate. - Type DaemonSetUpdateStrategyType `json:"type,omitempty"` - - // Rolling update config params. Present only if DaemonSetUpdateStrategy = - // RollingUpdate. - //--- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. Same as DeploymentStrategy.RollingUpdate. - RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty"` -} - -type DaemonSetUpdateStrategyType string - -const ( - // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. - RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" -) - -// Spec to control the desired behavior of daemon set rolling update. -type RollingUpdateDaemonSet struct { - // The maximum number of DaemonSet pods that can be unavailable during the - // update. Value can be an absolute number (ex: 5) or a percentage of total - // number of DaemonSet pods at the start of the update (ex: 10%). Absolute - // number is calculated from percentage by rounding up. 
- // This cannot be 0. - // Default value is 1. - // Example: when this is set to 30%, 30% of the currently running DaemonSet - // pods can be stopped for an update at any given time. The update starts - // by stopping at most 30% of the currently running DaemonSet pods and then - // brings up new DaemonSet pods in their place. Once the new pods are ready, - // it then proceeds onto other DaemonSet pods, thus ensuring that at least - // 70% of original number of DaemonSet pods are available at all times - // during the update. - MaxUnavailable intstr.IntOrString `json:"maxUnavailable,omitempty"` - - // Minimum number of seconds for which a newly created DaemonSet pod should - // be ready without any of its container crashing, for it to be considered - // available. Defaults to 0 (pod will be considered available as soon as it - // is ready). - MinReadySeconds int `json:"minReadySeconds,omitempty"` -} -*/ - -// DaemonSetSpec is the specification of a daemon set. -type DaemonSetSpec struct { - // Selector is a label query over pods that are managed by the daemon set. - // Must match in order to be controlled. - // If empty, defaulted to labels on Pod template. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - Selector *unversioned.LabelSelector `json:"selector,omitempty"` - - // Template is the object that describes the pod that will be created. - // The DaemonSet will create exactly one copy of this pod on every node - // that matches the template's node selector (or on every node if no node - // selector is specified). - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#pod-template - Template api.PodTemplateSpec `json:"template"` - - // TODO(madhusudancs): Uncomment while implementing DaemonSet updates. - /* Commenting out for v1.2. We are planning to bring these fields back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting these fields out. - // Update strategy to replace existing DaemonSet pods with new pods. - UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty"` - - // Label key that is added to DaemonSet pods to distinguish between old and - // new pod templates during DaemonSet update. - // Users can set this to an empty string to indicate that the system should - // not add any label. If unspecified, system uses - // DefaultDaemonSetUniqueLabelKey("daemonset.kubernetes.io/podTemplateHash"). - // Value of this key is hash of DaemonSetSpec.PodTemplateSpec. - // No label is added if this is set to empty string. - UniqueLabelKey string `json:"uniqueLabelKey,omitempty"` - */ -} - -const ( - // DefaultDaemonSetUniqueLabelKey is the default key of the labels that is added - // to daemon set pods to distinguish between old and new pod templates during - // DaemonSet update. See DaemonSetSpec's UniqueLabelKey field for more information. - DefaultDaemonSetUniqueLabelKey string = "daemonset.kubernetes.io/podTemplateHash" -) - -// DaemonSetStatus represents the current status of a daemon set. -type DaemonSetStatus struct { - // CurrentNumberScheduled is the number of nodes that are running at least 1 - // daemon pod and are supposed to run the daemon pod. - CurrentNumberScheduled int32 `json:"currentNumberScheduled"` - - // NumberMisscheduled is the number of nodes that are running the daemon pod, but are - // not supposed to run the daemon pod. 
- NumberMisscheduled int32 `json:"numberMisscheduled"` - - // DesiredNumberScheduled is the total number of nodes that should be running the daemon - // pod (including nodes correctly running the daemon pod). - DesiredNumberScheduled int32 `json:"desiredNumberScheduled"` -} - -// +genclient=true - -// DaemonSet represents the configuration of a daemon set. -type DaemonSet struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - api.ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the desired behavior of this daemon set. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Spec DaemonSetSpec `json:"spec,omitempty"` - - // Status is the current status of this daemon set. This data may be - // out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Status DaemonSetStatus `json:"status,omitempty"` -} - -// DaemonSetList is a collection of daemon sets. -type DaemonSetList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty"` - - // Items is a list of daemon sets. - Items []DaemonSet `json:"items"` -} - -type ThirdPartyResourceDataList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty"` - // Items is a list of third party objects - Items []ThirdPartyResourceData `json:"items"` -} - -// +genclient=true - -// Ingress is a collection of rules that allow inbound connections to reach the -// endpoints defined by a backend. An Ingress can be configured to give services -// externally-reachable urls, load balance traffic, terminate SSL, offer name -// based virtual hosting etc. -type Ingress struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - api.ObjectMeta `json:"metadata,omitempty"` - - // Spec is the desired state of the Ingress. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Spec IngressSpec `json:"spec,omitempty"` - - // Status is the current state of the Ingress. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Status IngressStatus `json:"status,omitempty"` -} - -// IngressList is a collection of Ingress. -type IngressList struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty"` - - // Items is the list of Ingress. - Items []Ingress `json:"items"` -} - -// IngressSpec describes the Ingress the user wishes to exist. -type IngressSpec struct { - // A default backend capable of servicing requests that don't match any - // rule. At least one of 'backend' or 'rules' must be specified. This field - // is optional to allow the loadbalancer controller or defaulting logic to - // specify a global default. - Backend *IngressBackend `json:"backend,omitempty"` - - // TLS configuration. 
Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified - // through the SNI TLS extension, if the ingress controller fulfilling the - // ingress supports SNI. - TLS []IngressTLS `json:"tls,omitempty"` - - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. - Rules []IngressRule `json:"rules,omitempty"` - // TODO: Add the ability to specify load-balancer IP through claims -} - -// IngressTLS describes the transport layer security associated with an Ingress. -type IngressTLS struct { - // Hosts are a list of hosts included in the TLS certificate. The values in - // this list must match the name/s used in the tlsSecret. Defaults to the - // wildcard host setting for the loadbalancer controller fulfilling this - // Ingress, if left unspecified. - Hosts []string `json:"hosts,omitempty"` - // SecretName is the name of the secret used to terminate SSL traffic on 443. - // Field is left optional to allow SSL routing based on SNI hostname alone. - // If the SNI host in a listener conflicts with the "Host" header field used - // by an IngressRule, the SNI host is used for termination and value of the - // Host header is used for routing. - SecretName string `json:"secretName,omitempty"` - // TODO: Consider specifying different modes of termination, protocols etc. -} - -// IngressStatus describe the current state of the Ingress. -type IngressStatus struct { - // LoadBalancer contains the current status of the load-balancer. - LoadBalancer api.LoadBalancerStatus `json:"loadBalancer,omitempty"` -} - -// IngressRule represents the rules mapping the paths under a specified host to -// the related backend services. Incoming requests are first evaluated for a host -// match, then routed to the backend associated with the matching IngressRuleValue. -type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined - // by RFC 3986. Note the following deviations from the "host" part of the - // URI as defined in the RFC: - // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the - // IP in the Spec of the parent Ingress. - // 2. The `:` delimiter is not respected because ports are not allowed. - // Currently the port of an Ingress is implicitly :80 for http and - // :443 for https. - // Both these may change in the future. - // Incoming requests are matched against the host before the IngressRuleValue. - // If the host is unspecified, the Ingress routes all traffic based on the - // specified IngressRuleValue. - Host string `json:"host,omitempty"` - // IngressRuleValue represents a rule to route requests for this IngressRule. - // If unspecified, the rule defaults to a http catch-all. Whether that sends - // just traffic matching the host to the default backend or all traffic to the - // default backend, is left to the controller fulfilling the Ingress. Http is - // currently the only supported IngressRuleValue. - IngressRuleValue `json:",inline,omitempty"` -} - -// IngressRuleValue represents a rule to apply against incoming requests. If the -// rule is satisfied, the request is routed to the specified backend. Currently -// mixing different types of rules in a single Ingress is disallowed, so exactly -// one of the following must be set. -type IngressRuleValue struct { - //TODO: - // 1. 
Consider renaming this resource and the associated rules so they - // aren't tied to Ingress. They can be used to route intra-cluster traffic. - // 2. Consider adding fields for ingress-type specific global options - // usable by a loadbalancer, like http keep-alive. - - HTTP *HTTPIngressRuleValue `json:"http,omitempty"` -} - -// HTTPIngressRuleValue is a list of http selectors pointing to backends. -// In the example: http://<host>/<path>?<searchpart> -> backend where -// where parts of the url correspond to RFC 3986, this resource will be used -// to match against everything after the last '/' and before the first '?' -// or '#'. -type HTTPIngressRuleValue struct { - // A collection of paths that map requests to backends. - Paths []HTTPIngressPath `json:"paths"` - // TODO: Consider adding fields for ingress-type specific global - // options usable by a loadbalancer, like http keep-alive. -} - -// HTTPIngressPath associates a path regex with a backend. Incoming urls matching -// the path are forwarded to the backend. -type HTTPIngressPath struct { - // Path is a extended POSIX regex as defined by IEEE Std 1003.1, - // (i.e this follows the egrep/unix syntax, not the perl syntax) - // matched against the path of an incoming request. Currently it can - // contain characters disallowed from the conventional "path" - // part of a URL as defined by RFC 3986. Paths must begin with - // a '/'. If unspecified, the path defaults to a catch all sending - // traffic to the backend. - Path string `json:"path,omitempty"` - - // Backend defines the referenced service endpoint to which the traffic - // will be forwarded to. - Backend IngressBackend `json:"backend"` -} - -// IngressBackend describes all endpoints for a given service and port. -type IngressBackend struct { - // Specifies the name of the referenced service. - ServiceName string `json:"serviceName"` - - // Specifies the port of the referenced service. - ServicePort intstr.IntOrString `json:"servicePort"` -} - -// +genclient=true - -// ReplicaSet represents the configuration of a replica set. -type ReplicaSet struct { - unversioned.TypeMeta `json:",inline"` - api.ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the desired behavior of this ReplicaSet. - Spec ReplicaSetSpec `json:"spec,omitempty"` - - // Status is the current status of this ReplicaSet. This data may be - // out of date by some window of time. - Status ReplicaSetStatus `json:"status,omitempty"` -} - -// ReplicaSetList is a collection of ReplicaSets. -type ReplicaSetList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - - Items []ReplicaSet `json:"items"` -} - -// ReplicaSetSpec is the specification of a ReplicaSet. -// As the internal representation of a ReplicaSet, it must have -// a Template set. -type ReplicaSetSpec struct { - // Replicas is the number of desired replicas. - Replicas int32 `json:"replicas"` - - // Selector is a label query over pods that should match the replica count. - // Must match in order to be controlled. - // If empty, defaulted to labels on pod template. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - Selector *unversioned.LabelSelector `json:"selector,omitempty"` - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. - Template api.PodTemplateSpec `json:"template,omitempty"` -} - -// ReplicaSetStatus represents the current status of a ReplicaSet. 
-type ReplicaSetStatus struct { - // Replicas is the number of actual replicas. - Replicas int32 `json:"replicas"` - - // The number of pods that have labels matching the labels of the pod template of the replicaset. - FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty"` - - // ObservedGeneration is the most recent generation observed by the controller. - ObservedGeneration int64 `json:"observedGeneration,omitempty"` -} - -// +genclient=true,nonNamespaced=true - -// PodSecurityPolicy governs the ability to make requests that affect the SecurityContext -// that will be applied to a pod and container. -type PodSecurityPolicy struct { - unversioned.TypeMeta `json:",inline"` - api.ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the policy enforced. - Spec PodSecurityPolicySpec `json:"spec,omitempty"` -} - -// PodSecurityPolicySpec defines the policy enforced. -type PodSecurityPolicySpec struct { - // Privileged determines if a pod can request to be run as privileged. - Privileged bool `json:"privileged,omitempty"` - // DefaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capabiility in both - // DefaultAddCapabilities and RequiredDropCapabilities. - DefaultAddCapabilities []api.Capability `json:"defaultAddCapabilities,omitempty"` - // RequiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - RequiredDropCapabilities []api.Capability `json:"requiredDropCapabilities,omitempty"` - // AllowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. - AllowedCapabilities []api.Capability `json:"allowedCapabilities,omitempty"` - // Volumes is a white list of allowed volume plugins. Empty indicates that all plugins - // may be used. - Volumes []FSType `json:"volumes,omitempty"` - // HostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - HostNetwork bool `json:"hostNetwork,omitempty"` - // HostPorts determines which host port ranges are allowed to be exposed. - HostPorts []HostPortRange `json:"hostPorts,omitempty"` - // HostPID determines if the policy allows the use of HostPID in the pod spec. - HostPID bool `json:"hostPID,omitempty"` - // HostIPC determines if the policy allows the use of HostIPC in the pod spec. - HostIPC bool `json:"hostIPC,omitempty"` - // SELinux is the strategy that will dictate the allowable labels that may be set. - SELinux SELinuxStrategyOptions `json:"seLinux"` - // RunAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - RunAsUser RunAsUserStrategyOptions `json:"runAsUser"` - // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups"` - // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. - FSGroup FSGroupStrategyOptions `json:"fsGroup"` - // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. 
- // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty"` -} - -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -type HostPortRange struct { - // Min is the start of the range, inclusive. - Min int `json:"min"` - // Max is the end of the range, inclusive. - Max int `json:"max"` -} - -// FSType gives strong typing to different file systems that are used by volumes. -type FSType string - -var ( - AzureFile FSType = "azureFile" - Flocker FSType = "flocker" - FlexVolume FSType = "flexVolume" - HostPath FSType = "hostPath" - EmptyDir FSType = "emptyDir" - GCEPersistentDisk FSType = "gcePersistentDisk" - AWSElasticBlockStore FSType = "awsElasticBlockStore" - GitRepo FSType = "gitRepo" - Secret FSType = "secret" - NFS FSType = "nfs" - ISCSI FSType = "iscsi" - Glusterfs FSType = "glusterfs" - PersistentVolumeClaim FSType = "persistentVolumeClaim" - RBD FSType = "rbd" - Cinder FSType = "cinder" - CephFS FSType = "cephFS" - DownwardAPI FSType = "downwardAPI" - FC FSType = "fc" - ConfigMap FSType = "configMap" - VsphereVolume FSType = "vsphereVolume" - All FSType = "*" -) - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -type SELinuxStrategyOptions struct { - // Rule is the strategy that will dictate the allowable labels that may be set. - Rule SELinuxStrategy `json:"rule"` - // seLinuxOptions required to run as; required for MustRunAs - // More info: http://releases.k8s.io/release-1.3/docs/design/security_context.md#security-context - SELinuxOptions *api.SELinuxOptions `json:"seLinuxOptions,omitempty"` -} - -// SELinuxStrategy denotes strategy types for generating SELinux options for a -// Security. -type SELinuxStrategy string - -const ( - // container must have SELinux labels of X applied. - SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" - // container may make requests for any SELinux context labels. - SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" -) - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -type RunAsUserStrategyOptions struct { - // Rule is the strategy that will dictate the allowable RunAsUser values that may be set. - Rule RunAsUserStrategy `json:"rule"` - // Ranges are the allowed ranges of uids that may be used. - Ranges []IDRange `json:"ranges,omitempty"` -} - -// IDRange provides a min/max of an allowed range of IDs. -type IDRange struct { - // Min is the start of the range, inclusive. - Min int64 `json:"min"` - // Max is the end of the range, inclusive. - Max int64 `json:"max"` -} - -// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a -// SecurityContext. -type RunAsUserStrategy string - -const ( - // container must run as a particular uid. - RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" - // container must run as a non-root uid - RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" - // container may make requests for any uid. - RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" -) - -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -type FSGroupStrategyOptions struct { - // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext. 
- Rule FSGroupStrategyType `json:"rule,omitempty"` - // Ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. - Ranges []IDRange `json:"ranges,omitempty"` -} - -// FSGroupStrategyType denotes strategy types for generating FSGroup values for a -// SecurityContext -type FSGroupStrategyType string - -const ( - // container must have FSGroup of X applied. - FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" - // container may make requests for any FSGroup labels. - FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" -) - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -type SupplementalGroupsStrategyOptions struct { - // Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - Rule SupplementalGroupsStrategyType `json:"rule,omitempty"` - // Ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. - Ranges []IDRange `json:"ranges,omitempty"` -} - -// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental -// groups for a SecurityContext. -type SupplementalGroupsStrategyType string - -const ( - // container must run as a particular gid. - SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" - // container may make requests for any gid. - SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" -) - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -type PodSecurityPolicyList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - - Items []PodSecurityPolicy `json:"items"` -} - -type NetworkPolicy struct { - unversioned.TypeMeta `json:",inline"` - api.ObjectMeta `json:"metadata,omitempty"` - - // Specification of the desired behavior for this NetworkPolicy. - Spec NetworkPolicySpec `json:"spec,omitempty"` -} - -type NetworkPolicySpec struct { - // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules - // is applied to any pods selected by this field. Multiple network policies can select the - // same set of pods. In this case, the ingress rules for each are combined additively. - // This field is NOT optional and follows standard label selector semantics. - // An empty podSelector matches all pods in this namespace. - PodSelector unversioned.LabelSelector `json:"podSelector"` - - // List of ingress rules to be applied to the selected pods. - // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, - // OR if the traffic source is the pod's local node, - // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy - // objects whose podSelector matches the pod. - // If this field is empty then this NetworkPolicy does not affect ingress isolation. - // If this field is present and contains at least one rule, this policy allows any traffic - // which matches at least one of the ingress rules in this list. - Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty"` -} - -// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. -type NetworkPolicyIngressRule struct { - // List of ports which should be made accessible on the pods selected for this rule. 
- // Each item in this list is combined using a logical OR. - // If this field is not provided, this rule matches all ports (traffic not restricted by port). - // If this field is empty, this rule matches no ports (no traffic matches). - // If this field is present and contains at least one item, then this rule allows traffic - // only if the traffic matches at least one port in the list. - // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. - Ports []NetworkPolicyPort `json:"ports,omitempty"` - - // List of sources which should be able to access the pods selected for this rule. - // Items in this list are combined using a logical OR operation. - // If this field is not provided, this rule matches all sources (traffic not restricted by source). - // If this field is empty, this rule matches no sources (no traffic matches). - // If this field is present and contains at least on item, this rule allows traffic only if the - // traffic matches at least one item in the from list. - // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. - From []NetworkPolicyPeer `json:"from,omitempty"` -} - -type NetworkPolicyPort struct { - // Optional. The protocol (TCP or UDP) which traffic must match. - // If not specified, this field defaults to TCP. - Protocol *api.Protocol `json:"protocol,omitempty"` - - // If specified, the port on the given protocol. This can - // either be a numerical or named port on a pod. If this field is not provided, - // this matches all port names and numbers. - // If present, only traffic on the specified protocol AND port - // will be matched. - Port *intstr.IntOrString `json:"port,omitempty"` -} - -type NetworkPolicyPeer struct { - // Exactly one of the following must be specified. - - // This is a label selector which selects Pods in this namespace. - // This field follows standard label selector semantics. - // If not provided, this selector selects no pods. - // If present but empty, this selector selects all pods in this namespace. - PodSelector *unversioned.LabelSelector `json:"podSelector,omitempty"` - - // Selects Namespaces using cluster scoped-labels. This - // matches all pods in all namespaces selected by this label selector. - // This field follows standard label selector semantics. - // If omitted, this selector selects no namespaces. - // If present but empty, this selector selects all namespaces. - NamespaceSelector *unversioned.LabelSelector `json:"namespaceSelector,omitempty"` -} - -// NetworkPolicyList is a list of NetworkPolicy objects. -type NetworkPolicyList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - - Items []NetworkPolicy `json:"items"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go deleted file mode 100644 index ad5c91c90..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion.go +++ /dev/null @@ -1,404 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/apis/autoscaling" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/intstr" -) - -func addConversionFuncs(scheme *runtime.Scheme) { - // Add non-generated conversion functions - err := scheme.AddConversionFuncs( - Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, - Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, - Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, - Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, - Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy, - Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy, - Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, - Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, - Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec, - Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec, - // autoscaling - Convert_autoscaling_CrossVersionObjectReference_To_v1beta1_SubresourceReference, - Convert_v1beta1_SubresourceReference_To_autoscaling_CrossVersionObjectReference, - Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec, - Convert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec, - // batch - Convert_batch_JobSpec_To_v1beta1_JobSpec, - Convert_v1beta1_JobSpec_To_batch_JobSpec, - ) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. - panic(err) - } - - // Add field label conversions for kinds having selectable nothing but ObjectMeta fields. - for _, kind := range []string{"DaemonSet", "Deployment", "Ingress"} { - err = api.Scheme.AddFieldLabelConversionFunc("extensions/v1beta1", kind, - func(label, value string) (string, string, error) { - switch label { - case "metadata.name", "metadata.namespace": - return label, value, nil - default: - return "", "", fmt.Errorf("field label %q not supported for %q", label, kind) - } - }) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. - panic(err) - } - } - - err = api.Scheme.AddFieldLabelConversionFunc("extensions/v1beta1", "Job", - func(label, value string) (string, string, error) { - switch label { - case "metadata.name", "metadata.namespace", "status.successful": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. 
- panic(err) - } -} - -func Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(in *extensions.ScaleStatus, out *ScaleStatus, s conversion.Scope) error { - out.Replicas = int32(in.Replicas) - - out.Selector = nil - out.TargetSelector = "" - if in.Selector != nil { - if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { - out.Selector = in.Selector.MatchLabels - } - - selector, err := unversioned.LabelSelectorAsSelector(in.Selector) - if err != nil { - return fmt.Errorf("invalid label selector: %v", err) - } - out.TargetSelector = selector.String() - } - return nil -} - -func Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(in *ScaleStatus, out *extensions.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - - // Normally when 2 fields map to the same internal value we favor the old field, since - // old clients can't be expected to know about new fields but clients that know about the - // new field can be expected to know about the old field (though that's not quite true, due - // to kubectl apply). However, these fields are readonly, so any non-nil value should work. - if in.TargetSelector != "" { - labelSelector, err := unversioned.ParseToLabelSelector(in.TargetSelector) - if err != nil { - out.Selector = nil - return fmt.Errorf("failed to parse target selector: %v", err) - } - out.Selector = labelSelector - } else if in.Selector != nil { - out.Selector = new(unversioned.LabelSelector) - selector := make(map[string]string) - for key, val := range in.Selector { - selector[key] = val - } - out.Selector.MatchLabels = selector - } else { - out.Selector = nil - } - return nil -} - -func Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(in *extensions.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error { - out.Replicas = &in.Replicas - if in.Selector != nil { - out.Selector = new(LabelSelector) - if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in.Selector, out.Selector, s); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - if err := Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { - return err - } - if in.RevisionHistoryLimit != nil { - out.RevisionHistoryLimit = new(int32) - *out.RevisionHistoryLimit = int32(*in.RevisionHistoryLimit) - } - out.MinReadySeconds = int32(in.MinReadySeconds) - out.Paused = in.Paused - if in.RollbackTo != nil { - out.RollbackTo = new(RollbackConfig) - out.RollbackTo.Revision = int64(in.RollbackTo.Revision) - } else { - out.RollbackTo = nil - } - return nil -} - -func Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(in *DeploymentSpec, out *extensions.DeploymentSpec, s conversion.Scope) error { - if in.Replicas != nil { - out.Replicas = *in.Replicas - } - - if in.Selector != nil { - out.Selector = new(unversioned.LabelSelector) - if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in.Selector, out.Selector, s); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - if err := Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { - return err - } - out.RevisionHistoryLimit = in.RevisionHistoryLimit - out.MinReadySeconds = in.MinReadySeconds 
- out.Paused = in.Paused - if in.RollbackTo != nil { - out.RollbackTo = new(extensions.RollbackConfig) - out.RollbackTo.Revision = in.RollbackTo.Revision - } else { - out.RollbackTo = nil - } - return nil -} - -func Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error { - out.Type = DeploymentStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = new(RollingUpdateDeployment) - if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { - return err - } - } else { - out.RollingUpdate = nil - } - return nil -} - -func Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { - out.Type = extensions.DeploymentStrategyType(in.Type) - if in.RollingUpdate != nil { - out.RollingUpdate = new(extensions.RollingUpdateDeployment) - if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { - return err - } - } else { - out.RollingUpdate = nil - } - return nil -} - -func Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(in *extensions.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error { - if out.MaxUnavailable == nil { - out.MaxUnavailable = &intstr.IntOrString{} - } - if err := s.Convert(&in.MaxUnavailable, out.MaxUnavailable, 0); err != nil { - return err - } - if out.MaxSurge == nil { - out.MaxSurge = &intstr.IntOrString{} - } - if err := s.Convert(&in.MaxSurge, out.MaxSurge, 0); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(in *RollingUpdateDeployment, out *extensions.RollingUpdateDeployment, s conversion.Scope) error { - if err := s.Convert(in.MaxUnavailable, &out.MaxUnavailable, 0); err != nil { - return err - } - if err := s.Convert(in.MaxSurge, &out.MaxSurge, 0); err != nil { - return err - } - return nil -} - -func Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(in *extensions.ReplicaSetSpec, out *ReplicaSetSpec, s conversion.Scope) error { - out.Replicas = new(int32) - *out.Replicas = int32(in.Replicas) - if in.Selector != nil { - out.Selector = new(LabelSelector) - if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in.Selector, out.Selector, s); err != nil { - return err - } - } else { - out.Selector = nil - } - - if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(in *ReplicaSetSpec, out *extensions.ReplicaSetSpec, s conversion.Scope) error { - if in.Replicas != nil { - out.Replicas = *in.Replicas - } - if in.Selector != nil { - out.Selector = new(unversioned.LabelSelector) - if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in.Selector, out.Selector, s); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_batch_JobSpec_To_v1beta1_JobSpec(in *batch.JobSpec, out *JobSpec, s conversion.Scope) error { - out.Parallelism = in.Parallelism - out.Completions = in.Completions - out.ActiveDeadlineSeconds = 
in.ActiveDeadlineSeconds - // unable to generate simple pointer conversion for unversioned.LabelSelector -> v1beta1.LabelSelector - if in.Selector != nil { - out.Selector = new(LabelSelector) - if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in.Selector, out.Selector, s); err != nil { - return err - } - } else { - out.Selector = nil - } - - // BEGIN non-standard conversion - // autoSelector has opposite meaning as manualSelector. - // in both cases, unset means false, and unset is always preferred to false. - // unset vs set-false distinction is not preserved. - manualSelector := in.ManualSelector != nil && *in.ManualSelector - autoSelector := !manualSelector - if autoSelector { - out.AutoSelector = new(bool) - *out.AutoSelector = true - } else { - out.AutoSelector = nil - } - // END non-standard conversion - - if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_JobSpec_To_batch_JobSpec(in *JobSpec, out *batch.JobSpec, s conversion.Scope) error { - out.Parallelism = in.Parallelism - out.Completions = in.Completions - out.ActiveDeadlineSeconds = in.ActiveDeadlineSeconds - // unable to generate simple pointer conversion for v1beta1.LabelSelector -> unversioned.LabelSelector - if in.Selector != nil { - out.Selector = new(unversioned.LabelSelector) - if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in.Selector, out.Selector, s); err != nil { - return err - } - } else { - out.Selector = nil - } - - // BEGIN non-standard conversion - // autoSelector has opposite meaning as manualSelector. - // in both cases, unset means false, and unset is always preferred to false. - // unset vs set-false distinction is not preserved. 
- autoSelector := bool(in.AutoSelector != nil && *in.AutoSelector) - manualSelector := !autoSelector - if manualSelector { - out.ManualSelector = new(bool) - *out.ManualSelector = true - } else { - out.ManualSelector = nil - } - // END non-standard conversion - - if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_autoscaling_CrossVersionObjectReference_To_v1beta1_SubresourceReference(in *autoscaling.CrossVersionObjectReference, out *SubresourceReference, s conversion.Scope) error { - out.Kind = in.Kind - out.Name = in.Name - out.APIVersion = in.APIVersion - out.Subresource = "scale" - return nil -} - -func Convert_v1beta1_SubresourceReference_To_autoscaling_CrossVersionObjectReference(in *SubresourceReference, out *autoscaling.CrossVersionObjectReference, s conversion.Scope) error { - out.Kind = in.Kind - out.Name = in.Name - out.APIVersion = in.APIVersion - return nil -} - -func Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec(in *autoscaling.HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, s conversion.Scope) error { - if err := Convert_autoscaling_CrossVersionObjectReference_To_v1beta1_SubresourceReference(&in.ScaleTargetRef, &out.ScaleRef, s); err != nil { - return err - } - if in.MinReplicas != nil { - out.MinReplicas = new(int32) - *out.MinReplicas = *in.MinReplicas - } else { - out.MinReplicas = nil - } - out.MaxReplicas = in.MaxReplicas - if in.TargetCPUUtilizationPercentage != nil { - out.CPUUtilization = &CPUTargetUtilization{TargetPercentage: *in.TargetCPUUtilizationPercentage} - } - return nil -} - -func Convert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(in *HorizontalPodAutoscalerSpec, out *autoscaling.HorizontalPodAutoscalerSpec, s conversion.Scope) error { - if err := Convert_v1beta1_SubresourceReference_To_autoscaling_CrossVersionObjectReference(&in.ScaleRef, &out.ScaleTargetRef, s); err != nil { - return err - } - if in.MinReplicas != nil { - out.MinReplicas = new(int32) - *out.MinReplicas = int32(*in.MinReplicas) - } else { - out.MinReplicas = nil - } - out.MaxReplicas = int32(in.MaxReplicas) - if in.CPUUtilization != nil { - out.TargetCPUUtilizationPercentage = new(int32) - *out.TargetCPUUtilizationPercentage = int32(in.CPUUtilization.TargetPercentage) - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion_generated.go deleted file mode 100644 index 445394f16..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/conversion_generated.go +++ /dev/null @@ -1,2469 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! 
- -package v1beta1 - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" - autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" - batch "k8s.io/kubernetes/pkg/apis/batch" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedConversionFuncs( - Convert_v1beta1_APIVersion_To_extensions_APIVersion, - Convert_extensions_APIVersion_To_v1beta1_APIVersion, - Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus, - Convert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus, - Convert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList, - Convert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList, - Convert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget, - Convert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget, - Convert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList, - Convert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList, - Convert_v1beta1_DaemonSet_To_extensions_DaemonSet, - Convert_extensions_DaemonSet_To_v1beta1_DaemonSet, - Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList, - Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList, - Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec, - Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec, - Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus, - Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus, - Convert_v1beta1_Deployment_To_extensions_Deployment, - Convert_extensions_Deployment_To_v1beta1_Deployment, - Convert_v1beta1_DeploymentList_To_extensions_DeploymentList, - Convert_extensions_DeploymentList_To_v1beta1_DeploymentList, - Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback, - Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback, - Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec, - Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec, - Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus, - Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus, - Convert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy, - Convert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy, - Convert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions, - Convert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions, - Convert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath, - Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath, - Convert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue, - Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue, - Convert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler, - Convert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler, - Convert_v1beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList, - Convert_autoscaling_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList, - Convert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec, - Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec, - Convert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus, - 
Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus, - Convert_v1beta1_HostPortRange_To_extensions_HostPortRange, - Convert_extensions_HostPortRange_To_v1beta1_HostPortRange, - Convert_v1beta1_IDRange_To_extensions_IDRange, - Convert_extensions_IDRange_To_v1beta1_IDRange, - Convert_v1beta1_Ingress_To_extensions_Ingress, - Convert_extensions_Ingress_To_v1beta1_Ingress, - Convert_v1beta1_IngressBackend_To_extensions_IngressBackend, - Convert_extensions_IngressBackend_To_v1beta1_IngressBackend, - Convert_v1beta1_IngressList_To_extensions_IngressList, - Convert_extensions_IngressList_To_v1beta1_IngressList, - Convert_v1beta1_IngressRule_To_extensions_IngressRule, - Convert_extensions_IngressRule_To_v1beta1_IngressRule, - Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue, - Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue, - Convert_v1beta1_IngressSpec_To_extensions_IngressSpec, - Convert_extensions_IngressSpec_To_v1beta1_IngressSpec, - Convert_v1beta1_IngressStatus_To_extensions_IngressStatus, - Convert_extensions_IngressStatus_To_v1beta1_IngressStatus, - Convert_v1beta1_IngressTLS_To_extensions_IngressTLS, - Convert_extensions_IngressTLS_To_v1beta1_IngressTLS, - Convert_v1beta1_Job_To_batch_Job, - Convert_batch_Job_To_v1beta1_Job, - Convert_v1beta1_JobCondition_To_batch_JobCondition, - Convert_batch_JobCondition_To_v1beta1_JobCondition, - Convert_v1beta1_JobList_To_batch_JobList, - Convert_batch_JobList_To_v1beta1_JobList, - Convert_v1beta1_JobSpec_To_batch_JobSpec, - Convert_batch_JobSpec_To_v1beta1_JobSpec, - Convert_v1beta1_JobStatus_To_batch_JobStatus, - Convert_batch_JobStatus_To_v1beta1_JobStatus, - Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector, - Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector, - Convert_v1beta1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement, - Convert_unversioned_LabelSelectorRequirement_To_v1beta1_LabelSelectorRequirement, - Convert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy, - Convert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy, - Convert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule, - Convert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule, - Convert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList, - Convert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList, - Convert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer, - Convert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer, - Convert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort, - Convert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort, - Convert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec, - Convert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec, - Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy, - Convert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy, - Convert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList, - Convert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList, - Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec, - Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec, - Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet, - Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet, - Convert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList, - Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList, - 
Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec, - Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec, - Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus, - Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus, - Convert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy, - Convert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy, - Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig, - Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig, - Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment, - Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment, - Convert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions, - Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions, - Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions, - Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions, - Convert_v1beta1_Scale_To_extensions_Scale, - Convert_extensions_Scale_To_v1beta1_Scale, - Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec, - Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec, - Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, - Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, - Convert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions, - Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions, - Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource, - Convert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource, - Convert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData, - Convert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData, - Convert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList, - Convert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList, - Convert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList, - Convert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList, - ); err != nil { - // if one of the conversion functions is malformed, detect it immediately. 
- panic(err) - } -} - -func autoConvert_v1beta1_APIVersion_To_extensions_APIVersion(in *APIVersion, out *extensions.APIVersion, s conversion.Scope) error { - out.Name = in.Name - return nil -} - -func Convert_v1beta1_APIVersion_To_extensions_APIVersion(in *APIVersion, out *extensions.APIVersion, s conversion.Scope) error { - return autoConvert_v1beta1_APIVersion_To_extensions_APIVersion(in, out, s) -} - -func autoConvert_extensions_APIVersion_To_v1beta1_APIVersion(in *extensions.APIVersion, out *APIVersion, s conversion.Scope) error { - out.Name = in.Name - return nil -} - -func Convert_extensions_APIVersion_To_v1beta1_APIVersion(in *extensions.APIVersion, out *APIVersion, s conversion.Scope) error { - return autoConvert_extensions_APIVersion_To_v1beta1_APIVersion(in, out, s) -} - -func autoConvert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in *CustomMetricCurrentStatus, out *extensions.CustomMetricCurrentStatus, s conversion.Scope) error { - out.Name = in.Name - if err := api.Convert_resource_Quantity_To_resource_Quantity(&in.CurrentValue, &out.CurrentValue, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in *CustomMetricCurrentStatus, out *extensions.CustomMetricCurrentStatus, s conversion.Scope) error { - return autoConvert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(in, out, s) -} - -func autoConvert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in *extensions.CustomMetricCurrentStatus, out *CustomMetricCurrentStatus, s conversion.Scope) error { - out.Name = in.Name - if err := api.Convert_resource_Quantity_To_resource_Quantity(&in.CurrentValue, &out.CurrentValue, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in *extensions.CustomMetricCurrentStatus, out *CustomMetricCurrentStatus, s conversion.Scope) error { - return autoConvert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(in, out, s) -} - -func autoConvert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in *CustomMetricCurrentStatusList, out *extensions.CustomMetricCurrentStatusList, s conversion.Scope) error { - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]extensions.CustomMetricCurrentStatus, len(*in)) - for i := range *in { - if err := Convert_v1beta1_CustomMetricCurrentStatus_To_extensions_CustomMetricCurrentStatus(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in *CustomMetricCurrentStatusList, out *extensions.CustomMetricCurrentStatusList, s conversion.Scope) error { - return autoConvert_v1beta1_CustomMetricCurrentStatusList_To_extensions_CustomMetricCurrentStatusList(in, out, s) -} - -func autoConvert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in *extensions.CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, s conversion.Scope) error { - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomMetricCurrentStatus, len(*in)) - for i := range *in { - if err := Convert_extensions_CustomMetricCurrentStatus_To_v1beta1_CustomMetricCurrentStatus(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - 
return nil -} - -func Convert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in *extensions.CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, s conversion.Scope) error { - return autoConvert_extensions_CustomMetricCurrentStatusList_To_v1beta1_CustomMetricCurrentStatusList(in, out, s) -} - -func autoConvert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in *CustomMetricTarget, out *extensions.CustomMetricTarget, s conversion.Scope) error { - out.Name = in.Name - if err := api.Convert_resource_Quantity_To_resource_Quantity(&in.TargetValue, &out.TargetValue, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in *CustomMetricTarget, out *extensions.CustomMetricTarget, s conversion.Scope) error { - return autoConvert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(in, out, s) -} - -func autoConvert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in *extensions.CustomMetricTarget, out *CustomMetricTarget, s conversion.Scope) error { - out.Name = in.Name - if err := api.Convert_resource_Quantity_To_resource_Quantity(&in.TargetValue, &out.TargetValue, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in *extensions.CustomMetricTarget, out *CustomMetricTarget, s conversion.Scope) error { - return autoConvert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(in, out, s) -} - -func autoConvert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in *CustomMetricTargetList, out *extensions.CustomMetricTargetList, s conversion.Scope) error { - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]extensions.CustomMetricTarget, len(*in)) - for i := range *in { - if err := Convert_v1beta1_CustomMetricTarget_To_extensions_CustomMetricTarget(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in *CustomMetricTargetList, out *extensions.CustomMetricTargetList, s conversion.Scope) error { - return autoConvert_v1beta1_CustomMetricTargetList_To_extensions_CustomMetricTargetList(in, out, s) -} - -func autoConvert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in *extensions.CustomMetricTargetList, out *CustomMetricTargetList, s conversion.Scope) error { - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomMetricTarget, len(*in)) - for i := range *in { - if err := Convert_extensions_CustomMetricTarget_To_v1beta1_CustomMetricTarget(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in *extensions.CustomMetricTargetList, out *CustomMetricTargetList, s conversion.Scope) error { - return autoConvert_extensions_CustomMetricTargetList_To_v1beta1_CustomMetricTargetList(in, out, s) -} - -func autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { - SetDefaults_DaemonSet(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(in *DaemonSet, out *extensions.DaemonSet, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSet_To_extensions_DaemonSet(in, out, s) -} - -func autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, out *DaemonSet, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(in *extensions.DaemonSet, out *DaemonSet, s conversion.Scope) error { - return autoConvert_extensions_DaemonSet_To_v1beta1_DaemonSet(in, out, s) -} - -func autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]extensions.DaemonSet, len(*in)) - for i := range *in { - if err := Convert_v1beta1_DaemonSet_To_extensions_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in *DaemonSetList, out *extensions.DaemonSetList, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSetList_To_extensions_DaemonSetList(in, out, s) -} - -func autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.DaemonSetList, out *DaemonSetList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DaemonSet, len(*in)) - for i := range *in { - if err := Convert_extensions_DaemonSet_To_v1beta1_DaemonSet(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in *extensions.DaemonSetList, out *DaemonSetList, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetList_To_v1beta1_DaemonSetList(in, out, s) -} - -func autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out 
= new(unversioned.LabelSelector) - if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(*in, *out, s); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := v1.Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in *DaemonSetSpec, out *extensions.DaemonSetSpec, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSetSpec_To_extensions_DaemonSetSpec(in, out, s) -} - -func autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *DaemonSetSpec, s conversion.Scope) error { - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(LabelSelector) - if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(*in, *out, s); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := v1.Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in *extensions.DaemonSetSpec, out *DaemonSetSpec, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetSpec_To_v1beta1_DaemonSetSpec(in, out, s) -} - -func autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { - out.CurrentNumberScheduled = in.CurrentNumberScheduled - out.NumberMisscheduled = in.NumberMisscheduled - out.DesiredNumberScheduled = in.DesiredNumberScheduled - return nil -} - -func Convert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in *DaemonSetStatus, out *extensions.DaemonSetStatus, s conversion.Scope) error { - return autoConvert_v1beta1_DaemonSetStatus_To_extensions_DaemonSetStatus(in, out, s) -} - -func autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *DaemonSetStatus, s conversion.Scope) error { - out.CurrentNumberScheduled = in.CurrentNumberScheduled - out.NumberMisscheduled = in.NumberMisscheduled - out.DesiredNumberScheduled = in.DesiredNumberScheduled - return nil -} - -func Convert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in *extensions.DaemonSetStatus, out *DaemonSetStatus, s conversion.Scope) error { - return autoConvert_extensions_DaemonSetStatus_To_v1beta1_DaemonSetStatus(in, out, s) -} - -func autoConvert_v1beta1_Deployment_To_extensions_Deployment(in *Deployment, out *extensions.Deployment, s conversion.Scope) error { - SetDefaults_Deployment(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v1beta1_DeploymentSpec_To_extensions_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_Deployment_To_extensions_Deployment(in *Deployment, out *extensions.Deployment, s conversion.Scope) error { - return autoConvert_v1beta1_Deployment_To_extensions_Deployment(in, out, s) -} - -func autoConvert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *Deployment, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_extensions_DeploymentSpec_To_v1beta1_DeploymentSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_Deployment_To_v1beta1_Deployment(in *extensions.Deployment, out *Deployment, s conversion.Scope) error { - return autoConvert_extensions_Deployment_To_v1beta1_Deployment(in, out, s) -} - -func autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]extensions.Deployment, len(*in)) - for i := range *in { - if err := Convert_v1beta1_Deployment_To_extensions_Deployment(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1beta1_DeploymentList_To_extensions_DeploymentList(in *DeploymentList, out *extensions.DeploymentList, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentList_To_extensions_DeploymentList(in, out, s) -} - -func autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *DeploymentList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Deployment, len(*in)) - for i := range *in { - if err := Convert_extensions_Deployment_To_v1beta1_Deployment(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_extensions_DeploymentList_To_v1beta1_DeploymentList(in *extensions.DeploymentList, out *DeploymentList, s conversion.Scope) error { - return autoConvert_extensions_DeploymentList_To_v1beta1_DeploymentList(in, out, s) -} - -func autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error { - if 
err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.Name = in.Name - out.UpdatedAnnotations = in.UpdatedAnnotations - if err := Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in *DeploymentRollback, out *extensions.DeploymentRollback, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentRollback_To_extensions_DeploymentRollback(in, out, s) -} - -func autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *DeploymentRollback, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - out.Name = in.Name - out.UpdatedAnnotations = in.UpdatedAnnotations - if err := Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(&in.RollbackTo, &out.RollbackTo, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in *extensions.DeploymentRollback, out *DeploymentRollback, s conversion.Scope) error { - return autoConvert_extensions_DeploymentRollback_To_v1beta1_DeploymentRollback(in, out, s) -} - -func autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { - out.ObservedGeneration = in.ObservedGeneration - out.Replicas = in.Replicas - out.UpdatedReplicas = in.UpdatedReplicas - out.AvailableReplicas = in.AvailableReplicas - out.UnavailableReplicas = in.UnavailableReplicas - return nil -} - -func Convert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in *DeploymentStatus, out *extensions.DeploymentStatus, s conversion.Scope) error { - return autoConvert_v1beta1_DeploymentStatus_To_extensions_DeploymentStatus(in, out, s) -} - -func autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *DeploymentStatus, s conversion.Scope) error { - out.ObservedGeneration = in.ObservedGeneration - out.Replicas = in.Replicas - out.UpdatedReplicas = in.UpdatedReplicas - out.AvailableReplicas = in.AvailableReplicas - out.UnavailableReplicas = in.UnavailableReplicas - return nil -} - -func Convert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in *extensions.DeploymentStatus, out *DeploymentStatus, s conversion.Scope) error { - return autoConvert_extensions_DeploymentStatus_To_v1beta1_DeploymentStatus(in, out, s) -} - -func autoConvert_v1beta1_DeploymentStrategy_To_extensions_DeploymentStrategy(in *DeploymentStrategy, out *extensions.DeploymentStrategy, s conversion.Scope) error { - out.Type = extensions.DeploymentStrategyType(in.Type) - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(extensions.RollingUpdateDeployment) - if err := Convert_v1beta1_RollingUpdateDeployment_To_extensions_RollingUpdateDeployment(*in, *out, s); err != nil { - return err - } - } else { - out.RollingUpdate = nil - } - return nil -} - -func autoConvert_extensions_DeploymentStrategy_To_v1beta1_DeploymentStrategy(in *extensions.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error { - out.Type = DeploymentStrategyType(in.Type) - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = 
new(RollingUpdateDeployment) - if err := Convert_extensions_RollingUpdateDeployment_To_v1beta1_RollingUpdateDeployment(*in, *out, s); err != nil { - return err - } - } else { - out.RollingUpdate = nil - } - return nil -} - -func autoConvert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(in *FSGroupStrategyOptions, out *extensions.FSGroupStrategyOptions, s conversion.Scope) error { - out.Rule = extensions.FSGroupStrategyType(in.Rule) - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]extensions.IDRange, len(*in)) - for i := range *in { - if err := Convert_v1beta1_IDRange_To_extensions_IDRange(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Ranges = nil - } - return nil -} - -func Convert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(in *FSGroupStrategyOptions, out *extensions.FSGroupStrategyOptions, s conversion.Scope) error { - return autoConvert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(in, out, s) -} - -func autoConvert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(in *extensions.FSGroupStrategyOptions, out *FSGroupStrategyOptions, s conversion.Scope) error { - out.Rule = FSGroupStrategyType(in.Rule) - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - for i := range *in { - if err := Convert_extensions_IDRange_To_v1beta1_IDRange(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Ranges = nil - } - return nil -} - -func Convert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(in *extensions.FSGroupStrategyOptions, out *FSGroupStrategyOptions, s conversion.Scope) error { - return autoConvert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(in, out, s) -} - -func autoConvert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in *HTTPIngressPath, out *extensions.HTTPIngressPath, s conversion.Scope) error { - out.Path = in.Path - if err := Convert_v1beta1_IngressBackend_To_extensions_IngressBackend(&in.Backend, &out.Backend, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in *HTTPIngressPath, out *extensions.HTTPIngressPath, s conversion.Scope) error { - return autoConvert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(in, out, s) -} - -func autoConvert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in *extensions.HTTPIngressPath, out *HTTPIngressPath, s conversion.Scope) error { - out.Path = in.Path - if err := Convert_extensions_IngressBackend_To_v1beta1_IngressBackend(&in.Backend, &out.Backend, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in *extensions.HTTPIngressPath, out *HTTPIngressPath, s conversion.Scope) error { - return autoConvert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(in, out, s) -} - -func autoConvert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in *HTTPIngressRuleValue, out *extensions.HTTPIngressRuleValue, s conversion.Scope) error { - if in.Paths != nil { - in, out := &in.Paths, &out.Paths - *out = make([]extensions.HTTPIngressPath, len(*in)) - for i := range *in { - if err := Convert_v1beta1_HTTPIngressPath_To_extensions_HTTPIngressPath(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Paths = nil - } - return nil -} - -func Convert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in 
*HTTPIngressRuleValue, out *extensions.HTTPIngressRuleValue, s conversion.Scope) error { - return autoConvert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(in, out, s) -} - -func autoConvert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in *extensions.HTTPIngressRuleValue, out *HTTPIngressRuleValue, s conversion.Scope) error { - if in.Paths != nil { - in, out := &in.Paths, &out.Paths - *out = make([]HTTPIngressPath, len(*in)) - for i := range *in { - if err := Convert_extensions_HTTPIngressPath_To_v1beta1_HTTPIngressPath(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Paths = nil - } - return nil -} - -func Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in *extensions.HTTPIngressRuleValue, out *HTTPIngressRuleValue, s conversion.Scope) error { - return autoConvert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(in, out, s) -} - -func autoConvert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { - SetDefaults_HorizontalPodAutoscaler(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v1beta1_HorizontalPodAutoscalerSpec_To_autoscaling_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in *HorizontalPodAutoscaler, out *autoscaling.HorizontalPodAutoscaler, s conversion.Scope) error { - return autoConvert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(in, out, s) -} - -func autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_autoscaling_HorizontalPodAutoscalerSpec_To_v1beta1_HorizontalPodAutoscalerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(in *autoscaling.HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, s conversion.Scope) error { - return autoConvert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(in, out, s) -} - -func autoConvert_v1beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]autoscaling.HorizontalPodAutoscaler, len(*in)) - for i := range *in { - if err := Convert_v1beta1_HorizontalPodAutoscaler_To_autoscaling_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in *HorizontalPodAutoscalerList, out *autoscaling.HorizontalPodAutoscalerList, s conversion.Scope) error { - return autoConvert_v1beta1_HorizontalPodAutoscalerList_To_autoscaling_HorizontalPodAutoscalerList(in, out, s) -} - -func autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]HorizontalPodAutoscaler, len(*in)) - for i := range *in { - if err := Convert_autoscaling_HorizontalPodAutoscaler_To_v1beta1_HorizontalPodAutoscaler(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_autoscaling_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList(in *autoscaling.HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, s conversion.Scope) error { - return autoConvert_autoscaling_HorizontalPodAutoscalerList_To_v1beta1_HorizontalPodAutoscalerList(in, out, s) -} - -func autoConvert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in *HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { - out.ObservedGeneration = in.ObservedGeneration - out.LastScaleTime = in.LastScaleTime - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas - out.CurrentCPUUtilizationPercentage = in.CurrentCPUUtilizationPercentage - return nil -} - -func Convert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in 
*HorizontalPodAutoscalerStatus, out *autoscaling.HorizontalPodAutoscalerStatus, s conversion.Scope) error { - return autoConvert_v1beta1_HorizontalPodAutoscalerStatus_To_autoscaling_HorizontalPodAutoscalerStatus(in, out, s) -} - -func autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { - out.ObservedGeneration = in.ObservedGeneration - out.LastScaleTime = in.LastScaleTime - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas - out.CurrentCPUUtilizationPercentage = in.CurrentCPUUtilizationPercentage - return nil -} - -func Convert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(in *autoscaling.HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, s conversion.Scope) error { - return autoConvert_autoscaling_HorizontalPodAutoscalerStatus_To_v1beta1_HorizontalPodAutoscalerStatus(in, out, s) -} - -func autoConvert_v1beta1_HostPortRange_To_extensions_HostPortRange(in *HostPortRange, out *extensions.HostPortRange, s conversion.Scope) error { - out.Min = int(in.Min) - out.Max = int(in.Max) - return nil -} - -func Convert_v1beta1_HostPortRange_To_extensions_HostPortRange(in *HostPortRange, out *extensions.HostPortRange, s conversion.Scope) error { - return autoConvert_v1beta1_HostPortRange_To_extensions_HostPortRange(in, out, s) -} - -func autoConvert_extensions_HostPortRange_To_v1beta1_HostPortRange(in *extensions.HostPortRange, out *HostPortRange, s conversion.Scope) error { - out.Min = int32(in.Min) - out.Max = int32(in.Max) - return nil -} - -func Convert_extensions_HostPortRange_To_v1beta1_HostPortRange(in *extensions.HostPortRange, out *HostPortRange, s conversion.Scope) error { - return autoConvert_extensions_HostPortRange_To_v1beta1_HostPortRange(in, out, s) -} - -func autoConvert_v1beta1_IDRange_To_extensions_IDRange(in *IDRange, out *extensions.IDRange, s conversion.Scope) error { - out.Min = in.Min - out.Max = in.Max - return nil -} - -func Convert_v1beta1_IDRange_To_extensions_IDRange(in *IDRange, out *extensions.IDRange, s conversion.Scope) error { - return autoConvert_v1beta1_IDRange_To_extensions_IDRange(in, out, s) -} - -func autoConvert_extensions_IDRange_To_v1beta1_IDRange(in *extensions.IDRange, out *IDRange, s conversion.Scope) error { - out.Min = in.Min - out.Max = in.Max - return nil -} - -func Convert_extensions_IDRange_To_v1beta1_IDRange(in *extensions.IDRange, out *IDRange, s conversion.Scope) error { - return autoConvert_extensions_IDRange_To_v1beta1_IDRange(in, out, s) -} - -func autoConvert_v1beta1_Ingress_To_extensions_Ingress(in *Ingress, out *extensions.Ingress, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v1beta1_IngressSpec_To_extensions_IngressSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_IngressStatus_To_extensions_IngressStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_Ingress_To_extensions_Ingress(in *Ingress, out *extensions.Ingress, s conversion.Scope) error { - return autoConvert_v1beta1_Ingress_To_extensions_Ingress(in, out, s) -} - -func autoConvert_extensions_Ingress_To_v1beta1_Ingress(in *extensions.Ingress, out *Ingress, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_extensions_IngressSpec_To_v1beta1_IngressSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_extensions_IngressStatus_To_v1beta1_IngressStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_Ingress_To_v1beta1_Ingress(in *extensions.Ingress, out *Ingress, s conversion.Scope) error { - return autoConvert_extensions_Ingress_To_v1beta1_Ingress(in, out, s) -} - -func autoConvert_v1beta1_IngressBackend_To_extensions_IngressBackend(in *IngressBackend, out *extensions.IngressBackend, s conversion.Scope) error { - out.ServiceName = in.ServiceName - if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.ServicePort, &out.ServicePort, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_IngressBackend_To_extensions_IngressBackend(in *IngressBackend, out *extensions.IngressBackend, s conversion.Scope) error { - return autoConvert_v1beta1_IngressBackend_To_extensions_IngressBackend(in, out, s) -} - -func autoConvert_extensions_IngressBackend_To_v1beta1_IngressBackend(in *extensions.IngressBackend, out *IngressBackend, s conversion.Scope) error { - out.ServiceName = in.ServiceName - if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.ServicePort, &out.ServicePort, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_IngressBackend_To_v1beta1_IngressBackend(in *extensions.IngressBackend, out *IngressBackend, s conversion.Scope) error { - return autoConvert_extensions_IngressBackend_To_v1beta1_IngressBackend(in, out, s) -} - -func autoConvert_v1beta1_IngressList_To_extensions_IngressList(in *IngressList, out *extensions.IngressList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]extensions.Ingress, len(*in)) - for i := range *in { - if err := Convert_v1beta1_Ingress_To_extensions_Ingress(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1beta1_IngressList_To_extensions_IngressList(in *IngressList, out *extensions.IngressList, s conversion.Scope) error { - return autoConvert_v1beta1_IngressList_To_extensions_IngressList(in, out, s) -} - -func autoConvert_extensions_IngressList_To_v1beta1_IngressList(in 
*extensions.IngressList, out *IngressList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Ingress, len(*in)) - for i := range *in { - if err := Convert_extensions_Ingress_To_v1beta1_Ingress(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_extensions_IngressList_To_v1beta1_IngressList(in *extensions.IngressList, out *IngressList, s conversion.Scope) error { - return autoConvert_extensions_IngressList_To_v1beta1_IngressList(in, out, s) -} - -func autoConvert_v1beta1_IngressRule_To_extensions_IngressRule(in *IngressRule, out *extensions.IngressRule, s conversion.Scope) error { - out.Host = in.Host - if err := Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_IngressRule_To_extensions_IngressRule(in *IngressRule, out *extensions.IngressRule, s conversion.Scope) error { - return autoConvert_v1beta1_IngressRule_To_extensions_IngressRule(in, out, s) -} - -func autoConvert_extensions_IngressRule_To_v1beta1_IngressRule(in *extensions.IngressRule, out *IngressRule, s conversion.Scope) error { - out.Host = in.Host - if err := Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(&in.IngressRuleValue, &out.IngressRuleValue, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_IngressRule_To_v1beta1_IngressRule(in *extensions.IngressRule, out *IngressRule, s conversion.Scope) error { - return autoConvert_extensions_IngressRule_To_v1beta1_IngressRule(in, out, s) -} - -func autoConvert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in *IngressRuleValue, out *extensions.IngressRuleValue, s conversion.Scope) error { - if in.HTTP != nil { - in, out := &in.HTTP, &out.HTTP - *out = new(extensions.HTTPIngressRuleValue) - if err := Convert_v1beta1_HTTPIngressRuleValue_To_extensions_HTTPIngressRuleValue(*in, *out, s); err != nil { - return err - } - } else { - out.HTTP = nil - } - return nil -} - -func Convert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in *IngressRuleValue, out *extensions.IngressRuleValue, s conversion.Scope) error { - return autoConvert_v1beta1_IngressRuleValue_To_extensions_IngressRuleValue(in, out, s) -} - -func autoConvert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in *extensions.IngressRuleValue, out *IngressRuleValue, s conversion.Scope) error { - if in.HTTP != nil { - in, out := &in.HTTP, &out.HTTP - *out = new(HTTPIngressRuleValue) - if err := Convert_extensions_HTTPIngressRuleValue_To_v1beta1_HTTPIngressRuleValue(*in, *out, s); err != nil { - return err - } - } else { - out.HTTP = nil - } - return nil -} - -func Convert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in *extensions.IngressRuleValue, out *IngressRuleValue, s conversion.Scope) error { - return autoConvert_extensions_IngressRuleValue_To_v1beta1_IngressRuleValue(in, out, s) -} - -func autoConvert_v1beta1_IngressSpec_To_extensions_IngressSpec(in *IngressSpec, out *extensions.IngressSpec, s conversion.Scope) error { - if in.Backend != nil { - in, out := &in.Backend, &out.Backend - *out = new(extensions.IngressBackend) - if err := 
Convert_v1beta1_IngressBackend_To_extensions_IngressBackend(*in, *out, s); err != nil { - return err - } - } else { - out.Backend = nil - } - if in.TLS != nil { - in, out := &in.TLS, &out.TLS - *out = make([]extensions.IngressTLS, len(*in)) - for i := range *in { - if err := Convert_v1beta1_IngressTLS_To_extensions_IngressTLS(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.TLS = nil - } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]extensions.IngressRule, len(*in)) - for i := range *in { - if err := Convert_v1beta1_IngressRule_To_extensions_IngressRule(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Rules = nil - } - return nil -} - -func Convert_v1beta1_IngressSpec_To_extensions_IngressSpec(in *IngressSpec, out *extensions.IngressSpec, s conversion.Scope) error { - return autoConvert_v1beta1_IngressSpec_To_extensions_IngressSpec(in, out, s) -} - -func autoConvert_extensions_IngressSpec_To_v1beta1_IngressSpec(in *extensions.IngressSpec, out *IngressSpec, s conversion.Scope) error { - if in.Backend != nil { - in, out := &in.Backend, &out.Backend - *out = new(IngressBackend) - if err := Convert_extensions_IngressBackend_To_v1beta1_IngressBackend(*in, *out, s); err != nil { - return err - } - } else { - out.Backend = nil - } - if in.TLS != nil { - in, out := &in.TLS, &out.TLS - *out = make([]IngressTLS, len(*in)) - for i := range *in { - if err := Convert_extensions_IngressTLS_To_v1beta1_IngressTLS(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.TLS = nil - } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]IngressRule, len(*in)) - for i := range *in { - if err := Convert_extensions_IngressRule_To_v1beta1_IngressRule(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Rules = nil - } - return nil -} - -func Convert_extensions_IngressSpec_To_v1beta1_IngressSpec(in *extensions.IngressSpec, out *IngressSpec, s conversion.Scope) error { - return autoConvert_extensions_IngressSpec_To_v1beta1_IngressSpec(in, out, s) -} - -func autoConvert_v1beta1_IngressStatus_To_extensions_IngressStatus(in *IngressStatus, out *extensions.IngressStatus, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.LoadBalancer, &out.LoadBalancer, 0); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_IngressStatus_To_extensions_IngressStatus(in *IngressStatus, out *extensions.IngressStatus, s conversion.Scope) error { - return autoConvert_v1beta1_IngressStatus_To_extensions_IngressStatus(in, out, s) -} - -func autoConvert_extensions_IngressStatus_To_v1beta1_IngressStatus(in *extensions.IngressStatus, out *IngressStatus, s conversion.Scope) error { - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.LoadBalancer, &out.LoadBalancer, 0); err != nil { - return err - } - return nil -} - -func Convert_extensions_IngressStatus_To_v1beta1_IngressStatus(in *extensions.IngressStatus, out *IngressStatus, s conversion.Scope) error { - return autoConvert_extensions_IngressStatus_To_v1beta1_IngressStatus(in, out, s) -} - -func autoConvert_v1beta1_IngressTLS_To_extensions_IngressTLS(in *IngressTLS, out *extensions.IngressTLS, s conversion.Scope) error { - out.Hosts = in.Hosts - out.SecretName = in.SecretName - return nil -} - -func Convert_v1beta1_IngressTLS_To_extensions_IngressTLS(in *IngressTLS, out *extensions.IngressTLS, s conversion.Scope) error { - return autoConvert_v1beta1_IngressTLS_To_extensions_IngressTLS(in, out, s) -} - -func autoConvert_extensions_IngressTLS_To_v1beta1_IngressTLS(in *extensions.IngressTLS, out *IngressTLS, s conversion.Scope) error { - out.Hosts = in.Hosts - out.SecretName = in.SecretName - return nil -} - -func Convert_extensions_IngressTLS_To_v1beta1_IngressTLS(in *extensions.IngressTLS, out *IngressTLS, s conversion.Scope) error { - return autoConvert_extensions_IngressTLS_To_v1beta1_IngressTLS(in, out, s) -} - -func autoConvert_v1beta1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { - SetDefaults_Job(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v1beta1_JobSpec_To_batch_JobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_JobStatus_To_batch_JobStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_Job_To_batch_Job(in *Job, out *batch.Job, s conversion.Scope) error { - return autoConvert_v1beta1_Job_To_batch_Job(in, out, s) -} - -func autoConvert_batch_Job_To_v1beta1_Job(in *batch.Job, out *Job, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_batch_JobSpec_To_v1beta1_JobSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_batch_JobStatus_To_v1beta1_JobStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_batch_Job_To_v1beta1_Job(in *batch.Job, out *Job, s conversion.Scope) error { - return autoConvert_batch_Job_To_v1beta1_Job(in, out, s) -} - -func autoConvert_v1beta1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { - out.Type = batch.JobConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_v1beta1_JobCondition_To_batch_JobCondition(in *JobCondition, out *batch.JobCondition, s conversion.Scope) error { - return autoConvert_v1beta1_JobCondition_To_batch_JobCondition(in, out, s) -} - -func autoConvert_batch_JobCondition_To_v1beta1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { - out.Type = JobConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_batch_JobCondition_To_v1beta1_JobCondition(in *batch.JobCondition, out *JobCondition, s conversion.Scope) error { - return autoConvert_batch_JobCondition_To_v1beta1_JobCondition(in, out, s) -} - -func autoConvert_v1beta1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]batch.Job, len(*in)) - for i := range *in { - if err := Convert_v1beta1_Job_To_batch_Job(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1beta1_JobList_To_batch_JobList(in *JobList, out *batch.JobList, s conversion.Scope) error { - return autoConvert_v1beta1_JobList_To_batch_JobList(in, out, s) -} - -func autoConvert_batch_JobList_To_v1beta1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Job, len(*in)) - for i := range *in { - if err := Convert_batch_Job_To_v1beta1_Job(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func 
Convert_batch_JobList_To_v1beta1_JobList(in *batch.JobList, out *JobList, s conversion.Scope) error { - return autoConvert_batch_JobList_To_v1beta1_JobList(in, out, s) -} - -func autoConvert_v1beta1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]batch.JobCondition, len(*in)) - for i := range *in { - if err := Convert_v1beta1_JobCondition_To_batch_JobCondition(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - out.StartTime = in.StartTime - out.CompletionTime = in.CompletionTime - out.Active = in.Active - out.Succeeded = in.Succeeded - out.Failed = in.Failed - return nil -} - -func Convert_v1beta1_JobStatus_To_batch_JobStatus(in *JobStatus, out *batch.JobStatus, s conversion.Scope) error { - return autoConvert_v1beta1_JobStatus_To_batch_JobStatus(in, out, s) -} - -func autoConvert_batch_JobStatus_To_v1beta1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]JobCondition, len(*in)) - for i := range *in { - if err := Convert_batch_JobCondition_To_v1beta1_JobCondition(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - out.StartTime = in.StartTime - out.CompletionTime = in.CompletionTime - out.Active = in.Active - out.Succeeded = in.Succeeded - out.Failed = in.Failed - return nil -} - -func Convert_batch_JobStatus_To_v1beta1_JobStatus(in *batch.JobStatus, out *JobStatus, s conversion.Scope) error { - return autoConvert_batch_JobStatus_To_v1beta1_JobStatus(in, out, s) -} - -func autoConvert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error { - out.MatchLabels = in.MatchLabels - if in.MatchExpressions != nil { - in, out := &in.MatchExpressions, &out.MatchExpressions - *out = make([]unversioned.LabelSelectorRequirement, len(*in)) - for i := range *in { - if err := Convert_v1beta1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.MatchExpressions = nil - } - return nil -} - -func Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in *LabelSelector, out *unversioned.LabelSelector, s conversion.Scope) error { - return autoConvert_v1beta1_LabelSelector_To_unversioned_LabelSelector(in, out, s) -} - -func autoConvert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error { - out.MatchLabels = in.MatchLabels - if in.MatchExpressions != nil { - in, out := &in.MatchExpressions, &out.MatchExpressions - *out = make([]LabelSelectorRequirement, len(*in)) - for i := range *in { - if err := Convert_unversioned_LabelSelectorRequirement_To_v1beta1_LabelSelectorRequirement(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.MatchExpressions = nil - } - return nil -} - -func Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in *unversioned.LabelSelector, out *LabelSelector, s conversion.Scope) error { - return autoConvert_unversioned_LabelSelector_To_v1beta1_LabelSelector(in, out, s) -} - -func autoConvert_v1beta1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error { - out.Key = 
in.Key - out.Operator = unversioned.LabelSelectorOperator(in.Operator) - out.Values = in.Values - return nil -} - -func Convert_v1beta1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in *LabelSelectorRequirement, out *unversioned.LabelSelectorRequirement, s conversion.Scope) error { - return autoConvert_v1beta1_LabelSelectorRequirement_To_unversioned_LabelSelectorRequirement(in, out, s) -} - -func autoConvert_unversioned_LabelSelectorRequirement_To_v1beta1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error { - out.Key = in.Key - out.Operator = LabelSelectorOperator(in.Operator) - out.Values = in.Values - return nil -} - -func Convert_unversioned_LabelSelectorRequirement_To_v1beta1_LabelSelectorRequirement(in *unversioned.LabelSelectorRequirement, out *LabelSelectorRequirement, s conversion.Scope) error { - return autoConvert_unversioned_LabelSelectorRequirement_To_v1beta1_LabelSelectorRequirement(in, out, s) -} - -func autoConvert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in *NetworkPolicy, out *extensions.NetworkPolicy, s conversion.Scope) error { - SetDefaults_NetworkPolicy(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in *NetworkPolicy, out *extensions.NetworkPolicy, s conversion.Scope) error { - return autoConvert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(in, out, s) -} - -func autoConvert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in *extensions.NetworkPolicy, out *NetworkPolicy, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in *extensions.NetworkPolicy, out *NetworkPolicy, s conversion.Scope) error { - return autoConvert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(in, out, s) -} - -func autoConvert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule(in *NetworkPolicyIngressRule, out *extensions.NetworkPolicyIngressRule, s conversion.Scope) error { - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]extensions.NetworkPolicyPort, len(*in)) - for i := range *in { - if err := Convert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Ports = nil - } - if in.From != nil { - in, out := &in.From, &out.From - *out = make([]extensions.NetworkPolicyPeer, len(*in)) - for i := range *in { - if err := Convert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.From = nil - } - return nil -} - -func Convert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule(in *NetworkPolicyIngressRule, out *extensions.NetworkPolicyIngressRule, s conversion.Scope) error { - return autoConvert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule(in, out, s) -} - -func autoConvert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in *extensions.NetworkPolicyIngressRule, out *NetworkPolicyIngressRule, s conversion.Scope) error { - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]NetworkPolicyPort, len(*in)) - for i := range *in { - if err := Convert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Ports = nil - } - if in.From != nil { - in, out := &in.From, &out.From - *out = make([]NetworkPolicyPeer, len(*in)) - for i := range *in { - if err := Convert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.From = nil - } - return nil -} - -func Convert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in *extensions.NetworkPolicyIngressRule, out *NetworkPolicyIngressRule, s conversion.Scope) error { - return autoConvert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(in, out, s) -} - -func autoConvert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList(in *NetworkPolicyList, out *extensions.NetworkPolicyList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]extensions.NetworkPolicy, len(*in)) - for i := range *in { - if err := Convert_v1beta1_NetworkPolicy_To_extensions_NetworkPolicy(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList(in *NetworkPolicyList, out *extensions.NetworkPolicyList, 
s conversion.Scope) error { - return autoConvert_v1beta1_NetworkPolicyList_To_extensions_NetworkPolicyList(in, out, s) -} - -func autoConvert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in *extensions.NetworkPolicyList, out *NetworkPolicyList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NetworkPolicy, len(*in)) - for i := range *in { - if err := Convert_extensions_NetworkPolicy_To_v1beta1_NetworkPolicy(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in *extensions.NetworkPolicyList, out *NetworkPolicyList, s conversion.Scope) error { - return autoConvert_extensions_NetworkPolicyList_To_v1beta1_NetworkPolicyList(in, out, s) -} - -func autoConvert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(in *NetworkPolicyPeer, out *extensions.NetworkPolicyPeer, s conversion.Scope) error { - if in.PodSelector != nil { - in, out := &in.PodSelector, &out.PodSelector - *out = new(unversioned.LabelSelector) - if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(*in, *out, s); err != nil { - return err - } - } else { - out.PodSelector = nil - } - if in.NamespaceSelector != nil { - in, out := &in.NamespaceSelector, &out.NamespaceSelector - *out = new(unversioned.LabelSelector) - if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(*in, *out, s); err != nil { - return err - } - } else { - out.NamespaceSelector = nil - } - return nil -} - -func Convert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(in *NetworkPolicyPeer, out *extensions.NetworkPolicyPeer, s conversion.Scope) error { - return autoConvert_v1beta1_NetworkPolicyPeer_To_extensions_NetworkPolicyPeer(in, out, s) -} - -func autoConvert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in *extensions.NetworkPolicyPeer, out *NetworkPolicyPeer, s conversion.Scope) error { - if in.PodSelector != nil { - in, out := &in.PodSelector, &out.PodSelector - *out = new(LabelSelector) - if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(*in, *out, s); err != nil { - return err - } - } else { - out.PodSelector = nil - } - if in.NamespaceSelector != nil { - in, out := &in.NamespaceSelector, &out.NamespaceSelector - *out = new(LabelSelector) - if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(*in, *out, s); err != nil { - return err - } - } else { - out.NamespaceSelector = nil - } - return nil -} - -func Convert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in *extensions.NetworkPolicyPeer, out *NetworkPolicyPeer, s conversion.Scope) error { - return autoConvert_extensions_NetworkPolicyPeer_To_v1beta1_NetworkPolicyPeer(in, out, s) -} - -func autoConvert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(in *NetworkPolicyPort, out *extensions.NetworkPolicyPort, s conversion.Scope) error { - if in.Protocol != nil { - in, out := &in.Protocol, &out.Protocol - *out = new(api.Protocol) - **out = api.Protocol(**in) - } else { - out.Protocol = nil - } - out.Port = in.Port - return nil -} - -func Convert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(in *NetworkPolicyPort, out 
*extensions.NetworkPolicyPort, s conversion.Scope) error { - return autoConvert_v1beta1_NetworkPolicyPort_To_extensions_NetworkPolicyPort(in, out, s) -} - -func autoConvert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in *extensions.NetworkPolicyPort, out *NetworkPolicyPort, s conversion.Scope) error { - if in.Protocol != nil { - in, out := &in.Protocol, &out.Protocol - *out = new(v1.Protocol) - **out = v1.Protocol(**in) - } else { - out.Protocol = nil - } - out.Port = in.Port - return nil -} - -func Convert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in *extensions.NetworkPolicyPort, out *NetworkPolicyPort, s conversion.Scope) error { - return autoConvert_extensions_NetworkPolicyPort_To_v1beta1_NetworkPolicyPort(in, out, s) -} - -func autoConvert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(in *NetworkPolicySpec, out *extensions.NetworkPolicySpec, s conversion.Scope) error { - if err := Convert_v1beta1_LabelSelector_To_unversioned_LabelSelector(&in.PodSelector, &out.PodSelector, s); err != nil { - return err - } - if in.Ingress != nil { - in, out := &in.Ingress, &out.Ingress - *out = make([]extensions.NetworkPolicyIngressRule, len(*in)) - for i := range *in { - if err := Convert_v1beta1_NetworkPolicyIngressRule_To_extensions_NetworkPolicyIngressRule(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Ingress = nil - } - return nil -} - -func Convert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(in *NetworkPolicySpec, out *extensions.NetworkPolicySpec, s conversion.Scope) error { - return autoConvert_v1beta1_NetworkPolicySpec_To_extensions_NetworkPolicySpec(in, out, s) -} - -func autoConvert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in *extensions.NetworkPolicySpec, out *NetworkPolicySpec, s conversion.Scope) error { - if err := Convert_unversioned_LabelSelector_To_v1beta1_LabelSelector(&in.PodSelector, &out.PodSelector, s); err != nil { - return err - } - if in.Ingress != nil { - in, out := &in.Ingress, &out.Ingress - *out = make([]NetworkPolicyIngressRule, len(*in)) - for i := range *in { - if err := Convert_extensions_NetworkPolicyIngressRule_To_v1beta1_NetworkPolicyIngressRule(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Ingress = nil - } - return nil -} - -func Convert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in *extensions.NetworkPolicySpec, out *NetworkPolicySpec, s conversion.Scope) error { - return autoConvert_extensions_NetworkPolicySpec_To_v1beta1_NetworkPolicySpec(in, out, s) -} - -func autoConvert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in *PodSecurityPolicy, out *extensions.PodSecurityPolicy, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in *PodSecurityPolicy, out *extensions.PodSecurityPolicy, s conversion.Scope) error { - return autoConvert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(in, out, s) -} - -func autoConvert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in *extensions.PodSecurityPolicy, out *PodSecurityPolicy, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in *extensions.PodSecurityPolicy, out *PodSecurityPolicy, s conversion.Scope) error { - return autoConvert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(in, out, s) -} - -func autoConvert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList(in *PodSecurityPolicyList, out *extensions.PodSecurityPolicyList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]extensions.PodSecurityPolicy, len(*in)) - for i := range *in { - if err := Convert_v1beta1_PodSecurityPolicy_To_extensions_PodSecurityPolicy(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList(in *PodSecurityPolicyList, out *extensions.PodSecurityPolicyList, s conversion.Scope) error { - return autoConvert_v1beta1_PodSecurityPolicyList_To_extensions_PodSecurityPolicyList(in, out, s) -} - -func autoConvert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(in *extensions.PodSecurityPolicyList, out *PodSecurityPolicyList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodSecurityPolicy, len(*in)) - for i := range *in { - if err := Convert_extensions_PodSecurityPolicy_To_v1beta1_PodSecurityPolicy(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(in *extensions.PodSecurityPolicyList, out *PodSecurityPolicyList, s conversion.Scope) error { - return autoConvert_extensions_PodSecurityPolicyList_To_v1beta1_PodSecurityPolicyList(in, out, s) -} - -func autoConvert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in *PodSecurityPolicySpec, out 
*extensions.PodSecurityPolicySpec, s conversion.Scope) error { - out.Privileged = in.Privileged - if in.DefaultAddCapabilities != nil { - in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities - *out = make([]api.Capability, len(*in)) - for i := range *in { - (*out)[i] = api.Capability((*in)[i]) - } - } else { - out.DefaultAddCapabilities = nil - } - if in.RequiredDropCapabilities != nil { - in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities - *out = make([]api.Capability, len(*in)) - for i := range *in { - (*out)[i] = api.Capability((*in)[i]) - } - } else { - out.RequiredDropCapabilities = nil - } - if in.AllowedCapabilities != nil { - in, out := &in.AllowedCapabilities, &out.AllowedCapabilities - *out = make([]api.Capability, len(*in)) - for i := range *in { - (*out)[i] = api.Capability((*in)[i]) - } - } else { - out.AllowedCapabilities = nil - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]extensions.FSType, len(*in)) - for i := range *in { - (*out)[i] = extensions.FSType((*in)[i]) - } - } else { - out.Volumes = nil - } - out.HostNetwork = in.HostNetwork - if in.HostPorts != nil { - in, out := &in.HostPorts, &out.HostPorts - *out = make([]extensions.HostPortRange, len(*in)) - for i := range *in { - if err := Convert_v1beta1_HostPortRange_To_extensions_HostPortRange(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.HostPorts = nil - } - out.HostPID = in.HostPID - out.HostIPC = in.HostIPC - if err := Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, s); err != nil { - return err - } - if err := Convert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil { - return err - } - if err := Convert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil { - return err - } - if err := Convert_v1beta1_FSGroupStrategyOptions_To_extensions_FSGroupStrategyOptions(&in.FSGroup, &out.FSGroup, s); err != nil { - return err - } - out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem - return nil -} - -func Convert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in *PodSecurityPolicySpec, out *extensions.PodSecurityPolicySpec, s conversion.Scope) error { - return autoConvert_v1beta1_PodSecurityPolicySpec_To_extensions_PodSecurityPolicySpec(in, out, s) -} - -func autoConvert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in *extensions.PodSecurityPolicySpec, out *PodSecurityPolicySpec, s conversion.Scope) error { - out.Privileged = in.Privileged - if in.DefaultAddCapabilities != nil { - in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities - *out = make([]v1.Capability, len(*in)) - for i := range *in { - (*out)[i] = v1.Capability((*in)[i]) - } - } else { - out.DefaultAddCapabilities = nil - } - if in.RequiredDropCapabilities != nil { - in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities - *out = make([]v1.Capability, len(*in)) - for i := range *in { - (*out)[i] = v1.Capability((*in)[i]) - } - } else { - out.RequiredDropCapabilities = nil - } - if in.AllowedCapabilities != nil { - in, out := &in.AllowedCapabilities, &out.AllowedCapabilities - *out = make([]v1.Capability, len(*in)) - for i := range *in { - (*out)[i] = v1.Capability((*in)[i]) - } - } else { - out.AllowedCapabilities = nil - } - if in.Volumes != nil { - in, out := 
&in.Volumes, &out.Volumes - *out = make([]FSType, len(*in)) - for i := range *in { - (*out)[i] = FSType((*in)[i]) - } - } else { - out.Volumes = nil - } - out.HostNetwork = in.HostNetwork - if in.HostPorts != nil { - in, out := &in.HostPorts, &out.HostPorts - *out = make([]HostPortRange, len(*in)) - for i := range *in { - if err := Convert_extensions_HostPortRange_To_v1beta1_HostPortRange(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.HostPorts = nil - } - out.HostPID = in.HostPID - out.HostIPC = in.HostIPC - if err := Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(&in.SELinux, &out.SELinux, s); err != nil { - return err - } - if err := Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(&in.RunAsUser, &out.RunAsUser, s); err != nil { - return err - } - if err := Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(&in.SupplementalGroups, &out.SupplementalGroups, s); err != nil { - return err - } - if err := Convert_extensions_FSGroupStrategyOptions_To_v1beta1_FSGroupStrategyOptions(&in.FSGroup, &out.FSGroup, s); err != nil { - return err - } - out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem - return nil -} - -func Convert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in *extensions.PodSecurityPolicySpec, out *PodSecurityPolicySpec, s conversion.Scope) error { - return autoConvert_extensions_PodSecurityPolicySpec_To_v1beta1_PodSecurityPolicySpec(in, out, s) -} - -func autoConvert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in *ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { - SetDefaults_ReplicaSet(in) - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v1beta1_ReplicaSetSpec_To_extensions_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in *ReplicaSet, out *extensions.ReplicaSet, s conversion.Scope) error { - return autoConvert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in, out, s) -} - -func autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaSet, out *ReplicaSet, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_extensions_ReplicaSetSpec_To_v1beta1_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in *extensions.ReplicaSet, out *ReplicaSet, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(in, out, s) -} - -func autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in *ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]extensions.ReplicaSet, len(*in)) - for i := range *in { - if err := Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in *ReplicaSetList, out *extensions.ReplicaSetList, s conversion.Scope) error { - return autoConvert_v1beta1_ReplicaSetList_To_extensions_ReplicaSetList(in, out, s) -} - -func autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions.ReplicaSetList, out *ReplicaSetList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ReplicaSet, len(*in)) - for i := range *in { - if err := Convert_extensions_ReplicaSet_To_v1beta1_ReplicaSet(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in *extensions.ReplicaSetList, out *ReplicaSetList, s conversion.Scope) error { - return autoConvert_extensions_ReplicaSetList_To_v1beta1_ReplicaSetList(in, out, s) -} - -func autoConvert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ObservedGeneration = in.ObservedGeneration - return nil -} - -func Convert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in *ReplicaSetStatus, out *extensions.ReplicaSetStatus, s conversion.Scope) error { - return autoConvert_v1beta1_ReplicaSetStatus_To_extensions_ReplicaSetStatus(in, out, s) -} - -func autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *ReplicaSetStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ObservedGeneration = in.ObservedGeneration - return nil -} - -func Convert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in *extensions.ReplicaSetStatus, out *ReplicaSetStatus, s conversion.Scope) error { - return 
autoConvert_extensions_ReplicaSetStatus_To_v1beta1_ReplicaSetStatus(in, out, s) -} - -func autoConvert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy(in *ReplicationControllerDummy, out *extensions.ReplicationControllerDummy, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy(in *ReplicationControllerDummy, out *extensions.ReplicationControllerDummy, s conversion.Scope) error { - return autoConvert_v1beta1_ReplicationControllerDummy_To_extensions_ReplicationControllerDummy(in, out, s) -} - -func autoConvert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in *extensions.ReplicationControllerDummy, out *ReplicationControllerDummy, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in *extensions.ReplicationControllerDummy, out *ReplicationControllerDummy, s conversion.Scope) error { - return autoConvert_extensions_ReplicationControllerDummy_To_v1beta1_ReplicationControllerDummy(in, out, s) -} - -func autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error { - out.Revision = in.Revision - return nil -} - -func Convert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in *RollbackConfig, out *extensions.RollbackConfig, s conversion.Scope) error { - return autoConvert_v1beta1_RollbackConfig_To_extensions_RollbackConfig(in, out, s) -} - -func autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *RollbackConfig, s conversion.Scope) error { - out.Revision = in.Revision - return nil -} - -func Convert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in *extensions.RollbackConfig, out *RollbackConfig, s conversion.Scope) error { - return autoConvert_extensions_RollbackConfig_To_v1beta1_RollbackConfig(in, out, s) -} - -func autoConvert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(in *RunAsUserStrategyOptions, out *extensions.RunAsUserStrategyOptions, s conversion.Scope) error { - out.Rule = extensions.RunAsUserStrategy(in.Rule) - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]extensions.IDRange, len(*in)) - for i := range *in { - if err := Convert_v1beta1_IDRange_To_extensions_IDRange(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Ranges = nil - } - return nil -} - -func Convert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(in *RunAsUserStrategyOptions, out *extensions.RunAsUserStrategyOptions, s conversion.Scope) error { - return autoConvert_v1beta1_RunAsUserStrategyOptions_To_extensions_RunAsUserStrategyOptions(in, out, s) -} - -func autoConvert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(in *extensions.RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, s conversion.Scope) error { - out.Rule = RunAsUserStrategy(in.Rule) - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - for i := range *in { - if err := Convert_extensions_IDRange_To_v1beta1_IDRange(&(*in)[i], &(*out)[i], s); err != nil { - 
return err - } - } - } else { - out.Ranges = nil - } - return nil -} - -func Convert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(in *extensions.RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, s conversion.Scope) error { - return autoConvert_extensions_RunAsUserStrategyOptions_To_v1beta1_RunAsUserStrategyOptions(in, out, s) -} - -func autoConvert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(in *SELinuxStrategyOptions, out *extensions.SELinuxStrategyOptions, s conversion.Scope) error { - out.Rule = extensions.SELinuxStrategy(in.Rule) - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(api.SELinuxOptions) - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(*in, *out, 0); err != nil { - return err - } - } else { - out.SELinuxOptions = nil - } - return nil -} - -func Convert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(in *SELinuxStrategyOptions, out *extensions.SELinuxStrategyOptions, s conversion.Scope) error { - return autoConvert_v1beta1_SELinuxStrategyOptions_To_extensions_SELinuxStrategyOptions(in, out, s) -} - -func autoConvert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in *extensions.SELinuxStrategyOptions, out *SELinuxStrategyOptions, s conversion.Scope) error { - out.Rule = SELinuxStrategy(in.Rule) - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(v1.SELinuxOptions) - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(*in, *out, 0); err != nil { - return err - } - } else { - out.SELinuxOptions = nil - } - return nil -} - -func Convert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in *extensions.SELinuxStrategyOptions, out *SELinuxStrategyOptions, s conversion.Scope) error { - return autoConvert_extensions_SELinuxStrategyOptions_To_v1beta1_SELinuxStrategyOptions(in, out, s) -} - -func autoConvert_v1beta1_Scale_To_extensions_Scale(in *Scale, out *extensions.Scale, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_Scale_To_extensions_Scale(in *Scale, out *extensions.Scale, s conversion.Scope) error { - return autoConvert_v1beta1_Scale_To_extensions_Scale(in, out, s) -} - -func autoConvert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *Scale, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_Scale_To_v1beta1_Scale(in *extensions.Scale, out *Scale, s conversion.Scope) error { - return autoConvert_extensions_Scale_To_v1beta1_Scale(in, out, s) -} - -func autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in *ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error { - out.Replicas = in.Replicas - return nil -} - -func Convert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in *ScaleSpec, out *extensions.ScaleSpec, s conversion.Scope) error { - return autoConvert_v1beta1_ScaleSpec_To_extensions_ScaleSpec(in, out, s) -} - -func autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { - out.Replicas = in.Replicas - return nil -} - -func Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, out *ScaleSpec, s conversion.Scope) error { - return autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s) -} - -func autoConvert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(in *SupplementalGroupsStrategyOptions, out *extensions.SupplementalGroupsStrategyOptions, s conversion.Scope) error { - out.Rule = extensions.SupplementalGroupsStrategyType(in.Rule) - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]extensions.IDRange, len(*in)) - for i := range *in { - if err := Convert_v1beta1_IDRange_To_extensions_IDRange(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Ranges = nil - } - return nil -} - -func Convert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(in *SupplementalGroupsStrategyOptions, out *extensions.SupplementalGroupsStrategyOptions, s conversion.Scope) error { - return autoConvert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(in, out, s) -} - -func autoConvert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(in *extensions.SupplementalGroupsStrategyOptions, out *SupplementalGroupsStrategyOptions, s conversion.Scope) error { - out.Rule = SupplementalGroupsStrategyType(in.Rule) - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - for i := range *in { - if err := Convert_extensions_IDRange_To_v1beta1_IDRange(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Ranges = nil - } - return nil -} - -func Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(in *extensions.SupplementalGroupsStrategyOptions, out *SupplementalGroupsStrategyOptions, s conversion.Scope) error { - return autoConvert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions(in, out, s) -} - -func autoConvert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in *ThirdPartyResource, out *extensions.ThirdPartyResource, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - out.Description = in.Description - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]extensions.APIVersion, len(*in)) - for i := range *in { - if err := Convert_v1beta1_APIVersion_To_extensions_APIVersion(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Versions = nil - } - return nil -} - -func Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in *ThirdPartyResource, out *extensions.ThirdPartyResource, s conversion.Scope) error { - return autoConvert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(in, out, s) -} - -func autoConvert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in *extensions.ThirdPartyResource, out *ThirdPartyResource, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - out.Description = in.Description - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]APIVersion, len(*in)) - for i := range *in { - if err := Convert_extensions_APIVersion_To_v1beta1_APIVersion(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Versions = nil - } - return nil -} - -func Convert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in *extensions.ThirdPartyResource, out *ThirdPartyResource, s conversion.Scope) error { - return autoConvert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(in, out, s) -} - -func autoConvert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in *ThirdPartyResourceData, out *extensions.ThirdPartyResourceData, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := conversion.Convert_Slice_byte_To_Slice_byte(&in.Data, &out.Data, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in *ThirdPartyResourceData, out *extensions.ThirdPartyResourceData, s conversion.Scope) error { - return autoConvert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(in, out, s) -} - -func autoConvert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in *extensions.ThirdPartyResourceData, out *ThirdPartyResourceData, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := conversion.Convert_Slice_byte_To_Slice_byte(&in.Data, &out.Data, s); err != nil { - return err - } - return nil -} - -func Convert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in *extensions.ThirdPartyResourceData, out *ThirdPartyResourceData, s conversion.Scope) error { - return autoConvert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(in, out, s) -} - -func autoConvert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList(in *ThirdPartyResourceDataList, out *extensions.ThirdPartyResourceDataList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]extensions.ThirdPartyResourceData, len(*in)) - for i := range *in { - if err := Convert_v1beta1_ThirdPartyResourceData_To_extensions_ThirdPartyResourceData(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList(in *ThirdPartyResourceDataList, out *extensions.ThirdPartyResourceDataList, s conversion.Scope) error { - return autoConvert_v1beta1_ThirdPartyResourceDataList_To_extensions_ThirdPartyResourceDataList(in, out, s) -} - -func autoConvert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in *extensions.ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ThirdPartyResourceData, len(*in)) - for i := range *in { - if err := Convert_extensions_ThirdPartyResourceData_To_v1beta1_ThirdPartyResourceData(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in *extensions.ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, s conversion.Scope) error { - return autoConvert_extensions_ThirdPartyResourceDataList_To_v1beta1_ThirdPartyResourceDataList(in, out, s) -} - -func autoConvert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList(in *ThirdPartyResourceList, out *extensions.ThirdPartyResourceList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]extensions.ThirdPartyResource, len(*in)) - for i := range *in { - if err := Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func 
Convert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList(in *ThirdPartyResourceList, out *extensions.ThirdPartyResourceList, s conversion.Scope) error { - return autoConvert_v1beta1_ThirdPartyResourceList_To_extensions_ThirdPartyResourceList(in, out, s) -} - -func autoConvert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in *extensions.ThirdPartyResourceList, out *ThirdPartyResourceList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ThirdPartyResource, len(*in)) - for i := range *in { - if err := Convert_extensions_ThirdPartyResource_To_v1beta1_ThirdPartyResource(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in *extensions.ThirdPartyResourceList, out *ThirdPartyResourceList, s conversion.Scope) error { - return autoConvert_extensions_ThirdPartyResourceList_To_v1beta1_ThirdPartyResourceList(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/deep_copy_generated.go deleted file mode 100644 index 32debd197..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/deep_copy_generated.go +++ /dev/null @@ -1,1297 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
- -package v1beta1 - -import ( - api "k8s.io/kubernetes/pkg/api" - resource "k8s.io/kubernetes/pkg/api/resource" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" - intstr "k8s.io/kubernetes/pkg/util/intstr" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_v1beta1_APIVersion, - DeepCopy_v1beta1_CPUTargetUtilization, - DeepCopy_v1beta1_CustomMetricCurrentStatus, - DeepCopy_v1beta1_CustomMetricCurrentStatusList, - DeepCopy_v1beta1_CustomMetricTarget, - DeepCopy_v1beta1_CustomMetricTargetList, - DeepCopy_v1beta1_DaemonSet, - DeepCopy_v1beta1_DaemonSetList, - DeepCopy_v1beta1_DaemonSetSpec, - DeepCopy_v1beta1_DaemonSetStatus, - DeepCopy_v1beta1_Deployment, - DeepCopy_v1beta1_DeploymentList, - DeepCopy_v1beta1_DeploymentRollback, - DeepCopy_v1beta1_DeploymentSpec, - DeepCopy_v1beta1_DeploymentStatus, - DeepCopy_v1beta1_DeploymentStrategy, - DeepCopy_v1beta1_ExportOptions, - DeepCopy_v1beta1_FSGroupStrategyOptions, - DeepCopy_v1beta1_HTTPIngressPath, - DeepCopy_v1beta1_HTTPIngressRuleValue, - DeepCopy_v1beta1_HorizontalPodAutoscaler, - DeepCopy_v1beta1_HorizontalPodAutoscalerList, - DeepCopy_v1beta1_HorizontalPodAutoscalerSpec, - DeepCopy_v1beta1_HorizontalPodAutoscalerStatus, - DeepCopy_v1beta1_HostPortRange, - DeepCopy_v1beta1_IDRange, - DeepCopy_v1beta1_Ingress, - DeepCopy_v1beta1_IngressBackend, - DeepCopy_v1beta1_IngressList, - DeepCopy_v1beta1_IngressRule, - DeepCopy_v1beta1_IngressRuleValue, - DeepCopy_v1beta1_IngressSpec, - DeepCopy_v1beta1_IngressStatus, - DeepCopy_v1beta1_IngressTLS, - DeepCopy_v1beta1_Job, - DeepCopy_v1beta1_JobCondition, - DeepCopy_v1beta1_JobList, - DeepCopy_v1beta1_JobSpec, - DeepCopy_v1beta1_JobStatus, - DeepCopy_v1beta1_LabelSelector, - DeepCopy_v1beta1_LabelSelectorRequirement, - DeepCopy_v1beta1_ListOptions, - DeepCopy_v1beta1_NetworkPolicy, - DeepCopy_v1beta1_NetworkPolicyIngressRule, - DeepCopy_v1beta1_NetworkPolicyList, - DeepCopy_v1beta1_NetworkPolicyPeer, - DeepCopy_v1beta1_NetworkPolicyPort, - DeepCopy_v1beta1_NetworkPolicySpec, - DeepCopy_v1beta1_PodSecurityPolicy, - DeepCopy_v1beta1_PodSecurityPolicyList, - DeepCopy_v1beta1_PodSecurityPolicySpec, - DeepCopy_v1beta1_ReplicaSet, - DeepCopy_v1beta1_ReplicaSetList, - DeepCopy_v1beta1_ReplicaSetSpec, - DeepCopy_v1beta1_ReplicaSetStatus, - DeepCopy_v1beta1_ReplicationControllerDummy, - DeepCopy_v1beta1_RollbackConfig, - DeepCopy_v1beta1_RollingUpdateDeployment, - DeepCopy_v1beta1_RunAsUserStrategyOptions, - DeepCopy_v1beta1_SELinuxStrategyOptions, - DeepCopy_v1beta1_Scale, - DeepCopy_v1beta1_ScaleSpec, - DeepCopy_v1beta1_ScaleStatus, - DeepCopy_v1beta1_SubresourceReference, - DeepCopy_v1beta1_SupplementalGroupsStrategyOptions, - DeepCopy_v1beta1_ThirdPartyResource, - DeepCopy_v1beta1_ThirdPartyResourceData, - DeepCopy_v1beta1_ThirdPartyResourceDataList, - DeepCopy_v1beta1_ThirdPartyResourceList, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. 
- panic(err) - } -} - -func DeepCopy_v1beta1_APIVersion(in APIVersion, out *APIVersion, c *conversion.Cloner) error { - out.Name = in.Name - return nil -} - -func DeepCopy_v1beta1_CPUTargetUtilization(in CPUTargetUtilization, out *CPUTargetUtilization, c *conversion.Cloner) error { - out.TargetPercentage = in.TargetPercentage - return nil -} - -func DeepCopy_v1beta1_CustomMetricCurrentStatus(in CustomMetricCurrentStatus, out *CustomMetricCurrentStatus, c *conversion.Cloner) error { - out.Name = in.Name - if err := resource.DeepCopy_resource_Quantity(in.CurrentValue, &out.CurrentValue, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_CustomMetricCurrentStatusList(in CustomMetricCurrentStatusList, out *CustomMetricCurrentStatusList, c *conversion.Cloner) error { - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]CustomMetricCurrentStatus, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_CustomMetricCurrentStatus(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1beta1_CustomMetricTarget(in CustomMetricTarget, out *CustomMetricTarget, c *conversion.Cloner) error { - out.Name = in.Name - if err := resource.DeepCopy_resource_Quantity(in.TargetValue, &out.TargetValue, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_CustomMetricTargetList(in CustomMetricTargetList, out *CustomMetricTargetList, c *conversion.Cloner) error { - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]CustomMetricTarget, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_CustomMetricTarget(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1beta1_DaemonSet(in DaemonSet, out *DaemonSet, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_DaemonSetSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_DaemonSetStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]DaemonSet, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_DaemonSet(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1beta1_DaemonSetSpec(in DaemonSetSpec, out *DaemonSetSpec, c *conversion.Cloner) error { - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = new(LabelSelector) - if err := DeepCopy_v1beta1_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_DaemonSetStatus(in DaemonSetStatus, out *DaemonSetStatus, c *conversion.Cloner) error { - out.CurrentNumberScheduled = in.CurrentNumberScheduled - out.NumberMisscheduled = 
in.NumberMisscheduled - out.DesiredNumberScheduled = in.DesiredNumberScheduled - return nil -} - -func DeepCopy_v1beta1_Deployment(in Deployment, out *Deployment, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_DeploymentSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_DeploymentStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_DeploymentList(in DeploymentList, out *DeploymentList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Deployment, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_Deployment(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1beta1_DeploymentRollback(in DeploymentRollback, out *DeploymentRollback, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Name = in.Name - if in.UpdatedAnnotations != nil { - in, out := in.UpdatedAnnotations, &out.UpdatedAnnotations - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.UpdatedAnnotations = nil - } - if err := DeepCopy_v1beta1_RollbackConfig(in.RollbackTo, &out.RollbackTo, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *conversion.Cloner) error { - if in.Replicas != nil { - in, out := in.Replicas, &out.Replicas - *out = new(int32) - **out = *in - } else { - out.Replicas = nil - } - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = new(LabelSelector) - if err := DeepCopy_v1beta1_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_DeploymentStrategy(in.Strategy, &out.Strategy, c); err != nil { - return err - } - out.MinReadySeconds = in.MinReadySeconds - if in.RevisionHistoryLimit != nil { - in, out := in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = *in - } else { - out.RevisionHistoryLimit = nil - } - out.Paused = in.Paused - if in.RollbackTo != nil { - in, out := in.RollbackTo, &out.RollbackTo - *out = new(RollbackConfig) - if err := DeepCopy_v1beta1_RollbackConfig(*in, *out, c); err != nil { - return err - } - } else { - out.RollbackTo = nil - } - return nil -} - -func DeepCopy_v1beta1_DeploymentStatus(in DeploymentStatus, out *DeploymentStatus, c *conversion.Cloner) error { - out.ObservedGeneration = in.ObservedGeneration - out.Replicas = in.Replicas - out.UpdatedReplicas = in.UpdatedReplicas - out.AvailableReplicas = in.AvailableReplicas - out.UnavailableReplicas = in.UnavailableReplicas - return nil -} - -func DeepCopy_v1beta1_DeploymentStrategy(in DeploymentStrategy, out *DeploymentStrategy, c *conversion.Cloner) error { - out.Type = in.Type - if in.RollingUpdate != nil { - in, 
out := in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateDeployment) - if err := DeepCopy_v1beta1_RollingUpdateDeployment(*in, *out, c); err != nil { - return err - } - } else { - out.RollingUpdate = nil - } - return nil -} - -func DeepCopy_v1beta1_ExportOptions(in ExportOptions, out *ExportOptions, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.Export = in.Export - out.Exact = in.Exact - return nil -} - -func DeepCopy_v1beta1_FSGroupStrategyOptions(in FSGroupStrategyOptions, out *FSGroupStrategyOptions, c *conversion.Cloner) error { - out.Rule = in.Rule - if in.Ranges != nil { - in, out := in.Ranges, &out.Ranges - *out = make([]IDRange, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_IDRange(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Ranges = nil - } - return nil -} - -func DeepCopy_v1beta1_HTTPIngressPath(in HTTPIngressPath, out *HTTPIngressPath, c *conversion.Cloner) error { - out.Path = in.Path - if err := DeepCopy_v1beta1_IngressBackend(in.Backend, &out.Backend, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_HTTPIngressRuleValue(in HTTPIngressRuleValue, out *HTTPIngressRuleValue, c *conversion.Cloner) error { - if in.Paths != nil { - in, out := in.Paths, &out.Paths - *out = make([]HTTPIngressPath, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_HTTPIngressPath(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Paths = nil - } - return nil -} - -func DeepCopy_v1beta1_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_HorizontalPodAutoscalerStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]HorizontalPodAutoscaler, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_HorizontalPodAutoscaler(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1beta1_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error { - if err := DeepCopy_v1beta1_SubresourceReference(in.ScaleRef, &out.ScaleRef, c); err != nil { - return err - } - if in.MinReplicas != nil { - in, out := in.MinReplicas, &out.MinReplicas - *out = new(int32) - **out = *in - } else { - out.MinReplicas = nil - } - out.MaxReplicas = in.MaxReplicas - if in.CPUUtilization != nil { - in, out := in.CPUUtilization, &out.CPUUtilization - *out = new(CPUTargetUtilization) - if err := DeepCopy_v1beta1_CPUTargetUtilization(*in, *out, c); err != nil { - return err - } - } else { - out.CPUUtilization = 
nil - } - return nil -} - -func DeepCopy_v1beta1_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error { - if in.ObservedGeneration != nil { - in, out := in.ObservedGeneration, &out.ObservedGeneration - *out = new(int64) - **out = *in - } else { - out.ObservedGeneration = nil - } - if in.LastScaleTime != nil { - in, out := in.LastScaleTime, &out.LastScaleTime - *out = new(unversioned.Time) - if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { - return err - } - } else { - out.LastScaleTime = nil - } - out.CurrentReplicas = in.CurrentReplicas - out.DesiredReplicas = in.DesiredReplicas - if in.CurrentCPUUtilizationPercentage != nil { - in, out := in.CurrentCPUUtilizationPercentage, &out.CurrentCPUUtilizationPercentage - *out = new(int32) - **out = *in - } else { - out.CurrentCPUUtilizationPercentage = nil - } - return nil -} - -func DeepCopy_v1beta1_HostPortRange(in HostPortRange, out *HostPortRange, c *conversion.Cloner) error { - out.Min = in.Min - out.Max = in.Max - return nil -} - -func DeepCopy_v1beta1_IDRange(in IDRange, out *IDRange, c *conversion.Cloner) error { - out.Min = in.Min - out.Max = in.Max - return nil -} - -func DeepCopy_v1beta1_Ingress(in Ingress, out *Ingress, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_IngressSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_IngressStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_IngressBackend(in IngressBackend, out *IngressBackend, c *conversion.Cloner) error { - out.ServiceName = in.ServiceName - if err := intstr.DeepCopy_intstr_IntOrString(in.ServicePort, &out.ServicePort, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_IngressList(in IngressList, out *IngressList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Ingress, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_Ingress(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1beta1_IngressRule(in IngressRule, out *IngressRule, c *conversion.Cloner) error { - out.Host = in.Host - if err := DeepCopy_v1beta1_IngressRuleValue(in.IngressRuleValue, &out.IngressRuleValue, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_IngressRuleValue(in IngressRuleValue, out *IngressRuleValue, c *conversion.Cloner) error { - if in.HTTP != nil { - in, out := in.HTTP, &out.HTTP - *out = new(HTTPIngressRuleValue) - if err := DeepCopy_v1beta1_HTTPIngressRuleValue(*in, *out, c); err != nil { - return err - } - } else { - out.HTTP = nil - } - return nil -} - -func DeepCopy_v1beta1_IngressSpec(in IngressSpec, out *IngressSpec, c *conversion.Cloner) error { - if in.Backend != nil { - in, out := in.Backend, &out.Backend - *out = new(IngressBackend) - if err := DeepCopy_v1beta1_IngressBackend(*in, *out, c); err != nil { - return err - } - } else { - out.Backend = nil - } - if 
in.TLS != nil { - in, out := in.TLS, &out.TLS - *out = make([]IngressTLS, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_IngressTLS(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.TLS = nil - } - if in.Rules != nil { - in, out := in.Rules, &out.Rules - *out = make([]IngressRule, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_IngressRule(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Rules = nil - } - return nil -} - -func DeepCopy_v1beta1_IngressStatus(in IngressStatus, out *IngressStatus, c *conversion.Cloner) error { - if err := v1.DeepCopy_v1_LoadBalancerStatus(in.LoadBalancer, &out.LoadBalancer, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_IngressTLS(in IngressTLS, out *IngressTLS, c *conversion.Cloner) error { - if in.Hosts != nil { - in, out := in.Hosts, &out.Hosts - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Hosts = nil - } - out.SecretName = in.SecretName - return nil -} - -func DeepCopy_v1beta1_Job(in Job, out *Job, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_JobSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_JobStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error { - out.Type = in.Type - out.Status = in.Status - if err := unversioned.DeepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_Time(in.LastTransitionTime, &out.LastTransitionTime, c); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func DeepCopy_v1beta1_JobList(in JobList, out *JobList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Job, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_Job(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1beta1_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error { - if in.Parallelism != nil { - in, out := in.Parallelism, &out.Parallelism - *out = new(int32) - **out = *in - } else { - out.Parallelism = nil - } - if in.Completions != nil { - in, out := in.Completions, &out.Completions - *out = new(int32) - **out = *in - } else { - out.Completions = nil - } - if in.ActiveDeadlineSeconds != nil { - in, out := in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - *out = new(int64) - **out = *in - } else { - out.ActiveDeadlineSeconds = nil - } - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = new(LabelSelector) - if err := DeepCopy_v1beta1_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.Selector = nil - } - if in.AutoSelector != nil { - in, out := in.AutoSelector, &out.AutoSelector - *out = new(bool) - **out = *in - } else { - out.AutoSelector = nil - } - if err := 
v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error { - if in.Conditions != nil { - in, out := in.Conditions, &out.Conditions - *out = make([]JobCondition, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_JobCondition(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - if in.StartTime != nil { - in, out := in.StartTime, &out.StartTime - *out = new(unversioned.Time) - if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { - return err - } - } else { - out.StartTime = nil - } - if in.CompletionTime != nil { - in, out := in.CompletionTime, &out.CompletionTime - *out = new(unversioned.Time) - if err := unversioned.DeepCopy_unversioned_Time(*in, *out, c); err != nil { - return err - } - } else { - out.CompletionTime = nil - } - out.Active = in.Active - out.Succeeded = in.Succeeded - out.Failed = in.Failed - return nil -} - -func DeepCopy_v1beta1_LabelSelector(in LabelSelector, out *LabelSelector, c *conversion.Cloner) error { - if in.MatchLabels != nil { - in, out := in.MatchLabels, &out.MatchLabels - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.MatchLabels = nil - } - if in.MatchExpressions != nil { - in, out := in.MatchExpressions, &out.MatchExpressions - *out = make([]LabelSelectorRequirement, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_LabelSelectorRequirement(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.MatchExpressions = nil - } - return nil -} - -func DeepCopy_v1beta1_LabelSelectorRequirement(in LabelSelectorRequirement, out *LabelSelectorRequirement, c *conversion.Cloner) error { - out.Key = in.Key - out.Operator = in.Operator - if in.Values != nil { - in, out := in.Values, &out.Values - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Values = nil - } - return nil -} - -func DeepCopy_v1beta1_ListOptions(in ListOptions, out *ListOptions, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - out.LabelSelector = in.LabelSelector - out.FieldSelector = in.FieldSelector - out.Watch = in.Watch - out.ResourceVersion = in.ResourceVersion - if in.TimeoutSeconds != nil { - in, out := in.TimeoutSeconds, &out.TimeoutSeconds - *out = new(int64) - **out = *in - } else { - out.TimeoutSeconds = nil - } - return nil -} - -func DeepCopy_v1beta1_NetworkPolicy(in NetworkPolicy, out *NetworkPolicy, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_NetworkPolicySpec(in.Spec, &out.Spec, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_NetworkPolicyIngressRule(in NetworkPolicyIngressRule, out *NetworkPolicyIngressRule, c *conversion.Cloner) error { - if in.Ports != nil { - in, out := in.Ports, &out.Ports - *out = make([]NetworkPolicyPort, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_NetworkPolicyPort(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Ports = nil - } - if in.From != nil { - in, out := in.From, &out.From - *out = make([]NetworkPolicyPeer, len(in)) - for i := range in { - if err 
:= DeepCopy_v1beta1_NetworkPolicyPeer(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.From = nil - } - return nil -} - -func DeepCopy_v1beta1_NetworkPolicyList(in NetworkPolicyList, out *NetworkPolicyList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]NetworkPolicy, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_NetworkPolicy(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1beta1_NetworkPolicyPeer(in NetworkPolicyPeer, out *NetworkPolicyPeer, c *conversion.Cloner) error { - if in.PodSelector != nil { - in, out := in.PodSelector, &out.PodSelector - *out = new(LabelSelector) - if err := DeepCopy_v1beta1_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.PodSelector = nil - } - if in.NamespaceSelector != nil { - in, out := in.NamespaceSelector, &out.NamespaceSelector - *out = new(LabelSelector) - if err := DeepCopy_v1beta1_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.NamespaceSelector = nil - } - return nil -} - -func DeepCopy_v1beta1_NetworkPolicyPort(in NetworkPolicyPort, out *NetworkPolicyPort, c *conversion.Cloner) error { - if in.Protocol != nil { - in, out := in.Protocol, &out.Protocol - *out = new(v1.Protocol) - **out = *in - } else { - out.Protocol = nil - } - if in.Port != nil { - in, out := in.Port, &out.Port - *out = new(intstr.IntOrString) - if err := intstr.DeepCopy_intstr_IntOrString(*in, *out, c); err != nil { - return err - } - } else { - out.Port = nil - } - return nil -} - -func DeepCopy_v1beta1_NetworkPolicySpec(in NetworkPolicySpec, out *NetworkPolicySpec, c *conversion.Cloner) error { - if err := DeepCopy_v1beta1_LabelSelector(in.PodSelector, &out.PodSelector, c); err != nil { - return err - } - if in.Ingress != nil { - in, out := in.Ingress, &out.Ingress - *out = make([]NetworkPolicyIngressRule, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_NetworkPolicyIngressRule(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Ingress = nil - } - return nil -} - -func DeepCopy_v1beta1_PodSecurityPolicy(in PodSecurityPolicy, out *PodSecurityPolicy, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_PodSecurityPolicySpec(in.Spec, &out.Spec, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_PodSecurityPolicyList(in PodSecurityPolicyList, out *PodSecurityPolicyList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]PodSecurityPolicy, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_PodSecurityPolicy(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1beta1_PodSecurityPolicySpec(in 
PodSecurityPolicySpec, out *PodSecurityPolicySpec, c *conversion.Cloner) error { - out.Privileged = in.Privileged - if in.DefaultAddCapabilities != nil { - in, out := in.DefaultAddCapabilities, &out.DefaultAddCapabilities - *out = make([]v1.Capability, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.DefaultAddCapabilities = nil - } - if in.RequiredDropCapabilities != nil { - in, out := in.RequiredDropCapabilities, &out.RequiredDropCapabilities - *out = make([]v1.Capability, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.RequiredDropCapabilities = nil - } - if in.AllowedCapabilities != nil { - in, out := in.AllowedCapabilities, &out.AllowedCapabilities - *out = make([]v1.Capability, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.AllowedCapabilities = nil - } - if in.Volumes != nil { - in, out := in.Volumes, &out.Volumes - *out = make([]FSType, len(in)) - for i := range in { - (*out)[i] = in[i] - } - } else { - out.Volumes = nil - } - out.HostNetwork = in.HostNetwork - if in.HostPorts != nil { - in, out := in.HostPorts, &out.HostPorts - *out = make([]HostPortRange, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_HostPortRange(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.HostPorts = nil - } - out.HostPID = in.HostPID - out.HostIPC = in.HostIPC - if err := DeepCopy_v1beta1_SELinuxStrategyOptions(in.SELinux, &out.SELinux, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_RunAsUserStrategyOptions(in.RunAsUser, &out.RunAsUser, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_SupplementalGroupsStrategyOptions(in.SupplementalGroups, &out.SupplementalGroups, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_FSGroupStrategyOptions(in.FSGroup, &out.FSGroup, c); err != nil { - return err - } - out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem - return nil -} - -func DeepCopy_v1beta1_ReplicaSet(in ReplicaSet, out *ReplicaSet, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_ReplicaSetSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_ReplicaSetStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_ReplicaSetList(in ReplicaSetList, out *ReplicaSetList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ReplicaSet, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_ReplicaSet(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1beta1_ReplicaSetSpec(in ReplicaSetSpec, out *ReplicaSetSpec, c *conversion.Cloner) error { - if in.Replicas != nil { - in, out := in.Replicas, &out.Replicas - *out = new(int32) - **out = *in - } else { - out.Replicas = nil - } - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = new(LabelSelector) - if err := DeepCopy_v1beta1_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.Selector = nil - } - if err := 
v1.DeepCopy_v1_PodTemplateSpec(in.Template, &out.Template, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_ReplicaSetStatus(in ReplicaSetStatus, out *ReplicaSetStatus, c *conversion.Cloner) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ObservedGeneration = in.ObservedGeneration - return nil -} - -func DeepCopy_v1beta1_ReplicationControllerDummy(in ReplicationControllerDummy, out *ReplicationControllerDummy, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_RollbackConfig(in RollbackConfig, out *RollbackConfig, c *conversion.Cloner) error { - out.Revision = in.Revision - return nil -} - -func DeepCopy_v1beta1_RollingUpdateDeployment(in RollingUpdateDeployment, out *RollingUpdateDeployment, c *conversion.Cloner) error { - if in.MaxUnavailable != nil { - in, out := in.MaxUnavailable, &out.MaxUnavailable - *out = new(intstr.IntOrString) - if err := intstr.DeepCopy_intstr_IntOrString(*in, *out, c); err != nil { - return err - } - } else { - out.MaxUnavailable = nil - } - if in.MaxSurge != nil { - in, out := in.MaxSurge, &out.MaxSurge - *out = new(intstr.IntOrString) - if err := intstr.DeepCopy_intstr_IntOrString(*in, *out, c); err != nil { - return err - } - } else { - out.MaxSurge = nil - } - return nil -} - -func DeepCopy_v1beta1_RunAsUserStrategyOptions(in RunAsUserStrategyOptions, out *RunAsUserStrategyOptions, c *conversion.Cloner) error { - out.Rule = in.Rule - if in.Ranges != nil { - in, out := in.Ranges, &out.Ranges - *out = make([]IDRange, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_IDRange(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Ranges = nil - } - return nil -} - -func DeepCopy_v1beta1_SELinuxStrategyOptions(in SELinuxStrategyOptions, out *SELinuxStrategyOptions, c *conversion.Cloner) error { - out.Rule = in.Rule - if in.SELinuxOptions != nil { - in, out := in.SELinuxOptions, &out.SELinuxOptions - *out = new(v1.SELinuxOptions) - if err := v1.DeepCopy_v1_SELinuxOptions(*in, *out, c); err != nil { - return err - } - } else { - out.SELinuxOptions = nil - } - return nil -} - -func DeepCopy_v1beta1_Scale(in Scale, out *Scale, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_ScaleSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_ScaleStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1beta1_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error { - out.Replicas = in.Replicas - return nil -} - -func DeepCopy_v1beta1_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error { - out.Replicas = in.Replicas - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = make(map[string]string) - for key, val := range in { - (*out)[key] = val - } - } else { - out.Selector = nil - } - out.TargetSelector = in.TargetSelector - return nil -} - -func DeepCopy_v1beta1_SubresourceReference(in SubresourceReference, out *SubresourceReference, c *conversion.Cloner) error { - out.Kind = in.Kind - out.Name = in.Name - out.APIVersion = in.APIVersion - out.Subresource = in.Subresource - return nil -} - 
-func DeepCopy_v1beta1_SupplementalGroupsStrategyOptions(in SupplementalGroupsStrategyOptions, out *SupplementalGroupsStrategyOptions, c *conversion.Cloner) error { - out.Rule = in.Rule - if in.Ranges != nil { - in, out := in.Ranges, &out.Ranges - *out = make([]IDRange, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_IDRange(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Ranges = nil - } - return nil -} - -func DeepCopy_v1beta1_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyResource, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - out.Description = in.Description - if in.Versions != nil { - in, out := in.Versions, &out.Versions - *out = make([]APIVersion, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_APIVersion(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Versions = nil - } - return nil -} - -func DeepCopy_v1beta1_ThirdPartyResourceData(in ThirdPartyResourceData, out *ThirdPartyResourceData, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if in.Data != nil { - in, out := in.Data, &out.Data - *out = make([]byte, len(in)) - copy(*out, in) - } else { - out.Data = nil - } - return nil -} - -func DeepCopy_v1beta1_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ThirdPartyResourceData, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_ThirdPartyResourceData(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1beta1_ThirdPartyResourceList(in ThirdPartyResourceList, out *ThirdPartyResourceList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ThirdPartyResource, len(in)) - for i := range in { - if err := DeepCopy_v1beta1_ThirdPartyResource(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go deleted file mode 100644 index 71e55a467..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/defaults.go +++ /dev/null @@ -1,167 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/intstr" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) { - scheme.AddDefaultingFuncs( - SetDefaults_DaemonSet, - SetDefaults_Deployment, - SetDefaults_Job, - SetDefaults_HorizontalPodAutoscaler, - SetDefaults_ReplicaSet, - SetDefaults_NetworkPolicy, - ) -} - -func SetDefaults_DaemonSet(obj *DaemonSet) { - labels := obj.Spec.Template.Labels - - // TODO: support templates defined elsewhere when we support them in the API - if labels != nil { - if obj.Spec.Selector == nil { - obj.Spec.Selector = &LabelSelector{ - MatchLabels: labels, - } - } - if len(obj.Labels) == 0 { - obj.Labels = labels - } - } -} - -func SetDefaults_Deployment(obj *Deployment) { - // Default labels and selector to labels from pod template spec. - labels := obj.Spec.Template.Labels - - if labels != nil { - if obj.Spec.Selector == nil { - obj.Spec.Selector = &LabelSelector{MatchLabels: labels} - } - if len(obj.Labels) == 0 { - obj.Labels = labels - } - } - // Set DeploymentSpec.Replicas to 1 if it is not set. - if obj.Spec.Replicas == nil { - obj.Spec.Replicas = new(int32) - *obj.Spec.Replicas = 1 - } - strategy := &obj.Spec.Strategy - // Set default DeploymentStrategyType as RollingUpdate. - if strategy.Type == "" { - strategy.Type = RollingUpdateDeploymentStrategyType - } - if strategy.Type == RollingUpdateDeploymentStrategyType { - if strategy.RollingUpdate == nil { - rollingUpdate := RollingUpdateDeployment{} - strategy.RollingUpdate = &rollingUpdate - } - if strategy.RollingUpdate.MaxUnavailable == nil { - // Set default MaxUnavailable as 1 by default. - maxUnavailable := intstr.FromInt(1) - strategy.RollingUpdate.MaxUnavailable = &maxUnavailable - } - if strategy.RollingUpdate.MaxSurge == nil { - // Set default MaxSurge as 1 by default. - maxSurge := intstr.FromInt(1) - strategy.RollingUpdate.MaxSurge = &maxSurge - } - } -} - -func SetDefaults_Job(obj *Job) { - labels := obj.Spec.Template.Labels - // TODO: support templates defined elsewhere when we support them in the API - if labels != nil { - // if an autoselector is requested, we'll build the selector later with controller-uid and job-name - autoSelector := bool(obj.Spec.AutoSelector != nil && *obj.Spec.AutoSelector) - - // otherwise, we are using a manual selector - manualSelector := !autoSelector - - // and default behavior for an unspecified manual selector is to use the pod template labels - if manualSelector && obj.Spec.Selector == nil { - obj.Spec.Selector = &LabelSelector{ - MatchLabels: labels, - } - } - if len(obj.Labels) == 0 { - obj.Labels = labels - } - } - // For a non-parallel job, you can leave both `.spec.completions` and - // `.spec.parallelism` unset. When both are unset, both are defaulted to 1. 
- if obj.Spec.Completions == nil && obj.Spec.Parallelism == nil { - obj.Spec.Completions = new(int32) - *obj.Spec.Completions = 1 - obj.Spec.Parallelism = new(int32) - *obj.Spec.Parallelism = 1 - } - if obj.Spec.Parallelism == nil { - obj.Spec.Parallelism = new(int32) - *obj.Spec.Parallelism = 1 - } -} - -func SetDefaults_HorizontalPodAutoscaler(obj *HorizontalPodAutoscaler) { - if obj.Spec.MinReplicas == nil { - minReplicas := int32(1) - obj.Spec.MinReplicas = &minReplicas - } - if obj.Spec.CPUUtilization == nil { - obj.Spec.CPUUtilization = &CPUTargetUtilization{TargetPercentage: 80} - } -} - -func SetDefaults_ReplicaSet(obj *ReplicaSet) { - labels := obj.Spec.Template.Labels - - // TODO: support templates defined elsewhere when we support them in the API - if labels != nil { - if obj.Spec.Selector == nil { - obj.Spec.Selector = &LabelSelector{ - MatchLabels: labels, - } - } - if len(obj.Labels) == 0 { - obj.Labels = labels - } - } - if obj.Spec.Replicas == nil { - obj.Spec.Replicas = new(int32) - *obj.Spec.Replicas = 1 - } -} - -func SetDefaults_NetworkPolicy(obj *NetworkPolicy) { - // Default any undefined Protocol fields to TCP. - for _, i := range obj.Spec.Ingress { - // TODO: Update Ports to be a pointer to slice as soon as auto-generation supports it. - for _, p := range i.Ports { - if p.Protocol == nil { - proto := v1.ProtocolTCP - p.Protocol = &proto - } - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go deleted file mode 100644 index cfdb87c53..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +genconversion=true -package v1beta1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.pb.go deleted file mode 100644 index 3120ce17f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.pb.go +++ /dev/null @@ -1,13005 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto -// DO NOT EDIT! - -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto - - It has these top-level messages: - APIVersion - CPUTargetUtilization - CustomMetricCurrentStatus - CustomMetricCurrentStatusList - CustomMetricTarget - CustomMetricTargetList - DaemonSet - DaemonSetList - DaemonSetSpec - DaemonSetStatus - Deployment - DeploymentList - DeploymentRollback - DeploymentSpec - DeploymentStatus - DeploymentStrategy - ExportOptions - FSGroupStrategyOptions - HTTPIngressPath - HTTPIngressRuleValue - HorizontalPodAutoscaler - HorizontalPodAutoscalerList - HorizontalPodAutoscalerSpec - HorizontalPodAutoscalerStatus - HostPortRange - IDRange - Ingress - IngressBackend - IngressList - IngressRule - IngressRuleValue - IngressSpec - IngressStatus - IngressTLS - Job - JobCondition - JobList - JobSpec - JobStatus - LabelSelector - LabelSelectorRequirement - ListOptions - NetworkPolicy - NetworkPolicyIngressRule - NetworkPolicyList - NetworkPolicyPeer - NetworkPolicyPort - NetworkPolicySpec - PodSecurityPolicy - PodSecurityPolicyList - PodSecurityPolicySpec - ReplicaSet - ReplicaSetList - ReplicaSetSpec - ReplicaSetStatus - ReplicationControllerDummy - RollbackConfig - RollingUpdateDeployment - RunAsUserStrategyOptions - SELinuxStrategyOptions - Scale - ScaleSpec - ScaleStatus - SubresourceReference - SupplementalGroupsStrategyOptions - ThirdPartyResource - ThirdPartyResourceData - ThirdPartyResourceDataList - ThirdPartyResourceList -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" -import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1" - -import k8s_io_kubernetes_pkg_util_intstr "k8s.io/kubernetes/pkg/util/intstr" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -func (m *APIVersion) Reset() { *m = APIVersion{} } -func (m *APIVersion) String() string { return proto.CompactTextString(m) } -func (*APIVersion) ProtoMessage() {} - -func (m *CPUTargetUtilization) Reset() { *m = CPUTargetUtilization{} } -func (m *CPUTargetUtilization) String() string { return proto.CompactTextString(m) } -func (*CPUTargetUtilization) ProtoMessage() {} - -func (m *CustomMetricCurrentStatus) Reset() { *m = CustomMetricCurrentStatus{} } -func (m *CustomMetricCurrentStatus) String() string { return proto.CompactTextString(m) } -func (*CustomMetricCurrentStatus) ProtoMessage() {} - -func (m *CustomMetricCurrentStatusList) Reset() { *m = CustomMetricCurrentStatusList{} } -func (m *CustomMetricCurrentStatusList) String() string { return proto.CompactTextString(m) } -func (*CustomMetricCurrentStatusList) ProtoMessage() {} - -func (m *CustomMetricTarget) Reset() { *m = CustomMetricTarget{} } -func (m *CustomMetricTarget) String() string { return proto.CompactTextString(m) } -func (*CustomMetricTarget) ProtoMessage() {} - -func (m *CustomMetricTargetList) Reset() { *m = CustomMetricTargetList{} } -func (m *CustomMetricTargetList) String() string { return proto.CompactTextString(m) } -func (*CustomMetricTargetList) ProtoMessage() {} - -func (m *DaemonSet) Reset() { *m = DaemonSet{} } -func (m *DaemonSet) String() string { return proto.CompactTextString(m) } -func (*DaemonSet) ProtoMessage() {} - -func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } -func (m *DaemonSetList) String() string { return proto.CompactTextString(m) } -func (*DaemonSetList) ProtoMessage() {} - -func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } -func (m *DaemonSetSpec) String() string { return proto.CompactTextString(m) } -func (*DaemonSetSpec) ProtoMessage() {} - -func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } -func (m *DaemonSetStatus) String() string { return proto.CompactTextString(m) } -func (*DaemonSetStatus) ProtoMessage() {} - -func (m *Deployment) Reset() { *m = Deployment{} } -func (m *Deployment) String() string { return proto.CompactTextString(m) } -func (*Deployment) ProtoMessage() {} - -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (m *DeploymentList) String() string { return proto.CompactTextString(m) } -func (*DeploymentList) ProtoMessage() {} - -func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } -func (m *DeploymentRollback) String() string { return proto.CompactTextString(m) } -func (*DeploymentRollback) ProtoMessage() {} - -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (m *DeploymentSpec) String() string { return proto.CompactTextString(m) } -func (*DeploymentSpec) ProtoMessage() {} - -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (m *DeploymentStatus) String() string { return proto.CompactTextString(m) } -func (*DeploymentStatus) ProtoMessage() {} - -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (m *DeploymentStrategy) String() string { return proto.CompactTextString(m) } -func (*DeploymentStrategy) ProtoMessage() {} - -func (m *ExportOptions) Reset() { *m = ExportOptions{} } -func (m *ExportOptions) String() string { return proto.CompactTextString(m) } -func (*ExportOptions) ProtoMessage() {} - -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (m *FSGroupStrategyOptions) String() string { return proto.CompactTextString(m) } -func (*FSGroupStrategyOptions) 
ProtoMessage() {} - -func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } -func (m *HTTPIngressPath) String() string { return proto.CompactTextString(m) } -func (*HTTPIngressPath) ProtoMessage() {} - -func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } -func (m *HTTPIngressRuleValue) String() string { return proto.CompactTextString(m) } -func (*HTTPIngressRuleValue) ProtoMessage() {} - -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (m *HorizontalPodAutoscaler) String() string { return proto.CompactTextString(m) } -func (*HorizontalPodAutoscaler) ProtoMessage() {} - -func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } -func (m *HorizontalPodAutoscalerList) String() string { return proto.CompactTextString(m) } -func (*HorizontalPodAutoscalerList) ProtoMessage() {} - -func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } -func (m *HorizontalPodAutoscalerSpec) String() string { return proto.CompactTextString(m) } -func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} - -func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } -func (m *HorizontalPodAutoscalerStatus) String() string { return proto.CompactTextString(m) } -func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} - -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (m *HostPortRange) String() string { return proto.CompactTextString(m) } -func (*HostPortRange) ProtoMessage() {} - -func (m *IDRange) Reset() { *m = IDRange{} } -func (m *IDRange) String() string { return proto.CompactTextString(m) } -func (*IDRange) ProtoMessage() {} - -func (m *Ingress) Reset() { *m = Ingress{} } -func (m *Ingress) String() string { return proto.CompactTextString(m) } -func (*Ingress) ProtoMessage() {} - -func (m *IngressBackend) Reset() { *m = IngressBackend{} } -func (m *IngressBackend) String() string { return proto.CompactTextString(m) } -func (*IngressBackend) ProtoMessage() {} - -func (m *IngressList) Reset() { *m = IngressList{} } -func (m *IngressList) String() string { return proto.CompactTextString(m) } -func (*IngressList) ProtoMessage() {} - -func (m *IngressRule) Reset() { *m = IngressRule{} } -func (m *IngressRule) String() string { return proto.CompactTextString(m) } -func (*IngressRule) ProtoMessage() {} - -func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } -func (m *IngressRuleValue) String() string { return proto.CompactTextString(m) } -func (*IngressRuleValue) ProtoMessage() {} - -func (m *IngressSpec) Reset() { *m = IngressSpec{} } -func (m *IngressSpec) String() string { return proto.CompactTextString(m) } -func (*IngressSpec) ProtoMessage() {} - -func (m *IngressStatus) Reset() { *m = IngressStatus{} } -func (m *IngressStatus) String() string { return proto.CompactTextString(m) } -func (*IngressStatus) ProtoMessage() {} - -func (m *IngressTLS) Reset() { *m = IngressTLS{} } -func (m *IngressTLS) String() string { return proto.CompactTextString(m) } -func (*IngressTLS) ProtoMessage() {} - -func (m *Job) Reset() { *m = Job{} } -func (m *Job) String() string { return proto.CompactTextString(m) } -func (*Job) ProtoMessage() {} - -func (m *JobCondition) Reset() { *m = JobCondition{} } -func (m *JobCondition) String() string { return proto.CompactTextString(m) } -func (*JobCondition) ProtoMessage() {} - -func (m *JobList) Reset() { *m = JobList{} } -func (m *JobList) String() string { return proto.CompactTextString(m) } -func (*JobList) ProtoMessage() {} 
- -func (m *JobSpec) Reset() { *m = JobSpec{} } -func (m *JobSpec) String() string { return proto.CompactTextString(m) } -func (*JobSpec) ProtoMessage() {} - -func (m *JobStatus) Reset() { *m = JobStatus{} } -func (m *JobStatus) String() string { return proto.CompactTextString(m) } -func (*JobStatus) ProtoMessage() {} - -func (m *LabelSelector) Reset() { *m = LabelSelector{} } -func (m *LabelSelector) String() string { return proto.CompactTextString(m) } -func (*LabelSelector) ProtoMessage() {} - -func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } -func (m *LabelSelectorRequirement) String() string { return proto.CompactTextString(m) } -func (*LabelSelectorRequirement) ProtoMessage() {} - -func (m *ListOptions) Reset() { *m = ListOptions{} } -func (m *ListOptions) String() string { return proto.CompactTextString(m) } -func (*ListOptions) ProtoMessage() {} - -func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } -func (m *NetworkPolicy) String() string { return proto.CompactTextString(m) } -func (*NetworkPolicy) ProtoMessage() {} - -func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } -func (m *NetworkPolicyIngressRule) String() string { return proto.CompactTextString(m) } -func (*NetworkPolicyIngressRule) ProtoMessage() {} - -func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } -func (m *NetworkPolicyList) String() string { return proto.CompactTextString(m) } -func (*NetworkPolicyList) ProtoMessage() {} - -func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } -func (m *NetworkPolicyPeer) String() string { return proto.CompactTextString(m) } -func (*NetworkPolicyPeer) ProtoMessage() {} - -func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } -func (m *NetworkPolicyPort) String() string { return proto.CompactTextString(m) } -func (*NetworkPolicyPort) ProtoMessage() {} - -func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } -func (m *NetworkPolicySpec) String() string { return proto.CompactTextString(m) } -func (*NetworkPolicySpec) ProtoMessage() {} - -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (m *PodSecurityPolicy) String() string { return proto.CompactTextString(m) } -func (*PodSecurityPolicy) ProtoMessage() {} - -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (m *PodSecurityPolicyList) String() string { return proto.CompactTextString(m) } -func (*PodSecurityPolicyList) ProtoMessage() {} - -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func (m *PodSecurityPolicySpec) String() string { return proto.CompactTextString(m) } -func (*PodSecurityPolicySpec) ProtoMessage() {} - -func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } -func (m *ReplicaSet) String() string { return proto.CompactTextString(m) } -func (*ReplicaSet) ProtoMessage() {} - -func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } -func (m *ReplicaSetList) String() string { return proto.CompactTextString(m) } -func (*ReplicaSetList) ProtoMessage() {} - -func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } -func (m *ReplicaSetSpec) String() string { return proto.CompactTextString(m) } -func (*ReplicaSetSpec) ProtoMessage() {} - -func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } -func (m *ReplicaSetStatus) String() string { return proto.CompactTextString(m) } -func (*ReplicaSetStatus) ProtoMessage() {} - -func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} } -func (m 
*ReplicationControllerDummy) String() string { return proto.CompactTextString(m) } -func (*ReplicationControllerDummy) ProtoMessage() {} - -func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } -func (m *RollbackConfig) String() string { return proto.CompactTextString(m) } -func (*RollbackConfig) ProtoMessage() {} - -func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } -func (m *RollingUpdateDeployment) String() string { return proto.CompactTextString(m) } -func (*RollingUpdateDeployment) ProtoMessage() {} - -func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } -func (m *RunAsUserStrategyOptions) String() string { return proto.CompactTextString(m) } -func (*RunAsUserStrategyOptions) ProtoMessage() {} - -func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } -func (m *SELinuxStrategyOptions) String() string { return proto.CompactTextString(m) } -func (*SELinuxStrategyOptions) ProtoMessage() {} - -func (m *Scale) Reset() { *m = Scale{} } -func (m *Scale) String() string { return proto.CompactTextString(m) } -func (*Scale) ProtoMessage() {} - -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (m *ScaleSpec) String() string { return proto.CompactTextString(m) } -func (*ScaleSpec) ProtoMessage() {} - -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (m *ScaleStatus) String() string { return proto.CompactTextString(m) } -func (*ScaleStatus) ProtoMessage() {} - -func (m *SubresourceReference) Reset() { *m = SubresourceReference{} } -func (m *SubresourceReference) String() string { return proto.CompactTextString(m) } -func (*SubresourceReference) ProtoMessage() {} - -func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } -func (m *SupplementalGroupsStrategyOptions) String() string { return proto.CompactTextString(m) } -func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} - -func (m *ThirdPartyResource) Reset() { *m = ThirdPartyResource{} } -func (m *ThirdPartyResource) String() string { return proto.CompactTextString(m) } -func (*ThirdPartyResource) ProtoMessage() {} - -func (m *ThirdPartyResourceData) Reset() { *m = ThirdPartyResourceData{} } -func (m *ThirdPartyResourceData) String() string { return proto.CompactTextString(m) } -func (*ThirdPartyResourceData) ProtoMessage() {} - -func (m *ThirdPartyResourceDataList) Reset() { *m = ThirdPartyResourceDataList{} } -func (m *ThirdPartyResourceDataList) String() string { return proto.CompactTextString(m) } -func (*ThirdPartyResourceDataList) ProtoMessage() {} - -func (m *ThirdPartyResourceList) Reset() { *m = ThirdPartyResourceList{} } -func (m *ThirdPartyResourceList) String() string { return proto.CompactTextString(m) } -func (*ThirdPartyResourceList) ProtoMessage() {} - -func init() { - proto.RegisterType((*APIVersion)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.APIVersion") - proto.RegisterType((*CPUTargetUtilization)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.CPUTargetUtilization") - proto.RegisterType((*CustomMetricCurrentStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.CustomMetricCurrentStatus") - proto.RegisterType((*CustomMetricCurrentStatusList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.CustomMetricCurrentStatusList") - proto.RegisterType((*CustomMetricTarget)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.CustomMetricTarget") - proto.RegisterType((*CustomMetricTargetList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.CustomMetricTargetList") - 
proto.RegisterType((*DaemonSet)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DaemonSet") - proto.RegisterType((*DaemonSetList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetList") - proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetSpec") - proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DaemonSetStatus") - proto.RegisterType((*Deployment)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.Deployment") - proto.RegisterType((*DeploymentList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DeploymentList") - proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DeploymentRollback") - proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DeploymentSpec") - proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DeploymentStatus") - proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.DeploymentStrategy") - proto.RegisterType((*ExportOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ExportOptions") - proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.FSGroupStrategyOptions") - proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HTTPIngressPath") - proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HTTPIngressRuleValue") - proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HorizontalPodAutoscaler") - proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HorizontalPodAutoscalerList") - proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HorizontalPodAutoscalerSpec") - proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HorizontalPodAutoscalerStatus") - proto.RegisterType((*HostPortRange)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.HostPortRange") - proto.RegisterType((*IDRange)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IDRange") - proto.RegisterType((*Ingress)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.Ingress") - proto.RegisterType((*IngressBackend)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressBackend") - proto.RegisterType((*IngressList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressList") - proto.RegisterType((*IngressRule)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressRule") - proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressRuleValue") - proto.RegisterType((*IngressSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressSpec") - proto.RegisterType((*IngressStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressStatus") - proto.RegisterType((*IngressTLS)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.IngressTLS") - proto.RegisterType((*Job)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.Job") - proto.RegisterType((*JobCondition)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.JobCondition") - proto.RegisterType((*JobList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.JobList") - proto.RegisterType((*JobSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.JobSpec") - proto.RegisterType((*JobStatus)(nil), 
"k8s.io.kubernetes.pkg.apis.extensions.v1beta1.JobStatus") - proto.RegisterType((*LabelSelector)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.LabelSelector") - proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.LabelSelectorRequirement") - proto.RegisterType((*ListOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ListOptions") - proto.RegisterType((*NetworkPolicy)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicy") - proto.RegisterType((*NetworkPolicyIngressRule)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyIngressRule") - proto.RegisterType((*NetworkPolicyList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyList") - proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyPeer") - proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicyPort") - proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.NetworkPolicySpec") - proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.PodSecurityPolicy") - proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.PodSecurityPolicyList") - proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.PodSecurityPolicySpec") - proto.RegisterType((*ReplicaSet)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSet") - proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetList") - proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetSpec") - proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ReplicaSetStatus") - proto.RegisterType((*ReplicationControllerDummy)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ReplicationControllerDummy") - proto.RegisterType((*RollbackConfig)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.RollbackConfig") - proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.RollingUpdateDeployment") - proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.RunAsUserStrategyOptions") - proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.SELinuxStrategyOptions") - proto.RegisterType((*Scale)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.Scale") - proto.RegisterType((*ScaleSpec)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ScaleSpec") - proto.RegisterType((*ScaleStatus)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ScaleStatus") - proto.RegisterType((*SubresourceReference)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.SubresourceReference") - proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.SupplementalGroupsStrategyOptions") - proto.RegisterType((*ThirdPartyResource)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ThirdPartyResource") - proto.RegisterType((*ThirdPartyResourceData)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ThirdPartyResourceData") - proto.RegisterType((*ThirdPartyResourceDataList)(nil), "k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ThirdPartyResourceDataList") - proto.RegisterType((*ThirdPartyResourceList)(nil), 
"k8s.io.kubernetes.pkg.apis.extensions.v1beta1.ThirdPartyResourceList") -} -func (m *APIVersion) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *APIVersion) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - return i, nil -} - -func (m *CPUTargetUtilization) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *CPUTargetUtilization) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.TargetPercentage)) - return i, nil -} - -func (m *CustomMetricCurrentStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *CustomMetricCurrentStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.CurrentValue.Size())) - n1, err := m.CurrentValue.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - return i, nil -} - -func (m *CustomMetricCurrentStatusList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *CustomMetricCurrentStatusList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *CustomMetricTarget) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *CustomMetricTarget) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.TargetValue.Size())) - n2, err := m.TargetValue.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - return i, nil -} - -func (m *CustomMetricTargetList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *CustomMetricTargetList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *DaemonSet) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - 
return nil, err - } - return data[:n], nil -} - -func (m *DaemonSet) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n4, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n4 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n5, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n5 - return i, nil -} - -func (m *DaemonSetList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *DaemonSetList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *DaemonSetSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *DaemonSetSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Selector != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n7, err := m.Selector.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n7 - } - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n8 - return i, nil -} - -func (m *DaemonSetStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *DaemonSetStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.CurrentNumberScheduled)) - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.NumberMisscheduled)) - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.DesiredNumberScheduled)) - return i, nil -} - -func (m *Deployment) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Deployment) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n9 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n10 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n11, err := m.Status.MarshalTo(data[i:]) - if 
err != nil { - return 0, err - } - i += n11 - return i, nil -} - -func (m *DeploymentList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *DeploymentList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n12, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n12 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *DeploymentRollback) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *DeploymentRollback) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - if len(m.UpdatedAnnotations) > 0 { - for k := range m.UpdatedAnnotations { - data[i] = 0x12 - i++ - v := m.UpdatedAnnotations[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size())) - n13, err := m.RollbackTo.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n13 - return i, nil -} - -func (m *DeploymentSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *DeploymentSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Replicas != nil { - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n14, err := m.Selector.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n14 - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n15, err := m.Template.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n15 - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Strategy.Size())) - n16, err := m.Strategy.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n16 - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - data[i] = 0x30 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.RevisionHistoryLimit)) - } - data[i] = 0x38 - i++ - if m.Paused { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - if m.RollbackTo != nil { - data[i] = 0x42 - i++ - i = encodeVarintGenerated(data, i, uint64(m.RollbackTo.Size())) - n17, err := m.RollbackTo.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n17 - } - return i, nil -} - -func (m *DeploymentStatus) Marshal() (data []byte, err error) { - size := 
m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *DeploymentStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.UpdatedReplicas)) - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(m.AvailableReplicas)) - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(m.UnavailableReplicas)) - return i, nil -} - -func (m *DeploymentStrategy) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *DeploymentStrategy) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - if m.RollingUpdate != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.RollingUpdate.Size())) - n18, err := m.RollingUpdate.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n18 - } - return i, nil -} - -func (m *ExportOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ExportOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - if m.Export { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x10 - i++ - if m.Exact { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *FSGroupStrategyOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *FSGroupStrategyOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) - i += copy(data[i:], m.Rule) - if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *HTTPIngressPath) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *HTTPIngressPath) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Path))) - i += copy(data[i:], m.Path) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Backend.Size())) - n19, err := m.Backend.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n19 - return i, nil -} - -func (m *HTTPIngressRuleValue) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *HTTPIngressRuleValue) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Paths) > 0 { - for _, msg := 
range m.Paths { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *HorizontalPodAutoscaler) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *HorizontalPodAutoscaler) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n20, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n20 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n21, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n21 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n22, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n22 - return i, nil -} - -func (m *HorizontalPodAutoscalerList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *HorizontalPodAutoscalerList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n23, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n23 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *HorizontalPodAutoscalerSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *HorizontalPodAutoscalerSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ScaleRef.Size())) - n24, err := m.ScaleRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n24 - if m.MinReplicas != nil { - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.MinReplicas)) - } - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.MaxReplicas)) - if m.CPUUtilization != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.CPUUtilization.Size())) - n25, err := m.CPUUtilization.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n25 - } - return i, nil -} - -func (m *HorizontalPodAutoscalerStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *HorizontalPodAutoscalerStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ObservedGeneration != nil { - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastScaleTime.Size())) - n26, err := m.LastScaleTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n26 - } - data[i] = 0x18 - i++ - i = 
encodeVarintGenerated(data, i, uint64(m.CurrentReplicas)) - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(m.DesiredReplicas)) - if m.CurrentCPUUtilizationPercentage != nil { - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.CurrentCPUUtilizationPercentage)) - } - return i, nil -} - -func (m *HostPortRange) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *HostPortRange) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Min)) - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Max)) - return i, nil -} - -func (m *IDRange) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *IDRange) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Min)) - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Max)) - return i, nil -} - -func (m *Ingress) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Ingress) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n27, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n27 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n28, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n28 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n29, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n29 - return i, nil -} - -func (m *IngressBackend) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *IngressBackend) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ServiceName))) - i += copy(data[i:], m.ServiceName) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ServicePort.Size())) - n30, err := m.ServicePort.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n30 - return i, nil -} - -func (m *IngressList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *IngressList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n31, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n31 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *IngressRule) Marshal() (data []byte, err 
error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *IngressRule) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Host))) - i += copy(data[i:], m.Host) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.IngressRuleValue.Size())) - n32, err := m.IngressRuleValue.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n32 - return i, nil -} - -func (m *IngressRuleValue) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *IngressRuleValue) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.HTTP != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.HTTP.Size())) - n33, err := m.HTTP.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n33 - } - return i, nil -} - -func (m *IngressSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *IngressSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Backend != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.Backend.Size())) - n34, err := m.Backend.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n34 - } - if len(m.TLS) > 0 { - for _, msg := range m.TLS { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *IngressStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *IngressStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.LoadBalancer.Size())) - n35, err := m.LoadBalancer.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n35 - return i, nil -} - -func (m *IngressTLS) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *IngressTLS) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Hosts) > 0 { - for _, s := range m.Hosts { - data[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.SecretName))) - i += copy(data[i:], m.SecretName) - return i, nil -} - -func (m *Job) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Job) MarshalTo(data []byte) (int, error) { - var i 
int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n36, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n36 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n37, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n37 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n38, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n38 - return i, nil -} - -func (m *JobCondition) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *JobCondition) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i += copy(data[i:], m.Status) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) - n39, err := m.LastProbeTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n39 - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n40, err := m.LastTransitionTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n40 - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - return i, nil -} - -func (m *JobList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *JobList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n41, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n41 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *JobSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *JobSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Parallelism != nil { - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.Parallelism)) - } - if m.Completions != nil { - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.Completions)) - } - if m.ActiveDeadlineSeconds != nil { - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.ActiveDeadlineSeconds)) - } - if m.Selector != nil { - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n42, err := m.Selector.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n42 - } - if m.AutoSelector != nil { - data[i] = 0x28 - i++ - if *m.AutoSelector { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - } - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, 
uint64(m.Template.Size())) - n43, err := m.Template.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n43 - return i, nil -} - -func (m *JobStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *JobStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.StartTime != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.StartTime.Size())) - n44, err := m.StartTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n44 - } - if m.CompletionTime != nil { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.CompletionTime.Size())) - n45, err := m.CompletionTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n45 - } - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Active)) - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Succeeded)) - data[i] = 0x30 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Failed)) - return i, nil -} - -func (m *LabelSelector) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LabelSelector) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.MatchLabels) > 0 { - for k := range m.MatchLabels { - data[i] = 0xa - i++ - v := m.MatchLabels[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - if len(m.MatchExpressions) > 0 { - for _, msg := range m.MatchExpressions { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *LabelSelectorRequirement) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *LabelSelectorRequirement) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Key))) - i += copy(data[i:], m.Key) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Operator))) - i += copy(data[i:], m.Operator) - if len(m.Values) > 0 { - for _, s := range m.Values { - data[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - return i, nil -} - -func (m *ListOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ListOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - 
data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.LabelSelector))) - i += copy(data[i:], m.LabelSelector) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.FieldSelector))) - i += copy(data[i:], m.FieldSelector) - data[i] = 0x18 - i++ - if m.Watch { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ResourceVersion))) - i += copy(data[i:], m.ResourceVersion) - if m.TimeoutSeconds != nil { - data[i] = 0x28 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.TimeoutSeconds)) - } - return i, nil -} - -func (m *NetworkPolicy) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NetworkPolicy) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n46, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n46 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n47, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n47 - return i, nil -} - -func (m *NetworkPolicyIngressRule) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NetworkPolicyIngressRule) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.From) > 0 { - for _, msg := range m.From { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *NetworkPolicyList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NetworkPolicyList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n48, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n48 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *NetworkPolicyPeer) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NetworkPolicyPeer) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.PodSelector != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.PodSelector.Size())) - n49, err := m.PodSelector.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n49 - } - if m.NamespaceSelector != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.NamespaceSelector.Size())) - n50, err := 
m.NamespaceSelector.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n50 - } - return i, nil -} - -func (m *NetworkPolicyPort) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NetworkPolicyPort) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Protocol != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(*m.Protocol))) - i += copy(data[i:], *m.Protocol) - } - if m.Port != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Port.Size())) - n51, err := m.Port.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n51 - } - return i, nil -} - -func (m *NetworkPolicySpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *NetworkPolicySpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.PodSelector.Size())) - n52, err := m.PodSelector.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n52 - if len(m.Ingress) > 0 { - for _, msg := range m.Ingress { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PodSecurityPolicy) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodSecurityPolicy) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n53, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n53 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n54, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n54 - return i, nil -} - -func (m *PodSecurityPolicyList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodSecurityPolicyList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n55, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n55 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PodSecurityPolicySpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodSecurityPolicySpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - if m.Privileged { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - data[i] = 0x12 - i++ - l = len(s) - for l 
>= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - data[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - data[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - data[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - data[i] = 0x30 - i++ - if m.HostNetwork { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - if len(m.HostPorts) > 0 { - for _, msg := range m.HostPorts { - data[i] = 0x3a - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - data[i] = 0x40 - i++ - if m.HostPID { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x48 - i++ - if m.HostIPC { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x52 - i++ - i = encodeVarintGenerated(data, i, uint64(m.SELinux.Size())) - n56, err := m.SELinux.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n56 - data[i] = 0x5a - i++ - i = encodeVarintGenerated(data, i, uint64(m.RunAsUser.Size())) - n57, err := m.RunAsUser.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n57 - data[i] = 0x62 - i++ - i = encodeVarintGenerated(data, i, uint64(m.SupplementalGroups.Size())) - n58, err := m.SupplementalGroups.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n58 - data[i] = 0x6a - i++ - i = encodeVarintGenerated(data, i, uint64(m.FSGroup.Size())) - n59, err := m.FSGroup.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n59 - data[i] = 0x70 - i++ - if m.ReadOnlyRootFilesystem { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - return i, nil -} - -func (m *ReplicaSet) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ReplicaSet) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n60, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n60 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n61, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n61 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n62, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n62 - return i, nil -} - -func (m *ReplicaSetList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ReplicaSetList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n63, err := 
m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n63 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ReplicaSetSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ReplicaSetSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Replicas != nil { - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n64, err := m.Selector.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n64 - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Template.Size())) - n65, err := m.Template.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n65 - return i, nil -} - -func (m *ReplicaSetStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ReplicaSetStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.FullyLabeledReplicas)) - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObservedGeneration)) - return i, nil -} - -func (m *ReplicationControllerDummy) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ReplicationControllerDummy) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - return i, nil -} - -func (m *RollbackConfig) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *RollbackConfig) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Revision)) - return i, nil -} - -func (m *RollingUpdateDeployment) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *RollingUpdateDeployment) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.MaxUnavailable != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.MaxUnavailable.Size())) - n66, err := m.MaxUnavailable.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n66 - } - if m.MaxSurge != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.MaxSurge.Size())) - n67, err := m.MaxSurge.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n67 - } - return i, nil -} - -func (m *RunAsUserStrategyOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m 
*RunAsUserStrategyOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) - i += copy(data[i:], m.Rule) - if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *SELinuxStrategyOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SELinuxStrategyOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) - i += copy(data[i:], m.Rule) - if m.SELinuxOptions != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.SELinuxOptions.Size())) - n68, err := m.SELinuxOptions.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n68 - } - return i, nil -} - -func (m *Scale) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Scale) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n69, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n69 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n70, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n70 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n71, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n71 - return i, nil -} - -func (m *ScaleSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ScaleSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) - return i, nil -} - -func (m *ScaleStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ScaleStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Replicas)) - if len(m.Selector) > 0 { - for k := range m.Selector { - data[i] = 0x12 - i++ - v := m.Selector[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.TargetSelector))) - i += copy(data[i:], m.TargetSelector) - return i, nil -} - -func (m *SubresourceReference) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if 
err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SubresourceReference) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) - i += copy(data[i:], m.APIVersion) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Subresource))) - i += copy(data[i:], m.Subresource) - return i, nil -} - -func (m *SupplementalGroupsStrategyOptions) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SupplementalGroupsStrategyOptions) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Rule))) - i += copy(data[i:], m.Rule) - if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ThirdPartyResource) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ThirdPartyResource) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n72, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n72 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Description))) - i += copy(data[i:], m.Description) - if len(m.Versions) > 0 { - for _, msg := range m.Versions { - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ThirdPartyResourceData) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ThirdPartyResourceData) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n73, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n73 - if m.Data != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Data))) - i += copy(data[i:], m.Data) - } - return i, nil -} - -func (m *ThirdPartyResourceDataList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ThirdPartyResourceDataList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n74, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n74 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, 
i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ThirdPartyResourceList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ThirdPartyResourceList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n75, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n75 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *APIVersion) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CPUTargetUtilization) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.TargetPercentage)) - return n -} - -func (m *CustomMetricCurrentStatus) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.CurrentValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CustomMetricCurrentStatusList) Size() (n int) { - var l int - _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *CustomMetricTarget) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.TargetValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CustomMetricTargetList) Size() (n int) { - var l int - _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *DaemonSet) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DaemonSetList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *DaemonSetSpec) Size() (n int) { - var l int - _ = l - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DaemonSetStatus) Size() (n 
int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) - n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) - return n -} - -func (m *Deployment) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DeploymentList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *DeploymentRollback) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.UpdatedAnnotations) > 0 { - for k, v := range m.UpdatedAnnotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = m.RollbackTo.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DeploymentSpec) Size() (n int) { - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Strategy.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) - } - n += 2 - if m.RollbackTo != nil { - l = m.RollbackTo.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *DeploymentStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) - n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) - return n -} - -func (m *DeploymentStrategy) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ExportOptions) Size() (n int) { - var l int - _ = l - n += 2 - n += 2 - return n -} - -func (m *FSGroupStrategyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HTTPIngressPath) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Backend.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *HTTPIngressRuleValue) Size() (n int) { - var l int - _ = l - if len(m.Paths) > 0 { - for _, e := range m.Paths { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HorizontalPodAutoscaler) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *HorizontalPodAutoscalerList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l 
+ sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HorizontalPodAutoscalerSpec) Size() (n int) { - var l int - _ = l - l = m.ScaleRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.MinReplicas != nil { - n += 1 + sovGenerated(uint64(*m.MinReplicas)) - } - n += 1 + sovGenerated(uint64(m.MaxReplicas)) - if m.CPUUtilization != nil { - l = m.CPUUtilization.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *HorizontalPodAutoscalerStatus) Size() (n int) { - var l int - _ = l - if m.ObservedGeneration != nil { - n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - l = m.LastScaleTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 1 + sovGenerated(uint64(m.CurrentReplicas)) - n += 1 + sovGenerated(uint64(m.DesiredReplicas)) - if m.CurrentCPUUtilizationPercentage != nil { - n += 1 + sovGenerated(uint64(*m.CurrentCPUUtilizationPercentage)) - } - return n -} - -func (m *HostPortRange) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) - return n -} - -func (m *IDRange) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) - return n -} - -func (m *Ingress) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *IngressBackend) Size() (n int) { - var l int - _ = l - l = len(m.ServiceName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ServicePort.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *IngressList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *IngressRule) Size() (n int) { - var l int - _ = l - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - l = m.IngressRuleValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *IngressRuleValue) Size() (n int) { - var l int - _ = l - if m.HTTP != nil { - l = m.HTTP.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *IngressSpec) Size() (n int) { - var l int - _ = l - if m.Backend != nil { - l = m.Backend.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.TLS) > 0 { - for _, e := range m.TLS { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *IngressStatus) Size() (n int) { - var l int - _ = l - l = m.LoadBalancer.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *IngressTLS) Size() (n int) { - var l int - _ = l - if len(m.Hosts) > 0 { - for _, s := range m.Hosts { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Job) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *JobCondition) Size() (n int) { - var l int - _ = 
l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastProbeTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *JobList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *JobSpec) Size() (n int) { - var l int - _ = l - if m.Parallelism != nil { - n += 1 + sovGenerated(uint64(*m.Parallelism)) - } - if m.Completions != nil { - n += 1 + sovGenerated(uint64(*m.Completions)) - } - if m.ActiveDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AutoSelector != nil { - n += 2 - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *JobStatus) Size() (n int) { - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.StartTime != nil { - l = m.StartTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CompletionTime != nil { - l = m.CompletionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 1 + sovGenerated(uint64(m.Active)) - n += 1 + sovGenerated(uint64(m.Succeeded)) - n += 1 + sovGenerated(uint64(m.Failed)) - return n -} - -func (m *LabelSelector) Size() (n int) { - var l int - _ = l - if len(m.MatchLabels) > 0 { - for k, v := range m.MatchLabels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.MatchExpressions) > 0 { - for _, e := range m.MatchExpressions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *LabelSelectorRequirement) Size() (n int) { - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operator) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ListOptions) Size() (n int) { - var l int - _ = l - l = len(m.LabelSelector) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FieldSelector) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.ResourceVersion) - n += 1 + l + sovGenerated(uint64(l)) - if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) - } - return n -} - -func (m *NetworkPolicy) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NetworkPolicyIngressRule) Size() (n int) { - var l int - _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.From) > 0 { - for _, e := range m.From { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *NetworkPolicyList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := 
range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *NetworkPolicyPeer) Size() (n int) { - var l int - _ = l - if m.PodSelector != nil { - l = m.PodSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *NetworkPolicyPort) Size() (n int) { - var l int - _ = l - if m.Protocol != nil { - l = len(*m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Port != nil { - l = m.Port.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *NetworkPolicySpec) Size() (n int) { - var l int - _ = l - l = m.PodSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ingress) > 0 { - for _, e := range m.Ingress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodSecurityPolicy) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodSecurityPolicyList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodSecurityPolicySpec) Size() (n int) { - var l int - _ = l - n += 2 - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 2 - if len(m.HostPorts) > 0 { - for _, e := range m.HostPorts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 2 - n += 2 - l = m.SELinux.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.RunAsUser.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.SupplementalGroups.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FSGroup.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *ReplicaSet) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ReplicaSetList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ReplicaSetSpec) Size() (n int) { - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ReplicaSetStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - return n -} - -func (m *ReplicationControllerDummy) 
Size() (n int) { - var l int - _ = l - return n -} - -func (m *RollbackConfig) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Revision)) - return n -} - -func (m *RollingUpdateDeployment) Size() (n int) { - var l int - _ = l - if m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.MaxSurge != nil { - l = m.MaxSurge.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *RunAsUserStrategyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *SELinuxStrategyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *Scale) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ScaleSpec) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - return n -} - -func (m *ScaleStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.TargetSelector) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SubresourceReference) Size() (n int) { - var l int - _ = l - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Subresource) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SupplementalGroupsStrategyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ThirdPartyResource) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Description) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Versions) > 0 { - for _, e := range m.Versions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ThirdPartyResourceData) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ThirdPartyResourceDataList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ThirdPartyResourceList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x 
== 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *APIVersion) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: APIVersion: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: APIVersion: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CPUTargetUtilization) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CPUTargetUtilization: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CPUTargetUtilization: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetPercentage", wireType) - } - m.TargetPercentage = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.TargetPercentage |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomMetricCurrentStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= 
(uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomMetricCurrentStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricCurrentStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.CurrentValue.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomMetricCurrentStatusList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomMetricCurrentStatusList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricCurrentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CustomMetricCurrentStatus{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated 
- } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomMetricTarget) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomMetricTarget: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricTarget: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TargetValue.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomMetricTargetList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomMetricTargetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricTargetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CustomMetricTarget{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSet) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - 
for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, DaemonSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &LabelSelector{} - } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) - } - m.CurrentNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) - } - m.NumberMisscheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.NumberMisscheduled |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) - } - m.DesiredNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Deployment) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Deployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] 
- iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Deployment{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentRollback) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - if m.UpdatedAnnotations == nil { - m.UpdatedAnnotations = make(map[string]string) - } - m.UpdatedAnnotations[mapkey] = mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - 
break - } - } - m.Replicas = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &LabelSelector{} - } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Strategy.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.RevisionHistoryLimit = &v - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Paused = bool(v != 0) - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 
0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollbackTo == nil { - m.RollbackTo = &RollbackConfig{} - } - if err := m.RollbackTo.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) - } - m.UpdatedReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) - } - m.AvailableReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) - } - m.UnavailableReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentStrategy) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DeploymentStrategyType(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateDeployment{} - } - if err := m.RollingUpdate.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExportOptions) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Export", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - 
m.Export = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Exact = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FSGroupStrategyOptions) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = FSGroupStrategyType(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPIngressPath) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType 
== 4 { - return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Backend.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPIngressRuleValue) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Paths = append(m.Paths, HTTPIngressPath{}) - if err := m.Paths[len(m.Paths)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscaler) 
Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", 
fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, HorizontalPodAutoscaler{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ScaleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.MinReplicas = &v - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) - } - m.MaxReplicas = 0 
- for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.MaxReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CPUUtilization", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CPUUtilization == nil { - m.CPUUtilization = &CPUTargetUtilization{} - } - if err := m.CPUUtilization.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ObservedGeneration = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LastScaleTime == nil { - m.LastScaleTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} - } - if err := m.LastScaleTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) - } - m.CurrentReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - 
break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) - } - m.DesiredReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.DesiredReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentCPUUtilizationPercentage", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.CurrentCPUUtilizationPercentage = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HostPortRange) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) - } - m.Min = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Min |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Max |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IDRange) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IDRange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, 
wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) - } - m.Min = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Min |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Max |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Ingress) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Ingress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := 
m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressBackend) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ServicePort.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - 
if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Ingress{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressRule) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Host = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.IngressRuleValue.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return 
ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressRuleValue) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HTTP == nil { - m.HTTP = &HTTPIngressRuleValue{} - } - if err := m.HTTP.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Backend == nil { - m.Backend = &IngressBackend{} - } - if err := m.Backend.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= 
(int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TLS = append(m.TLS, IngressTLS{}) - if err := m.TLS[len(m.TLS)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, IngressRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LoadBalancer.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressTLS) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") - } - if fieldNum <= 0 
{ - return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hosts = append(m.Hosts, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SecretName = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Job) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Job: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Job: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := 
m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobCondition) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = JobConditionType(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if 
err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := 
m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Job{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Parallelism = &v - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Completions", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Completions = &v - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ActiveDeadlineSeconds = &v - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &LabelSelector{} - } - if err := 
m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AutoSelector", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.AutoSelector = &b - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, JobCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.StartTime == nil { - m.StartTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} - } - if err := m.StartTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: 
- if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CompletionTime == nil { - m.CompletionTime = &k8s_io_kubernetes_pkg_api_unversioned.Time{} - } - if err := m.CompletionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) - } - m.Active = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Active |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) - } - m.Succeeded = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Succeeded |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) - } - m.Failed = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Failed |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelSelector) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } 
- b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - if m.MatchLabels == nil { - m.MatchLabels = make(map[string]string) - } - m.MatchLabels[mapkey] = mapvalue - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{}) - if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelSelectorRequirement) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Operator = LabelSelectorOperator(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Values = append(m.Values, string(data[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListOptions) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LabelSelector = string(data[iNdEx:postIndex]) - iNdEx = postIndex - 
case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FieldSelector = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Watch", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Watch = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TimeoutSeconds = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkPolicy) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + 
msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkPolicyIngressRule) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = append(m.Ports, NetworkPolicyPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.From = append(m.From, NetworkPolicyPeer{}) - if err := m.From[len(m.From)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkPolicyList) Unmarshal(data 
[]byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, NetworkPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkPolicyPeer) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PodSelector == nil { - m.PodSelector = 
&LabelSelector{} - } - if err := m.PodSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkPolicyPort) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := k8s_io_kubernetes_pkg_api_v1.Protocol(data[iNdEx:postIndex]) - m.Protocol = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Port == nil { - m.Port = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{} - } - if err := m.Port.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - 
} - return nil -} -func (m *NetworkPolicySpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.PodSelector.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSecurityPolicy) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - 
return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSecurityPolicyList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicyList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PodSecurityPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSecurityPolicySpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var 
wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicySpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Privileged = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddCapabilities", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_kubernetes_pkg_api_v1.Capability(data[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredDropCapabilities", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_kubernetes_pkg_api_v1.Capability(data[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCapabilities", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_kubernetes_pkg_api_v1.Capability(data[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - 
intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Volumes = append(m.Volumes, FSType(data[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.HostNetwork = bool(v != 0) - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPorts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostPorts = append(m.HostPorts, HostPortRange{}) - if err := m.HostPorts[len(m.HostPorts)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.HostPID = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.HostIPC = bool(v != 0) - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinux", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SELinux.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RunAsUser.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) - } - var msglen int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SupplementalGroups.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.FSGroup.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnlyRootFilesystem = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicaSet) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - 
return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicaSetList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ReplicaSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicaSetSpec) Unmarshal(data []byte) error { - l := len(data) - 
iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Replicas = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &LabelSelector{} - } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicaSetStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - 
if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) - } - m.FullyLabeledReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicationControllerDummy) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicationControllerDummy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicationControllerDummy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RollbackConfig) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - m.Revision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - 
return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RollingUpdateDeployment) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{} - } - if err := m.MaxUnavailable.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_kubernetes_pkg_util_intstr.IntOrString{} - } - if err := m.MaxSurge.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RunAsUserStrategyOptions) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = RunAsUserStrategy(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SELinuxStrategyOptions) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SELinuxStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SELinuxStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = SELinuxStrategy(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SELinuxOptions == nil { - m.SELinuxOptions = &k8s_io_kubernetes_pkg_api_v1.SELinuxOptions{} - } - if err := m.SELinuxOptions.Unmarshal(data[iNdEx:postIndex]); err != nil { - return 
err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Scale) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Scale: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScaleSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b 
< 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScaleStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScaleStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - if m.Selector == nil { - m.Selector = make(map[string]string) - } - m.Selector[mapkey] = mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetSelector", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TargetSelector = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SubresourceReference) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SubresourceReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SubresourceReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ 
- stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subresource = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SupplementalGroupsStrategyOptions) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = SupplementalGroupsStrategyType(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } 
- if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ThirdPartyResource) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Versions = append(m.Versions, APIVersion{}) - if err := m.Versions[len(m.Versions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if 
skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ThirdPartyResourceData) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResourceData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResourceData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ThirdPartyResourceDataList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResourceDataList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResourceDataList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ThirdPartyResourceData{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ThirdPartyResourceList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResourceList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResourceList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ThirdPartyResource{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) diff --git 
a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto deleted file mode 100644 index bd4da6974..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/generated.proto +++ /dev/null @@ -1,1010 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.extensions.v1beta1; - -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// An APIVersion represents a single concrete version of an object model. -message APIVersion { - // Name of this version (e.g. 'v1'). - optional string name = 1; -} - -message CPUTargetUtilization { - // fraction of the requested CPU that should be utilized/used, - // e.g. 70 means that 70% of the requested CPU should be in use. - optional int32 targetPercentage = 1; -} - -message CustomMetricCurrentStatus { - // Custom Metric name. - optional string name = 1; - - // Custom Metric value (average). - optional k8s.io.kubernetes.pkg.api.resource.Quantity value = 2; -} - -message CustomMetricCurrentStatusList { - repeated CustomMetricCurrentStatus items = 1; -} - -// Alpha-level support for Custom Metrics in HPA (as annotations). -message CustomMetricTarget { - // Custom Metric name. - optional string name = 1; - - // Custom Metric value (average). - optional k8s.io.kubernetes.pkg.api.resource.Quantity value = 2; -} - -message CustomMetricTargetList { - repeated CustomMetricTarget items = 1; -} - -// DaemonSet represents the configuration of a daemon set. -message DaemonSet { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec defines the desired behavior of this daemon set. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional DaemonSetSpec spec = 2; - - // Status is the current status of this daemon set. This data may be - // out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional DaemonSetStatus status = 3; -} - -// DaemonSetList is a collection of daemon sets. -message DaemonSetList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of daemon sets. 
- repeated DaemonSet items = 2; -} - -// DaemonSetSpec is the specification of a daemon set. -message DaemonSetSpec { - // Selector is a label query over pods that are managed by the daemon set. - // Must match in order to be controlled. - // If empty, defaulted to labels on Pod template. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - optional LabelSelector selector = 1; - - // Template is the object that describes the pod that will be created. - // The DaemonSet will create exactly one copy of this pod on every node - // that matches the template's node selector (or on every node if no node - // selector is specified). - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#pod-template - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 2; -} - -// DaemonSetStatus represents the current status of a daemon set. -message DaemonSetStatus { - // CurrentNumberScheduled is the number of nodes that are running at least 1 - // daemon pod and are supposed to run the daemon pod. - // More info: http://releases.k8s.io/release-1.3/docs/admin/daemons.md - optional int32 currentNumberScheduled = 1; - - // NumberMisscheduled is the number of nodes that are running the daemon pod, but are - // not supposed to run the daemon pod. - // More info: http://releases.k8s.io/release-1.3/docs/admin/daemons.md - optional int32 numberMisscheduled = 2; - - // DesiredNumberScheduled is the total number of nodes that should be running the daemon - // pod (including nodes correctly running the daemon pod). - // More info: http://releases.k8s.io/release-1.3/docs/admin/daemons.md - optional int32 desiredNumberScheduled = 3; -} - -// Deployment enables declarative updates for Pods and ReplicaSets. -message Deployment { - // Standard object metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior of the Deployment. - optional DeploymentSpec spec = 2; - - // Most recently observed status of the Deployment. - optional DeploymentStatus status = 3; -} - -// DeploymentList is a list of Deployments. -message DeploymentList { - // Standard list metadata. - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of Deployments. - repeated Deployment items = 2; -} - -// DeploymentRollback stores the information required to rollback a deployment. -message DeploymentRollback { - // Required: This must match the Name of a deployment. - optional string name = 1; - - // The annotations to be updated to a deployment - map<string, string> updatedAnnotations = 2; - - // The config of this deployment rollback. - optional RollbackConfig rollbackTo = 3; -} - -// DeploymentSpec is the specification of the desired behavior of the Deployment. -message DeploymentSpec { - // Number of desired pods. This is a pointer to distinguish between explicit - // zero and not specified. Defaults to 1. - optional int32 replicas = 1; - - // Label selector for pods. Existing ReplicaSets whose pods are - // selected by this will be the ones affected by this deployment. - optional LabelSelector selector = 2; - - // Template describes the pods that will be created. - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; - - // The deployment strategy to use to replace existing pods with new ones. 
- optional DeploymentStrategy strategy = 4; - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - optional int32 minReadySeconds = 5; - - // The number of old ReplicaSets to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - optional int32 revisionHistoryLimit = 6; - - // Indicates that the deployment is paused and will not be processed by the - // deployment controller. - optional bool paused = 7; - - // The config this deployment is rolling back to. Will be cleared after rollback is done. - optional RollbackConfig rollbackTo = 8; -} - -// DeploymentStatus is the most recently observed status of the Deployment. -message DeploymentStatus { - // The generation observed by the deployment controller. - optional int64 observedGeneration = 1; - - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). - optional int32 replicas = 2; - - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. - optional int32 updatedReplicas = 3; - - // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. - optional int32 availableReplicas = 4; - - // Total number of unavailable pods targeted by this deployment. - optional int32 unavailableReplicas = 5; -} - -// DeploymentStrategy describes how to replace existing pods with new ones. -message DeploymentStrategy { - // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. - optional string type = 1; - - // Rolling update config params. Present only if DeploymentStrategyType = - // RollingUpdate. - // --- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. - optional RollingUpdateDeployment rollingUpdate = 2; -} - -// ExportOptions is the query options to the standard REST get call. -message ExportOptions { - // Should this value be exported. Export strips fields that a user can not specify. - optional bool export = 1; - - // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace' - optional bool exact = 2; -} - -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -message FSGroupStrategyOptions { - // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - optional string rule = 1; - - // Ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. - repeated IDRange ranges = 2; -} - -// HTTPIngressPath associates a path regex with a backend. Incoming urls matching -// the path are forwarded to the backend. -message HTTPIngressPath { - // Path is a extended POSIX regex as defined by IEEE Std 1003.1, - // (i.e this follows the egrep/unix syntax, not the perl syntax) - // matched against the path of an incoming request. Currently it can - // contain characters disallowed from the conventional "path" - // part of a URL as defined by RFC 3986. Paths must begin with - // a '/'. If unspecified, the path defaults to a catch all sending - // traffic to the backend. - optional string path = 1; - - // Backend defines the referenced service endpoint to which the traffic - // will be forwarded to. 
- optional IngressBackend backend = 2; -} - -// HTTPIngressRuleValue is a list of http selectors pointing to backends. -// In the example: http://<host>/<path>?<searchpart> -> backend where -// where parts of the url correspond to RFC 3986, this resource will be used -// to match against everything after the last '/' and before the first '?' -// or '#'. -message HTTPIngressRuleValue { - // A collection of paths that map requests to backends. - repeated HTTPIngressPath paths = 1; -} - -// configuration of a horizontal pod autoscaler. -message HorizontalPodAutoscaler { - // Standard object metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // behaviour of autoscaler. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. - optional HorizontalPodAutoscalerSpec spec = 2; - - // current information about the autoscaler. - optional HorizontalPodAutoscalerStatus status = 3; -} - -// list of horizontal pod autoscaler objects. -message HorizontalPodAutoscalerList { - // Standard list metadata. - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // list of horizontal pod autoscaler objects. - repeated HorizontalPodAutoscaler items = 2; -} - -// specification of a horizontal pod autoscaler. -message HorizontalPodAutoscalerSpec { - // reference to Scale subresource; horizontal pod autoscaler will learn the current resource consumption from its status, - // and will set the desired number of pods by modifying its spec. - optional SubresourceReference scaleRef = 1; - - // lower limit for the number of pods that can be set by the autoscaler, default 1. - optional int32 minReplicas = 2; - - // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. - optional int32 maxReplicas = 3; - - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; - // if not specified it defaults to the target CPU utilization at 80% of the requested resources. - optional CPUTargetUtilization cpuUtilization = 4; -} - -// current status of a horizontal pod autoscaler -message HorizontalPodAutoscalerStatus { - // most recent generation observed by this autoscaler. - optional int64 observedGeneration = 1; - - // last time the HorizontalPodAutoscaler scaled the number of pods; - // used by the autoscaler to control how often the number of pods is changed. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastScaleTime = 2; - - // current number of replicas of pods managed by this autoscaler. - optional int32 currentReplicas = 3; - - // desired number of replicas of pods managed by this autoscaler. - optional int32 desiredReplicas = 4; - - // current average CPU utilization over all pods, represented as a percentage of requested CPU, - // e.g. 70 means that an average pod is using now 70% of its requested CPU. - optional int32 currentCPUUtilizationPercentage = 5; -} - -// Host Port Range defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -message HostPortRange { - // min is the start of the range, inclusive. - optional int32 min = 1; - - // max is the end of the range, inclusive. - optional int32 max = 2; -} - -// ID Range provides a min/max of an allowed range of IDs. -message IDRange { - // Min is the start of the range, inclusive. 
- optional int64 min = 1; - - // Max is the end of the range, inclusive. - optional int64 max = 2; -} - -// Ingress is a collection of rules that allow inbound connections to reach the -// endpoints defined by a backend. An Ingress can be configured to give services -// externally-reachable urls, load balance traffic, terminate SSL, offer name -// based virtual hosting etc. -message Ingress { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec is the desired state of the Ingress. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional IngressSpec spec = 2; - - // Status is the current state of the Ingress. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional IngressStatus status = 3; -} - -// IngressBackend describes all endpoints for a given service and port. -message IngressBackend { - // Specifies the name of the referenced service. - optional string serviceName = 1; - - // Specifies the port of the referenced service. - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString servicePort = 2; -} - -// IngressList is a collection of Ingress. -message IngressList { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of Ingress. - repeated Ingress items = 2; -} - -// IngressRule represents the rules mapping the paths under a specified host to -// the related backend services. Incoming requests are first evaluated for a host -// match, then routed to the backend associated with the matching IngressRuleValue. -message IngressRule { - // Host is the fully qualified domain name of a network host, as defined - // by RFC 3986. Note the following deviations from the "host" part of the - // URI as defined in the RFC: - // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the - // IP in the Spec of the parent Ingress. - // 2. The `:` delimiter is not respected because ports are not allowed. - // Currently the port of an Ingress is implicitly :80 for http and - // :443 for https. - // Both these may change in the future. - // Incoming requests are matched against the host before the IngressRuleValue. - // If the host is unspecified, the Ingress routes all traffic based on the - // specified IngressRuleValue. - optional string host = 1; - - // IngressRuleValue represents a rule to route requests for this IngressRule. - // If unspecified, the rule defaults to a http catch-all. Whether that sends - // just traffic matching the host to the default backend or all traffic to the - // default backend, is left to the controller fulfilling the Ingress. Http is - // currently the only supported IngressRuleValue. - optional IngressRuleValue ingressRuleValue = 2; -} - -// IngressRuleValue represents a rule to apply against incoming requests. If the -// rule is satisfied, the request is routed to the specified backend. Currently -// mixing different types of rules in a single Ingress is disallowed, so exactly -// one of the following must be set. -message IngressRuleValue { - optional HTTPIngressRuleValue http = 1; -} - -// IngressSpec describes the Ingress the user wishes to exist. 
-message IngressSpec { - // A default backend capable of servicing requests that don't match any - // rule. At least one of 'backend' or 'rules' must be specified. This field - // is optional to allow the loadbalancer controller or defaulting logic to - // specify a global default. - optional IngressBackend backend = 1; - - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified - // through the SNI TLS extension, if the ingress controller fulfilling the - // ingress supports SNI. - repeated IngressTLS tls = 2; - - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. - repeated IngressRule rules = 3; -} - -// IngressStatus describe the current state of the Ingress. -message IngressStatus { - // LoadBalancer contains the current status of the load-balancer. - optional k8s.io.kubernetes.pkg.api.v1.LoadBalancerStatus loadBalancer = 1; -} - -// IngressTLS describes the transport layer security associated with an Ingress. -message IngressTLS { - // Hosts are a list of hosts included in the TLS certificate. The values in - // this list must match the name/s used in the tlsSecret. Defaults to the - // wildcard host setting for the loadbalancer controller fulfilling this - // Ingress, if left unspecified. - repeated string hosts = 1; - - // SecretName is the name of the secret used to terminate SSL traffic on 443. - // Field is left optional to allow SSL routing based on SNI hostname alone. - // If the SNI host in a listener conflicts with the "Host" header field used - // by an IngressRule, the SNI host is used for termination and value of the - // Host header is used for routing. - optional string secretName = 2; -} - -// Job represents the configuration of a single job. -message Job { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional JobSpec spec = 2; - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional JobStatus status = 3; -} - -// JobCondition describes current state of a job. -message JobCondition { - // Type of job condition, Complete or Failed. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time the condition was checked. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; - - // Last time the condition transit from one status to another. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; - - // (brief) reason for the condition's last transition. - optional string reason = 5; - - // Human readable message indicating details about last transition. - optional string message = 6; -} - -// JobList is a collection of jobs. -message JobList { - // Standard list metadata - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of Job. 
- repeated Job items = 2; -} - -// JobSpec describes how the job execution will look like. -message JobSpec { - // Parallelism specifies the maximum desired number of pods the job should - // run at any given time. The actual number of pods running in steady state will - // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), - // i.e. when the work left to do is less than max parallelism. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - optional int32 parallelism = 1; - - // Completions specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any - // pod signals the success of all pods, and allows parallelism to have any positive - // value. Setting to 1 means that parallelism is limited to 1 and the success of that - // pod signals the success of the job. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - optional int32 completions = 2; - - // Optional duration in seconds relative to the startTime that the job may be active - // before the system tries to terminate it; value must be positive integer - optional int64 activeDeadlineSeconds = 3; - - // Selector is a label query over pods that should match the pod count. - // Normally, the system sets this field for you. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - optional LabelSelector selector = 4; - - // AutoSelector controls generation of pod labels and pod selectors. - // It was not present in the original extensions/v1beta1 Job definition, but exists - // to allow conversion from batch/v1 Jobs, where it corresponds to, but has the opposite - // meaning as, ManualSelector. - // More info: http://releases.k8s.io/release-1.3/docs/design/selector-generation.md - optional bool autoSelector = 5; - - // Template is the object that describes the pod that will be created when - // executing a job. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6; -} - -// JobStatus represents the current state of a Job. -message JobStatus { - // Conditions represent the latest available observations of an object's current state. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - repeated JobCondition conditions = 1; - - // StartTime represents time when the job was acknowledged by the Job Manager. - // It is not guaranteed to be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - optional k8s.io.kubernetes.pkg.api.unversioned.Time startTime = 2; - - // CompletionTime represents time when the job was completed. It is not guaranteed to - // be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - optional k8s.io.kubernetes.pkg.api.unversioned.Time completionTime = 3; - - // Active is the number of actively running pods. - optional int32 active = 4; - - // Succeeded is the number of pods which reached Phase Succeeded. - optional int32 succeeded = 5; - - // Failed is the number of pods which reached Phase Failed. - optional int32 failed = 6; -} - -// A label selector is a label query over a set of resources. The result of matchLabels and -// matchExpressions are ANDed. An empty label selector matches all objects. A null -// label selector matches no objects. 
-message LabelSelector { - // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - // map is equivalent to an element of matchExpressions, whose key field is "key", the - // operator is "In", and the values array contains only "value". The requirements are ANDed. - map<string, string> matchLabels = 1; - - // matchExpressions is a list of label selector requirements. The requirements are ANDed. - repeated LabelSelectorRequirement matchExpressions = 2; -} - -// A label selector requirement is a selector that contains values, a key, and an operator that -// relates the key and values. -message LabelSelectorRequirement { - // key is the label key that the selector applies to. - optional string key = 1; - - // operator represents a key's relationship to a set of values. - // Valid operators ard In, NotIn, Exists and DoesNotExist. - optional string operator = 2; - - // values is an array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. This array is replaced during a strategic - // merge patch. - repeated string values = 3; -} - -// ListOptions is the query options to a standard REST list call. -message ListOptions { - // A selector to restrict the list of returned objects by their labels. - // Defaults to everything. - optional string labelSelector = 1; - - // A selector to restrict the list of returned objects by their fields. - // Defaults to everything. - optional string fieldSelector = 2; - - // Watch for changes to the described resources and return them as a stream of - // add, update, and remove notifications. Specify resourceVersion. - optional bool watch = 3; - - // When specified with a watch call, shows changes that occur after that particular version of a resource. - // Defaults to changes from the beginning of history. - optional string resourceVersion = 4; - - // Timeout for the list/watch call. - optional int64 timeoutSeconds = 5; -} - -message NetworkPolicy { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior for this NetworkPolicy. - optional NetworkPolicySpec spec = 2; -} - -// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. -message NetworkPolicyIngressRule { - // List of ports which should be made accessible on the pods selected for this rule. - // Each item in this list is combined using a logical OR. - // If this field is not provided, this rule matches all ports (traffic not restricted by port). - // If this field is empty, this rule matches no ports (no traffic matches). - // If this field is present and contains at least one item, then this rule allows traffic - // only if the traffic matches at least one port in the list. - // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. - repeated NetworkPolicyPort ports = 1; - - // List of sources which should be able to access the pods selected for this rule. - // Items in this list are combined using a logical OR operation. - // If this field is not provided, this rule matches all sources (traffic not restricted by source). - // If this field is empty, this rule matches no sources (no traffic matches). 
- // If this field is present and contains at least on item, this rule allows traffic only if the - // traffic matches at least one item in the from list. - // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. - repeated NetworkPolicyPeer from = 2; -} - -// Network Policy List is a list of NetworkPolicy objects. -message NetworkPolicyList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of schema objects. - repeated NetworkPolicy items = 2; -} - -message NetworkPolicyPeer { - // This is a label selector which selects Pods in this namespace. - // This field follows standard label selector semantics. - // If not provided, this selector selects no pods. - // If present but empty, this selector selects all pods in this namespace. - optional LabelSelector podSelector = 1; - - // Selects Namespaces using cluster scoped-labels. This - // matches all pods in all namespaces selected by this label selector. - // This field follows standard label selector semantics. - // If omitted, this selector selects no namespaces. - // If present but empty, this selector selects all namespaces. - optional LabelSelector namespaceSelector = 2; -} - -message NetworkPolicyPort { - // Optional. The protocol (TCP or UDP) which traffic must match. - // If not specified, this field defaults to TCP. - optional string protocol = 1; - - // If specified, the port on the given protocol. This can - // either be a numerical or named port on a pod. If this field is not provided, - // this matches all port names and numbers. - // If present, only traffic on the specified protocol AND port - // will be matched. - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString port = 2; -} - -message NetworkPolicySpec { - // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules - // is applied to any pods selected by this field. Multiple network policies can select the - // same set of pods. In this case, the ingress rules for each are combined additively. - // This field is NOT optional and follows standard label selector semantics. - // An empty podSelector matches all pods in this namespace. - optional LabelSelector podSelector = 1; - - // List of ingress rules to be applied to the selected pods. - // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, - // OR if the traffic source is the pod's local node, - // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy - // objects whose podSelector matches the pod. - // If this field is empty then this NetworkPolicy does not affect ingress isolation. - // If this field is present and contains at least one rule, this policy allows any traffic - // which matches at least one of the ingress rules in this list. - repeated NetworkPolicyIngressRule ingress = 2; -} - -// Pod Security Policy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -message PodSecurityPolicy { - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // spec defines the policy enforced. - optional PodSecurityPolicySpec spec = 2; -} - -// Pod Security Policy List is a list of PodSecurityPolicy objects. 
-message PodSecurityPolicyList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of schema objects. - repeated PodSecurityPolicy items = 2; -} - -// Pod Security Policy Spec defines the policy enforced. -message PodSecurityPolicySpec { - // privileged determines if a pod can request to be run as privileged. - optional bool privileged = 1; - - // DefaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capabiility in both - // DefaultAddCapabilities and RequiredDropCapabilities. - repeated string defaultAddCapabilities = 2; - - // RequiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - repeated string requiredDropCapabilities = 3; - - // AllowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. - repeated string allowedCapabilities = 4; - - // volumes is a white list of allowed volume plugins. Empty indicates that all plugins - // may be used. - repeated string volumes = 5; - - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - optional bool hostNetwork = 6; - - // hostPorts determines which host port ranges are allowed to be exposed. - repeated HostPortRange hostPorts = 7; - - // hostPID determines if the policy allows the use of HostPID in the pod spec. - optional bool hostPID = 8; - - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - optional bool hostIPC = 9; - - // seLinux is the strategy that will dictate the allowable labels that may be set. - optional SELinuxStrategyOptions seLinux = 10; - - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - optional RunAsUserStrategyOptions runAsUser = 11; - - // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - optional SupplementalGroupsStrategyOptions supplementalGroups = 12; - - // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. - optional FSGroupStrategyOptions fsGroup = 13; - - // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - optional bool readOnlyRootFilesystem = 14; -} - -// ReplicaSet represents the configuration of a ReplicaSet. -message ReplicaSet { - // If the Labels of a ReplicaSet are empty, they are defaulted to - // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec defines the specification of the desired behavior of the ReplicaSet. 
- // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional ReplicaSetSpec spec = 2; - - // Status is the most recently observed status of the ReplicaSet. - // This data may be out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - optional ReplicaSetStatus status = 3; -} - -// ReplicaSetList is a collection of ReplicaSets. -message ReplicaSetList { - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of ReplicaSets. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md - repeated ReplicaSet items = 2; -} - -// ReplicaSetSpec is the specification of a ReplicaSet. -message ReplicaSetSpec { - // Replicas is the number of desired replicas. - // This is a pointer to distinguish between explicit zero and unspecified. - // Defaults to 1. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#what-is-a-replication-controller - optional int32 replicas = 1; - - // Selector is a label query over pods that should match the replica count. - // If the selector is empty, it is defaulted to the labels present on the pod template. - // Label keys and values that must match in order to be controlled by this replica set. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - optional LabelSelector selector = 2; - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#pod-template - optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; -} - -// ReplicaSetStatus represents the current status of a ReplicaSet. -message ReplicaSetStatus { - // Replicas is the most recently oberved number of replicas. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#what-is-a-replication-controller - optional int32 replicas = 1; - - // The number of pods that have labels matching the labels of the pod template of the replicaset. - optional int32 fullyLabeledReplicas = 2; - - // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. - optional int64 observedGeneration = 3; -} - -// Dummy definition -message ReplicationControllerDummy { -} - -message RollbackConfig { - // The revision to rollback to. If set to 0, rollbck to the last revision. - optional int64 revision = 1; -} - -// Spec to control the desired behavior of rolling update. -message RollingUpdateDeployment { - // The maximum number of pods that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // Absolute number is calculated from percentage by rounding up. - // This can not be 0 if MaxSurge is 0. - // By default, a fixed value of 1 is used. - // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods - // immediately when the rolling update starts. Once new pods are ready, old RC - // can be scaled down further, followed by scaling up the new RC, ensuring - // that the total number of pods available at all times during the update is at - // least 70% of desired pods. 
- optional k8s.io.kubernetes.pkg.util.intstr.IntOrString maxUnavailable = 1; - - // The maximum number of pods that can be scheduled above the desired number of - // pods. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // By default, a value of 1 is used. - // Example: when this is set to 30%, the new RC can be scaled up immediately when - // the rolling update starts, such that the total number of old and new pods do not exceed - // 130% of desired pods. Once old pods have been killed, - // new RC can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. - optional k8s.io.kubernetes.pkg.util.intstr.IntOrString maxSurge = 2; -} - -// Run A sUser Strategy Options defines the strategy type and any options used to create the strategy. -message RunAsUserStrategyOptions { - // Rule is the strategy that will dictate the allowable RunAsUser values that may be set. - optional string rule = 1; - - // Ranges are the allowed ranges of uids that may be used. - repeated IDRange ranges = 2; -} - -// SELinux Strategy Options defines the strategy type and any options used to create the strategy. -message SELinuxStrategyOptions { - // type is the strategy that will dictate the allowable labels that may be set. - optional string rule = 1; - - // seLinuxOptions required to run as; required for MustRunAs - // More info: http://releases.k8s.io/release-1.3/docs/design/security_context.md#security-context - optional k8s.io.kubernetes.pkg.api.v1.SELinuxOptions seLinuxOptions = 2; -} - -// represents a scaling request for a resource. -message Scale { - // Standard object metadata; More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // defines the behavior of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. - optional ScaleSpec spec = 2; - - // current status of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. Read-only. - optional ScaleStatus status = 3; -} - -// describes the attributes of a scale subresource -message ScaleSpec { - // desired number of instances for the scaled object. - optional int32 replicas = 1; -} - -// represents the current status of a scale subresource. -message ScaleStatus { - // actual number of observed instances of the scaled object. - optional int32 replicas = 1; - - // label query over pods that should match the replicas count. More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - map<string, string> selector = 2; - - // label selector for pods that should match the replicas count. This is a serializated - // version of both map-based and more expressive set-based selectors. This is done to - // avoid introspection in the clients. The string will be in the same format as the - // query-param syntax. If the target type only supports map-based selectors, both this - // field and map-based selector field are populated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - optional string targetSelector = 3; -} - -// SubresourceReference contains enough information to let you inspect or modify the referred subresource. 
-message SubresourceReference { - // Kind of the referent; More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - optional string kind = 1; - - // Name of the referent; More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names - optional string name = 2; - - // API version of the referent - optional string apiVersion = 3; - - // Subresource name of the referent - optional string subresource = 4; -} - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -message SupplementalGroupsStrategyOptions { - // Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - optional string rule = 1; - - // Ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. - repeated IDRange ranges = 2; -} - -// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource -// types to the API. It consists of one or more Versions of the api. -message ThirdPartyResource { - // Standard object metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Description is the description of this object. - optional string description = 2; - - // Versions are versions for this third party object - repeated APIVersion versions = 3; -} - -// An internal object, used for versioned storage in etcd. Not exposed to the end user. -message ThirdPartyResourceData { - // Standard object metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Data is the raw JSON data for this data. - optional bytes data = 2; -} - -// ThirdPartyResrouceDataList is a list of ThirdPartyResourceData. -message ThirdPartyResourceDataList { - // Standard list metadata - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of ThirdpartyResourceData. - repeated ThirdPartyResourceData items = 2; -} - -// ThirdPartyResourceList is a list of ThirdPartyResources. -message ThirdPartyResourceList { - // Standard list metadata. - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of ThirdPartyResources. - repeated ThirdPartyResource items = 2; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/register.go deleted file mode 100644 index e8bbf28b1..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/register.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" -) - -// GroupName is the group name use in this package -const GroupName = "extensions" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1beta1"} - -func AddToScheme(scheme *runtime.Scheme) { - addKnownTypes(scheme) - addDefaultingFuncs(scheme) - addConversionFuncs(scheme) -} - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) { - scheme.AddKnownTypes(SchemeGroupVersion, - &Deployment{}, - &DeploymentList{}, - &DeploymentRollback{}, - &HorizontalPodAutoscaler{}, - &HorizontalPodAutoscalerList{}, - &Job{}, - &JobList{}, - &ReplicationControllerDummy{}, - &Scale{}, - &ThirdPartyResource{}, - &ThirdPartyResourceList{}, - &DaemonSetList{}, - &DaemonSet{}, - &ThirdPartyResourceData{}, - &ThirdPartyResourceDataList{}, - &Ingress{}, - &IngressList{}, - &ListOptions{}, - &v1.DeleteOptions{}, - &ReplicaSet{}, - &ReplicaSetList{}, - &PodSecurityPolicy{}, - &PodSecurityPolicyList{}, - &NetworkPolicy{}, - &NetworkPolicyList{}, - ) - // Add the watch version that applies - versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.generated.go deleted file mode 100644 index cbe82eff3..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.generated.go +++ /dev/null @@ -1,23939 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. 
-// ************************************************************ - -package v1beta1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg4_resource "k8s.io/kubernetes/pkg/api/resource" - pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg2_v1 "k8s.io/kubernetes/pkg/api/v1" - pkg3_types "k8s.io/kubernetes/pkg/types" - pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg4_resource.Quantity - var v1 pkg1_unversioned.TypeMeta - var v2 pkg2_v1.ObjectMeta - var v3 pkg3_types.UID - var v4 pkg5_intstr.IntOrString - var v5 time.Time - _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5 - } -} - -func (x *ScaleSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Replicas != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScaleSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := 
r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScaleSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScaleSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ScaleStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = len(x.Selector) != 0 - yyq2[2] = x.TargetSelector != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - z.F.EncMapStringStringV(x.Selector, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - z.F.EncMapStringStringV(x.Selector, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TargetSelector)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.TargetSelector)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ScaleStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ScaleStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "selector": - if r.TryDecodeAsNil() { - x.Selector = nil - } else { - yyv5 := &x.Selector - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - z.F.DecMapStringStringX(yyv5, false, d) - } - } - case "targetSelector": - if r.TryDecodeAsNil() { - x.TargetSelector = "" - } else { - x.TargetSelector = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ScaleStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = 
int32(r.DecodeInt(32)) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Selector = nil - } else { - yyv10 := &x.Selector - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - z.F.DecMapStringStringX(yyv10, false, d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetSelector = "" - } else { - x.TargetSelector = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Scale) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Scale) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Scale) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ScaleSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ScaleStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Scale) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ScaleSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ScaleStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicationControllerDummy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicationControllerDummy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicationControllerDummy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicationControllerDummy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SubresourceReference) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if 
false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Kind != "" - yyq2[1] = x.Name != "" - yyq2[2] = x.APIVersion != "" - yyq2[3] = x.Subresource != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subresource")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Subresource)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SubresourceReference) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SubresourceReference) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "subresource": - if r.TryDecodeAsNil() { - x.Subresource = "" - } else { - x.Subresource = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SubresourceReference) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Subresource = "" - } else { - x.Subresource = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CPUTargetUtilization) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := 
codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.TargetPercentage)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("targetPercentage")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.TargetPercentage)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CPUTargetUtilization) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CPUTargetUtilization) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "targetPercentage": - if r.TryDecodeAsNil() { - x.TargetPercentage = 0 - } else { - x.TargetPercentage = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CPUTargetUtilization) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetPercentage = 0 - } else { - x.TargetPercentage = int32(r.DecodeInt(32)) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - 
break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CustomMetricTarget) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.TargetValue - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) - } else { - z.EncFallback(yy7) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.TargetValue - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CustomMetricTarget) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CustomMetricTarget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "value": - if r.TryDecodeAsNil() { - x.TargetValue = pkg4_resource.Quantity{} - } else { - yyv5 := &x.TargetValue - yym6 := z.DecBinary() - _ = yym6 - if false { - } else if z.HasExtensions() && z.DecExt(yyv5) { - } else if !yym6 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv5) - } else { - z.DecFallback(yyv5, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CustomMetricTarget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TargetValue = pkg4_resource.Quantity{} - } else { - yyv9 := &x.TargetValue - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv9) - } else { - z.DecFallback(yyv9, false) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CustomMetricTargetList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceCustomMetricTarget(([]CustomMetricTarget)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CustomMetricTargetList) CodecDecodeSelf(d 
*codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CustomMetricTargetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4 := &x.Items - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CustomMetricTargetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv7 := &x.Items - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceCustomMetricTarget((*[]CustomMetricTarget)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CustomMetricCurrentStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.CurrentValue - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) - } else { - z.EncFallback(yy7) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.CurrentValue - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CustomMetricCurrentStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CustomMetricCurrentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "value": - if r.TryDecodeAsNil() { - x.CurrentValue = pkg4_resource.Quantity{} - } else { - yyv5 := &x.CurrentValue - yym6 := z.DecBinary() - _ = yym6 - if false { - } else if z.HasExtensions() && z.DecExt(yyv5) { - } else if !yym6 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv5) - } else { - z.DecFallback(yyv5, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CustomMetricCurrentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentValue = pkg4_resource.Quantity{} - } else { - yyv9 := &x.CurrentValue - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv9) - } else { - z.DecFallback(yyv9, false) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *CustomMetricCurrentStatusList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceCustomMetricCurrentStatus(([]CustomMetricCurrentStatus)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *CustomMetricCurrentStatusList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() 
// default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv4 := &x.Items - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *CustomMetricCurrentStatusList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv7 := &x.Items - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceCustomMetricCurrentStatus((*[]CustomMetricCurrentStatus)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscalerSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.MinReplicas != nil - yyq2[3] = x.CPUUtilization != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.ScaleRef - yy4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("scaleRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ScaleRef - yy6.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.MinReplicas == nil { - r.EncodeNil() - } else { - yy9 := *x.MinReplicas - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MinReplicas == nil { - r.EncodeNil() - } else { - yy11 := *x.MinReplicas - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeInt(int64(yy11)) - } - } - } - } - if yyr2 || 
yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.MaxReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeInt(int64(x.MaxReplicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.CPUUtilization == nil { - r.EncodeNil() - } else { - x.CPUUtilization.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("cpuUtilization")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CPUUtilization == nil { - r.EncodeNil() - } else { - x.CPUUtilization.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscalerSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "scaleRef": - if r.TryDecodeAsNil() { - x.ScaleRef = SubresourceReference{} - } else { - yyv4 := &x.ScaleRef - yyv4.CodecDecodeSelf(d) - } - case "minReplicas": - if r.TryDecodeAsNil() { - if x.MinReplicas != nil { - x.MinReplicas = nil - } - } else { - if x.MinReplicas == nil { - x.MinReplicas = new(int32) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) - } - } - case "maxReplicas": - if r.TryDecodeAsNil() { - x.MaxReplicas = 0 - } else { - x.MaxReplicas = int32(r.DecodeInt(32)) - } - case "cpuUtilization": - if r.TryDecodeAsNil() { - if x.CPUUtilization != nil { - x.CPUUtilization = nil - } - } else { - if x.CPUUtilization == nil { - x.CPUUtilization = new(CPUTargetUtilization) - } - x.CPUUtilization.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscalerSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ScaleRef = SubresourceReference{} - } else { - yyv10 := &x.ScaleRef - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.MinReplicas != nil { - x.MinReplicas = nil - } - } else { - if x.MinReplicas == nil { - x.MinReplicas = new(int32) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*int32)(x.MinReplicas)) = int32(r.DecodeInt(32)) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MaxReplicas = 0 - } else { - x.MaxReplicas = int32(r.DecodeInt(32)) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CPUUtilization != nil { - x.CPUUtilization = nil - } - } else { - if x.CPUUtilization == nil { - x.CPUUtilization = new(CPUTargetUtilization) - } - x.CPUUtilization.CodecDecodeSelf(d) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscalerStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.ObservedGeneration != nil - yyq2[1] = x.LastScaleTime != nil - yyq2[4] = x.CurrentCPUUtilizationPercentage != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.ObservedGeneration == nil { - r.EncodeNil() - } else { - yy4 := *x.ObservedGeneration - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ObservedGeneration == 
nil { - r.EncodeNil() - } else { - yy6 := *x.ObservedGeneration - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.LastScaleTime == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { - } else if yym9 { - z.EncBinaryMarshal(x.LastScaleTime) - } else if !yym9 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScaleTime) - } else { - z.EncFallback(x.LastScaleTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastScaleTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.LastScaleTime == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.LastScaleTime) { - } else if yym10 { - z.EncBinaryMarshal(x.LastScaleTime) - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(x.LastScaleTime) - } else { - z.EncFallback(x.LastScaleTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeInt(int64(x.CurrentReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.CurrentReplicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeInt(int64(x.DesiredReplicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("desiredReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.DesiredReplicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.CurrentCPUUtilizationPercentage == nil { - r.EncodeNil() - } else { - yy18 := *x.CurrentCPUUtilizationPercentage - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(yy18)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentCPUUtilizationPercentage")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CurrentCPUUtilizationPercentage == nil { - r.EncodeNil() - } else { - yy20 := *x.CurrentCPUUtilizationPercentage - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeInt(int64(yy20)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscalerStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := 
r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "observedGeneration": - if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } - } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } - } - case "lastScaleTime": - if r.TryDecodeAsNil() { - if x.LastScaleTime != nil { - x.LastScaleTime = nil - } - } else { - if x.LastScaleTime == nil { - x.LastScaleTime = new(pkg1_unversioned.Time) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { - } else if yym7 { - z.DecBinaryUnmarshal(x.LastScaleTime) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScaleTime) - } else { - z.DecFallback(x.LastScaleTime, false) - } - } - case "currentReplicas": - if r.TryDecodeAsNil() { - x.CurrentReplicas = 0 - } else { - x.CurrentReplicas = int32(r.DecodeInt(32)) - } - case "desiredReplicas": - if r.TryDecodeAsNil() { - x.DesiredReplicas = 0 - } else { - x.DesiredReplicas = int32(r.DecodeInt(32)) - } - case "currentCPUUtilizationPercentage": - if r.TryDecodeAsNil() { - if x.CurrentCPUUtilizationPercentage != nil { - x.CurrentCPUUtilizationPercentage = nil - } - } else { - if x.CurrentCPUUtilizationPercentage == nil { - x.CurrentCPUUtilizationPercentage = new(int32) - } - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscalerStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ObservedGeneration != nil { - x.ObservedGeneration = nil - } - } else { - if x.ObservedGeneration == nil { - x.ObservedGeneration = new(int64) - } - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - *((*int64)(x.ObservedGeneration)) = int64(r.DecodeInt(64)) - } - } - yyj12++ - if yyhl12 { - yyb12 
= yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.LastScaleTime != nil { - x.LastScaleTime = nil - } - } else { - if x.LastScaleTime == nil { - x.LastScaleTime = new(pkg1_unversioned.Time) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.DecExt(x.LastScaleTime) { - } else if yym16 { - z.DecBinaryUnmarshal(x.LastScaleTime) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.LastScaleTime) - } else { - z.DecFallback(x.LastScaleTime, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentReplicas = 0 - } else { - x.CurrentReplicas = int32(r.DecodeInt(32)) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DesiredReplicas = 0 - } else { - x.DesiredReplicas = int32(r.DecodeInt(32)) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.CurrentCPUUtilizationPercentage != nil { - x.CurrentCPUUtilizationPercentage = nil - } - } else { - if x.CurrentCPUUtilizationPercentage == nil { - x.CurrentCPUUtilizationPercentage = new(int32) - } - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*int32)(x.CurrentCPUUtilizationPercentage)) = int32(r.DecodeInt(32)) - } - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscaler) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if 
yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscaler) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscaler) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = HorizontalPodAutoscalerSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = HorizontalPodAutoscalerStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscaler) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = HorizontalPodAutoscalerSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = HorizontalPodAutoscalerStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HorizontalPodAutoscalerList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = 
yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceHorizontalPodAutoscaler(([]HorizontalPodAutoscaler)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HorizontalPodAutoscalerList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == 
codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HorizontalPodAutoscalerList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceHorizontalPodAutoscaler((*[]HorizontalPodAutoscaler)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } 
else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ThirdPartyResource) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = x.Description != "" - yyq2[2] = len(x.Versions) != 0 - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Description)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("description")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Description)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Versions == nil { - r.EncodeNil() - } else { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - h.encSliceAPIVersion(([]APIVersion)(x.Versions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("versions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Versions == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - h.encSliceAPIVersion(([]APIVersion)(x.Versions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ThirdPartyResource) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ThirdPartyResource) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "description": - if r.TryDecodeAsNil() { - x.Description = "" - } else { - x.Description = string(r.DecodeString()) - } - case "versions": - if r.TryDecodeAsNil() { - x.Versions = nil - } else { - yyv6 := &x.Versions - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceAPIVersion((*[]APIVersion)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ThirdPartyResource) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = 
pkg2_v1.ObjectMeta{} - } else { - yyv11 := &x.ObjectMeta - yyv11.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Description = "" - } else { - x.Description = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Versions = nil - } else { - yyv13 := &x.Versions - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceAPIVersion((*[]APIVersion)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ThirdPartyResourceList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceThirdPartyResource(([]ThirdPartyResource)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ThirdPartyResourceList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ThirdPartyResourceList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv6), d) - } 
- } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ThirdPartyResourceList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceThirdPartyResource((*[]ThirdPartyResource)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *APIVersion) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Name != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *APIVersion) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *APIVersion) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *APIVersion) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ThirdPartyResourceData) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = len(x.Data) != 0 - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ 
- } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Data == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("data")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Data == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeStringBytes(codecSelferC_RAW1234, []byte(x.Data)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ThirdPartyResourceData) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ThirdPartyResourceData) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - 
var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "data": - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv5 := &x.Data - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - *yyv5 = r.DecodeBytes(*(*[]byte)(yyv5), false, false) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ThirdPartyResourceData) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Data = nil - } else { - yyv11 := &x.Data - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *yyv11 = r.DecodeBytes(*(*[]byte)(yyv11), false, false) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Deployment) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 
bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Deployment) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 
:= r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Deployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = DeploymentSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = DeploymentStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Deployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = DeploymentSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = DeploymentStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - 
yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [8]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Replicas != nil - yyq2[1] = x.Selector != nil - yyq2[3] = true - yyq2[4] = x.MinReadySeconds != 0 - yyq2[5] = x.RevisionHistoryLimit != nil - yyq2[6] = x.Paused != false - yyq2[7] = x.RollbackTo != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(8) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Replicas == nil { - r.EncodeNil() - } else { - yy4 := *x.Replicas - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Replicas == nil { - r.EncodeNil() - } else { - yy6 := *x.Replicas - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - x.Selector.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - x.Selector.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy12 := &x.Template - yy12.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.Template - yy14.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy17 := &x.Strategy - yy17.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("strategy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy19 := &x.Strategy - yy19.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeInt(int64(x.MinReadySeconds)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minReadySeconds")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeInt(int64(x.MinReadySeconds)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.RevisionHistoryLimit == nil { - r.EncodeNil() - } else { - yy25 := *x.RevisionHistoryLimit - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeInt(int64(yy25)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("revisionHistoryLimit")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RevisionHistoryLimit == nil { - r.EncodeNil() - } else { - yy27 := *x.RevisionHistoryLimit - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeInt(int64(yy27)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym30 := z.EncBinary() - _ = yym30 - if false { - } else { - r.EncodeBool(bool(x.Paused)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("paused")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym31 := z.EncBinary() - _ = yym31 - if false { - } else { - r.EncodeBool(bool(x.Paused)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - if x.RollbackTo == nil { - r.EncodeNil() - } else { - x.RollbackTo.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RollbackTo == nil { - r.EncodeNil() - } else { - x.RollbackTo.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case 
"replicas": - if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } - } else { - if x.Replicas == nil { - x.Replicas = new(int32) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(LabelSelector) - } - x.Selector.CodecDecodeSelf(d) - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} - } else { - yyv7 := &x.Template - yyv7.CodecDecodeSelf(d) - } - case "strategy": - if r.TryDecodeAsNil() { - x.Strategy = DeploymentStrategy{} - } else { - yyv8 := &x.Strategy - yyv8.CodecDecodeSelf(d) - } - case "minReadySeconds": - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - x.MinReadySeconds = int32(r.DecodeInt(32)) - } - case "revisionHistoryLimit": - if r.TryDecodeAsNil() { - if x.RevisionHistoryLimit != nil { - x.RevisionHistoryLimit = nil - } - } else { - if x.RevisionHistoryLimit == nil { - x.RevisionHistoryLimit = new(int32) - } - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) - } - } - case "paused": - if r.TryDecodeAsNil() { - x.Paused = false - } else { - x.Paused = bool(r.DecodeBool()) - } - case "rollbackTo": - if r.TryDecodeAsNil() { - if x.RollbackTo != nil { - x.RollbackTo = nil - } - } else { - if x.RollbackTo == nil { - x.RollbackTo = new(RollbackConfig) - } - x.RollbackTo.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } - } else { - if x.Replicas == nil { - x.Replicas = new(int32) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(LabelSelector) - } - x.Selector.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} - } else { - yyv18 := &x.Template - yyv18.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Strategy = DeploymentStrategy{} - } else { - yyv19 := 
&x.Strategy - yyv19.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinReadySeconds = 0 - } else { - x.MinReadySeconds = int32(r.DecodeInt(32)) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RevisionHistoryLimit != nil { - x.RevisionHistoryLimit = nil - } - } else { - if x.RevisionHistoryLimit == nil { - x.RevisionHistoryLimit = new(int32) - } - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - *((*int32)(x.RevisionHistoryLimit)) = int32(r.DecodeInt(32)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Paused = false - } else { - x.Paused = bool(r.DecodeBool()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RollbackTo != nil { - x.RollbackTo = nil - } - } else { - if x.RollbackTo == nil { - x.RollbackTo = new(RollbackConfig) - } - x.RollbackTo.CodecDecodeSelf(d) - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentRollback) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = len(x.UpdatedAnnotations) != 0 - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.UpdatedAnnotations == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) - } - } - } else { - r.EncodeNil() - } - } 
else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("updatedAnnotations")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.UpdatedAnnotations == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - z.F.EncMapStringStringV(x.UpdatedAnnotations, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy10 := &x.RollbackTo - yy10.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rollbackTo")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.RollbackTo - yy12.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentRollback) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentRollback) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "updatedAnnotations": - if r.TryDecodeAsNil() { - x.UpdatedAnnotations = nil - } else { - yyv5 := &x.UpdatedAnnotations - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - z.F.DecMapStringStringX(yyv5, false, d) - } - } - case "rollbackTo": - if r.TryDecodeAsNil() { - x.RollbackTo = RollbackConfig{} - } else { - yyv7 := &x.RollbackTo - yyv7.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentRollback) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UpdatedAnnotations = nil - } else { - yyv12 := &x.UpdatedAnnotations - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - z.F.DecMapStringStringX(yyv12, false, d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RollbackTo = RollbackConfig{} - } else { - yyv14 := &x.RollbackTo - yyv14.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RollbackConfig) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { 
- yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Revision != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Revision)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("revision")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Revision)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RollbackConfig) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RollbackConfig) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "revision": - if r.TryDecodeAsNil() { - x.Revision = 0 - } else { - x.Revision = int64(r.DecodeInt(64)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RollbackConfig) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Revision = 0 - } else { - x.Revision = int64(r.DecodeInt(64)) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x 
*DeploymentStrategy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Type != "" - yyq2[1] = x.RollingUpdate != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Type.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.RollingUpdate == nil { - r.EncodeNil() - } else { - x.RollingUpdate.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rollingUpdate")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RollingUpdate == nil { - r.EncodeNil() - } else { - x.RollingUpdate.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentStrategy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentStrategy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = DeploymentStrategyType(r.DecodeString()) - } - case "rollingUpdate": - if r.TryDecodeAsNil() { - if x.RollingUpdate != nil { - x.RollingUpdate = nil - } - } else { - if x.RollingUpdate == nil { - x.RollingUpdate = new(RollingUpdateDeployment) - } - 
x.RollingUpdate.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentStrategy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = DeploymentStrategyType(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.RollingUpdate != nil { - x.RollingUpdate = nil - } - } else { - if x.RollingUpdate == nil { - x.RollingUpdate = new(RollingUpdateDeployment) - } - x.RollingUpdate.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x DeploymentStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *DeploymentStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *RollingUpdateDeployment) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.MaxUnavailable != nil - yyq2[1] = x.MaxSurge != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.MaxUnavailable == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { - } else if !yym4 && z.IsJSONHandle() { - z.EncJSONMarshal(x.MaxUnavailable) - } else { - z.EncFallback(x.MaxUnavailable) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxUnavailable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MaxUnavailable == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = 
yym5 - if false { - } else if z.HasExtensions() && z.EncExt(x.MaxUnavailable) { - } else if !yym5 && z.IsJSONHandle() { - z.EncJSONMarshal(x.MaxUnavailable) - } else { - z.EncFallback(x.MaxUnavailable) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.MaxSurge == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.MaxSurge) { - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(x.MaxSurge) - } else { - z.EncFallback(x.MaxSurge) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxSurge")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MaxSurge == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.MaxSurge) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(x.MaxSurge) - } else { - z.EncFallback(x.MaxSurge) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RollingUpdateDeployment) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RollingUpdateDeployment) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "maxUnavailable": - if r.TryDecodeAsNil() { - if x.MaxUnavailable != nil { - x.MaxUnavailable = nil - } - } else { - if x.MaxUnavailable == nil { - x.MaxUnavailable = new(pkg5_intstr.IntOrString) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { - } else if !yym5 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.MaxUnavailable) - } else { - z.DecFallback(x.MaxUnavailable, false) - } - } - case "maxSurge": - if r.TryDecodeAsNil() { - if x.MaxSurge != nil { - x.MaxSurge = nil - } - } else { - if x.MaxSurge == nil { - x.MaxSurge = new(pkg5_intstr.IntOrString) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.MaxSurge) { - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.MaxSurge) - } else { - z.DecFallback(x.MaxSurge, false) - } - } - default: - 
z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RollingUpdateDeployment) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.MaxUnavailable != nil { - x.MaxUnavailable = nil - } - } else { - if x.MaxUnavailable == nil { - x.MaxUnavailable = new(pkg5_intstr.IntOrString) - } - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(x.MaxUnavailable) { - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.MaxUnavailable) - } else { - z.DecFallback(x.MaxUnavailable, false) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.MaxSurge != nil { - x.MaxSurge = nil - } - } else { - if x.MaxSurge == nil { - x.MaxSurge = new(pkg5_intstr.IntOrString) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(x.MaxSurge) { - } else if !yym12 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.MaxSurge) - } else { - z.DecFallback(x.MaxSurge, false) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.ObservedGeneration != 0 - yyq2[1] = x.Replicas != 0 - yyq2[2] = x.UpdatedReplicas != 0 - yyq2[3] = x.AvailableReplicas != 0 - yyq2[4] = x.UnavailableReplicas != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - r.EncodeInt(0) - } - } else { 
- if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.UpdatedReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("updatedReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.UpdatedReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.AvailableReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("availableReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.AvailableReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.UnavailableReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("unavailableReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(x.UnavailableReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch 
yys3 { - case "observedGeneration": - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) - } - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "updatedReplicas": - if r.TryDecodeAsNil() { - x.UpdatedReplicas = 0 - } else { - x.UpdatedReplicas = int32(r.DecodeInt(32)) - } - case "availableReplicas": - if r.TryDecodeAsNil() { - x.AvailableReplicas = 0 - } else { - x.AvailableReplicas = int32(r.DecodeInt(32)) - } - case "unavailableReplicas": - if r.TryDecodeAsNil() { - x.UnavailableReplicas = 0 - } else { - x.UnavailableReplicas = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UpdatedReplicas = 0 - } else { - x.UpdatedReplicas = int32(r.DecodeInt(32)) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AvailableReplicas = 0 - } else { - x.AvailableReplicas = int32(r.DecodeInt(32)) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.UnavailableReplicas = 0 - } else { - x.UnavailableReplicas = int32(r.DecodeInt(32)) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DeploymentList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = 
x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceDeployment(([]Deployment)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceDeployment(([]Deployment)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DeploymentList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DeploymentList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceDeployment((*[]Deployment)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DeploymentList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceDeployment((*[]Deployment)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - 
z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Selector != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Selector == nil { - r.EncodeNil() - } else { - x.Selector.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - x.Selector.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.Template - yy7.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.Template - yy9.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(LabelSelector) - } - x.Selector.CodecDecodeSelf(d) - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} - } else { - yyv5 := &x.Template - 
yyv5.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DaemonSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(LabelSelector) - } - x.Selector.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} - } else { - yyv8 := &x.Template - yyv8.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.CurrentNumberScheduled)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentNumberScheduled")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.CurrentNumberScheduled)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.NumberMisscheduled)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("numberMisscheduled")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.NumberMisscheduled)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.DesiredNumberScheduled)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("desiredNumberScheduled")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false 
{ - } else { - r.EncodeInt(int64(x.DesiredNumberScheduled)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "currentNumberScheduled": - if r.TryDecodeAsNil() { - x.CurrentNumberScheduled = 0 - } else { - x.CurrentNumberScheduled = int32(r.DecodeInt(32)) - } - case "numberMisscheduled": - if r.TryDecodeAsNil() { - x.NumberMisscheduled = 0 - } else { - x.NumberMisscheduled = int32(r.DecodeInt(32)) - } - case "desiredNumberScheduled": - if r.TryDecodeAsNil() { - x.DesiredNumberScheduled = 0 - } else { - x.DesiredNumberScheduled = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DaemonSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentNumberScheduled = 0 - } else { - x.CurrentNumberScheduled = int32(r.DecodeInt(32)) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NumberMisscheduled = 0 - } else { - x.NumberMisscheduled = int32(r.DecodeInt(32)) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DesiredNumberScheduled = 0 - } else { - x.DesiredNumberScheduled = int32(r.DecodeInt(32)) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } 
else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonSet) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonSet) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = DaemonSetSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = DaemonSetStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DaemonSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = DaemonSetSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = DaemonSetStatus{} - } else { - yyv12 := &x.Status - 
yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *DaemonSetList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceDaemonSet(([]DaemonSet)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceDaemonSet(([]DaemonSet)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || 
yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *DaemonSetList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *DaemonSetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceDaemonSet((*[]DaemonSet)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *DaemonSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { 
- yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceDaemonSet((*[]DaemonSet)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ThirdPartyResourceDataList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - 
h.encSliceThirdPartyResourceData(([]ThirdPartyResourceData)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ThirdPartyResourceDataList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ThirdPartyResourceDataList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - 
x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ThirdPartyResourceDataList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceThirdPartyResourceData((*[]ThirdPartyResourceData)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Job) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Job) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Job) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = JobStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Job) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = JobSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = JobStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - 
var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceJob(([]Job)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceJob(([]Job)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceJob((*[]Job)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceJob((*[]Job)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Parallelism != nil - yyq2[1] = x.Completions != nil - yyq2[2] = x.ActiveDeadlineSeconds != nil - yyq2[3] = x.Selector != nil - yyq2[4] = x.AutoSelector != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Parallelism == nil { - r.EncodeNil() - } else { - yy4 := *x.Parallelism - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("parallelism")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Parallelism == nil { - r.EncodeNil() - } else { - yy6 := *x.Parallelism - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Completions == nil { - r.EncodeNil() - } else { - yy9 := *x.Completions - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(yy9)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("completions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Completions == nil { - r.EncodeNil() - } else { - yy11 := *x.Completions - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeInt(int64(yy11)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy14 := *x.ActiveDeadlineSeconds - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeInt(int64(yy14)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("activeDeadlineSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ActiveDeadlineSeconds == nil { - r.EncodeNil() - } else { - yy16 := *x.ActiveDeadlineSeconds - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(yy16)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.Selector == nil { - r.EncodeNil() - } else { - x.Selector.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - x.Selector.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.AutoSelector == nil { - r.EncodeNil() - } else { - yy22 := *x.AutoSelector - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeBool(bool(yy22)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("autoSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AutoSelector == nil { - r.EncodeNil() - } else { - yy24 := *x.AutoSelector - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeBool(bool(yy24)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy27 := &x.Template - yy27.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy29 := &x.Template - yy29.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "parallelism": - if r.TryDecodeAsNil() { - if x.Parallelism != nil { - x.Parallelism = nil - } - } else { - if x.Parallelism == nil { - x.Parallelism = new(int32) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) - } - } - case "completions": - if r.TryDecodeAsNil() { - if x.Completions != nil { - x.Completions = nil - } - } else { - if x.Completions == nil { - x.Completions = new(int32) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) - } - } - case "activeDeadlineSeconds": - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - case "selector": 
- if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(LabelSelector) - } - x.Selector.CodecDecodeSelf(d) - } - case "autoSelector": - if r.TryDecodeAsNil() { - if x.AutoSelector != nil { - x.AutoSelector = nil - } - } else { - if x.AutoSelector == nil { - x.AutoSelector = new(bool) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - *((*bool)(x.AutoSelector)) = r.DecodeBool() - } - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} - } else { - yyv13 := &x.Template - yyv13.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Parallelism != nil { - x.Parallelism = nil - } - } else { - if x.Parallelism == nil { - x.Parallelism = new(int32) - } - yym16 := z.DecBinary() - _ = yym16 - if false { - } else { - *((*int32)(x.Parallelism)) = int32(r.DecodeInt(32)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Completions != nil { - x.Completions = nil - } - } else { - if x.Completions == nil { - x.Completions = new(int32) - } - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*int32)(x.Completions)) = int32(r.DecodeInt(32)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.ActiveDeadlineSeconds != nil { - x.ActiveDeadlineSeconds = nil - } - } else { - if x.ActiveDeadlineSeconds == nil { - x.ActiveDeadlineSeconds = new(int64) - } - yym20 := z.DecBinary() - _ = yym20 - if false { - } else { - *((*int64)(x.ActiveDeadlineSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(LabelSelector) - } - x.Selector.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.AutoSelector != nil { - x.AutoSelector = nil - } - } else { - if x.AutoSelector == nil { - x.AutoSelector = new(bool) - } - yym23 := z.DecBinary() - _ = yym23 - if false { - } else { - *((*bool)(x.AutoSelector)) = r.DecodeBool() - } - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - 
yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} - } else { - yyv24 := &x.Template - yyv24.CodecDecodeSelf(d) - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *JobStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Conditions) != 0 - yyq2[1] = x.StartTime != nil - yyq2[2] = x.CompletionTime != nil - yyq2[3] = x.Active != 0 - yyq2[4] = x.Succeeded != 0 - yyq2[5] = x.Failed != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceJobCondition(([]JobCondition)(x.Conditions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.StartTime == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym7 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("startTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.StartTime == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(x.StartTime) { - } else if yym8 { - z.EncBinaryMarshal(x.StartTime) - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(x.StartTime) - } else { - z.EncFallback(x.StartTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.CompletionTime == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { - } else if yym10 { - z.EncBinaryMarshal(x.CompletionTime) - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(x.CompletionTime) - } else { - 
z.EncFallback(x.CompletionTime) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("completionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.CompletionTime == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(x.CompletionTime) { - } else if yym11 { - z.EncBinaryMarshal(x.CompletionTime) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(x.CompletionTime) - } else { - z.EncFallback(x.CompletionTime) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.Active)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("active")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.Active)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeInt(int64(x.Succeeded)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("succeeded")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeInt(int64(x.Succeeded)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(x.Failed)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("failed")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeInt(int64(x.Failed)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } 
else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv4 := &x.Conditions - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceJobCondition((*[]JobCondition)(yyv4), d) - } - } - case "startTime": - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg1_unversioned.Time) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym7 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - case "completionTime": - if r.TryDecodeAsNil() { - if x.CompletionTime != nil { - x.CompletionTime = nil - } - } else { - if x.CompletionTime == nil { - x.CompletionTime = new(pkg1_unversioned.Time) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { - } else if yym9 { - z.DecBinaryUnmarshal(x.CompletionTime) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.CompletionTime) - } else { - z.DecFallback(x.CompletionTime, false) - } - } - case "active": - if r.TryDecodeAsNil() { - x.Active = 0 - } else { - x.Active = int32(r.DecodeInt(32)) - } - case "succeeded": - if r.TryDecodeAsNil() { - x.Succeeded = 0 - } else { - x.Succeeded = int32(r.DecodeInt(32)) - } - case "failed": - if r.TryDecodeAsNil() { - x.Failed = 0 - } else { - x.Failed = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj13 int - var yyb13 bool - var yyhl13 bool = l >= 0 - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv14 := &x.Conditions - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - h.decSliceJobCondition((*[]JobCondition)(yyv14), d) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.StartTime != nil { - x.StartTime = nil - } - } else { - if x.StartTime == nil { - x.StartTime = new(pkg1_unversioned.Time) - } - yym17 := z.DecBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.DecExt(x.StartTime) { - } else if yym17 { - z.DecBinaryUnmarshal(x.StartTime) - } else if !yym17 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.StartTime) - } else { - z.DecFallback(x.StartTime, false) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() 
{ - if x.CompletionTime != nil { - x.CompletionTime = nil - } - } else { - if x.CompletionTime == nil { - x.CompletionTime = new(pkg1_unversioned.Time) - } - yym19 := z.DecBinary() - _ = yym19 - if false { - } else if z.HasExtensions() && z.DecExt(x.CompletionTime) { - } else if yym19 { - z.DecBinaryUnmarshal(x.CompletionTime) - } else if !yym19 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.CompletionTime) - } else { - z.DecFallback(x.CompletionTime, false) - } - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Active = 0 - } else { - x.Active = int32(r.DecodeInt(32)) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Succeeded = 0 - } else { - x.Succeeded = int32(r.DecodeInt(32)) - } - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Failed = 0 - } else { - x.Failed = int32(r.DecodeInt(32)) - } - for { - yyj13++ - if yyhl13 { - yyb13 = yyj13 > l - } else { - yyb13 = r.CheckBreak() - } - if yyb13 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj13-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x JobConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *JobConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *JobCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = true - yyq2[3] = true - yyq2[4] = x.Reason != "" - yyq2[5] = x.Message != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yysf7 := &x.Status - yysf7.CodecEncodeSelf(e) - } else 
{ - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yysf8 := &x.Status - yysf8.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy10 := &x.LastProbeTime - yym11 := z.EncBinary() - _ = yym11 - if false { - } else if z.HasExtensions() && z.EncExt(yy10) { - } else if yym11 { - z.EncBinaryMarshal(yy10) - } else if !yym11 && z.IsJSONHandle() { - z.EncJSONMarshal(yy10) - } else { - z.EncFallback(yy10) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy12 := &x.LastProbeTime - yym13 := z.EncBinary() - _ = yym13 - if false { - } else if z.HasExtensions() && z.EncExt(yy12) { - } else if yym13 { - z.EncBinaryMarshal(yy12) - } else if !yym13 && z.IsJSONHandle() { - z.EncJSONMarshal(yy12) - } else { - z.EncFallback(yy12) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yy15 := &x.LastTransitionTime - yym16 := z.EncBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.EncExt(yy15) { - } else if yym16 { - z.EncBinaryMarshal(yy15) - } else if !yym16 && z.IsJSONHandle() { - z.EncJSONMarshal(yy15) - } else { - z.EncFallback(yy15) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy17 := &x.LastTransitionTime - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(yy17) { - } else if yym18 { - z.EncBinaryMarshal(yy17) - } else if !yym18 && z.IsJSONHandle() { - z.EncJSONMarshal(yy17) - } else { - z.EncFallback(yy17) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *JobCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - 
z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *JobCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = JobConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg2_v1.ConditionStatus(r.DecodeString()) - } - case "lastProbeTime": - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg1_unversioned.Time{} - } else { - yyv6 := &x.LastProbeTime - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - } else if yym7 { - z.DecBinaryUnmarshal(yyv6) - } else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} - } else { - yyv8 := &x.LastTransitionTime - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(yyv8) { - } else if yym9 { - z.DecBinaryUnmarshal(yyv8) - } else if !yym9 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv8) - } else { - z.DecFallback(yyv8, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *JobCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = JobConditionType(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { 
- x.Status = pkg2_v1.ConditionStatus(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg1_unversioned.Time{} - } else { - yyv15 := &x.LastProbeTime - yym16 := z.DecBinary() - _ = yym16 - if false { - } else if z.HasExtensions() && z.DecExt(yyv15) { - } else if yym16 { - z.DecBinaryUnmarshal(yyv15) - } else if !yym16 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv15) - } else { - z.DecFallback(yyv15, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg1_unversioned.Time{} - } else { - yyv17 := &x.LastTransitionTime - yym18 := z.DecBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.DecExt(yyv17) { - } else if yym18 { - z.DecBinaryUnmarshal(yyv17) - } else if !yym18 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv17) - } else { - z.DecFallback(yyv17, false) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Ingress) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) 
- } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Ingress) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Ingress) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta 
= pkg2_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = IngressSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = IngressStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Ingress) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = IngressSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = IngressStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - 
if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceIngress(([]Ingress)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceIngress(([]Ingress)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressList) codecDecodeSelfFromMap(l 
int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceIngress((*[]Ingress)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceIngress((*[]Ingress)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Backend != nil - yyq2[1] = len(x.TLS) != 0 - yyq2[2] = len(x.Rules) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Backend == nil { - r.EncodeNil() - } else { - x.Backend.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("backend")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Backend == nil { - r.EncodeNil() - } else { - x.Backend.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.TLS == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("tls")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TLS == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceIngressTLS(([]IngressTLS)(x.TLS), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Rules == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceIngressRule(([]IngressRule)(x.Rules), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rules")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Rules == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - h.encSliceIngressRule(([]IngressRule)(x.Rules), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h 
codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "backend": - if r.TryDecodeAsNil() { - if x.Backend != nil { - x.Backend = nil - } - } else { - if x.Backend == nil { - x.Backend = new(IngressBackend) - } - x.Backend.CodecDecodeSelf(d) - } - case "tls": - if r.TryDecodeAsNil() { - x.TLS = nil - } else { - yyv5 := &x.TLS - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceIngressTLS((*[]IngressTLS)(yyv5), d) - } - } - case "rules": - if r.TryDecodeAsNil() { - x.Rules = nil - } else { - yyv7 := &x.Rules - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceIngressRule((*[]IngressRule)(yyv7), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Backend != nil { - x.Backend = nil - } - } else { - if x.Backend == nil { - x.Backend = new(IngressBackend) - } - x.Backend.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.TLS = nil - } else { - yyv11 := &x.TLS - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceIngressTLS((*[]IngressTLS)(yyv11), d) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rules = nil - } else { - yyv13 := &x.Rules - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceIngressRule((*[]IngressRule)(yyv13), d) - } - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressTLS) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Hosts) != 0 - yyq2[1] = x.SecretName != "" - var yynn2 int - if yyr2 || 
yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Hosts == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.Hosts, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hosts")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Hosts == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.Hosts, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.SecretName)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressTLS) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressTLS) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "hosts": - if r.TryDecodeAsNil() { - x.Hosts = nil - } else { - yyv4 := &x.Hosts - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecSliceStringX(yyv4, false, d) - } - } - case "secretName": - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - x.SecretName = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressTLS) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r 
:= codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Hosts = nil - } else { - yyv8 := &x.Hosts - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - z.F.DecSliceStringX(yyv8, false, d) - } - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SecretName = "" - } else { - x.SecretName = string(r.DecodeString()) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.LoadBalancer - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("loadBalancer")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.LoadBalancer - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - 
} - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "loadBalancer": - if r.TryDecodeAsNil() { - x.LoadBalancer = pkg2_v1.LoadBalancerStatus{} - } else { - yyv4 := &x.LoadBalancer - yyv4.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LoadBalancer = pkg2_v1.LoadBalancerStatus{} - } else { - yyv6 := &x.LoadBalancer - yyv6.CodecDecodeSelf(d) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressRule) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Host != "" - yyq2[1] = x.IngressRuleValue.HTTP != nil && x.HTTP != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("host")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Host)) - } - } - } - var yyn6 bool - if x.IngressRuleValue.HTTP == nil { - yyn6 = true - goto LABEL6 - } - LABEL6: - if yyr2 || yy2arr2 { - if yyn6 { - r.EncodeNil() - } else { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.HTTP == nil { - r.EncodeNil() - } else { - x.HTTP.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("http")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if yyn6 { - r.EncodeNil() - } else { - if x.HTTP == nil { - r.EncodeNil() - } else { - x.HTTP.CodecEncodeSelf(e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressRule) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "host": - if r.TryDecodeAsNil() { - x.Host = "" - } else { - x.Host = string(r.DecodeString()) - } - case "http": - if x.IngressRuleValue.HTTP == nil { - x.IngressRuleValue.HTTP = new(HTTPIngressRuleValue) - } - if r.TryDecodeAsNil() { - if x.HTTP != nil { - x.HTTP = nil - } - } else { - if x.HTTP == nil { - x.HTTP = new(HTTPIngressRuleValue) - } - x.HTTP.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Host = "" - } else { - x.Host = string(r.DecodeString()) - } - if x.IngressRuleValue.HTTP == nil { - x.IngressRuleValue.HTTP = new(HTTPIngressRuleValue) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HTTP != nil { - x.HTTP = nil - } - } else { - if x.HTTP == nil { - x.HTTP = new(HTTPIngressRuleValue) - } - x.HTTP.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else 
if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.HTTP != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.HTTP == nil { - r.EncodeNil() - } else { - x.HTTP.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("http")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HTTP == nil { - r.EncodeNil() - } else { - x.HTTP.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "http": - if r.TryDecodeAsNil() { - if x.HTTP != nil { - x.HTTP = nil - } - } else { - if x.HTTP == nil { - x.HTTP = new(HTTPIngressRuleValue) - } - x.HTTP.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj5 int - var yyb5 bool - var yyhl5 bool = l >= 0 - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.HTTP != nil { - x.HTTP = nil - } - } else { - if x.HTTP == nil { - x.HTTP = new(HTTPIngressRuleValue) - } - x.HTTP.CodecDecodeSelf(d) - } - for { - yyj5++ - if yyhl5 { - yyb5 = yyj5 > l - } else { - yyb5 = r.CheckBreak() - } - if yyb5 { - break - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj5-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HTTPIngressRuleValue) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [1]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(1) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Paths == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("paths")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Paths == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceHTTPIngressPath(([]HTTPIngressPath)(x.Paths), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HTTPIngressRuleValue) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HTTPIngressRuleValue) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "paths": - if r.TryDecodeAsNil() { - x.Paths = nil - } else { - yyv4 := &x.Paths - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv4), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HTTPIngressRuleValue) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int 
- var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Paths = nil - } else { - yyv7 := &x.Paths - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSliceHTTPIngressPath((*[]HTTPIngressPath)(yyv7), d) - } - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *HTTPIngressPath) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Path != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("path")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Path)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.Backend - yy7.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("backend")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.Backend - yy9.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HTTPIngressPath) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HTTPIngressPath) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to 
decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "path": - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - case "backend": - if r.TryDecodeAsNil() { - x.Backend = IngressBackend{} - } else { - yyv5 := &x.Backend - yyv5.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *HTTPIngressPath) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Path = "" - } else { - x.Path = string(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Backend = IngressBackend{} - } else { - yyv8 := &x.Backend - yyv8.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IngressBackend) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serviceName")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServiceName)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy7 := &x.ServicePort - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) - } else { - z.EncFallback(yy7) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("servicePort")) 
- z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.ServicePort - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IngressBackend) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IngressBackend) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "serviceName": - if r.TryDecodeAsNil() { - x.ServiceName = "" - } else { - x.ServiceName = string(r.DecodeString()) - } - case "servicePort": - if r.TryDecodeAsNil() { - x.ServicePort = pkg5_intstr.IntOrString{} - } else { - yyv5 := &x.ServicePort - yym6 := z.DecBinary() - _ = yym6 - if false { - } else if z.HasExtensions() && z.DecExt(yyv5) { - } else if !yym6 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv5) - } else { - z.DecFallback(yyv5, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IngressBackend) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServiceName = "" - } else { - x.ServiceName = string(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServicePort = pkg5_intstr.IntOrString{} - } else { - yyv9 := &x.ServicePort - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv9) - } else { - z.DecFallback(yyv9, 
false) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ExportOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.Export)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("export")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.Export)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeBool(bool(x.Exact)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("exact")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeBool(bool(x.Exact)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ExportOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && 
z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ExportOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "export": - if r.TryDecodeAsNil() { - x.Export = false - } else { - x.Export = bool(r.DecodeBool()) - } - case "exact": - if r.TryDecodeAsNil() { - x.Exact = false - } else { - x.Exact = bool(r.DecodeBool()) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ExportOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Export = false - } else { - x.Export = bool(r.DecodeBool()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Exact = false - } else { - x.Exact = bool(r.DecodeBool()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x 
*ListOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [7]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.LabelSelector != "" - yyq2[1] = x.FieldSelector != "" - yyq2[2] = x.Watch != false - yyq2[3] = x.ResourceVersion != "" - yyq2[4] = x.TimeoutSeconds != nil - yyq2[5] = x.Kind != "" - yyq2[6] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(7) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.LabelSelector)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("labelSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.LabelSelector)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldSelector)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fieldSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.FieldSelector)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeBool(bool(x.Watch)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("watch")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeBool(bool(x.Watch)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ResourceVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.TimeoutSeconds == nil { - r.EncodeNil() - } else { - yy16 := *x.TimeoutSeconds - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - 
r.EncodeInt(int64(yy16)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("timeoutSeconds")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.TimeoutSeconds == nil { - r.EncodeNil() - } else { - yy18 := *x.TimeoutSeconds - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeInt(int64(yy18)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ListOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ListOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "labelSelector": - if r.TryDecodeAsNil() { - x.LabelSelector = "" - } else { - x.LabelSelector = string(r.DecodeString()) - } - case "fieldSelector": - if r.TryDecodeAsNil() { - x.FieldSelector = "" - } else { - x.FieldSelector = string(r.DecodeString()) - } - case "watch": - if r.TryDecodeAsNil() 
{ - x.Watch = false - } else { - x.Watch = bool(r.DecodeBool()) - } - case "resourceVersion": - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - case "timeoutSeconds": - if r.TryDecodeAsNil() { - if x.TimeoutSeconds != nil { - x.TimeoutSeconds = nil - } - } else { - if x.TimeoutSeconds == nil { - x.TimeoutSeconds = new(int64) - } - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64)) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ListOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj12 int - var yyb12 bool - var yyhl12 bool = l >= 0 - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LabelSelector = "" - } else { - x.LabelSelector = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FieldSelector = "" - } else { - x.FieldSelector = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Watch = false - } else { - x.Watch = bool(r.DecodeBool()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceVersion = "" - } else { - x.ResourceVersion = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.TimeoutSeconds != nil { - x.TimeoutSeconds = nil - } - } else { - if x.TimeoutSeconds == nil { - x.TimeoutSeconds = new(int64) - } - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - *((*int64)(x.TimeoutSeconds)) = int64(r.DecodeInt(64)) - } - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj12++ - if yyhl12 { - yyb12 = yyj12 > l - } else { - yyb12 = r.CheckBreak() - } - if yyb12 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj12-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LabelSelector) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.MatchLabels) != 0 - yyq2[1] = len(x.MatchExpressions) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.MatchLabels == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncMapStringStringV(x.MatchLabels, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("matchLabels")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MatchLabels == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncMapStringStringV(x.MatchLabels, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.MatchExpressions == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceLabelSelectorRequirement(([]LabelSelectorRequirement)(x.MatchExpressions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("matchExpressions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MatchExpressions == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceLabelSelectorRequirement(([]LabelSelectorRequirement)(x.MatchExpressions), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LabelSelector) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - 
panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LabelSelector) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "matchLabels": - if r.TryDecodeAsNil() { - x.MatchLabels = nil - } else { - yyv4 := &x.MatchLabels - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecMapStringStringX(yyv4, false, d) - } - } - case "matchExpressions": - if r.TryDecodeAsNil() { - x.MatchExpressions = nil - } else { - yyv6 := &x.MatchExpressions - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceLabelSelectorRequirement((*[]LabelSelectorRequirement)(yyv6), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LabelSelector) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MatchLabels = nil - } else { - yyv9 := &x.MatchLabels - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - z.F.DecMapStringStringX(yyv9, false, d) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MatchExpressions = nil - } else { - yyv11 := &x.MatchExpressions - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceLabelSelectorRequirement((*[]LabelSelectorRequirement)(yyv11), d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *LabelSelectorRequirement) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[2] = len(x.Values) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, 
string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("key")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Key)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Operator.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("operator")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Operator.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.Values == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - z.F.EncSliceStringV(x.Values, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("values")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Values == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - z.F.EncSliceStringV(x.Values, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *LabelSelectorRequirement) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *LabelSelectorRequirement) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - case "operator": - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - x.Operator = LabelSelectorOperator(r.DecodeString()) - } - case "values": - if r.TryDecodeAsNil() { - x.Values = nil - } else { - yyv6 := &x.Values - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - z.F.DecSliceStringX(yyv6, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *LabelSelectorRequirement) codecDecodeSelfFromArray(l 
int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Operator = "" - } else { - x.Operator = LabelSelectorOperator(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Values = nil - } else { - yyv11 := &x.Values - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - z.F.DecSliceStringX(yyv11, false, d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x LabelSelectorOperator) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *LabelSelectorOperator) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ReplicaSet) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicaSet) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicaSet) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv4 := 
&x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ReplicaSetSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ReplicaSetStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicaSet) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ReplicaSetSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ReplicaSetStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicaSetList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } 
- r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceReplicaSet(([]ReplicaSet)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicaSetList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicaSetList) codecDecodeSelfFromMap(l int, d 
*codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceReplicaSet((*[]ReplicaSet)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicaSetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceReplicaSet((*[]ReplicaSet)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicaSetSpec) CodecEncodeSelf(e *codec1978.Encoder) { - 
var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Replicas != nil - yyq2[1] = x.Selector != nil - yyq2[2] = true - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Replicas == nil { - r.EncodeNil() - } else { - yy4 := *x.Replicas - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(yy4)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Replicas == nil { - r.EncodeNil() - } else { - yy6 := *x.Replicas - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(yy6)) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - x.Selector.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - x.Selector.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy12 := &x.Template - yy12.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("template")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.Template - yy14.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicaSetSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicaSetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= 
l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } - } else { - if x.Replicas == nil { - x.Replicas = new(int32) - } - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(LabelSelector) - } - x.Selector.CodecDecodeSelf(d) - } - case "template": - if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} - } else { - yyv7 := &x.Template - yyv7.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicaSetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Replicas != nil { - x.Replicas = nil - } - } else { - if x.Replicas == nil { - x.Replicas = new(int32) - } - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - *((*int32)(x.Replicas)) = int32(r.DecodeInt(32)) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(LabelSelector) - } - x.Selector.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Template = pkg2_v1.PodTemplateSpec{} - } else { - yyv12 := &x.Template - yyv12.CodecDecodeSelf(d) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ReplicaSetStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.FullyLabeledReplicas != 0 - yyq2[2] = x.ObservedGeneration != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 
{ - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("replicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Replicas)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.FullyLabeledReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fullyLabeledReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.FullyLabeledReplicas)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("observedGeneration")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.ObservedGeneration)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ReplicaSetStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ReplicaSetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "replicas": - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - case "fullyLabeledReplicas": - if r.TryDecodeAsNil() { - x.FullyLabeledReplicas = 0 - } else { - x.FullyLabeledReplicas = int32(r.DecodeInt(32)) - } - case "observedGeneration": - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = 
int64(r.DecodeInt(64)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ReplicaSetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Replicas = 0 - } else { - x.Replicas = int32(r.DecodeInt(32)) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FullyLabeledReplicas = 0 - } else { - x.FullyLabeledReplicas = int32(r.DecodeInt(32)) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObservedGeneration = 0 - } else { - x.ObservedGeneration = int64(r.DecodeInt(64)) - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodSecurityPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - 
r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSecurityPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSecurityPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PodSecurityPolicySpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSecurityPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv9 := &x.ObjectMeta - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PodSecurityPolicySpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodSecurityPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [14]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Privileged != false - yyq2[1] = len(x.DefaultAddCapabilities) != 0 - yyq2[2] = len(x.RequiredDropCapabilities) != 0 - yyq2[3] = len(x.AllowedCapabilities) != 0 - yyq2[4] = len(x.Volumes) != 0 - yyq2[5] = x.HostNetwork != false - yyq2[6] = len(x.HostPorts) != 0 - yyq2[7] = x.HostPID != false - yyq2[8] = x.HostIPC != false - yyq2[13] = x.ReadOnlyRootFilesystem != false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(14) - } else { - yynn2 = 4 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.Privileged)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("privileged")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.Privileged)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.DefaultAddCapabilities == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.DefaultAddCapabilities), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("defaultAddCapabilities")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.DefaultAddCapabilities == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.DefaultAddCapabilities), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - if x.RequiredDropCapabilities == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.RequiredDropCapabilities), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("requiredDropCapabilities")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.RequiredDropCapabilities == nil { - r.EncodeNil() - } else { - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.RequiredDropCapabilities), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - if x.AllowedCapabilities == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.AllowedCapabilities), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("allowedCapabilities")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.AllowedCapabilities == nil { - r.EncodeNil() - } else { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - h.encSlicev1_Capability(([]pkg2_v1.Capability)(x.AllowedCapabilities), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.Volumes == nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - h.encSliceFSType(([]FSType)(x.Volumes), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumes")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Volumes == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - h.encSliceFSType(([]FSType)(x.Volumes), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeBool(bool(x.HostNetwork)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostNetwork")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeBool(bool(x.HostNetwork)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[6] { - if x.HostPorts == nil { - r.EncodeNil() - } else { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("hostPorts")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.HostPorts == nil { - r.EncodeNil() - } else { - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - h.encSliceHostPortRange(([]HostPortRange)(x.HostPorts), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[7] { - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeBool(bool(x.HostPID)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostPID")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym26 := z.EncBinary() - _ = yym26 - if false { - } else { - r.EncodeBool(bool(x.HostPID)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[8] { - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeBool(bool(x.HostIPC)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[8] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("hostIPC")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym29 := z.EncBinary() - _ = yym29 - if false { - } else { - r.EncodeBool(bool(x.HostIPC)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy31 := &x.SELinux - yy31.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("seLinux")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy33 := &x.SELinux - yy33.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy36 := &x.RunAsUser - yy36.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("runAsUser")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy38 := &x.RunAsUser - yy38.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy41 := &x.SupplementalGroups - yy41.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("supplementalGroups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy43 := &x.SupplementalGroups - yy43.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy46 := &x.FSGroup - yy46.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("fsGroup")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy48 := &x.FSGroup - yy48.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[13] { - yym51 := z.EncBinary() - _ = yym51 - if false { - } else { - r.EncodeBool(bool(x.ReadOnlyRootFilesystem)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq2[13] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("readOnlyRootFilesystem")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym52 := z.EncBinary() - _ = yym52 - if false { - } else { - r.EncodeBool(bool(x.ReadOnlyRootFilesystem)) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSecurityPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSecurityPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "privileged": - if r.TryDecodeAsNil() { - x.Privileged = false - } else { - x.Privileged = bool(r.DecodeBool()) - } - case "defaultAddCapabilities": - if r.TryDecodeAsNil() { - x.DefaultAddCapabilities = nil - } else { - yyv5 := &x.DefaultAddCapabilities - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv5), d) - } - } - case "requiredDropCapabilities": - if r.TryDecodeAsNil() { - x.RequiredDropCapabilities = nil - } else { - yyv7 := &x.RequiredDropCapabilities - yym8 := z.DecBinary() - _ = yym8 - if false { - } else { - h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv7), d) - } - } - case "allowedCapabilities": - if r.TryDecodeAsNil() { - x.AllowedCapabilities = nil - } else { - yyv9 := &x.AllowedCapabilities - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv9), d) - } - } - case "volumes": - if r.TryDecodeAsNil() { - x.Volumes = nil - } else { - yyv11 := &x.Volumes - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceFSType((*[]FSType)(yyv11), d) - } - } - case "hostNetwork": - if r.TryDecodeAsNil() { - x.HostNetwork = false - } else { - x.HostNetwork = bool(r.DecodeBool()) - } - case "hostPorts": - if r.TryDecodeAsNil() { - x.HostPorts = nil - } else { - yyv14 := &x.HostPorts - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - h.decSliceHostPortRange((*[]HostPortRange)(yyv14), d) - } - } - case "hostPID": - if r.TryDecodeAsNil() { - x.HostPID = false - } else { - x.HostPID = bool(r.DecodeBool()) - } - case "hostIPC": - if r.TryDecodeAsNil() { - x.HostIPC = false - } else { - x.HostIPC = bool(r.DecodeBool()) - } - case "seLinux": - if r.TryDecodeAsNil() { - x.SELinux = SELinuxStrategyOptions{} - } else { - yyv18 := &x.SELinux - yyv18.CodecDecodeSelf(d) - } - case "runAsUser": - if r.TryDecodeAsNil() { - x.RunAsUser = RunAsUserStrategyOptions{} - } else { - yyv19 := &x.RunAsUser - yyv19.CodecDecodeSelf(d) - } - case 
"supplementalGroups": - if r.TryDecodeAsNil() { - x.SupplementalGroups = SupplementalGroupsStrategyOptions{} - } else { - yyv20 := &x.SupplementalGroups - yyv20.CodecDecodeSelf(d) - } - case "fsGroup": - if r.TryDecodeAsNil() { - x.FSGroup = FSGroupStrategyOptions{} - } else { - yyv21 := &x.FSGroup - yyv21.CodecDecodeSelf(d) - } - case "readOnlyRootFilesystem": - if r.TryDecodeAsNil() { - x.ReadOnlyRootFilesystem = false - } else { - x.ReadOnlyRootFilesystem = bool(r.DecodeBool()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSecurityPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj23 int - var yyb23 bool - var yyhl23 bool = l >= 0 - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Privileged = false - } else { - x.Privileged = bool(r.DecodeBool()) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DefaultAddCapabilities = nil - } else { - yyv25 := &x.DefaultAddCapabilities - yym26 := z.DecBinary() - _ = yym26 - if false { - } else { - h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv25), d) - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RequiredDropCapabilities = nil - } else { - yyv27 := &x.RequiredDropCapabilities - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv27), d) - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AllowedCapabilities = nil - } else { - yyv29 := &x.AllowedCapabilities - yym30 := z.DecBinary() - _ = yym30 - if false { - } else { - h.decSlicev1_Capability((*[]pkg2_v1.Capability)(yyv29), d) - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Volumes = nil - } else { - yyv31 := &x.Volumes - yym32 := z.DecBinary() - _ = yym32 - if false { - } else { - h.decSliceFSType((*[]FSType)(yyv31), d) - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostNetwork = false - } else { - x.HostNetwork = bool(r.DecodeBool()) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostPorts = nil - } else { - yyv34 := &x.HostPorts - yym35 := z.DecBinary() - _ = yym35 - if false { - } else { - h.decSliceHostPortRange((*[]HostPortRange)(yyv34), d) - } - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostPID = false - } else { - x.HostPID = bool(r.DecodeBool()) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.HostIPC = false - } else { - x.HostIPC = bool(r.DecodeBool()) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SELinux = SELinuxStrategyOptions{} - } else { - yyv38 := &x.SELinux - yyv38.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RunAsUser = RunAsUserStrategyOptions{} - } else { - yyv39 := &x.RunAsUser - yyv39.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.SupplementalGroups = SupplementalGroupsStrategyOptions{} - } else { - yyv40 := &x.SupplementalGroups - yyv40.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.FSGroup = FSGroupStrategyOptions{} - } else { - yyv41 := &x.FSGroup - yyv41.CodecDecodeSelf(d) - } - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ReadOnlyRootFilesystem = false - } else { - x.ReadOnlyRootFilesystem = bool(r.DecodeBool()) - } - for { - yyj23++ - if yyhl23 { - yyb23 = yyj23 > l - } else { - yyb23 = r.CheckBreak() - } - if yyb23 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj23-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x FSType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *FSType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false 
{ - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *HostPortRange) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Min)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("min")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Min)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.Max)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("max")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.Max)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *HostPortRange) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *HostPortRange) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "min": - if r.TryDecodeAsNil() { - x.Min = 0 - } else { - x.Min = int32(r.DecodeInt(32)) - } - case "max": - if r.TryDecodeAsNil() { - x.Max = 0 - } else { - x.Max = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - 
-func (x *HostPortRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Min = 0 - } else { - x.Min = int32(r.DecodeInt(32)) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Max = 0 - } else { - x.Max = int32(r.DecodeInt(32)) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *SELinuxStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.SELinuxOptions != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Rule.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rule")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Rule.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("seLinuxOptions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SELinuxOptions == nil { - r.EncodeNil() - } else { - x.SELinuxOptions.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SELinuxStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SELinuxStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "rule": - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = SELinuxStrategy(r.DecodeString()) - } - case "seLinuxOptions": - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(pkg2_v1.SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SELinuxStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = SELinuxStrategy(r.DecodeString()) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SELinuxOptions != nil { - x.SELinuxOptions = nil - } - } else { - if x.SELinuxOptions == nil { - x.SELinuxOptions = new(pkg2_v1.SELinuxOptions) - } - x.SELinuxOptions.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x SELinuxStrategy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *SELinuxStrategy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *RunAsUserStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 
[2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = len(x.Ranges) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Rule.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rule")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Rule.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Ranges == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ranges")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ranges == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RunAsUserStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RunAsUserStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "rule": - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = RunAsUserStrategy(r.DecodeString()) - } - case "ranges": - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv5 := &x.Ranges - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv5), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RunAsUserStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 
0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = RunAsUserStrategy(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv9 := &x.Ranges - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv9), d) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *IDRange) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeInt(int64(x.Min)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("min")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeInt(int64(x.Min)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.Max)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("max")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.Max)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *IDRange) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *IDRange) codecDecodeSelfFromMap(l int, 
d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "min": - if r.TryDecodeAsNil() { - x.Min = 0 - } else { - x.Min = int64(r.DecodeInt(64)) - } - case "max": - if r.TryDecodeAsNil() { - x.Max = 0 - } else { - x.Max = int64(r.DecodeInt(64)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *IDRange) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Min = 0 - } else { - x.Min = int64(r.DecodeInt(64)) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Max = 0 - } else { - x.Max = int64(r.DecodeInt(64)) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x RunAsUserStrategy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *RunAsUserStrategy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *FSGroupStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Rule != "" - yyq2[1] = len(x.Ranges) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Rule.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] 
{ - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rule")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Rule.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Ranges == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ranges")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ranges == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *FSGroupStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *FSGroupStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "rule": - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = FSGroupStrategyType(r.DecodeString()) - } - case "ranges": - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv5 := &x.Ranges - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv5), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *FSGroupStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = FSGroupStrategyType(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = 
r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv9 := &x.Ranges - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv9), d) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x FSGroupStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *FSGroupStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *SupplementalGroupsStrategyOptions) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Rule != "" - yyq2[1] = len(x.Ranges) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - x.Rule.CodecEncodeSelf(e) - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rule")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Rule.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Ranges == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ranges")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ranges == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceIDRange(([]IDRange)(x.Ranges), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *SupplementalGroupsStrategyOptions) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == 
codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "rule": - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = SupplementalGroupsStrategyType(r.DecodeString()) - } - case "ranges": - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv5 := &x.Ranges - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv5), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *SupplementalGroupsStrategyOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rule = "" - } else { - x.Rule = SupplementalGroupsStrategyType(r.DecodeString()) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ranges = nil - } else { - yyv9 := &x.Ranges - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceIDRange((*[]IDRange)(yyv9), d) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x SupplementalGroupsStrategyType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *SupplementalGroupsStrategyType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *PodSecurityPolicyList) 
CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSlicePodSecurityPolicy(([]PodSecurityPolicy)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodSecurityPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := 
z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodSecurityPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodSecurityPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSlicePodSecurityPolicy((*[]PodSecurityPolicy)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = 
string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicy) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicy) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicy) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = NetworkPolicySpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicy) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv9 := &x.ObjectMeta - yyv9.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = NetworkPolicySpec{} - } else { - yyv10 := &x.Spec - yyv10.CodecDecodeSelf(d) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicySpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = len(x.Ingress) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy4 := &x.PodSelector - yy4.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.PodSelector - yy6.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Ingress == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ingress")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ingress == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceNetworkPolicyIngressRule(([]NetworkPolicyIngressRule)(x.Ingress), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicySpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicySpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; 
; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "podSelector": - if r.TryDecodeAsNil() { - x.PodSelector = LabelSelector{} - } else { - yyv4 := &x.PodSelector - yyv4.CodecDecodeSelf(d) - } - case "ingress": - if r.TryDecodeAsNil() { - x.Ingress = nil - } else { - yyv5 := &x.Ingress - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv5), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicySpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodSelector = LabelSelector{} - } else { - yyv8 := &x.PodSelector - yyv8.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ingress = nil - } else { - yyv9 := &x.Ingress - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceNetworkPolicyIngressRule((*[]NetworkPolicyIngressRule)(yyv9), d) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicyIngressRule) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = len(x.Ports) != 0 - yyq2[1] = len(x.From) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Ports == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("ports")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Ports == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - h.encSliceNetworkPolicyPort(([]NetworkPolicyPort)(x.Ports), e) - 
} - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.From == nil { - r.EncodeNil() - } else { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("from")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.From == nil { - r.EncodeNil() - } else { - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - h.encSliceNetworkPolicyPeer(([]NetworkPolicyPeer)(x.From), e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicyIngressRule) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicyIngressRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "ports": - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv4 := &x.Ports - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv4), d) - } - } - case "from": - if r.TryDecodeAsNil() { - x.From = nil - } else { - yyv6 := &x.From - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv6), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicyIngressRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Ports = nil - } else { - yyv9 := &x.Ports - yym10 := z.DecBinary() - _ = yym10 - if false { - } else { - h.decSliceNetworkPolicyPort((*[]NetworkPolicyPort)(yyv9), d) - } - } - yyj8++ - if yyhl8 { 
- yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.From = nil - } else { - yyv11 := &x.From - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSliceNetworkPolicyPeer((*[]NetworkPolicyPeer)(yyv11), d) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicyPort) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = x.Protocol != nil - yyq2[1] = x.Port != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.Protocol == nil { - r.EncodeNil() - } else { - yy4 := *x.Protocol - yysf5 := &yy4 - yysf5.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("protocol")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Protocol == nil { - r.EncodeNil() - } else { - yy6 := *x.Protocol - yysf7 := &yy6 - yysf7.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Port == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.EncExt(x.Port) { - } else if !yym9 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Port) - } else { - z.EncFallback(x.Port) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("port")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Port == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.Port) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Port) - } else { - z.EncFallback(x.Port) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicyPort) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := 
r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicyPort) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "protocol": - if r.TryDecodeAsNil() { - if x.Protocol != nil { - x.Protocol = nil - } - } else { - if x.Protocol == nil { - x.Protocol = new(pkg2_v1.Protocol) - } - x.Protocol.CodecDecodeSelf(d) - } - case "port": - if r.TryDecodeAsNil() { - if x.Port != nil { - x.Port = nil - } - } else { - if x.Port == nil { - x.Port = new(pkg5_intstr.IntOrString) - } - yym6 := z.DecBinary() - _ = yym6 - if false { - } else if z.HasExtensions() && z.DecExt(x.Port) { - } else if !yym6 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Port) - } else { - z.DecFallback(x.Port, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicyPort) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj7 int - var yyb7 bool - var yyhl7 bool = l >= 0 - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Protocol != nil { - x.Protocol = nil - } - } else { - if x.Protocol == nil { - x.Protocol = new(pkg2_v1.Protocol) - } - x.Protocol.CodecDecodeSelf(d) - } - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Port != nil { - x.Port = nil - } - } else { - if x.Port == nil { - x.Port = new(pkg5_intstr.IntOrString) - } - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(x.Port) { - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Port) - } else { - z.DecFallback(x.Port, false) - } - } - for { - yyj7++ - if yyhl7 { - yyb7 = yyj7 > l - } else { - yyb7 = r.CheckBreak() - } - if yyb7 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj7-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicyPeer) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = 
x.PodSelector != nil - yyq2[1] = x.NamespaceSelector != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - if x.PodSelector == nil { - r.EncodeNil() - } else { - x.PodSelector.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.PodSelector == nil { - r.EncodeNil() - } else { - x.PodSelector.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.NamespaceSelector == nil { - r.EncodeNil() - } else { - x.NamespaceSelector.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespaceSelector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NamespaceSelector == nil { - r.EncodeNil() - } else { - x.NamespaceSelector.CodecEncodeSelf(e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicyPeer) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicyPeer) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "podSelector": - if r.TryDecodeAsNil() { - if x.PodSelector != nil { - x.PodSelector = nil - } - } else { - if x.PodSelector == nil { - x.PodSelector = new(LabelSelector) - } - x.PodSelector.CodecDecodeSelf(d) - } - case "namespaceSelector": - if r.TryDecodeAsNil() { - if x.NamespaceSelector != nil { - x.NamespaceSelector = nil - } - } else { - if x.NamespaceSelector == nil { - x.NamespaceSelector = new(LabelSelector) - } - x.NamespaceSelector.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x 
*NetworkPolicyPeer) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj6 int - var yyb6 bool - var yyhl6 bool = l >= 0 - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.PodSelector != nil { - x.PodSelector = nil - } - } else { - if x.PodSelector == nil { - x.PodSelector = new(LabelSelector) - } - x.PodSelector.CodecDecodeSelf(d) - } - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.NamespaceSelector != nil { - x.NamespaceSelector = nil - } - } else { - if x.NamespaceSelector == nil { - x.NamespaceSelector = new(LabelSelector) - } - x.NamespaceSelector.CodecDecodeSelf(d) - } - for { - yyj6++ - if yyhl6 { - yyb6 = yyj6 > l - } else { - yyb6 = r.CheckBreak() - } - if yyb6 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj6-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *NetworkPolicyList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceNetworkPolicy(([]NetworkPolicy)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *NetworkPolicyList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *NetworkPolicyList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *NetworkPolicyList) codecDecodeSelfFromArray(l int, 
d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceNetworkPolicy((*[]NetworkPolicy)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceCustomMetricTarget(v []CustomMetricTarget, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []CustomMetricTarget{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]CustomMetricTarget, yyrl1) - } - } else { - yyv1 = make([]CustomMetricTarget, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = 
CustomMetricTarget{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, CustomMetricTarget{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricTarget{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, CustomMetricTarget{}) // var yyz1 CustomMetricTarget - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricTarget{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []CustomMetricTarget{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceCustomMetricCurrentStatus(v []CustomMetricCurrentStatus, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurrentStatus, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []CustomMetricCurrentStatus{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]CustomMetricCurrentStatus, yyrl1) - } - } else { - yyv1 = make([]CustomMetricCurrentStatus, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricCurrentStatus{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, CustomMetricCurrentStatus{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricCurrentStatus{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, CustomMetricCurrentStatus{}) // var yyz1 CustomMetricCurrentStatus - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = CustomMetricCurrentStatus{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []CustomMetricCurrentStatus{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) 
encSliceHorizontalPodAutoscaler(v []HorizontalPodAutoscaler, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutoscaler, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []HorizontalPodAutoscaler{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 360) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]HorizontalPodAutoscaler, yyrl1) - } - } else { - yyv1 = make([]HorizontalPodAutoscaler, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HorizontalPodAutoscaler{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, HorizontalPodAutoscaler{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HorizontalPodAutoscaler{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, HorizontalPodAutoscaler{}) // var yyz1 HorizontalPodAutoscaler - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = HorizontalPodAutoscaler{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []HorizontalPodAutoscaler{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceAPIVersion(v []APIVersion, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []APIVersion{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { 
- if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]APIVersion, yyrl1) - } - } else { - yyv1 = make([]APIVersion, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = APIVersion{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, APIVersion{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = APIVersion{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, APIVersion{}) // var yyz1 APIVersion - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = APIVersion{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []APIVersion{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceThirdPartyResource(v []ThirdPartyResource, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ThirdPartyResource{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 280) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ThirdPartyResource, yyrl1) - } - } else { - yyv1 = make([]ThirdPartyResource, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResource{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ThirdPartyResource{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResource{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ThirdPartyResource{}) // var yyz1 ThirdPartyResource - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResource{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - 
if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ThirdPartyResource{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceDeployment(v []Deployment, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Deployment{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 792) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Deployment, yyrl1) - } - } else { - yyv1 = make([]Deployment, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Deployment{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Deployment{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Deployment{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Deployment{}) // var yyz1 Deployment - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Deployment{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Deployment{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceDaemonSet(v []DaemonSet, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []DaemonSet{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, 
z.DecBasicHandle().MaxInitLen, 720) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]DaemonSet, yyrl1) - } - } else { - yyv1 = make([]DaemonSet, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = DaemonSet{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, DaemonSet{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = DaemonSet{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, DaemonSet{}) // var yyz1 DaemonSet - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = DaemonSet{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []DaemonSet{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceThirdPartyResourceData(v []ThirdPartyResourceData, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceData, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ThirdPartyResourceData{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ThirdPartyResourceData, yyrl1) - } - } else { - yyv1 = make([]ThirdPartyResourceData, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResourceData{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ThirdPartyResourceData{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResourceData{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ThirdPartyResourceData{}) // var yyz1 ThirdPartyResourceData - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ThirdPartyResourceData{} - } else { - 
yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ThirdPartyResourceData{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Job{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 792) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Job, yyrl1) - } - } else { - yyv1 = make([]Job, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Job{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Job{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Job{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Job{}) // var yyz1 Job - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Job{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Job{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []JobCondition{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, 
z.DecBasicHandle().MaxInitLen, 112) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]JobCondition, yyrl1) - } - } else { - yyv1 = make([]JobCondition, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = JobCondition{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, JobCondition{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = JobCondition{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, JobCondition{}) // var yyz1 JobCondition - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = JobCondition{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []JobCondition{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceIngress(v []Ingress, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Ingress{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 320) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Ingress, yyrl1) - } - } else { - yyv1 = make([]Ingress, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Ingress{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Ingress{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Ingress{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Ingress{}) // var yyz1 Ingress - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Ingress{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 
0 && yyv1 == nil { - yyv1 = []Ingress{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceIngressTLS(v []IngressTLS, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []IngressTLS{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 40) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]IngressTLS, yyrl1) - } - } else { - yyv1 = make([]IngressTLS, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressTLS{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, IngressTLS{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressTLS{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, IngressTLS{}) // var yyz1 IngressTLS - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressTLS{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []IngressTLS{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceIngressRule(v []IngressRule, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []IngressRule{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 24) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = 
yyv1[:yyrl1] - } else { - yyv1 = make([]IngressRule, yyrl1) - } - } else { - yyv1 = make([]IngressRule, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressRule{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, IngressRule{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressRule{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, IngressRule{}) // var yyz1 IngressRule - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = IngressRule{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []IngressRule{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceHTTPIngressPath(v []HTTPIngressPath, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []HTTPIngressPath{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]HTTPIngressPath, yyrl1) - } - } else { - yyv1 = make([]HTTPIngressPath, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPIngressPath{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, HTTPIngressPath{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPIngressPath{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, HTTPIngressPath{}) // var yyz1 HTTPIngressPath - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = HTTPIngressPath{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else 
if yyj1 == 0 && yyv1 == nil { - yyv1 = []HTTPIngressPath{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceLabelSelectorRequirement(v []LabelSelectorRequirement, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceLabelSelectorRequirement(v *[]LabelSelectorRequirement, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []LabelSelectorRequirement{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 56) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]LabelSelectorRequirement, yyrl1) - } - } else { - yyv1 = make([]LabelSelectorRequirement, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LabelSelectorRequirement{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, LabelSelectorRequirement{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = LabelSelectorRequirement{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, LabelSelectorRequirement{}) // var yyz1 LabelSelectorRequirement - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = LabelSelectorRequirement{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []LabelSelectorRequirement{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceReplicaSet(v []ReplicaSet, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ReplicaSet{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = 
yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 728) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ReplicaSet, yyrl1) - } - } else { - yyv1 = make([]ReplicaSet, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicaSet{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ReplicaSet{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicaSet{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ReplicaSet{}) // var yyz1 ReplicaSet - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ReplicaSet{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ReplicaSet{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicev1_Capability(v []pkg2_v1.Capability, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yysf2 := &yyv1 - yysf2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicev1_Capability(v *[]pkg2_v1.Capability, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []pkg2_v1.Capability{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]pkg2_v1.Capability, yyrl1) - } - } else { - yyv1 = make([]pkg2_v1.Capability, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = pkg2_v1.Capability(r.DecodeString()) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = pkg2_v1.Capability(r.DecodeString()) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 pkg2_v1.Capability - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = pkg2_v1.Capability(r.DecodeString()) - } - - } else { - z.DecSwallow() - } 
- - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []pkg2_v1.Capability{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceFSType(v []FSType, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []FSType{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]FSType, yyrl1) - } - } else { - yyv1 = make([]FSType, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = FSType(r.DecodeString()) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, "") - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = FSType(r.DecodeString()) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, "") // var yyz1 FSType - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = "" - } else { - yyv1[yyj1] = FSType(r.DecodeString()) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []FSType{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceHostPortRange(v []HostPortRange, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []HostPortRange{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]HostPortRange, yyrl1) - } - } else { - yyv1 = 
make([]HostPortRange, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HostPortRange{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, HostPortRange{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = HostPortRange{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, HostPortRange{}) // var yyz1 HostPortRange - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = HostPortRange{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []HostPortRange{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceIDRange(v []IDRange, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []IDRange{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]IDRange, yyrl1) - } - } else { - yyv1 = make([]IDRange, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = IDRange{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, IDRange{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = IDRange{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, IDRange{}) // var yyz1 IDRange - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = IDRange{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []IDRange{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSlicePodSecurityPolicy(v 
[]PodSecurityPolicy, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PodSecurityPolicy{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 536) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PodSecurityPolicy, yyrl1) - } - } else { - yyv1 = make([]PodSecurityPolicy, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodSecurityPolicy{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PodSecurityPolicy{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodSecurityPolicy{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PodSecurityPolicy{}) // var yyz1 PodSecurityPolicy - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodSecurityPolicy{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PodSecurityPolicy{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNetworkPolicyIngressRule(v []NetworkPolicyIngressRule, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNetworkPolicyIngressRule(v *[]NetworkPolicyIngressRule, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NetworkPolicyIngressRule{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 48) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = 
yyv1[:yyrl1] - } else { - yyv1 = make([]NetworkPolicyIngressRule, yyrl1) - } - } else { - yyv1 = make([]NetworkPolicyIngressRule, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyIngressRule{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NetworkPolicyIngressRule{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyIngressRule{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NetworkPolicyIngressRule{}) // var yyz1 NetworkPolicyIngressRule - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyIngressRule{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NetworkPolicyIngressRule{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNetworkPolicyPort(v []NetworkPolicyPort, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNetworkPolicyPort(v *[]NetworkPolicyPort, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NetworkPolicyPort{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NetworkPolicyPort, yyrl1) - } - } else { - yyv1 = make([]NetworkPolicyPort, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyPort{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NetworkPolicyPort{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyPort{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NetworkPolicyPort{}) // var yyz1 NetworkPolicyPort - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyPort{} - } else { - yyv4 := 
&yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NetworkPolicyPort{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNetworkPolicyPeer(v []NetworkPolicyPeer, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNetworkPolicyPeer(v *[]NetworkPolicyPeer, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NetworkPolicyPeer{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NetworkPolicyPeer, yyrl1) - } - } else { - yyv1 = make([]NetworkPolicyPeer, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyPeer{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NetworkPolicyPeer{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyPeer{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NetworkPolicyPeer{}) // var yyz1 NetworkPolicyPeer - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicyPeer{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NetworkPolicyPeer{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceNetworkPolicy(v []NetworkPolicy, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceNetworkPolicy(v *[]NetworkPolicy, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []NetworkPolicy{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var 
yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]NetworkPolicy, yyrl1) - } - } else { - yyv1 = make([]NetworkPolicy, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicy{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, NetworkPolicy{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicy{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, NetworkPolicy{}) // var yyz1 NetworkPolicy - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = NetworkPolicy{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []NetworkPolicy{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.go deleted file mode 100644 index 7fead65b9..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types.go +++ /dev/null @@ -1,1195 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/util/intstr" -) - -// describes the attributes of a scale subresource -type ScaleSpec struct { - // desired number of instances for the scaled object. - Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` -} - -// represents the current status of a scale subresource. -type ScaleStatus struct { - // actual number of observed instances of the scaled object. - Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - - // label query over pods that should match the replicas count. More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` - - // label selector for pods that should match the replicas count. This is a serializated - // version of both map-based and more expressive set-based selectors. This is done to - // avoid introspection in the clients. 
The string will be in the same format as the - // query-param syntax. If the target type only supports map-based selectors, both this - // field and map-based selector field are populated. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` -} - -// +genclient=true,noMethods=true - -// represents a scaling request for a resource. -type Scale struct { - unversioned.TypeMeta `json:",inline"` - // Standard object metadata; More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata. - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // defines the behavior of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. - Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // current status of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. Read-only. - Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// Dummy definition -type ReplicationControllerDummy struct { - unversioned.TypeMeta `json:",inline"` -} - -// SubresourceReference contains enough information to let you inspect or modify the referred subresource. -type SubresourceReference struct { - // Kind of the referent; More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names - Name string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"` - // API version of the referent - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` - // Subresource name of the referent - Subresource string `json:"subresource,omitempty" protobuf:"bytes,4,opt,name=subresource"` -} - -type CPUTargetUtilization struct { - // fraction of the requested CPU that should be utilized/used, - // e.g. 70 means that 70% of the requested CPU should be in use. - TargetPercentage int32 `json:"targetPercentage" protobuf:"varint,1,opt,name=targetPercentage"` -} - -// Alpha-level support for Custom Metrics in HPA (as annotations). -type CustomMetricTarget struct { - // Custom Metric name. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Custom Metric value (average). - TargetValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"` -} - -type CustomMetricTargetList struct { - Items []CustomMetricTarget `json:"items" protobuf:"bytes,1,rep,name=items"` -} - -type CustomMetricCurrentStatus struct { - // Custom Metric name. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Custom Metric value (average). - CurrentValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"` -} - -type CustomMetricCurrentStatusList struct { - Items []CustomMetricCurrentStatus `json:"items" protobuf:"bytes,1,rep,name=items"` -} - -// specification of a horizontal pod autoscaler. -type HorizontalPodAutoscalerSpec struct { - // reference to Scale subresource; horizontal pod autoscaler will learn the current resource consumption from its status, - // and will set the desired number of pods by modifying its spec. 
- ScaleRef SubresourceReference `json:"scaleRef" protobuf:"bytes,1,opt,name=scaleRef"` - // lower limit for the number of pods that can be set by the autoscaler, default 1. - MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` - // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. - MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; - // if not specified it defaults to the target CPU utilization at 80% of the requested resources. - CPUUtilization *CPUTargetUtilization `json:"cpuUtilization,omitempty" protobuf:"bytes,4,opt,name=cpuUtilization"` -} - -// current status of a horizontal pod autoscaler -type HorizontalPodAutoscalerStatus struct { - // most recent generation observed by this autoscaler. - ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - - // last time the HorizontalPodAutoscaler scaled the number of pods; - // used by the autoscaler to control how often the number of pods is changed. - LastScaleTime *unversioned.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` - - // current number of replicas of pods managed by this autoscaler. - CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` - - // desired number of replicas of pods managed by this autoscaler. - DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` - - // current average CPU utilization over all pods, represented as a percentage of requested CPU, - // e.g. 70 means that an average pod is using now 70% of its requested CPU. - CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty" protobuf:"varint,5,opt,name=currentCPUUtilizationPercentage"` -} - -// +genclient=true - -// configuration of a horizontal pod autoscaler. -type HorizontalPodAutoscaler struct { - unversioned.TypeMeta `json:",inline"` - // Standard object metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // behaviour of autoscaler. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. - Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // current information about the autoscaler. - Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// list of horizontal pod autoscaler objects. -type HorizontalPodAutoscalerList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // list of horizontal pod autoscaler objects. - Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient=true,nonNamespaced=true - -// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource -// types to the API. It consists of one or more Versions of the api. -type ThirdPartyResource struct { - unversioned.TypeMeta `json:",inline"` - - // Standard object metadata - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Description is the description of this object. 
- Description string `json:"description,omitempty" protobuf:"bytes,2,opt,name=description"` - - // Versions are versions for this third party object - Versions []APIVersion `json:"versions,omitempty" protobuf:"bytes,3,rep,name=versions"` -} - -// ThirdPartyResourceList is a list of ThirdPartyResources. -type ThirdPartyResourceList struct { - unversioned.TypeMeta `json:",inline"` - - // Standard list metadata. - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of ThirdPartyResources. - Items []ThirdPartyResource `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// An APIVersion represents a single concrete version of an object model. -type APIVersion struct { - // Name of this version (e.g. 'v1'). - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` -} - -// An internal object, used for versioned storage in etcd. Not exposed to the end user. -type ThirdPartyResourceData struct { - unversioned.TypeMeta `json:",inline"` - // Standard object metadata. - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Data is the raw JSON data for this data. - Data []byte `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` -} - -// +genclient=true - -// Deployment enables declarative updates for Pods and ReplicaSets. -type Deployment struct { - unversioned.TypeMeta `json:",inline"` - // Standard object metadata. - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired behavior of the Deployment. - Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Most recently observed status of the Deployment. - Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// DeploymentSpec is the specification of the desired behavior of the Deployment. -type DeploymentSpec struct { - // Number of desired pods. This is a pointer to distinguish between explicit - // zero and not specified. Defaults to 1. - Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` - - // Label selector for pods. Existing ReplicaSets whose pods are - // selected by this will be the ones affected by this deployment. - Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` - - // Template describes the pods that will be created. - Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` - - // The deployment strategy to use to replace existing pods with new ones. - Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` - - // The number of old ReplicaSets to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` - - // Indicates that the deployment is paused and will not be processed by the - // deployment controller. - Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` - // The config this deployment is rolling back to. Will be cleared after rollback is done. 
- RollbackTo *RollbackConfig `json:"rollbackTo,omitempty" protobuf:"bytes,8,opt,name=rollbackTo"` -} - -// DeploymentRollback stores the information required to rollback a deployment. -type DeploymentRollback struct { - unversioned.TypeMeta `json:",inline"` - // Required: This must match the Name of a deployment. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // The annotations to be updated to a deployment - UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"` - // The config of this deployment rollback. - RollbackTo RollbackConfig `json:"rollbackTo" protobuf:"bytes,3,opt,name=rollbackTo"` -} - -type RollbackConfig struct { - // The revision to rollback to. If set to 0, rollbck to the last revision. - Revision int64 `json:"revision,omitempty" protobuf:"varint,1,opt,name=revision"` -} - -const ( - // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added - // to existing RCs (and label key that is added to its pods) to prevent the existing RCs - // to select new pods (and old pods being select by new RC). - DefaultDeploymentUniqueLabelKey string = "pod-template-hash" -) - -// DeploymentStrategy describes how to replace existing pods with new ones. -type DeploymentStrategy struct { - // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. - Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` - - // Rolling update config params. Present only if DeploymentStrategyType = - // RollingUpdate. - //--- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. - RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` -} - -type DeploymentStrategyType string - -const ( - // Kill all existing pods before creating new ones. - RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" - - // Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one. - RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" -) - -// Spec to control the desired behavior of rolling update. -type RollingUpdateDeployment struct { - // The maximum number of pods that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // Absolute number is calculated from percentage by rounding up. - // This can not be 0 if MaxSurge is 0. - // By default, a fixed value of 1 is used. - // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods - // immediately when the rolling update starts. Once new pods are ready, old RC - // can be scaled down further, followed by scaling up the new RC, ensuring - // that the total number of pods available at all times during the update is at - // least 70% of desired pods. - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` - - // The maximum number of pods that can be scheduled above the desired number of - // pods. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // By default, a value of 1 is used. 
- // Example: when this is set to 30%, the new RC can be scaled up immediately when - // the rolling update starts, such that the total number of old and new pods do not exceed - // 130% of desired pods. Once old pods have been killed, - // new RC can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. - MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` -} - -// DeploymentStatus is the most recently observed status of the Deployment. -type DeploymentStatus struct { - // The generation observed by the deployment controller. - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). - Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` - - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. - UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` - - // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. - AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` - - // Total number of unavailable pods targeted by this deployment. - UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` -} - -// DeploymentList is a list of Deployments. -type DeploymentList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of Deployments. - Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// TODO(madhusudancs): Uncomment while implementing DaemonSet updates. -/* Commenting out for v1.2. We are planning to bring these types back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting the types out. -type DaemonSetUpdateStrategy struct { - // Type of daemon set update. Only "RollingUpdate" is supported at this time. Default is RollingUpdate. - Type DaemonSetUpdateStrategyType `json:"type,omitempty"` - - // Rolling update config params. Present only if DaemonSetUpdateStrategy = - // RollingUpdate. - //--- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. Same as DeploymentStrategy.RollingUpdate. - RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty"` -} - -type DaemonSetUpdateStrategyType string - -const ( - // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. - RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" -) - -// Spec to control the desired behavior of daemon set rolling update. -type RollingUpdateDaemonSet struct { - // The maximum number of DaemonSet pods that can be unavailable during the - // update. Value can be an absolute number (ex: 5) or a percentage of total - // number of DaemonSet pods at the start of the update (ex: 10%). Absolute - // number is calculated from percentage by rounding up. - // This cannot be 0. - // Default value is 1. - // Example: when this is set to 30%, 30% of the currently running DaemonSet - // pods can be stopped for an update at any given time. 
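The maxUnavailable/maxSurge comments above bound a rolling update in terms of the desired replica count. A minimal sketch of how these v1beta1 fields are typically populated, assuming the vendored k8s.io/kubernetes import paths (the util/intstr path, the 30% values and the replica count of 10 are illustrative assumptions, not taken from this diff):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
	"k8s.io/kubernetes/pkg/util/intstr"
)

func main() {
	replicas := int32(10)
	maxUnavailable := intstr.FromString("30%") // at most 3 of 10 pods down at once
	maxSurge := intstr.FromString("30%")       // at most 13 pods total during the rollout

	spec := v1beta1.DeploymentSpec{
		Replicas: &replicas,
		Strategy: v1beta1.DeploymentStrategy{
			Type: v1beta1.RollingUpdateDeploymentStrategyType,
			RollingUpdate: &v1beta1.RollingUpdateDeployment{
				MaxUnavailable: &maxUnavailable,
				MaxSurge:       &maxSurge,
			},
		},
		// Template (the pod spec) is omitted in this sketch; a real Deployment also needs it.
	}

	// Per the comments above: 30%/30% with 10 desired replicas keeps at least
	// 7 pods available and never schedules more than 13 pods during an update.
	fmt.Println(spec.Strategy.Type, *spec.Replicas)
}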
The update starts - // by stopping at most 30% of the currently running DaemonSet pods and then - // brings up new DaemonSet pods in their place. Once the new pods are ready, - // it then proceeds onto other DaemonSet pods, thus ensuring that at least - // 70% of original number of DaemonSet pods are available at all times - // during the update. - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"` - - // Minimum number of seconds for which a newly created DaemonSet pod should - // be ready without any of its container crashing, for it to be considered - // available. Defaults to 0 (pod will be considered available as soon as it - // is ready). - MinReadySeconds int32 `json:"minReadySeconds,omitempty"` -} -*/ - -// DaemonSetSpec is the specification of a daemon set. -type DaemonSetSpec struct { - // Selector is a label query over pods that are managed by the daemon set. - // Must match in order to be controlled. - // If empty, defaulted to labels on Pod template. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` - - // Template is the object that describes the pod that will be created. - // The DaemonSet will create exactly one copy of this pod on every node - // that matches the template's node selector (or on every node if no node - // selector is specified). - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#pod-template - Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` - - // TODO(madhusudancs): Uncomment while implementing DaemonSet updates. - /* Commenting out for v1.2. We are planning to bring these fields back with a more robust DaemonSet update implementation in v1.3, hence not deleting but just commenting these fields out. - // Update strategy to replace existing DaemonSet pods with new pods. - UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty"` - - // Label key that is added to DaemonSet pods to distinguish between old and - // new pod templates during DaemonSet update. - // Users can set this to an empty string to indicate that the system should - // not add any label. If unspecified, system uses - // DefaultDaemonSetUniqueLabelKey("daemonset.kubernetes.io/podTemplateHash"). - // Value of this key is hash of DaemonSetSpec.PodTemplateSpec. - // No label is added if this is set to empty string. - UniqueLabelKey *string `json:"uniqueLabelKey,omitempty"` - */ -} - -const ( - // DefaultDaemonSetUniqueLabelKey is the default key of the labels that is added - // to daemon set pods to distinguish between old and new pod templates during - // DaemonSet update. See DaemonSetSpec's UniqueLabelKey field for more information. - DefaultDaemonSetUniqueLabelKey string = "daemonset.kubernetes.io/podTemplateHash" -) - -// DaemonSetStatus represents the current status of a daemon set. -type DaemonSetStatus struct { - // CurrentNumberScheduled is the number of nodes that are running at least 1 - // daemon pod and are supposed to run the daemon pod. - // More info: http://releases.k8s.io/release-1.3/docs/admin/daemons.md - CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"` - - // NumberMisscheduled is the number of nodes that are running the daemon pod, but are - // not supposed to run the daemon pod. 
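DaemonSetSpec above carries only a selector and a pod template; the template's node selector decides which nodes receive the single daemon pod. A rough sketch under that reading (the labels, node-selector key and container image are illustrative assumptions):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
)

func main() {
	labels := map[string]string{"app": "node-exporter"} // illustrative label set

	ds := v1beta1.DaemonSet{
		Spec: v1beta1.DaemonSetSpec{
			// If Selector were left nil, it would be defaulted to the template labels.
			Selector: &v1beta1.LabelSelector{MatchLabels: labels},
			Template: v1.PodTemplateSpec{
				ObjectMeta: v1.ObjectMeta{Labels: labels},
				Spec: v1.PodSpec{
					// Restrict the daemon to nodes carrying this (hypothetical) label;
					// without a node selector, one pod runs on every node.
					NodeSelector: map[string]string{"role": "monitoring"},
					Containers: []v1.Container{
						{Name: "exporter", Image: "prom/node-exporter"}, // image is illustrative
					},
				},
			},
		},
	}
	fmt.Println(ds.Spec.Selector.MatchLabels)
}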
- // More info: http://releases.k8s.io/release-1.3/docs/admin/daemons.md - NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"` - - // DesiredNumberScheduled is the total number of nodes that should be running the daemon - // pod (including nodes correctly running the daemon pod). - // More info: http://releases.k8s.io/release-1.3/docs/admin/daemons.md - DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"` -} - -// +genclient=true - -// DaemonSet represents the configuration of a daemon set. -type DaemonSet struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the desired behavior of this daemon set. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is the current status of this daemon set. This data may be - // out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// DaemonSetList is a collection of daemon sets. -type DaemonSetList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of daemon sets. - Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// ThirdPartyResrouceDataList is a list of ThirdPartyResourceData. -type ThirdPartyResourceDataList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of ThirdpartyResourceData. - Items []ThirdPartyResourceData `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient=true - -// Job represents the configuration of a single job. -type Job struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// JobList is a collection of jobs. -type JobList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of Job. 
- Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// JobSpec describes how the job execution will look like. -type JobSpec struct { - - // Parallelism specifies the maximum desired number of pods the job should - // run at any given time. The actual number of pods running in steady state will - // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), - // i.e. when the work left to do is less than max parallelism. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` - - // Completions specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any - // pod signals the success of all pods, and allows parallelism to have any positive - // value. Setting to 1 means that parallelism is limited to 1 and the success of that - // pod signals the success of the job. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"` - - // Optional duration in seconds relative to the startTime that the job may be active - // before the system tries to terminate it; value must be positive integer - ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"` - - // Selector is a label query over pods that should match the pod count. - // Normally, the system sets this field for you. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` - - // AutoSelector controls generation of pod labels and pod selectors. - // It was not present in the original extensions/v1beta1 Job definition, but exists - // to allow conversion from batch/v1 Jobs, where it corresponds to, but has the opposite - // meaning as, ManualSelector. - // More info: http://releases.k8s.io/release-1.3/docs/design/selector-generation.md - AutoSelector *bool `json:"autoSelector,omitempty" protobuf:"varint,5,opt,name=autoSelector"` - - // Template is the object that describes the pod that will be created when - // executing a job. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` -} - -// JobStatus represents the current state of a Job. -type JobStatus struct { - - // Conditions represent the latest available observations of an object's current state. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md - Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` - - // StartTime represents time when the job was acknowledged by the Job Manager. - // It is not guaranteed to be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - StartTime *unversioned.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` - - // CompletionTime represents time when the job was completed. It is not guaranteed to - // be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. 
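The completions/parallelism comments in JobSpec above describe a pool of work bounded by both values. A small sketch of that JobSpec with both set (the concrete numbers are illustrative assumptions):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
)

func main() {
	completions := int32(5) // the job is done after 5 pods succeed
	parallelism := int32(2) // at most 2 pods run at the same time
	deadline := int64(600)  // give up after 10 minutes of activity

	spec := v1beta1.JobSpec{
		Completions:           &completions,
		Parallelism:           &parallelism,
		ActiveDeadlineSeconds: &deadline,
		// Template (the pod run for each completion) is omitted in this sketch.
	}

	// Per the comment above, steady state runs min(parallelism, completions - succeeded)
	// pods, so the final pod of this job runs alone even though parallelism allows two.
	fmt.Println(*spec.Completions, *spec.Parallelism)
}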
- CompletionTime *unversioned.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` - - // Active is the number of actively running pods. - Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` - - // Succeeded is the number of pods which reached Phase Succeeded. - Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"` - - // Failed is the number of pods which reached Phase Failed. - Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` -} - -type JobConditionType string - -// These are valid conditions of a job. -const ( - // JobComplete means the job has completed its execution. - JobComplete JobConditionType = "Complete" - // JobFailed means the job has failed its execution. - JobFailed JobConditionType = "Failed" -) - -// JobCondition describes current state of a job. -type JobCondition struct { - // Type of job condition, Complete or Failed. - Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` - // Last time the condition was checked. - LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` - // Last time the condition transit from one status to another. - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` - // (brief) reason for the condition's last transition. - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - // Human readable message indicating details about last transition. - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` -} - -// +genclient=true - -// Ingress is a collection of rules that allow inbound connections to reach the -// endpoints defined by a backend. An Ingress can be configured to give services -// externally-reachable urls, load balance traffic, terminate SSL, offer name -// based virtual hosting etc. -type Ingress struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec is the desired state of the Ingress. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is the current state of the Ingress. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// IngressList is a collection of Ingress. -type IngressList struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of Ingress. - Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// IngressSpec describes the Ingress the user wishes to exist. -type IngressSpec struct { - // A default backend capable of servicing requests that don't match any - // rule. 
At least one of 'backend' or 'rules' must be specified. This field - // is optional to allow the loadbalancer controller or defaulting logic to - // specify a global default. - Backend *IngressBackend `json:"backend,omitempty" protobuf:"bytes,1,opt,name=backend"` - - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified - // through the SNI TLS extension, if the ingress controller fulfilling the - // ingress supports SNI. - TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` - - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. - Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` - // TODO: Add the ability to specify load-balancer IP through claims -} - -// IngressTLS describes the transport layer security associated with an Ingress. -type IngressTLS struct { - // Hosts are a list of hosts included in the TLS certificate. The values in - // this list must match the name/s used in the tlsSecret. Defaults to the - // wildcard host setting for the loadbalancer controller fulfilling this - // Ingress, if left unspecified. - Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` - // SecretName is the name of the secret used to terminate SSL traffic on 443. - // Field is left optional to allow SSL routing based on SNI hostname alone. - // If the SNI host in a listener conflicts with the "Host" header field used - // by an IngressRule, the SNI host is used for termination and value of the - // Host header is used for routing. - SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"` - // TODO: Consider specifying different modes of termination, protocols etc. -} - -// IngressStatus describe the current state of the Ingress. -type IngressStatus struct { - // LoadBalancer contains the current status of the load-balancer. - LoadBalancer v1.LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` -} - -// IngressRule represents the rules mapping the paths under a specified host to -// the related backend services. Incoming requests are first evaluated for a host -// match, then routed to the backend associated with the matching IngressRuleValue. -type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined - // by RFC 3986. Note the following deviations from the "host" part of the - // URI as defined in the RFC: - // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the - // IP in the Spec of the parent Ingress. - // 2. The `:` delimiter is not respected because ports are not allowed. - // Currently the port of an Ingress is implicitly :80 for http and - // :443 for https. - // Both these may change in the future. - // Incoming requests are matched against the host before the IngressRuleValue. - // If the host is unspecified, the Ingress routes all traffic based on the - // specified IngressRuleValue. - Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` - // IngressRuleValue represents a rule to route requests for this IngressRule. - // If unspecified, the rule defaults to a http catch-all. 
Whether that sends - // just traffic matching the host to the default backend or all traffic to the - // default backend, is left to the controller fulfilling the Ingress. Http is - // currently the only supported IngressRuleValue. - IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` -} - -// IngressRuleValue represents a rule to apply against incoming requests. If the -// rule is satisfied, the request is routed to the specified backend. Currently -// mixing different types of rules in a single Ingress is disallowed, so exactly -// one of the following must be set. -type IngressRuleValue struct { - //TODO: - // 1. Consider renaming this resource and the associated rules so they - // aren't tied to Ingress. They can be used to route intra-cluster traffic. - // 2. Consider adding fields for ingress-type specific global options - // usable by a loadbalancer, like http keep-alive. - - HTTP *HTTPIngressRuleValue `json:"http,omitempty" protobuf:"bytes,1,opt,name=http"` -} - -// HTTPIngressRuleValue is a list of http selectors pointing to backends. -// In the example: http://<host>/<path>?<searchpart> -> backend where -// where parts of the url correspond to RFC 3986, this resource will be used -// to match against everything after the last '/' and before the first '?' -// or '#'. -type HTTPIngressRuleValue struct { - // A collection of paths that map requests to backends. - Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` - // TODO: Consider adding fields for ingress-type specific global - // options usable by a loadbalancer, like http keep-alive. -} - -// HTTPIngressPath associates a path regex with a backend. Incoming urls matching -// the path are forwarded to the backend. -type HTTPIngressPath struct { - // Path is a extended POSIX regex as defined by IEEE Std 1003.1, - // (i.e this follows the egrep/unix syntax, not the perl syntax) - // matched against the path of an incoming request. Currently it can - // contain characters disallowed from the conventional "path" - // part of a URL as defined by RFC 3986. Paths must begin with - // a '/'. If unspecified, the path defaults to a catch all sending - // traffic to the backend. - Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - - // Backend defines the referenced service endpoint to which the traffic - // will be forwarded to. - Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` -} - -// IngressBackend describes all endpoints for a given service and port. -type IngressBackend struct { - // Specifies the name of the referenced service. - ServiceName string `json:"serviceName" protobuf:"bytes,1,opt,name=serviceName"` - - // Specifies the port of the referenced service. - ServicePort intstr.IntOrString `json:"servicePort" protobuf:"bytes,2,opt,name=servicePort"` -} - -// ExportOptions is the query options to the standard REST get call. -type ExportOptions struct { - unversioned.TypeMeta `json:",inline"` - // Should this value be exported. Export strips fields that a user can not specify. - Export bool `json:"export" protobuf:"varint,1,opt,name=export"` - // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace' - Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"` -} - -// ListOptions is the query options to a standard REST list call. -type ListOptions struct { - unversioned.TypeMeta `json:",inline"` - - // A selector to restrict the list of returned objects by their labels. 
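IngressSpec, IngressRule and HTTPIngressPath above compose host-then-path matching onto a service backend. A minimal sketch, again assuming the vendored import paths (hostname, path, service name and port are illustrative assumptions):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
	"k8s.io/kubernetes/pkg/util/intstr"
)

func main() {
	ing := v1beta1.Ingress{
		Spec: v1beta1.IngressSpec{
			Rules: []v1beta1.IngressRule{{
				// Requests are matched on the host first...
				Host: "app.example.com",
				IngressRuleValue: v1beta1.IngressRuleValue{
					HTTP: &v1beta1.HTTPIngressRuleValue{
						// ...then on the path, which routes to a service/port backend.
						Paths: []v1beta1.HTTPIngressPath{{
							Path: "/api",
							Backend: v1beta1.IngressBackend{
								ServiceName: "api-svc",
								ServicePort: intstr.FromInt(8080),
							},
						}},
					},
				},
			}},
		},
	}
	fmt.Println(ing.Spec.Rules[0].Host)
}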
- // Defaults to everything. - LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` - // A selector to restrict the list of returned objects by their fields. - // Defaults to everything. - FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"` - // Watch for changes to the described resources and return them as a stream of - // add, update, and remove notifications. Specify resourceVersion. - Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"` - // When specified with a watch call, shows changes that occur after that particular version of a resource. - // Defaults to changes from the beginning of history. - ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"` - // Timeout for the list/watch call. - TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"` -} - -// A label selector is a label query over a set of resources. The result of matchLabels and -// matchExpressions are ANDed. An empty label selector matches all objects. A null -// label selector matches no objects. -type LabelSelector struct { - // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - // map is equivalent to an element of matchExpressions, whose key field is "key", the - // operator is "In", and the values array contains only "value". The requirements are ANDed. - MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"` - // matchExpressions is a list of label selector requirements. The requirements are ANDed. - MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"` -} - -// A label selector requirement is a selector that contains values, a key, and an operator that -// relates the key and values. -type LabelSelectorRequirement struct { - // key is the label key that the selector applies to. - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` - // operator represents a key's relationship to a set of values. - // Valid operators ard In, NotIn, Exists and DoesNotExist. - Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"` - // values is an array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. This array is replaced during a strategic - // merge patch. - Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` -} - -// A label selector operator is the set of operators that can be used in a selector requirement. -type LabelSelectorOperator string - -const ( - LabelSelectorOpIn LabelSelectorOperator = "In" - LabelSelectorOpNotIn LabelSelectorOperator = "NotIn" - LabelSelectorOpExists LabelSelectorOperator = "Exists" - LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" -) - -// +genclient=true - -// ReplicaSet represents the configuration of a ReplicaSet. -type ReplicaSet struct { - unversioned.TypeMeta `json:",inline"` - - // If the Labels of a ReplicaSet are empty, they are defaulted to - // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. 
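The LabelSelector comment above states that a matchLabels entry is shorthand for an In requirement with a single value. A small sketch showing both spellings side by side (the app=web label is an illustrative assumption):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
)

func main() {
	// Shorthand form: a single {key,value} pair in matchLabels.
	short := v1beta1.LabelSelector{
		MatchLabels: map[string]string{"app": "web"},
	}

	// Expanded form the comment describes as equivalent:
	// key "app", operator In, values ["web"].
	expanded := v1beta1.LabelSelector{
		MatchExpressions: []v1beta1.LabelSelectorRequirement{{
			Key:      "app",
			Operator: v1beta1.LabelSelectorOpIn,
			Values:   []string{"web"},
		}},
	}

	// Both select pods labelled app=web; multiple requirements would be ANDed.
	fmt.Println(short.MatchLabels["app"], expanded.MatchExpressions[0].Values[0])
}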
More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is the most recently observed status of the ReplicaSet. - // This data may be out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status - Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// ReplicaSetList is a collection of ReplicaSets. -type ReplicaSetList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of ReplicaSets. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md - Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// ReplicaSetSpec is the specification of a ReplicaSet. -type ReplicaSetSpec struct { - // Replicas is the number of desired replicas. - // This is a pointer to distinguish between explicit zero and unspecified. - // Defaults to 1. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#what-is-a-replication-controller - Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` - - // Selector is a label query over pods that should match the replica count. - // If the selector is empty, it is defaulted to the labels present on the pod template. - // Label keys and values that must match in order to be controlled by this replica set. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors - Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#pod-template - Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` -} - -// ReplicaSetStatus represents the current status of a ReplicaSet. -type ReplicaSetStatus struct { - // Replicas is the most recently oberved number of replicas. - // More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#what-is-a-replication-controller - Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - - // The number of pods that have labels matching the labels of the pod template of the replicaset. - FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` - - // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` -} - -// +genclient=true,nonNamespaced=true - -// Pod Security Policy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. 
-type PodSecurityPolicy struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec defines the policy enforced. - Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// Pod Security Policy Spec defines the policy enforced. -type PodSecurityPolicySpec struct { - // privileged determines if a pod can request to be run as privileged. - Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` - // DefaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capabiility in both - // DefaultAddCapabilities and RequiredDropCapabilities. - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/kubernetes/pkg/api/v1.Capability"` - // RequiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/kubernetes/pkg/api/v1.Capability"` - // AllowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/kubernetes/pkg/api/v1.Capability"` - // volumes is a white list of allowed volume plugins. Empty indicates that all plugins - // may be used. - Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"` - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"` - // hostPorts determines which host port ranges are allowed to be exposed. - HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"` - // hostPID determines if the policy allows the use of HostPID in the pod spec. - HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"` - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"` - // seLinux is the strategy that will dictate the allowable labels that may be set. - SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` - // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` - // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. 
- FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"` - // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"` -} - -// FS Type gives strong typing to different file systems that are used by volumes. -type FSType string - -var ( - AzureFile FSType = "azureFile" - Flocker FSType = "flocker" - FlexVolume FSType = "flexVolume" - HostPath FSType = "hostPath" - EmptyDir FSType = "emptyDir" - GCEPersistentDisk FSType = "gcePersistentDisk" - AWSElasticBlockStore FSType = "awsElasticBlockStore" - GitRepo FSType = "gitRepo" - Secret FSType = "secret" - NFS FSType = "nfs" - ISCSI FSType = "iscsi" - Glusterfs FSType = "glusterfs" - PersistentVolumeClaim FSType = "persistentVolumeClaim" - RBD FSType = "rbd" - Cinder FSType = "cinder" - CephFS FSType = "cephFS" - DownwardAPI FSType = "downwardAPI" - FC FSType = "fc" - ConfigMap FSType = "configMap" - All FSType = "*" -) - -// Host Port Range defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -type HostPortRange struct { - // min is the start of the range, inclusive. - Min int32 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int32 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// SELinux Strategy Options defines the strategy type and any options used to create the strategy. -type SELinuxStrategyOptions struct { - // type is the strategy that will dictate the allowable labels that may be set. - Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` - // seLinuxOptions required to run as; required for MustRunAs - // More info: http://releases.k8s.io/release-1.3/docs/design/security_context.md#security-context - SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` -} - -// SELinuxStrategy denotes strategy types for generating SELinux options for a -// Security Context. -type SELinuxStrategy string - -const ( - // container must have SELinux labels of X applied. - SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" - // container may make requests for any SELinux context labels. - SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" -) - -// Run A sUser Strategy Options defines the strategy type and any options used to create the strategy. -type RunAsUserStrategyOptions struct { - // Rule is the strategy that will dictate the allowable RunAsUser values that may be set. - Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"` - // Ranges are the allowed ranges of uids that may be used. - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// ID Range provides a min/max of an allowed range of IDs. -type IDRange struct { - // Min is the start of the range, inclusive. - Min int64 `json:"min" protobuf:"varint,1,opt,name=min"` - // Max is the end of the range, inclusive. 
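RunAsUserStrategyOptions plus IDRange above express "must run inside this UID range". A sketch of a PodSecurityPolicySpec using them, with the other required strategies left permissive (the 1000-1999 range is an illustrative assumption):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
)

func main() {
	spec := v1beta1.PodSecurityPolicySpec{
		Privileged: false, // do not allow privileged pods
		// Only allow UIDs inside a single range; pods asking for anything else
		// should be rejected by the policy.
		RunAsUser: v1beta1.RunAsUserStrategyOptions{
			Rule:   v1beta1.RunAsUserStrategyMustRunAs,
			Ranges: []v1beta1.IDRange{{Min: 1000, Max: 1999}},
		},
		// The remaining required strategies accept anything in this sketch.
		SELinux:            v1beta1.SELinuxStrategyOptions{Rule: v1beta1.SELinuxStrategyRunAsAny},
		FSGroup:            v1beta1.FSGroupStrategyOptions{Rule: v1beta1.FSGroupStrategyRunAsAny},
		SupplementalGroups: v1beta1.SupplementalGroupsStrategyOptions{Rule: v1beta1.SupplementalGroupsStrategyRunAsAny},
	}
	fmt.Println(spec.RunAsUser.Rule, spec.RunAsUser.Ranges[0].Min)
}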
- Max int64 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a -// Security Context. -type RunAsUserStrategy string - -const ( - // container must run as a particular uid. - RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" - // container must run as a non-root uid - RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" - // container may make requests for any uid. - RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" -) - -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -type FSGroupStrategyOptions struct { - // Rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"` - // Ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// FSGroupStrategyType denotes strategy types for generating FSGroup values for a -// SecurityContext -type FSGroupStrategyType string - -const ( - // container must have FSGroup of X applied. - FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" - // container may make requests for any FSGroup labels. - FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" -) - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -type SupplementalGroupsStrategyOptions struct { - // Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"` - // Ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental -// groups for a SecurityContext. -type SupplementalGroupsStrategyType string - -const ( - // container must run as a particular gid. - SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" - // container may make requests for any gid. - SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" -) - -// Pod Security Policy List is a list of PodSecurityPolicy objects. -type PodSecurityPolicyList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of schema objects. - Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -type NetworkPolicy struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired behavior for this NetworkPolicy. 
- Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -type NetworkPolicySpec struct { - // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules - // is applied to any pods selected by this field. Multiple network policies can select the - // same set of pods. In this case, the ingress rules for each are combined additively. - // This field is NOT optional and follows standard label selector semantics. - // An empty podSelector matches all pods in this namespace. - PodSelector LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"` - - // List of ingress rules to be applied to the selected pods. - // Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, - // OR if the traffic source is the pod's local node, - // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy - // objects whose podSelector matches the pod. - // If this field is empty then this NetworkPolicy does not affect ingress isolation. - // If this field is present and contains at least one rule, this policy allows any traffic - // which matches at least one of the ingress rules in this list. - Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"` -} - -// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. -type NetworkPolicyIngressRule struct { - // List of ports which should be made accessible on the pods selected for this rule. - // Each item in this list is combined using a logical OR. - // If this field is not provided, this rule matches all ports (traffic not restricted by port). - // If this field is empty, this rule matches no ports (no traffic matches). - // If this field is present and contains at least one item, then this rule allows traffic - // only if the traffic matches at least one port in the list. - // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. - Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` - - // List of sources which should be able to access the pods selected for this rule. - // Items in this list are combined using a logical OR operation. - // If this field is not provided, this rule matches all sources (traffic not restricted by source). - // If this field is empty, this rule matches no sources (no traffic matches). - // If this field is present and contains at least on item, this rule allows traffic only if the - // traffic matches at least one item in the from list. - // TODO: Update this to be a pointer to slice as soon as auto-generation supports it. - From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` -} - -type NetworkPolicyPort struct { - // Optional. The protocol (TCP or UDP) which traffic must match. - // If not specified, this field defaults to TCP. - Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/kubernetes/pkg/api/v1.Protocol"` - - // If specified, the port on the given protocol. This can - // either be a numerical or named port on a pod. If this field is not provided, - // this matches all port names and numbers. - // If present, only traffic on the specified protocol AND port - // will be matched. 
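NetworkPolicySpec above combines a pod selector with ingress rules whose ports AND peers must both match. A sketch allowing TCP/443 only from frontend-labelled pods, assuming the vendored import paths (the labels and port number are illustrative assumptions):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
	"k8s.io/kubernetes/pkg/util/intstr"
)

func main() {
	tcp := v1.ProtocolTCP
	port := intstr.FromInt(443)

	np := v1beta1.NetworkPolicy{
		Spec: v1beta1.NetworkPolicySpec{
			// The policy applies to backend pods in this namespace.
			PodSelector: v1beta1.LabelSelector{MatchLabels: map[string]string{"role": "backend"}},
			Ingress: []v1beta1.NetworkPolicyIngressRule{{
				// Traffic is allowed only if it matches one of the ports AND one of the peers.
				Ports: []v1beta1.NetworkPolicyPort{{Protocol: &tcp, Port: &port}},
				From: []v1beta1.NetworkPolicyPeer{{
					PodSelector: &v1beta1.LabelSelector{MatchLabels: map[string]string{"role": "frontend"}},
				}},
			}},
		},
	}
	fmt.Println(np.Spec.PodSelector.MatchLabels["role"])
}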
- Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` -} - -type NetworkPolicyPeer struct { - // Exactly one of the following must be specified. - - // This is a label selector which selects Pods in this namespace. - // This field follows standard label selector semantics. - // If not provided, this selector selects no pods. - // If present but empty, this selector selects all pods in this namespace. - PodSelector *LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"` - - // Selects Namespaces using cluster scoped-labels. This - // matches all pods in all namespaces selected by this label selector. - // This field follows standard label selector semantics. - // If omitted, this selector selects no namespaces. - // If present but empty, this selector selects all namespaces. - NamespaceSelector *LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"` -} - -// Network Policy List is a list of NetworkPolicy objects. -type NetworkPolicyList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of schema objects. - Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index 1864a9658..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,740 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_APIVersion = map[string]string{ - "": "An APIVersion represents a single concrete version of an object model.", - "name": "Name of this version (e.g. 'v1').", -} - -func (APIVersion) SwaggerDoc() map[string]string { - return map_APIVersion -} - -var map_CPUTargetUtilization = map[string]string{ - "targetPercentage": "fraction of the requested CPU that should be utilized/used, e.g. 
70 means that 70% of the requested CPU should be in use.", -} - -func (CPUTargetUtilization) SwaggerDoc() map[string]string { - return map_CPUTargetUtilization -} - -var map_CustomMetricCurrentStatus = map[string]string{ - "name": "Custom Metric name.", - "value": "Custom Metric value (average).", -} - -func (CustomMetricCurrentStatus) SwaggerDoc() map[string]string { - return map_CustomMetricCurrentStatus -} - -var map_CustomMetricTarget = map[string]string{ - "": "Alpha-level support for Custom Metrics in HPA (as annotations).", - "name": "Custom Metric name.", - "value": "Custom Metric value (average).", -} - -func (CustomMetricTarget) SwaggerDoc() map[string]string { - return map_CustomMetricTarget -} - -var map_DaemonSet = map[string]string{ - "": "DaemonSet represents the configuration of a daemon set.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the desired behavior of this daemon set. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is the current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", -} - -func (DaemonSet) SwaggerDoc() map[string]string { - return map_DaemonSet -} - -var map_DaemonSetList = map[string]string{ - "": "DaemonSetList is a collection of daemon sets.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "items": "Items is a list of daemon sets.", -} - -func (DaemonSetList) SwaggerDoc() map[string]string { - return map_DaemonSetList -} - -var map_DaemonSetSpec = map[string]string{ - "": "DaemonSetSpec is the specification of a daemon set.", - "selector": "Selector is a label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors", - "template": "Template is the object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#pod-template", -} - -func (DaemonSetSpec) SwaggerDoc() map[string]string { - return map_DaemonSetSpec -} - -var map_DaemonSetStatus = map[string]string{ - "": "DaemonSetStatus represents the current status of a daemon set.", - "currentNumberScheduled": "CurrentNumberScheduled is the number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: http://releases.k8s.io/release-1.3/docs/admin/daemons.md", - "numberMisscheduled": "NumberMisscheduled is the number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: http://releases.k8s.io/release-1.3/docs/admin/daemons.md", - "desiredNumberScheduled": "DesiredNumberScheduled is the total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). 
More info: http://releases.k8s.io/release-1.3/docs/admin/daemons.md", -} - -func (DaemonSetStatus) SwaggerDoc() map[string]string { - return map_DaemonSetStatus -} - -var map_Deployment = map[string]string{ - "": "Deployment enables declarative updates for Pods and ReplicaSets.", - "metadata": "Standard object metadata.", - "spec": "Specification of the desired behavior of the Deployment.", - "status": "Most recently observed status of the Deployment.", -} - -func (Deployment) SwaggerDoc() map[string]string { - return map_Deployment -} - -var map_DeploymentList = map[string]string{ - "": "DeploymentList is a list of Deployments.", - "metadata": "Standard list metadata.", - "items": "Items is the list of Deployments.", -} - -func (DeploymentList) SwaggerDoc() map[string]string { - return map_DeploymentList -} - -var map_DeploymentRollback = map[string]string{ - "": "DeploymentRollback stores the information required to rollback a deployment.", - "name": "Required: This must match the Name of a deployment.", - "updatedAnnotations": "The annotations to be updated to a deployment", - "rollbackTo": "The config of this deployment rollback.", -} - -func (DeploymentRollback) SwaggerDoc() map[string]string { - return map_DeploymentRollback -} - -var map_DeploymentSpec = map[string]string{ - "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", - "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", - "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", - "template": "Template describes the pods that will be created.", - "strategy": "The deployment strategy to use to replace existing pods with new ones.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified.", - "paused": "Indicates that the deployment is paused and will not be processed by the deployment controller.", - "rollbackTo": "The config this deployment is rolling back to. Will be cleared after rollback is done.", -} - -func (DeploymentSpec) SwaggerDoc() map[string]string { - return map_DeploymentSpec -} - -var map_DeploymentStatus = map[string]string{ - "": "DeploymentStatus is the most recently observed status of the Deployment.", - "observedGeneration": "The generation observed by the deployment controller.", - "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", - "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", - "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", - "unavailableReplicas": "Total number of unavailable pods targeted by this deployment.", -} - -func (DeploymentStatus) SwaggerDoc() map[string]string { - return map_DeploymentStatus -} - -var map_DeploymentStrategy = map[string]string{ - "": "DeploymentStrategy describes how to replace existing pods with new ones.", - "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". 
Default is RollingUpdate.", - "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", -} - -func (DeploymentStrategy) SwaggerDoc() map[string]string { - return map_DeploymentStrategy -} - -var map_ExportOptions = map[string]string{ - "": "ExportOptions is the query options to the standard REST get call.", - "export": "Should this value be exported. Export strips fields that a user can not specify.", - "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'", -} - -func (ExportOptions) SwaggerDoc() map[string]string { - return map_ExportOptions -} - -var map_FSGroupStrategyOptions = map[string]string{ - "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.", - "rule": "Rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", - "ranges": "Ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end.", -} - -func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_FSGroupStrategyOptions -} - -var map_HTTPIngressPath = map[string]string{ - "": "HTTPIngressPath associates a path regex with a backend. Incoming urls matching the path are forwarded to the backend.", - "path": "Path is a extended POSIX regex as defined by IEEE Std 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. If unspecified, the path defaults to a catch all sending traffic to the backend.", - "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", -} - -func (HTTPIngressPath) SwaggerDoc() map[string]string { - return map_HTTPIngressPath -} - -var map_HTTPIngressRuleValue = map[string]string{ - "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http://<host>/<path>?<searchpart> -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", - "paths": "A collection of paths that map requests to backends.", -} - -func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { - return map_HTTPIngressRuleValue -} - -var map_HorizontalPodAutoscaler = map[string]string{ - "": "configuration of a horizontal pod autoscaler.", - "metadata": "Standard object metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "behaviour of autoscaler. 
More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status.", - "status": "current information about the autoscaler.", -} - -func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscaler -} - -var map_HorizontalPodAutoscalerList = map[string]string{ - "": "list of horizontal pod autoscaler objects.", - "metadata": "Standard list metadata.", - "items": "list of horizontal pod autoscaler objects.", -} - -func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerList -} - -var map_HorizontalPodAutoscalerSpec = map[string]string{ - "": "specification of a horizontal pod autoscaler.", - "scaleRef": "reference to Scale subresource; horizontal pod autoscaler will learn the current resource consumption from its status, and will set the desired number of pods by modifying its spec.", - "minReplicas": "lower limit for the number of pods that can be set by the autoscaler, default 1.", - "maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", - "cpuUtilization": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified it defaults to the target CPU utilization at 80% of the requested resources.", -} - -func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerSpec -} - -var map_HorizontalPodAutoscalerStatus = map[string]string{ - "": "current status of a horizontal pod autoscaler", - "observedGeneration": "most recent generation observed by this autoscaler.", - "lastScaleTime": "last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.", - "currentReplicas": "current number of replicas of pods managed by this autoscaler.", - "desiredReplicas": "desired number of replicas of pods managed by this autoscaler.", - "currentCPUUtilizationPercentage": "current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.", -} - -func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerStatus -} - -var map_HostPortRange = map[string]string{ - "": "Host Port Range defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (HostPortRange) SwaggerDoc() map[string]string { - return map_HostPortRange -} - -var map_IDRange = map[string]string{ - "": "ID Range provides a min/max of an allowed range of IDs.", - "min": "Min is the start of the range, inclusive.", - "max": "Max is the end of the range, inclusive.", -} - -func (IDRange) SwaggerDoc() map[string]string { - return map_IDRange -} - -var map_Ingress = map[string]string{ - "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. 
More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", -} - -func (Ingress) SwaggerDoc() map[string]string { - return map_Ingress -} - -var map_IngressBackend = map[string]string{ - "": "IngressBackend describes all endpoints for a given service and port.", - "serviceName": "Specifies the name of the referenced service.", - "servicePort": "Specifies the port of the referenced service.", -} - -func (IngressBackend) SwaggerDoc() map[string]string { - return map_IngressBackend -} - -var map_IngressList = map[string]string{ - "": "IngressList is a collection of Ingress.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "items": "Items is the list of Ingress.", -} - -func (IngressList) SwaggerDoc() map[string]string { - return map_IngressList -} - -var map_IngressRule = map[string]string{ - "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", - "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in the RFC: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the\n\t IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.", -} - -func (IngressRule) SwaggerDoc() map[string]string { - return map_IngressRule -} - -var map_IngressRuleValue = map[string]string{ - "": "IngressRuleValue represents a rule to apply against incoming requests. If the rule is satisfied, the request is routed to the specified backend. Currently mixing different types of rules in a single Ingress is disallowed, so exactly one of the following must be set.", -} - -func (IngressRuleValue) SwaggerDoc() map[string]string { - return map_IngressRuleValue -} - -var map_IngressSpec = map[string]string{ - "": "IngressSpec describes the Ingress the user wishes to exist.", - "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", - "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", - "rules": "A list of host rules used to configure the Ingress. 
If unspecified, or no rule matches, all traffic is sent to the default backend.", -} - -func (IngressSpec) SwaggerDoc() map[string]string { - return map_IngressSpec -} - -var map_IngressStatus = map[string]string{ - "": "IngressStatus describe the current state of the Ingress.", - "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", -} - -func (IngressStatus) SwaggerDoc() map[string]string { - return map_IngressStatus -} - -var map_IngressTLS = map[string]string{ - "": "IngressTLS describes the transport layer security associated with an Ingress.", - "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", - "secretName": "SecretName is the name of the secret used to terminate SSL traffic on 443. Field is left optional to allow SSL routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", -} - -func (IngressTLS) SwaggerDoc() map[string]string { - return map_IngressTLS -} - -var map_Job = map[string]string{ - "": "Job represents the configuration of a single job.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", -} - -func (Job) SwaggerDoc() map[string]string { - return map_Job -} - -var map_JobCondition = map[string]string{ - "": "JobCondition describes current state of a job.", - "type": "Type of job condition, Complete or Failed.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastProbeTime": "Last time the condition was checked.", - "lastTransitionTime": "Last time the condition transit from one status to another.", - "reason": "(brief) reason for the condition's last transition.", - "message": "Human readable message indicating details about last transition.", -} - -func (JobCondition) SwaggerDoc() map[string]string { - return map_JobCondition -} - -var map_JobList = map[string]string{ - "": "JobList is a collection of jobs.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "items": "Items is the list of Job.", -} - -func (JobList) SwaggerDoc() map[string]string { - return map_JobList -} - -var map_JobSpec = map[string]string{ - "": "JobSpec describes how the job execution will look like.", - "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md", - "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. 
Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md", - "activeDeadlineSeconds": "Optional duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer", - "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors", - "autoSelector": "AutoSelector controls generation of pod labels and pod selectors. It was not present in the original extensions/v1beta1 Job definition, but exists to allow conversion from batch/v1 Jobs, where it corresponds to, but has the opposite meaning as, ManualSelector. More info: http://releases.k8s.io/release-1.3/docs/design/selector-generation.md", - "template": "Template is the object that describes the pod that will be created when executing a job. More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md", -} - -func (JobSpec) SwaggerDoc() map[string]string { - return map_JobSpec -} - -var map_JobStatus = map[string]string{ - "": "JobStatus represents the current state of a Job.", - "conditions": "Conditions represent the latest available observations of an object's current state. More info: http://releases.k8s.io/release-1.3/docs/user-guide/jobs.md", - "startTime": "StartTime represents time when the job was acknowledged by the Job Manager. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", - "completionTime": "CompletionTime represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", - "active": "Active is the number of actively running pods.", - "succeeded": "Succeeded is the number of pods which reached Phase Succeeded.", - "failed": "Failed is the number of pods which reached Phase Failed.", -} - -func (JobStatus) SwaggerDoc() map[string]string { - return map_JobStatus -} - -var map_LabelSelector = map[string]string{ - "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", - "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", - "matchExpressions": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", -} - -func (LabelSelector) SwaggerDoc() map[string]string { - return map_LabelSelector -} - -var map_LabelSelectorRequirement = map[string]string{ - "": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", - "key": "key is the label key that the selector applies to.", - "operator": "operator represents a key's relationship to a set of values. Valid operators ard In, NotIn, Exists and DoesNotExist.", - "values": "values is an array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", -} - -func (LabelSelectorRequirement) SwaggerDoc() map[string]string { - return map_LabelSelectorRequirement -} - -var map_ListOptions = map[string]string{ - "": "ListOptions is the query options to a standard REST list call.", - "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.", - "timeoutSeconds": "Timeout for the list/watch call.", -} - -func (ListOptions) SwaggerDoc() map[string]string { - return map_ListOptions -} - -var map_NetworkPolicy = map[string]string{ - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior for this NetworkPolicy.", -} - -func (NetworkPolicy) SwaggerDoc() map[string]string { - return map_NetworkPolicy -} - -var map_NetworkPolicyIngressRule = map[string]string{ - "": "This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.", - "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is not provided, this rule matches all ports (traffic not restricted by port). If this field is empty, this rule matches no ports (no traffic matches). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is not provided, this rule matches all sources (traffic not restricted by source). If this field is empty, this rule matches no sources (no traffic matches). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.", -} - -func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { - return map_NetworkPolicyIngressRule -} - -var map_NetworkPolicyList = map[string]string{ - "": "Network Policy List is a list of NetworkPolicy objects.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", -} - -func (NetworkPolicyList) SwaggerDoc() map[string]string { - return map_NetworkPolicyList -} - -var map_NetworkPolicyPeer = map[string]string{ - "podSelector": "This is a label selector which selects Pods in this namespace. This field follows standard label selector semantics. If not provided, this selector selects no pods. If present but empty, this selector selects all pods in this namespace.", - "namespaceSelector": "Selects Namespaces using cluster scoped-labels. This matches all pods in all namespaces selected by this label selector. 
This field follows standard label selector semantics. If omitted, this selector selects no namespaces. If present but empty, this selector selects all namespaces.", -} - -func (NetworkPolicyPeer) SwaggerDoc() map[string]string { - return map_NetworkPolicyPeer -} - -var map_NetworkPolicyPort = map[string]string{ - "protocol": "Optional. The protocol (TCP or UDP) which traffic must match. If not specified, this field defaults to TCP.", - "port": "If specified, the port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", -} - -func (NetworkPolicyPort) SwaggerDoc() map[string]string { - return map_NetworkPolicyPort -} - -var map_NetworkPolicySpec = map[string]string{ - "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", - "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if namespace.networkPolicy.ingress.isolation is undefined and cluster policy allows it, OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not affect ingress isolation. If this field is present and contains at least one rule, this policy allows any traffic which matches at least one of the ingress rules in this list.", -} - -func (NetworkPolicySpec) SwaggerDoc() map[string]string { - return map_NetworkPolicySpec -} - -var map_PodSecurityPolicy = map[string]string{ - "": "Pod Security Policy governs the ability to make requests that affect the Security Context that will be applied to a pod and container.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "spec defines the policy enforced.", -} - -func (PodSecurityPolicy) SwaggerDoc() map[string]string { - return map_PodSecurityPolicy -} - -var map_PodSecurityPolicyList = map[string]string{ - "": "Pod Security Policy List is a list of PodSecurityPolicy objects.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", -} - -func (PodSecurityPolicyList) SwaggerDoc() map[string]string { - return map_PodSecurityPolicyList -} - -var map_PodSecurityPolicySpec = map[string]string{ - "": "Pod Security Policy Spec defines the policy enforced.", - "privileged": "privileged determines if a pod can request to be run as privileged.", - "defaultAddCapabilities": "DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities.", - "requiredDropCapabilities": "RequiredDropCapabilities are the capabilities that will be dropped from the container. 
These are required to be dropped and cannot be added.", - "allowedCapabilities": "AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.", - "volumes": "volumes is a white list of allowed volume plugins. Empty indicates that all plugins may be used.", - "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", - "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", - "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", - "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", - "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", - "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", - "supplementalGroups": "SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", - "fsGroup": "FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.", - "readOnlyRootFilesystem": "ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", -} - -func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { - return map_PodSecurityPolicySpec -} - -var map_ReplicaSet = map[string]string{ - "": "ReplicaSet represents the configuration of a ReplicaSet.", - "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status", -} - -func (ReplicaSet) SwaggerDoc() map[string]string { - return map_ReplicaSet -} - -var map_ReplicaSetList = map[string]string{ - "": "ReplicaSetList is a collection of ReplicaSets.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "items": "List of ReplicaSets. More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md", -} - -func (ReplicaSetList) SwaggerDoc() map[string]string { - return map_ReplicaSetList -} - -var map_ReplicaSetSpec = map[string]string{ - "": "ReplicaSetSpec is the specification of a ReplicaSet.", - "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#what-is-a-replication-controller", - "selector": "Selector is a label query over pods that should match the replica count. 
If the selector is empty, it is defaulted to the labels present on the pod template. Label keys and values that must match in order to be controlled by this replica set. More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors", - "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#pod-template", -} - -func (ReplicaSetSpec) SwaggerDoc() map[string]string { - return map_ReplicaSetSpec -} - -var map_ReplicaSetStatus = map[string]string{ - "": "ReplicaSetStatus represents the current status of a ReplicaSet.", - "replicas": "Replicas is the most recently oberved number of replicas. More info: http://releases.k8s.io/release-1.3/docs/user-guide/replication-controller.md#what-is-a-replication-controller", - "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", - "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", -} - -func (ReplicaSetStatus) SwaggerDoc() map[string]string { - return map_ReplicaSetStatus -} - -var map_ReplicationControllerDummy = map[string]string{ - "": "Dummy definition", -} - -func (ReplicationControllerDummy) SwaggerDoc() map[string]string { - return map_ReplicationControllerDummy -} - -var map_RollbackConfig = map[string]string{ - "revision": "The revision to rollback to. If set to 0, rollbck to the last revision.", -} - -func (RollbackConfig) SwaggerDoc() map[string]string { - return map_RollbackConfig -} - -var map_RollingUpdateDeployment = map[string]string{ - "": "Spec to control the desired behavior of rolling update.", - "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0 if MaxSurge is 0. By default, a fixed value of 1 is used. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", - "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. By default, a value of 1 is used. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. 
Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", -} - -func (RollingUpdateDeployment) SwaggerDoc() map[string]string { - return map_RollingUpdateDeployment -} - -var map_RunAsUserStrategyOptions = map[string]string{ - "": "Run A sUser Strategy Options defines the strategy type and any options used to create the strategy.", - "rule": "Rule is the strategy that will dictate the allowable RunAsUser values that may be set.", - "ranges": "Ranges are the allowed ranges of uids that may be used.", -} - -func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsUserStrategyOptions -} - -var map_SELinuxStrategyOptions = map[string]string{ - "": "SELinux Strategy Options defines the strategy type and any options used to create the strategy.", - "rule": "type is the strategy that will dictate the allowable labels that may be set.", - "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: http://releases.k8s.io/release-1.3/docs/design/security_context.md#security-context", -} - -func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { - return map_SELinuxStrategyOptions -} - -var map_Scale = map[string]string{ - "": "represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#spec-and-status. Read-only.", -} - -func (Scale) SwaggerDoc() map[string]string { - return map_Scale -} - -var map_ScaleSpec = map[string]string{ - "": "describes the attributes of a scale subresource", - "replicas": "desired number of instances for the scaled object.", -} - -func (ScaleSpec) SwaggerDoc() map[string]string { - return map_ScaleSpec -} - -var map_ScaleStatus = map[string]string{ - "": "represents the current status of a scale subresource.", - "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors", - "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. 
More info: http://releases.k8s.io/release-1.3/docs/user-guide/labels.md#label-selectors", -} - -func (ScaleStatus) SwaggerDoc() map[string]string { - return map_ScaleStatus -} - -var map_SubresourceReference = map[string]string{ - "": "SubresourceReference contains enough information to let you inspect or modify the referred subresource.", - "kind": "Kind of the referent; More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#types-kinds", - "name": "Name of the referent; More info: http://releases.k8s.io/release-1.3/docs/user-guide/identifiers.md#names", - "apiVersion": "API version of the referent", - "subresource": "Subresource name of the referent", -} - -func (SubresourceReference) SwaggerDoc() map[string]string { - return map_SubresourceReference -} - -var map_SupplementalGroupsStrategyOptions = map[string]string{ - "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.", - "rule": "Rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.", - "ranges": "Ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end.", -} - -func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { - return map_SupplementalGroupsStrategyOptions -} - -var map_ThirdPartyResource = map[string]string{ - "": "A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource types to the API. It consists of one or more Versions of the api.", - "metadata": "Standard object metadata", - "description": "Description is the description of this object.", - "versions": "Versions are versions for this third party object", -} - -func (ThirdPartyResource) SwaggerDoc() map[string]string { - return map_ThirdPartyResource -} - -var map_ThirdPartyResourceData = map[string]string{ - "": "An internal object, used for versioned storage in etcd. Not exposed to the end user.", - "metadata": "Standard object metadata.", - "data": "Data is the raw JSON data for this data.", -} - -func (ThirdPartyResourceData) SwaggerDoc() map[string]string { - return map_ThirdPartyResourceData -} - -var map_ThirdPartyResourceDataList = map[string]string{ - "": "ThirdPartyResrouceDataList is a list of ThirdPartyResourceData.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/release-1.3/docs/devel/api-conventions.md#metadata", - "items": "Items is the list of ThirdpartyResourceData.", -} - -func (ThirdPartyResourceDataList) SwaggerDoc() map[string]string { - return map_ThirdPartyResourceDataList -} - -var map_ThirdPartyResourceList = map[string]string{ - "": "ThirdPartyResourceList is a list of ThirdPartyResources.", - "metadata": "Standard list metadata.", - "items": "Items is the list of ThirdPartyResources.", -} - -func (ThirdPartyResourceList) SwaggerDoc() map[string]string { - return map_ThirdPartyResourceList -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go deleted file mode 100644 index d0405c377..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go +++ /dev/null @@ -1,746 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "fmt" - "net" - "reflect" - "regexp" - "strconv" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - unversionedvalidation "k8s.io/kubernetes/pkg/api/unversioned/validation" - apivalidation "k8s.io/kubernetes/pkg/api/validation" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/labels" - psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util" - "k8s.io/kubernetes/pkg/util/intstr" - "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/util/validation" - "k8s.io/kubernetes/pkg/util/validation/field" -) - -func ValidateThirdPartyResourceUpdate(update, old *extensions.ThirdPartyResource) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...) - allErrs = append(allErrs, ValidateThirdPartyResource(update)...) - return allErrs -} - -func ValidateThirdPartyResourceName(name string, prefix bool) []string { - // Make sure it's a valid DNS subdomain - if msgs := apivalidation.NameIsDNSSubdomain(name, prefix); len(msgs) != 0 { - return msgs - } - - // Make sure it's at least three segments (kind + two-segment group name) - if !prefix { - parts := strings.Split(name, ".") - if len(parts) < 3 { - return []string{"must be at least three segments long: <kind>.<domain>.<tld>"} - } - } - - return nil -} - -func ValidateThirdPartyResource(obj *extensions.ThirdPartyResource) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&obj.ObjectMeta, false, ValidateThirdPartyResourceName, field.NewPath("metadata"))...) - - versions := sets.String{} - for ix := range obj.Versions { - version := &obj.Versions[ix] - if len(version.Name) == 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("versions").Index(ix).Child("name"), version, "must not be empty")) - } else { - for _, msg := range validation.IsDNS1123Label(version.Name) { - allErrs = append(allErrs, field.Invalid(field.NewPath("versions").Index(ix).Child("name"), version, msg)) - } - } - if versions.Has(version.Name) { - allErrs = append(allErrs, field.Duplicate(field.NewPath("versions").Index(ix).Child("name"), version)) - } - versions.Insert(version.Name) - } - return allErrs -} - -// ValidateDaemonSet tests if required fields in the DaemonSet are set. -func ValidateDaemonSet(ds *extensions.DaemonSet) field.ErrorList { - allErrs := apivalidation.ValidateObjectMeta(&ds.ObjectMeta, true, ValidateDaemonSetName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidateDaemonSetUpdate tests if required fields in the DaemonSet are set. 
-func ValidateDaemonSetUpdate(ds, oldDS *extensions.DaemonSet) field.ErrorList { - allErrs := apivalidation.ValidateObjectMetaUpdate(&ds.ObjectMeta, &oldDS.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateDaemonSetSpec(&ds.Spec, field.NewPath("spec"))...) - return allErrs -} - -// validateDaemonSetStatus validates a DaemonSetStatus -func validateDaemonSetStatus(status *extensions.DaemonSetStatus, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.CurrentNumberScheduled), fldPath.Child("currentNumberScheduled"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.NumberMisscheduled), fldPath.Child("numberMisscheduled"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.DesiredNumberScheduled), fldPath.Child("desiredNumberScheduled"))...) - return allErrs -} - -// ValidateDaemonSetStatus validates tests if required fields in the DaemonSet Status section -func ValidateDaemonSetStatusUpdate(ds, oldDS *extensions.DaemonSet) field.ErrorList { - allErrs := apivalidation.ValidateObjectMetaUpdate(&ds.ObjectMeta, &oldDS.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, validateDaemonSetStatus(&ds.Status, field.NewPath("status"))...) - return allErrs -} - -// ValidateDaemonSetSpec tests if required fields in the DaemonSetSpec are set. -func ValidateDaemonSetSpec(spec *extensions.DaemonSetSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) - - selector, err := unversioned.LabelSelectorAsSelector(spec.Selector) - if err == nil && !selector.Matches(labels.Set(spec.Template.Labels)) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("template", "metadata", "labels"), spec.Template.Labels, "`selector` does not match template `labels`")) - } - if spec.Selector != nil && len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is not valid for daemonset.")) - } - - allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(&spec.Template, fldPath.Child("template"))...) - // Daemons typically run on more than one node, so mark Read-Write persistent disks as invalid. - allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(spec.Template.Spec.Volumes, fldPath.Child("template", "spec", "volumes"))...) - // RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec(). - if spec.Template.Spec.RestartPolicy != api.RestartPolicyAlways { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("template", "spec", "restartPolicy"), spec.Template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)})) - } - return allErrs -} - -// ValidateDaemonSetName can be used to check whether the given daemon set name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateDaemonSetName = apivalidation.NameIsDNSSubdomain - -// Validates that the given name can be used as a deployment name. 
-var ValidateDeploymentName = apivalidation.NameIsDNSSubdomain - -func ValidatePositiveIntOrPercent(intOrPercent intstr.IntOrString, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if intOrPercent.Type == intstr.String { - if !validation.IsValidPercent(intOrPercent.StrVal) { - allErrs = append(allErrs, field.Invalid(fldPath, intOrPercent, "must be an integer or percentage (e.g '5%')")) - } - } else if intOrPercent.Type == intstr.Int { - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(intOrPercent.IntValue()), fldPath)...) - } - return allErrs -} - -func getPercentValue(intOrStringValue intstr.IntOrString) (int, bool) { - if intOrStringValue.Type != intstr.String || !validation.IsValidPercent(intOrStringValue.StrVal) { - return 0, false - } - value, _ := strconv.Atoi(intOrStringValue.StrVal[:len(intOrStringValue.StrVal)-1]) - return value, true -} - -func getIntOrPercentValue(intOrStringValue intstr.IntOrString) int { - value, isPercent := getPercentValue(intOrStringValue) - if isPercent { - return value - } - return intOrStringValue.IntValue() -} - -func IsNotMoreThan100Percent(intOrStringValue intstr.IntOrString, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - value, isPercent := getPercentValue(intOrStringValue) - if !isPercent || value <= 100 { - return nil - } - allErrs = append(allErrs, field.Invalid(fldPath, intOrStringValue, "must not be greater than 100%")) - return allErrs -} - -func ValidateRollingUpdateDeployment(rollingUpdate *extensions.RollingUpdateDeployment, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...) - allErrs = append(allErrs, ValidatePositiveIntOrPercent(rollingUpdate.MaxSurge, fldPath.Child("maxSurge"))...) - if getIntOrPercentValue(rollingUpdate.MaxUnavailable) == 0 && getIntOrPercentValue(rollingUpdate.MaxSurge) == 0 { - // Both MaxSurge and MaxUnavailable cannot be zero. - allErrs = append(allErrs, field.Invalid(fldPath.Child("maxUnavailable"), rollingUpdate.MaxUnavailable, "may not be 0 when `maxSurge` is 0")) - } - // Validate that MaxUnavailable is not more than 100%. - allErrs = append(allErrs, IsNotMoreThan100Percent(rollingUpdate.MaxUnavailable, fldPath.Child("maxUnavailable"))...) - return allErrs -} - -func ValidateDeploymentStrategy(strategy *extensions.DeploymentStrategy, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - switch strategy.Type { - case extensions.RecreateDeploymentStrategyType: - if strategy.RollingUpdate != nil { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("rollingUpdate"), "may not be specified when strategy `type` is '"+string(extensions.RecreateDeploymentStrategyType+"'"))) - } - case extensions.RollingUpdateDeploymentStrategyType: - // This should never happen since it's set and checked in defaults.go - if strategy.RollingUpdate == nil { - allErrs = append(allErrs, field.Required(fldPath.Child("rollingUpdate"), "this should be defaulted and never be nil")) - } else { - allErrs = append(allErrs, ValidateRollingUpdateDeployment(strategy.RollingUpdate, fldPath.Child("rollingUpdate"))...) 
- } - default: - validValues := []string{string(extensions.RecreateDeploymentStrategyType), string(extensions.RollingUpdateDeploymentStrategyType)} - allErrs = append(allErrs, field.NotSupported(fldPath, strategy, validValues)) - } - return allErrs -} - -func ValidateRollback(rollback *extensions.RollbackConfig, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - v := rollback.Revision - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(v), fldPath.Child("version"))...) - return allErrs -} - -// Validates given deployment spec. -func ValidateDeploymentSpec(spec *extensions.DeploymentSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...) - - if spec.Selector == nil { - allErrs = append(allErrs, field.Required(fldPath.Child("selector"), "")) - } else { - allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) - if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is not valid for deployment.")) - } - } - - selector, err := unversioned.LabelSelectorAsSelector(spec.Selector) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "invalid label selector.")) - } else { - allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, selector, spec.Replicas, fldPath.Child("template"))...) - } - - allErrs = append(allErrs, ValidateDeploymentStrategy(&spec.Strategy, fldPath.Child("strategy"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...) - if spec.RevisionHistoryLimit != nil { - // zero is a valid RevisionHistoryLimit - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*spec.RevisionHistoryLimit), fldPath.Child("revisionHistoryLimit"))...) - } - if spec.RollbackTo != nil { - allErrs = append(allErrs, ValidateRollback(spec.RollbackTo, fldPath.Child("rollback"))...) - } - return allErrs -} - -// Validates given deployment status. -func ValidateDeploymentStatus(status *extensions.DeploymentStatus, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(status.ObservedGeneration, fldPath.Child("observedGeneration"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.Replicas), fldPath.Child("replicas"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UpdatedReplicas), fldPath.Child("updatedReplicas"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.AvailableReplicas), fldPath.Child("availableReplicas"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(status.UnavailableReplicas), fldPath.Child("unavailableReplicas"))...) - return allErrs -} - -func ValidateDeploymentUpdate(update, old *extensions.Deployment) field.ErrorList { - allErrs := apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateDeploymentSpec(&update.Spec, field.NewPath("spec"))...) 
- return allErrs -} - -func ValidateDeploymentStatusUpdate(update, old *extensions.Deployment) field.ErrorList { - allErrs := apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateDeploymentStatus(&update.Status, field.NewPath("status"))...) - return allErrs -} - -func ValidateDeployment(obj *extensions.Deployment) field.ErrorList { - allErrs := apivalidation.ValidateObjectMeta(&obj.ObjectMeta, true, ValidateDeploymentName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateDeploymentSpec(&obj.Spec, field.NewPath("spec"))...) - return allErrs -} - -func ValidateDeploymentRollback(obj *extensions.DeploymentRollback) field.ErrorList { - allErrs := apivalidation.ValidateAnnotations(obj.UpdatedAnnotations, field.NewPath("updatedAnnotations")) - if len(obj.Name) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("name"), "name is required")) - } - allErrs = append(allErrs, ValidateRollback(&obj.RollbackTo, field.NewPath("rollback"))...) - return allErrs -} - -func ValidateThirdPartyResourceDataUpdate(update, old *extensions.ThirdPartyResourceData) field.ErrorList { - return apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata")) -} - -func ValidateThirdPartyResourceData(obj *extensions.ThirdPartyResourceData) field.ErrorList { - return apivalidation.ValidateObjectMeta(&obj.ObjectMeta, true, apivalidation.NameIsDNSLabel, field.NewPath("metadata")) -} - -// ValidateIngress tests if required fields in the Ingress are set. -func ValidateIngress(ingress *extensions.Ingress) field.ErrorList { - allErrs := apivalidation.ValidateObjectMeta(&ingress.ObjectMeta, true, ValidateIngressName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateIngressSpec(&ingress.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidateIngressName validates that the given name can be used as an Ingress name. -var ValidateIngressName = apivalidation.NameIsDNSSubdomain - -func validateIngressTLS(spec *extensions.IngressSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - // TODO: Perform a more thorough validation of spec.TLS.Hosts that takes - // the wildcard spec from RFC 6125 into account. - return allErrs -} - -// ValidateIngressSpec tests if required fields in the IngressSpec are set. -func ValidateIngressSpec(spec *extensions.IngressSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - // TODO: Is a default backend mandatory? - if spec.Backend != nil { - allErrs = append(allErrs, validateIngressBackend(spec.Backend, fldPath.Child("backend"))...) - } else if len(spec.Rules) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath, spec.Rules, "either `backend` or `rules` must be specified")) - } - if len(spec.Rules) > 0 { - allErrs = append(allErrs, validateIngressRules(spec.Rules, fldPath.Child("rules"))...) - } - if len(spec.TLS) > 0 { - allErrs = append(allErrs, validateIngressTLS(spec, fldPath.Child("tls"))...) - } - return allErrs -} - -// ValidateIngressUpdate tests if required fields in the Ingress are set. -func ValidateIngressUpdate(ingress, oldIngress *extensions.Ingress) field.ErrorList { - allErrs := apivalidation.ValidateObjectMetaUpdate(&ingress.ObjectMeta, &oldIngress.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateIngressSpec(&ingress.Spec, field.NewPath("spec"))...) 
- return allErrs -} - -// ValidateIngressStatusUpdate tests if required fields in the Ingress are set when updating status. -func ValidateIngressStatusUpdate(ingress, oldIngress *extensions.Ingress) field.ErrorList { - allErrs := apivalidation.ValidateObjectMetaUpdate(&ingress.ObjectMeta, &oldIngress.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, apivalidation.ValidateLoadBalancerStatus(&ingress.Status.LoadBalancer, field.NewPath("status", "loadBalancer"))...) - return allErrs -} - -func validateIngressRules(IngressRules []extensions.IngressRule, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(IngressRules) == 0 { - return append(allErrs, field.Required(fldPath, "")) - } - for i, ih := range IngressRules { - if len(ih.Host) > 0 { - // TODO: Ports and ips are allowed in the host part of a url - // according to RFC 3986, consider allowing them. - for _, msg := range validation.IsDNS1123Subdomain(ih.Host) { - allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, msg)) - } - if isIP := (net.ParseIP(ih.Host) != nil); isIP { - allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("host"), ih.Host, "must be a DNS name, not an IP address")) - } - } - allErrs = append(allErrs, validateIngressRuleValue(&ih.IngressRuleValue, fldPath.Index(0))...) - } - return allErrs -} - -func validateIngressRuleValue(ingressRule *extensions.IngressRuleValue, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if ingressRule.HTTP != nil { - allErrs = append(allErrs, validateHTTPIngressRuleValue(ingressRule.HTTP, fldPath.Child("http"))...) - } - return allErrs -} - -func validateHTTPIngressRuleValue(httpIngressRuleValue *extensions.HTTPIngressRuleValue, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if len(httpIngressRuleValue.Paths) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("paths"), "")) - } - for i, rule := range httpIngressRuleValue.Paths { - if len(rule.Path) > 0 { - if !strings.HasPrefix(rule.Path, "/") { - allErrs = append(allErrs, field.Invalid(fldPath.Child("paths").Index(i).Child("path"), rule.Path, "must be an absolute path")) - } - // TODO: More draconian path regex validation. - // Path must be a valid regex. This is the basic requirement. - // In addition to this any characters not allowed in a path per - // RFC 3986 section-3.3 cannot appear as a literal in the regex. - // Consider the example: http://host/valid?#bar, everything after - // the last '/' is a valid regex that matches valid#bar, which - // isn't a valid path, because the path terminates at the first ? - // or #. A more sophisticated form of validation would detect that - // the user is confusing url regexes with path regexes. - _, err := regexp.CompilePOSIX(rule.Path) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("paths").Index(i).Child("path"), rule.Path, "must be a valid regex")) - } - } - allErrs = append(allErrs, validateIngressBackend(&rule.Backend, fldPath.Child("backend"))...) - } - return allErrs -} - -// validateIngressBackend tests if a given backend is valid. -func validateIngressBackend(backend *extensions.IngressBackend, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - // All backends must reference a single local service by name, and a single service port by name or number. 
- if len(backend.ServiceName) == 0 { - return append(allErrs, field.Required(fldPath.Child("serviceName"), "")) - } else { - for _, msg := range apivalidation.ValidateServiceName(backend.ServiceName, false) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceName"), backend.ServiceName, msg)) - } - } - if backend.ServicePort.Type == intstr.String { - for _, msg := range validation.IsDNS1123Label(backend.ServicePort.StrVal) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("servicePort"), backend.ServicePort.StrVal, msg)) - } - if !validation.IsValidPortName(backend.ServicePort.StrVal) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("servicePort"), backend.ServicePort.StrVal, apivalidation.PortNameErrorMsg)) - } - } else if !validation.IsValidPortNum(backend.ServicePort.IntValue()) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("servicePort"), backend.ServicePort, apivalidation.PortRangeErrorMsg)) - } - return allErrs -} - -func ValidateScale(scale *extensions.Scale) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&scale.ObjectMeta, true, apivalidation.NameIsDNSSubdomain, field.NewPath("metadata"))...) - - if scale.Spec.Replicas < 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "replicas"), scale.Spec.Replicas, "must be greater than or equal to 0")) - } - - return allErrs -} - -// ValidateReplicaSetName can be used to check whether the given ReplicaSet -// name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidateReplicaSetName = apivalidation.NameIsDNSSubdomain - -// ValidateReplicaSet tests if required fields in the ReplicaSet are set. -func ValidateReplicaSet(rs *extensions.ReplicaSet) field.ErrorList { - allErrs := apivalidation.ValidateObjectMeta(&rs.ObjectMeta, true, ValidateReplicaSetName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidateReplicaSetUpdate tests if required fields in the ReplicaSet are set. -func ValidateReplicaSetUpdate(rs, oldRs *extensions.ReplicaSet) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&rs.ObjectMeta, &oldRs.ObjectMeta, field.NewPath("metadata"))...) - allErrs = append(allErrs, ValidateReplicaSetSpec(&rs.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidateReplicaSetStatusUpdate tests if required fields in the ReplicaSet are set. -func ValidateReplicaSetStatusUpdate(rs, oldRs *extensions.ReplicaSet) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&rs.ObjectMeta, &oldRs.ObjectMeta, field.NewPath("metadata"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(rs.Status.Replicas), field.NewPath("status", "replicas"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(rs.Status.FullyLabeledReplicas), field.NewPath("status", "fullyLabeledReplicas"))...) - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(rs.Status.ObservedGeneration), field.NewPath("status", "observedGeneration"))...) - return allErrs -} - -// ValidateReplicaSetSpec tests if required fields in the ReplicaSet spec are set. 
-func ValidateReplicaSetSpec(spec *extensions.ReplicaSetSpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...) - - if spec.Selector == nil { - allErrs = append(allErrs, field.Required(fldPath.Child("selector"), "")) - } else { - allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) - if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "empty selector is not valid for deployment.")) - } - } - - selector, err := unversioned.LabelSelectorAsSelector(spec.Selector) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("selector"), spec.Selector, "invalid label selector.")) - } else { - allErrs = append(allErrs, ValidatePodTemplateSpecForReplicaSet(&spec.Template, selector, spec.Replicas, fldPath.Child("template"))...) - } - return allErrs -} - -// Validates the given template and ensures that it is in accordance with the desired selector and replicas. -func ValidatePodTemplateSpecForReplicaSet(template *api.PodTemplateSpec, selector labels.Selector, replicas int32, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if template == nil { - allErrs = append(allErrs, field.Required(fldPath, "")) - } else { - if !selector.Empty() { - // Verify that the ReplicaSet selector matches the labels in template. - labels := labels.Set(template.Labels) - if !selector.Matches(labels) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`")) - } - } - allErrs = append(allErrs, apivalidation.ValidatePodTemplateSpec(template, fldPath)...) - if replicas > 1 { - allErrs = append(allErrs, apivalidation.ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...) - } - // RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec(). - if template.Spec.RestartPolicy != api.RestartPolicyAlways { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)})) - } - } - return allErrs -} - -// ValidatePodSecurityPolicyName can be used to check whether the given -// pod security policy name is valid. -// Prefix indicates this name will be used as part of generation, in which case -// trailing dashes are allowed. -var ValidatePodSecurityPolicyName = apivalidation.NameIsDNSSubdomain - -func ValidatePodSecurityPolicy(psp *extensions.PodSecurityPolicy) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&psp.ObjectMeta, false, ValidatePodSecurityPolicyName, field.NewPath("metadata"))...) - allErrs = append(allErrs, ValidatePodSecurityPolicySpec(&psp.Spec, field.NewPath("spec"))...) - return allErrs -} - -func ValidatePodSecurityPolicySpec(spec *extensions.PodSecurityPolicySpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - - allErrs = append(allErrs, validatePSPRunAsUser(fldPath.Child("runAsUser"), &spec.RunAsUser)...) - allErrs = append(allErrs, validatePSPSELinux(fldPath.Child("seLinux"), &spec.SELinux)...) - allErrs = append(allErrs, validatePSPSupplementalGroup(fldPath.Child("supplementalGroups"), &spec.SupplementalGroups)...) 
- allErrs = append(allErrs, validatePSPFSGroup(fldPath.Child("fsGroup"), &spec.FSGroup)...) - allErrs = append(allErrs, validatePodSecurityPolicyVolumes(fldPath, spec.Volumes)...) - allErrs = append(allErrs, validatePSPCapsAgainstDrops(spec.RequiredDropCapabilities, spec.DefaultAddCapabilities, field.NewPath("defaultAddCapabilities"))...) - allErrs = append(allErrs, validatePSPCapsAgainstDrops(spec.RequiredDropCapabilities, spec.AllowedCapabilities, field.NewPath("allowedCapabilities"))...) - - return allErrs -} - -// validatePSPSELinux validates the SELinux fields of PodSecurityPolicy. -func validatePSPSELinux(fldPath *field.Path, seLinux *extensions.SELinuxStrategyOptions) field.ErrorList { - allErrs := field.ErrorList{} - - // ensure the selinux strategy has a valid rule - supportedSELinuxRules := sets.NewString(string(extensions.SELinuxStrategyMustRunAs), - string(extensions.SELinuxStrategyRunAsAny)) - if !supportedSELinuxRules.Has(string(seLinux.Rule)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), seLinux.Rule, supportedSELinuxRules.List())) - } - - return allErrs -} - -// validatePSPRunAsUser validates the RunAsUser fields of PodSecurityPolicy. -func validatePSPRunAsUser(fldPath *field.Path, runAsUser *extensions.RunAsUserStrategyOptions) field.ErrorList { - allErrs := field.ErrorList{} - - // ensure the user strategy has a valid rule - supportedRunAsUserRules := sets.NewString(string(extensions.RunAsUserStrategyMustRunAs), - string(extensions.RunAsUserStrategyMustRunAsNonRoot), - string(extensions.RunAsUserStrategyRunAsAny)) - if !supportedRunAsUserRules.Has(string(runAsUser.Rule)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), runAsUser.Rule, supportedRunAsUserRules.List())) - } - - // validate range settings - for idx, rng := range runAsUser.Ranges { - allErrs = append(allErrs, validateIDRanges(fldPath.Child("ranges").Index(idx), rng)...) - } - - return allErrs -} - -// validatePSPFSGroup validates the FSGroupStrategyOptions fields of the PodSecurityPolicy. -func validatePSPFSGroup(fldPath *field.Path, groupOptions *extensions.FSGroupStrategyOptions) field.ErrorList { - allErrs := field.ErrorList{} - - supportedRules := sets.NewString( - string(extensions.FSGroupStrategyMustRunAs), - string(extensions.FSGroupStrategyRunAsAny), - ) - if !supportedRules.Has(string(groupOptions.Rule)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), groupOptions.Rule, supportedRules.List())) - } - - for idx, rng := range groupOptions.Ranges { - allErrs = append(allErrs, validateIDRanges(fldPath.Child("ranges").Index(idx), rng)...) - } - return allErrs -} - -// validatePSPSupplementalGroup validates the SupplementalGroupsStrategyOptions fields of the PodSecurityPolicy. -func validatePSPSupplementalGroup(fldPath *field.Path, groupOptions *extensions.SupplementalGroupsStrategyOptions) field.ErrorList { - allErrs := field.ErrorList{} - - supportedRules := sets.NewString( - string(extensions.SupplementalGroupsStrategyRunAsAny), - string(extensions.SupplementalGroupsStrategyMustRunAs), - ) - if !supportedRules.Has(string(groupOptions.Rule)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("rule"), groupOptions.Rule, supportedRules.List())) - } - - for idx, rng := range groupOptions.Ranges { - allErrs = append(allErrs, validateIDRanges(fldPath.Child("ranges").Index(idx), rng)...) - } - return allErrs -} - -// validatePodSecurityPolicyVolumes validates the volume fields of PodSecurityPolicy. 
-func validatePodSecurityPolicyVolumes(fldPath *field.Path, volumes []extensions.FSType) field.ErrorList { - allErrs := field.ErrorList{} - allowed := psputil.GetAllFSTypesAsSet() - // add in the * value since that is a pseudo type that is not included by default - allowed.Insert(string(extensions.All)) - for _, v := range volumes { - if !allowed.Has(string(v)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("volumes"), v, allowed.List())) - } - } - - return allErrs -} - -// validateIDRanges ensures the range is valid. -func validateIDRanges(fldPath *field.Path, rng extensions.IDRange) field.ErrorList { - allErrs := field.ErrorList{} - - // if 0 <= Min <= Max then we do not need to validate max. It is always greater than or - // equal to 0 and Min. - if rng.Min < 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("min"), rng.Min, "min cannot be negative")) - } - if rng.Max < 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("max"), rng.Max, "max cannot be negative")) - } - if rng.Min > rng.Max { - allErrs = append(allErrs, field.Invalid(fldPath.Child("min"), rng.Min, "min cannot be greater than max")) - } - - return allErrs -} - -// validatePSPCapsAgainstDrops ensures an allowed cap is not listed in the required drops. -func validatePSPCapsAgainstDrops(requiredDrops []api.Capability, capsToCheck []api.Capability, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if requiredDrops == nil { - return allErrs - } - for _, cap := range capsToCheck { - if hasCap(cap, requiredDrops) { - allErrs = append(allErrs, field.Invalid(fldPath, cap, - fmt.Sprintf("capability is listed in %s and requiredDropCapabilities", fldPath.String()))) - } - } - return allErrs -} - -// hasCap checks for needle in haystack. -func hasCap(needle api.Capability, haystack []api.Capability) bool { - for _, c := range haystack { - if needle == c { - return true - } - } - return false -} - -// ValidatePodSecurityPolicyUpdate validates a PSP for updates. -func ValidatePodSecurityPolicyUpdate(old *extensions.PodSecurityPolicy, new *extensions.PodSecurityPolicy) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&old.ObjectMeta, &new.ObjectMeta, field.NewPath("metadata"))...) - allErrs = append(allErrs, ValidatePodSecurityPolicySpec(&new.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidateNetworkPolicyName can be used to check whether the given networkpolicy -// name is valid. -func ValidateNetworkPolicyName(name string, prefix bool) []string { - return apivalidation.NameIsDNSSubdomain(name, prefix) -} - -// ValidateNetworkPolicySpec tests if required fields in the networkpolicy spec are set. -func ValidateNetworkPolicySpec(spec *extensions.NetworkPolicySpec, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(&spec.PodSelector, fldPath.Child("podSelector"))...) - - // Validate ingress rules. - for _, i := range spec.Ingress { - // TODO: Update From to be a pointer to slice as soon as auto-generation supports it. - for _, f := range i.From { - numFroms := 0 - if f.PodSelector != nil { - numFroms++ - allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(f.PodSelector, fldPath.Child("podSelector"))...) 
- } - if f.NamespaceSelector != nil { - if numFroms > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath, "may not specify more than 1 from type")) - } else { - numFroms++ - allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(f.NamespaceSelector, fldPath.Child("namespaces"))...) - } - } - - if numFroms == 0 { - // At least one of PodSelector and NamespaceSelector must be defined. - allErrs = append(allErrs, field.Required(fldPath, "must specify a from type")) - } - } - } - return allErrs -} - -// ValidateNetworkPolicy validates a networkpolicy. -func ValidateNetworkPolicy(np *extensions.NetworkPolicy) field.ErrorList { - allErrs := apivalidation.ValidateObjectMeta(&np.ObjectMeta, true, ValidateNetworkPolicyName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateNetworkPolicySpec(&np.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidateNetworkPolicyUpdate tests if an update to a NetworkPolicy is valid. -func ValidateNetworkPolicyUpdate(update, old *extensions.NetworkPolicy) field.ErrorList { - allErrs := field.ErrorList{} - allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...) - if !reflect.DeepEqual(update.Spec, old.Spec) { - allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "updates to networkpolicy spec are forbidden.")) - } - return allErrs -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/deep_copy_generated.go deleted file mode 100644 index 390e4b4a7..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/deep_copy_generated.go +++ /dev/null @@ -1,101 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package policy - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" - intstr "k8s.io/kubernetes/pkg/util/intstr" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_policy_PodDisruptionBudget, - DeepCopy_policy_PodDisruptionBudgetList, - DeepCopy_policy_PodDisruptionBudgetSpec, - DeepCopy_policy_PodDisruptionBudgetStatus, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. 
- panic(err) - } -} - -func DeepCopy_policy_PodDisruptionBudget(in PodDisruptionBudget, out *PodDisruptionBudget, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_policy_PodDisruptionBudgetSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_policy_PodDisruptionBudgetStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_policy_PodDisruptionBudgetList(in PodDisruptionBudgetList, out *PodDisruptionBudgetList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]PodDisruptionBudget, len(in)) - for i := range in { - if err := DeepCopy_policy_PodDisruptionBudget(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_policy_PodDisruptionBudgetSpec(in PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, c *conversion.Cloner) error { - if err := intstr.DeepCopy_intstr_IntOrString(in.MinAvailable, &out.MinAvailable, c); err != nil { - return err - } - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.Selector = nil - } - return nil -} - -func DeepCopy_policy_PodDisruptionBudgetStatus(in PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, c *conversion.Cloner) error { - out.PodDisruptionAllowed = in.PodDisruptionAllowed - out.CurrentHealthy = in.CurrentHealthy - out.DesiredHealthy = in.DesiredHealthy - out.ExpectedPods = in.ExpectedPods - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/install/install.go deleted file mode 100644 index 7882a0c53..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/install/install.go +++ /dev/null @@ -1,129 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the experimental API group, making it available as -// an option to all of the API encoding/decoding machinery. 
-package install - -import ( - "fmt" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/policy" - "k8s.io/kubernetes/pkg/apis/policy/v1alpha1" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/sets" -) - -const importPrefix = "k8s.io/kubernetes/pkg/apis/policy" - -var accessor = meta.NewAccessor() - -// availableVersions lists all known external versions for this group from most preferred to least preferred -var availableVersions = []unversioned.GroupVersion{v1alpha1.SchemeGroupVersion} - -func init() { - registered.RegisterVersions(availableVersions) - externalVersions := []unversioned.GroupVersion{} - for _, v := range availableVersions { - if registered.IsAllowedVersion(v) { - externalVersions = append(externalVersions, v) - } - } - if len(externalVersions) == 0 { - glog.V(4).Infof("No version is registered for group %v", policy.GroupName) - return - } - - if err := registered.EnableVersions(externalVersions...); err != nil { - glog.V(4).Infof("%v", err) - return - } - if err := enableVersions(externalVersions); err != nil { - glog.V(4).Infof("%v", err) - return - } -} - -// TODO: enableVersions should be centralized rather than spread in each API -// group. -// We can combine registered.RegisterVersions, registered.EnableVersions and -// registered.RegisterGroup once we have moved enableVersions there. -func enableVersions(externalVersions []unversioned.GroupVersion) error { - addVersionsToScheme(externalVersions...) - preferredExternalVersion := externalVersions[0] - - groupMeta := apimachinery.GroupMeta{ - GroupVersion: preferredExternalVersion, - GroupVersions: externalVersions, - RESTMapper: newRESTMapper(externalVersions), - SelfLinker: runtime.SelfLinker(accessor), - InterfacesFor: interfacesFor, - } - - if err := registered.RegisterGroup(groupMeta); err != nil { - return err - } - api.RegisterRESTMapper(groupMeta.RESTMapper) - return nil -} - -func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { - // the list of kinds that are scoped at the root of the api hierarchy - // if a kind is not enumerated here, it is assumed to have a namespace scope - rootScoped := sets.NewString() - - ignoredKinds := sets.NewString() - - return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) -} - -// interfacesFor returns the default Codec and ResourceVersioner for a given version -// string, or an error if the version is not known. 
-func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { - switch version { - case v1alpha1.SchemeGroupVersion: - return &meta.VersionInterfaces{ - ObjectConvertor: api.Scheme, - MetadataAccessor: accessor, - }, nil - default: - g, _ := registered.Group(policy.GroupName) - return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) - } -} - -func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { - // add the internal version to Scheme - policy.AddToScheme(api.Scheme) - // add the enabled external versions to Scheme - for _, v := range externalVersions { - if !registered.IsEnabledVersion(v) { - glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) - continue - } - switch v { - case v1alpha1.SchemeGroupVersion: - v1alpha1.AddToScheme(api.Scheme) - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go deleted file mode 100644 index 76ea1a552..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/register.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package policy - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// GroupName is the group name use in this package -const GroupName = "policy" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns back a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -func AddToScheme(scheme *runtime.Scheme) { - // Add the API to Scheme. - addKnownTypes(scheme) -} - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) { - // TODO this gets cleaned up when the types are fixed - scheme.AddKnownTypes(SchemeGroupVersion, - &PodDisruptionBudget{}, - &PodDisruptionBudgetList{}, - ) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/types.generated.go deleted file mode 100644 index 08be370f1..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/types.generated.go +++ /dev/null @@ -1,1440 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package policy - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg3_api "k8s.io/kubernetes/pkg/api" - pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg4_types "k8s.io/kubernetes/pkg/types" - pkg1_intstr "k8s.io/kubernetes/pkg/util/intstr" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg3_api.ObjectMeta - var v1 pkg2_unversioned.LabelSelector - var v2 pkg4_types.UID - var v3 pkg1_intstr.IntOrString - var v4 time.Time - _, _, _, _, _ = v0, v1, v2, v3, v4 - } -} - -func (x *PodDisruptionBudgetSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = x.Selector != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.MinAvailable - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else if !yym5 && z.IsJSONHandle() { - z.EncJSONMarshal(yy4) - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minAvailable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.MinAvailable - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(yy6) - } else { - z.EncFallback(yy6) 
- } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodDisruptionBudgetSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "minAvailable": - if r.TryDecodeAsNil() { - x.MinAvailable = pkg1_intstr.IntOrString{} - } else { - yyv4 := &x.MinAvailable - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else if !yym5 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4) - } else { - z.DecFallback(yyv4, false) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg2_unversioned.LabelSelector) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinAvailable = pkg1_intstr.IntOrString{} - } else { - yyv9 := &x.MinAvailable - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv9) - } else { - z.DecFallback(yyv9, false) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg2_unversioned.LabelSelector) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodDisruptionBudgetStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 4 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.PodDisruptionAllowed)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("disruptionAllowed")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.PodDisruptionAllowed)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.CurrentHealthy)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentHealthy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.CurrentHealthy)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.DesiredHealthy)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("desiredHealthy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.DesiredHealthy)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym13 := z.EncBinary() - _ = yym13 - if 
false { - } else { - r.EncodeInt(int64(x.ExpectedPods)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("expectedPods")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.ExpectedPods)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodDisruptionBudgetStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "disruptionAllowed": - if r.TryDecodeAsNil() { - x.PodDisruptionAllowed = false - } else { - x.PodDisruptionAllowed = bool(r.DecodeBool()) - } - case "currentHealthy": - if r.TryDecodeAsNil() { - x.CurrentHealthy = 0 - } else { - x.CurrentHealthy = int32(r.DecodeInt(32)) - } - case "desiredHealthy": - if r.TryDecodeAsNil() { - x.DesiredHealthy = 0 - } else { - x.DesiredHealthy = int32(r.DecodeInt(32)) - } - case "expectedPods": - if r.TryDecodeAsNil() { - x.ExpectedPods = 0 - } else { - x.ExpectedPods = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodDisruptionAllowed = false - } else { - x.PodDisruptionAllowed = bool(r.DecodeBool()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentHealthy = 0 - } else { - x.CurrentHealthy = int32(r.DecodeInt(32)) - } - 
yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DesiredHealthy = 0 - } else { - x.DesiredHealthy = int32(r.DecodeInt(32)) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExpectedPods = 0 - } else { - x.ExpectedPods = int32(r.DecodeInt(32)) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodDisruptionBudget) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodDisruptionBudget) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodDisruptionBudget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_api.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PodDisruptionBudgetSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = PodDisruptionBudgetStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodDisruptionBudget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - 
x.ObjectMeta = pkg3_api.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = PodDisruptionBudgetSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = PodDisruptionBudgetStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodDisruptionBudgetList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodDisruptionBudgetList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodDisruptionBudgetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv6), d) - } - } - case 
"kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodDisruptionBudgetList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSlicePodDisruptionBudget(v []PodDisruptionBudget, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePodDisruptionBudget(v *[]PodDisruptionBudget, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PodDisruptionBudget{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, 
z.DecBasicHandle().MaxInitLen, 296) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PodDisruptionBudget, yyrl1) - } - } else { - yyv1 = make([]PodDisruptionBudget, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodDisruptionBudget{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PodDisruptionBudget{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodDisruptionBudget{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PodDisruptionBudget{}) // var yyz1 PodDisruptionBudget - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodDisruptionBudget{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PodDisruptionBudget{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go deleted file mode 100644 index 2ecf41bcf..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/types.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package policy - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/util/intstr" -) - -// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. -type PodDisruptionBudgetSpec struct { - // The minimum number of pods that must be available simultaneously. This - // can be either an integer or a string specifying a percentage, e.g. "28%". - MinAvailable intstr.IntOrString `json:"minAvailable,omitempty"` - - // Label query over pods whose evictions are managed by the disruption - // budget. - Selector *unversioned.LabelSelector `json:"selector,omitempty"` -} - -// PodDisruptionBudgetStatus represents information about the status of a -// PodDisruptionBudget. Status may trail the actual state of a system. -type PodDisruptionBudgetStatus struct { - // Whether or not a disruption is currently allowed. 
- PodDisruptionAllowed bool `json:"disruptionAllowed"` - - // current number of healthy pods - CurrentHealthy int32 `json:"currentHealthy"` - - // minimum desired number of healthy pods - DesiredHealthy int32 `json:"desiredHealthy"` - - // total number of pods counted by this disruption budget - ExpectedPods int32 `json:"expectedPods"` -} - -// +genclient=true,noMethods=true - -// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods -type PodDisruptionBudget struct { - unversioned.TypeMeta `json:",inline"` - api.ObjectMeta `json:"metadata,omitempty"` - - // Specification of the desired behavior of the PodDisruptionBudget. - Spec PodDisruptionBudgetSpec `json:"spec,omitempty"` - // Most recently observed status of the PodDisruptionBudget. - Status PodDisruptionBudgetStatus `json:"status,omitempty"` -} - -// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. -type PodDisruptionBudgetList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty"` - Items []PodDisruptionBudget `json:"items"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/conversion_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/conversion_generated.go deleted file mode 100644 index 23aaa9a37..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/conversion_generated.go +++ /dev/null @@ -1,183 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1alpha1 - -import ( - api "k8s.io/kubernetes/pkg/api" - policy "k8s.io/kubernetes/pkg/apis/policy" - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func init() { - if err := api.Scheme.AddGeneratedConversionFuncs( - Convert_v1alpha1_PodDisruptionBudget_To_policy_PodDisruptionBudget, - Convert_policy_PodDisruptionBudget_To_v1alpha1_PodDisruptionBudget, - Convert_v1alpha1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList, - Convert_policy_PodDisruptionBudgetList_To_v1alpha1_PodDisruptionBudgetList, - Convert_v1alpha1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec, - Convert_policy_PodDisruptionBudgetSpec_To_v1alpha1_PodDisruptionBudgetSpec, - Convert_v1alpha1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus, - Convert_policy_PodDisruptionBudgetStatus_To_v1alpha1_PodDisruptionBudgetStatus, - ); err != nil { - // if one of the conversion functions is malformed, detect it immediately. - panic(err) - } -} - -func autoConvert_v1alpha1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in *PodDisruptionBudget, out *policy.PodDisruptionBudget, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v1alpha1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1alpha1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in *PodDisruptionBudget, out *policy.PodDisruptionBudget, s conversion.Scope) error { - return autoConvert_v1alpha1_PodDisruptionBudget_To_policy_PodDisruptionBudget(in, out, s) -} - -func autoConvert_policy_PodDisruptionBudget_To_v1alpha1_PodDisruptionBudget(in *policy.PodDisruptionBudget, out *PodDisruptionBudget, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_policy_PodDisruptionBudgetSpec_To_v1alpha1_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_policy_PodDisruptionBudgetStatus_To_v1alpha1_PodDisruptionBudgetStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_policy_PodDisruptionBudget_To_v1alpha1_PodDisruptionBudget(in *policy.PodDisruptionBudget, out *PodDisruptionBudget, s conversion.Scope) error { - return autoConvert_policy_PodDisruptionBudget_To_v1alpha1_PodDisruptionBudget(in, out, s) -} - -func autoConvert_v1alpha1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in *PodDisruptionBudgetList, out *policy.PodDisruptionBudgetList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]policy.PodDisruptionBudget, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_PodDisruptionBudget_To_policy_PodDisruptionBudget(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1alpha1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in *PodDisruptionBudgetList, out *policy.PodDisruptionBudgetList, s conversion.Scope) error { - return autoConvert_v1alpha1_PodDisruptionBudgetList_To_policy_PodDisruptionBudgetList(in, out, s) -} - -func autoConvert_policy_PodDisruptionBudgetList_To_v1alpha1_PodDisruptionBudgetList(in *policy.PodDisruptionBudgetList, out *PodDisruptionBudgetList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodDisruptionBudget, len(*in)) - for i := range *in { - if err := Convert_policy_PodDisruptionBudget_To_v1alpha1_PodDisruptionBudget(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_policy_PodDisruptionBudgetList_To_v1alpha1_PodDisruptionBudgetList(in 
*policy.PodDisruptionBudgetList, out *PodDisruptionBudgetList, s conversion.Scope) error { - return autoConvert_policy_PodDisruptionBudgetList_To_v1alpha1_PodDisruptionBudgetList(in, out, s) -} - -func autoConvert_v1alpha1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in *PodDisruptionBudgetSpec, out *policy.PodDisruptionBudgetSpec, s conversion.Scope) error { - if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.MinAvailable, &out.MinAvailable, s); err != nil { - return err - } - out.Selector = in.Selector - return nil -} - -func Convert_v1alpha1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in *PodDisruptionBudgetSpec, out *policy.PodDisruptionBudgetSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_PodDisruptionBudgetSpec_To_policy_PodDisruptionBudgetSpec(in, out, s) -} - -func autoConvert_policy_PodDisruptionBudgetSpec_To_v1alpha1_PodDisruptionBudgetSpec(in *policy.PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, s conversion.Scope) error { - if err := api.Convert_intstr_IntOrString_To_intstr_IntOrString(&in.MinAvailable, &out.MinAvailable, s); err != nil { - return err - } - out.Selector = in.Selector - return nil -} - -func Convert_policy_PodDisruptionBudgetSpec_To_v1alpha1_PodDisruptionBudgetSpec(in *policy.PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, s conversion.Scope) error { - return autoConvert_policy_PodDisruptionBudgetSpec_To_v1alpha1_PodDisruptionBudgetSpec(in, out, s) -} - -func autoConvert_v1alpha1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in *PodDisruptionBudgetStatus, out *policy.PodDisruptionBudgetStatus, s conversion.Scope) error { - out.PodDisruptionAllowed = in.PodDisruptionAllowed - out.CurrentHealthy = in.CurrentHealthy - out.DesiredHealthy = in.DesiredHealthy - out.ExpectedPods = in.ExpectedPods - return nil -} - -func Convert_v1alpha1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in *PodDisruptionBudgetStatus, out *policy.PodDisruptionBudgetStatus, s conversion.Scope) error { - return autoConvert_v1alpha1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudgetStatus(in, out, s) -} - -func autoConvert_policy_PodDisruptionBudgetStatus_To_v1alpha1_PodDisruptionBudgetStatus(in *policy.PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, s conversion.Scope) error { - out.PodDisruptionAllowed = in.PodDisruptionAllowed - out.CurrentHealthy = in.CurrentHealthy - out.DesiredHealthy = in.DesiredHealthy - out.ExpectedPods = in.ExpectedPods - return nil -} - -func Convert_policy_PodDisruptionBudgetStatus_To_v1alpha1_PodDisruptionBudgetStatus(in *policy.PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, s conversion.Scope) error { - return autoConvert_policy_PodDisruptionBudgetStatus_To_v1alpha1_PodDisruptionBudgetStatus(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/deep_copy_generated.go deleted file mode 100644 index 74680aff8..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/deep_copy_generated.go +++ /dev/null @@ -1,102 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1alpha1 - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" - intstr "k8s.io/kubernetes/pkg/util/intstr" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_v1alpha1_PodDisruptionBudget, - DeepCopy_v1alpha1_PodDisruptionBudgetList, - DeepCopy_v1alpha1_PodDisruptionBudgetSpec, - DeepCopy_v1alpha1_PodDisruptionBudgetStatus, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. - panic(err) - } -} - -func DeepCopy_v1alpha1_PodDisruptionBudget(in PodDisruptionBudget, out *PodDisruptionBudget, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1alpha1_PodDisruptionBudgetSpec(in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1alpha1_PodDisruptionBudgetStatus(in.Status, &out.Status, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1alpha1_PodDisruptionBudgetList(in PodDisruptionBudgetList, out *PodDisruptionBudgetList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]PodDisruptionBudget, len(in)) - for i := range in { - if err := DeepCopy_v1alpha1_PodDisruptionBudget(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1alpha1_PodDisruptionBudgetSpec(in PodDisruptionBudgetSpec, out *PodDisruptionBudgetSpec, c *conversion.Cloner) error { - if err := intstr.DeepCopy_intstr_IntOrString(in.MinAvailable, &out.MinAvailable, c); err != nil { - return err - } - if in.Selector != nil { - in, out := in.Selector, &out.Selector - *out = new(unversioned.LabelSelector) - if err := unversioned.DeepCopy_unversioned_LabelSelector(*in, *out, c); err != nil { - return err - } - } else { - out.Selector = nil - } - return nil -} - -func DeepCopy_v1alpha1_PodDisruptionBudgetStatus(in PodDisruptionBudgetStatus, out *PodDisruptionBudgetStatus, c *conversion.Cloner) error { - out.PodDisruptionAllowed = in.PodDisruptionAllowed - out.CurrentHealthy = in.CurrentHealthy - out.DesiredHealthy = in.DesiredHealthy - out.ExpectedPods = in.ExpectedPods - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/doc.go deleted file mode 100644 index 5cb716c29..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package policy is for any kind of policy object. Suitable examples, even if -// they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, -// NetworkPolicy, etc. -// +genconversion=true -package v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.pb.go deleted file mode 100644 index 867a6b0a6..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.pb.go +++ /dev/null @@ -1,903 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto -// DO NOT EDIT! - -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto - - It has these top-level messages: - PodDisruptionBudget - PodDisruptionBudgetList - PodDisruptionBudgetSpec - PodDisruptionBudgetStatus -*/ -package v1alpha1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_kubernetes_pkg_api_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } -func (m *PodDisruptionBudget) String() string { return proto.CompactTextString(m) } -func (*PodDisruptionBudget) ProtoMessage() {} - -func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } -func (m *PodDisruptionBudgetList) String() string { return proto.CompactTextString(m) } -func (*PodDisruptionBudgetList) ProtoMessage() {} - -func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } -func (m *PodDisruptionBudgetSpec) String() string { return proto.CompactTextString(m) } -func (*PodDisruptionBudgetSpec) ProtoMessage() {} - -func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } -func (m *PodDisruptionBudgetStatus) String() string { return proto.CompactTextString(m) } -func (*PodDisruptionBudgetStatus) ProtoMessage() {} - -func init() { - proto.RegisterType((*PodDisruptionBudget)(nil), "k8s.io.kubernetes.pkg.apis.policy.v1alpha1.PodDisruptionBudget") - proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.kubernetes.pkg.apis.policy.v1alpha1.PodDisruptionBudgetList") - proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.kubernetes.pkg.apis.policy.v1alpha1.PodDisruptionBudgetSpec") - proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.kubernetes.pkg.apis.policy.v1alpha1.PodDisruptionBudgetStatus") -} -func (m *PodDisruptionBudget) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodDisruptionBudget) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *PodDisruptionBudgetList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodDisruptionBudgetList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n4 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PodDisruptionBudgetSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodDisruptionBudgetSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.MinAvailable.Size())) - n5, err := m.MinAvailable.MarshalTo(data[i:]) - if err != 
nil { - return 0, err - } - i += n5 - if m.Selector != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Selector.Size())) - n6, err := m.Selector.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n6 - } - return i, nil -} - -func (m *PodDisruptionBudgetStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PodDisruptionBudgetStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - if m.PodDisruptionAllowed { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.CurrentHealthy)) - data[i] = 0x18 - i++ - i = encodeVarintGenerated(data, i, uint64(m.DesiredHealthy)) - data[i] = 0x20 - i++ - i = encodeVarintGenerated(data, i, uint64(m.ExpectedPods)) - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *PodDisruptionBudget) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodDisruptionBudgetList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodDisruptionBudgetSpec) Size() (n int) { - var l int - _ = l - l = m.MinAvailable.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *PodDisruptionBudgetStatus) Size() (n int) { - var l int - _ = l - n += 2 - n += 1 + sovGenerated(uint64(m.CurrentHealthy)) - n += 1 + sovGenerated(uint64(m.DesiredHealthy)) - n += 1 + sovGenerated(uint64(m.ExpectedPods)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *PodDisruptionBudget) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudget: wiretype end group for 
non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudget: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudgetList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PodDisruptionBudget{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudgetSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MinAvailable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.MinAvailable.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_kubernetes_pkg_api_unversioned.LabelSelector{} - } - if err := m.Selector.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*PodDisruptionBudgetStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PodDisruptionAllowed", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.PodDisruptionAllowed = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentHealthy", wireType) - } - m.CurrentHealthy = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.CurrentHealthy |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredHealthy", wireType) - } - m.DesiredHealthy = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.DesiredHealthy |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPods", wireType) - } - m.ExpectedPods = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.ExpectedPods |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - 
length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto deleted file mode 100644 index 866d0ae57..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/generated.proto +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.policy.v1alpha1; - -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods -message PodDisruptionBudget { - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior of the PodDisruptionBudget. - optional PodDisruptionBudgetSpec spec = 2; - - // Most recently observed status of the PodDisruptionBudget. - optional PodDisruptionBudgetStatus status = 3; -} - -// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. -message PodDisruptionBudgetList { - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - repeated PodDisruptionBudget items = 2; -} - -// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. -message PodDisruptionBudgetSpec { - // The minimum number of pods that must be available simultaneously. This - // can be either an integer or a string specifying a percentage, e.g. "28%". 
- optional k8s.io.kubernetes.pkg.util.intstr.IntOrString minAvailable = 1; - - // Label query over pods whose evictions are managed by the disruption - // budget. - optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 2; -} - -// PodDisruptionBudgetStatus represents information about the status of a -// PodDisruptionBudget. Status may trail the actual state of a system. -message PodDisruptionBudgetStatus { - // Whether or not a disruption is currently allowed. - optional bool disruptionAllowed = 1; - - // current number of healthy pods - optional int32 currentHealthy = 2; - - // minimum desired number of healthy pods - optional int32 desiredHealthy = 3; - - // total number of pods counted by this disruption budget - optional int32 expectedPods = 4; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/register.go deleted file mode 100644 index a6a94d96d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/register.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - versionedwatch "k8s.io/kubernetes/pkg/watch/versioned" -) - -// GroupName is the group name use in this package -const GroupName = "policy" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1alpha1"} - -func AddToScheme(scheme *runtime.Scheme) { - addKnownTypes(scheme) - /* - addDefaultingFuncs(scheme) - addConversionFuncs(scheme) - */ -} - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) { - scheme.AddKnownTypes(SchemeGroupVersion, - &PodDisruptionBudget{}, - &PodDisruptionBudgetList{}, - &v1.ListOptions{}, - &v1.DeleteOptions{}, - ) - // Add the watch version that applies - versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.generated.go deleted file mode 100644 index 7ed4308bc..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.generated.go +++ /dev/null @@ -1,1440 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v1alpha1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg3_v1 "k8s.io/kubernetes/pkg/api/v1" - pkg4_types "k8s.io/kubernetes/pkg/types" - pkg1_intstr "k8s.io/kubernetes/pkg/util/intstr" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg2_unversioned.LabelSelector - var v1 pkg3_v1.ObjectMeta - var v2 pkg4_types.UID - var v3 pkg1_intstr.IntOrString - var v4 time.Time - _, _, _, _, _ = v0, v1, v2, v3, v4 - } -} - -func (x *PodDisruptionBudgetSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = x.Selector != nil - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.MinAvailable - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else if !yym5 && z.IsJSONHandle() { - z.EncJSONMarshal(yy4) - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minAvailable")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.MinAvailable - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else if !yym7 && z.IsJSONHandle() { - z.EncJSONMarshal(yy6) - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - if x.Selector == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("selector")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Selector == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(x.Selector) { - } else { - z.EncFallback(x.Selector) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodDisruptionBudgetSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "minAvailable": - if r.TryDecodeAsNil() { - x.MinAvailable = pkg1_intstr.IntOrString{} - } else { - yyv4 := &x.MinAvailable - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else if !yym5 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv4) - } else { - z.DecFallback(yyv4, false) - } - } - case "selector": - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg2_unversioned.LabelSelector) - } - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodDisruptionBudgetSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinAvailable = pkg1_intstr.IntOrString{} - } else { - yyv9 := &x.MinAvailable - yym10 := z.DecBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.DecExt(yyv9) { - } else if !yym10 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv9) - } else { - 
z.DecFallback(yyv9, false) - } - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.Selector != nil { - x.Selector = nil - } - } else { - if x.Selector == nil { - x.Selector = new(pkg2_unversioned.LabelSelector) - } - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(x.Selector) { - } else { - z.DecFallback(x.Selector, false) - } - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodDisruptionBudgetStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 4 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeBool(bool(x.PodDisruptionAllowed)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("disruptionAllowed")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeBool(bool(x.PodDisruptionAllowed)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeInt(int64(x.CurrentHealthy)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("currentHealthy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeInt(int64(x.CurrentHealthy)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeInt(int64(x.DesiredHealthy)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("desiredHealthy")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeInt(int64(x.DesiredHealthy)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeInt(int64(x.ExpectedPods)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("expectedPods")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeInt(int64(x.ExpectedPods)) - } - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodDisruptionBudgetStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "disruptionAllowed": - if r.TryDecodeAsNil() { - x.PodDisruptionAllowed = false - } else { - x.PodDisruptionAllowed = bool(r.DecodeBool()) - } - case "currentHealthy": - if r.TryDecodeAsNil() { - x.CurrentHealthy = 0 - } else { - x.CurrentHealthy = int32(r.DecodeInt(32)) - } - case "desiredHealthy": - if r.TryDecodeAsNil() { - x.DesiredHealthy = 0 - } else { - x.DesiredHealthy = int32(r.DecodeInt(32)) - } - case "expectedPods": - if r.TryDecodeAsNil() { - x.ExpectedPods = 0 - } else { - x.ExpectedPods = int32(r.DecodeInt(32)) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodDisruptionBudgetStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.PodDisruptionAllowed = false - } else { - x.PodDisruptionAllowed = bool(r.DecodeBool()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.CurrentHealthy = 0 - } else { - x.CurrentHealthy = int32(r.DecodeInt(32)) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.DesiredHealthy = 0 - } else { - x.DesiredHealthy = int32(r.DecodeInt(32)) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 
= r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ExpectedPods = 0 - } else { - x.ExpectedPods = int32(r.DecodeInt(32)) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodDisruptionBudget) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[1] = true - yyq2[2] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 0 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy9 := &x.Spec - yy9.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy11 := &x.Spec - yy11.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yy14 := &x.Status - yy14.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy16 := &x.Status - yy16.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym23 := z.EncBinary() - _ = yym23 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodDisruptionBudget) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodDisruptionBudget) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = PodDisruptionBudgetSpec{} - } else { - yyv5 := &x.Spec - yyv5.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = PodDisruptionBudgetStatus{} - } else { - yyv6 := &x.Status - yyv6.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodDisruptionBudget) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = 
PodDisruptionBudgetSpec{} - } else { - yyv11 := &x.Spec - yyv11.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = PodDisruptionBudgetStatus{} - } else { - yyv12 := &x.Status - yyv12.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *PodDisruptionBudgetList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSlicePodDisruptionBudget(([]PodDisruptionBudget)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PodDisruptionBudgetList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PodDisruptionBudgetList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PodDisruptionBudgetList) 
codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSlicePodDisruptionBudget((*[]PodDisruptionBudget)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSlicePodDisruptionBudget(v []PodDisruptionBudget, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePodDisruptionBudget(v *[]PodDisruptionBudget, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PodDisruptionBudget{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 296) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PodDisruptionBudget, yyrl1) - } - } else { - yyv1 = make([]PodDisruptionBudget, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - 
yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodDisruptionBudget{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PodDisruptionBudget{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodDisruptionBudget{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PodDisruptionBudget{}) // var yyz1 PodDisruptionBudget - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PodDisruptionBudget{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PodDisruptionBudget{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.go deleted file mode 100644 index 1f3265ae2..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/util/intstr" -) - -// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. -type PodDisruptionBudgetSpec struct { - // The minimum number of pods that must be available simultaneously. This - // can be either an integer or a string specifying a percentage, e.g. "28%". - MinAvailable intstr.IntOrString `json:"minAvailable,omitempty" protobuf:"bytes,1,opt,name=minAvailable"` - - // Label query over pods whose evictions are managed by the disruption - // budget. - Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` -} - -// PodDisruptionBudgetStatus represents information about the status of a -// PodDisruptionBudget. Status may trail the actual state of a system. -type PodDisruptionBudgetStatus struct { - // Whether or not a disruption is currently allowed. 
- PodDisruptionAllowed bool `json:"disruptionAllowed" protobuf:"varint,1,opt,name=disruptionAllowed"` - - // current number of healthy pods - CurrentHealthy int32 `json:"currentHealthy" protobuf:"varint,2,opt,name=currentHealthy"` - - // minimum desired number of healthy pods - DesiredHealthy int32 `json:"desiredHealthy" protobuf:"varint,3,opt,name=desiredHealthy"` - - // total number of pods counted by this disruption budget - ExpectedPods int32 `json:"expectedPods" protobuf:"varint,4,opt,name=expectedPods"` -} - -// +genclient=true,noMethods=true - -// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods -type PodDisruptionBudget struct { - unversioned.TypeMeta `json:",inline"` - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired behavior of the PodDisruptionBudget. - Spec PodDisruptionBudgetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Most recently observed status of the PodDisruptionBudget. - Status PodDisruptionBudgetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. -type PodDisruptionBudgetList struct { - unversioned.TypeMeta `json:",inline"` - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index 8ca1782f4..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/policy/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. 
-// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_PodDisruptionBudget = map[string]string{ - "": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", - "spec": "Specification of the desired behavior of the PodDisruptionBudget.", - "status": "Most recently observed status of the PodDisruptionBudget.", -} - -func (PodDisruptionBudget) SwaggerDoc() map[string]string { - return map_PodDisruptionBudget -} - -var map_PodDisruptionBudgetList = map[string]string{ - "": "PodDisruptionBudgetList is a collection of PodDisruptionBudgets.", -} - -func (PodDisruptionBudgetList) SwaggerDoc() map[string]string { - return map_PodDisruptionBudgetList -} - -var map_PodDisruptionBudgetSpec = map[string]string{ - "": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.", - "minAvailable": "The minimum number of pods that must be available simultaneously. This can be either an integer or a string specifying a percentage, e.g. \"28%\".", - "selector": "Label query over pods whose evictions are managed by the disruption budget.", -} - -func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { - return map_PodDisruptionBudgetSpec -} - -var map_PodDisruptionBudgetStatus = map[string]string{ - "": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.", - "disruptionAllowed": "Whether or not a disruption is currently allowed.", - "currentHealthy": "current number of healthy pods", - "desiredHealthy": "minimum desired number of healthy pods", - "expectedPods": "total number of pods counted by this disruption budget", -} - -func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string { - return map_PodDisruptionBudgetStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/deep_copy_generated.go deleted file mode 100644 index 5e9339a98..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/deep_copy_generated.go +++ /dev/null @@ -1,274 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
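For reference, the policy/v1alpha1 API whose vendored definition is deleted above can be exercised as in the sketch below. It is a minimal, illustrative example only: it assumes the pre-removal vendor tree (k8s.io/kubernetes/pkg/apis/policy/v1alpha1 together with pkg/api/v1, pkg/api/unversioned and pkg/util/intstr) is still importable, and the object names, values ("web-pdb", "80%", the label selector) are hypothetical; the field names themselves come straight from the deleted types.go.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/api/v1"
	policy "k8s.io/kubernetes/pkg/apis/policy/v1alpha1"
	"k8s.io/kubernetes/pkg/util/intstr"
)

func main() {
	// A budget requiring at least 80% of the selected pods to stay available.
	// MinAvailable accepts either an integer or a percentage string, per the
	// deleted PodDisruptionBudgetSpec doc comment.
	pdb := policy.PodDisruptionBudget{
		TypeMeta:   unversioned.TypeMeta{Kind: "PodDisruptionBudget", APIVersion: "policy/v1alpha1"},
		ObjectMeta: v1.ObjectMeta{Name: "web-pdb", Namespace: "default"},
		Spec: policy.PodDisruptionBudgetSpec{
			MinAvailable: intstr.FromString("80%"),
			Selector: &unversioned.LabelSelector{
				MatchLabels: map[string]string{"app": "web"},
			},
		},
	}

	// Status fields mirror the generated codec and swagger docs deleted above.
	pdb.Status = policy.PodDisruptionBudgetStatus{
		PodDisruptionAllowed: true,
		CurrentHealthy:       4,
		DesiredHealthy:       4,
		ExpectedPods:         5,
	}

	fmt.Printf("%s: minAvailable=%s disruptionAllowed=%v\n",
		pdb.Name, pdb.Spec.MinAvailable.String(), pdb.Status.PodDisruptionAllowed)
}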
- -package rbac - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_rbac_ClusterRole, - DeepCopy_rbac_ClusterRoleBinding, - DeepCopy_rbac_ClusterRoleBindingList, - DeepCopy_rbac_ClusterRoleList, - DeepCopy_rbac_PolicyRule, - DeepCopy_rbac_Role, - DeepCopy_rbac_RoleBinding, - DeepCopy_rbac_RoleBindingList, - DeepCopy_rbac_RoleList, - DeepCopy_rbac_Subject, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. - panic(err) - } -} - -func DeepCopy_rbac_ClusterRole(in ClusterRole, out *ClusterRole, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if in.Rules != nil { - in, out := in.Rules, &out.Rules - *out = make([]PolicyRule, len(in)) - for i := range in { - if err := DeepCopy_rbac_PolicyRule(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Rules = nil - } - return nil -} - -func DeepCopy_rbac_ClusterRoleBinding(in ClusterRoleBinding, out *ClusterRoleBinding, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if in.Subjects != nil { - in, out := in.Subjects, &out.Subjects - *out = make([]Subject, len(in)) - for i := range in { - if err := DeepCopy_rbac_Subject(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Subjects = nil - } - if err := api.DeepCopy_api_ObjectReference(in.RoleRef, &out.RoleRef, c); err != nil { - return err - } - return nil -} - -func DeepCopy_rbac_ClusterRoleBindingList(in ClusterRoleBindingList, out *ClusterRoleBindingList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ClusterRoleBinding, len(in)) - for i := range in { - if err := DeepCopy_rbac_ClusterRoleBinding(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_rbac_ClusterRoleList(in ClusterRoleList, out *ClusterRoleList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ClusterRole, len(in)) - for i := range in { - if err := DeepCopy_rbac_ClusterRole(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_rbac_PolicyRule(in PolicyRule, out *PolicyRule, c *conversion.Cloner) error { - if in.Verbs != nil { - in, out := in.Verbs, &out.Verbs - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Verbs = nil - } - if in.AttributeRestrictions == nil { - out.AttributeRestrictions = nil - } else if newVal, err 
:= c.DeepCopy(in.AttributeRestrictions); err != nil { - return err - } else { - out.AttributeRestrictions = newVal.(runtime.Object) - } - if in.APIGroups != nil { - in, out := in.APIGroups, &out.APIGroups - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.APIGroups = nil - } - if in.Resources != nil { - in, out := in.Resources, &out.Resources - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Resources = nil - } - if in.ResourceNames != nil { - in, out := in.ResourceNames, &out.ResourceNames - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.ResourceNames = nil - } - if in.NonResourceURLs != nil { - in, out := in.NonResourceURLs, &out.NonResourceURLs - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.NonResourceURLs = nil - } - return nil -} - -func DeepCopy_rbac_Role(in Role, out *Role, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if in.Rules != nil { - in, out := in.Rules, &out.Rules - *out = make([]PolicyRule, len(in)) - for i := range in { - if err := DeepCopy_rbac_PolicyRule(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Rules = nil - } - return nil -} - -func DeepCopy_rbac_RoleBinding(in RoleBinding, out *RoleBinding, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := api.DeepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if in.Subjects != nil { - in, out := in.Subjects, &out.Subjects - *out = make([]Subject, len(in)) - for i := range in { - if err := DeepCopy_rbac_Subject(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Subjects = nil - } - if err := api.DeepCopy_api_ObjectReference(in.RoleRef, &out.RoleRef, c); err != nil { - return err - } - return nil -} - -func DeepCopy_rbac_RoleBindingList(in RoleBindingList, out *RoleBindingList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]RoleBinding, len(in)) - for i := range in { - if err := DeepCopy_rbac_RoleBinding(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_rbac_RoleList(in RoleList, out *RoleList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Role, len(in)) - for i := range in { - if err := DeepCopy_rbac_Role(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_rbac_Subject(in Subject, out *Subject, c *conversion.Cloner) error { - out.Kind = in.Kind - out.APIVersion = in.APIVersion - out.Name = in.Name - out.Namespace = in.Namespace - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go 
deleted file mode 100644 index 15f91da2c..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +groupName=rbac.authorization.k8s.io -package rbac diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/install.go deleted file mode 100644 index 8cac247f4..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/install/install.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the batch API group, making it available as -// an option to all of the API encoding/decoding machinery. -package install - -import ( - "fmt" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/rbac" - "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/sets" -) - -const importPrefix = "k8s.io/kubernetes/pkg/apis/rbac" - -var accessor = meta.NewAccessor() - -// availableVersions lists all known external versions for this group from most preferred to least preferred -var availableVersions = []unversioned.GroupVersion{v1alpha1.SchemeGroupVersion} - -func init() { - registered.RegisterVersions(availableVersions) - externalVersions := []unversioned.GroupVersion{} - for _, v := range availableVersions { - if registered.IsAllowedVersion(v) { - externalVersions = append(externalVersions, v) - } - } - if len(externalVersions) == 0 { - glog.V(4).Infof("No version is registered for group %v", rbac.GroupName) - return - } - - if err := registered.EnableVersions(externalVersions...); err != nil { - glog.V(4).Infof("%v", err) - return - } - if err := enableVersions(externalVersions); err != nil { - glog.V(4).Infof("%v", err) - return - } -} - -// TODO: enableVersions should be centralized rather than spread in each API -// group. -// We can combine registered.RegisterVersions, registered.EnableVersions and -// registered.RegisterGroup once we have moved enableVersions there. -func enableVersions(externalVersions []unversioned.GroupVersion) error { - addVersionsToScheme(externalVersions...) 
- preferredExternalVersion := externalVersions[0] - - groupMeta := apimachinery.GroupMeta{ - GroupVersion: preferredExternalVersion, - GroupVersions: externalVersions, - RESTMapper: newRESTMapper(externalVersions), - SelfLinker: runtime.SelfLinker(accessor), - InterfacesFor: interfacesFor, - } - - if err := registered.RegisterGroup(groupMeta); err != nil { - return err - } - api.RegisterRESTMapper(groupMeta.RESTMapper) - return nil -} - -func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper { - rootScoped := sets.NewString( - "ClusterRole", - "ClusterRoleBinding", - ) - - ignoredKinds := sets.NewString() - - return api.NewDefaultRESTMapper(externalVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped) -} - -// interfacesFor returns the default Codec and ResourceVersioner for a given version -// string, or an error if the version is not known. -func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { - switch version { - case v1alpha1.SchemeGroupVersion: - return &meta.VersionInterfaces{ - ObjectConvertor: api.Scheme, - MetadataAccessor: accessor, - }, nil - default: - g, _ := registered.Group(rbac.GroupName) - return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) - } -} - -func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { - // add the internal version to Scheme - rbac.AddToScheme(api.Scheme) - // add the enabled external versions to Scheme - for _, v := range externalVersions { - if !registered.IsEnabledVersion(v) { - glog.Errorf("Version %s is not enabled, so it will not be added to the Scheme.", v) - continue - } - switch v { - case v1alpha1.SchemeGroupVersion: - v1alpha1.AddToScheme(api.Scheme) - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/register.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/register.go deleted file mode 100644 index 58464d74d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/register.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rbac - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/watch/versioned" -) - -const GroupName = "rbac.authorization.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) unversioned.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns back a Group qualified GroupResource -func Resource(resource string) unversioned.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -func AddToScheme(scheme *runtime.Scheme) { - // Add the API to Scheme. 
- addKnownTypes(scheme) -} - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) { - scheme.AddKnownTypes(SchemeGroupVersion, - &Role{}, - &RoleBinding{}, - &RoleBindingList{}, - &RoleList{}, - - &ClusterRole{}, - &ClusterRoleBinding{}, - &ClusterRoleBindingList{}, - &ClusterRoleList{}, - - &api.ListOptions{}, - &api.DeleteOptions{}, - &api.ExportOptions{}, - ) - versioned.AddToGroupVersion(scheme, SchemeGroupVersion) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/types.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/types.go deleted file mode 100644 index a35eb7db9..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/types.go +++ /dev/null @@ -1,176 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rbac - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// Authorization is calculated against -// 1. evaluation of ClusterRoleBindings - short circuit on match -// 2. evaluation of RoleBindings in the namespace requested - short circuit on match -// 3. deny by default - -const ( - APIGroupAll = "*" - ResourceAll = "*" - VerbAll = "*" - NonResourceAll = "*" - - GroupKind = "Group" - ServiceAccountKind = "ServiceAccount" - UserKind = "User" - - UserAll = "*" -) - -// PolicyRule holds information that describes a policy rule, but does not contain information -// about who the rule applies to or which namespace the rule applies to. -type PolicyRule struct { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. - Verbs []string - // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. - // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. - AttributeRestrictions runtime.Object - // APIGroups is the name of the APIGroup that contains the resources. - // If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. - APIGroups []string - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. - Resources []string - // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. - ResourceNames []string - // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path - // If an action is not a resource API request, then the URL is split on '/' and is checked against the NonResourceURLs to look for a match. - // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. - NonResourceURLs []string -} - -// Subject contains a reference to the object or user identities a role binding applies to. 
This can either hold a direct API object reference, -// or a value for non-objects such as user and group names. -type Subject struct { - // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". - // If the Authorizer does not recognized the kind value, the Authorizer should report an error. - Kind string - // APIVersion holds the API group and version of the referenced object. For non-object references such as "Group" and "User" this is - // expected to be API version of this API group. For example "rbac/v1alpha1". - APIVersion string - // Name of the object being referenced. - Name string - // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty - // the Authorizer should report an error. - Namespace string -} - -// +genclient=true - -// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. -type Role struct { - unversioned.TypeMeta - // Standard object's metadata. - api.ObjectMeta - - // Rules holds all the PolicyRules for this Role - Rules []PolicyRule -} - -// +genclient=true - -// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. -// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given -// namespace only have effect in that namespace. -type RoleBinding struct { - unversioned.TypeMeta - api.ObjectMeta - - // Subjects holds references to the objects the role applies to. - Subjects []Subject - - // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. - // If the RoleRef cannot be resolved, the Authorizer must return an error. - RoleRef api.ObjectReference -} - -// RoleBindingList is a collection of RoleBindings -type RoleBindingList struct { - unversioned.TypeMeta - // Standard object's metadata. - unversioned.ListMeta - - // Items is a list of roleBindings - Items []RoleBinding -} - -// RoleList is a collection of Roles -type RoleList struct { - unversioned.TypeMeta - // Standard object's metadata. - unversioned.ListMeta - - // Items is a list of roles - Items []Role -} - -// +genclient=true,nonNamespaced=true - -// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. -type ClusterRole struct { - unversioned.TypeMeta - // Standard object's metadata. - api.ObjectMeta - - // Rules holds all the PolicyRules for this ClusterRole - Rules []PolicyRule -} - -// +genclient=true,nonNamespaced=true - -// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, -// and adds who information via Subject. -type ClusterRoleBinding struct { - unversioned.TypeMeta - // Standard object's metadata. - api.ObjectMeta - - // Subjects holds references to the objects the role applies to. - Subjects []Subject - - // RoleRef can only reference a ClusterRole in the global namespace. - // If the RoleRef cannot be resolved, the Authorizer must return an error. - RoleRef api.ObjectReference -} - -// ClusterRoleBindingList is a collection of ClusterRoleBindings -type ClusterRoleBindingList struct { - unversioned.TypeMeta - // Standard object's metadata. 
- unversioned.ListMeta - - // Items is a list of ClusterRoleBindings - Items []ClusterRoleBinding -} - -// ClusterRoleList is a collection of ClusterRoles -type ClusterRoleList struct { - unversioned.TypeMeta - // Standard object's metadata. - unversioned.ListMeta - - // Items is a list of ClusterRoles - Items []ClusterRole -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/conversion_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/conversion_generated.go deleted file mode 100644 index f176aa090..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/conversion_generated.go +++ /dev/null @@ -1,536 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1alpha1 - -import ( - api "k8s.io/kubernetes/pkg/api" - rbac "k8s.io/kubernetes/pkg/apis/rbac" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" -) - -func init() { - if err := api.Scheme.AddGeneratedConversionFuncs( - Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole, - Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole, - Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding, - Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding, - Convert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList, - Convert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList, - Convert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList, - Convert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList, - Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule, - Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule, - Convert_v1alpha1_Role_To_rbac_Role, - Convert_rbac_Role_To_v1alpha1_Role, - Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding, - Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding, - Convert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList, - Convert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList, - Convert_v1alpha1_RoleList_To_rbac_RoleList, - Convert_rbac_RoleList_To_v1alpha1_RoleList, - Convert_v1alpha1_Subject_To_rbac_Subject, - Convert_rbac_Subject_To_v1alpha1_Subject, - ); err != nil { - // if one of the conversion functions is malformed, detect it immediately. - panic(err) - } -} - -func autoConvert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? 
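Similarly, the internal rbac types removed above (PolicyRule, Role, RoleBinding, Subject and the *All constants) can be put together as in this sketch. It assumes the pre-removal vendor packages k8s.io/kubernetes/pkg/apis/rbac and k8s.io/kubernetes/pkg/api; the role name, namespace and user ("pod-reader", "default", "jane") are placeholders, while the struct fields and constants are taken from the deleted rbac/types.go.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/rbac"
)

func main() {
	// A namespaced Role granting read access to pods.
	role := rbac.Role{
		ObjectMeta: api.ObjectMeta{Name: "pod-reader", Namespace: "default"},
		Rules: []rbac.PolicyRule{{
			Verbs:     []string{"get", "list", "watch"},
			APIGroups: []string{rbac.APIGroupAll},
			Resources: []string{"pods"},
		}},
	}

	// Bind the Role to a user; per the deleted comments, RoleBindings only
	// take effect in the namespace they live in.
	binding := rbac.RoleBinding{
		ObjectMeta: api.ObjectMeta{Name: "read-pods", Namespace: "default"},
		Subjects: []rbac.Subject{{
			Kind: rbac.UserKind,
			Name: "jane",
		}},
		RoleRef: api.ObjectReference{Kind: "Role", Name: role.Name, Namespace: role.Namespace},
	}

	fmt.Printf("%s -> %s for %s\n", binding.Name, binding.RoleRef.Name, binding.Subjects[0].Name)
}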
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]rbac.PolicyRule, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Rules = nil - } - return nil -} - -func Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in *ClusterRole, out *rbac.ClusterRole, s conversion.Scope) error { - return autoConvert_v1alpha1_ClusterRole_To_rbac_ClusterRole(in, out, s) -} - -func autoConvert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]PolicyRule, len(*in)) - for i := range *in { - if err := Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Rules = nil - } - return nil -} - -func Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in *rbac.ClusterRole, out *ClusterRole, s conversion.Scope) error { - return autoConvert_rbac_ClusterRole_To_v1alpha1_ClusterRole(in, out, s) -} - -func autoConvert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if in.Subjects != nil { - in, out := &in.Subjects, &out.Subjects - *out = make([]rbac.Subject, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_Subject_To_rbac_Subject(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Subjects = nil - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.RoleRef, &out.RoleRef, 0); err != nil { - return err - } - return nil -} - -func Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in *ClusterRoleBinding, out *rbac.ClusterRoleBinding, s conversion.Scope) error { - return autoConvert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(in, out, s) -} - -func autoConvert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if in.Subjects != nil { - in, out := &in.Subjects, &out.Subjects - *out = make([]Subject, len(*in)) - for i := range *in { - if err := Convert_rbac_Subject_To_v1alpha1_Subject(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Subjects = nil - } - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.RoleRef, &out.RoleRef, 0); err != nil { - return err - } - return nil -} - -func Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in *rbac.ClusterRoleBinding, out *ClusterRoleBinding, s conversion.Scope) error { - return autoConvert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(in, out, s) -} - -func autoConvert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]rbac.ClusterRoleBinding, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_ClusterRoleBinding_To_rbac_ClusterRoleBinding(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in *ClusterRoleBindingList, out *rbac.ClusterRoleBindingList, s conversion.Scope) error { - return autoConvert_v1alpha1_ClusterRoleBindingList_To_rbac_ClusterRoleBindingList(in, out, s) -} - -func autoConvert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterRoleBinding, len(*in)) - for i := range *in { - if err := Convert_rbac_ClusterRoleBinding_To_v1alpha1_ClusterRoleBinding(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in *rbac.ClusterRoleBindingList, out *ClusterRoleBindingList, s conversion.Scope) error { - return autoConvert_rbac_ClusterRoleBindingList_To_v1alpha1_ClusterRoleBindingList(in, out, s) -} - -func autoConvert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]rbac.ClusterRole, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_ClusterRole_To_rbac_ClusterRole(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in *ClusterRoleList, out *rbac.ClusterRoleList, s conversion.Scope) error { - return autoConvert_v1alpha1_ClusterRoleList_To_rbac_ClusterRoleList(in, out, s) -} - -func autoConvert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, 
&out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterRole, len(*in)) - for i := range *in { - if err := Convert_rbac_ClusterRole_To_v1alpha1_ClusterRole(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in *rbac.ClusterRoleList, out *ClusterRoleList, s conversion.Scope) error { - return autoConvert_rbac_ClusterRoleList_To_v1alpha1_ClusterRoleList(in, out, s) -} - -func autoConvert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error { - out.Verbs = in.Verbs - if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.AttributeRestrictions, &out.AttributeRestrictions, s); err != nil { - return err - } - out.APIGroups = in.APIGroups - out.Resources = in.Resources - out.ResourceNames = in.ResourceNames - out.NonResourceURLs = in.NonResourceURLs - return nil -} - -func Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in *PolicyRule, out *rbac.PolicyRule, s conversion.Scope) error { - return autoConvert_v1alpha1_PolicyRule_To_rbac_PolicyRule(in, out, s) -} - -func autoConvert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error { - out.Verbs = in.Verbs - if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.AttributeRestrictions, &out.AttributeRestrictions, s); err != nil { - return err - } - out.APIGroups = in.APIGroups - out.Resources = in.Resources - out.ResourceNames = in.ResourceNames - out.NonResourceURLs = in.NonResourceURLs - return nil -} - -func Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in *rbac.PolicyRule, out *PolicyRule, s conversion.Scope) error { - return autoConvert_rbac_PolicyRule_To_v1alpha1_PolicyRule(in, out, s) -} - -func autoConvert_v1alpha1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]rbac.PolicyRule, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_PolicyRule_To_rbac_PolicyRule(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Rules = nil - } - return nil -} - -func Convert_v1alpha1_Role_To_rbac_Role(in *Role, out *rbac.Role, s conversion.Scope) error { - return autoConvert_v1alpha1_Role_To_rbac_Role(in, out, s) -} - -func autoConvert_rbac_Role_To_v1alpha1_Role(in *rbac.Role, out *Role, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]PolicyRule, len(*in)) - for i := range *in { - if err := Convert_rbac_PolicyRule_To_v1alpha1_PolicyRule(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Rules = nil - } - return nil -} - -func Convert_rbac_Role_To_v1alpha1_Role(in *rbac.Role, out *Role, s conversion.Scope) error { - return autoConvert_rbac_Role_To_v1alpha1_Role(in, out, s) -} - -func autoConvert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if in.Subjects != nil { - in, out := &in.Subjects, &out.Subjects - *out = make([]rbac.Subject, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_Subject_To_rbac_Subject(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Subjects = nil - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.RoleRef, &out.RoleRef, 0); err != nil { - return err - } - return nil -} - -func Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in *RoleBinding, out *rbac.RoleBinding, s conversion.Scope) error { - return autoConvert_v1alpha1_RoleBinding_To_rbac_RoleBinding(in, out, s) -} - -func autoConvert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if in.Subjects != nil { - in, out := &in.Subjects, &out.Subjects - *out = make([]Subject, len(*in)) - for i := range *in { - if err := Convert_rbac_Subject_To_v1alpha1_Subject(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Subjects = nil - } - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.RoleRef, &out.RoleRef, 0); err != nil { - return err - } - return nil -} - -func Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in *rbac.RoleBinding, out *RoleBinding, s conversion.Scope) error { - return autoConvert_rbac_RoleBinding_To_v1alpha1_RoleBinding(in, out, s) -} - -func autoConvert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]rbac.RoleBinding, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_RoleBinding_To_rbac_RoleBinding(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in *RoleBindingList, out *rbac.RoleBindingList, s conversion.Scope) error { - return autoConvert_v1alpha1_RoleBindingList_To_rbac_RoleBindingList(in, out, s) -} - -func autoConvert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]RoleBinding, len(*in)) - for i := range *in { - if err := Convert_rbac_RoleBinding_To_v1alpha1_RoleBinding(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in *rbac.RoleBindingList, out *RoleBindingList, s conversion.Scope) error { - return autoConvert_rbac_RoleBindingList_To_v1alpha1_RoleBindingList(in, out, s) -} - -func autoConvert_v1alpha1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]rbac.Role, len(*in)) - for i := range *in { - if err := Convert_v1alpha1_Role_To_rbac_Role(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1alpha1_RoleList_To_rbac_RoleList(in *RoleList, out *rbac.RoleList, s conversion.Scope) error { - return autoConvert_v1alpha1_RoleList_To_rbac_RoleList(in, out, s) -} - -func autoConvert_rbac_RoleList_To_v1alpha1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Role, len(*in)) - for i := range *in { - if err := Convert_rbac_Role_To_v1alpha1_Role(&(*in)[i], 
&(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_rbac_RoleList_To_v1alpha1_RoleList(in *rbac.RoleList, out *RoleList, s conversion.Scope) error { - return autoConvert_rbac_RoleList_To_v1alpha1_RoleList(in, out, s) -} - -func autoConvert_v1alpha1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error { - out.Kind = in.Kind - out.APIVersion = in.APIVersion - out.Name = in.Name - out.Namespace = in.Namespace - return nil -} - -func Convert_v1alpha1_Subject_To_rbac_Subject(in *Subject, out *rbac.Subject, s conversion.Scope) error { - return autoConvert_v1alpha1_Subject_To_rbac_Subject(in, out, s) -} - -func autoConvert_rbac_Subject_To_v1alpha1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error { - out.Kind = in.Kind - out.APIVersion = in.APIVersion - out.Name = in.Name - out.Namespace = in.Namespace - return nil -} - -func Convert_rbac_Subject_To_v1alpha1_Subject(in *rbac.Subject, out *Subject, s conversion.Scope) error { - return autoConvert_rbac_Subject_To_v1alpha1_Subject(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/deep_copy_generated.go deleted file mode 100644 index f898a434b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/deep_copy_generated.go +++ /dev/null @@ -1,271 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1alpha1 - -import ( - api "k8s.io/kubernetes/pkg/api" - unversioned "k8s.io/kubernetes/pkg/api/unversioned" - v1 "k8s.io/kubernetes/pkg/api/v1" - conversion "k8s.io/kubernetes/pkg/conversion" - runtime "k8s.io/kubernetes/pkg/runtime" -) - -func init() { - if err := api.Scheme.AddGeneratedDeepCopyFuncs( - DeepCopy_v1alpha1_ClusterRole, - DeepCopy_v1alpha1_ClusterRoleBinding, - DeepCopy_v1alpha1_ClusterRoleBindingList, - DeepCopy_v1alpha1_ClusterRoleList, - DeepCopy_v1alpha1_PolicyRule, - DeepCopy_v1alpha1_Role, - DeepCopy_v1alpha1_RoleBinding, - DeepCopy_v1alpha1_RoleBindingList, - DeepCopy_v1alpha1_RoleList, - DeepCopy_v1alpha1_Subject, - ); err != nil { - // if one of the deep copy functions is malformed, detect it immediately. 
- panic(err) - } -} - -func DeepCopy_v1alpha1_ClusterRole(in ClusterRole, out *ClusterRole, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if in.Rules != nil { - in, out := in.Rules, &out.Rules - *out = make([]PolicyRule, len(in)) - for i := range in { - if err := DeepCopy_v1alpha1_PolicyRule(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Rules = nil - } - return nil -} - -func DeepCopy_v1alpha1_ClusterRoleBinding(in ClusterRoleBinding, out *ClusterRoleBinding, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if in.Subjects != nil { - in, out := in.Subjects, &out.Subjects - *out = make([]Subject, len(in)) - for i := range in { - if err := DeepCopy_v1alpha1_Subject(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Subjects = nil - } - if err := v1.DeepCopy_v1_ObjectReference(in.RoleRef, &out.RoleRef, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1alpha1_ClusterRoleBindingList(in ClusterRoleBindingList, out *ClusterRoleBindingList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ClusterRoleBinding, len(in)) - for i := range in { - if err := DeepCopy_v1alpha1_ClusterRoleBinding(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1alpha1_ClusterRoleList(in ClusterRoleList, out *ClusterRoleList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]ClusterRole, len(in)) - for i := range in { - if err := DeepCopy_v1alpha1_ClusterRole(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1alpha1_PolicyRule(in PolicyRule, out *PolicyRule, c *conversion.Cloner) error { - if in.Verbs != nil { - in, out := in.Verbs, &out.Verbs - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Verbs = nil - } - if err := runtime.DeepCopy_runtime_RawExtension(in.AttributeRestrictions, &out.AttributeRestrictions, c); err != nil { - return err - } - if in.APIGroups != nil { - in, out := in.APIGroups, &out.APIGroups - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.APIGroups = nil - } - if in.Resources != nil { - in, out := in.Resources, &out.Resources - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.Resources = nil - } - if in.ResourceNames != nil { - in, out := in.ResourceNames, &out.ResourceNames - *out = make([]string, len(in)) - copy(*out, in) - } else { - out.ResourceNames = nil - } - if in.NonResourceURLs != nil { - in, out := in.NonResourceURLs, &out.NonResourceURLs - *out = make([]string, 
len(in)) - copy(*out, in) - } else { - out.NonResourceURLs = nil - } - return nil -} - -func DeepCopy_v1alpha1_Role(in Role, out *Role, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if in.Rules != nil { - in, out := in.Rules, &out.Rules - *out = make([]PolicyRule, len(in)) - for i := range in { - if err := DeepCopy_v1alpha1_PolicyRule(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Rules = nil - } - return nil -} - -func DeepCopy_v1alpha1_RoleBinding(in RoleBinding, out *RoleBinding, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := v1.DeepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if in.Subjects != nil { - in, out := in.Subjects, &out.Subjects - *out = make([]Subject, len(in)) - for i := range in { - if err := DeepCopy_v1alpha1_Subject(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Subjects = nil - } - if err := v1.DeepCopy_v1_ObjectReference(in.RoleRef, &out.RoleRef, c); err != nil { - return err - } - return nil -} - -func DeepCopy_v1alpha1_RoleBindingList(in RoleBindingList, out *RoleBindingList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]RoleBinding, len(in)) - for i := range in { - if err := DeepCopy_v1alpha1_RoleBinding(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1alpha1_RoleList(in RoleList, out *RoleList, c *conversion.Cloner) error { - if err := unversioned.DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if err := unversioned.DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil { - return err - } - if in.Items != nil { - in, out := in.Items, &out.Items - *out = make([]Role, len(in)) - for i := range in { - if err := DeepCopy_v1alpha1_Role(in[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func DeepCopy_v1alpha1_Subject(in Subject, out *Subject, c *conversion.Cloner) error { - out.Kind = in.Kind - out.APIVersion = in.APIVersion - out.Name = in.Name - out.Namespace = in.Namespace - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go deleted file mode 100644 index 6873ebb10..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +groupName=rbac.authorization.k8s.io -// +genconversion=true -package v1alpha1 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.pb.go deleted file mode 100644 index 54b03ed15..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.pb.go +++ /dev/null @@ -1,2209 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto -// DO NOT EDIT! - -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto - - It has these top-level messages: - ClusterRole - ClusterRoleBinding - ClusterRoleBindingList - ClusterRoleList - PolicyRule - Role - RoleBinding - RoleBindingList - RoleList - Subject -*/ -package v1alpha1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -func (m *ClusterRole) Reset() { *m = ClusterRole{} } -func (m *ClusterRole) String() string { return proto.CompactTextString(m) } -func (*ClusterRole) ProtoMessage() {} - -func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } -func (m *ClusterRoleBinding) String() string { return proto.CompactTextString(m) } -func (*ClusterRoleBinding) ProtoMessage() {} - -func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } -func (m *ClusterRoleBindingList) String() string { return proto.CompactTextString(m) } -func (*ClusterRoleBindingList) ProtoMessage() {} - -func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } -func (m *ClusterRoleList) String() string { return proto.CompactTextString(m) } -func (*ClusterRoleList) ProtoMessage() {} - -func (m *PolicyRule) Reset() { *m = PolicyRule{} } -func (m *PolicyRule) String() string { return proto.CompactTextString(m) } -func (*PolicyRule) ProtoMessage() {} - -func (m *Role) Reset() { *m = Role{} } -func (m *Role) String() string { return proto.CompactTextString(m) } -func (*Role) ProtoMessage() {} - -func (m *RoleBinding) Reset() { *m = RoleBinding{} } -func (m *RoleBinding) String() string { return proto.CompactTextString(m) } -func (*RoleBinding) ProtoMessage() {} - -func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } -func (m *RoleBindingList) String() string { return proto.CompactTextString(m) } -func (*RoleBindingList) ProtoMessage() {} - -func (m *RoleList) Reset() { *m = RoleList{} } -func (m *RoleList) String() string { return proto.CompactTextString(m) } -func (*RoleList) ProtoMessage() {} - -func (m *Subject) Reset() { *m = Subject{} } -func (m *Subject) String() string { return 
proto.CompactTextString(m) } -func (*Subject) ProtoMessage() {} - -func init() { - proto.RegisterType((*ClusterRole)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRole") - proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleBinding") - proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleBindingList") - proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.ClusterRoleList") - proto.RegisterType((*PolicyRule)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.PolicyRule") - proto.RegisterType((*Role)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.Role") - proto.RegisterType((*RoleBinding)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.RoleBinding") - proto.RegisterType((*RoleBindingList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.RoleBindingList") - proto.RegisterType((*RoleList)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.RoleList") - proto.RegisterType((*Subject)(nil), "k8s.io.kubernetes.pkg.apis.rbac.v1alpha1.Subject") -} -func (m *ClusterRole) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ClusterRole) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ClusterRoleBinding) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ClusterRoleBinding) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n2, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size())) - n3, err := m.RoleRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *ClusterRoleBindingList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ClusterRoleBindingList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n4 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ClusterRoleList) Marshal() (data 
[]byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ClusterRoleList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n5 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PolicyRule) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *PolicyRule) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - data[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.AttributeRestrictions.Size())) - n6, err := m.AttributeRestrictions.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - data[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - data[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - data[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - data[i] = 0x32 - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - return i, nil -} - -func (m *Role) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Role) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n7 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *RoleBinding) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *RoleBinding) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n8, err := 
m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n8 - if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.RoleRef.Size())) - n9, err := m.RoleRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n9 - return i, nil -} - -func (m *RoleBindingList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *RoleBindingList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n10 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *RoleList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *RoleList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n11 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *Subject) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Subject) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) - i += copy(data[i:], m.APIVersion) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Name))) - i += copy(data[i:], m.Name) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Namespace))) - i += copy(data[i:], m.Namespace) - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m 
*ClusterRole) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ClusterRoleBinding) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Subjects) > 0 { - for _, e := range m.Subjects { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.RoleRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ClusterRoleBindingList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ClusterRoleList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PolicyRule) Size() (n int) { - var l int - _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.AttributeRestrictions.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Role) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RoleBinding) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Subjects) > 0 { - for _, e := range m.Subjects { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.RoleRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *RoleBindingList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RoleList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Subject) Size() (n int) { - var l int - _ = l - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ClusterRole) Unmarshal(data []byte) error { - l := 
len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, PolicyRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterRoleBinding) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - 
iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subjects = append(m.Subjects, Subject{}) - if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterRoleBindingList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterRoleBindingList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ClusterRoleBinding{}) - if 
err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterRoleList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterRoleList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRoleList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ClusterRole{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PolicyRule) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) - } - var 
stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Verbs = append(m.Verbs, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AttributeRestrictions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.AttributeRestrictions.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIGroups = append(m.APIGroups, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resources = append(m.Resources, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceNames = append(m.ResourceNames, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { 
- break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NonResourceURLs = append(m.NonResourceURLs, string(data[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Role) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Role: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, PolicyRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RoleBinding) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RoleBinding: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RoleBinding: illegal tag %d (wire type %d)", 
fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subjects = append(m.Subjects, Subject{}) - if err := m.Subjects[len(m.Subjects)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RoleRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RoleBindingList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RoleBindingList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := 
m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, RoleBinding{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RoleList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RoleList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RoleList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Role{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Subject) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { 
- if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Subject: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto deleted file mode 100644 index 15c63b2f5..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/generated.proto +++ /dev/null @@ -1,159 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.apis.rbac.v1alpha1; - -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. -message ClusterRole { - // Standard object's metadata. 
- optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Rules holds all the PolicyRules for this ClusterRole - repeated PolicyRule rules = 2; -} - -// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, -// and adds who information via Subject. -message ClusterRoleBinding { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Subjects holds references to the objects the role applies to. - repeated Subject subjects = 2; - - // RoleRef can only reference a ClusterRole in the global namespace. - // If the RoleRef cannot be resolved, the Authorizer must return an error. - optional k8s.io.kubernetes.pkg.api.v1.ObjectReference roleRef = 3; -} - -// ClusterRoleBindingList is a collection of ClusterRoleBindings -message ClusterRoleBindingList { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of ClusterRoleBindings - repeated ClusterRoleBinding items = 2; -} - -// ClusterRoleList is a collection of ClusterRoles -message ClusterRoleList { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of ClusterRoles - repeated ClusterRole items = 2; -} - -// PolicyRule holds information that describes a policy rule, but does not contain information -// about who the rule applies to or which namespace the rule applies to. -message PolicyRule { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. - repeated string verbs = 1; - - // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. - // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. - optional k8s.io.kubernetes.pkg.runtime.RawExtension attributeRestrictions = 2; - - // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. - repeated string apiGroups = 3; - - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. - repeated string resources = 4; - - // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. - repeated string resourceNames = 5; - - // NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path - // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. - // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. - repeated string nonResourceURLs = 6; -} - -// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. -message Role { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Rules holds all the PolicyRules for this Role - repeated PolicyRule rules = 2; -} - -// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. 
-// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given -// namespace only have effect in that namespace. -message RoleBinding { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Subjects holds references to the objects the role applies to. - repeated Subject subjects = 2; - - // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. - // If the RoleRef cannot be resolved, the Authorizer must return an error. - optional k8s.io.kubernetes.pkg.api.v1.ObjectReference roleRef = 3; -} - -// RoleBindingList is a collection of RoleBindings -message RoleBindingList { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of RoleBindings - repeated RoleBinding items = 2; -} - -// RoleList is a collection of Roles -message RoleList { - // Standard object's metadata. - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is a list of Roles - repeated Role items = 2; -} - -// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, -// or a value for non-objects such as user and group names. -message Subject { - // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". - // If the Authorizer does not recognized the kind value, the Authorizer should report an error. - optional string kind = 1; - - // APIVersion holds the API group and version of the referenced object. - optional string apiVersion = 2; - - // Name of the object being referenced. - optional string name = 3; - - // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty - // the Authorizer should report an error. - optional string namespace = 4; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/register.go deleted file mode 100644 index eadcb4fbd..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/register.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/apis/rbac" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/watch/versioned" -) - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = unversioned.GroupVersion{Group: rbac.GroupName, Version: "v1alpha1"} - -func AddToScheme(scheme *runtime.Scheme) { - addKnownTypes(scheme) -} - -// Adds the list of known types to api.Scheme. 
-func addKnownTypes(scheme *runtime.Scheme) { - scheme.AddKnownTypes(SchemeGroupVersion, - &Role{}, - &RoleBinding{}, - &RoleBindingList{}, - &RoleList{}, - - &ClusterRole{}, - &ClusterRoleBinding{}, - &ClusterRoleBindingList{}, - &ClusterRoleList{}, - - &v1.ListOptions{}, - &v1.DeleteOptions{}, - &v1.ExportOptions{}, - ) - versioned.AddToGroupVersion(scheme, SchemeGroupVersion) -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.generated.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.generated.go deleted file mode 100644 index 58965dec0..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.generated.go +++ /dev/null @@ -1,4327 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v1alpha1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg2_unversioned "k8s.io/kubernetes/pkg/api/unversioned" - pkg3_v1 "k8s.io/kubernetes/pkg/api/v1" - pkg1_runtime "k8s.io/kubernetes/pkg/runtime" - pkg4_types "k8s.io/kubernetes/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg2_unversioned.TypeMeta - var v1 pkg3_v1.ObjectMeta - var v2 pkg1_runtime.RawExtension - var v3 pkg4_types.UID - var v4 time.Time - _, _, _, _, _ = v0, v1, v2, v3, v4 - } -} - -func (x *PolicyRule) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [6]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = true - yyq2[4] = len(x.ResourceNames) != 0 - yyq2[5] = len(x.NonResourceURLs) != 0 - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(6) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Verbs == nil { - r.EncodeNil() - } else { - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - z.F.EncSliceStringV(x.Verbs, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("verbs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Verbs == nil { - r.EncodeNil() - } else { - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - z.F.EncSliceStringV(x.Verbs, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yy7 := &x.AttributeRestrictions - yym8 := z.EncBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.EncExt(yy7) { - } else if !yym8 && z.IsJSONHandle() { - z.EncJSONMarshal(yy7) - } else { - z.EncFallback(yy7) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("attributeRestrictions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy9 := &x.AttributeRestrictions - yym10 := z.EncBinary() - _ = yym10 - if false { - } else if z.HasExtensions() && z.EncExt(yy9) { - } else if !yym10 && z.IsJSONHandle() { - z.EncJSONMarshal(yy9) - } else { - z.EncFallback(yy9) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.APIGroups == nil { - r.EncodeNil() - } else { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - z.F.EncSliceStringV(x.APIGroups, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiGroups")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.APIGroups == nil { - r.EncodeNil() - } else { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - z.F.EncSliceStringV(x.APIGroups, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Resources == nil { - r.EncodeNil() - } else { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - z.F.EncSliceStringV(x.Resources, false, e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resources")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Resources == 
nil { - r.EncodeNil() - } else { - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - z.F.EncSliceStringV(x.Resources, false, e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - if x.ResourceNames == nil { - r.EncodeNil() - } else { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - z.F.EncSliceStringV(x.ResourceNames, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceNames")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ResourceNames == nil { - r.EncodeNil() - } else { - yym19 := z.EncBinary() - _ = yym19 - if false { - } else { - z.F.EncSliceStringV(x.ResourceNames, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[5] { - if x.NonResourceURLs == nil { - r.EncodeNil() - } else { - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - z.F.EncSliceStringV(x.NonResourceURLs, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq2[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nonResourceURLs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.NonResourceURLs == nil { - r.EncodeNil() - } else { - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - z.F.EncSliceStringV(x.NonResourceURLs, false, e) - } - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *PolicyRule) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *PolicyRule) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "verbs": - if r.TryDecodeAsNil() { - x.Verbs = nil - } else { - yyv4 := &x.Verbs - yym5 := z.DecBinary() - _ = yym5 - if false { - } else { - z.F.DecSliceStringX(yyv4, false, d) - } - } - case "attributeRestrictions": - if r.TryDecodeAsNil() { - x.AttributeRestrictions = pkg1_runtime.RawExtension{} - } else { - yyv6 := &x.AttributeRestrictions - yym7 := z.DecBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.DecExt(yyv6) { - 
} else if !yym7 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv6) - } else { - z.DecFallback(yyv6, false) - } - } - case "apiGroups": - if r.TryDecodeAsNil() { - x.APIGroups = nil - } else { - yyv8 := &x.APIGroups - yym9 := z.DecBinary() - _ = yym9 - if false { - } else { - z.F.DecSliceStringX(yyv8, false, d) - } - } - case "resources": - if r.TryDecodeAsNil() { - x.Resources = nil - } else { - yyv10 := &x.Resources - yym11 := z.DecBinary() - _ = yym11 - if false { - } else { - z.F.DecSliceStringX(yyv10, false, d) - } - } - case "resourceNames": - if r.TryDecodeAsNil() { - x.ResourceNames = nil - } else { - yyv12 := &x.ResourceNames - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - z.F.DecSliceStringX(yyv12, false, d) - } - } - case "nonResourceURLs": - if r.TryDecodeAsNil() { - x.NonResourceURLs = nil - } else { - yyv14 := &x.NonResourceURLs - yym15 := z.DecBinary() - _ = yym15 - if false { - } else { - z.F.DecSliceStringX(yyv14, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *PolicyRule) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj16 int - var yyb16 bool - var yyhl16 bool = l >= 0 - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Verbs = nil - } else { - yyv17 := &x.Verbs - yym18 := z.DecBinary() - _ = yym18 - if false { - } else { - z.F.DecSliceStringX(yyv17, false, d) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.AttributeRestrictions = pkg1_runtime.RawExtension{} - } else { - yyv19 := &x.AttributeRestrictions - yym20 := z.DecBinary() - _ = yym20 - if false { - } else if z.HasExtensions() && z.DecExt(yyv19) { - } else if !yym20 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv19) - } else { - z.DecFallback(yyv19, false) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIGroups = nil - } else { - yyv21 := &x.APIGroups - yym22 := z.DecBinary() - _ = yym22 - if false { - } else { - z.F.DecSliceStringX(yyv21, false, d) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Resources = nil - } else { - yyv23 := &x.Resources - yym24 := z.DecBinary() - _ = yym24 - if false { - } else { - z.F.DecSliceStringX(yyv23, false, d) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ResourceNames = nil - } else { - yyv25 := &x.ResourceNames - yym26 := z.DecBinary() - _ = yym26 - if 
false { - } else { - z.F.DecSliceStringX(yyv25, false, d) - } - } - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.NonResourceURLs = nil - } else { - yyv27 := &x.NonResourceURLs - yym28 := z.DecBinary() - _ = yym28 - if false { - } else { - z.F.DecSliceStringX(yyv27, false, d) - } - } - for { - yyj16++ - if yyhl16 { - yyb16 = yyj16 > l - } else { - yyb16 = r.CheckBreak() - } - if yyb16 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj16-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Subject) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[1] = x.APIVersion != "" - yyq2[3] = x.Namespace != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[1] { - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("name")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym11 := z.EncBinary() - _ = yym11 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Name)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("namespace")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym14 := z.EncBinary() - _ = yym14 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Namespace)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Subject) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Subject) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "name": - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - case "namespace": - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Subject) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj8 int - var yyb8 bool - var yyhl8 bool = l >= 0 - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Name = "" - } else { - x.Name = string(r.DecodeString()) - } - yyj8++ - 
if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Namespace = "" - } else { - x.Namespace = string(r.DecodeString()) - } - for { - yyj8++ - if yyhl8 { - yyb8 = yyj8 > l - } else { - yyb8 = r.CheckBreak() - } - if yyb8 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj8-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Role) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Rules == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rules")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Rules == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, 
string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Role) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Role) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "rules": - if r.TryDecodeAsNil() { - x.Rules = nil - } else { - yyv5 := &x.Rules - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSlicePolicyRule((*[]PolicyRule)(yyv5), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Role) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rules = nil - } else { - yyv11 := &x.Rules - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSlicePolicyRule((*[]PolicyRule)(yyv11), d) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = 
"" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Subjects == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceSubject(([]Subject)(x.Subjects), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subjects")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Subjects == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceSubject(([]Subject)(x.Subjects), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy12 := &x.RoleRef - yy12.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("roleRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.RoleRef - yy14.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if 
false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RoleBinding) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "subjects": - if r.TryDecodeAsNil() { - x.Subjects = nil - } else { - yyv5 := &x.Subjects - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceSubject((*[]Subject)(yyv5), d) - } - } - case "roleRef": - if r.TryDecodeAsNil() { - x.RoleRef = pkg3_v1.ObjectReference{} - } else { - yyv7 := &x.RoleRef - yyv7.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} - } else { - yyv11 := &x.ObjectMeta - yyv11.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else 
{ - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Subjects = nil - } else { - yyv12 := &x.Subjects - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - h.decSliceSubject((*[]Subject)(yyv12), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RoleRef = pkg3_v1.ObjectReference{} - } else { - yyv14 := &x.RoleRef - yyv14.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceRoleBinding(([]RoleBinding)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := 
z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceRoleBinding(([]RoleBinding)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RoleBindingList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceRoleBinding((*[]RoleBinding)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } 
else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceRoleBinding((*[]RoleBinding)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *RoleList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false 
{ - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceRole(([]Role)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceRole(([]Role)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *RoleList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *RoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } 
else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceRole((*[]Role)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *RoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceRole((*[]Role)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterRole) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Rules == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rules")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Rules == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSlicePolicyRule(([]PolicyRule)(x.Rules), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterRole) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterRole) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - 
} else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "rules": - if r.TryDecodeAsNil() { - x.Rules = nil - } else { - yyv5 := &x.Rules - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSlicePolicyRule((*[]PolicyRule)(yyv5), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterRole) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj9 int - var yyb9 bool - var yyhl9 bool = l >= 0 - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} - } else { - yyv10 := &x.ObjectMeta - yyv10.CodecDecodeSelf(d) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rules = nil - } else { - yyv11 := &x.Rules - yym12 := z.DecBinary() - _ = yym12 - if false { - } else { - h.decSlicePolicyRule((*[]PolicyRule)(yyv11), d) - } - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj9++ - if yyhl9 { - yyb9 = yyj9 > l - } else { - yyb9 = r.CheckBreak() - } - if yyb9 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj9-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterRoleBinding) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [5]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[3] = x.Kind != "" - yyq2[4] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(5) - } else { - yynn2 = 2 - for _, b := range 
yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ObjectMeta - yy4.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ObjectMeta - yy6.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Subjects == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceSubject(([]Subject)(x.Subjects), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("subjects")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Subjects == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceSubject(([]Subject)(x.Subjects), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy12 := &x.RoleRef - yy12.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("roleRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy14 := &x.RoleRef - yy14.CodecEncodeSelf(e) - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym18 := z.EncBinary() - _ = yym18 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[4] { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterRoleBinding) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterRoleBinding) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} - } else { - yyv4 := &x.ObjectMeta - yyv4.CodecDecodeSelf(d) - } - case "subjects": - if r.TryDecodeAsNil() { - x.Subjects = nil - } else { - yyv5 := &x.Subjects - yym6 := z.DecBinary() - _ = yym6 - if false { - } else { - h.decSliceSubject((*[]Subject)(yyv5), d) - } - } - case "roleRef": - if r.TryDecodeAsNil() { - x.RoleRef = pkg3_v1.ObjectReference{} - } else { - yyv7 := &x.RoleRef - yyv7.CodecDecodeSelf(d) - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterRoleBinding) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg3_v1.ObjectMeta{} - } else { - yyv11 := &x.ObjectMeta - yyv11.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Subjects = nil - } else { - yyv12 := &x.Subjects - yym13 := z.DecBinary() - _ = yym13 - if false { - } else { - h.decSliceSubject((*[]Subject)(yyv12), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.RoleRef = pkg3_v1.ObjectReference{} - } else { - yyv14 := &x.RoleRef - yyv14.CodecDecodeSelf(d) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - 
return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterRoleBindingList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceClusterRoleBinding(([]ClusterRoleBinding)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterRoleBindingList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterRoleBindingList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterRoleBindingList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceClusterRoleBinding((*[]ClusterRoleBinding)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterRoleList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [4]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - yyq2[0] = true - yyq2[2] = x.Kind != "" - yyq2[3] = x.APIVersion != "" - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(4) - } else { - yynn2 = 1 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[0] { - yy4 := &x.ListMeta - yym5 := z.EncBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.EncExt(yy4) { - } else { - z.EncFallback(yy4) - } - } else { - r.EncodeNil() - } - } else { - if yyq2[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy6 := &x.ListMeta - yym7 := z.EncBinary() - _ = yym7 - if false { - } else if z.HasExtensions() && z.EncExt(yy6) { - } else { - z.EncFallback(yy6) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym9 := z.EncBinary() - _ = yym9 - if false { - } else { - h.encSliceClusterRole(([]ClusterRole)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym10 := z.EncBinary() - _ = yym10 - if false { - } else { - h.encSliceClusterRole(([]ClusterRole)(x.Items), e) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[2] { - yym12 := z.EncBinary() - _ = yym12 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[2] { - 
z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym13 := z.EncBinary() - _ = yym13 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq2[3] { - yym15 := z.EncBinary() - _ = yym15 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq2[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym16 := z.EncBinary() - _ = yym16 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterRoleList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1 := z.DecBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct2 := r.ContainerType() - if yyct2 == codecSelferValueTypeMap1234 { - yyl2 := r.ReadMapStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl2, d) - } - } else if yyct2 == codecSelferValueTypeArray1234 { - yyl2 := r.ReadArrayStart() - if yyl2 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl2, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterRoleList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys3Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys3Slc - var yyhl3 bool = l >= 0 - for yyj3 := 0; ; yyj3++ { - if yyhl3 { - if yyj3 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys3Slc = r.DecodeBytes(yys3Slc, true, true) - yys3 := string(yys3Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys3 { - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv4 := &x.ListMeta - yym5 := z.DecBinary() - _ = yym5 - if false { - } else if z.HasExtensions() && z.DecExt(yyv4) { - } else { - z.DecFallback(yyv4, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv6 := &x.Items - yym7 := z.DecBinary() - _ = yym7 - if false { - } else { - h.decSliceClusterRole((*[]ClusterRole)(yyv6), d) - } - } - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys3) - } // end switch yys3 - } // end for yyj3 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterRoleList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj10 int - var yyb10 bool - var yyhl10 
bool = l >= 0 - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv11 := &x.ListMeta - yym12 := z.DecBinary() - _ = yym12 - if false { - } else if z.HasExtensions() && z.DecExt(yyv11) { - } else { - z.DecFallback(yyv11, false) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv13 := &x.Items - yym14 := z.DecBinary() - _ = yym14 - if false { - } else { - h.decSliceClusterRole((*[]ClusterRole)(yyv13), d) - } - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - for { - yyj10++ - if yyhl10 { - yyb10 = yyj10 > l - } else { - yyb10 = r.CheckBreak() - } - if yyb10 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj10-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSlicePolicyRule(v []PolicyRule, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSlicePolicyRule(v *[]PolicyRule, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []PolicyRule{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 160) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]PolicyRule, yyrl1) - } - } else { - yyv1 = make([]PolicyRule, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = PolicyRule{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, PolicyRule{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { 
- yyv1[yyj1] = PolicyRule{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, PolicyRule{}) // var yyz1 PolicyRule - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = PolicyRule{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []PolicyRule{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceSubject(v []Subject, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceSubject(v *[]Subject, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Subject{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 64) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Subject, yyrl1) - } - } else { - yyv1 = make([]Subject, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Subject{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Subject{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Subject{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Subject{}) // var yyz1 Subject - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Subject{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Subject{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceRoleBinding(v []RoleBinding, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceRoleBinding(v *[]RoleBinding, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := 
*v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []RoleBinding{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 376) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]RoleBinding, yyrl1) - } - } else { - yyv1 = make([]RoleBinding, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = RoleBinding{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, RoleBinding{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = RoleBinding{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, RoleBinding{}) // var yyz1 RoleBinding - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = RoleBinding{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []RoleBinding{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceRole(v []Role, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceRole(v *[]Role, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []Role{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]Role, yyrl1) - } - } else { - yyv1 = make([]Role, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Role{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, Role{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = Role{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ 
{ - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, Role{}) // var yyz1 Role - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = Role{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []Role{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceClusterRoleBinding(v []ClusterRoleBinding, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceClusterRoleBinding(v *[]ClusterRoleBinding, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ClusterRoleBinding{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 376) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ClusterRoleBinding, yyrl1) - } - } else { - yyv1 = make([]ClusterRoleBinding, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterRoleBinding{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ClusterRoleBinding{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterRoleBinding{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ClusterRoleBinding{}) // var yyz1 ClusterRoleBinding - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterRoleBinding{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ClusterRoleBinding{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} - -func (x codecSelfer1234) encSliceClusterRole(v []ClusterRole, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy2 := &yyv1 - yy2.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceClusterRole(v *[]ClusterRole, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1 := *v - yyh1, yyl1 := 
z.DecSliceHelperStart() - var yyc1 bool - _ = yyc1 - if yyl1 == 0 { - if yyv1 == nil { - yyv1 = []ClusterRole{} - yyc1 = true - } else if len(yyv1) != 0 { - yyv1 = yyv1[:0] - yyc1 = true - } - } else if yyl1 > 0 { - var yyrr1, yyrl1 int - var yyrt1 bool - _, _ = yyrl1, yyrt1 - yyrr1 = yyl1 // len(yyv1) - if yyl1 > cap(yyv1) { - - yyrg1 := len(yyv1) > 0 - yyv21 := yyv1 - yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 264) - if yyrt1 { - if yyrl1 <= cap(yyv1) { - yyv1 = yyv1[:yyrl1] - } else { - yyv1 = make([]ClusterRole, yyrl1) - } - } else { - yyv1 = make([]ClusterRole, yyrl1) - } - yyc1 = true - yyrr1 = len(yyv1) - if yyrg1 { - copy(yyv1, yyv21) - } - } else if yyl1 != len(yyv1) { - yyv1 = yyv1[:yyl1] - yyc1 = true - } - yyj1 := 0 - for ; yyj1 < yyrr1; yyj1++ { - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterRole{} - } else { - yyv2 := &yyv1[yyj1] - yyv2.CodecDecodeSelf(d) - } - - } - if yyrt1 { - for ; yyj1 < yyl1; yyj1++ { - yyv1 = append(yyv1, ClusterRole{}) - yyh1.ElemContainerState(yyj1) - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterRole{} - } else { - yyv3 := &yyv1[yyj1] - yyv3.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1 := 0 - for ; !r.CheckBreak(); yyj1++ { - - if yyj1 >= len(yyv1) { - yyv1 = append(yyv1, ClusterRole{}) // var yyz1 ClusterRole - yyc1 = true - } - yyh1.ElemContainerState(yyj1) - if yyj1 < len(yyv1) { - if r.TryDecodeAsNil() { - yyv1[yyj1] = ClusterRole{} - } else { - yyv4 := &yyv1[yyj1] - yyv4.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1 < len(yyv1) { - yyv1 = yyv1[:yyj1] - yyc1 = true - } else if yyj1 == 0 && yyv1 == nil { - yyv1 = []ClusterRole{} - yyc1 = true - } - } - yyh1.End() - if yyc1 { - *v = yyv1 - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.go deleted file mode 100644 index 52eacfe3f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" -) - -// Authorization is calculated against -// 1. evaluation of ClusterRoleBindings - short circuit on match -// 2. evaluation of RoleBindings in the namespace requested - short circuit on match -// 3. deny by default - -// PolicyRule holds information that describes a policy rule, but does not contain information -// about who the rule applies to or which namespace the rule applies to. -type PolicyRule struct { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. - Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` - // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. 
- // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. - AttributeRestrictions runtime.RawExtension `json:"attributeRestrictions,omitempty" protobuf:"bytes,2,opt,name=attributeRestrictions"` - // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. - APIGroups []string `json:"apiGroups" protobuf:"bytes,3,rep,name=apiGroups"` - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. - Resources []string `json:"resources" protobuf:"bytes,4,rep,name=resources"` - // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. - ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"` - // NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path - // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. - // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. - NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,6,rep,name=nonResourceURLs"` -} - -// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, -// or a value for non-objects such as user and group names. -type Subject struct { - // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". - // If the Authorizer does not recognized the kind value, the Authorizer should report an error. - Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // APIVersion holds the API group and version of the referenced object. - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt.name=apiVersion"` - // Name of the object being referenced. - Name string `json:"name" protobuf:"bytes,3,opt,name=name"` - // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty - // the Authorizer should report an error. - Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` -} - -// +genclient=true - -// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. -type Role struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Rules holds all the PolicyRules for this Role - Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` -} - -// +genclient=true - -// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. -// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given -// namespace only have effect in that namespace. -type RoleBinding struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Subjects holds references to the objects the role applies to. 
- Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"` - - // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. - // If the RoleRef cannot be resolved, the Authorizer must return an error. - RoleRef v1.ObjectReference `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` -} - -// RoleBindingList is a collection of RoleBindings -type RoleBindingList struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of RoleBindings - Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// RoleList is a collection of Roles -type RoleList struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of Roles - Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient=true,nonNamespaced=true - -// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. -type ClusterRole struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Rules holds all the PolicyRules for this ClusterRole - Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` -} - -// +genclient=true,nonNamespaced=true - -// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, -// and adds who information via Subject. -type ClusterRoleBinding struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Subjects holds references to the objects the role applies to. - Subjects []Subject `json:"subjects" protobuf:"bytes,2,rep,name=subjects"` - - // RoleRef can only reference a ClusterRole in the global namespace. - // If the RoleRef cannot be resolved, the Authorizer must return an error. - RoleRef v1.ObjectReference `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` -} - -// ClusterRoleBindingList is a collection of ClusterRoleBindings -type ClusterRoleBindingList struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of ClusterRoleBindings - Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// ClusterRoleList is a collection of ClusterRoles -type ClusterRoleList struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of ClusterRoles - Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index b88c93c03..000000000 --- a/vendor/k8s.io/kubernetes/pkg/apis/rbac/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,138 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_ClusterRole = map[string]string{ - "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", - "metadata": "Standard object's metadata.", - "rules": "Rules holds all the PolicyRules for this ClusterRole", -} - -func (ClusterRole) SwaggerDoc() map[string]string { - return map_ClusterRole -} - -var map_ClusterRoleBinding = map[string]string{ - "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", - "metadata": "Standard object's metadata.", - "subjects": "Subjects holds references to the objects the role applies to.", - "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", -} - -func (ClusterRoleBinding) SwaggerDoc() map[string]string { - return map_ClusterRoleBinding -} - -var map_ClusterRoleBindingList = map[string]string{ - "": "ClusterRoleBindingList is a collection of ClusterRoleBindings", - "metadata": "Standard object's metadata.", - "items": "Items is a list of ClusterRoleBindings", -} - -func (ClusterRoleBindingList) SwaggerDoc() map[string]string { - return map_ClusterRoleBindingList -} - -var map_ClusterRoleList = map[string]string{ - "": "ClusterRoleList is a collection of ClusterRoles", - "metadata": "Standard object's metadata.", - "items": "Items is a list of ClusterRoles", -} - -func (ClusterRoleList) SwaggerDoc() map[string]string { - return map_ClusterRoleList -} - -var map_PolicyRule = map[string]string{ - "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", - "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", - "attributeRestrictions": "AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.", - "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. 
If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", - "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", - "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", - "nonResourceURLs": "NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.", -} - -func (PolicyRule) SwaggerDoc() map[string]string { - return map_PolicyRule -} - -var map_Role = map[string]string{ - "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", - "metadata": "Standard object's metadata.", - "rules": "Rules holds all the PolicyRules for this Role", -} - -func (Role) SwaggerDoc() map[string]string { - return map_Role -} - -var map_RoleBinding = map[string]string{ - "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", - "metadata": "Standard object's metadata.", - "subjects": "Subjects holds references to the objects the role applies to.", - "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", -} - -func (RoleBinding) SwaggerDoc() map[string]string { - return map_RoleBinding -} - -var map_RoleBindingList = map[string]string{ - "": "RoleBindingList is a collection of RoleBindings", - "metadata": "Standard object's metadata.", - "items": "Items is a list of RoleBindings", -} - -func (RoleBindingList) SwaggerDoc() map[string]string { - return map_RoleBindingList -} - -var map_RoleList = map[string]string{ - "": "RoleList is a collection of Roles", - "metadata": "Standard object's metadata.", - "items": "Items is a list of Roles", -} - -func (RoleList) SwaggerDoc() map[string]string { - return map_RoleList -} - -var map_Subject = map[string]string{ - "": "Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names.", - "kind": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.", - "apiVersion": "APIVersion holds the API group and version of the referenced object.", - "name": "Name of the object being referenced.", - "namespace": "Namespace of the referenced object. 
If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.", -} - -func (Subject) SwaggerDoc() map[string]string { - return map_Subject -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/kubernetes/pkg/auth/user/doc.go b/vendor/k8s.io/kubernetes/pkg/auth/user/doc.go deleted file mode 100644 index b11258494..000000000 --- a/vendor/k8s.io/kubernetes/pkg/auth/user/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package user contains utilities for dealing with simple user exchange in the auth -// packages. The user.Info interface defines an interface for exchanging that info. -package user diff --git a/vendor/k8s.io/kubernetes/pkg/auth/user/user.go b/vendor/k8s.io/kubernetes/pkg/auth/user/user.go deleted file mode 100644 index c4a4c00d5..000000000 --- a/vendor/k8s.io/kubernetes/pkg/auth/user/user.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package user - -// Info describes a user that has been authenticated to the system. -type Info interface { - // GetName returns the name that uniquely identifies this user among all - // other active users. - GetName() string - // GetUID returns a unique value for a particular user that will change - // if the user is removed from the system and another user is added with - // the same name. - GetUID() string - // GetGroups returns the names of the groups the user is a member of - GetGroups() []string - - // GetExtra can contain any additional information that the authenticator - // thought was interesting. One example would be scopes on a token. - // Keys in this map should be namespaced to the authenticator or - // authenticator/authorizer pair making use of them. - // For instance: "example.org/foo" instead of "foo" - // This is a map[string][]string because it needs to be serializeable into - // a SubjectAccessReviewSpec.authorization.k8s.io for proper authorization - // delegation flows - GetExtra() map[string][]string -} - -// DefaultInfo provides a simple user information exchange object -// for components that implement the UserInfo interface. 
-type DefaultInfo struct { - Name string - UID string - Groups []string - Extra map[string][]string -} - -func (i *DefaultInfo) GetName() string { - return i.Name -} - -func (i *DefaultInfo) GetUID() string { - return i.UID -} - -func (i *DefaultInfo) GetGroups() []string { - return i.Groups -} - -func (i *DefaultInfo) GetExtra() map[string][]string { - return i.Extra -} diff --git a/vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go b/vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go deleted file mode 100644 index d0a882c54..000000000 --- a/vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package capabilities - -import ( - "sync" -) - -// Capabilities defines the set of capabilities available within the system. -// For now these are global. Eventually they may be per-user -type Capabilities struct { - AllowPrivileged bool - - // Pod sources from which to allow privileged capabilities like host networking, sharing the host - // IPC namespace, and sharing the host PID namespace. - PrivilegedSources PrivilegedSources - - // PerConnectionBandwidthLimitBytesPerSec limits the throughput of each connection (currently only used for proxy, exec, attach) - PerConnectionBandwidthLimitBytesPerSec int64 -} - -// PrivilegedSources defines the pod sources allowed to make privileged requests for certain types -// of capabilities like host networking, sharing the host IPC namespace, and sharing the host PID namespace. -type PrivilegedSources struct { - // List of pod sources for which using host network is allowed. - HostNetworkSources []string - - // List of pod sources for which using host pid namespace is allowed. - HostPIDSources []string - - // List of pod sources for which using host ipc is allowed. - HostIPCSources []string -} - -// TODO: Clean these up into a singleton -var once sync.Once -var lock sync.Mutex -var capabilities *Capabilities - -// Initialize the capability set. This can only be done once per binary, subsequent calls are ignored. -func Initialize(c Capabilities) { - // Only do this once - once.Do(func() { - capabilities = &c - }) -} - -// Setup the capability set. It wraps Initialize for improving usibility. -func Setup(allowPrivileged bool, privilegedSources PrivilegedSources, perConnectionBytesPerSec int64) { - Initialize(Capabilities{ - AllowPrivileged: allowPrivileged, - PrivilegedSources: privilegedSources, - PerConnectionBandwidthLimitBytesPerSec: perConnectionBytesPerSec, - }) -} - -// SetCapabilitiesForTests. Convenience method for testing. This should only be called from tests. -func SetForTests(c Capabilities) { - lock.Lock() - defer lock.Unlock() - capabilities = &c -} - -// Returns a read-only copy of the system capabilities. 
-func Get() Capabilities { - lock.Lock() - defer lock.Unlock() - // This check prevents clobbering of capabilities that might've been set via SetForTests - if capabilities == nil { - Initialize(Capabilities{ - AllowPrivileged: false, - PrivilegedSources: PrivilegedSources{ - HostNetworkSources: []string{}, - HostPIDSources: []string{}, - HostIPCSources: []string{}, - }, - }) - } - return *capabilities -} diff --git a/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go b/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go deleted file mode 100644 index 81143a78e..000000000 --- a/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// package capbabilities manages system level capabilities -package capabilities diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/doc.go b/vendor/k8s.io/kubernetes/pkg/client/cache/doc.go deleted file mode 100644 index 16600cf24..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package cache is a client-side caching mechanism. It is useful for -// reducing the number of server calls you'd otherwise need to make. -// Reflector watches a server and updates a Store. Two stores are provided; -// one that simply caches objects (for example, to allow a scheduler to -// list currently available nodes), and one that additionally acts as -// a FIFO queue (for example, to allow a scheduler to process incoming -// pods). -package cache diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/index.go b/vendor/k8s.io/kubernetes/pkg/client/cache/index.go deleted file mode 100644 index 572f2c06b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/index.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cache - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/util/sets" -) - -// Indexer is a storage interface that lets you list objects using multiple indexing functions -type Indexer interface { - Store - // Retrieve list of objects that match on the named indexing function - Index(indexName string, obj interface{}) ([]interface{}, error) - // ListIndexFuncValues returns the list of generated values of an Index func - ListIndexFuncValues(indexName string) []string - // ByIndex lists object that match on the named indexing function with the exact key - ByIndex(indexName, indexKey string) ([]interface{}, error) - // GetIndexer return the indexers - GetIndexers() Indexers - - // AddIndexers adds more indexers to this store. If you call this after you already have data - // in the store, the results are undefined. - AddIndexers(newIndexers Indexers) error -} - -// IndexFunc knows how to provide an indexed value for an object. -type IndexFunc func(obj interface{}) ([]string, error) - -// IndexFuncToKeyFuncAdapter adapts an indexFunc to a keyFunc. This is only useful if your index function returns -// unique values for every object. This is conversion can create errors when more than one key is found. You -// should prefer to make proper key and index functions. -func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc { - return func(obj interface{}) (string, error) { - indexKeys, err := indexFunc(obj) - if err != nil { - return "", err - } - if len(indexKeys) > 1 { - return "", fmt.Errorf("too many keys: %v", indexKeys) - } - return indexKeys[0], nil - } -} - -const ( - NamespaceIndex string = "namespace" -) - -// MetaNamespaceIndexFunc is a default index function that indexes based on an object's namespace -func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) { - meta, err := meta.Accessor(obj) - if err != nil { - return []string{""}, fmt.Errorf("object has no meta: %v", err) - } - return []string{meta.GetNamespace()}, nil -} - -// Index maps the indexed value to a set of keys in the store that match on that value -type Index map[string]sets.String - -// Indexers maps a name to a IndexFunc -type Indexers map[string]IndexFunc - -// Indices maps a name to an Index -type Indices map[string]Index diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/listers.go b/vendor/k8s.io/kubernetes/pkg/client/cache/listers.go deleted file mode 100644 index 90c9c62f9..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/listers.go +++ /dev/null @@ -1,672 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -import ( - "fmt" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/labels" -) - -// TODO: generate these classes and methods for all resources of interest using -// a script. 
Can use "go generate" once 1.4 is supported by all users. - -// StoreToPodLister makes a Store have the List method of the client.PodInterface -// The Store must contain (only) Pods. -// -// Example: -// s := cache.NewStore() -// lw := cache.ListWatch{Client: c, FieldSelector: sel, Resource: "pods"} -// r := cache.NewReflector(lw, &api.Pod{}, s).Run() -// l := StoreToPodLister{s} -// l.List() -type StoreToPodLister struct { - Indexer -} - -// Please note that selector is filtering among the pods that have gotten into -// the store; there may have been some filtering that already happened before -// that. -// -// TODO: converge on the interface in pkg/client. -func (s *StoreToPodLister) List(selector labels.Selector) (pods []*api.Pod, err error) { - // TODO: it'd be great to just call - // s.Pods(api.NamespaceAll).List(selector), however then we'd have to - // remake the list.Items as a []*api.Pod. So leave this separate for - // now. - for _, m := range s.Indexer.List() { - pod := m.(*api.Pod) - if selector.Matches(labels.Set(pod.Labels)) { - pods = append(pods, pod) - } - } - return pods, nil -} - -// Pods is taking baby steps to be more like the api in pkg/client -func (s *StoreToPodLister) Pods(namespace string) storePodsNamespacer { - return storePodsNamespacer{s.Indexer, namespace} -} - -type storePodsNamespacer struct { - indexer Indexer - namespace string -} - -// Please note that selector is filtering among the pods that have gotten into -// the store; there may have been some filtering that already happened before -// that. -func (s storePodsNamespacer) List(selector labels.Selector) (api.PodList, error) { - pods := api.PodList{} - - if s.namespace == api.NamespaceAll { - for _, m := range s.indexer.List() { - pod := m.(*api.Pod) - if selector.Matches(labels.Set(pod.Labels)) { - pods.Items = append(pods.Items, *pod) - } - } - return pods, nil - } - - key := &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: s.namespace}} - items, err := s.indexer.Index(NamespaceIndex, key) - if err != nil { - // Ignore error; do slow search without index. - glog.Warningf("can not retrieve list of objects using index : %v", err) - for _, m := range s.indexer.List() { - pod := m.(*api.Pod) - if s.namespace == pod.Namespace && selector.Matches(labels.Set(pod.Labels)) { - pods.Items = append(pods.Items, *pod) - } - } - return pods, nil - } - for _, m := range items { - pod := m.(*api.Pod) - if selector.Matches(labels.Set(pod.Labels)) { - pods.Items = append(pods.Items, *pod) - } - } - return pods, nil -} - -// Exists returns true if a pod matching the namespace/name of the given pod exists in the store. -func (s *StoreToPodLister) Exists(pod *api.Pod) (bool, error) { - _, exists, err := s.Indexer.Get(pod) - if err != nil { - return false, err - } - return exists, nil -} - -// NodeConditionPredicate is a function that indicates whether the given node's conditions meet -// some set of criteria defined by the function. -type NodeConditionPredicate func(node api.Node) bool - -// StoreToNodeLister makes a Store have the List method of the client.NodeInterface -// The Store must contain (only) Nodes. 
-type StoreToNodeLister struct { - Store -} - -func (s *StoreToNodeLister) List() (machines api.NodeList, err error) { - for _, m := range s.Store.List() { - machines.Items = append(machines.Items, *(m.(*api.Node))) - } - return machines, nil -} - -// NodeCondition returns a storeToNodeConditionLister -func (s *StoreToNodeLister) NodeCondition(predicate NodeConditionPredicate) storeToNodeConditionLister { - // TODO: Move this filtering server side. Currently our selectors don't facilitate searching through a list so we - // have the reflector filter out the Unschedulable field and sift through node conditions in the lister. - return storeToNodeConditionLister{s.Store, predicate} -} - -// storeToNodeConditionLister filters and returns nodes matching the given type and status from the store. -type storeToNodeConditionLister struct { - store Store - predicate NodeConditionPredicate -} - -// List returns a list of nodes that match the conditions defined by the predicate functions in the storeToNodeConditionLister. -func (s storeToNodeConditionLister) List() (nodes api.NodeList, err error) { - for _, m := range s.store.List() { - node := *m.(*api.Node) - if s.predicate(node) { - nodes.Items = append(nodes.Items, node) - } else { - glog.V(5).Infof("Node %s matches none of the conditions", node.Name) - } - } - return -} - -// StoreToReplicationControllerLister gives a store List and Exists methods. The store must contain only ReplicationControllers. -type StoreToReplicationControllerLister struct { - Indexer -} - -// Exists checks if the given rc exists in the store. -func (s *StoreToReplicationControllerLister) Exists(controller *api.ReplicationController) (bool, error) { - _, exists, err := s.Indexer.Get(controller) - if err != nil { - return false, err - } - return exists, nil -} - -// StoreToReplicationControllerLister lists all controllers in the store. -// TODO: converge on the interface in pkg/client -func (s *StoreToReplicationControllerLister) List() (controllers []api.ReplicationController, err error) { - for _, c := range s.Indexer.List() { - controllers = append(controllers, *(c.(*api.ReplicationController))) - } - return controllers, nil -} - -func (s *StoreToReplicationControllerLister) ReplicationControllers(namespace string) storeReplicationControllersNamespacer { - return storeReplicationControllersNamespacer{s.Indexer, namespace} -} - -type storeReplicationControllersNamespacer struct { - indexer Indexer - namespace string -} - -func (s storeReplicationControllersNamespacer) List(selector labels.Selector) ([]api.ReplicationController, error) { - controllers := []api.ReplicationController{} - - if s.namespace == api.NamespaceAll { - for _, m := range s.indexer.List() { - rc := *(m.(*api.ReplicationController)) - if selector.Matches(labels.Set(rc.Labels)) { - controllers = append(controllers, rc) - } - } - return controllers, nil - } - - key := &api.ReplicationController{ObjectMeta: api.ObjectMeta{Namespace: s.namespace}} - items, err := s.indexer.Index(NamespaceIndex, key) - if err != nil { - // Ignore error; do slow search without index. 
- glog.Warningf("can not retrieve list of objects using index : %v", err) - for _, m := range s.indexer.List() { - rc := *(m.(*api.ReplicationController)) - if s.namespace == rc.Namespace && selector.Matches(labels.Set(rc.Labels)) { - controllers = append(controllers, rc) - } - } - return controllers, nil - } - for _, m := range items { - rc := *(m.(*api.ReplicationController)) - if selector.Matches(labels.Set(rc.Labels)) { - controllers = append(controllers, rc) - } - } - return controllers, nil -} - -// GetPodControllers returns a list of replication controllers managing a pod. Returns an error only if no matching controllers are found. -func (s *StoreToReplicationControllerLister) GetPodControllers(pod *api.Pod) (controllers []api.ReplicationController, err error) { - var selector labels.Selector - var rc api.ReplicationController - - if len(pod.Labels) == 0 { - err = fmt.Errorf("no controllers found for pod %v because it has no labels", pod.Name) - return - } - - key := &api.ReplicationController{ObjectMeta: api.ObjectMeta{Namespace: pod.Namespace}} - items, err := s.Indexer.Index(NamespaceIndex, key) - if err != nil { - return - } - - for _, m := range items { - rc = *m.(*api.ReplicationController) - labelSet := labels.Set(rc.Spec.Selector) - selector = labels.Set(rc.Spec.Selector).AsSelector() - - // If an rc with a nil or empty selector creeps in, it should match nothing, not everything. - if labelSet.AsSelector().Empty() || !selector.Matches(labels.Set(pod.Labels)) { - continue - } - controllers = append(controllers, rc) - } - if len(controllers) == 0 { - err = fmt.Errorf("could not find controller for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) - } - return -} - -// StoreToDeploymentLister gives a store List and Exists methods. The store must contain only Deployments. -type StoreToDeploymentLister struct { - Store -} - -// Exists checks if the given deployment exists in the store. -func (s *StoreToDeploymentLister) Exists(deployment *extensions.Deployment) (bool, error) { - _, exists, err := s.Store.Get(deployment) - if err != nil { - return false, err - } - return exists, nil -} - -// StoreToDeploymentLister lists all deployments in the store. -// TODO: converge on the interface in pkg/client -func (s *StoreToDeploymentLister) List() (deployments []extensions.Deployment, err error) { - for _, c := range s.Store.List() { - deployments = append(deployments, *(c.(*extensions.Deployment))) - } - return deployments, nil -} - -// GetDeploymentsForReplicaSet returns a list of deployments managing a replica set. Returns an error only if no matching deployments are found. -func (s *StoreToDeploymentLister) GetDeploymentsForReplicaSet(rs *extensions.ReplicaSet) (deployments []extensions.Deployment, err error) { - var d extensions.Deployment - - if len(rs.Labels) == 0 { - err = fmt.Errorf("no deployments found for ReplicaSet %v because it has no labels", rs.Name) - return - } - - // TODO: MODIFY THIS METHOD so that it checks for the podTemplateSpecHash label - for _, m := range s.Store.List() { - d = *m.(*extensions.Deployment) - if d.Namespace != rs.Namespace { - continue - } - - selector, err := unversioned.LabelSelectorAsSelector(d.Spec.Selector) - if err != nil { - return nil, fmt.Errorf("invalid label selector: %v", err) - } - // If a deployment with a nil or empty selector creeps in, it should match nothing, not everything. 
- if selector.Empty() || !selector.Matches(labels.Set(rs.Labels)) { - continue - } - deployments = append(deployments, d) - } - if len(deployments) == 0 { - err = fmt.Errorf("could not find deployments set for ReplicaSet %s in namespace %s with labels: %v", rs.Name, rs.Namespace, rs.Labels) - } - return -} - -// StoreToReplicaSetLister gives a store List and Exists methods. The store must contain only ReplicaSets. -type StoreToReplicaSetLister struct { - Store -} - -// Exists checks if the given ReplicaSet exists in the store. -func (s *StoreToReplicaSetLister) Exists(rs *extensions.ReplicaSet) (bool, error) { - _, exists, err := s.Store.Get(rs) - if err != nil { - return false, err - } - return exists, nil -} - -// List lists all ReplicaSets in the store. -// TODO: converge on the interface in pkg/client -func (s *StoreToReplicaSetLister) List() (rss []extensions.ReplicaSet, err error) { - for _, rs := range s.Store.List() { - rss = append(rss, *(rs.(*extensions.ReplicaSet))) - } - return rss, nil -} - -type storeReplicaSetsNamespacer struct { - store Store - namespace string -} - -func (s storeReplicaSetsNamespacer) List(selector labels.Selector) (rss []extensions.ReplicaSet, err error) { - for _, c := range s.store.List() { - rs := *(c.(*extensions.ReplicaSet)) - if s.namespace == api.NamespaceAll || s.namespace == rs.Namespace { - if selector.Matches(labels.Set(rs.Labels)) { - rss = append(rss, rs) - } - } - } - return -} - -func (s *StoreToReplicaSetLister) ReplicaSets(namespace string) storeReplicaSetsNamespacer { - return storeReplicaSetsNamespacer{s.Store, namespace} -} - -// GetPodReplicaSets returns a list of ReplicaSets managing a pod. Returns an error only if no matching ReplicaSets are found. -func (s *StoreToReplicaSetLister) GetPodReplicaSets(pod *api.Pod) (rss []extensions.ReplicaSet, err error) { - var selector labels.Selector - var rs extensions.ReplicaSet - - if len(pod.Labels) == 0 { - err = fmt.Errorf("no ReplicaSets found for pod %v because it has no labels", pod.Name) - return - } - - for _, m := range s.Store.List() { - rs = *m.(*extensions.ReplicaSet) - if rs.Namespace != pod.Namespace { - continue - } - selector, err = unversioned.LabelSelectorAsSelector(rs.Spec.Selector) - if err != nil { - err = fmt.Errorf("invalid selector: %v", err) - return - } - - // If a ReplicaSet with a nil or empty selector creeps in, it should match nothing, not everything. - if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { - continue - } - rss = append(rss, rs) - } - if len(rss) == 0 { - err = fmt.Errorf("could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) - } - return -} - -// StoreToDaemonSetLister gives a store List and Exists methods. The store must contain only DaemonSets. -type StoreToDaemonSetLister struct { - Store -} - -// Exists checks if the given daemon set exists in the store. -func (s *StoreToDaemonSetLister) Exists(ds *extensions.DaemonSet) (bool, error) { - _, exists, err := s.Store.Get(ds) - if err != nil { - return false, err - } - return exists, nil -} - -// List lists all daemon sets in the store. -// TODO: converge on the interface in pkg/client -func (s *StoreToDaemonSetLister) List() (dss extensions.DaemonSetList, err error) { - for _, c := range s.Store.List() { - dss.Items = append(dss.Items, *(c.(*extensions.DaemonSet))) - } - return dss, nil -} - -// GetPodDaemonSets returns a list of daemon sets managing a pod. -// Returns an error if and only if no matching daemon sets are found. 
-func (s *StoreToDaemonSetLister) GetPodDaemonSets(pod *api.Pod) (daemonSets []extensions.DaemonSet, err error) { - var selector labels.Selector - var daemonSet extensions.DaemonSet - - if len(pod.Labels) == 0 { - err = fmt.Errorf("no daemon sets found for pod %v because it has no labels", pod.Name) - return - } - - for _, m := range s.Store.List() { - daemonSet = *m.(*extensions.DaemonSet) - if daemonSet.Namespace != pod.Namespace { - continue - } - selector, err = unversioned.LabelSelectorAsSelector(daemonSet.Spec.Selector) - if err != nil { - // this should not happen if the DaemonSet passed validation - return nil, err - } - - // If a daemonSet with a nil or empty selector creeps in, it should match nothing, not everything. - if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { - continue - } - daemonSets = append(daemonSets, daemonSet) - } - if len(daemonSets) == 0 { - err = fmt.Errorf("could not find daemon set for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) - } - return -} - -// StoreToServiceLister makes a Store that has the List method of the client.ServiceInterface -// The Store must contain (only) Services. -type StoreToServiceLister struct { - Store -} - -func (s *StoreToServiceLister) List() (services api.ServiceList, err error) { - for _, m := range s.Store.List() { - services.Items = append(services.Items, *(m.(*api.Service))) - } - return services, nil -} - -// TODO: Move this back to scheduler as a helper function that takes a Store, -// rather than a method of StoreToServiceLister. -func (s *StoreToServiceLister) GetPodServices(pod *api.Pod) (services []api.Service, err error) { - var selector labels.Selector - var service api.Service - - for _, m := range s.Store.List() { - service = *m.(*api.Service) - // consider only services that are in the same namespace as the pod - if service.Namespace != pod.Namespace { - continue - } - if service.Spec.Selector == nil { - // services with nil selectors match nothing, not everything. - continue - } - selector = labels.Set(service.Spec.Selector).AsSelector() - if selector.Matches(labels.Set(pod.Labels)) { - services = append(services, service) - } - } - if len(services) == 0 { - err = fmt.Errorf("could not find service for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) - } - - return -} - -// StoreToEndpointsLister makes a Store that lists endpoints. -type StoreToEndpointsLister struct { - Store -} - -// List lists all endpoints in the store. -func (s *StoreToEndpointsLister) List() (services api.EndpointsList, err error) { - for _, m := range s.Store.List() { - services.Items = append(services.Items, *(m.(*api.Endpoints))) - } - return services, nil -} - -// GetServiceEndpoints returns the endpoints of a service, matched on service name. -func (s *StoreToEndpointsLister) GetServiceEndpoints(svc *api.Service) (ep api.Endpoints, err error) { - for _, m := range s.Store.List() { - ep = *m.(*api.Endpoints) - if svc.Name == ep.Name && svc.Namespace == ep.Namespace { - return ep, nil - } - } - err = fmt.Errorf("could not find endpoints for service: %v", svc.Name) - return -} - -// StoreToJobLister gives a store List and Exists methods. The store must contain only Jobs. -type StoreToJobLister struct { - Store -} - -// Exists checks if the given job exists in the store. 
-func (s *StoreToJobLister) Exists(job *batch.Job) (bool, error) { - _, exists, err := s.Store.Get(job) - if err != nil { - return false, err - } - return exists, nil -} - -// StoreToJobLister lists all jobs in the store. -func (s *StoreToJobLister) List() (jobs batch.JobList, err error) { - for _, c := range s.Store.List() { - jobs.Items = append(jobs.Items, *(c.(*batch.Job))) - } - return jobs, nil -} - -// GetPodJobs returns a list of jobs managing a pod. Returns an error only if no matching jobs are found. -func (s *StoreToJobLister) GetPodJobs(pod *api.Pod) (jobs []batch.Job, err error) { - var selector labels.Selector - var job batch.Job - - if len(pod.Labels) == 0 { - err = fmt.Errorf("no jobs found for pod %v because it has no labels", pod.Name) - return - } - - for _, m := range s.Store.List() { - job = *m.(*batch.Job) - if job.Namespace != pod.Namespace { - continue - } - - selector, _ = unversioned.LabelSelectorAsSelector(job.Spec.Selector) - if !selector.Matches(labels.Set(pod.Labels)) { - continue - } - jobs = append(jobs, job) - } - if len(jobs) == 0 { - err = fmt.Errorf("could not find jobs for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) - } - return -} - -// Typed wrapper around a store of PersistentVolumes -type StoreToPVFetcher struct { - Store -} - -// GetPersistentVolumeInfo returns cached data for the PersistentVolume 'id'. -func (s *StoreToPVFetcher) GetPersistentVolumeInfo(id string) (*api.PersistentVolume, error) { - o, exists, err := s.Get(&api.PersistentVolume{ObjectMeta: api.ObjectMeta{Name: id}}) - - if err != nil { - return nil, fmt.Errorf("error retrieving PersistentVolume '%v' from cache: %v", id, err) - } - - if !exists { - return nil, fmt.Errorf("PersistentVolume '%v' not found", id) - } - - return o.(*api.PersistentVolume), nil -} - -// Typed wrapper around a store of PersistentVolumeClaims -type StoreToPVCFetcher struct { - Store -} - -// GetPersistentVolumeClaimInfo returns cached data for the PersistentVolumeClaim 'id'. -func (s *StoreToPVCFetcher) GetPersistentVolumeClaimInfo(namespace string, id string) (*api.PersistentVolumeClaim, error) { - o, exists, err := s.Get(&api.PersistentVolumeClaim{ObjectMeta: api.ObjectMeta{Namespace: namespace, Name: id}}) - if err != nil { - return nil, fmt.Errorf("error retrieving PersistentVolumeClaim '%s/%s' from cache: %v", namespace, id, err) - } - - if !exists { - return nil, fmt.Errorf("PersistentVolumeClaim '%s/%s' not found", namespace, id) - } - - return o.(*api.PersistentVolumeClaim), nil -} - -// StoreToPetSetLister gives a store List and Exists methods. The store must contain only PetSets. -type StoreToPetSetLister struct { - Store -} - -// Exists checks if the given PetSet exists in the store. -func (s *StoreToPetSetLister) Exists(ps *apps.PetSet) (bool, error) { - _, exists, err := s.Store.Get(ps) - if err != nil { - return false, err - } - return exists, nil -} - -// List lists all PetSets in the store. -func (s *StoreToPetSetLister) List() (psList []apps.PetSet, err error) { - for _, ps := range s.Store.List() { - psList = append(psList, *(ps.(*apps.PetSet))) - } - return psList, nil -} - -type storePetSetsNamespacer struct { - store Store - namespace string -} - -func (s *StoreToPetSetLister) PetSets(namespace string) storePetSetsNamespacer { - return storePetSetsNamespacer{s.Store, namespace} -} - -// GetPodPetSets returns a list of PetSets managing a pod. Returns an error only if no matching PetSets are found. 
-func (s *StoreToPetSetLister) GetPodPetSets(pod *api.Pod) (psList []apps.PetSet, err error) { - var selector labels.Selector - var ps apps.PetSet - - if len(pod.Labels) == 0 { - err = fmt.Errorf("no PetSets found for pod %v because it has no labels", pod.Name) - return - } - - for _, m := range s.Store.List() { - ps = *m.(*apps.PetSet) - if ps.Namespace != pod.Namespace { - continue - } - selector, err = unversioned.LabelSelectorAsSelector(ps.Spec.Selector) - if err != nil { - err = fmt.Errorf("invalid selector: %v", err) - return - } - - // If a PetSet with a nil or empty selector creeps in, it should match nothing, not everything. - if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { - continue - } - psList = append(psList, ps) - } - if len(psList) == 0 { - err = fmt.Errorf("could not find PetSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) - } - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/cache/listwatch.go b/vendor/k8s.io/kubernetes/pkg/client/cache/listwatch.go deleted file mode 100644 index 06c2f611b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/cache/listwatch.go +++ /dev/null @@ -1,86 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -import ( - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/watch" -) - -// ListFunc knows how to list resources -type ListFunc func(options api.ListOptions) (runtime.Object, error) - -// WatchFunc knows how to watch resources -type WatchFunc func(options api.ListOptions) (watch.Interface, error) - -// ListWatch knows how to list and watch a set of apiserver resources. It satisfies the ListerWatcher interface. -// It is a convenience function for users of NewReflector, etc. -// ListFunc and WatchFunc must not be nil -type ListWatch struct { - ListFunc ListFunc - WatchFunc WatchFunc -} - -// Getter interface knows how to access Get method from RESTClient. -type Getter interface { - Get() *restclient.Request -} - -// NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace and field selector. -func NewListWatchFromClient(c Getter, resource string, namespace string, fieldSelector fields.Selector) *ListWatch { - listFunc := func(options api.ListOptions) (runtime.Object, error) { - return c.Get(). - Namespace(namespace). - Resource(resource). - VersionedParams(&options, api.ParameterCodec). - FieldsSelectorParam(fieldSelector). - Do(). - Get() - } - watchFunc := func(options api.ListOptions) (watch.Interface, error) { - return c.Get(). - Prefix("watch"). - Namespace(namespace). - Resource(resource). - VersionedParams(&options, api.ParameterCodec). - FieldsSelectorParam(fieldSelector). 
- Watch() - } - return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc} -} - -func timeoutFromListOptions(options api.ListOptions) time.Duration { - if options.TimeoutSeconds != nil { - return time.Duration(*options.TimeoutSeconds) * time.Second - } - return 0 -} - -// List a set of apiserver resources -func (lw *ListWatch) List(options api.ListOptions) (runtime.Object, error) { - return lw.ListFunc(options) -} - -// Watch a set of apiserver resources -func (lw *ListWatch) Watch(options api.ListOptions) (watch.Interface, error) { - return lw.WatchFunc(options) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go deleted file mode 100644 index 8b958a581..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/clientset.go +++ /dev/null @@ -1,158 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalclientset - -import ( - "github.com/golang/glog" - unversionedautoscaling "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned" - unversionedbatch "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned" - unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" - unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned" - unversionedrbac "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned" - restclient "k8s.io/kubernetes/pkg/client/restclient" - discovery "k8s.io/kubernetes/pkg/client/typed/discovery" - "k8s.io/kubernetes/pkg/util/flowcontrol" -) - -type Interface interface { - Discovery() discovery.DiscoveryInterface - Core() unversionedcore.CoreInterface - Extensions() unversionedextensions.ExtensionsInterface - Autoscaling() unversionedautoscaling.AutoscalingInterface - Batch() unversionedbatch.BatchInterface - Rbac() unversionedrbac.RbacInterface -} - -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. 
-type Clientset struct { - *discovery.DiscoveryClient - *unversionedcore.CoreClient - *unversionedextensions.ExtensionsClient - *unversionedautoscaling.AutoscalingClient - *unversionedbatch.BatchClient - *unversionedrbac.RbacClient -} - -// Core retrieves the CoreClient -func (c *Clientset) Core() unversionedcore.CoreInterface { - if c == nil { - return nil - } - return c.CoreClient -} - -// Extensions retrieves the ExtensionsClient -func (c *Clientset) Extensions() unversionedextensions.ExtensionsInterface { - if c == nil { - return nil - } - return c.ExtensionsClient -} - -// Autoscaling retrieves the AutoscalingClient -func (c *Clientset) Autoscaling() unversionedautoscaling.AutoscalingInterface { - if c == nil { - return nil - } - return c.AutoscalingClient -} - -// Batch retrieves the BatchClient -func (c *Clientset) Batch() unversionedbatch.BatchInterface { - if c == nil { - return nil - } - return c.BatchClient -} - -// Rbac retrieves the RbacClient -func (c *Clientset) Rbac() unversionedrbac.RbacInterface { - if c == nil { - return nil - } - return c.RbacClient -} - -// Discovery retrieves the DiscoveryClient -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - return c.DiscoveryClient -} - -// NewForConfig creates a new Clientset for the given config. -func NewForConfig(c *restclient.Config) (*Clientset, error) { - configShallowCopy := *c - if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { - configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) - } - var clientset Clientset - var err error - clientset.CoreClient, err = unversionedcore.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - clientset.ExtensionsClient, err = unversionedextensions.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - clientset.AutoscalingClient, err = unversionedautoscaling.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - clientset.BatchClient, err = unversionedbatch.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - clientset.RbacClient, err = unversionedrbac.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - - clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) - if err != nil { - glog.Errorf("failed to create the DiscoveryClient: %v", err) - return nil, err - } - return &clientset, nil -} - -// NewForConfigOrDie creates a new Clientset for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *Clientset { - var clientset Clientset - clientset.CoreClient = unversionedcore.NewForConfigOrDie(c) - clientset.ExtensionsClient = unversionedextensions.NewForConfigOrDie(c) - clientset.AutoscalingClient = unversionedautoscaling.NewForConfigOrDie(c) - clientset.BatchClient = unversionedbatch.NewForConfigOrDie(c) - clientset.RbacClient = unversionedrbac.NewForConfigOrDie(c) - - clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &clientset -} - -// New creates a new Clientset for the given RESTClient. 
-func New(c *restclient.RESTClient) *Clientset { - var clientset Clientset - clientset.CoreClient = unversionedcore.New(c) - clientset.ExtensionsClient = unversionedextensions.New(c) - clientset.AutoscalingClient = unversionedautoscaling.New(c) - clientset.BatchClient = unversionedbatch.New(c) - clientset.RbacClient = unversionedrbac.New(c) - - clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) - return &clientset -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/doc.go deleted file mode 100644 index 3934caa42..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// This package has the automatically generated clientset. -package internalclientset diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/import_known_versions.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/import_known_versions.go deleted file mode 100644 index 8bdbe2e6c..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/import_known_versions.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalclientset - -// These imports are the API groups the client will support. 
-import ( - "fmt" - - _ "k8s.io/kubernetes/pkg/api/install" - "k8s.io/kubernetes/pkg/apimachinery/registered" - _ "k8s.io/kubernetes/pkg/apis/apps/install" - _ "k8s.io/kubernetes/pkg/apis/authorization/install" - _ "k8s.io/kubernetes/pkg/apis/autoscaling/install" - _ "k8s.io/kubernetes/pkg/apis/batch/install" - _ "k8s.io/kubernetes/pkg/apis/componentconfig/install" - _ "k8s.io/kubernetes/pkg/apis/extensions/install" - _ "k8s.io/kubernetes/pkg/apis/policy/install" - _ "k8s.io/kubernetes/pkg/apis/rbac/install" -) - -func init() { - if missingVersions := registered.ValidateEnvRequestedVersions(); len(missingVersions) != 0 { - panic(fmt.Sprintf("KUBE_API_VERSIONS contains versions that are not installed: %q.", missingVersions)) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/autoscaling_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/autoscaling_client.go deleted file mode 100644 index 752b5d554..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/autoscaling_client.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - registered "k8s.io/kubernetes/pkg/apimachinery/registered" - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -type AutoscalingInterface interface { - GetRESTClient() *restclient.RESTClient - HorizontalPodAutoscalersGetter -} - -// AutoscalingClient is used to interact with features provided by the Autoscaling group. -type AutoscalingClient struct { - *restclient.RESTClient -} - -func (c *AutoscalingClient) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { - return newHorizontalPodAutoscalers(c, namespace) -} - -// NewForConfig creates a new AutoscalingClient for the given config. -func NewForConfig(c *restclient.Config) (*AutoscalingClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AutoscalingClient{client}, nil -} - -// NewForConfigOrDie creates a new AutoscalingClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *AutoscalingClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AutoscalingClient for the given RESTClient. 
-func New(c *restclient.RESTClient) *AutoscalingClient { - return &AutoscalingClient{c} -} - -func setConfigDefaults(config *restclient.Config) error { - // if autoscaling group is not registered, return an error - g, err := registered.Group("autoscaling") - if err != nil { - return err - } - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - // TODO: Unconditionally set the config.Version, until we fix the config. - //if config.Version == "" { - copyGroupVersion := g.GroupVersion - config.GroupVersion = ©GroupVersion - //} - - config.NegotiatedSerializer = api.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// GetRESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *AutoscalingClient) GetRESTClient() *restclient.RESTClient { - if c == nil { - return nil - } - return c.RESTClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/doc.go deleted file mode 100644 index 47517b642..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// This package has the automatically generated typed clients. -package unversioned diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/generated_expansion.go deleted file mode 100644 index 39324902a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/generated_expansion.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package unversioned - -type HorizontalPodAutoscalerExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/horizontalpodautoscaler.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/horizontalpodautoscaler.go deleted file mode 100644 index ae185ad7f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/unversioned/horizontalpodautoscaler.go +++ /dev/null @@ -1,150 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling" - watch "k8s.io/kubernetes/pkg/watch" -) - -// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. -// A group's client should implement this interface. -type HorizontalPodAutoscalersGetter interface { - HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface -} - -// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. -type HorizontalPodAutoscalerInterface interface { - Create(*autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) - Update(*autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) - UpdateStatus(*autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*autoscaling.HorizontalPodAutoscaler, error) - List(opts api.ListOptions) (*autoscaling.HorizontalPodAutoscalerList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - HorizontalPodAutoscalerExpansion -} - -// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface -type horizontalPodAutoscalers struct { - client *AutoscalingClient - ns string -} - -// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers -func newHorizontalPodAutoscalers(c *AutoscalingClient, namespace string) *horizontalPodAutoscalers { - return &horizontalPodAutoscalers{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { - result = &autoscaling.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Body(horizontalPodAutoscaler). - Do(). - Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. 
Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { - result = &autoscaling.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - Body(horizontalPodAutoscaler). - Do(). - Into(result) - return -} - -func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { - result = &autoscaling.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - Body(horizontalPodAutoscaler). - Do(). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(name string) (result *autoscaling.HorizontalPodAutoscaler, err error) { - result = &autoscaling.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(opts api.ListOptions) (result *autoscaling.HorizontalPodAutoscalerList, err error) { - result = &autoscaling.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalers) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/batch_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/batch_client.go deleted file mode 100644 index 83d9d749c..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/batch_client.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - registered "k8s.io/kubernetes/pkg/apimachinery/registered" - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -type BatchInterface interface { - GetRESTClient() *restclient.RESTClient - JobsGetter - ScheduledJobsGetter -} - -// BatchClient is used to interact with features provided by the Batch group. -type BatchClient struct { - *restclient.RESTClient -} - -func (c *BatchClient) Jobs(namespace string) JobInterface { - return newJobs(c, namespace) -} - -func (c *BatchClient) ScheduledJobs(namespace string) ScheduledJobInterface { - return newScheduledJobs(c, namespace) -} - -// NewForConfig creates a new BatchClient for the given config. -func NewForConfig(c *restclient.Config) (*BatchClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &BatchClient{client}, nil -} - -// NewForConfigOrDie creates a new BatchClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *BatchClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new BatchClient for the given RESTClient. -func New(c *restclient.RESTClient) *BatchClient { - return &BatchClient{c} -} - -func setConfigDefaults(config *restclient.Config) error { - // if batch group is not registered, return an error - g, err := registered.Group("batch") - if err != nil { - return err - } - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - // TODO: Unconditionally set the config.Version, until we fix the config. - //if config.Version == "" { - copyGroupVersion := g.GroupVersion - config.GroupVersion = ©GroupVersion - //} - - config.NegotiatedSerializer = api.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// GetRESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *BatchClient) GetRESTClient() *restclient.RESTClient { - if c == nil { - return nil - } - return c.RESTClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/doc.go deleted file mode 100644 index 47517b642..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// This package has the automatically generated typed clients. -package unversioned diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/generated_expansion.go deleted file mode 100644 index f876ef63f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -type JobExpansion interface{} - -type ScheduledJobExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/job.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/job.go deleted file mode 100644 index 680c50654..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/job.go +++ /dev/null @@ -1,150 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - batch "k8s.io/kubernetes/pkg/apis/batch" - watch "k8s.io/kubernetes/pkg/watch" -) - -// JobsGetter has a method to return a JobInterface. -// A group's client should implement this interface. -type JobsGetter interface { - Jobs(namespace string) JobInterface -} - -// JobInterface has methods to work with Job resources. 
-type JobInterface interface { - Create(*batch.Job) (*batch.Job, error) - Update(*batch.Job) (*batch.Job, error) - UpdateStatus(*batch.Job) (*batch.Job, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*batch.Job, error) - List(opts api.ListOptions) (*batch.JobList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - JobExpansion -} - -// jobs implements JobInterface -type jobs struct { - client *BatchClient - ns string -} - -// newJobs returns a Jobs -func newJobs(c *BatchClient, namespace string) *jobs { - return &jobs{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Create(job *batch.Job) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.client.Post(). - Namespace(c.ns). - Resource("jobs"). - Body(job). - Do(). - Into(result) - return -} - -// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Update(job *batch.Job) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.client.Put(). - Namespace(c.ns). - Resource("jobs"). - Name(job.Name). - Body(job). - Do(). - Into(result) - return -} - -func (c *jobs) UpdateStatus(job *batch.Job) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.client.Put(). - Namespace(c.ns). - Resource("jobs"). - Name(job.Name). - SubResource("status"). - Body(job). - Do(). - Into(result) - return -} - -// Delete takes name of the job and deletes it. Returns an error if one occurs. -func (c *jobs) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("jobs"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *jobs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the job, and returns the corresponding job object, and an error if there is any. -func (c *jobs) Get(name string) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Jobs that match those selectors. -func (c *jobs) List(opts api.ListOptions) (result *batch.JobList, err error) { - result = &batch.JobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested jobs. -func (c *jobs) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, api.ParameterCodec). 
- Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/scheduledjob.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/scheduledjob.go deleted file mode 100644 index 2675d11c4..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned/scheduledjob.go +++ /dev/null @@ -1,150 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - batch "k8s.io/kubernetes/pkg/apis/batch" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ScheduledJobsGetter has a method to return a ScheduledJobInterface. -// A group's client should implement this interface. -type ScheduledJobsGetter interface { - ScheduledJobs(namespace string) ScheduledJobInterface -} - -// ScheduledJobInterface has methods to work with ScheduledJob resources. -type ScheduledJobInterface interface { - Create(*batch.ScheduledJob) (*batch.ScheduledJob, error) - Update(*batch.ScheduledJob) (*batch.ScheduledJob, error) - UpdateStatus(*batch.ScheduledJob) (*batch.ScheduledJob, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*batch.ScheduledJob, error) - List(opts api.ListOptions) (*batch.ScheduledJobList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - ScheduledJobExpansion -} - -// scheduledJobs implements ScheduledJobInterface -type scheduledJobs struct { - client *BatchClient - ns string -} - -// newScheduledJobs returns a ScheduledJobs -func newScheduledJobs(c *BatchClient, namespace string) *scheduledJobs { - return &scheduledJobs{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a scheduledJob and creates it. Returns the server's representation of the scheduledJob, and an error, if there is any. -func (c *scheduledJobs) Create(scheduledJob *batch.ScheduledJob) (result *batch.ScheduledJob, err error) { - result = &batch.ScheduledJob{} - err = c.client.Post(). - Namespace(c.ns). - Resource("scheduledjobs"). - Body(scheduledJob). - Do(). - Into(result) - return -} - -// Update takes the representation of a scheduledJob and updates it. Returns the server's representation of the scheduledJob, and an error, if there is any. -func (c *scheduledJobs) Update(scheduledJob *batch.ScheduledJob) (result *batch.ScheduledJob, err error) { - result = &batch.ScheduledJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("scheduledjobs"). - Name(scheduledJob.Name). - Body(scheduledJob). - Do(). - Into(result) - return -} - -func (c *scheduledJobs) UpdateStatus(scheduledJob *batch.ScheduledJob) (result *batch.ScheduledJob, err error) { - result = &batch.ScheduledJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("scheduledjobs"). - Name(scheduledJob.Name). - SubResource("status"). - Body(scheduledJob). 
- Do(). - Into(result) - return -} - -// Delete takes name of the scheduledJob and deletes it. Returns an error if one occurs. -func (c *scheduledJobs) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("scheduledjobs"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *scheduledJobs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("scheduledjobs"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the scheduledJob, and returns the corresponding scheduledJob object, and an error if there is any. -func (c *scheduledJobs) Get(name string) (result *batch.ScheduledJob, err error) { - result = &batch.ScheduledJob{} - err = c.client.Get(). - Namespace(c.ns). - Resource("scheduledjobs"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ScheduledJobs that match those selectors. -func (c *scheduledJobs) List(opts api.ListOptions) (result *batch.ScheduledJobList, err error) { - result = &batch.ScheduledJobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("scheduledjobs"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested scheduledJobs. -func (c *scheduledJobs) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("scheduledjobs"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/componentstatus.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/componentstatus.go deleted file mode 100644 index 0ef0667da..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/componentstatus.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ComponentStatusesGetter has a method to return a ComponentStatusInterface. -// A group's client should implement this interface. -type ComponentStatusesGetter interface { - ComponentStatuses() ComponentStatusInterface -} - -// ComponentStatusInterface has methods to work with ComponentStatus resources. 
-type ComponentStatusInterface interface { - Create(*api.ComponentStatus) (*api.ComponentStatus, error) - Update(*api.ComponentStatus) (*api.ComponentStatus, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.ComponentStatus, error) - List(opts api.ListOptions) (*api.ComponentStatusList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - ComponentStatusExpansion -} - -// componentStatuses implements ComponentStatusInterface -type componentStatuses struct { - client *CoreClient -} - -// newComponentStatuses returns a ComponentStatuses -func newComponentStatuses(c *CoreClient) *componentStatuses { - return &componentStatuses{ - client: c, - } -} - -// Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *componentStatuses) Create(componentStatus *api.ComponentStatus) (result *api.ComponentStatus, err error) { - result = &api.ComponentStatus{} - err = c.client.Post(). - Resource("componentstatuses"). - Body(componentStatus). - Do(). - Into(result) - return -} - -// Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *componentStatuses) Update(componentStatus *api.ComponentStatus) (result *api.ComponentStatus, err error) { - result = &api.ComponentStatus{} - err = c.client.Put(). - Resource("componentstatuses"). - Name(componentStatus.Name). - Body(componentStatus). - Do(). - Into(result) - return -} - -// Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. -func (c *componentStatuses) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Resource("componentstatuses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *componentStatuses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Resource("componentstatuses"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. -func (c *componentStatuses) Get(name string) (result *api.ComponentStatus, err error) { - result = &api.ComponentStatus{} - err = c.client.Get(). - Resource("componentstatuses"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. -func (c *componentStatuses) List(opts api.ListOptions) (result *api.ComponentStatusList, err error) { - result = &api.ComponentStatusList{} - err = c.client.Get(). - Resource("componentstatuses"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested componentStatuses. -func (c *componentStatuses) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("componentstatuses"). - VersionedParams(&opts, api.ParameterCodec). 
- Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/configmap.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/configmap.go deleted file mode 100644 index b43e53d6c..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/configmap.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ConfigMapsGetter has a method to return a ConfigMapInterface. -// A group's client should implement this interface. -type ConfigMapsGetter interface { - ConfigMaps(namespace string) ConfigMapInterface -} - -// ConfigMapInterface has methods to work with ConfigMap resources. -type ConfigMapInterface interface { - Create(*api.ConfigMap) (*api.ConfigMap, error) - Update(*api.ConfigMap) (*api.ConfigMap, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.ConfigMap, error) - List(opts api.ListOptions) (*api.ConfigMapList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - ConfigMapExpansion -} - -// configMaps implements ConfigMapInterface -type configMaps struct { - client *CoreClient - ns string -} - -// newConfigMaps returns a ConfigMaps -func newConfigMaps(c *CoreClient, namespace string) *configMaps { - return &configMaps{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Create(configMap *api.ConfigMap) (result *api.ConfigMap, err error) { - result = &api.ConfigMap{} - err = c.client.Post(). - Namespace(c.ns). - Resource("configmaps"). - Body(configMap). - Do(). - Into(result) - return -} - -// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Update(configMap *api.ConfigMap) (result *api.ConfigMap, err error) { - result = &api.ConfigMap{} - err = c.client.Put(). - Namespace(c.ns). - Resource("configmaps"). - Name(configMap.Name). - Body(configMap). - Do(). - Into(result) - return -} - -// Delete takes name of the configMap and deletes it. Returns an error if one occurs. -func (c *configMaps) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *configMaps) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("configmaps"). 
- VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. -func (c *configMaps) Get(name string) (result *api.ConfigMap, err error) { - result = &api.ConfigMap{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. -func (c *configMaps) List(opts api.ListOptions) (result *api.ConfigMapList, err error) { - result = &api.ConfigMapList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested configMaps. -func (c *configMaps) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/core_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/core_client.go deleted file mode 100644 index 41aee4cf6..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/core_client.go +++ /dev/null @@ -1,176 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - registered "k8s.io/kubernetes/pkg/apimachinery/registered" - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -type CoreInterface interface { - GetRESTClient() *restclient.RESTClient - ComponentStatusesGetter - ConfigMapsGetter - EndpointsGetter - EventsGetter - LimitRangesGetter - NamespacesGetter - NodesGetter - PersistentVolumesGetter - PersistentVolumeClaimsGetter - PodsGetter - PodTemplatesGetter - ReplicationControllersGetter - ResourceQuotasGetter - SecretsGetter - ServicesGetter - ServiceAccountsGetter -} - -// CoreClient is used to interact with features provided by the Core group. 
-type CoreClient struct { - *restclient.RESTClient -} - -func (c *CoreClient) ComponentStatuses() ComponentStatusInterface { - return newComponentStatuses(c) -} - -func (c *CoreClient) ConfigMaps(namespace string) ConfigMapInterface { - return newConfigMaps(c, namespace) -} - -func (c *CoreClient) Endpoints(namespace string) EndpointsInterface { - return newEndpoints(c, namespace) -} - -func (c *CoreClient) Events(namespace string) EventInterface { - return newEvents(c, namespace) -} - -func (c *CoreClient) LimitRanges(namespace string) LimitRangeInterface { - return newLimitRanges(c, namespace) -} - -func (c *CoreClient) Namespaces() NamespaceInterface { - return newNamespaces(c) -} - -func (c *CoreClient) Nodes() NodeInterface { - return newNodes(c) -} - -func (c *CoreClient) PersistentVolumes() PersistentVolumeInterface { - return newPersistentVolumes(c) -} - -func (c *CoreClient) PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface { - return newPersistentVolumeClaims(c, namespace) -} - -func (c *CoreClient) Pods(namespace string) PodInterface { - return newPods(c, namespace) -} - -func (c *CoreClient) PodTemplates(namespace string) PodTemplateInterface { - return newPodTemplates(c, namespace) -} - -func (c *CoreClient) ReplicationControllers(namespace string) ReplicationControllerInterface { - return newReplicationControllers(c, namespace) -} - -func (c *CoreClient) ResourceQuotas(namespace string) ResourceQuotaInterface { - return newResourceQuotas(c, namespace) -} - -func (c *CoreClient) Secrets(namespace string) SecretInterface { - return newSecrets(c, namespace) -} - -func (c *CoreClient) Services(namespace string) ServiceInterface { - return newServices(c, namespace) -} - -func (c *CoreClient) ServiceAccounts(namespace string) ServiceAccountInterface { - return newServiceAccounts(c, namespace) -} - -// NewForConfig creates a new CoreClient for the given config. -func NewForConfig(c *restclient.Config) (*CoreClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &CoreClient{client}, nil -} - -// NewForConfigOrDie creates a new CoreClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *CoreClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new CoreClient for the given RESTClient. -func New(c *restclient.RESTClient) *CoreClient { - return &CoreClient{c} -} - -func setConfigDefaults(config *restclient.Config) error { - // if core group is not registered, return an error - g, err := registered.Group("") - if err != nil { - return err - } - config.APIPath = "/api" - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - // TODO: Unconditionally set the config.Version, until we fix the config. - //if config.Version == "" { - copyGroupVersion := g.GroupVersion - config.GroupVersion = ©GroupVersion - //} - - config.NegotiatedSerializer = api.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// GetRESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *CoreClient) GetRESTClient() *restclient.RESTClient { - if c == nil { - return nil - } - return c.RESTClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/doc.go deleted file mode 100644 index 47517b642..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// This package has the automatically generated typed clients. -package unversioned diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/endpoints.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/endpoints.go deleted file mode 100644 index 78e2a0878..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/endpoints.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - watch "k8s.io/kubernetes/pkg/watch" -) - -// EndpointsGetter has a method to return a EndpointsInterface. -// A group's client should implement this interface. -type EndpointsGetter interface { - Endpoints(namespace string) EndpointsInterface -} - -// EndpointsInterface has methods to work with Endpoints resources. -type EndpointsInterface interface { - Create(*api.Endpoints) (*api.Endpoints, error) - Update(*api.Endpoints) (*api.Endpoints, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.Endpoints, error) - List(opts api.ListOptions) (*api.EndpointsList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - EndpointsExpansion -} - -// endpoints implements EndpointsInterface -type endpoints struct { - client *CoreClient - ns string -} - -// newEndpoints returns a Endpoints -func newEndpoints(c *CoreClient, namespace string) *endpoints { - return &endpoints{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a endpoints and creates it. 
Returns the server's representation of the endpoints, and an error, if there is any. -func (c *endpoints) Create(endpoints *api.Endpoints) (result *api.Endpoints, err error) { - result = &api.Endpoints{} - err = c.client.Post(). - Namespace(c.ns). - Resource("endpoints"). - Body(endpoints). - Do(). - Into(result) - return -} - -// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *endpoints) Update(endpoints *api.Endpoints) (result *api.Endpoints, err error) { - result = &api.Endpoints{} - err = c.client.Put(). - Namespace(c.ns). - Resource("endpoints"). - Name(endpoints.Name). - Body(endpoints). - Do(). - Into(result) - return -} - -// Delete takes name of the endpoints and deletes it. Returns an error if one occurs. -func (c *endpoints) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("endpoints"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *endpoints) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. -func (c *endpoints) Get(name string) (result *api.Endpoints, err error) { - result = &api.Endpoints{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Endpoints that match those selectors. -func (c *endpoints) List(opts api.ListOptions) (result *api.EndpointsList, err error) { - result = &api.EndpointsList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested endpoints. -func (c *endpoints) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/event.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/event.go deleted file mode 100644 index 5627690a6..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/event.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - watch "k8s.io/kubernetes/pkg/watch" -) - -// EventsGetter has a method to return a EventInterface. -// A group's client should implement this interface. -type EventsGetter interface { - Events(namespace string) EventInterface -} - -// EventInterface has methods to work with Event resources. -type EventInterface interface { - Create(*api.Event) (*api.Event, error) - Update(*api.Event) (*api.Event, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.Event, error) - List(opts api.ListOptions) (*api.EventList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - EventExpansion -} - -// events implements EventInterface -type events struct { - client *CoreClient - ns string -} - -// newEvents returns a Events -func newEvents(c *CoreClient, namespace string) *events { - return &events{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(event *api.Event) (result *api.Event, err error) { - result = &api.Event{} - err = c.client.Post(). - Namespace(c.ns). - Resource("events"). - Body(event). - Do(). - Into(result) - return -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(event *api.Event) (result *api.Event, err error) { - result = &api.Event{} - err = c.client.Put(). - Namespace(c.ns). - Resource("events"). - Name(event.Name). - Body(event). - Do(). - Into(result) - return -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *events) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *events) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *events) Get(name string) (result *api.Event, err error) { - result = &api.Event{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(opts api.ListOptions) (result *api.EventList, err error) { - result = &api.EventList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *events) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, api.ParameterCodec). 
- Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/event_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/event_expansion.go deleted file mode 100644 index abdf89aa1..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/event_expansion.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/runtime" -) - -// The EventExpansion interface allows manually adding extra methods to the EventInterface. -type EventExpansion interface { - // CreateWithEventNamespace is the same as a Create, except that it sends the request to the event.Namespace. - CreateWithEventNamespace(event *api.Event) (*api.Event, error) - // UpdateWithEventNamespace is the same as a Update, except that it sends the request to the event.Namespace. - UpdateWithEventNamespace(event *api.Event) (*api.Event, error) - Patch(event *api.Event, data []byte) (*api.Event, error) - // Search finds events about the specified object - Search(objOrRef runtime.Object) (*api.EventList, error) - // Returns the appropriate field selector based on the API version being used to communicate with the server. - // The returned field selector can be used with List and Watch to filter desired events. - GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector -} - -// CreateWithEventNamespace makes a new event. Returns the copy of the event the server returns, -// or an error. The namespace to create the event within is deduced from the -// event; it must either match this event client's namespace, or this event -// client must have been created with the "" namespace. -func (e *events) CreateWithEventNamespace(event *api.Event) (*api.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) - } - result := &api.Event{} - err := e.client.Post(). - NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). - Resource("events"). - Body(event). - Do(). - Into(result) - return result, err -} - -// UpdateWithEventNamespace modifies an existing event. It returns the copy of the event that the server returns, -// or an error. The namespace and key to update the event within is deduced from the event. The -// namespace must either match this event client's namespace, or this event client must have been -// created with the "" namespace. Update also requires the ResourceVersion to be set in the event -// object. -func (e *events) UpdateWithEventNamespace(event *api.Event) (*api.Event, error) { - result := &api.Event{} - err := e.client.Put(). 
- NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). - Resource("events"). - Name(event.Name). - Body(event). - Do(). - Into(result) - return result, err -} - -// Patch modifies an existing event. It returns the copy of the event that the server returns, or an -// error. The namespace and name of the target event is deduced from the incompleteEvent. The -// namespace must either match this event client's namespace, or this event client must have been -// created with the "" namespace. -func (e *events) Patch(incompleteEvent *api.Event, data []byte) (*api.Event, error) { - result := &api.Event{} - err := e.client.Patch(api.StrategicMergePatchType). - NamespaceIfScoped(incompleteEvent.Namespace, len(incompleteEvent.Namespace) > 0). - Resource("events"). - Name(incompleteEvent.Name). - Body(data). - Do(). - Into(result) - return result, err -} - -// Search finds events about the specified object. The namespace of the -// object must match this event's client namespace unless the event client -// was made with the "" namespace. -func (e *events) Search(objOrRef runtime.Object) (*api.EventList, error) { - ref, err := api.GetReference(objOrRef) - if err != nil { - return nil, err - } - if e.ns != "" && ref.Namespace != e.ns { - return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.ns) - } - stringRefKind := string(ref.Kind) - var refKind *string - if stringRefKind != "" { - refKind = &stringRefKind - } - stringRefUID := string(ref.UID) - var refUID *string - if stringRefUID != "" { - refUID = &stringRefUID - } - fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID) - return e.List(api.ListOptions{FieldSelector: fieldSelector}) -} - -// Returns the appropriate field selector based on the API version being used to communicate with the server. -// The returned field selector can be used with List and Watch to filter desired events. -func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { - apiVersion := e.client.APIVersion().String() - field := fields.Set{} - if involvedObjectName != nil { - field[GetInvolvedObjectNameFieldLabel(apiVersion)] = *involvedObjectName - } - if involvedObjectNamespace != nil { - field["involvedObject.namespace"] = *involvedObjectNamespace - } - if involvedObjectKind != nil { - field["involvedObject.kind"] = *involvedObjectKind - } - if involvedObjectUID != nil { - field["involvedObject.uid"] = *involvedObjectUID - } - return field.AsSelector() -} - -// Returns the appropriate field label to use for name of the involved object as per the given API version. -func GetInvolvedObjectNameFieldLabel(version string) string { - return "involvedObject.name" -} - -// TODO: This is a temporary arrangement and will be removed once all clients are moved to use the clientset. 
-type EventSinkImpl struct { - Interface EventInterface -} - -func (e *EventSinkImpl) Create(event *api.Event) (*api.Event, error) { - return e.Interface.CreateWithEventNamespace(event) -} - -func (e *EventSinkImpl) Update(event *api.Event) (*api.Event, error) { - return e.Interface.UpdateWithEventNamespace(event) -} - -func (e *EventSinkImpl) Patch(event *api.Event, data []byte) (*api.Event, error) { - return e.Interface.Patch(event, data) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/generated_expansion.go deleted file mode 100644 index 546f8e7a1..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/generated_expansion.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -type ComponentStatusExpansion interface{} - -type EndpointsExpansion interface{} - -type LimitRangeExpansion interface{} - -type PersistentVolumeExpansion interface{} - -type PersistentVolumeClaimExpansion interface{} - -type PodTemplateExpansion interface{} - -type ReplicationControllerExpansion interface{} - -type ResourceQuotaExpansion interface{} - -type SecretExpansion interface{} - -type ServiceAccountExpansion interface{} - -type ConfigMapExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/limitrange.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/limitrange.go deleted file mode 100644 index 86cc9b07f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/limitrange.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - watch "k8s.io/kubernetes/pkg/watch" -) - -// LimitRangesGetter has a method to return a LimitRangeInterface. -// A group's client should implement this interface. -type LimitRangesGetter interface { - LimitRanges(namespace string) LimitRangeInterface -} - -// LimitRangeInterface has methods to work with LimitRange resources. 
-type LimitRangeInterface interface { - Create(*api.LimitRange) (*api.LimitRange, error) - Update(*api.LimitRange) (*api.LimitRange, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.LimitRange, error) - List(opts api.ListOptions) (*api.LimitRangeList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - LimitRangeExpansion -} - -// limitRanges implements LimitRangeInterface -type limitRanges struct { - client *CoreClient - ns string -} - -// newLimitRanges returns a LimitRanges -func newLimitRanges(c *CoreClient, namespace string) *limitRanges { - return &limitRanges{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *limitRanges) Create(limitRange *api.LimitRange) (result *api.LimitRange, err error) { - result = &api.LimitRange{} - err = c.client.Post(). - Namespace(c.ns). - Resource("limitranges"). - Body(limitRange). - Do(). - Into(result) - return -} - -// Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *limitRanges) Update(limitRange *api.LimitRange) (result *api.LimitRange, err error) { - result = &api.LimitRange{} - err = c.client.Put(). - Namespace(c.ns). - Resource("limitranges"). - Name(limitRange.Name). - Body(limitRange). - Do(). - Into(result) - return -} - -// Delete takes name of the limitRange and deletes it. Returns an error if one occurs. -func (c *limitRanges) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("limitranges"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *limitRanges) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. -func (c *limitRanges) Get(name string) (result *api.LimitRange, err error) { - result = &api.LimitRange{} - err = c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of LimitRanges that match those selectors. -func (c *limitRanges) List(opts api.ListOptions) (result *api.LimitRangeList, err error) { - result = &api.LimitRangeList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested limitRanges. -func (c *limitRanges) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, api.ParameterCodec). 
- Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/namespace.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/namespace.go deleted file mode 100644 index c1c8b4506..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/namespace.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - watch "k8s.io/kubernetes/pkg/watch" -) - -// NamespacesGetter has a method to return a NamespaceInterface. -// A group's client should implement this interface. -type NamespacesGetter interface { - Namespaces() NamespaceInterface -} - -// NamespaceInterface has methods to work with Namespace resources. -type NamespaceInterface interface { - Create(*api.Namespace) (*api.Namespace, error) - Update(*api.Namespace) (*api.Namespace, error) - UpdateStatus(*api.Namespace) (*api.Namespace, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.Namespace, error) - List(opts api.ListOptions) (*api.NamespaceList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - NamespaceExpansion -} - -// namespaces implements NamespaceInterface -type namespaces struct { - client *CoreClient -} - -// newNamespaces returns a Namespaces -func newNamespaces(c *CoreClient) *namespaces { - return &namespaces{ - client: c, - } -} - -// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Create(namespace *api.Namespace) (result *api.Namespace, err error) { - result = &api.Namespace{} - err = c.client.Post(). - Resource("namespaces"). - Body(namespace). - Do(). - Into(result) - return -} - -// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Update(namespace *api.Namespace) (result *api.Namespace, err error) { - result = &api.Namespace{} - err = c.client.Put(). - Resource("namespaces"). - Name(namespace.Name). - Body(namespace). - Do(). - Into(result) - return -} - -func (c *namespaces) UpdateStatus(namespace *api.Namespace) (result *api.Namespace, err error) { - result = &api.Namespace{} - err = c.client.Put(). - Resource("namespaces"). - Name(namespace.Name). - SubResource("status"). - Body(namespace). - Do(). - Into(result) - return -} - -// Delete takes name of the namespace and deletes it. Returns an error if one occurs. -func (c *namespaces) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Resource("namespaces"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *namespaces) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Resource("namespaces"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. -func (c *namespaces) Get(name string) (result *api.Namespace, err error) { - result = &api.Namespace{} - err = c.client.Get(). - Resource("namespaces"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Namespaces that match those selectors. -func (c *namespaces) List(opts api.ListOptions) (result *api.NamespaceList, err error) { - result = &api.NamespaceList{} - err = c.client.Get(). - Resource("namespaces"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested namespaces. -func (c *namespaces) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("namespaces"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/node.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/node.go deleted file mode 100644 index b0c53ef1d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/node.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - watch "k8s.io/kubernetes/pkg/watch" -) - -// NodesGetter has a method to return a NodeInterface. -// A group's client should implement this interface. -type NodesGetter interface { - Nodes() NodeInterface -} - -// NodeInterface has methods to work with Node resources. -type NodeInterface interface { - Create(*api.Node) (*api.Node, error) - Update(*api.Node) (*api.Node, error) - UpdateStatus(*api.Node) (*api.Node, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.Node, error) - List(opts api.ListOptions) (*api.NodeList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - NodeExpansion -} - -// nodes implements NodeInterface -type nodes struct { - client *CoreClient -} - -// newNodes returns a Nodes -func newNodes(c *CoreClient) *nodes { - return &nodes{ - client: c, - } -} - -// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. -func (c *nodes) Create(node *api.Node) (result *api.Node, err error) { - result = &api.Node{} - err = c.client.Post(). - Resource("nodes"). - Body(node). - Do(). 
- Into(result) - return -} - -// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. -func (c *nodes) Update(node *api.Node) (result *api.Node, err error) { - result = &api.Node{} - err = c.client.Put(). - Resource("nodes"). - Name(node.Name). - Body(node). - Do(). - Into(result) - return -} - -func (c *nodes) UpdateStatus(node *api.Node) (result *api.Node, err error) { - result = &api.Node{} - err = c.client.Put(). - Resource("nodes"). - Name(node.Name). - SubResource("status"). - Body(node). - Do(). - Into(result) - return -} - -// Delete takes name of the node and deletes it. Returns an error if one occurs. -func (c *nodes) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Resource("nodes"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *nodes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Resource("nodes"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the node, and returns the corresponding node object, and an error if there is any. -func (c *nodes) Get(name string) (result *api.Node, err error) { - result = &api.Node{} - err = c.client.Get(). - Resource("nodes"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Nodes that match those selectors. -func (c *nodes) List(opts api.ListOptions) (result *api.NodeList, err error) { - result = &api.NodeList{} - err = c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested nodes. -func (c *nodes) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("nodes"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/persistentvolume.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/persistentvolume.go deleted file mode 100644 index 6b4d0f017..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/persistentvolume.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - watch "k8s.io/kubernetes/pkg/watch" -) - -// PersistentVolumesGetter has a method to return a PersistentVolumeInterface. -// A group's client should implement this interface. -type PersistentVolumesGetter interface { - PersistentVolumes() PersistentVolumeInterface -} - -// PersistentVolumeInterface has methods to work with PersistentVolume resources. 
-type PersistentVolumeInterface interface { - Create(*api.PersistentVolume) (*api.PersistentVolume, error) - Update(*api.PersistentVolume) (*api.PersistentVolume, error) - UpdateStatus(*api.PersistentVolume) (*api.PersistentVolume, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.PersistentVolume, error) - List(opts api.ListOptions) (*api.PersistentVolumeList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - PersistentVolumeExpansion -} - -// persistentVolumes implements PersistentVolumeInterface -type persistentVolumes struct { - client *CoreClient -} - -// newPersistentVolumes returns a PersistentVolumes -func newPersistentVolumes(c *CoreClient) *persistentVolumes { - return &persistentVolumes{ - client: c, - } -} - -// Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *persistentVolumes) Create(persistentVolume *api.PersistentVolume) (result *api.PersistentVolume, err error) { - result = &api.PersistentVolume{} - err = c.client.Post(). - Resource("persistentvolumes"). - Body(persistentVolume). - Do(). - Into(result) - return -} - -// Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *persistentVolumes) Update(persistentVolume *api.PersistentVolume) (result *api.PersistentVolume, err error) { - result = &api.PersistentVolume{} - err = c.client.Put(). - Resource("persistentvolumes"). - Name(persistentVolume.Name). - Body(persistentVolume). - Do(). - Into(result) - return -} - -func (c *persistentVolumes) UpdateStatus(persistentVolume *api.PersistentVolume) (result *api.PersistentVolume, err error) { - result = &api.PersistentVolume{} - err = c.client.Put(). - Resource("persistentvolumes"). - Name(persistentVolume.Name). - SubResource("status"). - Body(persistentVolume). - Do(). - Into(result) - return -} - -// Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. -func (c *persistentVolumes) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Resource("persistentvolumes"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *persistentVolumes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Resource("persistentvolumes"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. -func (c *persistentVolumes) Get(name string) (result *api.PersistentVolume, err error) { - result = &api.PersistentVolume{} - err = c.client.Get(). - Resource("persistentvolumes"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. -func (c *persistentVolumes) List(opts api.ListOptions) (result *api.PersistentVolumeList, err error) { - result = &api.PersistentVolumeList{} - err = c.client.Get(). - Resource("persistentvolumes"). - VersionedParams(&opts, api.ParameterCodec). - Do(). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested persistentVolumes. -func (c *persistentVolumes) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("persistentvolumes"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/persistentvolumeclaim.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/persistentvolumeclaim.go deleted file mode 100644 index 2f5b17437..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/persistentvolumeclaim.go +++ /dev/null @@ -1,149 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - watch "k8s.io/kubernetes/pkg/watch" -) - -// PersistentVolumeClaimsGetter has a method to return a PersistentVolumeClaimInterface. -// A group's client should implement this interface. -type PersistentVolumeClaimsGetter interface { - PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface -} - -// PersistentVolumeClaimInterface has methods to work with PersistentVolumeClaim resources. -type PersistentVolumeClaimInterface interface { - Create(*api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) - Update(*api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) - UpdateStatus(*api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.PersistentVolumeClaim, error) - List(opts api.ListOptions) (*api.PersistentVolumeClaimList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - PersistentVolumeClaimExpansion -} - -// persistentVolumeClaims implements PersistentVolumeClaimInterface -type persistentVolumeClaims struct { - client *CoreClient - ns string -} - -// newPersistentVolumeClaims returns a PersistentVolumeClaims -func newPersistentVolumeClaims(c *CoreClient, namespace string) *persistentVolumeClaims { - return &persistentVolumeClaims{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *persistentVolumeClaims) Create(persistentVolumeClaim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { - result = &api.PersistentVolumeClaim{} - err = c.client.Post(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Body(persistentVolumeClaim). - Do(). - Into(result) - return -} - -// Update takes the representation of a persistentVolumeClaim and updates it. 
Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *persistentVolumeClaims) Update(persistentVolumeClaim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { - result = &api.PersistentVolumeClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(persistentVolumeClaim.Name). - Body(persistentVolumeClaim). - Do(). - Into(result) - return -} - -func (c *persistentVolumeClaims) UpdateStatus(persistentVolumeClaim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { - result = &api.PersistentVolumeClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(persistentVolumeClaim.Name). - SubResource("status"). - Body(persistentVolumeClaim). - Do(). - Into(result) - return -} - -// Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs. -func (c *persistentVolumeClaims) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *persistentVolumeClaims) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. -func (c *persistentVolumeClaims) Get(name string) (result *api.PersistentVolumeClaim, err error) { - result = &api.PersistentVolumeClaim{} - err = c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. -func (c *persistentVolumeClaims) List(opts api.ListOptions) (result *api.PersistentVolumeClaimList, err error) { - result = &api.PersistentVolumeClaimList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested persistentVolumeClaims. -func (c *persistentVolumeClaims) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/pod.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/pod.go deleted file mode 100644 index 1cdfc8e71..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/pod.go +++ /dev/null @@ -1,149 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - watch "k8s.io/kubernetes/pkg/watch" -) - -// PodsGetter has a method to return a PodInterface. -// A group's client should implement this interface. -type PodsGetter interface { - Pods(namespace string) PodInterface -} - -// PodInterface has methods to work with Pod resources. -type PodInterface interface { - Create(*api.Pod) (*api.Pod, error) - Update(*api.Pod) (*api.Pod, error) - UpdateStatus(*api.Pod) (*api.Pod, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.Pod, error) - List(opts api.ListOptions) (*api.PodList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - PodExpansion -} - -// pods implements PodInterface -type pods struct { - client *CoreClient - ns string -} - -// newPods returns a Pods -func newPods(c *CoreClient, namespace string) *pods { - return &pods{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) Create(pod *api.Pod) (result *api.Pod, err error) { - result = &api.Pod{} - err = c.client.Post(). - Namespace(c.ns). - Resource("pods"). - Body(pod). - Do(). - Into(result) - return -} - -// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) Update(pod *api.Pod) (result *api.Pod, err error) { - result = &api.Pod{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pods"). - Name(pod.Name). - Body(pod). - Do(). - Into(result) - return -} - -func (c *pods) UpdateStatus(pod *api.Pod) (result *api.Pod, err error) { - result = &api.Pod{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pods"). - Name(pod.Name). - SubResource("status"). - Body(pod). - Do(). - Into(result) - return -} - -// Delete takes name of the pod and deletes it. Returns an error if one occurs. -func (c *pods) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("pods"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *pods) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. -func (c *pods) Get(name string) (result *api.Pod, err error) { - result = &api.Pod{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Pods that match those selectors. -func (c *pods) List(opts api.ListOptions) (result *api.PodList, err error) { - result = &api.PodList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). 
- VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested pods. -func (c *pods) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/pod_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/pod_expansion.go deleted file mode 100644 index 8ebd29d30..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/pod_expansion.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/restclient" -) - -// The PodExpansion interface allows manually adding extra methods to the PodInterface. -type PodExpansion interface { - Bind(binding *api.Binding) error - GetLogs(name string, opts *api.PodLogOptions) *restclient.Request -} - -// Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored). -func (c *pods) Bind(binding *api.Binding) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).SubResource("binding").Body(binding).Do().Error() -} - -// Get constructs a request for getting the logs for a pod -func (c *pods) GetLogs(name string, opts *api.PodLogOptions) *restclient.Request { - return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, api.ParameterCodec) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/podtemplate.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/podtemplate.go deleted file mode 100644 index cccef29f7..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/podtemplate.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - watch "k8s.io/kubernetes/pkg/watch" -) - -// PodTemplatesGetter has a method to return a PodTemplateInterface. 
-// A group's client should implement this interface. -type PodTemplatesGetter interface { - PodTemplates(namespace string) PodTemplateInterface -} - -// PodTemplateInterface has methods to work with PodTemplate resources. -type PodTemplateInterface interface { - Create(*api.PodTemplate) (*api.PodTemplate, error) - Update(*api.PodTemplate) (*api.PodTemplate, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.PodTemplate, error) - List(opts api.ListOptions) (*api.PodTemplateList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - PodTemplateExpansion -} - -// podTemplates implements PodTemplateInterface -type podTemplates struct { - client *CoreClient - ns string -} - -// newPodTemplates returns a PodTemplates -func newPodTemplates(c *CoreClient, namespace string) *podTemplates { - return &podTemplates{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *podTemplates) Create(podTemplate *api.PodTemplate) (result *api.PodTemplate, err error) { - result = &api.PodTemplate{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podtemplates"). - Body(podTemplate). - Do(). - Into(result) - return -} - -// Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *podTemplates) Update(podTemplate *api.PodTemplate) (result *api.PodTemplate, err error) { - result = &api.PodTemplate{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podtemplates"). - Name(podTemplate.Name). - Body(podTemplate). - Do(). - Into(result) - return -} - -// Delete takes name of the podTemplate and deletes it. Returns an error if one occurs. -func (c *podTemplates) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podtemplates"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podTemplates) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. -func (c *podTemplates) Get(name string) (result *api.PodTemplate, err error) { - result = &api.PodTemplate{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodTemplates that match those selectors. -func (c *podTemplates) List(opts api.ListOptions) (result *api.PodTemplateList, err error) { - result = &api.PodTemplateList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podTemplates. -func (c *podTemplates) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, api.ParameterCodec). 
- Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/replicationcontroller.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/replicationcontroller.go deleted file mode 100644 index 6f9f06625..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/replicationcontroller.go +++ /dev/null @@ -1,149 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ReplicationControllersGetter has a method to return a ReplicationControllerInterface. -// A group's client should implement this interface. -type ReplicationControllersGetter interface { - ReplicationControllers(namespace string) ReplicationControllerInterface -} - -// ReplicationControllerInterface has methods to work with ReplicationController resources. -type ReplicationControllerInterface interface { - Create(*api.ReplicationController) (*api.ReplicationController, error) - Update(*api.ReplicationController) (*api.ReplicationController, error) - UpdateStatus(*api.ReplicationController) (*api.ReplicationController, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.ReplicationController, error) - List(opts api.ListOptions) (*api.ReplicationControllerList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - ReplicationControllerExpansion -} - -// replicationControllers implements ReplicationControllerInterface -type replicationControllers struct { - client *CoreClient - ns string -} - -// newReplicationControllers returns a ReplicationControllers -func newReplicationControllers(c *CoreClient, namespace string) *replicationControllers { - return &replicationControllers{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Create(replicationController *api.ReplicationController) (result *api.ReplicationController, err error) { - result = &api.ReplicationController{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Body(replicationController). - Do(). - Into(result) - return -} - -// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Update(replicationController *api.ReplicationController) (result *api.ReplicationController, err error) { - result = &api.ReplicationController{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(replicationController.Name). 
- Body(replicationController). - Do(). - Into(result) - return -} - -func (c *replicationControllers) UpdateStatus(replicationController *api.ReplicationController) (result *api.ReplicationController, err error) { - result = &api.ReplicationController{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(replicationController.Name). - SubResource("status"). - Body(replicationController). - Do(). - Into(result) - return -} - -// Delete takes name of the replicationController and deletes it. Returns an error if one occurs. -func (c *replicationControllers) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicationControllers) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. -func (c *replicationControllers) Get(name string) (result *api.ReplicationController, err error) { - result = &api.ReplicationController{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. -func (c *replicationControllers) List(opts api.ListOptions) (result *api.ReplicationControllerList, err error) { - result = &api.ReplicationControllerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicationControllers. -func (c *replicationControllers) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/resourcequota.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/resourcequota.go deleted file mode 100644 index 2d0da73fb..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/resourcequota.go +++ /dev/null @@ -1,149 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ResourceQuotasGetter has a method to return a ResourceQuotaInterface. -// A group's client should implement this interface. -type ResourceQuotasGetter interface { - ResourceQuotas(namespace string) ResourceQuotaInterface -} - -// ResourceQuotaInterface has methods to work with ResourceQuota resources. -type ResourceQuotaInterface interface { - Create(*api.ResourceQuota) (*api.ResourceQuota, error) - Update(*api.ResourceQuota) (*api.ResourceQuota, error) - UpdateStatus(*api.ResourceQuota) (*api.ResourceQuota, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.ResourceQuota, error) - List(opts api.ListOptions) (*api.ResourceQuotaList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - ResourceQuotaExpansion -} - -// resourceQuotas implements ResourceQuotaInterface -type resourceQuotas struct { - client *CoreClient - ns string -} - -// newResourceQuotas returns a ResourceQuotas -func newResourceQuotas(c *CoreClient, namespace string) *resourceQuotas { - return &resourceQuotas{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *resourceQuotas) Create(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { - result = &api.ResourceQuota{} - err = c.client.Post(). - Namespace(c.ns). - Resource("resourcequotas"). - Body(resourceQuota). - Do(). - Into(result) - return -} - -// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *resourceQuotas) Update(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { - result = &api.ResourceQuota{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(resourceQuota.Name). - Body(resourceQuota). - Do(). - Into(result) - return -} - -func (c *resourceQuotas) UpdateStatus(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { - result = &api.ResourceQuota{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(resourceQuota.Name). - SubResource("status"). - Body(resourceQuota). - Do(). - Into(result) - return -} - -// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. -func (c *resourceQuotas) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceQuotas) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. -func (c *resourceQuotas) Get(name string) (result *api.ResourceQuota, err error) { - result = &api.ResourceQuota{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(name). - Do(). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. -func (c *resourceQuotas) List(opts api.ListOptions) (result *api.ResourceQuotaList, err error) { - result = &api.ResourceQuotaList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceQuotas. -func (c *resourceQuotas) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/secret.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/secret.go deleted file mode 100644 index 101fbdb54..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/secret.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - watch "k8s.io/kubernetes/pkg/watch" -) - -// SecretsGetter has a method to return a SecretInterface. -// A group's client should implement this interface. -type SecretsGetter interface { - Secrets(namespace string) SecretInterface -} - -// SecretInterface has methods to work with Secret resources. -type SecretInterface interface { - Create(*api.Secret) (*api.Secret, error) - Update(*api.Secret) (*api.Secret, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.Secret, error) - List(opts api.ListOptions) (*api.SecretList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - SecretExpansion -} - -// secrets implements SecretInterface -type secrets struct { - client *CoreClient - ns string -} - -// newSecrets returns a Secrets -func newSecrets(c *CoreClient, namespace string) *secrets { - return &secrets{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Create(secret *api.Secret) (result *api.Secret, err error) { - result = &api.Secret{} - err = c.client.Post(). - Namespace(c.ns). - Resource("secrets"). - Body(secret). - Do(). - Into(result) - return -} - -// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Update(secret *api.Secret) (result *api.Secret, err error) { - result = &api.Secret{} - err = c.client.Put(). - Namespace(c.ns). - Resource("secrets"). - Name(secret.Name). - Body(secret). - Do(). 
- Into(result) - return -} - -// Delete takes name of the secret and deletes it. Returns an error if one occurs. -func (c *secrets) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("secrets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *secrets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. -func (c *secrets) Get(name string) (result *api.Secret, err error) { - result = &api.Secret{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Secrets that match those selectors. -func (c *secrets) List(opts api.ListOptions) (result *api.SecretList, err error) { - result = &api.SecretList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested secrets. -func (c *secrets) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/service.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/service.go deleted file mode 100644 index 006f601c2..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/service.go +++ /dev/null @@ -1,149 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ServicesGetter has a method to return a ServiceInterface. -// A group's client should implement this interface. -type ServicesGetter interface { - Services(namespace string) ServiceInterface -} - -// ServiceInterface has methods to work with Service resources. 
-type ServiceInterface interface { - Create(*api.Service) (*api.Service, error) - Update(*api.Service) (*api.Service, error) - UpdateStatus(*api.Service) (*api.Service, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.Service, error) - List(opts api.ListOptions) (*api.ServiceList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - ServiceExpansion -} - -// services implements ServiceInterface -type services struct { - client *CoreClient - ns string -} - -// newServices returns a Services -func newServices(c *CoreClient, namespace string) *services { - return &services{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Create(service *api.Service) (result *api.Service, err error) { - result = &api.Service{} - err = c.client.Post(). - Namespace(c.ns). - Resource("services"). - Body(service). - Do(). - Into(result) - return -} - -// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Update(service *api.Service) (result *api.Service, err error) { - result = &api.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - Body(service). - Do(). - Into(result) - return -} - -func (c *services) UpdateStatus(service *api.Service) (result *api.Service, err error) { - result = &api.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - SubResource("status"). - Body(service). - Do(). - Into(result) - return -} - -// Delete takes name of the service and deletes it. Returns an error if one occurs. -func (c *services) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("services"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *services) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the service, and returns the corresponding service object, and an error if there is any. -func (c *services) Get(name string) (result *api.Service, err error) { - result = &api.Service{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Services that match those selectors. -func (c *services) List(opts api.ListOptions) (result *api.ServiceList, err error) { - result = &api.ServiceList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested services. -func (c *services) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, api.ParameterCodec). 
- Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/serviceaccount.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/serviceaccount.go deleted file mode 100644 index 65f7df263..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned/serviceaccount.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ServiceAccountsGetter has a method to return a ServiceAccountInterface. -// A group's client should implement this interface. -type ServiceAccountsGetter interface { - ServiceAccounts(namespace string) ServiceAccountInterface -} - -// ServiceAccountInterface has methods to work with ServiceAccount resources. -type ServiceAccountInterface interface { - Create(*api.ServiceAccount) (*api.ServiceAccount, error) - Update(*api.ServiceAccount) (*api.ServiceAccount, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*api.ServiceAccount, error) - List(opts api.ListOptions) (*api.ServiceAccountList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - ServiceAccountExpansion -} - -// serviceAccounts implements ServiceAccountInterface -type serviceAccounts struct { - client *CoreClient - ns string -} - -// newServiceAccounts returns a ServiceAccounts -func newServiceAccounts(c *CoreClient, namespace string) *serviceAccounts { - return &serviceAccounts{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *serviceAccounts) Create(serviceAccount *api.ServiceAccount) (result *api.ServiceAccount, err error) { - result = &api.ServiceAccount{} - err = c.client.Post(). - Namespace(c.ns). - Resource("serviceaccounts"). - Body(serviceAccount). - Do(). - Into(result) - return -} - -// Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *serviceAccounts) Update(serviceAccount *api.ServiceAccount) (result *api.ServiceAccount, err error) { - result = &api.ServiceAccount{} - err = c.client.Put(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(serviceAccount.Name). - Body(serviceAccount). - Do(). - Into(result) - return -} - -// Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. -func (c *serviceAccounts) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(name). - Body(options). - Do(). 
- Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *serviceAccounts) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. -func (c *serviceAccounts) Get(name string) (result *api.ServiceAccount, err error) { - result = &api.ServiceAccount{} - err = c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. -func (c *serviceAccounts) List(opts api.ListOptions) (result *api.ServiceAccountList, err error) { - result = &api.ServiceAccountList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested serviceAccounts. -func (c *serviceAccounts) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/daemonset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/daemonset.go deleted file mode 100644 index 96dae5835..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/daemonset.go +++ /dev/null @@ -1,150 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - watch "k8s.io/kubernetes/pkg/watch" -) - -// DaemonSetsGetter has a method to return a DaemonSetInterface. -// A group's client should implement this interface. -type DaemonSetsGetter interface { - DaemonSets(namespace string) DaemonSetInterface -} - -// DaemonSetInterface has methods to work with DaemonSet resources. 
-type DaemonSetInterface interface { - Create(*extensions.DaemonSet) (*extensions.DaemonSet, error) - Update(*extensions.DaemonSet) (*extensions.DaemonSet, error) - UpdateStatus(*extensions.DaemonSet) (*extensions.DaemonSet, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*extensions.DaemonSet, error) - List(opts api.ListOptions) (*extensions.DaemonSetList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - DaemonSetExpansion -} - -// daemonSets implements DaemonSetInterface -type daemonSets struct { - client *ExtensionsClient - ns string -} - -// newDaemonSets returns a DaemonSets -func newDaemonSets(c *ExtensionsClient, namespace string) *daemonSets { - return &daemonSets{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("daemonsets"). - Body(daemonSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - Body(daemonSet). - Do(). - Into(result) - return -} - -func (c *daemonSets) UpdateStatus(daemonSet *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - SubResource("status"). - Body(daemonSet). - Do(). - Into(result) - return -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(name string) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *daemonSets) List(opts api.ListOptions) (result *extensions.DaemonSetList, err error) { - result = &extensions.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. 
-func (c *daemonSets) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/deployment.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/deployment.go deleted file mode 100644 index 3b995c021..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/deployment.go +++ /dev/null @@ -1,150 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - watch "k8s.io/kubernetes/pkg/watch" -) - -// DeploymentsGetter has a method to return a DeploymentInterface. -// A group's client should implement this interface. -type DeploymentsGetter interface { - Deployments(namespace string) DeploymentInterface -} - -// DeploymentInterface has methods to work with Deployment resources. -type DeploymentInterface interface { - Create(*extensions.Deployment) (*extensions.Deployment, error) - Update(*extensions.Deployment) (*extensions.Deployment, error) - UpdateStatus(*extensions.Deployment) (*extensions.Deployment, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*extensions.Deployment, error) - List(opts api.ListOptions) (*extensions.DeploymentList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - DeploymentExpansion -} - -// deployments implements DeploymentInterface -type deployments struct { - client *ExtensionsClient - ns string -} - -// newDeployments returns a Deployments -func newDeployments(c *ExtensionsClient, namespace string) *deployments { - return &deployments{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - Body(deployment). - Do(). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - Body(deployment). - Do(). 
- Into(result) - return -} - -func (c *deployments) UpdateStatus(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - Body(deployment). - Do(). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(name string) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(opts api.ListOptions) (result *extensions.DeploymentList, err error) { - result = &extensions.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/doc.go deleted file mode 100644 index 47517b642..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// This package has the automatically generated typed clients. 
-package unversioned diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/extensions_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/extensions_client.go deleted file mode 100644 index 9b9f4749a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/extensions_client.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - registered "k8s.io/kubernetes/pkg/apimachinery/registered" - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -type ExtensionsInterface interface { - GetRESTClient() *restclient.RESTClient - DaemonSetsGetter - DeploymentsGetter - IngressesGetter - PodSecurityPoliciesGetter - ReplicaSetsGetter - ScalesGetter - ThirdPartyResourcesGetter -} - -// ExtensionsClient is used to interact with features provided by the Extensions group. -type ExtensionsClient struct { - *restclient.RESTClient -} - -func (c *ExtensionsClient) DaemonSets(namespace string) DaemonSetInterface { - return newDaemonSets(c, namespace) -} - -func (c *ExtensionsClient) Deployments(namespace string) DeploymentInterface { - return newDeployments(c, namespace) -} - -func (c *ExtensionsClient) Ingresses(namespace string) IngressInterface { - return newIngresses(c, namespace) -} - -func (c *ExtensionsClient) PodSecurityPolicies() PodSecurityPolicyInterface { - return newPodSecurityPolicies(c) -} - -func (c *ExtensionsClient) ReplicaSets(namespace string) ReplicaSetInterface { - return newReplicaSets(c, namespace) -} - -func (c *ExtensionsClient) Scales(namespace string) ScaleInterface { - return newScales(c, namespace) -} - -func (c *ExtensionsClient) ThirdPartyResources() ThirdPartyResourceInterface { - return newThirdPartyResources(c) -} - -// NewForConfig creates a new ExtensionsClient for the given config. -func NewForConfig(c *restclient.Config) (*ExtensionsClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &ExtensionsClient{client}, nil -} - -// NewForConfigOrDie creates a new ExtensionsClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *ExtensionsClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new ExtensionsClient for the given RESTClient. 
-func New(c *restclient.RESTClient) *ExtensionsClient { - return &ExtensionsClient{c} -} - -func setConfigDefaults(config *restclient.Config) error { - // if extensions group is not registered, return an error - g, err := registered.Group("extensions") - if err != nil { - return err - } - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - // TODO: Unconditionally set the config.Version, until we fix the config. - //if config.Version == "" { - copyGroupVersion := g.GroupVersion - config.GroupVersion = &copyGroupVersion - //} - - config.NegotiatedSerializer = api.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// GetRESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *ExtensionsClient) GetRESTClient() *restclient.RESTClient { - if c == nil { - return nil - } - return c.RESTClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/generated_expansion.go deleted file mode 100644 index 7a1999454..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/generated_expansion.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -type DaemonSetExpansion interface{} - -type HorizontalPodAutoscalerExpansion interface{} - -type IngressExpansion interface{} - -type JobExpansion interface{} - -type PodSecurityPolicyExpansion interface{} - -type ThirdPartyResourceExpansion interface{} - -type ReplicaSetExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/ingress.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/ingress.go deleted file mode 100644 index a9d950eae..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/ingress.go +++ /dev/null @@ -1,150 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - watch "k8s.io/kubernetes/pkg/watch" -) - -// IngressesGetter has a method to return a IngressInterface. -// A group's client should implement this interface. -type IngressesGetter interface { - Ingresses(namespace string) IngressInterface -} - -// IngressInterface has methods to work with Ingress resources. -type IngressInterface interface { - Create(*extensions.Ingress) (*extensions.Ingress, error) - Update(*extensions.Ingress) (*extensions.Ingress, error) - UpdateStatus(*extensions.Ingress) (*extensions.Ingress, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*extensions.Ingress, error) - List(opts api.ListOptions) (*extensions.IngressList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - IngressExpansion -} - -// ingresses implements IngressInterface -type ingresses struct { - client *ExtensionsClient - ns string -} - -// newIngresses returns a Ingresses -func newIngresses(c *ExtensionsClient, namespace string) *ingresses { - return &ingresses{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ingresses"). - Body(ingress). - Do(). - Into(result) - return -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - Body(ingress). - Do(). - Into(result) - return -} - -func (c *ingresses) UpdateStatus(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - SubResource("status"). - Body(ingress). - Do(). - Into(result) - return -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(name string) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. 
-func (c *ingresses) List(opts api.ListOptions) (result *extensions.IngressList, err error) { - result = &extensions.IngressList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/job.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/job.go deleted file mode 100644 index 4ae3f6cac..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/job.go +++ /dev/null @@ -1,150 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - batch "k8s.io/kubernetes/pkg/apis/batch" - watch "k8s.io/kubernetes/pkg/watch" -) - -// JobsGetter has a method to return a JobInterface. -// A group's client should implement this interface. -type JobsGetter interface { - Jobs(namespace string) JobInterface -} - -// JobInterface has methods to work with Job resources. -type JobInterface interface { - Create(*batch.Job) (*batch.Job, error) - Update(*batch.Job) (*batch.Job, error) - UpdateStatus(*batch.Job) (*batch.Job, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*batch.Job, error) - List(opts api.ListOptions) (*batch.JobList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - JobExpansion -} - -// jobs implements JobInterface -type jobs struct { - client *ExtensionsClient - ns string -} - -// newJobs returns a Jobs -func newJobs(c *ExtensionsClient, namespace string) *jobs { - return &jobs{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Create(job *batch.Job) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.client.Post(). - Namespace(c.ns). - Resource("jobs"). - Body(job). - Do(). - Into(result) - return -} - -// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Update(job *batch.Job) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.client.Put(). - Namespace(c.ns). - Resource("jobs"). - Name(job.Name). - Body(job). - Do(). 
- Into(result) - return -} - -func (c *jobs) UpdateStatus(job *batch.Job) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.client.Put(). - Namespace(c.ns). - Resource("jobs"). - Name(job.Name). - SubResource("status"). - Body(job). - Do(). - Into(result) - return -} - -// Delete takes name of the job and deletes it. Returns an error if one occurs. -func (c *jobs) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("jobs"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *jobs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the job, and returns the corresponding job object, and an error if there is any. -func (c *jobs) Get(name string) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Jobs that match those selectors. -func (c *jobs) List(opts api.ListOptions) (result *batch.JobList, err error) { - result = &batch.JobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested jobs. -func (c *jobs) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/podsecuritypolicy.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/podsecuritypolicy.go deleted file mode 100644 index 06a7908f4..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/podsecuritypolicy.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - watch "k8s.io/kubernetes/pkg/watch" -) - -// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. -// A group's client should implement this interface. -type PodSecurityPoliciesGetter interface { - PodSecurityPolicies() PodSecurityPolicyInterface -} - -// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. 
-type PodSecurityPolicyInterface interface { - Create(*extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) - Update(*extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*extensions.PodSecurityPolicy, error) - List(opts api.ListOptions) (*extensions.PodSecurityPolicyList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - PodSecurityPolicyExpansion -} - -// podSecurityPolicies implements PodSecurityPolicyInterface -type podSecurityPolicies struct { - client *ExtensionsClient -} - -// newPodSecurityPolicies returns a PodSecurityPolicies -func newPodSecurityPolicies(c *ExtensionsClient) *podSecurityPolicies { - return &podSecurityPolicies{ - client: c, - } -} - -// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Create(podSecurityPolicy *extensions.PodSecurityPolicy) (result *extensions.PodSecurityPolicy, err error) { - result = &extensions.PodSecurityPolicy{} - err = c.client.Post(). - Resource("podsecuritypolicies"). - Body(podSecurityPolicy). - Do(). - Into(result) - return -} - -// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Update(podSecurityPolicy *extensions.PodSecurityPolicy) (result *extensions.PodSecurityPolicy, err error) { - result = &extensions.PodSecurityPolicy{} - err = c.client.Put(). - Resource("podsecuritypolicies"). - Name(podSecurityPolicy.Name). - Body(podSecurityPolicy). - Do(). - Into(result) - return -} - -// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. -func (c *podSecurityPolicies) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Resource("podsecuritypolicies"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podSecurityPolicies) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Resource("podsecuritypolicies"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. -func (c *podSecurityPolicies) Get(name string) (result *extensions.PodSecurityPolicy, err error) { - result = &extensions.PodSecurityPolicy{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *podSecurityPolicies) List(opts api.ListOptions) (result *extensions.PodSecurityPolicyList, err error) { - result = &extensions.PodSecurityPolicyList{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podSecurityPolicies. -func (c *podSecurityPolicies) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("podsecuritypolicies"). 
- VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/replicaset.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/replicaset.go deleted file mode 100644 index 6257fd898..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/replicaset.go +++ /dev/null @@ -1,150 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ReplicaSetsGetter has a method to return a ReplicaSetInterface. -// A group's client should implement this interface. -type ReplicaSetsGetter interface { - ReplicaSets(namespace string) ReplicaSetInterface -} - -// ReplicaSetInterface has methods to work with ReplicaSet resources. -type ReplicaSetInterface interface { - Create(*extensions.ReplicaSet) (*extensions.ReplicaSet, error) - Update(*extensions.ReplicaSet) (*extensions.ReplicaSet, error) - UpdateStatus(*extensions.ReplicaSet) (*extensions.ReplicaSet, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*extensions.ReplicaSet, error) - List(opts api.ListOptions) (*extensions.ReplicaSetList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - ReplicaSetExpansion -} - -// replicaSets implements ReplicaSetInterface -type replicaSets struct { - client *ExtensionsClient - ns string -} - -// newReplicaSets returns a ReplicaSets -func newReplicaSets(c *ExtensionsClient, namespace string) *replicaSets { - return &replicaSets{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicasets"). - Body(replicaSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - Body(replicaSet). - Do(). - Into(result) - return -} - -func (c *replicaSets) UpdateStatus(replicaSet *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). 
- Name(replicaSet.Name). - SubResource("status"). - Body(replicaSet). - Do(). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicaSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *replicaSets) Get(name string) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(opts api.ListOptions) (result *extensions.ReplicaSetList, err error) { - result = &extensions.ReplicaSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *replicaSets) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/scale.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/scale.go deleted file mode 100644 index 7e54bc347..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/scale.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -// ScalesGetter has a method to return a ScaleInterface. -// A group's client should implement this interface. -type ScalesGetter interface { - Scales(namespace string) ScaleInterface -} - -// ScaleInterface has methods to work with Scale resources. 
-type ScaleInterface interface { - ScaleExpansion -} - -// scales implements ScaleInterface -type scales struct { - client *ExtensionsClient - ns string -} - -// newScales returns a Scales -func newScales(c *ExtensionsClient, namespace string) *scales { - return &scales{ - client: c, - ns: namespace, - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/scale_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/scale_expansion.go deleted file mode 100644 index 61a77f260..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/scale_expansion.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/extensions" -) - -// The ScaleExpansion interface allows manually adding extra methods to the ScaleInterface. -type ScaleExpansion interface { - Get(kind string, name string) (*extensions.Scale, error) - Update(kind string, scale *extensions.Scale) (*extensions.Scale, error) -} - -// Get takes the reference to scale subresource and returns the subresource or error, if one occurs. -func (c *scales) Get(kind string, name string) (result *extensions.Scale, err error) { - result = &extensions.Scale{} - - // TODO this method needs to take a proper unambiguous kind - fullyQualifiedKind := unversioned.GroupVersionKind{Kind: kind} - resource, _ := meta.KindToResource(fullyQualifiedKind) - - err = c.client.Get(). - Namespace(c.ns). - Resource(resource.Resource). - Name(name). - SubResource("scale"). - Do(). - Into(result) - return -} - -func (c *scales) Update(kind string, scale *extensions.Scale) (result *extensions.Scale, err error) { - result = &extensions.Scale{} - - // TODO this method needs to take a proper unambiguous kind - fullyQualifiedKind := unversioned.GroupVersionKind{Kind: kind} - resource, _ := meta.KindToResource(fullyQualifiedKind) - - err = c.client.Put(). - Namespace(scale.Namespace). - Resource(resource.Resource). - Name(scale.Name). - SubResource("scale"). - Body(scale). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/thirdpartyresource.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/thirdpartyresource.go deleted file mode 100644 index a64ffb62c..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned/thirdpartyresource.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - extensions "k8s.io/kubernetes/pkg/apis/extensions" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ThirdPartyResourcesGetter has a method to return a ThirdPartyResourceInterface. -// A group's client should implement this interface. -type ThirdPartyResourcesGetter interface { - ThirdPartyResources() ThirdPartyResourceInterface -} - -// ThirdPartyResourceInterface has methods to work with ThirdPartyResource resources. -type ThirdPartyResourceInterface interface { - Create(*extensions.ThirdPartyResource) (*extensions.ThirdPartyResource, error) - Update(*extensions.ThirdPartyResource) (*extensions.ThirdPartyResource, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*extensions.ThirdPartyResource, error) - List(opts api.ListOptions) (*extensions.ThirdPartyResourceList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - ThirdPartyResourceExpansion -} - -// thirdPartyResources implements ThirdPartyResourceInterface -type thirdPartyResources struct { - client *ExtensionsClient -} - -// newThirdPartyResources returns a ThirdPartyResources -func newThirdPartyResources(c *ExtensionsClient) *thirdPartyResources { - return &thirdPartyResources{ - client: c, - } -} - -// Create takes the representation of a thirdPartyResource and creates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. -func (c *thirdPartyResources) Create(thirdPartyResource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { - result = &extensions.ThirdPartyResource{} - err = c.client.Post(). - Resource("thirdpartyresources"). - Body(thirdPartyResource). - Do(). - Into(result) - return -} - -// Update takes the representation of a thirdPartyResource and updates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. -func (c *thirdPartyResources) Update(thirdPartyResource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { - result = &extensions.ThirdPartyResource{} - err = c.client.Put(). - Resource("thirdpartyresources"). - Name(thirdPartyResource.Name). - Body(thirdPartyResource). - Do(). - Into(result) - return -} - -// Delete takes name of the thirdPartyResource and deletes it. Returns an error if one occurs. -func (c *thirdPartyResources) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Resource("thirdpartyresources"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *thirdPartyResources) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Resource("thirdpartyresources"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the thirdPartyResource, and returns the corresponding thirdPartyResource object, and an error if there is any. 
-func (c *thirdPartyResources) Get(name string) (result *extensions.ThirdPartyResource, err error) { - result = &extensions.ThirdPartyResource{} - err = c.client.Get(). - Resource("thirdpartyresources"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ThirdPartyResources that match those selectors. -func (c *thirdPartyResources) List(opts api.ListOptions) (result *extensions.ThirdPartyResourceList, err error) { - result = &extensions.ThirdPartyResourceList{} - err = c.client.Get(). - Resource("thirdpartyresources"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested thirdPartyResources. -func (c *thirdPartyResources) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("thirdpartyresources"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/clusterrole.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/clusterrole.go deleted file mode 100644 index 5d0b3912a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/clusterrole.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - rbac "k8s.io/kubernetes/pkg/apis/rbac" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ClusterRolesGetter has a method to return a ClusterRoleInterface. -// A group's client should implement this interface. -type ClusterRolesGetter interface { - ClusterRoles() ClusterRoleInterface -} - -// ClusterRoleInterface has methods to work with ClusterRole resources. -type ClusterRoleInterface interface { - Create(*rbac.ClusterRole) (*rbac.ClusterRole, error) - Update(*rbac.ClusterRole) (*rbac.ClusterRole, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*rbac.ClusterRole, error) - List(opts api.ListOptions) (*rbac.ClusterRoleList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - ClusterRoleExpansion -} - -// clusterRoles implements ClusterRoleInterface -type clusterRoles struct { - client *RbacClient -} - -// newClusterRoles returns a ClusterRoles -func newClusterRoles(c *RbacClient) *clusterRoles { - return &clusterRoles{ - client: c, - } -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Create(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) { - result = &rbac.ClusterRole{} - err = c.client.Post(). - Resource("clusterroles"). - Body(clusterRole). - Do(). 
- Into(result) - return -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Update(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) { - result = &rbac.ClusterRole{} - err = c.client.Put(). - Resource("clusterroles"). - Name(clusterRole.Name). - Body(clusterRole). - Do(). - Into(result) - return -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *clusterRoles) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoles) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(name string) (result *rbac.ClusterRole, err error) { - result = &rbac.ClusterRole{} - err = c.client.Get(). - Resource("clusterroles"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *clusterRoles) List(opts api.ListOptions) (result *rbac.ClusterRoleList, err error) { - result = &rbac.ClusterRoleList{} - err = c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *clusterRoles) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("clusterroles"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/clusterrolebinding.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/clusterrolebinding.go deleted file mode 100644 index f2102592a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/clusterrolebinding.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - rbac "k8s.io/kubernetes/pkg/apis/rbac" - watch "k8s.io/kubernetes/pkg/watch" -) - -// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. -// A group's client should implement this interface. 
-type ClusterRoleBindingsGetter interface { - ClusterRoleBindings() ClusterRoleBindingInterface -} - -// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. -type ClusterRoleBindingInterface interface { - Create(*rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, error) - Update(*rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*rbac.ClusterRoleBinding, error) - List(opts api.ListOptions) (*rbac.ClusterRoleBindingList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - ClusterRoleBindingExpansion -} - -// clusterRoleBindings implements ClusterRoleBindingInterface -type clusterRoleBindings struct { - client *RbacClient -} - -// newClusterRoleBindings returns a ClusterRoleBindings -func newClusterRoleBindings(c *RbacClient) *clusterRoleBindings { - return &clusterRoleBindings{ - client: c, - } -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Create(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) { - result = &rbac.ClusterRoleBinding{} - err = c.client.Post(). - Resource("clusterrolebindings"). - Body(clusterRoleBinding). - Do(). - Into(result) - return -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Update(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) { - result = &rbac.ClusterRoleBinding{} - err = c.client.Put(). - Resource("clusterrolebindings"). - Name(clusterRoleBinding.Name). - Body(clusterRoleBinding). - Do(). - Into(result) - return -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *clusterRoleBindings) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterrolebindings"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoleBindings) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Resource("clusterrolebindings"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(name string) (result *rbac.ClusterRoleBinding, err error) { - result = &rbac.ClusterRoleBinding{} - err = c.client.Get(). - Resource("clusterrolebindings"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(opts api.ListOptions) (result *rbac.ClusterRoleBindingList, err error) { - result = &rbac.ClusterRoleBindingList{} - err = c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. 
-func (c *clusterRoleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("clusterrolebindings"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/doc.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/doc.go deleted file mode 100644 index 47517b642..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This package is generated by client-gen with the default arguments. - -// This package has the automatically generated typed clients. -package unversioned diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/generated_expansion.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/generated_expansion.go deleted file mode 100644 index a3b9c689d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/generated_expansion.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -type ClusterRoleExpansion interface{} - -type ClusterRoleBindingExpansion interface{} - -type RoleExpansion interface{} - -type RoleBindingExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/rbac_client.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/rbac_client.go deleted file mode 100644 index 4d67337cd..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/rbac_client.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - registered "k8s.io/kubernetes/pkg/apimachinery/registered" - restclient "k8s.io/kubernetes/pkg/client/restclient" -) - -type RbacInterface interface { - GetRESTClient() *restclient.RESTClient - ClusterRolesGetter - ClusterRoleBindingsGetter - RolesGetter - RoleBindingsGetter -} - -// RbacClient is used to interact with features provided by the Rbac group. -type RbacClient struct { - *restclient.RESTClient -} - -func (c *RbacClient) ClusterRoles() ClusterRoleInterface { - return newClusterRoles(c) -} - -func (c *RbacClient) ClusterRoleBindings() ClusterRoleBindingInterface { - return newClusterRoleBindings(c) -} - -func (c *RbacClient) Roles(namespace string) RoleInterface { - return newRoles(c, namespace) -} - -func (c *RbacClient) RoleBindings(namespace string) RoleBindingInterface { - return newRoleBindings(c, namespace) -} - -// NewForConfig creates a new RbacClient for the given config. -func NewForConfig(c *restclient.Config) (*RbacClient, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &RbacClient{client}, nil -} - -// NewForConfigOrDie creates a new RbacClient for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *restclient.Config) *RbacClient { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new RbacClient for the given RESTClient. -func New(c *restclient.RESTClient) *RbacClient { - return &RbacClient{c} -} - -func setConfigDefaults(config *restclient.Config) error { - // if rbac group is not registered, return an error - g, err := registered.Group("rbac.authorization.k8s.io") - if err != nil { - return err - } - config.APIPath = "/apis" - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - // TODO: Unconditionally set the config.Version, until we fix the config. - //if config.Version == "" { - copyGroupVersion := g.GroupVersion - config.GroupVersion = ©GroupVersion - //} - - config.NegotiatedSerializer = api.Codecs - - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// GetRESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *RbacClient) GetRESTClient() *restclient.RESTClient { - if c == nil { - return nil - } - return c.RESTClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/role.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/role.go deleted file mode 100644 index 68e7ebe93..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/role.go +++ /dev/null @@ -1,136 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - rbac "k8s.io/kubernetes/pkg/apis/rbac" - watch "k8s.io/kubernetes/pkg/watch" -) - -// RolesGetter has a method to return a RoleInterface. -// A group's client should implement this interface. -type RolesGetter interface { - Roles(namespace string) RoleInterface -} - -// RoleInterface has methods to work with Role resources. -type RoleInterface interface { - Create(*rbac.Role) (*rbac.Role, error) - Update(*rbac.Role) (*rbac.Role, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*rbac.Role, error) - List(opts api.ListOptions) (*rbac.RoleList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - RoleExpansion -} - -// roles implements RoleInterface -type roles struct { - client *RbacClient - ns string -} - -// newRoles returns a Roles -func newRoles(c *RbacClient, namespace string) *roles { - return &roles{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Create(role *rbac.Role) (result *rbac.Role, err error) { - result = &rbac.Role{} - err = c.client.Post(). - Namespace(c.ns). - Resource("roles"). - Body(role). - Do(). - Into(result) - return -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Update(role *rbac.Role) (result *rbac.Role, err error) { - result = &rbac.Role{} - err = c.client.Put(). - Namespace(c.ns). - Resource("roles"). - Name(role.Name). - Body(role). - Do(). - Into(result) - return -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *roles) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roles) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *roles) Get(name string) (result *rbac.Role, err error) { - result = &rbac.Role{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *roles) List(opts api.ListOptions) (result *rbac.RoleList, err error) { - result = &rbac.RoleList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, api.ParameterCodec). - Do(). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *roles) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/rolebinding.go b/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/rolebinding.go deleted file mode 100644 index c73318c97..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/unversioned/rolebinding.go +++ /dev/null @@ -1,136 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - api "k8s.io/kubernetes/pkg/api" - rbac "k8s.io/kubernetes/pkg/apis/rbac" - watch "k8s.io/kubernetes/pkg/watch" -) - -// RoleBindingsGetter has a method to return a RoleBindingInterface. -// A group's client should implement this interface. -type RoleBindingsGetter interface { - RoleBindings(namespace string) RoleBindingInterface -} - -// RoleBindingInterface has methods to work with RoleBinding resources. -type RoleBindingInterface interface { - Create(*rbac.RoleBinding) (*rbac.RoleBinding, error) - Update(*rbac.RoleBinding) (*rbac.RoleBinding, error) - Delete(name string, options *api.DeleteOptions) error - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - Get(name string) (*rbac.RoleBinding, error) - List(opts api.ListOptions) (*rbac.RoleBindingList, error) - Watch(opts api.ListOptions) (watch.Interface, error) - RoleBindingExpansion -} - -// roleBindings implements RoleBindingInterface -type roleBindings struct { - client *RbacClient - ns string -} - -// newRoleBindings returns a RoleBindings -func newRoleBindings(c *RbacClient, namespace string) *roleBindings { - return &roleBindings{ - client: c, - ns: namespace, - } -} - -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Create(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) { - result = &rbac.RoleBinding{} - err = c.client.Post(). - Namespace(c.ns). - Resource("rolebindings"). - Body(roleBinding). - Do(). - Into(result) - return -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Update(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) { - result = &rbac.RoleBinding{} - err = c.client.Put(). - Namespace(c.ns). - Resource("rolebindings"). - Name(roleBinding.Name). - Body(roleBinding). - Do(). - Into(result) - return -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. 
-func (c *roleBindings) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roleBindings) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *roleBindings) Get(name string) (result *rbac.RoleBinding, err error) { - result = &rbac.RoleBinding{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *roleBindings) List(opts api.ListOptions) (result *rbac.RoleBindingList, err error) { - result = &rbac.RoleBindingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *roleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/metrics/metrics.go b/vendor/k8s.io/kubernetes/pkg/client/metrics/metrics.go deleted file mode 100644 index efa66fc8b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/metrics/metrics.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package metrics provides utilities for registering client metrics to Prometheus. -package metrics - -import ( - "sync" - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -const restClientSubsystem = "rest_client" - -var ( - // RequestLatency is a Prometheus Summary metric type partitioned by - // "verb" and "url" labels. It is used for the rest client latency metrics. - RequestLatency = prometheus.NewSummaryVec( - prometheus.SummaryOpts{ - Subsystem: restClientSubsystem, - Name: "request_latency_microseconds", - Help: "Request latency in microseconds. Broken down by verb and URL", - MaxAge: time.Hour, - }, - []string{"verb", "url"}, - ) - - RequestResult = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Subsystem: restClientSubsystem, - Name: "request_status_codes", - Help: "Number of http requests, partitioned by metadata", - }, - []string{"code", "method", "host"}, - ) -) - -var registerMetrics sync.Once - -// Register registers all metrics to Prometheus with -// respect to the RequestLatency. -func Register() { - // Register the metrics. 
- registerMetrics.Do(func() { - prometheus.MustRegister(RequestLatency) - prometheus.MustRegister(RequestResult) - }) -} - -// Calculates the time since the specified start in microseconds. -func SinceInMicroseconds(start time.Time) float64 { - return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/record/doc.go b/vendor/k8s.io/kubernetes/pkg/client/record/doc.go deleted file mode 100644 index d95515432..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/record/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package record has all client logic for recording and reporting events. -package record diff --git a/vendor/k8s.io/kubernetes/pkg/client/record/event.go b/vendor/k8s.io/kubernetes/pkg/client/record/event.go deleted file mode 100644 index 47cbe3eca..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/record/event.go +++ /dev/null @@ -1,315 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package record - -import ( - "fmt" - "math/rand" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" - utilruntime "k8s.io/kubernetes/pkg/util/runtime" - "k8s.io/kubernetes/pkg/watch" - - "github.com/golang/glog" - "net/http" -) - -const maxTriesPerEvent = 12 - -var defaultSleepDuration = 10 * time.Second - -const maxQueuedEvents = 1000 - -// EventSink knows how to store events (client.Client implements it.) -// EventSink must respect the namespace that will be embedded in 'event'. -// It is assumed that EventSink will return the same sorts of errors as -// pkg/client's REST client. -type EventSink interface { - Create(event *api.Event) (*api.Event, error) - Update(event *api.Event) (*api.Event, error) - Patch(oldEvent *api.Event, data []byte) (*api.Event, error) -} - -// EventRecorder knows how to record events on behalf of an EventSource. -type EventRecorder interface { - // Event constructs an event from the given information and puts it in the queue for sending. - // 'object' is the object this event is about. Event will make a reference-- or you may also - // pass a reference to the object directly. - // 'type' of this event, and can be one of Normal, Warning. 
New types could be added in future - // 'reason' is the reason this event is generated. 'reason' should be short and unique; it - // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used - // to automate handling of events, so imagine people writing switch statements to handle them. - // You want to make that easy. - // 'message' is intended to be human readable. - // - // The resulting event will be created in the same namespace as the reference object. - Event(object runtime.Object, eventtype, reason, message string) - - // Eventf is just like Event, but with Sprintf for the message field. - Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) - - // PastEventf is just like Eventf, but with an option to specify the event's 'timestamp' field. - PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) -} - -// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log. -type EventBroadcaster interface { - // StartEventWatcher starts sending events received from this EventBroadcaster to the given - // event handler function. The return value can be ignored or used to stop recording, if - // desired. - StartEventWatcher(eventHandler func(*api.Event)) watch.Interface - - // StartRecordingToSink starts sending events received from this EventBroadcaster to the given - // sink. The return value can be ignored or used to stop recording, if desired. - StartRecordingToSink(sink EventSink) watch.Interface - - // StartLogging starts sending events received from this EventBroadcaster to the given logging - // function. The return value can be ignored or used to stop recording, if desired. - StartLogging(logf func(format string, args ...interface{})) watch.Interface - - // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster - // with the event source set to the given event source. - NewRecorder(source api.EventSource) EventRecorder -} - -// Creates a new event broadcaster. -func NewBroadcaster() EventBroadcaster { - return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration} -} - -func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster { - return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration} -} - -type eventBroadcasterImpl struct { - *watch.Broadcaster - sleepDuration time.Duration -} - -// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. -// The return value can be ignored or used to stop recording, if desired. -// TODO: make me an object with parameterizable queue length and retry interval -func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface { - // The default math/rand package functions aren't thread safe, so create a - // new Rand object for each StartRecording call. 
- randGen := rand.New(rand.NewSource(time.Now().UnixNano())) - eventCorrelator := NewEventCorrelator(util.RealClock{}) - return eventBroadcaster.StartEventWatcher( - func(event *api.Event) { - recordToSink(sink, event, eventCorrelator, randGen, eventBroadcaster.sleepDuration) - }) -} - -func recordToSink(sink EventSink, event *api.Event, eventCorrelator *EventCorrelator, randGen *rand.Rand, sleepDuration time.Duration) { - // Make a copy before modification, because there could be multiple listeners. - // Events are safe to copy like this. - eventCopy := *event - event = &eventCopy - result, err := eventCorrelator.EventCorrelate(event) - if err != nil { - utilruntime.HandleError(err) - } - if result.Skip { - return - } - tries := 0 - for { - if recordEvent(sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) { - break - } - tries++ - if tries >= maxTriesPerEvent { - glog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) - break - } - // Randomize the first sleep so that various clients won't all be - // synced up if the master goes down. - if tries == 1 { - time.Sleep(time.Duration(float64(sleepDuration) * randGen.Float64())) - } else { - time.Sleep(sleepDuration) - } - } -} - -func isKeyNotFoundError(err error) bool { - statusErr, _ := err.(*errors.StatusError) - - if statusErr != nil && statusErr.Status().Code == http.StatusNotFound { - return true - } - - return false -} - -// recordEvent attempts to write event to a sink. It returns true if the event -// was successfully recorded or discarded, false if it should be retried. -// If updateExistingEvent is false, it creates a new event, otherwise it updates -// existing event. -func recordEvent(sink EventSink, event *api.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool { - var newEvent *api.Event - var err error - if updateExistingEvent { - newEvent, err = sink.Patch(event, patch) - } - // Update can fail because the event may have been removed and it no longer exists. - if !updateExistingEvent || (updateExistingEvent && isKeyNotFoundError(err)) { - // Making sure that ResourceVersion is empty on creation - event.ResourceVersion = "" - newEvent, err = sink.Create(event) - } - if err == nil { - // we need to update our event correlator with the server returned state to handle name/resourceversion - eventCorrelator.UpdateState(newEvent) - return true - } - - // If we can't contact the server, then hold everything while we keep trying. - // Otherwise, something about the event is malformed and we should abandon it. - switch err.(type) { - case *restclient.RequestConstructionError: - // We will construct the request the same next time, so don't keep trying. - glog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) - return true - case *errors.StatusError: - if errors.IsAlreadyExists(err) { - glog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) - } else { - glog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) - } - return true - case *errors.UnexpectedObjectError: - // We don't expect this; it implies the server's response didn't match a - // known pattern. Go ahead and retry. - default: - // This case includes actual http transport errors. Go ahead and retry. - } - glog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) - return false -} - -// StartLogging starts sending events received from this EventBroadcaster to the given logging function. 
-// The return value can be ignored or used to stop recording, if desired. -func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface { - return eventBroadcaster.StartEventWatcher( - func(e *api.Event) { - logf("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message) - }) -} - -// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function. -// The return value can be ignored or used to stop recording, if desired. -func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*api.Event)) watch.Interface { - watcher := eventBroadcaster.Watch() - go func() { - defer utilruntime.HandleCrash() - for { - watchEvent, open := <-watcher.ResultChan() - if !open { - return - } - event, ok := watchEvent.Object.(*api.Event) - if !ok { - // This is all local, so there's no reason this should - // ever happen. - continue - } - eventHandler(event) - } - }() - return watcher -} - -// NewRecorder returns an EventRecorder that records events with the given event source. -func (eventBroadcaster *eventBroadcasterImpl) NewRecorder(source api.EventSource) EventRecorder { - return &recorderImpl{source, eventBroadcaster.Broadcaster, util.RealClock{}} -} - -type recorderImpl struct { - source api.EventSource - *watch.Broadcaster - clock util.Clock -} - -func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp unversioned.Time, eventtype, reason, message string) { - ref, err := api.GetReference(object) - if err != nil { - glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) - return - } - - if !validateEventType(eventtype) { - glog.Errorf("Unsupported event type: '%v'", eventtype) - return - } - - event := recorder.makeEvent(ref, eventtype, reason, message) - event.Source = recorder.source - - go func() { - // NOTE: events should be a non-blocking operation - defer utilruntime.HandleCrash() - recorder.Action(watch.Added, event) - }() -} - -func validateEventType(eventtype string) bool { - switch eventtype { - case api.EventTypeNormal, api.EventTypeWarning: - return true - } - return false -} - -func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) { - recorder.generateEvent(object, unversioned.Now(), eventtype, reason, message) -} - -func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { - recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...)) -} - -func (recorder *recorderImpl) PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) { - recorder.generateEvent(object, timestamp, eventtype, reason, fmt.Sprintf(messageFmt, args...)) -} - -func (recorder *recorderImpl) makeEvent(ref *api.ObjectReference, eventtype, reason, message string) *api.Event { - t := unversioned.Time{Time: recorder.clock.Now()} - namespace := ref.Namespace - if namespace == "" { - namespace = api.NamespaceDefault - } - return &api.Event{ - ObjectMeta: api.ObjectMeta{ - Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), - Namespace: namespace, - }, - InvolvedObject: *ref, - Reason: reason, - Message: message, - FirstTimestamp: t, - LastTimestamp: t, - Count: 1, - Type: eventtype, - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/record/events_cache.go 
b/vendor/k8s.io/kubernetes/pkg/client/record/events_cache.go deleted file mode 100644 index fa76db795..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/record/events_cache.go +++ /dev/null @@ -1,360 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package record - -import ( - "encoding/json" - "fmt" - "strings" - "sync" - "time" - - "github.com/golang/groupcache/lru" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/util" - "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/util/strategicpatch" -) - -const ( - maxLruCacheEntries = 4096 - - // if we see the same event that varies only by message - // more than 10 times in a 10 minute period, aggregate the event - defaultAggregateMaxEvents = 10 - defaultAggregateIntervalInSeconds = 600 -) - -// getEventKey builds unique event key based on source, involvedObject, reason, message -func getEventKey(event *api.Event) string { - return strings.Join([]string{ - event.Source.Component, - event.Source.Host, - event.InvolvedObject.Kind, - event.InvolvedObject.Namespace, - event.InvolvedObject.Name, - string(event.InvolvedObject.UID), - event.InvolvedObject.APIVersion, - event.Type, - event.Reason, - event.Message, - }, - "") -} - -// EventFilterFunc is a function that returns true if the event should be skipped -type EventFilterFunc func(event *api.Event) bool - -// DefaultEventFilterFunc returns false for all incoming events -func DefaultEventFilterFunc(event *api.Event) bool { - return false -} - -// EventAggregatorKeyFunc is responsible for grouping events for aggregation -// It returns a tuple of the following: -// aggregateKey - key the identifies the aggregate group to bucket this event -// localKey - key that makes this event in the local group -type EventAggregatorKeyFunc func(event *api.Event) (aggregateKey string, localKey string) - -// EventAggregatorByReasonFunc aggregates events by exact match on event.Source, event.InvolvedObject, event.Type and event.Reason -func EventAggregatorByReasonFunc(event *api.Event) (string, string) { - return strings.Join([]string{ - event.Source.Component, - event.Source.Host, - event.InvolvedObject.Kind, - event.InvolvedObject.Namespace, - event.InvolvedObject.Name, - string(event.InvolvedObject.UID), - event.InvolvedObject.APIVersion, - event.Type, - event.Reason, - }, - ""), event.Message -} - -// EventAggregatorMessageFunc is responsible for producing an aggregation message -type EventAggregatorMessageFunc func(event *api.Event) string - -// EventAggregratorByReasonMessageFunc returns an aggregate message by prefixing the incoming message -func EventAggregatorByReasonMessageFunc(event *api.Event) string { - return "(events with common reason combined)" -} - -// EventAggregator identifies similar events and aggregates them into a single event -type EventAggregator struct { - sync.RWMutex - - // The cache that manages aggregation state - cache *lru.Cache - - // The function that groups events 
for aggregation - keyFunc EventAggregatorKeyFunc - - // The function that generates a message for an aggregate event - messageFunc EventAggregatorMessageFunc - - // The maximum number of events in the specified interval before aggregation occurs - maxEvents int - - // The amount of time in seconds that must transpire since the last occurrence of a similar event before it's considered new - maxIntervalInSeconds int - - // clock is used to allow for testing over a time interval - clock util.Clock -} - -// NewEventAggregator returns a new instance of an EventAggregator -func NewEventAggregator(lruCacheSize int, keyFunc EventAggregatorKeyFunc, messageFunc EventAggregatorMessageFunc, - maxEvents int, maxIntervalInSeconds int, clock util.Clock) *EventAggregator { - return &EventAggregator{ - cache: lru.New(lruCacheSize), - keyFunc: keyFunc, - messageFunc: messageFunc, - maxEvents: maxEvents, - maxIntervalInSeconds: maxIntervalInSeconds, - clock: clock, - } -} - -// aggregateRecord holds data used to perform aggregation decisions -type aggregateRecord struct { - // we track the number of unique local keys we have seen in the aggregate set to know when to actually aggregate - // if the size of this set exceeds the max, we know we need to aggregate - localKeys sets.String - // The last time at which the aggregate was recorded - lastTimestamp unversioned.Time -} - -// EventAggregate identifies similar events and groups into a common event if required -func (e *EventAggregator) EventAggregate(newEvent *api.Event) (*api.Event, error) { - aggregateKey, localKey := e.keyFunc(newEvent) - now := unversioned.NewTime(e.clock.Now()) - record := aggregateRecord{localKeys: sets.NewString(), lastTimestamp: now} - e.Lock() - defer e.Unlock() - value, found := e.cache.Get(aggregateKey) - if found { - record = value.(aggregateRecord) - } - - // if the last event was far enough in the past, it is not aggregated, and we must reset state - maxInterval := time.Duration(e.maxIntervalInSeconds) * time.Second - interval := now.Time.Sub(record.lastTimestamp.Time) - if interval > maxInterval { - record = aggregateRecord{localKeys: sets.NewString()} - } - record.localKeys.Insert(localKey) - record.lastTimestamp = now - e.cache.Add(aggregateKey, record) - - if record.localKeys.Len() < e.maxEvents { - return newEvent, nil - } - - // do not grow our local key set any larger than max - record.localKeys.PopAny() - - // create a new aggregate event - eventCopy := &api.Event{ - ObjectMeta: api.ObjectMeta{ - Name: fmt.Sprintf("%v.%x", newEvent.InvolvedObject.Name, now.UnixNano()), - Namespace: newEvent.Namespace, - }, - Count: 1, - FirstTimestamp: now, - InvolvedObject: newEvent.InvolvedObject, - LastTimestamp: now, - Message: e.messageFunc(newEvent), - Type: newEvent.Type, - Reason: newEvent.Reason, - Source: newEvent.Source, - } - return eventCopy, nil -} - -// eventLog records data about when an event was observed -type eventLog struct { - // The number of times the event has occurred since first occurrence. - count int - - // The time at which the event was first recorded. 
- firstTimestamp unversioned.Time - - // The unique name of the first occurrence of this event - name string - - // Resource version returned from previous interaction with server - resourceVersion string -} - -// eventLogger logs occurrences of an event -type eventLogger struct { - sync.RWMutex - cache *lru.Cache - clock util.Clock -} - -// newEventLogger observes events and counts their frequencies -func newEventLogger(lruCacheEntries int, clock util.Clock) *eventLogger { - return &eventLogger{cache: lru.New(lruCacheEntries), clock: clock} -} - -// eventObserve records the event, and determines if its frequency should update -func (e *eventLogger) eventObserve(newEvent *api.Event) (*api.Event, []byte, error) { - var ( - patch []byte - err error - ) - key := getEventKey(newEvent) - eventCopy := *newEvent - event := &eventCopy - - e.Lock() - defer e.Unlock() - - lastObservation := e.lastEventObservationFromCache(key) - - // we have seen this event before, so we must prepare a patch - if lastObservation.count > 0 { - // update the event based on the last observation so patch will work as desired - event.Name = lastObservation.name - event.ResourceVersion = lastObservation.resourceVersion - event.FirstTimestamp = lastObservation.firstTimestamp - event.Count = int32(lastObservation.count) + 1 - - eventCopy2 := *event - eventCopy2.Count = 0 - eventCopy2.LastTimestamp = unversioned.NewTime(time.Unix(0, 0)) - - newData, _ := json.Marshal(event) - oldData, _ := json.Marshal(eventCopy2) - patch, err = strategicpatch.CreateStrategicMergePatch(oldData, newData, event) - } - - // record our new observation - e.cache.Add( - key, - eventLog{ - count: int(event.Count), - firstTimestamp: event.FirstTimestamp, - name: event.Name, - resourceVersion: event.ResourceVersion, - }, - ) - return event, patch, err -} - -// updateState updates its internal tracking information based on latest server state -func (e *eventLogger) updateState(event *api.Event) { - key := getEventKey(event) - e.Lock() - defer e.Unlock() - // record our new observation - e.cache.Add( - key, - eventLog{ - count: int(event.Count), - firstTimestamp: event.FirstTimestamp, - name: event.Name, - resourceVersion: event.ResourceVersion, - }, - ) -} - -// lastEventObservationFromCache returns the event from the cache, reads must be protected via external lock -func (e *eventLogger) lastEventObservationFromCache(key string) eventLog { - value, ok := e.cache.Get(key) - if ok { - observationValue, ok := value.(eventLog) - if ok { - return observationValue - } - } - return eventLog{} -} - -// EventCorrelator processes all incoming events and performs analysis to avoid overwhelming the system. It can filter all -// incoming events to see if the event should be filtered from further processing. It can aggregate similar events that occur -// frequently to protect the system from spamming events that are difficult for users to distinguish. It performs de-duplication -// to ensure events that are observed multiple times are compacted into a single event with increasing counts. 
-type EventCorrelator struct { - // the function to filter the event - filterFunc EventFilterFunc - // the object that performs event aggregation - aggregator *EventAggregator - // the object that observes events as they come through - logger *eventLogger -} - -// EventCorrelateResult is the result of a Correlate -type EventCorrelateResult struct { - // the event after correlation - Event *api.Event - // if provided, perform a strategic patch when updating the record on the server - Patch []byte - // if true, do no further processing of the event - Skip bool -} - -// NewEventCorrelator returns an EventCorrelator configured with default values. -// -// The EventCorrelator is responsible for event filtering, aggregating, and counting -// prior to interacting with the API server to record the event. -// -// The default behavior is as follows: -// * No events are filtered from being recorded -// * Aggregation is performed if a similar event is recorded 10 times in a -// in a 10 minute rolling interval. A similar event is an event that varies only by -// the Event.Message field. Rather than recording the precise event, aggregation -// will create a new event whose message reports that it has combined events with -// the same reason. -// * Events are incrementally counted if the exact same event is encountered multiple -// times. -func NewEventCorrelator(clock util.Clock) *EventCorrelator { - cacheSize := maxLruCacheEntries - return &EventCorrelator{ - filterFunc: DefaultEventFilterFunc, - aggregator: NewEventAggregator( - cacheSize, - EventAggregatorByReasonFunc, - EventAggregatorByReasonMessageFunc, - defaultAggregateMaxEvents, - defaultAggregateIntervalInSeconds, - clock), - logger: newEventLogger(cacheSize, clock), - } -} - -// EventCorrelate filters, aggregates, counts, and de-duplicates all incoming events -func (c *EventCorrelator) EventCorrelate(newEvent *api.Event) (*EventCorrelateResult, error) { - if c.filterFunc(newEvent) { - return &EventCorrelateResult{Skip: true}, nil - } - aggregateEvent, err := c.aggregator.EventAggregate(newEvent) - if err != nil { - return &EventCorrelateResult{}, err - } - observedEvent, patch, err := c.logger.eventObserve(aggregateEvent) - return &EventCorrelateResult{Event: observedEvent, Patch: patch}, err -} - -// UpdateState based on the latest observed state from server -func (c *EventCorrelator) UpdateState(event *api.Event) { - c.logger.updateState(event) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/record/fake.go b/vendor/k8s.io/kubernetes/pkg/client/record/fake.go deleted file mode 100644 index 35204ef2d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/record/fake.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package record - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// FakeRecorder is used as a fake during tests. It is thread safe. 
It is usable -// when created manually and not by NewFakeRecorder, however all events may be -// thrown away in this case. -type FakeRecorder struct { - Events chan string -} - -func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) { - if f.Events != nil { - f.Events <- fmt.Sprintf("%s %s %s", eventtype, reason, message) - } -} - -func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { - if f.Events != nil { - f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) - } -} - -func (f *FakeRecorder) PastEventf(object runtime.Object, timestamp unversioned.Time, eventtype, reason, messageFmt string, args ...interface{}) { -} - -// NewFakeRecorder creates new fake event recorder with event channel with -// buffer of given size. -func NewFakeRecorder(bufferSize int) *FakeRecorder { - return &FakeRecorder{ - Events: make(chan string, bufferSize), - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/client.go b/vendor/k8s.io/kubernetes/pkg/client/restclient/client.go deleted file mode 100644 index 230edd45c..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/restclient/client.go +++ /dev/null @@ -1,228 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package restclient - -import ( - "fmt" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/flowcontrol" -) - -const ( - // Environment variables: Note that the duration should be long enough that the backoff - // persists for some reasonable time (i.e. 120 seconds). The typical base might be "1". - envBackoffBase = "KUBE_CLIENT_BACKOFF_BASE" - envBackoffDuration = "KUBE_CLIENT_BACKOFF_DURATION" -) - -// RESTClient imposes common Kubernetes API conventions on a set of resource paths. -// The baseURL is expected to point to an HTTP or HTTPS path that is the parent -// of one or more resources. The server should return a decodable API resource -// object, or an api.Status object which contains information about the reason for -// any failure. -// -// Most consumers should use client.New() to get a Kubernetes API client. -type RESTClient struct { - // base is the root URL for all invocations of the client - base *url.URL - // versionedAPIPath is a path segment connecting the base URL to the resource root - versionedAPIPath string - - // contentConfig is the information used to communicate with the server. - contentConfig ContentConfig - - // serializers contain all serializers for undelying content type. - serializers Serializers - - // creates BackoffManager that is passed to requests. - createBackoffMgr func() BackoffManager - - // TODO extract this into a wrapper interface via the RESTClient interface in kubectl. - Throttle flowcontrol.RateLimiter - - // Set specific behavior of the client. If not set http.DefaultClient will be used. 
- Client *http.Client -} - -type Serializers struct { - Encoder runtime.Encoder - Decoder runtime.Decoder - StreamingSerializer runtime.Serializer - Framer runtime.Framer - RenegotiatedDecoder func(contentType string, params map[string]string) (runtime.Decoder, error) -} - -// NewRESTClient creates a new RESTClient. This client performs generic REST functions -// such as Get, Put, Post, and Delete on specified paths. Codec controls encoding and -// decoding of responses from the server. -func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConfig, maxQPS float32, maxBurst int, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) { - base := *baseURL - if !strings.HasSuffix(base.Path, "/") { - base.Path += "/" - } - base.RawQuery = "" - base.Fragment = "" - - if config.GroupVersion == nil { - config.GroupVersion = &unversioned.GroupVersion{} - } - if len(config.ContentType) == 0 { - config.ContentType = "application/json" - } - serializers, err := createSerializers(config) - if err != nil { - return nil, err - } - - var throttle flowcontrol.RateLimiter - if maxQPS > 0 && rateLimiter == nil { - throttle = flowcontrol.NewTokenBucketRateLimiter(maxQPS, maxBurst) - } else if rateLimiter != nil { - throttle = rateLimiter - } - return &RESTClient{ - base: &base, - versionedAPIPath: versionedAPIPath, - contentConfig: config, - serializers: *serializers, - createBackoffMgr: readExpBackoffConfig, - Throttle: throttle, - Client: client, - }, nil -} - -// GetRateLimiter returns rate limier for a given client, or nil if it's called on a nil client -func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter { - if c == nil { - return nil - } - return c.Throttle -} - -// readExpBackoffConfig handles the internal logic of determining what the -// backoff policy is. By default if no information is available, NoBackoff. -// TODO Generalize this see #17727 . -func readExpBackoffConfig() BackoffManager { - backoffBase := os.Getenv(envBackoffBase) - backoffDuration := os.Getenv(envBackoffDuration) - - backoffBaseInt, errBase := strconv.ParseInt(backoffBase, 10, 64) - backoffDurationInt, errDuration := strconv.ParseInt(backoffDuration, 10, 64) - if errBase != nil || errDuration != nil { - return &NoBackoff{} - } - return &URLBackoff{ - Backoff: flowcontrol.NewBackOff( - time.Duration(backoffBaseInt)*time.Second, - time.Duration(backoffDurationInt)*time.Second)} -} - -// createSerializers creates all necessary serializers for given contentType. 
-func createSerializers(config ContentConfig) (*Serializers, error) { - negotiated := config.NegotiatedSerializer - contentType := config.ContentType - info, ok := negotiated.SerializerForMediaType(contentType, nil) - if !ok { - return nil, fmt.Errorf("serializer for %s not registered", contentType) - } - streamInfo, ok := negotiated.StreamingSerializerForMediaType(contentType, nil) - if !ok { - return nil, fmt.Errorf("streaming serializer for %s not registered", contentType) - } - internalGV := unversioned.GroupVersion{ - Group: config.GroupVersion.Group, - Version: runtime.APIVersionInternal, - } - return &Serializers{ - Encoder: negotiated.EncoderForVersion(info.Serializer, *config.GroupVersion), - Decoder: negotiated.DecoderToVersion(info.Serializer, internalGV), - StreamingSerializer: streamInfo.Serializer, - Framer: streamInfo.Framer, - RenegotiatedDecoder: func(contentType string, params map[string]string) (runtime.Decoder, error) { - renegotiated, ok := negotiated.SerializerForMediaType(contentType, params) - if !ok { - return nil, fmt.Errorf("serializer for %s not registered", contentType) - } - return negotiated.DecoderToVersion(renegotiated.Serializer, internalGV), nil - }, - }, nil -} - -// Verb begins a request with a verb (GET, POST, PUT, DELETE). -// -// Example usage of RESTClient's request building interface: -// c, err := NewRESTClient(...) -// if err != nil { ... } -// resp, err := c.Verb("GET"). -// Path("pods"). -// SelectorParam("labels", "area=staging"). -// Timeout(10*time.Second). -// Do() -// if err != nil { ... } -// list, ok := resp.(*api.PodList) -// -func (c *RESTClient) Verb(verb string) *Request { - backoff := c.createBackoffMgr() - - if c.Client == nil { - return NewRequest(nil, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle) - } - return NewRequest(c.Client, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle) -} - -// Post begins a POST request. Short for c.Verb("POST"). -func (c *RESTClient) Post() *Request { - return c.Verb("POST") -} - -// Put begins a PUT request. Short for c.Verb("PUT"). -func (c *RESTClient) Put() *Request { - return c.Verb("PUT") -} - -// Patch begins a PATCH request. Short for c.Verb("Patch"). -func (c *RESTClient) Patch(pt api.PatchType) *Request { - return c.Verb("PATCH").SetHeader("Content-Type", string(pt)) -} - -// Get begins a GET request. Short for c.Verb("GET"). -func (c *RESTClient) Get() *Request { - return c.Verb("GET") -} - -// Delete begins a DELETE request. Short for c.Verb("DELETE"). -func (c *RESTClient) Delete() *Request { - return c.Verb("DELETE") -} - -// APIVersion returns the APIVersion this RESTClient is expected to use. -func (c *RESTClient) APIVersion() unversioned.GroupVersion { - return *c.contentConfig.GroupVersion -} - -func (c *RESTClient) Codec() runtime.Codec { - return c.contentConfig.Codec -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/config.go b/vendor/k8s.io/kubernetes/pkg/client/restclient/config.go deleted file mode 100644 index 0741e3c2d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/restclient/config.go +++ /dev/null @@ -1,328 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package restclient - -import ( - "fmt" - "io/ioutil" - "net" - "net/http" - "os" - "path" - gruntime "runtime" - "strings" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/crypto" - "k8s.io/kubernetes/pkg/util/flowcontrol" - "k8s.io/kubernetes/pkg/version" -) - -// Config holds the common attributes that can be passed to a Kubernetes client on -// initialization. -type Config struct { - // Host must be a host string, a host:port pair, or a URL to the base of the apiserver. - // If a URL is given then the (optional) Path of that URL represents a prefix that must - // be appended to all request URIs used to access the apiserver. This allows a frontend - // proxy to easily relocate all of the apiserver endpoints. - Host string - // APIPath is a sub-path that points to an API root. - APIPath string - // Prefix is the sub path of the server. If not specified, the client will set - // a default value. Use "/" to indicate the server root should be used - Prefix string - - // ContentConfig contains settings that affect how objects are transformed when - // sent to the server. - ContentConfig - - // Server requires Basic authentication - Username string - Password string - - // Server requires Bearer authentication. This client will not attempt to use - // refresh tokens for an OAuth2 flow. - // TODO: demonstrate an OAuth2 compatible client. - BearerToken string - - // Impersonate is the username that this RESTClient will impersonate - Impersonate string - - // Server requires plugin-specified authentication. - AuthProvider *clientcmdapi.AuthProviderConfig - - // Callback to persist config for AuthProvider. - AuthConfigPersister AuthProviderConfigPersister - - // TLSClientConfig contains settings to enable transport layer security - TLSClientConfig - - // Server should be accessed without verifying the TLS - // certificate. For testing only. - Insecure bool - - // UserAgent is an optional field that specifies the caller of this request. - UserAgent string - - // Transport may be used for custom HTTP behavior. This attribute may not - // be specified with the TLS client certificate options. Use WrapTransport - // for most client level operations. - Transport http.RoundTripper - // WrapTransport will be invoked for custom HTTP behavior after the underlying - // transport is initialized (either the transport created from TLSClientConfig, - // Transport, or http.DefaultTransport). The config may layer other RoundTrippers - // on top of the returned RoundTripper. - WrapTransport func(rt http.RoundTripper) http.RoundTripper - - // QPS indicates the maximum QPS to the master from this client. If zero, QPS is unlimited. - QPS float32 - - // Maximum burst for throttle - Burst int - - // Rate limiter for limiting connections to the master from this client. 
If present overwrites QPS/Burst - RateLimiter flowcontrol.RateLimiter -} - -// TLSClientConfig contains settings to enable transport layer security -type TLSClientConfig struct { - // Server requires TLS client certificate authentication - CertFile string - // Server requires TLS client certificate authentication - KeyFile string - // Trusted root certificates for server - CAFile string - - // CertData holds PEM-encoded bytes (typically read from a client certificate file). - // CertData takes precedence over CertFile - CertData []byte - // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). - // KeyData takes precedence over KeyFile - KeyData []byte - // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). - // CAData takes precedence over CAFile - CAData []byte -} - -type ContentConfig struct { - // ContentType specifies the wire format used to communicate with the server. - // This value will be set as the Accept header on requests made to the server, and - // as the default content type on any object sent to the server. If not set, - // "application/json" is used. - ContentType string - // GroupVersion is the API version to talk to. Must be provided when initializing - // a RESTClient directly. When initializing a Client, will be set with the default - // code version. - GroupVersion *unversioned.GroupVersion - // NegotiatedSerializer is used for obtaining encoders and decoders for multiple - // supported media types. - NegotiatedSerializer runtime.NegotiatedSerializer - - // Codec specifies the encoding and decoding behavior for runtime.Objects passed - // to a RESTClient or Client. Required when initializing a RESTClient, optional - // when initializing a Client. - // - // DEPRECATED: Please use NegotiatedSerializer instead. - // Codec is currently used only in some tests and will be removed soon. - // All production setups should use NegotiatedSerializer. - Codec runtime.Codec -} - -// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config -// object. Note that a RESTClient may require fields that are optional when initializing a Client. -// A RESTClient created by this method is generic - it expects to operate on an API that follows -// the Kubernetes conventions, but may not be the Kubernetes API. -func RESTClientFor(config *Config) (*RESTClient, error) { - if config.GroupVersion == nil { - return nil, fmt.Errorf("GroupVersion is required when initializing a RESTClient") - } - if config.NegotiatedSerializer == nil { - return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") - } - - baseURL, versionedAPIPath, err := defaultServerUrlFor(config) - if err != nil { - return nil, err - } - - transport, err := TransportFor(config) - if err != nil { - return nil, err - } - - var httpClient *http.Client - if transport != http.DefaultTransport { - httpClient = &http.Client{Transport: transport} - } - - return NewRESTClient(baseURL, versionedAPIPath, config.ContentConfig, config.QPS, config.Burst, config.RateLimiter, httpClient) -} - -// UnversionedRESTClientFor is the same as RESTClientFor, except that it allows -// the config.Version to be empty. 
-func UnversionedRESTClientFor(config *Config) (*RESTClient, error) { - if config.NegotiatedSerializer == nil { - return nil, fmt.Errorf("NeogitatedSerializer is required when initializing a RESTClient") - } - - baseURL, versionedAPIPath, err := defaultServerUrlFor(config) - if err != nil { - return nil, err - } - - transport, err := TransportFor(config) - if err != nil { - return nil, err - } - - var httpClient *http.Client - if transport != http.DefaultTransport { - httpClient = &http.Client{Transport: transport} - } - - versionConfig := config.ContentConfig - if versionConfig.GroupVersion == nil { - v := unversioned.SchemeGroupVersion - versionConfig.GroupVersion = &v - } - - return NewRESTClient(baseURL, versionedAPIPath, versionConfig, config.QPS, config.Burst, config.RateLimiter, httpClient) -} - -// SetKubernetesDefaults sets default values on the provided client config for accessing the -// Kubernetes API or returns an error if any of the defaults are impossible or invalid. -func SetKubernetesDefaults(config *Config) error { - if len(config.UserAgent) == 0 { - config.UserAgent = DefaultKubernetesUserAgent() - } - if config.QPS == 0.0 { - config.QPS = 5.0 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} - -// DefaultKubernetesUserAgent returns the default user agent that clients can use. -func DefaultKubernetesUserAgent() string { - commit := version.Get().GitCommit - if len(commit) > 7 { - commit = commit[:7] - } - if len(commit) == 0 { - commit = "unknown" - } - version := version.Get().GitVersion - seg := strings.SplitN(version, "-", 2) - version = seg[0] - return fmt.Sprintf("%s/%s (%s/%s) kubernetes/%s", path.Base(os.Args[0]), version, gruntime.GOOS, gruntime.GOARCH, commit) -} - -// InClusterConfig returns a config object which uses the service account -// kubernetes gives to pods. It's intended for clients that expect to be -// running inside a pod running on kuberenetes. It will return an error if -// called from a process not running in a kubernetes environment. -func InClusterConfig() (*Config, error) { - host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT") - if len(host) == 0 || len(port) == 0 { - return nil, fmt.Errorf("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined") - } - - token, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/" + api.ServiceAccountTokenKey) - if err != nil { - return nil, err - } - tlsClientConfig := TLSClientConfig{} - rootCAFile := "/var/run/secrets/kubernetes.io/serviceaccount/" + api.ServiceAccountRootCAKey - if _, err := crypto.CertPoolFromFile(rootCAFile); err != nil { - glog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) - } else { - tlsClientConfig.CAFile = rootCAFile - } - - return &Config{ - // TODO: switch to using cluster DNS. - Host: "https://" + net.JoinHostPort(host, port), - BearerToken: string(token), - TLSClientConfig: tlsClientConfig, - }, nil -} - -// IsConfigTransportTLS returns true if and only if the provided -// config will result in a protected connection to the server when it -// is passed to restclient.RESTClientFor(). Use to determine when to -// send credentials over the wire. -// -// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are -// still possible. 
-func IsConfigTransportTLS(config Config) bool { - baseURL, _, err := defaultServerUrlFor(&config) - if err != nil { - return false - } - return baseURL.Scheme == "https" -} - -// LoadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData, -// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are -// either populated or were empty to start. -func LoadTLSFiles(c *Config) error { - var err error - c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile) - if err != nil { - return err - } - - c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile) - if err != nil { - return err - } - - c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile) - if err != nil { - return err - } - return nil -} - -// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, -// or an error if an error occurred reading the file -func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { - if len(data) > 0 { - return data, nil - } - if len(file) > 0 { - fileData, err := ioutil.ReadFile(file) - if err != nil { - return []byte{}, err - } - return fileData, nil - } - return nil, nil -} - -func AddUserAgent(config *Config, userAgent string) *Config { - fullUserAgent := DefaultKubernetesUserAgent() + "/" + userAgent - config.UserAgent = fullUserAgent - return config -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/plugin.go b/vendor/k8s.io/kubernetes/pkg/client/restclient/plugin.go deleted file mode 100644 index 4752e375b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/restclient/plugin.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package restclient - -import ( - "fmt" - "net/http" - "sync" - - "github.com/golang/glog" - - clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" -) - -type AuthProvider interface { - // WrapTransport allows the plugin to create a modified RoundTripper that - // attaches authorization headers (or other info) to requests. - WrapTransport(http.RoundTripper) http.RoundTripper - // Login allows the plugin to initialize its configuration. It must not - // require direct user interaction. - Login() error -} - -// Factory generates an AuthProvider plugin. -// clusterAddress is the address of the current cluster. -// config is the initial configuration for this plugin. -// persister allows the plugin to save updated configuration. -type Factory func(clusterAddress string, config map[string]string, persister AuthProviderConfigPersister) (AuthProvider, error) - -// AuthProviderConfigPersister allows a plugin to persist configuration info -// for just itself. -type AuthProviderConfigPersister interface { - Persist(map[string]string) error -} - -// All registered auth provider plugins. 
-var pluginsLock sync.Mutex -var plugins = make(map[string]Factory) - -func RegisterAuthProviderPlugin(name string, plugin Factory) error { - pluginsLock.Lock() - defer pluginsLock.Unlock() - if _, found := plugins[name]; found { - return fmt.Errorf("Auth Provider Plugin %q was registered twice", name) - } - glog.V(4).Infof("Registered Auth Provider Plugin %q", name) - plugins[name] = plugin - return nil -} - -func GetAuthProvider(clusterAddress string, apc *clientcmdapi.AuthProviderConfig, persister AuthProviderConfigPersister) (AuthProvider, error) { - pluginsLock.Lock() - defer pluginsLock.Unlock() - p, ok := plugins[apc.Name] - if !ok { - return nil, fmt.Errorf("No Auth Provider found for name %q", apc.Name) - } - return p(clusterAddress, apc.Config, persister) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/request.go b/vendor/k8s.io/kubernetes/pkg/client/restclient/request.go deleted file mode 100644 index 9fd3f0ddb..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/restclient/request.go +++ /dev/null @@ -1,1086 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package restclient - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "mime" - "net/http" - "net/url" - "path" - "reflect" - "strconv" - "strings" - "time" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/api/validation" - "k8s.io/kubernetes/pkg/client/metrics" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer/streaming" - "k8s.io/kubernetes/pkg/util/flowcontrol" - "k8s.io/kubernetes/pkg/util/net" - "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/watch" - "k8s.io/kubernetes/pkg/watch/versioned" -) - -var ( - // specialParams lists parameters that are handled specially and which users of Request - // are therefore not allowed to set manually. - specialParams = sets.NewString("timeout") - - // longThrottleLatency defines threshold for logging requests. All requests being - // throttle for more than longThrottleLatency will be logged. - longThrottleLatency = 50 * time.Millisecond -) - -func init() { - metrics.Register() -} - -// HTTPClient is an interface for testing a request object. -type HTTPClient interface { - Do(req *http.Request) (*http.Response, error) -} - -// ResponseWrapper is an interface for getting a response. -// The response may be either accessed as a raw data (the whole output is put into memory) or as a stream. -type ResponseWrapper interface { - DoRaw() ([]byte, error) - Stream() (io.ReadCloser, error) -} - -// RequestConstructionError is returned when there's an error assembling a request. -type RequestConstructionError struct { - Err error -} - -// Error returns a textual description of 'r'. 
-func (r *RequestConstructionError) Error() string { - return fmt.Sprintf("request construction error: '%v'", r.Err) -} - -// Request allows for building up a request to a server in a chained fashion. -// Any errors are stored until the end of your call, so you only have to -// check once. -type Request struct { - // required - client HTTPClient - verb string - - baseURL *url.URL - content ContentConfig - serializers Serializers - - // generic components accessible via method setters - pathPrefix string - subpath string - params url.Values - headers http.Header - - // structural elements of the request that are part of the Kubernetes API conventions - namespace string - namespaceSet bool - resource string - resourceName string - subresource string - selector labels.Selector - timeout time.Duration - - // output - err error - body io.Reader - - // The constructed request and the response - req *http.Request - resp *http.Response - - backoffMgr BackoffManager - throttle flowcontrol.RateLimiter -} - -// NewRequest creates a new request helper object for accessing runtime.Objects on a server. -func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter) *Request { - if backoff == nil { - glog.V(2).Infof("Not implementing request backoff strategy.") - backoff = &NoBackoff{} - } - - pathPrefix := "/" - if baseURL != nil { - pathPrefix = path.Join(pathPrefix, baseURL.Path) - } - r := &Request{ - client: client, - verb: verb, - baseURL: baseURL, - pathPrefix: path.Join(pathPrefix, versionedAPIPath), - content: content, - serializers: serializers, - backoffMgr: backoff, - throttle: throttle, - } - if len(content.ContentType) > 0 { - r.SetHeader("Accept", content.ContentType+", */*") - } - return r -} - -// Prefix adds segments to the relative beginning to the request path. These -// items will be placed before the optional Namespace, Resource, or Name sections. -// Setting AbsPath will clear any previously set Prefix segments -func (r *Request) Prefix(segments ...string) *Request { - if r.err != nil { - return r - } - r.pathPrefix = path.Join(r.pathPrefix, path.Join(segments...)) - return r -} - -// Suffix appends segments to the end of the path. These items will be placed after the prefix and optional -// Namespace, Resource, or Name sections. -func (r *Request) Suffix(segments ...string) *Request { - if r.err != nil { - return r - } - r.subpath = path.Join(r.subpath, path.Join(segments...)) - return r -} - -// Resource sets the resource to access (<resource>/[ns/<namespace>/]<name>) -func (r *Request) Resource(resource string) *Request { - if r.err != nil { - return r - } - if len(r.resource) != 0 { - r.err = fmt.Errorf("resource already set to %q, cannot change to %q", r.resource, resource) - return r - } - if msgs := validation.IsValidPathSegmentName(resource); len(msgs) != 0 { - r.err = fmt.Errorf("invalid resource %q: %v", resource, msgs) - return r - } - r.resource = resource - return r -} - -// SubResource sets a sub-resource path which can be multiple segments segment after the resource -// name but before the suffix. -func (r *Request) SubResource(subresources ...string) *Request { - if r.err != nil { - return r - } - subresource := path.Join(subresources...) 
- if len(r.subresource) != 0 { - r.err = fmt.Errorf("subresource already set to %q, cannot change to %q", r.resource, subresource) - return r - } - for _, s := range subresources { - if msgs := validation.IsValidPathSegmentName(s); len(msgs) != 0 { - r.err = fmt.Errorf("invalid subresource %q: %v", s, msgs) - return r - } - } - r.subresource = subresource - return r -} - -// Name sets the name of a resource to access (<resource>/[ns/<namespace>/]<name>) -func (r *Request) Name(resourceName string) *Request { - if r.err != nil { - return r - } - if len(resourceName) == 0 { - r.err = fmt.Errorf("resource name may not be empty") - return r - } - if len(r.resourceName) != 0 { - r.err = fmt.Errorf("resource name already set to %q, cannot change to %q", r.resourceName, resourceName) - return r - } - if msgs := validation.IsValidPathSegmentName(resourceName); len(msgs) != 0 { - r.err = fmt.Errorf("invalid resource name %q: %v", resourceName, msgs) - return r - } - r.resourceName = resourceName - return r -} - -// Namespace applies the namespace scope to a request (<resource>/[ns/<namespace>/]<name>) -func (r *Request) Namespace(namespace string) *Request { - if r.err != nil { - return r - } - if r.namespaceSet { - r.err = fmt.Errorf("namespace already set to %q, cannot change to %q", r.namespace, namespace) - return r - } - if msgs := validation.IsValidPathSegmentName(namespace); len(msgs) != 0 { - r.err = fmt.Errorf("invalid namespace %q: %v", namespace, msgs) - return r - } - r.namespaceSet = true - r.namespace = namespace - return r -} - -// NamespaceIfScoped is a convenience function to set a namespace if scoped is true -func (r *Request) NamespaceIfScoped(namespace string, scoped bool) *Request { - if scoped { - return r.Namespace(namespace) - } - return r -} - -// AbsPath overwrites an existing path with the segments provided. Trailing slashes are preserved -// when a single segment is passed. -func (r *Request) AbsPath(segments ...string) *Request { - if r.err != nil { - return r - } - r.pathPrefix = path.Join(r.baseURL.Path, path.Join(segments...)) - if len(segments) == 1 && (len(r.baseURL.Path) > 1 || len(segments[0]) > 1) && strings.HasSuffix(segments[0], "/") { - // preserve any trailing slashes for legacy behavior - r.pathPrefix += "/" - } - return r -} - -// RequestURI overwrites existing path and parameters with the value of the provided server relative -// URI. Some parameters (those in specialParameters) cannot be overwritten. -func (r *Request) RequestURI(uri string) *Request { - if r.err != nil { - return r - } - locator, err := url.Parse(uri) - if err != nil { - r.err = err - return r - } - r.pathPrefix = locator.Path - if len(locator.Query()) > 0 { - if r.params == nil { - r.params = make(url.Values) - } - for k, v := range locator.Query() { - r.params[k] = v - } - } - return r -} - -const ( - // A constant that clients can use to refer in a field selector to the object name field. - // Will be automatically emitted as the correct name for the API version. 
- nodeUnschedulable = "spec.unschedulable" - objectNameField = "metadata.name" - podHost = "spec.nodeName" - podStatus = "status.phase" - secretType = "type" - - eventReason = "reason" - eventSource = "source" - eventType = "type" - eventInvolvedKind = "involvedObject.kind" - eventInvolvedNamespace = "involvedObject.namespace" - eventInvolvedName = "involvedObject.name" - eventInvolvedUID = "involvedObject.uid" - eventInvolvedAPIVersion = "involvedObject.apiVersion" - eventInvolvedResourceVersion = "involvedObject.resourceVersion" - eventInvolvedFieldPath = "involvedObject.fieldPath" -) - -type clientFieldNameToAPIVersionFieldName map[string]string - -func (c clientFieldNameToAPIVersionFieldName) filterField(field, value string) (newField, newValue string, err error) { - newFieldName, ok := c[field] - if !ok { - return "", "", fmt.Errorf("%v - %v - no field mapping defined", field, value) - } - return newFieldName, value, nil -} - -type resourceTypeToFieldMapping map[string]clientFieldNameToAPIVersionFieldName - -func (r resourceTypeToFieldMapping) filterField(resourceType, field, value string) (newField, newValue string, err error) { - fMapping, ok := r[resourceType] - if !ok { - return "", "", fmt.Errorf("%v - %v - %v - no field mapping defined", resourceType, field, value) - } - return fMapping.filterField(field, value) -} - -type versionToResourceToFieldMapping map[unversioned.GroupVersion]resourceTypeToFieldMapping - -func (v versionToResourceToFieldMapping) filterField(groupVersion *unversioned.GroupVersion, resourceType, field, value string) (newField, newValue string, err error) { - rMapping, ok := v[*groupVersion] - if !ok { - glog.Warningf("Field selector: %v - %v - %v - %v: need to check if this is versioned correctly.", groupVersion, resourceType, field, value) - return field, value, nil - } - newField, newValue, err = rMapping.filterField(resourceType, field, value) - if err != nil { - // This is only a warning until we find and fix all of the client's usages. - glog.Warningf("Field selector: %v - %v - %v - %v: need to check if this is versioned correctly.", groupVersion, resourceType, field, value) - return field, value, nil - } - return newField, newValue, nil -} - -var fieldMappings = versionToResourceToFieldMapping{ - v1.SchemeGroupVersion: resourceTypeToFieldMapping{ - "nodes": clientFieldNameToAPIVersionFieldName{ - objectNameField: objectNameField, - nodeUnschedulable: nodeUnschedulable, - }, - "pods": clientFieldNameToAPIVersionFieldName{ - podHost: podHost, - podStatus: podStatus, - }, - "secrets": clientFieldNameToAPIVersionFieldName{ - secretType: secretType, - }, - "serviceAccounts": clientFieldNameToAPIVersionFieldName{ - objectNameField: objectNameField, - }, - "endpoints": clientFieldNameToAPIVersionFieldName{ - objectNameField: objectNameField, - }, - "events": clientFieldNameToAPIVersionFieldName{ - objectNameField: objectNameField, - eventReason: eventReason, - eventSource: eventSource, - eventType: eventType, - eventInvolvedKind: eventInvolvedKind, - eventInvolvedNamespace: eventInvolvedNamespace, - eventInvolvedName: eventInvolvedName, - eventInvolvedUID: eventInvolvedUID, - eventInvolvedAPIVersion: eventInvolvedAPIVersion, - eventInvolvedResourceVersion: eventInvolvedResourceVersion, - eventInvolvedFieldPath: eventInvolvedFieldPath, - }, - }, -} - -// FieldsSelectorParam adds the given selector as a query parameter with the name paramName. 
-func (r *Request) FieldsSelectorParam(s fields.Selector) *Request { - if r.err != nil { - return r - } - if s == nil { - return r - } - if s.Empty() { - return r - } - s2, err := s.Transform(func(field, value string) (newField, newValue string, err error) { - return fieldMappings.filterField(r.content.GroupVersion, r.resource, field, value) - }) - if err != nil { - r.err = err - return r - } - return r.setParam(unversioned.FieldSelectorQueryParam(r.content.GroupVersion.String()), s2.String()) -} - -// LabelsSelectorParam adds the given selector as a query parameter -func (r *Request) LabelsSelectorParam(s labels.Selector) *Request { - if r.err != nil { - return r - } - if s == nil { - return r - } - if s.Empty() { - return r - } - return r.setParam(unversioned.LabelSelectorQueryParam(r.content.GroupVersion.String()), s.String()) -} - -// UintParam creates a query parameter with the given value. -func (r *Request) UintParam(paramName string, u uint64) *Request { - if r.err != nil { - return r - } - return r.setParam(paramName, strconv.FormatUint(u, 10)) -} - -// Param creates a query parameter with the given string value. -func (r *Request) Param(paramName, s string) *Request { - if r.err != nil { - return r - } - return r.setParam(paramName, s) -} - -// VersionedParams will take the provided object, serialize it to a map[string][]string using the -// implicit RESTClient API version and the default parameter codec, and then add those as parameters -// to the request. Use this to provide versioned query parameters from client libraries. -func (r *Request) VersionedParams(obj runtime.Object, codec runtime.ParameterCodec) *Request { - if r.err != nil { - return r - } - params, err := codec.EncodeParameters(obj, *r.content.GroupVersion) - if err != nil { - r.err = err - return r - } - for k, v := range params { - for _, value := range v { - // TODO: Move it to setParam method, once we get rid of - // FieldSelectorParam & LabelSelectorParam methods. - if k == unversioned.LabelSelectorQueryParam(r.content.GroupVersion.String()) && value == "" { - // Don't set an empty selector for backward compatibility. - // Since there is no way to get the difference between empty - // and unspecified string, we don't set it to avoid having - // labelSelector= param in every request. - continue - } - if k == unversioned.FieldSelectorQueryParam(r.content.GroupVersion.String()) { - if len(value) == 0 { - // Don't set an empty selector for backward compatibility. - // Since there is no way to get the difference between empty - // and unspecified string, we don't set it to avoid having - // fieldSelector= param in every request. - continue - } - // TODO: Filtering should be handled somewhere else. 
- selector, err := fields.ParseSelector(value) - if err != nil { - r.err = fmt.Errorf("unparsable field selector: %v", err) - return r - } - filteredSelector, err := selector.Transform( - func(field, value string) (newField, newValue string, err error) { - return fieldMappings.filterField(r.content.GroupVersion, r.resource, field, value) - }) - if err != nil { - r.err = fmt.Errorf("untransformable field selector: %v", err) - return r - } - value = filteredSelector.String() - } - - r.setParam(k, value) - } - } - return r -} - -func (r *Request) setParam(paramName, value string) *Request { - if specialParams.Has(paramName) { - r.err = fmt.Errorf("must set %v through the corresponding function, not directly.", paramName) - return r - } - if r.params == nil { - r.params = make(url.Values) - } - r.params[paramName] = append(r.params[paramName], value) - return r -} - -func (r *Request) SetHeader(key, value string) *Request { - if r.headers == nil { - r.headers = http.Header{} - } - r.headers.Set(key, value) - return r -} - -// Timeout makes the request use the given duration as a timeout. Sets the "timeout" -// parameter. -func (r *Request) Timeout(d time.Duration) *Request { - if r.err != nil { - return r - } - r.timeout = d - return r -} - -// Body makes the request use obj as the body. Optional. -// If obj is a string, try to read a file of that name. -// If obj is a []byte, send it directly. -// If obj is an io.Reader, use it directly. -// If obj is a runtime.Object, marshal it correctly, and set Content-Type header. -// If obj is a runtime.Object and nil, do nothing. -// Otherwise, set an error. -func (r *Request) Body(obj interface{}) *Request { - if r.err != nil { - return r - } - switch t := obj.(type) { - case string: - data, err := ioutil.ReadFile(t) - if err != nil { - r.err = err - return r - } - glog.V(8).Infof("Request Body: %s", string(data)) - r.body = bytes.NewReader(data) - case []byte: - glog.V(8).Infof("Request Body: %s", string(t)) - r.body = bytes.NewReader(t) - case io.Reader: - r.body = t - case runtime.Object: - // callers may pass typed interface pointers, therefore we must check nil with reflection - if reflect.ValueOf(t).IsNil() { - return r - } - data, err := runtime.Encode(r.serializers.Encoder, t) - if err != nil { - r.err = err - return r - } - glog.V(8).Infof("Request Body: %s", string(data)) - r.body = bytes.NewReader(data) - r.SetHeader("Content-Type", r.content.ContentType) - default: - r.err = fmt.Errorf("unknown type used for body: %+v", obj) - } - return r -} - -// URL returns the current working URL. -func (r *Request) URL() *url.URL { - p := r.pathPrefix - if r.namespaceSet && len(r.namespace) > 0 { - p = path.Join(p, "namespaces", r.namespace) - } - if len(r.resource) != 0 { - p = path.Join(p, strings.ToLower(r.resource)) - } - // Join trims trailing slashes, so preserve r.pathPrefix's trailing slash for backwards compat if nothing was changed - if len(r.resourceName) != 0 || len(r.subpath) != 0 || len(r.subresource) != 0 { - p = path.Join(p, r.resourceName, r.subresource, r.subpath) - } - - finalURL := &url.URL{} - if r.baseURL != nil { - *finalURL = *r.baseURL - } - finalURL.Path = p - - query := url.Values{} - for key, values := range r.params { - for _, value := range values { - query.Add(key, value) - } - } - - // timeout is handled specially here. 
- if r.timeout != 0 { - query.Set("timeout", r.timeout.String()) - } - finalURL.RawQuery = query.Encode() - return finalURL -} - -// finalURLTemplate is similar to URL(), but will make all specific parameter values equal -// - instead of name or namespace, "{name}" and "{namespace}" will be used, and all query -// parameters will be reset. This creates a copy of the request so as not to change the -// underyling object. This means some useful request info (like the types of field -// selectors in use) will be lost. -// TODO: preserve field selector keys -func (r Request) finalURLTemplate() string { - if len(r.resourceName) != 0 { - r.resourceName = "{name}" - } - if r.namespaceSet && len(r.namespace) != 0 { - r.namespace = "{namespace}" - } - newParams := url.Values{} - v := []string{"{value}"} - for k := range r.params { - newParams[k] = v - } - r.params = newParams - return r.URL().String() -} - -func (r *Request) tryThrottle() { - now := time.Now() - if r.throttle != nil { - r.throttle.Accept() - } - if latency := time.Since(now); latency > longThrottleLatency { - glog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) - } -} - -// Watch attempts to begin watching the requested location. -// Returns a watch.Interface, or an error. -func (r *Request) Watch() (watch.Interface, error) { - // We specifically don't want to rate limit watches, so we - // don't use r.throttle here. - if r.err != nil { - return nil, r.err - } - if r.serializers.Framer == nil { - return nil, fmt.Errorf("watching resources is not possible with this client (content-type: %s)", r.content.ContentType) - } - - url := r.URL().String() - req, err := http.NewRequest(r.verb, url, r.body) - if err != nil { - return nil, err - } - req.Header = r.headers - client := r.client - if client == nil { - client = http.DefaultClient - } - r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL())) - resp, err := client.Do(req) - updateURLMetrics(r, resp, err) - if r.baseURL != nil { - if err != nil { - r.backoffMgr.UpdateBackoff(r.baseURL, err, 0) - } else { - r.backoffMgr.UpdateBackoff(r.baseURL, err, resp.StatusCode) - } - } - if err != nil { - // The watch stream mechanism handles many common partial data errors, so closed - // connections can be retried in many cases. - if net.IsProbableEOF(err) { - return watch.NewEmptyWatch(), nil - } - return nil, err - } - if resp.StatusCode != http.StatusOK { - defer resp.Body.Close() - if result := r.transformResponse(resp, req); result.err != nil { - return nil, result.err - } - return nil, fmt.Errorf("for request '%+v', got status: %v", url, resp.StatusCode) - } - framer := r.serializers.Framer.NewFrameReader(resp.Body) - decoder := streaming.NewDecoder(framer, r.serializers.StreamingSerializer) - return watch.NewStreamWatcher(versioned.NewDecoder(decoder, r.serializers.Decoder)), nil -} - -// updateURLMetrics is a convenience function for pushing metrics. -// It also handles corner cases for incomplete/invalid request data. -func updateURLMetrics(req *Request, resp *http.Response, err error) { - url := "none" - if req.baseURL != nil { - url = req.baseURL.Host - } - - // If we have an error (i.e. apiserver down) we report that as a metric label. 
- if err != nil { - metrics.RequestResult.WithLabelValues(err.Error(), req.verb, url).Inc() - } else { - //Metrics for failure codes - metrics.RequestResult.WithLabelValues(strconv.Itoa(resp.StatusCode), req.verb, url).Inc() - } -} - -// Stream formats and executes the request, and offers streaming of the response. -// Returns io.ReadCloser which could be used for streaming of the response, or an error -// Any non-2xx http status code causes an error. If we get a non-2xx code, we try to convert the body into an APIStatus object. -// If we can, we return that as an error. Otherwise, we create an error that lists the http status and the content of the response. -func (r *Request) Stream() (io.ReadCloser, error) { - if r.err != nil { - return nil, r.err - } - - r.tryThrottle() - - url := r.URL().String() - req, err := http.NewRequest(r.verb, url, nil) - if err != nil { - return nil, err - } - req.Header = r.headers - client := r.client - if client == nil { - client = http.DefaultClient - } - r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL())) - resp, err := client.Do(req) - updateURLMetrics(r, resp, err) - if r.baseURL != nil { - if err != nil { - r.backoffMgr.UpdateBackoff(r.URL(), err, 0) - } else { - r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode) - } - } - if err != nil { - return nil, err - } - - switch { - case (resp.StatusCode >= 200) && (resp.StatusCode < 300): - return resp.Body, nil - - default: - // ensure we close the body before returning the error - defer resp.Body.Close() - - // we have a decent shot at taking the object returned, parsing it as a status object and returning a more normal error - bodyBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("%v while accessing %v", resp.Status, url) - } - - // TODO: Check ContentType. - if runtimeObject, err := runtime.Decode(r.serializers.Decoder, bodyBytes); err == nil { - statusError := errors.FromObject(runtimeObject) - - if _, ok := statusError.(errors.APIStatus); ok { - return nil, statusError - } - } - - bodyText := string(bodyBytes) - return nil, fmt.Errorf("%s while accessing %v: %s", resp.Status, url, bodyText) - } -} - -// request connects to the server and invokes the provided function when a server response is -// received. It handles retry behavior and up front validation of requests. It will invoke -// fn at most once. It will return an error if a problem occurred prior to connecting to the -// server - the provided function is responsible for handling server errors. -func (r *Request) request(fn func(*http.Request, *http.Response)) error { - //Metrics for total request latency - start := time.Now() - defer func() { - metrics.RequestLatency.WithLabelValues(r.verb, r.finalURLTemplate()).Observe(metrics.SinceInMicroseconds(start)) - }() - - if r.err != nil { - glog.V(4).Infof("Error in request: %v", r.err) - return r.err - } - - // TODO: added to catch programmer errors (invoking operations with an object with an empty namespace) - if (r.verb == "GET" || r.verb == "PUT" || r.verb == "DELETE") && r.namespaceSet && len(r.resourceName) > 0 && len(r.namespace) == 0 { - return fmt.Errorf("an empty namespace may not be set when a resource name is provided") - } - if (r.verb == "POST") && r.namespaceSet && len(r.namespace) == 0 { - return fmt.Errorf("an empty namespace may not be set during creation") - } - - client := r.client - if client == nil { - client = http.DefaultClient - } - - // Right now we make about ten retry attempts if we get a Retry-After response. 
- // TODO: Change to a timeout based approach. - maxRetries := 10 - retries := 0 - for { - url := r.URL().String() - req, err := http.NewRequest(r.verb, url, r.body) - if err != nil { - return err - } - req.Header = r.headers - - r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL())) - resp, err := client.Do(req) - updateURLMetrics(r, resp, err) - if err != nil { - r.backoffMgr.UpdateBackoff(r.URL(), err, 0) - } else { - r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode) - } - if err != nil { - return err - } - - done := func() bool { - // ensure the response body is closed before we reconnect, so that we reuse the same - // TCP connection - defer resp.Body.Close() - - retries++ - if seconds, wait := checkWait(resp); wait && retries < maxRetries { - if seeker, ok := r.body.(io.Seeker); ok && r.body != nil { - _, err := seeker.Seek(0, 0) - if err != nil { - glog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body) - fn(req, resp) - return true - } - } - - glog.V(4).Infof("Got a Retry-After %s response for attempt %d to %v", seconds, retries, url) - r.backoffMgr.Sleep(time.Duration(seconds) * time.Second) - return false - } - fn(req, resp) - return true - }() - if done { - return nil - } - } -} - -// Do formats and executes the request. Returns a Result object for easy response -// processing. -// -// Error type: -// * If the request can't be constructed, or an error happened earlier while building its -// arguments: *RequestConstructionError -// * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError -// * http.Client.Do errors are returned directly. -func (r *Request) Do() Result { - r.tryThrottle() - - var result Result - err := r.request(func(req *http.Request, resp *http.Response) { - result = r.transformResponse(resp, req) - }) - if err != nil { - return Result{err: err} - } - return result -} - -// DoRaw executes the request but does not process the response body. -func (r *Request) DoRaw() ([]byte, error) { - r.tryThrottle() - - var result Result - err := r.request(func(req *http.Request, resp *http.Response) { - result.body, result.err = ioutil.ReadAll(resp.Body) - }) - if err != nil { - return nil, err - } - return result.body, result.err -} - -// transformResponse converts an API response into a structured API object -func (r *Request) transformResponse(resp *http.Response, req *http.Request) Result { - var body []byte - if resp.Body != nil { - if data, err := ioutil.ReadAll(resp.Body); err == nil { - body = data - } - } - glog.V(8).Infof("Response Body: %s", string(body)) - - // Did the server give us a status response? - isStatusResponse := false - // Because release-1.1 server returns Status with empty APIVersion at paths - // to the Extensions resources, we need to use DecodeInto here to provide - // default groupVersion, otherwise a status response won't be correctly - // decoded. - status := &unversioned.Status{} - err := runtime.DecodeInto(r.serializers.Decoder, body, status) - if err == nil && len(status.Status) > 0 { - isStatusResponse = true - } - - switch { - case resp.StatusCode == http.StatusSwitchingProtocols: - // no-op, we've been upgraded - case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent: - if !isStatusResponse { - return Result{err: r.transformUnstructuredResponseError(resp, req, body)} - } - return Result{err: errors.FromObject(status)} - } - - // If the server gave us a status back, look at what it was. 
- success := resp.StatusCode >= http.StatusOK && resp.StatusCode <= http.StatusPartialContent - if isStatusResponse && (status.Status != unversioned.StatusSuccess && !success) { - // "Failed" requests are clearly just an error and it makes sense to return them as such. - return Result{err: errors.FromObject(status)} - } - - contentType := resp.Header.Get("Content-Type") - var decoder runtime.Decoder - if contentType == r.content.ContentType { - decoder = r.serializers.Decoder - } else { - mediaType, params, err := mime.ParseMediaType(contentType) - if err != nil { - return Result{err: errors.NewInternalError(err)} - } - decoder, err = r.serializers.RenegotiatedDecoder(mediaType, params) - if err != nil { - return Result{ - body: body, - contentType: contentType, - statusCode: resp.StatusCode, - } - } - } - - return Result{ - body: body, - contentType: contentType, - statusCode: resp.StatusCode, - decoder: decoder, - } -} - -// transformUnstructuredResponseError handles an error from the server that is not in a structured form. -// It is expected to transform any response that is not recognizable as a clear server sent error from the -// K8S API using the information provided with the request. In practice, HTTP proxies and client libraries -// introduce a level of uncertainty to the responses returned by servers that in common use result in -// unexpected responses. The rough structure is: -// -// 1. Assume the server sends you something sane - JSON + well defined error objects + proper codes -// - this is the happy path -// - when you get this output, trust what the server sends -// 2. Guard against empty fields / bodies in received JSON and attempt to cull sufficient info from them to -// generate a reasonable facsimile of the original failure. -// - Be sure to use a distinct error type or flag that allows a client to distinguish between this and error 1 above -// 3. Handle true disconnect failures / completely malformed data by moving up to a more generic client error -// 4. Distinguish between various connection failures like SSL certificates, timeouts, proxy errors, unexpected -// initial contact, the presence of mismatched body contents from posted content types -// - Give these a separate distinct error type and capture as much as possible of the original message -// -// TODO: introduce transformation of generic http.Client.Do() errors that separates 4. -func (r *Request) transformUnstructuredResponseError(resp *http.Response, req *http.Request, body []byte) error { - if body == nil && resp.Body != nil { - if data, err := ioutil.ReadAll(resp.Body); err == nil { - body = data - } - } - glog.V(8).Infof("Response Body: %s", string(body)) - - message := "unknown" - if isTextResponse(resp) { - message = strings.TrimSpace(string(body)) - } - retryAfter, _ := retryAfterSeconds(resp) - return errors.NewGenericServerResponse( - resp.StatusCode, - req.Method, - unversioned.GroupResource{ - Group: r.content.GroupVersion.Group, - Resource: r.resource, - }, - r.resourceName, - message, - retryAfter, - true, - ) -} - -// isTextResponse returns true if the response appears to be a textual media type. -func isTextResponse(resp *http.Response) bool { - contentType := resp.Header.Get("Content-Type") - if len(contentType) == 0 { - return true - } - media, _, err := mime.ParseMediaType(contentType) - if err != nil { - return false - } - return strings.HasPrefix(media, "text/") -} - -// checkWait returns true along with a number of seconds if the server instructed us to wait -// before retrying. 
-func checkWait(resp *http.Response) (int, bool) { - switch r := resp.StatusCode; { - // any 500 error code and 429 can trigger a wait - case r == errors.StatusTooManyRequests, r >= 500: - default: - return 0, false - } - i, ok := retryAfterSeconds(resp) - return i, ok -} - -// retryAfterSeconds returns the value of the Retry-After header and true, or 0 and false if -// the header was missing or not a valid number. -func retryAfterSeconds(resp *http.Response) (int, bool) { - if h := resp.Header.Get("Retry-After"); len(h) > 0 { - if i, err := strconv.Atoi(h); err == nil { - return i, true - } - } - return 0, false -} - -// Result contains the result of calling Request.Do(). -type Result struct { - body []byte - contentType string - err error - statusCode int - - decoder runtime.Decoder -} - -// Raw returns the raw result. -func (r Result) Raw() ([]byte, error) { - return r.body, r.err -} - -// Get returns the result as an object. -func (r Result) Get() (runtime.Object, error) { - if r.err != nil { - return nil, r.err - } - if r.decoder == nil { - return nil, fmt.Errorf("serializer for %s doesn't exist", r.contentType) - } - return runtime.Decode(r.decoder, r.body) -} - -// StatusCode returns the HTTP status code of the request. (Only valid if no -// error was returned.) -func (r Result) StatusCode(statusCode *int) Result { - *statusCode = r.statusCode - return r -} - -// Into stores the result into obj, if possible. If obj is nil it is ignored. -func (r Result) Into(obj runtime.Object) error { - if r.err != nil { - return r.err - } - if r.decoder == nil { - return fmt.Errorf("serializer for %s doesn't exist", r.contentType) - } - return runtime.DecodeInto(r.decoder, r.body, obj) -} - -// WasCreated updates the provided bool pointer to whether the server returned -// 201 created or a different response. -func (r Result) WasCreated(wasCreated *bool) Result { - *wasCreated = r.statusCode == http.StatusCreated - return r -} - -// Error returns the error executing the request, nil if no error occurred. -// See the Request.Do() comment for what errors you might get. -func (r Result) Error() error { - return r.err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/restclient/transport.go b/vendor/k8s.io/kubernetes/pkg/client/restclient/transport.go deleted file mode 100644 index 0bfa2ea27..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/restclient/transport.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package restclient - -import ( - "crypto/tls" - "net/http" - - "k8s.io/kubernetes/pkg/client/transport" -) - -// TLSConfigFor returns a tls.Config that will provide the transport level security defined -// by the provided Config. Will return nil if no transport level security is requested. 
-func TLSConfigFor(config *Config) (*tls.Config, error) { - cfg, err := config.transportConfig() - if err != nil { - return nil, err - } - return transport.TLSConfigFor(cfg) -} - -// TransportFor returns an http.RoundTripper that will provide the authentication -// or transport level security defined by the provided Config. Will return the -// default http.DefaultTransport if no special case behavior is needed. -func TransportFor(config *Config) (http.RoundTripper, error) { - cfg, err := config.transportConfig() - if err != nil { - return nil, err - } - return transport.New(cfg) -} - -// HTTPWrappersForConfig wraps a round tripper with any relevant layered behavior from the -// config. Exposed to allow more clients that need HTTP-like behavior but then must hijack -// the underlying connection (like WebSocket or HTTP2 clients). Pure HTTP clients should use -// the higher level TransportFor or RESTClientFor methods. -func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) { - cfg, err := config.transportConfig() - if err != nil { - return nil, err - } - return transport.HTTPWrappersForConfig(cfg, rt) -} - -// transportConfig converts a client config to an appropriate transport config. -func (c *Config) transportConfig() (*transport.Config, error) { - wt := c.WrapTransport - if c.AuthProvider != nil { - provider, err := GetAuthProvider(c.Host, c.AuthProvider, c.AuthConfigPersister) - if err != nil { - return nil, err - } - if wt != nil { - previousWT := wt - wt = func(rt http.RoundTripper) http.RoundTripper { - return provider.WrapTransport(previousWT(rt)) - } - } else { - wt = provider.WrapTransport - } - } - return &transport.Config{ - UserAgent: c.UserAgent, - Transport: c.Transport, - WrapTransport: wt, - TLS: transport.TLSConfig{ - CAFile: c.CAFile, - CAData: c.CAData, - CertFile: c.CertFile, - CertData: c.CertData, - KeyFile: c.KeyFile, - KeyData: c.KeyData, - Insecure: c.Insecure, - }, - Username: c.Username, - Password: c.Password, - BearerToken: c.BearerToken, - Impersonate: c.Impersonate, - }, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/transport/config.go b/vendor/k8s.io/kubernetes/pkg/client/transport/config.go deleted file mode 100644 index 63a63fbb4..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/transport/config.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package transport - -import "net/http" - -// Config holds various options for establishing a transport. -type Config struct { - // UserAgent is an optional field that specifies the caller of this - // request. - UserAgent string - - // The base TLS configuration for this transport. - TLS TLSConfig - - // Username and password for basic authentication - Username string - Password string - - // Bearer token for authentication - BearerToken string - - // Impersonate is the username that this Config will impersonate - Impersonate string - - // Transport may be used for custom HTTP behavior. 
This attribute may - // not be specified with the TLS client certificate options. Use - // WrapTransport for most client level operations. - Transport http.RoundTripper - - // WrapTransport will be invoked for custom HTTP behavior after the - // underlying transport is initialized (either the transport created - // from TLSClientConfig, Transport, or http.DefaultTransport). The - // config may layer other RoundTrippers on top of the returned - // RoundTripper. - WrapTransport func(rt http.RoundTripper) http.RoundTripper -} - -// HasCA returns whether the configuration has a certificate authority or not. -func (c *Config) HasCA() bool { - return len(c.TLS.CAData) > 0 || len(c.TLS.CAFile) > 0 -} - -// HasBasicAuth returns whether the configuration has basic authentication or not. -func (c *Config) HasBasicAuth() bool { - return len(c.Username) != 0 -} - -// HasTokenAuth returns whether the configuration has token authentication or not. -func (c *Config) HasTokenAuth() bool { - return len(c.BearerToken) != 0 -} - -// HasCertAuth returns whether the configuration has certificate authentication or not. -func (c *Config) HasCertAuth() bool { - return len(c.TLS.CertData) != 0 || len(c.TLS.CertFile) != 0 -} - -// TLSConfig holds the information needed to set up a TLS transport. -type TLSConfig struct { - CAFile string // Path of the PEM-encoded server trusted root certificates. - CertFile string // Path of the PEM-encoded client certificate. - KeyFile string // Path of the PEM-encoded client key. - - Insecure bool // Server should be accessed without verifying the certificate. For testing only. - - CAData []byte // Bytes of the PEM-encoded server trusted root certificates. Supercedes CAFile. - CertData []byte // Bytes of the PEM-encoded client certificate. Supercedes CertFile. - KeyData []byte // Bytes of the PEM-encoded client key. Supercedes KeyFile. -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/transport/round_trippers.go b/vendor/k8s.io/kubernetes/pkg/client/transport/round_trippers.go deleted file mode 100644 index 55284ebc6..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/transport/round_trippers.go +++ /dev/null @@ -1,337 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package transport - -import ( - "fmt" - "net/http" - "time" - - "github.com/golang/glog" -) - -// HTTPWrappersForConfig wraps a round tripper with any relevant layered -// behavior from the config. Exposed to allow more clients that need HTTP-like -// behavior but then must hijack the underlying connection (like WebSocket or -// HTTP2 clients). Pure HTTP clients should use the RoundTripper returned from -// New. 
-func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) { - if config.WrapTransport != nil { - rt = config.WrapTransport(rt) - } - - rt = DebugWrappers(rt) - - // Set authentication wrappers - switch { - case config.HasBasicAuth() && config.HasTokenAuth(): - return nil, fmt.Errorf("username/password or bearer token may be set, but not both") - case config.HasTokenAuth(): - rt = NewBearerAuthRoundTripper(config.BearerToken, rt) - case config.HasBasicAuth(): - rt = NewBasicAuthRoundTripper(config.Username, config.Password, rt) - } - if len(config.UserAgent) > 0 { - rt = NewUserAgentRoundTripper(config.UserAgent, rt) - } - if len(config.Impersonate) > 0 { - rt = NewImpersonatingRoundTripper(config.Impersonate, rt) - } - return rt, nil -} - -// DebugWrappers wraps a round tripper and logs based on the current log level. -func DebugWrappers(rt http.RoundTripper) http.RoundTripper { - switch { - case bool(glog.V(9)): - rt = newDebuggingRoundTripper(rt, debugCurlCommand, debugURLTiming, debugResponseHeaders) - case bool(glog.V(8)): - rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus, debugResponseHeaders) - case bool(glog.V(7)): - rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus) - case bool(glog.V(6)): - rt = newDebuggingRoundTripper(rt, debugURLTiming) - } - - return rt -} - -type requestCanceler interface { - CancelRequest(*http.Request) -} - -type userAgentRoundTripper struct { - agent string - rt http.RoundTripper -} - -func NewUserAgentRoundTripper(agent string, rt http.RoundTripper) http.RoundTripper { - return &userAgentRoundTripper{agent, rt} -} - -func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if len(req.Header.Get("User-Agent")) != 0 { - return rt.rt.RoundTrip(req) - } - req = cloneRequest(req) - req.Header.Set("User-Agent", rt.agent) - return rt.rt.RoundTrip(req) -} - -func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - glog.Errorf("CancelRequest not implemented") - } -} - -func (rt *userAgentRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } - -type basicAuthRoundTripper struct { - username string - password string - rt http.RoundTripper -} - -// NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a -// request unless it has already been set. -func NewBasicAuthRoundTripper(username, password string, rt http.RoundTripper) http.RoundTripper { - return &basicAuthRoundTripper{username, password, rt} -} - -func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if len(req.Header.Get("Authorization")) != 0 { - return rt.rt.RoundTrip(req) - } - req = cloneRequest(req) - req.SetBasicAuth(rt.username, rt.password) - return rt.rt.RoundTrip(req) -} - -func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - glog.Errorf("CancelRequest not implemented") - } -} - -func (rt *basicAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } - -type impersonatingRoundTripper struct { - impersonate string - delegate http.RoundTripper -} - -// NewImpersonatingRoundTripper will add an Act-As header to a request unless it has already been set. 
-func NewImpersonatingRoundTripper(impersonate string, delegate http.RoundTripper) http.RoundTripper { - return &impersonatingRoundTripper{impersonate, delegate} -} - -func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if len(req.Header.Get("Impersonate-User")) != 0 { - return rt.delegate.RoundTrip(req) - } - req = cloneRequest(req) - req.Header.Set("Impersonate-User", rt.impersonate) - return rt.delegate.RoundTrip(req) -} - -func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.delegate.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - glog.Errorf("CancelRequest not implemented") - } -} - -func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.delegate } - -type bearerAuthRoundTripper struct { - bearer string - rt http.RoundTripper -} - -// NewBearerAuthRoundTripper adds the provided bearer token to a request -// unless the authorization header has already been set. -func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTripper { - return &bearerAuthRoundTripper{bearer, rt} -} - -func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if len(req.Header.Get("Authorization")) != 0 { - return rt.rt.RoundTrip(req) - } - - req = cloneRequest(req) - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", rt.bearer)) - return rt.rt.RoundTrip(req) -} - -func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.rt.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - glog.Errorf("CancelRequest not implemented") - } -} - -func (rt *bearerAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. 
-func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return r2 -} - -// requestInfo keeps track of information about a request/response combination -type requestInfo struct { - RequestHeaders http.Header - RequestVerb string - RequestURL string - - ResponseStatus string - ResponseHeaders http.Header - ResponseErr error - - Duration time.Duration -} - -// newRequestInfo creates a new RequestInfo based on an http request -func newRequestInfo(req *http.Request) *requestInfo { - return &requestInfo{ - RequestURL: req.URL.String(), - RequestVerb: req.Method, - RequestHeaders: req.Header, - } -} - -// complete adds information about the response to the requestInfo -func (r *requestInfo) complete(response *http.Response, err error) { - if err != nil { - r.ResponseErr = err - return - } - r.ResponseStatus = response.Status - r.ResponseHeaders = response.Header -} - -// toCurl returns a string that can be run as a command in a terminal (minus the body) -func (r *requestInfo) toCurl() string { - headers := "" - for key, values := range r.RequestHeaders { - for _, value := range values { - headers += fmt.Sprintf(` -H %q`, fmt.Sprintf("%s: %s", key, value)) - } - } - - return fmt.Sprintf("curl -k -v -X%s %s %s", r.RequestVerb, headers, r.RequestURL) -} - -// debuggingRoundTripper will display information about the requests passing -// through it based on what is configured -type debuggingRoundTripper struct { - delegatedRoundTripper http.RoundTripper - - levels map[debugLevel]bool -} - -type debugLevel int - -const ( - debugJustURL debugLevel = iota - debugURLTiming - debugCurlCommand - debugRequestHeaders - debugResponseStatus - debugResponseHeaders -) - -func newDebuggingRoundTripper(rt http.RoundTripper, levels ...debugLevel) *debuggingRoundTripper { - drt := &debuggingRoundTripper{ - delegatedRoundTripper: rt, - levels: make(map[debugLevel]bool, len(levels)), - } - for _, v := range levels { - drt.levels[v] = true - } - return drt -} - -func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) { - if canceler, ok := rt.delegatedRoundTripper.(requestCanceler); ok { - canceler.CancelRequest(req) - } else { - glog.Errorf("CancelRequest not implemented") - } -} - -func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - reqInfo := newRequestInfo(req) - - if rt.levels[debugJustURL] { - glog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL) - } - if rt.levels[debugCurlCommand] { - glog.Infof("%s", reqInfo.toCurl()) - - } - if rt.levels[debugRequestHeaders] { - glog.Infof("Request Headers:") - for key, values := range reqInfo.RequestHeaders { - for _, value := range values { - glog.Infof(" %s: %s", key, value) - } - } - } - - startTime := time.Now() - response, err := rt.delegatedRoundTripper.RoundTrip(req) - reqInfo.Duration = time.Since(startTime) - - reqInfo.complete(response, err) - - if rt.levels[debugURLTiming] { - glog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) - } - if rt.levels[debugResponseStatus] { - glog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) - } - if rt.levels[debugResponseHeaders] { - glog.Infof("Response Headers:") - for key, values := range 
reqInfo.ResponseHeaders { - for _, value := range values { - glog.Infof(" %s: %s", key, value) - } - } - } - - return response, err -} - -func (rt *debuggingRoundTripper) WrappedRoundTripper() http.RoundTripper { - return rt.delegatedRoundTripper -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/transport/transport.go b/vendor/k8s.io/kubernetes/pkg/client/transport/transport.go deleted file mode 100644 index 2d20e1b87..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/transport/transport.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package transport - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net/http" -) - -// New returns an http.RoundTripper that will provide the authentication -// or transport level security defined by the provided Config. -func New(config *Config) (http.RoundTripper, error) { - // Set transport level security - if config.Transport != nil && (config.HasCA() || config.HasCertAuth() || config.TLS.Insecure) { - return nil, fmt.Errorf("using a custom transport with TLS certificate options or the insecure flag is not allowed") - } - - var ( - rt http.RoundTripper - err error - ) - - if config.Transport != nil { - rt = config.Transport - } else { - rt, err = tlsCache.get(config) - if err != nil { - return nil, err - } - } - - return HTTPWrappersForConfig(config, rt) -} - -// TLSConfigFor returns a tls.Config that will provide the transport level security defined -// by the provided Config. Will return nil if no transport level security is requested. -func TLSConfigFor(c *Config) (*tls.Config, error) { - if !(c.HasCA() || c.HasCertAuth() || c.TLS.Insecure) { - return nil, nil - } - if c.HasCA() && c.TLS.Insecure { - return nil, fmt.Errorf("specifying a root certificates file with the insecure flag is not allowed") - } - if err := loadTLSFiles(c); err != nil { - return nil, err - } - - tlsConfig := &tls.Config{ - // Can't use SSLv3 because of POODLE and BEAST - // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher - // Can't use TLSv1.1 because of RC4 cipher usage - MinVersion: tls.VersionTLS12, - InsecureSkipVerify: c.TLS.Insecure, - } - - if c.HasCA() { - tlsConfig.RootCAs = rootCertPool(c.TLS.CAData) - } - - if c.HasCertAuth() { - cert, err := tls.X509KeyPair(c.TLS.CertData, c.TLS.KeyData) - if err != nil { - return nil, err - } - tlsConfig.Certificates = []tls.Certificate{cert} - } - - return tlsConfig, nil -} - -// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData, -// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are -// either populated or were empty to start. 
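// A minimal sketch of the tls.Config that TLSConfigFor above builds: TLS 1.2 as
// the floor, and a RootCAs pool constructed from PEM data when CA data is
// present (a nil pool falls back to the system roots). The CA file path is a
// hypothetical placeholder.
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"
)

func tlsConfigFromCA(caPEM []byte) *tls.Config {
	cfg := &tls.Config{MinVersion: tls.VersionTLS12}
	if len(caPEM) > 0 {
		pool := x509.NewCertPool()
		pool.AppendCertsFromPEM(caPEM)
		// Once RootCAs is set, only these CAs are trusted for server certificates.
		cfg.RootCAs = pool
	}
	return cfg
}

func main() {
	caPEM, err := ioutil.ReadFile("/path/to/ca.crt") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	_ = tlsConfigFromCA(caPEM)
}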
-func loadTLSFiles(c *Config) error { - var err error - c.TLS.CAData, err = dataFromSliceOrFile(c.TLS.CAData, c.TLS.CAFile) - if err != nil { - return err - } - - c.TLS.CertData, err = dataFromSliceOrFile(c.TLS.CertData, c.TLS.CertFile) - if err != nil { - return err - } - - c.TLS.KeyData, err = dataFromSliceOrFile(c.TLS.KeyData, c.TLS.KeyFile) - if err != nil { - return err - } - return nil -} - -// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, -// or an error if an error occurred reading the file -func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { - if len(data) > 0 { - return data, nil - } - if len(file) > 0 { - fileData, err := ioutil.ReadFile(file) - if err != nil { - return []byte{}, err - } - return fileData, nil - } - return nil, nil -} - -// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs". -// When caData is not empty, it will be the ONLY information used in the CertPool. -func rootCertPool(caData []byte) *x509.CertPool { - // What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go - // code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values - // It doesn't allow trusting either/or, but hopefully that won't be an issue - if len(caData) == 0 { - return nil - } - - // if we have caData, use it - certPool := x509.NewCertPool() - certPool.AppendCertsFromPEM(caData) - return certPool -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/discovery_client.go b/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/discovery_client.go deleted file mode 100644 index 283dd5a63..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/typed/discovery/discovery_client.go +++ /dev/null @@ -1,314 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package discovery - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "github.com/emicklei/go-restful/swagger" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/version" -) - -// DiscoveryInterface holds the methods that discover server-supported API groups, -// versions and resources. -type DiscoveryInterface interface { - ServerGroupsInterface - ServerResourcesInterface - ServerVersionInterface - SwaggerSchemaInterface -} - -// ServerGroupsInterface has methods for obtaining supported groups on the API server -type ServerGroupsInterface interface { - // ServerGroups returns the supported groups, with information like supported versions and the - // preferred version. 
- ServerGroups() (*unversioned.APIGroupList, error) -} - -// ServerResourcesInterface has methods for obtaining supported resources on the API server -type ServerResourcesInterface interface { - // ServerResourcesForGroupVersion returns the supported resources for a group and version. - ServerResourcesForGroupVersion(groupVersion string) (*unversioned.APIResourceList, error) - // ServerResources returns the supported resources for all groups and versions. - ServerResources() (map[string]*unversioned.APIResourceList, error) - // ServerPreferredResources returns the supported resources with the version preferred by the - // server. - ServerPreferredResources() ([]unversioned.GroupVersionResource, error) - // ServerPreferredNamespacedResources returns the supported namespaced resources with the - // version preferred by the server. - ServerPreferredNamespacedResources() ([]unversioned.GroupVersionResource, error) -} - -// ServerVersionInterface has a method for retrieving the server's version. -type ServerVersionInterface interface { - // ServerVersion retrieves and parses the server's version (git version). - ServerVersion() (*version.Info, error) -} - -// SwaggerSchemaInterface has a method to retrieve the swagger schema. -type SwaggerSchemaInterface interface { - // SwaggerSchema retrieves and parses the swagger API schema the server supports. - SwaggerSchema(version unversioned.GroupVersion) (*swagger.ApiDeclaration, error) -} - -// DiscoveryClient implements the functions that discover server-supported API groups, -// versions and resources. -type DiscoveryClient struct { - *restclient.RESTClient -} - -// Convert unversioned.APIVersions to unversioned.APIGroup. APIVersions is used by legacy v1, so -// group would be "". -func apiVersionsToAPIGroup(apiVersions *unversioned.APIVersions) (apiGroup unversioned.APIGroup) { - groupVersions := []unversioned.GroupVersionForDiscovery{} - for _, version := range apiVersions.Versions { - groupVersion := unversioned.GroupVersionForDiscovery{ - GroupVersion: version, - Version: version, - } - groupVersions = append(groupVersions, groupVersion) - } - apiGroup.Versions = groupVersions - // There should be only one groupVersion returned at /api - apiGroup.PreferredVersion = groupVersions[0] - return -} - -// ServerGroups returns the supported groups, with information like supported versions and the -// preferred version. -func (d *DiscoveryClient) ServerGroups() (apiGroupList *unversioned.APIGroupList, err error) { - // Get the groupVersions exposed at /api - v := &unversioned.APIVersions{} - err = d.Get().AbsPath("/api").Do().Into(v) - apiGroup := unversioned.APIGroup{} - if err == nil { - apiGroup = apiVersionsToAPIGroup(v) - } - if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) { - return nil, err - } - - // Get the groupVersions exposed at /apis - apiGroupList = &unversioned.APIGroupList{} - err = d.Get().AbsPath("/apis").Do().Into(apiGroupList) - if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) { - return nil, err - } - // to be compatible with a v1.0 server, if it's a 403 or 404, ignore and return whatever we got from /api - if err != nil && (errors.IsNotFound(err) || errors.IsForbidden(err)) { - apiGroupList = &unversioned.APIGroupList{} - } - - // append the group retrieved from /api to the list - apiGroupList.Groups = append(apiGroupList.Groups, apiGroup) - return apiGroupList, nil -} - -// ServerResourcesForGroupVersion returns the supported resources for a group and version. 
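// Sketch of the URL convention the discovery client relies on (visible in
// ServerResourcesForGroupVersion below): the legacy core group "v1" is served
// under /api, every other group/version under /apis/<group>/<version>.
// Illustrative only.
package main

import "fmt"

func discoveryPath(groupVersion string) string {
	if groupVersion == "v1" {
		return "/api/" + groupVersion
	}
	return "/apis/" + groupVersion
}

func main() {
	fmt.Println(discoveryPath("v1"))                 // /api/v1
	fmt.Println(discoveryPath("extensions/v1beta1")) // /apis/extensions/v1beta1
}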
-func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (resources *unversioned.APIResourceList, err error) { - url := url.URL{} - if len(groupVersion) == 0 { - return nil, fmt.Errorf("groupVersion shouldn't be empty") - } else if groupVersion == "v1" { - url.Path = "/api/" + groupVersion - } else { - url.Path = "/apis/" + groupVersion - } - resources = &unversioned.APIResourceList{} - err = d.Get().AbsPath(url.String()).Do().Into(resources) - if err != nil { - // ignore 403 or 404 error to be compatible with an v1.0 server. - if groupVersion == "v1" && (errors.IsNotFound(err) || errors.IsForbidden(err)) { - return resources, nil - } else { - return nil, err - } - } - return resources, nil -} - -// ServerResources returns the supported resources for all groups and versions. -func (d *DiscoveryClient) ServerResources() (map[string]*unversioned.APIResourceList, error) { - apiGroups, err := d.ServerGroups() - if err != nil { - return nil, err - } - groupVersions := unversioned.ExtractGroupVersions(apiGroups) - result := map[string]*unversioned.APIResourceList{} - for _, groupVersion := range groupVersions { - resources, err := d.ServerResourcesForGroupVersion(groupVersion) - if err != nil { - return nil, err - } - result[groupVersion] = resources - } - return result, nil -} - -// serverPreferredResources returns the supported resources with the version preferred by the -// server. If namespaced is true, only namespaced resources will be returned. -func (d *DiscoveryClient) serverPreferredResources(namespaced bool) ([]unversioned.GroupVersionResource, error) { - results := []unversioned.GroupVersionResource{} - serverGroupList, err := d.ServerGroups() - if err != nil { - return results, err - } - - allErrs := []error{} - for _, apiGroup := range serverGroupList.Groups { - preferredVersion := apiGroup.PreferredVersion - apiResourceList, err := d.ServerResourcesForGroupVersion(preferredVersion.GroupVersion) - if err != nil { - allErrs = append(allErrs, err) - continue - } - groupVersion := unversioned.GroupVersion{Group: apiGroup.Name, Version: preferredVersion.Version} - for _, apiResource := range apiResourceList.APIResources { - // ignore the root scoped resources if "namespaced" is true. - if namespaced && !apiResource.Namespaced { - continue - } - if strings.Contains(apiResource.Name, "/") { - continue - } - results = append(results, groupVersion.WithResource(apiResource.Name)) - } - } - return results, utilerrors.NewAggregate(allErrs) -} - -// ServerPreferredResources returns the supported resources with the version preferred by the -// server. -func (d *DiscoveryClient) ServerPreferredResources() ([]unversioned.GroupVersionResource, error) { - return d.serverPreferredResources(false) -} - -// ServerPreferredNamespacedResources returns the supported namespaced resources with the -// version preferred by the server. -func (d *DiscoveryClient) ServerPreferredNamespacedResources() ([]unversioned.GroupVersionResource, error) { - return d.serverPreferredResources(true) -} - -// ServerVersion retrieves and parses the server's version (git version). -func (d *DiscoveryClient) ServerVersion() (*version.Info, error) { - body, err := d.Get().AbsPath("/version").Do().Raw() - if err != nil { - return nil, err - } - var info version.Info - err = json.Unmarshal(body, &info) - if err != nil { - return nil, fmt.Errorf("got '%s': %v", string(body), err) - } - return &info, nil -} - -// SwaggerSchema retrieves and parses the swagger API schema the server supports. 
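// Sketch of the ServerVersion round trip above with the REST client machinery
// stripped away: GET /version and decode the JSON body. The server URL is a
// hypothetical placeholder; a real caller would go through restclient with
// auth and TLS configured.
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

type versionInfo struct {
	Major      string `json:"major"`
	Minor      string `json:"minor"`
	GitVersion string `json:"gitVersion"`
}

func main() {
	resp, err := http.Get("https://kubernetes.example.invalid/version") // hypothetical endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	var info versionInfo
	if err := json.Unmarshal(body, &info); err != nil {
		log.Fatalf("got %q: %v", string(body), err)
	}
	fmt.Println(info.GitVersion)
}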
-func (d *DiscoveryClient) SwaggerSchema(version unversioned.GroupVersion) (*swagger.ApiDeclaration, error) { - if version.IsEmpty() { - return nil, fmt.Errorf("groupVersion cannot be empty") - } - - groupList, err := d.ServerGroups() - if err != nil { - return nil, err - } - groupVersions := unversioned.ExtractGroupVersions(groupList) - // This check also takes care the case that kubectl is newer than the running endpoint - if stringDoesntExistIn(version.String(), groupVersions) { - return nil, fmt.Errorf("API version: %v is not supported by the server. Use one of: %v", version, groupVersions) - } - var path string - if version == v1.SchemeGroupVersion { - path = "/swaggerapi/api/" + version.Version - } else { - path = "/swaggerapi/apis/" + version.Group + "/" + version.Version - } - - body, err := d.Get().AbsPath(path).Do().Raw() - if err != nil { - return nil, err - } - var schema swagger.ApiDeclaration - err = json.Unmarshal(body, &schema) - if err != nil { - return nil, fmt.Errorf("got '%s': %v", string(body), err) - } - return &schema, nil -} - -func setDiscoveryDefaults(config *restclient.Config) error { - config.APIPath = "" - config.GroupVersion = nil - codec := runtime.NoopEncoder{Decoder: api.Codecs.UniversalDecoder()} - config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper( - runtime.SerializerInfo{Serializer: codec}, - runtime.StreamSerializerInfo{}, - ) - if len(config.UserAgent) == 0 { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - return nil -} - -// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. This client -// can be used to discover supported resources in the API server. -func NewDiscoveryClientForConfig(c *restclient.Config) (*DiscoveryClient, error) { - config := *c - if err := setDiscoveryDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.UnversionedRESTClientFor(&config) - return &DiscoveryClient{client}, err -} - -// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. If -// there is an error, it panics. -func NewDiscoveryClientForConfigOrDie(c *restclient.Config) *DiscoveryClient { - client, err := NewDiscoveryClientForConfig(c) - if err != nil { - panic(err) - } - return client - -} - -// New creates a new DiscoveryClient for the given RESTClient. -func NewDiscoveryClient(c *restclient.RESTClient) *DiscoveryClient { - return &DiscoveryClient{c} -} - -func stringDoesntExistIn(str string, slice []string) bool { - for _, s := range slice { - if s == str { - return false - } - } - return true -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset/clientset_adaption.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset/clientset_adaption.go deleted file mode 100644 index 680cadc92..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset/clientset_adaption.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package internalclientset - -import ( - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - unversionedbatch "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/unversioned" - unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" - unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned" - "k8s.io/kubernetes/pkg/client/typed/discovery" - "k8s.io/kubernetes/pkg/client/unversioned" -) - -// FromUnversionedClient adapts a unversioned.Client to a internalclientset.Clientset. -// This function is temporary. We will remove it when everyone has moved to using -// Clientset. New code should NOT use this function. -func FromUnversionedClient(c *unversioned.Client) *internalclientset.Clientset { - var clientset internalclientset.Clientset - if c != nil { - clientset.CoreClient = unversionedcore.New(c.RESTClient) - } else { - clientset.CoreClient = unversionedcore.New(nil) - } - if c != nil && c.ExtensionsClient != nil { - clientset.ExtensionsClient = unversionedextensions.New(c.ExtensionsClient.RESTClient) - } else { - clientset.ExtensionsClient = unversionedextensions.New(nil) - } - if c != nil && c.BatchClient != nil { - clientset.BatchClient = unversionedbatch.New(c.BatchClient.RESTClient) - } else { - clientset.BatchClient = unversionedbatch.New(nil) - } - if c != nil && c.DiscoveryClient != nil { - clientset.DiscoveryClient = discovery.NewDiscoveryClient(c.DiscoveryClient.RESTClient) - } else { - clientset.DiscoveryClient = discovery.NewDiscoveryClient(nil) - } - - return &clientset -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/apps.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/apps.go deleted file mode 100644 index 1905c29c2..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/apps.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/client/restclient" -) - -type AppsInterface interface { - PetSetNamespacer -} - -// AppsClient is used to interact with Kubernetes batch features. 
-type AppsClient struct { - *restclient.RESTClient -} - -func (c *AppsClient) PetSets(namespace string) PetSetInterface { - return newPetSet(c, namespace) -} - -func NewApps(c *restclient.Config) (*AppsClient, error) { - config := *c - if err := setAppsDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AppsClient{client}, nil -} - -func NewAppsOrDie(c *restclient.Config) *AppsClient { - client, err := NewApps(c) - if err != nil { - panic(err) - } - return client -} - -func setAppsDefaults(config *restclient.Config) error { - g, err := registered.Group(apps.GroupName) - if err != nil { - return err - } - config.APIPath = defaultAPIPath - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - // TODO: Unconditionally set the config.Version, until we fix the config. - //if config.Version == "" { - copyGroupVersion := g.GroupVersion - config.GroupVersion = ©GroupVersion - //} - - config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) - config.NegotiatedSerializer = api.Codecs - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/autoscaling.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/autoscaling.go deleted file mode 100644 index 9e543c9d3..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/autoscaling.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/autoscaling" - "k8s.io/kubernetes/pkg/client/restclient" -) - -type AutoscalingInterface interface { - HorizontalPodAutoscalersNamespacer -} - -// AutoscalingClient is used to interact with Kubernetes autoscaling features. 
-type AutoscalingClient struct { - *restclient.RESTClient -} - -func (c *AutoscalingClient) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { - return newHorizontalPodAutoscalers(c, namespace) -} - -func NewAutoscaling(c *restclient.Config) (*AutoscalingClient, error) { - config := *c - if err := setAutoscalingDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AutoscalingClient{client}, nil -} - -func NewAutoscalingOrDie(c *restclient.Config) *AutoscalingClient { - client, err := NewAutoscaling(c) - if err != nil { - panic(err) - } - return client -} - -func setAutoscalingDefaults(config *restclient.Config) error { - // if autoscaling group is not registered, return an error - g, err := registered.Group(autoscaling.GroupName) - if err != nil { - return err - } - config.APIPath = defaultAPIPath - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - // TODO: Unconditionally set the config.Version, until we fix the config. - //if config.Version == "" { - copyGroupVersion := g.GroupVersion - config.GroupVersion = ©GroupVersion - //} - - config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) - config.NegotiatedSerializer = api.Codecs - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/batch.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/batch.go deleted file mode 100644 index 40fc49dc1..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/batch.go +++ /dev/null @@ -1,114 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/batch/v2alpha1" - "k8s.io/kubernetes/pkg/client/restclient" -) - -type BatchInterface interface { - JobsNamespacer - ScheduledJobsNamespacer -} - -// BatchClient is used to interact with Kubernetes batch features. 
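// The set*Defaults helpers in these per-group clients (setAppsDefaults,
// setAutoscalingDefaults, and setBatchDefaults below) share one idiom: copy the
// caller's config, then fill in only zero-valued fields such as QPS, Burst, and
// UserAgent. A standalone sketch of that idiom with made-up stand-in values:
package main

import "fmt"

type clientConfig struct {
	UserAgent string
	QPS       float32
	Burst     int
}

func withDefaults(c clientConfig) clientConfig {
	if c.UserAgent == "" {
		c.UserAgent = "example-client/v0.0.0" // stand-in for DefaultKubernetesUserAgent()
	}
	if c.QPS == 0 {
		c.QPS = 5
	}
	if c.Burst == 0 {
		c.Burst = 10
	}
	return c
}

func main() {
	// Fields the caller sets win; everything else gets the documented default.
	fmt.Printf("%+v\n", withDefaults(clientConfig{QPS: 20}))
}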
-type BatchClient struct { - *restclient.RESTClient -} - -func (c *BatchClient) Jobs(namespace string) JobInterface { - return newJobsV1(c, namespace) -} - -func (c *BatchClient) ScheduledJobs(namespace string) ScheduledJobInterface { - return newScheduledJobs(c, namespace) -} - -func NewBatch(c *restclient.Config) (*BatchClient, error) { - config := *c - if err := setBatchDefaults(&config, nil); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &BatchClient{client}, nil -} - -func NewBatchV2Alpha1(c *restclient.Config) (*BatchClient, error) { - config := *c - if err := setBatchDefaults(&config, &v2alpha1.SchemeGroupVersion); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &BatchClient{client}, nil -} - -func NewBatchOrDie(c *restclient.Config) *BatchClient { - var ( - client *BatchClient - err error - ) - if c.ContentConfig.GroupVersion != nil && *c.ContentConfig.GroupVersion == v2alpha1.SchemeGroupVersion { - client, err = NewBatchV2Alpha1(c) - } else { - client, err = NewBatch(c) - } - if err != nil { - panic(err) - } - return client -} - -func setBatchDefaults(config *restclient.Config, gv *unversioned.GroupVersion) error { - // if batch group is not registered, return an error - g, err := registered.Group(batch.GroupName) - if err != nil { - return err - } - config.APIPath = defaultAPIPath - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - // TODO: Unconditionally set the config.Version, until we fix the config. - //if config.Version == "" { - copyGroupVersion := g.GroupVersion - if gv != nil { - copyGroupVersion = *gv - } - config.GroupVersion = ©GroupVersion - //} - - config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) - config.NegotiatedSerializer = api.Codecs - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/client.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/client.go deleted file mode 100644 index df6804004..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/client.go +++ /dev/null @@ -1,173 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "net" - "net/url" - "strings" - - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/client/typed/discovery" -) - -// Interface holds the methods for clients of Kubernetes, -// an interface to allow mock testing. 
-type Interface interface { - PodsNamespacer - PodTemplatesNamespacer - ReplicationControllersNamespacer - ServicesNamespacer - EndpointsNamespacer - NodesInterface - EventNamespacer - LimitRangesNamespacer - ResourceQuotasNamespacer - ServiceAccountsNamespacer - SecretsNamespacer - NamespacesInterface - PersistentVolumesInterface - PersistentVolumeClaimsNamespacer - ComponentStatusesInterface - ConfigMapsNamespacer - Autoscaling() AutoscalingInterface - Batch() BatchInterface - Extensions() ExtensionsInterface - Rbac() RbacInterface - Discovery() discovery.DiscoveryInterface -} - -func (c *Client) ReplicationControllers(namespace string) ReplicationControllerInterface { - return newReplicationControllers(c, namespace) -} - -func (c *Client) Nodes() NodeInterface { - return newNodes(c) -} - -func (c *Client) Events(namespace string) EventInterface { - return newEvents(c, namespace) -} - -func (c *Client) Endpoints(namespace string) EndpointsInterface { - return newEndpoints(c, namespace) -} - -func (c *Client) Pods(namespace string) PodInterface { - return newPods(c, namespace) -} - -func (c *Client) PodTemplates(namespace string) PodTemplateInterface { - return newPodTemplates(c, namespace) -} - -func (c *Client) Services(namespace string) ServiceInterface { - return newServices(c, namespace) -} -func (c *Client) LimitRanges(namespace string) LimitRangeInterface { - return newLimitRanges(c, namespace) -} - -func (c *Client) ResourceQuotas(namespace string) ResourceQuotaInterface { - return newResourceQuotas(c, namespace) -} - -func (c *Client) ServiceAccounts(namespace string) ServiceAccountsInterface { - return newServiceAccounts(c, namespace) -} - -func (c *Client) Secrets(namespace string) SecretsInterface { - return newSecrets(c, namespace) -} - -func (c *Client) Namespaces() NamespaceInterface { - return newNamespaces(c) -} - -func (c *Client) PersistentVolumes() PersistentVolumeInterface { - return newPersistentVolumes(c) -} - -func (c *Client) PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface { - return newPersistentVolumeClaims(c, namespace) -} - -func (c *Client) ComponentStatuses() ComponentStatusInterface { - return newComponentStatuses(c) -} - -func (c *Client) ConfigMaps(namespace string) ConfigMapsInterface { - return newConfigMaps(c, namespace) -} - -// Client is the implementation of a Kubernetes client. -type Client struct { - *restclient.RESTClient - *AutoscalingClient - *BatchClient - *ExtensionsClient - *AppsClient - *PolicyClient - *RbacClient - *discovery.DiscoveryClient -} - -// IsTimeout tests if this is a timeout error in the underlying transport. -// This is unbelievably ugly. 
-// See: http://stackoverflow.com/questions/23494950/specifically-check-for-timeout-error for details -func IsTimeout(err error) bool { - if err == nil { - return false - } - switch err := err.(type) { - case *url.Error: - if err, ok := err.Err.(net.Error); ok { - return err.Timeout() - } - case net.Error: - return err.Timeout() - } - - if strings.Contains(err.Error(), "use of closed network connection") { - return true - } - return false -} - -func (c *Client) Autoscaling() AutoscalingInterface { - return c.AutoscalingClient -} - -func (c *Client) Batch() BatchInterface { - return c.BatchClient -} - -func (c *Client) Extensions() ExtensionsInterface { - return c.ExtensionsClient -} - -func (c *Client) Apps() AppsInterface { - return c.AppsClient -} - -func (c *Client) Rbac() RbacInterface { - return c.RbacClient -} - -func (c *Client) Discovery() discovery.DiscoveryInterface { - return c.DiscoveryClient -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/helpers.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/helpers.go deleted file mode 100644 index 87330c500..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/helpers.go +++ /dev/null @@ -1,183 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "encoding/base64" - "errors" - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" -) - -func init() { - sDec, _ := base64.StdEncoding.DecodeString("REDACTED+") - redactedBytes = []byte(string(sDec)) -} - -// IsConfigEmpty returns true if the config is empty. 
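// A compact sketch of the check client.go's IsTimeout above performs: unwrap a
// *url.Error and test net.Error's Timeout(). The request below deliberately
// uses a tiny client timeout to provoke such an error; the URL is illustrative.
package main

import (
	"fmt"
	"net"
	"net/http"
	"net/url"
	"time"
)

func isTimeout(err error) bool {
	switch e := err.(type) {
	case *url.Error:
		if ne, ok := e.Err.(net.Error); ok {
			return ne.Timeout()
		}
	case net.Error:
		return e.Timeout()
	}
	return false
}

func main() {
	client := &http.Client{Timeout: time.Nanosecond} // force a timeout
	_, err := client.Get("https://example.com")
	fmt.Println(isTimeout(err))
}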
-func IsConfigEmpty(config *Config) bool { - return len(config.AuthInfos) == 0 && len(config.Clusters) == 0 && len(config.Contexts) == 0 && - len(config.CurrentContext) == 0 && - len(config.Preferences.Extensions) == 0 && !config.Preferences.Colors && - len(config.Extensions) == 0 -} - -// MinifyConfig read the current context and uses that to keep only the relevant pieces of config -// This is useful for making secrets based on kubeconfig files -func MinifyConfig(config *Config) error { - if len(config.CurrentContext) == 0 { - return errors.New("current-context must exist in order to minify") - } - - currContext, exists := config.Contexts[config.CurrentContext] - if !exists { - return fmt.Errorf("cannot locate context %v", config.CurrentContext) - } - - newContexts := map[string]*Context{} - newContexts[config.CurrentContext] = currContext - - newClusters := map[string]*Cluster{} - if len(currContext.Cluster) > 0 { - if _, exists := config.Clusters[currContext.Cluster]; !exists { - return fmt.Errorf("cannot locate cluster %v", currContext.Cluster) - } - - newClusters[currContext.Cluster] = config.Clusters[currContext.Cluster] - } - - newAuthInfos := map[string]*AuthInfo{} - if len(currContext.AuthInfo) > 0 { - if _, exists := config.AuthInfos[currContext.AuthInfo]; !exists { - return fmt.Errorf("cannot locate user %v", currContext.AuthInfo) - } - - newAuthInfos[currContext.AuthInfo] = config.AuthInfos[currContext.AuthInfo] - } - - config.AuthInfos = newAuthInfos - config.Clusters = newClusters - config.Contexts = newContexts - - return nil -} - -var redactedBytes []byte - -// Flatten redacts raw data entries from the config object for a human-readable view. -func ShortenConfig(config *Config) { - // trick json encoder into printing a human readable string in the raw data - // by base64 decoding what we want to print. 
Relies on implementation of - // http://golang.org/pkg/encoding/json/#Marshal using base64 to encode []byte - for key, authInfo := range config.AuthInfos { - if len(authInfo.ClientKeyData) > 0 { - authInfo.ClientKeyData = redactedBytes - } - if len(authInfo.ClientCertificateData) > 0 { - authInfo.ClientCertificateData = redactedBytes - } - config.AuthInfos[key] = authInfo - } - for key, cluster := range config.Clusters { - if len(cluster.CertificateAuthorityData) > 0 { - cluster.CertificateAuthorityData = redactedBytes - } - config.Clusters[key] = cluster - } -} - -// Flatten changes the config object into a self contained config (useful for making secrets) -func FlattenConfig(config *Config) error { - for key, authInfo := range config.AuthInfos { - baseDir, err := MakeAbs(path.Dir(authInfo.LocationOfOrigin), "") - if err != nil { - return err - } - - if err := FlattenContent(&authInfo.ClientCertificate, &authInfo.ClientCertificateData, baseDir); err != nil { - return err - } - if err := FlattenContent(&authInfo.ClientKey, &authInfo.ClientKeyData, baseDir); err != nil { - return err - } - - config.AuthInfos[key] = authInfo - } - for key, cluster := range config.Clusters { - baseDir, err := MakeAbs(path.Dir(cluster.LocationOfOrigin), "") - if err != nil { - return err - } - - if err := FlattenContent(&cluster.CertificateAuthority, &cluster.CertificateAuthorityData, baseDir); err != nil { - return err - } - - config.Clusters[key] = cluster - } - - return nil -} - -func FlattenContent(path *string, contents *[]byte, baseDir string) error { - if len(*path) != 0 { - if len(*contents) > 0 { - return errors.New("cannot have values for both path and contents") - } - - var err error - absPath := ResolvePath(*path, baseDir) - *contents, err = ioutil.ReadFile(absPath) - if err != nil { - return err - } - - *path = "" - } - - return nil -} - -// ResolvePath returns the path as an absolute paths, relative to the given base directory -func ResolvePath(path string, base string) string { - // Don't resolve empty paths - if len(path) > 0 { - // Don't resolve absolute paths - if !filepath.IsAbs(path) { - return filepath.Join(base, path) - } - } - - return path -} - -func MakeAbs(path, base string) (string, error) { - if filepath.IsAbs(path) { - return path, nil - } - if len(base) == 0 { - cwd, err := os.Getwd() - if err != nil { - return "", err - } - base = cwd - } - return filepath.Join(base, path), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest/latest.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest/latest.go deleted file mode 100644 index 48cedb82e..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest/latest.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package latest - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" - _ "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer/json" - "k8s.io/kubernetes/pkg/runtime/serializer/versioning" -) - -// Version is the string that represents the current external default version. -const Version = "v1" - -var ExternalVersion = unversioned.GroupVersion{Group: "", Version: "v1"} - -// OldestVersion is the string that represents the oldest server version supported, -// for client code that wants to hardcode the lowest common denominator. -const OldestVersion = "v1" - -// Versions is the list of versions that are recognized in code. The order provided -// may be assumed to be least feature rich to most feature rich, and clients may -// choose to prefer the latter items in the list over the former items when presented -// with a set of versions to choose. -var Versions = []string{"v1"} - -var Codec runtime.Codec - -func init() { - yamlSerializer := json.NewYAMLSerializer(json.DefaultMetaFactory, api.Scheme, api.Scheme) - Codec = versioning.NewCodecForScheme( - api.Scheme, - yamlSerializer, - yamlSerializer, - []unversioned.GroupVersion{{Version: Version}}, - []unversioned.GroupVersion{{Version: runtime.APIVersionInternal}}, - ) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/register.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/register.go deleted file mode 100644 index f26a6cd1b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/register.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered. 
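// Sketch of the apiVersion <-> (group, version) convention that the
// ToAPIVersionAndKind/FromAPIVersionAndKind calls in register.go below rely on:
// the legacy core group serializes as just "v1", everything else as
// "<group>/<version>". Hypothetical helper names, stdlib only.
package main

import (
	"fmt"
	"strings"
)

func toAPIVersion(group, version string) string {
	if group == "" {
		return version
	}
	return group + "/" + version
}

func fromAPIVersion(apiVersion string) (group, version string) {
	if i := strings.Index(apiVersion, "/"); i >= 0 {
		return apiVersion[:i], apiVersion[i+1:]
	}
	return "", apiVersion
}

func main() {
	fmt.Println(toAPIVersion("", "v1"))           // v1
	fmt.Println(fromAPIVersion("batch/v2alpha1")) // batch v2alpha1
}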
-var Scheme = runtime.NewScheme() - -// SchemeGroupVersion is group version used to register these objects -// TODO this should be in the "kubeconfig" group -var SchemeGroupVersion = unversioned.GroupVersion{Group: "", Version: runtime.APIVersionInternal} - -func init() { - Scheme.AddKnownTypes(SchemeGroupVersion, - &Config{}, - ) -} - -func (obj *Config) GetObjectKind() unversioned.ObjectKind { return obj } -func (obj *Config) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { - obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() -} -func (obj *Config) GroupVersionKind() unversioned.GroupVersionKind { - return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/types.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/types.go deleted file mode 100644 index 56b44e8f4..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/types.go +++ /dev/null @@ -1,152 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "k8s.io/kubernetes/pkg/runtime" -) - -// Where possible, json tags match the cli argument names. -// Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted. - -// Config holds the information needed to build connect to remote kubernetes clusters as a given user -// IMPORTANT if you add fields to this struct, please update IsConfigEmpty() -type Config struct { - // Legacy field from pkg/api/types.go TypeMeta. - // TODO(jlowdermilk): remove this after eliminating downstream dependencies. - Kind string `json:"kind,omitempty"` - // DEPRECATED: APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). - // Because a cluster can run multiple API groups and potentially multiple versions of each, it no longer makes sense to specify - // a single value for the cluster version. - // This field isn't really needed anyway, so we are deprecating it without replacement. - // It will be ignored if it is present. - APIVersion string `json:"apiVersion,omitempty"` - // Preferences holds general information to be use for cli interactions - Preferences Preferences `json:"preferences"` - // Clusters is a map of referencable names to cluster configs - Clusters map[string]*Cluster `json:"clusters"` - // AuthInfos is a map of referencable names to user configs - AuthInfos map[string]*AuthInfo `json:"users"` - // Contexts is a map of referencable names to context configs - Contexts map[string]*Context `json:"contexts"` - // CurrentContext is the name of the context that you would like to use by default - CurrentContext string `json:"current-context"` - // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields - Extensions map[string]runtime.Object `json:"extensions,omitempty"` -} - -// IMPORTANT if you add fields to this struct, please update IsConfigEmpty() -type Preferences struct { - Colors bool `json:"colors,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - Extensions map[string]runtime.Object `json:"extensions,omitempty"` -} - -// Cluster contains information about how to communicate with a kubernetes cluster -type Cluster struct { - // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. - LocationOfOrigin string - // Server is the address of the kubernetes cluster (https://hostname:port). - Server string `json:"server"` - // APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). - APIVersion string `json:"api-version,omitempty"` - // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. - InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` - // CertificateAuthority is the path to a cert file for the certificate authority. - CertificateAuthority string `json:"certificate-authority,omitempty"` - // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority - CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - Extensions map[string]runtime.Object `json:"extensions,omitempty"` -} - -// AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. -type AuthInfo struct { - // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. - LocationOfOrigin string - // ClientCertificate is the path to a client cert file for TLS. - ClientCertificate string `json:"client-certificate,omitempty"` - // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate - ClientCertificateData []byte `json:"client-certificate-data,omitempty"` - // ClientKey is the path to a client key file for TLS. - ClientKey string `json:"client-key,omitempty"` - // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey - ClientKeyData []byte `json:"client-key-data,omitempty"` - // Token is the bearer token for authentication to the kubernetes cluster. - Token string `json:"token,omitempty"` - // Impersonate is the username to act-as. - Impersonate string `json:"act-as,omitempty"` - // Username is the username for basic authentication to the kubernetes cluster. - Username string `json:"username,omitempty"` - // Password is the password for basic authentication to the kubernetes cluster. - Password string `json:"password,omitempty"` - // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. - AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` - // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields - Extensions map[string]runtime.Object `json:"extensions,omitempty"` -} - -// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) -type Context struct { - // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. - LocationOfOrigin string - // Cluster is the name of the cluster for this context - Cluster string `json:"cluster"` - // AuthInfo is the name of the authInfo for this context - AuthInfo string `json:"user"` - // Namespace is the default namespace to use on unspecified requests - Namespace string `json:"namespace,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - Extensions map[string]runtime.Object `json:"extensions,omitempty"` -} - -// AuthProviderConfig holds the configuration for a specified auth provider. -type AuthProviderConfig struct { - Name string `json:"name"` - Config map[string]string `json:"config,omitempty"` -} - -// NewConfig is a convenience function that returns a new Config object with non-nil maps -func NewConfig() *Config { - return &Config{ - Preferences: *NewPreferences(), - Clusters: make(map[string]*Cluster), - AuthInfos: make(map[string]*AuthInfo), - Contexts: make(map[string]*Context), - Extensions: make(map[string]runtime.Object), - } -} - -// NewConfig is a convenience function that returns a new Config object with non-nil maps -func NewContext() *Context { - return &Context{Extensions: make(map[string]runtime.Object)} -} - -// NewConfig is a convenience function that returns a new Config object with non-nil maps -func NewCluster() *Cluster { - return &Cluster{Extensions: make(map[string]runtime.Object)} -} - -// NewConfig is a convenience function that returns a new Config object with non-nil maps -func NewAuthInfo() *AuthInfo { - return &AuthInfo{Extensions: make(map[string]runtime.Object)} -} - -// NewConfig is a convenience function that returns a new Config object with non-nil maps -func NewPreferences() *Preferences { - return &Preferences{Extensions: make(map[string]runtime.Object)} -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/conversion.go deleted file mode 100644 index e03fc60b1..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/conversion.go +++ /dev/null @@ -1,231 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "sort" - - "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" -) - -func init() { - err := api.Scheme.AddConversionFuncs( - func(in *Cluster, out *api.Cluster, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - func(in *api.Cluster, out *Cluster, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - func(in *Preferences, out *api.Preferences, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - func(in *api.Preferences, out *Preferences, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - func(in *AuthInfo, out *api.AuthInfo, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - func(in *api.AuthInfo, out *AuthInfo, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - func(in *Context, out *api.Context, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - func(in *api.Context, out *Context, s conversion.Scope) error { - return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) - }, - - func(in *Config, out *api.Config, s conversion.Scope) error { - out.CurrentContext = in.CurrentContext - if err := s.Convert(&in.Preferences, &out.Preferences, 0); err != nil { - return err - } - - out.Clusters = make(map[string]*api.Cluster) - if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil { - return err - } - out.AuthInfos = make(map[string]*api.AuthInfo) - if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil { - return err - } - out.Contexts = make(map[string]*api.Context) - if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil { - return err - } - out.Extensions = make(map[string]runtime.Object) - if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil { - return err - } - return nil - }, - func(in *api.Config, out *Config, s conversion.Scope) error { - out.CurrentContext = in.CurrentContext - if err := s.Convert(&in.Preferences, &out.Preferences, 0); err != nil { - return err - } - - out.Clusters = make([]NamedCluster, 0, 0) - if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil { - return err - } - out.AuthInfos = make([]NamedAuthInfo, 0, 0) - if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil { - return err - } - out.Contexts = make([]NamedContext, 0, 0) - if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil { - return err - } - out.Extensions = make([]NamedExtension, 0, 0) - if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil { - return err - } - return nil - }, - func(in *[]NamedCluster, out *map[string]*api.Cluster, s conversion.Scope) error { - for _, curr := range *in { - newCluster := api.NewCluster() - if err := s.Convert(&curr.Cluster, newCluster, 0); err != nil { - return err - } - (*out)[curr.Name] = newCluster - } - - return nil - }, - func(in *map[string]*api.Cluster, out *[]NamedCluster, s conversion.Scope) error { - allKeys := make([]string, 0, len(*in)) - for key := range *in { - allKeys = append(allKeys, key) - } - sort.Strings(allKeys) - - for _, key := range allKeys { - newCluster := (*in)[key] - oldCluster := &Cluster{} - if err := s.Convert(newCluster, oldCluster, 0); err != nil { - return err - } - - namedCluster := 
NamedCluster{key, *oldCluster} - *out = append(*out, namedCluster) - } - - return nil - }, - func(in *[]NamedAuthInfo, out *map[string]*api.AuthInfo, s conversion.Scope) error { - for _, curr := range *in { - newAuthInfo := api.NewAuthInfo() - if err := s.Convert(&curr.AuthInfo, newAuthInfo, 0); err != nil { - return err - } - (*out)[curr.Name] = newAuthInfo - } - - return nil - }, - func(in *map[string]*api.AuthInfo, out *[]NamedAuthInfo, s conversion.Scope) error { - allKeys := make([]string, 0, len(*in)) - for key := range *in { - allKeys = append(allKeys, key) - } - sort.Strings(allKeys) - - for _, key := range allKeys { - newAuthInfo := (*in)[key] - oldAuthInfo := &AuthInfo{} - if err := s.Convert(newAuthInfo, oldAuthInfo, 0); err != nil { - return err - } - - namedAuthInfo := NamedAuthInfo{key, *oldAuthInfo} - *out = append(*out, namedAuthInfo) - } - - return nil - }, - func(in *[]NamedContext, out *map[string]*api.Context, s conversion.Scope) error { - for _, curr := range *in { - newContext := api.NewContext() - if err := s.Convert(&curr.Context, newContext, 0); err != nil { - return err - } - (*out)[curr.Name] = newContext - } - - return nil - }, - func(in *map[string]*api.Context, out *[]NamedContext, s conversion.Scope) error { - allKeys := make([]string, 0, len(*in)) - for key := range *in { - allKeys = append(allKeys, key) - } - sort.Strings(allKeys) - - for _, key := range allKeys { - newContext := (*in)[key] - oldContext := &Context{} - if err := s.Convert(newContext, oldContext, 0); err != nil { - return err - } - - namedContext := NamedContext{key, *oldContext} - *out = append(*out, namedContext) - } - - return nil - }, - func(in *[]NamedExtension, out *map[string]runtime.Object, s conversion.Scope) error { - for _, curr := range *in { - var newExtension runtime.Object - if err := s.Convert(&curr.Extension, &newExtension, 0); err != nil { - return err - } - (*out)[curr.Name] = newExtension - } - - return nil - }, - func(in *map[string]runtime.Object, out *[]NamedExtension, s conversion.Scope) error { - allKeys := make([]string, 0, len(*in)) - for key := range *in { - allKeys = append(allKeys, key) - } - sort.Strings(allKeys) - - for _, key := range allKeys { - newExtension := (*in)[key] - oldExtension := &runtime.RawExtension{} - if err := s.Convert(newExtension, oldExtension, 0); err != nil { - return err - } - - namedExtension := NamedExtension{key, *oldExtension} - *out = append(*out, namedExtension) - } - - return nil - }, - ) - if err != nil { - // If one of the conversion functions is malformed, detect it immediately. - panic(err) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/register.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/register.go deleted file mode 100644 index e5c9e88ef..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/register.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" -) - -// SchemeGroupVersion is group version used to register these objects -// TODO this should be in the "kubeconfig" group -var SchemeGroupVersion = unversioned.GroupVersion{Group: "", Version: "v1"} - -func init() { - api.Scheme.AddKnownTypes(SchemeGroupVersion, - &Config{}, - ) -} - -func (obj *Config) GetObjectKind() unversioned.ObjectKind { return obj } -func (obj *Config) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { - obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() -} -func (obj *Config) GroupVersionKind() unversioned.GroupVersionKind { - return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/types.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/types.go deleted file mode 100644 index 46b5dbaa7..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1/types.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/kubernetes/pkg/runtime" -) - -// Where possible, json tags match the cli argument names. -// Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted. - -// Config holds the information needed to build connect to remote kubernetes clusters as a given user -type Config struct { - // Legacy field from pkg/api/types.go TypeMeta. - // TODO(jlowdermilk): remove this after eliminating downstream dependencies. - Kind string `json:"kind,omitempty"` - // DEPRECATED: APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). - // Because a cluster can run multiple API groups and potentially multiple versions of each, it no longer makes sense to specify - // a single value for the cluster version. - // This field isn't really needed anyway, so we are deprecating it without replacement. - // It will be ignored if it is present. - APIVersion string `json:"apiVersion,omitempty"` - // Preferences holds general information to be use for cli interactions - Preferences Preferences `json:"preferences"` - // Clusters is a map of referencable names to cluster configs - Clusters []NamedCluster `json:"clusters"` - // AuthInfos is a map of referencable names to user configs - AuthInfos []NamedAuthInfo `json:"users"` - // Contexts is a map of referencable names to context configs - Contexts []NamedContext `json:"contexts"` - // CurrentContext is the name of the context that you would like to use by default - CurrentContext string `json:"current-context"` - // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields - Extensions []NamedExtension `json:"extensions,omitempty"` -} - -type Preferences struct { - Colors bool `json:"colors,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - Extensions []NamedExtension `json:"extensions,omitempty"` -} - -// Cluster contains information about how to communicate with a kubernetes cluster -type Cluster struct { - // Server is the address of the kubernetes cluster (https://hostname:port). - Server string `json:"server"` - // APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). - APIVersion string `json:"api-version,omitempty"` - // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. - InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` - // CertificateAuthority is the path to a cert file for the certificate authority. - CertificateAuthority string `json:"certificate-authority,omitempty"` - // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority - CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - Extensions []NamedExtension `json:"extensions,omitempty"` -} - -// AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. -type AuthInfo struct { - // ClientCertificate is the path to a client cert file for TLS. - ClientCertificate string `json:"client-certificate,omitempty"` - // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate - ClientCertificateData []byte `json:"client-certificate-data,omitempty"` - // ClientKey is the path to a client key file for TLS. - ClientKey string `json:"client-key,omitempty"` - // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey - ClientKeyData []byte `json:"client-key-data,omitempty"` - // Token is the bearer token for authentication to the kubernetes cluster. - Token string `json:"token,omitempty"` - // Impersonate is the username to imperonate. The name matches the flag. - Impersonate string `json:"as,omitempty"` - // Username is the username for basic authentication to the kubernetes cluster. - Username string `json:"username,omitempty"` - // Password is the password for basic authentication to the kubernetes cluster. - Password string `json:"password,omitempty"` - // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. - AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` - // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields - Extensions []NamedExtension `json:"extensions,omitempty"` -} - -// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) -type Context struct { - // Cluster is the name of the cluster for this context - Cluster string `json:"cluster"` - // AuthInfo is the name of the authInfo for this context - AuthInfo string `json:"user"` - // Namespace is the default namespace to use on unspecified requests - Namespace string `json:"namespace,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - Extensions []NamedExtension `json:"extensions,omitempty"` -} - -// NamedCluster relates nicknames to cluster information -type NamedCluster struct { - // Name is the nickname for this Cluster - Name string `json:"name"` - // Cluster holds the cluster information - Cluster Cluster `json:"cluster"` -} - -// NamedContext relates nicknames to context information -type NamedContext struct { - // Name is the nickname for this Context - Name string `json:"name"` - // Context holds the context information - Context Context `json:"context"` -} - -// NamedAuthInfo relates nicknames to auth information -type NamedAuthInfo struct { - // Name is the nickname for this AuthInfo - Name string `json:"name"` - // AuthInfo holds the auth information - AuthInfo AuthInfo `json:"user"` -} - -// NamedExtension relates nicknames to extension information -type NamedExtension struct { - // Name is the nickname for this Extension - Name string `json:"name"` - // Extension holds the extension information - Extension runtime.RawExtension `json:"extension"` -} - -// AuthProviderConfig holds the configuration for a specified auth provider. -type AuthProviderConfig struct { - Name string `json:"name"` - Config map[string]string `json:"config"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go deleted file mode 100644 index c83f315a3..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/client_config.go +++ /dev/null @@ -1,411 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package clientcmd - -import ( - "fmt" - "io" - "io/ioutil" - "net/url" - "os" - "strings" - - "github.com/golang/glog" - "github.com/imdario/mergo" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/restclient" - clientauth "k8s.io/kubernetes/pkg/client/unversioned/auth" - clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" -) - -var ( - // DefaultCluster is the cluster config used when no other config is specified - // TODO: eventually apiserver should start on 443 and be secure by default - DefaultCluster = clientcmdapi.Cluster{Server: "http://localhost:8080"} - - // EnvVarCluster allows overriding the DefaultCluster using an envvar for the server name - EnvVarCluster = clientcmdapi.Cluster{Server: os.Getenv("KUBERNETES_MASTER")} - - DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{}, nil, NewDefaultClientConfigLoadingRules()} -) - -// ClientConfig is used to make it easy to get an api server client -type ClientConfig interface { - // RawConfig returns the merged result of all overrides - RawConfig() (clientcmdapi.Config, error) - // ClientConfig returns a complete client config - ClientConfig() (*restclient.Config, error) - // Namespace returns the namespace resulting from the merged - // result of all overrides and a boolean indicating if it was - // overridden - Namespace() (string, bool, error) - // ConfigAccess returns the rules for loading/persisting the config. - ConfigAccess() ConfigAccess -} - -type PersistAuthProviderConfigForUser func(user string) restclient.AuthProviderConfigPersister - -// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information -type DirectClientConfig struct { - config clientcmdapi.Config - contextName string - overrides *ConfigOverrides - fallbackReader io.Reader - configAccess ConfigAccess -} - -// NewDefaultClientConfig creates a DirectClientConfig using the config.CurrentContext as the context name -func NewDefaultClientConfig(config clientcmdapi.Config, overrides *ConfigOverrides) ClientConfig { - return &DirectClientConfig{config, config.CurrentContext, overrides, nil, NewDefaultClientConfigLoadingRules()} -} - -// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information -func NewNonInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, configAccess ConfigAccess) ClientConfig { - return &DirectClientConfig{config, contextName, overrides, nil, configAccess} -} - -// NewInteractiveClientConfig creates a DirectClientConfig using the passed context name and a reader in case auth information is not provided via files or flags -func NewInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, fallbackReader io.Reader, configAccess ConfigAccess) ClientConfig { - return &DirectClientConfig{config, contextName, overrides, fallbackReader, configAccess} -} - -func (config *DirectClientConfig) RawConfig() (clientcmdapi.Config, error) { - return config.config, nil -} - -// ClientConfig implements ClientConfig -func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) { - if err := config.ConfirmUsable(); err != nil { - return nil, err - } - - configAuthInfo := config.getAuthInfo() - configClusterInfo := config.getCluster() - - clientConfig := &restclient.Config{} - clientConfig.Host = 
configClusterInfo.Server - if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 { - u.RawQuery = "" - u.Fragment = "" - clientConfig.Host = u.String() - } - if len(configAuthInfo.Impersonate) > 0 { - clientConfig.Impersonate = configAuthInfo.Impersonate - } - - // only try to read the auth information if we are secure - if restclient.IsConfigTransportTLS(*clientConfig) { - var err error - - // mergo is a first write wins for map value and a last writing wins for interface values - // NOTE: This behavior changed with https://github.com/imdario/mergo/commit/d304790b2ed594794496464fadd89d2bb266600a. - // Our mergo.Merge version is older than this change. - var persister restclient.AuthProviderConfigPersister - if config.configAccess != nil { - persister = PersisterForUser(config.configAccess, config.getAuthInfoName()) - } - userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister) - if err != nil { - return nil, err - } - mergo.Merge(clientConfig, userAuthPartialConfig) - - serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo) - if err != nil { - return nil, err - } - mergo.Merge(clientConfig, serverAuthPartialConfig) - } - - return clientConfig, nil -} - -// clientauth.Info object contain both user identification and server identification. We want different precedence orders for -// both, so we have to split the objects and merge them separately -// we want this order of precedence for the server identification -// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files) -// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) -// 3. load the ~/.kubernetes_auth file as a default -func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) { - mergedConfig := &restclient.Config{} - - // configClusterInfo holds the information identify the server provided by .kubeconfig - configClientConfig := &restclient.Config{} - configClientConfig.CAFile = configClusterInfo.CertificateAuthority - configClientConfig.CAData = configClusterInfo.CertificateAuthorityData - configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify - mergo.Merge(mergedConfig, configClientConfig) - - return mergedConfig, nil -} - -// clientauth.Info object contain both user identification and server identification. We want different precedence orders for -// both, so we have to split the objects and merge them separately -// we want this order of precedence for user identifcation -// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files) -// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority) -// 3. if there is not enough information to idenfity the user, load try the ~/.kubernetes_auth file -// 4. 
if there is not enough information to identify the user, prompt if possible -func getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister) (*restclient.Config, error) { - mergedConfig := &restclient.Config{} - - // blindly overwrite existing values based on precedence - if len(configAuthInfo.Token) > 0 { - mergedConfig.BearerToken = configAuthInfo.Token - } - if len(configAuthInfo.Impersonate) > 0 { - mergedConfig.Impersonate = configAuthInfo.Impersonate - } - if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 { - mergedConfig.CertFile = configAuthInfo.ClientCertificate - mergedConfig.CertData = configAuthInfo.ClientCertificateData - mergedConfig.KeyFile = configAuthInfo.ClientKey - mergedConfig.KeyData = configAuthInfo.ClientKeyData - } - if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 { - mergedConfig.Username = configAuthInfo.Username - mergedConfig.Password = configAuthInfo.Password - } - if configAuthInfo.AuthProvider != nil { - mergedConfig.AuthProvider = configAuthInfo.AuthProvider - mergedConfig.AuthConfigPersister = persistAuthConfig - } - - // if there still isn't enough information to authenticate the user, try prompting - if !canIdentifyUser(*mergedConfig) && (fallbackReader != nil) { - prompter := NewPromptingAuthLoader(fallbackReader) - promptedAuthInfo := prompter.Prompt() - - promptedConfig := makeUserIdentificationConfig(*promptedAuthInfo) - previouslyMergedConfig := mergedConfig - mergedConfig = &restclient.Config{} - mergo.Merge(mergedConfig, promptedConfig) - mergo.Merge(mergedConfig, previouslyMergedConfig) - } - - return mergedConfig, nil -} - -// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged using mergo for only user identification information -func makeUserIdentificationConfig(info clientauth.Info) *restclient.Config { - config := &restclient.Config{} - config.Username = info.User - config.Password = info.Password - config.CertFile = info.CertFile - config.KeyFile = info.KeyFile - config.BearerToken = info.BearerToken - return config -} - -// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged using mergo for only server identification information -func makeServerIdentificationConfig(info clientauth.Info) restclient.Config { - config := restclient.Config{} - config.CAFile = info.CAFile - if info.Insecure != nil { - config.Insecure = *info.Insecure - } - return config -} - -func canIdentifyUser(config restclient.Config) bool { - return len(config.Username) > 0 || - (len(config.CertFile) > 0 || len(config.CertData) > 0) || - len(config.BearerToken) > 0 || - config.AuthProvider != nil -} - -// Namespace implements ClientConfig -func (config *DirectClientConfig) Namespace() (string, bool, error) { - if err := config.ConfirmUsable(); err != nil { - return "", false, err - } - - configContext := config.getContext() - - if len(configContext.Namespace) == 0 { - return api.NamespaceDefault, false, nil - } - - overridden := false - if config.overrides != nil && config.overrides.Context.Namespace != "" { - overridden = true - } - return configContext.Namespace, overridden, nil -} - -// ConfigAccess implements ClientConfig -func (config *DirectClientConfig) ConfigAccess() ConfigAccess { - return config.configAccess -} - -// ConfirmUsable looks a particular context and determines if that particular part of the config is useable. 
There might still be errors in the config, -// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible. -func (config *DirectClientConfig) ConfirmUsable() error { - validationErrors := make([]error, 0) - validationErrors = append(validationErrors, validateAuthInfo(config.getAuthInfoName(), config.getAuthInfo())...) - validationErrors = append(validationErrors, validateClusterInfo(config.getClusterName(), config.getCluster())...) - // when direct client config is specified, and our only error is that no server is defined, we should - // return a standard "no config" error - if len(validationErrors) == 1 && validationErrors[0] == ErrEmptyCluster { - return newErrConfigurationInvalid([]error{ErrEmptyConfig}) - } - return newErrConfigurationInvalid(validationErrors) -} - -func (config *DirectClientConfig) getContextName() string { - if len(config.overrides.CurrentContext) != 0 { - return config.overrides.CurrentContext - } - if len(config.contextName) != 0 { - return config.contextName - } - - return config.config.CurrentContext -} - -func (config *DirectClientConfig) getAuthInfoName() string { - if len(config.overrides.Context.AuthInfo) != 0 { - return config.overrides.Context.AuthInfo - } - return config.getContext().AuthInfo -} - -func (config *DirectClientConfig) getClusterName() string { - if len(config.overrides.Context.Cluster) != 0 { - return config.overrides.Context.Cluster - } - return config.getContext().Cluster -} - -func (config *DirectClientConfig) getContext() clientcmdapi.Context { - contexts := config.config.Contexts - contextName := config.getContextName() - - var mergedContext clientcmdapi.Context - if configContext, exists := contexts[contextName]; exists { - mergo.Merge(&mergedContext, configContext) - } - mergo.Merge(&mergedContext, config.overrides.Context) - - return mergedContext -} - -func (config *DirectClientConfig) getAuthInfo() clientcmdapi.AuthInfo { - authInfos := config.config.AuthInfos - authInfoName := config.getAuthInfoName() - - var mergedAuthInfo clientcmdapi.AuthInfo - if configAuthInfo, exists := authInfos[authInfoName]; exists { - mergo.Merge(&mergedAuthInfo, configAuthInfo) - } - mergo.Merge(&mergedAuthInfo, config.overrides.AuthInfo) - - return mergedAuthInfo -} - -func (config *DirectClientConfig) getCluster() clientcmdapi.Cluster { - clusterInfos := config.config.Clusters - clusterInfoName := config.getClusterName() - - var mergedClusterInfo clientcmdapi.Cluster - mergo.Merge(&mergedClusterInfo, DefaultCluster) - mergo.Merge(&mergedClusterInfo, EnvVarCluster) - if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists { - mergo.Merge(&mergedClusterInfo, configClusterInfo) - } - mergo.Merge(&mergedClusterInfo, config.overrides.ClusterInfo) - // An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data - // otherwise, a kubeconfig containing a CA reference would return an error that "CA and insecure-skip-tls-verify couldn't both be set" - caLen := len(config.overrides.ClusterInfo.CertificateAuthority) - caDataLen := len(config.overrides.ClusterInfo.CertificateAuthorityData) - if config.overrides.ClusterInfo.InsecureSkipTLSVerify && caLen == 0 && caDataLen == 0 { - mergedClusterInfo.CertificateAuthority = "" - mergedClusterInfo.CertificateAuthorityData = nil - } - - return mergedClusterInfo -} - -// inClusterClientConfig makes a config that will work from within a kubernetes cluster container environment. 
-type inClusterClientConfig struct{} - -func (inClusterClientConfig) RawConfig() (clientcmdapi.Config, error) { - return clientcmdapi.Config{}, fmt.Errorf("inCluster environment config doesn't support multiple clusters") -} - -func (inClusterClientConfig) ClientConfig() (*restclient.Config, error) { - return restclient.InClusterConfig() -} - -func (inClusterClientConfig) Namespace() (string, error) { - // This way assumes you've set the POD_NAMESPACE environment variable using the downward API. - // This check has to be done first for backwards compatibility with the way InClusterConfig was originally set up - if ns := os.Getenv("POD_NAMESPACE"); ns != "" { - return ns, nil - } - - // Fall back to the namespace associated with the service account token, if available - if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { - if ns := strings.TrimSpace(string(data)); len(ns) > 0 { - return ns, nil - } - } - - return "default", nil -} - -func (inClusterClientConfig) ConfigAccess() ConfigAccess { - return NewDefaultClientConfigLoadingRules() -} - -// Possible returns true if loading an inside-kubernetes-cluster is possible. -func (inClusterClientConfig) Possible() bool { - fi, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount/token") - return os.Getenv("KUBERNETES_SERVICE_HOST") != "" && - os.Getenv("KUBERNETES_SERVICE_PORT") != "" && - err == nil && !fi.IsDir() -} - -// BuildConfigFromFlags is a helper function that builds configs from a master -// url or a kubeconfig filepath. These are passed in as command line flags for cluster -// components. Warnings should reflect this usage. If neither masterUrl or kubeconfigPath -// are passed in we fallback to inClusterConfig. If inClusterConfig fails, we fallback -// to the default config. -func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*restclient.Config, error) { - if kubeconfigPath == "" && masterUrl == "" { - glog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.") - kubeconfig, err := restclient.InClusterConfig() - if err == nil { - return kubeconfig, nil - } - glog.Warning("error creating inClusterConfig, falling back to default config: ", err) - } - return NewNonInteractiveDeferredLoadingClientConfig( - &ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}, - &ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}}).ClientConfig() -} - -// BuildConfigFromKubeconfigGetter is a helper function that builds configs from a master -// url and a kubeconfigGetter. -func BuildConfigFromKubeconfigGetter(masterUrl string, kubeconfigGetter KubeconfigGetter) (*restclient.Config, error) { - // TODO: We do not need a DeferredLoader here. Refactor code and see if we can use DirectClientConfig here. - cc := NewNonInteractiveDeferredLoadingClientConfig( - &ClientConfigGetter{kubeconfigGetter: kubeconfigGetter}, - &ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}}) - return cc.ClientConfig() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/config.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/config.go deleted file mode 100644 index ec5948609..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/config.go +++ /dev/null @@ -1,472 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clientcmd - -import ( - "errors" - "os" - "path" - "path/filepath" - "reflect" - "sort" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/client/restclient" - clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" -) - -// ConfigAccess is used by subcommands and methods in this package to load and modify the appropriate config files -type ConfigAccess interface { - // GetLoadingPrecedence returns the slice of files that should be used for loading and inspecting the config - GetLoadingPrecedence() []string - // GetStartingConfig returns the config that subcommands should being operating against. It may or may not be merged depending on loading rules - GetStartingConfig() (*clientcmdapi.Config, error) - // GetDefaultFilename returns the name of the file you should write into (create if necessary), if you're trying to create a new stanza as opposed to updating an existing one. - GetDefaultFilename() string - // IsExplicitFile indicates whether or not this command is interested in exactly one file. This implementation only ever does that via a flag, but implementations that handle local, global, and flags may have more - IsExplicitFile() bool - // GetExplicitFile returns the particular file this command is operating against. This implementation only ever has one, but implementations that handle local, global, and flags may have more - GetExplicitFile() string -} - -type PathOptions struct { - // GlobalFile is the full path to the file to load as the global (final) option - GlobalFile string - // EnvVar is the env var name that points to the list of kubeconfig files to load - EnvVar string - // ExplicitFileFlag is the name of the flag to use for prompting for the kubeconfig file - ExplicitFileFlag string - - // GlobalFileSubpath is an optional value used for displaying help - GlobalFileSubpath string - - LoadingRules *ClientConfigLoadingRules -} - -func (o *PathOptions) GetEnvVarFiles() []string { - if len(o.EnvVar) == 0 { - return []string{} - } - - envVarValue := os.Getenv(o.EnvVar) - if len(envVarValue) == 0 { - return []string{} - } - - return filepath.SplitList(envVarValue) -} - -func (o *PathOptions) GetLoadingPrecedence() []string { - if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 { - return envVarFiles - } - - return []string{o.GlobalFile} -} - -func (o *PathOptions) GetStartingConfig() (*clientcmdapi.Config, error) { - // don't mutate the original - loadingRules := *o.LoadingRules - loadingRules.Precedence = o.GetLoadingPrecedence() - - clientConfig := NewNonInteractiveDeferredLoadingClientConfig(&loadingRules, &ConfigOverrides{}) - rawConfig, err := clientConfig.RawConfig() - if os.IsNotExist(err) { - return clientcmdapi.NewConfig(), nil - } - if err != nil { - return nil, err - } - - return &rawConfig, nil -} - -func (o *PathOptions) GetDefaultFilename() string { - if o.IsExplicitFile() { - return o.GetExplicitFile() - } - - if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 { - if len(envVarFiles) == 1 { - return envVarFiles[0] - } - - // if any of the envvar files already exists, return it - for _, envVarFile := range envVarFiles { - 
if _, err := os.Stat(envVarFile); err == nil { - return envVarFile - } - } - - // otherwise, return the last one in the list - return envVarFiles[len(envVarFiles)-1] - } - - return o.GlobalFile -} - -func (o *PathOptions) IsExplicitFile() bool { - if len(o.LoadingRules.ExplicitPath) > 0 { - return true - } - - return false -} - -func (o *PathOptions) GetExplicitFile() string { - return o.LoadingRules.ExplicitPath -} - -func NewDefaultPathOptions() *PathOptions { - ret := &PathOptions{ - GlobalFile: RecommendedHomeFile, - EnvVar: RecommendedConfigPathEnvVar, - ExplicitFileFlag: RecommendedConfigPathFlag, - - GlobalFileSubpath: path.Join(RecommendedHomeDir, RecommendedFileName), - - LoadingRules: NewDefaultClientConfigLoadingRules(), - } - ret.LoadingRules.DoNotResolvePaths = true - - return ret -} - -// ModifyConfig takes a Config object, iterates through Clusters, AuthInfos, and Contexts, uses the LocationOfOrigin if specified or -// uses the default destination file to write the results into. This results in multiple file reads, but it's very easy to follow. -// Preferences and CurrentContext should always be set in the default destination file. Since we can't distinguish between empty and missing values -// (no nil strings), we're forced have separate handling for them. In the kubeconfig cases, newConfig should have at most one difference, -// that means that this code will only write into a single file. If you want to relativizePaths, you must provide a fully qualified path in any -// modified element. -func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config, relativizePaths bool) error { - possibleSources := configAccess.GetLoadingPrecedence() - // sort the possible kubeconfig files so we always "lock" in the same order - // to avoid deadlock (note: this can fail w/ symlinks, but... come on). - sort.Strings(possibleSources) - for _, filename := range possibleSources { - if err := lockFile(filename); err != nil { - return err - } - defer unlockFile(filename) - } - - startingConfig, err := configAccess.GetStartingConfig() - if err != nil { - return err - } - - // We need to find all differences, locate their original files, read a partial config to modify only that stanza and write out the file. - // Special case the test for current context and preferences since those always write to the default file. - if reflect.DeepEqual(*startingConfig, newConfig) { - // nothing to do - return nil - } - - if startingConfig.CurrentContext != newConfig.CurrentContext { - if err := writeCurrentContext(configAccess, newConfig.CurrentContext); err != nil { - return err - } - } - - if !reflect.DeepEqual(startingConfig.Preferences, newConfig.Preferences) { - if err := writePreferences(configAccess, newConfig.Preferences); err != nil { - return err - } - } - - // Search every cluster, authInfo, and context. 
First from new to old for differences, then from old to new for deletions - for key, cluster := range newConfig.Clusters { - startingCluster, exists := startingConfig.Clusters[key] - if !reflect.DeepEqual(cluster, startingCluster) || !exists { - destinationFile := cluster.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - t := *cluster - - configToWrite.Clusters[key] = &t - configToWrite.Clusters[key].LocationOfOrigin = destinationFile - if relativizePaths { - if err := RelativizeClusterLocalPaths(configToWrite.Clusters[key]); err != nil { - return err - } - } - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, context := range newConfig.Contexts { - startingContext, exists := startingConfig.Contexts[key] - if !reflect.DeepEqual(context, startingContext) || !exists { - destinationFile := context.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - configToWrite.Contexts[key] = context - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, authInfo := range newConfig.AuthInfos { - startingAuthInfo, exists := startingConfig.AuthInfos[key] - if !reflect.DeepEqual(authInfo, startingAuthInfo) || !exists { - destinationFile := authInfo.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - t := *authInfo - configToWrite.AuthInfos[key] = &t - configToWrite.AuthInfos[key].LocationOfOrigin = destinationFile - if relativizePaths { - if err := RelativizeAuthInfoLocalPaths(configToWrite.AuthInfos[key]); err != nil { - return err - } - } - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, cluster := range startingConfig.Clusters { - if _, exists := newConfig.Clusters[key]; !exists { - destinationFile := cluster.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - delete(configToWrite.Clusters, key) - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, context := range startingConfig.Contexts { - if _, exists := newConfig.Contexts[key]; !exists { - destinationFile := context.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - delete(configToWrite.Contexts, key) - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - } - } - - for key, authInfo := range startingConfig.AuthInfos { - if _, exists := newConfig.AuthInfos[key]; !exists { - destinationFile := authInfo.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } - - configToWrite, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - delete(configToWrite.AuthInfos, key) - - if err := WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } - 
} - } - - return nil -} - -func PersisterForUser(configAccess ConfigAccess, user string) restclient.AuthProviderConfigPersister { - return &persister{configAccess, user} -} - -type persister struct { - configAccess ConfigAccess - user string -} - -func (p *persister) Persist(config map[string]string) error { - newConfig, err := p.configAccess.GetStartingConfig() - if err != nil { - return err - } - authInfo, ok := newConfig.AuthInfos[p.user] - if ok && authInfo.AuthProvider != nil { - authInfo.AuthProvider.Config = config - ModifyConfig(p.configAccess, *newConfig, false) - } - return nil -} - -// writeCurrentContext takes three possible paths. -// If newCurrentContext is the same as the startingConfig's current context, then we exit. -// If newCurrentContext has a value, then that value is written into the default destination file. -// If newCurrentContext is empty, then we find the config file that is setting the CurrentContext and clear the value from that file -func writeCurrentContext(configAccess ConfigAccess, newCurrentContext string) error { - if startingConfig, err := configAccess.GetStartingConfig(); err != nil { - return err - } else if startingConfig.CurrentContext == newCurrentContext { - return nil - } - - if configAccess.IsExplicitFile() { - file := configAccess.GetExplicitFile() - currConfig, err := getConfigFromFile(file) - if err != nil { - return err - } - currConfig.CurrentContext = newCurrentContext - if err := WriteToFile(*currConfig, file); err != nil { - return err - } - - return nil - } - - if len(newCurrentContext) > 0 { - destinationFile := configAccess.GetDefaultFilename() - config, err := getConfigFromFile(destinationFile) - if err != nil { - return err - } - config.CurrentContext = newCurrentContext - - if err := WriteToFile(*config, destinationFile); err != nil { - return err - } - - return nil - } - - // we're supposed to be clearing the current context. We need to find the first spot in the chain that is setting it and clear it - for _, file := range configAccess.GetLoadingPrecedence() { - if _, err := os.Stat(file); err == nil { - currConfig, err := getConfigFromFile(file) - if err != nil { - return err - } - - if len(currConfig.CurrentContext) > 0 { - currConfig.CurrentContext = newCurrentContext - if err := WriteToFile(*currConfig, file); err != nil { - return err - } - - return nil - } - } - } - - return errors.New("no config found to write context") -} - -func writePreferences(configAccess ConfigAccess, newPrefs clientcmdapi.Preferences) error { - if startingConfig, err := configAccess.GetStartingConfig(); err != nil { - return err - } else if reflect.DeepEqual(startingConfig.Preferences, newPrefs) { - return nil - } - - if configAccess.IsExplicitFile() { - file := configAccess.GetExplicitFile() - currConfig, err := getConfigFromFile(file) - if err != nil { - return err - } - currConfig.Preferences = newPrefs - if err := WriteToFile(*currConfig, file); err != nil { - return err - } - - return nil - } - - for _, file := range configAccess.GetLoadingPrecedence() { - currConfig, err := getConfigFromFile(file) - if err != nil { - return err - } - - if !reflect.DeepEqual(currConfig.Preferences, newPrefs) { - currConfig.Preferences = newPrefs - if err := WriteToFile(*currConfig, file); err != nil { - return err - } - - return nil - } - } - - return errors.New("no config found to write preferences") -} - -// getConfigFromFile tries to read a kubeconfig file and if it can't, returns an error. One exception, missing files result in empty configs, not an error. 
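Editorial aside on the ModifyConfig logic above: it compares the new config with the starting config and writes each changed stanza back to the file it was loaded from (its LocationOfOrigin), falling back to the default kubeconfig filename for new entries. A rough, self-contained sketch of that diff-and-route idea follows; the types and the single-cluster focus are simplifications, not the real clientcmdapi structures.

```go
// Sketch only: illustrates "route each changed stanza to its file of origin"
// from the deleted ModifyConfig; cluster and file handling are stand-ins.
package main

import (
	"fmt"
	"reflect"
)

type cluster struct {
	Server           string
	LocationOfOrigin string // kubeconfig file this entry was loaded from
}

// changedDestinations returns, for each changed or new cluster, the file that
// should receive the update.
func changedDestinations(starting, updated map[string]cluster, defaultFile string) map[string]string {
	dest := map[string]string{}
	for name, c := range updated {
		old, exists := starting[name]
		if exists && reflect.DeepEqual(old, c) {
			continue // unchanged: nothing to write
		}
		file := c.LocationOfOrigin
		if file == "" {
			file = defaultFile // new entries go to the default kubeconfig
		}
		dest[name] = file
	}
	return dest
}

func main() {
	starting := map[string]cluster{
		"dev": {Server: "https://dev:6443", LocationOfOrigin: "/home/user/.kube/config"},
	}
	updated := map[string]cluster{
		"dev":  {Server: "https://dev:6443", LocationOfOrigin: "/home/user/.kube/config"},
		"prod": {Server: "https://prod:6443"},
	}
	fmt.Println(changedDestinations(starting, updated, "/home/user/.kube/config"))
	// map[prod:/home/user/.kube/config]
}
```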
-func getConfigFromFile(filename string) (*clientcmdapi.Config, error) { - config, err := LoadFromFile(filename) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - if config == nil { - config = clientcmdapi.NewConfig() - } - return config, nil -} - -// GetConfigFromFileOrDie tries to read a kubeconfig file and if it can't, it calls exit. One exception, missing files result in empty configs, not an exit -func GetConfigFromFileOrDie(filename string) *clientcmdapi.Config { - config, err := getConfigFromFile(filename) - if err != nil { - glog.FatalDepth(1, err) - } - - return config -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/doc.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/doc.go deleted file mode 100644 index 7e8f9b4e3..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/doc.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package clientcmd provides one stop shopping for building a working client from a fixed config, -from a .kubeconfig file, from command line flags, or from any merged combination. - -Sample usage from merged .kubeconfig files (local directory, home directory) - - loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() - // if you want to change the loading rules (which files in which order), you can do so here - - configOverrides := &clientcmd.ConfigOverrides{} - // if you want to change override values or bind them to flags, there are methods to help you - - kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) - config, err := kubeConfig.ClientConfig() - if err != nil { - // Do something - } - client, err := unversioned.New(config) - // ... -*/ -package clientcmd diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go deleted file mode 100644 index 52c1493d0..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/merged_client_builder.go +++ /dev/null @@ -1,122 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package clientcmd - -import ( - "io" - "reflect" - "sync" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/client/restclient" - clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" -) - -// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a client config loader. -// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that -// the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before -// the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid -// passing extraneous information down a call stack -type DeferredLoadingClientConfig struct { - loader ClientConfigLoader - overrides *ConfigOverrides - fallbackReader io.Reader - - clientConfig ClientConfig - loadingLock sync.Mutex -} - -// NewNonInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name -func NewNonInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides) ClientConfig { - return &DeferredLoadingClientConfig{loader: loader, overrides: overrides} -} - -// NewInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name and the fallback auth reader -func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig { - return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, fallbackReader: fallbackReader} -} - -func (config *DeferredLoadingClientConfig) createClientConfig() (ClientConfig, error) { - if config.clientConfig == nil { - config.loadingLock.Lock() - defer config.loadingLock.Unlock() - - if config.clientConfig == nil { - mergedConfig, err := config.loader.Load() - if err != nil { - return nil, err - } - - var mergedClientConfig ClientConfig - if config.fallbackReader != nil { - mergedClientConfig = NewInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides, config.fallbackReader, config.loader) - } else { - mergedClientConfig = NewNonInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides, config.loader) - } - - config.clientConfig = mergedClientConfig - } - } - - return config.clientConfig, nil -} - -func (config *DeferredLoadingClientConfig) RawConfig() (clientcmdapi.Config, error) { - mergedConfig, err := config.createClientConfig() - if err != nil { - return clientcmdapi.Config{}, err - } - - return mergedConfig.RawConfig() -} - -// ClientConfig implements ClientConfig -func (config *DeferredLoadingClientConfig) ClientConfig() (*restclient.Config, error) { - mergedClientConfig, err := config.createClientConfig() - if err != nil { - return nil, err - } - - mergedConfig, err := mergedClientConfig.ClientConfig() - if err != nil { - return nil, err - } - // Are we running in a cluster and were no other configs found? If so, use the in-cluster-config. 
- icc := inClusterClientConfig{} - defaultConfig, err := DefaultClientConfig.ClientConfig() - if icc.Possible() && err == nil && reflect.DeepEqual(mergedConfig, defaultConfig) { - glog.V(2).Info("No kubeconfig could be created, falling back to service account.") - return icc.ClientConfig() - } - return mergedConfig, nil -} - -// Namespace implements KubeConfig -func (config *DeferredLoadingClientConfig) Namespace() (string, bool, error) { - mergedKubeConfig, err := config.createClientConfig() - if err != nil { - return "", false, err - } - - return mergedKubeConfig.Namespace() -} - -// ConfigAccess implements ClientConfig -func (config *DeferredLoadingClientConfig) ConfigAccess() ConfigAccess { - return config.loader -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/validation.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/validation.go deleted file mode 100644 index 1690f515e..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clientcmd/validation.go +++ /dev/null @@ -1,270 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clientcmd - -import ( - "errors" - "fmt" - "os" - "reflect" - "strings" - - clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/validation" -) - -var ( - ErrNoContext = errors.New("no context chosen") - ErrEmptyConfig = errors.New("no configuration has been provided") - // message is for consistency with old behavior - ErrEmptyCluster = errors.New("cluster has no server defined") -) - -type errContextNotFound struct { - ContextName string -} - -func (e *errContextNotFound) Error() string { - return fmt.Sprintf("context was not found for specified context: %v", e.ContextName) -} - -// IsContextNotFound returns a boolean indicating whether the error is known to -// report that a context was not found -func IsContextNotFound(err error) bool { - if err == nil { - return false - } - if _, ok := err.(*errContextNotFound); ok || err == ErrNoContext { - return true - } - return strings.Contains(err.Error(), "context was not found for specified context") -} - -// IsEmptyConfig returns true if the provided error indicates the provided configuration -// is empty. -func IsEmptyConfig(err error) bool { - switch t := err.(type) { - case errConfigurationInvalid: - return len(t) == 1 && t[0] == ErrEmptyConfig - } - return err == ErrEmptyConfig -} - -// errConfigurationInvalid is a set of errors indicating the configuration is invalid. 
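Editorial aside on the deferred loader above: it constructs the underlying client config at most once, taking a mutex around the first construction so concurrent callers share one load (the original additionally checks the field before locking as a fast path). A minimal sketch of that lazy-initialization shape, with invented names and a plain string standing in for the merged config:

```go
// Sketch only: mutex-guarded lazy construction in the spirit of the deleted
// DeferredLoadingClientConfig.createClientConfig, reduced to a toy helper.
package main

import (
	"fmt"
	"sync"
)

type lazyConfig struct {
	mu     sync.Mutex
	loaded *string // nil until the expensive load has run
}

// get runs load at most once and returns the cached result thereafter.
func (l *lazyConfig) get(load func() string) string {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.loaded == nil {
		v := load()
		l.loaded = &v
	}
	return *l.loaded
}

func main() {
	calls := 0
	l := &lazyConfig{}
	load := func() string { calls++; return "merged kubeconfig" }

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); _ = l.get(load) }()
	}
	wg.Wait()
	fmt.Println(l.get(load), "loads:", calls) // loads: 1
}
```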
-type errConfigurationInvalid []error - -// errConfigurationInvalid implements error and Aggregate -var _ error = errConfigurationInvalid{} -var _ utilerrors.Aggregate = errConfigurationInvalid{} - -func newErrConfigurationInvalid(errs []error) error { - switch len(errs) { - case 0: - return nil - default: - return errConfigurationInvalid(errs) - } -} - -// Error implements the error interface -func (e errConfigurationInvalid) Error() string { - return fmt.Sprintf("invalid configuration: %v", utilerrors.NewAggregate(e).Error()) -} - -// Errors implements the AggregateError interface -func (e errConfigurationInvalid) Errors() []error { - return e -} - -// IsConfigurationInvalid returns true if the provided error indicates the configuration is invalid. -func IsConfigurationInvalid(err error) bool { - switch err.(type) { - case *errContextNotFound, errConfigurationInvalid: - return true - } - return IsContextNotFound(err) -} - -// Validate checks for errors in the Config. It does not return early so that it can find as many errors as possible. -func Validate(config clientcmdapi.Config) error { - validationErrors := make([]error, 0) - - if clientcmdapi.IsConfigEmpty(&config) { - return newErrConfigurationInvalid([]error{ErrEmptyConfig}) - } - - if len(config.CurrentContext) != 0 { - if _, exists := config.Contexts[config.CurrentContext]; !exists { - validationErrors = append(validationErrors, &errContextNotFound{config.CurrentContext}) - } - } - - for contextName, context := range config.Contexts { - validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) - } - - for authInfoName, authInfo := range config.AuthInfos { - validationErrors = append(validationErrors, validateAuthInfo(authInfoName, *authInfo)...) - } - - for clusterName, clusterInfo := range config.Clusters { - validationErrors = append(validationErrors, validateClusterInfo(clusterName, *clusterInfo)...) - } - - return newErrConfigurationInvalid(validationErrors) -} - -// ConfirmUsable looks a particular context and determines if that particular part of the config is useable. There might still be errors in the config, -// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible. -func ConfirmUsable(config clientcmdapi.Config, passedContextName string) error { - validationErrors := make([]error, 0) - - if clientcmdapi.IsConfigEmpty(&config) { - return newErrConfigurationInvalid([]error{ErrEmptyConfig}) - } - - var contextName string - if len(passedContextName) != 0 { - contextName = passedContextName - } else { - contextName = config.CurrentContext - } - - if len(contextName) == 0 { - return ErrNoContext - } - - context, exists := config.Contexts[contextName] - if !exists { - validationErrors = append(validationErrors, &errContextNotFound{contextName}) - } - - if exists { - validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) - validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *config.AuthInfos[context.AuthInfo])...) - validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *config.Clusters[context.Cluster])...) 
- } - - return newErrConfigurationInvalid(validationErrors) -} - -// validateClusterInfo looks for conflicts and errors in the cluster info -func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) []error { - validationErrors := make([]error, 0) - - if reflect.DeepEqual(clientcmdapi.Cluster{}, clusterInfo) { - return []error{ErrEmptyCluster} - } - - if len(clusterInfo.Server) == 0 { - if len(clusterName) == 0 { - validationErrors = append(validationErrors, fmt.Errorf("default cluster has no server defined")) - } else { - validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName)) - } - } - // Make sure CA data and CA file aren't both specified - if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 { - validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override.", clusterName)) - } - if len(clusterInfo.CertificateAuthority) != 0 { - clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) - defer clientCertCA.Close() - if err != nil { - validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) - } - } - - return validationErrors -} - -// validateAuthInfo looks for conflicts and errors in the auth info -func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []error { - validationErrors := make([]error, 0) - - usingAuthPath := false - methods := make([]string, 0, 3) - if len(authInfo.Token) != 0 { - methods = append(methods, "token") - } - if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 { - methods = append(methods, "basicAuth") - } - - if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { - // Make sure cert data and file aren't both specified - if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { - validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. 
client-cert-data will override.", authInfoName)) - } - // Make sure key data and file aren't both specified - if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 { - validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) - } - // Make sure a key is specified - if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 { - validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method.", authInfoName)) - } - - if len(authInfo.ClientCertificate) != 0 { - clientCertFile, err := os.Open(authInfo.ClientCertificate) - defer clientCertFile.Close() - if err != nil { - validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) - } - } - if len(authInfo.ClientKey) != 0 { - clientKeyFile, err := os.Open(authInfo.ClientKey) - defer clientKeyFile.Close() - if err != nil { - validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) - } - } - } - - // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case - if (len(methods) > 1) && (!usingAuthPath) { - validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) - } - - return validationErrors -} - -// validateContext looks for errors in the context. It is not transitive, so errors in the reference authInfo or cluster configs are not included in this return -func validateContext(contextName string, context clientcmdapi.Context, config clientcmdapi.Config) []error { - validationErrors := make([]error, 0) - - if len(context.AuthInfo) == 0 { - validationErrors = append(validationErrors, fmt.Errorf("user was not specified for context %q", contextName)) - } else if _, exists := config.AuthInfos[context.AuthInfo]; !exists { - validationErrors = append(validationErrors, fmt.Errorf("user %q was not found for context %q", context.AuthInfo, contextName)) - } - - if len(context.Cluster) == 0 { - validationErrors = append(validationErrors, fmt.Errorf("cluster was not specified for context %q", contextName)) - } else if _, exists := config.Clusters[context.Cluster]; !exists { - validationErrors = append(validationErrors, fmt.Errorf("cluster %q was not found for context %q", context.Cluster, contextName)) - } - - if len(context.Namespace) != 0 { - if len(validation.IsDNS1123Label(context.Namespace)) != 0 { - validationErrors = append(validationErrors, fmt.Errorf("namespace %q for context %q does not conform to the kubernetes DNS_LABEL rules", context.Namespace, contextName)) - } - } - - return validationErrors -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterrolebindings.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterrolebindings.go deleted file mode 100644 index 2a9d79846..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterrolebindings.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/rbac" - "k8s.io/kubernetes/pkg/watch" -) - -// ClusterRoleBindings has methods to work with ClusterRoleBinding resources in a namespace -type ClusterRoleBindings interface { - ClusterRoleBindings() ClusterRoleBindingInterface -} - -// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. -type ClusterRoleBindingInterface interface { - List(opts api.ListOptions) (*rbac.ClusterRoleBindingList, error) - Get(name string) (*rbac.ClusterRoleBinding, error) - Delete(name string, options *api.DeleteOptions) error - Create(clusterRoleBinding *rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, error) - Update(clusterRoleBinding *rbac.ClusterRoleBinding) (*rbac.ClusterRoleBinding, error) - Watch(opts api.ListOptions) (watch.Interface, error) -} - -// clusterRoleBindings implements ClusterRoleBindingsNamespacer interface -type clusterRoleBindings struct { - client *RbacClient -} - -// newClusterRoleBindings returns a clusterRoleBindings -func newClusterRoleBindings(c *RbacClient) *clusterRoleBindings { - return &clusterRoleBindings{ - client: c, - } -} - -// List takes label and field selectors, and returns the list of clusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(opts api.ListOptions) (result *rbac.ClusterRoleBindingList, err error) { - result = &rbac.ClusterRoleBindingList{} - err = c.client.Get().Resource("clusterrolebindings").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get takes the name of the clusterRoleBinding, and returns the corresponding ClusterRoleBinding object, and an error if it occurs -func (c *clusterRoleBindings) Get(name string) (result *rbac.ClusterRoleBinding, err error) { - result = &rbac.ClusterRoleBinding{} - err = c.client.Get().Resource("clusterrolebindings").Name(name).Do().Into(result) - return -} - -// Delete takes the name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *clusterRoleBindings) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete().Resource("clusterrolebindings").Name(name).Body(options).Do().Error() -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if it occurs. -func (c *clusterRoleBindings) Create(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) { - result = &rbac.ClusterRoleBinding{} - err = c.client.Post().Resource("clusterrolebindings").Body(clusterRoleBinding).Do().Into(result) - return -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if it occurs. 
-func (c *clusterRoleBindings) Update(clusterRoleBinding *rbac.ClusterRoleBinding) (result *rbac.ClusterRoleBinding, err error) { - result = &rbac.ClusterRoleBinding{} - err = c.client.Put().Resource("clusterrolebindings").Name(clusterRoleBinding.Name).Body(clusterRoleBinding).Do().Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("clusterrolebindings"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterroles.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterroles.go deleted file mode 100644 index 0d2d375d6..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/clusterroles.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/rbac" - "k8s.io/kubernetes/pkg/watch" -) - -// ClusterRoles has methods to work with ClusterRole resources in a namespace -type ClusterRoles interface { - ClusterRoles() ClusterRoleInterface -} - -// ClusterRoleInterface has methods to work with ClusterRole resources. -type ClusterRoleInterface interface { - List(opts api.ListOptions) (*rbac.ClusterRoleList, error) - Get(name string) (*rbac.ClusterRole, error) - Delete(name string, options *api.DeleteOptions) error - Create(clusterRole *rbac.ClusterRole) (*rbac.ClusterRole, error) - Update(clusterRole *rbac.ClusterRole) (*rbac.ClusterRole, error) - Watch(opts api.ListOptions) (watch.Interface, error) -} - -// clusterRoles implements ClusterRolesNamespacer interface -type clusterRoles struct { - client *RbacClient -} - -// newClusterRoles returns a clusterRoles -func newClusterRoles(c *RbacClient) *clusterRoles { - return &clusterRoles{ - client: c, - } -} - -// List takes label and field selectors, and returns the list of clusterRoles that match those selectors. -func (c *clusterRoles) List(opts api.ListOptions) (result *rbac.ClusterRoleList, err error) { - result = &rbac.ClusterRoleList{} - err = c.client.Get().Resource("clusterroles").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get takes the name of the clusterRole, and returns the corresponding ClusterRole object, and an error if it occurs -func (c *clusterRoles) Get(name string) (result *rbac.ClusterRole, err error) { - result = &rbac.ClusterRole{} - err = c.client.Get().Resource("clusterroles").Name(name).Do().Into(result) - return -} - -// Delete takes the name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *clusterRoles) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete().Resource("clusterroles").Name(name).Body(options).Do().Error() -} - -// Create takes the representation of a clusterRole and creates it. 
Returns the server's representation of the clusterRole, and an error, if it occurs. -func (c *clusterRoles) Create(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) { - result = &rbac.ClusterRole{} - err = c.client.Post().Resource("clusterroles").Body(clusterRole).Do().Into(result) - return -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if it occurs. -func (c *clusterRoles) Update(clusterRole *rbac.ClusterRole) (result *rbac.ClusterRole, err error) { - result = &rbac.ClusterRole{} - err = c.client.Put().Resource("clusterroles").Name(clusterRole.Name).Body(clusterRole).Do().Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *clusterRoles) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("clusterroles"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/componentstatuses.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/componentstatuses.go deleted file mode 100644 index 0717cdec1..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/componentstatuses.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" -) - -type ComponentStatusesInterface interface { - ComponentStatuses() ComponentStatusInterface -} - -// ComponentStatusInterface contains methods to retrieve ComponentStatus -type ComponentStatusInterface interface { - List(opts api.ListOptions) (*api.ComponentStatusList, error) - Get(name string) (*api.ComponentStatus, error) - - // TODO: It'd be nice to have watch support at some point - //Watch(opts api.ListOptions) (watch.Interface, error) -} - -// componentStatuses implements ComponentStatusesInterface -type componentStatuses struct { - client *Client -} - -func newComponentStatuses(c *Client) *componentStatuses { - return &componentStatuses{c} -} - -func (c *componentStatuses) List(opts api.ListOptions) (result *api.ComponentStatusList, err error) { - result = &api.ComponentStatusList{} - err = c.client.Get(). - Resource("componentStatuses"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - - return result, err -} - -func (c *componentStatuses) Get(name string) (result *api.ComponentStatus, err error) { - result = &api.ComponentStatus{} - err = c.client.Get().Resource("componentStatuses").Name(name).Do().Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/conditions.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/conditions.go deleted file mode 100644 index f68f98fe1..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/conditions.go +++ /dev/null @@ -1,240 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/util/wait" - "k8s.io/kubernetes/pkg/watch" -) - -// ControllerHasDesiredReplicas returns a condition that will be true if and only if -// the desired replica count for a controller's ReplicaSelector equals the Replicas count. -func ControllerHasDesiredReplicas(c Interface, controller *api.ReplicationController) wait.ConditionFunc { - - // If we're given a controller where the status lags the spec, it either means that the controller is stale, - // or that the rc manager hasn't noticed the update yet. Polling status.Replicas is not safe in the latter case. - desiredGeneration := controller.Generation - - return func() (bool, error) { - ctrl, err := c.ReplicationControllers(controller.Namespace).Get(controller.Name) - if err != nil { - return false, err - } - // There's a chance a concurrent update modifies the Spec.Replicas causing this check to pass, - // or, after this check has passed, a modification causes the rc manager to create more pods. - // This will not be an issue once we've implemented graceful delete for rcs, but till then - // concurrent stop operations on the same rc might have unintended side effects. - return ctrl.Status.ObservedGeneration >= desiredGeneration && ctrl.Status.Replicas == ctrl.Spec.Replicas, nil - } -} - -// ReplicaSetHasDesiredReplicas returns a condition that will be true if and only if -// the desired replica count for a ReplicaSet's ReplicaSelector equals the Replicas count. -func ReplicaSetHasDesiredReplicas(c ExtensionsInterface, replicaSet *extensions.ReplicaSet) wait.ConditionFunc { - - // If we're given a ReplicaSet where the status lags the spec, it either means that the - // ReplicaSet is stale, or that the ReplicaSet manager hasn't noticed the update yet. - // Polling status.Replicas is not safe in the latter case. - desiredGeneration := replicaSet.Generation - - return func() (bool, error) { - rs, err := c.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name) - if err != nil { - return false, err - } - // There's a chance a concurrent update modifies the Spec.Replicas causing this check to - // pass, or, after this check has passed, a modification causes the ReplicaSet manager to - // create more pods. This will not be an issue once we've implemented graceful delete for - // ReplicaSets, but till then concurrent stop operations on the same ReplicaSet might have - // unintended side effects. - return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.Replicas == rs.Spec.Replicas, nil - } -} - -// JobHasDesiredParallelism returns a condition that will be true if the desired parallelism count -// for a job equals the current active counts or is less by an appropriate successful/unsuccessful count. 
-func JobHasDesiredParallelism(c BatchInterface, job *batch.Job) wait.ConditionFunc { - - return func() (bool, error) { - job, err := c.Jobs(job.Namespace).Get(job.Name) - if err != nil { - return false, err - } - - // desired parallelism can be either the exact number, in which case return immediately - if job.Status.Active == *job.Spec.Parallelism { - return true, nil - } - if job.Spec.Completions == nil { - // A job without specified completions needs to wait for Active to reach Parallelism. - return false, nil - } else { - // otherwise count successful - progress := *job.Spec.Completions - job.Status.Active - job.Status.Succeeded - return progress == 0, nil - } - } -} - -// DeploymentHasDesiredReplicas returns a condition that will be true if and only if -// the desired replica count for a deployment equals its updated replicas count. -// (non-terminated pods that have the desired template spec). -func DeploymentHasDesiredReplicas(c ExtensionsInterface, deployment *extensions.Deployment) wait.ConditionFunc { - // If we're given a deployment where the status lags the spec, it either - // means that the deployment is stale, or that the deployment manager hasn't - // noticed the update yet. Polling status.Replicas is not safe in the latter - // case. - desiredGeneration := deployment.Generation - - return func() (bool, error) { - deployment, err := c.Deployments(deployment.Namespace).Get(deployment.Name) - if err != nil { - return false, err - } - return deployment.Status.ObservedGeneration >= desiredGeneration && - deployment.Status.UpdatedReplicas == deployment.Spec.Replicas, nil - } -} - -// ErrPodCompleted is returned by PodRunning or PodContainerRunning to indicate that -// the pod has already reached completed state. -var ErrPodCompleted = fmt.Errorf("pod ran to completion") - -// PodRunning returns true if the pod is running, false if the pod has not yet reached running state, -// returns ErrPodCompleted if the pod has run to completion, or an error in any other case. -func PodRunning(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "") - } - switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodRunning: - return true, nil - case api.PodFailed, api.PodSucceeded: - return false, ErrPodCompleted - } - } - return false, nil -} - -// PodCompleted returns true if the pod has run to completion, false if the pod has not yet -// reached running state, or an error in any other case. -func PodCompleted(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "") - } - switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodFailed, api.PodSucceeded: - return true, nil - } - } - return false, nil -} - -// PodRunningAndReady returns true if the pod is running and ready, false if the pod has not -// yet reached those states, returns ErrPodCompleted if the pod has run to completion, or -// an error in any other case. 
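For orientation only (not part of this diff): the condition helpers above were typically handed to the polling utilities in the same vendored tree. A minimal sketch, assuming the removed k8s.io/kubernetes/pkg/client/unversioned package and the pkg/util/wait API of the same vintage:

    package example

    import (
        "time"

        "k8s.io/kubernetes/pkg/api"
        client "k8s.io/kubernetes/pkg/client/unversioned"
        "k8s.io/kubernetes/pkg/util/wait"
    )

    // waitForRC polls until the replication controller's observed status matches
    // its spec, using the ControllerHasDesiredReplicas condition defined above.
    func waitForRC(c client.Interface, rc *api.ReplicationController) error {
        return wait.Poll(2*time.Second, time.Minute, client.ControllerHasDesiredReplicas(c, rc))
    }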
-func PodRunningAndReady(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "") - } - switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodFailed, api.PodSucceeded: - return false, ErrPodCompleted - case api.PodRunning: - return api.IsPodReady(t), nil - } - } - return false, nil -} - -// PodNotPending returns true if the pod has left the pending state, false if it has not, -// or an error in any other case (such as if the pod was deleted). -func PodNotPending(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "") - } - switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodPending: - return false, nil - default: - return true, nil - } - } - return false, nil -} - -// PodContainerRunning returns false until the named container has ContainerStatus running (at least once), -// and will return an error if the pod is deleted, runs to completion, or the container pod is not available. -func PodContainerRunning(containerName string) watch.ConditionFunc { - return func(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, "") - } - switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodRunning, api.PodPending: - case api.PodFailed, api.PodSucceeded: - return false, ErrPodCompleted - default: - return false, nil - } - for _, s := range t.Status.ContainerStatuses { - if s.Name != containerName { - continue - } - return s.State.Running != nil, nil - } - return false, nil - } - return false, nil - } -} - -// ServiceAccountHasSecrets returns true if the service account has at least one secret, -// false if it does not, or an error. -func ServiceAccountHasSecrets(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(unversioned.GroupResource{Resource: "serviceaccounts"}, "") - } - switch t := event.Object.(type) { - case *api.ServiceAccount: - return len(t.Secrets) > 0, nil - } - return false, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/configmap.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/configmap.go deleted file mode 100644 index 60fffa755..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/configmap.go +++ /dev/null @@ -1,122 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/watch" -) - -const ( - ConfigMapResourceName string = "configmaps" -) - -type ConfigMapsNamespacer interface { - ConfigMaps(namespace string) ConfigMapsInterface -} - -type ConfigMapsInterface interface { - Get(string) (*api.ConfigMap, error) - List(opts api.ListOptions) (*api.ConfigMapList, error) - Create(*api.ConfigMap) (*api.ConfigMap, error) - Delete(string) error - Update(*api.ConfigMap) (*api.ConfigMap, error) - Watch(api.ListOptions) (watch.Interface, error) -} - -type ConfigMaps struct { - client *Client - namespace string -} - -// ConfigMaps should implement ConfigMapsInterface -var _ ConfigMapsInterface = &ConfigMaps{} - -func newConfigMaps(c *Client, ns string) *ConfigMaps { - return &ConfigMaps{ - client: c, - namespace: ns, - } -} - -func (c *ConfigMaps) Get(name string) (*api.ConfigMap, error) { - result := &api.ConfigMap{} - err := c.client.Get(). - Namespace(c.namespace). - Resource(ConfigMapResourceName). - Name(name). - Do(). - Into(result) - - return result, err -} - -func (c *ConfigMaps) List(opts api.ListOptions) (*api.ConfigMapList, error) { - result := &api.ConfigMapList{} - err := c.client.Get(). - Namespace(c.namespace). - Resource(ConfigMapResourceName). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - - return result, err -} - -func (c *ConfigMaps) Create(cfg *api.ConfigMap) (*api.ConfigMap, error) { - result := &api.ConfigMap{} - err := c.client.Post(). - Namespace(c.namespace). - Resource(ConfigMapResourceName). - Body(cfg). - Do(). - Into(result) - - return result, err -} - -func (c *ConfigMaps) Delete(name string) error { - return c.client.Delete(). - Namespace(c.namespace). - Resource(ConfigMapResourceName). - Name(name). - Do(). - Error() -} - -func (c *ConfigMaps) Update(cfg *api.ConfigMap) (*api.ConfigMap, error) { - result := &api.ConfigMap{} - - err := c.client.Put(). - Namespace(c.namespace). - Resource(ConfigMapResourceName). - Name(cfg.Name). - Body(cfg). - Do(). - Into(result) - - return result, err -} - -func (c *ConfigMaps) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.namespace). - Resource(ConfigMapResourceName). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/containerinfo.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/containerinfo.go deleted file mode 100644 index 306386852..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/containerinfo.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net" - "net/http" - "strconv" - - cadvisorapi "github.com/google/cadvisor/info/v1" -) - -type ContainerInfoGetter interface { - // GetContainerInfo returns information about a container. 
- GetContainerInfo(host, podID, containerID string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) - // GetRootInfo returns information about the root container on a machine. - GetRootInfo(host string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) - // GetMachineInfo returns the machine's information like number of cores, memory capacity. - GetMachineInfo(host string) (*cadvisorapi.MachineInfo, error) -} - -type HTTPContainerInfoGetter struct { - Client *http.Client - Port int -} - -func (self *HTTPContainerInfoGetter) GetMachineInfo(host string) (*cadvisorapi.MachineInfo, error) { - request, err := http.NewRequest( - "GET", - fmt.Sprintf("http://%v/spec", - net.JoinHostPort(host, strconv.Itoa(self.Port)), - ), - nil, - ) - if err != nil { - return nil, err - } - - response, err := self.Client.Do(request) - if err != nil { - return nil, err - } - defer response.Body.Close() - if response.StatusCode != http.StatusOK { - return nil, fmt.Errorf("trying to get machine spec from %v; received status %v", - host, response.Status) - } - var minfo cadvisorapi.MachineInfo - err = json.NewDecoder(response.Body).Decode(&minfo) - if err != nil { - return nil, err - } - return &minfo, nil -} - -func (self *HTTPContainerInfoGetter) getContainerInfo(host, path string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) { - var body io.Reader - if req != nil { - content, err := json.Marshal(req) - if err != nil { - return nil, err - } - body = bytes.NewBuffer(content) - } - - request, err := http.NewRequest( - "GET", - fmt.Sprintf("http://%v/stats/%v", - net.JoinHostPort(host, strconv.Itoa(self.Port)), - path, - ), - body, - ) - if err != nil { - return nil, err - } - - response, err := self.Client.Do(request) - if err != nil { - return nil, err - } - defer response.Body.Close() - if response.StatusCode != http.StatusOK { - return nil, fmt.Errorf("trying to get info for %v from %v; received status %v", - path, host, response.Status) - } - var cinfo cadvisorapi.ContainerInfo - err = json.NewDecoder(response.Body).Decode(&cinfo) - if err != nil { - return nil, err - } - return &cinfo, nil -} - -func (self *HTTPContainerInfoGetter) GetContainerInfo(host, podID, containerID string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) { - return self.getContainerInfo( - host, - fmt.Sprintf("%v/%v", podID, containerID), - req, - ) -} - -func (self *HTTPContainerInfoGetter) GetRootInfo(host string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) { - return self.getContainerInfo(host, "", req) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/daemon_sets.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/daemon_sets.go deleted file mode 100644 index fa12591a6..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/daemon_sets.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/watch" -) - -// DaemonsSetsNamespacer has methods to work with DaemonSet resources in a namespace -type DaemonSetsNamespacer interface { - DaemonSets(namespace string) DaemonSetInterface -} - -type DaemonSetInterface interface { - List(opts api.ListOptions) (*extensions.DaemonSetList, error) - Get(name string) (*extensions.DaemonSet, error) - Create(ctrl *extensions.DaemonSet) (*extensions.DaemonSet, error) - Update(ctrl *extensions.DaemonSet) (*extensions.DaemonSet, error) - UpdateStatus(ctrl *extensions.DaemonSet) (*extensions.DaemonSet, error) - Delete(name string) error - Watch(opts api.ListOptions) (watch.Interface, error) -} - -// daemonSets implements DaemonsSetsNamespacer interface -type daemonSets struct { - r *ExtensionsClient - ns string -} - -func newDaemonSets(c *ExtensionsClient, namespace string) *daemonSets { - return &daemonSets{c, namespace} -} - -// Ensure statically that daemonSets implements DaemonSetsInterface. -var _ DaemonSetInterface = &daemonSets{} - -func (c *daemonSets) List(opts api.ListOptions) (result *extensions.DaemonSetList, err error) { - result = &extensions.DaemonSetList{} - err = c.r.Get().Namespace(c.ns).Resource("daemonsets").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get returns information about a particular daemon set. -func (c *daemonSets) Get(name string) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.r.Get().Namespace(c.ns).Resource("daemonsets").Name(name).Do().Into(result) - return -} - -// Create creates a new daemon set. -func (c *daemonSets) Create(daemon *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.r.Post().Namespace(c.ns).Resource("daemonsets").Body(daemon).Do().Into(result) - return -} - -// Update updates an existing daemon set. -func (c *daemonSets) Update(daemon *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.r.Put().Namespace(c.ns).Resource("daemonsets").Name(daemon.Name).Body(daemon).Do().Into(result) - return -} - -// UpdateStatus updates an existing daemon set status -func (c *daemonSets) UpdateStatus(daemon *extensions.DaemonSet) (result *extensions.DaemonSet, err error) { - result = &extensions.DaemonSet{} - err = c.r.Put().Namespace(c.ns).Resource("daemonsets").Name(daemon.Name).SubResource("status").Body(daemon).Do().Into(result) - return -} - -// Delete deletes an existing daemon set. -func (c *daemonSets) Delete(name string) error { - return c.r.Delete().Namespace(c.ns).Resource("daemonsets").Name(name).Do().Error() -} - -// Watch returns a watch.Interface that watches the requested daemon sets. -func (c *daemonSets) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.r.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/deployment.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/deployment.go deleted file mode 100644 index cafd4cfd1..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/deployment.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/watch" -) - -// DeploymentsNamespacer has methods to work with Deployment resources in a namespace -type DeploymentsNamespacer interface { - Deployments(namespace string) DeploymentInterface -} - -// DeploymentInterface has methods to work with Deployment resources. -type DeploymentInterface interface { - List(opts api.ListOptions) (*extensions.DeploymentList, error) - Get(name string) (*extensions.Deployment, error) - Delete(name string, options *api.DeleteOptions) error - Create(*extensions.Deployment) (*extensions.Deployment, error) - Update(*extensions.Deployment) (*extensions.Deployment, error) - UpdateStatus(*extensions.Deployment) (*extensions.Deployment, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Rollback(*extensions.DeploymentRollback) error -} - -// deployments implements DeploymentInterface -type deployments struct { - client *ExtensionsClient - ns string -} - -// Ensure statically that deployments implements DeploymentInterface. -var _ DeploymentInterface = &deployments{} - -// newDeployments returns a Deployments -func newDeployments(c *ExtensionsClient, namespace string) *deployments { - return &deployments{ - client: c, - ns: namespace, - } -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(opts api.ListOptions) (result *extensions.DeploymentList, err error) { - result = &extensions.DeploymentList{} - err = c.client.Get().Namespace(c.ns).Resource("deployments").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(name string) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Get().Namespace(c.ns).Resource("deployments").Name(name).Do().Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete().Namespace(c.ns).Resource("deployments").Name(name).Body(options).Do().Error() -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Post().Namespace(c.ns).Resource("deployments").Body(deployment).Do().Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. 
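As a usage sketch only (not part of this change), a consumer of the DeploymentInterface above would obtain it from an ExtensionsClient and watch a namespace; ResultChan and Stop are assumed from pkg/watch of the same vintage:

    package example

    import (
        "k8s.io/kubernetes/pkg/api"
        client "k8s.io/kubernetes/pkg/client/unversioned"
        "k8s.io/kubernetes/pkg/watch"
    )

    // firstDeploymentEvent opens a watch on Deployments in one namespace and
    // returns the first event delivered on the watch channel.
    func firstDeploymentEvent(c *client.ExtensionsClient, ns string) (watch.Event, error) {
        w, err := c.Deployments(ns).Watch(api.ListOptions{})
        if err != nil {
            return watch.Event{}, err
        }
        defer w.Stop()
        ev := <-w.ResultChan()
        return ev, nil
    }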
-func (c *deployments) Update(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Put().Namespace(c.ns).Resource("deployments").Name(deployment.Name).Body(deployment).Do().Into(result) - return -} - -func (c *deployments) UpdateStatus(deployment *extensions.Deployment) (result *extensions.Deployment, err error) { - result = &extensions.Deployment{} - err = c.client.Put().Namespace(c.ns).Resource("deployments").Name(deployment.Name).SubResource("status").Body(deployment).Do().Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Rollback applied the provided DeploymentRollback to the named deployment in the current namespace. -func (c *deployments) Rollback(deploymentRollback *extensions.DeploymentRollback) error { - return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).SubResource("rollback").Body(deploymentRollback).Do().Error() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/doc.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/doc.go deleted file mode 100644 index 252d80975..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/doc.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package unversioned contains the implementation of the client side communication with the -Kubernetes master. The Client class provides methods for reading, creating, updating, -and deleting pods, replication controllers, daemons, services, and nodes. - -Most consumers should use the Config object to create a Client: - - import ( - client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/api" - ) - - [...] - - config := &client.Config{ - Host: "http://localhost:8080", - Username: "test", - Password: "password", - } - client, err := client.New(config) - if err != nil { - // handle error - } - pods, err := client.Pods(api.NamespaceDefault).List(api.ListOptions{}) - if err != nil { - // handle error - } - -More advanced consumers may wish to provide their own transport via a http.RoundTripper: - - config := &client.Config{ - Host: "https://localhost:8080", - Transport: oauthclient.Transport(), - } - client, err := client.New(config) - -The RESTClient type implements the Kubernetes API conventions (see `docs/devel/api-conventions.md`) -for a given API path and is intended for use by consumers implementing their own Kubernetes -compatible APIs. 
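A further fragment in the same spirit as the examples above (illustrative only; `c` stands for an already-constructed Client or RESTClient, and the wiring mirrors the typed helpers elsewhere in this package):

    result := &api.PodList{}
    err := c.Get().
        Namespace(api.NamespaceDefault).
        Resource("pods").
        VersionedParams(&api.ListOptions{}, api.ParameterCodec).
        Do().
        Into(result)
    if err != nil {
        // handle error
    }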
-*/ -package unversioned diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/endpoints.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/endpoints.go deleted file mode 100644 index c58c88a28..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/endpoints.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/watch" -) - -// EndpointsNamespacer has methods to work with Endpoints resources in a namespace -type EndpointsNamespacer interface { - Endpoints(namespace string) EndpointsInterface -} - -// EndpointsInterface has methods to work with Endpoints resources -type EndpointsInterface interface { - Create(endpoints *api.Endpoints) (*api.Endpoints, error) - List(opts api.ListOptions) (*api.EndpointsList, error) - Get(name string) (*api.Endpoints, error) - Delete(name string) error - Update(endpoints *api.Endpoints) (*api.Endpoints, error) - Watch(opts api.ListOptions) (watch.Interface, error) -} - -// endpoints implements EndpointsInterface -type endpoints struct { - r *Client - ns string -} - -// newEndpoints returns a endpoints -func newEndpoints(c *Client, namespace string) *endpoints { - return &endpoints{c, namespace} -} - -// Create creates a new endpoint. -func (c *endpoints) Create(endpoints *api.Endpoints) (*api.Endpoints, error) { - result := &api.Endpoints{} - err := c.r.Post().Namespace(c.ns).Resource("endpoints").Body(endpoints).Do().Into(result) - return result, err -} - -// List takes a selector, and returns the list of endpoints that match that selector -func (c *endpoints) List(opts api.ListOptions) (result *api.EndpointsList, err error) { - result = &api.EndpointsList{} - err = c.r.Get(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Get returns information about the endpoints for a particular service. -func (c *endpoints) Get(name string) (result *api.Endpoints, err error) { - result = &api.Endpoints{} - err = c.r.Get().Namespace(c.ns).Resource("endpoints").Name(name).Do().Into(result) - return -} - -// Delete takes the name of the endpoint, and returns an error if one occurs -func (c *endpoints) Delete(name string) error { - return c.r.Delete().Namespace(c.ns).Resource("endpoints").Name(name).Do().Error() -} - -// Watch returns a watch.Interface that watches the requested endpoints for a service. -func (c *endpoints) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.r.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -func (c *endpoints) Update(endpoints *api.Endpoints) (*api.Endpoints, error) { - result := &api.Endpoints{} - err := c.r.Put(). - Namespace(c.ns). - Resource("endpoints"). - Name(endpoints.Name). - Body(endpoints). - Do(). 
- Into(result) - return result, err -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/events.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/events.go deleted file mode 100644 index b882ccdc4..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/events.go +++ /dev/null @@ -1,219 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/watch" -) - -// EventNamespacer can return an EventInterface for the given namespace. -type EventNamespacer interface { - Events(namespace string) EventInterface -} - -// EventInterface has methods to work with Event resources -type EventInterface interface { - Create(event *api.Event) (*api.Event, error) - Update(event *api.Event) (*api.Event, error) - Patch(event *api.Event, data []byte) (*api.Event, error) - List(opts api.ListOptions) (*api.EventList, error) - Get(name string) (*api.Event, error) - Watch(opts api.ListOptions) (watch.Interface, error) - // Search finds events about the specified object - Search(objOrRef runtime.Object) (*api.EventList, error) - Delete(name string) error - // DeleteCollection deletes a collection of events. - DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error - // Returns the appropriate field selector based on the API version being used to communicate with the server. - // The returned field selector can be used with List and Watch to filter desired events. - GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector -} - -// events implements Events interface -type events struct { - client *Client - namespace string -} - -// newEvents returns a new events object. -func newEvents(c *Client, ns string) *events { - return &events{ - client: c, - namespace: ns, - } -} - -// Create makes a new event. Returns the copy of the event the server returns, -// or an error. The namespace to create the event within is deduced from the -// event; it must either match this event client's namespace, or this event -// client must have been created with the "" namespace. -func (e *events) Create(event *api.Event) (*api.Event, error) { - if e.namespace != "" && event.Namespace != e.namespace { - return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.namespace) - } - result := &api.Event{} - err := e.client.Post(). - Namespace(event.Namespace). - Resource("events"). - Body(event). - Do(). - Into(result) - return result, err -} - -// Update modifies an existing event. It returns the copy of the event that the server returns, -// or an error. The namespace and key to update the event within is deduced from the event. The -// namespace must either match this event client's namespace, or this event client must have been -// created with the "" namespace. 
Update also requires the ResourceVersion to be set in the event -// object. -func (e *events) Update(event *api.Event) (*api.Event, error) { - result := &api.Event{} - err := e.client.Put(). - Namespace(event.Namespace). - Resource("events"). - Name(event.Name). - Body(event). - Do(). - Into(result) - return result, err -} - -// Patch modifies an existing event. It returns the copy of the event that the server returns, or an -// error. The namespace and name of the target event is deduced from the incompleteEvent. The -// namespace must either match this event client's namespace, or this event client must have been -// created with the "" namespace. -func (e *events) Patch(incompleteEvent *api.Event, data []byte) (*api.Event, error) { - result := &api.Event{} - err := e.client.Patch(api.StrategicMergePatchType). - Namespace(incompleteEvent.Namespace). - Resource("events"). - Name(incompleteEvent.Name). - Body(data). - Do(). - Into(result) - return result, err -} - -// List returns a list of events matching the selectors. -func (e *events) List(opts api.ListOptions) (*api.EventList, error) { - result := &api.EventList{} - err := e.client.Get(). - Namespace(e.namespace). - Resource("events"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return result, err -} - -// Get returns the given event, or an error. -func (e *events) Get(name string) (*api.Event, error) { - result := &api.Event{} - err := e.client.Get(). - Namespace(e.namespace). - Resource("events"). - Name(name). - Do(). - Into(result) - return result, err -} - -// Watch starts watching for events matching the given selectors. -func (e *events) Watch(opts api.ListOptions) (watch.Interface, error) { - return e.client.Get(). - Prefix("watch"). - Namespace(e.namespace). - Resource("events"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Search finds events about the specified object. The namespace of the -// object must match this event's client namespace unless the event client -// was made with the "" namespace. -func (e *events) Search(objOrRef runtime.Object) (*api.EventList, error) { - ref, err := api.GetReference(objOrRef) - if err != nil { - return nil, err - } - if e.namespace != "" && ref.Namespace != e.namespace { - return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.namespace) - } - stringRefKind := string(ref.Kind) - var refKind *string - if stringRefKind != "" { - refKind = &stringRefKind - } - stringRefUID := string(ref.UID) - var refUID *string - if stringRefUID != "" { - refUID = &stringRefUID - } - fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID) - return e.List(api.ListOptions{FieldSelector: fieldSelector}) -} - -// Delete deletes an existing event. -func (e *events) Delete(name string) error { - return e.client.Delete(). - Namespace(e.namespace). - Resource("events"). - Name(name). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (e *events) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - return e.client.Delete(). - Namespace(e.namespace). - Resource("events"). - VersionedParams(&listOptions, api.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Returns the appropriate field selector based on the API version being used to communicate with the server. -// The returned field selector can be used with List and Watch to filter desired events. 
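A usage sketch for the selector helper described above (illustrative only; it assumes the top-level Client satisfies the EventNamespacer interface shown earlier in this file):

    package example

    import (
        "k8s.io/kubernetes/pkg/api"
        client "k8s.io/kubernetes/pkg/client/unversioned"
    )

    // eventsForPod lists only the events whose involvedObject refers to the
    // named pod, using the field selector produced by GetFieldSelector.
    func eventsForPod(c *client.Client, namespace, podName string) (*api.EventList, error) {
        kind := "Pod"
        ev := c.Events(namespace)
        sel := ev.GetFieldSelector(&podName, &namespace, &kind, nil)
        return ev.List(api.ListOptions{FieldSelector: sel})
    }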
-func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { - apiVersion := e.client.APIVersion().String() - field := fields.Set{} - if involvedObjectName != nil { - field[GetInvolvedObjectNameFieldLabel(apiVersion)] = *involvedObjectName - } - if involvedObjectNamespace != nil { - field["involvedObject.namespace"] = *involvedObjectNamespace - } - if involvedObjectKind != nil { - field["involvedObject.kind"] = *involvedObjectKind - } - if involvedObjectUID != nil { - field["involvedObject.uid"] = *involvedObjectUID - } - return field.AsSelector() -} - -// Returns the appropriate field label to use for name of the involved object as per the given API version. -func GetInvolvedObjectNameFieldLabel(version string) string { - return "involvedObject.name" -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/extensions.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/extensions.go deleted file mode 100644 index 3c9114d9a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/extensions.go +++ /dev/null @@ -1,138 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/restclient" -) - -// Interface holds the experimental methods for clients of Kubernetes -// to allow mock testing. -// Features of Extensions group are not supported and may be changed or removed in -// incompatible ways at any time. -type ExtensionsInterface interface { - ScaleNamespacer - DaemonSetsNamespacer - DeploymentsNamespacer - JobsNamespacer - IngressNamespacer - NetworkPolicyNamespacer - ThirdPartyResourceNamespacer - ReplicaSetsNamespacer - PodSecurityPoliciesInterface -} - -// ExtensionsClient is used to interact with experimental Kubernetes features. -// Features of Extensions group are not supported and may be changed or removed in -// incompatible ways at any time. 
-type ExtensionsClient struct { - *restclient.RESTClient -} - -func (c *ExtensionsClient) PodSecurityPolicies() PodSecurityPolicyInterface { - return newPodSecurityPolicy(c) -} - -func (c *ExtensionsClient) Scales(namespace string) ScaleInterface { - return newScales(c, namespace) -} - -func (c *ExtensionsClient) DaemonSets(namespace string) DaemonSetInterface { - return newDaemonSets(c, namespace) -} - -func (c *ExtensionsClient) Deployments(namespace string) DeploymentInterface { - return newDeployments(c, namespace) -} - -func (c *ExtensionsClient) Jobs(namespace string) JobInterface { - return newJobs(c, namespace) -} - -func (c *ExtensionsClient) Ingress(namespace string) IngressInterface { - return newIngress(c, namespace) -} - -func (c *ExtensionsClient) NetworkPolicies(namespace string) NetworkPolicyInterface { - return newNetworkPolicies(c, namespace) -} - -func (c *ExtensionsClient) ThirdPartyResources() ThirdPartyResourceInterface { - return newThirdPartyResources(c) -} - -func (c *ExtensionsClient) ReplicaSets(namespace string) ReplicaSetInterface { - return newReplicaSets(c, namespace) -} - -// NewExtensions creates a new ExtensionsClient for the given config. This client -// provides access to experimental Kubernetes features. -// Features of Extensions group are not supported and may be changed or removed in -// incompatible ways at any time. -func NewExtensions(c *restclient.Config) (*ExtensionsClient, error) { - config := *c - if err := setExtensionsDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &ExtensionsClient{client}, nil -} - -// NewExtensionsOrDie creates a new ExtensionsClient for the given config and -// panics if there is an error in the config. -// Features of Extensions group are not supported and may be changed or removed in -// incompatible ways at any time. -func NewExtensionsOrDie(c *restclient.Config) *ExtensionsClient { - client, err := NewExtensions(c) - if err != nil { - panic(err) - } - return client -} - -func setExtensionsDefaults(config *restclient.Config) error { - // if experimental group is not registered, return an error - g, err := registered.Group(extensions.GroupName) - if err != nil { - return err - } - config.APIPath = defaultAPIPath - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - // TODO: Unconditionally set the config.Version, until we fix the config. - //if config.Version == "" { - copyGroupVersion := g.GroupVersion - config.GroupVersion = ©GroupVersion - //} - - config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) - config.NegotiatedSerializer = api.Codecs - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/flags.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/flags.go deleted file mode 100644 index 9fc540cfb..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/flags.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "time" -) - -// FlagSet abstracts the flag interface for compatibility with both Golang "flag" -// and cobra pflags (Posix style). -type FlagSet interface { - StringVar(p *string, name, value, usage string) - BoolVar(p *bool, name string, value bool, usage string) - UintVar(p *uint, name string, value uint, usage string) - DurationVar(p *time.Duration, name string, value time.Duration, usage string) - IntVar(p *int, name string, value int, usage string) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/helper.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/helper.go deleted file mode 100644 index 020bb01c2..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/helper.go +++ /dev/null @@ -1,262 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/autoscaling" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/apis/policy" - "k8s.io/kubernetes/pkg/apis/rbac" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/client/typed/discovery" - "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/version" - // Import solely to initialize client auth plugins. - _ "k8s.io/kubernetes/plugin/pkg/client/auth" -) - -const ( - legacyAPIPath = "/api" - defaultAPIPath = "/apis" -) - -// New creates a Kubernetes client for the given config. This client works with pods, -// replication controllers, daemons, and services. It allows operations such as list, get, update -// and delete on these objects. An error is returned if the provided configuration -// is not valid. 
-func New(c *restclient.Config) (*Client, error) { - config := *c - if err := SetKubernetesDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - - discoveryConfig := *c - discoveryClient, err := discovery.NewDiscoveryClientForConfig(&discoveryConfig) - if err != nil { - return nil, err - } - - var autoscalingClient *AutoscalingClient - if registered.IsRegistered(autoscaling.GroupName) { - autoscalingConfig := *c - autoscalingClient, err = NewAutoscaling(&autoscalingConfig) - if err != nil { - return nil, err - } - } - - var batchClient *BatchClient - if registered.IsRegistered(batch.GroupName) { - batchConfig := *c - batchClient, err = NewBatch(&batchConfig) - if err != nil { - return nil, err - } - } - - var extensionsClient *ExtensionsClient - if registered.IsRegistered(extensions.GroupName) { - extensionsConfig := *c - extensionsClient, err = NewExtensions(&extensionsConfig) - if err != nil { - return nil, err - } - } - var policyClient *PolicyClient - if registered.IsRegistered(policy.GroupName) { - policyConfig := *c - policyClient, err = NewPolicy(&policyConfig) - if err != nil { - return nil, err - } - } - - var appsClient *AppsClient - if registered.IsRegistered(apps.GroupName) { - appsConfig := *c - appsClient, err = NewApps(&appsConfig) - if err != nil { - return nil, err - } - } - - var rbacClient *RbacClient - if registered.IsRegistered(rbac.GroupName) { - rbacConfig := *c - rbacClient, err = NewRbac(&rbacConfig) - if err != nil { - return nil, err - } - } - - return &Client{RESTClient: client, AutoscalingClient: autoscalingClient, BatchClient: batchClient, ExtensionsClient: extensionsClient, DiscoveryClient: discoveryClient, AppsClient: appsClient, PolicyClient: policyClient, RbacClient: rbacClient}, nil -} - -// MatchesServerVersion queries the server to compares the build version -// (git hash) of the client with the server's build version. It returns an error -// if it failed to contact the server or if the versions are not an exact match. -func MatchesServerVersion(client *Client, c *restclient.Config) error { - var err error - if client == nil { - client, err = New(c) - if err != nil { - return err - } - } - cVer := version.Get() - sVer, err := client.Discovery().ServerVersion() - if err != nil { - return fmt.Errorf("couldn't read version from server: %v\n", err) - } - // GitVersion includes GitCommit and GitTreeState, but best to be safe? - if cVer.GitVersion != sVer.GitVersion || cVer.GitCommit != sVer.GitCommit || cVer.GitTreeState != cVer.GitTreeState { - return fmt.Errorf("server version (%#v) differs from client version (%#v)!\n", sVer, cVer) - } - - return nil -} - -// NegotiateVersion queries the server's supported api versions to find -// a version that both client and server support. -// - If no version is provided, try registered client versions in order of -// preference. -// - If version is provided, but not default config (explicitly requested via -// commandline flag), and is unsupported by the server, print a warning to -// stderr and try client's registered versions in order of preference. -// - If version is config default, and the server does not support it, -// return an error. 
-func NegotiateVersion(client *Client, c *restclient.Config, requestedGV *unversioned.GroupVersion, clientRegisteredGVs []unversioned.GroupVersion) (*unversioned.GroupVersion, error) { - var err error - if client == nil { - client, err = New(c) - if err != nil { - return nil, err - } - } - clientVersions := sets.String{} - for _, gv := range clientRegisteredGVs { - clientVersions.Insert(gv.String()) - } - groups, err := client.ServerGroups() - if err != nil { - // This is almost always a connection error, and higher level code should treat this as a generic error, - // not a negotiation specific error. - return nil, err - } - versions := unversioned.ExtractGroupVersions(groups) - serverVersions := sets.String{} - for _, v := range versions { - serverVersions.Insert(v) - } - - // If no version requested, use config version (may also be empty). - // make a copy of the original so we don't risk mutating input here or in the returned value - var preferredGV *unversioned.GroupVersion - switch { - case requestedGV != nil: - t := *requestedGV - preferredGV = &t - case c.GroupVersion != nil: - t := *c.GroupVersion - preferredGV = &t - } - - // If version explicitly requested verify that both client and server support it. - // If server does not support warn, but try to negotiate a lower version. - if preferredGV != nil { - if !clientVersions.Has(preferredGV.String()) { - return nil, fmt.Errorf("client does not support API version %q; client supported API versions: %v", preferredGV, clientVersions) - - } - if serverVersions.Has(preferredGV.String()) { - return preferredGV, nil - } - // If we are using an explicit config version the server does not support, fail. - if (c.GroupVersion != nil) && (*preferredGV == *c.GroupVersion) { - return nil, fmt.Errorf("server does not support API version %q", preferredGV) - } - } - - for _, clientGV := range clientRegisteredGVs { - if serverVersions.Has(clientGV.String()) { - // Version was not explicitly requested in command config (--api-version). - // Ok to fall back to a supported version with a warning. - // TODO: caesarxuchao: enable the warning message when we have - // proper fix. Please refer to issue #14895. - // if len(version) != 0 { - // glog.Warningf("Server does not support API version '%s'. Falling back to '%s'.", version, clientVersion) - // } - t := clientGV - return &t, nil - } - } - return nil, fmt.Errorf("failed to negotiate an api version; server supports: %v, client supports: %v", - serverVersions, clientVersions) -} - -// NewOrDie creates a Kubernetes client and panics if the provided API version is not recognized. -func NewOrDie(c *restclient.Config) *Client { - client, err := New(c) - if err != nil { - panic(err) - } - return client -} - -// NewInCluster is a shortcut for calling InClusterConfig() and then New(). -func NewInCluster() (*Client, error) { - cc, err := restclient.InClusterConfig() - if err != nil { - return nil, err - } - return New(cc) -} - -// SetKubernetesDefaults sets default values on the provided client config for accessing the -// Kubernetes API or returns an error if any of the defaults are impossible or invalid. 
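For reference, a minimal sketch of how the helper.go constructors removed in this hunk were typically wired together: build a config with restclient.InClusterConfig, create the deprecated unversioned Client with New, then compare client and server builds with MatchesServerVersion. This is illustrative only and assumes the pre-removal vendor path k8s.io/kubernetes/pkg/client/unversioned is still importable and that the program runs inside a pod.

    package main

    import (
    	"fmt"
    	"log"

    	"k8s.io/kubernetes/pkg/client/restclient"
    	client "k8s.io/kubernetes/pkg/client/unversioned"
    )

    func main() {
    	// InClusterConfig only succeeds when running inside a pod.
    	cfg, err := restclient.InClusterConfig()
    	if err != nil {
    		log.Fatalf("building in-cluster config: %v", err)
    	}
    	c, err := client.New(cfg)
    	if err != nil {
    		log.Fatalf("creating unversioned client: %v", err)
    	}
    	// Report (but tolerate) a build-version mismatch between client and server.
    	if err := client.MatchesServerVersion(c, cfg); err != nil {
    		fmt.Println("version skew:", err)
    	}
    	sv, err := c.Discovery().ServerVersion()
    	if err != nil {
    		log.Fatalf("reading server version: %v", err)
    	}
    	fmt.Println("connected to", sv.GitVersion)
    }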
-// TODO: this method needs to be split into one that sets defaults per group, expected to be fix in PR "Refactoring clientcache.go and helper.go #14592" -func SetKubernetesDefaults(config *restclient.Config) error { - if config.APIPath == "" { - config.APIPath = legacyAPIPath - } - g, err := registered.Group(api.GroupName) - if err != nil { - return err - } - // TODO: Unconditionally set the config.Version, until we fix the config. - copyGroupVersion := g.GroupVersion - config.GroupVersion = ©GroupVersion - if config.NegotiatedSerializer == nil { - config.NegotiatedSerializer = api.Codecs - } - if config.Codec == nil { - config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) - } - - return restclient.SetKubernetesDefaults(config) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/horizontalpodautoscaler.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/horizontalpodautoscaler.go deleted file mode 100644 index 8cdba3a26..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/horizontalpodautoscaler.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/autoscaling" - "k8s.io/kubernetes/pkg/watch" -) - -// HorizontalPodAutoscalersNamespacer has methods to work with HorizontalPodAutoscaler resources in a namespace -type HorizontalPodAutoscalersNamespacer interface { - HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface -} - -// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. -type HorizontalPodAutoscalerInterface interface { - List(opts api.ListOptions) (*autoscaling.HorizontalPodAutoscalerList, error) - Get(name string) (*autoscaling.HorizontalPodAutoscaler, error) - Delete(name string, options *api.DeleteOptions) error - Create(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) - Update(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) - UpdateStatus(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (*autoscaling.HorizontalPodAutoscaler, error) - Watch(opts api.ListOptions) (watch.Interface, error) -} - -// horizontalPodAutoscalers implements HorizontalPodAutoscalersNamespacer interface using AutoscalingClient internally -type horizontalPodAutoscalers struct { - client *AutoscalingClient - ns string -} - -// newHorizontalPodAutoscalers returns a horizontalPodAutoscalers -func newHorizontalPodAutoscalers(c *AutoscalingClient, namespace string) *horizontalPodAutoscalers { - return &horizontalPodAutoscalers{ - client: c, - ns: namespace, - } -} - -// List takes label and field selectors, and returns the list of horizontalPodAutoscalers that match those selectors. 
-func (c *horizontalPodAutoscalers) List(opts api.ListOptions) (result *autoscaling.HorizontalPodAutoscalerList, err error) { - result = &autoscaling.HorizontalPodAutoscalerList{} - err = c.client.Get().Namespace(c.ns).Resource("horizontalPodAutoscalers").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get takes the name of the horizontalPodAutoscaler, and returns the corresponding HorizontalPodAutoscaler object, and an error if it occurs -func (c *horizontalPodAutoscalers) Get(name string) (result *autoscaling.HorizontalPodAutoscaler, err error) { - result = &autoscaling.HorizontalPodAutoscaler{} - err = c.client.Get().Namespace(c.ns).Resource("horizontalPodAutoscalers").Name(name).Do().Into(result) - return -} - -// Delete takes the name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete().Namespace(c.ns).Resource("horizontalPodAutoscalers").Name(name).Body(options).Do().Error() -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if it occurs. -func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { - result = &autoscaling.HorizontalPodAutoscaler{} - err = c.client.Post().Namespace(c.ns).Resource("horizontalPodAutoscalers").Body(horizontalPodAutoscaler).Do().Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if it occurs. -func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { - result = &autoscaling.HorizontalPodAutoscaler{} - err = c.client.Put().Namespace(c.ns).Resource("horizontalPodAutoscalers").Name(horizontalPodAutoscaler.Name).Body(horizontalPodAutoscaler).Do().Into(result) - return -} - -// UpdateStatus takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if it occurs. -func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *autoscaling.HorizontalPodAutoscaler) (result *autoscaling.HorizontalPodAutoscaler, err error) { - result = &autoscaling.HorizontalPodAutoscaler{} - err = c.client.Put().Namespace(c.ns).Resource("horizontalPodAutoscalers").Name(horizontalPodAutoscaler.Name).SubResource("status").Body(horizontalPodAutoscaler).Do().Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalers) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("horizontalPodAutoscalers"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/import_known_versions.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/import_known_versions.go deleted file mode 100644 index bbe61472d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/import_known_versions.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. 
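A small usage sketch for the HorizontalPodAutoscaler client deleted above. The only assumption beyond the removed interfaces is that some value satisfies HorizontalPodAutoscalersNamespacer; historically the unversioned Client did, via its embedded AutoscalingClient.

    package example

    import (
    	"k8s.io/kubernetes/pkg/apis/autoscaling"
    	client "k8s.io/kubernetes/pkg/client/unversioned"
    )

    // getAutoscaler fetches one HorizontalPodAutoscaler by name in the given
    // namespace, using only the HorizontalPodAutoscalersNamespacer interface.
    func getAutoscaler(c client.HorizontalPodAutoscalersNamespacer, ns, name string) (*autoscaling.HorizontalPodAutoscaler, error) {
    	return c.HorizontalPodAutoscalers(ns).Get(name)
    }

Note that UpdateStatus goes through the "status" subresource, so status changes do not travel through Update.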
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -// These imports are the API groups the client will support. -import ( - "fmt" - - _ "k8s.io/kubernetes/pkg/api/install" - "k8s.io/kubernetes/pkg/apimachinery/registered" - _ "k8s.io/kubernetes/pkg/apis/apps/install" - _ "k8s.io/kubernetes/pkg/apis/authentication.k8s.io/install" - _ "k8s.io/kubernetes/pkg/apis/authorization/install" - _ "k8s.io/kubernetes/pkg/apis/autoscaling/install" - _ "k8s.io/kubernetes/pkg/apis/batch/install" - _ "k8s.io/kubernetes/pkg/apis/componentconfig/install" - _ "k8s.io/kubernetes/pkg/apis/extensions/install" - _ "k8s.io/kubernetes/pkg/apis/policy/install" - _ "k8s.io/kubernetes/pkg/apis/rbac/install" -) - -func init() { - if missingVersions := registered.ValidateEnvRequestedVersions(); len(missingVersions) != 0 { - panic(fmt.Sprintf("KUBE_API_VERSIONS contains versions that are not installed: %q.", missingVersions)) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/ingress.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/ingress.go deleted file mode 100644 index 4865b2086..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/ingress.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/watch" -) - -// IngressNamespacer has methods to work with Ingress resources in a namespace -type IngressNamespacer interface { - Ingress(namespace string) IngressInterface -} - -// IngressInterface exposes methods to work on Ingress resources. -type IngressInterface interface { - List(opts api.ListOptions) (*extensions.IngressList, error) - Get(name string) (*extensions.Ingress, error) - Create(ingress *extensions.Ingress) (*extensions.Ingress, error) - Update(ingress *extensions.Ingress) (*extensions.Ingress, error) - Delete(name string, options *api.DeleteOptions) error - Watch(opts api.ListOptions) (watch.Interface, error) - UpdateStatus(ingress *extensions.Ingress) (*extensions.Ingress, error) -} - -// ingress implements IngressNamespacer interface -type ingress struct { - r *ExtensionsClient - ns string -} - -// newIngress returns a ingress -func newIngress(c *ExtensionsClient, namespace string) *ingress { - return &ingress{c, namespace} -} - -// List returns a list of ingress that match the label and field selectors. 
-func (c *ingress) List(opts api.ListOptions) (result *extensions.IngressList, err error) { - result = &extensions.IngressList{} - err = c.r.Get().Namespace(c.ns).Resource("ingresses").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get returns information about a particular ingress. -func (c *ingress) Get(name string) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.r.Get().Namespace(c.ns).Resource("ingresses").Name(name).Do().Into(result) - return -} - -// Create creates a new ingress. -func (c *ingress) Create(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.r.Post().Namespace(c.ns).Resource("ingresses").Body(ingress).Do().Into(result) - return -} - -// Update updates an existing ingress. -func (c *ingress) Update(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.r.Put().Namespace(c.ns).Resource("ingresses").Name(ingress.Name).Body(ingress).Do().Into(result) - return -} - -// Delete deletes a ingress, returns error if one occurs. -func (c *ingress) Delete(name string, options *api.DeleteOptions) (err error) { - return c.r.Delete().Namespace(c.ns).Resource("ingresses").Name(name).Body(options).Do().Error() -} - -// Watch returns a watch.Interface that watches the requested ingress. -func (c *ingress) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.r.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// UpdateStatus takes the name of the ingress and the new status. Returns the server's representation of the ingress, and an error, if it occurs. -func (c *ingress) UpdateStatus(ingress *extensions.Ingress) (result *extensions.Ingress, err error) { - result = &extensions.Ingress{} - err = c.r.Put().Namespace(c.ns).Resource("ingresses").Name(ingress.Name).SubResource("status").Body(ingress).Do().Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/jobs.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/jobs.go deleted file mode 100644 index 94b819079..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/jobs.go +++ /dev/null @@ -1,167 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/watch" -) - -// JobsNamespacer has methods to work with Job resources in a namespace -type JobsNamespacer interface { - Jobs(namespace string) JobInterface -} - -// JobInterface exposes methods to work on Job resources. 
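The Watch methods in these files all follow the same pattern: a GET with the "watch" prefix that returns a watch.Interface. A hedged sketch of consuming one, assuming the standard ResultChan and Stop methods on watch.Interface and that the ExtensionsClient (or anything else) provides the IngressNamespacer:

    package example

    import (
    	"fmt"

    	"k8s.io/kubernetes/pkg/api"
    	client "k8s.io/kubernetes/pkg/client/unversioned"
    )

    // logIngressEvents prints every event seen on an Ingress watch in one
    // namespace until the watch channel closes.
    func logIngressEvents(c client.IngressNamespacer, ns string) error {
    	w, err := c.Ingress(ns).Watch(api.ListOptions{})
    	if err != nil {
    		return err
    	}
    	defer w.Stop()
    	for ev := range w.ResultChan() {
    		fmt.Printf("ingress event: %s\n", ev.Type)
    	}
    	return nil
    }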
-type JobInterface interface { - List(opts api.ListOptions) (*batch.JobList, error) - Get(name string) (*batch.Job, error) - Create(job *batch.Job) (*batch.Job, error) - Update(job *batch.Job) (*batch.Job, error) - Delete(name string, options *api.DeleteOptions) error - Watch(opts api.ListOptions) (watch.Interface, error) - UpdateStatus(job *batch.Job) (*batch.Job, error) -} - -// jobs implements JobsNamespacer interface -type jobs struct { - r *ExtensionsClient - ns string -} - -// newJobs returns a jobs -func newJobs(c *ExtensionsClient, namespace string) *jobs { - return &jobs{c, namespace} -} - -// Ensure statically that jobs implements JobInterface. -var _ JobInterface = &jobs{} - -// List returns a list of jobs that match the label and field selectors. -func (c *jobs) List(opts api.ListOptions) (result *batch.JobList, err error) { - result = &batch.JobList{} - err = c.r.Get().Namespace(c.ns).Resource("jobs").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get returns information about a particular job. -func (c *jobs) Get(name string) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.r.Get().Namespace(c.ns).Resource("jobs").Name(name).Do().Into(result) - return -} - -// Create creates a new job. -func (c *jobs) Create(job *batch.Job) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.r.Post().Namespace(c.ns).Resource("jobs").Body(job).Do().Into(result) - return -} - -// Update updates an existing job. -func (c *jobs) Update(job *batch.Job) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.r.Put().Namespace(c.ns).Resource("jobs").Name(job.Name).Body(job).Do().Into(result) - return -} - -// Delete deletes a job, returns error if one occurs. -func (c *jobs) Delete(name string, options *api.DeleteOptions) (err error) { - return c.r.Delete().Namespace(c.ns).Resource("jobs").Name(name).Body(options).Do().Error() -} - -// Watch returns a watch.Interface that watches the requested jobs. -func (c *jobs) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.r.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// UpdateStatus takes the name of the job and the new status. Returns the server's representation of the job, and an error, if it occurs. -func (c *jobs) UpdateStatus(job *batch.Job) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.r.Put().Namespace(c.ns).Resource("jobs").Name(job.Name).SubResource("status").Body(job).Do().Into(result) - return -} - -// jobsV1 implements JobsNamespacer interface using BatchClient internally -type jobsV1 struct { - r *BatchClient - ns string -} - -// newJobsV1 returns a jobsV1 -func newJobsV1(c *BatchClient, namespace string) *jobsV1 { - return &jobsV1{c, namespace} -} - -// Ensure statically that jobsV1 implements JobInterface. -var _ JobInterface = &jobsV1{} - -// List returns a list of jobs that match the label and field selectors. -func (c *jobsV1) List(opts api.ListOptions) (result *batch.JobList, err error) { - result = &batch.JobList{} - err = c.r.Get().Namespace(c.ns).Resource("jobs").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get returns information about a particular job. -func (c *jobsV1) Get(name string) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.r.Get().Namespace(c.ns).Resource("jobs").Name(name).Do().Into(result) - return -} - -// Create creates a new job. 
-func (c *jobsV1) Create(job *batch.Job) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.r.Post().Namespace(c.ns).Resource("jobs").Body(job).Do().Into(result) - return -} - -// Update updates an existing job. -func (c *jobsV1) Update(job *batch.Job) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.r.Put().Namespace(c.ns).Resource("jobs").Name(job.Name).Body(job).Do().Into(result) - return -} - -// Delete deletes a job, returns error if one occurs. -func (c *jobsV1) Delete(name string, options *api.DeleteOptions) (err error) { - return c.r.Delete().Namespace(c.ns).Resource("jobs").Name(name).Body(options).Do().Error() -} - -// Watch returns a watch.Interface that watches the requested jobs. -func (c *jobsV1) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.r.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// UpdateStatus takes the name of the job and the new status. Returns the server's representation of the job, and an error, if it occurs. -func (c *jobsV1) UpdateStatus(job *batch.Job) (result *batch.Job, err error) { - result = &batch.Job{} - err = c.r.Put().Namespace(c.ns).Resource("jobs").Name(job.Name).SubResource("status").Body(job).Do().Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/limit_ranges.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/limit_ranges.go deleted file mode 100644 index 8bc2253da..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/limit_ranges.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/watch" -) - -// LimitRangesNamespacer has methods to work with LimitRange resources in a namespace -type LimitRangesNamespacer interface { - LimitRanges(namespace string) LimitRangeInterface -} - -// LimitRangeInterface has methods to work with LimitRange resources. -type LimitRangeInterface interface { - List(opts api.ListOptions) (*api.LimitRangeList, error) - Get(name string) (*api.LimitRange, error) - Delete(name string) error - Create(limitRange *api.LimitRange) (*api.LimitRange, error) - Update(limitRange *api.LimitRange) (*api.LimitRange, error) - Watch(opts api.ListOptions) (watch.Interface, error) -} - -// limitRanges implements LimitRangesNamespacer interface -type limitRanges struct { - r *Client - ns string -} - -// newLimitRanges returns a limitRanges -func newLimitRanges(c *Client, namespace string) *limitRanges { - return &limitRanges{ - r: c, - ns: namespace, - } -} - -// List takes a selector, and returns the list of limitRanges that match that selector. 
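Because both implementations above (the extensions-group jobs and the batch-group jobsV1) satisfy the same JobInterface, callers can be written against the interface and work with either. A sketch, with the caveat that real code would distinguish a NotFound error from other failures rather than swallowing them:

    package example

    import (
    	"k8s.io/kubernetes/pkg/api"
    	client "k8s.io/kubernetes/pkg/client/unversioned"
    )

    // deleteJobIfPresent removes a Job if it can be fetched, regardless of which
    // API group backs the JobInterface it is given.
    func deleteJobIfPresent(jobs client.JobInterface, name string) error {
    	if _, err := jobs.Get(name); err != nil {
    		// Illustration only: treat any lookup failure as "nothing to delete".
    		return nil
    	}
    	return jobs.Delete(name, &api.DeleteOptions{})
    }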
-func (c *limitRanges) List(opts api.ListOptions) (result *api.LimitRangeList, err error) { - result = &api.LimitRangeList{} - err = c.r.Get().Namespace(c.ns).Resource("limitRanges").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get takes the name of the limitRange, and returns the corresponding Pod object, and an error if it occurs -func (c *limitRanges) Get(name string) (result *api.LimitRange, err error) { - result = &api.LimitRange{} - err = c.r.Get().Namespace(c.ns).Resource("limitRanges").Name(name).Do().Into(result) - return -} - -// Delete takes the name of the limitRange, and returns an error if one occurs -func (c *limitRanges) Delete(name string) error { - return c.r.Delete().Namespace(c.ns).Resource("limitRanges").Name(name).Do().Error() -} - -// Create takes the representation of a limitRange. Returns the server's representation of the limitRange, and an error, if it occurs. -func (c *limitRanges) Create(limitRange *api.LimitRange) (result *api.LimitRange, err error) { - result = &api.LimitRange{} - err = c.r.Post().Namespace(c.ns).Resource("limitRanges").Body(limitRange).Do().Into(result) - return -} - -// Update takes the representation of a limitRange to update. Returns the server's representation of the limitRange, and an error, if it occurs. -func (c *limitRanges) Update(limitRange *api.LimitRange) (result *api.LimitRange, err error) { - result = &api.LimitRange{} - err = c.r.Put().Namespace(c.ns).Resource("limitRanges").Name(limitRange.Name).Body(limitRange).Do().Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resource -func (c *limitRanges) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.r.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("limitRanges"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/namespaces.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/namespaces.go deleted file mode 100644 index 122bcba50..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/namespaces.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/watch" -) - -type NamespacesInterface interface { - Namespaces() NamespaceInterface -} - -type NamespaceInterface interface { - Create(item *api.Namespace) (*api.Namespace, error) - Get(name string) (result *api.Namespace, err error) - List(opts api.ListOptions) (*api.NamespaceList, error) - Delete(name string) error - Update(item *api.Namespace) (*api.Namespace, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Finalize(item *api.Namespace) (*api.Namespace, error) - Status(item *api.Namespace) (*api.Namespace, error) -} - -// namespaces implements NamespacesInterface -type namespaces struct { - r *Client -} - -// newNamespaces returns a namespaces object. 
-func newNamespaces(c *Client) *namespaces { - return &namespaces{r: c} -} - -// Create creates a new namespace. -func (c *namespaces) Create(namespace *api.Namespace) (*api.Namespace, error) { - result := &api.Namespace{} - err := c.r.Post().Resource("namespaces").Body(namespace).Do().Into(result) - return result, err -} - -// List lists all the namespaces in the cluster. -func (c *namespaces) List(opts api.ListOptions) (*api.NamespaceList, error) { - result := &api.NamespaceList{} - err := c.r.Get(). - Resource("namespaces"). - VersionedParams(&opts, api.ParameterCodec). - Do().Into(result) - return result, err -} - -// Update takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. -func (c *namespaces) Update(namespace *api.Namespace) (result *api.Namespace, err error) { - result = &api.Namespace{} - err = c.r.Put().Resource("namespaces").Name(namespace.Name).Body(namespace).Do().Into(result) - return -} - -// Finalize takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. -func (c *namespaces) Finalize(namespace *api.Namespace) (result *api.Namespace, err error) { - result = &api.Namespace{} - if len(namespace.ResourceVersion) == 0 { - err = fmt.Errorf("invalid update object, missing resource version: %v", namespace) - return - } - err = c.r.Put().Resource("namespaces").Name(namespace.Name).SubResource("finalize").Body(namespace).Do().Into(result) - return -} - -// Status takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. -func (c *namespaces) Status(namespace *api.Namespace) (result *api.Namespace, err error) { - result = &api.Namespace{} - if len(namespace.ResourceVersion) == 0 { - err = fmt.Errorf("invalid update object, missing resource version: %v", namespace) - return - } - err = c.r.Put().Resource("namespaces").Name(namespace.Name).SubResource("status").Body(namespace).Do().Into(result) - return -} - -// Get gets an existing namespace -func (c *namespaces) Get(name string) (*api.Namespace, error) { - result := &api.Namespace{} - err := c.r.Get().Resource("namespaces").Name(name).Do().Into(result) - return result, err -} - -// Delete deletes an existing namespace. -func (c *namespaces) Delete(name string) error { - return c.r.Delete().Resource("namespaces").Name(name).Do().Error() -} - -// Watch returns a watch.Interface that watches the requested namespaces. -func (c *namespaces) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.r.Get(). - Prefix("watch"). - Resource("namespaces"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/network_policys.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/network_policys.go deleted file mode 100644 index 0dc9d97be..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/network_policys.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
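Status and Finalize above both refuse objects without a ResourceVersion, so the usual pattern is Get first and then write back the fetched object. A minimal sketch against the NamespacesInterface deleted here:

    package example

    import (
    	"k8s.io/kubernetes/pkg/api"
    	client "k8s.io/kubernetes/pkg/client/unversioned"
    )

    // touchNamespaceStatus re-submits a namespace's status subresource. The Get
    // supplies the ResourceVersion that Status requires.
    func touchNamespaceStatus(c client.NamespacesInterface, name string) (*api.Namespace, error) {
    	ns, err := c.Namespaces().Get(name)
    	if err != nil {
    		return nil, err
    	}
    	return c.Namespaces().Status(ns)
    }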
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/watch" -) - -// NetworkPolicyNamespacer has methods to work with NetworkPolicy resources in a namespace -type NetworkPolicyNamespacer interface { - NetworkPolicies(namespace string) NetworkPolicyInterface -} - -// NetworkPolicyInterface exposes methods to work on NetworkPolicy resources. -type NetworkPolicyInterface interface { - List(opts api.ListOptions) (*extensions.NetworkPolicyList, error) - Get(name string) (*extensions.NetworkPolicy, error) - Create(networkPolicy *extensions.NetworkPolicy) (*extensions.NetworkPolicy, error) - Update(networkPolicy *extensions.NetworkPolicy) (*extensions.NetworkPolicy, error) - Delete(name string, options *api.DeleteOptions) error - Watch(opts api.ListOptions) (watch.Interface, error) -} - -// NetworkPolicies implements NetworkPolicyNamespacer interface -type NetworkPolicies struct { - r *ExtensionsClient - ns string -} - -// newNetworkPolicies returns a NetworkPolicies -func newNetworkPolicies(c *ExtensionsClient, namespace string) *NetworkPolicies { - return &NetworkPolicies{c, namespace} -} - -// List returns a list of networkPolicy that match the label and field selectors. -func (c *NetworkPolicies) List(opts api.ListOptions) (result *extensions.NetworkPolicyList, err error) { - result = &extensions.NetworkPolicyList{} - err = c.r.Get().Namespace(c.ns).Resource("networkpolicies").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get returns information about a particular networkPolicy. -func (c *NetworkPolicies) Get(name string) (result *extensions.NetworkPolicy, err error) { - result = &extensions.NetworkPolicy{} - err = c.r.Get().Namespace(c.ns).Resource("networkpolicies").Name(name).Do().Into(result) - return -} - -// Create creates a new networkPolicy. -func (c *NetworkPolicies) Create(networkPolicy *extensions.NetworkPolicy) (result *extensions.NetworkPolicy, err error) { - result = &extensions.NetworkPolicy{} - err = c.r.Post().Namespace(c.ns).Resource("networkpolicies").Body(networkPolicy).Do().Into(result) - return -} - -// Update updates an existing networkPolicy. -func (c *NetworkPolicies) Update(networkPolicy *extensions.NetworkPolicy) (result *extensions.NetworkPolicy, err error) { - result = &extensions.NetworkPolicy{} - err = c.r.Put().Namespace(c.ns).Resource("networkpolicies").Name(networkPolicy.Name).Body(networkPolicy).Do().Into(result) - return -} - -// Delete deletes a networkPolicy, returns error if one occurs. -func (c *NetworkPolicies) Delete(name string, options *api.DeleteOptions) (err error) { - return c.r.Delete().Namespace(c.ns).Resource("networkpolicies").Name(name).Body(options).Do().Error() -} - -// Watch returns a watch.Interface that watches the requested networkPolicy. -func (c *NetworkPolicies) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.r.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/nodes.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/nodes.go deleted file mode 100644 index 452a03f16..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/nodes.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/watch" -) - -type NodesInterface interface { - Nodes() NodeInterface -} - -type NodeInterface interface { - Get(name string) (result *api.Node, err error) - Create(node *api.Node) (*api.Node, error) - List(opts api.ListOptions) (*api.NodeList, error) - Delete(name string) error - Update(*api.Node) (*api.Node, error) - UpdateStatus(*api.Node) (*api.Node, error) - Watch(opts api.ListOptions) (watch.Interface, error) -} - -// nodes implements NodesInterface -type nodes struct { - r *Client -} - -// newNodes returns a nodes object. -func newNodes(c *Client) *nodes { - return &nodes{c} -} - -// resourceName returns node's URL resource name. -func (c *nodes) resourceName() string { - return "nodes" -} - -// Create creates a new node. -func (c *nodes) Create(node *api.Node) (*api.Node, error) { - result := &api.Node{} - err := c.r.Post().Resource(c.resourceName()).Body(node).Do().Into(result) - return result, err -} - -// List takes a selector, and returns the list of nodes that match that selector in the cluster. -func (c *nodes) List(opts api.ListOptions) (*api.NodeList, error) { - result := &api.NodeList{} - err := c.r.Get().Resource(c.resourceName()).VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return result, err -} - -// Get gets an existing node. -func (c *nodes) Get(name string) (*api.Node, error) { - result := &api.Node{} - err := c.r.Get().Resource(c.resourceName()).Name(name).Do().Into(result) - return result, err -} - -// Delete deletes an existing node. -func (c *nodes) Delete(name string) error { - return c.r.Delete().Resource(c.resourceName()).Name(name).Do().Error() -} - -// Update updates an existing node. -func (c *nodes) Update(node *api.Node) (*api.Node, error) { - result := &api.Node{} - err := c.r.Put().Resource(c.resourceName()).Name(node.Name).Body(node).Do().Into(result) - return result, err -} - -func (c *nodes) UpdateStatus(node *api.Node) (*api.Node, error) { - result := &api.Node{} - err := c.r.Put().Resource(c.resourceName()).Name(node.Name).SubResource("status").Body(node).Do().Into(result) - return result, err -} - -// Watch returns a watch.Interface that watches the requested nodes. -func (c *nodes) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.r.Get(). - Prefix("watch"). - Namespace(api.NamespaceAll). - Resource(c.resourceName()). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumeclaim.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumeclaim.go deleted file mode 100644 index bf5447d75..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumeclaim.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
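Nodes are cluster-scoped, so the client above takes no namespace (its Watch even pins api.NamespaceAll). A short sketch against the NodesInterface being removed:

    package example

    import (
    	"k8s.io/kubernetes/pkg/api"
    	client "k8s.io/kubernetes/pkg/client/unversioned"
    )

    // nodeNames returns the name of every node in the cluster.
    func nodeNames(c client.NodesInterface) ([]string, error) {
    	list, err := c.Nodes().List(api.ListOptions{})
    	if err != nil {
    		return nil, err
    	}
    	names := make([]string, 0, len(list.Items))
    	for _, n := range list.Items {
    		names = append(names, n.Name)
    	}
    	return names, nil
    }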
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/watch" -) - -// PersistentVolumeClaimsNamespacer has methods to work with PersistentVolumeClaim resources in a namespace -type PersistentVolumeClaimsNamespacer interface { - PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface -} - -// PersistentVolumeClaimInterface has methods to work with PersistentVolumeClaim resources. -type PersistentVolumeClaimInterface interface { - List(opts api.ListOptions) (*api.PersistentVolumeClaimList, error) - Get(name string) (*api.PersistentVolumeClaim, error) - Create(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) - Update(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) - UpdateStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) - Delete(name string) error - Watch(opts api.ListOptions) (watch.Interface, error) -} - -// persistentVolumeClaims implements PersistentVolumeClaimsNamespacer interface -type persistentVolumeClaims struct { - client *Client - namespace string -} - -// newPersistentVolumeClaims returns a PodsClient -func newPersistentVolumeClaims(c *Client, namespace string) *persistentVolumeClaims { - return &persistentVolumeClaims{c, namespace} -} - -func (c *persistentVolumeClaims) List(opts api.ListOptions) (result *api.PersistentVolumeClaimList, err error) { - result = &api.PersistentVolumeClaimList{} - - err = c.client.Get(). - Namespace(c.namespace). - Resource("persistentVolumeClaims"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - - return result, err -} - -func (c *persistentVolumeClaims) Get(name string) (result *api.PersistentVolumeClaim, err error) { - result = &api.PersistentVolumeClaim{} - err = c.client.Get().Namespace(c.namespace).Resource("persistentVolumeClaims").Name(name).Do().Into(result) - return -} - -func (c *persistentVolumeClaims) Create(claim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { - result = &api.PersistentVolumeClaim{} - err = c.client.Post().Namespace(c.namespace).Resource("persistentVolumeClaims").Body(claim).Do().Into(result) - return -} - -func (c *persistentVolumeClaims) Update(claim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { - result = &api.PersistentVolumeClaim{} - err = c.client.Put().Namespace(c.namespace).Resource("persistentVolumeClaims").Name(claim.Name).Body(claim).Do().Into(result) - return -} - -func (c *persistentVolumeClaims) UpdateStatus(claim *api.PersistentVolumeClaim) (result *api.PersistentVolumeClaim, err error) { - result = &api.PersistentVolumeClaim{} - err = c.client.Put().Namespace(c.namespace).Resource("persistentVolumeClaims").Name(claim.Name).SubResource("status").Body(claim).Do().Into(result) - return -} - -func (c *persistentVolumeClaims) Delete(name string) error { - return c.client.Delete().Namespace(c.namespace).Resource("persistentVolumeClaims").Name(name).Do().Error() -} - -func (c *persistentVolumeClaims) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). 
- Namespace(c.namespace). - Resource("persistentVolumeClaims"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumes.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumes.go deleted file mode 100644 index 2de17bb71..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/persistentvolumes.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/watch" -) - -type PersistentVolumesInterface interface { - PersistentVolumes() PersistentVolumeInterface -} - -// PersistentVolumeInterface has methods to work with PersistentVolume resources. -type PersistentVolumeInterface interface { - List(opts api.ListOptions) (*api.PersistentVolumeList, error) - Get(name string) (*api.PersistentVolume, error) - Create(volume *api.PersistentVolume) (*api.PersistentVolume, error) - Update(volume *api.PersistentVolume) (*api.PersistentVolume, error) - UpdateStatus(persistentVolume *api.PersistentVolume) (*api.PersistentVolume, error) - Delete(name string) error - Watch(opts api.ListOptions) (watch.Interface, error) -} - -// persistentVolumes implements PersistentVolumesInterface -type persistentVolumes struct { - client *Client -} - -func newPersistentVolumes(c *Client) *persistentVolumes { - return &persistentVolumes{c} -} - -func (c *persistentVolumes) List(opts api.ListOptions) (result *api.PersistentVolumeList, err error) { - result = &api.PersistentVolumeList{} - err = c.client.Get(). - Resource("persistentVolumes"). - VersionedParams(&opts, api.ParameterCodec). - Do(). 
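The PersistentVolumeClaim client above exposes no Patch, so updates are read-modify-write: Get, mutate, Update. A sketch; the Annotations field comes from the embedded ObjectMeta and is an assumption not visible in this hunk:

    package example

    import (
    	"k8s.io/kubernetes/pkg/api"
    	client "k8s.io/kubernetes/pkg/client/unversioned"
    )

    // annotatePVC adds (or overwrites) one annotation on a PersistentVolumeClaim
    // and writes the object back.
    func annotatePVC(c client.PersistentVolumeClaimsNamespacer, ns, name, key, value string) (*api.PersistentVolumeClaim, error) {
    	pvc, err := c.PersistentVolumeClaims(ns).Get(name)
    	if err != nil {
    		return nil, err
    	}
    	if pvc.Annotations == nil {
    		pvc.Annotations = map[string]string{}
    	}
    	pvc.Annotations[key] = value
    	return c.PersistentVolumeClaims(ns).Update(pvc)
    }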
- Into(result) - - return result, err -} - -func (c *persistentVolumes) Get(name string) (result *api.PersistentVolume, err error) { - result = &api.PersistentVolume{} - err = c.client.Get().Resource("persistentVolumes").Name(name).Do().Into(result) - return -} - -func (c *persistentVolumes) Create(volume *api.PersistentVolume) (result *api.PersistentVolume, err error) { - result = &api.PersistentVolume{} - err = c.client.Post().Resource("persistentVolumes").Body(volume).Do().Into(result) - return -} - -func (c *persistentVolumes) Update(volume *api.PersistentVolume) (result *api.PersistentVolume, err error) { - result = &api.PersistentVolume{} - err = c.client.Put().Resource("persistentVolumes").Name(volume.Name).Body(volume).Do().Into(result) - return -} - -func (c *persistentVolumes) UpdateStatus(volume *api.PersistentVolume) (result *api.PersistentVolume, err error) { - result = &api.PersistentVolume{} - err = c.client.Put().Resource("persistentVolumes").Name(volume.Name).SubResource("status").Body(volume).Do().Into(result) - return -} - -func (c *persistentVolumes) Delete(name string) error { - return c.client.Delete().Resource("persistentVolumes").Name(name).Do().Error() -} - -func (c *persistentVolumes) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Resource("persistentVolumes"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/pet_sets.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/pet_sets.go deleted file mode 100644 index 71b1ea021..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/pet_sets.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/watch" -) - -// PetSetNamespacer has methods to work with PetSet resources in a namespace -type PetSetNamespacer interface { - PetSets(namespace string) PetSetInterface -} - -// PetSetInterface exposes methods to work on PetSet resources. -type PetSetInterface interface { - List(opts api.ListOptions) (*apps.PetSetList, error) - Get(name string) (*apps.PetSet, error) - Create(petSet *apps.PetSet) (*apps.PetSet, error) - Update(petSet *apps.PetSet) (*apps.PetSet, error) - Delete(name string, options *api.DeleteOptions) error - Watch(opts api.ListOptions) (watch.Interface, error) - UpdateStatus(petSet *apps.PetSet) (*apps.PetSet, error) -} - -// petSet implements PetSetNamespacer interface -type petSet struct { - r *AppsClient - ns string -} - -// newPetSet returns a petSet -func newPetSet(c *AppsClient, namespace string) *petSet { - return &petSet{c, namespace} -} - -// List returns a list of petSet that match the label and field selectors. 
-func (c *petSet) List(opts api.ListOptions) (result *apps.PetSetList, err error) { - result = &apps.PetSetList{} - err = c.r.Get().Namespace(c.ns).Resource("petsets").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get returns information about a particular petSet. -func (c *petSet) Get(name string) (result *apps.PetSet, err error) { - result = &apps.PetSet{} - err = c.r.Get().Namespace(c.ns).Resource("petsets").Name(name).Do().Into(result) - return -} - -// Create creates a new petSet. -func (c *petSet) Create(petSet *apps.PetSet) (result *apps.PetSet, err error) { - result = &apps.PetSet{} - err = c.r.Post().Namespace(c.ns).Resource("petsets").Body(petSet).Do().Into(result) - return -} - -// Update updates an existing petSet. -func (c *petSet) Update(petSet *apps.PetSet) (result *apps.PetSet, err error) { - result = &apps.PetSet{} - err = c.r.Put().Namespace(c.ns).Resource("petsets").Name(petSet.Name).Body(petSet).Do().Into(result) - return -} - -// Delete deletes a petSet, returns error if one occurs. -func (c *petSet) Delete(name string, options *api.DeleteOptions) (err error) { - return c.r.Delete().Namespace(c.ns).Resource("petsets").Name(name).Body(options).Do().Error() -} - -// Watch returns a watch.Interface that watches the requested petSet. -func (c *petSet) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.r.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("petsets"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// UpdateStatus takes the name of the petSet and the new status. Returns the server's representation of the petSet, and an error, if it occurs. -func (c *petSet) UpdateStatus(petSet *apps.PetSet) (result *apps.PetSet, err error) { - result = &apps.PetSet{} - err = c.r.Put().Namespace(c.ns).Resource("petsets").Name(petSet.Name).SubResource("status").Body(petSet).Do().Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_disruption_budgets.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_disruption_budgets.go deleted file mode 100644 index 14f373f37..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_disruption_budgets.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/policy" - "k8s.io/kubernetes/pkg/watch" -) - -// PodDisruptionBudgetNamespacer has methods to work with PodDisruptionBudget resources in a namespace -type PodDisruptionBudgetNamespacer interface { - PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface -} - -// PodDisruptionBudgetInterface exposes methods to work on PodDisruptionBudget resources. 
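Update and UpdateStatus above hit different endpoints: the main resource versus its status subresource. A sketch making that split visible, assuming a PetSetNamespacer (historically the AppsClient):

    package example

    import (
    	"k8s.io/kubernetes/pkg/apis/apps"
    	client "k8s.io/kubernetes/pkg/client/unversioned"
    )

    // resyncPetSetStatus fetches a PetSet and writes its current status back
    // through the dedicated status subresource.
    func resyncPetSetStatus(c client.PetSetNamespacer, ns, name string) (*apps.PetSet, error) {
    	ps, err := c.PetSets(ns).Get(name)
    	if err != nil {
    		return nil, err
    	}
    	return c.PetSets(ns).UpdateStatus(ps)
    }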
-type PodDisruptionBudgetInterface interface { - List(opts api.ListOptions) (*policy.PodDisruptionBudgetList, error) - Get(name string) (*policy.PodDisruptionBudget, error) - Create(podDisruptionBudget *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) - Update(podDisruptionBudget *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) - Delete(name string, options *api.DeleteOptions) error - Watch(opts api.ListOptions) (watch.Interface, error) - UpdateStatus(podDisruptionBudget *policy.PodDisruptionBudget) (*policy.PodDisruptionBudget, error) -} - -// podDisruptionBudget implements PodDisruptionBudgetNamespacer interface -type podDisruptionBudget struct { - r *PolicyClient - ns string -} - -// newPodDisruptionBudget returns a podDisruptionBudget -func newPodDisruptionBudget(c *PolicyClient, namespace string) *podDisruptionBudget { - return &podDisruptionBudget{c, namespace} -} - -// List returns a list of podDisruptionBudget that match the label and field selectors. -func (c *podDisruptionBudget) List(opts api.ListOptions) (result *policy.PodDisruptionBudgetList, err error) { - result = &policy.PodDisruptionBudgetList{} - err = c.r.Get().Namespace(c.ns).Resource("poddisruptionbudgets").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get returns information about a particular podDisruptionBudget. -func (c *podDisruptionBudget) Get(name string) (result *policy.PodDisruptionBudget, err error) { - result = &policy.PodDisruptionBudget{} - err = c.r.Get().Namespace(c.ns).Resource("poddisruptionbudgets").Name(name).Do().Into(result) - return -} - -// Create creates a new podDisruptionBudget. -func (c *podDisruptionBudget) Create(podDisruptionBudget *policy.PodDisruptionBudget) (result *policy.PodDisruptionBudget, err error) { - result = &policy.PodDisruptionBudget{} - err = c.r.Post().Namespace(c.ns).Resource("poddisruptionbudgets").Body(podDisruptionBudget).Do().Into(result) - return -} - -// Update updates an existing podDisruptionBudget. -func (c *podDisruptionBudget) Update(podDisruptionBudget *policy.PodDisruptionBudget) (result *policy.PodDisruptionBudget, err error) { - result = &policy.PodDisruptionBudget{} - err = c.r.Put().Namespace(c.ns).Resource("poddisruptionbudgets").Name(podDisruptionBudget.Name).Body(podDisruptionBudget).Do().Into(result) - return -} - -// Delete deletes a podDisruptionBudget, returns error if one occurs. -func (c *podDisruptionBudget) Delete(name string, options *api.DeleteOptions) (err error) { - return c.r.Delete().Namespace(c.ns).Resource("poddisruptionbudgets").Name(name).Body(options).Do().Error() -} - -// Watch returns a watch.Interface that watches the requested podDisruptionBudget. -func (c *podDisruptionBudget) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.r.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// UpdateStatus takes the name of the podDisruptionBudget and the new status. Returns the server's representation of the podDisruptionBudget, and an error, if it occurs. 
-func (c *podDisruptionBudget) UpdateStatus(podDisruptionBudget *policy.PodDisruptionBudget) (result *policy.PodDisruptionBudget, err error) { - result = &policy.PodDisruptionBudget{} - err = c.r.Put().Namespace(c.ns).Resource("poddisruptionbudgets").Name(podDisruptionBudget.Name).SubResource("status").Body(podDisruptionBudget).Do().Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_templates.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_templates.go deleted file mode 100644 index ed5b733c6..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/pod_templates.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/watch" -) - -// PodTemplatesNamespacer has methods to work with PodTemplate resources in a namespace -type PodTemplatesNamespacer interface { - PodTemplates(namespace string) PodTemplateInterface -} - -// PodTemplateInterface has methods to work with PodTemplate resources. -type PodTemplateInterface interface { - List(opts api.ListOptions) (*api.PodTemplateList, error) - Get(name string) (*api.PodTemplate, error) - Delete(name string, options *api.DeleteOptions) error - Create(podTemplate *api.PodTemplate) (*api.PodTemplate, error) - Update(podTemplate *api.PodTemplate) (*api.PodTemplate, error) - Watch(opts api.ListOptions) (watch.Interface, error) -} - -// podTemplates implements PodTemplatesNamespacer interface -type podTemplates struct { - r *Client - ns string -} - -// newPodTemplates returns a podTemplates -func newPodTemplates(c *Client, namespace string) *podTemplates { - return &podTemplates{ - r: c, - ns: namespace, - } -} - -// List takes label and field selectors, and returns the list of podTemplates that match those selectors. -func (c *podTemplates) List(opts api.ListOptions) (result *api.PodTemplateList, err error) { - result = &api.PodTemplateList{} - err = c.r.Get().Namespace(c.ns).Resource("podTemplates").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get takes the name of the podTemplate, and returns the corresponding PodTemplate object, and an error if it occurs -func (c *podTemplates) Get(name string) (result *api.PodTemplate, err error) { - result = &api.PodTemplate{} - err = c.r.Get().Namespace(c.ns).Resource("podTemplates").Name(name).Do().Into(result) - return -} - -// Delete takes the name of the podTemplate, and returns an error if one occurs -func (c *podTemplates) Delete(name string, options *api.DeleteOptions) error { - return c.r.Delete().Namespace(c.ns).Resource("podTemplates").Name(name).Body(options).Do().Error() -} - -// Create takes the representation of a podTemplate. Returns the server's representation of the podTemplate, and an error, if it occurs. 
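The namespaced clients in these files also list across every namespace when handed the empty namespace (api.NamespaceAll); that behaviour is an assumption about the API server, not something visible in this hunk. A sketch using the PodDisruptionBudgetNamespacer:

    package example

    import (
    	"k8s.io/kubernetes/pkg/api"
    	client "k8s.io/kubernetes/pkg/client/unversioned"
    )

    // countDisruptionBudgets returns how many PodDisruptionBudgets exist in the
    // whole cluster by listing with the all-namespaces marker.
    func countDisruptionBudgets(c client.PodDisruptionBudgetNamespacer) (int, error) {
    	list, err := c.PodDisruptionBudgets(api.NamespaceAll).List(api.ListOptions{})
    	if err != nil {
    		return 0, err
    	}
    	return len(list.Items), nil
    }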
-func (c *podTemplates) Create(podTemplate *api.PodTemplate) (result *api.PodTemplate, err error) { - result = &api.PodTemplate{} - err = c.r.Post().Namespace(c.ns).Resource("podTemplates").Body(podTemplate).Do().Into(result) - return -} - -// Update takes the representation of a podTemplate to update. Returns the server's representation of the podTemplate, and an error, if it occurs. -func (c *podTemplates) Update(podTemplate *api.PodTemplate) (result *api.PodTemplate, err error) { - result = &api.PodTemplate{} - err = c.r.Put().Namespace(c.ns).Resource("podTemplates").Name(podTemplate.Name).Body(podTemplate).Do().Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podTemplates. -func (c *podTemplates) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.r.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("podTemplates"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/pods.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/pods.go deleted file mode 100644 index 426d3ee8e..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/pods.go +++ /dev/null @@ -1,115 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/watch" -) - -// PodsNamespacer has methods to work with Pod resources in a namespace -type PodsNamespacer interface { - Pods(namespace string) PodInterface -} - -// PodInterface has methods to work with Pod resources. -type PodInterface interface { - List(opts api.ListOptions) (*api.PodList, error) - Get(name string) (*api.Pod, error) - Delete(name string, options *api.DeleteOptions) error - Create(pod *api.Pod) (*api.Pod, error) - Update(pod *api.Pod) (*api.Pod, error) - Watch(opts api.ListOptions) (watch.Interface, error) - Bind(binding *api.Binding) error - UpdateStatus(pod *api.Pod) (*api.Pod, error) - GetLogs(name string, opts *api.PodLogOptions) *restclient.Request -} - -// pods implements PodsNamespacer interface -type pods struct { - r *Client - ns string -} - -// newPods returns a pods -func newPods(c *Client, namespace string) *pods { - return &pods{ - r: c, - ns: namespace, - } -} - -// List takes label and field selectors, and returns the list of pods that match those selectors. 
-func (c *pods) List(opts api.ListOptions) (result *api.PodList, err error) { - result = &api.PodList{} - err = c.r.Get().Namespace(c.ns).Resource("pods").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get takes the name of the pod, and returns the corresponding Pod object, and an error if it occurs -func (c *pods) Get(name string) (result *api.Pod, err error) { - result = &api.Pod{} - err = c.r.Get().Namespace(c.ns).Resource("pods").Name(name).Do().Into(result) - return -} - -// Delete takes the name of the pod, and returns an error if one occurs -func (c *pods) Delete(name string, options *api.DeleteOptions) error { - return c.r.Delete().Namespace(c.ns).Resource("pods").Name(name).Body(options).Do().Error() -} - -// Create takes the representation of a pod. Returns the server's representation of the pod, and an error, if it occurs. -func (c *pods) Create(pod *api.Pod) (result *api.Pod, err error) { - result = &api.Pod{} - err = c.r.Post().Namespace(c.ns).Resource("pods").Body(pod).Do().Into(result) - return -} - -// Update takes the representation of a pod to update. Returns the server's representation of the pod, and an error, if it occurs. -func (c *pods) Update(pod *api.Pod) (result *api.Pod, err error) { - result = &api.Pod{} - err = c.r.Put().Namespace(c.ns).Resource("pods").Name(pod.Name).Body(pod).Do().Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested pods. -func (c *pods) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.r.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored). -func (c *pods) Bind(binding *api.Binding) error { - return c.r.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).SubResource("binding").Body(binding).Do().Error() -} - -// UpdateStatus takes the name of the pod and the new status. Returns the server's representation of the pod, and an error, if it occurs. -func (c *pods) UpdateStatus(pod *api.Pod) (result *api.Pod, err error) { - result = &api.Pod{} - err = c.r.Put().Namespace(c.ns).Resource("pods").Name(pod.Name).SubResource("status").Body(pod).Do().Into(result) - return -} - -// Get constructs a request for getting the logs for a pod -func (c *pods) GetLogs(name string, opts *api.PodLogOptions) *restclient.Request { - return c.r.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, api.ParameterCodec) -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/podsecuritypolicy.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/podsecuritypolicy.go deleted file mode 100644 index 356d913db..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/podsecuritypolicy.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/watch" -) - -type PodSecurityPoliciesInterface interface { - PodSecurityPolicies() PodSecurityPolicyInterface -} - -type PodSecurityPolicyInterface interface { - Get(name string) (result *extensions.PodSecurityPolicy, err error) - Create(psp *extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) - List(opts api.ListOptions) (*extensions.PodSecurityPolicyList, error) - Delete(name string) error - Update(*extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) - Watch(opts api.ListOptions) (watch.Interface, error) -} - -// podSecurityPolicy implements PodSecurityPolicyInterface -type podSecurityPolicy struct { - client *ExtensionsClient -} - -// newPodSecurityPolicy returns a podSecurityPolicy object. -func newPodSecurityPolicy(c *ExtensionsClient) *podSecurityPolicy { - return &podSecurityPolicy{c} -} - -func (s *podSecurityPolicy) Create(psp *extensions.PodSecurityPolicy) (*extensions.PodSecurityPolicy, error) { - result := &extensions.PodSecurityPolicy{} - err := s.client.Post(). - Resource("podsecuritypolicies"). - Body(psp). - Do(). - Into(result) - - return result, err -} - -// List returns a list of PodSecurityPolicies matching the selectors. -func (s *podSecurityPolicy) List(opts api.ListOptions) (*extensions.PodSecurityPolicyList, error) { - result := &extensions.PodSecurityPolicyList{} - - err := s.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - - return result, err -} - -// Get returns the given PodSecurityPolicy, or an error. -func (s *podSecurityPolicy) Get(name string) (*extensions.PodSecurityPolicy, error) { - result := &extensions.PodSecurityPolicy{} - err := s.client.Get(). - Resource("podsecuritypolicies"). - Name(name). - Do(). - Into(result) - - return result, err -} - -// Watch starts watching for PodSecurityPolicies matching the given selectors. -func (s *podSecurityPolicy) Watch(opts api.ListOptions) (watch.Interface, error) { - return s.client.Get(). - Prefix("watch"). - Resource("podsecuritypolicies"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -func (s *podSecurityPolicy) Delete(name string) error { - return s.client.Delete(). - Resource("podsecuritypolicies"). - Name(name). - Do(). - Error() -} - -func (s *podSecurityPolicy) Update(psp *extensions.PodSecurityPolicy) (result *extensions.PodSecurityPolicy, err error) { - result = &extensions.PodSecurityPolicy{} - err = s.client.Put(). - Resource("podsecuritypolicies"). - Name(psp.Name). - Body(psp). - Do(). - Into(result) - - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/policy.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/policy.go deleted file mode 100644 index 8b06ce275..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/policy.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/policy" - "k8s.io/kubernetes/pkg/client/restclient" -) - -type PolicyInterface interface { - PodDisruptionBudgetNamespacer -} - -// PolicyClient is used to interact with Kubernetes batch features. -type PolicyClient struct { - *restclient.RESTClient -} - -func (c *PolicyClient) PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface { - return newPodDisruptionBudget(c, namespace) -} - -func NewPolicy(c *restclient.Config) (*PolicyClient, error) { - config := *c - if err := setPolicyDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &PolicyClient{client}, nil -} - -func NewPolicyOrDie(c *restclient.Config) *PolicyClient { - client, err := NewPolicy(c) - if err != nil { - panic(err) - } - return client -} - -func setPolicyDefaults(config *restclient.Config) error { - g, err := registered.Group(policy.GroupName) - if err != nil { - return err - } - config.APIPath = defaultAPIPath - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - // TODO: Unconditionally set the config.Version, until we fix the config. - //if config.Version == "" { - copyGroupVersion := g.GroupVersion - config.GroupVersion = ©GroupVersion - //} - - config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) - config.NegotiatedSerializer = api.Codecs - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/rbac.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/rbac.go deleted file mode 100644 index 76ec392c3..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/rbac.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/rbac" - "k8s.io/kubernetes/pkg/client/restclient" -) - -// Interface holds the methods for clients of Kubernetes to allow mock testing. 
-type RbacInterface interface { - RoleBindingsNamespacer - RolesNamespacer - ClusterRoleBindings - ClusterRoles -} - -type RbacClient struct { - *restclient.RESTClient -} - -func (c *RbacClient) RoleBindings(namespace string) RoleBindingInterface { - return newRoleBindings(c, namespace) -} - -func (c *RbacClient) Roles(namespace string) RoleInterface { - return newRoles(c, namespace) -} - -func (c *RbacClient) ClusterRoleBindings() ClusterRoleBindingInterface { - return newClusterRoleBindings(c) -} - -func (c *RbacClient) ClusterRoles() ClusterRoleInterface { - return newClusterRoles(c) -} - -// NewRbac creates a new RbacClient for the given config. -func NewRbac(c *restclient.Config) (*RbacClient, error) { - config := *c - if err := setRbacDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &RbacClient{client}, nil -} - -// NewRbacOrDie creates a new RbacClient for the given config and -// panics if there is an error in the config. -func NewRbacOrDie(c *restclient.Config) *RbacClient { - client, err := NewRbac(c) - if err != nil { - panic(err) - } - return client -} - -func setRbacDefaults(config *restclient.Config) error { - // if rbac group is not registered, return an error - g, err := registered.Group(rbac.GroupName) - if err != nil { - return err - } - config.APIPath = defaultAPIPath - if config.UserAgent == "" { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - - // TODO: Unconditionally set the config.Version, until we fix the config. - //if config.Version == "" { - copyGroupVersion := g.GroupVersion - config.GroupVersion = ©GroupVersion - //} - - config.Codec = api.Codecs.LegacyCodec(*config.GroupVersion) - config.NegotiatedSerializer = api.Codecs - if config.QPS == 0 { - config.QPS = 5 - } - if config.Burst == 0 { - config.Burst = 10 - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/replica_sets.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/replica_sets.go deleted file mode 100644 index be9284084..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/replica_sets.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/watch" -) - -// ReplicaSetsNamespacer has methods to work with ReplicaSet resources in a namespace -type ReplicaSetsNamespacer interface { - ReplicaSets(namespace string) ReplicaSetInterface -} - -// ReplicaSetInterface has methods to work with ReplicaSet resources. 
-type ReplicaSetInterface interface { - List(opts api.ListOptions) (*extensions.ReplicaSetList, error) - Get(name string) (*extensions.ReplicaSet, error) - Create(ctrl *extensions.ReplicaSet) (*extensions.ReplicaSet, error) - Update(ctrl *extensions.ReplicaSet) (*extensions.ReplicaSet, error) - UpdateStatus(ctrl *extensions.ReplicaSet) (*extensions.ReplicaSet, error) - Delete(name string, options *api.DeleteOptions) error - Watch(opts api.ListOptions) (watch.Interface, error) -} - -// replicaSets implements ReplicaSetsNamespacer interface -type replicaSets struct { - client *ExtensionsClient - ns string -} - -// newReplicaSets returns a ReplicaSetClient -func newReplicaSets(c *ExtensionsClient, namespace string) *replicaSets { - return &replicaSets{c, namespace} -} - -// List takes a selector, and returns the list of ReplicaSets that match that selector. -func (c *replicaSets) List(opts api.ListOptions) (result *extensions.ReplicaSetList, err error) { - result = &extensions.ReplicaSetList{} - err = c.client.Get().Namespace(c.ns).Resource("replicasets").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get returns information about a particular ReplicaSet. -func (c *replicaSets) Get(name string) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Get().Namespace(c.ns).Resource("replicasets").Name(name).Do().Into(result) - return -} - -// Create creates a new ReplicaSet. -func (c *replicaSets) Create(rs *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Post().Namespace(c.ns).Resource("replicasets").Body(rs).Do().Into(result) - return -} - -// Update updates an existing ReplicaSet. -func (c *replicaSets) Update(rs *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Put().Namespace(c.ns).Resource("replicasets").Name(rs.Name).Body(rs).Do().Into(result) - return -} - -// UpdateStatus updates an existing ReplicaSet status -func (c *replicaSets) UpdateStatus(rs *extensions.ReplicaSet) (result *extensions.ReplicaSet, err error) { - result = &extensions.ReplicaSet{} - err = c.client.Put().Namespace(c.ns).Resource("replicasets").Name(rs.Name).SubResource("status").Body(rs).Do().Into(result) - return -} - -// Delete deletes an existing ReplicaSet. -func (c *replicaSets) Delete(name string, options *api.DeleteOptions) (err error) { - return c.client.Delete().Namespace(c.ns).Resource("replicasets").Name(name).Body(options).Do().Error() -} - -// Watch returns a watch.Interface that watches the requested ReplicaSets. -func (c *replicaSets) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/replication_controllers.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/replication_controllers.go deleted file mode 100644 index f237a76ac..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/replication_controllers.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/watch" -) - -// ReplicationControllersNamespacer has methods to work with ReplicationController resources in a namespace -type ReplicationControllersNamespacer interface { - ReplicationControllers(namespace string) ReplicationControllerInterface -} - -// ReplicationControllerInterface has methods to work with ReplicationController resources. -type ReplicationControllerInterface interface { - List(opts api.ListOptions) (*api.ReplicationControllerList, error) - Get(name string) (*api.ReplicationController, error) - Create(ctrl *api.ReplicationController) (*api.ReplicationController, error) - Update(ctrl *api.ReplicationController) (*api.ReplicationController, error) - UpdateStatus(ctrl *api.ReplicationController) (*api.ReplicationController, error) - Delete(name string) error - Watch(opts api.ListOptions) (watch.Interface, error) -} - -// replicationControllers implements ReplicationControllersNamespacer interface -type replicationControllers struct { - r *Client - ns string -} - -// newReplicationControllers returns a PodsClient -func newReplicationControllers(c *Client, namespace string) *replicationControllers { - return &replicationControllers{c, namespace} -} - -// List takes a selector, and returns the list of replication controllers that match that selector. -func (c *replicationControllers) List(opts api.ListOptions) (result *api.ReplicationControllerList, err error) { - result = &api.ReplicationControllerList{} - err = c.r.Get().Namespace(c.ns).Resource("replicationControllers").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get returns information about a particular replication controller. -func (c *replicationControllers) Get(name string) (result *api.ReplicationController, err error) { - result = &api.ReplicationController{} - err = c.r.Get().Namespace(c.ns).Resource("replicationControllers").Name(name).Do().Into(result) - return -} - -// Create creates a new replication controller. -func (c *replicationControllers) Create(controller *api.ReplicationController) (result *api.ReplicationController, err error) { - result = &api.ReplicationController{} - err = c.r.Post().Namespace(c.ns).Resource("replicationControllers").Body(controller).Do().Into(result) - return -} - -// Update updates an existing replication controller. 
-func (c *replicationControllers) Update(controller *api.ReplicationController) (result *api.ReplicationController, err error) { - result = &api.ReplicationController{} - err = c.r.Put().Namespace(c.ns).Resource("replicationControllers").Name(controller.Name).Body(controller).Do().Into(result) - return -} - -// UpdateStatus updates an existing replication controller status -func (c *replicationControllers) UpdateStatus(controller *api.ReplicationController) (result *api.ReplicationController, err error) { - result = &api.ReplicationController{} - err = c.r.Put().Namespace(c.ns).Resource("replicationControllers").Name(controller.Name).SubResource("status").Body(controller).Do().Into(result) - return -} - -// Delete deletes an existing replication controller. -func (c *replicationControllers) Delete(name string) error { - return c.r.Delete().Namespace(c.ns).Resource("replicationControllers").Name(name).Do().Error() -} - -// Watch returns a watch.Interface that watches the requested controllers. -func (c *replicationControllers) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.r.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("replicationControllers"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/resource_quotas.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/resource_quotas.go deleted file mode 100644 index acfd8ddb3..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/resource_quotas.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/watch" -) - -// ResourceQuotasNamespacer has methods to work with ResourceQuota resources in a namespace -type ResourceQuotasNamespacer interface { - ResourceQuotas(namespace string) ResourceQuotaInterface -} - -// ResourceQuotaInterface has methods to work with ResourceQuota resources. -type ResourceQuotaInterface interface { - List(opts api.ListOptions) (*api.ResourceQuotaList, error) - Get(name string) (*api.ResourceQuota, error) - Delete(name string) error - Create(resourceQuota *api.ResourceQuota) (*api.ResourceQuota, error) - Update(resourceQuota *api.ResourceQuota) (*api.ResourceQuota, error) - UpdateStatus(resourceQuota *api.ResourceQuota) (*api.ResourceQuota, error) - Watch(opts api.ListOptions) (watch.Interface, error) -} - -// resourceQuotas implements ResourceQuotasNamespacer interface -type resourceQuotas struct { - r *Client - ns string -} - -// newResourceQuotas returns a resourceQuotas -func newResourceQuotas(c *Client, namespace string) *resourceQuotas { - return &resourceQuotas{ - r: c, - ns: namespace, - } -} - -// List takes a selector, and returns the list of resourceQuotas that match that selector. 
-func (c *resourceQuotas) List(opts api.ListOptions) (result *api.ResourceQuotaList, err error) { - result = &api.ResourceQuotaList{} - err = c.r.Get().Namespace(c.ns).Resource("resourceQuotas").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get takes the name of the resourceQuota, and returns the corresponding ResourceQuota object, and an error if it occurs -func (c *resourceQuotas) Get(name string) (result *api.ResourceQuota, err error) { - result = &api.ResourceQuota{} - err = c.r.Get().Namespace(c.ns).Resource("resourceQuotas").Name(name).Do().Into(result) - return -} - -// Delete takes the name of the resourceQuota, and returns an error if one occurs -func (c *resourceQuotas) Delete(name string) error { - return c.r.Delete().Namespace(c.ns).Resource("resourceQuotas").Name(name).Do().Error() -} - -// Create takes the representation of a resourceQuota. Returns the server's representation of the resourceQuota, and an error, if it occurs. -func (c *resourceQuotas) Create(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { - result = &api.ResourceQuota{} - err = c.r.Post().Namespace(c.ns).Resource("resourceQuotas").Body(resourceQuota).Do().Into(result) - return -} - -// Update takes the representation of a resourceQuota to update spec. Returns the server's representation of the resourceQuota, and an error, if it occurs. -func (c *resourceQuotas) Update(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { - result = &api.ResourceQuota{} - err = c.r.Put().Namespace(c.ns).Resource("resourceQuotas").Name(resourceQuota.Name).Body(resourceQuota).Do().Into(result) - return -} - -// Status takes the representation of a resourceQuota to update status. Returns the server's representation of the resourceQuota, and an error, if it occurs. -func (c *resourceQuotas) UpdateStatus(resourceQuota *api.ResourceQuota) (result *api.ResourceQuota, err error) { - result = &api.ResourceQuota{} - err = c.r.Put().Namespace(c.ns).Resource("resourceQuotas").Name(resourceQuota.Name).SubResource("status").Body(resourceQuota).Do().Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resource -func (c *resourceQuotas) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.r.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("resourceQuotas"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/rolebindings.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/rolebindings.go deleted file mode 100644 index a43815c55..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/rolebindings.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/rbac" - "k8s.io/kubernetes/pkg/watch" -) - -// RoleBindingsNamespacer has methods to work with RoleBinding resources in a namespace -type RoleBindingsNamespacer interface { - RoleBindings(namespace string) RoleBindingInterface -} - -// RoleBindingInterface has methods to work with RoleBinding resources. -type RoleBindingInterface interface { - List(opts api.ListOptions) (*rbac.RoleBindingList, error) - Get(name string) (*rbac.RoleBinding, error) - Delete(name string, options *api.DeleteOptions) error - Create(roleBinding *rbac.RoleBinding) (*rbac.RoleBinding, error) - Update(roleBinding *rbac.RoleBinding) (*rbac.RoleBinding, error) - Watch(opts api.ListOptions) (watch.Interface, error) -} - -// roleBindings implements RoleBindingsNamespacer interface -type roleBindings struct { - client *RbacClient - ns string -} - -// newRoleBindings returns a roleBindings -func newRoleBindings(c *RbacClient, namespace string) *roleBindings { - return &roleBindings{ - client: c, - ns: namespace, - } -} - -// List takes label and field selectors, and returns the list of roleBindings that match those selectors. -func (c *roleBindings) List(opts api.ListOptions) (result *rbac.RoleBindingList, err error) { - result = &rbac.RoleBindingList{} - err = c.client.Get().Namespace(c.ns).Resource("rolebindings").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get takes the name of the roleBinding, and returns the corresponding RoleBinding object, and an error if it occurs -func (c *roleBindings) Get(name string) (result *rbac.RoleBinding, err error) { - result = &rbac.RoleBinding{} - err = c.client.Get().Namespace(c.ns).Resource("rolebindings").Name(name).Do().Into(result) - return -} - -// Delete takes the name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *roleBindings) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete().Namespace(c.ns).Resource("rolebindings").Name(name).Body(options).Do().Error() -} - -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if it occurs. -func (c *roleBindings) Create(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) { - result = &rbac.RoleBinding{} - err = c.client.Post().Namespace(c.ns).Resource("rolebindings").Body(roleBinding).Do().Into(result) - return -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if it occurs. -func (c *roleBindings) Update(roleBinding *rbac.RoleBinding) (result *rbac.RoleBinding, err error) { - result = &rbac.RoleBinding{} - err = c.client.Put().Namespace(c.ns).Resource("rolebindings").Name(roleBinding.Name).Body(roleBinding).Do().Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *roleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, api.ParameterCodec). 
- Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/roles.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/roles.go deleted file mode 100644 index 29aee1bae..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/roles.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/rbac" - "k8s.io/kubernetes/pkg/watch" -) - -// RolesNamespacer has methods to work with Role resources in a namespace -type RolesNamespacer interface { - Roles(namespace string) RoleInterface -} - -// RoleInterface has methods to work with Role resources. -type RoleInterface interface { - List(opts api.ListOptions) (*rbac.RoleList, error) - Get(name string) (*rbac.Role, error) - Delete(name string, options *api.DeleteOptions) error - Create(role *rbac.Role) (*rbac.Role, error) - Update(role *rbac.Role) (*rbac.Role, error) - Watch(opts api.ListOptions) (watch.Interface, error) -} - -// roles implements RolesNamespacer interface -type roles struct { - client *RbacClient - ns string -} - -// newRoles returns a roles -func newRoles(c *RbacClient, namespace string) *roles { - return &roles{ - client: c, - ns: namespace, - } -} - -// List takes label and field selectors, and returns the list of roles that match those selectors. -func (c *roles) List(opts api.ListOptions) (result *rbac.RoleList, err error) { - result = &rbac.RoleList{} - err = c.client.Get().Namespace(c.ns).Resource("roles").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get takes the name of the role, and returns the corresponding Role object, and an error if it occurs -func (c *roles) Get(name string) (result *rbac.Role, err error) { - result = &rbac.Role{} - err = c.client.Get().Namespace(c.ns).Resource("roles").Name(name).Do().Into(result) - return -} - -// Delete takes the name of the role and deletes it. Returns an error if one occurs. -func (c *roles) Delete(name string, options *api.DeleteOptions) error { - return c.client.Delete().Namespace(c.ns).Resource("roles").Name(name).Body(options).Do().Error() -} - -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if it occurs. -func (c *roles) Create(role *rbac.Role) (result *rbac.Role, err error) { - result = &rbac.Role{} - err = c.client.Post().Namespace(c.ns).Resource("roles").Body(role).Do().Into(result) - return -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if it occurs. -func (c *roles) Update(role *rbac.Role) (result *rbac.Role, err error) { - result = &rbac.Role{} - err = c.client.Put().Namespace(c.ns).Resource("roles").Name(role.Name).Body(role).Do().Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roles. 
-func (c *roles) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.client.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/scale.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/scale.go deleted file mode 100644 index 705f6048b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/scale.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/extensions" -) - -type ScaleNamespacer interface { - Scales(namespace string) ScaleInterface -} - -// ScaleInterface has methods to work with Scale (sub)resources. -type ScaleInterface interface { - Get(string, string) (*extensions.Scale, error) - Update(string, *extensions.Scale) (*extensions.Scale, error) -} - -// horizontalPodAutoscalers implements HorizontalPodAutoscalersNamespacer interface -type scales struct { - client *ExtensionsClient - ns string -} - -// newHorizontalPodAutoscalers returns a horizontalPodAutoscalers -func newScales(c *ExtensionsClient, namespace string) *scales { - return &scales{ - client: c, - ns: namespace, - } -} - -// Get takes the reference to scale subresource and returns the subresource or error, if one occurs. -func (c *scales) Get(kind string, name string) (result *extensions.Scale, err error) { - result = &extensions.Scale{} - - // TODO this method needs to take a proper unambiguous kind - fullyQualifiedKind := unversioned.GroupVersionKind{Kind: kind} - resource, _ := meta.KindToResource(fullyQualifiedKind) - - err = c.client.Get().Namespace(c.ns).Resource(resource.Resource).Name(name).SubResource("scale").Do().Into(result) - return -} - -func (c *scales) Update(kind string, scale *extensions.Scale) (result *extensions.Scale, err error) { - result = &extensions.Scale{} - - // TODO this method needs to take a proper unambiguous kind - fullyQualifiedKind := unversioned.GroupVersionKind{Kind: kind} - resource, _ := meta.KindToResource(fullyQualifiedKind) - - err = c.client.Put(). - Namespace(scale.Namespace). - Resource(resource.Resource). - Name(scale.Name). - SubResource("scale"). - Body(scale). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/scheduledjobs.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/scheduledjobs.go deleted file mode 100644 index d2b83fce2..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/scheduledjobs.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/watch" -) - -// ScheduledJobsNamespacer has methods to work with ScheduledJob resources in a namespace -type ScheduledJobsNamespacer interface { - ScheduledJobs(namespace string) ScheduledJobInterface -} - -// ScheduledJobInterface exposes methods to work on ScheduledJob resources. -type ScheduledJobInterface interface { - List(opts api.ListOptions) (*batch.ScheduledJobList, error) - Get(name string) (*batch.ScheduledJob, error) - Create(scheduledJob *batch.ScheduledJob) (*batch.ScheduledJob, error) - Update(scheduledJob *batch.ScheduledJob) (*batch.ScheduledJob, error) - Delete(name string, options *api.DeleteOptions) error - Watch(opts api.ListOptions) (watch.Interface, error) - UpdateStatus(scheduledJob *batch.ScheduledJob) (*batch.ScheduledJob, error) -} - -// scheduledJobs implements ScheduledJobsNamespacer interface -type scheduledJobs struct { - r *BatchClient - ns string -} - -// newScheduledJobs returns a scheduledJobs -func newScheduledJobs(c *BatchClient, namespace string) *scheduledJobs { - return &scheduledJobs{c, namespace} -} - -// Ensure statically that scheduledJobs implements ScheduledJobInterface. -var _ ScheduledJobInterface = &scheduledJobs{} - -// List returns a list of scheduled jobs that match the label and field selectors. -func (c *scheduledJobs) List(opts api.ListOptions) (result *batch.ScheduledJobList, err error) { - result = &batch.ScheduledJobList{} - err = c.r.Get().Namespace(c.ns).Resource("scheduledJobs").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get returns information about a particular scheduled job. -func (c *scheduledJobs) Get(name string) (result *batch.ScheduledJob, err error) { - result = &batch.ScheduledJob{} - err = c.r.Get().Namespace(c.ns).Resource("scheduledJobs").Name(name).Do().Into(result) - return -} - -// Create creates a new scheduled job. -func (c *scheduledJobs) Create(job *batch.ScheduledJob) (result *batch.ScheduledJob, err error) { - result = &batch.ScheduledJob{} - err = c.r.Post().Namespace(c.ns).Resource("scheduledJobs").Body(job).Do().Into(result) - return -} - -// Update updates an existing scheduled job. -func (c *scheduledJobs) Update(job *batch.ScheduledJob) (result *batch.ScheduledJob, err error) { - result = &batch.ScheduledJob{} - err = c.r.Put().Namespace(c.ns).Resource("scheduledJobs").Name(job.Name).Body(job).Do().Into(result) - return -} - -// Delete deletes a scheduled job, returns error if one occurs. -func (c *scheduledJobs) Delete(name string, options *api.DeleteOptions) (err error) { - return c.r.Delete().Namespace(c.ns).Resource("scheduledJobs").Name(name).Body(options).Do().Error() -} - -// Watch returns a watch.Interface that watches the requested scheduled jobs. -func (c *scheduledJobs) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.r.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("scheduledJobs"). - VersionedParams(&opts, api.ParameterCodec). 
- Watch() -} - -// UpdateStatus takes the name of the scheduled job and the new status. Returns the server's representation of the scheduled job, and an error, if it occurs. -func (c *scheduledJobs) UpdateStatus(job *batch.ScheduledJob) (result *batch.ScheduledJob, err error) { - result = &batch.ScheduledJob{} - err = c.r.Put().Namespace(c.ns).Resource("scheduledJobs").Name(job.Name).SubResource("status").Body(job).Do().Into(result) - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/secrets.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/secrets.go deleted file mode 100644 index 33d77ad24..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/secrets.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/watch" -) - -type SecretsNamespacer interface { - Secrets(namespace string) SecretsInterface -} - -type SecretsInterface interface { - Create(secret *api.Secret) (*api.Secret, error) - Update(secret *api.Secret) (*api.Secret, error) - Delete(name string) error - List(opts api.ListOptions) (*api.SecretList, error) - Get(name string) (*api.Secret, error) - Watch(opts api.ListOptions) (watch.Interface, error) -} - -// events implements Secrets interface -type secrets struct { - client *Client - namespace string -} - -// newSecrets returns a new secrets object. -func newSecrets(c *Client, ns string) *secrets { - return &secrets{ - client: c, - namespace: ns, - } -} - -func (s *secrets) Create(secret *api.Secret) (*api.Secret, error) { - result := &api.Secret{} - err := s.client.Post(). - Namespace(s.namespace). - Resource("secrets"). - Body(secret). - Do(). - Into(result) - - return result, err -} - -// List returns a list of secrets matching the selectors. -func (s *secrets) List(opts api.ListOptions) (*api.SecretList, error) { - result := &api.SecretList{} - - err := s.client.Get(). - Namespace(s.namespace). - Resource("secrets"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - - return result, err -} - -// Get returns the given secret, or an error. -func (s *secrets) Get(name string) (*api.Secret, error) { - result := &api.Secret{} - err := s.client.Get(). - Namespace(s.namespace). - Resource("secrets"). - Name(name). - Do(). - Into(result) - - return result, err -} - -// Watch starts watching for secrets matching the given selectors. -func (s *secrets) Watch(opts api.ListOptions) (watch.Interface, error) { - return s.client.Get(). - Prefix("watch"). - Namespace(s.namespace). - Resource("secrets"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -func (s *secrets) Delete(name string) error { - return s.client.Delete(). - Namespace(s.namespace). - Resource("secrets"). - Name(name). - Do(). - Error() -} - -func (s *secrets) Update(secret *api.Secret) (result *api.Secret, err error) { - result = &api.Secret{} - err = s.client.Put(). - Namespace(s.namespace). 
- Resource("secrets"). - Name(secret.Name). - Body(secret). - Do(). - Into(result) - - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/service_accounts.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/service_accounts.go deleted file mode 100644 index d78a25c47..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/service_accounts.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/watch" -) - -type ServiceAccountsNamespacer interface { - ServiceAccounts(namespace string) ServiceAccountsInterface -} - -type ServiceAccountsInterface interface { - Create(serviceAccount *api.ServiceAccount) (*api.ServiceAccount, error) - Update(serviceAccount *api.ServiceAccount) (*api.ServiceAccount, error) - Delete(name string) error - List(opts api.ListOptions) (*api.ServiceAccountList, error) - Get(name string) (*api.ServiceAccount, error) - Watch(opts api.ListOptions) (watch.Interface, error) -} - -// serviceAccounts implements ServiceAccounts interface -type serviceAccounts struct { - client *Client - namespace string -} - -// newServiceAccounts returns a new serviceAccounts object. -func newServiceAccounts(c *Client, ns string) ServiceAccountsInterface { - return &serviceAccounts{ - client: c, - namespace: ns, - } -} - -func (s *serviceAccounts) Create(serviceAccount *api.ServiceAccount) (*api.ServiceAccount, error) { - result := &api.ServiceAccount{} - err := s.client.Post(). - Namespace(s.namespace). - Resource("serviceAccounts"). - Body(serviceAccount). - Do(). - Into(result) - - return result, err -} - -// List returns a list of serviceAccounts matching the selectors. -func (s *serviceAccounts) List(opts api.ListOptions) (*api.ServiceAccountList, error) { - result := &api.ServiceAccountList{} - - err := s.client.Get(). - Namespace(s.namespace). - Resource("serviceAccounts"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - - return result, err -} - -// Get returns the given serviceAccount, or an error. -func (s *serviceAccounts) Get(name string) (*api.ServiceAccount, error) { - result := &api.ServiceAccount{} - err := s.client.Get(). - Namespace(s.namespace). - Resource("serviceAccounts"). - Name(name). - Do(). - Into(result) - - return result, err -} - -// Watch starts watching for serviceAccounts matching the given selectors. -func (s *serviceAccounts) Watch(opts api.ListOptions) (watch.Interface, error) { - return s.client.Get(). - Prefix("watch"). - Namespace(s.namespace). - Resource("serviceAccounts"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -func (s *serviceAccounts) Delete(name string) error { - return s.client.Delete(). - Namespace(s.namespace). - Resource("serviceAccounts"). - Name(name). - Do(). 
- Error() -} - -func (s *serviceAccounts) Update(serviceAccount *api.ServiceAccount) (result *api.ServiceAccount, err error) { - result = &api.ServiceAccount{} - err = s.client.Put(). - Namespace(s.namespace). - Resource("serviceAccounts"). - Name(serviceAccount.Name). - Body(serviceAccount). - Do(). - Into(result) - - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/services.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/services.go deleted file mode 100644 index 8b40a5d04..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/services.go +++ /dev/null @@ -1,121 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/util/net" - "k8s.io/kubernetes/pkg/watch" -) - -// ServicesNamespacer has methods to work with Service resources in a namespace -type ServicesNamespacer interface { - Services(namespace string) ServiceInterface -} - -// ServiceInterface has methods to work with Service resources. -type ServiceInterface interface { - List(opts api.ListOptions) (*api.ServiceList, error) - Get(name string) (*api.Service, error) - Create(srv *api.Service) (*api.Service, error) - Update(srv *api.Service) (*api.Service, error) - UpdateStatus(srv *api.Service) (*api.Service, error) - Delete(name string) error - Watch(opts api.ListOptions) (watch.Interface, error) - ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper -} - -// services implements ServicesNamespacer interface -type services struct { - r *Client - ns string -} - -// newServices returns a services -func newServices(c *Client, namespace string) *services { - return &services{c, namespace} -} - -// List takes a selector, and returns the list of services that match that selector -func (c *services) List(opts api.ListOptions) (result *api.ServiceList, err error) { - result = &api.ServiceList{} - err = c.r.Get(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, api.ParameterCodec). - Do(). - Into(result) - return -} - -// Get returns information about a particular service. -func (c *services) Get(name string) (result *api.Service, err error) { - result = &api.Service{} - err = c.r.Get().Namespace(c.ns).Resource("services").Name(name).Do().Into(result) - return -} - -// Create creates a new service. -func (c *services) Create(svc *api.Service) (result *api.Service, err error) { - result = &api.Service{} - err = c.r.Post().Namespace(c.ns).Resource("services").Body(svc).Do().Into(result) - return -} - -// Update updates an existing service. -func (c *services) Update(svc *api.Service) (result *api.Service, err error) { - result = &api.Service{} - err = c.r.Put().Namespace(c.ns).Resource("services").Name(svc.Name).Body(svc).Do().Into(result) - return -} - -// UpdateStatus takes a Service object with the new status and applies it as an update to the existing Service. 
-func (c *services) UpdateStatus(service *api.Service) (result *api.Service, err error) { - result = &api.Service{} - err = c.r.Put().Namespace(c.ns).Resource("services").Name(service.Name).SubResource("status").Body(service).Do().Into(result) - return -} - -// Delete deletes an existing service. -func (c *services) Delete(name string) error { - return c.r.Delete().Namespace(c.ns).Resource("services").Name(name).Do().Error() -} - -// Watch returns a watch.Interface that watches the requested services. -func (c *services) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.r.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} - -// ProxyGet returns a response of the service by calling it through the proxy. -func (c *services) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { - request := c.r.Get(). - Namespace(c.ns). - Resource("services"). - SubResource("proxy"). - Name(net.JoinSchemeNamePort(scheme, name, port)). - Suffix(path) - for k, v := range params { - request = request.Param(k, v) - } - return request -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/thirdpartyresources.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/thirdpartyresources.go deleted file mode 100644 index 0908db06e..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/thirdpartyresources.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/watch" -) - -// ThirdPartyResourceNamespacer has methods to work with ThirdPartyResource resources in a namespace -type ThirdPartyResourceNamespacer interface { - ThirdPartyResources() ThirdPartyResourceInterface -} - -type ThirdPartyResourceInterface interface { - List(opts api.ListOptions) (*extensions.ThirdPartyResourceList, error) - Get(name string) (*extensions.ThirdPartyResource, error) - Create(ctrl *extensions.ThirdPartyResource) (*extensions.ThirdPartyResource, error) - Update(ctrl *extensions.ThirdPartyResource) (*extensions.ThirdPartyResource, error) - UpdateStatus(ctrl *extensions.ThirdPartyResource) (*extensions.ThirdPartyResource, error) - Delete(name string) error - Watch(opts api.ListOptions) (watch.Interface, error) -} - -// thirdPartyResources implements DaemonsSetsNamespacer interface -type thirdPartyResources struct { - r *ExtensionsClient -} - -func newThirdPartyResources(c *ExtensionsClient) *thirdPartyResources { - return &thirdPartyResources{c} -} - -// Ensure statically that thirdPartyResources implements ThirdPartyResourcesInterface. 
-var _ ThirdPartyResourceInterface = &thirdPartyResources{} - -func (c *thirdPartyResources) List(opts api.ListOptions) (result *extensions.ThirdPartyResourceList, err error) { - result = &extensions.ThirdPartyResourceList{} - err = c.r.Get().Resource("thirdpartyresources").VersionedParams(&opts, api.ParameterCodec).Do().Into(result) - return -} - -// Get returns information about a particular third party resource. -func (c *thirdPartyResources) Get(name string) (result *extensions.ThirdPartyResource, err error) { - result = &extensions.ThirdPartyResource{} - err = c.r.Get().Resource("thirdpartyresources").Name(name).Do().Into(result) - return -} - -// Create creates a new third party resource. -func (c *thirdPartyResources) Create(resource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { - result = &extensions.ThirdPartyResource{} - err = c.r.Post().Resource("thirdpartyresources").Body(resource).Do().Into(result) - return -} - -// Update updates an existing third party resource. -func (c *thirdPartyResources) Update(resource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { - result = &extensions.ThirdPartyResource{} - err = c.r.Put().Resource("thirdpartyresources").Name(resource.Name).Body(resource).Do().Into(result) - return -} - -// UpdateStatus updates an existing third party resource status -func (c *thirdPartyResources) UpdateStatus(resource *extensions.ThirdPartyResource) (result *extensions.ThirdPartyResource, err error) { - result = &extensions.ThirdPartyResource{} - err = c.r.Put().Resource("thirdpartyresources").Name(resource.Name).SubResource("status").Body(resource).Do().Into(result) - return -} - -// Delete deletes an existing third party resource. -func (c *thirdPartyResources) Delete(name string) error { - return c.r.Delete().Resource("thirdpartyresources").Name(name).Do().Error() -} - -// Watch returns a watch.Interface that watches the requested third party resources. -func (c *thirdPartyResources) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.r.Get(). - Prefix("watch"). - Resource("thirdpartyresources"). - VersionedParams(&opts, api.ParameterCodec). - Watch() -} diff --git a/vendor/k8s.io/kubernetes/pkg/client/unversioned/util.go b/vendor/k8s.io/kubernetes/pkg/client/unversioned/util.go deleted file mode 100644 index 37ada3c3d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/client/unversioned/util.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "time" - - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/util/wait" -) - -// DefaultRetry is the recommended retry for a conflict where multiple clients -// are making changes to the same resource. 
-var DefaultRetry = wait.Backoff{ - Steps: 5, - Duration: 10 * time.Millisecond, - Factor: 1.0, - Jitter: 0.1, -} - -// DefaultBackoff is the recommended backoff for a conflict where a client -// may be attempting to make an unrelated modification to a resource under -// active management by one or more controllers. -var DefaultBackoff = wait.Backoff{ - Steps: 4, - Duration: 10 * time.Millisecond, - Factor: 5.0, - Jitter: 0.1, -} - -// RetryConflict executes the provided function repeatedly, retrying if the server returns a conflicting -// write. Callers should preserve previous executions if they wish to retry changes. It performs an -// exponential backoff. -// -// var pod *api.Pod -// err := RetryOnConflict(DefaultBackoff, func() (err error) { -// pod, err = c.Pods("mynamespace").UpdateStatus(podStatus) -// return -// }) -// if err != nil { -// // may be conflict if max retries were hit -// return err -// } -// ... -// -// TODO: Make Backoff an interface? -func RetryOnConflict(backoff wait.Backoff, fn func() error) error { - var lastConflictErr error - err := wait.ExponentialBackoff(backoff, func() (bool, error) { - err := fn() - switch { - case err == nil: - return true, nil - case errors.IsConflict(err): - lastConflictErr = err - return false, nil - default: - return false, err - } - }) - if err == wait.ErrWaitTimeout { - err = lastConflictErr - } - return err -} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/OWNERS b/vendor/k8s.io/kubernetes/pkg/controller/OWNERS deleted file mode 100644 index ac8301602..000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -assignees: - - bprashanth - - derekwaynecarr - - mikedanese diff --git a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go deleted file mode 100644 index a7aba40be..000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go +++ /dev/null @@ -1,661 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/validation" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/cache" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/client/record" - "k8s.io/kubernetes/pkg/controller/framework" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" - "k8s.io/kubernetes/pkg/util/integer" - "k8s.io/kubernetes/pkg/util/sets" -) - -const ( - CreatedByAnnotation = "kubernetes.io/created-by" - - // If a watch drops a delete event for a pod, it'll take this long - // before a dormant controller waiting for those packets is woken up anyway. 
It is - // specifically targeted at the case where some problem prevents an update - // of expectations, without it the controller could stay asleep forever. This should - // be set based on the expected latency of watch events. - // - // Currently a controller can service (create *and* observe the watch events for said - // creation) about 10 pods a second, so it takes about 1 min to service - // 500 pods. Just creation is limited to 20qps, and watching happens with ~10-30s - // latency/pod at the scale of 3000 pods over 100 nodes. - ExpectationsTimeout = 5 * time.Minute -) - -var ( - KeyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc -) - -type ResyncPeriodFunc func() time.Duration - -// Returns 0 for resyncPeriod in case resyncing is not needed. -func NoResyncPeriodFunc() time.Duration { - return 0 -} - -// StaticResyncPeriodFunc returns the resync period specified -func StaticResyncPeriodFunc(resyncPeriod time.Duration) ResyncPeriodFunc { - return func() time.Duration { - return resyncPeriod - } -} - -// Expectations are a way for controllers to tell the controller manager what they expect. eg: -// ControllerExpectations: { -// controller1: expects 2 adds in 2 minutes -// controller2: expects 2 dels in 2 minutes -// controller3: expects -1 adds in 2 minutes => controller3's expectations have already been met -// } -// -// Implementation: -// ControlleeExpectation = pair of atomic counters to track controllee's creation/deletion -// ControllerExpectationsStore = TTLStore + a ControlleeExpectation per controller -// -// * Once set expectations can only be lowered -// * A controller isn't synced till its expectations are either fulfilled, or expire -// * Controllers that don't set expectations will get woken up for every matching controllee - -// ExpKeyFunc to parse out the key from a ControlleeExpectation -var ExpKeyFunc = func(obj interface{}) (string, error) { - if e, ok := obj.(*ControlleeExpectations); ok { - return e.key, nil - } - return "", fmt.Errorf("Could not find key for obj %#v", obj) -} - -// ControllerExpectationsInterface is an interface that allows users to set and wait on expectations. -// Only abstracted out for testing. -// Warning: if using KeyFunc it is not safe to use a single ControllerExpectationsInterface with different -// types of controllers, because the keys might conflict across types. -type ControllerExpectationsInterface interface { - GetExpectations(controllerKey string) (*ControlleeExpectations, bool, error) - SatisfiedExpectations(controllerKey string) bool - DeleteExpectations(controllerKey string) - SetExpectations(controllerKey string, add, del int) error - ExpectCreations(controllerKey string, adds int) error - ExpectDeletions(controllerKey string, dels int) error - CreationObserved(controllerKey string) - DeletionObserved(controllerKey string) - RaiseExpectations(controllerKey string, add, del int) - LowerExpectations(controllerKey string, add, del int) -} - -// ControllerExpectations is a cache mapping controllers to what they expect to see before being woken up for a sync. -type ControllerExpectations struct { - cache.Store -} - -// GetExpectations returns the ControlleeExpectations of the given controller. 
-func (r *ControllerExpectations) GetExpectations(controllerKey string) (*ControlleeExpectations, bool, error) { - if exp, exists, err := r.GetByKey(controllerKey); err == nil && exists { - return exp.(*ControlleeExpectations), true, nil - } else { - return nil, false, err - } -} - -// DeleteExpectations deletes the expectations of the given controller from the TTLStore. -func (r *ControllerExpectations) DeleteExpectations(controllerKey string) { - if exp, exists, err := r.GetByKey(controllerKey); err == nil && exists { - if err := r.Delete(exp); err != nil { - glog.V(2).Infof("Error deleting expectations for controller %v: %v", controllerKey, err) - } - } -} - -// SatisfiedExpectations returns true if the required adds/dels for the given controller have been observed. -// Add/del counts are established by the controller at sync time, and updated as controllees are observed by the controller -// manager. -func (r *ControllerExpectations) SatisfiedExpectations(controllerKey string) bool { - if exp, exists, err := r.GetExpectations(controllerKey); exists { - if exp.Fulfilled() { - return true - } else if exp.isExpired() { - glog.V(4).Infof("Controller expectations expired %#v", exp) - return true - } else { - glog.V(4).Infof("Controller still waiting on expectations %#v", exp) - return false - } - } else if err != nil { - glog.V(2).Infof("Error encountered while checking expectations %#v, forcing sync", err) - } else { - // When a new controller is created, it doesn't have expectations. - // When it doesn't see expected watch events for > TTL, the expectations expire. - // - In this case it wakes up, creates/deletes controllees, and sets expectations again. - // When it has satisfied expectations and no controllees need to be created/destroyed > TTL, the expectations expire. - // - In this case it continues without setting expectations till it needs to create/delete controllees. - glog.V(4).Infof("Controller %v either never recorded expectations, or the ttl expired.", controllerKey) - } - // Trigger a sync if we either encountered and error (which shouldn't happen since we're - // getting from local store) or this controller hasn't established expectations. - return true -} - -// TODO: Extend ExpirationCache to support explicit expiration. -// TODO: Make this possible to disable in tests. -// TODO: Support injection of clock. -func (exp *ControlleeExpectations) isExpired() bool { - return util.RealClock{}.Since(exp.timestamp) > ExpectationsTimeout -} - -// SetExpectations registers new expectations for the given controller. Forgets existing expectations. -func (r *ControllerExpectations) SetExpectations(controllerKey string, add, del int) error { - exp := &ControlleeExpectations{add: int64(add), del: int64(del), key: controllerKey, timestamp: util.RealClock{}.Now()} - glog.V(4).Infof("Setting expectations %#v", exp) - return r.Add(exp) -} - -func (r *ControllerExpectations) ExpectCreations(controllerKey string, adds int) error { - return r.SetExpectations(controllerKey, adds, 0) -} - -func (r *ControllerExpectations) ExpectDeletions(controllerKey string, dels int) error { - return r.SetExpectations(controllerKey, 0, dels) -} - -// Decrements the expectation counts of the given controller. -func (r *ControllerExpectations) LowerExpectations(controllerKey string, add, del int) { - if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists { - exp.Add(int64(-add), int64(-del)) - // The expectations might've been modified since the update on the previous line. 
- glog.V(4).Infof("Lowered expectations %+v", exp) - } -} - -// Increments the expectation counts of the given controller. -func (r *ControllerExpectations) RaiseExpectations(controllerKey string, add, del int) { - if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists { - exp.Add(int64(add), int64(del)) - // The expectations might've been modified since the update on the previous line. - glog.V(4).Infof("Raised expectations %+v", exp) - } -} - -// CreationObserved atomically decrements the `add` expecation count of the given controller. -func (r *ControllerExpectations) CreationObserved(controllerKey string) { - r.LowerExpectations(controllerKey, 1, 0) -} - -// DeletionObserved atomically decrements the `del` expectation count of the given controller. -func (r *ControllerExpectations) DeletionObserved(controllerKey string) { - r.LowerExpectations(controllerKey, 0, 1) -} - -// Expectations are either fulfilled, or expire naturally. -type Expectations interface { - Fulfilled() bool -} - -// ControlleeExpectations track controllee creates/deletes. -type ControlleeExpectations struct { - add int64 - del int64 - key string - timestamp time.Time -} - -// Add increments the add and del counters. -func (e *ControlleeExpectations) Add(add, del int64) { - atomic.AddInt64(&e.add, add) - atomic.AddInt64(&e.del, del) -} - -// Fulfilled returns true if this expectation has been fulfilled. -func (e *ControlleeExpectations) Fulfilled() bool { - // TODO: think about why this line being atomic doesn't matter - return atomic.LoadInt64(&e.add) <= 0 && atomic.LoadInt64(&e.del) <= 0 -} - -// GetExpectations returns the add and del expectations of the controllee. -func (e *ControlleeExpectations) GetExpectations() (int64, int64) { - return atomic.LoadInt64(&e.add), atomic.LoadInt64(&e.del) -} - -// NewControllerExpectations returns a store for ControllerExpectations. -func NewControllerExpectations() *ControllerExpectations { - return &ControllerExpectations{cache.NewStore(ExpKeyFunc)} -} - -// UIDSetKeyFunc to parse out the key from a UIDSet. -var UIDSetKeyFunc = func(obj interface{}) (string, error) { - if u, ok := obj.(*UIDSet); ok { - return u.key, nil - } - return "", fmt.Errorf("Could not find key for obj %#v", obj) -} - -// UIDSet holds a key and a set of UIDs. Used by the -// UIDTrackingControllerExpectations to remember which UID it has seen/still -// waiting for. -type UIDSet struct { - sets.String - key string -} - -// UIDTrackingControllerExpectations tracks the UID of the pods it deletes. -// This cache is needed over plain old expectations to safely handle graceful -// deletion. The desired behavior is to treat an update that sets the -// DeletionTimestamp on an object as a delete. To do so consistenly, one needs -// to remember the expected deletes so they aren't double counted. -// TODO: Track creates as well (#22599) -type UIDTrackingControllerExpectations struct { - ControllerExpectationsInterface - // TODO: There is a much nicer way to do this that involves a single store, - // a lock per entry, and a ControlleeExpectationsInterface type. - uidStoreLock sync.Mutex - // Store used for the UIDs associated with any expectation tracked via the - // ControllerExpectationsInterface. - uidStore cache.Store -} - -// GetUIDs is a convenience method to avoid exposing the set of expected uids. -// The returned set is not thread safe, all modifications must be made holding -// the uidStoreLock. 
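The expectations machinery shown above lets a controller record how many creates or deletes it has issued and defer syncing until the watch has delivered them. A usage sketch, assuming the vendored k8s.io/kubernetes/pkg/controller package that this diff removes is still importable; rcKey and the count are illustrative.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/controller" // vendored package removed in this diff
)

func main() {
	exp := controller.NewControllerExpectations()
	rcKey := "default/my-rc" // hypothetical controller key

	// Record the intent to create three pods before issuing the creates,
	// so a concurrent sync does not double-create them.
	if err := exp.ExpectCreations(rcKey, 3); err != nil {
		panic(err)
	}

	// Each watch event for a created pod lowers the add counter.
	for i := 0; i < 3; i++ {
		exp.CreationObserved(rcKey)
	}

	// Once the counters reach zero (or the TTL expires), the controller
	// is allowed to sync again.
	fmt.Println("satisfied:", exp.SatisfiedExpectations(rcKey))
}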
-func (u *UIDTrackingControllerExpectations) GetUIDs(controllerKey string) sets.String { - if uid, exists, err := u.uidStore.GetByKey(controllerKey); err == nil && exists { - return uid.(*UIDSet).String - } - return nil -} - -// ExpectDeletions records expectations for the given deleteKeys, against the given controller. -func (u *UIDTrackingControllerExpectations) ExpectDeletions(rcKey string, deletedKeys []string) error { - u.uidStoreLock.Lock() - defer u.uidStoreLock.Unlock() - - if existing := u.GetUIDs(rcKey); existing != nil && existing.Len() != 0 { - glog.Errorf("Clobbering existing delete keys: %+v", existing) - } - expectedUIDs := sets.NewString() - for _, k := range deletedKeys { - expectedUIDs.Insert(k) - } - glog.V(4).Infof("Controller %v waiting on deletions for: %+v", rcKey, deletedKeys) - if err := u.uidStore.Add(&UIDSet{expectedUIDs, rcKey}); err != nil { - return err - } - return u.ControllerExpectationsInterface.ExpectDeletions(rcKey, expectedUIDs.Len()) -} - -// DeletionObserved records the given deleteKey as a deletion, for the given rc. -func (u *UIDTrackingControllerExpectations) DeletionObserved(rcKey, deleteKey string) { - u.uidStoreLock.Lock() - defer u.uidStoreLock.Unlock() - - uids := u.GetUIDs(rcKey) - if uids != nil && uids.Has(deleteKey) { - glog.V(4).Infof("Controller %v received delete for pod %v", rcKey, deleteKey) - u.ControllerExpectationsInterface.DeletionObserved(rcKey) - uids.Delete(deleteKey) - } -} - -// DeleteExpectations deletes the UID set and invokes DeleteExpectations on the -// underlying ControllerExpectationsInterface. -func (u *UIDTrackingControllerExpectations) DeleteExpectations(rcKey string) { - u.uidStoreLock.Lock() - defer u.uidStoreLock.Unlock() - - u.ControllerExpectationsInterface.DeleteExpectations(rcKey) - if uidExp, exists, err := u.uidStore.GetByKey(rcKey); err == nil && exists { - if err := u.uidStore.Delete(uidExp); err != nil { - glog.V(2).Infof("Error deleting uid expectations for controller %v: %v", rcKey, err) - } - } -} - -// NewUIDTrackingControllerExpectations returns a wrapper around -// ControllerExpectations that is aware of deleteKeys. -func NewUIDTrackingControllerExpectations(ce ControllerExpectationsInterface) *UIDTrackingControllerExpectations { - return &UIDTrackingControllerExpectations{ControllerExpectationsInterface: ce, uidStore: cache.NewStore(UIDSetKeyFunc)} -} - -// PodControlInterface is an interface that knows how to add or delete pods -// created as an interface to allow testing. -type PodControlInterface interface { - // CreatePods creates new pods according to the spec. - CreatePods(namespace string, template *api.PodTemplateSpec, object runtime.Object) error - // CreatePodsOnNode creates a new pod accorting to the spec on the specified node. - CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error - // DeletePod deletes the pod identified by podID. - DeletePod(namespace string, podID string, object runtime.Object) error -} - -// RealPodControl is the default implementation of PodControlInterface. 
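For graceful deletion the plain counters are not enough, so the UID-tracking wrapper above also remembers which pod keys are expected to disappear, letting an update that merely sets DeletionTimestamp count as the delete. A sketch under the same assumption that the removed vendored package is importable; the function name and keys are illustrative.

package example

import "k8s.io/kubernetes/pkg/controller" // vendored package removed in this diff

func expectGracefulDeletes(rcKey string, podKeys []string) (*controller.UIDTrackingControllerExpectations, error) {
	exp := controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations())

	// Record the exact keys being deleted so a later DeletionTimestamp update
	// is counted once, not twice.
	if err := exp.ExpectDeletions(rcKey, podKeys); err != nil {
		return nil, err
	}
	// Later, from the pod event handlers:
	//   exp.DeletionObserved(rcKey, podKey)
	return exp, nil
}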
-type RealPodControl struct { - KubeClient clientset.Interface - Recorder record.EventRecorder -} - -var _ PodControlInterface = &RealPodControl{} - -func getPodsLabelSet(template *api.PodTemplateSpec) labels.Set { - desiredLabels := make(labels.Set) - for k, v := range template.Labels { - desiredLabels[k] = v - } - return desiredLabels -} - -func getPodsAnnotationSet(template *api.PodTemplateSpec, object runtime.Object) (labels.Set, error) { - desiredAnnotations := make(labels.Set) - for k, v := range template.Annotations { - desiredAnnotations[k] = v - } - createdByRef, err := api.GetReference(object) - if err != nil { - return desiredAnnotations, fmt.Errorf("unable to get controller reference: %v", err) - } - - // TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients - // would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment. - // We need to consistently handle this case of annotation versioning. - codec := api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: api.GroupName, Version: "v1"}) - - createdByRefJson, err := runtime.Encode(codec, &api.SerializedReference{ - Reference: *createdByRef, - }) - if err != nil { - return desiredAnnotations, fmt.Errorf("unable to serialize controller reference: %v", err) - } - desiredAnnotations[CreatedByAnnotation] = string(createdByRefJson) - return desiredAnnotations, nil -} - -func getPodsPrefix(controllerName string) string { - // use the dash (if the name isn't too long) to make the pod name a bit prettier - prefix := fmt.Sprintf("%s-", controllerName) - if len(validation.ValidatePodName(prefix, true)) != 0 { - prefix = controllerName - } - return prefix -} - -func (r RealPodControl) CreatePods(namespace string, template *api.PodTemplateSpec, object runtime.Object) error { - return r.createPods("", namespace, template, object) -} - -func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error { - return r.createPods(nodeName, namespace, template, object) -} - -func GetPodFromTemplate(template *api.PodTemplateSpec, parentObject runtime.Object) (*api.Pod, error) { - desiredLabels := getPodsLabelSet(template) - desiredAnnotations, err := getPodsAnnotationSet(template, parentObject) - if err != nil { - return nil, err - } - accessor, err := meta.Accessor(parentObject) - if err != nil { - return nil, fmt.Errorf("parentObject does not have ObjectMeta, %v", err) - } - prefix := getPodsPrefix(accessor.GetName()) - - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ - Labels: desiredLabels, - Annotations: desiredAnnotations, - GenerateName: prefix, - }, - } - if err := api.Scheme.Convert(&template.Spec, &pod.Spec); err != nil { - return nil, fmt.Errorf("unable to convert pod template: %v", err) - } - return pod, nil -} - -func (r RealPodControl) createPods(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error { - pod, err := GetPodFromTemplate(template, object) - if err != nil { - return err - } - if len(nodeName) != 0 { - pod.Spec.NodeName = nodeName - } - if labels.Set(pod.Labels).AsSelector().Empty() { - return fmt.Errorf("unable to create pods, no labels") - } - if newPod, err := r.KubeClient.Core().Pods(namespace).Create(pod); err != nil { - r.Recorder.Eventf(object, api.EventTypeWarning, "FailedCreate", "Error creating: %v", err) - return fmt.Errorf("unable to create pods: %v", err) - } else { - accessor, err := meta.Accessor(object) - if err != nil 
{ - glog.Errorf("parentObject does not have ObjectMeta, %v", err) - return nil - } - glog.V(4).Infof("Controller %v created pod %v", accessor.GetName(), newPod.Name) - r.Recorder.Eventf(object, api.EventTypeNormal, "SuccessfulCreate", "Created pod: %v", newPod.Name) - } - return nil -} - -func (r RealPodControl) DeletePod(namespace string, podID string, object runtime.Object) error { - accessor, err := meta.Accessor(object) - if err != nil { - return fmt.Errorf("object does not have ObjectMeta, %v", err) - } - if err := r.KubeClient.Core().Pods(namespace).Delete(podID, nil); err != nil { - r.Recorder.Eventf(object, api.EventTypeWarning, "FailedDelete", "Error deleting: %v", err) - return fmt.Errorf("unable to delete pods: %v", err) - } else { - glog.V(4).Infof("Controller %v deleted pod %v", accessor.GetName(), podID) - r.Recorder.Eventf(object, api.EventTypeNormal, "SuccessfulDelete", "Deleted pod: %v", podID) - } - return nil -} - -type FakePodControl struct { - sync.Mutex - Templates []api.PodTemplateSpec - DeletePodName []string - Err error -} - -var _ PodControlInterface = &FakePodControl{} - -func (f *FakePodControl) CreatePods(namespace string, spec *api.PodTemplateSpec, object runtime.Object) error { - f.Lock() - defer f.Unlock() - if f.Err != nil { - return f.Err - } - f.Templates = append(f.Templates, *spec) - return nil -} - -func (f *FakePodControl) CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error { - f.Lock() - defer f.Unlock() - if f.Err != nil { - return f.Err - } - f.Templates = append(f.Templates, *template) - return nil -} - -func (f *FakePodControl) DeletePod(namespace string, podID string, object runtime.Object) error { - f.Lock() - defer f.Unlock() - if f.Err != nil { - return f.Err - } - f.DeletePodName = append(f.DeletePodName, podID) - return nil -} - -func (f *FakePodControl) Clear() { - f.Lock() - defer f.Unlock() - f.DeletePodName = []string{} - f.Templates = []api.PodTemplateSpec{} -} - -// ActivePods type allows custom sorting of pods so a controller can pick the best ones to delete. -type ActivePods []*api.Pod - -func (s ActivePods) Len() int { return len(s) } -func (s ActivePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -func (s ActivePods) Less(i, j int) bool { - // 1. Unassigned < assigned - // If only one of the pods is unassigned, the unassigned one is smaller - if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) { - return len(s[i].Spec.NodeName) == 0 - } - // 2. PodPending < PodUnknown < PodRunning - m := map[api.PodPhase]int{api.PodPending: 0, api.PodUnknown: 1, api.PodRunning: 2} - if m[s[i].Status.Phase] != m[s[j].Status.Phase] { - return m[s[i].Status.Phase] < m[s[j].Status.Phase] - } - // 3. Not ready < ready - // If only one of the pods is not ready, the not ready one is smaller - if api.IsPodReady(s[i]) != api.IsPodReady(s[j]) { - return !api.IsPodReady(s[i]) - } - // TODO: take availability into account when we push minReadySeconds information from deployment into pods, - // see https://github.com/kubernetes/kubernetes/issues/22065 - // 4. Been ready for empty time < less time < more time - // If both pods are ready, the latest ready one is smaller - if api.IsPodReady(s[i]) && api.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { - return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j])) - } - // 5. 
Pods with containers with higher restart counts < lower restart counts - if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) { - return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j]) - } - // 6. Empty creation time pods < newer pods < older pods - if !s[i].CreationTimestamp.Equal(s[j].CreationTimestamp) { - return afterOrZero(s[i].CreationTimestamp, s[j].CreationTimestamp) - } - return false -} - -// afterOrZero checks if time t1 is after time t2; if one of them -// is zero, the zero time is seen as after non-zero time. -func afterOrZero(t1, t2 unversioned.Time) bool { - if t1.Time.IsZero() || t2.Time.IsZero() { - return t1.Time.IsZero() - } - return t1.After(t2.Time) -} - -func podReadyTime(pod *api.Pod) unversioned.Time { - if api.IsPodReady(pod) { - for _, c := range pod.Status.Conditions { - // we only care about pod ready conditions - if c.Type == api.PodReady && c.Status == api.ConditionTrue { - return c.LastTransitionTime - } - } - } - return unversioned.Time{} -} - -func maxContainerRestarts(pod *api.Pod) int { - maxRestarts := 0 - for _, c := range pod.Status.ContainerStatuses { - maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount)) - } - return maxRestarts -} - -// FilterActivePods returns pods that have not terminated. -func FilterActivePods(pods []api.Pod) []*api.Pod { - var result []*api.Pod - for i := range pods { - p := pods[i] - if IsPodActive(p) { - result = append(result, &p) - } else { - glog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v", - p.Namespace, p.Name, p.Status.Phase, p.DeletionTimestamp) - } - } - return result -} - -func IsPodActive(p api.Pod) bool { - return api.PodSucceeded != p.Status.Phase && - api.PodFailed != p.Status.Phase && - p.DeletionTimestamp == nil -} - -// FilterActiveReplicaSets returns replica sets that have (or at least ought to have) pods. -func FilterActiveReplicaSets(replicaSets []*extensions.ReplicaSet) []*extensions.ReplicaSet { - active := []*extensions.ReplicaSet{} - for i := range replicaSets { - if replicaSets[i].Spec.Replicas > 0 { - active = append(active, replicaSets[i]) - } - } - return active -} - -// PodKey returns a key unique to the given pod within a cluster. -// It's used so we consistently use the same key scheme in this module. -// It does exactly what cache.MetaNamespaceKeyFunc would have done -// expcept there's not possibility for error since we know the exact type. -func PodKey(pod *api.Pod) string { - return fmt.Sprintf("%v/%v", pod.Namespace, pod.Name) -} - -// ControllersByCreationTimestamp sorts a list of ReplicationControllers by creation timestamp, using their names as a tie breaker. -type ControllersByCreationTimestamp []*api.ReplicationController - -func (o ControllersByCreationTimestamp) Len() int { return len(o) } -func (o ControllersByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } - -func (o ControllersByCreationTimestamp) Less(i, j int) bool { - if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) { - return o[i].Name < o[j].Name - } - return o[i].CreationTimestamp.Before(o[j].CreationTimestamp) -} - -// ReplicaSetsByCreationTimestamp sorts a list of ReplicationSets by creation timestamp, using their names as a tie breaker. 
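The ActivePods ordering above ranks pods so that the least disruptive deletion candidates (unscheduled, pending, not ready, frequently restarting, recently created) sort to the front. A sketch of how a scale-down path would use it, again assuming the vendored packages removed in this diff; podsToScaleDown is an illustrative name.

package example

import (
	"sort"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/controller"
)

// podsToScaleDown returns the n pods a replication controller would prefer to
// delete first: FilterActivePods drops terminated pods, and sorting by
// ActivePods pushes unscheduled/pending/not-ready/newer pods to the front.
func podsToScaleDown(pods []api.Pod, n int) []*api.Pod {
	active := controller.FilterActivePods(pods)
	sort.Sort(controller.ActivePods(active))
	if n > len(active) {
		n = len(active)
	}
	return active[:n]
}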
-type ReplicaSetsByCreationTimestamp []*extensions.ReplicaSet - -func (o ReplicaSetsByCreationTimestamp) Len() int { return len(o) } -func (o ReplicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } - -func (o ReplicaSetsByCreationTimestamp) Less(i, j int) bool { - if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) { - return o[i].Name < o[j].Name - } - return o[i].CreationTimestamp.Before(o[j].CreationTimestamp) -} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/doc.go b/vendor/k8s.io/kubernetes/pkg/controller/doc.go deleted file mode 100644 index 1e310b466..000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package controller contains code for controllers (like the replication -// controller). -package controller diff --git a/vendor/k8s.io/kubernetes/pkg/controller/framework/controller.go b/vendor/k8s.io/kubernetes/pkg/controller/framework/controller.go deleted file mode 100644 index c6363952b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/framework/controller.go +++ /dev/null @@ -1,326 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "sync" - "time" - - "k8s.io/kubernetes/pkg/client/cache" - "k8s.io/kubernetes/pkg/runtime" - utilruntime "k8s.io/kubernetes/pkg/util/runtime" - "k8s.io/kubernetes/pkg/util/wait" -) - -// Config contains all the settings for a Controller. -type Config struct { - // The queue for your objects; either a cache.FIFO or - // a cache.DeltaFIFO. Your Process() function should accept - // the output of this Oueue's Pop() method. - cache.Queue - - // Something that can list and watch your objects. - cache.ListerWatcher - - // Something that can process your objects. - Process ProcessFunc - - // The type of your objects. - ObjectType runtime.Object - - // Reprocess everything at least this often. - // Note that if it takes longer for you to clear the queue than this - // period, you will end up processing items in the order determined - // by cache.FIFO.Replace(). Currently, this is random. If this is a - // problem, we can change that replacement policy to append new - // things to the end of the queue instead of replacing the entire - // queue. - FullResyncPeriod time.Duration - - // If true, when Process() returns an error, re-enqueue the object. 
- // TODO: add interface to let you inject a delay/backoff or drop - // the object completely if desired. Pass the object in - // question to this interface as a parameter. - RetryOnError bool -} - -// ProcessFunc processes a single object. -type ProcessFunc func(obj interface{}) error - -// Controller is a generic controller framework. -type Controller struct { - config Config - reflector *cache.Reflector - reflectorMutex sync.RWMutex -} - -// TODO make the "Controller" private, and convert all references to use ControllerInterface instead -type ControllerInterface interface { - Run(stopCh <-chan struct{}) - HasSynced() bool -} - -// New makes a new Controller from the given Config. -func New(c *Config) *Controller { - ctlr := &Controller{ - config: *c, - } - return ctlr -} - -// Run begins processing items, and will continue until a value is sent down stopCh. -// It's an error to call Run more than once. -// Run blocks; call via go. -func (c *Controller) Run(stopCh <-chan struct{}) { - defer utilruntime.HandleCrash() - r := cache.NewReflector( - c.config.ListerWatcher, - c.config.ObjectType, - c.config.Queue, - c.config.FullResyncPeriod, - ) - - c.reflectorMutex.Lock() - c.reflector = r - c.reflectorMutex.Unlock() - - r.RunUntil(stopCh) - - wait.Until(c.processLoop, time.Second, stopCh) -} - -// Returns true once this controller has completed an initial resource listing -func (c *Controller) HasSynced() bool { - return c.config.Queue.HasSynced() -} - -// Requeue adds the provided object back into the queue if it does not already exist. -func (c *Controller) Requeue(obj interface{}) error { - return c.config.Queue.AddIfNotPresent(cache.Deltas{ - cache.Delta{ - Type: cache.Sync, - Object: obj, - }, - }) -} - -// processLoop drains the work queue. -// TODO: Consider doing the processing in parallel. This will require a little thought -// to make sure that we don't end up processing the same object multiple times -// concurrently. -func (c *Controller) processLoop() { - for { - obj, err := c.config.Queue.Pop(cache.PopProcessFunc(c.config.Process)) - if err != nil { - if c.config.RetryOnError { - // This is the safe way to re-enqueue. - c.config.Queue.AddIfNotPresent(obj) - } - } - } -} - -// ResourceEventHandler can handle notifications for events that happen to a -// resource. The events are informational only, so you can't return an -// error. -// * OnAdd is called when an object is added. -// * OnUpdate is called when an object is modified. Note that oldObj is the -// last known state of the object-- it is possible that several changes -// were combined together, so you can't use this to see every single -// change. OnUpdate is also called when a re-list happens, and it will -// get called even if nothing changed. This is useful for periodically -// evaluating or syncing something. -// * OnDelete will get the final state of the item if it is known, otherwise -// it will get an object of type cache.DeletedFinalStateUnknown. This can -// happen if the watch is closed and misses the delete event and we don't -// notice the deletion until the subsequent re-list. -type ResourceEventHandler interface { - OnAdd(obj interface{}) - OnUpdate(oldObj, newObj interface{}) - OnDelete(obj interface{}) -} - -// ResourceEventHandlerFuncs is an adaptor to let you easily specify as many or -// as few of the notification functions as you want while still implementing -// ResourceEventHandler. 
-type ResourceEventHandlerFuncs struct { - AddFunc func(obj interface{}) - UpdateFunc func(oldObj, newObj interface{}) - DeleteFunc func(obj interface{}) -} - -// OnAdd calls AddFunc if it's not nil. -func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) { - if r.AddFunc != nil { - r.AddFunc(obj) - } -} - -// OnUpdate calls UpdateFunc if it's not nil. -func (r ResourceEventHandlerFuncs) OnUpdate(oldObj, newObj interface{}) { - if r.UpdateFunc != nil { - r.UpdateFunc(oldObj, newObj) - } -} - -// OnDelete calls DeleteFunc if it's not nil. -func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) { - if r.DeleteFunc != nil { - r.DeleteFunc(obj) - } -} - -// DeletionHandlingMetaNamespaceKeyFunc checks for -// cache.DeletedFinalStateUnknown objects before calling -// cache.MetaNamespaceKeyFunc. -func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) { - if d, ok := obj.(cache.DeletedFinalStateUnknown); ok { - return d.Key, nil - } - return cache.MetaNamespaceKeyFunc(obj) -} - -// NewInformer returns a cache.Store and a controller for populating the store -// while also providing event notifications. You should only used the returned -// cache.Store for Get/List operations; Add/Modify/Deletes will cause the event -// notifications to be faulty. -// -// Parameters: -// * lw is list and watch functions for the source of the resource you want to -// be informed of. -// * objType is an object of the type that you expect to receive. -// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate -// calls, even if nothing changed). Otherwise, re-list will be delayed as -// long as possible (until the upstream source closes the watch or times out, -// or you stop the controller). -// * h is the object you want notifications sent to. -// -func NewInformer( - lw cache.ListerWatcher, - objType runtime.Object, - resyncPeriod time.Duration, - h ResourceEventHandler, -) (cache.Store, *Controller) { - // This will hold the client state, as we know it. - clientState := cache.NewStore(DeletionHandlingMetaNamespaceKeyFunc) - - // This will hold incoming changes. Note how we pass clientState in as a - // KeyLister, that way resync operations will result in the correct set - // of update/delete deltas. - fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, clientState) - - cfg := &Config{ - Queue: fifo, - ListerWatcher: lw, - ObjectType: objType, - FullResyncPeriod: resyncPeriod, - RetryOnError: false, - - Process: func(obj interface{}) error { - // from oldest to newest - for _, d := range obj.(cache.Deltas) { - switch d.Type { - case cache.Sync, cache.Added, cache.Updated: - if old, exists, err := clientState.Get(d.Object); err == nil && exists { - if err := clientState.Update(d.Object); err != nil { - return err - } - h.OnUpdate(old, d.Object) - } else { - if err := clientState.Add(d.Object); err != nil { - return err - } - h.OnAdd(d.Object) - } - case cache.Deleted: - if err := clientState.Delete(d.Object); err != nil { - return err - } - h.OnDelete(d.Object) - } - } - return nil - }, - } - return clientState, New(cfg) -} - -// NewIndexerInformer returns a cache.Indexer and a controller for populating the index -// while also providing event notifications. You should only used the returned -// cache.Index for Get/List operations; Add/Modify/Deletes will cause the event -// notifications to be faulty. -// -// Parameters: -// * lw is list and watch functions for the source of the resource you want to -// be informed of. 
-// * objType is an object of the type that you expect to receive. -// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate -// calls, even if nothing changed). Otherwise, re-list will be delayed as -// long as possible (until the upstream source closes the watch or times out, -// or you stop the controller). -// * h is the object you want notifications sent to. -// -func NewIndexerInformer( - lw cache.ListerWatcher, - objType runtime.Object, - resyncPeriod time.Duration, - h ResourceEventHandler, - indexers cache.Indexers, -) (cache.Indexer, *Controller) { - // This will hold the client state, as we know it. - clientState := cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) - - // This will hold incoming changes. Note how we pass clientState in as a - // KeyLister, that way resync operations will result in the correct set - // of update/delete deltas. - fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, clientState) - - cfg := &Config{ - Queue: fifo, - ListerWatcher: lw, - ObjectType: objType, - FullResyncPeriod: resyncPeriod, - RetryOnError: false, - - Process: func(obj interface{}) error { - // from oldest to newest - for _, d := range obj.(cache.Deltas) { - switch d.Type { - case cache.Sync, cache.Added, cache.Updated: - if old, exists, err := clientState.Get(d.Object); err == nil && exists { - if err := clientState.Update(d.Object); err != nil { - return err - } - h.OnUpdate(old, d.Object) - } else { - if err := clientState.Add(d.Object); err != nil { - return err - } - h.OnAdd(d.Object) - } - case cache.Deleted: - if err := clientState.Delete(d.Object); err != nil { - return err - } - h.OnDelete(d.Object) - } - } - return nil - }, - } - return clientState, New(cfg) -} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/framework/doc.go b/vendor/k8s.io/kubernetes/pkg/controller/framework/doc.go deleted file mode 100644 index ecd3cf28a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/framework/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package framework implements all the grunt work involved in running a simple controller. -package framework diff --git a/vendor/k8s.io/kubernetes/pkg/controller/framework/fake_controller_source.go b/vendor/k8s.io/kubernetes/pkg/controller/framework/fake_controller_source.go deleted file mode 100644 index 9e90e7c91..000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/framework/fake_controller_source.go +++ /dev/null @@ -1,262 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "errors" - "math/rand" - "strconv" - "sync" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/watch" -) - -func NewFakeControllerSource() *FakeControllerSource { - return &FakeControllerSource{ - Items: map[nnu]runtime.Object{}, - Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull), - } -} - -func NewFakePVControllerSource() *FakePVControllerSource { - return &FakePVControllerSource{ - FakeControllerSource{ - Items: map[nnu]runtime.Object{}, - Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull), - }} -} - -func NewFakePVCControllerSource() *FakePVCControllerSource { - return &FakePVCControllerSource{ - FakeControllerSource{ - Items: map[nnu]runtime.Object{}, - Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull), - }} -} - -// FakeControllerSource implements listing/watching for testing. -type FakeControllerSource struct { - lock sync.RWMutex - Items map[nnu]runtime.Object - changes []watch.Event // one change per resourceVersion - Broadcaster *watch.Broadcaster -} - -type FakePVControllerSource struct { - FakeControllerSource -} - -type FakePVCControllerSource struct { - FakeControllerSource -} - -// namespace, name, uid to be used as a key. -type nnu struct { - namespace, name string - uid types.UID -} - -// Add adds an object to the set and sends an add event to watchers. -// obj's ResourceVersion is set. -func (f *FakeControllerSource) Add(obj runtime.Object) { - f.Change(watch.Event{Type: watch.Added, Object: obj}, 1) -} - -// Modify updates an object in the set and sends a modified event to watchers. -// obj's ResourceVersion is set. -func (f *FakeControllerSource) Modify(obj runtime.Object) { - f.Change(watch.Event{Type: watch.Modified, Object: obj}, 1) -} - -// Delete deletes an object from the set and sends a delete event to watchers. -// obj's ResourceVersion is set. -func (f *FakeControllerSource) Delete(lastValue runtime.Object) { - f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 1) -} - -// AddDropWatch adds an object to the set but forgets to send an add event to -// watchers. -// obj's ResourceVersion is set. -func (f *FakeControllerSource) AddDropWatch(obj runtime.Object) { - f.Change(watch.Event{Type: watch.Added, Object: obj}, 0) -} - -// ModifyDropWatch updates an object in the set but forgets to send a modify -// event to watchers. -// obj's ResourceVersion is set. -func (f *FakeControllerSource) ModifyDropWatch(obj runtime.Object) { - f.Change(watch.Event{Type: watch.Modified, Object: obj}, 0) -} - -// DeleteDropWatch deletes an object from the set but forgets to send a delete -// event to watchers. -// obj's ResourceVersion is set. 
-func (f *FakeControllerSource) DeleteDropWatch(lastValue runtime.Object) { - f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 0) -} - -func (f *FakeControllerSource) key(accessor meta.Object) nnu { - return nnu{accessor.GetNamespace(), accessor.GetName(), accessor.GetUID()} -} - -// Change records the given event (setting the object's resource version) and -// sends a watch event with the specified probability. -func (f *FakeControllerSource) Change(e watch.Event, watchProbability float64) { - f.lock.Lock() - defer f.lock.Unlock() - - accessor, err := meta.Accessor(e.Object) - if err != nil { - panic(err) // this is test code only - } - - resourceVersion := len(f.changes) + 1 - accessor.SetResourceVersion(strconv.Itoa(resourceVersion)) - f.changes = append(f.changes, e) - key := f.key(accessor) - switch e.Type { - case watch.Added, watch.Modified: - f.Items[key] = e.Object - case watch.Deleted: - delete(f.Items, key) - } - - if rand.Float64() < watchProbability { - f.Broadcaster.Action(e.Type, e.Object) - } -} - -func (f *FakeControllerSource) getListItemsLocked() ([]runtime.Object, error) { - list := make([]runtime.Object, 0, len(f.Items)) - for _, obj := range f.Items { - // Must make a copy to allow clients to modify the object. - // Otherwise, if they make a change and write it back, they - // will inadvertently change our canonical copy (in - // addition to racing with other clients). - objCopy, err := api.Scheme.DeepCopy(obj) - if err != nil { - return nil, err - } - list = append(list, objCopy.(runtime.Object)) - } - return list, nil -} - -// List returns a list object, with its resource version set. -func (f *FakeControllerSource) List(options api.ListOptions) (runtime.Object, error) { - f.lock.RLock() - defer f.lock.RUnlock() - list, err := f.getListItemsLocked() - if err != nil { - return nil, err - } - listObj := &api.List{} - if err := meta.SetList(listObj, list); err != nil { - return nil, err - } - objMeta, err := api.ListMetaFor(listObj) - if err != nil { - return nil, err - } - resourceVersion := len(f.changes) - objMeta.ResourceVersion = strconv.Itoa(resourceVersion) - return listObj, nil -} - -// List returns a list object, with its resource version set. -func (f *FakePVControllerSource) List(options api.ListOptions) (runtime.Object, error) { - f.lock.RLock() - defer f.lock.RUnlock() - list, err := f.FakeControllerSource.getListItemsLocked() - if err != nil { - return nil, err - } - listObj := &api.PersistentVolumeList{} - if err := meta.SetList(listObj, list); err != nil { - return nil, err - } - objMeta, err := api.ListMetaFor(listObj) - if err != nil { - return nil, err - } - resourceVersion := len(f.changes) - objMeta.ResourceVersion = strconv.Itoa(resourceVersion) - return listObj, nil -} - -// List returns a list object, with its resource version set. -func (f *FakePVCControllerSource) List(options api.ListOptions) (runtime.Object, error) { - f.lock.RLock() - defer f.lock.RUnlock() - list, err := f.FakeControllerSource.getListItemsLocked() - if err != nil { - return nil, err - } - listObj := &api.PersistentVolumeClaimList{} - if err := meta.SetList(listObj, list); err != nil { - return nil, err - } - objMeta, err := api.ListMetaFor(listObj) - if err != nil { - return nil, err - } - resourceVersion := len(f.changes) - objMeta.ResourceVersion = strconv.Itoa(resourceVersion) - return listObj, nil -} - -// Watch returns a watch, which will be pre-populated with all changes -// after resourceVersion. 
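Everything needed to wire up the deleted informer machinery appears in this hunk: FakeControllerSource provides the List/Watch side, NewInformer builds the store plus controller, and ResourceEventHandlerFuncs adapts plain functions into a ResourceEventHandler. A wiring sketch, assuming the vendored packages removed in this diff are still importable; runExampleInformer is an illustrative name.

package example

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/controller/framework"
)

func runExampleInformer(stopCh <-chan struct{}) {
	source := framework.NewFakeControllerSource()

	store, ctrl := framework.NewInformer(
		source,         // ListerWatcher: the fake source in this hunk implements List/Watch
		&api.Pod{},     // the object type we expect to receive
		30*time.Second, // full resync period
		framework.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { fmt.Println("add:", obj.(*api.Pod).Name) },
			UpdateFunc: func(oldObj, newObj interface{}) { fmt.Println("update:", newObj.(*api.Pod).Name) },
			DeleteFunc: func(obj interface{}) { fmt.Println("delete:", obj.(*api.Pod).Name) },
		},
	)

	go ctrl.Run(stopCh)
	source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "demo", Namespace: "default"}})

	// The returned store should only be used for Get/List; writing to it
	// would make the event notifications unreliable.
	_ = store
}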
-func (f *FakeControllerSource) Watch(options api.ListOptions) (watch.Interface, error) { - f.lock.RLock() - defer f.lock.RUnlock() - rc, err := strconv.Atoi(options.ResourceVersion) - if err != nil { - return nil, err - } - if rc < len(f.changes) { - changes := []watch.Event{} - for _, c := range f.changes[rc:] { - // Must make a copy to allow clients to modify the - // object. Otherwise, if they make a change and write - // it back, they will inadvertently change the our - // canonical copy (in addition to racing with other - // clients). - objCopy, err := api.Scheme.DeepCopy(c.Object) - if err != nil { - return nil, err - } - changes = append(changes, watch.Event{Type: c.Type, Object: objCopy.(runtime.Object)}) - } - return f.Broadcaster.WatchWithPrefix(changes), nil - } else if rc > len(f.changes) { - return nil, errors.New("resource version in the future not supported by this fake") - } - return f.Broadcaster.Watch(), nil -} - -// Shutdown closes the underlying broadcaster, waiting for events to be -// delivered. It's an error to call any method after calling shutdown. This is -// enforced by Shutdown() leaving f locked. -func (f *FakeControllerSource) Shutdown() { - f.lock.Lock() // Purposely no unlock. - f.Broadcaster.Shutdown() -} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go b/vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go deleted file mode 100644 index c557bf975..000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go +++ /dev/null @@ -1,342 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "fmt" - "sync" - "time" - - "k8s.io/kubernetes/pkg/client/cache" - "k8s.io/kubernetes/pkg/runtime" - utilruntime "k8s.io/kubernetes/pkg/util/runtime" -) - -// if you use this, there is one behavior change compared to a standard Informer. -// When you receive a notification, the cache will be AT LEAST as fresh as the -// notification, but it MAY be more fresh. You should NOT depend on the contents -// of the cache exactly matching the notification you've received in handler -// functions. If there was a create, followed by a delete, the cache may NOT -// have your item. This has advantages over the broadcaster since it allows us -// to share a common cache across many controllers. Extending the broadcaster -// would have required us keep duplicate caches for each watch. -type SharedInformer interface { - // events to a single handler are delivered sequentially, but there is no coordination between different handlers - // You may NOT add a handler *after* the SharedInformer is running. That will result in an error being returned. - // TODO we should try to remove this restriction eventually. 
- AddEventHandler(handler ResourceEventHandler) error - GetStore() cache.Store - // GetController gives back a synthetic interface that "votes" to start the informer - GetController() ControllerInterface - Run(stopCh <-chan struct{}) - HasSynced() bool -} - -type SharedIndexInformer interface { - SharedInformer - // AddIndexers add indexers to the informer before it starts. - AddIndexers(indexers cache.Indexers) error - GetIndexer() cache.Indexer -} - -// NewSharedInformer creates a new instance for the listwatcher. -// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can -// be shared amongst all consumers. -func NewSharedInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer { - return NewSharedIndexInformer(lw, objType, resyncPeriod, cache.Indexers{}) -} - -// NewSharedIndexInformer creates a new instance for the listwatcher. -// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can -// be shared amongst all consumers. -func NewSharedIndexInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, indexers cache.Indexers) SharedIndexInformer { - sharedIndexInformer := &sharedIndexInformer{ - processor: &sharedProcessor{}, - indexer: cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers), - listerWatcher: lw, - objectType: objType, - fullResyncPeriod: resyncPeriod, - } - return sharedIndexInformer -} - -type sharedIndexInformer struct { - indexer cache.Indexer - controller *Controller - - processor *sharedProcessor - - // This block is tracked to handle late initialization of the controller - listerWatcher cache.ListerWatcher - objectType runtime.Object - fullResyncPeriod time.Duration - - started bool - startedLock sync.Mutex -} - -// dummyController hides the fact that a SharedInformer is different from a dedicated one -// where a caller can `Run`. The run method is disonnected in this case, because higher -// level logic will decide when to start the SharedInformer and related controller. -// Because returning information back is always asynchronous, the legacy callers shouldn't -// notice any change in behavior. 
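The shared informer removed here lets several handlers reuse one watch and one cache; handlers must be registered before Run is called, and the cache may be fresher than the notification being handled. A registration sketch, assuming the vendored packages removed in this diff; startSharedPodInformer is an illustrative name.

package example

import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/controller/framework"
)

func startSharedPodInformer(lw cache.ListerWatcher, stopCh <-chan struct{}) (framework.SharedIndexInformer, error) {
	informer := framework.NewSharedIndexInformer(lw, &api.Pod{}, 30*time.Second, cache.Indexers{})

	// Handlers must be added before Run; afterwards AddEventHandler returns an error.
	if err := informer.AddEventHandler(framework.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { /* react to pod creation */ },
		DeleteFunc: func(obj interface{}) { /* react to pod deletion */ },
	}); err != nil {
		return nil, err
	}

	go informer.Run(stopCh)
	return informer, nil
}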
-type dummyController struct { - informer *sharedIndexInformer -} - -func (v *dummyController) Run(stopCh <-chan struct{}) { -} - -func (v *dummyController) HasSynced() bool { - return v.informer.HasSynced() -} - -type updateNotification struct { - oldObj interface{} - newObj interface{} -} - -type addNotification struct { - newObj interface{} -} - -type deleteNotification struct { - oldObj interface{} -} - -func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { - defer utilruntime.HandleCrash() - - fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, s.indexer) - - cfg := &Config{ - Queue: fifo, - ListerWatcher: s.listerWatcher, - ObjectType: s.objectType, - FullResyncPeriod: s.fullResyncPeriod, - RetryOnError: false, - - Process: s.HandleDeltas, - } - - func() { - s.startedLock.Lock() - defer s.startedLock.Unlock() - - s.controller = New(cfg) - s.started = true - }() - - s.processor.run(stopCh) - s.controller.Run(stopCh) -} - -func (s *sharedIndexInformer) isStarted() bool { - s.startedLock.Lock() - defer s.startedLock.Unlock() - return s.started -} - -func (s *sharedIndexInformer) HasSynced() bool { - s.startedLock.Lock() - defer s.startedLock.Unlock() - - if s.controller == nil { - return false - } - return s.controller.HasSynced() -} - -func (s *sharedIndexInformer) GetStore() cache.Store { - return s.indexer -} - -func (s *sharedIndexInformer) GetIndexer() cache.Indexer { - return s.indexer -} - -func (s *sharedIndexInformer) AddIndexers(indexers cache.Indexers) error { - s.startedLock.Lock() - defer s.startedLock.Unlock() - - if s.started { - return fmt.Errorf("informer has already started") - } - - return s.indexer.AddIndexers(indexers) -} - -func (s *sharedIndexInformer) GetController() ControllerInterface { - return &dummyController{informer: s} -} - -func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) error { - s.startedLock.Lock() - defer s.startedLock.Unlock() - - if s.started { - return fmt.Errorf("informer has already started") - } - - listener := newProcessListener(handler) - s.processor.listeners = append(s.processor.listeners, listener) - return nil -} - -func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error { - // from oldest to newest - for _, d := range obj.(cache.Deltas) { - switch d.Type { - case cache.Sync, cache.Added, cache.Updated: - if old, exists, err := s.indexer.Get(d.Object); err == nil && exists { - if err := s.indexer.Update(d.Object); err != nil { - return err - } - s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object}) - } else { - if err := s.indexer.Add(d.Object); err != nil { - return err - } - s.processor.distribute(addNotification{newObj: d.Object}) - } - case cache.Deleted: - if err := s.indexer.Delete(d.Object); err != nil { - return err - } - s.processor.distribute(deleteNotification{oldObj: d.Object}) - } - } - return nil -} - -type sharedProcessor struct { - listeners []*processorListener -} - -func (p *sharedProcessor) distribute(obj interface{}) { - for _, listener := range p.listeners { - listener.add(obj) - } -} - -func (p *sharedProcessor) run(stopCh <-chan struct{}) { - for _, listener := range p.listeners { - go listener.run(stopCh) - go listener.pop(stopCh) - } -} - -type processorListener struct { - // lock/cond protects access to 'pendingNotifications'. 
- lock sync.RWMutex - cond sync.Cond - - // pendingNotifications is an unbounded slice that holds all notifications not yet distributed - // there is one per listener, but a failing/stalled listener will have infinite pendingNotifications - // added until we OOM. - // TODO This is no worse that before, since reflectors were backed by unbounded DeltaFIFOs, but - // we should try to do something better - pendingNotifications []interface{} - - nextCh chan interface{} - - handler ResourceEventHandler -} - -func newProcessListener(handler ResourceEventHandler) *processorListener { - ret := &processorListener{ - pendingNotifications: []interface{}{}, - nextCh: make(chan interface{}), - handler: handler, - } - - ret.cond.L = &ret.lock - return ret -} - -func (p *processorListener) add(notification interface{}) { - p.lock.Lock() - defer p.lock.Unlock() - - p.pendingNotifications = append(p.pendingNotifications, notification) - p.cond.Broadcast() -} - -func (p *processorListener) pop(stopCh <-chan struct{}) { - defer utilruntime.HandleCrash() - - for { - blockingGet := func() (interface{}, bool) { - p.lock.Lock() - defer p.lock.Unlock() - - for len(p.pendingNotifications) == 0 { - // check if we're shutdown - select { - case <-stopCh: - return nil, true - default: - } - p.cond.Wait() - } - - nt := p.pendingNotifications[0] - p.pendingNotifications = p.pendingNotifications[1:] - return nt, false - } - - notification, stopped := blockingGet() - if stopped { - return - } - - select { - case <-stopCh: - return - case p.nextCh <- notification: - } - } -} - -func (p *processorListener) run(stopCh <-chan struct{}) { - defer utilruntime.HandleCrash() - - for { - var next interface{} - select { - case <-stopCh: - func() { - p.lock.Lock() - defer p.lock.Unlock() - p.cond.Broadcast() - }() - return - case next = <-p.nextCh: - } - - switch notification := next.(type) { - case updateNotification: - p.handler.OnUpdate(notification.oldObj, notification.newObj) - case addNotification: - p.handler.OnAdd(notification.newObj) - case deleteNotification: - p.handler.OnDelete(notification.oldObj) - default: - utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next)) - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/lookup_cache.go b/vendor/k8s.io/kubernetes/pkg/controller/lookup_cache.go deleted file mode 100644 index 0333eff38..000000000 --- a/vendor/k8s.io/kubernetes/pkg/controller/lookup_cache.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "hash/adler32" - "sync" - - "github.com/golang/groupcache/lru" - "k8s.io/kubernetes/pkg/api/meta" - hashutil "k8s.io/kubernetes/pkg/util/hash" -) - -type objectWithMeta interface { - meta.Object -} - -// keyFunc returns the key of an object, which is used to look up in the cache for it's matching object. 
-// Since we match objects by namespace and Labels/Selector, so if two objects have the same namespace and labels, -// they will have the same key. -func keyFunc(obj objectWithMeta) uint64 { - hash := adler32.New() - hashutil.DeepHashObject(hash, &equivalenceLabelObj{ - namespace: obj.GetNamespace(), - labels: obj.GetLabels(), - }) - return uint64(hash.Sum32()) -} - -type equivalenceLabelObj struct { - namespace string - labels map[string]string -} - -// MatchingCache save label and selector matching relationship -type MatchingCache struct { - mutex sync.RWMutex - cache *lru.Cache -} - -// NewMatchingCache return a NewMatchingCache, which save label and selector matching relationship. -func NewMatchingCache(maxCacheEntries int) *MatchingCache { - return &MatchingCache{ - cache: lru.New(maxCacheEntries), - } -} - -// Add will add matching information to the cache. -func (c *MatchingCache) Add(labelObj objectWithMeta, selectorObj objectWithMeta) { - key := keyFunc(labelObj) - c.mutex.Lock() - defer c.mutex.Unlock() - c.cache.Add(key, selectorObj) -} - -// GetMatchingObject lookup the matching object for a given object. -// Note: the cache information may be invalid since the controller may be deleted or updated, -// we need check in the external request to ensure the cache data is not dirty. -func (c *MatchingCache) GetMatchingObject(labelObj objectWithMeta) (controller interface{}, exists bool) { - key := keyFunc(labelObj) - // NOTE: we use Lock() instead of RLock() here because lru's Get() method also modifies state( - // it need update the least recently usage information). So we can not call it concurrently. - c.mutex.Lock() - defer c.mutex.Unlock() - return c.cache.Get(key) -} - -// Update update the cached matching information. -func (c *MatchingCache) Update(labelObj objectWithMeta, selectorObj objectWithMeta) { - c.Add(labelObj, selectorObj) -} - -// InvalidateAll invalidate the whole cache. -func (c *MatchingCache) InvalidateAll() { - c.mutex.Lock() - defer c.mutex.Unlock() - c.cache = lru.New(c.cache.MaxEntries) -} diff --git a/vendor/k8s.io/kubernetes/pkg/conversion/OWNERS b/vendor/k8s.io/kubernetes/pkg/conversion/OWNERS deleted file mode 100644 index a046efc0c..000000000 --- a/vendor/k8s.io/kubernetes/pkg/conversion/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -assignees: - - derekwaynecarr - - lavalamp - - smarterclayton - - wojtek-t diff --git a/vendor/k8s.io/kubernetes/pkg/conversion/doc.go b/vendor/k8s.io/kubernetes/pkg/conversion/doc.go deleted file mode 100644 index 3ef2eaba4..000000000 --- a/vendor/k8s.io/kubernetes/pkg/conversion/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package conversion provides go object versioning. -// -// Specifically, conversion provides a way for you to define multiple versions -// of the same object. You may write functions which implement conversion logic, -// but for the fields which did not change, copying is automated. 
This makes it -// easy to modify the structures you use in memory without affecting the format -// you store on disk or respond to in your external API calls. -package conversion diff --git a/vendor/k8s.io/kubernetes/pkg/conversion/helper.go b/vendor/k8s.io/kubernetes/pkg/conversion/helper.go deleted file mode 100644 index 39f782659..000000000 --- a/vendor/k8s.io/kubernetes/pkg/conversion/helper.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package conversion - -import ( - "fmt" - "reflect" -) - -// EnforcePtr ensures that obj is a pointer of some sort. Returns a reflect.Value -// of the dereferenced pointer, ensuring that it is settable/addressable. -// Returns an error if this is not possible. -func EnforcePtr(obj interface{}) (reflect.Value, error) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - if v.Kind() == reflect.Invalid { - return reflect.Value{}, fmt.Errorf("expected pointer, but got invalid kind") - } - return reflect.Value{}, fmt.Errorf("expected pointer, but got %v type", v.Type()) - } - if v.IsNil() { - return reflect.Value{}, fmt.Errorf("expected pointer, but got nil") - } - return v.Elem(), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/convert.go b/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/convert.go deleted file mode 100644 index 63c545697..000000000 --- a/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/convert.go +++ /dev/null @@ -1,188 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package queryparams - -import ( - "fmt" - "net/url" - "reflect" - "strings" -) - -// Marshaler converts an object to a query parameter string representation -type Marshaler interface { - MarshalQueryParameter() (string, error) -} - -// Unmarshaler converts a string representation to an object -type Unmarshaler interface { - UnmarshalQueryParameter(string) error -} - -func jsonTag(field reflect.StructField) (string, bool) { - structTag := field.Tag.Get("json") - if len(structTag) == 0 { - return "", false - } - parts := strings.Split(structTag, ",") - tag := parts[0] - if tag == "-" { - tag = "" - } - omitempty := false - parts = parts[1:] - for _, part := range parts { - if part == "omitempty" { - omitempty = true - break - } - } - return tag, omitempty -} - -func formatValue(value interface{}) string { - return fmt.Sprintf("%v", value) -} - -func isPointerKind(kind reflect.Kind) bool { - return kind == reflect.Ptr -} - -func isStructKind(kind reflect.Kind) bool { - return kind == reflect.Struct -} - -func isValueKind(kind reflect.Kind) bool { - switch kind { - case reflect.String, reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, - reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, - reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, - reflect.Float64, reflect.Complex64, reflect.Complex128: - return true - default: - return false - } -} - -func zeroValue(value reflect.Value) bool { - return reflect.DeepEqual(reflect.Zero(value.Type()).Interface(), value.Interface()) -} - -func customMarshalValue(value reflect.Value) (reflect.Value, bool) { - // Return unless we implement a custom query marshaler - if !value.CanInterface() { - return reflect.Value{}, false - } - - marshaler, ok := value.Interface().(Marshaler) - if !ok { - return reflect.Value{}, false - } - - // Don't invoke functions on nil pointers - // If the type implements MarshalQueryParameter, AND the tag is not omitempty, AND the value is a nil pointer, "" seems like a reasonable response - if isPointerKind(value.Kind()) && zeroValue(value) { - return reflect.ValueOf(""), true - } - - // Get the custom marshalled value - v, err := marshaler.MarshalQueryParameter() - if err != nil { - return reflect.Value{}, false - } - return reflect.ValueOf(v), true -} - -func addParam(values url.Values, tag string, omitempty bool, value reflect.Value) { - if omitempty && zeroValue(value) { - return - } - val := "" - iValue := fmt.Sprintf("%v", value.Interface()) - - if iValue != "<nil>" { - val = iValue - } - values.Add(tag, val) -} - -func addListOfParams(values url.Values, tag string, omitempty bool, list reflect.Value) { - for i := 0; i < list.Len(); i++ { - addParam(values, tag, omitempty, list.Index(i)) - } -} - -// Convert takes an object and converts it to a url.Values object using JSON tags as -// parameter names. Only top-level simple values, arrays, and slices are serialized. -// Embedded structs, maps, etc. will not be serialized. 
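The Convert helper documented above (and defined just below in the diff) flattens a struct's json-tagged, top-level scalar fields into url.Values. As a hedged, self-contained sketch of the same idea — standard library only, with a hypothetical ListOptions type rather than the vendored API:

package main

import (
	"fmt"
	"net/url"
	"reflect"
	"strings"
)

// ListOptions is an illustrative options struct; only top-level simple
// fields carrying json tags are serialized, mirroring the rule above.
type ListOptions struct {
	LabelSelector string `json:"labelSelector,omitempty"`
	Watch         bool   `json:"watch,omitempty"`
	Limit         int64  `json:"limit,omitempty"`
}

// toQuery is a simplified stand-in for queryparams.Convert.
func toQuery(obj interface{}) (url.Values, error) {
	v := reflect.ValueOf(obj)
	if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
		return nil, fmt.Errorf("expecting a pointer to a struct")
	}
	v = v.Elem()
	t := v.Type()
	values := url.Values{}
	for i := 0; i < t.NumField(); i++ {
		tagParts := strings.Split(t.Field(i).Tag.Get("json"), ",")
		tag := tagParts[0]
		if tag == "" || tag == "-" {
			continue
		}
		omitempty := len(tagParts) > 1 && tagParts[1] == "omitempty"
		field := v.Field(i)
		// Skip zero values when the tag asks for omitempty.
		if omitempty && field.Interface() == reflect.Zero(field.Type()).Interface() {
			continue
		}
		values.Add(tag, fmt.Sprintf("%v", field.Interface()))
	}
	return values, nil
}

func main() {
	q, _ := toQuery(&ListOptions{LabelSelector: "app=nginx", Limit: 10})
	fmt.Println(q.Encode()) // labelSelector=app%3Dnginx&limit=10
}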
-func Convert(obj interface{}) (url.Values, error) { - result := url.Values{} - if obj == nil { - return result, nil - } - var sv reflect.Value - switch reflect.TypeOf(obj).Kind() { - case reflect.Ptr, reflect.Interface: - sv = reflect.ValueOf(obj).Elem() - default: - return nil, fmt.Errorf("expecting a pointer or interface") - } - st := sv.Type() - if !isStructKind(st.Kind()) { - return nil, fmt.Errorf("expecting a pointer to a struct") - } - - // Check all object fields - convertStruct(result, st, sv) - - return result, nil -} - -func convertStruct(result url.Values, st reflect.Type, sv reflect.Value) { - for i := 0; i < st.NumField(); i++ { - field := sv.Field(i) - tag, omitempty := jsonTag(st.Field(i)) - if len(tag) == 0 { - continue - } - ft := field.Type() - - kind := ft.Kind() - if isPointerKind(kind) { - ft = ft.Elem() - kind = ft.Kind() - if !field.IsNil() { - field = reflect.Indirect(field) - } - } - - switch { - case isValueKind(kind): - addParam(result, tag, omitempty, field) - case kind == reflect.Array || kind == reflect.Slice: - if isValueKind(ft.Elem().Kind()) { - addListOfParams(result, tag, omitempty, field) - } - case isStructKind(kind) && !(zeroValue(field) && omitempty): - if marshalValue, ok := customMarshalValue(field); ok { - addParam(result, tag, omitempty, marshalValue) - } else { - convertStruct(result, ft, field) - } - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/doc.go b/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/doc.go deleted file mode 100644 index 0e9127a18..000000000 --- a/vendor/k8s.io/kubernetes/pkg/conversion/queryparams/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package queryparams provides conversion from versioned -// runtime objects to URL query values -package queryparams diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/OWNERS b/vendor/k8s.io/kubernetes/pkg/credentialprovider/OWNERS deleted file mode 100644 index 766c481bd..000000000 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/OWNERS +++ /dev/null @@ -1,3 +0,0 @@ -assignees: - - erictune - - liggitt diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go deleted file mode 100644 index b80fa5945..000000000 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/config.go +++ /dev/null @@ -1,256 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package credentialprovider - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/golang/glog" -) - -// DockerConfigJson represents ~/.docker/config.json file info -// see https://github.com/docker/docker/pull/12009 -type DockerConfigJson struct { - Auths DockerConfig `json:"auths"` - HttpHeaders map[string]string `json:"HttpHeaders,omitempty"` -} - -// DockerConfig represents the config file used by the docker CLI. -// This config that represents the credentials that should be used -// when pulling images from specific image repositories. -type DockerConfig map[string]DockerConfigEntry - -type DockerConfigEntry struct { - Username string - Password string - Email string - Provider DockerConfigProvider -} - -var ( - preferredPathLock sync.Mutex - preferredPath = "" - workingDirPath = "" - homeDirPath = os.Getenv("HOME") - rootDirPath = "/" - homeJsonDirPath = filepath.Join(homeDirPath, ".docker") - rootJsonDirPath = filepath.Join(rootDirPath, ".docker") - - configFileName = ".dockercfg" - configJsonFileName = "config.json" -) - -func SetPreferredDockercfgPath(path string) { - preferredPathLock.Lock() - defer preferredPathLock.Unlock() - preferredPath = path -} - -func GetPreferredDockercfgPath() string { - preferredPathLock.Lock() - defer preferredPathLock.Unlock() - return preferredPath -} - -func ReadDockerConfigFile() (cfg DockerConfig, err error) { - // Try happy path first - latest config file - dockerConfigJsonLocations := []string{GetPreferredDockercfgPath(), workingDirPath, homeJsonDirPath, rootJsonDirPath} - for _, configPath := range dockerConfigJsonLocations { - absDockerConfigFileLocation, err := filepath.Abs(filepath.Join(configPath, configJsonFileName)) - if err != nil { - glog.Errorf("while trying to canonicalize %s: %v", configPath, err) - continue - } - glog.V(4).Infof("looking for .docker/config.json at %s", absDockerConfigFileLocation) - contents, err := ioutil.ReadFile(absDockerConfigFileLocation) - if os.IsNotExist(err) { - continue - } - if err != nil { - glog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err) - continue - } - cfg, err := readDockerConfigJsonFileFromBytes(contents) - if err == nil { - glog.V(4).Infof("found .docker/config.json at %s", absDockerConfigFileLocation) - return cfg, nil - } - } - glog.V(4).Infof("couldn't find valid .docker/config.json after checking in %v", dockerConfigJsonLocations) - - // Can't find latest config file so check for the old one - dockerConfigFileLocations := []string{GetPreferredDockercfgPath(), workingDirPath, homeDirPath, rootDirPath} - for _, configPath := range dockerConfigFileLocations { - absDockerConfigFileLocation, err := filepath.Abs(filepath.Join(configPath, configFileName)) - if err != nil { - glog.Errorf("while trying to canonicalize %s: %v", configPath, err) - continue - } - glog.V(4).Infof("looking for .dockercfg at %s", absDockerConfigFileLocation) - contents, err := ioutil.ReadFile(absDockerConfigFileLocation) - if os.IsNotExist(err) { - continue - } - if err != nil { - glog.V(4).Infof("while trying to read %s: %v", absDockerConfigFileLocation, err) - continue - } - cfg, err := readDockerConfigFileFromBytes(contents) - if err == nil { - glog.V(4).Infof("found .dockercfg at %s", absDockerConfigFileLocation) - return cfg, nil - } - } - return nil, fmt.Errorf("couldn't find valid 
.dockercfg after checking in %v", dockerConfigFileLocations) -} - -// HttpError wraps a non-StatusOK error code as an error. -type HttpError struct { - StatusCode int - Url string -} - -// Error implements error -func (he *HttpError) Error() string { - return fmt.Sprintf("http status code: %d while fetching url %s", - he.StatusCode, he.Url) -} - -func ReadUrl(url string, client *http.Client, header *http.Header) (body []byte, err error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - if header != nil { - req.Header = *header - } - resp, err := client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - glog.V(2).Infof("body of failing http response: %v", resp.Body) - return nil, &HttpError{ - StatusCode: resp.StatusCode, - Url: url, - } - } - - contents, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - return contents, nil -} - -func ReadDockerConfigFileFromUrl(url string, client *http.Client, header *http.Header) (cfg DockerConfig, err error) { - if contents, err := ReadUrl(url, client, header); err != nil { - return nil, err - } else { - return readDockerConfigFileFromBytes(contents) - } -} - -func readDockerConfigFileFromBytes(contents []byte) (cfg DockerConfig, err error) { - if err = json.Unmarshal(contents, &cfg); err != nil { - glog.Errorf("while trying to parse blob %q: %v", contents, err) - return nil, err - } - return -} - -func readDockerConfigJsonFileFromBytes(contents []byte) (cfg DockerConfig, err error) { - var cfgJson DockerConfigJson - if err = json.Unmarshal(contents, &cfgJson); err != nil { - glog.Errorf("while trying to parse blob %q: %v", contents, err) - return nil, err - } - cfg = cfgJson.Auths - return -} - -// dockerConfigEntryWithAuth is used solely for deserializing the Auth field -// into a dockerConfigEntry during JSON deserialization. -type dockerConfigEntryWithAuth struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Email string `json:"email,omitempty"` - Auth string `json:"auth,omitempty"` -} - -func (ident *DockerConfigEntry) UnmarshalJSON(data []byte) error { - var tmp dockerConfigEntryWithAuth - err := json.Unmarshal(data, &tmp) - if err != nil { - return err - } - - ident.Username = tmp.Username - ident.Password = tmp.Password - ident.Email = tmp.Email - - if len(tmp.Auth) == 0 { - return nil - } - - ident.Username, ident.Password, err = decodeDockerConfigFieldAuth(tmp.Auth) - return err -} - -func (ident DockerConfigEntry) MarshalJSON() ([]byte, error) { - toEncode := dockerConfigEntryWithAuth{ident.Username, ident.Password, ident.Email, ""} - toEncode.Auth = encodeDockerConfigFieldAuth(ident.Username, ident.Password) - - return json.Marshal(toEncode) -} - -// decodeDockerConfigFieldAuth deserializes the "auth" field from dockercfg into a -// username and a password. The format of the auth field is base64(<username>:<password>). 
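The base64(&lt;username&gt;:&lt;password&gt;) convention described above is easy to demonstrate with the standard library alone; a small illustrative round trip with made-up credentials (not the vendored code):

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// Encode the way a dockercfg "auth" field is built: base64(username:password).
	auth := base64.StdEncoding.EncodeToString([]byte("alice:s3cret"))
	fmt.Println(auth) // YWxpY2U6czNjcmV0

	// Decode it back into its two parts, splitting on the first colon only.
	decoded, err := base64.StdEncoding.DecodeString(auth)
	if err != nil {
		panic(err)
	}
	parts := strings.SplitN(string(decoded), ":", 2)
	fmt.Println(parts[0], parts[1]) // alice s3cret
}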
-func decodeDockerConfigFieldAuth(field string) (username, password string, err error) { - decoded, err := base64.StdEncoding.DecodeString(field) - if err != nil { - return - } - - parts := strings.SplitN(string(decoded), ":", 2) - if len(parts) != 2 { - err = fmt.Errorf("unable to parse auth field") - return - } - - username = parts[0] - password = parts[1] - - return -} - -func encodeDockerConfigFieldAuth(username, password string) string { - fieldValue := username + ":" + password - - return base64.StdEncoding.EncodeToString([]byte(fieldValue)) -} diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/doc.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/doc.go deleted file mode 100644 index f071c0c89..000000000 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package credentialprovider supplies interfaces and implementations for -// docker registry providers to expose their authentication scheme. -package credentialprovider diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/keyring.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/keyring.go deleted file mode 100644 index eedbee5ad..000000000 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/keyring.go +++ /dev/null @@ -1,338 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package credentialprovider - -import ( - "encoding/json" - "net" - "net/url" - "path/filepath" - "sort" - "strings" - - "github.com/golang/glog" - - dockertypes "github.com/docker/engine-api/types" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/util/sets" -) - -// DockerKeyring tracks a set of docker registry credentials, maintaining a -// reverse index across the registry endpoints. A registry endpoint is made -// up of a host (e.g. registry.example.com), but it may also contain a path -// (e.g. 
registry.example.com/foo) This index is important for two reasons: -// - registry endpoints may overlap, and when this happens we must find the -// most specific match for a given image -// - iterating a map does not yield predictable results -type DockerKeyring interface { - Lookup(image string) ([]LazyAuthConfiguration, bool) -} - -// BasicDockerKeyring is a trivial map-backed implementation of DockerKeyring -type BasicDockerKeyring struct { - index []string - creds map[string][]LazyAuthConfiguration -} - -// lazyDockerKeyring is an implementation of DockerKeyring that lazily -// materializes its dockercfg based on a set of dockerConfigProviders. -type lazyDockerKeyring struct { - Providers []DockerConfigProvider -} - -// LazyAuthConfiguration wraps dockertypes.AuthConfig, potentially deferring its -// binding. If Provider is non-nil, it will be used to obtain new credentials -// by calling LazyProvide() on it. -type LazyAuthConfiguration struct { - dockertypes.AuthConfig - Provider DockerConfigProvider -} - -func DockerConfigEntryToLazyAuthConfiguration(ident DockerConfigEntry) LazyAuthConfiguration { - return LazyAuthConfiguration{ - AuthConfig: dockertypes.AuthConfig{ - Username: ident.Username, - Password: ident.Password, - Email: ident.Email, - }, - } -} - -func (dk *BasicDockerKeyring) Add(cfg DockerConfig) { - if dk.index == nil { - dk.index = make([]string, 0) - dk.creds = make(map[string][]LazyAuthConfiguration) - } - for loc, ident := range cfg { - - var creds LazyAuthConfiguration - if ident.Provider != nil { - creds = LazyAuthConfiguration{ - Provider: ident.Provider, - } - } else { - creds = DockerConfigEntryToLazyAuthConfiguration(ident) - } - - value := loc - if !strings.HasPrefix(value, "https://") && !strings.HasPrefix(value, "http://") { - value = "https://" + value - } - parsed, err := url.Parse(value) - if err != nil { - glog.Errorf("Entry %q in dockercfg invalid (%v), ignoring", loc, err) - continue - } - - // The docker client allows exact matches: - // foo.bar.com/namespace - // Or hostname matches: - // foo.bar.com - // It also considers /v2/ and /v1/ equivalent to the hostname - // See ResolveAuthConfig in docker/registry/auth.go. - effectivePath := parsed.Path - if strings.HasPrefix(effectivePath, "/v2/") || strings.HasPrefix(effectivePath, "/v1/") { - effectivePath = effectivePath[3:] - } - var key string - if (len(effectivePath) > 0) && (effectivePath != "/") { - key = parsed.Host + effectivePath - } else { - key = parsed.Host - } - dk.creds[key] = append(dk.creds[key], creds) - dk.index = append(dk.index, key) - } - - eliminateDupes := sets.NewString(dk.index...) - dk.index = eliminateDupes.List() - - // Update the index used to identify which credentials to use for a given - // image. The index is reverse-sorted so more specific paths are matched - // first. For example, if for the given image "quay.io/coreos/etcd", - // credentials for "quay.io/coreos" should match before "quay.io". - sort.Sort(sort.Reverse(sort.StringSlice(dk.index))) -} - -const ( - defaultRegistryHost = "index.docker.io" - defaultRegistryKey = defaultRegistryHost + "/v1/" -) - -// isDefaultRegistryMatch determines whether the given image will -// pull from the default registry (DockerHub) based on the -// characteristics of its name. -func isDefaultRegistryMatch(image string) bool { - parts := strings.SplitN(image, "/", 2) - - if len(parts[0]) == 0 { - return false - } - - if len(parts) == 1 { - // e.g. 
library/ubuntu - return true - } - - if parts[0] == "docker.io" || parts[0] == "index.docker.io" { - // resolve docker.io/image and index.docker.io/image as default registry - return true - } - - // From: http://blog.docker.com/2013/07/how-to-use-your-own-registry/ - // Docker looks for either a “.” (domain separator) or “:” (port separator) - // to learn that the first part of the repository name is a location and not - // a user name. - return !strings.ContainsAny(parts[0], ".:") -} - -// url.Parse require a scheme, but ours don't have schemes. Adding a -// scheme to make url.Parse happy, then clear out the resulting scheme. -func parseSchemelessUrl(schemelessUrl string) (*url.URL, error) { - parsed, err := url.Parse("https://" + schemelessUrl) - if err != nil { - return nil, err - } - // clear out the resulting scheme - parsed.Scheme = "" - return parsed, nil -} - -// split the host name into parts, as well as the port -func splitUrl(url *url.URL) (parts []string, port string) { - host, port, err := net.SplitHostPort(url.Host) - if err != nil { - // could not parse port - host, port = url.Host, "" - } - return strings.Split(host, "."), port -} - -// overloaded version of urlsMatch, operating on strings instead of URLs. -func urlsMatchStr(glob string, target string) (bool, error) { - globUrl, err := parseSchemelessUrl(glob) - if err != nil { - return false, err - } - targetUrl, err := parseSchemelessUrl(target) - if err != nil { - return false, err - } - return urlsMatch(globUrl, targetUrl) -} - -// check whether the given target url matches the glob url, which may have -// glob wild cards in the host name. -// -// Examples: -// globUrl=*.docker.io, targetUrl=blah.docker.io => match -// globUrl=*.docker.io, targetUrl=not.right.io => no match -// -// Note that we don't support wildcards in ports and paths yet. -func urlsMatch(globUrl *url.URL, targetUrl *url.URL) (bool, error) { - globUrlParts, globPort := splitUrl(globUrl) - targetUrlParts, targetPort := splitUrl(targetUrl) - if globPort != targetPort { - // port doesn't match - return false, nil - } - if len(globUrlParts) != len(targetUrlParts) { - // host name does not have the same number of parts - return false, nil - } - if !strings.HasPrefix(targetUrl.Path, globUrl.Path) { - // the path of the credential must be a prefix - return false, nil - } - for k, globUrlPart := range globUrlParts { - targetUrlPart := targetUrlParts[k] - matched, err := filepath.Match(globUrlPart, targetUrlPart) - if err != nil { - return false, err - } - if !matched { - // glob mismatch for some part - return false, nil - } - } - // everything matches - return true, nil -} - -// Lookup implements the DockerKeyring method for fetching credentials based on image name. -// Multiple credentials may be returned if there are multiple potentially valid credentials -// available. This allows for rotation. -func (dk *BasicDockerKeyring) Lookup(image string) ([]LazyAuthConfiguration, bool) { - // range over the index as iterating over a map does not provide a predictable ordering - ret := []LazyAuthConfiguration{} - for _, k := range dk.index { - // both k and image are schemeless URLs because even though schemes are allowed - // in the credential configurations, we remove them in Add. - if matched, _ := urlsMatchStr(k, image); !matched { - continue - } - - ret = append(ret, dk.creds[k]...) 
- } - - if len(ret) > 0 { - return ret, true - } - - // Use credentials for the default registry if provided, and appropriate - if isDefaultRegistryMatch(image) { - if auth, ok := dk.creds[defaultRegistryHost]; ok { - return auth, true - } - } - - return []LazyAuthConfiguration{}, false -} - -// Lookup implements the DockerKeyring method for fetching credentials -// based on image name. -func (dk *lazyDockerKeyring) Lookup(image string) ([]LazyAuthConfiguration, bool) { - keyring := &BasicDockerKeyring{} - - for _, p := range dk.Providers { - keyring.Add(p.Provide()) - } - - return keyring.Lookup(image) -} - -type FakeKeyring struct { - auth []LazyAuthConfiguration - ok bool -} - -func (f *FakeKeyring) Lookup(image string) ([]LazyAuthConfiguration, bool) { - return f.auth, f.ok -} - -// unionDockerKeyring delegates to a set of keyrings. -type unionDockerKeyring struct { - keyrings []DockerKeyring -} - -func (k *unionDockerKeyring) Lookup(image string) ([]LazyAuthConfiguration, bool) { - authConfigs := []LazyAuthConfiguration{} - for _, subKeyring := range k.keyrings { - if subKeyring == nil { - continue - } - - currAuthResults, _ := subKeyring.Lookup(image) - authConfigs = append(authConfigs, currAuthResults...) - } - - return authConfigs, (len(authConfigs) > 0) -} - -// MakeDockerKeyring inspects the passedSecrets to see if they contain any DockerConfig secrets. If they do, -// then a DockerKeyring is built based on every hit and unioned with the defaultKeyring. -// If they do not, then the default keyring is returned -func MakeDockerKeyring(passedSecrets []api.Secret, defaultKeyring DockerKeyring) (DockerKeyring, error) { - passedCredentials := []DockerConfig{} - for _, passedSecret := range passedSecrets { - if dockerConfigJsonBytes, dockerConfigJsonExists := passedSecret.Data[api.DockerConfigJsonKey]; (passedSecret.Type == api.SecretTypeDockerConfigJson) && dockerConfigJsonExists && (len(dockerConfigJsonBytes) > 0) { - dockerConfigJson := DockerConfigJson{} - if err := json.Unmarshal(dockerConfigJsonBytes, &dockerConfigJson); err != nil { - return nil, err - } - - passedCredentials = append(passedCredentials, dockerConfigJson.Auths) - } else if dockercfgBytes, dockercfgExists := passedSecret.Data[api.DockerConfigKey]; (passedSecret.Type == api.SecretTypeDockercfg) && dockercfgExists && (len(dockercfgBytes) > 0) { - dockercfg := DockerConfig{} - if err := json.Unmarshal(dockercfgBytes, &dockercfg); err != nil { - return nil, err - } - - passedCredentials = append(passedCredentials, dockercfg) - } - } - - if len(passedCredentials) > 0 { - basicKeyring := &BasicDockerKeyring{} - for _, currCredentials := range passedCredentials { - basicKeyring.Add(currCredentials) - } - return &unionDockerKeyring{[]DockerKeyring{basicKeyring, defaultKeyring}}, nil - } - - return defaultKeyring, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugins.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugins.go deleted file mode 100644 index a871cc02b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/plugins.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package credentialprovider - -import ( - "sync" - - "github.com/golang/glog" -) - -// All registered credential providers. -var providersMutex sync.Mutex -var providers = make(map[string]DockerConfigProvider) - -// RegisterCredentialProvider is called by provider implementations on -// initialization to register themselves, like so: -// func init() { -// RegisterCredentialProvider("name", &myProvider{...}) -// } -func RegisterCredentialProvider(name string, provider DockerConfigProvider) { - providersMutex.Lock() - defer providersMutex.Unlock() - _, found := providers[name] - if found { - glog.Fatalf("Credential provider %q was registered twice", name) - } - glog.V(4).Infof("Registered credential provider %q", name) - providers[name] = provider -} - -// NewDockerKeyring creates a DockerKeyring to use for resolving credentials, -// which lazily draws from the set of registered credential providers. -func NewDockerKeyring() DockerKeyring { - keyring := &lazyDockerKeyring{ - Providers: make([]DockerConfigProvider, 0), - } - - // TODO(mattmoor): iterating over the map is non-deterministic. We should - // introduce the notion of priorities for conflict resolution. - for name, provider := range providers { - if provider.Enabled() { - glog.V(4).Infof("Registering credential provider: %v", name) - keyring.Providers = append(keyring.Providers, provider) - } - } - - return keyring -} diff --git a/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go b/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go deleted file mode 100644 index 8df116314..000000000 --- a/vendor/k8s.io/kubernetes/pkg/credentialprovider/provider.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package credentialprovider - -import ( - "os" - "reflect" - "sync" - "time" - - dockertypes "github.com/docker/engine-api/types" - "github.com/golang/glog" -) - -// DockerConfigProvider is the interface that registered extensions implement -// to materialize 'dockercfg' credentials. -type DockerConfigProvider interface { - // Enabled returns true if the config provider is enabled. - // Implementations can be blocking - e.g. metadata server unavailable. - Enabled() bool - // Provide returns docker configuration. - // Implementations can be blocking - e.g. metadata server unavailable. - Provide() DockerConfig - // LazyProvide() gets called after URL matches have been performed, so the - // location used as the key in DockerConfig would be redundant. 
- LazyProvide() *DockerConfigEntry -} - -func LazyProvide(creds LazyAuthConfiguration) dockertypes.AuthConfig { - if creds.Provider != nil { - entry := *creds.Provider.LazyProvide() - return DockerConfigEntryToLazyAuthConfiguration(entry).AuthConfig - } else { - return creds.AuthConfig - } - -} - -// A DockerConfigProvider that simply reads the .dockercfg file -type defaultDockerConfigProvider struct{} - -// init registers our default provider, which simply reads the .dockercfg file. -func init() { - RegisterCredentialProvider(".dockercfg", - &CachingDockerConfigProvider{ - Provider: &defaultDockerConfigProvider{}, - Lifetime: 5 * time.Minute, - }) -} - -// CachingDockerConfigProvider implements DockerConfigProvider by composing -// with another DockerConfigProvider and caching the DockerConfig it provides -// for a pre-specified lifetime. -type CachingDockerConfigProvider struct { - Provider DockerConfigProvider - Lifetime time.Duration - - // cache fields - cacheDockerConfig DockerConfig - expiration time.Time - mu sync.Mutex -} - -// Enabled implements dockerConfigProvider -func (d *defaultDockerConfigProvider) Enabled() bool { - return true -} - -// Provide implements dockerConfigProvider -func (d *defaultDockerConfigProvider) Provide() DockerConfig { - // Read the standard Docker credentials from .dockercfg - if cfg, err := ReadDockerConfigFile(); err == nil { - return cfg - } else if !os.IsNotExist(err) { - glog.V(4).Infof("Unable to parse Docker config file: %v", err) - } - return DockerConfig{} -} - -// LazyProvide implements dockerConfigProvider. Should never be called. -func (d *defaultDockerConfigProvider) LazyProvide() *DockerConfigEntry { - return nil -} - -// Enabled implements dockerConfigProvider -func (d *CachingDockerConfigProvider) Enabled() bool { - return d.Provider.Enabled() -} - -// LazyProvide implements dockerConfigProvider. Should never be called. -func (d *CachingDockerConfigProvider) LazyProvide() *DockerConfigEntry { - return nil -} - -// Provide implements dockerConfigProvider -func (d *CachingDockerConfigProvider) Provide() DockerConfig { - d.mu.Lock() - defer d.mu.Unlock() - - // If the cache hasn't expired, return our cache - if time.Now().Before(d.expiration) { - return d.cacheDockerConfig - } - - glog.V(2).Infof("Refreshing cache for provider: %v", reflect.TypeOf(d.Provider).String()) - d.cacheDockerConfig = d.Provider.Provide() - d.expiration = time.Now().Add(d.Lifetime) - return d.cacheDockerConfig -} diff --git a/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go b/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go deleted file mode 100644 index c91ff6eca..000000000 --- a/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package fieldpath supplies methods for extracting fields from objects -// given a path to a field. 
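Before the fieldpath implementation that follows in the diff, a rough idea of what "extracting a field given a path" means in practice. This sketch whitelists two well-known paths against a stand-in metadata struct; the vendored helper additionally handles metadata.labels and metadata.annotations:

package main

import "fmt"

// objectMeta stands in for the metadata every Kubernetes object carries.
type objectMeta struct {
	Name      string
	Namespace string
}

// extractFieldPath mirrors the supported-paths idea of ExtractFieldPathAsString:
// only a small whitelist of paths is handled, everything else is an error.
func extractFieldPath(meta objectMeta, fieldPath string) (string, error) {
	switch fieldPath {
	case "metadata.name":
		return meta.Name, nil
	case "metadata.namespace":
		return meta.Namespace, nil
	default:
		return "", fmt.Errorf("unsupported fieldPath: %v", fieldPath)
	}
}

func main() {
	m := objectMeta{Name: "kubewatch-7d4", Namespace: "default"}
	v, _ := extractFieldPath(m, "metadata.namespace")
	fmt.Println(v) // default
}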
-package fieldpath diff --git a/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go b/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go deleted file mode 100644 index bede9b2c0..000000000 --- a/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fieldpath - -import ( - "fmt" - "math" - "strconv" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/resource" -) - -// formatMap formats map[string]string to a string. -func FormatMap(m map[string]string) (fmtStr string) { - for key, value := range m { - fmtStr += fmt.Sprintf("%v=%q\n", key, value) - } - fmtStr = strings.TrimSuffix(fmtStr, "\n") - - return -} - -// ExtractFieldPathAsString extracts the field from the given object -// and returns it as a string. The object must be a pointer to an -// API type. -// -// Currently, this API is limited to supporting the fieldpaths: -// -// 1. metadata.name - The name of an API object -// 2. metadata.namespace - The namespace of an API object -func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) { - accessor, err := meta.Accessor(obj) - if err != nil { - return "", nil - } - - switch fieldPath { - case "metadata.annotations": - return FormatMap(accessor.GetAnnotations()), nil - case "metadata.labels": - return FormatMap(accessor.GetLabels()), nil - case "metadata.name": - return accessor.GetName(), nil - case "metadata.namespace": - return accessor.GetNamespace(), nil - } - - return "", fmt.Errorf("Unsupported fieldPath: %v", fieldPath) -} - -// ExtractResourceValueByContainerName extracts the value of a resource -// by providing container name -func ExtractResourceValueByContainerName(fs *api.ResourceFieldSelector, pod *api.Pod, containerName string) (string, error) { - container, err := findContainerInPod(pod, containerName) - if err != nil { - return "", err - } - return ExtractContainerResourceValue(fs, container) -} - -// ExtractContainerResourceValue extracts the value of a resource -// in an already known container -func ExtractContainerResourceValue(fs *api.ResourceFieldSelector, container *api.Container) (string, error) { - divisor := resource.Quantity{} - if divisor.Cmp(fs.Divisor) == 0 { - divisor = resource.MustParse("1") - } else { - divisor = fs.Divisor - } - - switch fs.Resource { - case "limits.cpu": - return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor) - case "limits.memory": - return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor) - case "requests.cpu": - return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor) - case "requests.memory": - return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor) - } - - return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource) -} - -// findContainerInPod finds a container by its name in the provided pod -func 
findContainerInPod(pod *api.Pod, containerName string) (*api.Container, error) { - for _, container := range pod.Spec.Containers { - if container.Name == containerName { - return &container, nil - } - } - return nil, fmt.Errorf("container %s not found", containerName) -} - -// convertResourceCPUTOString converts cpu value to the format of divisor and returns -// ceiling of the value. -func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) { - c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue()))) - return strconv.FormatInt(c, 10), nil -} - -// convertResourceMemoryToString converts memory value to the format of divisor and returns -// ceiling of the value. -func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) { - m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value()))) - return strconv.FormatInt(m, 10), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/fields/doc.go b/vendor/k8s.io/kubernetes/pkg/fields/doc.go deleted file mode 100644 index 767615c9f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/fields/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package fields implements a simple field system, parsing and matching -// selectors with sets of fields. -package fields diff --git a/vendor/k8s.io/kubernetes/pkg/fields/fields.go b/vendor/k8s.io/kubernetes/pkg/fields/fields.go deleted file mode 100644 index 50fef14a8..000000000 --- a/vendor/k8s.io/kubernetes/pkg/fields/fields.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fields - -import ( - "sort" - "strings" -) - -// Fields allows you to present fields independently from their storage. -type Fields interface { - // Has returns whether the provided field exists. - Has(field string) (exists bool) - - // Get returns the value for the provided field. - Get(field string) (value string) -} - -// Set is a map of field:value. It implements Fields. -type Set map[string]string - -// String returns all fields listed as a human readable string. -// Conveniently, exactly the format that ParseSelector takes. -func (ls Set) String() string { - selector := make([]string, 0, len(ls)) - for key, value := range ls { - selector = append(selector, key+"="+value) - } - // Sort for determinism. 
- sort.StringSlice(selector).Sort() - return strings.Join(selector, ",") -} - -// Has returns whether the provided field exists in the map. -func (ls Set) Has(field string) bool { - _, exists := ls[field] - return exists -} - -// Get returns the value in the map for the provided field. -func (ls Set) Get(field string) string { - return ls[field] -} - -// AsSelector converts fields into a selectors. -func (ls Set) AsSelector() Selector { - return SelectorFromSet(ls) -} diff --git a/vendor/k8s.io/kubernetes/pkg/fields/selector.go b/vendor/k8s.io/kubernetes/pkg/fields/selector.go deleted file mode 100644 index c0a638581..000000000 --- a/vendor/k8s.io/kubernetes/pkg/fields/selector.go +++ /dev/null @@ -1,247 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fields - -import ( - "fmt" - "sort" - "strings" -) - -// Selector represents a field selector. -type Selector interface { - // Matches returns true if this selector matches the given set of fields. - Matches(Fields) bool - - // Empty returns true if this selector does not restrict the selection space. - Empty() bool - - // RequiresExactMatch allows a caller to introspect whether a given selector - // requires a single specific field to be set, and if so returns the value it - // requires. - RequiresExactMatch(field string) (value string, found bool) - - // Transform returns a new copy of the selector after TransformFunc has been - // applied to the entire selector, or an error if fn returns an error. - Transform(fn TransformFunc) (Selector, error) - - // String returns a human readable string that represents this selector. - String() string -} - -// Everything returns a selector that matches all fields. 
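As a quick aside on the fields API above: a Set renders as the same deterministic, comma-separated k=v form that ParseSelector later consumes. A minimal sketch with an illustrative fieldSet type (not the vendored implementation):

package main

import (
	"fmt"
	"sort"
	"strings"
)

// fieldSet mirrors fields.Set: a map of field name to value.
type fieldSet map[string]string

// String renders the set in the "k=v,k=v" form that ParseSelector accepts,
// sorted so the output is deterministic regardless of map iteration order.
func (s fieldSet) String() string {
	pairs := make([]string, 0, len(s))
	for k, v := range s {
		pairs = append(pairs, k+"="+v)
	}
	sort.Strings(pairs)
	return strings.Join(pairs, ",")
}

func main() {
	s := fieldSet{"status.phase": "Running", "metadata.namespace": "default"}
	fmt.Println(s) // metadata.namespace=default,status.phase=Running
}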
-func Everything() Selector { - return andTerm{} -} - -type hasTerm struct { - field, value string -} - -func (t *hasTerm) Matches(ls Fields) bool { - return ls.Get(t.field) == t.value -} - -func (t *hasTerm) Empty() bool { - return false -} - -func (t *hasTerm) RequiresExactMatch(field string) (value string, found bool) { - if t.field == field { - return t.value, true - } - return "", false -} - -func (t *hasTerm) Transform(fn TransformFunc) (Selector, error) { - field, value, err := fn(t.field, t.value) - if err != nil { - return nil, err - } - return &hasTerm{field, value}, nil -} - -func (t *hasTerm) String() string { - return fmt.Sprintf("%v=%v", t.field, t.value) -} - -type notHasTerm struct { - field, value string -} - -func (t *notHasTerm) Matches(ls Fields) bool { - return ls.Get(t.field) != t.value -} - -func (t *notHasTerm) Empty() bool { - return false -} - -func (t *notHasTerm) RequiresExactMatch(field string) (value string, found bool) { - return "", false -} - -func (t *notHasTerm) Transform(fn TransformFunc) (Selector, error) { - field, value, err := fn(t.field, t.value) - if err != nil { - return nil, err - } - return ¬HasTerm{field, value}, nil -} - -func (t *notHasTerm) String() string { - return fmt.Sprintf("%v!=%v", t.field, t.value) -} - -type andTerm []Selector - -func (t andTerm) Matches(ls Fields) bool { - for _, q := range t { - if !q.Matches(ls) { - return false - } - } - return true -} - -func (t andTerm) Empty() bool { - if t == nil { - return true - } - if len([]Selector(t)) == 0 { - return true - } - for i := range t { - if !t[i].Empty() { - return false - } - } - return true -} - -func (t andTerm) RequiresExactMatch(field string) (string, bool) { - if t == nil || len([]Selector(t)) == 0 { - return "", false - } - for i := range t { - if value, found := t[i].RequiresExactMatch(field); found { - return value, found - } - } - return "", false -} - -func (t andTerm) Transform(fn TransformFunc) (Selector, error) { - next := make([]Selector, len([]Selector(t))) - for i, s := range []Selector(t) { - n, err := s.Transform(fn) - if err != nil { - return nil, err - } - next[i] = n - } - return andTerm(next), nil -} - -func (t andTerm) String() string { - var terms []string - for _, q := range t { - terms = append(terms, q.String()) - } - return strings.Join(terms, ",") -} - -// SelectorFromSet returns a Selector which will match exactly the given Set. A -// nil Set is considered equivalent to Everything(). -func SelectorFromSet(ls Set) Selector { - if ls == nil { - return Everything() - } - items := make([]Selector, 0, len(ls)) - for field, value := range ls { - items = append(items, &hasTerm{field: field, value: value}) - } - if len(items) == 1 { - return items[0] - } - return andTerm(items) -} - -// ParseSelectorOrDie takes a string representing a selector and returns an -// object suitable for matching, or panic when an error occur. -func ParseSelectorOrDie(s string) Selector { - selector, err := ParseSelector(s) - if err != nil { - panic(err) - } - return selector -} - -// ParseSelector takes a string representing a selector and returns an -// object suitable for matching, or an error. -func ParseSelector(selector string) (Selector, error) { - return parseSelector(selector, - func(lhs, rhs string) (newLhs, newRhs string, err error) { - return lhs, rhs, nil - }) -} - -// Parses the selector and runs them through the given TransformFunc. 
-func ParseAndTransformSelector(selector string, fn TransformFunc) (Selector, error) { - return parseSelector(selector, fn) -} - -// Function to transform selectors. -type TransformFunc func(field, value string) (newField, newValue string, err error) - -func try(selectorPiece, op string) (lhs, rhs string, ok bool) { - pieces := strings.Split(selectorPiece, op) - if len(pieces) == 2 { - return pieces[0], pieces[1], true - } - return "", "", false -} - -func parseSelector(selector string, fn TransformFunc) (Selector, error) { - parts := strings.Split(selector, ",") - sort.StringSlice(parts).Sort() - var items []Selector - for _, part := range parts { - if part == "" { - continue - } - if lhs, rhs, ok := try(part, "!="); ok { - items = append(items, ¬HasTerm{field: lhs, value: rhs}) - } else if lhs, rhs, ok := try(part, "=="); ok { - items = append(items, &hasTerm{field: lhs, value: rhs}) - } else if lhs, rhs, ok := try(part, "="); ok { - items = append(items, &hasTerm{field: lhs, value: rhs}) - } else { - return nil, fmt.Errorf("invalid selector: '%s'; can't understand '%s'", selector, part) - } - } - if len(items) == 1 { - return items[0].Transform(fn) - } - return andTerm(items).Transform(fn) -} - -// OneTermEqualSelector returns an object that matches objects where one field/field equals one value. -// Cannot return an error. -func OneTermEqualSelector(k, v string) Selector { - return &hasTerm{field: k, value: v} -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/OWNERS b/vendor/k8s.io/kubernetes/pkg/kubectl/OWNERS deleted file mode 100644 index 60bc436d4..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/OWNERS +++ /dev/null @@ -1,8 +0,0 @@ -assignees: - - bgrant0607 - - brendandburns - - deads2k - - janetkuo - - jlowdermilk - - pwittrock - - smarterclayton diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go b/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go deleted file mode 100644 index 1836fc250..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/apply.go +++ /dev/null @@ -1,194 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "encoding/json" - - "k8s.io/kubernetes/pkg/api/annotations" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/kubectl/resource" - "k8s.io/kubernetes/pkg/runtime" -) - -type debugError interface { - DebugError() (msg string, args []interface{}) -} - -// GetOriginalConfiguration retrieves the original configuration of the object -// from the annotation, or nil if no annotation was found. 
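The apply helpers in this file all revolve around one well-known annotation on the live object. A minimal sketch of reading it back from an annotation map, standard library only and with an illustrative object; a missing key simply yields "not recorded", matching the nil-return behaviour described above:

package main

import (
	"encoding/json"
	"fmt"
)

// lastAppliedAnnotation is the well-known key kubectl uses to stash the
// previously applied manifest on an object.
const lastAppliedAnnotation = "kubectl.kubernetes.io/last-applied-configuration"

func main() {
	// Annotations as they might appear on a live object.
	annots := map[string]string{
		lastAppliedAnnotation: `{"kind":"ConfigMap","metadata":{"name":"demo"}}`,
	}

	original, ok := annots[lastAppliedAnnotation]
	if !ok {
		fmt.Println("no last-applied configuration recorded")
		return
	}

	// The stored value is the serialized object itself.
	var parsed map[string]interface{}
	if err := json.Unmarshal([]byte(original), &parsed); err != nil {
		panic(err)
	}
	fmt.Println(parsed["kind"]) // ConfigMap
}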
-func GetOriginalConfiguration(mapping *meta.RESTMapping, obj runtime.Object) ([]byte, error) { - annots, err := mapping.MetadataAccessor.Annotations(obj) - if err != nil { - return nil, err - } - - if annots == nil { - return nil, nil - } - - original, ok := annots[annotations.LastAppliedConfigAnnotation] - if !ok { - return nil, nil - } - - return []byte(original), nil -} - -// SetOriginalConfiguration sets the original configuration of the object -// as the annotation on the object for later use in computing a three way patch. -func SetOriginalConfiguration(info *resource.Info, original []byte) error { - if len(original) < 1 { - return nil - } - - accessor := info.Mapping.MetadataAccessor - annots, err := accessor.Annotations(info.Object) - if err != nil { - return err - } - - if annots == nil { - annots = map[string]string{} - } - - annots[annotations.LastAppliedConfigAnnotation] = string(original) - if err := info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annots); err != nil { - return err - } - - return nil -} - -// GetModifiedConfiguration retrieves the modified configuration of the object. -// If annotate is true, it embeds the result as an anotation in the modified -// configuration. If an object was read from the command input, it will use that -// version of the object. Otherwise, it will use the version from the server. -func GetModifiedConfiguration(info *resource.Info, annotate bool, codec runtime.Encoder) ([]byte, error) { - // First serialize the object without the annotation to prevent recursion, - // then add that serialization to it as the annotation and serialize it again. - var modified []byte - if info.VersionedObject != nil { - // If an object was read from input, use that version. - accessor, err := meta.Accessor(info.VersionedObject) - if err != nil { - return nil, err - } - - // Get the current annotations from the object. - annots := accessor.GetAnnotations() - if annots == nil { - annots = map[string]string{} - } - - original := annots[annotations.LastAppliedConfigAnnotation] - delete(annots, annotations.LastAppliedConfigAnnotation) - accessor.SetAnnotations(annots) - // TODO: this needs to be abstracted - there should be no assumption that versioned object - // can be marshalled to JSON. - modified, err = json.Marshal(info.VersionedObject) - if err != nil { - return nil, err - } - - if annotate { - annots[annotations.LastAppliedConfigAnnotation] = string(modified) - accessor.SetAnnotations(annots) - // TODO: this needs to be abstracted - there should be no assumption that versioned object - // can be marshalled to JSON. - modified, err = json.Marshal(info.VersionedObject) - if err != nil { - return nil, err - } - } - - // Restore the object to its original condition. - annots[annotations.LastAppliedConfigAnnotation] = original - accessor.SetAnnotations(annots) - } else { - // Otherwise, use the server side version of the object. - accessor := info.Mapping.MetadataAccessor - // Get the current annotations from the object. 
- annots, err := accessor.Annotations(info.Object) - if err != nil { - return nil, err - } - - if annots == nil { - annots = map[string]string{} - } - - original := annots[annotations.LastAppliedConfigAnnotation] - delete(annots, annotations.LastAppliedConfigAnnotation) - if err := accessor.SetAnnotations(info.Object, annots); err != nil { - return nil, err - } - - modified, err = runtime.Encode(codec, info.Object) - if err != nil { - return nil, err - } - - if annotate { - annots[annotations.LastAppliedConfigAnnotation] = string(modified) - if err := info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annots); err != nil { - return nil, err - } - - modified, err = runtime.Encode(codec, info.Object) - if err != nil { - return nil, err - } - } - - // Restore the object to its original condition. - annots[annotations.LastAppliedConfigAnnotation] = original - if err := info.Mapping.MetadataAccessor.SetAnnotations(info.Object, annots); err != nil { - return nil, err - } - } - - return modified, nil -} - -// UpdateApplyAnnotation calls CreateApplyAnnotation if the last applied -// configuration annotation is already present. Otherwise, it does nothing. -func UpdateApplyAnnotation(info *resource.Info, codec runtime.Encoder) error { - if original, err := GetOriginalConfiguration(info.Mapping, info.Object); err != nil || len(original) <= 0 { - return err - } - return CreateApplyAnnotation(info, codec) -} - -// CreateApplyAnnotation gets the modified configuration of the object, -// without embedding it again, and then sets it on the object as the annotation. -func CreateApplyAnnotation(info *resource.Info, codec runtime.Encoder) error { - modified, err := GetModifiedConfiguration(info, false, codec) - if err != nil { - return err - } - return SetOriginalConfiguration(info, modified) -} - -// Create the annotation used by kubectl apply only when createAnnotation is true -// Otherwise, only update the annotation when it already exists -func CreateOrUpdateAnnotation(createAnnotation bool, info *resource.Info, codec runtime.Encoder) error { - if createAnnotation { - return CreateApplyAnnotation(info, codec) - } - return UpdateApplyAnnotation(info, codec) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/autoscale.go b/vendor/k8s.io/kubernetes/pkg/kubectl/autoscale.go deleted file mode 100644 index 979a93bf0..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/autoscale.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "fmt" - "strconv" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/autoscaling" - "k8s.io/kubernetes/pkg/runtime" -) - -type HorizontalPodAutoscalerV1Beta1 struct{} - -func (HorizontalPodAutoscalerV1Beta1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"default-name", true}, - {"name", false}, - {"scaleRef-kind", false}, - {"scaleRef-name", false}, - {"scaleRef-apiVersion", false}, - {"scaleRef-subresource", false}, - {"min", false}, - {"max", true}, - {"cpu-percent", false}, - } -} - -func (HorizontalPodAutoscalerV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - return generateHPA(genericParams) -} - -type HorizontalPodAutoscalerV1 struct{} - -func (HorizontalPodAutoscalerV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"default-name", true}, - {"name", false}, - {"scaleRef-kind", false}, - {"scaleRef-name", false}, - {"scaleRef-apiVersion", false}, - {"min", false}, - {"max", true}, - {"cpu-percent", false}, - } -} - -func (HorizontalPodAutoscalerV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - return generateHPA(genericParams) -} - -func generateHPA(genericParams map[string]interface{}) (runtime.Object, error) { - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - - name, found := params["name"] - if !found || len(name) == 0 { - name, found = params["default-name"] - if !found || len(name) == 0 { - return nil, fmt.Errorf("'name' is a required parameter.") - } - } - minString, found := params["min"] - min := -1 - var err error - if found { - if min, err = strconv.Atoi(minString); err != nil { - return nil, err - } - } - maxString, found := params["max"] - if !found { - return nil, fmt.Errorf("'max' is a required parameter.") - } - max, err := strconv.Atoi(maxString) - if err != nil { - return nil, err - } - cpuString, found := params["cpu-percent"] - cpu := -1 - if found { - if cpu, err = strconv.Atoi(cpuString); err != nil { - return nil, err - } - } - - scaler := autoscaling.HorizontalPodAutoscaler{ - ObjectMeta: api.ObjectMeta{ - Name: name, - }, - Spec: autoscaling.HorizontalPodAutoscalerSpec{ - ScaleTargetRef: autoscaling.CrossVersionObjectReference{ - Kind: params["scaleRef-kind"], - Name: params["scaleRef-name"], - APIVersion: params["scaleRef-apiVersion"], - }, - MaxReplicas: int32(max), - }, - } - if min > 0 { - v := int32(min) - scaler.Spec.MinReplicas = &v - } - if cpu >= 0 { - c := int32(cpu) - scaler.Spec.TargetCPUUtilizationPercentage = &c - } - return &scaler, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/bash_comp_utils.go b/vendor/k8s.io/kubernetes/pkg/kubectl/bash_comp_utils.go deleted file mode 100644 index e3eaf30e4..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/bash_comp_utils.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// A set of common functions needed by cmd/kubectl and pkg/kubectl packages. - -package kubectl - -import ( - "strings" - - "github.com/spf13/cobra" - - "k8s.io/kubernetes/pkg/kubectl/resource" -) - -func AddJsonFilenameFlag(cmd *cobra.Command, value *[]string, usage string) { - cmd.Flags().StringSliceVarP(value, "filename", "f", *value, usage) - annotations := make([]string, 0, len(resource.FileExtensions)) - for _, ext := range resource.FileExtensions { - annotations = append(annotations, strings.TrimLeft(ext, ".")) - } - cmd.Flags().SetAnnotation("filename", cobra.BashCompFilenameExt, annotations) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/clientcache.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/clientcache.go deleted file mode 100644 index 43ddf3e98..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/clientcache.go +++ /dev/null @@ -1,168 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - fed_clientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/client/restclient" - client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" -) - -func NewClientCache(loader clientcmd.ClientConfig) *ClientCache { - return &ClientCache{ - clients: make(map[unversioned.GroupVersion]*client.Client), - configs: make(map[unversioned.GroupVersion]*restclient.Config), - fedClientSets: make(map[unversioned.GroupVersion]fed_clientset.Interface), - loader: loader, - } -} - -// ClientCache caches previously loaded clients for reuse, and ensures MatchServerVersion -// is invoked only once -type ClientCache struct { - loader clientcmd.ClientConfig - clients map[unversioned.GroupVersion]*client.Client - fedClientSets map[unversioned.GroupVersion]fed_clientset.Interface - configs map[unversioned.GroupVersion]*restclient.Config - defaultConfig *restclient.Config - defaultClient *client.Client - matchVersion bool -} - -// ClientConfigForVersion returns the correct config for a server -func (c *ClientCache) ClientConfigForVersion(version *unversioned.GroupVersion) (*restclient.Config, error) { - if c.defaultConfig == nil { - config, err := c.loader.ClientConfig() - if err != nil { - return nil, err - } - c.defaultConfig = config - if c.matchVersion { - if err := client.MatchesServerVersion(c.defaultClient, config); err != nil { - return nil, err - } - } - } - if version != nil { - if config, ok := c.configs[*version]; ok { - return config, nil - } - } - - // TODO: have a better config copy method - config := *c.defaultConfig - - // TODO these fall out when we finish the refactor - var preferredGV *unversioned.GroupVersion - if version != nil { - versionCopy := *version - preferredGV = &versionCopy - } 
- - negotiatedVersion, err := client.NegotiateVersion(c.defaultClient, &config, preferredGV, registered.EnabledVersions()) - if err != nil { - return nil, err - } - config.GroupVersion = negotiatedVersion - client.SetKubernetesDefaults(&config) - - if version != nil { - c.configs[*version] = &config - } - - // `version` does not necessarily equal `config.Version`. However, we know that we call this method again with - // `config.Version`, we should get the the config we've just built. - configCopy := config - c.configs[*config.GroupVersion] = &configCopy - - return &config, nil -} - -// ClientForVersion initializes or reuses a client for the specified version, or returns an -// error if that is not possible -func (c *ClientCache) ClientForVersion(version *unversioned.GroupVersion) (*client.Client, error) { - if version != nil { - if client, ok := c.clients[*version]; ok { - return client, nil - } - } - config, err := c.ClientConfigForVersion(version) - if err != nil { - return nil, err - } - - kubeclient, err := client.New(config) - if err != nil { - return nil, err - } - c.clients[*config.GroupVersion] = kubeclient - - // `version` does not necessarily equal `config.Version`. However, we know that if we call this method again with - // `version`, we should get a client based on the same config we just found. There's no guarantee that a client - // is copiable, so create a new client and save it in the cache. - if version != nil { - configCopy := *config - kubeclient, err := client.New(&configCopy) - if err != nil { - return nil, err - } - c.clients[*version] = kubeclient - } - - return kubeclient, nil -} - -func (c *ClientCache) FederationClientSetForVersion(version *unversioned.GroupVersion) (fed_clientset.Interface, error) { - if version != nil { - if clientSet, found := c.fedClientSets[*version]; found { - return clientSet, nil - } - } - config, err := c.ClientConfigForVersion(version) - if err != nil { - return nil, err - } - - // TODO: support multi versions of client with clientset - clientSet, err := fed_clientset.NewForConfig(config) - if err != nil { - return nil, err - } - c.fedClientSets[*config.GroupVersion] = clientSet - - if version != nil { - configCopy := *config - clientSet, err := fed_clientset.NewForConfig(&configCopy) - if err != nil { - return nil, err - } - c.fedClientSets[*version] = clientSet - } - - return clientSet, nil -} - -func (c *ClientCache) FederationClientForVersion(version *unversioned.GroupVersion) (*restclient.RESTClient, error) { - fedClientSet, err := c.FederationClientSetForVersion(version) - if err != nil { - return nil, err - } - return fedClientSet.(*fed_clientset.Clientset).FederationClient.RESTClient, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go deleted file mode 100644 index f6bd256d2..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go +++ /dev/null @@ -1,1230 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "bytes" - "errors" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "os/user" - "path" - "path/filepath" - "sort" - "strconv" - "strings" - "time" - - "github.com/emicklei/go-restful/swagger" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - - "k8s.io/kubernetes/federation/apis/federation" - "k8s.io/kubernetes/pkg/api" - apierrors "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/service" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/validation" - "k8s.io/kubernetes/pkg/apimachinery" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/autoscaling" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/apis/policy" - "k8s.io/kubernetes/pkg/apis/rbac" - "k8s.io/kubernetes/pkg/client/restclient" - client "k8s.io/kubernetes/pkg/client/unversioned" - clientset "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset" - "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" - "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/resource" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer/json" - utilflag "k8s.io/kubernetes/pkg/util/flag" - "k8s.io/kubernetes/pkg/watch" -) - -const ( - FlagMatchBinaryVersion = "match-server-version" -) - -// Factory provides abstractions that allow the Kubectl command to be extended across multiple types -// of resources and different API sets. -// TODO: make the functions interfaces -// TODO: pass the various interfaces on the factory directly into the command constructors (so the -// commands are decoupled from the factory). -type Factory struct { - clients *ClientCache - flags *pflag.FlagSet - - // Returns interfaces for dealing with arbitrary runtime.Objects. If thirdPartyDiscovery is true, performs API calls - // to discovery dynamic API objects registered by third parties. - Object func(thirdPartyDiscovery bool) (meta.RESTMapper, runtime.ObjectTyper) - // Returns interfaces for decoding objects - if toInternal is set, decoded objects will be converted - // into their internal form (if possible). Eventually the internal form will be removed as an option, - // and only versioned objects will be returned. - Decoder func(toInternal bool) runtime.Decoder - // Returns an encoder capable of encoding a provided object into JSON in the default desired version. - JSONEncoder func() runtime.Encoder - // Returns a client for accessing Kubernetes resources or an error. - Client func() (*client.Client, error) - // Returns a client.Config for accessing the Kubernetes server. - ClientConfig func() (*restclient.Config, error) - // Returns a RESTClient for working with the specified RESTMapping or an error. This is intended - // for working with arbitrary resources and is not guaranteed to point to a Kubernetes APIServer. - ClientForMapping func(mapping *meta.RESTMapping) (resource.RESTClient, error) - // Returns a Describer for displaying the specified RESTMapping type or an error. - Describer func(mapping *meta.RESTMapping) (kubectl.Describer, error) - // Returns a Printer for formatting objects of the given type or an error. 
- Printer func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, showLabels bool, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) - // Returns a Scaler for changing the size of the specified RESTMapping type or an error - Scaler func(mapping *meta.RESTMapping) (kubectl.Scaler, error) - // Returns a Reaper for gracefully shutting down resources. - Reaper func(mapping *meta.RESTMapping) (kubectl.Reaper, error) - // Returns a HistoryViewer for viewing change history - HistoryViewer func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) - // Returns a Rollbacker for changing the rollback version of the specified RESTMapping type or an error - Rollbacker func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) - // Returns a StatusViewer for printing rollout status. - StatusViewer func(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) - // MapBasedSelectorForObject returns the map-based selector associated with the provided object. If a - // new set-based selector is provided, an error is returned if the selector cannot be converted to a - // map-based selector - MapBasedSelectorForObject func(object runtime.Object) (string, error) - // PortsForObject returns the ports associated with the provided object - PortsForObject func(object runtime.Object) ([]string, error) - // ProtocolsForObject returns the <port, protocol> mapping associated with the provided object - ProtocolsForObject func(object runtime.Object) (map[string]string, error) - // LabelsForObject returns the labels associated with the provided object - LabelsForObject func(object runtime.Object) (map[string]string, error) - // LogsForObject returns a request for the logs associated with the provided object - LogsForObject func(object, options runtime.Object) (*restclient.Request, error) - // PauseObject marks the provided object as paused ie. it will not be reconciled by its controller. - PauseObject func(object runtime.Object) (bool, error) - // ResumeObject resumes a paused object ie. it will be reconciled by its controller. - ResumeObject func(object runtime.Object) (bool, error) - // Returns a schema that can validate objects stored on disk. - Validator func(validate bool, cacheDir string) (validation.Schema, error) - // SwaggerSchema returns the schema declaration for the provided group version kind. - SwaggerSchema func(unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) - // Returns the default namespace to use in cases where no - // other namespace is specified and whether the namespace was - // overridden. - DefaultNamespace func() (string, bool, error) - // Generators returns the generators for the provided command - Generators func(cmdName string) map[string]kubectl.Generator - // Check whether the kind of resources could be exposed - CanBeExposed func(kind unversioned.GroupKind) error - // Check whether the kind of resources could be autoscaled - CanBeAutoscaled func(kind unversioned.GroupKind) error - // AttachablePodForObject returns the pod to which to attach given an object. - AttachablePodForObject func(object runtime.Object) (*api.Pod, error) - // UpdatePodSpecForObject will call the provided function on the pod spec this object supports, - // return false if no pod spec is supported, or return an error. 
- UpdatePodSpecForObject func(obj runtime.Object, fn func(*api.PodSpec) error) (bool, error) - // EditorEnvs returns a group of environment variables that the edit command - // can range over in order to determine if the user has specified an editor - // of their choice. - EditorEnvs func() []string - // PrintObjectSpecificMessage prints object-specific messages on the provided writer - PrintObjectSpecificMessage func(obj runtime.Object, out io.Writer) -} - -const ( - RunV1GeneratorName = "run/v1" - RunPodV1GeneratorName = "run-pod/v1" - ServiceV1GeneratorName = "service/v1" - ServiceV2GeneratorName = "service/v2" - ServiceAccountV1GeneratorName = "serviceaccount/v1" - HorizontalPodAutoscalerV1Beta1GeneratorName = "horizontalpodautoscaler/v1beta1" - HorizontalPodAutoscalerV1GeneratorName = "horizontalpodautoscaler/v1" - DeploymentV1Beta1GeneratorName = "deployment/v1beta1" - JobV1Beta1GeneratorName = "job/v1beta1" - JobV1GeneratorName = "job/v1" - NamespaceV1GeneratorName = "namespace/v1" - SecretV1GeneratorName = "secret/v1" - SecretForDockerRegistryV1GeneratorName = "secret-for-docker-registry/v1" - SecretForTLSV1GeneratorName = "secret-for-tls/v1" - ConfigMapV1GeneratorName = "configmap/v1" -) - -// DefaultGenerators returns the set of default generators for use in Factory instances -func DefaultGenerators(cmdName string) map[string]kubectl.Generator { - generators := map[string]map[string]kubectl.Generator{} - generators["expose"] = map[string]kubectl.Generator{ - ServiceV1GeneratorName: kubectl.ServiceGeneratorV1{}, - ServiceV2GeneratorName: kubectl.ServiceGeneratorV2{}, - } - generators["run"] = map[string]kubectl.Generator{ - RunV1GeneratorName: kubectl.BasicReplicationController{}, - RunPodV1GeneratorName: kubectl.BasicPod{}, - DeploymentV1Beta1GeneratorName: kubectl.DeploymentV1Beta1{}, - JobV1Beta1GeneratorName: kubectl.JobV1Beta1{}, - JobV1GeneratorName: kubectl.JobV1{}, - } - generators["autoscale"] = map[string]kubectl.Generator{ - HorizontalPodAutoscalerV1Beta1GeneratorName: kubectl.HorizontalPodAutoscalerV1Beta1{}, - HorizontalPodAutoscalerV1GeneratorName: kubectl.HorizontalPodAutoscalerV1{}, - } - generators["namespace"] = map[string]kubectl.Generator{ - NamespaceV1GeneratorName: kubectl.NamespaceGeneratorV1{}, - } - generators["secret"] = map[string]kubectl.Generator{ - SecretV1GeneratorName: kubectl.SecretGeneratorV1{}, - } - generators["secret-for-docker-registry"] = map[string]kubectl.Generator{ - SecretForDockerRegistryV1GeneratorName: kubectl.SecretForDockerRegistryGeneratorV1{}, - } - generators["secret-for-tls"] = map[string]kubectl.Generator{ - SecretForTLSV1GeneratorName: kubectl.SecretForTLSGeneratorV1{}, - } - - return generators[cmdName] -} - -func getGroupVersionKinds(gvks []unversioned.GroupVersionKind, group string) []unversioned.GroupVersionKind { - result := []unversioned.GroupVersionKind{} - for ix := range gvks { - if gvks[ix].Group == group { - result = append(result, gvks[ix]) - } - } - return result -} - -func makeInterfacesFor(versionList []unversioned.GroupVersion) func(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { - accessor := meta.NewAccessor() - return func(version unversioned.GroupVersion) (*meta.VersionInterfaces, error) { - for ix := range versionList { - if versionList[ix].String() == version.String() { - return &meta.VersionInterfaces{ - ObjectConvertor: thirdpartyresourcedata.NewThirdPartyObjectConverter(api.Scheme), - MetadataAccessor: accessor, - }, nil - } - } - return nil, fmt.Errorf("unsupported storage version: 
%s (valid: %v)", version, versionList) - } -} - -// NewFactory creates a factory with the default Kubernetes resources defined -// if optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig. -// if optionalClientConfig is not nil, then this factory will make use of it. -func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { - mapper := kubectl.ShortcutExpander{RESTMapper: registered.RESTMapper()} - - flags := pflag.NewFlagSet("", pflag.ContinueOnError) - flags.SetNormalizeFunc(utilflag.WarnWordSepNormalizeFunc) // Warn for "_" flags - - clientConfig := optionalClientConfig - if optionalClientConfig == nil { - clientConfig = DefaultClientConfig(flags) - } - - clients := NewClientCache(clientConfig) - - return &Factory{ - clients: clients, - flags: flags, - - // If discoverDynamicAPIs is true, make API calls to the discovery service to find APIs that - // have been dynamically added to the apiserver - Object: func(discoverDynamicAPIs bool) (meta.RESTMapper, runtime.ObjectTyper) { - cfg, err := clientConfig.ClientConfig() - CheckErr(err) - cmdApiVersion := unversioned.GroupVersion{} - if cfg.GroupVersion != nil { - cmdApiVersion = *cfg.GroupVersion - } - if discoverDynamicAPIs { - client, err := clients.ClientForVersion(&unversioned.GroupVersion{Version: "v1"}) - CheckErr(err) - - var versions []unversioned.GroupVersion - var gvks []unversioned.GroupVersionKind - retries := 3 - for i := 0; i < retries; i++ { - versions, gvks, err = GetThirdPartyGroupVersions(client.Discovery()) - // Retry if we got a NotFound error, because user may delete - // a thirdparty group when the GetThirdPartyGroupVersions is - // running. - if err == nil || !apierrors.IsNotFound(err) { - break - } - } - CheckErr(err) - if len(versions) > 0 { - priorityMapper, ok := mapper.RESTMapper.(meta.PriorityRESTMapper) - if !ok { - CheckErr(fmt.Errorf("expected PriorityMapper, saw: %v", mapper.RESTMapper)) - return nil, nil - } - multiMapper, ok := priorityMapper.Delegate.(meta.MultiRESTMapper) - if !ok { - CheckErr(fmt.Errorf("unexpected type: %v", mapper.RESTMapper)) - return nil, nil - } - groupsMap := map[string][]unversioned.GroupVersion{} - for _, version := range versions { - groupsMap[version.Group] = append(groupsMap[version.Group], version) - } - for group, versionList := range groupsMap { - preferredExternalVersion := versionList[0] - - thirdPartyMapper, err := kubectl.NewThirdPartyResourceMapper(versionList, getGroupVersionKinds(gvks, group)) - CheckErr(err) - accessor := meta.NewAccessor() - groupMeta := apimachinery.GroupMeta{ - GroupVersion: preferredExternalVersion, - GroupVersions: versionList, - RESTMapper: thirdPartyMapper, - SelfLinker: runtime.SelfLinker(accessor), - InterfacesFor: makeInterfacesFor(versionList), - } - - CheckErr(registered.RegisterGroup(groupMeta)) - registered.AddThirdPartyAPIGroupVersions(versionList...) - multiMapper = append(meta.MultiRESTMapper{thirdPartyMapper}, multiMapper...) 
- } - priorityMapper.Delegate = multiMapper - // Re-assign to the RESTMapper here because priorityMapper is actually a copy, so if we - // don't re-assign, the above assignement won't actually update mapper.RESTMapper - mapper.RESTMapper = priorityMapper - } - } - outputRESTMapper := kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}} - priorityRESTMapper := meta.PriorityRESTMapper{ - Delegate: outputRESTMapper, - ResourcePriority: []unversioned.GroupVersionResource{ - {Group: api.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource}, - {Group: autoscaling.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource}, - {Group: extensions.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource}, - {Group: federation.GroupName, Version: meta.AnyVersion, Resource: meta.AnyResource}, - }, - KindPriority: []unversioned.GroupVersionKind{ - {Group: api.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind}, - {Group: autoscaling.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind}, - {Group: extensions.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind}, - {Group: federation.GroupName, Version: meta.AnyVersion, Kind: meta.AnyKind}, - }, - } - return priorityRESTMapper, api.Scheme - }, - Client: func() (*client.Client, error) { - return clients.ClientForVersion(nil) - }, - ClientConfig: func() (*restclient.Config, error) { - return clients.ClientConfigForVersion(nil) - }, - ClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) { - gvk := mapping.GroupVersionKind - mappingVersion := mapping.GroupVersionKind.GroupVersion() - c, err := clients.ClientForVersion(&mappingVersion) - if err != nil { - return nil, err - } - switch gvk.Group { - case api.GroupName: - return c.RESTClient, nil - case autoscaling.GroupName: - return c.AutoscalingClient.RESTClient, nil - case batch.GroupName: - return c.BatchClient.RESTClient, nil - case policy.GroupName: - return c.PolicyClient.RESTClient, nil - case apps.GroupName: - return c.AppsClient.RESTClient, nil - case extensions.GroupName: - return c.ExtensionsClient.RESTClient, nil - case api.SchemeGroupVersion.Group: - return c.RESTClient, nil - case extensions.SchemeGroupVersion.Group: - return c.ExtensionsClient.RESTClient, nil - case federation.GroupName: - return clients.FederationClientForVersion(&mappingVersion) - case rbac.GroupName: - return c.RbacClient.RESTClient, nil - default: - if !registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) { - return nil, fmt.Errorf("unknown api group/version: %s", gvk.String()) - } - cfg, err := clientConfig.ClientConfig() - if err != nil { - return nil, err - } - gv := gvk.GroupVersion() - cfg.GroupVersion = &gv - cfg.APIPath = "/apis" - cfg.Codec = thirdpartyresourcedata.NewCodec(c.ExtensionsClient.RESTClient.Codec(), gvk) - cfg.NegotiatedSerializer = thirdpartyresourcedata.NewNegotiatedSerializer(api.Codecs, gvk.Kind, gv, gv) - return restclient.RESTClientFor(cfg) - } - }, - Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) { - mappingVersion := mapping.GroupVersionKind.GroupVersion() - if mapping.GroupVersionKind.Group == federation.GroupName { - fedClientSet, err := clients.FederationClientSetForVersion(&mappingVersion) - if err != nil { - return nil, err - } - if mapping.GroupVersionKind.Kind == "Cluster" { - return &kubectl.ClusterDescriber{Interface: fedClientSet}, nil - } - } - client, err := clients.ClientForVersion(&mappingVersion) - if err != nil { - return nil, err - } - if 
describer, ok := kubectl.DescriberFor(mapping.GroupVersionKind.GroupKind(), client); ok { - return describer, nil - } - return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind) - }, - Decoder: func(toInternal bool) runtime.Decoder { - var decoder runtime.Decoder - if toInternal { - decoder = api.Codecs.UniversalDecoder() - } else { - decoder = api.Codecs.UniversalDeserializer() - } - return thirdpartyresourcedata.NewDecoder(decoder, "") - - }, - JSONEncoder: func() runtime.Encoder { - return api.Codecs.LegacyCodec(registered.EnabledVersions()...) - }, - Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, showLabels bool, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) { - return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, showLabels, absoluteTimestamps, columnLabels), nil - }, - MapBasedSelectorForObject: func(object runtime.Object) (string, error) { - // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) - switch t := object.(type) { - case *api.ReplicationController: - return kubectl.MakeLabels(t.Spec.Selector), nil - case *api.Pod: - if len(t.Labels) == 0 { - return "", fmt.Errorf("the pod has no labels and cannot be exposed") - } - return kubectl.MakeLabels(t.Labels), nil - case *api.Service: - if t.Spec.Selector == nil { - return "", fmt.Errorf("the service has no pod selector set") - } - return kubectl.MakeLabels(t.Spec.Selector), nil - case *extensions.Deployment: - // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals - // operator, DoubleEquals operator and In operator with only one element in the set. - if len(t.Spec.Selector.MatchExpressions) > 0 { - return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) - } - return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil - case *extensions.ReplicaSet: - // TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals - // operator, DoubleEquals operator and In operator with only one element in the set. 
- if len(t.Spec.Selector.MatchExpressions) > 0 { - return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format", t.Spec.Selector.MatchExpressions) - } - return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil - default: - gvks, _, err := api.Scheme.ObjectKinds(object) - if err != nil { - return "", err - } - return "", fmt.Errorf("cannot extract pod selector from %v", gvks[0]) - } - }, - PortsForObject: func(object runtime.Object) ([]string, error) { - // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) - switch t := object.(type) { - case *api.ReplicationController: - return getPorts(t.Spec.Template.Spec), nil - case *api.Pod: - return getPorts(t.Spec), nil - case *api.Service: - return getServicePorts(t.Spec), nil - case *extensions.Deployment: - return getPorts(t.Spec.Template.Spec), nil - case *extensions.ReplicaSet: - return getPorts(t.Spec.Template.Spec), nil - default: - gvks, _, err := api.Scheme.ObjectKinds(object) - if err != nil { - return nil, err - } - return nil, fmt.Errorf("cannot extract ports from %v", gvks[0]) - } - }, - ProtocolsForObject: func(object runtime.Object) (map[string]string, error) { - // TODO: replace with a swagger schema based approach (identify pod selector via schema introspection) - switch t := object.(type) { - case *api.ReplicationController: - return getProtocols(t.Spec.Template.Spec), nil - case *api.Pod: - return getProtocols(t.Spec), nil - case *api.Service: - return getServiceProtocols(t.Spec), nil - case *extensions.Deployment: - return getProtocols(t.Spec.Template.Spec), nil - case *extensions.ReplicaSet: - return getProtocols(t.Spec.Template.Spec), nil - default: - gvks, _, err := api.Scheme.ObjectKinds(object) - if err != nil { - return nil, err - } - return nil, fmt.Errorf("cannot extract protocols from %v", gvks[0]) - } - }, - LabelsForObject: func(object runtime.Object) (map[string]string, error) { - return meta.NewAccessor().Labels(object) - }, - LogsForObject: func(object, options runtime.Object) (*restclient.Request, error) { - c, err := clients.ClientForVersion(nil) - if err != nil { - return nil, err - } - - switch t := object.(type) { - case *api.Pod: - opts, ok := options.(*api.PodLogOptions) - if !ok { - return nil, errors.New("provided options object is not a PodLogOptions") - } - return c.Pods(t.Namespace).GetLogs(t.Name, opts), nil - - case *api.ReplicationController: - opts, ok := options.(*api.PodLogOptions) - if !ok { - return nil, errors.New("provided options object is not a PodLogOptions") - } - selector := labels.SelectorFromSet(t.Spec.Selector) - sortBy := func(pods []*api.Pod) sort.Interface { return controller.ActivePods(pods) } - pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy) - if err != nil { - return nil, err - } - if numPods > 1 { - fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name) - } - - return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil - - case *extensions.ReplicaSet: - opts, ok := options.(*api.PodLogOptions) - if !ok { - return nil, errors.New("provided options object is not a PodLogOptions") - } - selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) - if err != nil { - return nil, fmt.Errorf("invalid label selector: %v", err) - } - sortBy := func(pods []*api.Pod) sort.Interface { return controller.ActivePods(pods) } - pod, numPods, err := GetFirstPod(c, t.Namespace, selector, 20*time.Second, sortBy) - if err != nil { - return nil, err - } - 
if numPods > 1 { - fmt.Fprintf(os.Stderr, "Found %v pods, using pod/%v\n", numPods, pod.Name) - } - - return c.Pods(pod.Namespace).GetLogs(pod.Name, opts), nil - - default: - gvks, _, err := api.Scheme.ObjectKinds(object) - if err != nil { - return nil, err - } - return nil, fmt.Errorf("cannot get the logs from %v", gvks[0]) - } - }, - PauseObject: func(object runtime.Object) (bool, error) { - c, err := clients.ClientForVersion(nil) - if err != nil { - return false, err - } - - switch t := object.(type) { - case *extensions.Deployment: - if t.Spec.Paused { - return true, nil - } - t.Spec.Paused = true - _, err := c.Extensions().Deployments(t.Namespace).Update(t) - return false, err - default: - gvks, _, err := api.Scheme.ObjectKinds(object) - if err != nil { - return false, err - } - return false, fmt.Errorf("cannot pause %v", gvks[0]) - } - }, - ResumeObject: func(object runtime.Object) (bool, error) { - c, err := clients.ClientForVersion(nil) - if err != nil { - return false, err - } - - switch t := object.(type) { - case *extensions.Deployment: - if !t.Spec.Paused { - return true, nil - } - t.Spec.Paused = false - _, err := c.Extensions().Deployments(t.Namespace).Update(t) - return false, err - default: - gvks, _, err := api.Scheme.ObjectKinds(object) - if err != nil { - return false, err - } - return false, fmt.Errorf("cannot resume %v", gvks[0]) - } - }, - Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) { - mappingVersion := mapping.GroupVersionKind.GroupVersion() - client, err := clients.ClientForVersion(&mappingVersion) - if err != nil { - return nil, err - } - return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), client) - }, - Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) { - mappingVersion := mapping.GroupVersionKind.GroupVersion() - client, err := clients.ClientForVersion(&mappingVersion) - if err != nil { - return nil, err - } - return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), client) - }, - HistoryViewer: func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) { - mappingVersion := mapping.GroupVersionKind.GroupVersion() - client, err := clients.ClientForVersion(&mappingVersion) - clientset := clientset.FromUnversionedClient(client) - if err != nil { - return nil, err - } - return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset) - }, - Rollbacker: func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) { - mappingVersion := mapping.GroupVersionKind.GroupVersion() - client, err := clients.ClientForVersion(&mappingVersion) - if err != nil { - return nil, err - } - return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), client) - }, - StatusViewer: func(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) { - mappingVersion := mapping.GroupVersionKind.GroupVersion() - client, err := clients.ClientForVersion(&mappingVersion) - if err != nil { - return nil, err - } - return kubectl.StatusViewerFor(mapping.GroupVersionKind.GroupKind(), client) - }, - Validator: func(validate bool, cacheDir string) (validation.Schema, error) { - if validate { - client, err := clients.ClientForVersion(nil) - if err != nil { - return nil, err - } - dir := cacheDir - if len(dir) > 0 { - version, err := client.ServerVersion() - if err != nil { - return nil, err - } - dir = path.Join(cacheDir, version.String()) - } - fedClient, err := clients.FederationClientForVersion(nil) - if err != nil { - return nil, err - } - return &clientSwaggerSchema{ - c: client, - fedc: fedClient, - cacheDir: dir, - 
mapper: api.RESTMapper, - }, nil - } - return validation.NullSchema{}, nil - }, - SwaggerSchema: func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) { - version := gvk.GroupVersion() - client, err := clients.ClientForVersion(&version) - if err != nil { - return nil, err - } - return client.Discovery().SwaggerSchema(version) - }, - DefaultNamespace: func() (string, bool, error) { - return clientConfig.Namespace() - }, - Generators: func(cmdName string) map[string]kubectl.Generator { - return DefaultGenerators(cmdName) - }, - CanBeExposed: func(kind unversioned.GroupKind) error { - switch kind { - case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"): - // nothing to do here - default: - return fmt.Errorf("cannot expose a %s", kind) - } - return nil - }, - CanBeAutoscaled: func(kind unversioned.GroupKind) error { - switch kind { - case api.Kind("ReplicationController"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"): - // nothing to do here - default: - return fmt.Errorf("cannot autoscale a %v", kind) - } - return nil - }, - AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) { - client, err := clients.ClientForVersion(nil) - if err != nil { - return nil, err - } - switch t := object.(type) { - case *api.ReplicationController: - selector := labels.SelectorFromSet(t.Spec.Selector) - sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } - pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy) - return pod, err - case *extensions.Deployment: - selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) - if err != nil { - return nil, fmt.Errorf("invalid label selector: %v", err) - } - sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } - pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy) - return pod, err - case *batch.Job: - selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector) - if err != nil { - return nil, fmt.Errorf("invalid label selector: %v", err) - } - sortBy := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } - pod, _, err := GetFirstPod(client, t.Namespace, selector, 1*time.Minute, sortBy) - return pod, err - case *api.Pod: - return t, nil - default: - gvks, _, err := api.Scheme.ObjectKinds(object) - if err != nil { - return nil, err - } - return nil, fmt.Errorf("cannot attach to %v: not implemented", gvks[0]) - } - }, - // UpdatePodSpecForObject update the pod specification for the provided object - UpdatePodSpecForObject: func(obj runtime.Object, fn func(*api.PodSpec) error) (bool, error) { - // TODO: replace with a swagger schema based approach (identify pod template via schema introspection) - switch t := obj.(type) { - case *api.Pod: - return true, fn(&t.Spec) - case *api.ReplicationController: - if t.Spec.Template == nil { - t.Spec.Template = &api.PodTemplateSpec{} - } - return true, fn(&t.Spec.Template.Spec) - case *extensions.Deployment: - return true, fn(&t.Spec.Template.Spec) - case *extensions.DaemonSet: - return true, fn(&t.Spec.Template.Spec) - case *extensions.ReplicaSet: - return true, fn(&t.Spec.Template.Spec) - case *apps.PetSet: - return true, fn(&t.Spec.Template.Spec) - case *batch.Job: - return true, fn(&t.Spec.Template.Spec) - default: - return false, fmt.Errorf("the object is not a pod or does not have a pod 
template") - } - }, - EditorEnvs: func() []string { - return []string{"KUBE_EDITOR", "EDITOR"} - }, - PrintObjectSpecificMessage: func(obj runtime.Object, out io.Writer) { - switch obj := obj.(type) { - case *api.Service: - if obj.Spec.Type == api.ServiceTypeNodePort { - msg := fmt.Sprintf( - `You have exposed your service on an external port on all nodes in your -cluster. If you want to expose this service to the external internet, you may -need to set up firewall rules for the service port(s) (%s) to serve traffic. - -See http://releases.k8s.io/release-1.3/docs/user-guide/services-firewalls.md for more details. -`, - makePortsString(obj.Spec.Ports, true)) - out.Write([]byte(msg)) - } - - if _, ok := obj.Annotations[service.AnnotationLoadBalancerSourceRangesKey]; ok { - msg := fmt.Sprintf( - `You are using service annotation [service.beta.kubernetes.io/load-balancer-source-ranges]. -It has been promoted to field [loadBalancerSourceRanges] in service spec. This annotation will be deprecated in the future. -Please use the loadBalancerSourceRanges field instead. - -See http://releases.k8s.io/release-1.3/docs/user-guide/services-firewalls.md for more details. -`) - out.Write([]byte(msg)) - } - } - }, - } -} - -// GetFirstPod returns a pod matching the namespace and label selector -// and the number of all pods that match the label selector. -func GetFirstPod(client client.Interface, namespace string, selector labels.Selector, timeout time.Duration, sortBy func([]*api.Pod) sort.Interface) (*api.Pod, int, error) { - options := api.ListOptions{LabelSelector: selector} - - podList, err := client.Pods(namespace).List(options) - if err != nil { - return nil, 0, err - } - pods := []*api.Pod{} - for i := range podList.Items { - pod := podList.Items[i] - pods = append(pods, &pod) - } - if len(pods) > 0 { - sort.Sort(sortBy(pods)) - return pods[0], len(podList.Items), nil - } - - // Watch until we observe a pod - options.ResourceVersion = podList.ResourceVersion - w, err := client.Pods(namespace).Watch(options) - if err != nil { - return nil, 0, err - } - defer w.Stop() - - condition := func(event watch.Event) (bool, error) { - return event.Type == watch.Added || event.Type == watch.Modified, nil - } - event, err := watch.Until(timeout, w, condition) - if err != nil { - return nil, 0, err - } - pod, ok := event.Object.(*api.Pod) - if !ok { - return nil, 0, fmt.Errorf("%#v is not a pod event", event) - } - return pod, 1, nil -} - -// Command will stringify and return all environment arguments ie. a command run by a client -// using the factory. -// TODO: We need to filter out stuff like secrets. -func (f *Factory) Command() string { - if len(os.Args) == 0 { - return "" - } - base := filepath.Base(os.Args[0]) - args := append([]string{base}, os.Args[1:]...) - return strings.Join(args, " ") -} - -// BindFlags adds any flags that are common to all kubectl sub commands. -func (f *Factory) BindFlags(flags *pflag.FlagSet) { - // Merge factory's flags - flags.AddFlagSet(f.flags) - - // Globally persistent flags across all subcommands. - // TODO Change flag names to consts to allow safer lookup from subcommands. - // TODO Add a verbose flag that turns on glog logging. Probably need a way - // to do that automatically for every subcommand. - flags.BoolVar(&f.clients.matchVersion, FlagMatchBinaryVersion, false, "Require server version to match client version") - - // Normalize all flags that are coming from other packages or pre-configurations - // a.k.a. change all "_" to "-". e.g. 
glog package - flags.SetNormalizeFunc(utilflag.WordSepNormalizeFunc) -} - -// BindCommonFlags adds any flags defined by external projects (not part of pflags) -func (f *Factory) BindExternalFlags(flags *pflag.FlagSet) { - // any flags defined by external projects (not part of pflags) - flags.AddGoFlagSet(flag.CommandLine) -} - -func makePortsString(ports []api.ServicePort, useNodePort bool) string { - pieces := make([]string, len(ports)) - for ix := range ports { - var port int32 - if useNodePort { - port = ports[ix].NodePort - } else { - port = ports[ix].Port - } - pieces[ix] = fmt.Sprintf("%s:%d", strings.ToLower(string(ports[ix].Protocol)), port) - } - return strings.Join(pieces, ",") -} - -func getPorts(spec api.PodSpec) []string { - result := []string{} - for _, container := range spec.Containers { - for _, port := range container.Ports { - result = append(result, strconv.Itoa(int(port.ContainerPort))) - } - } - return result -} - -func getProtocols(spec api.PodSpec) map[string]string { - result := make(map[string]string) - for _, container := range spec.Containers { - for _, port := range container.Ports { - result[strconv.Itoa(int(port.ContainerPort))] = string(port.Protocol) - } - } - return result -} - -// Extracts the ports exposed by a service from the given service spec. -func getServicePorts(spec api.ServiceSpec) []string { - result := []string{} - for _, servicePort := range spec.Ports { - result = append(result, strconv.Itoa(int(servicePort.Port))) - } - return result -} - -// Extracts the protocols exposed by a service from the given service spec. -func getServiceProtocols(spec api.ServiceSpec) map[string]string { - result := make(map[string]string) - for _, servicePort := range spec.Ports { - result[strconv.Itoa(int(servicePort.Port))] = string(servicePort.Protocol) - } - return result -} - -type clientSwaggerSchema struct { - c *client.Client - fedc *restclient.RESTClient - cacheDir string - mapper meta.RESTMapper -} - -const schemaFileName = "schema.json" - -type schemaClient interface { - Get() *restclient.Request -} - -func recursiveSplit(dir string) []string { - parent, file := path.Split(dir) - if len(parent) == 0 { - return []string{file} - } - return append(recursiveSplit(parent[:len(parent)-1]), file) -} - -func substituteUserHome(dir string) (string, error) { - if len(dir) == 0 || dir[0] != '~' { - return dir, nil - } - parts := recursiveSplit(dir) - if len(parts[0]) == 1 { - parts[0] = os.Getenv("HOME") - } else { - usr, err := user.Lookup(parts[0][1:]) - if err != nil { - return "", err - } - parts[0] = usr.HomeDir - } - return path.Join(parts...), nil -} - -func writeSchemaFile(schemaData []byte, cacheDir, cacheFile, prefix, groupVersion string) error { - if err := os.MkdirAll(path.Join(cacheDir, prefix, groupVersion), 0755); err != nil { - return err - } - tmpFile, err := ioutil.TempFile(cacheDir, "schema") - if err != nil { - // If we can't write, keep going. - if os.IsPermission(err) { - return nil - } - return err - } - if _, err := io.Copy(tmpFile, bytes.NewBuffer(schemaData)); err != nil { - return err - } - if err := os.Link(tmpFile.Name(), cacheFile); err != nil { - // If we can't write due to file existing, or permission problems, keep going. 
- if os.IsExist(err) || os.IsPermission(err) { - return nil - } - return err - } - return nil -} - -func getSchemaAndValidate(c schemaClient, data []byte, prefix, groupVersion, cacheDir string, delegate validation.Schema) (err error) { - var schemaData []byte - var firstSeen bool - fullDir, err := substituteUserHome(cacheDir) - if err != nil { - return err - } - cacheFile := path.Join(fullDir, prefix, groupVersion, schemaFileName) - - if len(cacheDir) != 0 { - if schemaData, err = ioutil.ReadFile(cacheFile); err != nil && !os.IsNotExist(err) { - return err - } - } - if schemaData == nil { - firstSeen = true - schemaData, err = downloadSchemaAndStore(c, cacheDir, fullDir, cacheFile, prefix, groupVersion) - if err != nil { - return err - } - } - schema, err := validation.NewSwaggerSchemaFromBytes(schemaData, delegate) - if err != nil { - return err - } - err = schema.ValidateBytes(data) - if _, ok := err.(validation.TypeNotFoundError); ok && !firstSeen { - // As a temporay hack, kubectl would re-get the schema if validation - // fails for type not found reason. - // TODO: runtime-config settings needs to make into the file's name - schemaData, err = downloadSchemaAndStore(c, cacheDir, fullDir, cacheFile, prefix, groupVersion) - if err != nil { - return err - } - schema, err := validation.NewSwaggerSchemaFromBytes(schemaData, delegate) - if err != nil { - return err - } - return schema.ValidateBytes(data) - } - - return err -} - -// Download swagger schema from apiserver and store it to file. -func downloadSchemaAndStore(c schemaClient, cacheDir, fullDir, cacheFile, prefix, groupVersion string) (schemaData []byte, err error) { - schemaData, err = c.Get(). - AbsPath("/swaggerapi", prefix, groupVersion). - Do(). - Raw() - if err != nil { - return - } - if len(cacheDir) != 0 { - if err = writeSchemaFile(schemaData, fullDir, cacheFile, prefix, groupVersion); err != nil { - return - } - } - return -} - -func (c *clientSwaggerSchema) ValidateBytes(data []byte) error { - gvk, err := json.DefaultMetaFactory.Interpret(data) - if err != nil { - return err - } - if ok := registered.IsEnabledVersion(gvk.GroupVersion()); !ok { - return fmt.Errorf("API version %q isn't supported, only supports API versions %q", gvk.GroupVersion().String(), registered.EnabledVersions()) - } - if gvk.Group == autoscaling.GroupName { - if c.c.AutoscalingClient == nil { - return errors.New("unable to validate: no autoscaling client") - } - return getSchemaAndValidate(c.c.AutoscalingClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) - } - if gvk.Group == policy.GroupName { - if c.c.PolicyClient == nil { - return errors.New("unable to validate: no policy client") - } - return getSchemaAndValidate(c.c.PolicyClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) - } - if gvk.Group == apps.GroupName { - if c.c.AppsClient == nil { - return errors.New("unable to validate: no autoscaling client") - } - return getSchemaAndValidate(c.c.AppsClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) - } - - if gvk.Group == batch.GroupName { - if c.c.BatchClient == nil { - return errors.New("unable to validate: no batch client") - } - return getSchemaAndValidate(c.c.BatchClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) - } - if gvk.Group == rbac.GroupName { - if c.c.RbacClient == nil { - return errors.New("unable to validate: no rbac client") - } - return getSchemaAndValidate(c.c.RbacClient.RESTClient, data, "apis/", 
gvk.GroupVersion().String(), c.cacheDir, c) - } - if registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) { - // Don't attempt to validate third party objects - return nil - } - if gvk.Group == extensions.GroupName { - if c.c.ExtensionsClient == nil { - return errors.New("unable to validate: no experimental client") - } - return getSchemaAndValidate(c.c.ExtensionsClient.RESTClient, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) - } - if gvk.Group == federation.GroupName { - if c.fedc == nil { - return errors.New("unable to validate: no federation client") - } - return getSchemaAndValidate(c.fedc, data, "apis/", gvk.GroupVersion().String(), c.cacheDir, c) - } - return getSchemaAndValidate(c.c.RESTClient, data, "api", gvk.GroupVersion().String(), c.cacheDir, c) -} - -// DefaultClientConfig creates a clientcmd.ClientConfig with the following hierarchy: -// 1. Use the kubeconfig builder. The number of merges and overrides here gets a little crazy. Stay with me. -// 1. Merge together the kubeconfig itself. This is done with the following hierarchy rules: -// 1. CommandLineLocation - this parsed from the command line, so it must be late bound. If you specify this, -// then no other kubeconfig files are merged. This file must exist. -// 2. If $KUBECONFIG is set, then it is treated as a list of files that should be merged. -// 3. HomeDirectoryLocation -// Empty filenames are ignored. Files with non-deserializable content produced errors. -// The first file to set a particular value or map key wins and the value or map key is never changed. -// This means that the first file to set CurrentContext will have its context preserved. It also means -// that if two files specify a "red-user", only values from the first file's red-user are used. Even -// non-conflicting entries from the second file's "red-user" are discarded. -// 2. Determine the context to use based on the first hit in this chain -// 1. command line argument - again, parsed from the command line, so it must be late bound -// 2. CurrentContext from the merged kubeconfig file -// 3. Empty is allowed at this stage -// 3. Determine the cluster info and auth info to use. At this point, we may or may not have a context. They -// are built based on the first hit in this chain. (run it twice, once for auth, once for cluster) -// 1. command line argument -// 2. If context is present, then use the context value -// 3. Empty is allowed -// 4. Determine the actual cluster info to use. At this point, we may or may not have a cluster info. Build -// each piece of the cluster info based on the chain: -// 1. command line argument -// 2. If cluster info is present and a value for the attribute is present, use it. -// 3. If you don't have a server location, bail. -// 5. Auth info is build using the same rules as cluster info, EXCEPT that you can only have one authentication -// technique per auth info. The following conditions result in an error: -// 1. If there are two conflicting techniques specified from the command line, fail. -// 2. If the command line does not specify one, and the auth info has conflicting techniques, fail. -// 3. If the command line specifies one and the auth info specifies another, honor the command line technique. -// 2. Use default values and potentially prompt for auth information -// -// However, if it appears that we're running in a kubernetes cluster -// container environment, then run with the auth info kubernetes mounted for -// us. 
Specifically: -// The env vars KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT are -// set, and the file /var/run/secrets/kubernetes.io/serviceaccount/token -// exists and is not a directory. -func DefaultClientConfig(flags *pflag.FlagSet) clientcmd.ClientConfig { - loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() - flags.StringVar(&loadingRules.ExplicitPath, "kubeconfig", "", "Path to the kubeconfig file to use for CLI requests.") - - overrides := &clientcmd.ConfigOverrides{} - flagNames := clientcmd.RecommendedConfigOverrideFlags("") - // short flagnames are disabled by default. These are here for compatibility with existing scripts - flagNames.ClusterOverrideFlags.APIServer.ShortName = "s" - - clientcmd.BindOverrideFlags(overrides, flags, flagNames) - clientConfig := clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, overrides, os.Stdin) - - return clientConfig -} - -// PrintObject prints an api object given command line flags to modify the output format -func (f *Factory) PrintObject(cmd *cobra.Command, mapper meta.RESTMapper, obj runtime.Object, out io.Writer) error { - gvks, _, err := api.Scheme.ObjectKinds(obj) - if err != nil { - return err - } - - mapping, err := mapper.RESTMapping(gvks[0].GroupKind()) - if err != nil { - return err - } - - printer, err := f.PrinterForMapping(cmd, mapping, false) - if err != nil { - return err - } - return printer.PrintObj(obj, out) -} - -// PrinterForMapping returns a printer suitable for displaying the provided resource type. -// Requires that printer flags have been added to cmd (see AddPrinterFlags). -func (f *Factory) PrinterForMapping(cmd *cobra.Command, mapping *meta.RESTMapping, withNamespace bool) (kubectl.ResourcePrinter, error) { - printer, ok, err := PrinterForCommand(cmd) - if err != nil { - return nil, err - } - if ok { - clientConfig, err := f.ClientConfig() - if err != nil { - return nil, err - } - - version, err := OutputVersion(cmd, clientConfig.GroupVersion) - if err != nil { - return nil, err - } - if version.IsEmpty() { - version = mapping.GroupVersionKind.GroupVersion() - } - if version.IsEmpty() { - return nil, fmt.Errorf("you must specify an output-version when using this output format") - } - - printer = kubectl.NewVersionedPrinter(printer, mapping.ObjectConvertor, version, mapping.GroupVersionKind.GroupVersion()) - - } else { - // Some callers do not have "label-columns" so we can't use the GetFlagStringSlice() helper - columnLabel, err := cmd.Flags().GetStringSlice("label-columns") - if err != nil { - columnLabel = []string{} - } - printer, err = f.Printer(mapping, GetFlagBool(cmd, "no-headers"), withNamespace, GetWideFlag(cmd), GetFlagBool(cmd, "show-all"), GetFlagBool(cmd, "show-labels"), isWatch(cmd), columnLabel) - if err != nil { - return nil, err - } - printer = maybeWrapSortingPrinter(cmd, printer) - } - - return printer, nil -} - -// One stop shopping for a Builder -func (f *Factory) NewBuilder(thirdPartyDiscovery bool) *resource.Builder { - mapper, typer := f.Object(thirdPartyDiscovery) - - return resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go deleted file mode 100644 index 218a2f9c9..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/helpers.go +++ /dev/null @@ -1,613 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/url" - "os" - "strings" - "time" - - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/client/typed/discovery" - "k8s.io/kubernetes/pkg/client/unversioned/clientcmd" - "k8s.io/kubernetes/pkg/kubectl" - "k8s.io/kubernetes/pkg/kubectl/resource" - "k8s.io/kubernetes/pkg/runtime" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/strategicpatch" - - "github.com/evanphx/json-patch" - "github.com/golang/glog" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -const ( - ApplyAnnotationsFlag = "save-config" -) - -type debugError interface { - DebugError() (msg string, args []interface{}) -} - -// AddSourceToErr adds handleResourcePrefix and source string to error message. -// verb is the string like "creating", "deleting" etc. -// souce is the filename or URL to the template file(*.json or *.yaml), or stdin to use to handle the resource. -func AddSourceToErr(verb string, source string, err error) error { - if source != "" { - if statusError, ok := err.(errors.APIStatus); ok { - status := statusError.Status() - status.Message = fmt.Sprintf("error when %s %q: %v", verb, source, status.Message) - return &errors.StatusError{ErrStatus: status} - } - return fmt.Errorf("error when %s %q: %v", verb, source, err) - } - return err -} - -var fatalErrHandler = fatal - -// BehaviorOnFatal allows you to override the default behavior when a fatal -// error occurs, which is call os.Exit(1). You can pass 'panic' as a function -// here if you prefer the panic() over os.Exit(1). -func BehaviorOnFatal(f func(string)) { - fatalErrHandler = f -} - -// DefaultBehaviorOnFatal allows you to undo any previous override. Useful in -// tests. -func DefaultBehaviorOnFatal() { - fatalErrHandler = fatal -} - -// fatal prints the message and then exits. If V(2) or greater, glog.Fatal -// is invoked for extended information. -func fatal(msg string) { - // add newline if needed - if !strings.HasSuffix(msg, "\n") { - msg += "\n" - } - - if glog.V(2) { - glog.FatalDepth(2, msg) - } - fmt.Fprint(os.Stderr, msg) - os.Exit(1) -} - -// CheckErr prints a user friendly error to STDERR and exits with a non-zero -// exit code. Unrecognized errors will be printed with an "error: " prefix. -// -// This method is generic to the command in use and may be used by non-Kubectl -// commands. 
-func CheckErr(err error) { - checkErr(err, fatalErrHandler) -} - -func checkErr(err error, handleErr func(string)) { - if err == nil { - return - } - - if errors.IsInvalid(err) { - details := err.(*errors.StatusError).Status().Details - prefix := fmt.Sprintf("The %s %q is invalid.\n", details.Kind, details.Name) - errs := statusCausesToAggrError(details.Causes) - handleErr(MultilineError(prefix, errs)) - } - - if meta.IsNoResourceMatchError(err) { - noMatch := err.(*meta.NoResourceMatchError) - - switch { - case len(noMatch.PartialResource.Group) > 0 && len(noMatch.PartialResource.Version) > 0: - handleErr(fmt.Sprintf("the server doesn't have a resource type %q in group %q and version %q", noMatch.PartialResource.Resource, noMatch.PartialResource.Group, noMatch.PartialResource.Version)) - case len(noMatch.PartialResource.Group) > 0: - handleErr(fmt.Sprintf("the server doesn't have a resource type %q in group %q", noMatch.PartialResource.Resource, noMatch.PartialResource.Group)) - case len(noMatch.PartialResource.Version) > 0: - handleErr(fmt.Sprintf("the server doesn't have a resource type %q in version %q", noMatch.PartialResource.Resource, noMatch.PartialResource.Version)) - default: - handleErr(fmt.Sprintf("the server doesn't have a resource type %q", noMatch.PartialResource.Resource)) - } - return - } - - // handle multiline errors - if clientcmd.IsConfigurationInvalid(err) { - handleErr(MultilineError("Error in configuration: ", err)) - } - if agg, ok := err.(utilerrors.Aggregate); ok && len(agg.Errors()) > 0 { - handleErr(MultipleErrors("", agg.Errors())) - } - - msg, ok := StandardErrorMessage(err) - if !ok { - msg = err.Error() - if !strings.HasPrefix(msg, "error: ") { - msg = fmt.Sprintf("error: %s", msg) - } - } - handleErr(msg) -} - -func statusCausesToAggrError(scs []unversioned.StatusCause) utilerrors.Aggregate { - errs := make([]error, len(scs)) - for i, sc := range scs { - errs[i] = fmt.Errorf("%s: %s", sc.Field, sc.Message) - } - return utilerrors.NewAggregate(errs) -} - -// StandardErrorMessage translates common errors into a human readable message, or returns -// false if the error is not one of the recognized types. It may also log extended -// information to glog. -// -// This method is generic to the command in use and may be used by non-Kubectl -// commands. -func StandardErrorMessage(err error) (string, bool) { - if debugErr, ok := err.(debugError); ok { - glog.V(4).Infof(debugErr.DebugError()) - } - status, isStatus := err.(errors.APIStatus) - switch { - case isStatus: - switch s := status.Status(); { - case s.Reason == "Unauthorized": - return fmt.Sprintf("error: You must be logged in to the server (%s)", s.Message), true - default: - return fmt.Sprintf("Error from server: %s", err.Error()), true - } - case errors.IsUnexpectedObjectError(err): - return fmt.Sprintf("Server returned an unexpected response: %s", err.Error()), true - } - switch t := err.(type) { - case *url.Error: - glog.V(4).Infof("Connection error: %s %s: %v", t.Op, t.URL, t.Err) - switch { - case strings.Contains(t.Err.Error(), "connection refused"): - host := t.URL - if server, err := url.Parse(t.URL); err == nil { - host = server.Host - } - return fmt.Sprintf("The connection to the server %s was refused - did you specify the right host or port?", host), true - } - return fmt.Sprintf("Unable to connect to the server: %v", t.Err), true - } - return "", false -} - -// MultilineError returns a string representing an error that splits sub errors into their own -// lines. 
The returned string will end with a newline. -func MultilineError(prefix string, err error) string { - if agg, ok := err.(utilerrors.Aggregate); ok { - errs := utilerrors.Flatten(agg).Errors() - buf := &bytes.Buffer{} - switch len(errs) { - case 0: - return fmt.Sprintf("%s%v\n", prefix, err) - case 1: - return fmt.Sprintf("%s%v\n", prefix, messageForError(errs[0])) - default: - fmt.Fprintln(buf, prefix) - for _, err := range errs { - fmt.Fprintf(buf, "* %v\n", messageForError(err)) - } - return buf.String() - } - } - return fmt.Sprintf("%s%s\n", prefix, err) -} - -// MultipleErrors returns a newline delimited string containing -// the prefix and referenced errors in standard form. -func MultipleErrors(prefix string, errs []error) string { - buf := &bytes.Buffer{} - for _, err := range errs { - fmt.Fprintf(buf, "%s%v\n", prefix, messageForError(err)) - } - return buf.String() -} - -// messageForError returns the string representing the error. -func messageForError(err error) string { - msg, ok := StandardErrorMessage(err) - if !ok { - msg = err.Error() - } - return msg -} - -func UsageError(cmd *cobra.Command, format string, args ...interface{}) error { - msg := fmt.Sprintf(format, args...) - return fmt.Errorf("%s\nSee '%s -h' for help and examples.", msg, cmd.CommandPath()) -} - -// Whether this cmd need watching objects. -func isWatch(cmd *cobra.Command) bool { - if w, err := cmd.Flags().GetBool("watch"); w && err == nil { - return true - } - - if wo, err := cmd.Flags().GetBool("watch-only"); wo && err == nil { - return true - } - - return false -} - -func getFlag(cmd *cobra.Command, flag string) *pflag.Flag { - f := cmd.Flags().Lookup(flag) - if f == nil { - glog.Fatalf("flag accessed but not defined for command %s: %s", cmd.Name(), flag) - } - return f -} - -func GetFlagString(cmd *cobra.Command, flag string) string { - s, err := cmd.Flags().GetString(flag) - if err != nil { - glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err) - } - return s -} - -// GetFlagStringList can be used to accept multiple argument with flag repetition (e.g. -f arg1 -f arg2 ...) -func GetFlagStringSlice(cmd *cobra.Command, flag string) []string { - s, err := cmd.Flags().GetStringSlice(flag) - if err != nil { - glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err) - } - return s -} - -// GetWideFlag is used to determine if "-o wide" is used -func GetWideFlag(cmd *cobra.Command) bool { - f := cmd.Flags().Lookup("output") - if f.Value.String() == "wide" { - return true - } - return false -} - -func GetFlagBool(cmd *cobra.Command, flag string) bool { - b, err := cmd.Flags().GetBool(flag) - if err != nil { - glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err) - } - return b -} - -// Assumes the flag has a default value. -func GetFlagInt(cmd *cobra.Command, flag string) int { - i, err := cmd.Flags().GetInt(flag) - if err != nil { - glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err) - } - return i -} - -// Assumes the flag has a default value. 
-func GetFlagInt64(cmd *cobra.Command, flag string) int64 { - i, err := cmd.Flags().GetInt64(flag) - if err != nil { - glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err) - } - return i -} - -func GetFlagDuration(cmd *cobra.Command, flag string) time.Duration { - d, err := cmd.Flags().GetDuration(flag) - if err != nil { - glog.Fatalf("err accessing flag %s for command %s: %v", flag, cmd.Name(), err) - } - return d -} - -func AddValidateFlags(cmd *cobra.Command) { - cmd.Flags().Bool("validate", true, "If true, use a schema to validate the input before sending it") - cmd.Flags().String("schema-cache-dir", fmt.Sprintf("~/%s/%s", clientcmd.RecommendedHomeDir, clientcmd.RecommendedSchemaName), fmt.Sprintf("If non-empty, load/store cached API schemas in this directory, default is '$HOME/%s/%s'", clientcmd.RecommendedHomeDir, clientcmd.RecommendedSchemaName)) - cmd.MarkFlagFilename("schema-cache-dir") -} - -func AddRecursiveFlag(cmd *cobra.Command, value *bool) { - cmd.Flags().BoolVarP(value, "recursive", "R", *value, "Process the directory used in -f, --filename recursively. Useful when you want to manage related manifests organized within the same directory.") -} - -// AddDryRunFlag adds dry-run flag to a command. Usually used by mutations. -func AddDryRunFlag(cmd *cobra.Command) { - cmd.Flags().Bool("dry-run", false, "If true, only print the object that would be sent, without sending it.") -} - -func AddApplyAnnotationFlags(cmd *cobra.Command) { - cmd.Flags().Bool(ApplyAnnotationsFlag, false, "If true, the configuration of current object will be saved in its annotation. This is useful when you want to perform kubectl apply on this object in the future.") -} - -// AddGeneratorFlags adds flags common to resource generation commands -// TODO: need to take a pass at other generator commands to use this set of flags -func AddGeneratorFlags(cmd *cobra.Command, defaultGenerator string) { - cmd.Flags().String("generator", defaultGenerator, "The name of the API generator to use.") - AddDryRunFlag(cmd) -} - -func ReadConfigDataFromReader(reader io.Reader, source string) ([]byte, error) { - data, err := ioutil.ReadAll(reader) - if err != nil { - return nil, err - } - - if len(data) == 0 { - return nil, fmt.Errorf(`Read from %s but no data found`, source) - } - - return data, nil -} - -// Merge requires JSON serialization -// TODO: merge assumes JSON serialization, and does not properly abstract API retrieval -func Merge(codec runtime.Codec, dst runtime.Object, fragment, kind string) (runtime.Object, error) { - // encode dst into versioned json and apply fragment directly too it - target, err := runtime.Encode(codec, dst) - if err != nil { - return nil, err - } - patched, err := jsonpatch.MergePatch(target, []byte(fragment)) - if err != nil { - return nil, err - } - out, err := runtime.Decode(codec, patched) - if err != nil { - return nil, err - } - return out, nil -} - -// DumpReaderToFile writes all data from the given io.Reader to the specified file -// (usually for temporary use). 
-func DumpReaderToFile(reader io.Reader, filename string) error { - f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - defer f.Close() - if err != nil { - return err - } - buffer := make([]byte, 1024) - for { - count, err := reader.Read(buffer) - if err == io.EOF { - break - } - if err != nil { - return err - } - _, err = f.Write(buffer[:count]) - if err != nil { - return err - } - } - return nil -} - -// UpdateObject updates resource object with updateFn -func UpdateObject(info *resource.Info, codec runtime.Codec, updateFn func(runtime.Object) error) (runtime.Object, error) { - helper := resource.NewHelper(info.Client, info.Mapping) - - if err := updateFn(info.Object); err != nil { - return nil, err - } - - // Update the annotation used by kubectl apply - if err := kubectl.UpdateApplyAnnotation(info, codec); err != nil { - return nil, err - } - - if _, err := helper.Replace(info.Namespace, info.Name, true, info.Object); err != nil { - return nil, err - } - - return info.Object, nil -} - -// AddCmdRecordFlag adds --record flag to command -func AddRecordFlag(cmd *cobra.Command) { - cmd.Flags().Bool("record", false, "Record current kubectl command in the resource annotation.") -} - -func GetRecordFlag(cmd *cobra.Command) bool { - return GetFlagBool(cmd, "record") -} - -func GetDryRunFlag(cmd *cobra.Command) bool { - return GetFlagBool(cmd, "dry-run") -} - -// RecordChangeCause annotate change-cause to input runtime object. -func RecordChangeCause(obj runtime.Object, changeCause string) error { - accessor, err := meta.Accessor(obj) - if err != nil { - return err - } - annotations := accessor.GetAnnotations() - if annotations == nil { - annotations = make(map[string]string) - } - annotations[kubectl.ChangeCauseAnnotation] = changeCause - accessor.SetAnnotations(annotations) - return nil -} - -// ChangeResourcePatch creates a strategic merge patch between the origin input resource info -// and the annotated with change-cause input resource info. -func ChangeResourcePatch(info *resource.Info, changeCause string) ([]byte, error) { - oldData, err := json.Marshal(info.Object) - if err != nil { - return nil, err - } - if err := RecordChangeCause(info.Object, changeCause); err != nil { - return nil, err - } - newData, err := json.Marshal(info.Object) - if err != nil { - return nil, err - } - return strategicpatch.CreateTwoWayMergePatch(oldData, newData, info.Object) -} - -// containsChangeCause checks if input resource info contains change-cause annotation. -func ContainsChangeCause(info *resource.Info) bool { - annotations, err := info.Mapping.MetadataAccessor.Annotations(info.Object) - if err != nil { - return false - } - return len(annotations[kubectl.ChangeCauseAnnotation]) > 0 -} - -// ShouldRecord checks if we should record current change cause -func ShouldRecord(cmd *cobra.Command, info *resource.Info) bool { - return GetRecordFlag(cmd) || ContainsChangeCause(info) -} - -// GetThirdPartyGroupVersions returns the thirdparty "group/versions"s and -// resources supported by the server. A user may delete a thirdparty resource -// when this function is running, so this function may return a "NotFound" error -// due to the race. -func GetThirdPartyGroupVersions(discovery discovery.DiscoveryInterface) ([]unversioned.GroupVersion, []unversioned.GroupVersionKind, error) { - result := []unversioned.GroupVersion{} - gvks := []unversioned.GroupVersionKind{} - - groupList, err := discovery.ServerGroups() - if err != nil { - // On forbidden or not found, just return empty lists. 
- if errors.IsForbidden(err) || errors.IsNotFound(err) { - return result, gvks, nil - } - - return nil, nil, err - } - - for ix := range groupList.Groups { - group := &groupList.Groups[ix] - for jx := range group.Versions { - gv, err2 := unversioned.ParseGroupVersion(group.Versions[jx].GroupVersion) - if err2 != nil { - return nil, nil, err - } - // Skip GroupVersionKinds that have been statically registered. - if registered.IsRegisteredVersion(gv) { - continue - } - result = append(result, gv) - - resourceList, err := discovery.ServerResourcesForGroupVersion(group.Versions[jx].GroupVersion) - if err != nil { - return nil, nil, err - } - for kx := range resourceList.APIResources { - gvks = append(gvks, gv.WithKind(resourceList.APIResources[kx].Kind)) - } - } - } - return result, gvks, nil -} - -func GetIncludeThirdPartyAPIs(cmd *cobra.Command) bool { - if cmd.Flags().Lookup("include-extended-apis") == nil { - return false - } - return GetFlagBool(cmd, "include-extended-apis") -} - -func AddInclude3rdPartyFlags(cmd *cobra.Command) { - cmd.Flags().Bool("include-extended-apis", true, "If true, include definitions of new APIs via calls to the API server. [default true]") -} - -// GetResourcesAndPairs retrieves resources and "KEY=VALUE or KEY-" pair args from given args -func GetResourcesAndPairs(args []string, pairType string) (resources []string, pairArgs []string, err error) { - foundPair := false - for _, s := range args { - nonResource := strings.Contains(s, "=") || strings.HasSuffix(s, "-") - switch { - case !foundPair && nonResource: - foundPair = true - fallthrough - case foundPair && nonResource: - pairArgs = append(pairArgs, s) - case !foundPair && !nonResource: - resources = append(resources, s) - case foundPair && !nonResource: - err = fmt.Errorf("all resources must be specified before %s changes: %s", pairType, s) - return - } - } - return -} - -// ParsePairs retrieves new and remove pairs (if supportRemove is true) from "KEY=VALUE or KEY-" pair args -func ParsePairs(pairArgs []string, pairType string, supportRemove bool) (newPairs map[string]string, removePairs []string, err error) { - newPairs = map[string]string{} - if supportRemove { - removePairs = []string{} - } - var invalidBuf bytes.Buffer - - for _, pairArg := range pairArgs { - if strings.Index(pairArg, "=") != -1 { - parts := strings.SplitN(pairArg, "=", 2) - if len(parts) != 2 || len(parts[1]) == 0 { - if invalidBuf.Len() > 0 { - invalidBuf.WriteString(", ") - } - invalidBuf.WriteString(fmt.Sprintf(pairArg)) - } else { - newPairs[parts[0]] = parts[1] - } - } else if supportRemove && strings.HasSuffix(pairArg, "-") { - removePairs = append(removePairs, pairArg[:len(pairArg)-1]) - } else { - if invalidBuf.Len() > 0 { - invalidBuf.WriteString(", ") - } - invalidBuf.WriteString(fmt.Sprintf(pairArg)) - } - } - if invalidBuf.Len() > 0 { - err = fmt.Errorf("invalid %s format: %s", pairType, invalidBuf.String()) - return - } - - return -} - -// MaybeConvertObject attempts to convert an object to a specific group/version. If the object is -// a third party resource it is simply passed through. 
-func MaybeConvertObject(obj runtime.Object, gv unversioned.GroupVersion, converter runtime.ObjectConvertor) (runtime.Object, error) { - switch obj.(type) { - case *extensions.ThirdPartyResourceData: - // conversion is not supported for 3rd party objects - return obj, nil - default: - return converter.ConvertToVersion(obj, gv) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go b/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go deleted file mode 100644 index 979ce7c31..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/printing.go +++ /dev/null @@ -1,136 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "fmt" - "io" - "strings" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/kubectl" - - "github.com/spf13/cobra" -) - -// AddPrinterFlags adds printing related flags to a command (e.g. output format, no headers, template path) -func AddPrinterFlags(cmd *cobra.Command) { - cmd.Flags().StringP("output", "o", "", "Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/release-1.3/docs/user-guide/jsonpath.md].") - cmd.Flags().String("output-version", "", "Output the formatted object with the given group version (for ex: 'extensions/v1beta1').") - cmd.Flags().Bool("no-headers", false, "When using the default output, don't print headers.") - cmd.Flags().Bool("show-labels", false, "When printing, show all labels as the last column (default hide labels column)") - cmd.Flags().String("template", "", "Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].") - cmd.MarkFlagFilename("template") - cmd.Flags().String("sort-by", "", "If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. '{.metadata.name}'). The field in the API resource specified by this JSONPath expression must be an integer or a string.") - cmd.Flags().BoolP("show-all", "a", false, "When printing, show all resources (default hide terminated pods.)") -} - -// AddOutputFlagsForMutation adds output related flags to a command. Used by mutations only. -func AddOutputFlagsForMutation(cmd *cobra.Command) { - cmd.Flags().StringP("output", "o", "", "Output mode. 
Use \"-o name\" for shorter output (resource/name).") -} - -// PrintSuccess prints message after finishing mutating operations -func PrintSuccess(mapper meta.RESTMapper, shortOutput bool, out io.Writer, resource string, name string, operation string) { - resource, _ = mapper.ResourceSingularizer(resource) - if shortOutput { - // -o name: prints resource/name - if len(resource) > 0 { - fmt.Fprintf(out, "%s/%s\n", resource, name) - } else { - fmt.Fprintf(out, "%s\n", name) - } - } else { - // understandable output by default - if len(resource) > 0 { - fmt.Fprintf(out, "%s \"%s\" %s\n", resource, name, operation) - } else { - fmt.Fprintf(out, "\"%s\" %s\n", name, operation) - } - } -} - -// ValidateOutputArgs validates -o flag args for mutations -func ValidateOutputArgs(cmd *cobra.Command) error { - outputMode := GetFlagString(cmd, "output") - if outputMode != "" && outputMode != "name" { - return UsageError(cmd, "Unexpected -o output mode: %v. We only support '-o name'.", outputMode) - } - return nil -} - -// OutputVersion returns the preferred output version for generic content (JSON, YAML, or templates) -// defaultVersion is never mutated. Nil simply allows clean passing in common usage from client.Config -func OutputVersion(cmd *cobra.Command, defaultVersion *unversioned.GroupVersion) (unversioned.GroupVersion, error) { - outputVersionString := GetFlagString(cmd, "output-version") - if len(outputVersionString) == 0 { - if defaultVersion == nil { - return unversioned.GroupVersion{}, nil - } - - return *defaultVersion, nil - } - - return unversioned.ParseGroupVersion(outputVersionString) -} - -// PrinterForCommand returns the default printer for this command. -// Requires that printer flags have been added to cmd (see AddPrinterFlags). -func PrinterForCommand(cmd *cobra.Command) (kubectl.ResourcePrinter, bool, error) { - outputFormat := GetFlagString(cmd, "output") - - // templates are logically optional for specifying a format. - // TODO once https://github.com/kubernetes/kubernetes/issues/12668 is fixed, this should fall back to GetFlagString - templateFile, _ := cmd.Flags().GetString("template") - if len(outputFormat) == 0 && len(templateFile) != 0 { - outputFormat = "template" - } - - templateFormat := []string{ - "go-template=", "go-template-file=", "jsonpath=", "jsonpath-file=", "custom-columns=", "custom-columns-file=", - } - for _, format := range templateFormat { - if strings.HasPrefix(outputFormat, format) { - templateFile = outputFormat[len(format):] - outputFormat = format[:len(format)-1] - } - } - - printer, generic, err := kubectl.GetPrinter(outputFormat, templateFile) - if err != nil { - return nil, generic, err - } - - return maybeWrapSortingPrinter(cmd, printer), generic, nil -} - -func maybeWrapSortingPrinter(cmd *cobra.Command, printer kubectl.ResourcePrinter) kubectl.ResourcePrinter { - sorting, err := cmd.Flags().GetString("sort-by") - if err != nil { - // error can happen on missing flag or bad flag type. In either case, this command didn't intent to sort - return printer - } - - if len(sorting) != 0 { - return &kubectl.SortingPrinter{ - Delegate: printer, - SortField: fmt.Sprintf("{%s}", sorting), - } - } - return printer -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/configmap.go b/vendor/k8s.io/kubernetes/pkg/kubectl/configmap.go deleted file mode 100644 index 04ed4aa6a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/configmap.go +++ /dev/null @@ -1,212 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/validation" - "k8s.io/kubernetes/pkg/runtime" -) - -// ConfigMapGeneratorV1 supports stable generation of a configMap. -type ConfigMapGeneratorV1 struct { - // Name of configMap (required) - Name string - // Type of configMap (optional) - Type string - // FileSources to derive the configMap from (optional) - FileSources []string - // LiteralSources to derive the configMap from (optional) - LiteralSources []string -} - -// Ensure it supports the generator pattern that uses parameter injection. -var _ Generator = &ConfigMapGeneratorV1{} - -// Ensure it supports the generator pattern that uses parameters specified during construction. -var _ StructuredGenerator = &ConfigMapGeneratorV1{} - -// Generate returns a configMap using the specified parameters. -func (s ConfigMapGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) - if err != nil { - return nil, err - } - delegate := &ConfigMapGeneratorV1{} - fromFileStrings, found := genericParams["from-file"] - if found { - fromFileArray, isArray := fromFileStrings.([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found :%v", fromFileStrings) - } - delegate.FileSources = fromFileArray - delete(genericParams, "from-file") - } - fromLiteralStrings, found := genericParams["from-literal"] - if found { - fromLiteralArray, isArray := fromLiteralStrings.([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found :%v", fromFileStrings) - } - delegate.LiteralSources = fromLiteralArray - delete(genericParams, "from-literal") - } - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - delegate.Name = params["name"] - delegate.Type = params["type"] - return delegate.StructuredGenerate() -} - -// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern. -func (s ConfigMapGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"type", false}, - {"from-file", false}, - {"from-literal", false}, - {"force", false}, - } -} - -// StructuredGenerate outputs a configMap object using the configured fields. 
-func (s ConfigMapGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := s.validate(); err != nil { - return nil, err - } - configMap := &api.ConfigMap{} - configMap.Name = s.Name - configMap.Data = map[string]string{} - if len(s.FileSources) > 0 { - if err := handleConfigMapFromFileSources(configMap, s.FileSources); err != nil { - return nil, err - } - } - if len(s.LiteralSources) > 0 { - if err := handleConfigMapFromLiteralSources(configMap, s.LiteralSources); err != nil { - return nil, err - } - } - return configMap, nil -} - -// validate validates required fields are set to support structured generation. -func (s ConfigMapGeneratorV1) validate() error { - if len(s.Name) == 0 { - return fmt.Errorf("name must be specified") - } - return nil -} - -// handleConfigMapFromLiteralSources adds the specified literal source -// information into the provided configMap. -func handleConfigMapFromLiteralSources(configMap *api.ConfigMap, literalSources []string) error { - for _, literalSource := range literalSources { - keyName, value, err := parseLiteralSource(literalSource) - if err != nil { - return err - } - err = addKeyFromLiteralToConfigMap(configMap, keyName, value) - if err != nil { - return err - } - } - return nil -} - -// handleConfigMapFromFileSources adds the specified file source information -// into the provided configMap -func handleConfigMapFromFileSources(configMap *api.ConfigMap, fileSources []string) error { - for _, fileSource := range fileSources { - keyName, filePath, err := parseFileSource(fileSource) - if err != nil { - return err - } - info, err := os.Stat(filePath) - if err != nil { - switch err := err.(type) { - case *os.PathError: - return fmt.Errorf("error reading %s: %v", filePath, err.Err) - default: - return fmt.Errorf("error reading %s: %v", filePath, err) - } - } - if info.IsDir() { - if strings.Contains(fileSource, "=") { - return fmt.Errorf("cannot give a key name for a directory path.") - } - fileList, err := ioutil.ReadDir(filePath) - if err != nil { - return fmt.Errorf("error listing files in %s: %v", filePath, err) - } - for _, item := range fileList { - itemPath := path.Join(filePath, item.Name()) - if item.Mode().IsRegular() { - keyName = item.Name() - err = addKeyFromFileToConfigMap(configMap, keyName, itemPath) - if err != nil { - return err - } - } - } - } else { - err = addKeyFromFileToConfigMap(configMap, keyName, filePath) - if err != nil { - return err - } - } - } - - return nil -} - -// addKeyFromFileToConfigMap adds a key with the given name to a ConfigMap, populating -// the value with the content of the given file path, or returns an error. -func addKeyFromFileToConfigMap(configMap *api.ConfigMap, keyName, filePath string) error { - data, err := ioutil.ReadFile(filePath) - if err != nil { - return err - } - return addKeyFromLiteralToConfigMap(configMap, keyName, string(data)) -} - -// addKeyFromLiteralToConfigMap adds the given key and data to the given config map, -// returning an error if the key is not valid or if the key already exists. -func addKeyFromLiteralToConfigMap(configMap *api.ConfigMap, keyName, data string) error { - // Note, the rules for ConfigMap keys are the exact same as the ones for SecretKeys - // to be consistent; validation.IsSecretKey is used here intentionally. 
- if !validation.IsSecretKey(keyName) { - return fmt.Errorf("%v is not a valid key name for a configMap", keyName) - } - if _, entryExists := configMap.Data[keyName]; entryExists { - return fmt.Errorf("cannot add key %s, another key by that name already exists: %v.", keyName, configMap.Data) - } - configMap.Data[keyName] = data - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/custom_column_printer.go b/vendor/k8s.io/kubernetes/pkg/kubectl/custom_column_printer.go deleted file mode 100644 index 255ad1de8..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/custom_column_printer.go +++ /dev/null @@ -1,224 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "bufio" - "bytes" - "fmt" - "io" - "reflect" - "regexp" - "strings" - "text/tabwriter" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/jsonpath" -) - -const ( - columnwidth = 10 - tabwidth = 4 - padding = 3 - padding_character = ' ' - flags = 0 -) - -var jsonRegexp = regexp.MustCompile("^\\{\\.?([^{}]+)\\}$|^\\.?([^{}]+)$") - -// MassageJSONPath attempts to be flexible with JSONPath expressions, it accepts: -// * metadata.name (no leading '.' or curly brances '{...}' -// * {metadata.name} (no leading '.') -// * .metadata.name (no curly braces '{...}') -// * {.metadata.name} (complete expression) -// And transforms them all into a valid jsonpat expression: -// {.metadata.name} -func massageJSONPath(pathExpression string) (string, error) { - if len(pathExpression) == 0 { - return pathExpression, nil - } - submatches := jsonRegexp.FindStringSubmatch(pathExpression) - if submatches == nil { - return "", fmt.Errorf("unexpected path string, expected a 'name1.name2' or '.name1.name2' or '{name1.name2}' or '{.name1.name2}'") - } - if len(submatches) != 3 { - return "", fmt.Errorf("unexpected submatch list: %v", submatches) - } - var fieldSpec string - if len(submatches[1]) != 0 { - fieldSpec = submatches[1] - } else { - fieldSpec = submatches[2] - } - return fmt.Sprintf("{.%s}", fieldSpec), nil -} - -// NewCustomColumnsPrinterFromSpec creates a custom columns printer from a comma separated list of <header>:<jsonpath-field-spec> pairs. -// e.g. 
NAME:metadata.name,API_VERSION:apiVersion creates a printer that prints: -// -// NAME API_VERSION -// foo bar -func NewCustomColumnsPrinterFromSpec(spec string, decoder runtime.Decoder) (*CustomColumnsPrinter, error) { - if len(spec) == 0 { - return nil, fmt.Errorf("custom-columns format specified but no custom columns given") - } - parts := strings.Split(spec, ",") - columns := make([]Column, len(parts)) - for ix := range parts { - colSpec := strings.Split(parts[ix], ":") - if len(colSpec) != 2 { - return nil, fmt.Errorf("unexpected custom-columns spec: %s, expected <header>:<json-path-expr>", parts[ix]) - } - spec, err := massageJSONPath(colSpec[1]) - if err != nil { - return nil, err - } - columns[ix] = Column{Header: colSpec[0], FieldSpec: spec} - } - return &CustomColumnsPrinter{Columns: columns, Decoder: decoder}, nil -} - -func splitOnWhitespace(line string) []string { - lineScanner := bufio.NewScanner(bytes.NewBufferString(line)) - lineScanner.Split(bufio.ScanWords) - result := []string{} - for lineScanner.Scan() { - result = append(result, lineScanner.Text()) - } - return result -} - -// NewCustomColumnsPrinterFromTemplate creates a custom columns printer from a template stream. The template is expected -// to consist of two lines, whitespace separated. The first line is the header line, the second line is the jsonpath field spec -// For example the template below: -// NAME API_VERSION -// {metadata.name} {apiVersion} -func NewCustomColumnsPrinterFromTemplate(templateReader io.Reader, decoder runtime.Decoder) (*CustomColumnsPrinter, error) { - scanner := bufio.NewScanner(templateReader) - if !scanner.Scan() { - return nil, fmt.Errorf("invalid template, missing header line. Expected format is one line of space separated headers, one line of space separated column specs.") - } - headers := splitOnWhitespace(scanner.Text()) - - if !scanner.Scan() { - return nil, fmt.Errorf("invalid template, missing spec line. Expected format is one line of space separated headers, one line of space separated column specs.") - } - specs := splitOnWhitespace(scanner.Text()) - - if len(headers) != len(specs) { - return nil, fmt.Errorf("number of headers (%d) and field specifications (%d) don't match", len(headers), len(specs)) - } - - columns := make([]Column, len(headers)) - for ix := range headers { - spec, err := massageJSONPath(specs[ix]) - if err != nil { - return nil, err - } - columns[ix] = Column{ - Header: headers[ix], - FieldSpec: spec, - } - } - return &CustomColumnsPrinter{Columns: columns, Decoder: decoder}, nil -} - -// Column represents a user specified column -type Column struct { - // The header to print above the column, general style is ALL_CAPS - Header string - // The pointer to the field in the object to print in JSONPath form - // e.g. {.ObjectMeta.Name}, see pkg/util/jsonpath for more details. 
- FieldSpec string -} - -// CustomColumnPrinter is a printer that knows how to print arbitrary columns -// of data from templates specified in the `Columns` array -type CustomColumnsPrinter struct { - Columns []Column - Decoder runtime.Decoder -} - -func (s *CustomColumnsPrinter) PrintObj(obj runtime.Object, out io.Writer) error { - w := tabwriter.NewWriter(out, columnwidth, tabwidth, padding, padding_character, flags) - headers := make([]string, len(s.Columns)) - for ix := range s.Columns { - headers[ix] = s.Columns[ix].Header - } - fmt.Fprintln(w, strings.Join(headers, "\t")) - parsers := make([]*jsonpath.JSONPath, len(s.Columns)) - for ix := range s.Columns { - parsers[ix] = jsonpath.New(fmt.Sprintf("column%d", ix)) - if err := parsers[ix].Parse(s.Columns[ix].FieldSpec); err != nil { - return err - } - } - - if meta.IsListType(obj) { - objs, err := meta.ExtractList(obj) - if err != nil { - return err - } - for ix := range objs { - if err := s.printOneObject(objs[ix], parsers, w); err != nil { - return err - } - } - } else { - if err := s.printOneObject(obj, parsers, w); err != nil { - return err - } - } - return w.Flush() -} - -func (s *CustomColumnsPrinter) printOneObject(obj runtime.Object, parsers []*jsonpath.JSONPath, out io.Writer) error { - columns := make([]string, len(parsers)) - switch u := obj.(type) { - case *runtime.Unknown: - if len(u.Raw) > 0 { - var err error - if obj, err = runtime.Decode(s.Decoder, u.Raw); err != nil { - return fmt.Errorf("can't decode object for printing: %v (%s)", err, u.Raw) - } - } - } - for ix := range parsers { - parser := parsers[ix] - values, err := parser.FindResults(reflect.ValueOf(obj).Elem().Interface()) - if err != nil { - return err - } - if len(values) == 0 || len(values[0]) == 0 { - fmt.Fprintf(out, "<none>\t") - } - valueStrings := []string{} - for arrIx := range values { - for valIx := range values[arrIx] { - valueStrings = append(valueStrings, fmt.Sprintf("%v", values[arrIx][valIx].Interface())) - } - } - columns[ix] = strings.Join(valueStrings, ",") - } - fmt.Fprintln(out, strings.Join(columns, "\t")) - return nil -} - -func (s *CustomColumnsPrinter) HandledResources() []string { - return []string{} -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/describe.go b/vendor/k8s.io/kubernetes/pkg/kubectl/describe.go deleted file mode 100644 index faa0ef98d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/describe.go +++ /dev/null @@ -1,2360 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net" - "net/url" - "reflect" - "sort" - "strings" - "time" - - "github.com/golang/glog" - "k8s.io/kubernetes/federation/apis/federation" - fed_clientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_internalclientset" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/autoscaling" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/extensions" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - client "k8s.io/kubernetes/pkg/client/unversioned" - adapter "k8s.io/kubernetes/pkg/client/unversioned/adapters/internalclientset" - "k8s.io/kubernetes/pkg/fieldpath" - "k8s.io/kubernetes/pkg/fields" - qosutil "k8s.io/kubernetes/pkg/kubelet/qos/util" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/types" - deploymentutil "k8s.io/kubernetes/pkg/util/deployment" - "k8s.io/kubernetes/pkg/util/intstr" - "k8s.io/kubernetes/pkg/util/sets" -) - -// Describer generates output for the named resource or an error -// if the output could not be generated. Implementers typically -// abstract the retrieval of the named object from a remote server. -type Describer interface { - Describe(namespace, name string, describerSettings DescriberSettings) (output string, err error) -} - -// DescriberSettings holds display configuration for each object -// describer to control what is printed. -type DescriberSettings struct { - ShowEvents bool -} - -// ObjectDescriber is an interface for displaying arbitrary objects with extra -// information. Use when an object is in hand (on disk, or already retrieved). -// Implementers may ignore the additional information passed on extra, or use it -// by default. ObjectDescribers may return ErrNoDescriber if no suitable describer -// is found. -type ObjectDescriber interface { - DescribeObject(object interface{}, extra ...interface{}) (output string, err error) -} - -// ErrNoDescriber is a structured error indicating the provided object or objects -// cannot be described. -type ErrNoDescriber struct { - Types []string -} - -// Error implements the error interface. 
-func (e ErrNoDescriber) Error() string { - return fmt.Sprintf("no describer has been defined for %v", e.Types) -} - -func describerMap(c *client.Client) map[unversioned.GroupKind]Describer { - m := map[unversioned.GroupKind]Describer{ - api.Kind("Pod"): &PodDescriber{c}, - api.Kind("ReplicationController"): &ReplicationControllerDescriber{c}, - api.Kind("Secret"): &SecretDescriber{c}, - api.Kind("Service"): &ServiceDescriber{c}, - api.Kind("ServiceAccount"): &ServiceAccountDescriber{c}, - api.Kind("Node"): &NodeDescriber{c}, - api.Kind("LimitRange"): &LimitRangeDescriber{c}, - api.Kind("ResourceQuota"): &ResourceQuotaDescriber{c}, - api.Kind("PersistentVolume"): &PersistentVolumeDescriber{c}, - api.Kind("PersistentVolumeClaim"): &PersistentVolumeClaimDescriber{c}, - api.Kind("Namespace"): &NamespaceDescriber{c}, - api.Kind("Endpoints"): &EndpointsDescriber{c}, - api.Kind("ConfigMap"): &ConfigMapDescriber{c}, - - extensions.Kind("ReplicaSet"): &ReplicaSetDescriber{c}, - extensions.Kind("HorizontalPodAutoscaler"): &HorizontalPodAutoscalerDescriber{c}, - extensions.Kind("NetworkPolicy"): &NetworkPolicyDescriber{c}, - autoscaling.Kind("HorizontalPodAutoscaler"): &HorizontalPodAutoscalerDescriber{c}, - extensions.Kind("DaemonSet"): &DaemonSetDescriber{c}, - extensions.Kind("Deployment"): &DeploymentDescriber{adapter.FromUnversionedClient(c)}, - extensions.Kind("Job"): &JobDescriber{c}, - batch.Kind("Job"): &JobDescriber{c}, - apps.Kind("PetSet"): &PetSetDescriber{c}, - extensions.Kind("Ingress"): &IngressDescriber{c}, - } - - return m -} - -// List of all resource types we can describe -func DescribableResources() []string { - keys := make([]string, 0) - - for k := range describerMap(nil) { - resource := strings.ToLower(k.Kind) - keys = append(keys, resource) - } - return keys -} - -// Describer returns the default describe functions for each of the standard -// Kubernetes types. -func DescriberFor(kind unversioned.GroupKind, c *client.Client) (Describer, bool) { - f, ok := describerMap(c)[kind] - return f, ok -} - -// DefaultObjectDescriber can describe the default Kubernetes objects. 
-var DefaultObjectDescriber ObjectDescriber - -func init() { - d := &Describers{} - err := d.Add( - describeLimitRange, - describeQuota, - describePod, - describeService, - describeReplicationController, - describeDaemonSet, - describeNode, - describeNamespace, - ) - if err != nil { - glog.Fatalf("Cannot register describers: %v", err) - } - DefaultObjectDescriber = d -} - -// NamespaceDescriber generates information about a namespace -type NamespaceDescriber struct { - client.Interface -} - -func (d *NamespaceDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - ns, err := d.Namespaces().Get(name) - if err != nil { - return "", err - } - resourceQuotaList, err := d.ResourceQuotas(name).List(api.ListOptions{}) - if err != nil { - return "", err - } - limitRangeList, err := d.LimitRanges(name).List(api.ListOptions{}) - if err != nil { - return "", err - } - - return describeNamespace(ns, resourceQuotaList, limitRangeList) -} - -func describeNamespace(namespace *api.Namespace, resourceQuotaList *api.ResourceQuotaList, limitRangeList *api.LimitRangeList) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", namespace.Name) - printLabelsMultiline(out, "Labels", namespace.Labels) - fmt.Fprintf(out, "Status:\t%s\n", string(namespace.Status.Phase)) - if resourceQuotaList != nil { - fmt.Fprintf(out, "\n") - DescribeResourceQuotas(resourceQuotaList, out) - } - if limitRangeList != nil { - fmt.Fprintf(out, "\n") - DescribeLimitRanges(limitRangeList, out) - } - return nil - }) -} - -// DescribeLimitRanges merges a set of limit range items into a single tabular description -func DescribeLimitRanges(limitRanges *api.LimitRangeList, w io.Writer) { - if len(limitRanges.Items) == 0 { - fmt.Fprint(w, "No resource limits.\n") - return - } - fmt.Fprintf(w, "Resource Limits\n Type\tResource\tMin\tMax\tDefault Request\tDefault Limit\tMax Limit/Request Ratio\n") - fmt.Fprintf(w, " ----\t--------\t---\t---\t---------------\t-------------\t-----------------------\n") - for _, limitRange := range limitRanges.Items { - for i := range limitRange.Spec.Limits { - item := limitRange.Spec.Limits[i] - maxResources := item.Max - minResources := item.Min - defaultLimitResources := item.Default - defaultRequestResources := item.DefaultRequest - ratio := item.MaxLimitRequestRatio - - set := map[api.ResourceName]bool{} - for k := range maxResources { - set[k] = true - } - for k := range minResources { - set[k] = true - } - for k := range defaultLimitResources { - set[k] = true - } - for k := range defaultRequestResources { - set[k] = true - } - for k := range ratio { - set[k] = true - } - - for k := range set { - // if no value is set, we output - - maxValue := "-" - minValue := "-" - defaultLimitValue := "-" - defaultRequestValue := "-" - ratioValue := "-" - - maxQuantity, maxQuantityFound := maxResources[k] - if maxQuantityFound { - maxValue = maxQuantity.String() - } - - minQuantity, minQuantityFound := minResources[k] - if minQuantityFound { - minValue = minQuantity.String() - } - - defaultLimitQuantity, defaultLimitQuantityFound := defaultLimitResources[k] - if defaultLimitQuantityFound { - defaultLimitValue = defaultLimitQuantity.String() - } - - defaultRequestQuantity, defaultRequestQuantityFound := defaultRequestResources[k] - if defaultRequestQuantityFound { - defaultRequestValue = defaultRequestQuantity.String() - } - - ratioQuantity, ratioQuantityFound := ratio[k] - if ratioQuantityFound { - ratioValue = 
ratioQuantity.String() - } - - msg := " %s\t%v\t%v\t%v\t%v\t%v\t%v\n" - fmt.Fprintf(w, msg, item.Type, k, minValue, maxValue, defaultRequestValue, defaultLimitValue, ratioValue) - } - } - } -} - -// DescribeResourceQuotas merges a set of quota items into a single tabular description of all quotas -func DescribeResourceQuotas(quotas *api.ResourceQuotaList, w io.Writer) { - if len(quotas.Items) == 0 { - fmt.Fprint(w, "No resource quota.\n") - return - } - sort.Sort(SortableResourceQuotas(quotas.Items)) - - fmt.Fprint(w, "Resource Quotas") - for _, q := range quotas.Items { - fmt.Fprintf(w, "\n Name:\t%s\n", q.Name) - if len(q.Spec.Scopes) > 0 { - scopes := make([]string, 0, len(q.Spec.Scopes)) - for _, scope := range q.Spec.Scopes { - scopes = append(scopes, string(scope)) - } - sort.Strings(scopes) - fmt.Fprintf(w, " Scopes:\t%s\n", strings.Join(scopes, ", ")) - for _, scope := range scopes { - helpText := helpTextForResourceQuotaScope(api.ResourceQuotaScope(scope)) - if len(helpText) > 0 { - fmt.Fprintf(w, " * %s\n", helpText) - } - } - } - - fmt.Fprintf(w, " Resource\tUsed\tHard\n") - fmt.Fprint(w, " --------\t---\t---\n") - - resources := make([]api.ResourceName, 0, len(q.Status.Hard)) - for resource := range q.Status.Hard { - resources = append(resources, resource) - } - sort.Sort(SortableResourceNames(resources)) - - for _, resource := range resources { - hardQuantity := q.Status.Hard[resource] - usedQuantity := q.Status.Used[resource] - fmt.Fprintf(w, " %s\t%s\t%s\n", string(resource), usedQuantity.String(), hardQuantity.String()) - } - } -} - -// LimitRangeDescriber generates information about a limit range -type LimitRangeDescriber struct { - client.Interface -} - -func (d *LimitRangeDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - lr := d.LimitRanges(namespace) - - limitRange, err := lr.Get(name) - if err != nil { - return "", err - } - return describeLimitRange(limitRange) -} - -func describeLimitRange(limitRange *api.LimitRange) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", limitRange.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", limitRange.Namespace) - fmt.Fprintf(out, "Type\tResource\tMin\tMax\tDefault Request\tDefault Limit\tMax Limit/Request Ratio\n") - fmt.Fprintf(out, "----\t--------\t---\t---\t---------------\t-------------\t-----------------------\n") - for i := range limitRange.Spec.Limits { - item := limitRange.Spec.Limits[i] - maxResources := item.Max - minResources := item.Min - defaultLimitResources := item.Default - defaultRequestResources := item.DefaultRequest - ratio := item.MaxLimitRequestRatio - - set := map[api.ResourceName]bool{} - for k := range maxResources { - set[k] = true - } - for k := range minResources { - set[k] = true - } - for k := range defaultLimitResources { - set[k] = true - } - for k := range defaultRequestResources { - set[k] = true - } - for k := range ratio { - set[k] = true - } - - for k := range set { - // if no value is set, we output - - maxValue := "-" - minValue := "-" - defaultLimitValue := "-" - defaultRequestValue := "-" - ratioValue := "-" - - maxQuantity, maxQuantityFound := maxResources[k] - if maxQuantityFound { - maxValue = maxQuantity.String() - } - - minQuantity, minQuantityFound := minResources[k] - if minQuantityFound { - minValue = minQuantity.String() - } - - defaultLimitQuantity, defaultLimitQuantityFound := defaultLimitResources[k] - if defaultLimitQuantityFound { - defaultLimitValue = 
defaultLimitQuantity.String() - } - - defaultRequestQuantity, defaultRequestQuantityFound := defaultRequestResources[k] - if defaultRequestQuantityFound { - defaultRequestValue = defaultRequestQuantity.String() - } - - ratioQuantity, ratioQuantityFound := ratio[k] - if ratioQuantityFound { - ratioValue = ratioQuantity.String() - } - - msg := "%v\t%v\t%v\t%v\t%v\t%v\t%v\n" - fmt.Fprintf(out, msg, item.Type, k, minValue, maxValue, defaultRequestValue, defaultLimitValue, ratioValue) - } - } - return nil - }) -} - -// ResourceQuotaDescriber generates information about a resource quota -type ResourceQuotaDescriber struct { - client.Interface -} - -func (d *ResourceQuotaDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - rq := d.ResourceQuotas(namespace) - - resourceQuota, err := rq.Get(name) - if err != nil { - return "", err - } - - return describeQuota(resourceQuota) -} - -func helpTextForResourceQuotaScope(scope api.ResourceQuotaScope) string { - switch scope { - case api.ResourceQuotaScopeTerminating: - return "Matches all pods that have an active deadline." - case api.ResourceQuotaScopeNotTerminating: - return "Matches all pods that do not have an active deadline." - case api.ResourceQuotaScopeBestEffort: - return "Matches all pods that have best effort quality of service." - case api.ResourceQuotaScopeNotBestEffort: - return "Matches all pods that do not have best effort quality of service." - default: - return "" - } -} -func describeQuota(resourceQuota *api.ResourceQuota) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", resourceQuota.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", resourceQuota.Namespace) - if len(resourceQuota.Spec.Scopes) > 0 { - scopes := make([]string, 0, len(resourceQuota.Spec.Scopes)) - for _, scope := range resourceQuota.Spec.Scopes { - scopes = append(scopes, string(scope)) - } - sort.Strings(scopes) - fmt.Fprintf(out, "Scopes:\t%s\n", strings.Join(scopes, ", ")) - for _, scope := range scopes { - helpText := helpTextForResourceQuotaScope(api.ResourceQuotaScope(scope)) - if len(helpText) > 0 { - fmt.Fprintf(out, " * %s\n", helpText) - } - } - } - fmt.Fprintf(out, "Resource\tUsed\tHard\n") - fmt.Fprintf(out, "--------\t----\t----\n") - - resources := make([]api.ResourceName, 0, len(resourceQuota.Status.Hard)) - for resource := range resourceQuota.Status.Hard { - resources = append(resources, resource) - } - sort.Sort(SortableResourceNames(resources)) - - msg := "%v\t%v\t%v\n" - for i := range resources { - resource := resources[i] - hardQuantity := resourceQuota.Status.Hard[resource] - usedQuantity := resourceQuota.Status.Used[resource] - fmt.Fprintf(out, msg, resource, usedQuantity.String(), hardQuantity.String()) - } - return nil - }) -} - -// PodDescriber generates information about a pod and the replication controllers that -// create it. 
-type PodDescriber struct { - client.Interface -} - -func (d *PodDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - pod, err := d.Pods(namespace).Get(name) - if err != nil { - if describerSettings.ShowEvents { - eventsInterface := d.Events(namespace) - selector := eventsInterface.GetFieldSelector(&name, &namespace, nil, nil) - options := api.ListOptions{FieldSelector: selector} - events, err2 := eventsInterface.List(options) - if describerSettings.ShowEvents && err2 == nil && len(events.Items) > 0 { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Pod '%v': error '%v', but found events.\n", name, err) - DescribeEvents(events, out) - return nil - }) - } - } - return "", err - } - - var events *api.EventList - if describerSettings.ShowEvents { - if ref, err := api.GetReference(pod); err != nil { - glog.Errorf("Unable to construct reference to '%#v': %v", pod, err) - } else { - ref.Kind = "" - events, _ = d.Events(namespace).Search(ref) - } - } - - return describePod(pod, events) -} - -func describePod(pod *api.Pod, events *api.EventList) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", pod.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", pod.Namespace) - fmt.Fprintf(out, "Node:\t%s\n", pod.Spec.NodeName+"/"+pod.Status.HostIP) - if pod.Status.StartTime != nil { - fmt.Fprintf(out, "Start Time:\t%s\n", pod.Status.StartTime.Time.Format(time.RFC1123Z)) - } - printLabelsMultiline(out, "Labels", pod.Labels) - if pod.DeletionTimestamp != nil { - fmt.Fprintf(out, "Status:\tTerminating (expires %s)\n", pod.DeletionTimestamp.Time.Format(time.RFC1123Z)) - fmt.Fprintf(out, "Termination Grace Period:\t%ds\n", *pod.DeletionGracePeriodSeconds) - } else { - fmt.Fprintf(out, "Status:\t%s\n", string(pod.Status.Phase)) - } - if len(pod.Status.Reason) > 0 { - fmt.Fprintf(out, "Reason:\t%s\n", pod.Status.Reason) - } - if len(pod.Status.Message) > 0 { - fmt.Fprintf(out, "Message:\t%s\n", pod.Status.Message) - } - fmt.Fprintf(out, "IP:\t%s\n", pod.Status.PodIP) - fmt.Fprintf(out, "Controllers:\t%s\n", printControllers(pod.Annotations)) - if len(pod.Spec.InitContainers) > 0 { - describeContainers("Init Containers", pod.Spec.InitContainers, pod.Status.InitContainerStatuses, EnvValueRetriever(pod), out, "") - } - describeContainers("Containers", pod.Spec.Containers, pod.Status.ContainerStatuses, EnvValueRetriever(pod), out, "") - if len(pod.Status.Conditions) > 0 { - fmt.Fprint(out, "Conditions:\n Type\tStatus\n") - for _, c := range pod.Status.Conditions { - fmt.Fprintf(out, " %v \t%v \n", - c.Type, - c.Status) - } - } - describeVolumes(pod.Spec.Volumes, out, "") - fmt.Fprintf(out, "QoS Tier:\t%s\n", qosutil.GetPodQos(pod)) - if events != nil { - DescribeEvents(events, out) - } - return nil - }) -} - -func printControllers(annotation map[string]string) string { - value, ok := annotation["kubernetes.io/created-by"] - if ok { - var r api.SerializedReference - err := json.Unmarshal([]byte(value), &r) - if err == nil { - return fmt.Sprintf("%s/%s", r.Reference.Kind, r.Reference.Name) - } - } - return "<none>" -} - -// TODO: Do a better job at indenting, maybe by using a prefix writer -func describeVolumes(volumes []api.Volume, out io.Writer, space string) { - if volumes == nil || len(volumes) == 0 { - fmt.Fprintf(out, "%sNo volumes.\n", space) - return - } - fmt.Fprintf(out, "%sVolumes:\n", space) - for _, volume := range volumes { - nameIndent := "" - if len(space) > 0 { - nameIndent = " " - } - 
fmt.Fprintf(out, " %s%v:\n", nameIndent, volume.Name) - switch { - case volume.VolumeSource.HostPath != nil: - printHostPathVolumeSource(volume.VolumeSource.HostPath, out) - case volume.VolumeSource.EmptyDir != nil: - printEmptyDirVolumeSource(volume.VolumeSource.EmptyDir, out) - case volume.VolumeSource.GCEPersistentDisk != nil: - printGCEPersistentDiskVolumeSource(volume.VolumeSource.GCEPersistentDisk, out) - case volume.VolumeSource.AWSElasticBlockStore != nil: - printAWSElasticBlockStoreVolumeSource(volume.VolumeSource.AWSElasticBlockStore, out) - case volume.VolumeSource.GitRepo != nil: - printGitRepoVolumeSource(volume.VolumeSource.GitRepo, out) - case volume.VolumeSource.Secret != nil: - printSecretVolumeSource(volume.VolumeSource.Secret, out) - case volume.VolumeSource.ConfigMap != nil: - printConfigMapVolumeSource(volume.VolumeSource.ConfigMap, out) - case volume.VolumeSource.NFS != nil: - printNFSVolumeSource(volume.VolumeSource.NFS, out) - case volume.VolumeSource.ISCSI != nil: - printISCSIVolumeSource(volume.VolumeSource.ISCSI, out) - case volume.VolumeSource.Glusterfs != nil: - printGlusterfsVolumeSource(volume.VolumeSource.Glusterfs, out) - case volume.VolumeSource.PersistentVolumeClaim != nil: - printPersistentVolumeClaimVolumeSource(volume.VolumeSource.PersistentVolumeClaim, out) - case volume.VolumeSource.RBD != nil: - printRBDVolumeSource(volume.VolumeSource.RBD, out) - case volume.VolumeSource.DownwardAPI != nil: - printDownwardAPIVolumeSource(volume.VolumeSource.DownwardAPI, out) - default: - fmt.Fprintf(out, " <unknown>\n") - } - } -} - -func printHostPathVolumeSource(hostPath *api.HostPathVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tHostPath (bare host directory volume)\n"+ - " Path:\t%v\n", hostPath.Path) -} - -func printEmptyDirVolumeSource(emptyDir *api.EmptyDirVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tEmptyDir (a temporary directory that shares a pod's lifetime)\n"+ - " Medium:\t%v\n", emptyDir.Medium) -} - -func printGCEPersistentDiskVolumeSource(gce *api.GCEPersistentDiskVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tGCEPersistentDisk (a Persistent Disk resource in Google Compute Engine)\n"+ - " PDName:\t%v\n"+ - " FSType:\t%v\n"+ - " Partition:\t%v\n"+ - " ReadOnly:\t%v\n", - gce.PDName, gce.FSType, gce.Partition, gce.ReadOnly) -} - -func printAWSElasticBlockStoreVolumeSource(aws *api.AWSElasticBlockStoreVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tAWSElasticBlockStore (a Persistent Disk resource in AWS)\n"+ - " VolumeID:\t%v\n"+ - " FSType:\t%v\n"+ - " Partition:\t%v\n"+ - " ReadOnly:\t%v\n", - aws.VolumeID, aws.FSType, aws.Partition, aws.ReadOnly) -} - -func printGitRepoVolumeSource(git *api.GitRepoVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tGitRepo (a volume that is pulled from git when the pod is created)\n"+ - " Repository:\t%v\n"+ - " Revision:\t%v\n", - git.Repository, git.Revision) -} - -func printSecretVolumeSource(secret *api.SecretVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tSecret (a volume populated by a Secret)\n"+ - " SecretName:\t%v\n", secret.SecretName) -} - -func printConfigMapVolumeSource(configMap *api.ConfigMapVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tConfigMap (a volume populated by a ConfigMap)\n"+ - " Name:\t%v\n", configMap.Name) -} - -func printNFSVolumeSource(nfs *api.NFSVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tNFS (an NFS mount that lasts the lifetime of a pod)\n"+ - " Server:\t%v\n"+ - " Path:\t%v\n"+ - " 
ReadOnly:\t%v\n", - nfs.Server, nfs.Path, nfs.ReadOnly) -} - -func printISCSIVolumeSource(iscsi *api.ISCSIVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tISCSI (an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod)\n"+ - " TargetPortal:\t%v\n"+ - " IQN:\t%v\n"+ - " Lun:\t%v\n"+ - " ISCSIInterface\t%v\n"+ - " FSType:\t%v\n"+ - " ReadOnly:\t%v\n", - iscsi.TargetPortal, iscsi.IQN, iscsi.Lun, iscsi.ISCSIInterface, iscsi.FSType, iscsi.ReadOnly) -} - -func printGlusterfsVolumeSource(glusterfs *api.GlusterfsVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tGlusterfs (a Glusterfs mount on the host that shares a pod's lifetime)\n"+ - " EndpointsName:\t%v\n"+ - " Path:\t%v\n"+ - " ReadOnly:\t%v\n", - glusterfs.EndpointsName, glusterfs.Path, glusterfs.ReadOnly) -} - -func printPersistentVolumeClaimVolumeSource(claim *api.PersistentVolumeClaimVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tPersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)\n"+ - " ClaimName:\t%v\n"+ - " ReadOnly:\t%v\n", - claim.ClaimName, claim.ReadOnly) -} - -func printRBDVolumeSource(rbd *api.RBDVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tRBD (a Rados Block Device mount on the host that shares a pod's lifetime)\n"+ - " CephMonitors:\t%v\n"+ - " RBDImage:\t%v\n"+ - " FSType:\t%v\n"+ - " RBDPool:\t%v\n"+ - " RadosUser:\t%v\n"+ - " Keyring:\t%v\n"+ - " SecretRef:\t%v\n"+ - " ReadOnly:\t%v\n", - rbd.CephMonitors, rbd.RBDImage, rbd.FSType, rbd.RBDPool, rbd.RadosUser, rbd.Keyring, rbd.SecretRef, rbd.ReadOnly) -} - -func printDownwardAPIVolumeSource(d *api.DownwardAPIVolumeSource, out io.Writer) { - fmt.Fprintf(out, " Type:\tDownwardAPI (a volume populated by information about the pod)\n Items:\n") - for _, mapping := range d.Items { - if mapping.FieldRef != nil { - fmt.Fprintf(out, " %v -> %v\n", mapping.FieldRef.FieldPath, mapping.Path) - } - if mapping.ResourceFieldRef != nil { - fmt.Fprintf(out, " %v -> %v\n", mapping.ResourceFieldRef.Resource, mapping.Path) - } - } -} - -type PersistentVolumeDescriber struct { - client.Interface -} - -func (d *PersistentVolumeDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - c := d.PersistentVolumes() - - pv, err := c.Get(name) - if err != nil { - return "", err - } - - storage := pv.Spec.Capacity[api.ResourceStorage] - - var events *api.EventList - if describerSettings.ShowEvents { - events, _ = d.Events(namespace).Search(pv) - } - - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", pv.Name) - printLabelsMultiline(out, "Labels", pv.Labels) - fmt.Fprintf(out, "Status:\t%s\n", pv.Status.Phase) - if pv.Spec.ClaimRef != nil { - fmt.Fprintf(out, "Claim:\t%s\n", pv.Spec.ClaimRef.Namespace+"/"+pv.Spec.ClaimRef.Name) - } else { - fmt.Fprintf(out, "Claim:\t%s\n", "") - } - fmt.Fprintf(out, "Reclaim Policy:\t%v\n", pv.Spec.PersistentVolumeReclaimPolicy) - fmt.Fprintf(out, "Access Modes:\t%s\n", api.GetAccessModesAsString(pv.Spec.AccessModes)) - fmt.Fprintf(out, "Capacity:\t%s\n", storage.String()) - fmt.Fprintf(out, "Message:\t%s\n", pv.Status.Message) - fmt.Fprintf(out, "Source:\n") - - switch { - case pv.Spec.HostPath != nil: - printHostPathVolumeSource(pv.Spec.HostPath, out) - case pv.Spec.GCEPersistentDisk != nil: - printGCEPersistentDiskVolumeSource(pv.Spec.GCEPersistentDisk, out) - case pv.Spec.AWSElasticBlockStore != nil: - printAWSElasticBlockStoreVolumeSource(pv.Spec.AWSElasticBlockStore, out) - 
case pv.Spec.NFS != nil: - printNFSVolumeSource(pv.Spec.NFS, out) - case pv.Spec.ISCSI != nil: - printISCSIVolumeSource(pv.Spec.ISCSI, out) - case pv.Spec.Glusterfs != nil: - printGlusterfsVolumeSource(pv.Spec.Glusterfs, out) - case pv.Spec.RBD != nil: - printRBDVolumeSource(pv.Spec.RBD, out) - } - - if events != nil { - DescribeEvents(events, out) - } - - return nil - }) -} - -type PersistentVolumeClaimDescriber struct { - client.Interface -} - -func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - c := d.PersistentVolumeClaims(namespace) - - pvc, err := c.Get(name) - if err != nil { - return "", err - } - - storage := pvc.Spec.Resources.Requests[api.ResourceStorage] - capacity := "" - accessModes := "" - if pvc.Spec.VolumeName != "" { - accessModes = api.GetAccessModesAsString(pvc.Status.AccessModes) - storage = pvc.Status.Capacity[api.ResourceStorage] - capacity = storage.String() - } - - events, _ := d.Events(namespace).Search(pvc) - - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", pvc.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", pvc.Namespace) - fmt.Fprintf(out, "Status:\t%v\n", pvc.Status.Phase) - fmt.Fprintf(out, "Volume:\t%s\n", pvc.Spec.VolumeName) - printLabelsMultiline(out, "Labels", pvc.Labels) - fmt.Fprintf(out, "Capacity:\t%s\n", capacity) - fmt.Fprintf(out, "Access Modes:\t%s\n", accessModes) - if events != nil { - DescribeEvents(events, out) - } - - return nil - }) -} - -// TODO: Do a better job at indenting, maybe by using a prefix writer -func describeContainers(label string, containers []api.Container, containerStatuses []api.ContainerStatus, resolverFn EnvVarResolverFunc, out io.Writer, space string) { - statuses := map[string]api.ContainerStatus{} - for _, status := range containerStatuses { - statuses[status.Name] = status - } - if len(containers) == 0 { - fmt.Fprintf(out, "%s%s: <none>\n", space, label) - } else { - fmt.Fprintf(out, "%s%s:\n", space, label) - } - for _, container := range containers { - status, ok := statuses[container.Name] - nameIndent := "" - if len(space) > 0 { - nameIndent = " " - } - fmt.Fprintf(out, " %s%v:\n", nameIndent, container.Name) - if ok { - fmt.Fprintf(out, " Container ID:\t%s\n", status.ContainerID) - } - fmt.Fprintf(out, " Image:\t%s\n", container.Image) - if ok { - fmt.Fprintf(out, " Image ID:\t%s\n", status.ImageID) - } - portString := describeContainerPorts(container.Ports) - if strings.Contains(portString, ",") { - fmt.Fprintf(out, " Ports:\t%s\n", portString) - } else { - fmt.Fprintf(out, " Port:\t%s\n", portString) - } - - if len(container.Command) > 0 { - fmt.Fprintf(out, " Command:\n") - for _, c := range container.Command { - fmt.Fprintf(out, " %s\n", c) - } - } - if len(container.Args) > 0 { - fmt.Fprintf(out, " Args:\n") - for _, arg := range container.Args { - fmt.Fprintf(out, " %s\n", arg) - } - } - - resources := container.Resources - if len(resources.Limits) > 0 { - fmt.Fprintf(out, " Limits:\n") - } - for _, name := range SortedResourceNames(resources.Limits) { - quantity := resources.Limits[name] - fmt.Fprintf(out, " %s:\t%s\n", name, quantity.String()) - } - - if len(resources.Requests) > 0 { - fmt.Fprintf(out, " Requests:\n") - } - for _, name := range SortedResourceNames(resources.Requests) { - quantity := resources.Requests[name] - fmt.Fprintf(out, " %s:\t%s\n", name, quantity.String()) - } - - if ok { - describeStatus("State", status.State, out) - if status.LastTerminationState.Terminated != nil { - 
describeStatus("Last State", status.LastTerminationState, out) - } - fmt.Fprintf(out, " Ready:\t%v\n", printBool(status.Ready)) - fmt.Fprintf(out, " Restart Count:\t%d\n", status.RestartCount) - } - - if container.LivenessProbe != nil { - probe := DescribeProbe(container.LivenessProbe) - fmt.Fprintf(out, " Liveness:\t%s\n", probe) - } - if container.ReadinessProbe != nil { - probe := DescribeProbe(container.ReadinessProbe) - fmt.Fprintf(out, " Readiness:\t%s\n", probe) - } - none := "" - if len(container.Env) == 0 { - none = "\t<none>" - } - fmt.Fprintf(out, " Environment Variables:%s\n", none) - for _, e := range container.Env { - if e.ValueFrom == nil { - fmt.Fprintf(out, " %s:\t%s\n", e.Name, e.Value) - continue - } - - switch { - case e.ValueFrom.FieldRef != nil: - var valueFrom string - if resolverFn != nil { - valueFrom = resolverFn(e) - } - fmt.Fprintf(out, " %s:\t%s (%s:%s)\n", e.Name, valueFrom, e.ValueFrom.FieldRef.APIVersion, e.ValueFrom.FieldRef.FieldPath) - case e.ValueFrom.ResourceFieldRef != nil: - valueFrom, err := fieldpath.ExtractContainerResourceValue(e.ValueFrom.ResourceFieldRef, &container) - if err != nil { - valueFrom = "" - } - fmt.Fprintf(out, " %s:\t%s (%s)\n", e.Name, valueFrom, e.ValueFrom.ResourceFieldRef.Resource) - case e.ValueFrom.SecretKeyRef != nil: - fmt.Fprintf(out, " %s:\t<set to the key '%s' in secret '%s'>\n", e.Name, e.ValueFrom.SecretKeyRef.Key, e.ValueFrom.SecretKeyRef.Name) - case e.ValueFrom.ConfigMapKeyRef != nil: - fmt.Fprintf(out, " %s:\t<set to the key '%s' of config map '%s'>\n", e.Name, e.ValueFrom.ConfigMapKeyRef.Key, e.ValueFrom.ConfigMapKeyRef.Name) - } - } - } -} - -func describeContainerPorts(cPorts []api.ContainerPort) string { - ports := make([]string, 0, len(cPorts)) - for _, cPort := range cPorts { - ports = append(ports, fmt.Sprintf("%d/%s", cPort.ContainerPort, cPort.Protocol)) - } - return strings.Join(ports, ", ") -} - -// DescribeProbe is exported for consumers in other API groups that have probes -func DescribeProbe(probe *api.Probe) string { - attrs := fmt.Sprintf("delay=%ds timeout=%ds period=%ds #success=%d #failure=%d", probe.InitialDelaySeconds, probe.TimeoutSeconds, probe.PeriodSeconds, probe.SuccessThreshold, probe.FailureThreshold) - switch { - case probe.Exec != nil: - return fmt.Sprintf("exec %v %s", probe.Exec.Command, attrs) - case probe.HTTPGet != nil: - url := &url.URL{} - url.Scheme = strings.ToLower(string(probe.HTTPGet.Scheme)) - if len(probe.HTTPGet.Port.String()) > 0 { - url.Host = net.JoinHostPort(probe.HTTPGet.Host, probe.HTTPGet.Port.String()) - } else { - url.Host = probe.HTTPGet.Host - } - url.Path = probe.HTTPGet.Path - return fmt.Sprintf("http-get %s %s", url.String(), attrs) - case probe.TCPSocket != nil: - return fmt.Sprintf("tcp-socket :%s %s", probe.TCPSocket.Port.String(), attrs) - } - return fmt.Sprintf("unknown %s", attrs) -} - -type EnvVarResolverFunc func(e api.EnvVar) string - -// EnvValueFrom is exported for use by describers in other packages -func EnvValueRetriever(pod *api.Pod) EnvVarResolverFunc { - return func(e api.EnvVar) string { - internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(e.ValueFrom.FieldRef.APIVersion, "Pod", e.ValueFrom.FieldRef.FieldPath, "") - if err != nil { - return "" // pod validation should catch this on create - } - - valueFrom, err := fieldpath.ExtractFieldPathAsString(pod, internalFieldPath) - if err != nil { - return "" // pod validation should catch this on create - } - - return valueFrom - } -} - -func describeStatus(stateName string, state 
api.ContainerState, out io.Writer) { - switch { - case state.Running != nil: - fmt.Fprintf(out, " %s:\tRunning\n", stateName) - fmt.Fprintf(out, " Started:\t%v\n", state.Running.StartedAt.Time.Format(time.RFC1123Z)) - case state.Waiting != nil: - fmt.Fprintf(out, " %s:\tWaiting\n", stateName) - if state.Waiting.Reason != "" { - fmt.Fprintf(out, " Reason:\t%s\n", state.Waiting.Reason) - } - case state.Terminated != nil: - fmt.Fprintf(out, " %s:\tTerminated\n", stateName) - if state.Terminated.Reason != "" { - fmt.Fprintf(out, " Reason:\t%s\n", state.Terminated.Reason) - } - if state.Terminated.Message != "" { - fmt.Fprintf(out, " Message:\t%s\n", state.Terminated.Message) - } - fmt.Fprintf(out, " Exit Code:\t%d\n", state.Terminated.ExitCode) - if state.Terminated.Signal > 0 { - fmt.Fprintf(out, " Signal:\t%d\n", state.Terminated.Signal) - } - fmt.Fprintf(out, " Started:\t%s\n", state.Terminated.StartedAt.Time.Format(time.RFC1123Z)) - fmt.Fprintf(out, " Finished:\t%s\n", state.Terminated.FinishedAt.Time.Format(time.RFC1123Z)) - default: - fmt.Fprintf(out, " %s:\tWaiting\n", stateName) - } -} - -func printBool(value bool) string { - if value { - return "True" - } - - return "False" -} - -// ReplicationControllerDescriber generates information about a replication controller -// and the pods it has created. -type ReplicationControllerDescriber struct { - client.Interface -} - -func (d *ReplicationControllerDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - rc := d.ReplicationControllers(namespace) - pc := d.Pods(namespace) - - controller, err := rc.Get(name) - if err != nil { - return "", err - } - - running, waiting, succeeded, failed, err := getPodStatusForController(pc, labels.SelectorFromSet(controller.Spec.Selector)) - if err != nil { - return "", err - } - - var events *api.EventList - if describerSettings.ShowEvents { - events, _ = d.Events(namespace).Search(controller) - } - - return describeReplicationController(controller, events, running, waiting, succeeded, failed) -} - -func describeReplicationController(controller *api.ReplicationController, events *api.EventList, running, waiting, succeeded, failed int) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", controller.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", controller.Namespace) - if controller.Spec.Template != nil { - fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&controller.Spec.Template.Spec)) - } else { - fmt.Fprintf(out, "Image(s):\t%s\n", "<unset>") - } - fmt.Fprintf(out, "Selector:\t%s\n", labels.FormatLabels(controller.Spec.Selector)) - printLabelsMultiline(out, "Labels", controller.Labels) - fmt.Fprintf(out, "Replicas:\t%d current / %d desired\n", controller.Status.Replicas, controller.Spec.Replicas) - fmt.Fprintf(out, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) - if controller.Spec.Template != nil { - describeVolumes(controller.Spec.Template.Spec.Volumes, out, "") - } - if events != nil { - DescribeEvents(events, out) - } - return nil - }) -} - -func DescribePodTemplate(template *api.PodTemplateSpec, out io.Writer) { - if template == nil { - fmt.Fprintf(out, " <unset>") - return - } - printLabelsMultiline(out, " Labels", template.Labels) - if len(template.Annotations) > 0 { - printLabelsMultiline(out, " Annotations", template.Annotations) - } - if len(template.Spec.ServiceAccountName) > 0 { - fmt.Fprintf(out, " Service Account:\t%s\n", 
template.Spec.ServiceAccountName) - } - if len(template.Spec.InitContainers) > 0 { - describeContainers("Init Containers", template.Spec.InitContainers, nil, nil, out, " ") - } - describeContainers("Containers", template.Spec.Containers, nil, nil, out, " ") - describeVolumes(template.Spec.Volumes, out, " ") -} - -// ReplicaSetDescriber generates information about a ReplicaSet and the pods it has created. -type ReplicaSetDescriber struct { - client.Interface -} - -func (d *ReplicaSetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - rsc := d.Extensions().ReplicaSets(namespace) - pc := d.Pods(namespace) - - rs, err := rsc.Get(name) - if err != nil { - return "", err - } - - selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector) - if err != nil { - return "", err - } - - running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector) - if err != nil { - return "", err - } - - var events *api.EventList - if describerSettings.ShowEvents { - events, _ = d.Events(namespace).Search(rs) - } - - return describeReplicaSet(rs, events, running, waiting, succeeded, failed) -} - -func describeReplicaSet(rs *extensions.ReplicaSet, events *api.EventList, running, waiting, succeeded, failed int) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", rs.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", rs.Namespace) - fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&rs.Spec.Template.Spec)) - fmt.Fprintf(out, "Selector:\t%s\n", unversioned.FormatLabelSelector(rs.Spec.Selector)) - printLabelsMultiline(out, "Labels", rs.Labels) - fmt.Fprintf(out, "Replicas:\t%d current / %d desired\n", rs.Status.Replicas, rs.Spec.Replicas) - fmt.Fprintf(out, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) - describeVolumes(rs.Spec.Template.Spec.Volumes, out, "") - if events != nil { - DescribeEvents(events, out) - } - return nil - }) -} - -// JobDescriber generates information about a job and the pods it has created. 
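DescribeProbe, deleted above, renders a probe as a single line. As an illustrative usage sketch, the probe values below are made up, and the string in the trailing comment is roughly what the formatting above would produce for that input.

```go
probe := &api.Probe{
	Handler: api.Handler{
		HTTPGet: &api.HTTPGetAction{
			Path:   "/healthz",
			Port:   intstr.FromInt(8080),
			Scheme: api.URISchemeHTTP,
		},
	},
	InitialDelaySeconds: 15,
	TimeoutSeconds:      1,
	PeriodSeconds:       10,
	SuccessThreshold:    1,
	FailureThreshold:    3,
}
// Roughly: "http-get http://:8080/healthz delay=15s timeout=1s period=10s #success=1 #failure=3"
fmt.Println(DescribeProbe(probe))
```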
-type JobDescriber struct { - client *client.Client -} - -func (d *JobDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - job, err := d.client.Extensions().Jobs(namespace).Get(name) - if err != nil { - return "", err - } - - var events *api.EventList - if describerSettings.ShowEvents { - events, _ = d.client.Events(namespace).Search(job) - } - - return describeJob(job, events) -} - -func describeJob(job *batch.Job, events *api.EventList) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", job.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", job.Namespace) - fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&job.Spec.Template.Spec)) - selector, _ := unversioned.LabelSelectorAsSelector(job.Spec.Selector) - fmt.Fprintf(out, "Selector:\t%s\n", selector) - fmt.Fprintf(out, "Parallelism:\t%d\n", *job.Spec.Parallelism) - if job.Spec.Completions != nil { - fmt.Fprintf(out, "Completions:\t%d\n", *job.Spec.Completions) - } else { - fmt.Fprintf(out, "Completions:\t<unset>\n") - } - if job.Status.StartTime != nil { - fmt.Fprintf(out, "Start Time:\t%s\n", job.Status.StartTime.Time.Format(time.RFC1123Z)) - } - if job.Spec.ActiveDeadlineSeconds != nil { - fmt.Fprintf(out, "Active Deadline Seconds:\t%ds\n", *job.Spec.ActiveDeadlineSeconds) - } - printLabelsMultiline(out, "Labels", job.Labels) - fmt.Fprintf(out, "Pods Statuses:\t%d Running / %d Succeeded / %d Failed\n", job.Status.Active, job.Status.Succeeded, job.Status.Failed) - describeVolumes(job.Spec.Template.Spec.Volumes, out, "") - if events != nil { - DescribeEvents(events, out) - } - return nil - }) -} - -// DaemonSetDescriber generates information about a daemon set and the pods it has created. -type DaemonSetDescriber struct { - client.Interface -} - -func (d *DaemonSetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - dc := d.Extensions().DaemonSets(namespace) - pc := d.Pods(namespace) - - daemon, err := dc.Get(name) - if err != nil { - return "", err - } - - selector, err := unversioned.LabelSelectorAsSelector(daemon.Spec.Selector) - if err != nil { - return "", err - } - running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector) - if err != nil { - return "", err - } - - var events *api.EventList - if describerSettings.ShowEvents { - events, _ = d.Events(namespace).Search(daemon) - } - - return describeDaemonSet(daemon, events, running, waiting, succeeded, failed) -} - -func describeDaemonSet(daemon *extensions.DaemonSet, events *api.EventList, running, waiting, succeeded, failed int) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", daemon.Name) - fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&daemon.Spec.Template.Spec)) - selector, err := unversioned.LabelSelectorAsSelector(daemon.Spec.Selector) - if err != nil { - // this shouldn't happen if LabelSelector passed validation - return err - } - fmt.Fprintf(out, "Selector:\t%s\n", selector) - fmt.Fprintf(out, "Node-Selector:\t%s\n", labels.FormatLabels(daemon.Spec.Template.Spec.NodeSelector)) - printLabelsMultiline(out, "Labels", daemon.Labels) - fmt.Fprintf(out, "Desired Number of Nodes Scheduled: %d\n", daemon.Status.DesiredNumberScheduled) - fmt.Fprintf(out, "Current Number of Nodes Scheduled: %d\n", daemon.Status.CurrentNumberScheduled) - fmt.Fprintf(out, "Number of Nodes Misscheduled: %d\n", daemon.Status.NumberMisscheduled) - fmt.Fprintf(out, "Pods Status:\t%d 
Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) - if events != nil { - DescribeEvents(events, out) - } - return nil - }) -} - -// SecretDescriber generates information about a secret -type SecretDescriber struct { - client.Interface -} - -func (d *SecretDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - c := d.Secrets(namespace) - - secret, err := c.Get(name) - if err != nil { - return "", err - } - - return describeSecret(secret) -} - -func describeSecret(secret *api.Secret) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", secret.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", secret.Namespace) - printLabelsMultiline(out, "Labels", secret.Labels) - printLabelsMultiline(out, "Annotations", secret.Annotations) - - fmt.Fprintf(out, "\nType:\t%s\n", secret.Type) - - fmt.Fprintf(out, "\nData\n====\n") - for k, v := range secret.Data { - switch { - case k == api.ServiceAccountTokenKey && secret.Type == api.SecretTypeServiceAccountToken: - fmt.Fprintf(out, "%s:\t%s\n", k, string(v)) - default: - fmt.Fprintf(out, "%s:\t%d bytes\n", k, len(v)) - } - } - - return nil - }) -} - -type IngressDescriber struct { - client.Interface -} - -func (i *IngressDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - c := i.Extensions().Ingress(namespace) - ing, err := c.Get(name) - if err != nil { - return "", err - } - return i.describeIngress(ing, describerSettings) -} - -func (i *IngressDescriber) describeBackend(ns string, backend *extensions.IngressBackend) string { - endpoints, _ := i.Endpoints(ns).Get(backend.ServiceName) - service, _ := i.Services(ns).Get(backend.ServiceName) - spName := "" - for i := range service.Spec.Ports { - sp := &service.Spec.Ports[i] - switch backend.ServicePort.Type { - case intstr.String: - if backend.ServicePort.StrVal == sp.Name { - spName = sp.Name - } - case intstr.Int: - if int32(backend.ServicePort.IntVal) == sp.Port { - spName = sp.Name - } - } - } - return formatEndpoints(endpoints, sets.NewString(spName)) -} - -func (i *IngressDescriber) describeIngress(ing *extensions.Ingress, describerSettings DescriberSettings) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%v\n", ing.Name) - fmt.Fprintf(out, "Namespace:\t%v\n", ing.Namespace) - fmt.Fprintf(out, "Address:\t%v\n", loadBalancerStatusStringer(ing.Status.LoadBalancer, true)) - def := ing.Spec.Backend - ns := ing.Namespace - if def == nil { - // Ingresses that don't specify a default backend inherit the - // default backend in the kube-system namespace. 
- def = &extensions.IngressBackend{ - ServiceName: "default-http-backend", - ServicePort: intstr.IntOrString{Type: intstr.Int, IntVal: 80}, - } - ns = api.NamespaceSystem - } - fmt.Fprintf(out, "Default backend:\t%s (%s)\n", backendStringer(def), i.describeBackend(ns, def)) - if len(ing.Spec.TLS) != 0 { - describeIngressTLS(out, ing.Spec.TLS) - } - fmt.Fprint(out, "Rules:\n Host\tPath\tBackends\n") - fmt.Fprint(out, " ----\t----\t--------\n") - count := 0 - for _, rules := range ing.Spec.Rules { - if rules.HTTP == nil { - continue - } - count++ - host := rules.Host - if len(host) == 0 { - host = "*" - } - fmt.Fprintf(out, " %s\t\n", host) - for _, path := range rules.HTTP.Paths { - fmt.Fprintf(out, " \t%s \t%s (%s)\n", path.Path, backendStringer(&path.Backend), i.describeBackend(ns, &path.Backend)) - } - } - if count == 0 { - fmt.Fprintf(out, " %s\t%s \t%s (%s)\n", "*", "*", backendStringer(def), i.describeBackend(ns, def)) - } - describeIngressAnnotations(out, ing.Annotations) - - if describerSettings.ShowEvents { - events, _ := i.Events(ing.Namespace).Search(ing) - if events != nil { - DescribeEvents(events, out) - } - } - return nil - }) -} - -func describeIngressTLS(out io.Writer, ingTLS []extensions.IngressTLS) { - fmt.Fprintf(out, "TLS:\n") - for _, t := range ingTLS { - if t.SecretName == "" { - fmt.Fprintf(out, " SNI routes %v\n", strings.Join(t.Hosts, ",")) - } else { - fmt.Fprintf(out, " %v terminates %v\n", t.SecretName, strings.Join(t.Hosts, ",")) - } - } - return -} - -// TODO: Move from annotations into Ingress status. -func describeIngressAnnotations(out io.Writer, annotations map[string]string) { - fmt.Fprintf(out, "Annotations:\n") - for k, v := range annotations { - if !strings.HasPrefix(k, "ingress") { - continue - } - parts := strings.Split(k, "/") - name := parts[len(parts)-1] - fmt.Fprintf(out, " %v:\t%s\n", name, v) - } - return -} - -// ServiceDescriber generates information about a service. 
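describeBackend above resolves an IngressBackend's ServicePort, which is an intstr.IntOrString: it matches either the port name or the numeric port of the Service. A minimal sketch of that int-or-string dispatch, with hypothetical port data standing in for an api.ServicePort:

```go
// matchesPort mirrors the matching loop in describeBackend; spName and spPort
// stand in for an api.ServicePort's Name and Port fields.
func matchesPort(backendPort intstr.IntOrString, spName string, spPort int32) bool {
	switch backendPort.Type {
	case intstr.String:
		return backendPort.StrVal == spName
	case intstr.Int:
		return int32(backendPort.IntVal) == spPort
	}
	return false
}

// matchesPort(intstr.FromString("https"), "https", 443) == true
// matchesPort(intstr.FromInt(443), "https", 443) == true
```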
-type ServiceDescriber struct { - client.Interface -} - -func (d *ServiceDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - c := d.Services(namespace) - - service, err := c.Get(name) - if err != nil { - return "", err - } - - endpoints, _ := d.Endpoints(namespace).Get(name) - var events *api.EventList - if describerSettings.ShowEvents { - events, _ = d.Events(namespace).Search(service) - } - return describeService(service, endpoints, events) -} - -func buildIngressString(ingress []api.LoadBalancerIngress) string { - var buffer bytes.Buffer - - for i := range ingress { - if i != 0 { - buffer.WriteString(", ") - } - if ingress[i].IP != "" { - buffer.WriteString(ingress[i].IP) - } else { - buffer.WriteString(ingress[i].Hostname) - } - } - return buffer.String() -} - -func describeService(service *api.Service, endpoints *api.Endpoints, events *api.EventList) (string, error) { - if endpoints == nil { - endpoints = &api.Endpoints{} - } - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", service.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", service.Namespace) - printLabelsMultiline(out, "Labels", service.Labels) - fmt.Fprintf(out, "Selector:\t%s\n", labels.FormatLabels(service.Spec.Selector)) - fmt.Fprintf(out, "Type:\t%s\n", service.Spec.Type) - fmt.Fprintf(out, "IP:\t%s\n", service.Spec.ClusterIP) - if len(service.Status.LoadBalancer.Ingress) > 0 { - list := buildIngressString(service.Status.LoadBalancer.Ingress) - fmt.Fprintf(out, "LoadBalancer Ingress:\t%s\n", list) - } - for i := range service.Spec.Ports { - sp := &service.Spec.Ports[i] - - name := sp.Name - if name == "" { - name = "<unset>" - } - fmt.Fprintf(out, "Port:\t%s\t%d/%s\n", name, sp.Port, sp.Protocol) - if sp.NodePort != 0 { - fmt.Fprintf(out, "NodePort:\t%s\t%d/%s\n", name, sp.NodePort, sp.Protocol) - } - fmt.Fprintf(out, "Endpoints:\t%s\n", formatEndpoints(endpoints, sets.NewString(sp.Name))) - } - fmt.Fprintf(out, "Session Affinity:\t%s\n", service.Spec.SessionAffinity) - if events != nil { - DescribeEvents(events, out) - } - return nil - }) -} - -// EndpointsDescriber generates information about an Endpoint. 
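buildIngressString above prefers an ingress point's IP and falls back to its hostname, joining the entries with a comma. A quick illustration with made-up load-balancer entries:

```go
ingress := []api.LoadBalancerIngress{
	{IP: "203.0.113.10"},
	{Hostname: "lb.example.com"},
}
// buildIngressString(ingress) == "203.0.113.10, lb.example.com"
fmt.Println(buildIngressString(ingress))
```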
-type EndpointsDescriber struct { - client.Interface -} - -func (d *EndpointsDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - c := d.Endpoints(namespace) - - ep, err := c.Get(name) - if err != nil { - return "", err - } - - var events *api.EventList - if describerSettings.ShowEvents { - events, _ = d.Events(namespace).Search(ep) - } - - return describeEndpoints(ep, events) -} - -func describeEndpoints(ep *api.Endpoints, events *api.EventList) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", ep.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", ep.Namespace) - printLabelsMultiline(out, "Labels", ep.Labels) - - fmt.Fprintf(out, "Subsets:\n") - for i := range ep.Subsets { - subset := &ep.Subsets[i] - - addresses := make([]string, 0, len(subset.Addresses)) - for _, addr := range subset.Addresses { - addresses = append(addresses, addr.IP) - } - addressesString := strings.Join(addresses, ",") - if len(addressesString) == 0 { - addressesString = "<none>" - } - fmt.Fprintf(out, " Addresses:\t%s\n", addressesString) - - notReadyAddresses := make([]string, 0, len(subset.NotReadyAddresses)) - for _, addr := range subset.NotReadyAddresses { - notReadyAddresses = append(notReadyAddresses, addr.IP) - } - notReadyAddressesString := strings.Join(notReadyAddresses, ",") - if len(notReadyAddressesString) == 0 { - notReadyAddressesString = "<none>" - } - fmt.Fprintf(out, " NotReadyAddresses:\t%s\n", notReadyAddressesString) - - if len(subset.Ports) > 0 { - fmt.Fprintf(out, " Ports:\n") - fmt.Fprintf(out, " Name\tPort\tProtocol\n") - fmt.Fprintf(out, " ----\t----\t--------\n") - for _, port := range subset.Ports { - name := port.Name - if len(name) == 0 { - name = "<unset>" - } - fmt.Fprintf(out, " %s\t%d\t%s\n", name, port.Port, port.Protocol) - } - } - fmt.Fprintf(out, "\n") - } - - if events != nil { - DescribeEvents(events, out) - } - return nil - }) -} - -// ServiceAccountDescriber generates information about a service. 
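All of these describers share the same two-step calling convention: wrap a client in the describer type, then call Describe with a DescriberSettings. A hypothetical caller for the EndpointsDescriber deleted above; the client value and surrounding error handling are assumed.

```go
// c is an already-constructed client.Interface; ShowEvents appends the
// trailing events table to the description.
func printEndpoints(c client.Interface, namespace, name string) error {
	d := &EndpointsDescriber{c}
	s, err := d.Describe(namespace, name, DescriberSettings{ShowEvents: true})
	if err != nil {
		return err
	}
	fmt.Println(s)
	return nil
}
```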
-type ServiceAccountDescriber struct { - client.Interface -} - -func (d *ServiceAccountDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - c := d.ServiceAccounts(namespace) - - serviceAccount, err := c.Get(name) - if err != nil { - return "", err - } - - tokens := []api.Secret{} - - tokenSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)}) - options := api.ListOptions{FieldSelector: tokenSelector} - secrets, err := d.Secrets(namespace).List(options) - if err == nil { - for _, s := range secrets.Items { - name, _ := s.Annotations[api.ServiceAccountNameKey] - uid, _ := s.Annotations[api.ServiceAccountUIDKey] - if name == serviceAccount.Name && uid == string(serviceAccount.UID) { - tokens = append(tokens, s) - } - } - } - - return describeServiceAccount(serviceAccount, tokens) -} - -func describeServiceAccount(serviceAccount *api.ServiceAccount, tokens []api.Secret) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", serviceAccount.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", serviceAccount.Namespace) - printLabelsMultiline(out, "Labels", serviceAccount.Labels) - fmt.Fprintln(out) - - var ( - emptyHeader = " " - pullHeader = "Image pull secrets:" - mountHeader = "Mountable secrets: " - tokenHeader = "Tokens: " - - pullSecretNames = []string{} - mountSecretNames = []string{} - tokenSecretNames = []string{} - ) - - for _, s := range serviceAccount.ImagePullSecrets { - pullSecretNames = append(pullSecretNames, s.Name) - } - for _, s := range serviceAccount.Secrets { - mountSecretNames = append(mountSecretNames, s.Name) - } - for _, s := range tokens { - tokenSecretNames = append(tokenSecretNames, s.Name) - } - - types := map[string][]string{ - pullHeader: pullSecretNames, - mountHeader: mountSecretNames, - tokenHeader: tokenSecretNames, - } - for header, names := range types { - if len(names) == 0 { - fmt.Fprintf(out, "%s\t<none>\n", header) - } else { - prefix := header - for _, name := range names { - fmt.Fprintf(out, "%s\t%s\n", prefix, name) - prefix = emptyHeader - } - } - fmt.Fprintln(out) - } - - return nil - }) -} - -// NodeDescriber generates information about a node. -type NodeDescriber struct { - client.Interface -} - -func (d *NodeDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - mc := d.Nodes() - node, err := mc.Get(name) - if err != nil { - return "", err - } - - fieldSelector, err := fields.ParseSelector("spec.nodeName=" + name + ",status.phase!=" + string(api.PodSucceeded) + ",status.phase!=" + string(api.PodFailed)) - if err != nil { - return "", err - } - // in a policy aware setting, users may have access to a node, but not all pods - // in that case, we note that the user does not have access to the pods - canViewPods := true - nodeNonTerminatedPodsList, err := d.Pods(namespace).List(api.ListOptions{FieldSelector: fieldSelector}) - if err != nil { - if !errors.IsForbidden(err) { - return "", err - } - canViewPods = false - } - - var events *api.EventList - if describerSettings.ShowEvents { - if ref, err := api.GetReference(node); err != nil { - glog.Errorf("Unable to construct reference to '%#v': %v", node, err) - } else { - // TODO: We haven't decided the namespace for Node object yet. 
- ref.UID = types.UID(ref.Name) - events, _ = d.Events("").Search(ref) - } - } - - return describeNode(node, nodeNonTerminatedPodsList, events, canViewPods) -} - -func describeNode(node *api.Node, nodeNonTerminatedPodsList *api.PodList, events *api.EventList, canViewPods bool) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", node.Name) - printLabelsMultiline(out, "Labels", node.Labels) - printTaintsInAnnotationMultiline(out, "Taints", node.Annotations) - fmt.Fprintf(out, "CreationTimestamp:\t%s\n", node.CreationTimestamp.Time.Format(time.RFC1123Z)) - fmt.Fprintf(out, "Phase:\t%v\n", node.Status.Phase) - if len(node.Status.Conditions) > 0 { - fmt.Fprint(out, "Conditions:\n Type\tStatus\tLastHeartbeatTime\tLastTransitionTime\tReason\tMessage\n") - fmt.Fprint(out, " ----\t------\t-----------------\t------------------\t------\t-------\n") - for _, c := range node.Status.Conditions { - fmt.Fprintf(out, " %v \t%v \t%s \t%s \t%v \t%v\n", - c.Type, - c.Status, - c.LastHeartbeatTime.Time.Format(time.RFC1123Z), - c.LastTransitionTime.Time.Format(time.RFC1123Z), - c.Reason, - c.Message) - } - } - addresses := make([]string, 0, len(node.Status.Addresses)) - for _, address := range node.Status.Addresses { - addresses = append(addresses, address.Address) - } - - printResourceList := func(resourceList api.ResourceList) { - resources := make([]api.ResourceName, 0, len(resourceList)) - for resource := range resourceList { - resources = append(resources, resource) - } - sort.Sort(SortableResourceNames(resources)) - for _, resource := range resources { - value := resourceList[resource] - fmt.Fprintf(out, " %s:\t%s\n", resource, value.String()) - } - } - - fmt.Fprintf(out, "Addresses:\t%s\n", strings.Join(addresses, ",")) - if len(node.Status.Capacity) > 0 { - fmt.Fprintf(out, "Capacity:\n") - printResourceList(node.Status.Capacity) - } - if len(node.Status.Allocatable) > 0 { - fmt.Fprintf(out, "Allocatable:\n") - printResourceList(node.Status.Allocatable) - } - - fmt.Fprintf(out, "System Info:\n") - fmt.Fprintf(out, " Machine ID:\t%s\n", node.Status.NodeInfo.MachineID) - fmt.Fprintf(out, " System UUID:\t%s\n", node.Status.NodeInfo.SystemUUID) - fmt.Fprintf(out, " Boot ID:\t%s\n", node.Status.NodeInfo.BootID) - fmt.Fprintf(out, " Kernel Version:\t%s\n", node.Status.NodeInfo.KernelVersion) - fmt.Fprintf(out, " OS Image:\t%s\n", node.Status.NodeInfo.OSImage) - fmt.Fprintf(out, " Operating System:\t%s\n", node.Status.NodeInfo.OperatingSystem) - fmt.Fprintf(out, " Architecture:\t%s\n", node.Status.NodeInfo.Architecture) - fmt.Fprintf(out, " Container Runtime Version:\t%s\n", node.Status.NodeInfo.ContainerRuntimeVersion) - fmt.Fprintf(out, " Kubelet Version:\t%s\n", node.Status.NodeInfo.KubeletVersion) - fmt.Fprintf(out, " Kube-Proxy Version:\t%s\n", node.Status.NodeInfo.KubeProxyVersion) - - if len(node.Spec.PodCIDR) > 0 { - fmt.Fprintf(out, "PodCIDR:\t%s\n", node.Spec.PodCIDR) - } - if len(node.Spec.ExternalID) > 0 { - fmt.Fprintf(out, "ExternalID:\t%s\n", node.Spec.ExternalID) - } - if canViewPods && nodeNonTerminatedPodsList != nil { - if err := describeNodeResource(nodeNonTerminatedPodsList, node, out); err != nil { - return err - } - } else { - fmt.Fprintf(out, "Pods:\tnot authorized\n") - } - if events != nil { - DescribeEvents(events, out) - } - return nil - }) -} - -type PetSetDescriber struct { - client *client.Client -} - -func (p *PetSetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - ps, err := 
p.client.Apps().PetSets(namespace).Get(name) - if err != nil { - return "", err - } - pc := p.client.Pods(namespace) - - selector, err := unversioned.LabelSelectorAsSelector(ps.Spec.Selector) - if err != nil { - return "", err - } - - running, waiting, succeeded, failed, err := getPodStatusForController(pc, selector) - if err != nil { - return "", err - } - - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", ps.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", ps.Namespace) - fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&ps.Spec.Template.Spec)) - fmt.Fprintf(out, "Selector:\t%s\n", unversioned.FormatLabelSelector(ps.Spec.Selector)) - fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(ps.Labels)) - fmt.Fprintf(out, "Replicas:\t%d current / %d desired\n", ps.Status.Replicas, ps.Spec.Replicas) - fmt.Fprintf(out, "Annotations:\t%s\n", labels.FormatLabels(ps.Annotations)) - fmt.Fprintf(out, "CreationTimestamp:\t%s\n", ps.CreationTimestamp.Time.Format(time.RFC1123Z)) - fmt.Fprintf(out, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) - describeVolumes(ps.Spec.Template.Spec.Volumes, out, "") - if describerSettings.ShowEvents { - events, _ := p.client.Events(namespace).Search(ps) - if events != nil { - DescribeEvents(events, out) - } - } - return nil - }) -} - -// HorizontalPodAutoscalerDescriber generates information about a horizontal pod autoscaler. -type HorizontalPodAutoscalerDescriber struct { - client *client.Client -} - -func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - hpa, err := d.client.Autoscaling().HorizontalPodAutoscalers(namespace).Get(name) - if err != nil { - return "", err - } - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", hpa.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", hpa.Namespace) - printLabelsMultiline(out, "Labels", hpa.Labels) - printLabelsMultiline(out, "Annotations", hpa.Annotations) - fmt.Fprintf(out, "CreationTimestamp:\t%s\n", hpa.CreationTimestamp.Time.Format(time.RFC1123Z)) - fmt.Fprintf(out, "Reference:\t%s/%s\n", - hpa.Spec.ScaleTargetRef.Kind, - hpa.Spec.ScaleTargetRef.Name) - if hpa.Spec.TargetCPUUtilizationPercentage != nil { - fmt.Fprintf(out, "Target CPU utilization:\t%d%%\n", *hpa.Spec.TargetCPUUtilizationPercentage) - fmt.Fprintf(out, "Current CPU utilization:\t") - if hpa.Status.CurrentCPUUtilizationPercentage != nil { - fmt.Fprintf(out, "%d%%\n", *hpa.Status.CurrentCPUUtilizationPercentage) - } else { - fmt.Fprintf(out, "<unset>\n") - } - } - minReplicas := "<unset>" - if hpa.Spec.MinReplicas != nil { - minReplicas = fmt.Sprintf("%d", *hpa.Spec.MinReplicas) - } - fmt.Fprintf(out, "Min replicas:\t%s\n", minReplicas) - fmt.Fprintf(out, "Max replicas:\t%d\n", hpa.Spec.MaxReplicas) - - // TODO: switch to scale subresource once the required code is submitted. 
- if strings.ToLower(hpa.Spec.ScaleTargetRef.Kind) == "replicationcontroller" { - fmt.Fprintf(out, "ReplicationController pods:\t") - rc, err := d.client.ReplicationControllers(hpa.Namespace).Get(hpa.Spec.ScaleTargetRef.Name) - if err == nil { - fmt.Fprintf(out, "%d current / %d desired\n", rc.Status.Replicas, rc.Spec.Replicas) - } else { - fmt.Fprintf(out, "failed to check Replication Controller\n") - } - } - - if describerSettings.ShowEvents { - events, _ := d.client.Events(namespace).Search(hpa) - if events != nil { - DescribeEvents(events, out) - } - } - return nil - }) -} - -func describeNodeResource(nodeNonTerminatedPodsList *api.PodList, node *api.Node, out io.Writer) error { - fmt.Fprintf(out, "Non-terminated Pods:\t(%d in total)\n", len(nodeNonTerminatedPodsList.Items)) - fmt.Fprint(out, " Namespace\tName\t\tCPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n") - fmt.Fprint(out, " ---------\t----\t\t------------\t----------\t---------------\t-------------\n") - allocatable := node.Status.Capacity - if len(node.Status.Allocatable) > 0 { - allocatable = node.Status.Allocatable - } - - for _, pod := range nodeNonTerminatedPodsList.Items { - req, limit, err := api.PodRequestsAndLimits(&pod) - if err != nil { - return err - } - cpuReq, cpuLimit, memoryReq, memoryLimit := req[api.ResourceCPU], limit[api.ResourceCPU], req[api.ResourceMemory], limit[api.ResourceMemory] - fractionCpuReq := float64(cpuReq.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 - fractionCpuLimit := float64(cpuLimit.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 - fractionMemoryReq := float64(memoryReq.Value()) / float64(allocatable.Memory().Value()) * 100 - fractionMemoryLimit := float64(memoryLimit.Value()) / float64(allocatable.Memory().Value()) * 100 - fmt.Fprintf(out, " %s\t%s\t\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\n", pod.Namespace, pod.Name, - cpuReq.String(), int64(fractionCpuReq), cpuLimit.String(), int64(fractionCpuLimit), - memoryReq.String(), int64(fractionMemoryReq), memoryLimit.String(), int64(fractionMemoryLimit)) - } - - fmt.Fprint(out, "Allocated resources:\n (Total limits may be over 100 percent, i.e., overcommitted. 
More info: http://releases.k8s.io/HEAD/docs/user-guide/compute-resources.md)\n CPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n") - fmt.Fprint(out, " ------------\t----------\t---------------\t-------------\n") - reqs, limits, err := getPodsTotalRequestsAndLimits(nodeNonTerminatedPodsList) - if err != nil { - return err - } - cpuReqs, cpuLimits, memoryReqs, memoryLimits := reqs[api.ResourceCPU], limits[api.ResourceCPU], reqs[api.ResourceMemory], limits[api.ResourceMemory] - fractionCpuReqs := float64(cpuReqs.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 - fractionCpuLimits := float64(cpuLimits.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 - fractionMemoryReqs := float64(memoryReqs.Value()) / float64(allocatable.Memory().Value()) * 100 - fractionMemoryLimits := float64(memoryLimits.Value()) / float64(allocatable.Memory().Value()) * 100 - fmt.Fprintf(out, " %s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\n", - cpuReqs.String(), int64(fractionCpuReqs), cpuLimits.String(), int64(fractionCpuLimits), - memoryReqs.String(), int64(fractionMemoryReqs), memoryLimits.String(), int64(fractionMemoryLimits)) - return nil -} - -func filterTerminatedPods(pods []*api.Pod) []*api.Pod { - if len(pods) == 0 { - return pods - } - result := []*api.Pod{} - for _, pod := range pods { - if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed { - continue - } - result = append(result, pod) - } - return result -} - -func getPodsTotalRequestsAndLimits(podList *api.PodList) (reqs map[api.ResourceName]resource.Quantity, limits map[api.ResourceName]resource.Quantity, err error) { - reqs, limits = map[api.ResourceName]resource.Quantity{}, map[api.ResourceName]resource.Quantity{} - for _, pod := range podList.Items { - podReqs, podLimits, err := api.PodRequestsAndLimits(&pod) - if err != nil { - return nil, nil, err - } - for podReqName, podReqValue := range podReqs { - if value, ok := reqs[podReqName]; !ok { - reqs[podReqName] = *podReqValue.Copy() - } else { - value.Add(podReqValue) - reqs[podReqName] = value - } - } - for podLimitName, podLimitValue := range podLimits { - if value, ok := limits[podLimitName]; !ok { - limits[podLimitName] = *podLimitValue.Copy() - } else { - value.Add(podLimitValue) - limits[podLimitName] = value - } - } - } - return -} - -func DescribeEvents(el *api.EventList, w io.Writer) { - if len(el.Items) == 0 { - fmt.Fprint(w, "No events.") - return - } - sort.Sort(SortableEvents(el.Items)) - fmt.Fprint(w, "Events:\n FirstSeen\tLastSeen\tCount\tFrom\tSubobjectPath\tType\tReason\tMessage\n") - fmt.Fprint(w, " ---------\t--------\t-----\t----\t-------------\t--------\t------\t-------\n") - for _, e := range el.Items { - fmt.Fprintf(w, " %s\t%s\t%d\t%v\t%v\t%v\t%v\t%v\n", - translateTimestamp(e.FirstTimestamp), - translateTimestamp(e.LastTimestamp), - e.Count, - e.Source, - e.InvolvedObject.FieldPath, - e.Type, - e.Reason, - e.Message) - } -} - -// DeploymentDescriber generates information about a deployment. 
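describeNodeResource above reports each request and limit as a percentage of the node's allocatable capacity, truncating the fraction with an int64 conversion. A self-contained worked example with hypothetical numbers (2500m of CPU requested on a 4-core node):

```go
package main

import "fmt"

func main() {
	cpuReqMilli := int64(2500)      // sum of pod CPU requests, in millicores
	allocatableMilli := int64(4000) // node allocatable CPU, in millicores

	fraction := float64(cpuReqMilli) / float64(allocatableMilli) * 100
	// Matches the describer's formatting: 62.5% is truncated to 62%.
	fmt.Printf("%dm (%d%%)\n", cpuReqMilli, int64(fraction)) // 2500m (62%)
}
```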
-type DeploymentDescriber struct { - clientset.Interface -} - -func (dd *DeploymentDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - d, err := dd.Extensions().Deployments(namespace).Get(name) - if err != nil { - return "", err - } - selector, err := unversioned.LabelSelectorAsSelector(d.Spec.Selector) - if err != nil { - return "", err - } - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", d.ObjectMeta.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", d.ObjectMeta.Namespace) - fmt.Fprintf(out, "CreationTimestamp:\t%s\n", d.CreationTimestamp.Time.Format(time.RFC1123Z)) - printLabelsMultiline(out, "Labels", d.Labels) - fmt.Fprintf(out, "Selector:\t%s\n", selector) - fmt.Fprintf(out, "Replicas:\t%d updated | %d total | %d available | %d unavailable\n", d.Status.UpdatedReplicas, d.Spec.Replicas, d.Status.AvailableReplicas, d.Status.UnavailableReplicas) - fmt.Fprintf(out, "StrategyType:\t%s\n", d.Spec.Strategy.Type) - fmt.Fprintf(out, "MinReadySeconds:\t%d\n", d.Spec.MinReadySeconds) - if d.Spec.Strategy.RollingUpdate != nil { - ru := d.Spec.Strategy.RollingUpdate - fmt.Fprintf(out, "RollingUpdateStrategy:\t%s max unavailable, %s max surge\n", ru.MaxUnavailable.String(), ru.MaxSurge.String()) - } - oldRSs, _, newRS, err := deploymentutil.GetAllReplicaSets(d, dd) - if err == nil { - fmt.Fprintf(out, "OldReplicaSets:\t%s\n", printReplicaSetsByLabels(oldRSs)) - var newRSs []*extensions.ReplicaSet - if newRS != nil { - newRSs = append(newRSs, newRS) - } - fmt.Fprintf(out, "NewReplicaSet:\t%s\n", printReplicaSetsByLabels(newRSs)) - } - if describerSettings.ShowEvents { - events, err := dd.Core().Events(namespace).Search(d) - if err == nil && events != nil { - DescribeEvents(events, out) - } - } - return nil - }) -} - -// Get all daemon set whose selectors would match a given set of labels. -// TODO: Move this to pkg/client and ideally implement it server-side (instead -// of getting all DS's and searching through them manually). -// TODO: write an interface for controllers and fuse getReplicationControllersForLabels -// and getDaemonSetsForLabels. -func getDaemonSetsForLabels(c client.DaemonSetInterface, labelsToMatch labels.Labels) ([]extensions.DaemonSet, error) { - // Get all daemon sets - // TODO: this needs a namespace scope as argument - dss, err := c.List(api.ListOptions{}) - if err != nil { - return nil, fmt.Errorf("error getting daemon set: %v", err) - } - - // Find the ones that match labelsToMatch. - var matchingDaemonSets []extensions.DaemonSet - for _, ds := range dss.Items { - selector, err := unversioned.LabelSelectorAsSelector(ds.Spec.Selector) - if err != nil { - // this should never happen if the DaemonSet passed validation - return nil, err - } - if selector.Matches(labelsToMatch) { - matchingDaemonSets = append(matchingDaemonSets, ds) - } - } - return matchingDaemonSets, nil -} - -func printReplicationControllersByLabels(matchingRCs []*api.ReplicationController) string { - // Format the matching RC's into strings. - rcStrings := make([]string, 0, len(matchingRCs)) - for _, controller := range matchingRCs { - rcStrings = append(rcStrings, fmt.Sprintf("%s (%d/%d replicas created)", controller.Name, controller.Status.Replicas, controller.Spec.Replicas)) - } - - list := strings.Join(rcStrings, ", ") - if list == "" { - return "<none>" - } - return list -} - -func printReplicaSetsByLabels(matchingRSs []*extensions.ReplicaSet) string { - // Format the matching ReplicaSets into strings. 
- rsStrings := make([]string, 0, len(matchingRSs)) - for _, rs := range matchingRSs { - rsStrings = append(rsStrings, fmt.Sprintf("%s (%d/%d replicas created)", rs.Name, rs.Status.Replicas, rs.Spec.Replicas)) - } - - list := strings.Join(rsStrings, ", ") - if list == "" { - return "<none>" - } - return list -} - -func getPodStatusForController(c client.PodInterface, selector labels.Selector) (running, waiting, succeeded, failed int, err error) { - options := api.ListOptions{LabelSelector: selector} - rcPods, err := c.List(options) - if err != nil { - return - } - for _, pod := range rcPods.Items { - switch pod.Status.Phase { - case api.PodRunning: - running++ - case api.PodPending: - waiting++ - case api.PodSucceeded: - succeeded++ - case api.PodFailed: - failed++ - } - } - return -} - -// ConfigMapDescriber generates information about a ConfigMap -type ConfigMapDescriber struct { - client.Interface -} - -func (d *ConfigMapDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - c := d.ConfigMaps(namespace) - - configMap, err := c.Get(name) - if err != nil { - return "", err - } - - return describeConfigMap(configMap) -} - -func describeConfigMap(configMap *api.ConfigMap) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", configMap.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", configMap.Namespace) - printLabelsMultiline(out, "Labels", configMap.Labels) - printLabelsMultiline(out, "Annotations", configMap.Annotations) - - fmt.Fprintf(out, "\nData\n====\n") - for k, v := range configMap.Data { - fmt.Fprintf(out, "%s:\t%d bytes\n", k, len(v)) - } - - return nil - }) -} - -type ClusterDescriber struct { - fed_clientset.Interface -} - -func (d *ClusterDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - cluster, err := d.Federation().Clusters().Get(name) - if err != nil { - return "", err - } - return describeCluster(cluster) -} - -func describeCluster(cluster *federation.Cluster) (string, error) { - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", cluster.Name) - fmt.Fprintf(out, "Labels:\t%s\n", labels.FormatLabels(cluster.Labels)) - - fmt.Fprintf(out, "ServerAddressByClientCIDRs:\n ClientCIDR\tServerAddress\n") - fmt.Fprintf(out, " ----\t----\n") - for _, cidrAddr := range cluster.Spec.ServerAddressByClientCIDRs { - fmt.Fprintf(out, " %v \t%v\n\n", cidrAddr.ClientCIDR, cidrAddr.ServerAddress) - } - - if len(cluster.Status.Conditions) > 0 { - fmt.Fprint(out, "Conditions:\n Type\tStatus\tLastUpdateTime\tLastTransitionTime\tReason\tMessage\n") - fmt.Fprint(out, " ----\t------\t-----------------\t------------------\t------\t-------\n") - for _, c := range cluster.Status.Conditions { - fmt.Fprintf(out, " %v \t%v \t%s \t%s \t%v \t%v\n", - c.Type, - c.Status, - c.LastProbeTime.Time.Format(time.RFC1123Z), - c.LastTransitionTime.Time.Format(time.RFC1123Z), - c.Reason, - c.Message) - } - } - return nil - }) -} - -// NetworkPolicyDescriber generates information about a NetworkPolicy -type NetworkPolicyDescriber struct { - client.Interface -} - -func (d *NetworkPolicyDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - c := d.Extensions().NetworkPolicies(namespace) - - networkPolicy, err := c.Get(name) - if err != nil { - return "", err - } - - return describeNetworkPolicy(networkPolicy) -} - -func describeNetworkPolicy(networkPolicy *extensions.NetworkPolicy) (string, error) { - 
return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "Name:\t%s\n", networkPolicy.Name) - fmt.Fprintf(out, "Namespace:\t%s\n", networkPolicy.Namespace) - printLabelsMultiline(out, "Labels", networkPolicy.Labels) - printLabelsMultiline(out, "Annotations", networkPolicy.Annotations) - - return nil - }) -} - -// newErrNoDescriber creates a new ErrNoDescriber with the names of the provided types. -func newErrNoDescriber(types ...reflect.Type) error { - names := make([]string, 0, len(types)) - for _, t := range types { - names = append(names, t.String()) - } - return ErrNoDescriber{Types: names} -} - -// Describers implements ObjectDescriber against functions registered via Add. Those functions can -// be strongly typed. Types are exactly matched (no conversion or assignable checks). -type Describers struct { - searchFns map[reflect.Type][]typeFunc -} - -// DescribeObject implements ObjectDescriber and will attempt to print the provided object to a string, -// if at least one describer function has been registered with the exact types passed, or if any -// describer can print the exact object in its first argument (the remainder will be provided empty -// values). If no function registered with Add can satisfy the passed objects, an ErrNoDescriber will -// be returned -// TODO: reorder and partial match extra. -func (d *Describers) DescribeObject(exact interface{}, extra ...interface{}) (string, error) { - exactType := reflect.TypeOf(exact) - fns, ok := d.searchFns[exactType] - if !ok { - return "", newErrNoDescriber(exactType) - } - if len(extra) == 0 { - for _, typeFn := range fns { - if len(typeFn.Extra) == 0 { - return typeFn.Describe(exact, extra...) - } - } - typeFn := fns[0] - for _, t := range typeFn.Extra { - v := reflect.New(t).Elem() - extra = append(extra, v.Interface()) - } - return fns[0].Describe(exact, extra...) - } - - types := make([]reflect.Type, 0, len(extra)) - for _, obj := range extra { - types = append(types, reflect.TypeOf(obj)) - } - for _, typeFn := range fns { - if typeFn.Matches(types) { - return typeFn.Describe(exact, extra...) - } - } - return "", newErrNoDescriber(append([]reflect.Type{exactType}, types...)...) -} - -// Add adds one or more describer functions to the Describer. The passed function must -// match the signature: -// -// func(...) (string, error) -// -// Any number of arguments may be provided. -func (d *Describers) Add(fns ...interface{}) error { - for _, fn := range fns { - fv := reflect.ValueOf(fn) - ft := fv.Type() - if ft.Kind() != reflect.Func { - return fmt.Errorf("expected func, got: %v", ft) - } - numIn := ft.NumIn() - if numIn == 0 { - return fmt.Errorf("expected at least one 'in' params, got: %v", ft) - } - if ft.NumOut() != 2 { - return fmt.Errorf("expected two 'out' params - (string, error), got: %v", ft) - } - types := make([]reflect.Type, 0, numIn) - for i := 0; i < numIn; i++ { - types = append(types, ft.In(i)) - } - if ft.Out(0) != reflect.TypeOf(string("")) { - return fmt.Errorf("expected string return, got: %v", ft) - } - var forErrorType error - // This convolution is necessary, otherwise TypeOf picks up on the fact - // that forErrorType is nil. 
- errorType := reflect.TypeOf(&forErrorType).Elem() - if ft.Out(1) != errorType { - return fmt.Errorf("expected error return, got: %v", ft) - } - - exact := types[0] - extra := types[1:] - if d.searchFns == nil { - d.searchFns = make(map[reflect.Type][]typeFunc) - } - fns := d.searchFns[exact] - fn := typeFunc{Extra: extra, Fn: fv} - fns = append(fns, fn) - d.searchFns[exact] = fns - } - return nil -} - -// typeFunc holds information about a describer function and the types it accepts -type typeFunc struct { - Extra []reflect.Type - Fn reflect.Value -} - -// Matches returns true when the passed types exactly match the Extra list. -func (fn typeFunc) Matches(types []reflect.Type) bool { - if len(fn.Extra) != len(types) { - return false - } - // reorder the items in array types and fn.Extra - // convert the type into string and sort them, check if they are matched - varMap := make(map[reflect.Type]bool) - for i := range fn.Extra { - varMap[fn.Extra[i]] = true - } - for i := range types { - if _, found := varMap[types[i]]; !found { - return false - } - } - return true -} - -// Describe invokes the nested function with the exact number of arguments. -func (fn typeFunc) Describe(exact interface{}, extra ...interface{}) (string, error) { - values := []reflect.Value{reflect.ValueOf(exact)} - for _, obj := range extra { - values = append(values, reflect.ValueOf(obj)) - } - out := fn.Fn.Call(values) - s := out[0].Interface().(string) - var err error - if !out[1].IsNil() { - err = out[1].Interface().(error) - } - return s, err -} - -// printLabelsMultiline prints multiple labels with a proper alignment. -func printLabelsMultiline(out io.Writer, title string, labels map[string]string) { - printLabelsMultilineWithIndent(out, "", title, "\t", labels) -} - -// printLabelsMultiline prints multiple labels with a user-defined alignment. -func printLabelsMultilineWithIndent(out io.Writer, initialIndent, title, innerIndent string, labels map[string]string) { - - fmt.Fprintf(out, "%s%s:%s", initialIndent, title, innerIndent) - - if labels == nil || len(labels) == 0 { - fmt.Fprintln(out, "<none>") - return - } - - // to print labels in the sorted order - keys := make([]string, 0, len(labels)) - for key := range labels { - keys = append(keys, key) - } - sort.Strings(keys) - - for i, key := range keys { - if i != 0 { - fmt.Fprint(out, initialIndent) - fmt.Fprint(out, innerIndent) - } - fmt.Fprintf(out, "%s=%s\n", key, labels[key]) - i++ - } -} - -// printTaintsMultiline prints multiple taints with a proper alignment. -func printTaintsInAnnotationMultiline(out io.Writer, title string, annotations map[string]string) { - taints, err := api.GetTaintsFromNodeAnnotations(annotations) - if err != nil { - taints = []api.Taint{} - } - printTaintsMultilineWithIndent(out, "", title, "\t", taints) -} - -// printTaintsMultilineWithIndent prints multiple taints with a user-defined alignment. 
-func printTaintsMultilineWithIndent(out io.Writer, initialIndent, title, innerIndent string, taints []api.Taint) { - fmt.Fprintf(out, "%s%s:%s", initialIndent, title, innerIndent) - - if taints == nil || len(taints) == 0 { - fmt.Fprintln(out, "<none>") - return - } - - // to print taints in the sorted order - keys := make([]string, 0, len(taints)) - for _, taint := range taints { - keys = append(keys, taint.Key) - } - sort.Strings(keys) - - for i, key := range keys { - for _, taint := range taints { - if taint.Key == key { - if i != 0 { - fmt.Fprint(out, initialIndent) - fmt.Fprint(out, innerIndent) - } - fmt.Fprintf(out, "%s=%s:%s\n", taint.Key, taint.Value, taint.Effect) - i++ - } - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/doc.go b/vendor/k8s.io/kubernetes/pkg/kubectl/doc.go deleted file mode 100644 index cc34fba7d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package kubectl is a set of libraries that are used by the kubectl command line tool. -// They are separated out into a library to support unit testing. Most functionality should -// be included in this package, and the main kubectl should really just be an entry point. -package kubectl diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/explain.go b/vendor/k8s.io/kubernetes/pkg/kubectl/explain.go deleted file mode 100644 index d8b9a1472..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/explain.go +++ /dev/null @@ -1,251 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
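The describers deleted above all emit key/value fields separated by tab characters and hand them to the tabbedString helper (defined elsewhere in this file) for alignment before the final string is returned. As a rough standalone sketch of that output pattern, assuming only the standard library tabwriter and using invented sample values:

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// Align "key:\tvalue" pairs the way the Describe functions' output ends up aligned.
	w := tabwriter.NewWriter(os.Stdout, 0, 8, 0, '\t', 0)
	fmt.Fprintf(w, "Name:\tmy-configmap\n")
	fmt.Fprintf(w, "Namespace:\tdefault\n")
	fmt.Fprintf(w, "Labels:\tapp=demo\n")
	w.Flush()
}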
-*/ - -package kubectl - -import ( - "fmt" - "io" - "strings" - - "github.com/emicklei/go-restful/swagger" - - "k8s.io/kubernetes/pkg/api/meta" - apiutil "k8s.io/kubernetes/pkg/api/util" -) - -var allModels = make(map[string]*swagger.NamedModel) -var recursive = false // this is global for convenience, can become int for multiple levels - -// SplitAndParseResourceRequest separates the users input into a model and fields -func SplitAndParseResourceRequest(inResource string, mapper meta.RESTMapper) (string, []string, error) { - inResource, fieldsPath := splitDotNotation(inResource) - inResource, _ = mapper.ResourceSingularizer(inResource) - return inResource, fieldsPath, nil -} - -// PrintModelDescription prints the description of a specific model or dot path -func PrintModelDescription(inModel string, fieldsPath []string, w io.Writer, swaggerSchema *swagger.ApiDeclaration, r bool) error { - recursive = r // this is global for convenience - apiVer := apiutil.GetVersion(swaggerSchema.ApiVersion) + "." - - var pointedModel *swagger.NamedModel - for i := range swaggerSchema.Models.List { - name := swaggerSchema.Models.List[i].Name - - allModels[name] = &swaggerSchema.Models.List[i] - if strings.ToLower(name) == strings.ToLower(apiVer+inModel) { - pointedModel = &swaggerSchema.Models.List[i] - } - } - if pointedModel == nil { - return fmt.Errorf("requested resource %q is not defined", inModel) - } - - if len(fieldsPath) == 0 { - return printTopLevelResourceInfo(w, pointedModel) - } - - var pointedModelAsProp *swagger.NamedModelProperty - for _, field := range fieldsPath { - if prop, nextModel, isModel := getField(pointedModel, field); prop != nil { - if isModel { - pointedModelAsProp = prop - pointedModel = allModels[nextModel] - } else { - return printPrimitive(w, prop) - } - } else { - return fmt.Errorf("field %q does not exist", field) - } - } - return printModelInfo(w, pointedModel, pointedModelAsProp) -} - -func splitDotNotation(model string) (string, []string) { - var fieldsPath []string - dotModel := strings.Split(model, ".") - if len(dotModel) >= 1 { - fieldsPath = dotModel[1:] - } - return dotModel[0], fieldsPath -} - -func getPointedModel(prop *swagger.ModelProperty) (string, bool) { - if prop.Ref != nil { - return *prop.Ref, true - } else if *prop.Type == "array" && prop.Items.Ref != nil { - return *prop.Items.Ref, true - } - return "", false -} - -func getField(model *swagger.NamedModel, sField string) (*swagger.NamedModelProperty, string, bool) { - for _, prop := range model.Model.Properties.List { - if prop.Name == sField { - pointedModel, isModel := getPointedModel(&prop.Property) - return &prop, pointedModel, isModel - } - } - return nil, "", false -} - -func printModelInfo(w io.Writer, model *swagger.NamedModel, modelProp *swagger.NamedModelProperty) error { - t, _ := getFieldType(&modelProp.Property) - fmt.Fprintf(w, "RESOURCE: %s <%s>\n\n", modelProp.Name, t) - fieldDesc, _ := wrapAndIndentText(modelProp.Property.Description, " ", 80) - fmt.Fprintf(w, "DESCRIPTION:\n%s\n\n%s\n", fieldDesc, indentText(model.Model.Description, " ")) - return printFields(w, model) -} - -func printPrimitive(w io.Writer, field *swagger.NamedModelProperty) error { - t, _ := getFieldType(&field.Property) - fmt.Fprintf(w, "FIELD: %s <%s>\n\n", field.Name, t) - d, _ := wrapAndIndentText(field.Property.Description, " ", 80) - fmt.Fprintf(w, "DESCRIPTION:\n%s\n", d) - return nil -} - -func printTopLevelResourceInfo(w io.Writer, model *swagger.NamedModel) error { - fmt.Fprintf(w, "DESCRIPTION:\n%s\n", 
model.Model.Description) - return printFields(w, model) -} - -func printFields(w io.Writer, model *swagger.NamedModel) error { - fmt.Fprint(w, "\nFIELDS:\n") - for _, field := range model.Model.Properties.List { - fieldType, err := getFieldType(&field.Property) - if err != nil { - return err - } - - if arrayContains(model.Model.Required, field.Name) { - fmt.Fprintf(w, " %s\t<%s> -required-\n", field.Name, fieldType) - } else { - fmt.Fprintf(w, " %s\t<%s>\n", field.Name, fieldType) - } - - if recursive { - pointedModel, isModel := getPointedModel(&field.Property) - if isModel { - for _, nestedField := range allModels[pointedModel].Model.Properties.List { - t, _ := getFieldType(&nestedField.Property) - fmt.Fprintf(w, " %s\t<%s>\n", nestedField.Name, t) - } - } - } else { - fieldDesc, _ := wrapAndIndentText(field.Property.Description, " ", 80) - fmt.Fprintf(w, "%s\n\n", fieldDesc) - } - } - fmt.Fprint(w, "\n") - return nil -} - -func getFieldType(prop *swagger.ModelProperty) (string, error) { - if prop.Type == nil { - return "Object", nil - } else if *prop.Type == "any" { - // Swagger Spec doesn't return information for maps. - return "map[string]string", nil - } else if *prop.Type == "array" { - if prop.Items == nil { - return "", fmt.Errorf("error in swagger spec. Property: %v contains an array without type", prop) - } - if prop.Items.Ref != nil { - fieldType := "[]Object" - return fieldType, nil - } - fieldType := "[]" + *prop.Items.Type - return fieldType, nil - } - return *prop.Type, nil -} - -func wrapAndIndentText(desc, indent string, lim int) (string, error) { - words := strings.Split(strings.Replace(strings.TrimSpace(desc), "\n", " ", -1), " ") - n := len(words) - - for i := 0; i < n; i++ { - if len(words[i]) > lim { - if strings.Contains(words[i], "/") { - s := breakURL(words[i]) - words = append(words[:i], append(s, words[i+1:]...)...) 
- i = i + len(s) - 1 - } else { - fmt.Println(len(words[i])) - return "", fmt.Errorf("there are words longer that the break limit is") - } - } - } - - var lines []string - line := []string{indent} - lineL := len(indent) - for i := 0; i < len(words); i++ { - w := words[i] - - if strings.HasSuffix(w, "/") && lineL+len(w)-1 < lim { - prev := line[len(line)-1] - if strings.HasSuffix(prev, "/") { - if i+1 < len(words)-1 && !strings.HasSuffix(words[i+1], "/") { - w = strings.TrimSuffix(w, "/") - } - - line[len(line)-1] = prev + w - lineL += len(w) - } else { - line = append(line, w) - lineL += len(w) + 1 - } - } else if lineL+len(w) < lim { - line = append(line, w) - lineL += len(w) + 1 - } else { - lines = append(lines, strings.Join(line, " ")) - line = []string{indent, w} - lineL = len(indent) + len(w) - } - } - lines = append(lines, strings.Join(line, " ")) - - return strings.Join(lines, "\n"), nil -} - -func breakURL(url string) []string { - var buf []string - for _, part := range strings.Split(url, "/") { - buf = append(buf, part+"/") - } - return buf -} - -func indentText(text, indent string) string { - lines := strings.Split(text, "\n") - for i := range lines { - lines[i] = indent + lines[i] - } - return strings.Join(lines, "\n") -} - -func arrayContains(s []string, e string) bool { - for _, a := range s { - if a == e { - return true - } - } - return false -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/generate.go b/vendor/k8s.io/kubernetes/pkg/kubectl/generate.go deleted file mode 100644 index ea254bcb5..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/generate.go +++ /dev/null @@ -1,200 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "reflect" - "strconv" - "strings" - - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "k8s.io/kubernetes/pkg/runtime" - utilerrors "k8s.io/kubernetes/pkg/util/errors" -) - -// GeneratorParam is a parameter for a generator -// TODO: facilitate structured json generator input schemes -type GeneratorParam struct { - Name string - Required bool -} - -// Generator is an interface for things that can generate API objects from input parameters. 
-type Generator interface { - // Generate creates an API object given a set of parameters - Generate(params map[string]interface{}) (runtime.Object, error) - // ParamNames returns the list of parameters that this generator uses - ParamNames() []GeneratorParam -} - -// StructuredGenerator is an interface for things that can generate API objects not using parameter injection -type StructuredGenerator interface { - // StructuredGenerator creates an API object using pre-configured parameters - StructuredGenerate() (runtime.Object, error) -} - -func IsZero(i interface{}) bool { - if i == nil { - return true - } - return reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) -} - -// ValidateParams ensures that all required params are present in the params map -func ValidateParams(paramSpec []GeneratorParam, params map[string]interface{}) error { - allErrs := []error{} - for ix := range paramSpec { - if paramSpec[ix].Required { - value, found := params[paramSpec[ix].Name] - if !found || IsZero(value) { - allErrs = append(allErrs, fmt.Errorf("Parameter: %s is required", paramSpec[ix].Name)) - } - } - } - return utilerrors.NewAggregate(allErrs) -} - -// AnnotateFlags annotates all flags that are used by generators. -func AnnotateFlags(cmd *cobra.Command, generators map[string]Generator) { - // Iterate over all generators and mark any flags used by them. - for name, generator := range generators { - generatorParams := map[string]struct{}{} - for _, param := range generator.ParamNames() { - generatorParams[param.Name] = struct{}{} - } - - cmd.Flags().VisitAll(func(flag *pflag.Flag) { - if _, found := generatorParams[flag.Name]; !found { - // This flag is not used by the current generator - // so skip it. - return - } - if flag.Annotations == nil { - flag.Annotations = map[string][]string{} - } - if annotations := flag.Annotations["generator"]; annotations == nil { - flag.Annotations["generator"] = []string{} - } - flag.Annotations["generator"] = append(flag.Annotations["generator"], name) - }) - } -} - -// EnsureFlagsValid ensures that no invalid flags are being used against a generator. -func EnsureFlagsValid(cmd *cobra.Command, generators map[string]Generator, generatorInUse string) error { - AnnotateFlags(cmd, generators) - - allErrs := []error{} - cmd.Flags().VisitAll(func(flag *pflag.Flag) { - // If the flag hasn't changed, don't validate it. - if !flag.Changed { - return - } - // Look into the flag annotations for the generators that can use it. - if annotations := flag.Annotations["generator"]; len(annotations) > 0 { - annotationMap := map[string]struct{}{} - for _, ann := range annotations { - annotationMap[ann] = struct{}{} - } - // If the current generator is not annotated, then this flag shouldn't - // be used with it. 
- if _, found := annotationMap[generatorInUse]; !found { - allErrs = append(allErrs, fmt.Errorf("cannot use --%s with --generator=%s", flag.Name, generatorInUse)) - } - } - }) - return utilerrors.NewAggregate(allErrs) -} - -// MakeParams is a utility that creates generator parameters from a command line -func MakeParams(cmd *cobra.Command, params []GeneratorParam) map[string]interface{} { - result := map[string]interface{}{} - for ix := range params { - f := cmd.Flags().Lookup(params[ix].Name) - if f != nil { - result[params[ix].Name] = f.Value.String() - } - } - return result -} - -func MakeProtocols(protocols map[string]string) string { - out := []string{} - for key, value := range protocols { - out = append(out, fmt.Sprintf("%s/%s", key, value)) - } - return strings.Join(out, ",") -} - -func ParseProtocols(protocols interface{}) (map[string]string, error) { - protocolsString, isString := protocols.(string) - if !isString { - return nil, fmt.Errorf("expected string, found %v", protocols) - } - if len(protocolsString) == 0 { - return nil, fmt.Errorf("no protocols passed") - } - portProtocolMap := map[string]string{} - protocolsSlice := strings.Split(protocolsString, ",") - for ix := range protocolsSlice { - portProtocol := strings.Split(protocolsSlice[ix], "/") - if len(portProtocol) != 2 { - return nil, fmt.Errorf("unexpected port protocol mapping: %s", protocolsSlice[ix]) - } - portProtocolMap[portProtocol[0]] = portProtocol[1] - } - return portProtocolMap, nil -} - -func MakeLabels(labels map[string]string) string { - out := []string{} - for key, value := range labels { - out = append(out, fmt.Sprintf("%s=%s", key, value)) - } - return strings.Join(out, ",") -} - -// ParseLabels turns a string representation of a label set into a map[string]string -func ParseLabels(labelSpec interface{}) (map[string]string, error) { - labelString, isString := labelSpec.(string) - if !isString { - return nil, fmt.Errorf("expected string, found %v", labelSpec) - } - if len(labelString) == 0 { - return nil, fmt.Errorf("no label spec passed") - } - labels := map[string]string{} - labelSpecs := strings.Split(labelString, ",") - for ix := range labelSpecs { - labelSpec := strings.Split(labelSpecs[ix], "=") - if len(labelSpec) != 2 { - return nil, fmt.Errorf("unexpected label spec: %s", labelSpecs[ix]) - } - labels[labelSpec[0]] = labelSpec[1] - } - return labels, nil -} - -func GetBool(params map[string]string, key string, defValue bool) (bool, error) { - if val, found := params[key]; !found { - return defValue, nil - } else { - return strconv.ParseBool(val) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/history.go b/vendor/k8s.io/kubernetes/pkg/kubectl/history.go deleted file mode 100644 index 0e81ebf28..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/history.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
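The deleted generate.go also round-trips labels and port/protocol pairs through comma-separated key=value (or port/protocol) strings via MakeLabels/ParseLabels and MakeProtocols/ParseProtocols. A minimal standalone sketch of that parsing convention, with a hypothetical parsePairs helper standing in for both (not the vendored API):

package main

import (
	"fmt"
	"strings"
)

// parsePairs splits a comma-separated list of key<sep>value items into a map,
// mirroring the behaviour of the deleted ParseLabels/ParseProtocols helpers.
func parsePairs(spec, sep string) (map[string]string, error) {
	if spec == "" {
		return nil, fmt.Errorf("empty spec")
	}
	out := map[string]string{}
	for _, item := range strings.Split(spec, ",") {
		kv := strings.Split(item, sep)
		if len(kv) != 2 {
			return nil, fmt.Errorf("unexpected item %q, want key%svalue", item, sep)
		}
		out[kv[0]] = kv[1]
	}
	return out, nil
}

func main() {
	labels, _ := parsePairs("app=nginx,tier=frontend", "=")
	protocols, _ := parsePairs("8080/TCP,53/UDP", "/")
	fmt.Println(labels, protocols)
}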
-*/ - -package kubectl - -import ( - "fmt" - "io" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/extensions" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/runtime" - deploymentutil "k8s.io/kubernetes/pkg/util/deployment" - sliceutil "k8s.io/kubernetes/pkg/util/slice" -) - -const ( - ChangeCauseAnnotation = "kubernetes.io/change-cause" -) - -// HistoryViewer provides an interface for resources that can be rolled back. -type HistoryViewer interface { - History(namespace, name string) (HistoryInfo, error) -} - -func HistoryViewerFor(kind unversioned.GroupKind, c clientset.Interface) (HistoryViewer, error) { - switch kind { - case extensions.Kind("Deployment"): - return &DeploymentHistoryViewer{c}, nil - } - return nil, fmt.Errorf("no history viewer has been implemented for %q", kind) -} - -// HistoryInfo stores the mapping from revision to podTemplate; -// note that change-cause annotation should be copied to podTemplate -type HistoryInfo struct { - RevisionToTemplate map[int64]*api.PodTemplateSpec -} - -type DeploymentHistoryViewer struct { - c clientset.Interface -} - -// History returns a revision-to-replicaset map as the revision history of a deployment -func (h *DeploymentHistoryViewer) History(namespace, name string) (HistoryInfo, error) { - historyInfo := HistoryInfo{ - RevisionToTemplate: make(map[int64]*api.PodTemplateSpec), - } - deployment, err := h.c.Extensions().Deployments(namespace).Get(name) - if err != nil { - return historyInfo, fmt.Errorf("failed to retrieve deployment %s: %v", name, err) - } - _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, h.c) - if err != nil { - return historyInfo, fmt.Errorf("failed to retrieve replica sets from deployment %s: %v", name, err) - } - allRSs := allOldRSs - if newRS != nil { - allRSs = append(allRSs, newRS) - } - for _, rs := range allRSs { - v, err := deploymentutil.Revision(rs) - if err != nil { - continue - } - historyInfo.RevisionToTemplate[v] = &rs.Spec.Template - changeCause := getChangeCause(rs) - if historyInfo.RevisionToTemplate[v].Annotations == nil { - historyInfo.RevisionToTemplate[v].Annotations = make(map[string]string) - } - if len(changeCause) > 0 { - historyInfo.RevisionToTemplate[v].Annotations[ChangeCauseAnnotation] = changeCause - } - } - return historyInfo, nil -} - -// PrintRolloutHistory prints a formatted table of the input revision history of the deployment -func PrintRolloutHistory(historyInfo HistoryInfo, resource, name string) (string, error) { - if len(historyInfo.RevisionToTemplate) == 0 { - return fmt.Sprintf("No rollout history found in %s %q", resource, name), nil - } - // Sort the revisionToChangeCause map by revision - revisions := make([]int64, 0, len(historyInfo.RevisionToTemplate)) - for r := range historyInfo.RevisionToTemplate { - revisions = append(revisions, r) - } - sliceutil.SortInts64(revisions) - - return tabbedString(func(out io.Writer) error { - fmt.Fprintf(out, "%s %q:\n", resource, name) - fmt.Fprintf(out, "REVISION\tCHANGE-CAUSE\n") - for _, r := range revisions { - // Find the change-cause of revision r - changeCause := historyInfo.RevisionToTemplate[r].Annotations[ChangeCauseAnnotation] - if len(changeCause) == 0 { - changeCause = "<none>" - } - fmt.Fprintf(out, "%d\t%s\n", r, changeCause) - } - return nil - }) -} - -// getChangeCause returns the change-cause annotation of the input object -func getChangeCause(obj 
runtime.Object) string { - accessor, err := meta.Accessor(obj) - if err != nil { - return "" - } - return accessor.GetAnnotations()[ChangeCauseAnnotation] -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/interfaces.go b/vendor/k8s.io/kubernetes/pkg/kubectl/interfaces.go deleted file mode 100644 index 8f1e6f197..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/interfaces.go +++ /dev/null @@ -1,32 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/restclient" -) - -// RESTClient is a client helper for dealing with RESTful resources -// in a generic way. -type RESTClient interface { - Get() *client.Request - Post() *client.Request - Patch(api.PatchType) *client.Request - Delete() *client.Request - Put() *client.Request -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/kubectl.go b/vendor/k8s.io/kubernetes/pkg/kubectl/kubectl.go deleted file mode 100644 index 9f5cb22ff..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/kubectl.go +++ /dev/null @@ -1,243 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// A set of common functions needed by cmd/kubectl and pkg/kubectl packages. 
-package kubectl - -import ( - "errors" - "fmt" - "path" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" -) - -const ( - kubectlAnnotationPrefix = "kubectl.kubernetes.io/" - // TODO: auto-generate this - PossibleResourceTypes = `Possible resource types include (case insensitive): pods (po), services (svc), deployments, -replicasets (rs), replicationcontrollers (rc), nodes (no), events (ev), limitranges (limits), -persistentvolumes (pv), persistentvolumeclaims (pvc), resourcequotas (quota), namespaces (ns), -serviceaccounts (sa), ingresses (ing), horizontalpodautoscalers (hpa), daemonsets (ds), configmaps, -componentstatuses (cs), endpoints (ep), and secrets.` -) - -type NamespaceInfo struct { - Namespace string -} - -func listOfImages(spec *api.PodSpec) []string { - images := make([]string, 0, len(spec.Containers)) - for _, container := range spec.Containers { - images = append(images, container.Image) - } - return images -} - -func makeImageList(spec *api.PodSpec) string { - return strings.Join(listOfImages(spec), ",") -} - -func NewThirdPartyResourceMapper(gvs []unversioned.GroupVersion, gvks []unversioned.GroupVersionKind) (meta.RESTMapper, error) { - mapper := meta.NewDefaultRESTMapper(gvs, func(gv unversioned.GroupVersion) (*meta.VersionInterfaces, error) { - for ix := range gvs { - if gvs[ix].Group == gv.Group && gvs[ix].Version == gv.Version { - return &meta.VersionInterfaces{ - ObjectConvertor: api.Scheme, - MetadataAccessor: meta.NewAccessor(), - }, nil - } - } - groupVersions := make([]string, 0, len(gvs)) - for ix := range gvs { - groupVersions = append(groupVersions, gvs[ix].String()) - } - return nil, fmt.Errorf("unsupported storage version: %s (valid: %s)", gv.String(), strings.Join(groupVersions, ", ")) - }) - for ix := range gvks { - mapper.Add(gvks[ix], meta.RESTScopeNamespace) - } - return mapper, nil -} - -// OutputVersionMapper is a RESTMapper that will prefer mappings that -// correspond to a preferred output version (if feasible) -type OutputVersionMapper struct { - meta.RESTMapper - - // output versions takes a list of preferred GroupVersions. Only the first - // hit for a given group will have effect. This allows different output versions - // depending upon the group of the kind being requested - OutputVersions []unversioned.GroupVersion -} - -// RESTMapping implements meta.RESTMapper by prepending the output version to the preferred version list. -func (m OutputVersionMapper) RESTMapping(gk unversioned.GroupKind, versions ...string) (*meta.RESTMapping, error) { - for _, preferredVersion := range m.OutputVersions { - if gk.Group == preferredVersion.Group { - mapping, err := m.RESTMapper.RESTMapping(gk, preferredVersion.Version) - if err == nil { - return mapping, nil - } - - break - } - } - - return m.RESTMapper.RESTMapping(gk, versions...) -} - -// ShortcutExpander is a RESTMapper that can be used for Kubernetes -// resources. 
It expands the resource first, then invokes the wrapped RESTMapper -type ShortcutExpander struct { - RESTMapper meta.RESTMapper -} - -var _ meta.RESTMapper = &ShortcutExpander{} - -func (e ShortcutExpander) KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) { - return e.RESTMapper.KindFor(expandResourceShortcut(resource)) -} - -func (e ShortcutExpander) KindsFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionKind, error) { - return e.RESTMapper.KindsFor(expandResourceShortcut(resource)) -} - -func (e ShortcutExpander) ResourcesFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) { - return e.RESTMapper.ResourcesFor(expandResourceShortcut(resource)) -} - -func (e ShortcutExpander) ResourceFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error) { - return e.RESTMapper.ResourceFor(expandResourceShortcut(resource)) -} - -func (e ShortcutExpander) ResourceSingularizer(resource string) (string, error) { - return e.RESTMapper.ResourceSingularizer(expandResourceShortcut(unversioned.GroupVersionResource{Resource: resource}).Resource) -} - -func (e ShortcutExpander) RESTMapping(gk unversioned.GroupKind, versions ...string) (*meta.RESTMapping, error) { - return e.RESTMapper.RESTMapping(gk, versions...) -} - -func (e ShortcutExpander) AliasesForResource(resource string) ([]string, bool) { - return e.RESTMapper.AliasesForResource(expandResourceShortcut(unversioned.GroupVersionResource{Resource: resource}).Resource) -} - -// shortForms is the list of short names to their expanded names -var shortForms = map[string]string{ - // Please keep this alphabetized - // If you add an entry here, please also take a look at pkg/kubectl/cmd/cmd.go - // and add an entry to valid_resources when appropriate. - "cs": "componentstatuses", - "ds": "daemonsets", - "ep": "endpoints", - "ev": "events", - "hpa": "horizontalpodautoscalers", - "ing": "ingresses", - "limits": "limitranges", - "no": "nodes", - "ns": "namespaces", - "po": "pods", - "psp": "podSecurityPolicies", - "pvc": "persistentvolumeclaims", - "pv": "persistentvolumes", - "quota": "resourcequotas", - "rc": "replicationcontrollers", - "rs": "replicasets", - "sa": "serviceaccounts", - "svc": "services", -} - -// expandResourceShortcut will return the expanded version of resource -// (something that a pkg/api/meta.RESTMapper can understand), if it is -// indeed a shortcut. Otherwise, will return resource unmodified. -func expandResourceShortcut(resource unversioned.GroupVersionResource) unversioned.GroupVersionResource { - if expanded, ok := shortForms[resource.Resource]; ok { - // don't change the group or version that's already been specified - resource.Resource = expanded - } - return resource -} - -// ResourceAliases returns the resource shortcuts and plural forms for the given resources. -func ResourceAliases(rs []string) []string { - as := make([]string, 0, len(rs)) - plurals := make(map[string]struct{}, len(rs)) - for _, r := range rs { - var plural string - switch { - case r == "endpoints": - plural = r // exception. "endpoint" does not exist. Why? - case strings.HasSuffix(r, "y"): - plural = r[0:len(r)-1] + "ies" - case strings.HasSuffix(r, "s"): - plural = r + "es" - default: - plural = r + "s" - } - as = append(as, plural) - - plurals[plural] = struct{}{} - } - - for sf, r := range shortForms { - if _, found := plurals[r]; found { - as = append(as, sf) - } - } - return as -} - -// parseFileSource parses the source given. 
Acceptable formats include: -// -// 1. source-path: the basename will become the key name -// 2. source-name=source-path: the source-name will become the key name and source-path is the path to the key file -// -// Key names cannot include '='. -func parseFileSource(source string) (keyName, filePath string, err error) { - numSeparators := strings.Count(source, "=") - switch { - case numSeparators == 0: - return path.Base(source), source, nil - case numSeparators == 1 && strings.HasPrefix(source, "="): - return "", "", fmt.Errorf("key name for file path %v missing.", strings.TrimPrefix(source, "=")) - case numSeparators == 1 && strings.HasSuffix(source, "="): - return "", "", fmt.Errorf("file path for key name %v missing.", strings.TrimSuffix(source, "=")) - case numSeparators > 1: - return "", "", errors.New("Key names or file paths cannot contain '='.") - default: - components := strings.Split(source, "=") - return components[0], components[1], nil - } -} - -// parseLiteralSource parses the source key=val pair -func parseLiteralSource(source string) (keyName, value string, err error) { - // leading equal is invalid - if strings.Index(source, "=") == 0 { - return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source) - } - // split after the first equal (so values can have the = character) - items := strings.SplitN(source, "=", 2) - if len(items) != 2 { - return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source) - } - - return items[0], items[1], nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/namespace.go b/vendor/k8s.io/kubernetes/pkg/kubectl/namespace.go deleted file mode 100644 index c6011d38b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/namespace.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
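parseFileSource above resolves the key name for a file source: a bare path keys the data under its basename, a key=path pair uses the explicit key, and any other use of '=' is rejected. A small illustrative sketch of that rule, using a hypothetical fileSourceKey helper rather than the vendored function:

package main

import (
	"fmt"
	"path"
	"strings"
)

// fileSourceKey resolves the key name for a --from-file style source:
// "dir/app.conf" -> "app.conf", "cfg=dir/app.conf" -> "cfg".
func fileSourceKey(source string) (key, file string, err error) {
	switch strings.Count(source, "=") {
	case 0:
		return path.Base(source), source, nil
	case 1:
		parts := strings.SplitN(source, "=", 2)
		if parts[0] == "" || parts[1] == "" {
			return "", "", fmt.Errorf("both key and path are required in %q", source)
		}
		return parts[0], parts[1], nil
	default:
		return "", "", fmt.Errorf("key names or file paths cannot contain '='")
	}
}

func main() {
	k, f, _ := fileSourceKey("cfg=conf/app.yaml")
	fmt.Println(k, f) // cfg conf/app.yaml
}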
-*/ - -package kubectl - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/runtime" -) - -// NamespaceGeneratorV1 supports stable generation of a namespace -type NamespaceGeneratorV1 struct { - // Name of namespace - Name string -} - -// Ensure it supports the generator pattern that uses parameter injection -var _ Generator = &NamespaceGeneratorV1{} - -// Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &NamespaceGeneratorV1{} - -// Generate returns a namespace using the specified parameters -func (g NamespaceGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(g.ParamNames(), genericParams) - if err != nil { - return nil, err - } - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - delegate := &NamespaceGeneratorV1{Name: params["name"]} - return delegate.StructuredGenerate() -} - -// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern -func (g NamespaceGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - } -} - -// StructuredGenerate outputs a namespace object using the configured fields -func (g *NamespaceGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := g.validate(); err != nil { - return nil, err - } - namespace := &api.Namespace{} - namespace.Name = g.Name - return namespace, nil -} - -// validate validates required fields are set to support structured generation -func (g *NamespaceGeneratorV1) validate() error { - if len(g.Name) == 0 { - return fmt.Errorf("name must be specified") - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/proxy_server.go b/vendor/k8s.io/kubernetes/pkg/kubectl/proxy_server.go deleted file mode 100644 index 082b542fc..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/proxy_server.go +++ /dev/null @@ -1,248 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "net" - "net/http" - "net/http/httputil" - "net/url" - "os" - "regexp" - "strings" - "time" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/util" -) - -const ( - DefaultHostAcceptRE = "^localhost$,^127\\.0\\.0\\.1$,^\\[::1\\]$" - DefaultPathAcceptRE = "^/.*" - DefaultPathRejectRE = "^/api/.*/exec,^/api/.*/run,^/api/.*/attach" - DefaultMethodRejectRE = "POST,PUT,PATCH" -) - -var ( - // The reverse proxy will periodically flush the io writer at this frequency. - // Only matters for long poll connections like the one used to watch. With an - // interval of 0 the reverse proxy will buffer content sent on any connection - // with transfer-encoding=chunked. 
- // TODO: Flush after each chunk so the client doesn't suffer a 100ms latency per - // watch event. - ReverseProxyFlushInterval = 100 * time.Millisecond -) - -// FilterServer rejects requests which don't match one of the specified regular expressions -type FilterServer struct { - // Only paths that match this regexp will be accepted - AcceptPaths []*regexp.Regexp - // Paths that match this regexp will be rejected, even if they match the above - RejectPaths []*regexp.Regexp - // Hosts are required to match this list of regexp - AcceptHosts []*regexp.Regexp - // Methods that match this regexp are rejected - RejectMethods []*regexp.Regexp - // The delegate to call to handle accepted requests. - delegate http.Handler -} - -// Splits a comma separated list of regexps into a array of Regexp objects. -func MakeRegexpArray(str string) ([]*regexp.Regexp, error) { - parts := strings.Split(str, ",") - result := make([]*regexp.Regexp, len(parts)) - for ix := range parts { - re, err := regexp.Compile(parts[ix]) - if err != nil { - return nil, err - } - result[ix] = re - } - return result, nil -} - -func MakeRegexpArrayOrDie(str string) []*regexp.Regexp { - result, err := MakeRegexpArray(str) - if err != nil { - glog.Fatalf("Error compiling re: %v", err) - } - return result -} - -func matchesRegexp(str string, regexps []*regexp.Regexp) bool { - for _, re := range regexps { - if re.MatchString(str) { - glog.V(6).Infof("%v matched %s", str, re) - return true - } - } - return false -} - -func (f *FilterServer) accept(method, path, host string) bool { - if matchesRegexp(path, f.RejectPaths) { - glog.V(3).Infof("Filter rejecting %v %v %v", method, path, host) - return false - } - if matchesRegexp(method, f.RejectMethods) { - glog.V(3).Infof("Filter rejecting %v %v %v", method, path, host) - return false - } - if matchesRegexp(path, f.AcceptPaths) && matchesRegexp(host, f.AcceptHosts) { - glog.V(3).Infof("Filter accepting %v %v %v", method, path, host) - return true - } - glog.V(3).Infof("Filter rejecting %v %v %v", method, path, host) - return false -} - -// Make a copy of f which passes requests along to the new delegate. -func (f *FilterServer) HandlerFor(delegate http.Handler) *FilterServer { - f2 := *f - f2.delegate = delegate - return &f2 -} - -// Get host from a host header value like "localhost" or "localhost:8080" -func extractHost(header string) (host string) { - host, _, err := net.SplitHostPort(header) - if err != nil { - host = header - } - return host -} - -func (f *FilterServer) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - host := extractHost(req.Host) - if f.accept(req.Method, req.URL.Path, host) { - f.delegate.ServeHTTP(rw, req) - return - } - rw.WriteHeader(http.StatusForbidden) - rw.Write([]byte("<h3>Unauthorized</h3>")) -} - -// ProxyServer is a http.Handler which proxies Kubernetes APIs to remote API server. -type ProxyServer struct { - handler http.Handler -} - -// NewProxyServer creates and installs a new ProxyServer. -// It automatically registers the created ProxyServer to http.DefaultServeMux. -// 'filter', if non-nil, protects requests to the api only. 
-func NewProxyServer(filebase string, apiProxyPrefix string, staticPrefix string, filter *FilterServer, cfg *restclient.Config) (*ProxyServer, error) { - host := cfg.Host - if !strings.HasSuffix(host, "/") { - host = host + "/" - } - target, err := url.Parse(host) - if err != nil { - return nil, err - } - proxy := newProxy(target) - if proxy.Transport, err = restclient.TransportFor(cfg); err != nil { - return nil, err - } - proxyServer := http.Handler(proxy) - if filter != nil { - proxyServer = filter.HandlerFor(proxyServer) - } - - if !strings.HasPrefix(apiProxyPrefix, "/api") { - proxyServer = stripLeaveSlash(apiProxyPrefix, proxyServer) - } - - mux := http.NewServeMux() - mux.Handle(apiProxyPrefix, proxyServer) - if filebase != "" { - // Require user to explicitly request this behavior rather than - // serving their working directory by default. - mux.Handle(staticPrefix, newFileHandler(staticPrefix, filebase)) - } - return &ProxyServer{handler: mux}, nil -} - -// Listen is a simple wrapper around net.Listen. -func (s *ProxyServer) Listen(address string, port int) (net.Listener, error) { - return net.Listen("tcp", fmt.Sprintf("%s:%d", address, port)) -} - -// ListenUnix does net.Listen for a unix socket -func (s *ProxyServer) ListenUnix(path string) (net.Listener, error) { - // Remove any socket, stale or not, but fall through for other files - fi, err := os.Stat(path) - if err == nil && (fi.Mode()&os.ModeSocket) != 0 { - os.Remove(path) - } - // Default to only user accessible socket, caller can open up later if desired - oldmask, _ := util.Umask(0077) - l, err := net.Listen("unix", path) - util.Umask(oldmask) - return l, err -} - -// Serve starts the server using given listener, loops forever. -func (s *ProxyServer) ServeOnListener(l net.Listener) error { - server := http.Server{ - Handler: s.handler, - } - return server.Serve(l) -} - -func newProxy(target *url.URL) *httputil.ReverseProxy { - director := func(req *http.Request) { - req.URL.Scheme = target.Scheme - req.URL.Host = target.Host - req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path) - } - return &httputil.ReverseProxy{Director: director, FlushInterval: ReverseProxyFlushInterval} -} - -func newFileHandler(prefix, base string) http.Handler { - return http.StripPrefix(prefix, http.FileServer(http.Dir(base))) -} - -func singleJoiningSlash(a, b string) string { - aslash := strings.HasSuffix(a, "/") - bslash := strings.HasPrefix(b, "/") - switch { - case aslash && bslash: - return a + b[1:] - case !aslash && !bslash: - return a + "/" + b - } - return a + b -} - -// like http.StripPrefix, but always leaves an initial slash. (so that our -// regexps will work.) -func stripLeaveSlash(prefix string, h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - p := strings.TrimPrefix(req.URL.Path, prefix) - if len(p) >= len(req.URL.Path) { - http.NotFound(w, req) - return - } - if len(p) > 0 && p[:1] != "/" { - p = "/" + p - } - req.URL.Path = p - h.ServeHTTP(w, req) - }) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/builder.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/builder.go deleted file mode 100644 index ae19557fd..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/builder.go +++ /dev/null @@ -1,747 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
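The proxy joins the API server's base path to each request path with exactly one slash, as implemented by singleJoiningSlash above. Restated as a self-contained example (joinSlash here is simply a renamed copy for illustration):

package main

import (
	"fmt"
	"strings"
)

// joinSlash mirrors the singleJoiningSlash helper deleted above: exactly one
// slash ends up between the two path segments.
func joinSlash(a, b string) string {
	aslash := strings.HasSuffix(a, "/")
	bslash := strings.HasPrefix(b, "/")
	switch {
	case aslash && bslash:
		return a + b[1:]
	case !aslash && !bslash:
		return a + "/" + b
	}
	return a + b
}

func main() {
	fmt.Println(joinSlash("/api/", "/v1/pods")) // /api/v1/pods
	fmt.Println(joinSlash("/api", "v1/pods"))   // /api/v1/pods
}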
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "fmt" - "io" - "net/url" - "os" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/validation" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/sets" -) - -var FileExtensions = []string{".json", ".yaml", ".yml"} -var InputExtensions = append(FileExtensions, "stdin") - -const defaultHttpGetAttempts int = 3 - -// Builder provides convenience functions for taking arguments and parameters -// from the command line and converting them to a list of resources to iterate -// over using the Visitor interface. -type Builder struct { - mapper *Mapper - - errs []error - - paths []Visitor - stream bool - dir bool - - selector labels.Selector - selectAll bool - - resources []string - - namespace string - names []string - - resourceTuples []resourceTuple - - defaultNamespace bool - requireNamespace bool - - flatten bool - latest bool - - requireObject bool - - singleResourceType bool - continueOnError bool - - singular bool - - export bool - - schema validation.Schema -} - -type resourceTuple struct { - Resource string - Name string -} - -// NewBuilder creates a builder that operates on generic objects. -func NewBuilder(mapper meta.RESTMapper, typer runtime.ObjectTyper, clientMapper ClientMapper, decoder runtime.Decoder) *Builder { - return &Builder{ - mapper: &Mapper{typer, mapper, clientMapper, decoder}, - requireObject: true, - } -} - -func (b *Builder) Schema(schema validation.Schema) *Builder { - b.schema = schema - return b -} - -// FilenameParam groups input in two categories: URLs and files (files, directories, STDIN) -// If enforceNamespace is false, namespaces in the specs will be allowed to -// override the default namespace. If it is true, namespaces that don't match -// will cause an error. -// If ContinueOnError() is set prior to this method, objects on the path that are not -// recognized will be ignored (but logged at V(2)). -func (b *Builder) FilenameParam(enforceNamespace, recursive bool, paths ...string) *Builder { - for _, s := range paths { - switch { - case s == "-": - b.Stdin() - case strings.Index(s, "http://") == 0 || strings.Index(s, "https://") == 0: - url, err := url.Parse(s) - if err != nil { - b.errs = append(b.errs, fmt.Errorf("the URL passed to filename %q is not valid: %v", s, err)) - continue - } - b.URL(defaultHttpGetAttempts, url) - default: - if !recursive { - b.singular = true - } - b.Path(recursive, s) - } - } - - if enforceNamespace { - b.RequireNamespace() - } - - return b -} - -// URL accepts a number of URLs directly. -func (b *Builder) URL(httpAttemptCount int, urls ...*url.URL) *Builder { - for _, u := range urls { - b.paths = append(b.paths, &URLVisitor{ - URL: u, - StreamVisitor: NewStreamVisitor(nil, b.mapper, u.String(), b.schema), - HttpAttemptCount: httpAttemptCount, - }) - } - return b -} - -// Stdin will read objects from the standard input. 
If ContinueOnError() is set -// prior to this method being called, objects in the stream that are unrecognized -// will be ignored (but logged at V(2)). -func (b *Builder) Stdin() *Builder { - b.stream = true - b.paths = append(b.paths, FileVisitorForSTDIN(b.mapper, b.schema)) - return b -} - -// Stream will read objects from the provided reader, and if an error occurs will -// include the name string in the error message. If ContinueOnError() is set -// prior to this method being called, objects in the stream that are unrecognized -// will be ignored (but logged at V(2)). -func (b *Builder) Stream(r io.Reader, name string) *Builder { - b.stream = true - b.paths = append(b.paths, NewStreamVisitor(r, b.mapper, name, b.schema)) - return b -} - -// Path accepts a set of paths that may be files, directories (all can containing -// one or more resources). Creates a FileVisitor for each file and then each -// FileVisitor is streaming the content to a StreamVisitor. If ContinueOnError() is set -// prior to this method being called, objects on the path that are unrecognized will be -// ignored (but logged at V(2)). -func (b *Builder) Path(recursive bool, paths ...string) *Builder { - for _, p := range paths { - _, err := os.Stat(p) - if os.IsNotExist(err) { - b.errs = append(b.errs, fmt.Errorf("the path %q does not exist", p)) - continue - } - if err != nil { - b.errs = append(b.errs, fmt.Errorf("the path %q cannot be accessed: %v", p, err)) - continue - } - - visitors, err := ExpandPathsToFileVisitors(b.mapper, p, recursive, FileExtensions, b.schema) - if err != nil { - b.errs = append(b.errs, fmt.Errorf("error reading %q: %v", p, err)) - } - if len(visitors) > 1 { - b.dir = true - } - - b.paths = append(b.paths, visitors...) - } - return b -} - -// ResourceTypes is a list of types of resources to operate on, when listing objects on -// the server or retrieving objects that match a selector. -func (b *Builder) ResourceTypes(types ...string) *Builder { - b.resources = append(b.resources, types...) - return b -} - -// ResourceNames accepts a default type and one or more names, and creates tuples of -// resources -func (b *Builder) ResourceNames(resource string, names ...string) *Builder { - for _, name := range names { - // See if this input string is of type/name format - tuple, ok, err := splitResourceTypeName(name) - if err != nil { - b.errs = append(b.errs, err) - return b - } - - if ok { - b.resourceTuples = append(b.resourceTuples, tuple) - continue - } - - // Use the given default type to create a resource tuple - b.resourceTuples = append(b.resourceTuples, resourceTuple{Resource: resource, Name: name}) - } - return b -} - -// SelectorParam defines a selector that should be applied to the object types to load. -// This will not affect files loaded from disk or URL. If the parameter is empty it is -// a no-op - to select all resources invoke `b.Selector(labels.Everything)`. -func (b *Builder) SelectorParam(s string) *Builder { - selector, err := labels.Parse(s) - if err != nil { - b.errs = append(b.errs, fmt.Errorf("the provided selector %q is not valid: %v", s, err)) - return b - } - if selector.Empty() { - return b - } - if b.selectAll { - b.errs = append(b.errs, fmt.Errorf("found non empty selector %q with previously set 'all' parameter. ", s)) - return b - } - return b.Selector(selector) -} - -// Selector accepts a selector directly, and if non nil will trigger a list action. 
-func (b *Builder) Selector(selector labels.Selector) *Builder { - b.selector = selector - return b -} - -// ExportParam accepts the export boolean for these resources -func (b *Builder) ExportParam(export bool) *Builder { - b.export = export - return b -} - -// NamespaceParam accepts the namespace that these resources should be -// considered under from - used by DefaultNamespace() and RequireNamespace() -func (b *Builder) NamespaceParam(namespace string) *Builder { - b.namespace = namespace - return b -} - -// DefaultNamespace instructs the builder to set the namespace value for any object found -// to NamespaceParam() if empty. -func (b *Builder) DefaultNamespace() *Builder { - b.defaultNamespace = true - return b -} - -// AllNamespaces instructs the builder to use NamespaceAll as a namespace to request resources -// acroll all namespace. This overrides the namespace set by NamespaceParam(). -func (b *Builder) AllNamespaces(allNamespace bool) *Builder { - if allNamespace { - b.namespace = api.NamespaceAll - } - return b -} - -// RequireNamespace instructs the builder to set the namespace value for any object found -// to NamespaceParam() if empty, and if the value on the resource does not match -// NamespaceParam() an error will be returned. -func (b *Builder) RequireNamespace() *Builder { - b.requireNamespace = true - return b -} - -// SelectEverythingParam -func (b *Builder) SelectAllParam(selectAll bool) *Builder { - if selectAll && b.selector != nil { - b.errs = append(b.errs, fmt.Errorf("setting 'all' parameter but found a non empty selector. ")) - return b - } - b.selectAll = selectAll - return b -} - -// ResourceTypeOrNameArgs indicates that the builder should accept arguments -// of the form `(<type1>[,<type2>,...]|<type> <name1>[,<name2>,...])`. When one argument is -// received, the types provided will be retrieved from the server (and be comma delimited). -// When two or more arguments are received, they must be a single type and resource name(s). -// The allowEmptySelector permits to select all the resources (via Everything func). -func (b *Builder) ResourceTypeOrNameArgs(allowEmptySelector bool, args ...string) *Builder { - args = normalizeMultipleResourcesArgs(args) - if ok, err := hasCombinedTypeArgs(args); ok { - if err != nil { - b.errs = append(b.errs, err) - return b - } - for _, s := range args { - tuple, ok, err := splitResourceTypeName(s) - if err != nil { - b.errs = append(b.errs, err) - return b - } - if ok { - b.resourceTuples = append(b.resourceTuples, tuple) - } - } - return b - } - if len(args) > 0 { - // Try replacing aliases only in types - args[0] = b.replaceAliases(args[0]) - } - switch { - case len(args) > 2: - b.names = append(b.names, args[1:]...) - b.ResourceTypes(SplitResourceArgument(args[0])...) - case len(args) == 2: - b.names = append(b.names, args[1]) - b.ResourceTypes(SplitResourceArgument(args[0])...) - case len(args) == 1: - b.ResourceTypes(SplitResourceArgument(args[0])...) 
- if b.selector == nil && allowEmptySelector { - b.selector = labels.Everything() - } - case len(args) == 0: - default: - b.errs = append(b.errs, fmt.Errorf("when passing arguments, must be resource or resource and name")) - } - return b -} - -// replaceAliases accepts an argument and tries to expand any existing -// aliases found in it -func (b *Builder) replaceAliases(input string) string { - replaced := []string{} - for _, arg := range strings.Split(input, ",") { - if aliases, ok := b.mapper.AliasesForResource(arg); ok { - arg = strings.Join(aliases, ",") - } - replaced = append(replaced, arg) - } - return strings.Join(replaced, ",") -} - -func hasCombinedTypeArgs(args []string) (bool, error) { - hasSlash := 0 - for _, s := range args { - if strings.Contains(s, "/") { - hasSlash++ - } - } - switch { - case hasSlash > 0 && hasSlash == len(args): - return true, nil - case hasSlash > 0 && hasSlash != len(args): - return true, fmt.Errorf("when passing arguments in resource/name form, all arguments must include the resource") - default: - return false, nil - } -} - -// Normalize args convert multiple resources to resource tuples, a,b,c d -// as a transform to a/d b/d c/d -func normalizeMultipleResourcesArgs(args []string) []string { - if len(args) >= 2 { - resources := []string{} - resources = append(resources, SplitResourceArgument(args[0])...) - if len(resources) > 1 { - names := []string{} - names = append(names, args[1:]...) - newArgs := []string{} - for _, resource := range resources { - for _, name := range names { - newArgs = append(newArgs, strings.Join([]string{resource, name}, "/")) - } - } - return newArgs - } - } - return args -} - -// splitResourceTypeName handles type/name resource formats and returns a resource tuple -// (empty or not), whether it successfully found one, and an error -func splitResourceTypeName(s string) (resourceTuple, bool, error) { - if !strings.Contains(s, "/") { - return resourceTuple{}, false, nil - } - seg := strings.Split(s, "/") - if len(seg) != 2 { - return resourceTuple{}, false, fmt.Errorf("arguments in resource/name form may not have more than one slash") - } - resource, name := seg[0], seg[1] - if len(resource) == 0 || len(name) == 0 || len(SplitResourceArgument(resource)) != 1 { - return resourceTuple{}, false, fmt.Errorf("arguments in resource/name form must have a single resource and name") - } - return resourceTuple{Resource: resource, Name: name}, true, nil -} - -// Flatten will convert any objects with a field named "Items" that is an array of runtime.Object -// compatible types into individual entries and give them their own items. The original object -// is not passed to any visitors. -func (b *Builder) Flatten() *Builder { - b.flatten = true - return b -} - -// Latest will fetch the latest copy of any objects loaded from URLs or files from the server. -func (b *Builder) Latest() *Builder { - b.latest = true - return b -} - -// RequireObject ensures that resulting infos have an object set. If false, resulting info may not have an object set. -func (b *Builder) RequireObject(require bool) *Builder { - b.requireObject = require - return b -} - -// ContinueOnError will attempt to load and visit as many objects as possible, even if some visits -// return errors or some objects cannot be loaded. The default behavior is to terminate after -// the first error is returned from a VisitorFunc. 
-func (b *Builder) ContinueOnError() *Builder { - b.continueOnError = true - return b -} - -// SingleResourceType will cause the builder to error if the user specifies more than a single type -// of resource. -func (b *Builder) SingleResourceType() *Builder { - b.singleResourceType = true - return b -} - -// mappingFor returns the RESTMapping for the Kind referenced by the resource. -// prefers a fully specified GroupVersionResource match. If we don't have one match on GroupResource -func (b *Builder) mappingFor(resourceArg string) (*meta.RESTMapping, error) { - fullySpecifiedGVR, groupResource := unversioned.ParseResourceArg(resourceArg) - gvk := unversioned.GroupVersionKind{} - if fullySpecifiedGVR != nil { - gvk, _ = b.mapper.KindFor(*fullySpecifiedGVR) - } - if gvk.IsEmpty() { - var err error - gvk, err = b.mapper.KindFor(groupResource.WithVersion("")) - if err != nil { - return nil, err - } - } - - return b.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) -} - -func (b *Builder) resourceMappings() ([]*meta.RESTMapping, error) { - if len(b.resources) > 1 && b.singleResourceType { - return nil, fmt.Errorf("you may only specify a single resource type") - } - mappings := []*meta.RESTMapping{} - for _, r := range b.resources { - mapping, err := b.mappingFor(r) - if err != nil { - return nil, err - } - - mappings = append(mappings, mapping) - } - return mappings, nil -} - -func (b *Builder) resourceTupleMappings() (map[string]*meta.RESTMapping, error) { - mappings := make(map[string]*meta.RESTMapping) - canonical := make(map[string]struct{}) - for _, r := range b.resourceTuples { - if _, ok := mappings[r.Resource]; ok { - continue - } - mapping, err := b.mappingFor(r.Resource) - if err != nil { - return nil, err - } - - mappings[mapping.Resource] = mapping - mappings[r.Resource] = mapping - canonical[mapping.Resource] = struct{}{} - } - if len(canonical) > 1 && b.singleResourceType { - return nil, fmt.Errorf("you may only specify a single resource type") - } - return mappings, nil -} - -func (b *Builder) visitorResult() *Result { - if len(b.errs) > 0 { - return &Result{err: utilerrors.NewAggregate(b.errs)} - } - - if b.selectAll { - b.selector = labels.Everything() - } - - // visit selectors - if b.selector != nil { - if len(b.names) != 0 { - return &Result{err: fmt.Errorf("name cannot be provided when a selector is specified")} - } - if len(b.resourceTuples) != 0 { - return &Result{err: fmt.Errorf("selectors and the all flag cannot be used when passing resource/name arguments")} - } - if len(b.resources) == 0 { - return &Result{err: fmt.Errorf("at least one resource must be specified to use a selector")} - } - // empty selector has different error message for paths being provided - if len(b.paths) != 0 { - if b.selector.Empty() { - return &Result{err: fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify a resource by arguments as well")} - } else { - return &Result{err: fmt.Errorf("a selector may not be specified when path, URL, or stdin is provided as input")} - } - } - mappings, err := b.resourceMappings() - if err != nil { - return &Result{err: err} - } - - visitors := []Visitor{} - for _, mapping := range mappings { - client, err := b.mapper.ClientForMapping(mapping) - if err != nil { - return &Result{err: err} - } - selectorNamespace := b.namespace - if mapping.Scope.Name() != meta.RESTScopeNameNamespace { - selectorNamespace = "" - } - visitors = append(visitors, NewSelector(client, mapping, selectorNamespace, b.selector, b.export)) - } - if 
b.continueOnError { - return &Result{visitor: EagerVisitorList(visitors), sources: visitors} - } - return &Result{visitor: VisitorList(visitors), sources: visitors} - } - - // visit items specified by resource and name - if len(b.resourceTuples) != 0 { - // if b.singular is false, this could be by default, so double-check length - // of resourceTuples to determine if in fact it is singular or not - isSingular := b.singular - if !isSingular { - isSingular = len(b.resourceTuples) == 1 - } - - if len(b.paths) != 0 { - return &Result{singular: isSingular, err: fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify a resource by arguments as well")} - } - if len(b.resources) != 0 { - return &Result{singular: isSingular, err: fmt.Errorf("you may not specify individual resources and bulk resources in the same call")} - } - - // retrieve one client for each resource - mappings, err := b.resourceTupleMappings() - if err != nil { - return &Result{singular: isSingular, err: err} - } - clients := make(map[string]RESTClient) - for _, mapping := range mappings { - s := fmt.Sprintf("%s/%s", mapping.GroupVersionKind.GroupVersion().String(), mapping.Resource) - if _, ok := clients[s]; ok { - continue - } - client, err := b.mapper.ClientForMapping(mapping) - if err != nil { - return &Result{err: err} - } - clients[s] = client - } - - items := []Visitor{} - for _, tuple := range b.resourceTuples { - mapping, ok := mappings[tuple.Resource] - if !ok { - return &Result{singular: isSingular, err: fmt.Errorf("resource %q is not recognized: %v", tuple.Resource, mappings)} - } - s := fmt.Sprintf("%s/%s", mapping.GroupVersionKind.GroupVersion().String(), mapping.Resource) - client, ok := clients[s] - if !ok { - return &Result{singular: isSingular, err: fmt.Errorf("could not find a client for resource %q", tuple.Resource)} - } - - selectorNamespace := b.namespace - if mapping.Scope.Name() != meta.RESTScopeNameNamespace { - selectorNamespace = "" - } else { - if len(b.namespace) == 0 { - return &Result{singular: isSingular, err: fmt.Errorf("namespace may not be empty when retrieving a resource by name")} - } - } - - info := NewInfo(client, mapping, selectorNamespace, tuple.Name, b.export) - items = append(items, info) - } - - var visitors Visitor - if b.continueOnError { - visitors = EagerVisitorList(items) - } else { - visitors = VisitorList(items) - } - return &Result{singular: isSingular, visitor: visitors, sources: items} - } - - // visit items specified by name - if len(b.names) != 0 { - isSingular := len(b.names) == 1 - - if len(b.paths) != 0 { - return &Result{singular: isSingular, err: fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify a resource by arguments as well")} - } - if len(b.resources) == 0 { - return &Result{singular: isSingular, err: fmt.Errorf("you must provide a resource and a resource name together")} - } - if len(b.resources) > 1 { - return &Result{singular: isSingular, err: fmt.Errorf("you must specify only one resource")} - } - - mappings, err := b.resourceMappings() - if err != nil { - return &Result{singular: isSingular, err: err} - } - mapping := mappings[0] - - client, err := b.mapper.ClientForMapping(mapping) - if err != nil { - return &Result{err: err} - } - - selectorNamespace := b.namespace - if mapping.Scope.Name() != meta.RESTScopeNameNamespace { - selectorNamespace = "" - } else { - if len(b.namespace) == 0 { - return &Result{singular: isSingular, err: fmt.Errorf("namespace may not be empty when retrieving a resource by 
name")} - } - } - - visitors := []Visitor{} - for _, name := range b.names { - info := NewInfo(client, mapping, selectorNamespace, name, b.export) - visitors = append(visitors, info) - } - return &Result{singular: isSingular, visitor: VisitorList(visitors), sources: visitors} - } - - // visit items specified by paths - if len(b.paths) != 0 { - singular := !b.dir && !b.stream && len(b.paths) == 1 - if len(b.resources) != 0 { - return &Result{singular: singular, err: fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify resource arguments as well")} - } - - var visitors Visitor - if b.continueOnError { - visitors = EagerVisitorList(b.paths) - } else { - visitors = VisitorList(b.paths) - } - - // only items from disk can be refetched - if b.latest { - // must flatten lists prior to fetching - if b.flatten { - visitors = NewFlattenListVisitor(visitors, b.mapper) - } - // must set namespace prior to fetching - if b.defaultNamespace { - visitors = NewDecoratedVisitor(visitors, SetNamespace(b.namespace)) - } - visitors = NewDecoratedVisitor(visitors, RetrieveLatest) - } - return &Result{singular: singular, visitor: visitors, sources: b.paths} - } - - return &Result{err: fmt.Errorf("you must provide one or more resources by argument or filename (%s)", strings.Join(InputExtensions, "|"))} -} - -// Do returns a Result object with a Visitor for the resources identified by the Builder. -// The visitor will respect the error behavior specified by ContinueOnError. Note that stream -// inputs are consumed by the first execution - use Infos() or Object() on the Result to capture a list -// for further iteration. -func (b *Builder) Do() *Result { - r := b.visitorResult() - if r.err != nil { - return r - } - if b.flatten { - r.visitor = NewFlattenListVisitor(r.visitor, b.mapper) - } - helpers := []VisitorFunc{} - if b.defaultNamespace { - helpers = append(helpers, SetNamespace(b.namespace)) - } - if b.requireNamespace { - helpers = append(helpers, RequireNamespace(b.namespace)) - } - helpers = append(helpers, FilterNamespace) - if b.requireObject { - helpers = append(helpers, RetrieveLazy) - } - r.visitor = NewDecoratedVisitor(r.visitor, helpers...) - if b.continueOnError { - r.visitor = ContinueOnErrorVisitor{r.visitor} - } - return r -} - -// SplitResourceArgument splits the argument with commas and returns unique -// strings in the original order. -func SplitResourceArgument(arg string) []string { - out := []string{} - set := sets.NewString() - for _, s := range strings.Split(arg, ",") { - if set.Has(s) { - continue - } - set.Insert(s) - out = append(out, s) - } - return out -} - -// HasNames returns true if the provided args contain resource names -func HasNames(args []string) (bool, error) { - args = normalizeMultipleResourcesArgs(args) - hasCombinedTypes, err := hasCombinedTypeArgs(args) - if err != nil { - return false, err - } - return hasCombinedTypes || len(args) > 1, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/doc.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/doc.go deleted file mode 100644 index 05b35cfdd..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package resource assists clients in dealing with RESTful objects that match the -// Kubernetes API conventions. The Helper object provides simple CRUD operations -// on resources. The Visitor interface makes it easy to deal with multiple resources -// in bulk for retrieval and operation. The Builder object simplifies converting -// standard command line arguments and parameters into a Visitor that can iterate -// over all of the identified resources, whether on the server or on the local -// filesystem. -package resource diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/helper.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/helper.go deleted file mode 100644 index 849a6c040..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/helper.go +++ /dev/null @@ -1,166 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "strconv" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/watch" -) - -// Helper provides methods for retrieving or mutating a RESTful -// resource. -type Helper struct { - // The name of this resource as the server would recognize it - Resource string - // A RESTClient capable of mutating this resource. - RESTClient RESTClient - // An interface for reading or writing the resource version of this - // type. - Versioner runtime.ResourceVersioner - // True if the resource type is scoped to namespaces - NamespaceScoped bool -} - -// NewHelper creates a Helper from a ResourceMapping -func NewHelper(client RESTClient, mapping *meta.RESTMapping) *Helper { - return &Helper{ - Resource: mapping.Resource, - RESTClient: client, - Versioner: mapping.MetadataAccessor, - NamespaceScoped: mapping.Scope.Name() == meta.RESTScopeNameNamespace, - } -} - -func (m *Helper) Get(namespace, name string, export bool) (runtime.Object, error) { - req := m.RESTClient.Get(). - NamespaceIfScoped(namespace, m.NamespaceScoped). - Resource(m.Resource). - Name(name) - if export { - req.Param("export", strconv.FormatBool(export)) - } - return req.Do().Get() -} - -// TODO: add field selector -func (m *Helper) List(namespace, apiVersion string, selector labels.Selector, export bool) (runtime.Object, error) { - req := m.RESTClient.Get(). - NamespaceIfScoped(namespace, m.NamespaceScoped). - Resource(m.Resource). 
- LabelsSelectorParam(selector) - if export { - req.Param("export", strconv.FormatBool(export)) - } - return req.Do().Get() -} - -func (m *Helper) Watch(namespace, resourceVersion, apiVersion string, labelSelector labels.Selector) (watch.Interface, error) { - return m.RESTClient.Get(). - Prefix("watch"). - NamespaceIfScoped(namespace, m.NamespaceScoped). - Resource(m.Resource). - Param("resourceVersion", resourceVersion). - LabelsSelectorParam(labelSelector). - Watch() -} - -func (m *Helper) WatchSingle(namespace, name, resourceVersion string) (watch.Interface, error) { - return m.RESTClient.Get(). - Prefix("watch"). - NamespaceIfScoped(namespace, m.NamespaceScoped). - Resource(m.Resource). - Name(name). - Param("resourceVersion", resourceVersion). - Watch() -} - -func (m *Helper) Delete(namespace, name string) error { - return m.RESTClient.Delete(). - NamespaceIfScoped(namespace, m.NamespaceScoped). - Resource(m.Resource). - Name(name). - Do(). - Error() -} - -func (m *Helper) Create(namespace string, modify bool, obj runtime.Object) (runtime.Object, error) { - if modify { - // Attempt to version the object based on client logic. - version, err := m.Versioner.ResourceVersion(obj) - if err != nil { - // We don't know how to clear the version on this object, so send it to the server as is - return m.createResource(m.RESTClient, m.Resource, namespace, obj) - } - if version != "" { - if err := m.Versioner.SetResourceVersion(obj, ""); err != nil { - return nil, err - } - } - } - - return m.createResource(m.RESTClient, m.Resource, namespace, obj) -} - -func (m *Helper) createResource(c RESTClient, resource, namespace string, obj runtime.Object) (runtime.Object, error) { - return c.Post().NamespaceIfScoped(namespace, m.NamespaceScoped).Resource(resource).Body(obj).Do().Get() -} -func (m *Helper) Patch(namespace, name string, pt api.PatchType, data []byte) (runtime.Object, error) { - return m.RESTClient.Patch(pt). - NamespaceIfScoped(namespace, m.NamespaceScoped). - Resource(m.Resource). - Name(name). - Body(data). - Do(). - Get() -} - -func (m *Helper) Replace(namespace, name string, overwrite bool, obj runtime.Object) (runtime.Object, error) { - c := m.RESTClient - - // Attempt to version the object based on client logic. 
- version, err := m.Versioner.ResourceVersion(obj) - if err != nil { - // We don't know how to version this object, so send it to the server as is - return m.replaceResource(c, m.Resource, namespace, name, obj) - } - if version == "" && overwrite { - // Retrieve the current version of the object to overwrite the server object - serverObj, err := c.Get().NamespaceIfScoped(namespace, m.NamespaceScoped).Resource(m.Resource).Name(name).Do().Get() - if err != nil { - // The object does not exist, but we want it to be created - return m.replaceResource(c, m.Resource, namespace, name, obj) - } - serverVersion, err := m.Versioner.ResourceVersion(serverObj) - if err != nil { - return nil, err - } - if err := m.Versioner.SetResourceVersion(obj, serverVersion); err != nil { - return nil, err - } - } - - return m.replaceResource(c, m.Resource, namespace, name, obj) -} - -func (m *Helper) replaceResource(c RESTClient, resource, namespace, name string, obj runtime.Object) (runtime.Object, error) { - return c.Put().NamespaceIfScoped(namespace, m.NamespaceScoped).Resource(resource).Name(name).Body(obj).Do().Get() -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/interfaces.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/interfaces.go deleted file mode 100644 index 2639a61ec..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/interfaces.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - client "k8s.io/kubernetes/pkg/client/restclient" -) - -// RESTClient is a client helper for dealing with RESTful resources -// in a generic way. -type RESTClient interface { - Get() *client.Request - Post() *client.Request - Patch(api.PatchType) *client.Request - Delete() *client.Request - Put() *client.Request -} - -// ClientMapper abstracts retrieving a Client for mapped objects. -type ClientMapper interface { - ClientForMapping(mapping *meta.RESTMapping) (RESTClient, error) -} - -// ClientMapperFunc implements ClientMapper for a function -type ClientMapperFunc func(mapping *meta.RESTMapping) (RESTClient, error) - -// ClientForMapping implements ClientMapper -func (f ClientMapperFunc) ClientForMapping(mapping *meta.RESTMapping) (RESTClient, error) { - return f(mapping) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/mapper.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/mapper.go deleted file mode 100644 index 1ca922a05..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/mapper.go +++ /dev/null @@ -1,167 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "fmt" - "reflect" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata" - "k8s.io/kubernetes/pkg/runtime" -) - -// DisabledClientForMapping allows callers to avoid allowing remote calls when handling -// resources. -type DisabledClientForMapping struct { - ClientMapper -} - -func (f DisabledClientForMapping) ClientForMapping(mapping *meta.RESTMapping) (RESTClient, error) { - return nil, nil -} - -// Mapper is a convenience struct for holding references to the three interfaces -// needed to create Info for arbitrary objects. -type Mapper struct { - runtime.ObjectTyper - meta.RESTMapper - ClientMapper - runtime.Decoder -} - -// InfoForData creates an Info object for the given data. An error is returned -// if any of the decoding or client lookup steps fail. Name and namespace will be -// set into Info if the mapping's MetadataAccessor can retrieve them. -func (m *Mapper) InfoForData(data []byte, source string) (*Info, error) { - versions := &runtime.VersionedObjects{} - _, gvk, err := m.Decode(data, nil, versions) - if err != nil { - return nil, fmt.Errorf("unable to decode %q: %v", source, err) - } - var obj runtime.Object - var versioned runtime.Object - if isThirdParty, gvkOut, err := thirdpartyresourcedata.IsThirdPartyObject(data, gvk); err != nil { - return nil, err - } else if isThirdParty { - obj, err = runtime.Decode(thirdpartyresourcedata.NewDecoder(nil, gvkOut.Kind), data) - versioned = obj - gvk = gvkOut - } else { - obj, versioned = versions.Last(), versions.First() - } - if err != nil { - return nil, fmt.Errorf("unable to decode %q: %v [%v]", source, err, gvk) - } - mapping, err := m.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, fmt.Errorf("unable to recognize %q: %v", source, err) - } - - client, err := m.ClientForMapping(mapping) - if err != nil { - return nil, fmt.Errorf("unable to connect to a server to handle %q: %v", mapping.Resource, err) - } - - name, _ := mapping.MetadataAccessor.Name(obj) - namespace, _ := mapping.MetadataAccessor.Namespace(obj) - resourceVersion, _ := mapping.MetadataAccessor.ResourceVersion(obj) - - return &Info{ - Mapping: mapping, - Client: client, - Namespace: namespace, - Name: name, - Source: source, - VersionedObject: versioned, - Object: obj, - ResourceVersion: resourceVersion, - }, nil -} - -// InfoForObject creates an Info object for the given Object. An error is returned -// if the object cannot be introspected. Name and namespace will be set into Info -// if the mapping's MetadataAccessor can retrieve them. 
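InfoForData above decodes raw manifest bytes and then relies on the mapping's metadata accessor to pull out name, namespace, and resourceVersion before wiring up a client. A rough standalone sketch of the same idea for plain JSON manifests follows, using only the standard library; the manifest and info types here are invented for the example, and the real code additionally handles YAML, third-party resources, and versioned decoding.

package main

import (
	"encoding/json"
	"fmt"
)

// manifest mirrors the handful of fields needed to identify a Kubernetes-style object.
type manifest struct {
	APIVersion string `json:"apiVersion"`
	Kind       string `json:"kind"`
	Metadata   struct {
		Name            string `json:"name"`
		Namespace       string `json:"namespace"`
		ResourceVersion string `json:"resourceVersion"`
	} `json:"metadata"`
}

// info is a simplified stand-in for the Info value built by InfoForData.
type info struct {
	Source, APIVersion, Kind, Namespace, Name, ResourceVersion string
}

// infoForData decodes JSON manifest bytes and records where they came from.
func infoForData(data []byte, source string) (*info, error) {
	var m manifest
	if err := json.Unmarshal(data, &m); err != nil {
		return nil, fmt.Errorf("unable to decode %q: %v", source, err)
	}
	return &info{
		Source:          source,
		APIVersion:      m.APIVersion,
		Kind:            m.Kind,
		Namespace:       m.Metadata.Namespace,
		Name:            m.Metadata.Name,
		ResourceVersion: m.Metadata.ResourceVersion,
	}, nil
}

func main() {
	data := []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"web","namespace":"default"}}`)
	i, err := infoForData(data, "pod.json")
	fmt.Println(i, err)
}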
-func (m *Mapper) InfoForObject(obj runtime.Object, preferredGVKs []unversioned.GroupVersionKind) (*Info, error) { - groupVersionKinds, _, err := m.ObjectKinds(obj) - if err != nil { - return nil, fmt.Errorf("unable to get type info from the object %q: %v", reflect.TypeOf(obj), err) - } - - groupVersionKind := groupVersionKinds[0] - if len(groupVersionKinds) > 1 && len(preferredGVKs) > 0 { - groupVersionKind = preferredObjectKind(groupVersionKinds, preferredGVKs) - } - - mapping, err := m.RESTMapping(groupVersionKind.GroupKind(), groupVersionKind.Version) - if err != nil { - return nil, fmt.Errorf("unable to recognize %v: %v", groupVersionKind, err) - } - client, err := m.ClientForMapping(mapping) - if err != nil { - return nil, fmt.Errorf("unable to connect to a server to handle %q: %v", mapping.Resource, err) - } - name, _ := mapping.MetadataAccessor.Name(obj) - namespace, _ := mapping.MetadataAccessor.Namespace(obj) - resourceVersion, _ := mapping.MetadataAccessor.ResourceVersion(obj) - return &Info{ - Mapping: mapping, - Client: client, - Namespace: namespace, - Name: name, - - Object: obj, - ResourceVersion: resourceVersion, - }, nil -} - -// preferredObjectKind picks the possibility that most closely matches the priority list in this order: -// GroupVersionKind matches (exact match) -// GroupKind matches -// Group matches -func preferredObjectKind(possibilities []unversioned.GroupVersionKind, preferences []unversioned.GroupVersionKind) unversioned.GroupVersionKind { - // Exact match - for _, priority := range preferences { - for _, possibility := range possibilities { - if possibility == priority { - return possibility - } - } - } - - // GroupKind match - for _, priority := range preferences { - for _, possibility := range possibilities { - if possibility.GroupKind() == priority.GroupKind() { - return possibility - } - } - } - - // Group match - for _, priority := range preferences { - for _, possibility := range possibilities { - if possibility.Group == priority.Group { - return possibility - } - } - } - - // Just pick the first - return possibilities[0] -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/result.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/result.go deleted file mode 100644 index 562fc0cc3..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/result.go +++ /dev/null @@ -1,291 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "fmt" - "reflect" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/runtime" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/watch" -) - -// ErrMatchFunc can be used to filter errors that may not be true failures. 
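preferredObjectKind above resolves ambiguity by walking a priority list in decreasing strictness: exact GroupVersionKind match, then GroupKind, then Group, falling back to the first possibility. The same cascade, sketched over a simplified GVK struct invented for this example:

package main

import "fmt"

// GVK is a simplified stand-in for a group/version/kind triple.
type GVK struct{ Group, Version, Kind string }

// preferred picks the possibility that best matches the preference list:
// exact match first, then same Group+Kind, then same Group, else the first
// possibility. It assumes possibilities is non-empty.
func preferred(possibilities, preferences []GVK) GVK {
	for _, p := range preferences {
		for _, c := range possibilities {
			if c == p {
				return c
			}
		}
	}
	for _, p := range preferences {
		for _, c := range possibilities {
			if c.Group == p.Group && c.Kind == p.Kind {
				return c
			}
		}
	}
	for _, p := range preferences {
		for _, c := range possibilities {
			if c.Group == p.Group {
				return c
			}
		}
	}
	return possibilities[0]
}

func main() {
	got := preferred(
		[]GVK{{"apps", "v1beta1", "Deployment"}, {"extensions", "v1beta1", "Deployment"}},
		[]GVK{{"apps", "v1", "Deployment"}},
	)
	fmt.Println(got) // {apps v1beta1 Deployment}: GroupKind match beats Group-only match
}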
-type ErrMatchFunc func(error) bool - -// Result contains helper methods for dealing with the outcome of a Builder. -type Result struct { - err error - visitor Visitor - - sources []Visitor - singular bool - - ignoreErrors []utilerrors.Matcher - - // populated by a call to Infos - info []*Info -} - -// IgnoreErrors will filter errors that occur when by visiting the result -// (but not errors that occur by creating the result in the first place), -// eliminating any that match fns. This is best used in combination with -// Builder.ContinueOnError(), where the visitors accumulate errors and return -// them after visiting as a slice of errors. If no errors remain after -// filtering, the various visitor methods on Result will return nil for -// err. -func (r *Result) IgnoreErrors(fns ...ErrMatchFunc) *Result { - for _, fn := range fns { - r.ignoreErrors = append(r.ignoreErrors, utilerrors.Matcher(fn)) - } - return r -} - -// Err returns one or more errors (via a util.ErrorList) that occurred prior -// to visiting the elements in the visitor. To see all errors including those -// that occur during visitation, invoke Infos(). -func (r *Result) Err() error { - return r.err -} - -// Visit implements the Visitor interface on the items described in the Builder. -// Note that some visitor sources are not traversable more than once, or may -// return different results. If you wish to operate on the same set of resources -// multiple times, use the Infos() method. -func (r *Result) Visit(fn VisitorFunc) error { - if r.err != nil { - return r.err - } - err := r.visitor.Visit(fn) - return utilerrors.FilterOut(err, r.ignoreErrors...) -} - -// IntoSingular sets the provided boolean pointer to true if the Builder input -// reflected a single item, or multiple. -func (r *Result) IntoSingular(b *bool) *Result { - *b = r.singular - return r -} - -// Infos returns an array of all of the resource infos retrieved via traversal. -// Will attempt to traverse the entire set of visitors only once, and will return -// a cached list on subsequent calls. -func (r *Result) Infos() ([]*Info, error) { - if r.err != nil { - return nil, r.err - } - if r.info != nil { - return r.info, nil - } - - infos := []*Info{} - err := r.visitor.Visit(func(info *Info, err error) error { - if err != nil { - return err - } - infos = append(infos, info) - return nil - }) - err = utilerrors.FilterOut(err, r.ignoreErrors...) - - r.info, r.err = infos, err - return infos, err -} - -// Object returns a single object representing the output of a single visit to all -// found resources. If the Builder was a singular context (expected to return a -// single resource by user input) and only a single resource was found, the resource -// will be returned as is. Otherwise, the returned resources will be part of an -// api.List. The ResourceVersion of the api.List will be set only if it is identical -// across all infos returned. 
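Result.Visit and Result.Infos above run every visit error through a list of matcher functions so callers can declare whole classes of errors (for example NotFound) ignorable. A minimal version of that filter, with the matcher type and filterOut helper assumed rather than taken from the utilerrors package, and without the aggregate-error recursion the real code performs:

package main

import (
	"errors"
	"fmt"
	"os"
)

// ErrMatcher reports whether an error should be ignored.
type ErrMatcher func(error) bool

// filterOut returns nil when err is nil or matches any of the provided
// matchers, otherwise it returns err unchanged.
func filterOut(err error, matchers ...ErrMatcher) error {
	if err == nil {
		return nil
	}
	for _, m := range matchers {
		if m(err) {
			return nil
		}
	}
	return err
}

func main() {
	_, err := os.Open("/does/not/exist")
	fmt.Println(filterOut(err, func(e error) bool { return os.IsNotExist(e) })) // <nil>
	fmt.Println(filterOut(errors.New("boom")))                                  // boom
}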
-func (r *Result) Object() (runtime.Object, error) { - infos, err := r.Infos() - if err != nil { - return nil, err - } - - versions := sets.String{} - objects := []runtime.Object{} - for _, info := range infos { - if info.Object != nil { - objects = append(objects, info.Object) - versions.Insert(info.ResourceVersion) - } - } - - if len(objects) == 1 { - if r.singular { - return objects[0], nil - } - // if the item is a list already, don't create another list - if meta.IsListType(objects[0]) { - return objects[0], nil - } - } - - version := "" - if len(versions) == 1 { - version = versions.List()[0] - } - return &api.List{ - ListMeta: unversioned.ListMeta{ - ResourceVersion: version, - }, - Items: objects, - }, err -} - -// ResourceMapping returns a single meta.RESTMapping representing the -// resources located by the builder, or an error if more than one -// mapping was found. -func (r *Result) ResourceMapping() (*meta.RESTMapping, error) { - if r.err != nil { - return nil, r.err - } - mappings := map[string]*meta.RESTMapping{} - for i := range r.sources { - m, ok := r.sources[i].(ResourceMapping) - if !ok { - return nil, fmt.Errorf("a resource mapping could not be loaded from %v", reflect.TypeOf(r.sources[i])) - } - mapping := m.ResourceMapping() - mappings[mapping.Resource] = mapping - } - if len(mappings) != 1 { - return nil, fmt.Errorf("expected only a single resource type") - } - for _, mapping := range mappings { - return mapping, nil - } - return nil, nil -} - -// Watch retrieves changes that occur on the server to the specified resource. -// It currently supports watching a single source - if the resource source -// (selectors or pure types) can be watched, they will be, otherwise the list -// will be visited (equivalent to the Infos() call) and if there is a single -// resource present, it will be watched, otherwise an error will be returned. -func (r *Result) Watch(resourceVersion string) (watch.Interface, error) { - if r.err != nil { - return nil, r.err - } - if len(r.sources) != 1 { - return nil, fmt.Errorf("you may only watch a single resource or type of resource at a time") - } - w, ok := r.sources[0].(Watchable) - if !ok { - info, err := r.Infos() - if err != nil { - return nil, err - } - if len(info) != 1 { - return nil, fmt.Errorf("watch is only supported on individual resources and resource collections - %d resources were found", len(info)) - } - return info[0].Watch(resourceVersion) - } - return w.Watch(resourceVersion) -} - -// AsVersionedObject converts a list of infos into a single object - either a List containing -// the objects as children, or if only a single Object is present, as that object. The provided -// version will be preferred as the conversion target, but the Object's mapping version will be -// used if that version is not present. -func AsVersionedObject(infos []*Info, forceList bool, version unversioned.GroupVersion, encoder runtime.Encoder) (runtime.Object, error) { - objects, err := AsVersionedObjects(infos, version, encoder) - if err != nil { - return nil, err - } - - var object runtime.Object - if len(objects) == 1 && !forceList { - object = objects[0] - } else { - object = &api.List{Items: objects} - converted, err := tryConvert(api.Scheme, object, version, registered.GroupOrDie(api.GroupName).GroupVersion) - if err != nil { - return nil, err - } - object = converted - } - return object, nil -} - -// AsVersionedObjects converts a list of infos into versioned objects. 
The provided -// version will be preferred as the conversion target, but the Object's mapping version will be -// used if that version is not present. -func AsVersionedObjects(infos []*Info, version unversioned.GroupVersion, encoder runtime.Encoder) ([]runtime.Object, error) { - objects := []runtime.Object{} - for _, info := range infos { - if info.Object == nil { - continue - } - - // TODO: use info.VersionedObject as the value? - switch obj := info.Object.(type) { - case *extensions.ThirdPartyResourceData: - objects = append(objects, &runtime.Unknown{Raw: obj.Data}) - continue - } - - // objects that are not part of api.Scheme must be converted to JSON - // TODO: convert to map[string]interface{}, attach to runtime.Unknown? - if !version.IsEmpty() { - if _, _, err := api.Scheme.ObjectKinds(info.Object); runtime.IsNotRegisteredError(err) { - // TODO: ideally this would encode to version, but we don't expose multiple codecs here. - data, err := runtime.Encode(encoder, info.Object) - if err != nil { - return nil, err - } - // TODO: Set ContentEncoding and ContentType. - objects = append(objects, &runtime.Unknown{Raw: data}) - continue - } - } - - converted, err := tryConvert(info.Mapping.ObjectConvertor, info.Object, version, info.Mapping.GroupVersionKind.GroupVersion()) - if err != nil { - return nil, err - } - objects = append(objects, converted) - } - return objects, nil -} - -// tryConvert attempts to convert the given object to the provided versions in order. This function assumes -// the object is in internal version. -func tryConvert(convertor runtime.ObjectConvertor, object runtime.Object, versions ...unversioned.GroupVersion) (runtime.Object, error) { - var last error - for _, version := range versions { - if version.IsEmpty() { - return object, nil - } - obj, err := convertor.ConvertToVersion(object, version) - if err != nil { - last = err - continue - } - return obj, nil - } - return nil, last -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/selector.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/selector.go deleted file mode 100644 index 047a814ae..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/selector.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/watch" -) - -// Selector is a Visitor for resources that match a label selector. -type Selector struct { - Client RESTClient - Mapping *meta.RESTMapping - Namespace string - Selector labels.Selector - Export bool -} - -// NewSelector creates a resource selector which hides details of getting items by their label selector. 
-func NewSelector(client RESTClient, mapping *meta.RESTMapping, namespace string, selector labels.Selector, export bool) *Selector { - return &Selector{ - Client: client, - Mapping: mapping, - Namespace: namespace, - Selector: selector, - Export: export, - } -} - -// Visit implements Visitor -func (r *Selector) Visit(fn VisitorFunc) error { - list, err := NewHelper(r.Client, r.Mapping).List(r.Namespace, r.ResourceMapping().GroupVersionKind.GroupVersion().String(), r.Selector, r.Export) - if err != nil { - if errors.IsBadRequest(err) || errors.IsNotFound(err) { - if r.Selector.Empty() { - return fmt.Errorf("Unable to list %q: %v", r.Mapping.Resource, err) - } else { - return fmt.Errorf("Unable to find %q that match the selector %q: %v", r.Mapping.Resource, r.Selector, err) - } - } - return err - } - accessor := r.Mapping.MetadataAccessor - resourceVersion, _ := accessor.ResourceVersion(list) - info := &Info{ - Client: r.Client, - Mapping: r.Mapping, - Namespace: r.Namespace, - - Object: list, - ResourceVersion: resourceVersion, - } - return fn(info, nil) -} - -func (r *Selector) Watch(resourceVersion string) (watch.Interface, error) { - return NewHelper(r.Client, r.Mapping).Watch(r.Namespace, resourceVersion, r.ResourceMapping().GroupVersionKind.GroupVersion().String(), r.Selector) -} - -// ResourceMapping returns the mapping for this resource and implements ResourceMapping -func (r *Selector) ResourceMapping() *meta.RESTMapping { - return r.Mapping -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/visitor.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource/visitor.go deleted file mode 100644 index 2430b312d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource/visitor.go +++ /dev/null @@ -1,643 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "bytes" - "fmt" - "io" - "net/http" - "net/url" - "os" - "path/filepath" - "time" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/validation" - "k8s.io/kubernetes/pkg/runtime" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/yaml" - "k8s.io/kubernetes/pkg/watch" -) - -const ( - constSTDINstr string = "STDIN" - stopValidateMessage = "if you choose to ignore these errors, turn validation off with --validate=false" -) - -// Visitor lets clients walk a list of resources. -type Visitor interface { - Visit(VisitorFunc) error -} - -// VisitorFunc implements the Visitor interface for a matching function. -// If there was a problem walking a list of resources, the incoming error -// will describe the problem and the function can decide how to handle that error. -// A nil returned indicates to accept an error to continue loops even when errors happen. -// This is useful for ignoring certain kinds of errors or aggregating errors in some way. 
-type VisitorFunc func(*Info, error) error - -// Watchable describes a resource that can be watched for changes that occur on the server, -// beginning after the provided resource version. -type Watchable interface { - Watch(resourceVersion string) (watch.Interface, error) -} - -// ResourceMapping allows an object to return the resource mapping associated with -// the resource or resources it represents. -type ResourceMapping interface { - ResourceMapping() *meta.RESTMapping -} - -// Info contains temporary info to execute a REST call, or show the results -// of an already completed REST call. -type Info struct { - Client RESTClient - Mapping *meta.RESTMapping - Namespace string - Name string - - // Optional, Source is the filename or URL to template file (.json or .yaml), - // or stdin to use to handle the resource - Source string - // Optional, this is the provided object in a versioned type before defaulting - // and conversions into its corresponding internal type. This is useful for - // reflecting on user intent which may be lost after defaulting and conversions. - VersionedObject interface{} - // Optional, this is the most recent value returned by the server if available - runtime.Object - // Optional, this is the most recent resource version the server knows about for - // this type of resource. It may not match the resource version of the object, - // but if set it should be equal to or newer than the resource version of the - // object (however the server defines resource version). - ResourceVersion string - // Optional, should this resource be exported, stripped of cluster-specific and instance specific fields - Export bool -} - -// NewInfo returns a new info object -func NewInfo(client RESTClient, mapping *meta.RESTMapping, namespace, name string, export bool) *Info { - return &Info{ - Client: client, - Mapping: mapping, - Namespace: namespace, - Name: name, - Export: export, - } -} - -// Visit implements Visitor -func (i *Info) Visit(fn VisitorFunc) error { - return fn(i, nil) -} - -// Get retrieves the object from the Namespace and Name fields -func (i *Info) Get() (err error) { - obj, err := NewHelper(i.Client, i.Mapping).Get(i.Namespace, i.Name, i.Export) - if err != nil { - return err - } - i.Object = obj - i.ResourceVersion, _ = i.Mapping.MetadataAccessor.ResourceVersion(obj) - return nil -} - -// Refresh updates the object with another object. If ignoreError is set -// the Object will be updated even if name, namespace, or resourceVersion -// attributes cannot be loaded from the object. -func (i *Info) Refresh(obj runtime.Object, ignoreError bool) error { - name, err := i.Mapping.MetadataAccessor.Name(obj) - if err != nil { - if !ignoreError { - return err - } - } else { - i.Name = name - } - namespace, err := i.Mapping.MetadataAccessor.Namespace(obj) - if err != nil { - if !ignoreError { - return err - } - } else { - i.Namespace = namespace - } - version, err := i.Mapping.MetadataAccessor.ResourceVersion(obj) - if err != nil { - if !ignoreError { - return err - } - } else { - i.ResourceVersion = version - } - i.Object = obj - return nil -} - -// Namespaced returns true if the object belongs to a namespace -func (i *Info) Namespaced() bool { - return i.Mapping != nil && i.Mapping.Scope.Name() == meta.RESTScopeNameNamespace -} - -// Watch returns server changes to this object after it was retrieved. 
-func (i *Info) Watch(resourceVersion string) (watch.Interface, error) { - return NewHelper(i.Client, i.Mapping).WatchSingle(i.Namespace, i.Name, resourceVersion) -} - -// ResourceMapping returns the mapping for this resource and implements ResourceMapping -func (i *Info) ResourceMapping() *meta.RESTMapping { - return i.Mapping -} - -// VisitorList implements Visit for the sub visitors it contains. The first error -// returned from a child Visitor will terminate iteration. -type VisitorList []Visitor - -// Visit implements Visitor -func (l VisitorList) Visit(fn VisitorFunc) error { - for i := range l { - if err := l[i].Visit(fn); err != nil { - return err - } - } - return nil -} - -// EagerVisitorList implements Visit for the sub visitors it contains. All errors -// will be captured and returned at the end of iteration. -type EagerVisitorList []Visitor - -// Visit implements Visitor, and gathers errors that occur during processing until -// all sub visitors have been visited. -func (l EagerVisitorList) Visit(fn VisitorFunc) error { - errs := []error(nil) - for i := range l { - if err := l[i].Visit(func(info *Info, err error) error { - if err != nil { - errs = append(errs, err) - return nil - } - if err := fn(info, nil); err != nil { - errs = append(errs, err) - } - return nil - }); err != nil { - errs = append(errs, err) - } - } - return utilerrors.NewAggregate(errs) -} - -func ValidateSchema(data []byte, schema validation.Schema) error { - if schema == nil { - return nil - } - data, err := yaml.ToJSON(data) - if err != nil { - return fmt.Errorf("error converting to YAML: %v", err) - } - if err := schema.ValidateBytes(data); err != nil { - return fmt.Errorf("error validating data: %v; %s", err, stopValidateMessage) - } - return nil -} - -// URLVisitor downloads the contents of a URL, and if successful, returns -// an info object representing the downloaded object. -type URLVisitor struct { - URL *url.URL - *StreamVisitor - HttpAttemptCount int -} - -func (v *URLVisitor) Visit(fn VisitorFunc) error { - body, err := readHttpWithRetries(httpgetImpl, time.Second, v.URL.String(), v.HttpAttemptCount) - if err != nil { - return err - } - defer body.Close() - v.StreamVisitor.Reader = body - return v.StreamVisitor.Visit(fn) -} - -// readHttpWithRetries tries to http.Get the v.URL retries times before giving up. -func readHttpWithRetries(get httpget, duration time.Duration, u string, attempts int) (io.ReadCloser, error) { - var err error - var body io.ReadCloser - if attempts <= 0 { - return nil, fmt.Errorf("http attempts must be greater than 0, was %d", attempts) - } - for i := 0; i < attempts; i++ { - var statusCode int - var status string - if i > 0 { - time.Sleep(duration) - } - - // Try to get the URL - statusCode, status, body, err = get(u) - - // Retry Errors - if err != nil { - continue - } - - // Error - Set the error condition from the StatusCode - if statusCode != 200 { - err = fmt.Errorf("unable to read URL %q, server reported %s, status code=%d", u, status, statusCode) - } - - if statusCode >= 500 && statusCode < 600 { - // Retry 500's - continue - } else { - // Don't retry other StatusCodes - break - } - } - return body, err -} - -// httpget Defines function to retrieve a url and return the results. Exists for unit test stubbing. -type httpget func(url string) (int, string, io.ReadCloser, error) - -// httpgetImpl Implements a function to retrieve a url and return the results. 
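readHttpWithRetries above treats transport errors and 5xx responses as retryable, sleeping between attempts and keeping the last error for the caller, while any other non-200 status fails immediately. A compact standalone version of that loop; the URL, attempt count, and delay in main are placeholders, and unlike the deleted code this sketch always closes bodies it does not return.

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

// getWithRetries issues GET requests until one succeeds with a non-5xx status,
// waiting delay between attempts. The caller owns the returned body.
func getWithRetries(url string, attempts int, delay time.Duration) (io.ReadCloser, error) {
	if attempts <= 0 {
		return nil, fmt.Errorf("attempts must be greater than 0, was %d", attempts)
	}
	var lastErr error
	for i := 0; i < attempts; i++ {
		if i > 0 {
			time.Sleep(delay)
		}
		resp, err := http.Get(url)
		if err != nil {
			lastErr = err // transport error: retry
			continue
		}
		if resp.StatusCode >= 500 && resp.StatusCode < 600 {
			resp.Body.Close()
			lastErr = fmt.Errorf("unable to read %q, server reported %s", url, resp.Status)
			continue // server error: retry
		}
		if resp.StatusCode != http.StatusOK {
			resp.Body.Close()
			return nil, fmt.Errorf("unable to read %q, server reported %s", url, resp.Status)
		}
		return resp.Body, nil
	}
	return nil, lastErr
}

func main() {
	body, err := getWithRetries("https://example.com/manifest.yaml", 3, time.Second)
	if err != nil {
		fmt.Println("giving up:", err)
		return
	}
	defer body.Close()
	fmt.Println("fetched manifest")
}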
-func httpgetImpl(url string) (int, string, io.ReadCloser, error) { - resp, err := http.Get(url) - if err != nil { - return 0, "", nil, err - } - return resp.StatusCode, resp.Status, resp.Body, nil -} - -// DecoratedVisitor will invoke the decorators in order prior to invoking the visitor function -// passed to Visit. An error will terminate the visit. -type DecoratedVisitor struct { - visitor Visitor - decorators []VisitorFunc -} - -// NewDecoratedVisitor will create a visitor that invokes the provided visitor functions before -// the user supplied visitor function is invoked, giving them the opportunity to mutate the Info -// object or terminate early with an error. -func NewDecoratedVisitor(v Visitor, fn ...VisitorFunc) Visitor { - if len(fn) == 0 { - return v - } - return DecoratedVisitor{v, fn} -} - -// Visit implements Visitor -func (v DecoratedVisitor) Visit(fn VisitorFunc) error { - return v.visitor.Visit(func(info *Info, err error) error { - if err != nil { - return err - } - for i := range v.decorators { - if err := v.decorators[i](info, nil); err != nil { - return err - } - } - return fn(info, nil) - }) -} - -// ContinueOnErrorVisitor visits each item and, if an error occurs on -// any individual item, returns an aggregate error after all items -// are visited. -type ContinueOnErrorVisitor struct { - Visitor -} - -// Visit returns nil if no error occurs during traversal, a regular -// error if one occurs, or if multiple errors occur, an aggregate -// error. If the provided visitor fails on any individual item it -// will not prevent the remaining items from being visited. An error -// returned by the visitor directly may still result in some items -// not being visited. -func (v ContinueOnErrorVisitor) Visit(fn VisitorFunc) error { - errs := []error{} - err := v.Visitor.Visit(func(info *Info, err error) error { - if err != nil { - errs = append(errs, err) - return nil - } - if err := fn(info, nil); err != nil { - errs = append(errs, err) - } - return nil - }) - if err != nil { - errs = append(errs, err) - } - if len(errs) == 1 { - return errs[0] - } - return utilerrors.NewAggregate(errs) -} - -// FlattenListVisitor flattens any objects that runtime.ExtractList recognizes as a list -// - has an "Items" public field that is a slice of runtime.Objects or objects satisfying -// that interface - into multiple Infos. An error on any sub item (for instance, if a List -// contains an object that does not have a registered client or resource) will terminate -// the visit. -// TODO: allow errors to be aggregated? -type FlattenListVisitor struct { - Visitor - *Mapper -} - -// NewFlattenListVisitor creates a visitor that will expand list style runtime.Objects -// into individual items and then visit them individually. 
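DecoratedVisitor and ContinueOnErrorVisitor above are both thin wrappers around the same Visitor interface: one runs a chain of mutating functions on each item before the caller's function sees it, the other collects per-item errors instead of stopping at the first. A self-contained sketch of the decorator half, with a simplified Item type standing in for Info:

package main

import "fmt"

// Item stands in for the Info struct in this sketch.
type Item struct{ Name, Namespace string }

// VisitorFunc and Visitor mirror the deleted interfaces in shape only.
type VisitorFunc func(*Item, error) error

type Visitor interface{ Visit(VisitorFunc) error }

// sliceVisitor visits a fixed list of items.
type sliceVisitor []*Item

func (s sliceVisitor) Visit(fn VisitorFunc) error {
	for _, it := range s {
		if err := fn(it, nil); err != nil {
			return err
		}
	}
	return nil
}

// decorated runs each decorator on an item before handing it to the caller.
type decorated struct {
	delegate   Visitor
	decorators []VisitorFunc
}

func (d decorated) Visit(fn VisitorFunc) error {
	return d.delegate.Visit(func(it *Item, err error) error {
		if err != nil {
			return err
		}
		for _, dec := range d.decorators {
			if err := dec(it, nil); err != nil {
				return err
			}
		}
		return fn(it, nil)
	})
}

func main() {
	setNamespace := func(it *Item, _ error) error {
		if it.Namespace == "" {
			it.Namespace = "default"
		}
		return nil
	}
	v := decorated{delegate: sliceVisitor{{Name: "web"}}, decorators: []VisitorFunc{setNamespace}}
	_ = v.Visit(func(it *Item, _ error) error {
		fmt.Printf("%s/%s\n", it.Namespace, it.Name) // default/web
		return nil
	})
}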
-func NewFlattenListVisitor(v Visitor, mapper *Mapper) Visitor { - return FlattenListVisitor{v, mapper} -} - -func (v FlattenListVisitor) Visit(fn VisitorFunc) error { - return v.Visitor.Visit(func(info *Info, err error) error { - if err != nil { - return err - } - if info.Object == nil { - return fn(info, nil) - } - items, err := meta.ExtractList(info.Object) - if err != nil { - return fn(info, nil) - } - if errs := runtime.DecodeList(items, struct { - runtime.ObjectTyper - runtime.Decoder - }{v.Mapper, v.Mapper.Decoder}); len(errs) > 0 { - return utilerrors.NewAggregate(errs) - } - - // If we have a GroupVersionKind on the list, prioritize that when asking for info on the objects contained in the list - var preferredGVKs []unversioned.GroupVersionKind - if info.Mapping != nil && !info.Mapping.GroupVersionKind.IsEmpty() { - preferredGVKs = append(preferredGVKs, info.Mapping.GroupVersionKind) - } - - for i := range items { - item, err := v.InfoForObject(items[i], preferredGVKs) - if err != nil { - return err - } - if len(info.ResourceVersion) != 0 { - item.ResourceVersion = info.ResourceVersion - } - if err := fn(item, nil); err != nil { - return err - } - } - return nil - }) -} - -func ignoreFile(path string, extensions []string) bool { - if len(extensions) == 0 { - return false - } - ext := filepath.Ext(path) - for _, s := range extensions { - if s == ext { - return false - } - } - return true -} - -// FileVisitorForSTDIN return a special FileVisitor just for STDIN -func FileVisitorForSTDIN(mapper *Mapper, schema validation.Schema) Visitor { - return &FileVisitor{ - Path: constSTDINstr, - StreamVisitor: NewStreamVisitor(nil, mapper, constSTDINstr, schema), - } -} - -// ExpandPathsToFileVisitors will return a slice of FileVisitors that will handle files from the provided path. -// After FileVisitors open the files, they will pass a io.Reader to a StreamVisitor to do the reading. (stdin -// is also taken care of). Paths argument also accepts a single file, and will return a single visitor -func ExpandPathsToFileVisitors(mapper *Mapper, paths string, recursive bool, extensions []string, schema validation.Schema) ([]Visitor, error) { - var visitors []Visitor - err := filepath.Walk(paths, func(path string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - if fi.IsDir() { - if path != paths && !recursive { - return filepath.SkipDir - } - return nil - } - // Don't check extension if the filepath was passed explicitly - if path != paths && ignoreFile(path, extensions) { - return nil - } - - visitor := &FileVisitor{ - Path: path, - StreamVisitor: NewStreamVisitor(nil, mapper, path, schema), - } - - visitors = append(visitors, visitor) - return nil - }) - - if err != nil { - return nil, err - } - return visitors, nil -} - -// FileVisitor is wrapping around a StreamVisitor, to handle open/close files -type FileVisitor struct { - Path string - *StreamVisitor -} - -// Visit in a FileVisitor is just taking care of opening/closing files -func (v *FileVisitor) Visit(fn VisitorFunc) error { - var f *os.File - if v.Path == constSTDINstr { - f = os.Stdin - } else { - var err error - if f, err = os.Open(v.Path); err != nil { - return err - } - } - defer f.Close() - v.StreamVisitor.Reader = f - - return v.StreamVisitor.Visit(fn) -} - -// StreamVisitor reads objects from an io.Reader and walks them. A stream visitor can only be -// visited once. 
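ExpandPathsToFileVisitors above turns a path argument into one visitor per file, descending into sub-directories only when recursion was requested and skipping files whose extension is not in the accepted list; the extension check does not apply when the user named a file explicitly. A standalone sketch of that traversal, where the root path and extension list in main are placeholders:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// ignoreFile reports whether a path should be skipped because its extension is
// not in the accepted list; an empty list accepts everything.
func ignoreFile(path string, extensions []string) bool {
	if len(extensions) == 0 {
		return false
	}
	ext := filepath.Ext(path)
	for _, s := range extensions {
		if s == ext {
			return false
		}
	}
	return true
}

// expandPaths collects the files under root that would each get a FileVisitor.
func expandPaths(root string, recursive bool, extensions []string) ([]string, error) {
	var files []string
	err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if fi.IsDir() {
			if path != root && !recursive {
				return filepath.SkipDir
			}
			return nil
		}
		// Extension filtering only applies to files discovered by walking,
		// not to a file the user pointed at explicitly.
		if path != root && ignoreFile(path, extensions) {
			return nil
		}
		files = append(files, path)
		return nil
	})
	return files, err
}

func main() {
	files, err := expandPaths("./manifests", false, []string{".json", ".yaml", ".yml"})
	fmt.Println(files, err)
}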
-// TODO: depends on objects being in JSON format before being passed to decode - need to implement -// a stream decoder method on runtime.Codec to properly handle this. -type StreamVisitor struct { - io.Reader - *Mapper - - Source string - Schema validation.Schema -} - -// NewStreamVisitor is a helper function that is useful when we want to change the fields of the struct but keep calls the same. -func NewStreamVisitor(r io.Reader, mapper *Mapper, source string, schema validation.Schema) *StreamVisitor { - return &StreamVisitor{ - Reader: r, - Mapper: mapper, - Source: source, - Schema: schema, - } -} - -// Visit implements Visitor over a stream. StreamVisitor is able to distinct multiple resources in one stream. -func (v *StreamVisitor) Visit(fn VisitorFunc) error { - d := yaml.NewYAMLOrJSONDecoder(v.Reader, 4096) - for { - ext := runtime.RawExtension{} - if err := d.Decode(&ext); err != nil { - if err == io.EOF { - return nil - } - return err - } - // TODO: This needs to be able to handle object in other encodings and schemas. - ext.Raw = bytes.TrimSpace(ext.Raw) - if len(ext.Raw) == 0 || bytes.Equal(ext.Raw, []byte("null")) { - continue - } - if err := ValidateSchema(ext.Raw, v.Schema); err != nil { - return fmt.Errorf("error validating %q: %v", v.Source, err) - } - info, err := v.InfoForData(ext.Raw, v.Source) - if err != nil { - if fnErr := fn(info, err); fnErr != nil { - return fnErr - } - continue - } - if err := fn(info, nil); err != nil { - return err - } - } -} - -func UpdateObjectNamespace(info *Info, err error) error { - if err != nil { - return err - } - if info.Object != nil { - return info.Mapping.MetadataAccessor.SetNamespace(info.Object, info.Namespace) - } - return nil -} - -// FilterNamespace omits the namespace if the object is not namespace scoped -func FilterNamespace(info *Info, err error) error { - if err != nil { - return err - } - if !info.Namespaced() { - info.Namespace = "" - UpdateObjectNamespace(info, nil) - } - return nil -} - -// SetNamespace ensures that every Info object visited will have a namespace -// set. If info.Object is set, it will be mutated as well. -func SetNamespace(namespace string) VisitorFunc { - return func(info *Info, err error) error { - if err != nil { - return err - } - if !info.Namespaced() { - return nil - } - if len(info.Namespace) == 0 { - info.Namespace = namespace - UpdateObjectNamespace(info, nil) - } - return nil - } -} - -// RequireNamespace will either set a namespace if none is provided on the -// Info object, or if the namespace is set and does not match the provided -// value, returns an error. This is intended to guard against administrators -// accidentally operating on resources outside their namespace. -func RequireNamespace(namespace string) VisitorFunc { - return func(info *Info, err error) error { - if err != nil { - return err - } - if !info.Namespaced() { - return nil - } - if len(info.Namespace) == 0 { - info.Namespace = namespace - UpdateObjectNamespace(info, nil) - return nil - } - if info.Namespace != namespace { - return fmt.Errorf("the namespace from the provided object %q does not match the namespace %q. You must pass '--namespace=%s' to perform this operation.", info.Namespace, namespace, info.Namespace) - } - return nil - } -} - -// RetrieveLatest updates the Object on each Info by invoking a standard client -// Get. 
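StreamVisitor.Visit above decodes one document at a time from a reader until EOF, skipping empty or null documents and validating each before building an Info. The looping shape is easy to reproduce with the standard library's json.Decoder; this sketch handles only JSON and omits the YAML-or-JSON decoder and schema validation used by the deleted code.

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

// visitStream decodes consecutive JSON documents from r and calls fn for each.
func visitStream(r io.Reader, fn func(doc map[string]interface{}) error) error {
	d := json.NewDecoder(r)
	for {
		var doc map[string]interface{}
		if err := d.Decode(&doc); err != nil {
			if err == io.EOF {
				return nil // stream exhausted
			}
			return err
		}
		if len(doc) == 0 {
			continue // skip empty or null documents
		}
		if err := fn(doc); err != nil {
			return err
		}
	}
}

func main() {
	stream := strings.NewReader(`{"kind":"Pod","metadata":{"name":"a"}} {"kind":"Service","metadata":{"name":"b"}}`)
	_ = visitStream(stream, func(doc map[string]interface{}) error {
		fmt.Println(doc["kind"]) // Pod, then Service
		return nil
	})
}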
-func RetrieveLatest(info *Info, err error) error { - if err != nil { - return err - } - if meta.IsListType(info.Object) { - return fmt.Errorf("watch is only supported on individual resources and resource collections, but a list of resources is found") - } - if len(info.Name) == 0 { - return nil - } - if info.Namespaced() && len(info.Namespace) == 0 { - return fmt.Errorf("no namespace set on resource %s %q", info.Mapping.Resource, info.Name) - } - obj, err := NewHelper(info.Client, info.Mapping).Get(info.Namespace, info.Name, info.Export) - if err != nil { - return err - } - info.Object = obj - info.ResourceVersion, _ = info.Mapping.MetadataAccessor.ResourceVersion(obj) - return nil -} - -// RetrieveLazy updates the object if it has not been loaded yet. -func RetrieveLazy(info *Info, err error) error { - if err != nil { - return err - } - if info.Object == nil { - return info.Get() - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/resource_printer.go b/vendor/k8s.io/kubernetes/pkg/kubectl/resource_printer.go deleted file mode 100644 index 54bc7b4e5..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/resource_printer.go +++ /dev/null @@ -1,2198 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "reflect" - "sort" - "strings" - "text/tabwriter" - "text/template" - "time" - - "github.com/ghodss/yaml" - "github.com/golang/glog" - "k8s.io/kubernetes/federation/apis/federation" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/autoscaling" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/apis/rbac" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/jsonpath" - "k8s.io/kubernetes/pkg/util/sets" -) - -const ( - tabwriterMinWidth = 10 - tabwriterWidth = 4 - tabwriterPadding = 3 - tabwriterPadChar = ' ' - tabwriterFlags = 0 - loadBalancerWidth = 16 -) - -// GetPrinter takes a format type, an optional format argument. It will return true -// if the format is generic (untyped), otherwise it will return false. The printer -// is agnostic to schema versions, so you must send arguments to PrintObj in the -// version you wish them to be shown using a VersionedPrinter (typically when -// generic is true). 
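RetrieveLazy above issues a GET only when the Info has no object yet, which lets the same decorator chain serve both freshly parsed files and objects already fetched from the server. The guard is simple enough to show in isolation; the item type and fetch function here are stand-ins invented for the example, not Helper.Get.

package main

import "fmt"

// object is a placeholder for a fetched resource.
type object struct{ Name string }

// item holds an optional, lazily populated object.
type item struct {
	Name string
	Obj  *object
}

// retrieveLazy fetches the object only when it has not been loaded yet.
func retrieveLazy(it *item, fetch func(name string) (*object, error)) error {
	if it.Obj != nil {
		return nil // already populated: nothing to do
	}
	obj, err := fetch(it.Name)
	if err != nil {
		return err
	}
	it.Obj = obj
	return nil
}

func main() {
	calls := 0
	fetch := func(name string) (*object, error) { calls++; return &object{Name: name}, nil }
	it := &item{Name: "web"}
	_ = retrieveLazy(it, fetch)
	_ = retrieveLazy(it, fetch)     // second call is a no-op
	fmt.Println(it.Obj.Name, calls) // web 1
}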
-func GetPrinter(format, formatArgument string) (ResourcePrinter, bool, error) { - var printer ResourcePrinter - switch format { - case "json": - printer = &JSONPrinter{} - case "yaml": - printer = &YAMLPrinter{} - case "name": - printer = &NamePrinter{ - // TODO: this is wrong, these should be provided as an argument to GetPrinter - Typer: api.Scheme, - Decoder: api.Codecs.UniversalDecoder(), - } - case "template", "go-template": - if len(formatArgument) == 0 { - return nil, false, fmt.Errorf("template format specified but no template given") - } - var err error - printer, err = NewTemplatePrinter([]byte(formatArgument)) - if err != nil { - return nil, false, fmt.Errorf("error parsing template %s, %v\n", formatArgument, err) - } - case "templatefile", "go-template-file": - if len(formatArgument) == 0 { - return nil, false, fmt.Errorf("templatefile format specified but no template file given") - } - data, err := ioutil.ReadFile(formatArgument) - if err != nil { - return nil, false, fmt.Errorf("error reading template %s, %v\n", formatArgument, err) - } - printer, err = NewTemplatePrinter(data) - if err != nil { - return nil, false, fmt.Errorf("error parsing template %s, %v\n", string(data), err) - } - case "jsonpath": - if len(formatArgument) == 0 { - return nil, false, fmt.Errorf("jsonpath template format specified but no template given") - } - var err error - printer, err = NewJSONPathPrinter(formatArgument) - if err != nil { - return nil, false, fmt.Errorf("error parsing jsonpath %s, %v\n", formatArgument, err) - } - case "jsonpath-file": - if len(formatArgument) == 0 { - return nil, false, fmt.Errorf("jsonpath file format specified but no template file file given") - } - data, err := ioutil.ReadFile(formatArgument) - if err != nil { - return nil, false, fmt.Errorf("error reading template %s, %v\n", formatArgument, err) - } - printer, err = NewJSONPathPrinter(string(data)) - if err != nil { - return nil, false, fmt.Errorf("error parsing template %s, %v\n", string(data), err) - } - case "custom-columns": - var err error - if printer, err = NewCustomColumnsPrinterFromSpec(formatArgument, api.Codecs.UniversalDecoder()); err != nil { - return nil, false, err - } - case "custom-columns-file": - file, err := os.Open(formatArgument) - if err != nil { - return nil, false, fmt.Errorf("error reading template %s, %v\n", formatArgument, err) - } - if printer, err = NewCustomColumnsPrinterFromTemplate(file, api.Codecs.UniversalDecoder()); err != nil { - return nil, false, err - } - case "wide": - fallthrough - case "": - return nil, false, nil - default: - return nil, false, fmt.Errorf("output format %q not recognized", format) - } - return printer, true, nil -} - -// ResourcePrinter is an interface that knows how to print runtime objects. -type ResourcePrinter interface { - // Print receives a runtime object, formats it and prints it to a writer. - PrintObj(runtime.Object, io.Writer) error - HandledResources() []string -} - -// ResourcePrinterFunc is a function that can print objects -type ResourcePrinterFunc func(runtime.Object, io.Writer) error - -// PrintObj implements ResourcePrinter -func (fn ResourcePrinterFunc) PrintObj(obj runtime.Object, w io.Writer) error { - return fn(obj, w) -} - -// TODO: implement HandledResources() -func (fn ResourcePrinterFunc) HandledResources() []string { - return []string{} -} - -// VersionedPrinter takes runtime objects and ensures they are converted to a given API version -// prior to being passed to a nested printer. 
-type VersionedPrinter struct { - printer ResourcePrinter - convertor runtime.ObjectConvertor - versions []unversioned.GroupVersion -} - -// NewVersionedPrinter wraps a printer to convert objects to a known API version prior to printing. -func NewVersionedPrinter(printer ResourcePrinter, convertor runtime.ObjectConvertor, versions ...unversioned.GroupVersion) ResourcePrinter { - return &VersionedPrinter{ - printer: printer, - convertor: convertor, - versions: versions, - } -} - -// PrintObj implements ResourcePrinter -func (p *VersionedPrinter) PrintObj(obj runtime.Object, w io.Writer) error { - if len(p.versions) == 0 { - return fmt.Errorf("no version specified, object cannot be converted") - } - for _, version := range p.versions { - if version.IsEmpty() { - continue - } - converted, err := p.convertor.ConvertToVersion(obj, version) - if runtime.IsNotRegisteredError(err) { - continue - } - if err != nil { - return err - } - return p.printer.PrintObj(converted, w) - } - return fmt.Errorf("the object cannot be converted to any of the versions: %v", p.versions) -} - -// TODO: implement HandledResources() -func (p *VersionedPrinter) HandledResources() []string { - return []string{} -} - -// NamePrinter is an implementation of ResourcePrinter which outputs "resource/name" pair of an object. -type NamePrinter struct { - Decoder runtime.Decoder - Typer runtime.ObjectTyper -} - -// PrintObj is an implementation of ResourcePrinter.PrintObj which decodes the object -// and print "resource/name" pair. If the object is a List, print all items in it. -func (p *NamePrinter) PrintObj(obj runtime.Object, w io.Writer) error { - if meta.IsListType(obj) { - items, err := meta.ExtractList(obj) - if err != nil { - return err - } - if errs := runtime.DecodeList(items, p.Decoder, runtime.UnstructuredJSONScheme); len(errs) > 0 { - return utilerrors.NewAggregate(errs) - } - for _, obj := range items { - if err := p.PrintObj(obj, w); err != nil { - return err - } - } - return nil - } - - // TODO: this is wrong, runtime.Unknown and runtime.Unstructured are not handled properly here. - - name := "<unknown>" - if acc, err := meta.Accessor(obj); err == nil { - if n := acc.GetName(); len(n) > 0 { - name = n - } - } - - if gvks, _, err := p.Typer.ObjectKinds(obj); err == nil { - // TODO: this is wrong, it assumes that meta knows about all Kinds - should take a RESTMapper - _, resource := meta.KindToResource(gvks[0]) - fmt.Fprintf(w, "%s/%s\n", resource.Resource, name) - } else { - fmt.Fprintf(w, "<unknown>/%s\n", name) - } - - return nil -} - -// TODO: implement HandledResources() -func (p *NamePrinter) HandledResources() []string { - return []string{} -} - -// JSONPrinter is an implementation of ResourcePrinter which outputs an object as JSON. -type JSONPrinter struct { -} - -// PrintObj is an implementation of ResourcePrinter.PrintObj which simply writes the object to the Writer. -func (p *JSONPrinter) PrintObj(obj runtime.Object, w io.Writer) error { - switch obj := obj.(type) { - case *runtime.Unknown: - _, err := w.Write(obj.Raw) - return err - } - - data, err := json.Marshal(obj) - if err != nil { - return err - } - dst := bytes.Buffer{} - err = json.Indent(&dst, data, "", " ") - dst.WriteByte('\n') - _, err = w.Write(dst.Bytes()) - return err -} - -// TODO: implement HandledResources() -func (p *JSONPrinter) HandledResources() []string { - return []string{} -} - -// YAMLPrinter is an implementation of ResourcePrinter which outputs an object as YAML. 
-// The input object is assumed to be in the internal version of an API and is converted -// to the given version first. -type YAMLPrinter struct { - version string - convertor runtime.ObjectConvertor -} - -// PrintObj prints the data as YAML. -func (p *YAMLPrinter) PrintObj(obj runtime.Object, w io.Writer) error { - switch obj := obj.(type) { - case *runtime.Unknown: - data, err := yaml.JSONToYAML(obj.Raw) - if err != nil { - return err - } - _, err = w.Write(data) - return err - } - - output, err := yaml.Marshal(obj) - if err != nil { - return err - } - _, err = fmt.Fprint(w, string(output)) - return err -} - -// TODO: implement HandledResources() -func (p *YAMLPrinter) HandledResources() []string { - return []string{} -} - -type handlerEntry struct { - columns []string - printFunc reflect.Value -} - -type PrintOptions struct { - NoHeaders bool - WithNamespace bool - Wide bool - ShowAll bool - ShowLabels bool - AbsoluteTimestamps bool - ColumnLabels []string -} - -// HumanReadablePrinter is an implementation of ResourcePrinter which attempts to provide -// more elegant output. It is not threadsafe, but you may call PrintObj repeatedly; headers -// will only be printed if the object type changes. This makes it useful for printing items -// received from watches. -type HumanReadablePrinter struct { - handlerMap map[reflect.Type]*handlerEntry - options PrintOptions - lastType reflect.Type -} - -// NewHumanReadablePrinter creates a HumanReadablePrinter. -func NewHumanReadablePrinter(noHeaders, withNamespace bool, wide bool, showAll bool, showLabels bool, absoluteTimestamps bool, columnLabels []string) *HumanReadablePrinter { - printer := &HumanReadablePrinter{ - handlerMap: make(map[reflect.Type]*handlerEntry), - options: PrintOptions{ - NoHeaders: noHeaders, - WithNamespace: withNamespace, - Wide: wide, - ShowAll: showAll, - ShowLabels: showLabels, - AbsoluteTimestamps: absoluteTimestamps, - ColumnLabels: columnLabels, - }, - } - printer.addDefaultHandlers() - return printer -} - -// Handler adds a print handler with a given set of columns to HumanReadablePrinter instance. -// See validatePrintHandlerFunc for required method signature. -func (h *HumanReadablePrinter) Handler(columns []string, printFunc interface{}) error { - printFuncValue := reflect.ValueOf(printFunc) - if err := h.validatePrintHandlerFunc(printFuncValue); err != nil { - glog.Errorf("Unable to add print handler: %v", err) - return err - } - objType := printFuncValue.Type().In(0) - h.handlerMap[objType] = &handlerEntry{ - columns: columns, - printFunc: printFuncValue, - } - return nil -} - -// validatePrintHandlerFunc validates print handler signature. -// printFunc is the function that will be called to print an object. -// It must be of the following type: -// func printFunc(object ObjectType, w io.Writer, options PrintOptions) error -// where ObjectType is the type of the object that will be printed. -func (h *HumanReadablePrinter) validatePrintHandlerFunc(printFunc reflect.Value) error { - if printFunc.Kind() != reflect.Func { - return fmt.Errorf("invalid print handler. %#v is not a function", printFunc) - } - funcType := printFunc.Type() - if funcType.NumIn() != 3 || funcType.NumOut() != 1 { - return fmt.Errorf("invalid print handler." 
+ - "Must accept 3 parameters and return 1 value.") - } - if funcType.In(1) != reflect.TypeOf((*io.Writer)(nil)).Elem() || - funcType.In(2) != reflect.TypeOf((*PrintOptions)(nil)).Elem() || - funcType.Out(0) != reflect.TypeOf((*error)(nil)).Elem() { - return fmt.Errorf("invalid print handler. The expected signature is: "+ - "func handler(obj %v, w io.Writer, options PrintOptions) error", funcType.In(0)) - } - return nil -} - -func (h *HumanReadablePrinter) HandledResources() []string { - keys := make([]string, 0) - - for k := range h.handlerMap { - // k.String looks like "*api.PodList" and we want just "pod" - api := strings.Split(k.String(), ".") - resource := api[len(api)-1] - if strings.HasSuffix(resource, "List") { - continue - } - resource = strings.ToLower(resource) - keys = append(keys, resource) - } - return keys -} - -// NOTE: When adding a new resource type here, please update the list -// pkg/kubectl/cmd/get.go to reflect the new resource type. -var podColumns = []string{"NAME", "READY", "STATUS", "RESTARTS", "AGE"} -var podTemplateColumns = []string{"TEMPLATE", "CONTAINER(S)", "IMAGE(S)", "PODLABELS"} -var replicationControllerColumns = []string{"NAME", "DESIRED", "CURRENT", "AGE"} -var replicaSetColumns = []string{"NAME", "DESIRED", "CURRENT", "AGE"} -var jobColumns = []string{"NAME", "DESIRED", "SUCCESSFUL", "AGE"} -var serviceColumns = []string{"NAME", "CLUSTER-IP", "EXTERNAL-IP", "PORT(S)", "AGE"} -var ingressColumns = []string{"NAME", "HOSTS", "ADDRESS", "PORTS", "AGE"} -var petSetColumns = []string{"NAME", "DESIRED", "CURRENT", "AGE"} -var endpointColumns = []string{"NAME", "ENDPOINTS", "AGE"} -var nodeColumns = []string{"NAME", "STATUS", "AGE"} -var daemonSetColumns = []string{"NAME", "DESIRED", "CURRENT", "NODE-SELECTOR", "AGE"} -var eventColumns = []string{"LASTSEEN", "FIRSTSEEN", "COUNT", "NAME", "KIND", "SUBOBJECT", "TYPE", "REASON", "SOURCE", "MESSAGE"} -var limitRangeColumns = []string{"NAME", "AGE"} -var resourceQuotaColumns = []string{"NAME", "AGE"} -var namespaceColumns = []string{"NAME", "STATUS", "AGE"} -var secretColumns = []string{"NAME", "TYPE", "DATA", "AGE"} -var serviceAccountColumns = []string{"NAME", "SECRETS", "AGE"} -var persistentVolumeColumns = []string{"NAME", "CAPACITY", "ACCESSMODES", "STATUS", "CLAIM", "REASON", "AGE"} -var persistentVolumeClaimColumns = []string{"NAME", "STATUS", "VOLUME", "CAPACITY", "ACCESSMODES", "AGE"} -var componentStatusColumns = []string{"NAME", "STATUS", "MESSAGE", "ERROR"} -var thirdPartyResourceColumns = []string{"NAME", "DESCRIPTION", "VERSION(S)"} -var roleColumns = []string{"NAME", "AGE"} -var roleBindingColumns = []string{"NAME", "AGE"} -var clusterRoleColumns = []string{"NAME", "AGE"} -var clusterRoleBindingColumns = []string{"NAME", "AGE"} - -// TODO: consider having 'KIND' for third party resource data -var thirdPartyResourceDataColumns = []string{"NAME", "LABELS", "DATA"} -var horizontalPodAutoscalerColumns = []string{"NAME", "REFERENCE", "TARGET", "CURRENT", "MINPODS", "MAXPODS", "AGE"} -var withNamespacePrefixColumns = []string{"NAMESPACE"} // TODO(erictune): print cluster name too. 
-var deploymentColumns = []string{"NAME", "DESIRED", "CURRENT", "UP-TO-DATE", "AVAILABLE", "AGE"} -var configMapColumns = []string{"NAME", "DATA", "AGE"} -var podSecurityPolicyColumns = []string{"NAME", "PRIV", "CAPS", "VOLUMEPLUGINS", "SELINUX", "RUNASUSER"} -var clusterColumns = []string{"NAME", "STATUS", "VERSION", "AGE"} -var networkPolicyColumns = []string{"NAME", "POD-SELECTOR", "AGE"} - -// addDefaultHandlers adds print handlers for default Kubernetes types. -func (h *HumanReadablePrinter) addDefaultHandlers() { - h.Handler(podColumns, printPod) - h.Handler(podColumns, printPodList) - h.Handler(podTemplateColumns, printPodTemplate) - h.Handler(podTemplateColumns, printPodTemplateList) - h.Handler(replicationControllerColumns, printReplicationController) - h.Handler(replicationControllerColumns, printReplicationControllerList) - h.Handler(replicaSetColumns, printReplicaSet) - h.Handler(replicaSetColumns, printReplicaSetList) - h.Handler(daemonSetColumns, printDaemonSet) - h.Handler(daemonSetColumns, printDaemonSetList) - h.Handler(jobColumns, printJob) - h.Handler(jobColumns, printJobList) - h.Handler(serviceColumns, printService) - h.Handler(serviceColumns, printServiceList) - h.Handler(ingressColumns, printIngress) - h.Handler(ingressColumns, printIngressList) - h.Handler(petSetColumns, printPetSet) - h.Handler(petSetColumns, printPetSetList) - h.Handler(endpointColumns, printEndpoints) - h.Handler(endpointColumns, printEndpointsList) - h.Handler(nodeColumns, printNode) - h.Handler(nodeColumns, printNodeList) - h.Handler(eventColumns, printEvent) - h.Handler(eventColumns, printEventList) - h.Handler(limitRangeColumns, printLimitRange) - h.Handler(limitRangeColumns, printLimitRangeList) - h.Handler(resourceQuotaColumns, printResourceQuota) - h.Handler(resourceQuotaColumns, printResourceQuotaList) - h.Handler(namespaceColumns, printNamespace) - h.Handler(namespaceColumns, printNamespaceList) - h.Handler(secretColumns, printSecret) - h.Handler(secretColumns, printSecretList) - h.Handler(serviceAccountColumns, printServiceAccount) - h.Handler(serviceAccountColumns, printServiceAccountList) - h.Handler(persistentVolumeClaimColumns, printPersistentVolumeClaim) - h.Handler(persistentVolumeClaimColumns, printPersistentVolumeClaimList) - h.Handler(persistentVolumeColumns, printPersistentVolume) - h.Handler(persistentVolumeColumns, printPersistentVolumeList) - h.Handler(componentStatusColumns, printComponentStatus) - h.Handler(componentStatusColumns, printComponentStatusList) - h.Handler(thirdPartyResourceColumns, printThirdPartyResource) - h.Handler(thirdPartyResourceColumns, printThirdPartyResourceList) - h.Handler(deploymentColumns, printDeployment) - h.Handler(deploymentColumns, printDeploymentList) - h.Handler(horizontalPodAutoscalerColumns, printHorizontalPodAutoscaler) - h.Handler(horizontalPodAutoscalerColumns, printHorizontalPodAutoscalerList) - h.Handler(configMapColumns, printConfigMap) - h.Handler(configMapColumns, printConfigMapList) - h.Handler(podSecurityPolicyColumns, printPodSecurityPolicy) - h.Handler(podSecurityPolicyColumns, printPodSecurityPolicyList) - h.Handler(thirdPartyResourceDataColumns, printThirdPartyResourceData) - h.Handler(thirdPartyResourceDataColumns, printThirdPartyResourceDataList) - h.Handler(clusterColumns, printCluster) - h.Handler(clusterColumns, printClusterList) - h.Handler(networkPolicyColumns, printNetworkPolicy) - h.Handler(networkPolicyColumns, printNetworkPolicyList) - h.Handler(roleColumns, printRole) - h.Handler(roleColumns, printRoleList) - 
h.Handler(roleBindingColumns, printRoleBinding) - h.Handler(roleBindingColumns, printRoleBindingList) - h.Handler(clusterRoleColumns, printClusterRole) - h.Handler(clusterRoleColumns, printClusterRoleList) - h.Handler(clusterRoleBindingColumns, printClusterRoleBinding) - h.Handler(clusterRoleBindingColumns, printClusterRoleBindingList) -} - -func (h *HumanReadablePrinter) unknown(data []byte, w io.Writer) error { - _, err := fmt.Fprintf(w, "Unknown object: %s", string(data)) - return err -} - -func (h *HumanReadablePrinter) printHeader(columnNames []string, w io.Writer) error { - if _, err := fmt.Fprintf(w, "%s\n", strings.Join(columnNames, "\t")); err != nil { - return err - } - return nil -} - -// Pass ports=nil for all ports. -func formatEndpoints(endpoints *api.Endpoints, ports sets.String) string { - if len(endpoints.Subsets) == 0 { - return "<none>" - } - list := []string{} - max := 3 - more := false - count := 0 - for i := range endpoints.Subsets { - ss := &endpoints.Subsets[i] - for i := range ss.Ports { - port := &ss.Ports[i] - if ports == nil || ports.Has(port.Name) { - for i := range ss.Addresses { - if len(list) == max { - more = true - } - addr := &ss.Addresses[i] - if !more { - list = append(list, fmt.Sprintf("%s:%d", addr.IP, port.Port)) - } - count++ - } - } - } - } - ret := strings.Join(list, ",") - if more { - return fmt.Sprintf("%s + %d more...", ret, count-max) - } - return ret -} - -func shortHumanDuration(d time.Duration) string { - // Allow deviation no more than 2 seconds(excluded) to tolerate machine time - // inconsistence, it can be considered as almost now. - if seconds := int(d.Seconds()); seconds < -1 { - return fmt.Sprintf("<invalid>") - } else if seconds < 0 { - return fmt.Sprintf("0s") - } else if seconds < 60 { - return fmt.Sprintf("%ds", seconds) - } else if minutes := int(d.Minutes()); minutes < 60 { - return fmt.Sprintf("%dm", minutes) - } else if hours := int(d.Hours()); hours < 24 { - return fmt.Sprintf("%dh", hours) - } else if hours < 24*364 { - return fmt.Sprintf("%dd", hours/24) - } - return fmt.Sprintf("%dy", int(d.Hours()/24/365)) -} - -// translateTimestamp returns the elapsed time since timestamp in -// human-readable approximation. 
-func translateTimestamp(timestamp unversioned.Time) string { - if timestamp.IsZero() { - return "<unknown>" - } - return shortHumanDuration(time.Now().Sub(timestamp.Time)) -} - -func printPod(pod *api.Pod, w io.Writer, options PrintOptions) error { - return printPodBase(pod, w, options) -} - -func printPodBase(pod *api.Pod, w io.Writer, options PrintOptions) error { - name := pod.Name - namespace := pod.Namespace - - restarts := 0 - totalContainers := len(pod.Spec.Containers) - readyContainers := 0 - - reason := string(pod.Status.Phase) - // if not printing all pods, skip terminated pods (default) - if !options.ShowAll && (reason == string(api.PodSucceeded) || reason == string(api.PodFailed)) { - return nil - } - if pod.Status.Reason != "" { - reason = pod.Status.Reason - } - - initializing := false - for i := range pod.Status.InitContainerStatuses { - container := pod.Status.InitContainerStatuses[i] - switch { - case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0: - continue - case container.State.Terminated != nil: - // initialization is failed - if len(container.State.Terminated.Reason) == 0 { - if container.State.Terminated.Signal != 0 { - reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal) - } else { - reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode) - } - } else { - reason = "Init:" + container.State.Terminated.Reason - } - initializing = true - case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing": - reason = "Init:" + container.State.Waiting.Reason - initializing = true - default: - reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers)) - initializing = true - } - break - } - if !initializing { - for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- { - container := pod.Status.ContainerStatuses[i] - - restarts += int(container.RestartCount) - if container.State.Waiting != nil && container.State.Waiting.Reason != "" { - reason = container.State.Waiting.Reason - } else if container.State.Terminated != nil && container.State.Terminated.Reason != "" { - reason = container.State.Terminated.Reason - } else if container.State.Terminated != nil && container.State.Terminated.Reason == "" { - if container.State.Terminated.Signal != 0 { - reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal) - } else { - reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode) - } - } else if container.Ready && container.State.Running != nil { - readyContainers++ - } - } - } - if pod.DeletionTimestamp != nil { - reason = "Terminating" - } - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%d/%d\t%s\t%d\t%s", - name, - readyContainers, - totalContainers, - reason, - restarts, - translateTimestamp(pod.CreationTimestamp), - ); err != nil { - return err - } - - if options.Wide { - nodeName := pod.Spec.NodeName - podIP := pod.Status.PodIP - if podIP == "" { - podIP = "<none>" - } - if _, err := fmt.Fprintf(w, "\t%s\t%s", - podIP, - nodeName, - ); err != nil { - return err - } - } - - if _, err := fmt.Fprint(w, AppendLabels(pod.Labels, options.ColumnLabels)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, pod.Labels)); err != nil { - return err - } - - return nil -} - -func printPodList(podList *api.PodList, w io.Writer, options PrintOptions) error { - for _, 
pod := range podList.Items { - if err := printPodBase(&pod, w, options); err != nil { - return err - } - } - return nil -} - -func printPodTemplate(pod *api.PodTemplate, w io.Writer, options PrintOptions) error { - name := pod.Name - namespace := pod.Namespace - - containers := pod.Template.Spec.Containers - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s", name); err != nil { - return err - } - if err := layoutContainers(containers, w); err != nil { - return err - } - if _, err := fmt.Fprintf(w, "\t%s", labels.FormatLabels(pod.Template.Labels)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(pod.Labels, options.ColumnLabels)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, pod.Labels)); err != nil { - return err - } - - return nil -} - -func printPodTemplateList(podList *api.PodTemplateList, w io.Writer, options PrintOptions) error { - for _, pod := range podList.Items { - if err := printPodTemplate(&pod, w, options); err != nil { - return err - } - } - return nil -} - -// TODO(AdoHe): try to put wide output in a single method -func printReplicationController(controller *api.ReplicationController, w io.Writer, options PrintOptions) error { - name := controller.Name - namespace := controller.Namespace - containers := controller.Spec.Template.Spec.Containers - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - - desiredReplicas := controller.Spec.Replicas - currentReplicas := controller.Status.Replicas - if _, err := fmt.Fprintf(w, "%s\t%d\t%d\t%s", - name, - desiredReplicas, - currentReplicas, - translateTimestamp(controller.CreationTimestamp), - ); err != nil { - return err - } - - if options.Wide { - if err := layoutContainers(containers, w); err != nil { - return err - } - if _, err := fmt.Fprintf(w, "\t%s", labels.FormatLabels(controller.Spec.Selector)); err != nil { - return err - } - } - if _, err := fmt.Fprint(w, AppendLabels(controller.Labels, options.ColumnLabels)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, controller.Labels)); err != nil { - return err - } - - return nil -} - -func printReplicationControllerList(list *api.ReplicationControllerList, w io.Writer, options PrintOptions) error { - for _, controller := range list.Items { - if err := printReplicationController(&controller, w, options); err != nil { - return err - } - } - return nil -} - -func printReplicaSet(rs *extensions.ReplicaSet, w io.Writer, options PrintOptions) error { - name := rs.Name - namespace := rs.Namespace - containers := rs.Spec.Template.Spec.Containers - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - - desiredReplicas := rs.Spec.Replicas - currentReplicas := rs.Status.Replicas - if _, err := fmt.Fprintf(w, "%s\t%d\t%d\t%s", - name, - desiredReplicas, - currentReplicas, - translateTimestamp(rs.CreationTimestamp), - ); err != nil { - return err - } - if options.Wide { - if err := layoutContainers(containers, w); err != nil { - return err - } - if _, err := fmt.Fprintf(w, "\t%s", unversioned.FormatLabelSelector(rs.Spec.Selector)); err != nil { - return err - } - } - if _, err := fmt.Fprint(w, AppendLabels(rs.Labels, options.ColumnLabels)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, rs.Labels)); err != 
nil { - return err - } - - return nil -} - -func printReplicaSetList(list *extensions.ReplicaSetList, w io.Writer, options PrintOptions) error { - for _, rs := range list.Items { - if err := printReplicaSet(&rs, w, options); err != nil { - return err - } - } - return nil -} - -func printCluster(c *federation.Cluster, w io.Writer, options PrintOptions) error { - var statuses []string - for _, condition := range c.Status.Conditions { - if condition.Status == api.ConditionTrue { - statuses = append(statuses, string(condition.Type)) - } else { - statuses = append(statuses, "Not"+string(condition.Type)) - } - } - if len(statuses) == 0 { - statuses = append(statuses, "Unknown") - } - - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\n", - c.Name, - strings.Join(statuses, ","), - translateTimestamp(c.CreationTimestamp), - ); err != nil { - return err - } - return nil -} -func printClusterList(list *federation.ClusterList, w io.Writer, options PrintOptions) error { - for _, rs := range list.Items { - if err := printCluster(&rs, w, options); err != nil { - return err - } - } - return nil -} - -func printJob(job *batch.Job, w io.Writer, options PrintOptions) error { - name := job.Name - namespace := job.Namespace - containers := job.Spec.Template.Spec.Containers - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - - selector, err := unversioned.LabelSelectorAsSelector(job.Spec.Selector) - if err != nil { - // this shouldn't happen if LabelSelector passed validation - return err - } - if job.Spec.Completions != nil { - if _, err := fmt.Fprintf(w, "%s\t%d\t%d\t%s", - name, - *job.Spec.Completions, - job.Status.Succeeded, - translateTimestamp(job.CreationTimestamp), - ); err != nil { - return err - } - } else { - if _, err := fmt.Fprintf(w, "%s\t%s\t%d\t%s", - name, - "<none>", - job.Status.Succeeded, - translateTimestamp(job.CreationTimestamp), - ); err != nil { - return err - } - } - if options.Wide { - if err := layoutContainers(containers, w); err != nil { - return err - } - if _, err := fmt.Fprintf(w, "\t%s", selector.String()); err != nil { - return err - } - } - if _, err := fmt.Fprint(w, AppendLabels(job.Labels, options.ColumnLabels)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, job.Labels)); err != nil { - return err - } - - return nil -} - -func printJobList(list *batch.JobList, w io.Writer, options PrintOptions) error { - for _, job := range list.Items { - if err := printJob(&job, w, options); err != nil { - return err - } - } - return nil -} - -// loadBalancerStatusStringer behaves mostly like a string interface and converts the given status to a string. -// `wide` indicates whether the returned value is meant for --o=wide output. If not, it's clipped to 16 bytes. -func loadBalancerStatusStringer(s api.LoadBalancerStatus, wide bool) string { - ingress := s.Ingress - result := []string{} - for i := range ingress { - if ingress[i].IP != "" { - result = append(result, ingress[i].IP) - } else if ingress[i].Hostname != "" { - result = append(result, ingress[i].Hostname) - } - } - r := strings.Join(result, ",") - if !wide && len(r) > loadBalancerWidth { - r = r[0:(loadBalancerWidth-3)] + "..." 
- } - return r -} - -func getServiceExternalIP(svc *api.Service, wide bool) string { - switch svc.Spec.Type { - case api.ServiceTypeClusterIP: - if len(svc.Spec.ExternalIPs) > 0 { - return strings.Join(svc.Spec.ExternalIPs, ",") - } - return "<none>" - case api.ServiceTypeNodePort: - if len(svc.Spec.ExternalIPs) > 0 { - return strings.Join(svc.Spec.ExternalIPs, ",") - } - return "<nodes>" - case api.ServiceTypeLoadBalancer: - lbIps := loadBalancerStatusStringer(svc.Status.LoadBalancer, wide) - if len(svc.Spec.ExternalIPs) > 0 { - result := append(strings.Split(lbIps, ","), svc.Spec.ExternalIPs...) - return strings.Join(result, ",") - } - if len(lbIps) > 0 { - return lbIps - } - return "<pending>" - } - return "<unknown>" -} - -func makePortString(ports []api.ServicePort) string { - pieces := make([]string, len(ports)) - for ix := range ports { - port := &ports[ix] - pieces[ix] = fmt.Sprintf("%d/%s", port.Port, port.Protocol) - } - return strings.Join(pieces, ",") -} - -func printService(svc *api.Service, w io.Writer, options PrintOptions) error { - name := svc.Name - namespace := svc.Namespace - - internalIP := svc.Spec.ClusterIP - externalIP := getServiceExternalIP(svc, options.Wide) - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s", - name, - internalIP, - externalIP, - makePortString(svc.Spec.Ports), - translateTimestamp(svc.CreationTimestamp), - ); err != nil { - return err - } - if options.Wide { - if _, err := fmt.Fprintf(w, "\t%s", labels.FormatLabels(svc.Spec.Selector)); err != nil { - return err - } - } - if _, err := fmt.Fprint(w, AppendLabels(svc.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, svc.Labels)) - return err -} - -func printServiceList(list *api.ServiceList, w io.Writer, options PrintOptions) error { - for _, svc := range list.Items { - if err := printService(&svc, w, options); err != nil { - return err - } - } - return nil -} - -// backendStringer behaves just like a string interface and converts the given backend to a string. 
-func backendStringer(backend *extensions.IngressBackend) string { - if backend == nil { - return "" - } - return fmt.Sprintf("%v:%v", backend.ServiceName, backend.ServicePort.String()) -} - -func formatHosts(rules []extensions.IngressRule) string { - list := []string{} - max := 3 - more := false - for _, rule := range rules { - if len(list) == max { - more = true - } - if !more && len(rule.Host) != 0 { - list = append(list, rule.Host) - } - } - if len(list) == 0 { - return "*" - } - ret := strings.Join(list, ",") - if more { - return fmt.Sprintf("%s + %d more...", ret, len(rules)-max) - } - return ret -} - -func formatPorts(tls []extensions.IngressTLS) string { - if len(tls) != 0 { - return "80, 443" - } - return "80" -} - -func printIngress(ingress *extensions.Ingress, w io.Writer, options PrintOptions) error { - name := ingress.Name - namespace := ingress.Namespace - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - - if _, err := fmt.Fprintf(w, "%s\t%v\t%v\t%v\t%s", - name, - formatHosts(ingress.Spec.Rules), - loadBalancerStatusStringer(ingress.Status.LoadBalancer, options.Wide), - formatPorts(ingress.Spec.TLS), - translateTimestamp(ingress.CreationTimestamp), - ); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(ingress.Labels, options.ColumnLabels)); err != nil { - return err - } - - if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, ingress.Labels)); err != nil { - return err - } - return nil -} - -func printIngressList(ingressList *extensions.IngressList, w io.Writer, options PrintOptions) error { - for _, ingress := range ingressList.Items { - if err := printIngress(&ingress, w, options); err != nil { - return err - } - } - return nil -} - -func printPetSet(ps *apps.PetSet, w io.Writer, options PrintOptions) error { - name := ps.Name - namespace := ps.Namespace - containers := ps.Spec.Template.Spec.Containers - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - desiredReplicas := ps.Spec.Replicas - currentReplicas := ps.Status.Replicas - if _, err := fmt.Fprintf(w, "%s\t%d\t%d\t%s", - name, - desiredReplicas, - currentReplicas, - translateTimestamp(ps.CreationTimestamp), - ); err != nil { - return err - } - if options.Wide { - if err := layoutContainers(containers, w); err != nil { - return err - } - if _, err := fmt.Fprintf(w, "\t%s", unversioned.FormatLabelSelector(ps.Spec.Selector)); err != nil { - return err - } - } - if _, err := fmt.Fprint(w, AppendLabels(ps.Labels, options.ColumnLabels)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, ps.Labels)); err != nil { - return err - } - - return nil -} - -func printPetSetList(petSetList *apps.PetSetList, w io.Writer, options PrintOptions) error { - for _, ps := range petSetList.Items { - if err := printPetSet(&ps, w, options); err != nil { - return err - } - } - return nil -} - -func printDaemonSet(ds *extensions.DaemonSet, w io.Writer, options PrintOptions) error { - name := ds.Name - namespace := ds.Namespace - - containers := ds.Spec.Template.Spec.Containers - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - - desiredScheduled := ds.Status.DesiredNumberScheduled - currentScheduled := ds.Status.CurrentNumberScheduled - selector, err := unversioned.LabelSelectorAsSelector(ds.Spec.Selector) - if err != nil { - // this shouldn't happen if LabelSelector passed 
validation - return err - } - if _, err := fmt.Fprintf(w, "%s\t%d\t%d\t%s\t%s", - name, - desiredScheduled, - currentScheduled, - labels.FormatLabels(ds.Spec.Template.Spec.NodeSelector), - translateTimestamp(ds.CreationTimestamp), - ); err != nil { - return err - } - if options.Wide { - if err := layoutContainers(containers, w); err != nil { - return err - } - if _, err := fmt.Fprintf(w, "\t%s", selector.String()); err != nil { - return err - } - } - if _, err := fmt.Fprint(w, AppendLabels(ds.Labels, options.ColumnLabels)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, ds.Labels)); err != nil { - return err - } - - return nil -} - -func printDaemonSetList(list *extensions.DaemonSetList, w io.Writer, options PrintOptions) error { - for _, ds := range list.Items { - if err := printDaemonSet(&ds, w, options); err != nil { - return err - } - } - return nil -} - -func printEndpoints(endpoints *api.Endpoints, w io.Writer, options PrintOptions) error { - name := endpoints.Name - namespace := endpoints.Namespace - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%s\t%s", name, formatEndpoints(endpoints, nil), translateTimestamp(endpoints.CreationTimestamp)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(endpoints.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, endpoints.Labels)) - return err -} - -func printEndpointsList(list *api.EndpointsList, w io.Writer, options PrintOptions) error { - for _, item := range list.Items { - if err := printEndpoints(&item, w, options); err != nil { - return err - } - } - return nil -} - -func printNamespace(item *api.Namespace, w io.Writer, options PrintOptions) error { - if options.WithNamespace { - return fmt.Errorf("namespace is not namespaced") - } - - if _, err := fmt.Fprintf(w, "%s\t%s\t%s", item.Name, item.Status.Phase, translateTimestamp(item.CreationTimestamp)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(item.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, item.Labels)) - return err -} - -func printNamespaceList(list *api.NamespaceList, w io.Writer, options PrintOptions) error { - for _, item := range list.Items { - if err := printNamespace(&item, w, options); err != nil { - return err - } - } - return nil -} - -func printSecret(item *api.Secret, w io.Writer, options PrintOptions) error { - name := item.Name - namespace := item.Namespace - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%s\t%v\t%s", name, item.Type, len(item.Data), translateTimestamp(item.CreationTimestamp)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(item.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, item.Labels)) - return err -} - -func printSecretList(list *api.SecretList, w io.Writer, options PrintOptions) error { - for _, item := range list.Items { - if err := printSecret(&item, w, options); err != nil { - return err - } - } - - return nil -} - -func printServiceAccount(item *api.ServiceAccount, w io.Writer, options PrintOptions) error { - name := item.Name - namespace := item.Namespace - - if options.WithNamespace { - if _, err := 
fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%d\t%s", name, len(item.Secrets), translateTimestamp(item.CreationTimestamp)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(item.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, item.Labels)) - return err -} - -func printServiceAccountList(list *api.ServiceAccountList, w io.Writer, options PrintOptions) error { - for _, item := range list.Items { - if err := printServiceAccount(&item, w, options); err != nil { - return err - } - } - - return nil -} - -func printNode(node *api.Node, w io.Writer, options PrintOptions) error { - if options.WithNamespace { - return fmt.Errorf("node is not namespaced") - } - conditionMap := make(map[api.NodeConditionType]*api.NodeCondition) - NodeAllConditions := []api.NodeConditionType{api.NodeReady} - for i := range node.Status.Conditions { - cond := node.Status.Conditions[i] - conditionMap[cond.Type] = &cond - } - var status []string - for _, validCondition := range NodeAllConditions { - if condition, ok := conditionMap[validCondition]; ok { - if condition.Status == api.ConditionTrue { - status = append(status, string(condition.Type)) - } else { - status = append(status, "Not"+string(condition.Type)) - } - } - } - if len(status) == 0 { - status = append(status, "Unknown") - } - if node.Spec.Unschedulable { - status = append(status, "SchedulingDisabled") - } - - if _, err := fmt.Fprintf(w, "%s\t%s\t%s", node.Name, strings.Join(status, ","), translateTimestamp(node.CreationTimestamp)); err != nil { - return err - } - // Display caller specify column labels first. - if _, err := fmt.Fprint(w, AppendLabels(node.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, node.Labels)) - return err -} - -func printNodeList(list *api.NodeList, w io.Writer, options PrintOptions) error { - for _, node := range list.Items { - if err := printNode(&node, w, options); err != nil { - return err - } - } - return nil -} - -func printPersistentVolume(pv *api.PersistentVolume, w io.Writer, options PrintOptions) error { - if options.WithNamespace { - return fmt.Errorf("persistentVolume is not namespaced") - } - name := pv.Name - - claimRefUID := "" - if pv.Spec.ClaimRef != nil { - claimRefUID += pv.Spec.ClaimRef.Namespace - claimRefUID += "/" - claimRefUID += pv.Spec.ClaimRef.Name - } - - modesStr := api.GetAccessModesAsString(pv.Spec.AccessModes) - - aQty := pv.Spec.Capacity[api.ResourceStorage] - aSize := aQty.String() - - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s", - name, - aSize, modesStr, - pv.Status.Phase, - claimRefUID, - pv.Status.Reason, - translateTimestamp(pv.CreationTimestamp), - ); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(pv.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, pv.Labels)) - return err -} - -func printPersistentVolumeList(list *api.PersistentVolumeList, w io.Writer, options PrintOptions) error { - for _, pv := range list.Items { - if err := printPersistentVolume(&pv, w, options); err != nil { - return err - } - } - return nil -} - -func printPersistentVolumeClaim(pvc *api.PersistentVolumeClaim, w io.Writer, options PrintOptions) error { - name := pvc.Name - namespace := pvc.Namespace - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err 
!= nil { - return err - } - } - - phase := pvc.Status.Phase - storage := pvc.Spec.Resources.Requests[api.ResourceStorage] - capacity := "" - accessModes := "" - if pvc.Spec.VolumeName != "" { - accessModes = api.GetAccessModesAsString(pvc.Status.AccessModes) - storage = pvc.Status.Capacity[api.ResourceStorage] - capacity = storage.String() - } - - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s", name, phase, pvc.Spec.VolumeName, capacity, accessModes, translateTimestamp(pvc.CreationTimestamp)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(pvc.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, pvc.Labels)) - return err -} - -func printPersistentVolumeClaimList(list *api.PersistentVolumeClaimList, w io.Writer, options PrintOptions) error { - for _, psd := range list.Items { - if err := printPersistentVolumeClaim(&psd, w, options); err != nil { - return err - } - } - return nil -} - -func printEvent(event *api.Event, w io.Writer, options PrintOptions) error { - namespace := event.Namespace - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - - // While watching event, we should print absolute time. - var FirstTimestamp, LastTimestamp string - if options.AbsoluteTimestamps { - FirstTimestamp = event.FirstTimestamp.String() - LastTimestamp = event.LastTimestamp.String() - } else { - FirstTimestamp = translateTimestamp(event.FirstTimestamp) - LastTimestamp = translateTimestamp(event.LastTimestamp) - } - - if _, err := fmt.Fprintf( - w, "%s\t%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s", - LastTimestamp, - FirstTimestamp, - event.Count, - event.InvolvedObject.Name, - event.InvolvedObject.Kind, - event.InvolvedObject.FieldPath, - event.Type, - event.Reason, - event.Source, - event.Message, - ); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(event.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, event.Labels)) - return err -} - -// Sorts and prints the EventList in a human-friendly format. -func printEventList(list *api.EventList, w io.Writer, options PrintOptions) error { - sort.Sort(SortableEvents(list.Items)) - for i := range list.Items { - if err := printEvent(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -func printLimitRange(limitRange *api.LimitRange, w io.Writer, options PrintOptions) error { - return printObjectMeta(limitRange.ObjectMeta, w, options, true) -} - -// Prints the LimitRangeList in a human-friendly format. -func printLimitRangeList(list *api.LimitRangeList, w io.Writer, options PrintOptions) error { - for i := range list.Items { - if err := printLimitRange(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -// printObjectMeta prints the object metadata of a given resource. 
-func printObjectMeta(meta api.ObjectMeta, w io.Writer, options PrintOptions, namespaced bool) error { - if namespaced && options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", meta.Namespace); err != nil { - return err - } - } - - if _, err := fmt.Fprintf( - w, "%s\t%s", - meta.Name, - translateTimestamp(meta.CreationTimestamp), - ); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(meta.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, meta.Labels)) - return err -} - -func printResourceQuota(resourceQuota *api.ResourceQuota, w io.Writer, options PrintOptions) error { - return printObjectMeta(resourceQuota.ObjectMeta, w, options, true) -} - -// Prints the ResourceQuotaList in a human-friendly format. -func printResourceQuotaList(list *api.ResourceQuotaList, w io.Writer, options PrintOptions) error { - for i := range list.Items { - if err := printResourceQuota(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -func printRole(role *rbac.Role, w io.Writer, options PrintOptions) error { - return printObjectMeta(role.ObjectMeta, w, options, true) -} - -// Prints the Role in a human-friendly format. -func printRoleList(list *rbac.RoleList, w io.Writer, options PrintOptions) error { - for i := range list.Items { - if err := printRole(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -func printRoleBinding(roleBinding *rbac.RoleBinding, w io.Writer, options PrintOptions) error { - return printObjectMeta(roleBinding.ObjectMeta, w, options, true) -} - -// Prints the RoleBinding in a human-friendly format. -func printRoleBindingList(list *rbac.RoleBindingList, w io.Writer, options PrintOptions) error { - for i := range list.Items { - if err := printRoleBinding(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -func printClusterRole(clusterRole *rbac.ClusterRole, w io.Writer, options PrintOptions) error { - return printObjectMeta(clusterRole.ObjectMeta, w, options, false) -} - -// Prints the ClusterRole in a human-friendly format. -func printClusterRoleList(list *rbac.ClusterRoleList, w io.Writer, options PrintOptions) error { - for i := range list.Items { - if err := printClusterRole(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -func printClusterRoleBinding(clusterRoleBinding *rbac.ClusterRoleBinding, w io.Writer, options PrintOptions) error { - return printObjectMeta(clusterRoleBinding.ObjectMeta, w, options, false) -} - -// Prints the ClusterRoleBinding in a human-friendly format. 
-func printClusterRoleBindingList(list *rbac.ClusterRoleBindingList, w io.Writer, options PrintOptions) error { - for i := range list.Items { - if err := printClusterRoleBinding(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -func printComponentStatus(item *api.ComponentStatus, w io.Writer, options PrintOptions) error { - if options.WithNamespace { - return fmt.Errorf("componentStatus is not namespaced") - } - status := "Unknown" - message := "" - error := "" - for _, condition := range item.Conditions { - if condition.Type == api.ComponentHealthy { - if condition.Status == api.ConditionTrue { - status = "Healthy" - } else { - status = "Unhealthy" - } - message = condition.Message - error = condition.Error - break - } - } - - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s", item.Name, status, message, error); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(item.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, item.Labels)) - return err -} - -func printComponentStatusList(list *api.ComponentStatusList, w io.Writer, options PrintOptions) error { - for _, item := range list.Items { - if err := printComponentStatus(&item, w, options); err != nil { - return err - } - } - - return nil -} - -func printThirdPartyResource(rsrc *extensions.ThirdPartyResource, w io.Writer, options PrintOptions) error { - versions := make([]string, len(rsrc.Versions)) - for ix := range rsrc.Versions { - version := &rsrc.Versions[ix] - versions[ix] = fmt.Sprintf("%s", version.Name) - } - versionsString := strings.Join(versions, ",") - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\n", rsrc.Name, rsrc.Description, versionsString); err != nil { - return err - } - return nil -} - -func printThirdPartyResourceList(list *extensions.ThirdPartyResourceList, w io.Writer, options PrintOptions) error { - for _, item := range list.Items { - if err := printThirdPartyResource(&item, w, options); err != nil { - return err - } - } - - return nil -} - -func truncate(str string, maxLen int) string { - if len(str) > maxLen { - return str[0:maxLen] + "..." 
- } - return str -} - -func printThirdPartyResourceData(rsrc *extensions.ThirdPartyResourceData, w io.Writer, options PrintOptions) error { - l := labels.FormatLabels(rsrc.Labels) - truncateCols := 50 - if options.Wide { - truncateCols = 100 - } - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\n", rsrc.Name, l, truncate(string(rsrc.Data), truncateCols)); err != nil { - return err - } - return nil -} - -func printThirdPartyResourceDataList(list *extensions.ThirdPartyResourceDataList, w io.Writer, options PrintOptions) error { - for _, item := range list.Items { - if err := printThirdPartyResourceData(&item, w, options); err != nil { - return err - } - } - - return nil -} - -func printDeployment(deployment *extensions.Deployment, w io.Writer, options PrintOptions) error { - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", deployment.Namespace); err != nil { - return err - } - } - - desiredReplicas := deployment.Spec.Replicas - currentReplicas := deployment.Status.Replicas - updatedReplicas := deployment.Status.UpdatedReplicas - availableReplicas := deployment.Status.AvailableReplicas - age := translateTimestamp(deployment.CreationTimestamp) - if _, err := fmt.Fprintf(w, "%s\t%d\t%d\t%d\t%d\t%s", deployment.Name, desiredReplicas, currentReplicas, updatedReplicas, availableReplicas, age); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(deployment.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, deployment.Labels)) - return err -} - -func printDeploymentList(list *extensions.DeploymentList, w io.Writer, options PrintOptions) error { - for _, item := range list.Items { - if err := printDeployment(&item, w, options); err != nil { - return err - } - } - return nil -} - -func printHorizontalPodAutoscaler(hpa *autoscaling.HorizontalPodAutoscaler, w io.Writer, options PrintOptions) error { - namespace := hpa.Namespace - name := hpa.Name - reference := fmt.Sprintf("%s/%s", - hpa.Spec.ScaleTargetRef.Kind, - hpa.Spec.ScaleTargetRef.Name) - target := "<unset>" - if hpa.Spec.TargetCPUUtilizationPercentage != nil { - target = fmt.Sprintf("%d%%", *hpa.Spec.TargetCPUUtilizationPercentage) - } - current := "<waiting>" - if hpa.Status.CurrentCPUUtilizationPercentage != nil { - current = fmt.Sprintf("%d%%", *hpa.Status.CurrentCPUUtilizationPercentage) - } - minPods := "<unset>" - if hpa.Spec.MinReplicas != nil { - minPods = fmt.Sprintf("%d", *hpa.Spec.MinReplicas) - } - maxPods := hpa.Spec.MaxReplicas - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%d\t%s", - name, - reference, - target, - current, - minPods, - maxPods, - translateTimestamp(hpa.CreationTimestamp), - ); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(hpa.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, hpa.Labels)) - return err -} - -func printHorizontalPodAutoscalerList(list *autoscaling.HorizontalPodAutoscalerList, w io.Writer, options PrintOptions) error { - for i := range list.Items { - if err := printHorizontalPodAutoscaler(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -func printConfigMap(configMap *api.ConfigMap, w io.Writer, options PrintOptions) error { - name := configMap.Name - namespace := configMap.Namespace - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", 
namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%v\t%s", name, len(configMap.Data), translateTimestamp(configMap.CreationTimestamp)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(configMap.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, configMap.Labels)) - return err -} - -func printConfigMapList(list *api.ConfigMapList, w io.Writer, options PrintOptions) error { - for i := range list.Items { - if err := printConfigMap(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -func printPodSecurityPolicy(item *extensions.PodSecurityPolicy, w io.Writer, options PrintOptions) error { - _, err := fmt.Fprintf(w, "%s\t%t\t%v\t%s\t%s\t%s\t%s\t%t\t%v\n", item.Name, item.Spec.Privileged, - item.Spec.AllowedCapabilities, item.Spec.SELinux.Rule, - item.Spec.RunAsUser.Rule, item.Spec.FSGroup.Rule, item.Spec.SupplementalGroups.Rule, item.Spec.ReadOnlyRootFilesystem, item.Spec.Volumes) - return err -} - -func printPodSecurityPolicyList(list *extensions.PodSecurityPolicyList, w io.Writer, options PrintOptions) error { - for _, item := range list.Items { - if err := printPodSecurityPolicy(&item, w, options); err != nil { - return err - } - } - - return nil -} - -func printNetworkPolicy(networkPolicy *extensions.NetworkPolicy, w io.Writer, options PrintOptions) error { - name := networkPolicy.Name - namespace := networkPolicy.Namespace - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { - return err - } - } - if _, err := fmt.Fprintf(w, "%s\t%v\t%s", name, unversioned.FormatLabelSelector(&networkPolicy.Spec.PodSelector), translateTimestamp(networkPolicy.CreationTimestamp)); err != nil { - return err - } - if _, err := fmt.Fprint(w, AppendLabels(networkPolicy.Labels, options.ColumnLabels)); err != nil { - return err - } - _, err := fmt.Fprint(w, AppendAllLabels(options.ShowLabels, networkPolicy.Labels)) - return err -} - -func printNetworkPolicyList(list *extensions.NetworkPolicyList, w io.Writer, options PrintOptions) error { - for i := range list.Items { - if err := printNetworkPolicy(&list.Items[i], w, options); err != nil { - return err - } - } - return nil -} - -func AppendLabels(itemLabels map[string]string, columnLabels []string) string { - var buffer bytes.Buffer - - for _, cl := range columnLabels { - buffer.WriteString(fmt.Sprint("\t")) - if il, ok := itemLabels[cl]; ok { - buffer.WriteString(fmt.Sprint(il)) - } else { - buffer.WriteString("<none>") - } - } - - return buffer.String() -} - -// Append all labels to a single column. We need this even when show-labels flag* is -// false, since this adds newline delimiter to the end of each row. -func AppendAllLabels(showLabels bool, itemLabels map[string]string) string { - var buffer bytes.Buffer - - if showLabels { - buffer.WriteString(fmt.Sprint("\t")) - buffer.WriteString(labels.FormatLabels(itemLabels)) - } - buffer.WriteString("\n") - - return buffer.String() -} - -// Append a set of tabs for each label column. We need this in the case where -// we have extra lines so that the tabwriter will still line things up. -func AppendLabelTabs(columnLabels []string) string { - var buffer bytes.Buffer - - for range columnLabels { - buffer.WriteString("\t") - } - buffer.WriteString("\n") - - return buffer.String() -} - -// Lay out all the containers on one line if use wide output. 
-func layoutContainers(containers []api.Container, w io.Writer) error { - var namesBuffer bytes.Buffer - var imagesBuffer bytes.Buffer - - for i, container := range containers { - namesBuffer.WriteString(container.Name) - imagesBuffer.WriteString(container.Image) - if i != len(containers)-1 { - namesBuffer.WriteString(",") - imagesBuffer.WriteString(",") - } - } - _, err := fmt.Fprintf(w, "\t%s\t%s", namesBuffer.String(), imagesBuffer.String()) - if err != nil { - return err - } - return nil -} - -func formatLabelHeaders(columnLabels []string) []string { - formHead := make([]string, len(columnLabels)) - for i, l := range columnLabels { - p := strings.Split(l, "/") - formHead[i] = strings.ToUpper((p[len(p)-1])) - } - return formHead -} - -// headers for -o wide -func formatWideHeaders(wide bool, t reflect.Type) []string { - if wide { - if t.String() == "*api.Pod" || t.String() == "*api.PodList" { - return []string{"IP", "NODE"} - } - if t.String() == "*api.ReplicationController" || t.String() == "*api.ReplicationControllerList" { - return []string{"CONTAINER(S)", "IMAGE(S)", "SELECTOR"} - } - if t.String() == "*batch.Job" || t.String() == "*batch.JobList" { - return []string{"CONTAINER(S)", "IMAGE(S)", "SELECTOR"} - } - if t.String() == "*api.Service" || t.String() == "*api.ServiceList" { - return []string{"SELECTOR"} - } - if t.String() == "*extensions.DaemonSet" || t.String() == "*extensions.DaemonSetList" { - return []string{"CONTAINER(S)", "IMAGE(S)", "SELECTOR"} - } - if t.String() == "*extensions.ReplicaSet" || t.String() == "*extensions.ReplicaSetList" { - return []string{"CONTAINER(S)", "IMAGE(S)", "SELECTOR"} - } - } - return nil -} - -// headers for --show-labels=true -func formatShowLabelsHeader(showLabels bool, t reflect.Type) []string { - if showLabels { - if t.String() != "*api.ThirdPartyResource" && t.String() != "*api.ThirdPartyResourceList" { - return []string{"LABELS"} - } - } - return nil -} - -// GetNewTabWriter returns a tabwriter that translates tabbed columns in input into properly aligned text. -func GetNewTabWriter(output io.Writer) *tabwriter.Writer { - return tabwriter.NewWriter(output, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags) -} - -// PrintObj prints the obj in a human-friendly format according to the type of the obj. -func (h *HumanReadablePrinter) PrintObj(obj runtime.Object, output io.Writer) error { - // if output is a tabwriter (when it's called by kubectl get), we use it; create a new tabwriter otherwise - w, found := output.(*tabwriter.Writer) - if !found { - w = GetNewTabWriter(output) - defer w.Flush() - } - t := reflect.TypeOf(obj) - if handler := h.handlerMap[t]; handler != nil { - if !h.options.NoHeaders && t != h.lastType { - headers := append(handler.columns, formatWideHeaders(h.options.Wide, t)...) - headers = append(headers, formatLabelHeaders(h.options.ColumnLabels)...) - // LABELS is always the last column. - headers = append(headers, formatShowLabelsHeader(h.options.ShowLabels, t)...) - if h.options.WithNamespace { - headers = append(withNamespacePrefixColumns, headers...) 
- } - h.printHeader(headers, w) - h.lastType = t - } - args := []reflect.Value{reflect.ValueOf(obj), reflect.ValueOf(w), reflect.ValueOf(h.options)} - resultValue := handler.printFunc.Call(args)[0] - if resultValue.IsNil() { - return nil - } - return resultValue.Interface().(error) - } - return fmt.Errorf("error: unknown type %#v", obj) -} - -// TemplatePrinter is an implementation of ResourcePrinter which formats data with a Go Template. -type TemplatePrinter struct { - rawTemplate string - template *template.Template -} - -func NewTemplatePrinter(tmpl []byte) (*TemplatePrinter, error) { - t, err := template.New("output"). - Funcs(template.FuncMap{"exists": exists}). - Parse(string(tmpl)) - if err != nil { - return nil, err - } - return &TemplatePrinter{ - rawTemplate: string(tmpl), - template: t, - }, nil -} - -// PrintObj formats the obj with the Go Template. -func (p *TemplatePrinter) PrintObj(obj runtime.Object, w io.Writer) error { - data, err := json.Marshal(obj) - if err != nil { - return err - } - out := map[string]interface{}{} - if err := json.Unmarshal(data, &out); err != nil { - return err - } - if err = p.safeExecute(w, out); err != nil { - // It is way easier to debug this stuff when it shows up in - // stdout instead of just stdin. So in addition to returning - // a nice error, also print useful stuff with the writer. - fmt.Fprintf(w, "Error executing template: %v. Printing more information for debugging the template:\n", err) - fmt.Fprintf(w, "\ttemplate was:\n\t\t%v\n", p.rawTemplate) - fmt.Fprintf(w, "\traw data was:\n\t\t%v\n", string(data)) - fmt.Fprintf(w, "\tobject given to template engine was:\n\t\t%+v\n\n", out) - return fmt.Errorf("error executing template %q: %v", p.rawTemplate, err) - } - return nil -} - -// TODO: implement HandledResources() -func (p *TemplatePrinter) HandledResources() []string { - return []string{} -} - -// safeExecute tries to execute the template, but catches panics and returns an error -// should the template engine panic. -func (p *TemplatePrinter) safeExecute(w io.Writer, obj interface{}) error { - var panicErr error - // Sorry for the double anonymous function. There's probably a clever way - // to do this that has the defer'd func setting the value to be returned, but - // that would be even less obvious. - retErr := func() error { - defer func() { - if x := recover(); x != nil { - panicErr = fmt.Errorf("caught panic: %+v", x) - } - }() - return p.template.Execute(w, obj) - }() - if panicErr != nil { - return panicErr - } - return retErr -} - -func tabbedString(f func(io.Writer) error) (string, error) { - out := new(tabwriter.Writer) - buf := &bytes.Buffer{} - out.Init(buf, 0, 8, 1, '\t', 0) - - err := f(out) - if err != nil { - return "", err - } - - out.Flush() - str := string(buf.String()) - return str, nil -} - -// exists returns true if it would be possible to call the index function -// with these arguments. -// -// TODO: how to document this for users? -// -// index returns the result of indexing its first argument by the following -// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each -// indexed item must be a map, slice, or array. 
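safeExecute above wraps template execution in a deferred recover so that a panicking template surfaces as an ordinary error instead of crashing the caller. A self-contained sketch of that guard, assuming only the standard library; execTemplate and the sample template are illustrative names, not part of the vendored file:

    package main

    import (
        "bytes"
        "fmt"
        "text/template"
    )

    // execTemplate runs tmpl against data and converts any panic raised by the
    // template engine into an error via the named return value.
    func execTemplate(tmpl *template.Template, data interface{}) (out string, err error) {
        defer func() {
            if r := recover(); r != nil {
                err = fmt.Errorf("caught panic: %+v", r)
            }
        }()
        var buf bytes.Buffer
        if execErr := tmpl.Execute(&buf, data); execErr != nil {
            return "", execErr
        }
        return buf.String(), nil
    }

    func main() {
        t := template.Must(template.New("demo").Parse("Hello {{.Name}}\n"))
        s, err := execTemplate(t, map[string]string{"Name": "world"})
        fmt.Print(s, err)
    }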
-func exists(item interface{}, indices ...interface{}) bool { - v := reflect.ValueOf(item) - for _, i := range indices { - index := reflect.ValueOf(i) - var isNil bool - if v, isNil = indirect(v); isNil { - return false - } - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.String: - var x int64 - switch index.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x = index.Int() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x = int64(index.Uint()) - default: - return false - } - if x < 0 || x >= int64(v.Len()) { - return false - } - v = v.Index(int(x)) - case reflect.Map: - if !index.IsValid() { - index = reflect.Zero(v.Type().Key()) - } - if !index.Type().AssignableTo(v.Type().Key()) { - return false - } - if x := v.MapIndex(index); x.IsValid() { - v = x - } else { - v = reflect.Zero(v.Type().Elem()) - } - default: - return false - } - } - if _, isNil := indirect(v); isNil { - return false - } - return true -} - -// stolen from text/template -// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. -// We indirect through pointers and empty interfaces (only) because -// non-empty interfaces have methods we might need. -func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { - for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() { - if v.IsNil() { - return v, true - } - if v.Kind() == reflect.Interface && v.NumMethod() > 0 { - break - } - } - return v, false -} - -// JSONPathPrinter is an implementation of ResourcePrinter which formats data with jsonpath expression. -type JSONPathPrinter struct { - rawTemplate string - *jsonpath.JSONPath -} - -func NewJSONPathPrinter(tmpl string) (*JSONPathPrinter, error) { - j := jsonpath.New("out") - if err := j.Parse(tmpl); err != nil { - return nil, err - } - return &JSONPathPrinter{tmpl, j}, nil -} - -// PrintObj formats the obj with the JSONPath Template. -func (j *JSONPathPrinter) PrintObj(obj runtime.Object, w io.Writer) error { - var queryObj interface{} = obj - if meta.IsListType(obj) { - data, err := json.Marshal(obj) - if err != nil { - return err - } - queryObj = map[string]interface{}{} - if err := json.Unmarshal(data, &queryObj); err != nil { - return err - } - } - - if err := j.JSONPath.Execute(w, queryObj); err != nil { - fmt.Fprintf(w, "Error executing template: %v. Printing more information for debugging the template:\n", err) - fmt.Fprintf(w, "\ttemplate was:\n\t\t%v\n", j.rawTemplate) - fmt.Fprintf(w, "\tobject given to jsonpath engine was:\n\t\t%#v\n\n", queryObj) - return fmt.Errorf("error executing jsonpath %q: %v\n", j.rawTemplate, err) - } - return nil -} - -// TODO: implement HandledResources() -func (p *JSONPathPrinter) HandledResources() []string { - return []string{} -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go deleted file mode 100644 index 2e4f92b30..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/rollback.go +++ /dev/null @@ -1,122 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "os" - "os/signal" - "syscall" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/extensions" - client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/runtime" - deploymentutil "k8s.io/kubernetes/pkg/util/deployment" - "k8s.io/kubernetes/pkg/watch" -) - -// Rollbacker provides an interface for resources that can be rolled back. -type Rollbacker interface { - Rollback(namespace, name string, updatedAnnotations map[string]string, toRevision int64, obj runtime.Object) (string, error) -} - -func RollbackerFor(kind unversioned.GroupKind, c client.Interface) (Rollbacker, error) { - switch kind { - case extensions.Kind("Deployment"): - return &DeploymentRollbacker{c}, nil - } - return nil, fmt.Errorf("no rollbacker has been implemented for %q", kind) -} - -type DeploymentRollbacker struct { - c client.Interface -} - -func (r *DeploymentRollbacker) Rollback(namespace, name string, updatedAnnotations map[string]string, toRevision int64, obj runtime.Object) (string, error) { - d := obj.(*extensions.Deployment) - if d.Spec.Paused { - return "", fmt.Errorf("you cannot rollback a paused deployment; resume it first with 'kubectl rollout resume' and try again") - } - deploymentRollback := &extensions.DeploymentRollback{ - Name: name, - UpdatedAnnotations: updatedAnnotations, - RollbackTo: extensions.RollbackConfig{ - Revision: toRevision, - }, - } - result := "" - - // Get current events - events, err := r.c.Events(namespace).List(api.ListOptions{}) - if err != nil { - return result, err - } - // Do the rollback - if err := r.c.Extensions().Deployments(namespace).Rollback(deploymentRollback); err != nil { - return result, err - } - // Watch for the changes of events - watch, err := r.c.Events(namespace).Watch(api.ListOptions{Watch: true, ResourceVersion: events.ResourceVersion}) - if err != nil { - return result, err - } - result = watchRollbackEvent(watch) - return result, err -} - -// watchRollbackEvent watches for rollback events and returns rollback result -func watchRollbackEvent(w watch.Interface) string { - signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Interrupt, os.Kill, syscall.SIGTERM) - for { - select { - case event, ok := <-w.ResultChan(): - if !ok { - return "" - } - obj, ok := event.Object.(*api.Event) - if !ok { - w.Stop() - return "" - } - isRollback, result := isRollbackEvent(obj) - if isRollback { - w.Stop() - return result - } - case <-signals: - w.Stop() - } - } -} - -// isRollbackEvent checks if the input event is about rollback, and returns true and -// related result string back if it is. 
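watchRollbackEvent above is a select loop over a watch result channel and an OS signal channel. A standalone sketch of that shape, with a plain string channel standing in for the Kubernetes watch interface; drain and the event strings are illustrative only, and the real function keeps watching after a signal rather than returning:

    package main

    import (
        "fmt"
        "os"
        "os/signal"
        "syscall"
    )

    // drain consumes events until the channel closes, a terminal event arrives,
    // or the process is interrupted, mirroring the select shape of watchRollbackEvent.
    func drain(events <-chan string) string {
        sigs := make(chan os.Signal, 1)
        signal.Notify(sigs, os.Interrupt, syscall.SIGTERM)
        for {
            select {
            case ev, ok := <-events:
                if !ok {
                    return ""
                }
                if ev == "DeploymentRollback" {
                    return "rolled back"
                }
            case <-sigs:
                return "interrupted"
            }
        }
    }

    func main() {
        ch := make(chan string, 1)
        ch <- "DeploymentRollback"
        close(ch)
        fmt.Println(drain(ch))
    }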
-func isRollbackEvent(e *api.Event) (bool, string) { - rollbackEventReasons := []string{deploymentutil.RollbackRevisionNotFound, deploymentutil.RollbackTemplateUnchanged, deploymentutil.RollbackDone} - for _, reason := range rollbackEventReasons { - if e.Reason == reason { - if reason == deploymentutil.RollbackDone { - return true, "rolled back" - } - return true, fmt.Sprintf("skipped rollback (%s: %s)", e.Reason, e.Message) - } - } - return false, "" -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go deleted file mode 100644 index 0af1253ad..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/rolling_updater.go +++ /dev/null @@ -1,810 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - goerrors "errors" - "fmt" - "io" - "strconv" - "strings" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/deployment" - "k8s.io/kubernetes/pkg/util/integer" - "k8s.io/kubernetes/pkg/util/intstr" - "k8s.io/kubernetes/pkg/util/wait" -) - -const ( - sourceIdAnnotation = kubectlAnnotationPrefix + "update-source-id" - desiredReplicasAnnotation = kubectlAnnotationPrefix + "desired-replicas" - originalReplicasAnnotation = kubectlAnnotationPrefix + "original-replicas" - nextControllerAnnotation = kubectlAnnotationPrefix + "next-controller-id" -) - -// RollingUpdaterConfig is the configuration for a rolling deployment process. -type RollingUpdaterConfig struct { - // Out is a writer for progress output. - Out io.Writer - // OldRC is an existing controller to be replaced. - OldRc *api.ReplicationController - // NewRc is a controller that will take ownership of updated pods (will be - // created if needed). - NewRc *api.ReplicationController - // UpdatePeriod is the time to wait between individual pod updates. - UpdatePeriod time.Duration - // Interval is the time to wait between polling controller status after - // update. - Interval time.Duration - // Timeout is the time to wait for controller updates before giving up. - Timeout time.Duration - // CleanupPolicy defines the cleanup action to take after the deployment is - // complete. - CleanupPolicy RollingUpdaterCleanupPolicy - // MaxUnavailable is the maximum number of pods that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // Absolute number is calculated from percentage by rounding up. - // This can not be 0 if MaxSurge is 0. - // By default, a fixed value of 1 is used. - // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods - // immediately when the rolling update starts. 
Once new pods are ready, old RC - // can be scaled down further, followed by scaling up the new RC, ensuring - // that the total number of pods available at all times during the update is at - // least 70% of desired pods. - MaxUnavailable intstr.IntOrString - // MaxSurge is the maximum number of pods that can be scheduled above the desired number of pods. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // By default, a value of 1 is used. - // Example: when this is set to 30%, the new RC can be scaled up immediately - // when the rolling update starts, such that the total number of old and new pods do not exceed - // 130% of desired pods. Once old pods have been killed, new RC can be scaled up - // further, ensuring that total number of pods running at any time during - // the update is atmost 130% of desired pods. - MaxSurge intstr.IntOrString - // OnProgress is invoked if set during each scale cycle, to allow the caller to perform additional logic or - // abort the scale. If an error is returned the cleanup method will not be invoked. The percentage value - // is a synthetic "progress" calculation that represents the approximate percentage completion. - OnProgress func(oldRc, newRc *api.ReplicationController, percentage int) error -} - -// RollingUpdaterCleanupPolicy is a cleanup action to take after the -// deployment is complete. -type RollingUpdaterCleanupPolicy string - -const ( - // DeleteRollingUpdateCleanupPolicy means delete the old controller. - DeleteRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Delete" - // PreserveRollingUpdateCleanupPolicy means keep the old controller. - PreserveRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Preserve" - // RenameRollingUpdateCleanupPolicy means delete the old controller, and rename - // the new controller to the name of the old controller. - RenameRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Rename" -) - -// RollingUpdater provides methods for updating replicated pods in a predictable, -// fault-tolerant way. -type RollingUpdater struct { - // Client interface for creating and updating controllers - c client.Interface - // Namespace for resources - ns string - // scaleAndWait scales a controller and returns its updated state. - scaleAndWait func(rc *api.ReplicationController, retry *RetryParams, wait *RetryParams) (*api.ReplicationController, error) - //getOrCreateTargetController gets and validates an existing controller or - //makes a new one. - getOrCreateTargetController func(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error) - // cleanup performs post deployment cleanup tasks for newRc and oldRc. - cleanup func(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error - // getReadyPods returns the amount of old and new ready pods. - getReadyPods func(oldRc, newRc *api.ReplicationController) (int32, int32, error) -} - -// NewRollingUpdater creates a RollingUpdater from a client. -func NewRollingUpdater(namespace string, client client.Interface) *RollingUpdater { - updater := &RollingUpdater{ - c: client, - ns: namespace, - } - // Inject real implementations. 
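NewRollingUpdater above wires the updater's own methods into function-typed struct fields, which is what lets tests substitute stubs for scaling, cleanup, and readiness checks. A minimal sketch of that injection pattern with invented names, assuming nothing beyond the standard library:

    package main

    import "fmt"

    // updater holds its collaborator as a function field so tests can swap in a stub.
    type updater struct {
        scaleAndWait func(name string, replicas int) error
    }

    func newUpdater() *updater {
        u := &updater{}
        // Inject the real implementation; a test may overwrite u.scaleAndWait.
        u.scaleAndWait = u.realScaleAndWait
        return u
    }

    func (u *updater) realScaleAndWait(name string, replicas int) error {
        fmt.Printf("scaling %s to %d replicas\n", name, replicas)
        return nil
    }

    func main() {
        u := newUpdater()
        if err := u.scaleAndWait("frontend", 3); err != nil {
            fmt.Println(err)
        }
    }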
- updater.scaleAndWait = updater.scaleAndWaitWithScaler - updater.getOrCreateTargetController = updater.getOrCreateTargetControllerWithClient - updater.getReadyPods = updater.readyPods - updater.cleanup = updater.cleanupWithClients - return updater -} - -// Update all pods for a ReplicationController (oldRc) by creating a new -// controller (newRc) with 0 replicas, and synchronously scaling oldRc and -// newRc until oldRc has 0 replicas and newRc has the original # of desired -// replicas. Cleanup occurs based on a RollingUpdaterCleanupPolicy. -// -// Each interval, the updater will attempt to make progress however it can -// without violating any availability constraints defined by the config. This -// means the amount scaled up or down each interval will vary based on the -// timeliness of readiness and the updater will always try to make progress, -// even slowly. -// -// If an update from newRc to oldRc is already in progress, we attempt to -// drive it to completion. If an error occurs at any step of the update, the -// error will be returned. -// -// A scaling event (either up or down) is considered progress; if no progress -// is made within the config.Timeout, an error is returned. -// -// TODO: make this handle performing a rollback of a partially completed -// rollout. -func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { - out := config.Out - oldRc := config.OldRc - scaleRetryParams := NewRetryParams(config.Interval, config.Timeout) - - // Find an existing controller (for continuing an interrupted update) or - // create a new one if necessary. - sourceId := fmt.Sprintf("%s:%s", oldRc.Name, oldRc.UID) - newRc, existed, err := r.getOrCreateTargetController(config.NewRc, sourceId) - if err != nil { - return err - } - if existed { - fmt.Fprintf(out, "Continuing update with existing controller %s.\n", newRc.Name) - } else { - fmt.Fprintf(out, "Created %s\n", newRc.Name) - } - // Extract the desired replica count from the controller. - desiredAnnotation, err := strconv.Atoi(newRc.Annotations[desiredReplicasAnnotation]) - if err != nil { - return fmt.Errorf("Unable to parse annotation for %s: %s=%s", - newRc.Name, desiredReplicasAnnotation, newRc.Annotations[desiredReplicasAnnotation]) - } - desired := int32(desiredAnnotation) - // Extract the original replica count from the old controller, adding the - // annotation if it doesn't yet exist. - _, hasOriginalAnnotation := oldRc.Annotations[originalReplicasAnnotation] - if !hasOriginalAnnotation { - existing, err := r.c.ReplicationControllers(oldRc.Namespace).Get(oldRc.Name) - if err != nil { - return err - } - originReplicas := strconv.Itoa(int(existing.Spec.Replicas)) - applyUpdate := func(rc *api.ReplicationController) { - if rc.Annotations == nil { - rc.Annotations = map[string]string{} - } - rc.Annotations[originalReplicasAnnotation] = originReplicas - } - if oldRc, err = updateRcWithRetries(r.c, existing.Namespace, existing, applyUpdate); err != nil { - return err - } - } - // maxSurge is the maximum scaling increment and maxUnavailable are the maximum pods - // that can be unavailable during a rollout. - maxSurge, maxUnavailable, err := deployment.ResolveFenceposts(&config.MaxSurge, &config.MaxUnavailable, desired) - if err != nil { - return err - } - // Validate maximums. 
- if desired > 0 && maxUnavailable == 0 && maxSurge == 0 { - return fmt.Errorf("one of maxSurge or maxUnavailable must be specified") - } - // The minumum pods which must remain available througout the update - // calculated for internal convenience. - minAvailable := int32(integer.IntMax(0, int(desired-maxUnavailable))) - // If the desired new scale is 0, then the max unavailable is necessarily - // the effective scale of the old RC regardless of the configuration - // (equivalent to 100% maxUnavailable). - if desired == 0 { - maxUnavailable = oldRc.Spec.Replicas - minAvailable = 0 - } - - fmt.Fprintf(out, "Scaling up %s from %d to %d, scaling down %s from %d to 0 (keep %d pods available, don't exceed %d pods)\n", - newRc.Name, newRc.Spec.Replicas, desired, oldRc.Name, oldRc.Spec.Replicas, minAvailable, desired+maxSurge) - - // give a caller incremental notification and allow them to exit early - goal := desired - newRc.Spec.Replicas - if goal < 0 { - goal = -goal - } - progress := func(complete bool) error { - if config.OnProgress == nil { - return nil - } - progress := desired - newRc.Spec.Replicas - if progress < 0 { - progress = -progress - } - percentage := 100 - if !complete && goal > 0 { - percentage = int((goal - progress) * 100 / goal) - } - return config.OnProgress(oldRc, newRc, percentage) - } - - // Scale newRc and oldRc until newRc has the desired number of replicas and - // oldRc has 0 replicas. - progressDeadline := time.Now().UnixNano() + config.Timeout.Nanoseconds() - for newRc.Spec.Replicas != desired || oldRc.Spec.Replicas != 0 { - // Store the existing replica counts for progress timeout tracking. - newReplicas := newRc.Spec.Replicas - oldReplicas := oldRc.Spec.Replicas - - // Scale up as much as possible. - scaledRc, err := r.scaleUp(newRc, oldRc, desired, maxSurge, maxUnavailable, scaleRetryParams, config) - if err != nil { - return err - } - newRc = scaledRc - - // notify the caller if necessary - if err := progress(false); err != nil { - return err - } - - // Wait between scaling operations for things to settle. - time.Sleep(config.UpdatePeriod) - - // Scale down as much as possible. - scaledRc, err = r.scaleDown(newRc, oldRc, desired, minAvailable, maxUnavailable, maxSurge, config) - if err != nil { - return err - } - oldRc = scaledRc - - // notify the caller if necessary - if err := progress(false); err != nil { - return err - } - - // If we are making progress, continue to advance the progress deadline. - // Otherwise, time out with an error. - progressMade := (newRc.Spec.Replicas != newReplicas) || (oldRc.Spec.Replicas != oldReplicas) - if progressMade { - progressDeadline = time.Now().UnixNano() + config.Timeout.Nanoseconds() - } else if time.Now().UnixNano() > progressDeadline { - return fmt.Errorf("timed out waiting for any update progress to be made") - } - } - - // notify the caller if necessary - if err := progress(true); err != nil { - return err - } - - // Housekeeping and cleanup policy execution. - return r.cleanup(oldRc, newRc, config) -} - -// scaleUp scales up newRc to desired by whatever increment is possible given -// the configured surge threshold. scaleUp will safely no-op as necessary when -// it detects redundancy or other relevant conditions. -func (r *RollingUpdater) scaleUp(newRc, oldRc *api.ReplicationController, desired, maxSurge, maxUnavailable int32, scaleRetryParams *RetryParams, config *RollingUpdaterConfig) (*api.ReplicationController, error) { - // If we're already at the desired, do nothing. 
- if newRc.Spec.Replicas == desired { - return newRc, nil - } - - // Scale up as far as we can based on the surge limit. - increment := (desired + maxSurge) - (oldRc.Spec.Replicas + newRc.Spec.Replicas) - // If the old is already scaled down, go ahead and scale all the way up. - if oldRc.Spec.Replicas == 0 { - increment = desired - newRc.Spec.Replicas - } - // We can't scale up without violating the surge limit, so do nothing. - if increment <= 0 { - return newRc, nil - } - // Increase the replica count, and deal with fenceposts. - newRc.Spec.Replicas += increment - if newRc.Spec.Replicas > desired { - newRc.Spec.Replicas = desired - } - // Perform the scale-up. - fmt.Fprintf(config.Out, "Scaling %s up to %d\n", newRc.Name, newRc.Spec.Replicas) - scaledRc, err := r.scaleAndWait(newRc, scaleRetryParams, scaleRetryParams) - if err != nil { - return nil, err - } - return scaledRc, nil -} - -// scaleDown scales down oldRc to 0 at whatever decrement possible given the -// thresholds defined on the config. scaleDown will safely no-op as necessary -// when it detects redundancy or other relevant conditions. -func (r *RollingUpdater) scaleDown(newRc, oldRc *api.ReplicationController, desired, minAvailable, maxUnavailable, maxSurge int32, config *RollingUpdaterConfig) (*api.ReplicationController, error) { - // Already scaled down; do nothing. - if oldRc.Spec.Replicas == 0 { - return oldRc, nil - } - // Get ready pods. We shouldn't block, otherwise in case both old and new - // pods are unavailable then the rolling update process blocks. - // Timeout-wise we are already covered by the progress check. - _, newAvailable, err := r.getReadyPods(oldRc, newRc) - if err != nil { - return nil, err - } - // The old controller is considered as part of the total because we want to - // maintain minimum availability even with a volatile old controller. - // Scale down as much as possible while maintaining minimum availability - allPods := oldRc.Spec.Replicas + newRc.Spec.Replicas - newUnavailable := newRc.Spec.Replicas - newAvailable - decrement := allPods - minAvailable - newUnavailable - // The decrement normally shouldn't drop below 0 because the available count - // always starts below the old replica count, but the old replica count can - // decrement due to externalities like pods death in the replica set. This - // will be considered a transient condition; do nothing and try again later - // with new readiness values. - // - // If the most we can scale is 0, it means we can't scale down without - // violating the minimum. Do nothing and try again later when conditions may - // have changed. - if decrement <= 0 { - return oldRc, nil - } - // Reduce the replica count, and deal with fenceposts. - oldRc.Spec.Replicas -= decrement - if oldRc.Spec.Replicas < 0 { - oldRc.Spec.Replicas = 0 - } - // If the new is already fully scaled and available up to the desired size, go - // ahead and scale old all the way down. - if newRc.Spec.Replicas == desired && newAvailable == desired { - oldRc.Spec.Replicas = 0 - } - // Perform the scale-down. - fmt.Fprintf(config.Out, "Scaling %s down to %d\n", oldRc.Name, oldRc.Spec.Replicas) - retryWait := &RetryParams{config.Interval, config.Timeout} - scaledRc, err := r.scaleAndWait(oldRc, retryWait, retryWait) - if err != nil { - return nil, err - } - return scaledRc, nil -} - -// scalerScaleAndWait scales a controller using a Scaler and a real client. 
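The scaleUp and scaleDown bodies above reduce to fencepost arithmetic over maxSurge and minAvailable. A standalone restatement of that arithmetic with plain integers; the real functions operate on ReplicationController objects and also perform the scaling calls, so this is only an illustration of the math:

    package main

    import "fmt"

    // scaleUpIncrement mirrors scaleUp: the new controller may grow until
    // old+new reaches desired+maxSurge, never past desired itself, and jumps
    // straight to desired once the old controller is gone.
    func scaleUpIncrement(desired, maxSurge, oldReplicas, newReplicas int32) int32 {
        if newReplicas >= desired {
            return 0
        }
        inc := (desired + maxSurge) - (oldReplicas + newReplicas)
        if oldReplicas == 0 {
            inc = desired - newReplicas
        }
        if inc <= 0 {
            return 0
        }
        if newReplicas+inc > desired {
            inc = desired - newReplicas
        }
        return inc
    }

    // scaleDownDecrement mirrors scaleDown: the old controller may shrink only
    // as far as keeps at least minAvailable ready pods across both controllers.
    func scaleDownDecrement(oldReplicas, newReplicas, newAvailable, minAvailable int32) int32 {
        newUnavailable := newReplicas - newAvailable
        dec := (oldReplicas + newReplicas) - minAvailable - newUnavailable
        if dec <= 0 {
            return 0
        }
        if dec > oldReplicas {
            dec = oldReplicas
        }
        return dec
    }

    func main() {
        // desired=10, maxSurge=3, old=10, new=0: the new RC may start with 3 pods.
        fmt.Println(scaleUpIncrement(10, 3, 10, 0)) // 3
        // old=10, new=3, 2 new pods ready, minAvailable=8: 4 old pods can go.
        fmt.Println(scaleDownDecrement(10, 3, 2, 8)) // 4
    }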
-func (r *RollingUpdater) scaleAndWaitWithScaler(rc *api.ReplicationController, retry *RetryParams, wait *RetryParams) (*api.ReplicationController, error) { - scaler, err := ScalerFor(api.Kind("ReplicationController"), r.c) - if err != nil { - return nil, fmt.Errorf("Couldn't make scaler: %s", err) - } - if err := scaler.Scale(rc.Namespace, rc.Name, uint(rc.Spec.Replicas), &ScalePrecondition{-1, ""}, retry, wait); err != nil { - return nil, err - } - return r.c.ReplicationControllers(rc.Namespace).Get(rc.Name) -} - -// readyPods returns the old and new ready counts for their pods. -// If a pod is observed as being ready, it's considered ready even -// if it later becomes notReady. -func (r *RollingUpdater) readyPods(oldRc, newRc *api.ReplicationController) (int32, int32, error) { - controllers := []*api.ReplicationController{oldRc, newRc} - oldReady := int32(0) - newReady := int32(0) - - for i := range controllers { - controller := controllers[i] - selector := labels.Set(controller.Spec.Selector).AsSelector() - options := api.ListOptions{LabelSelector: selector} - pods, err := r.c.Pods(controller.Namespace).List(options) - if err != nil { - return 0, 0, err - } - for _, pod := range pods.Items { - if api.IsPodReady(&pod) { - switch controller.Name { - case oldRc.Name: - oldReady++ - case newRc.Name: - newReady++ - } - } - } - } - return oldReady, newReady, nil -} - -// getOrCreateTargetControllerWithClient looks for an existing controller with -// sourceId. If found, the existing controller is returned with true -// indicating that the controller already exists. If the controller isn't -// found, a new one is created and returned along with false indicating the -// controller was created. -// -// Existing controllers are validated to ensure their sourceIdAnnotation -// matches sourceId; if there's a mismatch, an error is returned. -func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error) { - existingRc, err := r.existingController(controller) - if err != nil { - if !errors.IsNotFound(err) { - // There was an error trying to find the controller; don't assume we - // should create it. - return nil, false, err - } - if controller.Spec.Replicas <= 0 { - return nil, false, fmt.Errorf("Invalid controller spec for %s; required: > 0 replicas, actual: %d\n", controller.Name, controller.Spec.Replicas) - } - // The controller wasn't found, so create it. - if controller.Annotations == nil { - controller.Annotations = map[string]string{} - } - controller.Annotations[desiredReplicasAnnotation] = fmt.Sprintf("%d", controller.Spec.Replicas) - controller.Annotations[sourceIdAnnotation] = sourceId - controller.Spec.Replicas = 0 - newRc, err := r.c.ReplicationControllers(r.ns).Create(controller) - return newRc, false, err - } - // Validate and use the existing controller. 
- annotations := existingRc.Annotations - source := annotations[sourceIdAnnotation] - _, ok := annotations[desiredReplicasAnnotation] - if source != sourceId || !ok { - return nil, false, fmt.Errorf("Missing/unexpected annotations for controller %s, expected %s : %s", controller.Name, sourceId, annotations) - } - return existingRc, true, nil -} - -// existingController verifies if the controller already exists -func (r *RollingUpdater) existingController(controller *api.ReplicationController) (*api.ReplicationController, error) { - // without rc name but generate name, there's no existing rc - if len(controller.Name) == 0 && len(controller.GenerateName) > 0 { - return nil, errors.NewNotFound(api.Resource("replicationcontrollers"), controller.Name) - } - // controller name is required to get rc back - return r.c.ReplicationControllers(controller.Namespace).Get(controller.Name) -} - -// cleanupWithClients performs cleanup tasks after the rolling update. Update -// process related annotations are removed from oldRc and newRc. The -// CleanupPolicy on config is executed. -func (r *RollingUpdater) cleanupWithClients(oldRc, newRc *api.ReplicationController, config *RollingUpdaterConfig) error { - // Clean up annotations - var err error - newRc, err = r.c.ReplicationControllers(r.ns).Get(newRc.Name) - if err != nil { - return err - } - applyUpdate := func(rc *api.ReplicationController) { - delete(rc.Annotations, sourceIdAnnotation) - delete(rc.Annotations, desiredReplicasAnnotation) - } - if newRc, err = updateRcWithRetries(r.c, r.ns, newRc, applyUpdate); err != nil { - return err - } - - if err = wait.Poll(config.Interval, config.Timeout, client.ControllerHasDesiredReplicas(r.c, newRc)); err != nil { - return err - } - newRc, err = r.c.ReplicationControllers(r.ns).Get(newRc.Name) - if err != nil { - return err - } - - switch config.CleanupPolicy { - case DeleteRollingUpdateCleanupPolicy: - // delete old rc - fmt.Fprintf(config.Out, "Update succeeded. Deleting %s\n", oldRc.Name) - return r.c.ReplicationControllers(r.ns).Delete(oldRc.Name) - case RenameRollingUpdateCleanupPolicy: - // delete old rc - fmt.Fprintf(config.Out, "Update succeeded. 
Deleting old controller: %s\n", oldRc.Name) - if err := r.c.ReplicationControllers(r.ns).Delete(oldRc.Name); err != nil { - return err - } - fmt.Fprintf(config.Out, "Renaming %s to %s\n", newRc.Name, oldRc.Name) - return Rename(r.c, newRc, oldRc.Name) - case PreserveRollingUpdateCleanupPolicy: - return nil - default: - return nil - } -} - -func Rename(c client.ReplicationControllersNamespacer, rc *api.ReplicationController, newName string) error { - oldName := rc.Name - rc.Name = newName - rc.ResourceVersion = "" - - _, err := c.ReplicationControllers(rc.Namespace).Create(rc) - if err != nil { - return err - } - err = c.ReplicationControllers(rc.Namespace).Delete(oldName) - if err != nil && !errors.IsNotFound(err) { - return err - } - return nil -} - -func LoadExistingNextReplicationController(c client.ReplicationControllersNamespacer, namespace, newName string) (*api.ReplicationController, error) { - if len(newName) == 0 { - return nil, nil - } - newRc, err := c.ReplicationControllers(namespace).Get(newName) - if err != nil && errors.IsNotFound(err) { - return nil, nil - } - return newRc, err -} - -type NewControllerConfig struct { - Namespace string - OldName, NewName string - Image string - Container string - DeploymentKey string - PullPolicy api.PullPolicy -} - -func CreateNewControllerFromCurrentController(c client.Interface, codec runtime.Codec, cfg *NewControllerConfig) (*api.ReplicationController, error) { - containerIndex := 0 - // load the old RC into the "new" RC - newRc, err := c.ReplicationControllers(cfg.Namespace).Get(cfg.OldName) - if err != nil { - return nil, err - } - - if len(cfg.Container) != 0 { - containerFound := false - - for i, c := range newRc.Spec.Template.Spec.Containers { - if c.Name == cfg.Container { - containerIndex = i - containerFound = true - break - } - } - - if !containerFound { - return nil, fmt.Errorf("container %s not found in pod", cfg.Container) - } - } - - if len(newRc.Spec.Template.Spec.Containers) > 1 && len(cfg.Container) == 0 { - return nil, goerrors.New("Must specify container to update when updating a multi-container pod") - } - - if len(newRc.Spec.Template.Spec.Containers) == 0 { - return nil, goerrors.New(fmt.Sprintf("Pod has no containers! (%v)", newRc)) - } - newRc.Spec.Template.Spec.Containers[containerIndex].Image = cfg.Image - if len(cfg.PullPolicy) != 0 { - newRc.Spec.Template.Spec.Containers[containerIndex].ImagePullPolicy = cfg.PullPolicy - } - - newHash, err := api.HashObject(newRc, codec) - if err != nil { - return nil, err - } - - if len(cfg.NewName) == 0 { - cfg.NewName = fmt.Sprintf("%s-%s", newRc.Name, newHash) - } - newRc.Name = cfg.NewName - - newRc.Spec.Selector[cfg.DeploymentKey] = newHash - newRc.Spec.Template.Labels[cfg.DeploymentKey] = newHash - // Clear resource version after hashing so that identical updates get different hashes. - newRc.ResourceVersion = "" - return newRc, nil -} - -func AbortRollingUpdate(c *RollingUpdaterConfig) error { - // Swap the controllers - tmp := c.OldRc - c.OldRc = c.NewRc - c.NewRc = tmp - - if c.NewRc.Annotations == nil { - c.NewRc.Annotations = map[string]string{} - } - c.NewRc.Annotations[sourceIdAnnotation] = fmt.Sprintf("%s:%s", c.OldRc.Name, c.OldRc.UID) - - // Use the original value since the replica count change from old to new - // could be asymmetric. If we don't know the original count, we can't safely - // roll back to a known good size. 
- originalSize, foundOriginal := tmp.Annotations[originalReplicasAnnotation] - if !foundOriginal { - return fmt.Errorf("couldn't find original replica count of %q", tmp.Name) - } - fmt.Fprintf(c.Out, "Setting %q replicas to %s\n", c.NewRc.Name, originalSize) - c.NewRc.Annotations[desiredReplicasAnnotation] = originalSize - c.CleanupPolicy = DeleteRollingUpdateCleanupPolicy - return nil -} - -func GetNextControllerAnnotation(rc *api.ReplicationController) (string, bool) { - res, found := rc.Annotations[nextControllerAnnotation] - return res, found -} - -func SetNextControllerAnnotation(rc *api.ReplicationController, name string) { - if rc.Annotations == nil { - rc.Annotations = map[string]string{} - } - rc.Annotations[nextControllerAnnotation] = name -} - -func UpdateExistingReplicationController(c client.Interface, oldRc *api.ReplicationController, namespace, newName, deploymentKey, deploymentValue string, out io.Writer) (*api.ReplicationController, error) { - if _, found := oldRc.Spec.Selector[deploymentKey]; !found { - SetNextControllerAnnotation(oldRc, newName) - return AddDeploymentKeyToReplicationController(oldRc, c, deploymentKey, deploymentValue, namespace, out) - } else { - // If we didn't need to update the controller for the deployment key, we still need to write - // the "next" controller. - applyUpdate := func(rc *api.ReplicationController) { - SetNextControllerAnnotation(rc, newName) - } - return updateRcWithRetries(c, namespace, oldRc, applyUpdate) - } -} - -func AddDeploymentKeyToReplicationController(oldRc *api.ReplicationController, client client.Interface, deploymentKey, deploymentValue, namespace string, out io.Writer) (*api.ReplicationController, error) { - var err error - // First, update the template label. This ensures that any newly created pods will have the new label - applyUpdate := func(rc *api.ReplicationController) { - if rc.Spec.Template.Labels == nil { - rc.Spec.Template.Labels = map[string]string{} - } - rc.Spec.Template.Labels[deploymentKey] = deploymentValue - } - if oldRc, err = updateRcWithRetries(client, namespace, oldRc, applyUpdate); err != nil { - return nil, err - } - - // Update all pods managed by the rc to have the new hash label, so they are correctly adopted - // TODO: extract the code from the label command and re-use it here. 
- selector := labels.SelectorFromSet(oldRc.Spec.Selector) - options := api.ListOptions{LabelSelector: selector} - podList, err := client.Pods(namespace).List(options) - if err != nil { - return nil, err - } - for ix := range podList.Items { - pod := &podList.Items[ix] - applyUpdate := func(p *api.Pod) { - if p.Labels == nil { - p.Labels = map[string]string{ - deploymentKey: deploymentValue, - } - } else { - p.Labels[deploymentKey] = deploymentValue - } - } - if pod, err = updatePodWithRetries(client, namespace, pod, applyUpdate); err != nil { - return nil, err - } - } - - if oldRc.Spec.Selector == nil { - oldRc.Spec.Selector = map[string]string{} - } - // Copy the old selector, so that we can scrub out any orphaned pods - selectorCopy := map[string]string{} - for k, v := range oldRc.Spec.Selector { - selectorCopy[k] = v - } - applyUpdate = func(rc *api.ReplicationController) { - rc.Spec.Selector[deploymentKey] = deploymentValue - } - // Update the selector of the rc so it manages all the pods we updated above - if oldRc, err = updateRcWithRetries(client, namespace, oldRc, applyUpdate); err != nil { - return nil, err - } - - // Clean up any orphaned pods that don't have the new label, this can happen if the rc manager - // doesn't see the update to its pod template and creates a new pod with the old labels after - // we've finished re-adopting existing pods to the rc. - selector = labels.SelectorFromSet(selectorCopy) - options = api.ListOptions{LabelSelector: selector} - podList, err = client.Pods(namespace).List(options) - for ix := range podList.Items { - pod := &podList.Items[ix] - if value, found := pod.Labels[deploymentKey]; !found || value != deploymentValue { - if err := client.Pods(namespace).Delete(pod.Name, nil); err != nil { - return nil, err - } - } - } - - return oldRc, nil -} - -type updateRcFunc func(controller *api.ReplicationController) - -// updateRcWithRetries retries updating the given rc on conflict with the following steps: -// 1. Get latest resource -// 2. applyUpdate -// 3. Update the resource -func updateRcWithRetries(c client.Interface, namespace string, rc *api.ReplicationController, applyUpdate updateRcFunc) (*api.ReplicationController, error) { - // Deep copy the rc in case we failed on Get during retry loop - obj, err := api.Scheme.Copy(rc) - if err != nil { - return nil, fmt.Errorf("failed to deep copy rc before updating it: %v", err) - } - oldRc := obj.(*api.ReplicationController) - err = client.RetryOnConflict(client.DefaultBackoff, func() (e error) { - // Apply the update, then attempt to push it to the apiserver. - applyUpdate(rc) - if rc, e = c.ReplicationControllers(namespace).Update(rc); e == nil { - // rc contains the latest controller post update - return - } - updateErr := e - // Update the controller with the latest resource version, if the update failed we - // can't trust rc so use oldRc.Name. - if rc, e = c.ReplicationControllers(namespace).Get(oldRc.Name); e != nil { - // The Get failed: Value in rc cannot be trusted. - rc = oldRc - } - // Only return the error from update - return updateErr - }) - // If the error is non-nil the returned controller cannot be trusted, if it is nil, the returned - // controller contains the applied update. - return rc, err -} - -type updatePodFunc func(controller *api.Pod) - -// updatePodWithRetries retries updating the given pod on conflict with the following steps: -// 1. Get latest resource -// 2. applyUpdate -// 3. 
Update the resource -func updatePodWithRetries(c client.Interface, namespace string, pod *api.Pod, applyUpdate updatePodFunc) (*api.Pod, error) { - // Deep copy the pod in case we failed on Get during retry loop - obj, err := api.Scheme.Copy(pod) - if err != nil { - return nil, fmt.Errorf("failed to deep copy pod before updating it: %v", err) - } - oldPod := obj.(*api.Pod) - err = client.RetryOnConflict(client.DefaultBackoff, func() (e error) { - // Apply the update, then attempt to push it to the apiserver. - applyUpdate(pod) - if pod, e = c.Pods(namespace).Update(pod); e == nil { - return - } - updateErr := e - if pod, e = c.Pods(namespace).Get(oldPod.Name); e != nil { - pod = oldPod - } - // Only return the error from update - return updateErr - }) - // If the error is non-nil the returned pod cannot be trusted, if it is nil, the returned - // controller contains the applied update. - return pod, err -} - -func FindSourceController(r client.ReplicationControllersNamespacer, namespace, name string) (*api.ReplicationController, error) { - list, err := r.ReplicationControllers(namespace).List(api.ListOptions{}) - if err != nil { - return nil, err - } - for ix := range list.Items { - rc := &list.Items[ix] - if rc.Annotations != nil && strings.HasPrefix(rc.Annotations[sourceIdAnnotation], name) { - return rc, nil - } - } - return nil, fmt.Errorf("couldn't find a replication controller with source id == %s/%s", namespace, name) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go b/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go deleted file mode 100644 index dc39865d4..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/rollout_status.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/extensions" - client "k8s.io/kubernetes/pkg/client/unversioned" -) - -// StatusViewer provides an interface for resources that provides rollout status. 
-type StatusViewer interface { - Status(namespace, name string) (string, bool, error) -} - -func StatusViewerFor(kind unversioned.GroupKind, c client.Interface) (StatusViewer, error) { - switch kind { - case extensions.Kind("Deployment"): - return &DeploymentStatusViewer{c.Extensions()}, nil - } - return nil, fmt.Errorf("no status viewer has been implemented for %v", kind) -} - -type DeploymentStatusViewer struct { - c client.ExtensionsInterface -} - -// Status returns a message describing deployment status, and a bool value indicating if the status is considered done -func (s *DeploymentStatusViewer) Status(namespace, name string) (string, bool, error) { - deployment, err := s.c.Deployments(namespace).Get(name) - if err != nil { - return "", false, err - } - if deployment.Generation <= deployment.Status.ObservedGeneration { - if deployment.Status.UpdatedReplicas == deployment.Spec.Replicas { - return fmt.Sprintf("deployment %s successfully rolled out\n", name), true, nil - } - return fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas have been updated...\n", deployment.Status.UpdatedReplicas, deployment.Spec.Replicas), false, nil - } - return fmt.Sprintf("Waiting for deployment spec update to be observed...\n"), false, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/run.go b/vendor/k8s.io/kubernetes/pkg/kubectl/run.go deleted file mode 100644 index 4e076c071..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/run.go +++ /dev/null @@ -1,869 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "fmt" - "strconv" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/apis/batch" - batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/validation" -) - -type DeploymentV1Beta1 struct{} - -func (DeploymentV1Beta1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"replicas", true}, - {"image", true}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"tty", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - } -} - -func (DeploymentV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - args, err := getArgs(genericParams) - if err != nil { - return nil, err - } - - envs, err := getEnvs(genericParams) - if err != nil { - return nil, err - } - - params, err := getParams(genericParams) - if err != nil { - return nil, err - } - - name, err := getName(params) - if err != nil { - return nil, err - } - - labels, err := getLabels(params, true, name) - if err != nil { - return nil, err - } - - count, err := strconv.Atoi(params["replicas"]) - if err != nil { - return nil, err - } - - podSpec, err := makePodSpec(params, name) - if err != nil { - return nil, err - } - - if err = updatePodContainers(params, args, envs, podSpec); err != nil { - return nil, err - } - - if err := updatePodPorts(params, podSpec); err != nil { - return nil, err - } - - // TODO: use versioned types for generators so that we don't need to - // set default values manually (see issue #17384) - deployment := extensions.Deployment{ - ObjectMeta: api.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: extensions.DeploymentSpec{ - Replicas: int32(count), - Selector: &unversioned.LabelSelector{MatchLabels: labels}, - Template: api.PodTemplateSpec{ - ObjectMeta: api.ObjectMeta{ - Labels: labels, - }, - Spec: *podSpec, - }, - }, - } - return &deployment, nil -} - -func getLabels(params map[string]string, defaultRunLabel bool, name string) (map[string]string, error) { - labelString, found := params["labels"] - var labels map[string]string - var err error - if found && len(labelString) > 0 { - labels, err = ParseLabels(labelString) - if err != nil { - return nil, err - } - } else if defaultRunLabel { - labels = map[string]string{ - "run": name, - } - } - return labels, nil -} - -func getName(params map[string]string) (string, error) { - name, found := params["name"] - if !found || len(name) == 0 { - name, found = params["default-name"] - if !found || len(name) == 0 { - return "", fmt.Errorf("'name' is a required parameter.") - } - } - return name, nil -} - -func getParams(genericParams map[string]interface{}) (map[string]string, error) { - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - return params, nil -} - -func getArgs(genericParams map[string]interface{}) ([]string, error) { - args := []string{} - val, found := genericParams["args"] - if found { - var isArray bool - args, isArray = val.([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found: %v", val) - } - delete(genericParams, 
"args") - } - return args, nil -} - -func getEnvs(genericParams map[string]interface{}) ([]api.EnvVar, error) { - var envs []api.EnvVar - envStrings, found := genericParams["env"] - if found { - if envStringArray, isArray := envStrings.([]string); isArray { - var err error - envs, err = parseEnvs(envStringArray) - if err != nil { - return nil, err - } - delete(genericParams, "env") - } else { - return nil, fmt.Errorf("expected []string, found: %v", envStrings) - } - } - return envs, nil -} - -func getV1Envs(genericParams map[string]interface{}) ([]v1.EnvVar, error) { - var envs []v1.EnvVar - envStrings, found := genericParams["env"] - if found { - if envStringArray, isArray := envStrings.([]string); isArray { - var err error - envs, err = parseV1Envs(envStringArray) - if err != nil { - return nil, err - } - delete(genericParams, "env") - } else { - return nil, fmt.Errorf("expected []string, found: %v", envStrings) - } - } - return envs, nil -} - -type JobV1Beta1 struct{} - -func (JobV1Beta1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"image", true}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"leave-stdin-open", false}, - {"tty", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - {"restart", false}, - } -} - -func (JobV1Beta1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - args, err := getArgs(genericParams) - if err != nil { - return nil, err - } - - envs, err := getEnvs(genericParams) - if err != nil { - return nil, err - } - - params, err := getParams(genericParams) - if err != nil { - return nil, err - } - - name, err := getName(params) - if err != nil { - return nil, err - } - - labels, err := getLabels(params, true, name) - if err != nil { - return nil, err - } - - podSpec, err := makePodSpec(params, name) - if err != nil { - return nil, err - } - - if err = updatePodContainers(params, args, envs, podSpec); err != nil { - return nil, err - } - - leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false) - if err != nil { - return nil, err - } - podSpec.Containers[0].StdinOnce = !leaveStdinOpen && podSpec.Containers[0].Stdin - - if err := updatePodPorts(params, podSpec); err != nil { - return nil, err - } - - restartPolicy := api.RestartPolicy(params["restart"]) - if len(restartPolicy) == 0 { - restartPolicy = api.RestartPolicyNever - } - podSpec.RestartPolicy = restartPolicy - - job := batch.Job{ - ObjectMeta: api.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: batch.JobSpec{ - Selector: &unversioned.LabelSelector{ - MatchLabels: labels, - }, - ManualSelector: newBool(true), - Template: api.PodTemplateSpec{ - ObjectMeta: api.ObjectMeta{ - Labels: labels, - }, - Spec: *podSpec, - }, - }, - } - - return &job, nil -} - -type JobV1 struct{} - -func (JobV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"image", true}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"leave-stdin-open", false}, - {"tty", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - {"restart", false}, - } -} - -func (JobV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - args, err := getArgs(genericParams) - if err != nil { - return nil, err - } - - envs, err := getV1Envs(genericParams) - if err != nil { - return nil, err - } 
- - params, err := getParams(genericParams) - if err != nil { - return nil, err - } - - name, err := getName(params) - if err != nil { - return nil, err - } - - labels, err := getLabels(params, true, name) - if err != nil { - return nil, err - } - - podSpec, err := makeV1PodSpec(params, name) - if err != nil { - return nil, err - } - - if err = updateV1PodContainers(params, args, envs, podSpec); err != nil { - return nil, err - } - - leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false) - if err != nil { - return nil, err - } - podSpec.Containers[0].StdinOnce = !leaveStdinOpen && podSpec.Containers[0].Stdin - - if err := updateV1PodPorts(params, podSpec); err != nil { - return nil, err - } - - restartPolicy := v1.RestartPolicy(params["restart"]) - if len(restartPolicy) == 0 { - restartPolicy = v1.RestartPolicyNever - } - podSpec.RestartPolicy = restartPolicy - - job := batchv1.Job{ - ObjectMeta: v1.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: batchv1.JobSpec{ - Template: v1.PodTemplateSpec{ - ObjectMeta: v1.ObjectMeta{ - Labels: labels, - }, - Spec: *podSpec, - }, - }, - } - - return &job, nil -} - -type BasicReplicationController struct{} - -func (BasicReplicationController) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"replicas", true}, - {"image", true}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"tty", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - } -} - -// populateResourceList takes strings of form <resourceName1>=<value1>,<resourceName1>=<value2> -func populateResourceList(spec string) (api.ResourceList, error) { - // empty input gets a nil response to preserve generator test expected behaviors - if spec == "" { - return nil, nil - } - - result := api.ResourceList{} - resourceStatements := strings.Split(spec, ",") - for _, resourceStatement := range resourceStatements { - parts := strings.Split(resourceStatement, "=") - if len(parts) != 2 { - return nil, fmt.Errorf("Invalid argument syntax %v, expected <resource>=<value>", resourceStatement) - } - resourceName := api.ResourceName(parts[0]) - resourceQuantity, err := resource.ParseQuantity(parts[1]) - if err != nil { - return nil, err - } - result[resourceName] = resourceQuantity - } - return result, nil -} - -// populateResourceList takes strings of form <resourceName1>=<value1>,<resourceName1>=<value2> -func populateV1ResourceList(spec string) (v1.ResourceList, error) { - // empty input gets a nil response to preserve generator test expected behaviors - if spec == "" { - return nil, nil - } - - result := v1.ResourceList{} - resourceStatements := strings.Split(spec, ",") - for _, resourceStatement := range resourceStatements { - parts := strings.Split(resourceStatement, "=") - if len(parts) != 2 { - return nil, fmt.Errorf("Invalid argument syntax %v, expected <resource>=<value>", resourceStatement) - } - resourceName := v1.ResourceName(parts[0]) - resourceQuantity, err := resource.ParseQuantity(parts[1]) - if err != nil { - return nil, err - } - result[resourceName] = resourceQuantity - } - return result, nil -} - -// HandleResourceRequirements parses the limits and requests parameters if specified -func HandleResourceRequirements(params map[string]string) (api.ResourceRequirements, error) { - result := api.ResourceRequirements{} - limits, err := populateResourceList(params["limits"]) - if err != nil { - return result, err - } - 
result.Limits = limits - requests, err := populateResourceList(params["requests"]) - if err != nil { - return result, err - } - result.Requests = requests - return result, nil -} - -// HandleResourceRequirements parses the limits and requests parameters if specified -func handleV1ResourceRequirements(params map[string]string) (v1.ResourceRequirements, error) { - result := v1.ResourceRequirements{} - limits, err := populateV1ResourceList(params["limits"]) - if err != nil { - return result, err - } - result.Limits = limits - requests, err := populateV1ResourceList(params["requests"]) - if err != nil { - return result, err - } - result.Requests = requests - return result, nil -} - -func makePodSpec(params map[string]string, name string) (*api.PodSpec, error) { - stdin, err := GetBool(params, "stdin", false) - if err != nil { - return nil, err - } - - tty, err := GetBool(params, "tty", false) - if err != nil { - return nil, err - } - - resourceRequirements, err := HandleResourceRequirements(params) - if err != nil { - return nil, err - } - - spec := api.PodSpec{ - Containers: []api.Container{ - { - Name: name, - Image: params["image"], - Stdin: stdin, - TTY: tty, - Resources: resourceRequirements, - }, - }, - } - return &spec, nil -} - -func makeV1PodSpec(params map[string]string, name string) (*v1.PodSpec, error) { - stdin, err := GetBool(params, "stdin", false) - if err != nil { - return nil, err - } - - tty, err := GetBool(params, "tty", false) - if err != nil { - return nil, err - } - - resourceRequirements, err := handleV1ResourceRequirements(params) - if err != nil { - return nil, err - } - - spec := v1.PodSpec{ - Containers: []v1.Container{ - { - Name: name, - Image: params["image"], - Stdin: stdin, - TTY: tty, - Resources: resourceRequirements, - }, - }, - } - return &spec, nil -} - -func (BasicReplicationController) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - args, err := getArgs(genericParams) - if err != nil { - return nil, err - } - - envs, err := getEnvs(genericParams) - if err != nil { - return nil, err - } - - params, err := getParams(genericParams) - if err != nil { - return nil, err - } - - name, err := getName(params) - if err != nil { - return nil, err - } - - labels, err := getLabels(params, true, name) - if err != nil { - return nil, err - } - - count, err := strconv.Atoi(params["replicas"]) - if err != nil { - return nil, err - } - - podSpec, err := makePodSpec(params, name) - if err != nil { - return nil, err - } - - if err = updatePodContainers(params, args, envs, podSpec); err != nil { - return nil, err - } - - if err := updatePodPorts(params, podSpec); err != nil { - return nil, err - } - - controller := api.ReplicationController{ - ObjectMeta: api.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: api.ReplicationControllerSpec{ - Replicas: int32(count), - Selector: labels, - Template: &api.PodTemplateSpec{ - ObjectMeta: api.ObjectMeta{ - Labels: labels, - }, - Spec: *podSpec, - }, - }, - } - return &controller, nil -} - -func updatePodContainers(params map[string]string, args []string, envs []api.EnvVar, podSpec *api.PodSpec) error { - if len(args) > 0 { - command, err := GetBool(params, "command", false) - if err != nil { - return err - } - if command { - podSpec.Containers[0].Command = args - } else { - podSpec.Containers[0].Args = args - } - } - - if len(envs) > 0 { - podSpec.Containers[0].Env = envs - } - return nil -} - -func updateV1PodContainers(params map[string]string, args []string, envs []v1.EnvVar, podSpec *v1.PodSpec) 
error { - if len(args) > 0 { - command, err := GetBool(params, "command", false) - if err != nil { - return err - } - if command { - podSpec.Containers[0].Command = args - } else { - podSpec.Containers[0].Args = args - } - } - - if len(envs) > 0 { - podSpec.Containers[0].Env = envs - } - return nil -} - -func updatePodPorts(params map[string]string, podSpec *api.PodSpec) (err error) { - port := -1 - hostPort := -1 - if len(params["port"]) > 0 { - port, err = strconv.Atoi(params["port"]) - if err != nil { - return err - } - } - - if len(params["hostport"]) > 0 { - hostPort, err = strconv.Atoi(params["hostport"]) - if err != nil { - return err - } - if hostPort > 0 && port < 0 { - return fmt.Errorf("--hostport requires --port to be specified") - } - } - - // Don't include the port if it was not specified. - if port > 0 { - podSpec.Containers[0].Ports = []api.ContainerPort{ - { - ContainerPort: int32(port), - }, - } - if hostPort > 0 { - podSpec.Containers[0].Ports[0].HostPort = int32(hostPort) - } - } - return nil -} - -func updateV1PodPorts(params map[string]string, podSpec *v1.PodSpec) (err error) { - port := -1 - hostPort := -1 - if len(params["port"]) > 0 { - port, err = strconv.Atoi(params["port"]) - if err != nil { - return err - } - } - - if len(params["hostport"]) > 0 { - hostPort, err = strconv.Atoi(params["hostport"]) - if err != nil { - return err - } - if hostPort > 0 && port < 0 { - return fmt.Errorf("--hostport requires --port to be specified") - } - } - - // Don't include the port if it was not specified. - if port > 0 { - podSpec.Containers[0].Ports = []v1.ContainerPort{ - { - ContainerPort: int32(port), - }, - } - if hostPort > 0 { - podSpec.Containers[0].Ports[0].HostPort = int32(hostPort) - } - } - return nil -} - -type BasicPod struct{} - -func (BasicPod) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"labels", false}, - {"default-name", false}, - {"name", true}, - {"image", true}, - {"port", false}, - {"hostport", false}, - {"stdin", false}, - {"leave-stdin-open", false}, - {"tty", false}, - {"restart", false}, - {"command", false}, - {"args", false}, - {"env", false}, - {"requests", false}, - {"limits", false}, - } -} - -func (BasicPod) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - args, err := getArgs(genericParams) - if err != nil { - return nil, err - } - - envs, err := getEnvs(genericParams) - if err != nil { - return nil, err - } - - params, err := getParams(genericParams) - if err != nil { - return nil, err - } - - name, err := getName(params) - if err != nil { - return nil, err - } - - labels, err := getLabels(params, false, name) - if err != nil { - return nil, err - } - - stdin, err := GetBool(params, "stdin", false) - if err != nil { - return nil, err - } - leaveStdinOpen, err := GetBool(params, "leave-stdin-open", false) - if err != nil { - return nil, err - } - - tty, err := GetBool(params, "tty", false) - if err != nil { - return nil, err - } - - resourceRequirements, err := HandleResourceRequirements(params) - if err != nil { - return nil, err - } - - restartPolicy := api.RestartPolicy(params["restart"]) - if len(restartPolicy) == 0 { - restartPolicy = api.RestartPolicyAlways - } - pod := api.Pod{ - ObjectMeta: api.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: api.PodSpec{ - Containers: []api.Container{ - { - Name: name, - Image: params["image"], - ImagePullPolicy: api.PullIfNotPresent, - Stdin: stdin, - StdinOnce: !leaveStdinOpen && stdin, - TTY: tty, - Resources: resourceRequirements, - }, - }, - 
DNSPolicy: api.DNSClusterFirst, - RestartPolicy: restartPolicy, - }, - } - if err = updatePodContainers(params, args, envs, &pod.Spec); err != nil { - return nil, err - } - - if err := updatePodPorts(params, &pod.Spec); err != nil { - return nil, err - } - return &pod, nil -} - -func parseEnvs(envArray []string) ([]api.EnvVar, error) { - envs := make([]api.EnvVar, 0, len(envArray)) - for _, env := range envArray { - pos := strings.Index(env, "=") - if pos == -1 { - return nil, fmt.Errorf("invalid env: %v", env) - } - name := env[:pos] - value := env[pos+1:] - if len(name) == 0 || !validation.IsCIdentifier(name) || len(value) == 0 { - return nil, fmt.Errorf("invalid env: %v", env) - } - envVar := api.EnvVar{Name: name, Value: value} - envs = append(envs, envVar) - } - return envs, nil -} - -func parseV1Envs(envArray []string) ([]v1.EnvVar, error) { - envs := []v1.EnvVar{} - for _, env := range envArray { - pos := strings.Index(env, "=") - if pos == -1 { - return nil, fmt.Errorf("invalid env: %v", env) - } - name := env[:pos] - value := env[pos+1:] - if len(name) == 0 || !validation.IsCIdentifier(name) || len(value) == 0 { - return nil, fmt.Errorf("invalid env: %v", env) - } - envVar := v1.EnvVar{Name: name, Value: value} - envs = append(envs, envVar) - } - return envs, nil -} - -func newBool(val bool) *bool { - p := new(bool) - *p = val - return p -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go b/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go deleted file mode 100644 index bef93d990..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/scale.go +++ /dev/null @@ -1,382 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "strconv" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/extensions" - client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/util/wait" -) - -// Scaler provides an interface for resources that can be scaled. -type Scaler interface { - // Scale scales the named resource after checking preconditions. 
It optionally - // retries in the event of resource version mismatch (if retry is not nil), - // and optionally waits until the status of the resource matches newSize (if wait is not nil) - Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, wait *RetryParams) error - // ScaleSimple does a simple one-shot attempt at scaling - not useful on it's own, but - // a necessary building block for Scale - ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) error -} - -func ScalerFor(kind unversioned.GroupKind, c client.Interface) (Scaler, error) { - switch kind { - case api.Kind("ReplicationController"): - return &ReplicationControllerScaler{c}, nil - case extensions.Kind("ReplicaSet"): - return &ReplicaSetScaler{c.Extensions()}, nil - case extensions.Kind("Job"), batch.Kind("Job"): - return &JobScaler{c.Batch()}, nil // Either kind of job can be scaled with Batch interface. - case extensions.Kind("Deployment"): - return &DeploymentScaler{c.Extensions()}, nil - } - return nil, fmt.Errorf("no scaler has been implemented for %q", kind) -} - -// ScalePrecondition describes a condition that must be true for the scale to take place -// If CurrentSize == -1, it is ignored. -// If CurrentResourceVersion is the empty string, it is ignored. -// Otherwise they must equal the values in the resource for it to be valid. -type ScalePrecondition struct { - Size int - ResourceVersion string -} - -// A PreconditionError is returned when a resource fails to match -// the scale preconditions passed to kubectl. -type PreconditionError struct { - Precondition string - ExpectedValue string - ActualValue string -} - -func (pe PreconditionError) Error() string { - return fmt.Sprintf("Expected %s to be %s, was %s", pe.Precondition, pe.ExpectedValue, pe.ActualValue) -} - -type ScaleErrorType int - -const ( - ScaleGetFailure ScaleErrorType = iota - ScaleUpdateFailure - ScaleUpdateInvalidFailure -) - -// A ScaleError is returned when a scale request passes -// preconditions but fails to actually scale the controller. -type ScaleError struct { - FailureType ScaleErrorType - ResourceVersion string - ActualError error -} - -func (c ScaleError) Error() string { - return fmt.Sprintf( - "Scaling the resource failed with: %v; Current resource version %s", - c.ActualError, c.ResourceVersion) -} - -// RetryParams encapsulates the retry parameters used by kubectl's scaler. -type RetryParams struct { - Interval, Timeout time.Duration -} - -func NewRetryParams(interval, timeout time.Duration) *RetryParams { - return &RetryParams{interval, timeout} -} - -// ScaleCondition is a closure around Scale that facilitates retries via util.wait -func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name string, count uint) wait.ConditionFunc { - return func() (bool, error) { - err := r.ScaleSimple(namespace, name, precondition, count) - switch e, _ := err.(ScaleError); err.(type) { - case nil: - return true, nil - case ScaleError: - // if it's invalid we shouldn't keep waiting - if e.FailureType == ScaleUpdateInvalidFailure { - return false, err - } - if e.FailureType == ScaleUpdateFailure { - return false, nil - } - } - return false, err - } -} - -// ValidateReplicationController ensures that the preconditions match. 
Returns nil if they are valid, an error otherwise -func (precondition *ScalePrecondition) ValidateReplicationController(controller *api.ReplicationController) error { - if precondition.Size != -1 && int(controller.Spec.Replicas) != precondition.Size { - return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(controller.Spec.Replicas))} - } - if len(precondition.ResourceVersion) != 0 && controller.ResourceVersion != precondition.ResourceVersion { - return PreconditionError{"resource version", precondition.ResourceVersion, controller.ResourceVersion} - } - return nil -} - -type ReplicationControllerScaler struct { - c client.Interface -} - -func (scaler *ReplicationControllerScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) error { - controller, err := scaler.c.ReplicationControllers(namespace).Get(name) - if err != nil { - return ScaleError{ScaleGetFailure, "Unknown", err} - } - if preconditions != nil { - if err := preconditions.ValidateReplicationController(controller); err != nil { - return err - } - } - controller.Spec.Replicas = int32(newSize) - // TODO: do retry on 409 errors here? - if _, err := scaler.c.ReplicationControllers(namespace).Update(controller); err != nil { - if errors.IsInvalid(err) { - return ScaleError{ScaleUpdateInvalidFailure, controller.ResourceVersion, err} - } - return ScaleError{ScaleUpdateFailure, controller.ResourceVersion, err} - } - return nil -} - -// Scale updates a ReplicationController to a new size, with optional precondition check (if preconditions is not nil), -// optional retries (if retry is not nil), and then optionally waits for it's replica count to reach the new value -// (if wait is not nil). -func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { - if preconditions == nil { - preconditions = &ScalePrecondition{-1, ""} - } - if retry == nil { - // Make it try only once, immediately - retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} - } - cond := ScaleCondition(scaler, preconditions, namespace, name, newSize) - if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil { - return err - } - if waitForReplicas != nil { - rc, err := scaler.c.ReplicationControllers(namespace).Get(name) - if err != nil { - return err - } - return wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, - client.ControllerHasDesiredReplicas(scaler.c, rc)) - } - return nil -} - -// ValidateReplicaSet ensures that the preconditions match. 
Returns nil if they are valid, an error otherwise -func (precondition *ScalePrecondition) ValidateReplicaSet(replicaSet *extensions.ReplicaSet) error { - if precondition.Size != -1 && int(replicaSet.Spec.Replicas) != precondition.Size { - return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(replicaSet.Spec.Replicas))} - } - if len(precondition.ResourceVersion) != 0 && replicaSet.ResourceVersion != precondition.ResourceVersion { - return PreconditionError{"resource version", precondition.ResourceVersion, replicaSet.ResourceVersion} - } - return nil -} - -type ReplicaSetScaler struct { - c client.ExtensionsInterface -} - -func (scaler *ReplicaSetScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) error { - rs, err := scaler.c.ReplicaSets(namespace).Get(name) - if err != nil { - return ScaleError{ScaleGetFailure, "Unknown", err} - } - if preconditions != nil { - if err := preconditions.ValidateReplicaSet(rs); err != nil { - return err - } - } - rs.Spec.Replicas = int32(newSize) - // TODO: do retry on 409 errors here? - if _, err := scaler.c.ReplicaSets(namespace).Update(rs); err != nil { - if errors.IsInvalid(err) { - return ScaleError{ScaleUpdateInvalidFailure, rs.ResourceVersion, err} - } - return ScaleError{ScaleUpdateFailure, rs.ResourceVersion, err} - } - return nil -} - -// Scale updates a ReplicaSet to a new size, with optional precondition check (if preconditions is -// not nil), optional retries (if retry is not nil), and then optionally waits for it's replica -// count to reach the new value (if wait is not nil). -func (scaler *ReplicaSetScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { - if preconditions == nil { - preconditions = &ScalePrecondition{-1, ""} - } - if retry == nil { - // Make it try only once, immediately - retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} - } - cond := ScaleCondition(scaler, preconditions, namespace, name, newSize) - if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil { - return err - } - if waitForReplicas != nil { - rs, err := scaler.c.ReplicaSets(namespace).Get(name) - if err != nil { - return err - } - return wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, - client.ReplicaSetHasDesiredReplicas(scaler.c, rs)) - } - return nil -} - -// ValidateJob ensures that the preconditions match. Returns nil if they are valid, an error otherwise. -func (precondition *ScalePrecondition) ValidateJob(job *batch.Job) error { - if precondition.Size != -1 && job.Spec.Parallelism == nil { - return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), "nil"} - } - if precondition.Size != -1 && int(*job.Spec.Parallelism) != precondition.Size { - return PreconditionError{"parallelism", strconv.Itoa(precondition.Size), strconv.Itoa(int(*job.Spec.Parallelism))} - } - if len(precondition.ResourceVersion) != 0 && job.ResourceVersion != precondition.ResourceVersion { - return PreconditionError{"resource version", precondition.ResourceVersion, job.ResourceVersion} - } - return nil -} - -type JobScaler struct { - c client.BatchInterface -} - -// ScaleSimple is responsible for updating job's parallelism. 
-func (scaler *JobScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) error { - job, err := scaler.c.Jobs(namespace).Get(name) - if err != nil { - return ScaleError{ScaleGetFailure, "Unknown", err} - } - if preconditions != nil { - if err := preconditions.ValidateJob(job); err != nil { - return err - } - } - parallelism := int32(newSize) - job.Spec.Parallelism = ¶llelism - if _, err := scaler.c.Jobs(namespace).Update(job); err != nil { - if errors.IsInvalid(err) { - return ScaleError{ScaleUpdateInvalidFailure, job.ResourceVersion, err} - } - return ScaleError{ScaleUpdateFailure, job.ResourceVersion, err} - } - return nil -} - -// Scale updates a Job to a new size, with optional precondition check (if preconditions is not nil), -// optional retries (if retry is not nil), and then optionally waits for parallelism to reach desired -// number, which can be less than requested based on job's current progress. -func (scaler *JobScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { - if preconditions == nil { - preconditions = &ScalePrecondition{-1, ""} - } - if retry == nil { - // Make it try only once, immediately - retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} - } - cond := ScaleCondition(scaler, preconditions, namespace, name, newSize) - if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil { - return err - } - if waitForReplicas != nil { - job, err := scaler.c.Jobs(namespace).Get(name) - if err != nil { - return err - } - return wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, - client.JobHasDesiredParallelism(scaler.c, job)) - } - return nil -} - -// ValidateDeployment ensures that the preconditions match. Returns nil if they are valid, an error otherwise. -func (precondition *ScalePrecondition) ValidateDeployment(deployment *extensions.Deployment) error { - if precondition.Size != -1 && int(deployment.Spec.Replicas) != precondition.Size { - return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(int(deployment.Spec.Replicas))} - } - if len(precondition.ResourceVersion) != 0 && deployment.ResourceVersion != precondition.ResourceVersion { - return PreconditionError{"resource version", precondition.ResourceVersion, deployment.ResourceVersion} - } - return nil -} - -type DeploymentScaler struct { - c client.ExtensionsInterface -} - -// ScaleSimple is responsible for updating a deployment's desired replicas count. -func (scaler *DeploymentScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) error { - deployment, err := scaler.c.Deployments(namespace).Get(name) - if err != nil { - return ScaleError{ScaleGetFailure, "Unknown", err} - } - if preconditions != nil { - if err := preconditions.ValidateDeployment(deployment); err != nil { - return err - } - } - - // TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528). - // For now I'm falling back to regular Deployment update operation. 
- deployment.Spec.Replicas = int32(newSize) - if _, err := scaler.c.Deployments(namespace).Update(deployment); err != nil { - if errors.IsInvalid(err) { - return ScaleError{ScaleUpdateInvalidFailure, deployment.ResourceVersion, err} - } - return ScaleError{ScaleUpdateFailure, deployment.ResourceVersion, err} - } - return nil -} - -// Scale updates a deployment to a new size, with optional precondition check (if preconditions is not nil), -// optional retries (if retry is not nil), and then optionally waits for the status to reach desired count. -func (scaler *DeploymentScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error { - if preconditions == nil { - preconditions = &ScalePrecondition{-1, ""} - } - if retry == nil { - // Make it try only once, immediately - retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond} - } - cond := ScaleCondition(scaler, preconditions, namespace, name, newSize) - if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil { - return err - } - if waitForReplicas != nil { - deployment, err := scaler.c.Deployments(namespace).Get(name) - if err != nil { - return err - } - return wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, - client.DeploymentHasDesiredReplicas(scaler.c, deployment)) - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/secret.go b/vendor/k8s.io/kubernetes/pkg/kubectl/secret.go deleted file mode 100644 index e5b7cc33e..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/secret.go +++ /dev/null @@ -1,207 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/validation" - "k8s.io/kubernetes/pkg/runtime" -) - -// SecretGeneratorV1 supports stable generation of an opaque secret -type SecretGeneratorV1 struct { - // Name of secret (required) - Name string - // Type of secret (optional) - Type string - // FileSources to derive the secret from (optional) - FileSources []string - // LiteralSources to derive the secret from (optional) - LiteralSources []string -} - -// Ensure it supports the generator pattern that uses parameter injection -var _ Generator = &SecretGeneratorV1{} - -// Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &SecretGeneratorV1{} - -// Generate returns a secret using the specified parameters -func (s SecretGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) - if err != nil { - return nil, err - } - delegate := &SecretGeneratorV1{} - fromFileStrings, found := genericParams["from-file"] - if found { - fromFileArray, isArray := fromFileStrings.([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found :%v", fromFileStrings) - } - delegate.FileSources = fromFileArray - delete(genericParams, "from-file") - } - fromLiteralStrings, found := genericParams["from-literal"] - if found { - fromLiteralArray, isArray := fromLiteralStrings.([]string) - if !isArray { - return nil, fmt.Errorf("expected []string, found :%v", fromFileStrings) - } - delegate.LiteralSources = fromLiteralArray - delete(genericParams, "from-literal") - } - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - delegate.Name = params["name"] - delegate.Type = params["type"] - return delegate.StructuredGenerate() -} - -// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern -func (s SecretGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"type", false}, - {"from-file", false}, - {"from-literal", false}, - {"force", false}, - } -} - -// StructuredGenerate outputs a secret object using the configured fields -func (s SecretGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := s.validate(); err != nil { - return nil, err - } - secret := &api.Secret{} - secret.Name = s.Name - secret.Data = map[string][]byte{} - if len(s.Type) > 0 { - secret.Type = api.SecretType(s.Type) - } - if len(s.FileSources) > 0 { - if err := handleFromFileSources(secret, s.FileSources); err != nil { - return nil, err - } - } - if len(s.LiteralSources) > 0 { - if err := handleFromLiteralSources(secret, s.LiteralSources); err != nil { - return nil, err - } - } - return secret, nil -} - -// validate validates required fields are set to support structured generation -func (s SecretGeneratorV1) validate() error { - if len(s.Name) == 0 { - return fmt.Errorf("name must be specified") - } - return nil -} - -// handleFromLiteralSources adds the specified literal source information into the provided secret -func handleFromLiteralSources(secret *api.Secret, literalSources []string) error { - for _, literalSource := range literalSources { - keyName, value, err := parseLiteralSource(literalSource) - if 
err != nil { - return err - } - err = addKeyFromLiteralToSecret(secret, keyName, []byte(value)) - if err != nil { - return err - } - } - return nil -} - -// handleFromFileSources adds the specified file source information into the provided secret -func handleFromFileSources(secret *api.Secret, fileSources []string) error { - for _, fileSource := range fileSources { - keyName, filePath, err := parseFileSource(fileSource) - if err != nil { - return err - } - info, err := os.Stat(filePath) - if err != nil { - switch err := err.(type) { - case *os.PathError: - return fmt.Errorf("error reading %s: %v", filePath, err.Err) - default: - return fmt.Errorf("error reading %s: %v", filePath, err) - } - } - if info.IsDir() { - if strings.Contains(fileSource, "=") { - return fmt.Errorf("cannot give a key name for a directory path.") - } - fileList, err := ioutil.ReadDir(filePath) - if err != nil { - return fmt.Errorf("error listing files in %s: %v", filePath, err) - } - for _, item := range fileList { - itemPath := path.Join(filePath, item.Name()) - if item.Mode().IsRegular() { - keyName = item.Name() - err = addKeyFromFileToSecret(secret, keyName, itemPath) - if err != nil { - return err - } - } - } - } else { - err = addKeyFromFileToSecret(secret, keyName, filePath) - if err != nil { - return err - } - } - } - - return nil -} - -func addKeyFromFileToSecret(secret *api.Secret, keyName, filePath string) error { - data, err := ioutil.ReadFile(filePath) - if err != nil { - return err - } - return addKeyFromLiteralToSecret(secret, keyName, data) -} - -func addKeyFromLiteralToSecret(secret *api.Secret, keyName string, data []byte) error { - if !validation.IsSecretKey(keyName) { - return fmt.Errorf("%v is not a valid key name for a secret", keyName) - } - if _, entryExists := secret.Data[keyName]; entryExists { - return fmt.Errorf("cannot add key %s, another key by that name already exists: %v.", keyName, secret.Data) - } - secret.Data[keyName] = data - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry.go b/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry.go deleted file mode 100644 index 773bde386..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_docker_registry.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "encoding/json" - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/credentialprovider" - "k8s.io/kubernetes/pkg/runtime" -) - -// SecretForDockerRegistryGeneratorV1 supports stable generation of a docker registry secret -type SecretForDockerRegistryGeneratorV1 struct { - // Name of secret (required) - Name string - // Username for registry (required) - Username string - // Email for registry (required) - Email string - // Password for registry (required) - Password string - // Server for registry (required) - Server string -} - -// Ensure it supports the generator pattern that uses parameter injection -var _ Generator = &SecretForDockerRegistryGeneratorV1{} - -// Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &SecretForDockerRegistryGeneratorV1{} - -// Generate returns a secret using the specified parameters -func (s SecretForDockerRegistryGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) - if err != nil { - return nil, err - } - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - delegate := &SecretForDockerRegistryGeneratorV1{ - Name: params["name"], - Username: params["docker-username"], - Email: params["docker-email"], - Password: params["docker-password"], - Server: params["docker-server"], - } - return delegate.StructuredGenerate() -} - -// StructuredGenerate outputs a secret object using the configured fields -func (s SecretForDockerRegistryGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := s.validate(); err != nil { - return nil, err - } - dockercfgContent, err := handleDockercfgContent(s.Username, s.Password, s.Email, s.Server) - if err != nil { - return nil, err - } - secret := &api.Secret{} - secret.Name = s.Name - secret.Type = api.SecretTypeDockercfg - secret.Data = map[string][]byte{} - secret.Data[api.DockerConfigKey] = dockercfgContent - return secret, nil -} - -// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern -func (s SecretForDockerRegistryGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"docker-username", true}, - {"docker-email", true}, - {"docker-password", true}, - {"docker-server", true}, - } -} - -// validate validates required fields are set to support structured generation -func (s SecretForDockerRegistryGeneratorV1) validate() error { - if len(s.Name) == 0 { - return fmt.Errorf("name must be specified") - } - if len(s.Username) == 0 { - return fmt.Errorf("username must be specified") - } - if len(s.Email) == 0 { - return fmt.Errorf("email must be specified") - } - if len(s.Password) == 0 { - return fmt.Errorf("password must be specified") - } - if len(s.Server) == 0 { - return fmt.Errorf("server must be specified") - } - return nil -} - -// handleDockercfgContent serializes a dockercfg json file -func handleDockercfgContent(username, password, email, server string) ([]byte, error) { - dockercfgAuth := credentialprovider.DockerConfigEntry{ - Username: username, - Password: password, - Email: email, - } - - dockerCfg := map[string]credentialprovider.DockerConfigEntry{server: dockercfgAuth} - - return json.Marshal(dockerCfg) -} diff --git 
a/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go b/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go deleted file mode 100644 index 05061d259..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/secret_for_tls.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "crypto/tls" - "fmt" - "io/ioutil" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/runtime" -) - -// SecretForTLSGeneratorV1 supports stable generation of a TLS secret. -type SecretForTLSGeneratorV1 struct { - // Name is the name of this TLS secret. - Name string - // Key is the path to the user's private key. - Key string - // Cert is the path to the user's public key certificate. - Cert string -} - -// Ensure it supports the generator pattern that uses parameter injection -var _ Generator = &SecretForTLSGeneratorV1{} - -// Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &SecretForTLSGeneratorV1{} - -// Generate returns a secret using the specified parameters -func (s SecretForTLSGeneratorV1) Generate(genericParams map[string]interface{}) (runtime.Object, error) { - err := ValidateParams(s.ParamNames(), genericParams) - if err != nil { - return nil, err - } - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - delegate := &SecretForTLSGeneratorV1{ - Name: params["name"], - Key: params["key"], - Cert: params["cert"], - } - return delegate.StructuredGenerate() -} - -// StructuredGenerate outputs a secret object using the configured fields -func (s SecretForTLSGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := s.validate(); err != nil { - return nil, err - } - tlsCrt, err := readFile(s.Cert) - if err != nil { - return nil, err - } - tlsKey, err := readFile(s.Key) - if err != nil { - return nil, err - } - secret := &api.Secret{} - secret.Name = s.Name - secret.Type = api.SecretTypeTLS - secret.Data = map[string][]byte{} - secret.Data[api.TLSCertKey] = []byte(tlsCrt) - secret.Data[api.TLSPrivateKeyKey] = []byte(tlsKey) - return secret, nil -} - -// readFile just reads a file into a byte array. -func readFile(file string) ([]byte, error) { - b, err := ioutil.ReadFile(file) - if err != nil { - return []byte{}, fmt.Errorf("Cannot read file %v, %v", file, err) - } - return b, nil -} - -// ParamNames returns the set of supported input parameters when using the parameter injection generator pattern -func (s SecretForTLSGeneratorV1) ParamNames() []GeneratorParam { - return []GeneratorParam{ - {"name", true}, - {"key", true}, - {"cert", true}, - } -} - -// validate validates required fields are set to support structured generation -func (s SecretForTLSGeneratorV1) validate() error { - // TODO: This is not strictly necessary. 
We can generate a self signed cert - // if no key/cert is given. The only requiredment is that we either get both - // or none. See test/e2e/ingress_utils for self signed cert generation. - if len(s.Key) == 0 { - return fmt.Errorf("key must be specified.") - } - if len(s.Cert) == 0 { - return fmt.Errorf("certificate must be specified.") - } - if _, err := tls.LoadX509KeyPair(s.Cert, s.Key); err != nil { - return fmt.Errorf("failed to load key pair %v", err) - } - // TODO: Add more validation. - // 1. If the certificate contains intermediates, it is a valid chain. - // 2. Format etc. - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/service.go b/vendor/k8s.io/kubernetes/pkg/kubectl/service.go deleted file mode 100644 index f67b3a11b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/service.go +++ /dev/null @@ -1,229 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "strconv" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/intstr" -) - -// The only difference between ServiceGeneratorV1 and V2 is that the service port is named "default" in V1, while it is left unnamed in V2. -type ServiceGeneratorV1 struct{} - -func (ServiceGeneratorV1) ParamNames() []GeneratorParam { - return paramNames() -} - -func (ServiceGeneratorV1) Generate(params map[string]interface{}) (runtime.Object, error) { - params["port-name"] = "default" - return generate(params) -} - -type ServiceGeneratorV2 struct{} - -func (ServiceGeneratorV2) ParamNames() []GeneratorParam { - return paramNames() -} - -func (ServiceGeneratorV2) Generate(params map[string]interface{}) (runtime.Object, error) { - return generate(params) -} - -func paramNames() []GeneratorParam { - return []GeneratorParam{ - {"default-name", true}, - {"name", false}, - {"selector", true}, - // port will be used if a user specifies --port OR the exposed object - // has one port - {"port", false}, - // ports will be used iff a user doesn't specify --port AND the - // exposed object has multiple ports - {"ports", false}, - {"labels", false}, - {"external-ip", false}, - {"create-external-load-balancer", false}, - {"load-balancer-ip", false}, - {"type", false}, - {"protocol", false}, - // protocols will be used to keep port-protocol mapping derived from - // exposed object - {"protocols", false}, - {"container-port", false}, // alias of target-port - {"target-port", false}, - {"port-name", false}, - {"session-affinity", false}, - } -} - -func generate(genericParams map[string]interface{}) (runtime.Object, error) { - params := map[string]string{} - for key, value := range genericParams { - strVal, isString := value.(string) - if !isString { - return nil, fmt.Errorf("expected string, saw %v for '%s'", value, key) - } - params[key] = strVal - } - selectorString, found := params["selector"] - if !found || len(selectorString) == 0 { - return nil, fmt.Errorf("'selector' is a required parameter.") - } - 
selector, err := ParseLabels(selectorString) - if err != nil { - return nil, err - } - - labelsString, found := params["labels"] - var labels map[string]string - if found && len(labelsString) > 0 { - labels, err = ParseLabels(labelsString) - if err != nil { - return nil, err - } - } - - name, found := params["name"] - if !found || len(name) == 0 { - name, found = params["default-name"] - if !found || len(name) == 0 { - return nil, fmt.Errorf("'name' is a required parameter.") - } - } - ports := []api.ServicePort{} - servicePortName, found := params["port-name"] - if !found { - // Leave the port unnamed. - servicePortName = "" - } - - protocolsString, found := params["protocols"] - var portProtocolMap map[string]string - if found && len(protocolsString) > 0 { - portProtocolMap, err = ParseProtocols(protocolsString) - if err != nil { - return nil, err - } - } - // ports takes precedence over port since it will be - // specified only when the user hasn't specified a port - // via --port and the exposed object has multiple ports. - var portString string - if portString, found = params["ports"]; !found { - portString, found = params["port"] - if !found { - return nil, fmt.Errorf("'port' is a required parameter.") - } - } - - portStringSlice := strings.Split(portString, ",") - for i, stillPortString := range portStringSlice { - port, err := strconv.Atoi(stillPortString) - if err != nil { - return nil, err - } - name := servicePortName - // If we are going to assign multiple ports to a service, we need to - // generate a different name for each one. - if len(portStringSlice) > 1 { - name = fmt.Sprintf("port-%d", i+1) - } - protocol := params["protocol"] - - switch { - case len(protocol) == 0 && len(portProtocolMap) == 0: - // Default to TCP, what the flag was doing previously. - protocol = "TCP" - case len(protocol) > 0 && len(portProtocolMap) > 0: - // User has specified the --protocol while exposing a multiprotocol resource - // We should stomp multiple protocols with the one specified ie. 
do nothing - case len(protocol) == 0 && len(portProtocolMap) > 0: - // no --protocol and we expose a multiprotocol resource - protocol = "TCP" // have the default so we can stay sane - if exposeProtocol, found := portProtocolMap[stillPortString]; found { - protocol = exposeProtocol - } - } - ports = append(ports, api.ServicePort{ - Name: name, - Port: int32(port), - Protocol: api.Protocol(protocol), - }) - } - - service := api.Service{ - ObjectMeta: api.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: api.ServiceSpec{ - Selector: selector, - Ports: ports, - }, - } - targetPortString, found := params["target-port"] - if !found { - targetPortString, found = params["container-port"] - } - if found && len(targetPortString) > 0 { - var targetPort intstr.IntOrString - if portNum, err := strconv.Atoi(targetPortString); err != nil { - targetPort = intstr.FromString(targetPortString) - } else { - targetPort = intstr.FromInt(portNum) - } - // Use the same target-port for every port - for i := range service.Spec.Ports { - service.Spec.Ports[i].TargetPort = targetPort - } - } else { - // If --target-port or --container-port haven't been specified, this - // should be the same as Port - for i := range service.Spec.Ports { - port := service.Spec.Ports[i].Port - service.Spec.Ports[i].TargetPort = intstr.FromInt(int(port)) - } - } - if params["create-external-load-balancer"] == "true" { - service.Spec.Type = api.ServiceTypeLoadBalancer - } - if len(params["external-ip"]) > 0 { - service.Spec.ExternalIPs = []string{params["external-ip"]} - } - if len(params["type"]) != 0 { - service.Spec.Type = api.ServiceType(params["type"]) - } - if service.Spec.Type == api.ServiceTypeLoadBalancer { - service.Spec.LoadBalancerIP = params["load-balancer-ip"] - } - if len(params["session-affinity"]) != 0 { - switch api.ServiceAffinity(params["session-affinity"]) { - case api.ServiceAffinityNone: - service.Spec.SessionAffinity = api.ServiceAffinityNone - case api.ServiceAffinityClientIP: - service.Spec.SessionAffinity = api.ServiceAffinityClientIP - default: - return nil, fmt.Errorf("unknown session affinity: %s", params["session-affinity"]) - } - } - return &service, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/serviceaccount.go b/vendor/k8s.io/kubernetes/pkg/kubectl/serviceaccount.go deleted file mode 100644 index 2be08dd2d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/serviceaccount.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/runtime" -) - -// ServiceAccountGeneratorV1 supports stable generation of a service account -type ServiceAccountGeneratorV1 struct { - // Name of service account - Name string -} - -// Ensure it supports the generator pattern that uses parameters specified during construction -var _ StructuredGenerator = &ServiceAccountGeneratorV1{} - -// StructuredGenerate outputs a service account object using the configured fields -func (g *ServiceAccountGeneratorV1) StructuredGenerate() (runtime.Object, error) { - if err := g.validate(); err != nil { - return nil, err - } - serviceAccount := &api.ServiceAccount{} - serviceAccount.Name = g.Name - return serviceAccount, nil -} - -// validate validates required fields are set to support structured generation -func (g *ServiceAccountGeneratorV1) validate() error { - if len(g.Name) == 0 { - return fmt.Errorf("name must be specified") - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/sorted_event_list.go b/vendor/k8s.io/kubernetes/pkg/kubectl/sorted_event_list.go deleted file mode 100644 index 568c46d6d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/sorted_event_list.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "k8s.io/kubernetes/pkg/api" -) - -// SortableEvents implements sort.Interface for []api.Event based on the Timestamp field -type SortableEvents []api.Event - -func (list SortableEvents) Len() int { - return len(list) -} - -func (list SortableEvents) Swap(i, j int) { - list[i], list[j] = list[j], list[i] -} - -func (list SortableEvents) Less(i, j int) bool { - return list[i].LastTimestamp.Time.Before(list[j].LastTimestamp.Time) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/sorted_resource_name_list.go b/vendor/k8s.io/kubernetes/pkg/kubectl/sorted_resource_name_list.go deleted file mode 100644 index 98c67344a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/sorted_resource_name_list.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "sort" - - "k8s.io/kubernetes/pkg/api" - qosutil "k8s.io/kubernetes/pkg/kubelet/qos/util" -) - -type SortableResourceNames []api.ResourceName - -func (list SortableResourceNames) Len() int { - return len(list) -} - -func (list SortableResourceNames) Swap(i, j int) { - list[i], list[j] = list[j], list[i] -} - -func (list SortableResourceNames) Less(i, j int) bool { - return list[i] < list[j] -} - -// SortedResourceNames returns the sorted resource names of a resource list. -func SortedResourceNames(list api.ResourceList) []api.ResourceName { - resources := make([]api.ResourceName, 0, len(list)) - for res := range list { - resources = append(resources, res) - } - sort.Sort(SortableResourceNames(resources)) - return resources -} - -type SortableResourceQuotas []api.ResourceQuota - -func (list SortableResourceQuotas) Len() int { - return len(list) -} - -func (list SortableResourceQuotas) Swap(i, j int) { - list[i], list[j] = list[j], list[i] -} - -func (list SortableResourceQuotas) Less(i, j int) bool { - return list[i].Name < list[j].Name -} - -// SortedQoSResourceNames returns the sorted resource names of a QoS list. -func SortedQoSResourceNames(list qosutil.QoSList) []api.ResourceName { - resources := make([]api.ResourceName, 0, len(list)) - for res := range list { - resources = append(resources, res) - } - sort.Sort(SortableResourceNames(resources)) - return resources -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/sorting_printer.go b/vendor/k8s.io/kubernetes/pkg/kubectl/sorting_printer.go deleted file mode 100644 index e95b8a1e7..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/sorting_printer.go +++ /dev/null @@ -1,219 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "io" - "reflect" - "sort" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/integer" - "k8s.io/kubernetes/pkg/util/jsonpath" - - "github.com/golang/glog" -) - -// Sorting printer sorts list types before delegating to another printer. 
-// Non-list types are simply passed through -type SortingPrinter struct { - SortField string - Delegate ResourcePrinter - Decoder runtime.Decoder -} - -func (s *SortingPrinter) PrintObj(obj runtime.Object, out io.Writer) error { - if !meta.IsListType(obj) { - return s.Delegate.PrintObj(obj, out) - } - - if err := s.sortObj(obj); err != nil { - return err - } - return s.Delegate.PrintObj(obj, out) -} - -// TODO: implement HandledResources() -func (p *SortingPrinter) HandledResources() []string { - return []string{} -} - -func (s *SortingPrinter) sortObj(obj runtime.Object) error { - objs, err := meta.ExtractList(obj) - if err != nil { - return err - } - if len(objs) == 0 { - return nil - } - - sorter, err := SortObjects(s.Decoder, objs, s.SortField) - if err != nil { - return err - } - - switch list := obj.(type) { - case *v1.List: - outputList := make([]runtime.RawExtension, len(objs)) - for ix := range objs { - outputList[ix] = list.Items[sorter.OriginalPosition(ix)] - } - list.Items = outputList - return nil - } - return meta.SetList(obj, objs) -} - -func SortObjects(decoder runtime.Decoder, objs []runtime.Object, fieldInput string) (*RuntimeSort, error) { - parser := jsonpath.New("sorting") - - field, err := massageJSONPath(fieldInput) - if err != nil { - return nil, err - } - - if err := parser.Parse(field); err != nil { - return nil, err - } - - for ix := range objs { - item := objs[ix] - switch u := item.(type) { - case *runtime.Unknown: - var err error - if objs[ix], _, err = decoder.Decode(u.Raw, nil, nil); err != nil { - return nil, err - } - } - } - - values, err := parser.FindResults(reflect.ValueOf(objs[0]).Elem().Interface()) - if err != nil { - return nil, err - } - if len(values) == 0 { - return nil, fmt.Errorf("couldn't find any field with path: %s", field) - } - - sorter := NewRuntimeSort(field, objs) - sort.Sort(sorter) - return sorter, nil -} - -// RuntimeSort is an implementation of the golang sort interface that knows how to sort -// lists of runtime.Object -type RuntimeSort struct { - field string - objs []runtime.Object - origPosition []int -} - -func NewRuntimeSort(field string, objs []runtime.Object) *RuntimeSort { - sorter := &RuntimeSort{field: field, objs: objs, origPosition: make([]int, len(objs))} - for ix := range objs { - sorter.origPosition[ix] = ix - } - return sorter -} - -func (r *RuntimeSort) Len() int { - return len(r.objs) -} - -func (r *RuntimeSort) Swap(i, j int) { - r.objs[i], r.objs[j] = r.objs[j], r.objs[i] - r.origPosition[i], r.origPosition[j] = r.origPosition[j], r.origPosition[i] -} - -func isLess(i, j reflect.Value) (bool, error) { - switch i.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return i.Int() < j.Int(), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return i.Uint() < j.Uint(), nil - case reflect.Float32, reflect.Float64: - return i.Float() < j.Float(), nil - case reflect.String: - return i.String() < j.String(), nil - case reflect.Ptr: - return isLess(i.Elem(), j.Elem()) - case reflect.Struct: - // sort unversioned.Time - in := i.Interface() - if t, ok := in.(unversioned.Time); ok { - return t.Before(j.Interface().(unversioned.Time)), nil - } - // fallback to the fields comparison - for idx := 0; idx < i.NumField(); idx++ { - less, err := isLess(i.Field(idx), j.Field(idx)) - if err != nil || !less { - return less, err - } - } - return true, nil - case reflect.Array, reflect.Slice: - // note: the length of i and j may be different - for idx := 0; 
idx < integer.IntMin(i.Len(), j.Len()); idx++ { - less, err := isLess(i.Index(idx), j.Index(idx)) - if err != nil || !less { - return less, err - } - } - return true, nil - default: - return false, fmt.Errorf("unsortable type: %v", i.Kind()) - } -} - -func (r *RuntimeSort) Less(i, j int) bool { - iObj := r.objs[i] - jObj := r.objs[j] - - parser := jsonpath.New("sorting") - parser.Parse(r.field) - - iValues, err := parser.FindResults(reflect.ValueOf(iObj).Elem().Interface()) - if err != nil { - glog.Fatalf("Failed to get i values for %#v using %s (%#v)", iObj, r.field, err) - } - jValues, err := parser.FindResults(reflect.ValueOf(jObj).Elem().Interface()) - if err != nil { - glog.Fatalf("Failed to get j values for %#v using %s (%v)", jObj, r.field, err) - } - - iField := iValues[0][0] - jField := jValues[0][0] - - less, err := isLess(iField, jField) - if err != nil { - glog.Fatalf("Field %s in %v is an unsortable type: %s, err: %v", r.field, iObj, iField.Kind().String(), err) - } - return less -} - -// Returns the starting (original) position of a particular index. e.g. If OriginalPosition(0) returns 5 than the -// the item currently at position 0 was at position 5 in the original unsorted array. -func (r *RuntimeSort) OriginalPosition(ix int) int { - if ix < 0 || ix > len(r.origPosition) { - return -1 - } - return r.origPosition[ix] -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/stop.go b/vendor/k8s.io/kubernetes/pkg/kubectl/stop.go deleted file mode 100644 index d784ef24c..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/stop.go +++ /dev/null @@ -1,448 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "fmt" - "strings" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/extensions" - client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/util" - deploymentutil "k8s.io/kubernetes/pkg/util/deployment" - utilerrors "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/wait" -) - -const ( - Interval = time.Second * 1 - Timeout = time.Minute * 5 -) - -// A Reaper handles terminating an object as gracefully as possible. -// timeout is how long we'll wait for the termination to be successful -// gracePeriod is time given to an API object for it to delete itself cleanly, -// e.g., pod shutdown. It may or may not be supported by the API object. 
-type Reaper interface { - Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error -} - -type NoSuchReaperError struct { - kind unversioned.GroupKind -} - -func (n *NoSuchReaperError) Error() string { - return fmt.Sprintf("no reaper has been implemented for %v", n.kind) -} - -func IsNoSuchReaperError(err error) bool { - _, ok := err.(*NoSuchReaperError) - return ok -} - -func ReaperFor(kind unversioned.GroupKind, c client.Interface) (Reaper, error) { - switch kind { - case api.Kind("ReplicationController"): - return &ReplicationControllerReaper{c, Interval, Timeout}, nil - - case extensions.Kind("ReplicaSet"): - return &ReplicaSetReaper{c, Interval, Timeout}, nil - - case extensions.Kind("DaemonSet"): - return &DaemonSetReaper{c, Interval, Timeout}, nil - - case api.Kind("Pod"): - return &PodReaper{c}, nil - - case api.Kind("Service"): - return &ServiceReaper{c}, nil - - case extensions.Kind("Job"), batch.Kind("Job"): - return &JobReaper{c, Interval, Timeout}, nil - - case extensions.Kind("Deployment"): - return &DeploymentReaper{c, Interval, Timeout}, nil - - } - return nil, &NoSuchReaperError{kind} -} - -func ReaperForReplicationController(c client.Interface, timeout time.Duration) (Reaper, error) { - return &ReplicationControllerReaper{c, Interval, timeout}, nil -} - -type ReplicationControllerReaper struct { - client.Interface - pollInterval, timeout time.Duration -} -type ReplicaSetReaper struct { - client.Interface - pollInterval, timeout time.Duration -} -type DaemonSetReaper struct { - client.Interface - pollInterval, timeout time.Duration -} -type JobReaper struct { - client.Interface - pollInterval, timeout time.Duration -} -type DeploymentReaper struct { - client.Interface - pollInterval, timeout time.Duration -} -type PodReaper struct { - client.Interface -} -type ServiceReaper struct { - client.Interface -} - -type objInterface interface { - Delete(name string) error - Get(name string) (meta.Object, error) -} - -// getOverlappingControllers finds rcs that this controller overlaps, as well as rcs overlapping this controller. -func getOverlappingControllers(c client.ReplicationControllerInterface, rc *api.ReplicationController) ([]api.ReplicationController, error) { - rcs, err := c.List(api.ListOptions{}) - if err != nil { - return nil, fmt.Errorf("error getting replication controllers: %v", err) - } - var matchingRCs []api.ReplicationController - rcLabels := labels.Set(rc.Spec.Selector) - for _, controller := range rcs.Items { - newRCLabels := labels.Set(controller.Spec.Selector) - if labels.SelectorFromSet(newRCLabels).Matches(rcLabels) || labels.SelectorFromSet(rcLabels).Matches(newRCLabels) { - matchingRCs = append(matchingRCs, controller) - } - } - return matchingRCs, nil -} - -func (reaper *ReplicationControllerReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { - rc := reaper.ReplicationControllers(namespace) - scaler, err := ScalerFor(api.Kind("ReplicationController"), *reaper) - if err != nil { - return err - } - ctrl, err := rc.Get(name) - if err != nil { - return err - } - if timeout == 0 { - timeout = Timeout + time.Duration(10*ctrl.Spec.Replicas)*time.Second - } - - // The rc manager will try and detect all matching rcs for a pod's labels, - // and only sync the oldest one. This means if we have a pod with labels - // [(k1: v1), (k2: v2)] and two rcs: rc1 with selector [(k1=v1)], and rc2 with selector [(k1=v1),(k2=v2)], - // the rc manager will sync the older of the two rcs. 
- // - // If there are rcs with a superset of labels, eg: - // deleting: (k1=v1), superset: (k2=v2, k1=v1) - // - It isn't safe to delete the rc because there could be a pod with labels - // (k1=v1) that isn't managed by the superset rc. We can't scale it down - // either, because there could be a pod (k2=v2, k1=v1) that it deletes - // causing a fight with the superset rc. - // If there are rcs with a subset of labels, eg: - // deleting: (k2=v2, k1=v1), subset: (k1=v1), superset: (k2=v2, k1=v1, k3=v3) - // - Even if it's safe to delete this rc without a scale down because all it's pods - // are being controlled by the subset rc the code returns an error. - - // In theory, creating overlapping controllers is user error, so the loop below - // tries to account for this logic only in the common case, where we end up - // with multiple rcs that have an exact match on selectors. - - overlappingCtrls, err := getOverlappingControllers(rc, ctrl) - if err != nil { - return fmt.Errorf("error getting replication controllers: %v", err) - } - exactMatchRCs := []api.ReplicationController{} - overlapRCs := []string{} - for _, overlappingRC := range overlappingCtrls { - if len(overlappingRC.Spec.Selector) == len(ctrl.Spec.Selector) { - exactMatchRCs = append(exactMatchRCs, overlappingRC) - } else { - overlapRCs = append(overlapRCs, overlappingRC.Name) - } - } - if len(overlapRCs) > 0 { - return fmt.Errorf( - "Detected overlapping controllers for rc %v: %v, please manage deletion individually with --cascade=false.", - ctrl.Name, strings.Join(overlapRCs, ",")) - } - if len(exactMatchRCs) == 1 { - // No overlapping controllers. - retry := NewRetryParams(reaper.pollInterval, reaper.timeout) - waitForReplicas := NewRetryParams(reaper.pollInterval, timeout) - if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas); err != nil { - return err - } - } - return rc.Delete(name) -} - -// TODO(madhusudancs): Implement it when controllerRef is implemented - https://github.com/kubernetes/kubernetes/issues/2210 -// getOverlappingReplicaSets finds ReplicaSets that this ReplicaSet overlaps, as well as ReplicaSets overlapping this ReplicaSet. -func getOverlappingReplicaSets(c client.ReplicaSetInterface, rs *extensions.ReplicaSet) ([]extensions.ReplicaSet, []extensions.ReplicaSet, error) { - var overlappingRSs, exactMatchRSs []extensions.ReplicaSet - return overlappingRSs, exactMatchRSs, nil -} - -func (reaper *ReplicaSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { - rsc := reaper.Extensions().ReplicaSets(namespace) - scaler, err := ScalerFor(extensions.Kind("ReplicaSet"), *reaper) - if err != nil { - return err - } - rs, err := rsc.Get(name) - if err != nil { - return err - } - if timeout == 0 { - timeout = Timeout + time.Duration(10*rs.Spec.Replicas)*time.Second - } - - // The ReplicaSet controller will try and detect all matching ReplicaSets - // for a pod's labels, and only sync the oldest one. This means if we have - // a pod with labels [(k1: v1), (k2: v2)] and two ReplicaSets: rs1 with - // selector [(k1=v1)], and rs2 with selector [(k1=v1),(k2=v2)], the - // ReplicaSet controller will sync the older of the two ReplicaSets. - // - // If there are ReplicaSets with a superset of labels, eg: - // deleting: (k1=v1), superset: (k2=v2, k1=v1) - // - It isn't safe to delete the ReplicaSet because there could be a pod - // with labels (k1=v1) that isn't managed by the superset ReplicaSet. 
- // We can't scale it down either, because there could be a pod - // (k2=v2, k1=v1) that it deletes causing a fight with the superset - // ReplicaSet. - // If there are ReplicaSets with a subset of labels, eg: - // deleting: (k2=v2, k1=v1), subset: (k1=v1), superset: (k2=v2, k1=v1, k3=v3) - // - Even if it's safe to delete this ReplicaSet without a scale down because - // all it's pods are being controlled by the subset ReplicaSet the code - // returns an error. - - // In theory, creating overlapping ReplicaSets is user error, so the loop below - // tries to account for this logic only in the common case, where we end up - // with multiple ReplicaSets that have an exact match on selectors. - - // TODO(madhusudancs): Re-evaluate again when controllerRef is implemented - - // https://github.com/kubernetes/kubernetes/issues/2210 - overlappingRSs, exactMatchRSs, err := getOverlappingReplicaSets(rsc, rs) - if err != nil { - return fmt.Errorf("error getting ReplicaSets: %v", err) - } - - if len(overlappingRSs) > 0 { - var names []string - for _, overlappingRS := range overlappingRSs { - names = append(names, overlappingRS.Name) - } - return fmt.Errorf( - "Detected overlapping ReplicaSets for ReplicaSet %v: %v, please manage deletion individually with --cascade=false.", - rs.Name, strings.Join(names, ",")) - } - if len(exactMatchRSs) == 0 { - // No overlapping ReplicaSets. - retry := NewRetryParams(reaper.pollInterval, reaper.timeout) - waitForReplicas := NewRetryParams(reaper.pollInterval, timeout) - if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas); err != nil { - return err - } - } - - if err := rsc.Delete(name, nil); err != nil { - return err - } - return nil -} - -func (reaper *DaemonSetReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { - ds, err := reaper.Extensions().DaemonSets(namespace).Get(name) - if err != nil { - return err - } - - // We set the nodeSelector to a random label. This label is nearly guaranteed - // to not be set on any node so the DameonSetController will start deleting - // daemon pods. Once it's done deleting the daemon pods, it's safe to delete - // the DaemonSet. - ds.Spec.Template.Spec.NodeSelector = map[string]string{ - string(util.NewUUID()): string(util.NewUUID()), - } - // force update to avoid version conflict - ds.ResourceVersion = "" - - if ds, err = reaper.Extensions().DaemonSets(namespace).Update(ds); err != nil { - return err - } - - // Wait for the daemon set controller to kill all the daemon pods. 
- if err := wait.Poll(reaper.pollInterval, reaper.timeout, func() (bool, error) { - updatedDS, err := reaper.Extensions().DaemonSets(namespace).Get(name) - if err != nil { - return false, nil - } - return updatedDS.Status.CurrentNumberScheduled+updatedDS.Status.NumberMisscheduled == 0, nil - }); err != nil { - return err - } - - return reaper.Extensions().DaemonSets(namespace).Delete(name) -} - -func (reaper *JobReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { - jobs := reaper.Batch().Jobs(namespace) - pods := reaper.Pods(namespace) - scaler, err := ScalerFor(batch.Kind("Job"), *reaper) - if err != nil { - return err - } - job, err := jobs.Get(name) - if err != nil { - return err - } - if timeout == 0 { - // we will never have more active pods than job.Spec.Parallelism - parallelism := *job.Spec.Parallelism - timeout = Timeout + time.Duration(10*parallelism)*time.Second - } - - // TODO: handle overlapping jobs - retry := NewRetryParams(reaper.pollInterval, reaper.timeout) - waitForJobs := NewRetryParams(reaper.pollInterval, timeout) - if err = scaler.Scale(namespace, name, 0, nil, retry, waitForJobs); err != nil { - return err - } - // at this point only dead pods are left, that should be removed - selector, _ := unversioned.LabelSelectorAsSelector(job.Spec.Selector) - options := api.ListOptions{LabelSelector: selector} - podList, err := pods.List(options) - if err != nil { - return err - } - errList := []error{} - for _, pod := range podList.Items { - if err := pods.Delete(pod.Name, gracePeriod); err != nil { - // ignores the error when the pod isn't found - if !errors.IsNotFound(err) { - errList = append(errList, err) - } - } - } - if len(errList) > 0 { - return utilerrors.NewAggregate(errList) - } - // once we have all the pods removed we can safely remove the job itself - return jobs.Delete(name, nil) -} - -func (reaper *DeploymentReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { - deployments := reaper.Extensions().Deployments(namespace) - replicaSets := reaper.Extensions().ReplicaSets(namespace) - rsReaper, _ := ReaperFor(extensions.Kind("ReplicaSet"), reaper) - - deployment, err := reaper.updateDeploymentWithRetries(namespace, name, func(d *extensions.Deployment) { - // set deployment's history and scale to 0 - // TODO replace with patch when available: https://github.com/kubernetes/kubernetes/issues/20527 - d.Spec.RevisionHistoryLimit = util.Int32Ptr(0) - d.Spec.Replicas = 0 - d.Spec.Paused = true - }) - if err != nil { - return err - } - - // Use observedGeneration to determine if the deployment controller noticed the pause. - if err := deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) { - return deployments.Get(name) - }, deployment.Generation, 1*time.Second, 1*time.Minute); err != nil { - return err - } - - // Stop all replica sets. 
- selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) - if err != nil { - return err - } - - options := api.ListOptions{LabelSelector: selector} - rsList, err := replicaSets.List(options) - if err != nil { - return err - } - errList := []error{} - for _, rc := range rsList.Items { - if err := rsReaper.Stop(rc.Namespace, rc.Name, timeout, gracePeriod); err != nil { - scaleGetErr, ok := err.(*ScaleError) - if !errors.IsNotFound(err) || ok && !errors.IsNotFound(scaleGetErr.ActualError) { - errList = append(errList, err) - } - } - } - if len(errList) > 0 { - return utilerrors.NewAggregate(errList) - } - - // Delete deployment at the end. - // Note: We delete deployment at the end so that if removing RSs fails, we atleast have the deployment to retry. - return deployments.Delete(name, nil) -} - -type updateDeploymentFunc func(d *extensions.Deployment) - -func (reaper *DeploymentReaper) updateDeploymentWithRetries(namespace, name string, applyUpdate updateDeploymentFunc) (deployment *extensions.Deployment, err error) { - deployments := reaper.Extensions().Deployments(namespace) - err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { - if deployment, err = deployments.Get(name); err != nil { - return false, err - } - // Apply the update, then attempt to push it to the apiserver. - applyUpdate(deployment) - if deployment, err = deployments.Update(deployment); err == nil { - return true, nil - } - return false, nil - }) - return deployment, err -} - -func (reaper *PodReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { - pods := reaper.Pods(namespace) - _, err := pods.Get(name) - if err != nil { - return err - } - return pods.Delete(name, gracePeriod) -} - -func (reaper *ServiceReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *api.DeleteOptions) error { - services := reaper.Services(namespace) - _, err := services.Get(name) - if err != nil { - return err - } - return services.Delete(name) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/version.go b/vendor/k8s.io/kubernetes/pkg/kubectl/version.go deleted file mode 100644 index 4c39b3c99..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/version.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kubectl - -import ( - "fmt" - "io" - "os" - - client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/version" -) - -func GetServerVersion(w io.Writer, kubeClient client.Interface) { - serverVersion, err := kubeClient.Discovery().ServerVersion() - if err != nil { - fmt.Printf("Couldn't read server version from server: %v\n", err) - os.Exit(1) - } - - fmt.Fprintf(w, "Server Version: %#v\n", *serverVersion) -} - -func GetClientVersion(w io.Writer) { - fmt.Fprintf(w, "Client Version: %#v\n", version.Get()) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubectl/watchloop.go b/vendor/k8s.io/kubernetes/pkg/kubectl/watchloop.go deleted file mode 100644 index d2920dd7d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubectl/watchloop.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package kubectl - -import ( - "os" - "os/signal" - - "k8s.io/kubernetes/pkg/watch" -) - -// WatchLoop loops, passing events in w to fn. -// If user sends interrupt signal, shut down cleanly. Otherwise, never return. -func WatchLoop(w watch.Interface, fn func(watch.Event) error) { - signals := make(chan os.Signal, 1) - signal.Notify(signals, os.Interrupt) - defer signal.Stop(signals) - for { - select { - case event, ok := <-w.ResultChan(): - if !ok { - return - } - if err := fn(event); err != nil { - w.Stop() - } - case <-signals: - w.Stop() - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go deleted file mode 100644 index 04a25c907..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/doc.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// package qos contains helper functions for quality of service. -// For each resource (memory, CPU) Kubelet supports three classes of containers. -// Memory guaranteed containers will receive the highest priority and will get all the resources -// they need. -// Burstable containers will be guaranteed their request and can “burst” and use more resources -// when available. -// Best-Effort containers, which don’t specify a request, can use resources only if not being used -// by other pods. 
-package qos diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go deleted file mode 100644 index 511e629fa..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/policy.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package qos - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/kubelet/qos/util" -) - -const ( - PodInfraOOMAdj int = -999 - KubeletOOMScoreAdj int = -999 - KubeProxyOOMScoreAdj int = -999 - guaranteedOOMScoreAdj int = -998 - besteffortOOMScoreAdj int = 1000 -) - -// GetContainerOOMAdjust returns the amount by which the OOM score of all processes in the -// container should be adjusted. -// The OOM score of a process is the percentage of memory it consumes -// multiplied by 10 (barring exceptional cases) + a configurable quantity which is between -1000 -// and 1000. Containers with higher OOM scores are killed if the system runs out of memory. -// See https://lwn.net/Articles/391222/ for more information. -func GetContainerOOMScoreAdjust(pod *api.Pod, container *api.Container, memoryCapacity int64) int { - switch util.GetPodQos(pod) { - case util.Guaranteed: - // Guaranteed containers should be the last to get killed. - return guaranteedOOMScoreAdj - case util.BestEffort: - return besteffortOOMScoreAdj - } - - // Burstable containers are a middle tier, between Guaranteed and Best-Effort. Ideally, - // we want to protect Burstable containers that consume less memory than requested. - // The formula below is a heuristic. A container requesting for 10% of a system's - // memory will have an OOM score adjust of 900. If a process in container Y - // uses over 10% of memory, its OOM score will be 1000. The idea is that containers - // which use more than their request will have an OOM score of 1000 and will be prime - // targets for OOM kills. - // Note that this is a heuristic, it won't work if a container has many small processes. - memoryRequest := container.Resources.Requests.Memory().Value() - oomScoreAdjust := 1000 - (1000*memoryRequest)/memoryCapacity - // A guaranteed pod using 100% of memory can have an OOM score of 1. Ensure - // that burstable pods have a higher OOM score adjustment. - if oomScoreAdjust < 2 { - return 2 - } - // Give burstable pods a higher chance of survival over besteffort pods. - if int(oomScoreAdjust) == besteffortOOMScoreAdj { - return int(oomScoreAdjust - 1) - } - return int(oomScoreAdjust) -} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/util/qos.go b/vendor/k8s.io/kubernetes/pkg/kubelet/qos/util/qos.go deleted file mode 100644 index 9d7a5786a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/qos/util/qos.go +++ /dev/null @@ -1,146 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/resource" -) - -const ( - Guaranteed = "Guaranteed" - Burstable = "Burstable" - BestEffort = "BestEffort" -) - -// isResourceGuaranteed returns true if the container's resource requirements are Guaranteed. -func isResourceGuaranteed(container *api.Container, resource api.ResourceName) bool { - // A container resource is guaranteed if its request == limit. - // If request == limit, the user is very confident of resource consumption. - req, hasReq := container.Resources.Requests[resource] - limit, hasLimit := container.Resources.Limits[resource] - if !hasReq || !hasLimit { - return false - } - return req.Cmp(limit) == 0 && req.Value() != 0 -} - -// isResourceBestEffort returns true if the container's resource requirements are best-effort. -func isResourceBestEffort(container *api.Container, resource api.ResourceName) bool { - // A container resource is best-effort if its request is unspecified or 0. - // If a request is specified, then the user expects some kind of resource guarantee. - req, hasReq := container.Resources.Requests[resource] - return !hasReq || req.Value() == 0 -} - -// GetPodQos returns the QoS class of a pod. -// A pod is besteffort if none of its containers have specified any requests or limits. -// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal. -// A pod is burstable if limits and requests do not match across all containers. -func GetPodQos(pod *api.Pod) string { - requests := api.ResourceList{} - limits := api.ResourceList{} - zeroQuantity := resource.MustParse("0") - isGuaranteed := true - for _, container := range pod.Spec.Containers { - // process requests - for name, quantity := range container.Resources.Requests { - if quantity.Cmp(zeroQuantity) == 1 { - delta := quantity.Copy() - if _, exists := requests[name]; !exists { - requests[name] = *delta - } else { - delta.Add(requests[name]) - requests[name] = *delta - } - } - } - // process limits - for name, quantity := range container.Resources.Limits { - if quantity.Cmp(zeroQuantity) == 1 { - delta := quantity.Copy() - if _, exists := limits[name]; !exists { - limits[name] = *delta - } else { - delta.Add(limits[name]) - limits[name] = *delta - } - } - } - if len(container.Resources.Limits) != len(supportedComputeResources) { - isGuaranteed = false - } - } - if len(requests) == 0 && len(limits) == 0 { - return BestEffort - } - // Check is requests match limits for all resources. - if isGuaranteed { - for name, req := range requests { - if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 { - isGuaranteed = false - break - } - } - } - if isGuaranteed && - len(requests) == len(limits) && - len(limits) == len(supportedComputeResources) { - return Guaranteed - } - return Burstable -} - -// QoSList is a set of (resource name, QoS class) pairs. 
-type QoSList map[api.ResourceName]string - -// GetQoS returns a mapping of resource name to QoS class of a container -func GetQoS(container *api.Container) QoSList { - resourceToQoS := QoSList{} - for resource := range allResources(container) { - switch { - case isResourceGuaranteed(container, resource): - resourceToQoS[resource] = Guaranteed - case isResourceBestEffort(container, resource): - resourceToQoS[resource] = BestEffort - default: - resourceToQoS[resource] = Burstable - } - } - return resourceToQoS -} - -// supportedComputeResources is the list of supported compute resources -var supportedComputeResources = []api.ResourceName{ - api.ResourceCPU, - api.ResourceMemory, -} - -// allResources returns a set of all possible resources whose mapped key value is true if present on the container -func allResources(container *api.Container) map[api.ResourceName]bool { - resources := map[api.ResourceName]bool{} - for _, resource := range supportedComputeResources { - resources[resource] = false - } - for resource := range container.Resources.Requests { - resources[resource] = true - } - for resource := range container.Resources.Limits { - resources[resource] = true - } - return resources -} diff --git a/vendor/k8s.io/kubernetes/pkg/labels/doc.go b/vendor/k8s.io/kubernetes/pkg/labels/doc.go deleted file mode 100644 index 0e0282c35..000000000 --- a/vendor/k8s.io/kubernetes/pkg/labels/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package labels implements a simple label system, parsing and matching -// selectors with sets of labels. -package labels diff --git a/vendor/k8s.io/kubernetes/pkg/labels/labels.go b/vendor/k8s.io/kubernetes/pkg/labels/labels.go deleted file mode 100644 index 73324ba2b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/labels/labels.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package labels - -import ( - "sort" - "strings" -) - -// Labels allows you to present labels independently from their storage. -type Labels interface { - // Has returns whether the provided label exists. - Has(label string) (exists bool) - - // Get returns the value for the provided label. - Get(label string) (value string) -} - -// Set is a map of label:value. It implements Labels. -type Set map[string]string - -// String returns all labels listed as a human readable string. 
-// Conveniently, exactly the format that ParseSelector takes. -func (ls Set) String() string { - selector := make([]string, 0, len(ls)) - for key, value := range ls { - selector = append(selector, key+"="+value) - } - // Sort for determinism. - sort.StringSlice(selector).Sort() - return strings.Join(selector, ",") -} - -// Has returns whether the provided label exists in the map. -func (ls Set) Has(label string) bool { - _, exists := ls[label] - return exists -} - -// Get returns the value in the map for the provided label. -func (ls Set) Get(label string) string { - return ls[label] -} - -// AsSelector converts labels into a selectors. -func (ls Set) AsSelector() Selector { - return SelectorFromSet(ls) -} - -// FormatLables convert label map into plain string -func FormatLabels(labelMap map[string]string) string { - l := Set(labelMap).String() - if l == "" { - l = "<none>" - } - return l -} diff --git a/vendor/k8s.io/kubernetes/pkg/labels/selector.go b/vendor/k8s.io/kubernetes/pkg/labels/selector.go deleted file mode 100644 index ab64ecc80..000000000 --- a/vendor/k8s.io/kubernetes/pkg/labels/selector.go +++ /dev/null @@ -1,808 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package labels - -import ( - "bytes" - "fmt" - "sort" - "strconv" - "strings" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/util/validation" -) - -// Selector represents a label selector. -type Selector interface { - // Matches returns true if this selector matches the given set of labels. - Matches(Labels) bool - - // Empty returns true if this selector does not restrict the selection space. - Empty() bool - - // String returns a human readable string that represents this selector. - String() string - - // Add adds requirements to the Selector - Add(r ...Requirement) Selector -} - -// Everything returns a selector that matches all labels. -func Everything() Selector { - return internalSelector{} -} - -type nothingSelector struct{} - -func (n nothingSelector) Matches(_ Labels) bool { return false } -func (n nothingSelector) Empty() bool { return false } -func (n nothingSelector) String() string { return "<null>" } -func (n nothingSelector) Add(_ ...Requirement) Selector { return n } - -// Nothing returns a selector that matches no labels -func Nothing() Selector { - return nothingSelector{} -} - -// Operator represents a key's relationship -// to a set of values in a Requirement. -type Operator string - -const ( - DoesNotExistOperator Operator = "!" 
- EqualsOperator Operator = "=" - DoubleEqualsOperator Operator = "==" - InOperator Operator = "in" - NotEqualsOperator Operator = "!=" - NotInOperator Operator = "notin" - ExistsOperator Operator = "exists" - GreaterThanOperator Operator = "gt" - LessThanOperator Operator = "lt" -) - -func NewSelector() Selector { - return internalSelector(nil) -} - -type internalSelector []Requirement - -// Sort by key to obtain determisitic parser -type ByKey []Requirement - -func (a ByKey) Len() int { return len(a) } - -func (a ByKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -func (a ByKey) Less(i, j int) bool { return a[i].key < a[j].key } - -// Requirement is a selector that contains values, a key -// and an operator that relates the key and values. The zero -// value of Requirement is invalid. -// Requirement implements both set based match and exact match -// Requirement is initialized via NewRequirement constructor for creating a valid Requirement. -type Requirement struct { - key string - operator Operator - strValues sets.String -} - -// NewRequirement is the constructor for a Requirement. -// If any of these rules is violated, an error is returned: -// (1) The operator can only be In, NotIn, Equals, DoubleEquals, NotEquals, Exists, or DoesNotExist. -// (2) If the operator is In or NotIn, the values set must be non-empty. -// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. -// (4) If the operator is Exists or DoesNotExist, the value set must be empty. -// (5) If the operator is Gt or Lt, the values set must contain only one value. -// (6) The key is invalid due to its length, or sequence -// of characters. See validateLabelKey for more details. -// -// The empty string is a valid value in the input values set. -func NewRequirement(key string, op Operator, vals sets.String) (*Requirement, error) { - if err := validateLabelKey(key); err != nil { - return nil, err - } - switch op { - case InOperator, NotInOperator: - if len(vals) == 0 { - return nil, fmt.Errorf("for 'in', 'notin' operators, values set can't be empty") - } - case EqualsOperator, DoubleEqualsOperator, NotEqualsOperator: - if len(vals) != 1 { - return nil, fmt.Errorf("exact-match compatibility requires one single value") - } - case ExistsOperator, DoesNotExistOperator: - if len(vals) != 0 { - return nil, fmt.Errorf("values set must be empty for exists and does not exist") - } - case GreaterThanOperator, LessThanOperator: - if len(vals) != 1 { - return nil, fmt.Errorf("for 'Gt', 'Lt' operators, exactly one value is required") - } - for val := range vals { - if _, err := strconv.ParseFloat(val, 64); err != nil { - return nil, fmt.Errorf("for 'Gt', 'Lt' operators, the value must be a number") - } - } - default: - return nil, fmt.Errorf("operator '%v' is not recognized", op) - } - - for v := range vals { - if err := validateLabelValue(v); err != nil { - return nil, err - } - } - return &Requirement{key: key, operator: op, strValues: vals}, nil -} - -// Matches returns true if the Requirement matches the input Labels. -// There is a match in the following cases: -// (1) The operator is Exists and Labels has the Requirement's key. -// (2) The operator is In, Labels has the Requirement's key and Labels' -// value for that key is in Requirement's value set. -// (3) The operator is NotIn, Labels has the Requirement's key and -// Labels' value for that key is not in Requirement's value set. -// (4) The operator is DoesNotExist or NotIn and Labels does not have the -// Requirement's key. 
-func (r *Requirement) Matches(ls Labels) bool { - switch r.operator { - case InOperator, EqualsOperator, DoubleEqualsOperator: - if !ls.Has(r.key) { - return false - } - return r.strValues.Has(ls.Get(r.key)) - case NotInOperator, NotEqualsOperator: - if !ls.Has(r.key) { - return true - } - return !r.strValues.Has(ls.Get(r.key)) - case ExistsOperator: - return ls.Has(r.key) - case DoesNotExistOperator: - return !ls.Has(r.key) - case GreaterThanOperator, LessThanOperator: - if !ls.Has(r.key) { - return false - } - lsValue, err := strconv.ParseFloat(ls.Get(r.key), 64) - if err != nil { - glog.V(10).Infof("Parse float failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) - return false - } - - // There should be only one strValue in r.strValues, and can be converted to a float number. - if len(r.strValues) != 1 { - glog.V(10).Infof("Invalid values count %+v of requirement %+v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) - return false - } - - var rValue float64 - for strValue := range r.strValues { - rValue, err = strconv.ParseFloat(strValue, 64) - if err != nil { - glog.V(10).Infof("Parse float failed for value %+v in requirement %+v, for 'Gt', 'Lt' operators, the value must be a number", strValue, r) - return false - } - } - return (r.operator == GreaterThanOperator && lsValue > rValue) || (r.operator == LessThanOperator && lsValue < rValue) - default: - return false - } -} - -func (r *Requirement) Key() string { - return r.key -} -func (r *Requirement) Operator() Operator { - return r.operator -} -func (r *Requirement) Values() sets.String { - ret := sets.String{} - for k := range r.strValues { - ret.Insert(k) - } - return ret -} - -// Return true if the internalSelector doesn't restrict selection space -func (lsel internalSelector) Empty() bool { - if lsel == nil { - return true - } - return len(lsel) == 0 -} - -// String returns a human-readable string that represents this -// Requirement. If called on an invalid Requirement, an error is -// returned. See NewRequirement for creating a valid Requirement. -func (r *Requirement) String() string { - var buffer bytes.Buffer - if r.operator == DoesNotExistOperator { - buffer.WriteString("!") - } - buffer.WriteString(r.key) - - switch r.operator { - case EqualsOperator: - buffer.WriteString("=") - case DoubleEqualsOperator: - buffer.WriteString("==") - case NotEqualsOperator: - buffer.WriteString("!=") - case InOperator: - buffer.WriteString(" in ") - case NotInOperator: - buffer.WriteString(" notin ") - case GreaterThanOperator: - buffer.WriteString(">") - case LessThanOperator: - buffer.WriteString("<") - case ExistsOperator, DoesNotExistOperator: - return buffer.String() - } - - switch r.operator { - case InOperator, NotInOperator: - buffer.WriteString("(") - } - if len(r.strValues) == 1 { - buffer.WriteString(r.strValues.List()[0]) - } else { // only > 1 since == 0 prohibited by NewRequirement - buffer.WriteString(strings.Join(r.strValues.List(), ",")) - } - - switch r.operator { - case InOperator, NotInOperator: - buffer.WriteString(")") - } - return buffer.String() -} - -// Add adds requirements to the selector. 
It copies the current selector returning a new one -func (lsel internalSelector) Add(reqs ...Requirement) Selector { - var sel internalSelector - for ix := range lsel { - sel = append(sel, lsel[ix]) - } - for _, r := range reqs { - sel = append(sel, r) - } - sort.Sort(ByKey(sel)) - return sel -} - -// Matches for a internalSelector returns true if all -// its Requirements match the input Labels. If any -// Requirement does not match, false is returned. -func (lsel internalSelector) Matches(l Labels) bool { - for ix := range lsel { - if matches := lsel[ix].Matches(l); !matches { - return false - } - } - return true -} - -// String returns a comma-separated string of all -// the internalSelector Requirements' human-readable strings. -func (lsel internalSelector) String() string { - var reqs []string - for ix := range lsel { - reqs = append(reqs, lsel[ix].String()) - } - return strings.Join(reqs, ",") -} - -// constants definition for lexer token -type Token int - -const ( - ErrorToken Token = iota - EndOfStringToken - ClosedParToken - CommaToken - DoesNotExistToken - DoubleEqualsToken - EqualsToken - GreaterThanToken - IdentifierToken // to represent keys and values - InToken - LessThanToken - NotEqualsToken - NotInToken - OpenParToken -) - -// string2token contains the mapping between lexer Token and token literal -// (except IdentifierToken, EndOfStringToken and ErrorToken since it makes no sense) -var string2token = map[string]Token{ - ")": ClosedParToken, - ",": CommaToken, - "!": DoesNotExistToken, - "==": DoubleEqualsToken, - "=": EqualsToken, - ">": GreaterThanToken, - "in": InToken, - "<": LessThanToken, - "!=": NotEqualsToken, - "notin": NotInToken, - "(": OpenParToken, -} - -// The item produced by the lexer. It contains the Token and the literal. -type ScannedItem struct { - tok Token - literal string -} - -// isWhitespace returns true if the rune is a space, tab, or newline. -func isWhitespace(ch byte) bool { - return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n' -} - -// isSpecialSymbol detect if the character ch can be an operator -func isSpecialSymbol(ch byte) bool { - switch ch { - case '=', '!', '(', ')', ',', '>', '<': - return true - } - return false -} - -// Lexer represents the Lexer struct for label selector. -// It contains necessary informationt to tokenize the input string -type Lexer struct { - // s stores the string to be tokenized - s string - // pos is the position currently tokenized - pos int -} - -// read return the character currently lexed -// increment the position and check the buffer overflow -func (l *Lexer) read() (b byte) { - b = 0 - if l.pos < len(l.s) { - b = l.s[l.pos] - l.pos++ - } - return b -} - -// unread 'undoes' the last read character -func (l *Lexer) unread() { - l.pos-- -} - -// scanIdOrKeyword scans string to recognize literal token (for example 'in') or an identifier. -func (l *Lexer) scanIdOrKeyword() (tok Token, lit string) { - var buffer []byte -IdentifierLoop: - for { - switch ch := l.read(); { - case ch == 0: - break IdentifierLoop - case isSpecialSymbol(ch) || isWhitespace(ch): - l.unread() - break IdentifierLoop - default: - buffer = append(buffer, ch) - } - } - s := string(buffer) - if val, ok := string2token[s]; ok { // is a literal token? - return val, s - } - return IdentifierToken, s // otherwise is an identifier -} - -// scanSpecialSymbol scans string starting with special symbol. -// special symbol identify non literal operators. 
"!=", "==", "=" -func (l *Lexer) scanSpecialSymbol() (Token, string) { - lastScannedItem := ScannedItem{} - var buffer []byte -SpecialSymbolLoop: - for { - switch ch := l.read(); { - case ch == 0: - break SpecialSymbolLoop - case isSpecialSymbol(ch): - buffer = append(buffer, ch) - if token, ok := string2token[string(buffer)]; ok { - lastScannedItem = ScannedItem{tok: token, literal: string(buffer)} - } else if lastScannedItem.tok != 0 { - l.unread() - break SpecialSymbolLoop - } - default: - l.unread() - break SpecialSymbolLoop - } - } - if lastScannedItem.tok == 0 { - return ErrorToken, fmt.Sprintf("error expected: keyword found '%s'", buffer) - } - return lastScannedItem.tok, lastScannedItem.literal -} - -// skipWhiteSpaces consumes all blank characters -// returning the first non blank character -func (l *Lexer) skipWhiteSpaces(ch byte) byte { - for { - if !isWhitespace(ch) { - return ch - } - ch = l.read() - } -} - -// Lex returns a pair of Token and the literal -// literal is meaningfull only for IdentifierToken token -func (l *Lexer) Lex() (tok Token, lit string) { - switch ch := l.skipWhiteSpaces(l.read()); { - case ch == 0: - return EndOfStringToken, "" - case isSpecialSymbol(ch): - l.unread() - return l.scanSpecialSymbol() - default: - l.unread() - return l.scanIdOrKeyword() - } -} - -// Parser data structure contains the label selector parser data strucutre -type Parser struct { - l *Lexer - scannedItems []ScannedItem - position int -} - -// Parser context represents context during parsing: -// some literal for example 'in' and 'notin' can be -// recognized as operator for example 'x in (a)' but -// it can be recognized as value for example 'value in (in)' -type ParserContext int - -const ( - KeyAndOperator ParserContext = iota - Values -) - -// lookahead func returns the current token and string. No increment of current position -func (p *Parser) lookahead(context ParserContext) (Token, string) { - tok, lit := p.scannedItems[p.position].tok, p.scannedItems[p.position].literal - if context == Values { - switch tok { - case InToken, NotInToken: - tok = IdentifierToken - } - } - return tok, lit -} - -// consume returns current token and string. Increments the the position -func (p *Parser) consume(context ParserContext) (Token, string) { - p.position++ - tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal - if context == Values { - switch tok { - case InToken, NotInToken: - tok = IdentifierToken - } - } - return tok, lit -} - -// scan runs through the input string and stores the ScannedItem in an array -// Parser can now lookahead and consume the tokens -func (p *Parser) scan() { - for { - token, literal := p.l.Lex() - p.scannedItems = append(p.scannedItems, ScannedItem{token, literal}) - if token == EndOfStringToken { - break - } - } -} - -// parse runs the left recursive descending algorithm -// on input string. It returns a list of Requirement objects. 
-func (p *Parser) parse() (internalSelector, error) { - p.scan() // init scannedItems - - var requirements internalSelector - for { - tok, lit := p.lookahead(Values) - switch tok { - case IdentifierToken, DoesNotExistToken: - r, err := p.parseRequirement() - if err != nil { - return nil, fmt.Errorf("unable to parse requirement: %v", err) - } - requirements = append(requirements, *r) - t, l := p.consume(Values) - switch t { - case EndOfStringToken: - return requirements, nil - case CommaToken: - t2, l2 := p.lookahead(Values) - if t2 != IdentifierToken && t2 != DoesNotExistToken { - return nil, fmt.Errorf("found '%s', expected: identifier after ','", l2) - } - default: - return nil, fmt.Errorf("found '%s', expected: ',' or 'end of string'", l) - } - case EndOfStringToken: - return requirements, nil - default: - return nil, fmt.Errorf("found '%s', expected: !, identifier, or 'end of string'", lit) - } - } -} - -func (p *Parser) parseRequirement() (*Requirement, error) { - key, operator, err := p.parseKeyAndInferOperator() - if err != nil { - return nil, err - } - if operator == ExistsOperator || operator == DoesNotExistOperator { // operator found lookahead set checked - return NewRequirement(key, operator, nil) - } - operator, err = p.parseOperator() - if err != nil { - return nil, err - } - var values sets.String - switch operator { - case InOperator, NotInOperator: - values, err = p.parseValues() - case EqualsOperator, DoubleEqualsOperator, NotEqualsOperator, GreaterThanOperator, LessThanOperator: - values, err = p.parseExactValue() - } - if err != nil { - return nil, err - } - return NewRequirement(key, operator, values) - -} - -// parseKeyAndInferOperator parse literals. -// in case of no operator '!, in, notin, ==, =, !=' are found -// the 'exists' operator is inferred -func (p *Parser) parseKeyAndInferOperator() (string, Operator, error) { - var operator Operator - tok, literal := p.consume(Values) - if tok == DoesNotExistToken { - operator = DoesNotExistOperator - tok, literal = p.consume(Values) - } - if tok != IdentifierToken { - err := fmt.Errorf("found '%s', expected: identifier", literal) - return "", "", err - } - if err := validateLabelKey(literal); err != nil { - return "", "", err - } - if t, _ := p.lookahead(Values); t == EndOfStringToken || t == CommaToken { - if operator != DoesNotExistOperator { - operator = ExistsOperator - } - } - return literal, operator, nil -} - -// parseOperator return operator and eventually matchType -// matchType can be exact -func (p *Parser) parseOperator() (op Operator, err error) { - tok, lit := p.consume(KeyAndOperator) - switch tok { - // DoesNotExistToken shouldn't be here because it's a unary operator, not a binary operator - case InToken: - op = InOperator - case EqualsToken: - op = EqualsOperator - case DoubleEqualsToken: - op = DoubleEqualsOperator - case GreaterThanToken: - op = GreaterThanOperator - case LessThanToken: - op = LessThanOperator - case NotInToken: - op = NotInOperator - case NotEqualsToken: - op = NotEqualsOperator - default: - return "", fmt.Errorf("found '%s', expected: '=', '!=', '==', 'in', notin'", lit) - } - return op, nil -} - -// parseValues parses the values for set based matching (x,y,z) -func (p *Parser) parseValues() (sets.String, error) { - tok, lit := p.consume(Values) - if tok != OpenParToken { - return nil, fmt.Errorf("found '%s' expected: '('", lit) - } - tok, lit = p.lookahead(Values) - switch tok { - case IdentifierToken, CommaToken: - s, err := p.parseIdentifiersList() // handles general cases - if 
err != nil { - return s, err - } - if tok, _ = p.consume(Values); tok != ClosedParToken { - return nil, fmt.Errorf("found '%s', expected: ')'", lit) - } - return s, nil - case ClosedParToken: // handles "()" - p.consume(Values) - return sets.NewString(""), nil - default: - return nil, fmt.Errorf("found '%s', expected: ',', ')' or identifier", lit) - } -} - -// parseIdentifiersList parses a (possibly empty) list of -// of comma separated (possibly empty) identifiers -func (p *Parser) parseIdentifiersList() (sets.String, error) { - s := sets.NewString() - for { - tok, lit := p.consume(Values) - switch tok { - case IdentifierToken: - s.Insert(lit) - tok2, lit2 := p.lookahead(Values) - switch tok2 { - case CommaToken: - continue - case ClosedParToken: - return s, nil - default: - return nil, fmt.Errorf("found '%s', expected: ',' or ')'", lit2) - } - case CommaToken: // handled here since we can have "(," - if s.Len() == 0 { - s.Insert("") // to handle (, - } - tok2, _ := p.lookahead(Values) - if tok2 == ClosedParToken { - s.Insert("") // to handle ,) Double "" removed by StringSet - return s, nil - } - if tok2 == CommaToken { - p.consume(Values) - s.Insert("") // to handle ,, Double "" removed by StringSet - } - default: // it can be operator - return s, fmt.Errorf("found '%s', expected: ',', or identifier", lit) - } - } -} - -// parseExactValue parses the only value for exact match style -func (p *Parser) parseExactValue() (sets.String, error) { - s := sets.NewString() - tok, lit := p.lookahead(Values) - if tok == EndOfStringToken || tok == CommaToken { - s.Insert("") - return s, nil - } - tok, lit = p.consume(Values) - if tok == IdentifierToken { - s.Insert(lit) - return s, nil - } - return nil, fmt.Errorf("found '%s', expected: identifier", lit) -} - -// Parse takes a string representing a selector and returns a selector -// object, or an error. This parsing function differs from ParseSelector -// as they parse different selectors with different syntaxes. -// The input will cause an error if it does not follow this form: -// -// <selector-syntax> ::= <requirement> | <requirement> "," <selector-syntax> ] -// <requirement> ::= [!] KEY [ <set-based-restriction> | <exact-match-restriction> ] -// <set-based-restriction> ::= "" | <inclusion-exclusion> <value-set> -// <inclusion-exclusion> ::= <inclusion> | <exclusion> -// <exclusion> ::= "notin" -// <inclusion> ::= "in" -// <value-set> ::= "(" <values> ")" -// <values> ::= VALUE | VALUE "," <values> -// <exact-match-restriction> ::= ["="|"=="|"!="] VALUE -// KEY is a sequence of one or more characters following [ DNS_SUBDOMAIN "/" ] DNS_LABEL. Max length is 63 characters. -// VALUE is a sequence of zero or more characters "([A-Za-z0-9_-\.])". Max length is 63 characters. -// Delimiter is white space: (' ', '\t') -// Example of valid syntax: -// "x in (foo,,baz),y,z notin ()" -// -// Note: -// (1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the -// VALUEs in its requirement -// (2) Exclusion - " notin " - denotes that the KEY is not equal to any -// of the VALUEs in its requirement or does not exist -// (3) The empty string is a valid VALUE -// (4) A requirement with just a KEY - as in "y" above - denotes that -// the KEY exists and can be any VALUE. -// (5) A requirement with just !KEY requires that the KEY not exist. 
-// -func Parse(selector string) (Selector, error) { - parsedSelector, err := parse(selector) - if err == nil { - return parsedSelector, nil - } - return nil, err -} - -// parse parses the string representation of the selector and returns the internalSelector struct. -// The callers of this method can then decide how to return the internalSelector struct to their -// callers. This function has two callers now, one returns a Selector interface and the other -// returns a list of requirements. -func parse(selector string) (internalSelector, error) { - p := &Parser{l: &Lexer{s: selector, pos: 0}} - items, err := p.parse() - if err != nil { - return nil, err - } - sort.Sort(ByKey(items)) // sort to grant determistic parsing - return internalSelector(items), err -} - -func validateLabelKey(k string) error { - if errs := validation.IsQualifiedName(k); len(errs) != 0 { - return fmt.Errorf("invalid label key %q: %s", k, strings.Join(errs, "; ")) - } - return nil -} - -func validateLabelValue(v string) error { - if errs := validation.IsValidLabelValue(v); len(errs) != 0 { - return fmt.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; ")) - } - return nil -} - -// SelectorFromSet returns a Selector which will match exactly the given Set. A -// nil and empty Sets are considered equivalent to Everything(). -func SelectorFromSet(ls Set) Selector { - if ls == nil { - return internalSelector{} - } - var requirements internalSelector - for label, value := range ls { - if r, err := NewRequirement(label, EqualsOperator, sets.NewString(value)); err != nil { - //TODO: double check errors when input comes from serialization? - return internalSelector{} - } else { - requirements = append(requirements, *r) - } - } - // sort to have deterministic string representation - sort.Sort(ByKey(requirements)) - return internalSelector(requirements) -} - -// ParseToRequirements takes a string representing a selector and returns a list of -// requirements. This function is suitable for those callers that perform additional -// processing on selector requirements. -// See the documentation for Parse() function for more details. -// TODO: Consider exporting the internalSelector type instead. -func ParseToRequirements(selector string) ([]Requirement, error) { - return parse(selector) -} diff --git a/vendor/k8s.io/kubernetes/pkg/master/ports/doc.go b/vendor/k8s.io/kubernetes/pkg/master/ports/doc.go deleted file mode 100644 index dc6c989e8..000000000 --- a/vendor/k8s.io/kubernetes/pkg/master/ports/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package ports defines ports used by various pieces of the kubernetes -// infrastructure. 
-package ports diff --git a/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go b/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go deleted file mode 100644 index 246a1a562..000000000 --- a/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ports - -const ( - // ProxyPort is the default port for the proxy healthz server. - // May be overridden by a flag at startup. - ProxyStatusPort = 10249 - // KubeletPort is the default port for the kubelet server on each host machine. - // May be overridden by a flag at startup. - KubeletPort = 10250 - // SchedulerPort is the default port for the scheduler status server. - // May be overridden by a flag at startup. - SchedulerPort = 10251 - // ControllerManagerPort is the default port for the controller manager status server. - // May be overridden by a flag at startup. - ControllerManagerPort = 10252 - // Port for flannel daemon. - FlannelDaemonPort = 10253 - // KubeletReadOnlyPort exposes basic read-only services from the kubelet. - // May be overridden by a flag at startup. - // This is necessary for heapster to collect monitoring stats from the kubelet - // until heapster can transition to using the SSL endpoint. - // TODO(roberthbailey): Remove this once we have a better solution for heapster. - KubeletReadOnlyPort = 10255 -) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/generic/doc.go b/vendor/k8s.io/kubernetes/pkg/registry/generic/doc.go deleted file mode 100644 index 2486e9b74..000000000 --- a/vendor/k8s.io/kubernetes/pkg/registry/generic/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package generic provides a generic object store interface and a -// generic label/field matching type. -package generic diff --git a/vendor/k8s.io/kubernetes/pkg/registry/generic/matcher.go b/vendor/k8s.io/kubernetes/pkg/registry/generic/matcher.go deleted file mode 100644 index 08e2df7b4..000000000 --- a/vendor/k8s.io/kubernetes/pkg/registry/generic/matcher.go +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package generic - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/runtime" -) - -// AttrFunc returns label and field sets for List or Watch to compare against, or an error. -type AttrFunc func(obj runtime.Object) (label labels.Set, field fields.Set, err error) - -// ObjectMetaFieldsSet returns a fields set that represents the ObjectMeta. -func ObjectMetaFieldsSet(objectMeta api.ObjectMeta, hasNamespaceField bool) fields.Set { - if !hasNamespaceField { - return fields.Set{ - "metadata.name": objectMeta.Name, - } - } - return fields.Set{ - "metadata.name": objectMeta.Name, - "metadata.namespace": objectMeta.Namespace, - } -} - -// MergeFieldsSets merges a fields'set from fragment into the source. -func MergeFieldsSets(source fields.Set, fragment fields.Set) fields.Set { - for k, value := range fragment { - source[k] = value - } - return source -} - -// SelectionPredicate implements a generic predicate that can be passed to -// GenericRegistry's List or Watch methods. Implements the Matcher interface. -type SelectionPredicate struct { - Label labels.Selector - Field fields.Selector - GetAttrs AttrFunc -} - -// Matches returns true if the given object's labels and fields (as -// returned by s.GetAttrs) match s.Label and s.Field. An error is -// returned if s.GetAttrs fails. -func (s *SelectionPredicate) Matches(obj runtime.Object) (bool, error) { - if s.Label.Empty() && s.Field.Empty() { - return true, nil - } - labels, fields, err := s.GetAttrs(obj) - if err != nil { - return false, err - } - return s.Label.Matches(labels) && s.Field.Matches(fields), nil -} - -// MatchesSingle will return (name, true) if and only if s.Field matches on the object's -// name. -func (s *SelectionPredicate) MatchesSingle() (string, bool) { - // TODO: should be namespace.name - if name, ok := s.Field.RequiresExactMatch("metadata.name"); ok { - return name, true - } - return "", false -} - -// Matcher can return true if an object matches the Matcher's selection -// criteria. If it is known that the matcher will match only a single object -// then MatchesSingle should return the key of that object and true. This is an -// optimization only--Matches() should continue to work. -type Matcher interface { - // Matches should return true if obj matches this matcher's requirements. - Matches(obj runtime.Object) (matchesThisObject bool, err error) - - // If this matcher matches a single object, return the key for that - // object and true here. This will greatly increase efficiency. You - // must still implement Matches(). Note that key does NOT need to - // include the object's namespace. - MatchesSingle() (key string, matchesSingleObject bool) - - // TODO: when we start indexing objects, add something like the below: - // MatchesIndices() (indexName []string, indexValue []string) - // where indexName/indexValue are the same length. -} - -// MatcherFunc makes a matcher from the provided function. For easy definition -// of matchers for testing. Note: use SelectionPredicate above for real code! 
-func MatcherFunc(f func(obj runtime.Object) (bool, error)) Matcher { - return matcherFunc(f) -} - -type matcherFunc func(obj runtime.Object) (bool, error) - -// Matches calls the embedded function. -func (m matcherFunc) Matches(obj runtime.Object) (bool, error) { - return m(obj) -} - -// MatchesSingle always returns "", false-- because this is a predicate -// implementation of Matcher. -func (m matcherFunc) MatchesSingle() (string, bool) { - return "", false -} - -// MatchOnKey returns a matcher that will send only the object matching key -// through the matching function f. For testing! -// Note: use SelectionPredicate above for real code! -func MatchOnKey(key string, f func(obj runtime.Object) (bool, error)) Matcher { - return matchKey{key, f} -} - -type matchKey struct { - key string - matcherFunc -} - -// MatchesSingle always returns its key, true. -func (m matchKey) MatchesSingle() (string, bool) { - return m.key, true -} - -var ( - // Assert implementations match the interface. - _ = Matcher(matchKey{}) - _ = Matcher(&SelectionPredicate{}) - _ = Matcher(matcherFunc(nil)) -) diff --git a/vendor/k8s.io/kubernetes/pkg/registry/generic/options.go b/vendor/k8s.io/kubernetes/pkg/registry/generic/options.go deleted file mode 100644 index eea52c995..000000000 --- a/vendor/k8s.io/kubernetes/pkg/registry/generic/options.go +++ /dev/null @@ -1,28 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package generic - -import ( - pkgstorage "k8s.io/kubernetes/pkg/storage" -) - -// RESTOptions is set of configuration options to generic registries. -type RESTOptions struct { - Storage pkgstorage.Interface - Decorator StorageDecorator - DeleteCollectionWorkers int -} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/generic/storage_decorator.go b/vendor/k8s.io/kubernetes/pkg/registry/generic/storage_decorator.go deleted file mode 100644 index 70109efe3..000000000 --- a/vendor/k8s.io/kubernetes/pkg/registry/generic/storage_decorator.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package generic - -import ( - "k8s.io/kubernetes/pkg/api/rest" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/storage" -) - -// StorageDecorator is a function signature for producing -// a storage.Interface from given parameters. 
-type StorageDecorator func( - storageInterface storage.Interface, - capacity int, - objectType runtime.Object, - resourcePrefix string, - scopeStrategy rest.NamespaceScopedStrategy, - newListFunc func() runtime.Object) storage.Interface - -// Returns given 'storageInterface' without any decoration. -func UndecoratedStorage( - storageInterface storage.Interface, - capacity int, - objectType runtime.Object, - resourcePrefix string, - scopeStrategy rest.NamespaceScopedStrategy, - newListFunc func() runtime.Object) storage.Interface { - return storageInterface -} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/codec.go b/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/codec.go deleted file mode 100644 index 758ffd466..000000000 --- a/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/codec.go +++ /dev/null @@ -1,574 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package thirdpartyresourcedata - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/url" - "strings" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/unversioned" - apiutil "k8s.io/kubernetes/pkg/api/util" - "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/apimachinery/registered" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/sets" - "k8s.io/kubernetes/pkg/util/yaml" - "k8s.io/kubernetes/pkg/watch/versioned" -) - -type thirdPartyObjectConverter struct { - converter runtime.ObjectConvertor -} - -func (t *thirdPartyObjectConverter) ConvertToVersion(in runtime.Object, outVersion unversioned.GroupVersion) (out runtime.Object, err error) { - switch in.(type) { - // This seems weird, but in this case the ThirdPartyResourceData is really just a wrapper on the raw 3rd party data. - // The actual thing printed/sent to server is the actual raw third party resource data, which only has one version. 
- case *extensions.ThirdPartyResourceData: - return in, nil - default: - return t.converter.ConvertToVersion(in, outVersion) - } -} - -func (t *thirdPartyObjectConverter) Convert(in, out interface{}) error { - return t.converter.Convert(in, out) -} - -func (t *thirdPartyObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { - return t.converter.ConvertFieldLabel(version, kind, label, value) -} - -func NewThirdPartyObjectConverter(converter runtime.ObjectConvertor) runtime.ObjectConvertor { - return &thirdPartyObjectConverter{converter} -} - -type thirdPartyResourceDataMapper struct { - mapper meta.RESTMapper - kind string - version string - group string -} - -var _ meta.RESTMapper = &thirdPartyResourceDataMapper{} - -func (t *thirdPartyResourceDataMapper) getResource() unversioned.GroupVersionResource { - plural, _ := meta.KindToResource(t.getKind()) - - return plural -} - -func (t *thirdPartyResourceDataMapper) getKind() unversioned.GroupVersionKind { - return unversioned.GroupVersionKind{Group: t.group, Version: t.version, Kind: t.kind} -} - -func (t *thirdPartyResourceDataMapper) isThirdPartyResource(partialResource unversioned.GroupVersionResource) bool { - actualResource := t.getResource() - if strings.ToLower(partialResource.Resource) != strings.ToLower(actualResource.Resource) { - return false - } - if len(partialResource.Group) != 0 && partialResource.Group != actualResource.Group { - return false - } - if len(partialResource.Version) != 0 && partialResource.Version != actualResource.Version { - return false - } - - return true -} - -func (t *thirdPartyResourceDataMapper) ResourcesFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionResource, error) { - if t.isThirdPartyResource(resource) { - return []unversioned.GroupVersionResource{t.getResource()}, nil - } - return t.mapper.ResourcesFor(resource) -} - -func (t *thirdPartyResourceDataMapper) KindsFor(resource unversioned.GroupVersionResource) ([]unversioned.GroupVersionKind, error) { - if t.isThirdPartyResource(resource) { - return []unversioned.GroupVersionKind{t.getKind()}, nil - } - return t.mapper.KindsFor(resource) -} - -func (t *thirdPartyResourceDataMapper) ResourceFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionResource, error) { - if t.isThirdPartyResource(resource) { - return t.getResource(), nil - } - return t.mapper.ResourceFor(resource) -} - -func (t *thirdPartyResourceDataMapper) KindFor(resource unversioned.GroupVersionResource) (unversioned.GroupVersionKind, error) { - if t.isThirdPartyResource(resource) { - return t.getKind(), nil - } - return t.mapper.KindFor(resource) -} - -func (t *thirdPartyResourceDataMapper) RESTMapping(gk unversioned.GroupKind, versions ...string) (*meta.RESTMapping, error) { - if len(versions) != 1 { - return nil, fmt.Errorf("unexpected set of versions: %v", versions) - } - if gk.Group != t.group { - return nil, fmt.Errorf("unknown group %q expected %s", gk.Group, t.group) - } - if gk.Kind != "ThirdPartyResourceData" { - return nil, fmt.Errorf("unknown kind %s expected %s", gk.Kind, t.kind) - } - if versions[0] != t.version { - return nil, fmt.Errorf("unknown version %q expected %q", versions[0], t.version) - } - - // TODO figure out why we're doing this rewriting - extensionGK := unversioned.GroupKind{Group: extensions.GroupName, Kind: "ThirdPartyResourceData"} - - mapping, err := t.mapper.RESTMapping(extensionGK, registered.GroupOrDie(extensions.GroupName).GroupVersion.Version) - if err != nil { - 
return nil, err - } - mapping.ObjectConvertor = &thirdPartyObjectConverter{mapping.ObjectConvertor} - return mapping, nil -} - -func (t *thirdPartyResourceDataMapper) AliasesForResource(resource string) ([]string, bool) { - return t.mapper.AliasesForResource(resource) -} - -func (t *thirdPartyResourceDataMapper) ResourceSingularizer(resource string) (singular string, err error) { - return t.mapper.ResourceSingularizer(resource) -} - -func NewMapper(mapper meta.RESTMapper, kind, version, group string) meta.RESTMapper { - return &thirdPartyResourceDataMapper{ - mapper: mapper, - kind: kind, - version: version, - group: group, - } -} - -type thirdPartyResourceDataCodecFactory struct { - delegate runtime.NegotiatedSerializer - kind string - encodeGV unversioned.GroupVersion - decodeGV unversioned.GroupVersion -} - -func NewNegotiatedSerializer(s runtime.NegotiatedSerializer, kind string, encodeGV, decodeGV unversioned.GroupVersion) runtime.NegotiatedSerializer { - return &thirdPartyResourceDataCodecFactory{ - delegate: s, - kind: kind, - encodeGV: encodeGV, - decodeGV: decodeGV, - } -} - -func (t *thirdPartyResourceDataCodecFactory) SupportedMediaTypes() []string { - supported := sets.NewString(t.delegate.SupportedMediaTypes()...) - return supported.Intersection(sets.NewString("application/json", "application/yaml")).List() -} - -func (t *thirdPartyResourceDataCodecFactory) SerializerForMediaType(mediaType string, params map[string]string) (runtime.SerializerInfo, bool) { - switch mediaType { - case "application/json", "application/yaml": - return t.delegate.SerializerForMediaType(mediaType, params) - default: - return runtime.SerializerInfo{}, false - } -} - -func (t *thirdPartyResourceDataCodecFactory) SupportedStreamingMediaTypes() []string { - supported := sets.NewString(t.delegate.SupportedStreamingMediaTypes()...) 
- return supported.Intersection(sets.NewString("application/json", "application/json;stream=watch")).List() -} - -func (t *thirdPartyResourceDataCodecFactory) StreamingSerializerForMediaType(mediaType string, params map[string]string) (runtime.StreamSerializerInfo, bool) { - switch mediaType { - case "application/json", "application/json;stream=watch": - return t.delegate.StreamingSerializerForMediaType(mediaType, params) - default: - return runtime.StreamSerializerInfo{}, false - } -} - -func (t *thirdPartyResourceDataCodecFactory) EncoderForVersion(s runtime.Encoder, gv unversioned.GroupVersion) runtime.Encoder { - return &thirdPartyResourceDataEncoder{delegate: t.delegate.EncoderForVersion(s, gv), gvk: gv.WithKind(t.kind)} -} - -func (t *thirdPartyResourceDataCodecFactory) DecoderToVersion(s runtime.Decoder, gv unversioned.GroupVersion) runtime.Decoder { - return NewDecoder(t.delegate.DecoderToVersion(s, gv), t.kind) -} - -func NewCodec(delegate runtime.Codec, gvk unversioned.GroupVersionKind) runtime.Codec { - return runtime.NewCodec(NewEncoder(delegate, gvk), NewDecoder(delegate, gvk.Kind)) -} - -type thirdPartyResourceDataDecoder struct { - delegate runtime.Decoder - kind string -} - -func NewDecoder(delegate runtime.Decoder, kind string) runtime.Decoder { - return &thirdPartyResourceDataDecoder{delegate: delegate, kind: kind} -} - -var _ runtime.Decoder = &thirdPartyResourceDataDecoder{} - -func parseObject(data []byte) (map[string]interface{}, error) { - var obj interface{} - if err := json.Unmarshal(data, &obj); err != nil { - return nil, err - } - mapObj, ok := obj.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("unexpected object: %#v", obj) - } - return mapObj, nil -} - -func (t *thirdPartyResourceDataDecoder) populate(data []byte) (runtime.Object, error) { - mapObj, err := parseObject(data) - if err != nil { - return nil, err - } - return t.populateFromObject(mapObj, data) -} - -func (t *thirdPartyResourceDataDecoder) populateFromObject(mapObj map[string]interface{}, data []byte) (runtime.Object, error) { - typeMeta := unversioned.TypeMeta{} - if err := json.Unmarshal(data, &typeMeta); err != nil { - return nil, err - } - isList := strings.HasSuffix(typeMeta.Kind, "List") - switch { - case !isList && (len(t.kind) == 0 || typeMeta.Kind == t.kind): - result := &extensions.ThirdPartyResourceData{} - if err := t.populateResource(result, mapObj, data); err != nil { - return nil, err - } - return result, nil - case isList && (len(t.kind) == 0 || typeMeta.Kind == t.kind+"List"): - list := &extensions.ThirdPartyResourceDataList{} - if err := t.populateListResource(list, mapObj); err != nil { - return nil, err - } - return list, nil - default: - return nil, fmt.Errorf("unexpected kind: %s, expected %s", typeMeta.Kind, t.kind) - } -} - -func (t *thirdPartyResourceDataDecoder) populateResource(objIn *extensions.ThirdPartyResourceData, mapObj map[string]interface{}, data []byte) error { - metadata, ok := mapObj["metadata"].(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected object for metadata: %#v", mapObj["metadata"]) - } - - metadataData, err := json.Marshal(metadata) - if err != nil { - return err - } - - if err := json.Unmarshal(metadataData, &objIn.ObjectMeta); err != nil { - return err - } - // Override API Version with the ThirdPartyResourceData value - // TODO: fix this hard code - objIn.APIVersion = v1beta1.SchemeGroupVersion.String() - - objIn.Data = data - return nil -} - -func IsThirdPartyObject(rawData []byte, gvk *unversioned.GroupVersionKind) 
(isThirdParty bool, gvkOut *unversioned.GroupVersionKind, err error) { - var gv unversioned.GroupVersion - if gvk == nil { - data, err := yaml.ToJSON(rawData) - if err != nil { - return false, nil, err - } - metadata := unversioned.TypeMeta{} - if err = json.Unmarshal(data, &metadata); err != nil { - return false, nil, err - } - gv, err = unversioned.ParseGroupVersion(metadata.APIVersion) - if err != nil { - return false, nil, err - } - gvkOut = &unversioned.GroupVersionKind{ - Group: gv.Group, - Version: gv.Version, - Kind: metadata.Kind, - } - } else { - gv = gvk.GroupVersion() - gvkOut = gvk - } - return registered.IsThirdPartyAPIGroupVersion(gv), gvkOut, nil -} - -func (t *thirdPartyResourceDataDecoder) Decode(data []byte, gvk *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { - if into == nil { - if gvk == nil || gvk.Kind != t.kind { - if isThirdParty, _, err := IsThirdPartyObject(data, gvk); err != nil { - return nil, nil, err - } else if !isThirdParty { - return t.delegate.Decode(data, gvk, into) - } - } - obj, err := t.populate(data) - if err != nil { - return nil, nil, err - } - return obj, gvk, nil - } - switch o := into.(type) { - case *extensions.ThirdPartyResourceData: - break - case *runtime.VersionedObjects: - // We're not sure that it's third party, we need to test - if gvk == nil || gvk.Kind != t.kind { - if isThirdParty, _, err := IsThirdPartyObject(data, gvk); err != nil { - return nil, nil, err - } else if !isThirdParty { - return t.delegate.Decode(data, gvk, into) - } - } - obj, err := t.populate(data) - if err != nil { - return nil, nil, err - } - o.Objects = []runtime.Object{ - obj, - } - return o, gvk, nil - default: - return t.delegate.Decode(data, gvk, into) - } - - thirdParty := into.(*extensions.ThirdPartyResourceData) - var dataObj interface{} - if err := json.Unmarshal(data, &dataObj); err != nil { - return nil, nil, err - } - mapObj, ok := dataObj.(map[string]interface{}) - if !ok { - - return nil, nil, fmt.Errorf("unexpected object: %#v", dataObj) - } - /*if gvk.Kind != "ThirdPartyResourceData" { - return nil, nil, fmt.Errorf("unexpected kind: %s", gvk.Kind) - }*/ - actual := &unversioned.GroupVersionKind{} - if kindObj, found := mapObj["kind"]; !found { - if gvk == nil { - return nil, nil, runtime.NewMissingKindErr(string(data)) - } - mapObj["kind"] = gvk.Kind - actual.Kind = gvk.Kind - } else { - kindStr, ok := kindObj.(string) - if !ok { - return nil, nil, fmt.Errorf("unexpected object for 'kind': %v", kindObj) - } - if len(t.kind) > 0 && kindStr != t.kind { - return nil, nil, fmt.Errorf("kind doesn't match, expecting: %s, got %s", gvk.Kind, kindStr) - } - actual.Kind = kindStr - } - if versionObj, found := mapObj["apiVersion"]; !found { - if gvk == nil { - return nil, nil, runtime.NewMissingVersionErr(string(data)) - } - mapObj["apiVersion"] = gvk.GroupVersion().String() - actual.Group, actual.Version = gvk.Group, gvk.Version - } else { - versionStr, ok := versionObj.(string) - if !ok { - return nil, nil, fmt.Errorf("unexpected object for 'apiVersion': %v", versionObj) - } - if gvk != nil && versionStr != gvk.GroupVersion().String() { - return nil, nil, fmt.Errorf("version doesn't match, expecting: %v, got %s", gvk.GroupVersion(), versionStr) - } - gv, err := unversioned.ParseGroupVersion(versionStr) - if err != nil { - return nil, nil, err - } - actual.Group, actual.Version = gv.Group, gv.Version - } - - mapObj, err := parseObject(data) - if err != nil { - return nil, actual, err - } - if err := 
t.populateResource(thirdParty, mapObj, data); err != nil { - return nil, actual, err - } - return thirdParty, actual, nil -} - -func (t *thirdPartyResourceDataDecoder) populateListResource(objIn *extensions.ThirdPartyResourceDataList, mapObj map[string]interface{}) error { - items, ok := mapObj["items"].([]interface{}) - if !ok { - return fmt.Errorf("unexpected object for items: %#v", mapObj["items"]) - } - objIn.Items = make([]extensions.ThirdPartyResourceData, len(items)) - for ix := range items { - objData, err := json.Marshal(items[ix]) - if err != nil { - return err - } - objMap, err := parseObject(objData) - if err != nil { - return err - } - if err := t.populateResource(&objIn.Items[ix], objMap, objData); err != nil { - return err - } - } - return nil -} - -const template = `{ - "kind": "%s", - "apiVersion": "%s", - "metadata": {}, - "items": [ %s ] -}` - -type thirdPartyResourceDataEncoder struct { - delegate runtime.Encoder - gvk unversioned.GroupVersionKind -} - -func NewEncoder(delegate runtime.Encoder, gvk unversioned.GroupVersionKind) runtime.Encoder { - return &thirdPartyResourceDataEncoder{delegate: delegate, gvk: gvk} -} - -var _ runtime.Encoder = &thirdPartyResourceDataEncoder{} - -func encodeToJSON(obj *extensions.ThirdPartyResourceData, stream io.Writer) error { - var objOut interface{} - if err := json.Unmarshal(obj.Data, &objOut); err != nil { - return err - } - objMap, ok := objOut.(map[string]interface{}) - if !ok { - return fmt.Errorf("unexpected type: %v", objOut) - } - objMap["metadata"] = obj.ObjectMeta - encoder := json.NewEncoder(stream) - return encoder.Encode(objMap) -} - -func (t *thirdPartyResourceDataEncoder) Encode(obj runtime.Object, stream io.Writer) (err error) { - switch obj := obj.(type) { - case *extensions.ThirdPartyResourceData: - return encodeToJSON(obj, stream) - case *extensions.ThirdPartyResourceDataList: - // TODO: There must be a better way to do this... 
- dataStrings := make([]string, len(obj.Items)) - for ix := range obj.Items { - buff := &bytes.Buffer{} - err := encodeToJSON(&obj.Items[ix], buff) - if err != nil { - return err - } - dataStrings[ix] = buff.String() - } - gv := t.gvk.GroupVersion() - fmt.Fprintf(stream, template, t.gvk.Kind+"List", gv.String(), strings.Join(dataStrings, ",")) - return nil - case *versioned.InternalEvent: - event := &versioned.Event{} - err := versioned.Convert_versioned_InternalEvent_to_versioned_Event(obj, event, nil) - if err != nil { - return err - } - - enc := json.NewEncoder(stream) - err = enc.Encode(event) - if err != nil { - return err - } - - return nil - case *unversioned.Status, *unversioned.APIResourceList: - return t.delegate.Encode(obj, stream) - default: - return fmt.Errorf("unexpected object to encode: %#v", obj) - } -} - -func NewObjectCreator(group, version string, delegate runtime.ObjectCreater) runtime.ObjectCreater { - return &thirdPartyResourceDataCreator{group, version, delegate} -} - -type thirdPartyResourceDataCreator struct { - group string - version string - delegate runtime.ObjectCreater -} - -func (t *thirdPartyResourceDataCreator) New(kind unversioned.GroupVersionKind) (out runtime.Object, err error) { - switch kind.Kind { - case "ThirdPartyResourceData": - if apiutil.GetGroupVersion(t.group, t.version) != kind.GroupVersion().String() { - return nil, fmt.Errorf("unknown kind %v", kind) - } - return &extensions.ThirdPartyResourceData{}, nil - case "ThirdPartyResourceDataList": - if apiutil.GetGroupVersion(t.group, t.version) != kind.GroupVersion().String() { - return nil, fmt.Errorf("unknown kind %v", kind) - } - return &extensions.ThirdPartyResourceDataList{}, nil - // TODO: this list needs to be formalized higher in the chain - case "ListOptions", "WatchEvent": - if apiutil.GetGroupVersion(t.group, t.version) == kind.GroupVersion().String() { - // Translate third party group to external group. - gvk := registered.EnabledVersionsForGroup(api.GroupName)[0].WithKind(kind.Kind) - return t.delegate.New(gvk) - } - return t.delegate.New(kind) - default: - return t.delegate.New(kind) - } -} - -func NewThirdPartyParameterCodec(p runtime.ParameterCodec) runtime.ParameterCodec { - return &thirdPartyParameterCodec{p} -} - -type thirdPartyParameterCodec struct { - delegate runtime.ParameterCodec -} - -func (t *thirdPartyParameterCodec) DecodeParameters(parameters url.Values, from unversioned.GroupVersion, into runtime.Object) error { - return t.delegate.DecodeParameters(parameters, v1.SchemeGroupVersion, into) -} - -func (t *thirdPartyParameterCodec) EncodeParameters(obj runtime.Object, to unversioned.GroupVersion) (url.Values, error) { - return t.delegate.EncodeParameters(obj, v1.SchemeGroupVersion) -} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/doc.go b/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/doc.go deleted file mode 100644 index 62e2dc1e3..000000000 --- a/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package thirdpartyresourcedata provides Registry interface and its REST -// implementation for storing ThirdPartyResourceData api objects. -package thirdpartyresourcedata diff --git a/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/registry.go b/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/registry.go deleted file mode 100644 index 058276d1e..000000000 --- a/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/registry.go +++ /dev/null @@ -1,80 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package thirdpartyresourcedata - -import ( - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/rest" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/watch" -) - -// Registry is an interface implemented by things that know how to store ThirdPartyResourceData objects. -type Registry interface { - ListThirdPartyResourceData(ctx api.Context, options *api.ListOptions) (*extensions.ThirdPartyResourceDataList, error) - WatchThirdPartyResourceData(ctx api.Context, options *api.ListOptions) (watch.Interface, error) - GetThirdPartyResourceData(ctx api.Context, name string) (*extensions.ThirdPartyResourceData, error) - CreateThirdPartyResourceData(ctx api.Context, resource *extensions.ThirdPartyResourceData) (*extensions.ThirdPartyResourceData, error) - UpdateThirdPartyResourceData(ctx api.Context, resource *extensions.ThirdPartyResourceData) (*extensions.ThirdPartyResourceData, error) - DeleteThirdPartyResourceData(ctx api.Context, name string) error -} - -// storage puts strong typing around storage calls -type storage struct { - rest.StandardStorage -} - -// NewRegistry returns a new Registry interface for the given Storage. Any mismatched -// types will panic. 
-func NewRegistry(s rest.StandardStorage) Registry { - return &storage{s} -} - -func (s *storage) ListThirdPartyResourceData(ctx api.Context, options *api.ListOptions) (*extensions.ThirdPartyResourceDataList, error) { - obj, err := s.List(ctx, options) - if err != nil { - return nil, err - } - return obj.(*extensions.ThirdPartyResourceDataList), nil -} - -func (s *storage) WatchThirdPartyResourceData(ctx api.Context, options *api.ListOptions) (watch.Interface, error) { - return s.Watch(ctx, options) -} - -func (s *storage) GetThirdPartyResourceData(ctx api.Context, name string) (*extensions.ThirdPartyResourceData, error) { - obj, err := s.Get(ctx, name) - if err != nil { - return nil, err - } - return obj.(*extensions.ThirdPartyResourceData), nil -} - -func (s *storage) CreateThirdPartyResourceData(ctx api.Context, ThirdPartyResourceData *extensions.ThirdPartyResourceData) (*extensions.ThirdPartyResourceData, error) { - obj, err := s.Create(ctx, ThirdPartyResourceData) - return obj.(*extensions.ThirdPartyResourceData), err -} - -func (s *storage) UpdateThirdPartyResourceData(ctx api.Context, ThirdPartyResourceData *extensions.ThirdPartyResourceData) (*extensions.ThirdPartyResourceData, error) { - obj, _, err := s.Update(ctx, ThirdPartyResourceData.Name, rest.DefaultUpdatedObjectInfo(ThirdPartyResourceData, api.Scheme)) - return obj.(*extensions.ThirdPartyResourceData), err -} - -func (s *storage) DeleteThirdPartyResourceData(ctx api.Context, name string) error { - _, err := s.Delete(ctx, name, nil) - return err -} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/strategy.go b/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/strategy.go deleted file mode 100644 index 9f7673d7c..000000000 --- a/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/strategy.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package thirdpartyresourcedata - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/rest" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/apis/extensions/validation" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/registry/generic" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/validation/field" -) - -// strategy implements behavior for ThirdPartyResource objects -type strategy struct { - runtime.ObjectTyper - api.NameGenerator -} - -// Strategy is the default logic that applies when creating and updating ThirdPartyResource -// objects via the REST API. 
-var Strategy = strategy{api.Scheme, api.SimpleNameGenerator} - -var _ = rest.RESTCreateStrategy(Strategy) - -var _ = rest.RESTUpdateStrategy(Strategy) - -func (strategy) NamespaceScoped() bool { - return true -} - -func (strategy) PrepareForCreate(obj runtime.Object) { -} - -func (strategy) Validate(ctx api.Context, obj runtime.Object) field.ErrorList { - return validation.ValidateThirdPartyResourceData(obj.(*extensions.ThirdPartyResourceData)) -} - -// Canonicalize normalizes the object after validation. -func (strategy) Canonicalize(obj runtime.Object) { -} - -func (strategy) AllowCreateOnUpdate() bool { - return false -} - -func (strategy) PrepareForUpdate(obj, old runtime.Object) { -} - -func (strategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) field.ErrorList { - return validation.ValidateThirdPartyResourceDataUpdate(obj.(*extensions.ThirdPartyResourceData), old.(*extensions.ThirdPartyResourceData)) -} - -func (strategy) AllowUnconditionalUpdate() bool { - return true -} - -// Matcher returns a generic matcher for a given label and field selector. -func Matcher(label labels.Selector, field fields.Selector) generic.Matcher { - return generic.MatcherFunc(func(obj runtime.Object) (bool, error) { - sa, ok := obj.(*extensions.ThirdPartyResourceData) - if !ok { - return false, fmt.Errorf("not a ThirdPartyResourceData") - } - fields := SelectableFields(sa) - return label.Matches(labels.Set(sa.Labels)) && field.Matches(fields), nil - }) -} - -// SelectableFields returns a label set that can be used for filter selection -func SelectableFields(obj *extensions.ThirdPartyResourceData) labels.Set { - return labels.Set{} -} diff --git a/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/util.go b/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/util.go deleted file mode 100644 index 120981e85..000000000 --- a/vendor/k8s.io/kubernetes/pkg/registry/thirdpartyresourcedata/util.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package thirdpartyresourcedata - -import ( - "fmt" - "strings" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/extensions" -) - -func ExtractGroupVersionKind(list *extensions.ThirdPartyResourceList) ([]unversioned.GroupVersion, []unversioned.GroupVersionKind, error) { - gvs := []unversioned.GroupVersion{} - gvks := []unversioned.GroupVersionKind{} - for ix := range list.Items { - rsrc := &list.Items[ix] - kind, group, err := ExtractApiGroupAndKind(rsrc) - if err != nil { - return nil, nil, err - } - for _, version := range rsrc.Versions { - gv := unversioned.GroupVersion{Group: group, Version: version.Name} - gvs = append(gvs, gv) - gvks = append(gvks, unversioned.GroupVersionKind{Group: group, Version: version.Name, Kind: kind}) - } - } - return gvs, gvks, nil -} - -func convertToCamelCase(input string) string { - result := "" - toUpper := true - for ix := range input { - char := input[ix] - if toUpper { - result = result + string([]byte{(char - 32)}) - toUpper = false - } else if char == '-' { - toUpper = true - } else { - result = result + string([]byte{char}) - } - } - return result -} - -func ExtractApiGroupAndKind(rsrc *extensions.ThirdPartyResource) (kind string, group string, err error) { - parts := strings.Split(rsrc.Name, ".") - if len(parts) < 3 { - return "", "", fmt.Errorf("unexpectedly short resource name: %s, expected at least <kind>.<domain>.<tld>", rsrc.Name) - } - return convertToCamelCase(parts[0]), strings.Join(parts[1:], "."), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/OWNERS b/vendor/k8s.io/kubernetes/pkg/runtime/OWNERS deleted file mode 100644 index d038b5e9b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -assignees: - - caesarxuchao - - deads2k - - lavalamp - - smarterclayton diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/codec.go b/vendor/k8s.io/kubernetes/pkg/runtime/codec.go deleted file mode 100644 index 9cd7e1536..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/codec.go +++ /dev/null @@ -1,198 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -import ( - "bytes" - "encoding/base64" - "fmt" - "io" - "net/url" - "reflect" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/conversion/queryparams" -) - -// codec binds an encoder and decoder. -type codec struct { - Encoder - Decoder -} - -// NewCodec creates a Codec from an Encoder and Decoder. -func NewCodec(e Encoder, d Decoder) Codec { - return codec{e, d} -} - -// Encode is a convenience wrapper for encoding to a []byte from an Encoder -func Encode(e Encoder, obj Object) ([]byte, error) { - // TODO: reuse buffer - buf := &bytes.Buffer{} - if err := e.Encode(obj, buf); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// Decode is a convenience wrapper for decoding data into an Object. 
-func Decode(d Decoder, data []byte) (Object, error) { - obj, _, err := d.Decode(data, nil, nil) - return obj, err -} - -// DecodeInto performs a Decode into the provided object. -func DecodeInto(d Decoder, data []byte, into Object) error { - out, gvk, err := d.Decode(data, nil, into) - if err != nil { - return err - } - if out != into { - return fmt.Errorf("unable to decode %s into %v", gvk, reflect.TypeOf(into)) - } - return nil -} - -// EncodeOrDie is a version of Encode which will panic instead of returning an error. For tests. -func EncodeOrDie(e Encoder, obj Object) string { - bytes, err := Encode(e, obj) - if err != nil { - panic(err) - } - return string(bytes) -} - -// UseOrCreateObject returns obj if the canonical ObjectKind returned by the provided typer matches gvk, or -// invokes the ObjectCreator to instantiate a new gvk. Returns an error if the typer cannot find the object. -func UseOrCreateObject(t ObjectTyper, c ObjectCreater, gvk unversioned.GroupVersionKind, obj Object) (Object, error) { - if obj != nil { - into, _, err := t.ObjectKinds(obj) - if err != nil { - return nil, err - } - if gvk == into[0] { - return obj, nil - } - } - return c.New(gvk) -} - -// NoopEncoder converts an Decoder to a Serializer or Codec for code that expects them but only uses decoding. -type NoopEncoder struct { - Decoder -} - -var _ Serializer = NoopEncoder{} - -func (n NoopEncoder) Encode(obj Object, w io.Writer) error { - return fmt.Errorf("encoding is not allowed for this codec: %v", reflect.TypeOf(n.Decoder)) -} - -// NoopDecoder converts an Encoder to a Serializer or Codec for code that expects them but only uses encoding. -type NoopDecoder struct { - Encoder -} - -var _ Serializer = NoopDecoder{} - -func (n NoopDecoder) Decode(data []byte, gvk *unversioned.GroupVersionKind, into Object) (Object, *unversioned.GroupVersionKind, error) { - return nil, nil, fmt.Errorf("decoding is not allowed for this codec: %v", reflect.TypeOf(n.Encoder)) -} - -// NewParameterCodec creates a ParameterCodec capable of transforming url values into versioned objects and back. -func NewParameterCodec(scheme *Scheme) ParameterCodec { - return ¶meterCodec{ - typer: scheme, - convertor: scheme, - creator: scheme, - } -} - -// parameterCodec implements conversion to and from query parameters and objects. -type parameterCodec struct { - typer ObjectTyper - convertor ObjectConvertor - creator ObjectCreater -} - -var _ ParameterCodec = ¶meterCodec{} - -// DecodeParameters converts the provided url.Values into an object of type From with the kind of into, and then -// converts that object to into (if necessary). Returns an error if the operation cannot be completed. -func (c *parameterCodec) DecodeParameters(parameters url.Values, from unversioned.GroupVersion, into Object) error { - if len(parameters) == 0 { - return nil - } - targetGVKs, _, err := c.typer.ObjectKinds(into) - if err != nil { - return err - } - targetGVK := targetGVKs[0] - if targetGVK.GroupVersion() == from { - return c.convertor.Convert(¶meters, into) - } - input, err := c.creator.New(from.WithKind(targetGVK.Kind)) - if err != nil { - return err - } - if err := c.convertor.Convert(¶meters, input); err != nil { - return err - } - return c.convertor.Convert(input, into) -} - -// EncodeParameters converts the provided object into the to version, then converts that object to url.Values. -// Returns an error if conversion is not possible. 
-func (c *parameterCodec) EncodeParameters(obj Object, to unversioned.GroupVersion) (url.Values, error) { - gvks, _, err := c.typer.ObjectKinds(obj) - if err != nil { - return nil, err - } - gvk := gvks[0] - if to != gvk.GroupVersion() { - out, err := c.convertor.ConvertToVersion(obj, to) - if err != nil { - return nil, err - } - obj = out - } - return queryparams.Convert(obj) -} - -type base64Serializer struct { - Serializer -} - -func NewBase64Serializer(s Serializer) Serializer { - return &base64Serializer{s} -} - -func (s base64Serializer) Encode(obj Object, stream io.Writer) error { - e := base64.NewEncoder(base64.StdEncoding, stream) - err := s.Serializer.Encode(obj, e) - e.Close() - return err -} - -func (s base64Serializer) Decode(data []byte, defaults *unversioned.GroupVersionKind, into Object) (Object, *unversioned.GroupVersionKind, error) { - out := make([]byte, base64.StdEncoding.DecodedLen(len(data))) - n, err := base64.StdEncoding.Decode(out, data) - if err != nil { - return nil, nil, err - } - return s.Serializer.Decode(out[:n], defaults, into) -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/conversion.go b/vendor/k8s.io/kubernetes/pkg/runtime/conversion.go deleted file mode 100644 index 69cf00fea..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/conversion.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Defines conversions between generic types and structs to map query strings -// to struct objects. -package runtime - -import ( - "reflect" - "strconv" - "strings" - - "k8s.io/kubernetes/pkg/conversion" -) - -// JSONKeyMapper uses the struct tags on a conversion to determine the key value for -// the other side. Use when mapping from a map[string]* to a struct or vice versa. -func JSONKeyMapper(key string, sourceTag, destTag reflect.StructTag) (string, string) { - if s := destTag.Get("json"); len(s) > 0 { - return strings.SplitN(s, ",", 2)[0], key - } - if s := sourceTag.Get("json"); len(s) > 0 { - return key, strings.SplitN(s, ",", 2)[0] - } - return key, key -} - -// DefaultStringConversions are helpers for converting []string and string to real values. -var DefaultStringConversions = []interface{}{ - Convert_Slice_string_To_string, - Convert_Slice_string_To_int, - Convert_Slice_string_To_bool, - Convert_Slice_string_To_int64, -} - -func Convert_Slice_string_To_string(input *[]string, out *string, s conversion.Scope) error { - if len(*input) == 0 { - *out = "" - } - *out = (*input)[0] - return nil -} - -func Convert_Slice_string_To_int(input *[]string, out *int, s conversion.Scope) error { - if len(*input) == 0 { - *out = 0 - } - str := (*input)[0] - i, err := strconv.Atoi(str) - if err != nil { - return err - } - *out = i - return nil -} - -// Conver_Slice_string_To_bool will convert a string parameter to boolean. -// Only the absence of a value, a value of "false", or a value of "0" resolve to false. -// Any other value (including empty string) resolves to true. 
-func Convert_Slice_string_To_bool(input *[]string, out *bool, s conversion.Scope) error { - if len(*input) == 0 { - *out = false - return nil - } - switch strings.ToLower((*input)[0]) { - case "false", "0": - *out = false - default: - *out = true - } - return nil -} - -func Convert_Slice_string_To_int64(input *[]string, out *int64, s conversion.Scope) error { - if len(*input) == 0 { - *out = 0 - } - str := (*input)[0] - i, err := strconv.ParseInt(str, 10, 64) - if err != nil { - return err - } - *out = i - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/runtime/deep_copy_generated.go deleted file mode 100644 index fad426daa..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/deep_copy_generated.go +++ /dev/null @@ -1,65 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package runtime - -import ( - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func DeepCopy_runtime_RawExtension(in RawExtension, out *RawExtension, c *conversion.Cloner) error { - if in.Raw != nil { - in, out := in.Raw, &out.Raw - *out = make([]byte, len(in)) - copy(*out, in) - } else { - out.Raw = nil - } - if in.Object == nil { - out.Object = nil - } else if newVal, err := c.DeepCopy(in.Object); err != nil { - return err - } else { - out.Object = newVal.(Object) - } - return nil -} - -func DeepCopy_runtime_TypeMeta(in TypeMeta, out *TypeMeta, c *conversion.Cloner) error { - out.APIVersion = in.APIVersion - out.Kind = in.Kind - return nil -} - -func DeepCopy_runtime_Unknown(in Unknown, out *Unknown, c *conversion.Cloner) error { - if err := DeepCopy_runtime_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { - return err - } - if in.Raw != nil { - in, out := in.Raw, &out.Raw - *out = make([]byte, len(in)) - copy(*out, in) - } else { - out.Raw = nil - } - out.ContentEncoding = in.ContentEncoding - out.ContentType = in.ContentType - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/doc.go b/vendor/k8s.io/kubernetes/pkg/runtime/doc.go deleted file mode 100644 index 08e18891b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Package runtime includes helper functions for working with API objects -// that follow the kubernetes API object conventions, which are: -// -// 0. Your API objects have a common metadata struct member, TypeMeta. -// 1. Your code refers to an internal set of API objects. -// 2. In a separate package, you have an external set of API objects. -// 3. The external set is considered to be versioned, and no breaking -// changes are ever made to it (fields may be added but not changed -// or removed). -// 4. As your api evolves, you'll make an additional versioned package -// with every major change. -// 5. Versioned packages have conversion functions which convert to -// and from the internal version. -// 6. You'll continue to support older versions according to your -// deprecation policy, and you can easily provide a program/library -// to update old versions into new versions because of 5. -// 7. All of your serializations and deserializations are handled in a -// centralized place. -// -// Package runtime provides a conversion helper to make 5 easy, and the -// Encode/Decode/DecodeInto trio to accomplish 7. You can also register -// additional "codecs" which use a version of your choice. It's -// recommended that you register your types with runtime in your -// package's init function. -// -// As a bonus, a few common types useful from all api objects and versions -// are provided in types.go. -package runtime diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/error.go b/vendor/k8s.io/kubernetes/pkg/runtime/error.go deleted file mode 100644 index ca60ee813..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/error.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -import ( - "fmt" - "reflect" - - "k8s.io/kubernetes/pkg/api/unversioned" -) - -type notRegisteredErr struct { - gvk unversioned.GroupVersionKind - t reflect.Type -} - -// NewNotRegisteredErr is exposed for testing. -func NewNotRegisteredErr(gvk unversioned.GroupVersionKind, t reflect.Type) error { - return ¬RegisteredErr{gvk: gvk, t: t} -} - -func (k *notRegisteredErr) Error() string { - if k.t != nil { - return fmt.Sprintf("no kind is registered for the type %v", k.t) - } - if len(k.gvk.Kind) == 0 { - return fmt.Sprintf("no version %q has been registered", k.gvk.GroupVersion()) - } - if k.gvk.Version == APIVersionInternal { - return fmt.Sprintf("no kind %q is registered for the internal version of group %q", k.gvk.Kind, k.gvk.Group) - } - - return fmt.Sprintf("no kind %q is registered for version %q", k.gvk.Kind, k.gvk.GroupVersion()) -} - -// IsNotRegisteredError returns true if the error indicates the provided -// object or input data is not registered. 
-func IsNotRegisteredError(err error) bool { - if err == nil { - return false - } - _, ok := err.(*notRegisteredErr) - return ok -} - -type missingKindErr struct { - data string -} - -func NewMissingKindErr(data string) error { - return &missingKindErr{data} -} - -func (k *missingKindErr) Error() string { - return fmt.Sprintf("Object 'Kind' is missing in '%s'", k.data) -} - -// IsMissingKind returns true if the error indicates that the provided object -// is missing a 'Kind' field. -func IsMissingKind(err error) bool { - if err == nil { - return false - } - _, ok := err.(*missingKindErr) - return ok -} - -type missingVersionErr struct { - data string -} - -// IsMissingVersion returns true if the error indicates that the provided object -// is missing a 'Versioj' field. -func NewMissingVersionErr(data string) error { - return &missingVersionErr{data} -} - -func (k *missingVersionErr) Error() string { - return fmt.Sprintf("Object 'apiVersion' is missing in '%s'", k.data) -} - -func IsMissingVersion(err error) bool { - if err == nil { - return false - } - _, ok := err.(*missingVersionErr) - return ok -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/runtime/generated.pb.go deleted file mode 100644 index 289268483..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/generated.pb.go +++ /dev/null @@ -1,689 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/runtime/generated.proto -// DO NOT EDIT! - -/* - Package runtime is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/runtime/generated.proto - - It has these top-level messages: - RawExtension - TypeMeta - Unknown -*/ -package runtime - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -func (m *RawExtension) Reset() { *m = RawExtension{} } -func (m *RawExtension) String() string { return proto.CompactTextString(m) } -func (*RawExtension) ProtoMessage() {} - -func (m *TypeMeta) Reset() { *m = TypeMeta{} } -func (m *TypeMeta) String() string { return proto.CompactTextString(m) } -func (*TypeMeta) ProtoMessage() {} - -func (m *Unknown) Reset() { *m = Unknown{} } -func (m *Unknown) String() string { return proto.CompactTextString(m) } -func (*Unknown) ProtoMessage() {} - -func init() { - proto.RegisterType((*RawExtension)(nil), "k8s.io.kubernetes.pkg.runtime.RawExtension") - proto.RegisterType((*TypeMeta)(nil), "k8s.io.kubernetes.pkg.runtime.TypeMeta") - proto.RegisterType((*Unknown)(nil), "k8s.io.kubernetes.pkg.runtime.Unknown") -} -func (m *RawExtension) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *RawExtension) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Raw != nil { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Raw))) - i += copy(data[i:], m.Raw) - } - return i, nil -} - -func (m *TypeMeta) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *TypeMeta) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.APIVersion))) - i += copy(data[i:], m.APIVersion) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Kind))) - i += copy(data[i:], m.Kind) - return i, nil -} - -func (m *Unknown) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Unknown) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.TypeMeta.Size())) - n1, err := m.TypeMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - if m.Raw != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Raw))) - i += copy(data[i:], m.Raw) - } - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) - i += copy(data[i:], m.ContentEncoding) - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) - i += copy(data[i:], m.ContentType) - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *RawExtension) Size() (n int) { - var l int - 
_ = l - if m.Raw != nil { - l = len(m.Raw) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *TypeMeta) Size() (n int) { - var l int - _ = l - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Unknown) Size() (n int) { - var l int - _ = l - l = m.TypeMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Raw != nil { - l = len(m.Raw) - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.ContentEncoding) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ContentType) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *RawExtension) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RawExtension: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RawExtension: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Raw = append(m.Raw[:0], data[iNdEx:postIndex]...) 
- if m.Raw == nil { - m.Raw = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TypeMeta) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersion = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Unknown) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Unknown: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Unknown: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TypeMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TypeMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Raw = append(m.Raw[:0], data[iNdEx:postIndex]...) - if m.Raw == nil { - m.Raw = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContentEncoding", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContentEncoding = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContentType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContentType = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - 
length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/generated.proto b/vendor/k8s.io/kubernetes/pkg/runtime/generated.proto deleted file mode 100644 index 852721228..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/generated.proto +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.runtime; - -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "runtime"; - -// RawExtension is used to hold extensions in external versions. -// -// To use this, make a field which has RawExtension as its type in your external, versioned -// struct, and Object in your internal struct. You also need to register your -// various plugin types. -// -// // Internal package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.Object `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } -// -// // External package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.RawExtension `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } -// -// // On the wire, the JSON will look something like this: -// { -// "kind":"MyAPIObject", -// "apiVersion":"v1", -// "myPlugin": { -// "kind":"PluginA", -// "aOption":"foo", -// }, -// } -// -// So what happens? Decode first uses json or yaml to unmarshal the serialized data into -// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. -// The next step is to copy (using pkg/conversion) into the internal struct. 
The runtime -// package's DefaultScheme has conversion functions installed which will unpack the -// JSON stored in RawExtension, turning it into the correct object type, and storing it -// in the Object. (TODO: In the case where the object is of an unknown type, a -// runtime.Unknown object will be created and stored.) -// -// +gencopy=true -// +protobuf=true -message RawExtension { - // Raw is the underlying serialization of this object. - // - // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. - optional bytes raw = 1; -} - -// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, -// like this: -// type MyAwesomeAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// ... // other fields -// } -// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *unversioned.GroupVersionKind) { unversioned.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind -// -// TypeMeta is provided here for convenience. You may use it directly from this package or define -// your own with the same fields. -// -// +gencopy=true -// +protobuf=true -message TypeMeta { - optional string apiVersion = 1; - - optional string kind = 2; -} - -// Unknown allows api objects with unknown types to be passed-through. This can be used -// to deal with the API objects from a plug-in. Unknown objects still have functioning -// TypeMeta features-- kind, version, etc. -// TODO: Make this object have easy access to field based accessors and settors for -// metadata and field mutatation. -// -// +gencopy=true -// +protobuf=true -message Unknown { - optional TypeMeta typeMeta = 1; - - // Raw will hold the complete serialized object which couldn't be matched - // with a registered type. Most likely, nothing should be done with this - // except for passing it through the system. - optional bytes raw = 2; - - // ContentEncoding is encoding used to encode 'Raw' data. - // Unspecified means no encoding. - optional string contentEncoding = 3; - - // ContentType is serialization method used to serialize 'Raw'. - // Unspecified means ContentTypeJSON. - optional string contentType = 4; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/helper.go b/vendor/k8s.io/kubernetes/pkg/runtime/helper.go deleted file mode 100644 index b131fdcd9..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/helper.go +++ /dev/null @@ -1,212 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -import ( - "fmt" - "io" - "reflect" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/util/errors" -) - -// unsafeObjectConvertor implements ObjectConvertor using the unsafe conversion path. -type unsafeObjectConvertor struct { - *Scheme -} - -var _ ObjectConvertor = unsafeObjectConvertor{} - -// ConvertToVersion converts in to the provided outVersion without copying the input first, which -// is only safe if the output object is not mutated or reused. 
-func (c unsafeObjectConvertor) ConvertToVersion(in Object, outVersion unversioned.GroupVersion) (Object, error) { - return c.Scheme.UnsafeConvertToVersion(in, outVersion) -} - -// UnsafeObjectConvertor performs object conversion without copying the object structure, -// for use when the converted object will not be reused or mutated. Primarily for use within -// versioned codecs, which use the external object for serialization but do not return it. -func UnsafeObjectConvertor(scheme *Scheme) ObjectConvertor { - return unsafeObjectConvertor{scheme} -} - -// SetField puts the value of src, into fieldName, which must be a member of v. -// The value of src must be assignable to the field. -func SetField(src interface{}, v reflect.Value, fieldName string) error { - field := v.FieldByName(fieldName) - if !field.IsValid() { - return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) - } - srcValue := reflect.ValueOf(src) - if srcValue.Type().AssignableTo(field.Type()) { - field.Set(srcValue) - return nil - } - if srcValue.Type().ConvertibleTo(field.Type()) { - field.Set(srcValue.Convert(field.Type())) - return nil - } - return fmt.Errorf("couldn't assign/convert %v to %v", srcValue.Type(), field.Type()) -} - -// Field puts the value of fieldName, which must be a member of v, into dest, -// which must be a variable to which this field's value can be assigned. -func Field(v reflect.Value, fieldName string, dest interface{}) error { - field := v.FieldByName(fieldName) - if !field.IsValid() { - return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) - } - destValue, err := conversion.EnforcePtr(dest) - if err != nil { - return err - } - if field.Type().AssignableTo(destValue.Type()) { - destValue.Set(field) - return nil - } - if field.Type().ConvertibleTo(destValue.Type()) { - destValue.Set(field.Convert(destValue.Type())) - return nil - } - return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), destValue.Type()) -} - -// fieldPtr puts the address of fieldName, which must be a member of v, -// into dest, which must be an address of a variable to which this field's -// address can be assigned. -func FieldPtr(v reflect.Value, fieldName string, dest interface{}) error { - field := v.FieldByName(fieldName) - if !field.IsValid() { - return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) - } - v, err := conversion.EnforcePtr(dest) - if err != nil { - return err - } - field = field.Addr() - if field.Type().AssignableTo(v.Type()) { - v.Set(field) - return nil - } - if field.Type().ConvertibleTo(v.Type()) { - v.Set(field.Convert(v.Type())) - return nil - } - return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), v.Type()) -} - -// EncodeList ensures that each object in an array is converted to a Unknown{} in serialized form. -// TODO: accept a content type. -func EncodeList(e Encoder, objects []Object) error { - var errs []error - for i := range objects { - data, err := Encode(e, objects[i]) - if err != nil { - errs = append(errs, err) - continue - } - // TODO: Set ContentEncoding and ContentType. - objects[i] = &Unknown{Raw: data} - } - return errors.NewAggregate(errs) -} - -func decodeListItem(obj *Unknown, decoders []Decoder) (Object, error) { - for _, decoder := range decoders { - // TODO: Decode based on ContentType. 
- obj, err := Decode(decoder, obj.Raw) - if err != nil { - if IsNotRegisteredError(err) { - continue - } - return nil, err - } - return obj, nil - } - // could not decode, so leave the object as Unknown, but give the decoders the - // chance to set Unknown.TypeMeta if it is available. - for _, decoder := range decoders { - if err := DecodeInto(decoder, obj.Raw, obj); err == nil { - return obj, nil - } - } - return obj, nil -} - -// DecodeList alters the list in place, attempting to decode any objects found in -// the list that have the Unknown type. Any errors that occur are returned -// after the entire list is processed. Decoders are tried in order. -func DecodeList(objects []Object, decoders ...Decoder) []error { - errs := []error(nil) - for i, obj := range objects { - switch t := obj.(type) { - case *Unknown: - decoded, err := decodeListItem(t, decoders) - if err != nil { - errs = append(errs, err) - break - } - objects[i] = decoded - } - } - return errs -} - -// MultiObjectTyper returns the types of objects across multiple schemes in order. -type MultiObjectTyper []ObjectTyper - -var _ ObjectTyper = MultiObjectTyper{} - -func (m MultiObjectTyper) ObjectKinds(obj Object) (gvks []unversioned.GroupVersionKind, unversionedType bool, err error) { - for _, t := range m { - gvks, unversionedType, err = t.ObjectKinds(obj) - if err == nil { - return - } - } - return -} - -func (m MultiObjectTyper) Recognizes(gvk unversioned.GroupVersionKind) bool { - for _, t := range m { - if t.Recognizes(gvk) { - return true - } - } - return false -} - -// SetZeroValue would set the object of objPtr to zero value of its type. -func SetZeroValue(objPtr Object) error { - v, err := conversion.EnforcePtr(objPtr) - if err != nil { - return err - } - v.Set(reflect.Zero(v.Type())) - return nil -} - -// DefaultFramer is valid for any stream that can read objects serially without -// any separation in the stream. -var DefaultFramer = defaultFramer{} - -type defaultFramer struct{} - -func (defaultFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { return r } -func (defaultFramer) NewFrameWriter(w io.Writer) io.Writer { return w } diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/interfaces.go b/vendor/k8s.io/kubernetes/pkg/runtime/interfaces.go deleted file mode 100644 index 7239ac44c..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/interfaces.go +++ /dev/null @@ -1,217 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -import ( - "io" - "net/url" - - "k8s.io/kubernetes/pkg/api/unversioned" -) - -const ( - // APIVersionInternal may be used if you are registering a type that should not - // be considered stable or serialized - it is a convention only and has no - // special behavior in this package. - APIVersionInternal = "__internal" -) - -type Encoder interface { - // Encode writes an object to a stream. Implementations may return errors if the versions are - // incompatible, or if no conversion is defined. 
- Encode(obj Object, w io.Writer) error -} - -type Decoder interface { - // Decode attempts to deserialize the provided data using either the innate typing of the scheme or the - // default kind, group, and version provided. It returns a decoded object as well as the kind, group, and - // version from the serialized data, or an error. If into is non-nil, it will be used as the target type - // and implementations may choose to use it rather than reallocating an object. However, the object is not - // guaranteed to be populated. The returned object is not guaranteed to match into. If defaults are - // provided, they are applied to the data by default. If no defaults or partial defaults are provided, the - // type of the into may be used to guide conversion decisions. - Decode(data []byte, defaults *unversioned.GroupVersionKind, into Object) (Object, *unversioned.GroupVersionKind, error) -} - -// Serializer is the core interface for transforming objects into a serialized format and back. -// Implementations may choose to perform conversion of the object, but no assumptions should be made. -type Serializer interface { - Encoder - Decoder -} - -// Codec is a Serializer that deals with the details of versioning objects. It offers the same -// interface as Serializer, so this is a marker to consumers that care about the version of the objects -// they receive. -type Codec Serializer - -// ParameterCodec defines methods for serializing and deserializing API objects to url.Values and -// performing any necessary conversion. Unlike the normal Codec, query parameters are not self describing -// and the desired version must be specified. -type ParameterCodec interface { - // DecodeParameters takes the given url.Values in the specified group version and decodes them - // into the provided object, or returns an error. - DecodeParameters(parameters url.Values, from unversioned.GroupVersion, into Object) error - // EncodeParameters encodes the provided object as query parameters or returns an error. - EncodeParameters(obj Object, to unversioned.GroupVersion) (url.Values, error) -} - -// Framer is a factory for creating readers and writers that obey a particular framing pattern. -type Framer interface { - NewFrameReader(r io.ReadCloser) io.ReadCloser - NewFrameWriter(w io.Writer) io.Writer -} - -// SerializerInfo contains information about a specific serialization format -type SerializerInfo struct { - Serializer - // EncodesAsText indicates this serializer can be encoded to UTF-8 safely. - EncodesAsText bool - // MediaType is the value that represents this serializer over the wire. - MediaType string -} - -// StreamSerializerInfo contains information about a specific stream serialization format -type StreamSerializerInfo struct { - SerializerInfo - // Framer is the factory for retrieving streams that separate objects on the wire - Framer - // Embedded is the type of the nested serialization that should be used. - Embedded SerializerInfo -} - -// NegotiatedSerializer is an interface used for obtaining encoders, decoders, and serializers -// for multiple supported media types. This would commonly be accepted by a server component -// that performs HTTP content negotiation to accept multiple formats. -type NegotiatedSerializer interface { - // SupportedMediaTypes is the media types supported for reading and writing single objects. - SupportedMediaTypes() []string - // SerializerForMediaType returns a serializer for the provided media type. 
params is the set of - // parameters applied to the media type that may modify the resulting output. ok will be false - // if no serializer matched the media type. - SerializerForMediaType(mediaType string, params map[string]string) (s SerializerInfo, ok bool) - - // SupportedStreamingMediaTypes returns the media types of the supported streaming serializers. - // Streaming serializers control how multiple objects are written to a stream output. - SupportedStreamingMediaTypes() []string - // StreamingSerializerForMediaType returns a serializer for the provided media type that supports - // reading and writing multiple objects to a stream. It returns a framer and serializer, or an - // error if no such serializer can be created. Params is the set of parameters applied to the - // media type that may modify the resulting output. ok will be false if no serializer matched - // the media type. - StreamingSerializerForMediaType(mediaType string, params map[string]string) (s StreamSerializerInfo, ok bool) - - // EncoderForVersion returns an encoder that ensures objects being written to the provided - // serializer are in the provided group version. - // TODO: take multiple group versions - EncoderForVersion(serializer Encoder, gv unversioned.GroupVersion) Encoder - // DecoderForVersion returns a decoder that ensures objects being read by the provided - // serializer are in the provided group version by default. - // TODO: take multiple group versions - DecoderToVersion(serializer Decoder, gv unversioned.GroupVersion) Decoder -} - -// StorageSerializer is an interface used for obtaining encoders, decoders, and serializers -// that can read and write data at rest. This would commonly be used by client tools that must -// read files, or server side storage interfaces that persist restful objects. -type StorageSerializer interface { - // SerializerForMediaType returns a serializer for the provided media type. Options is a set of - // parameters applied to the media type that may modify the resulting output. - SerializerForMediaType(mediaType string, options map[string]string) (SerializerInfo, bool) - - // UniversalDeserializer returns a Serializer that can read objects in multiple supported formats - // by introspecting the data at rest. - UniversalDeserializer() Decoder - - // EncoderForVersion returns an encoder that ensures objects being written to the provided - // serializer are in the provided group version. - // TODO: take multiple group versions - EncoderForVersion(serializer Encoder, gv unversioned.GroupVersion) Encoder - // DecoderForVersion returns a decoder that ensures objects being read by the provided - // serializer are in the provided group version by default. - // TODO: take multiple group versions - DecoderToVersion(serializer Decoder, gv unversioned.GroupVersion) Decoder -} - -/////////////////////////////////////////////////////////////////////////////// -// Non-codec interfaces - -type ObjectVersioner interface { - ConvertToVersion(in Object, outVersion unversioned.GroupVersion) (out Object, err error) -} - -// ObjectConvertor converts an object to a different version. -type ObjectConvertor interface { - // Convert attempts to convert one object into another, or returns an error. This method does - // not guarantee the in object is not mutated. - Convert(in, out interface{}) error - // ConvertToVersion takes the provided object and converts it the provided version. This - // method does not guarantee that the in object is not mutated. 
- ConvertToVersion(in Object, outVersion unversioned.GroupVersion) (out Object, err error) - ConvertFieldLabel(version, kind, label, value string) (string, string, error) -} - -// ObjectTyper contains methods for extracting the APIVersion and Kind -// of objects. -type ObjectTyper interface { - // ObjectKinds returns the all possible group,version,kind of the provided object, true if - // the object is unversioned, or an error if the object is not recognized - // (IsNotRegisteredError will return true). - ObjectKinds(Object) ([]unversioned.GroupVersionKind, bool, error) - // Recognizes returns true if the scheme is able to handle the provided version and kind, - // or more precisely that the provided version is a possible conversion or decoding - // target. - Recognizes(gvk unversioned.GroupVersionKind) bool -} - -// ObjectCreater contains methods for instantiating an object by kind and version. -type ObjectCreater interface { - New(kind unversioned.GroupVersionKind) (out Object, err error) -} - -// ObjectCopier duplicates an object. -type ObjectCopier interface { - // Copy returns an exact copy of the provided Object, or an error if the - // copy could not be completed. - Copy(Object) (Object, error) -} - -// ResourceVersioner provides methods for setting and retrieving -// the resource version from an API object. -type ResourceVersioner interface { - SetResourceVersion(obj Object, version string) error - ResourceVersion(obj Object) (string, error) -} - -// SelfLinker provides methods for setting and retrieving the SelfLink field of an API object. -type SelfLinker interface { - SetSelfLink(obj Object, selfLink string) error - SelfLink(obj Object) (string, error) - - // Knowing Name is sometimes necessary to use a SelfLinker. - Name(obj Object) (string, error) - // Knowing Namespace is sometimes necessary to use a SelfLinker - Namespace(obj Object) (string, error) -} - -// All API types registered with Scheme must support the Object interface. Since objects in a scheme are -// expected to be serialized to the wire, the interface an Object must provide to the Scheme allows -// serializers to set the kind, version, and group the object is represented as. An Object may choose -// to return a no-op ObjectKindAccessor in cases where it is not expected to be serialized. -type Object interface { - GetObjectKind() unversioned.ObjectKind -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/register.go b/vendor/k8s.io/kubernetes/pkg/runtime/register.go deleted file mode 100644 index 5201a15ff..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/register.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package runtime - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" -) - -// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta -func (obj *TypeMeta) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { - obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() -} - -// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta -func (obj *TypeMeta) GroupVersionKind() unversioned.GroupVersionKind { - return unversioned.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) -} - -func (obj *Unknown) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } - -func (obj *Unstructured) GetObjectKind() unversioned.ObjectKind { return obj } -func (obj *UnstructuredList) GetObjectKind() unversioned.ObjectKind { return obj } - -// GetObjectKind implements Object for VersionedObjects, returning an empty ObjectKind -// interface if no objects are provided, or the ObjectKind interface of the object in the -// highest array position. -func (obj *VersionedObjects) GetObjectKind() unversioned.ObjectKind { - last := obj.Last() - if last == nil { - return unversioned.EmptyObjectKind - } - return last.GetObjectKind() -} - -// First returns the leftmost object in the VersionedObjects array, which is usually the -// object as serialized on the wire. -func (obj *VersionedObjects) First() Object { - if len(obj.Objects) == 0 { - return nil - } - return obj.Objects[0] -} - -// Last is the rightmost object in the VersionedObjects array, which is the object after -// all transformations have been applied. This is the same object that would be returned -// by Decode in a normal invocation (without VersionedObjects in the into argument). -func (obj *VersionedObjects) Last() Object { - if len(obj.Objects) == 0 { - return nil - } - return obj.Objects[len(obj.Objects)-1] -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/scheme.go b/vendor/k8s.io/kubernetes/pkg/runtime/scheme.go deleted file mode 100644 index c49c43e93..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/scheme.go +++ /dev/null @@ -1,623 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -import ( - "fmt" - "net/url" - "reflect" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/conversion" -) - -// Scheme defines methods for serializing and deserializing API objects, a type -// registry for converting group, version, and kind information to and from Go -// schemas, and mappings between Go schemas of different versions. A scheme is the -// foundation for a versioned API and versioned configuration over time. -// -// In a Scheme, a Type is a particular Go struct, a Version is a point-in-time -// identifier for a particular representation of that Type (typically backwards -// compatible), a Kind is the unique name for that Type within the Version, and a -// Group identifies a set of Versions, Kinds, and Types that evolve over time. 
An -// Unversioned Type is one that is not yet formally bound to a type and is promised -// to be backwards compatible (effectively a "v1" of a Type that does not expect -// to break in the future). -// -// Schemes are not expected to change at runtime and are only threadsafe after -// registration is complete. -type Scheme struct { - // versionMap allows one to figure out the go type of an object with - // the given version and name. - gvkToType map[unversioned.GroupVersionKind]reflect.Type - - // typeToGroupVersion allows one to find metadata for a given go object. - // The reflect.Type we index by should *not* be a pointer. - typeToGVK map[reflect.Type][]unversioned.GroupVersionKind - - // unversionedTypes are transformed without conversion in ConvertToVersion. - unversionedTypes map[reflect.Type]unversioned.GroupVersionKind - - // unversionedKinds are the names of kinds that can be created in the context of any group - // or version - // TODO: resolve the status of unversioned types. - unversionedKinds map[string]reflect.Type - - // Map from version and resource to the corresponding func to convert - // resource field labels in that version to internal version. - fieldLabelConversionFuncs map[string]map[string]FieldLabelConversionFunc - - // converter stores all registered conversion functions. It also has - // default coverting behavior. - converter *conversion.Converter - - // cloner stores all registered copy functions. It also has default - // deep copy behavior. - cloner *conversion.Cloner -} - -// Function to convert a field selector to internal representation. -type FieldLabelConversionFunc func(label, value string) (internalLabel, internalValue string, err error) - -// NewScheme creates a new Scheme. This scheme is pluggable by default. -func NewScheme() *Scheme { - s := &Scheme{ - gvkToType: map[unversioned.GroupVersionKind]reflect.Type{}, - typeToGVK: map[reflect.Type][]unversioned.GroupVersionKind{}, - unversionedTypes: map[reflect.Type]unversioned.GroupVersionKind{}, - unversionedKinds: map[string]reflect.Type{}, - cloner: conversion.NewCloner(), - fieldLabelConversionFuncs: map[string]map[string]FieldLabelConversionFunc{}, - } - s.converter = conversion.NewConverter(s.nameFunc) - - s.AddConversionFuncs(DefaultEmbeddedConversions()...) - - // Enable map[string][]string conversions by default - if err := s.AddConversionFuncs(DefaultStringConversions...); err != nil { - panic(err) - } - if err := s.RegisterInputDefaults(&map[string][]string{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields); err != nil { - panic(err) - } - if err := s.RegisterInputDefaults(&url.Values{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields); err != nil { - panic(err) - } - return s -} - -// nameFunc returns the name of the type that we wish to use to determine when two types attempt -// a conversion. Defaults to the go name of the type if the type is not registered. 
-func (s *Scheme) nameFunc(t reflect.Type) string { - // find the preferred names for this type - gvks, ok := s.typeToGVK[t] - if !ok { - return t.Name() - } - - for _, gvk := range gvks { - internalGV := gvk.GroupVersion() - internalGV.Version = "__internal" // this is hacky and maybe should be passed in - internalGVK := internalGV.WithKind(gvk.Kind) - - if internalType, exists := s.gvkToType[internalGVK]; exists { - return s.typeToGVK[internalType][0].Kind - } - } - - return gvks[0].Kind -} - -// fromScope gets the input version, desired output version, and desired Scheme -// from a conversion.Scope. -func (s *Scheme) fromScope(scope conversion.Scope) *Scheme { - return s -} - -// Converter allows access to the converter for the scheme -func (s *Scheme) Converter() *conversion.Converter { - return s.converter -} - -// AddUnversionedTypes registers the provided types as "unversioned", which means that they follow special rules. -// Whenever an object of this type is serialized, it is serialized with the provided group version and is not -// converted. Thus unversioned objects are expected to remain backwards compatible forever, as if they were in an -// API group and version that would never be updated. -// -// TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into -// every version with particular schemas. Resolve this method at that point. -func (s *Scheme) AddUnversionedTypes(version unversioned.GroupVersion, types ...Object) { - s.AddKnownTypes(version, types...) - for _, obj := range types { - t := reflect.TypeOf(obj).Elem() - gvk := version.WithKind(t.Name()) - s.unversionedTypes[t] = gvk - if _, ok := s.unversionedKinds[gvk.Kind]; ok { - panic(fmt.Sprintf("%v has already been registered as unversioned kind %q - kind name must be unique", reflect.TypeOf(t), gvk.Kind)) - } - s.unversionedKinds[gvk.Kind] = t - } -} - -// AddKnownTypes registers all types passed in 'types' as being members of version 'version'. -// All objects passed to types should be pointers to structs. The name that go reports for -// the struct becomes the "kind" field when encoding. Version may not be empty - use the -// APIVersionInternal constant if you have a type that does not have a formal version. -func (s *Scheme) AddKnownTypes(gv unversioned.GroupVersion, types ...Object) { - if len(gv.Version) == 0 { - panic(fmt.Sprintf("version is required on all types: %s %v", gv, types[0])) - } - for _, obj := range types { - t := reflect.TypeOf(obj) - if t.Kind() != reflect.Ptr { - panic("All types must be pointers to structs.") - } - t = t.Elem() - if t.Kind() != reflect.Struct { - panic("All types must be pointers to structs.") - } - - gvk := gv.WithKind(t.Name()) - s.gvkToType[gvk] = t - s.typeToGVK[t] = append(s.typeToGVK[t], gvk) - } -} - -// AddKnownTypeWithName is like AddKnownTypes, but it lets you specify what this type should -// be encoded as. Useful for testing when you don't want to make multiple packages to define -// your structs. Version may not be empty - use the APIVersionInternal constant if you have a -// type that does not have a formal version. 
-func (s *Scheme) AddKnownTypeWithName(gvk unversioned.GroupVersionKind, obj Object) { - t := reflect.TypeOf(obj) - if len(gvk.Version) == 0 { - panic(fmt.Sprintf("version is required on all types: %s %v", gvk, t)) - } - if t.Kind() != reflect.Ptr { - panic("All types must be pointers to structs.") - } - t = t.Elem() - if t.Kind() != reflect.Struct { - panic("All types must be pointers to structs.") - } - - s.gvkToType[gvk] = t - s.typeToGVK[t] = append(s.typeToGVK[t], gvk) -} - -// KnownTypes returns the types known for the given version. -func (s *Scheme) KnownTypes(gv unversioned.GroupVersion) map[string]reflect.Type { - types := make(map[string]reflect.Type) - for gvk, t := range s.gvkToType { - if gv != gvk.GroupVersion() { - continue - } - - types[gvk.Kind] = t - } - return types -} - -// ObjectKind returns the group,version,kind of the go object and true if this object -// is considered unversioned, or an error if it's not a pointer or is unregistered. -func (s *Scheme) ObjectKind(obj Object) (unversioned.GroupVersionKind, bool, error) { - gvks, unversionedType, err := s.ObjectKinds(obj) - if err != nil { - return unversioned.GroupVersionKind{}, false, err - } - return gvks[0], unversionedType, nil -} - -// ObjectKinds returns all possible group,version,kind of the go object, true if the -// object is considered unversioned, or an error if it's not a pointer or is unregistered. -func (s *Scheme) ObjectKinds(obj Object) ([]unversioned.GroupVersionKind, bool, error) { - v, err := conversion.EnforcePtr(obj) - if err != nil { - return nil, false, err - } - t := v.Type() - - gvks, ok := s.typeToGVK[t] - if !ok { - return nil, false, ¬RegisteredErr{t: t} - } - _, unversionedType := s.unversionedTypes[t] - - return gvks, unversionedType, nil -} - -// Recognizes returns true if the scheme is able to handle the provided group,version,kind -// of an object. -func (s *Scheme) Recognizes(gvk unversioned.GroupVersionKind) bool { - _, exists := s.gvkToType[gvk] - return exists -} - -func (s *Scheme) IsUnversioned(obj Object) (bool, bool) { - v, err := conversion.EnforcePtr(obj) - if err != nil { - return false, false - } - t := v.Type() - - if _, ok := s.typeToGVK[t]; !ok { - return false, false - } - _, ok := s.unversionedTypes[t] - return ok, true -} - -// New returns a new API object of the given version and name, or an error if it hasn't -// been registered. The version and kind fields must be specified. -func (s *Scheme) New(kind unversioned.GroupVersionKind) (Object, error) { - if t, exists := s.gvkToType[kind]; exists { - return reflect.New(t).Interface().(Object), nil - } - - if t, exists := s.unversionedKinds[kind.Kind]; exists { - return reflect.New(t).Interface().(Object), nil - } - return nil, ¬RegisteredErr{gvk: kind} -} - -// AddGenericConversionFunc adds a function that accepts the ConversionFunc call pattern -// (for two conversion types) to the converter. These functions are checked first during -// a normal conversion, but are otherwise not called. Use AddConversionFuncs when registering -// typed conversions. -func (s *Scheme) AddGenericConversionFunc(fn conversion.GenericConversionFunc) { - s.converter.AddGenericConversionFunc(fn) -} - -// Log sets a logger on the scheme. For test purposes only -func (s *Scheme) Log(l conversion.DebugLogger) { - s.converter.Debug = l -} - -// AddIgnoredConversionType identifies a pair of types that should be skipped by -// conversion (because the data inside them is explicitly dropped during -// conversion). 
-func (s *Scheme) AddIgnoredConversionType(from, to interface{}) error { - return s.converter.RegisterIgnoredConversion(from, to) -} - -// AddConversionFuncs adds functions to the list of conversion functions. The given -// functions should know how to convert between two of your API objects, or their -// sub-objects. We deduce how to call these functions from the types of their two -// parameters; see the comment for Converter.Register. -// -// Note that, if you need to copy sub-objects that didn't change, you can use the -// conversion.Scope object that will be passed to your conversion function. -// Additionally, all conversions started by Scheme will set the SrcVersion and -// DestVersion fields on the Meta object. Example: -// -// s.AddConversionFuncs( -// func(in *InternalObject, out *ExternalObject, scope conversion.Scope) error { -// // You can depend on Meta() being non-nil, and this being set to -// // the source version, e.g., "" -// s.Meta().SrcVersion -// // You can depend on this being set to the destination version, -// // e.g., "v1". -// s.Meta().DestVersion -// // Call scope.Convert to copy sub-fields. -// s.Convert(&in.SubFieldThatMoved, &out.NewLocation.NewName, 0) -// return nil -// }, -// ) -// -// (For more detail about conversion functions, see Converter.Register's comment.) -// -// Also note that the default behavior, if you don't add a conversion function, is to -// sanely copy fields that have the same names and same type names. It's OK if the -// destination type has extra fields, but it must not remove any. So you only need to -// add conversion functions for things with changed/removed fields. -func (s *Scheme) AddConversionFuncs(conversionFuncs ...interface{}) error { - for _, f := range conversionFuncs { - if err := s.converter.RegisterConversionFunc(f); err != nil { - return err - } - } - return nil -} - -// Similar to AddConversionFuncs, but registers conversion functions that were -// automatically generated. -func (s *Scheme) AddGeneratedConversionFuncs(conversionFuncs ...interface{}) error { - for _, f := range conversionFuncs { - if err := s.converter.RegisterGeneratedConversionFunc(f); err != nil { - return err - } - } - return nil -} - -// AddDeepCopyFuncs adds a function to the list of deep-copy functions. -// For the expected format of deep-copy function, see the comment for -// Copier.RegisterDeepCopyFunction. -func (s *Scheme) AddDeepCopyFuncs(deepCopyFuncs ...interface{}) error { - for _, f := range deepCopyFuncs { - if err := s.cloner.RegisterDeepCopyFunc(f); err != nil { - return err - } - } - return nil -} - -// Similar to AddDeepCopyFuncs, but registers deep-copy functions that were -// automatically generated. -func (s *Scheme) AddGeneratedDeepCopyFuncs(deepCopyFuncs ...interface{}) error { - for _, f := range deepCopyFuncs { - if err := s.cloner.RegisterGeneratedDeepCopyFunc(f); err != nil { - return err - } - } - return nil -} - -// AddFieldLabelConversionFunc adds a conversion function to convert field selectors -// of the given kind from the given version to internal version representation. 
-func (s *Scheme) AddFieldLabelConversionFunc(version, kind string, conversionFunc FieldLabelConversionFunc) error { - if s.fieldLabelConversionFuncs[version] == nil { - s.fieldLabelConversionFuncs[version] = map[string]FieldLabelConversionFunc{} - } - - s.fieldLabelConversionFuncs[version][kind] = conversionFunc - return nil -} - -// AddStructFieldConversion allows you to specify a mechanical copy for a moved -// or renamed struct field without writing an entire conversion function. See -// the comment in conversion.Converter.SetStructFieldCopy for parameter details. -// Call as many times as needed, even on the same fields. -func (s *Scheme) AddStructFieldConversion(srcFieldType interface{}, srcFieldName string, destFieldType interface{}, destFieldName string) error { - return s.converter.SetStructFieldCopy(srcFieldType, srcFieldName, destFieldType, destFieldName) -} - -// RegisterInputDefaults sets the provided field mapping function and field matching -// as the defaults for the provided input type. The fn may be nil, in which case no -// mapping will happen by default. Use this method to register a mechanism for handling -// a specific input type in conversion, such as a map[string]string to structs. -func (s *Scheme) RegisterInputDefaults(in interface{}, fn conversion.FieldMappingFunc, defaultFlags conversion.FieldMatchingFlags) error { - return s.converter.RegisterInputDefaults(in, fn, defaultFlags) -} - -// AddDefaultingFuncs adds functions to the list of default-value functions. -// Each of the given functions is responsible for applying default values -// when converting an instance of a versioned API object into an internal -// API object. These functions do not need to handle sub-objects. We deduce -// how to call these functions from the types of their two parameters. -// -// s.AddDefaultingFuncs( -// func(obj *v1.Pod) { -// if obj.OptionalField == "" { -// obj.OptionalField = "DefaultValue" -// } -// }, -// ) -func (s *Scheme) AddDefaultingFuncs(defaultingFuncs ...interface{}) error { - for _, f := range defaultingFuncs { - err := s.converter.RegisterDefaultingFunc(f) - if err != nil { - return err - } - } - return nil -} - -// Copy does a deep copy of an API object. -func (s *Scheme) Copy(src Object) (Object, error) { - dst, err := s.DeepCopy(src) - if err != nil { - return nil, err - } - return dst.(Object), nil -} - -// Performs a deep copy of the given object. -func (s *Scheme) DeepCopy(src interface{}) (interface{}, error) { - return s.cloner.DeepCopy(src) -} - -// Convert will attempt to convert in into out. Both must be pointers. For easy -// testing of conversion functions. Returns an error if the conversion isn't -// possible. You can call this with types that haven't been registered (for example, -// a to test conversion of types that are nested within registered types), but in -// that case, the conversion.Scope object passed to your conversion functions won't -// have SrcVersion or DestVersion fields set correctly in Meta(). 
-func (s *Scheme) Convert(in, out interface{}) error { - inVersion := unversioned.GroupVersion{Group: "unknown", Version: "unknown"} - outVersion := unversioned.GroupVersion{Group: "unknown", Version: "unknown"} - if inObj, ok := in.(Object); ok { - if gvks, _, err := s.ObjectKinds(inObj); err == nil { - inVersion = gvks[0].GroupVersion() - } - } - if outObj, ok := out.(Object); ok { - if gvks, _, err := s.ObjectKinds(outObj); err == nil { - outVersion = gvks[0].GroupVersion() - } - } - flags, meta := s.generateConvertMeta(inVersion, outVersion, in) - if flags == 0 { - flags = conversion.AllowDifferentFieldTypeNames - } - return s.converter.Convert(in, out, flags, meta) -} - -// Converts the given field label and value for an kind field selector from -// versioned representation to an unversioned one. -func (s *Scheme) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { - if s.fieldLabelConversionFuncs[version] == nil { - return "", "", fmt.Errorf("No field label conversion function found for version: %s", version) - } - conversionFunc, ok := s.fieldLabelConversionFuncs[version][kind] - if !ok { - return "", "", fmt.Errorf("No field label conversion function found for version %s and kind %s", version, kind) - } - return conversionFunc(label, value) -} - -// ConvertToVersion attempts to convert an input object to its matching Kind in another -// version within this scheme. Will return an error if the provided version does not -// contain the inKind (or a mapping by name defined with AddKnownTypeWithName). Will also -// return an error if the conversion does not result in a valid Object being -// returned. The serializer handles loading/serializing nested objects. -func (s *Scheme) ConvertToVersion(in Object, outVersion unversioned.GroupVersion) (Object, error) { - switch in.(type) { - case *Unknown, *Unstructured, *UnstructuredList: - old := in.GetObjectKind().GroupVersionKind() - defer in.GetObjectKind().SetGroupVersionKind(old) - setTargetVersion(in, s, outVersion) - return in, nil - } - t := reflect.TypeOf(in) - if t.Kind() != reflect.Ptr { - return nil, fmt.Errorf("only pointer types may be converted: %v", t) - } - - t = t.Elem() - if t.Kind() != reflect.Struct { - return nil, fmt.Errorf("only pointers to struct types may be converted: %v", t) - } - - var kind unversioned.GroupVersionKind - if unversionedKind, ok := s.unversionedTypes[t]; ok { - kind = unversionedKind - } else { - kinds, ok := s.typeToGVK[t] - if !ok || len(kinds) == 0 { - return nil, fmt.Errorf("%v is not a registered type and cannot be converted into version %q", t, outVersion) - } - kind = kinds[0] - } - - outKind := outVersion.WithKind(kind.Kind) - - inKinds, _, err := s.ObjectKinds(in) - if err != nil { - return nil, err - } - - out, err := s.New(outKind) - if err != nil { - return nil, err - } - - flags, meta := s.generateConvertMeta(inKinds[0].GroupVersion(), outVersion, in) - if err := s.converter.Convert(in, out, flags, meta); err != nil { - return nil, err - } - - setTargetVersion(out, s, outVersion) - return out, nil -} - -// UnsafeConvertToVersion will convert in to the provided outVersion if such a conversion is possible, -// but does not guarantee the output object does not share fields with the input object. It attempts to be as -// efficient as possible when doing conversion. 
-func (s *Scheme) UnsafeConvertToVersion(in Object, outVersion unversioned.GroupVersion) (Object, error) { - switch t := in.(type) { - case *Unknown: - t.APIVersion = outVersion.String() - return t, nil - case *Unstructured: - t.SetAPIVersion(outVersion.String()) - return t, nil - case *UnstructuredList: - t.SetAPIVersion(outVersion.String()) - return t, nil - } - - // determine the incoming kinds with as few allocations as possible. - t := reflect.TypeOf(in) - if t.Kind() != reflect.Ptr { - return nil, fmt.Errorf("only pointer types may be converted: %v", t) - } - t = t.Elem() - if t.Kind() != reflect.Struct { - return nil, fmt.Errorf("only pointers to struct types may be converted: %v", t) - } - kinds, ok := s.typeToGVK[t] - if !ok || len(kinds) == 0 { - return nil, fmt.Errorf("%v is not a registered type and cannot be converted into version %q", t, outVersion) - } - - // if the Go type is also registered to the destination kind, no conversion is necessary - for i := range kinds { - if kinds[i].Version == outVersion.Version && kinds[i].Group == outVersion.Group { - setTargetKind(in, kinds[i]) - return in, nil - } - } - - // type is unversioned, no conversion necessary - // it should be possible to avoid this allocation - if unversionedKind, ok := s.unversionedTypes[t]; ok { - kind := unversionedKind - outKind := outVersion.WithKind(kind.Kind) - setTargetKind(in, outKind) - return in, nil - } - - // allocate a new object as the target using the target kind - // TODO: this should look in the target group version and find the first kind that matches, rather than the - // first kind registered in typeToGVK - kind := kinds[0] - kind.Version = outVersion.Version - kind.Group = outVersion.Group - out, err := s.New(kind) - if err != nil { - return nil, err - } - - // TODO: try to avoid the allocations here - in fast paths we are not likely to need these flags or meta - flags, meta := s.converter.DefaultMeta(t) - if err := s.converter.Convert(in, out, flags, meta); err != nil { - return nil, err - } - - setTargetKind(out, kind) - return out, nil -} - -// generateConvertMeta constructs the meta value we pass to Convert. -func (s *Scheme) generateConvertMeta(srcGroupVersion, destGroupVersion unversioned.GroupVersion, in interface{}) (conversion.FieldMatchingFlags, *conversion.Meta) { - return s.converter.DefaultMeta(reflect.TypeOf(in)) -} - -// setTargetVersion is deprecated and should be replaced by use of setTargetKind -func setTargetVersion(obj Object, raw *Scheme, gv unversioned.GroupVersion) { - if gv.Version == APIVersionInternal { - // internal is a special case - obj.GetObjectKind().SetGroupVersionKind(unversioned.GroupVersionKind{}) - return - } - if gvks, _, _ := raw.ObjectKinds(obj); len(gvks) > 0 { - obj.GetObjectKind().SetGroupVersionKind(unversioned.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: gvks[0].Kind}) - } else { - obj.GetObjectKind().SetGroupVersionKind(unversioned.GroupVersionKind{Group: gv.Group, Version: gv.Version}) - } -} - -// setTargetKind sets the kind on an object, taking into account whether the target kind is the internal version. 
-func setTargetKind(obj Object, kind unversioned.GroupVersionKind) { - if kind.Version == APIVersionInternal { - // internal is a special case - // TODO: look at removing the need to special case this - obj.GetObjectKind().SetGroupVersionKind(unversioned.GroupVersionKind{}) - return - } - obj.GetObjectKind().SetGroupVersionKind(kind) -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/kubernetes/pkg/runtime/serializer/codec_factory.go deleted file mode 100644 index 4432e4896..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/codec_factory.go +++ /dev/null @@ -1,364 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package serializer - -import ( - "io" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer/json" - "k8s.io/kubernetes/pkg/runtime/serializer/recognizer" - "k8s.io/kubernetes/pkg/runtime/serializer/versioning" -) - -// serializerExtensions are for serializers that are conditionally compiled in -var serializerExtensions = []func(*runtime.Scheme) (serializerType, bool){} - -type serializerType struct { - AcceptContentTypes []string - ContentType string - FileExtensions []string - // EncodesAsText should be true if this content type can be represented safely in UTF-8 - EncodesAsText bool - - Serializer runtime.Serializer - PrettySerializer runtime.Serializer - // RawSerializer serializes an object without adding a type wrapper. Some serializers, like JSON - // automatically include identifying type information with the JSON. Others, like Protobuf, need - // a wrapper object that includes type information. This serializer should be set if the serializer - // can serialize / deserialize objects without type info. Note that this serializer will always - // be expected to pass into or a gvk to Decode, since no type information will be available on - // the object itself. - RawSerializer runtime.Serializer - // Specialize gives the type the opportunity to return a different serializer implementation if - // the content type contains alternate operations. Here it is used to implement "pretty" as an - // option to application/json, but could also be used to allow serializers to perform type - // defaulting or alter output. 
- Specialize func(map[string]string) (runtime.Serializer, bool) - - AcceptStreamContentTypes []string - StreamContentType string - - Framer runtime.Framer - StreamSerializer runtime.Serializer - StreamSpecialize func(map[string]string) (runtime.Serializer, bool) -} - -func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory) []serializerType { - jsonSerializer := json.NewSerializer(mf, scheme, scheme, false) - jsonPrettySerializer := json.NewSerializer(mf, scheme, scheme, true) - yamlSerializer := json.NewYAMLSerializer(mf, scheme, scheme) - - serializers := []serializerType{ - { - AcceptContentTypes: []string{"application/json"}, - ContentType: "application/json", - FileExtensions: []string{"json"}, - EncodesAsText: true, - Serializer: jsonSerializer, - PrettySerializer: jsonPrettySerializer, - - AcceptStreamContentTypes: []string{"application/json", "application/json;stream=watch"}, - StreamContentType: "application/json", - Framer: json.Framer, - StreamSerializer: jsonSerializer, - }, - { - AcceptContentTypes: []string{"application/yaml"}, - ContentType: "application/yaml", - FileExtensions: []string{"yaml"}, - EncodesAsText: true, - Serializer: yamlSerializer, - - // TODO: requires runtime.RawExtension to properly distinguish when the nested content is - // yaml, because the yaml encoder invokes MarshalJSON first - //AcceptStreamContentTypes: []string{"application/yaml", "application/yaml;stream=watch"}, - //StreamContentType: "application/yaml;stream=watch", - //Framer: json.YAMLFramer, - //StreamSerializer: yamlSerializer, - }, - } - - for _, fn := range serializerExtensions { - if serializer, ok := fn(scheme); ok { - serializers = append(serializers, serializer) - } - } - return serializers -} - -// CodecFactory provides methods for retrieving codecs and serializers for specific -// versions and content types. -type CodecFactory struct { - scheme *runtime.Scheme - serializers []serializerType - universal runtime.Decoder - accepts []string - streamingAccepts []string - - legacySerializer runtime.Serializer -} - -// NewCodecFactory provides methods for retrieving serializers for the supported wire formats -// and conversion wrappers to define preferred internal and external versions. In the future, -// as the internal version is used less, callers may instead use a defaulting serializer and -// only convert objects which are shared internally (Status, common API machinery). -// TODO: allow other codecs to be compiled in? -// TODO: accept a scheme interface -func NewCodecFactory(scheme *runtime.Scheme) CodecFactory { - serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory) - return newCodecFactory(scheme, serializers) -} - -// newCodecFactory is a helper for testing that allows a different metafactory to be specified. 
-func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) CodecFactory { - decoders := make([]runtime.Decoder, 0, len(serializers)) - accepts := []string{} - alreadyAccepted := make(map[string]struct{}) - - var legacySerializer runtime.Serializer - for _, d := range serializers { - decoders = append(decoders, d.Serializer) - for _, mediaType := range d.AcceptContentTypes { - if _, ok := alreadyAccepted[mediaType]; ok { - continue - } - alreadyAccepted[mediaType] = struct{}{} - accepts = append(accepts, mediaType) - if mediaType == "application/json" { - legacySerializer = d.Serializer - } - } - } - if legacySerializer == nil { - legacySerializer = serializers[0].Serializer - } - - streamAccepts := []string{} - alreadyAccepted = make(map[string]struct{}) - for _, d := range serializers { - if len(d.StreamContentType) == 0 { - continue - } - for _, mediaType := range d.AcceptStreamContentTypes { - if _, ok := alreadyAccepted[mediaType]; ok { - continue - } - alreadyAccepted[mediaType] = struct{}{} - streamAccepts = append(streamAccepts, mediaType) - } - } - - return CodecFactory{ - scheme: scheme, - serializers: serializers, - universal: recognizer.NewDecoder(decoders...), - - accepts: accepts, - streamingAccepts: streamAccepts, - - legacySerializer: legacySerializer, - } -} - -var _ runtime.NegotiatedSerializer = &CodecFactory{} - -// SupportedMediaTypes returns the RFC2046 media types that this factory has serializers for. -func (f CodecFactory) SupportedMediaTypes() []string { - return f.accepts -} - -// SupportedStreamingMediaTypes returns the RFC2046 media types that this factory has stream serializers for. -func (f CodecFactory) SupportedStreamingMediaTypes() []string { - return f.streamingAccepts -} - -// LegacyCodec encodes output to a given API version, and decodes output into the internal form from -// any recognized source. The returned codec will always encode output to JSON. -// -// This method is deprecated - clients and servers should negotiate a serializer by mime-type and -// invoke CodecForVersions. Callers that need only to read data should use UniversalDecoder(). -func (f CodecFactory) LegacyCodec(version ...unversioned.GroupVersion) runtime.Codec { - return versioning.NewCodecForScheme(f.scheme, f.legacySerializer, f.universal, version, nil) -} - -// UniversalDeserializer can convert any stored data recognized by this factory into a Go object that satisfies -// runtime.Object. It does not perform conversion. It does not perform defaulting. -func (f CodecFactory) UniversalDeserializer() runtime.Decoder { - return f.universal -} - -// UniversalDecoder returns a runtime.Decoder capable of decoding all known API objects in all known formats. Used -// by clients that do not need to encode objects but want to deserialize API objects stored on disk. Only decodes -// objects in groups registered with the scheme. The GroupVersions passed may be used to select alternate -// versions of objects to return - by default, runtime.APIVersionInternal is used. If any versions are specified, -// unrecognized groups will be returned in the version they are encoded as (no conversion). This decoder performs -// defaulting. -// -// TODO: the decoder will eventually be removed in favor of dealing with objects in their versioned form -func (f CodecFactory) UniversalDecoder(versions ...unversioned.GroupVersion) runtime.Decoder { - return f.CodecForVersions(nil, f.universal, nil, versions) -} - -// CodecFor creates a codec with the provided serializer. 
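A brief sketch of how the CodecFactory defined above was typically consumed; it assumes a populated *runtime.Scheme, and decodeStored is a hypothetical helper built only on the NewCodecFactory and UniversalDeserializer signatures visible in this diff:

import (
    "k8s.io/kubernetes/pkg/runtime"
    "k8s.io/kubernetes/pkg/runtime/serializer"
)

// decodeStored is a sketch only: it turns stored bytes back into a
// runtime.Object using the factory's universal deserializer.
func decodeStored(scheme *runtime.Scheme, data []byte) (runtime.Object, error) {
    factory := serializer.NewCodecFactory(scheme)

    // UniversalDeserializer recognizes every registered wire format but
    // performs no conversion and no defaulting.
    obj, gvk, err := factory.UniversalDeserializer().Decode(data, nil, nil)
    if err != nil {
        return nil, err
    }
    _ = gvk // the group/version/kind detected on the wire
    return obj, nil
}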
If an object is decoded and its group is not in the list, -// it will default to runtime.APIVersionInternal. If encode is not specified for an object's group, the object is not -// converted. If encode or decode are nil, no conversion is performed. -func (f CodecFactory) CodecForVersions(encoder runtime.Encoder, decoder runtime.Decoder, encode []unversioned.GroupVersion, decode []unversioned.GroupVersion) runtime.Codec { - return versioning.NewCodecForScheme(f.scheme, encoder, decoder, encode, decode) -} - -// DecoderToVersion returns a decoder that targets the provided group version. -func (f CodecFactory) DecoderToVersion(decoder runtime.Decoder, gv unversioned.GroupVersion) runtime.Decoder { - return f.CodecForVersions(nil, decoder, nil, []unversioned.GroupVersion{gv}) -} - -// EncoderForVersion returns an encoder that targets the provided group version. -func (f CodecFactory) EncoderForVersion(encoder runtime.Encoder, gv unversioned.GroupVersion) runtime.Encoder { - return f.CodecForVersions(encoder, nil, []unversioned.GroupVersion{gv}, nil) -} - -// SerializerForMediaType returns a serializer that matches the provided RFC2046 mediaType, or false if no such -// serializer exists -func (f CodecFactory) SerializerForMediaType(mediaType string, params map[string]string) (runtime.SerializerInfo, bool) { - for _, s := range f.serializers { - for _, accepted := range s.AcceptContentTypes { - if accepted == mediaType { - // specialization abstracts variants to the content type - if s.Specialize != nil && len(params) > 0 { - serializer, ok := s.Specialize(params) - // TODO: return formatted mediaType+params - return runtime.SerializerInfo{Serializer: serializer, MediaType: s.ContentType, EncodesAsText: s.EncodesAsText}, ok - } - - // legacy support for ?pretty=1 continues, but this is more formally defined - if v, ok := params["pretty"]; ok && v == "1" && s.PrettySerializer != nil { - return runtime.SerializerInfo{Serializer: s.PrettySerializer, MediaType: s.ContentType, EncodesAsText: s.EncodesAsText}, true - } - - // return the base variant - return runtime.SerializerInfo{Serializer: s.Serializer, MediaType: s.ContentType, EncodesAsText: s.EncodesAsText}, true - } - } - } - return runtime.SerializerInfo{}, false -} - -// StreamingSerializerForMediaType returns a serializer that matches the provided RFC2046 mediaType, or false if no such -// serializer exists -func (f CodecFactory) StreamingSerializerForMediaType(mediaType string, params map[string]string) (runtime.StreamSerializerInfo, bool) { - for _, s := range f.serializers { - for _, accepted := range s.AcceptStreamContentTypes { - if accepted == mediaType { - // TODO: accept params - nested, ok := f.SerializerForMediaType(s.ContentType, nil) - if !ok { - panic("no serializer defined for internal content type") - } - - if s.StreamSpecialize != nil && len(params) > 0 { - serializer, ok := s.StreamSpecialize(params) - // TODO: return formatted mediaType+params - return runtime.StreamSerializerInfo{ - SerializerInfo: runtime.SerializerInfo{ - Serializer: serializer, - MediaType: s.StreamContentType, - EncodesAsText: s.EncodesAsText, - }, - Framer: s.Framer, - Embedded: nested, - }, ok - } - - return runtime.StreamSerializerInfo{ - SerializerInfo: runtime.SerializerInfo{ - Serializer: s.StreamSerializer, - MediaType: s.StreamContentType, - EncodesAsText: s.EncodesAsText, - }, - Framer: s.Framer, - Embedded: nested, - }, true - } - } - } - return runtime.StreamSerializerInfo{}, false -} - -// SerializerForFileExtension returns a serializer 
for the provided extension, or false if no serializer matches. -func (f CodecFactory) SerializerForFileExtension(extension string) (runtime.Serializer, bool) { - for _, s := range f.serializers { - for _, ext := range s.FileExtensions { - if extension == ext { - return s.Serializer, true - } - } - } - return nil, false -} - -// DirectCodecFactory provides methods for retrieving "DirectCodec"s, which do not do conversion. -type DirectCodecFactory struct { - CodecFactory -} - -// EncoderForVersion returns an encoder that does not do conversion. gv is ignored. -func (f DirectCodecFactory) EncoderForVersion(serializer runtime.Encoder, gv unversioned.GroupVersion) runtime.Encoder { - return DirectCodec{ - runtime.NewCodec(serializer, nil), - f.CodecFactory.scheme, - } -} - -// DecoderToVersion returns an decoder that does not do conversion. gv is ignored. -func (f DirectCodecFactory) DecoderToVersion(serializer runtime.Decoder, gv unversioned.GroupVersion) runtime.Decoder { - return DirectCodec{ - runtime.NewCodec(nil, serializer), - nil, - } -} - -// DirectCodec is a codec that does not do conversion. It sets the gvk during serialization, and removes the gvk during deserialization. -type DirectCodec struct { - runtime.Serializer - runtime.ObjectTyper -} - -// EncodeToStream does not do conversion. It sets the gvk during serialization. overrides are ignored. -func (c DirectCodec) Encode(obj runtime.Object, stream io.Writer) error { - gvks, _, err := c.ObjectTyper.ObjectKinds(obj) - if err != nil { - return err - } - kind := obj.GetObjectKind() - oldGVK := kind.GroupVersionKind() - kind.SetGroupVersionKind(gvks[0]) - err = c.Serializer.Encode(obj, stream) - kind.SetGroupVersionKind(oldGVK) - return err -} - -// Decode does not do conversion. It removes the gvk during deserialization. -func (c DirectCodec) Decode(data []byte, defaults *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { - obj, gvk, err := c.Serializer.Decode(data, defaults, into) - if obj != nil { - kind := obj.GetObjectKind() - // clearing the gvk is just a convention of a codec - kind.SetGroupVersionKind(unversioned.GroupVersionKind{}) - } - return obj, gvk, err -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/json.go deleted file mode 100644 index c226448f3..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/json.go +++ /dev/null @@ -1,243 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package json - -import ( - "encoding/json" - "io" - - "github.com/ghodss/yaml" - "github.com/ugorji/go/codec" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/framer" - utilyaml "k8s.io/kubernetes/pkg/util/yaml" -) - -// NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. 
If typer -// is not nil, the object has the group, version, and kind fields set. -func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer { - return &Serializer{ - meta: meta, - creater: creater, - typer: typer, - yaml: false, - pretty: pretty, - } -} - -// NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer -// is not nil, the object has the group, version, and kind fields set. This serializer supports only the subset of YAML that -// matches JSON, and will error if constructs are used that do not serialize to JSON. -func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { - return &Serializer{ - meta: meta, - creater: creater, - typer: typer, - yaml: true, - } -} - -type Serializer struct { - meta MetaFactory - creater runtime.ObjectCreater - typer runtime.ObjectTyper - yaml bool - pretty bool -} - -// Serializer implements Serializer -var _ runtime.Serializer = &Serializer{} - -// Decode attempts to convert the provided data into YAML or JSON, extract the stored schema kind, apply the provided default gvk, and then -// load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, the raw data will be -// extracted and no decoding will be performed. If into is not registered with the typer, then the object will be straight decoded using -// normal JSON/YAML unmarshalling. If into is provided and the original data is not fully qualified with kind/version/group, the type of -// the into will be used to alter the returned gvk. On success or most errors, the method will return the calculated schema kind. -func (s *Serializer) Decode(originalData []byte, gvk *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { - if versioned, ok := into.(*runtime.VersionedObjects); ok { - into = versioned.Last() - obj, actual, err := s.Decode(originalData, gvk, into) - if err != nil { - return nil, actual, err - } - versioned.Objects = []runtime.Object{obj} - return versioned, actual, nil - } - - data := originalData - if s.yaml { - altered, err := yaml.YAMLToJSON(data) - if err != nil { - return nil, nil, err - } - data = altered - } - - actual, err := s.meta.Interpret(data) - if err != nil { - return nil, nil, err - } - - if gvk != nil { - // apply kind and version defaulting from provided default - if len(actual.Kind) == 0 { - actual.Kind = gvk.Kind - } - if len(actual.Version) == 0 && len(actual.Group) == 0 { - actual.Group = gvk.Group - actual.Version = gvk.Version - } - if len(actual.Version) == 0 && actual.Group == gvk.Group { - actual.Version = gvk.Version - } - } - - if unk, ok := into.(*runtime.Unknown); ok && unk != nil { - unk.Raw = originalData - unk.ContentType = runtime.ContentTypeJSON - unk.GetObjectKind().SetGroupVersionKind(*actual) - return unk, actual, nil - } - - if into != nil { - types, _, err := s.typer.ObjectKinds(into) - switch { - case runtime.IsNotRegisteredError(err): - if err := codec.NewDecoderBytes(data, new(codec.JsonHandle)).Decode(into); err != nil { - return nil, actual, err - } - return into, actual, nil - case err != nil: - return nil, actual, err - default: - typed := types[0] - if len(actual.Kind) == 0 { - actual.Kind = typed.Kind - } - if len(actual.Version) == 0 && len(actual.Group) == 0 { - actual.Group = typed.Group - actual.Version = typed.Version - } - if 
len(actual.Version) == 0 && actual.Group == typed.Group { - actual.Version = typed.Version - } - } - } - - if len(actual.Kind) == 0 { - return nil, actual, runtime.NewMissingKindErr(string(originalData)) - } - if len(actual.Version) == 0 { - return nil, actual, runtime.NewMissingVersionErr(string(originalData)) - } - - // use the target if necessary - obj, err := runtime.UseOrCreateObject(s.typer, s.creater, *actual, into) - if err != nil { - return nil, actual, err - } - - if err := codec.NewDecoderBytes(data, new(codec.JsonHandle)).Decode(obj); err != nil { - return nil, actual, err - } - return obj, actual, nil -} - -// Encode serializes the provided object to the given writer. -func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { - if s.yaml { - json, err := json.Marshal(obj) - if err != nil { - return err - } - data, err := yaml.JSONToYAML(json) - if err != nil { - return err - } - _, err = w.Write(data) - return err - } - - if s.pretty { - data, err := json.MarshalIndent(obj, "", " ") - if err != nil { - return err - } - _, err = w.Write(data) - return err - } - encoder := json.NewEncoder(w) - return encoder.Encode(obj) -} - -// RecognizesData implements the RecognizingDecoder interface. -func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) { - if s.yaml { - // we could potentially look for '---' - return false, true, nil - } - _, ok = utilyaml.GuessJSONStream(peek, 2048) - return ok, false, nil -} - -// Framer is the default JSON framing behavior, with newlines delimiting individual objects. -var Framer = jsonFramer{} - -type jsonFramer struct{} - -// NewFrameWriter implements stream framing for this serializer -func (jsonFramer) NewFrameWriter(w io.Writer) io.Writer { - // we can write JSON objects directly to the writer, because they are self-framing - return w -} - -// NewFrameReader implements stream framing for this serializer -func (jsonFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { - // we need to extract the JSON chunks of data to pass to Decode() - return framer.NewJSONFramedReader(r) -} - -// Framer is the default JSON framing behavior, with newlines delimiting individual objects. -var YAMLFramer = yamlFramer{} - -type yamlFramer struct{} - -// NewFrameWriter implements stream framing for this serializer -func (yamlFramer) NewFrameWriter(w io.Writer) io.Writer { - return yamlFrameWriter{w} -} - -// NewFrameReader implements stream framing for this serializer -func (yamlFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { - // extract the YAML document chunks directly - return utilyaml.NewDocumentDecoder(r) -} - -type yamlFrameWriter struct { - w io.Writer -} - -// Write separates each document with the YAML document separator (`---` followed by line -// break). Writers must write well formed YAML documents (include a final line break). -func (w yamlFrameWriter) Write(data []byte) (n int, err error) { - if _, err := w.w.Write([]byte("---\n")); err != nil { - return 0, err - } - return w.w.Write(data) -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/meta.go b/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/meta.go deleted file mode 100644 index 91df105ed..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/json/meta.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package json - -import ( - "encoding/json" - "fmt" - - "k8s.io/kubernetes/pkg/api/unversioned" -) - -// MetaFactory is used to store and retrieve the version and kind -// information for JSON objects in a serializer. -type MetaFactory interface { - // Interpret should return the version and kind of the wire-format of - // the object. - Interpret(data []byte) (*unversioned.GroupVersionKind, error) -} - -// DefaultMetaFactory is a default factory for versioning objects in JSON. The object -// in memory and in the default JSON serialization will use the "kind" and "apiVersion" -// fields. -var DefaultMetaFactory = SimpleMetaFactory{} - -// SimpleMetaFactory provides default methods for retrieving the type and version of objects -// that are identified with an "apiVersion" and "kind" fields in their JSON -// serialization. It may be parameterized with the names of the fields in memory, or an -// optional list of base structs to search for those fields in memory. -type SimpleMetaFactory struct { -} - -// Interpret will return the APIVersion and Kind of the JSON wire-format -// encoding of an object, or an error. -func (SimpleMetaFactory) Interpret(data []byte) (*unversioned.GroupVersionKind, error) { - findKind := struct { - APIVersion string `json:"apiVersion,omitempty"` - Kind string `json:"kind,omitempty"` - }{} - if err := json.Unmarshal(data, &findKind); err != nil { - return nil, fmt.Errorf("couldn't get version/kind; json parse error: %v", err) - } - gv, err := unversioned.ParseGroupVersion(findKind.APIVersion) - if err != nil { - return nil, err - } - return &unversioned.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: findKind.Kind}, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/negotiated_codec.go b/vendor/k8s.io/kubernetes/pkg/runtime/serializer/negotiated_codec.go deleted file mode 100644 index 6f6a56dd3..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/negotiated_codec.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package serializer - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// TODO: We should figure out what happens when someone asks -// encoder for version and it conflicts with the raw serializer. 
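A short sketch of how the SimpleMetaFactory removed just above was used to peek at a document's type information before a full decode; interpretKind is a hypothetical helper and only the DefaultMetaFactory.Interpret signature shown in this diff is assumed:

import (
    "fmt"

    runtimejson "k8s.io/kubernetes/pkg/runtime/serializer/json"
)

// interpretKind is a sketch only: it reads apiVersion/kind from a JSON
// payload without decoding the full object.
func interpretKind(data []byte) error {
    gvk, err := runtimejson.DefaultMetaFactory.Interpret(data)
    if err != nil {
        return err // not JSON, or the apiVersion could not be parsed
    }
    fmt.Printf("group=%q version=%q kind=%q\n", gvk.Group, gvk.Version, gvk.Kind)
    return nil
}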
-type negotiatedSerializerWrapper struct { - info runtime.SerializerInfo - streamInfo runtime.StreamSerializerInfo -} - -func NegotiatedSerializerWrapper(info runtime.SerializerInfo, streamInfo runtime.StreamSerializerInfo) runtime.NegotiatedSerializer { - return &negotiatedSerializerWrapper{info, streamInfo} -} - -func (n *negotiatedSerializerWrapper) SupportedMediaTypes() []string { - return []string{} -} - -func (n *negotiatedSerializerWrapper) SerializerForMediaType(mediaType string, options map[string]string) (runtime.SerializerInfo, bool) { - return n.info, true -} - -func (n *negotiatedSerializerWrapper) SupportedStreamingMediaTypes() []string { - return []string{} -} - -func (n *negotiatedSerializerWrapper) StreamingSerializerForMediaType(mediaType string, options map[string]string) (runtime.StreamSerializerInfo, bool) { - return n.streamInfo, true -} - -func (n *negotiatedSerializerWrapper) EncoderForVersion(e runtime.Encoder, _ unversioned.GroupVersion) runtime.Encoder { - return e -} - -func (n *negotiatedSerializerWrapper) DecoderToVersion(d runtime.Decoder, _gv unversioned.GroupVersion) runtime.Decoder { - return d -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/doc.go b/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/doc.go deleted file mode 100644 index 91b86af6c..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package protobuf provides a Kubernetes serializer for the protobuf format. -package protobuf diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf_extension.go b/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf_extension.go deleted file mode 100644 index a93708c45..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/protobuf_extension.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package serializer - -import ( - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer/protobuf" -) - -const ( - // contentTypeProtobuf is the protobuf type exposed for Kubernetes. It is private to prevent others from - // depending on it unintentionally. - // TODO: potentially move to pkg/api (since it's part of the Kube public API) and pass it in to the - // CodecFactory on initialization. 
- contentTypeProtobuf = "application/vnd.kubernetes.protobuf" - contentTypeProtobufWatch = contentTypeProtobuf + ";stream=watch" -) - -func protobufSerializer(scheme *runtime.Scheme) (serializerType, bool) { - serializer := protobuf.NewSerializer(scheme, scheme, contentTypeProtobuf) - raw := protobuf.NewRawSerializer(scheme, scheme, contentTypeProtobuf) - return serializerType{ - AcceptContentTypes: []string{contentTypeProtobuf}, - ContentType: contentTypeProtobuf, - FileExtensions: []string{"pb"}, - Serializer: serializer, - RawSerializer: raw, - - AcceptStreamContentTypes: []string{contentTypeProtobuf, contentTypeProtobufWatch}, - StreamContentType: contentTypeProtobufWatch, - Framer: protobuf.LengthDelimitedFramer, - StreamSerializer: raw, - }, true -} - -func init() { - serializerExtensions = append(serializerExtensions, protobufSerializer) -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/kubernetes/pkg/runtime/serializer/versioning/versioning.go deleted file mode 100644 index f69d126c8..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/serializer/versioning/versioning.go +++ /dev/null @@ -1,275 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package versioning - -import ( - "fmt" - "io" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/runtime" -) - -// EnableCrossGroupDecoding modifies the given decoder in place, if it is a codec -// from this package. It allows objects from one group to be auto-decoded into -// another group. 'destGroup' must already exist in the codec. -// TODO: this is an encapsulation violation and should be refactored -func EnableCrossGroupDecoding(d runtime.Decoder, sourceGroup, destGroup string) error { - internal, ok := d.(*codec) - if !ok { - return fmt.Errorf("unsupported decoder type") - } - - dest, ok := internal.decodeVersion[destGroup] - if !ok { - return fmt.Errorf("group %q is not a possible destination group in the given codec", destGroup) - } - internal.decodeVersion[sourceGroup] = dest - - return nil -} - -// EnableCrossGroupEncoding modifies the given encoder in place, if it is a codec -// from this package. It allows objects from one group to be auto-decoded into -// another group. 'destGroup' must already exist in the codec. -// TODO: this is an encapsulation violation and should be refactored -func EnableCrossGroupEncoding(e runtime.Encoder, sourceGroup, destGroup string) error { - internal, ok := e.(*codec) - if !ok { - return fmt.Errorf("unsupported encoder type") - } - - dest, ok := internal.encodeVersion[destGroup] - if !ok { - return fmt.Errorf("group %q is not a possible destination group in the given codec", destGroup) - } - internal.encodeVersion[sourceGroup] = dest - - return nil -} - -// NewCodecForScheme is a convenience method for callers that are using a scheme. -func NewCodecForScheme( - // TODO: I should be a scheme interface? 
- scheme *runtime.Scheme, - encoder runtime.Encoder, - decoder runtime.Decoder, - encodeVersion []unversioned.GroupVersion, - decodeVersion []unversioned.GroupVersion, -) runtime.Codec { - return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, encodeVersion, decodeVersion) -} - -// NewCodec takes objects in their internal versions and converts them to external versions before -// serializing them. It assumes the serializer provided to it only deals with external versions. -// This class is also a serializer, but is generally used with a specific version. -func NewCodec( - encoder runtime.Encoder, - decoder runtime.Decoder, - convertor runtime.ObjectConvertor, - creater runtime.ObjectCreater, - copier runtime.ObjectCopier, - typer runtime.ObjectTyper, - encodeVersion []unversioned.GroupVersion, - decodeVersion []unversioned.GroupVersion, -) runtime.Codec { - internal := &codec{ - encoder: encoder, - decoder: decoder, - convertor: convertor, - creater: creater, - copier: copier, - typer: typer, - } - if encodeVersion != nil { - internal.encodeVersion = make(map[string]unversioned.GroupVersion) - for _, v := range encodeVersion { - // first one for a group wins. This is consistent with best to worst order throughout the codebase - if _, ok := internal.encodeVersion[v.Group]; ok { - continue - } - internal.encodeVersion[v.Group] = v - } - if len(internal.encodeVersion) == 1 { - for _, v := range internal.encodeVersion { - internal.preferredEncodeVersion = []unversioned.GroupVersion{v} - } - } - } - if decodeVersion != nil { - internal.decodeVersion = make(map[string]unversioned.GroupVersion) - for _, v := range decodeVersion { - // first one for a group wins. This is consistent with best to worst order throughout the codebase - if _, ok := internal.decodeVersion[v.Group]; ok { - continue - } - internal.decodeVersion[v.Group] = v - } - } - - return internal -} - -type codec struct { - encoder runtime.Encoder - decoder runtime.Decoder - convertor runtime.ObjectConvertor - creater runtime.ObjectCreater - copier runtime.ObjectCopier - typer runtime.ObjectTyper - - encodeVersion map[string]unversioned.GroupVersion - decodeVersion map[string]unversioned.GroupVersion - - preferredEncodeVersion []unversioned.GroupVersion -} - -// Decode attempts a decode of the object, then tries to convert it to the internal version. If into is provided and the decoding is -// successful, the returned runtime.Object will be the value passed as into. Note that this may bypass conversion if you pass an -// into that matches the serialized version. -func (c *codec) Decode(data []byte, defaultGVK *unversioned.GroupVersionKind, into runtime.Object) (runtime.Object, *unversioned.GroupVersionKind, error) { - versioned, isVersioned := into.(*runtime.VersionedObjects) - if isVersioned { - into = versioned.Last() - } - - obj, gvk, err := c.decoder.Decode(data, defaultGVK, into) - if err != nil { - return nil, gvk, err - } - - // if we specify a target, use generic conversion. 
- if into != nil { - if into == obj { - if isVersioned { - return versioned, gvk, nil - } - return into, gvk, nil - } - if err := c.convertor.Convert(obj, into); err != nil { - return nil, gvk, err - } - if isVersioned { - versioned.Objects = []runtime.Object{obj, into} - return versioned, gvk, nil - } - return into, gvk, nil - } - - // invoke a version conversion - group := gvk.Group - if defaultGVK != nil { - group = defaultGVK.Group - } - var targetGV unversioned.GroupVersion - if c.decodeVersion == nil { - // convert to internal by default - targetGV.Group = group - targetGV.Version = runtime.APIVersionInternal - } else { - gv, ok := c.decodeVersion[group] - if !ok { - // unknown objects are left in their original version - if isVersioned { - versioned.Objects = []runtime.Object{obj} - return versioned, gvk, nil - } - return obj, gvk, nil - } - targetGV = gv - } - - if gvk.GroupVersion() == targetGV { - if isVersioned { - versioned.Objects = []runtime.Object{obj} - return versioned, gvk, nil - } - return obj, gvk, nil - } - - if isVersioned { - // create a copy, because ConvertToVersion does not guarantee non-mutation of objects - copied, err := c.copier.Copy(obj) - if err != nil { - copied = obj - } - versioned.Objects = []runtime.Object{copied} - } - - // Convert if needed. - out, err := c.convertor.ConvertToVersion(obj, targetGV) - if err != nil { - return nil, gvk, err - } - if isVersioned { - versioned.Objects = append(versioned.Objects, out) - return versioned, gvk, nil - } - return out, gvk, nil -} - -// Encode ensures the provided object is output in the appropriate group and version, invoking -// conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is. -func (c *codec) Encode(obj runtime.Object, w io.Writer) error { - if _, ok := obj.(*runtime.Unknown); ok { - return c.encoder.Encode(obj, w) - } - gvks, isUnversioned, err := c.typer.ObjectKinds(obj) - if err != nil { - return err - } - gvk := gvks[0] - - if c.encodeVersion == nil || isUnversioned { - objectKind := obj.GetObjectKind() - old := objectKind.GroupVersionKind() - objectKind.SetGroupVersionKind(gvk) - err = c.encoder.Encode(obj, w) - objectKind.SetGroupVersionKind(old) - return err - } - - targetGV, ok := c.encodeVersion[gvk.Group] - - // attempt a conversion to the sole encode version - if !ok && c.preferredEncodeVersion != nil { - ok = true - targetGV = c.preferredEncodeVersion[0] - } - - // if no fallback is available, error - if !ok { - return fmt.Errorf("the codec does not recognize group %q for kind %q and cannot encode it", gvk.Group, gvk.Kind) - } - - // Perform a conversion if necessary - objectKind := obj.GetObjectKind() - old := objectKind.GroupVersionKind() - out, err := c.convertor.ConvertToVersion(obj, targetGV) - if err != nil { - if ok { - return err - } - } else { - obj = out - } - // Conversion is responsible for setting the proper group, version, and kind onto the outgoing object - err = c.encoder.Encode(obj, w) - // restore the old GVK, in case conversion returned the same object - objectKind.SetGroupVersionKind(old) - return err -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/types.go b/vendor/k8s.io/kubernetes/pkg/runtime/types.go deleted file mode 100644 index e646d2afa..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/types.go +++ /dev/null @@ -1,514 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -import ( - "fmt" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api/meta/metatypes" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/types" -) - -// Note that the types provided in this file are not versioned and are intended to be -// safe to use from within all versions of every API object. - -// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, -// like this: -// type MyAwesomeAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// ... // other fields -// } -// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *unversioned.GroupVersionKind) { unversioned.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind -// -// TypeMeta is provided here for convenience. You may use it directly from this package or define -// your own with the same fields. -// -// +gencopy=true -// +protobuf=true -type TypeMeta struct { - APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"` - Kind string `json:"kind,omitempty" yaml:"kind,omitempty" protobuf:"bytes,2,opt,name=kind"` -} - -const ( - ContentTypeJSON string = "application/json" -) - -// RawExtension is used to hold extensions in external versions. -// -// To use this, make a field which has RawExtension as its type in your external, versioned -// struct, and Object in your internal struct. You also need to register your -// various plugin types. -// -// // Internal package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.Object `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } -// -// // External package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.RawExtension `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } -// -// // On the wire, the JSON will look something like this: -// { -// "kind":"MyAPIObject", -// "apiVersion":"v1", -// "myPlugin": { -// "kind":"PluginA", -// "aOption":"foo", -// }, -// } -// -// So what happens? Decode first uses json or yaml to unmarshal the serialized data into -// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. -// The next step is to copy (using pkg/conversion) into the internal struct. The runtime -// package's DefaultScheme has conversion functions installed which will unpack the -// JSON stored in RawExtension, turning it into the correct object type, and storing it -// in the Object. (TODO: In the case where the object is of an unknown type, a -// runtime.Unknown object will be created and stored.) -// -// +gencopy=true -// +protobuf=true -type RawExtension struct { - // Raw is the underlying serialization of this object. - // - // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. 
- Raw []byte `protobuf:"bytes,1,opt,name=raw"` - // Object can hold a representation of this extension - useful for working with versioned - // structs. - Object Object `json:"-"` -} - -// Unknown allows api objects with unknown types to be passed-through. This can be used -// to deal with the API objects from a plug-in. Unknown objects still have functioning -// TypeMeta features-- kind, version, etc. -// TODO: Make this object have easy access to field based accessors and settors for -// metadata and field mutatation. -// -// +gencopy=true -// +protobuf=true -type Unknown struct { - TypeMeta `json:",inline" protobuf:"bytes,1,opt,name=typeMeta"` - // Raw will hold the complete serialized object which couldn't be matched - // with a registered type. Most likely, nothing should be done with this - // except for passing it through the system. - Raw []byte `protobuf:"bytes,2,opt,name=raw"` - // ContentEncoding is encoding used to encode 'Raw' data. - // Unspecified means no encoding. - ContentEncoding string `protobuf:"bytes,3,opt,name=contentEncoding"` - // ContentType is serialization method used to serialize 'Raw'. - // Unspecified means ContentTypeJSON. - ContentType string `protobuf:"bytes,4,opt,name=contentType"` -} - -// Unstructured allows objects that do not have Golang structs registered to be manipulated -// generically. This can be used to deal with the API objects from a plug-in. Unstructured -// objects still have functioning TypeMeta features-- kind, version, etc. -// TODO: Make this object have easy access to field based accessors and settors for -// metadata and field mutatation. -type Unstructured struct { - // Object is a JSON compatible map with string, float, int, []interface{}, or map[string]interface{} - // children. - Object map[string]interface{} -} - -func getNestedField(obj map[string]interface{}, fields ...string) interface{} { - var val interface{} = obj - for _, field := range fields { - if _, ok := val.(map[string]interface{}); !ok { - return nil - } - val = val.(map[string]interface{})[field] - } - return val -} - -func getNestedString(obj map[string]interface{}, fields ...string) string { - if str, ok := getNestedField(obj, fields...).(string); ok { - return str - } - return "" -} - -func getNestedSlice(obj map[string]interface{}, fields ...string) []string { - if m, ok := getNestedField(obj, fields...).([]interface{}); ok { - strSlice := make([]string, 0, len(m)) - for _, v := range m { - if str, ok := v.(string); ok { - strSlice = append(strSlice, str) - } - } - return strSlice - } - return nil -} - -func getNestedMap(obj map[string]interface{}, fields ...string) map[string]string { - if m, ok := getNestedField(obj, fields...).(map[string]interface{}); ok { - strMap := make(map[string]string, len(m)) - for k, v := range m { - if str, ok := v.(string); ok { - strMap[k] = str - } - } - return strMap - } - return nil -} - -func setNestedField(obj map[string]interface{}, value interface{}, fields ...string) { - m := obj - if len(fields) > 1 { - for _, field := range fields[0 : len(fields)-1] { - if _, ok := m[field].(map[string]interface{}); !ok { - m[field] = make(map[string]interface{}) - } - m = m[field].(map[string]interface{}) - } - } - m[fields[len(fields)-1]] = value -} - -func setNestedSlice(obj map[string]interface{}, value []string, fields ...string) { - m := make([]interface{}, 0, len(value)) - for _, v := range value { - m = append(m, v) - } - setNestedField(obj, m, fields...) 
-} - -func setNestedMap(obj map[string]interface{}, value map[string]string, fields ...string) { - m := make(map[string]interface{}, len(value)) - for k, v := range value { - m[k] = v - } - setNestedField(obj, m, fields...) -} - -func (u *Unstructured) setNestedField(value interface{}, fields ...string) { - if u.Object == nil { - u.Object = make(map[string]interface{}) - } - setNestedField(u.Object, value, fields...) -} - -func (u *Unstructured) setNestedSlice(value []string, fields ...string) { - if u.Object == nil { - u.Object = make(map[string]interface{}) - } - setNestedSlice(u.Object, value, fields...) -} - -func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) { - if u.Object == nil { - u.Object = make(map[string]interface{}) - } - setNestedMap(u.Object, value, fields...) -} - -func extractOwnerReference(src interface{}) metatypes.OwnerReference { - v := src.(map[string]interface{}) - controllerPtr, ok := (getNestedField(v, "controller")).(*bool) - if !ok { - controllerPtr = nil - } else { - if controllerPtr != nil { - controller := *controllerPtr - controllerPtr = &controller - } - } - return metatypes.OwnerReference{ - Kind: getNestedString(v, "kind"), - Name: getNestedString(v, "name"), - APIVersion: getNestedString(v, "apiVersion"), - UID: (types.UID)(getNestedString(v, "uid")), - Controller: controllerPtr, - } -} - -func setOwnerReference(src metatypes.OwnerReference) map[string]interface{} { - ret := make(map[string]interface{}) - controllerPtr := src.Controller - if controllerPtr != nil { - controller := *controllerPtr - controllerPtr = &controller - } - setNestedField(ret, src.Kind, "kind") - setNestedField(ret, src.Name, "name") - setNestedField(ret, src.APIVersion, "apiVersion") - setNestedField(ret, string(src.UID), "uid") - setNestedField(ret, controllerPtr, "controller") - return ret -} - -func getOwnerReferences(object map[string]interface{}) ([]map[string]interface{}, error) { - field := getNestedField(object, "metadata", "ownerReferences") - if field == nil { - return nil, fmt.Errorf("cannot find field metadata.ownerReferences in %v", object) - } - ownerReferences, ok := field.([]map[string]interface{}) - if ok { - return ownerReferences, nil - } - // TODO: This is hacky... 
- interfaces, ok := field.([]interface{}) - if !ok { - return nil, fmt.Errorf("expect metadata.ownerReferences to be a slice in %#v", object) - } - ownerReferences = make([]map[string]interface{}, 0, len(interfaces)) - for i := 0; i < len(interfaces); i++ { - r, ok := interfaces[i].(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("expect element metadata.ownerReferences to be a map[string]interface{} in %#v", object) - } - ownerReferences = append(ownerReferences, r) - } - return ownerReferences, nil -} - -func (u *Unstructured) GetOwnerReferences() []metatypes.OwnerReference { - original, err := getOwnerReferences(u.Object) - if err != nil { - glog.V(6).Info(err) - return nil - } - ret := make([]metatypes.OwnerReference, 0, len(original)) - for i := 0; i < len(original); i++ { - ret = append(ret, extractOwnerReference(original[i])) - } - return ret -} - -func (u *Unstructured) SetOwnerReferences(references []metatypes.OwnerReference) { - var newReferences = make([]map[string]interface{}, 0, len(references)) - for i := 0; i < len(references); i++ { - newReferences = append(newReferences, setOwnerReference(references[i])) - } - u.setNestedField(newReferences, "metadata", "ownerReferences") -} - -func (u *Unstructured) GetAPIVersion() string { - return getNestedString(u.Object, "apiVersion") -} - -func (u *Unstructured) SetAPIVersion(version string) { - u.setNestedField(version, "apiVersion") -} - -func (u *Unstructured) GetKind() string { - return getNestedString(u.Object, "kind") -} - -func (u *Unstructured) SetKind(kind string) { - u.setNestedField(kind, "kind") -} - -func (u *Unstructured) GetNamespace() string { - return getNestedString(u.Object, "metadata", "namespace") -} - -func (u *Unstructured) SetNamespace(namespace string) { - u.setNestedField(namespace, "metadata", "namespace") -} - -func (u *Unstructured) GetName() string { - return getNestedString(u.Object, "metadata", "name") -} - -func (u *Unstructured) SetName(name string) { - u.setNestedField(name, "metadata", "name") -} - -func (u *Unstructured) GetGenerateName() string { - return getNestedString(u.Object, "metadata", "generateName") -} - -func (u *Unstructured) SetGenerateName(name string) { - u.setNestedField(name, "metadata", "generateName") -} - -func (u *Unstructured) GetUID() types.UID { - return types.UID(getNestedString(u.Object, "metadata", "uid")) -} - -func (u *Unstructured) SetUID(uid types.UID) { - u.setNestedField(string(uid), "metadata", "uid") -} - -func (u *Unstructured) GetResourceVersion() string { - return getNestedString(u.Object, "metadata", "resourceVersion") -} - -func (u *Unstructured) SetResourceVersion(version string) { - u.setNestedField(version, "metadata", "resourceVersion") -} - -func (u *Unstructured) GetSelfLink() string { - return getNestedString(u.Object, "metadata", "selfLink") -} - -func (u *Unstructured) SetSelfLink(selfLink string) { - u.setNestedField(selfLink, "metadata", "selfLink") -} - -func (u *Unstructured) GetCreationTimestamp() unversioned.Time { - var timestamp unversioned.Time - timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "creationTimestamp")) - return timestamp -} - -func (u *Unstructured) SetCreationTimestamp(timestamp unversioned.Time) { - ts, _ := timestamp.MarshalQueryParameter() - u.setNestedField(ts, "metadata", "creationTimestamp") -} - -func (u *Unstructured) GetDeletionTimestamp() *unversioned.Time { - var timestamp unversioned.Time - timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", 
"deletionTimestamp")) - if timestamp.IsZero() { - return nil - } - return ×tamp -} - -func (u *Unstructured) SetDeletionTimestamp(timestamp *unversioned.Time) { - ts, _ := timestamp.MarshalQueryParameter() - u.setNestedField(ts, "metadata", "deletionTimestamp") -} - -func (u *Unstructured) GetLabels() map[string]string { - return getNestedMap(u.Object, "metadata", "labels") -} - -func (u *Unstructured) SetLabels(labels map[string]string) { - u.setNestedMap(labels, "metadata", "labels") -} - -func (u *Unstructured) GetAnnotations() map[string]string { - return getNestedMap(u.Object, "metadata", "annotations") -} - -func (u *Unstructured) SetAnnotations(annotations map[string]string) { - u.setNestedMap(annotations, "metadata", "annotations") -} - -func (u *Unstructured) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { - u.SetAPIVersion(gvk.GroupVersion().String()) - u.SetKind(gvk.Kind) -} - -func (u *Unstructured) GroupVersionKind() unversioned.GroupVersionKind { - gv, err := unversioned.ParseGroupVersion(u.GetAPIVersion()) - if err != nil { - return unversioned.GroupVersionKind{} - } - gvk := gv.WithKind(u.GetKind()) - return gvk -} - -func (u *Unstructured) GetFinalizers() []string { - return getNestedSlice(u.Object, "metadata", "finalizers") -} - -func (u *Unstructured) SetFinalizers(finalizers []string) { - u.setNestedSlice(finalizers, "metadata", "finalizers") -} - -// UnstructuredList allows lists that do not have Golang structs -// registered to be manipulated generically. This can be used to deal -// with the API lists from a plug-in. -type UnstructuredList struct { - Object map[string]interface{} - - // Items is a list of unstructured objects. - Items []*Unstructured `json:"items"` -} - -func (u *UnstructuredList) setNestedField(value interface{}, fields ...string) { - if u.Object == nil { - u.Object = make(map[string]interface{}) - } - setNestedField(u.Object, value, fields...) -} - -func (u *UnstructuredList) GetAPIVersion() string { - return getNestedString(u.Object, "apiVersion") -} - -func (u *UnstructuredList) SetAPIVersion(version string) { - u.setNestedField(version, "apiVersion") -} - -func (u *UnstructuredList) GetKind() string { - return getNestedString(u.Object, "kind") -} - -func (u *UnstructuredList) SetKind(kind string) { - u.setNestedField(kind, "kind") -} - -func (u *UnstructuredList) GetResourceVersion() string { - return getNestedString(u.Object, "metadata", "resourceVersion") -} - -func (u *UnstructuredList) SetResourceVersion(version string) { - u.setNestedField(version, "metadata", "resourceVersion") -} - -func (u *UnstructuredList) GetSelfLink() string { - return getNestedString(u.Object, "metadata", "selfLink") -} - -func (u *UnstructuredList) SetSelfLink(selfLink string) { - u.setNestedField(selfLink, "metadata", "selfLink") -} - -func (u *UnstructuredList) SetGroupVersionKind(gvk unversioned.GroupVersionKind) { - u.SetAPIVersion(gvk.GroupVersion().String()) - u.SetKind(gvk.Kind) -} - -func (u *UnstructuredList) GroupVersionKind() unversioned.GroupVersionKind { - gv, err := unversioned.ParseGroupVersion(u.GetAPIVersion()) - if err != nil { - return unversioned.GroupVersionKind{} - } - gvk := gv.WithKind(u.GetKind()) - return gvk -} - -// VersionedObjects is used by Decoders to give callers a way to access all versions -// of an object during the decoding process. -type VersionedObjects struct { - // Objects is the set of objects retrieved during decoding, in order of conversion. - // The 0 index is the object as serialized on the wire. 
If conversion has occurred, - // other objects may be present. The right most object is the same as would be returned - // by a normal Decode call. - Objects []Object -} diff --git a/vendor/k8s.io/kubernetes/pkg/runtime/unstructured.go b/vendor/k8s.io/kubernetes/pkg/runtime/unstructured.go deleted file mode 100644 index 4a15142c1..000000000 --- a/vendor/k8s.io/kubernetes/pkg/runtime/unstructured.go +++ /dev/null @@ -1,199 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -import ( - gojson "encoding/json" - "errors" - "fmt" - "io" - "strings" - - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/util/json" -) - -// UnstructuredJSONScheme is capable of converting JSON data into the Unstructured -// type, which can be used for generic access to objects without a predefined scheme. -// TODO: move into serializer/json. -var UnstructuredJSONScheme Codec = unstructuredJSONScheme{} - -type unstructuredJSONScheme struct{} - -func (s unstructuredJSONScheme) Decode(data []byte, _ *unversioned.GroupVersionKind, obj Object) (Object, *unversioned.GroupVersionKind, error) { - var err error - if obj != nil { - err = s.decodeInto(data, obj) - } else { - obj, err = s.decode(data) - } - - if err != nil { - return nil, nil, err - } - - gvk := obj.GetObjectKind().GroupVersionKind() - if len(gvk.Kind) == 0 { - return nil, &gvk, NewMissingKindErr(string(data)) - } - - return obj, &gvk, nil -} - -func (unstructuredJSONScheme) Encode(obj Object, w io.Writer) error { - switch t := obj.(type) { - case *Unstructured: - return json.NewEncoder(w).Encode(t.Object) - case *UnstructuredList: - items := make([]map[string]interface{}, 0, len(t.Items)) - for _, i := range t.Items { - items = append(items, i.Object) - } - t.Object["items"] = items - defer func() { delete(t.Object, "items") }() - return json.NewEncoder(w).Encode(t.Object) - case *Unknown: - // TODO: Unstructured needs to deal with ContentType. - _, err := w.Write(t.Raw) - return err - default: - return json.NewEncoder(w).Encode(t) - } -} - -func (s unstructuredJSONScheme) decode(data []byte) (Object, error) { - type detector struct { - Items gojson.RawMessage - } - var det detector - if err := json.Unmarshal(data, &det); err != nil { - return nil, err - } - - if det.Items != nil { - list := &UnstructuredList{} - err := s.decodeToList(data, list) - return list, err - } - - // No Items field, so it wasn't a list. 
- unstruct := &Unstructured{} - err := s.decodeToUnstructured(data, unstruct) - return unstruct, err -} -func (s unstructuredJSONScheme) decodeInto(data []byte, obj Object) error { - switch x := obj.(type) { - case *Unstructured: - return s.decodeToUnstructured(data, x) - case *UnstructuredList: - return s.decodeToList(data, x) - case *VersionedObjects: - u := new(Unstructured) - err := s.decodeToUnstructured(data, u) - if err == nil { - x.Objects = []Object{u} - } - return err - default: - return json.Unmarshal(data, x) - } -} - -func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstructured) error { - m := make(map[string]interface{}) - if err := json.Unmarshal(data, &m); err != nil { - return err - } - - unstruct.Object = m - - return nil -} - -func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error { - type decodeList struct { - Items []gojson.RawMessage - } - - var dList decodeList - if err := json.Unmarshal(data, &dList); err != nil { - return err - } - - if err := json.Unmarshal(data, &list.Object); err != nil { - return err - } - - // For typed lists, e.g., a PodList, API server doesn't set each item's - // APIVersion and Kind. We need to set it. - listAPIVersion := list.GetAPIVersion() - listKind := list.GetKind() - itemKind := strings.TrimSuffix(listKind, "List") - - delete(list.Object, "items") - list.Items = nil - for _, i := range dList.Items { - unstruct := &Unstructured{} - if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil { - return err - } - // This is hacky. Set the item's Kind and APIVersion to those inferred - // from the List. - if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 { - unstruct.SetKind(itemKind) - unstruct.SetAPIVersion(listAPIVersion) - } - list.Items = append(list.Items, unstruct) - } - return nil -} - -// UnstructuredObjectConverter is an ObjectConverter for use with -// Unstructured objects. Since it has no schema or type information, -// it will only succeed for no-op conversions. This is provided as a -// sane implementation for APIs that require an object converter. -type UnstructuredObjectConverter struct{} - -func (UnstructuredObjectConverter) Convert(in, out interface{}) error { - unstructIn, ok := in.(*Unstructured) - if !ok { - return fmt.Errorf("input type %T in not valid for unstructured conversion", in) - } - - unstructOut, ok := out.(*Unstructured) - if !ok { - return fmt.Errorf("output type %T in not valid for unstructured conversion", out) - } - - // maybe deep copy the map? It is documented in the - // ObjectConverter interface that this function is not - // guaranteeed to not mutate the input. Or maybe set the input - // object to nil. 
- unstructOut.Object = unstructIn.Object - return nil -} - -func (UnstructuredObjectConverter) ConvertToVersion(in Object, outVersion unversioned.GroupVersion) (Object, error) { - if gvk := in.GetObjectKind().GroupVersionKind(); gvk.GroupVersion() != outVersion { - return nil, errors.New("unstructured converter cannot convert versions") - } - return in, nil -} - -func (UnstructuredObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { - return "", "", errors.New("unstructured cannot convert field labels") -} diff --git a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go b/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go deleted file mode 100644 index 097b1a6c2..000000000 --- a/vendor/k8s.io/kubernetes/pkg/security/podsecuritypolicy/util/util.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/apis/extensions" - "k8s.io/kubernetes/pkg/util/sets" -) - -const ( - ValidatedPSPAnnotation = "kubernetes.io/psp" -) - -func GetAllFSTypesExcept(exceptions ...string) sets.String { - fstypes := GetAllFSTypesAsSet() - for _, e := range exceptions { - fstypes.Delete(e) - } - return fstypes -} - -func GetAllFSTypesAsSet() sets.String { - fstypes := sets.NewString() - fstypes.Insert( - string(extensions.HostPath), - string(extensions.AzureFile), - string(extensions.Flocker), - string(extensions.FlexVolume), - string(extensions.EmptyDir), - string(extensions.GCEPersistentDisk), - string(extensions.AWSElasticBlockStore), - string(extensions.GitRepo), - string(extensions.Secret), - string(extensions.NFS), - string(extensions.ISCSI), - string(extensions.Glusterfs), - string(extensions.PersistentVolumeClaim), - string(extensions.RBD), - string(extensions.Cinder), - string(extensions.CephFS), - string(extensions.DownwardAPI), - string(extensions.FC), - string(extensions.ConfigMap), - string(extensions.VsphereVolume)) - return fstypes -} - -// getVolumeFSType gets the FSType for a volume. 
-func GetVolumeFSType(v api.Volume) (extensions.FSType, error) { - switch { - case v.HostPath != nil: - return extensions.HostPath, nil - case v.EmptyDir != nil: - return extensions.EmptyDir, nil - case v.GCEPersistentDisk != nil: - return extensions.GCEPersistentDisk, nil - case v.AWSElasticBlockStore != nil: - return extensions.AWSElasticBlockStore, nil - case v.GitRepo != nil: - return extensions.GitRepo, nil - case v.Secret != nil: - return extensions.Secret, nil - case v.NFS != nil: - return extensions.NFS, nil - case v.ISCSI != nil: - return extensions.ISCSI, nil - case v.Glusterfs != nil: - return extensions.Glusterfs, nil - case v.PersistentVolumeClaim != nil: - return extensions.PersistentVolumeClaim, nil - case v.RBD != nil: - return extensions.RBD, nil - case v.FlexVolume != nil: - return extensions.FlexVolume, nil - case v.Cinder != nil: - return extensions.Cinder, nil - case v.CephFS != nil: - return extensions.CephFS, nil - case v.Flocker != nil: - return extensions.Flocker, nil - case v.DownwardAPI != nil: - return extensions.DownwardAPI, nil - case v.FC != nil: - return extensions.FC, nil - case v.AzureFile != nil: - return extensions.AzureFile, nil - case v.ConfigMap != nil: - return extensions.ConfigMap, nil - case v.VsphereVolume != nil: - return extensions.VsphereVolume, nil - } - - return "", fmt.Errorf("unknown volume type for volume: %#v", v) -} - -// fsTypeToStringSet converts an FSType slice to a string set. -func FSTypeToStringSet(fsTypes []extensions.FSType) sets.String { - set := sets.NewString() - for _, v := range fsTypes { - set.Insert(string(v)) - } - return set -} - -// PSPAllowsAllVolumes checks for FSTypeAll in the psp's allowed volumes. -func PSPAllowsAllVolumes(psp *extensions.PodSecurityPolicy) bool { - return PSPAllowsFSType(psp, extensions.All) -} - -// PSPAllowsFSType is a utility for checking if a PSP allows a particular FSType. -// If all volumes are allowed then this will return true for any FSType passed. -func PSPAllowsFSType(psp *extensions.PodSecurityPolicy, fsType extensions.FSType) bool { - if psp == nil { - return false - } - - for _, v := range psp.Spec.Volumes { - if v == fsType || v == extensions.All { - return true - } - } - return false -} - -// FallsInRange is a utility to determine it the id falls in the valid range. -func FallsInRange(id int64, rng extensions.IDRange) bool { - return id >= rng.Min && id <= rng.Max -} diff --git a/vendor/k8s.io/kubernetes/pkg/storage/OWNERS b/vendor/k8s.io/kubernetes/pkg/storage/OWNERS deleted file mode 100644 index a57ded7f6..000000000 --- a/vendor/k8s.io/kubernetes/pkg/storage/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -assignees: - - lavalamp - - liggitt - - timothysc - - wojtek-t - - xiang90 diff --git a/vendor/k8s.io/kubernetes/pkg/storage/cacher.go b/vendor/k8s.io/kubernetes/pkg/storage/cacher.go deleted file mode 100644 index 7bbb5717a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/storage/cacher.go +++ /dev/null @@ -1,628 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package storage - -import ( - "fmt" - "net/http" - "reflect" - "strconv" - "sync" - "time" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/rest" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/client/cache" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" - utilruntime "k8s.io/kubernetes/pkg/util/runtime" - "k8s.io/kubernetes/pkg/util/wait" - "k8s.io/kubernetes/pkg/watch" - - "github.com/golang/glog" - "golang.org/x/net/context" -) - -// CacherConfig contains the configuration for a given Cache. -type CacherConfig struct { - // Maximum size of the history cached in memory. - CacheCapacity int - - // An underlying storage.Interface. - Storage Interface - - // An underlying storage.Versioner. - Versioner Versioner - - // The Cache will be caching objects of a given Type and assumes that they - // are all stored under ResourcePrefix directory in the underlying database. - Type interface{} - ResourcePrefix string - - // KeyFunc is used to get a key in the underyling storage for a given object. - KeyFunc func(runtime.Object) (string, error) - - // NewList is a function that creates new empty object storing a list of - // objects of type Type. - NewListFunc func() runtime.Object -} - -// Cacher is responsible for serving WATCH and LIST requests for a given -// resource from its internal cache and updating its cache in the background -// based on the underlying storage contents. -// Cacher implements storage.Interface (although most of the calls are just -// delegated to the underlying storage). -type Cacher struct { - sync.RWMutex - - // Each user-facing method that is not simply redirected to the underlying - // storage has to read-lock on this mutex before starting any processing. - // This is necessary to prevent users from accessing structures that are - // uninitialized or are being repopulated right now. - // NOTE: We cannot easily reuse the main mutex for it due to multi-threaded - // interactions of Cacher with the underlying WatchCache. Since Cacher is - // caling WatchCache directly and WatchCache is calling Cacher methods - // via its OnEvent and OnReplace hooks, we explicitly assume that if mutexes - // of both structures are held, the one from WatchCache is acquired first - // to avoid deadlocks. Unfortunately, forcing this rule in startCaching - // would be very difficult and introducing one more mutex seems to be much - // easier. - usable sync.RWMutex - - // Underlying storage.Interface. - storage Interface - - // "sliding window" of recent changes of objects and the current state. - watchCache *watchCache - reflector *cache.Reflector - - // Registered watchers. - watcherIdx int - watchers map[int]*cacheWatcher - - // Versioner is used to handle resource versions. - versioner Versioner - - // keyFunc is used to get a key in the underyling storage for a given object. - keyFunc func(runtime.Object) (string, error) - - // Handling graceful termination. - stopLock sync.RWMutex - stopped bool - stopCh chan struct{} - stopWg sync.WaitGroup -} - -// Create a new Cacher responsible from service WATCH and LIST requests from its -// internal cache and updating its cache in the background based on the given -// configuration. 
-func NewCacher( - storage Interface, - capacity int, - versioner Versioner, - objectType runtime.Object, - resourcePrefix string, - scopeStrategy rest.NamespaceScopedStrategy, - newListFunc func() runtime.Object) Interface { - config := CacherConfig{ - CacheCapacity: capacity, - Storage: storage, - Versioner: versioner, - Type: objectType, - ResourcePrefix: resourcePrefix, - NewListFunc: newListFunc, - } - if scopeStrategy.NamespaceScoped() { - config.KeyFunc = func(obj runtime.Object) (string, error) { - return NamespaceKeyFunc(resourcePrefix, obj) - } - } else { - config.KeyFunc = func(obj runtime.Object) (string, error) { - return NoNamespaceKeyFunc(resourcePrefix, obj) - } - } - return NewCacherFromConfig(config) -} - -// Create a new Cacher responsible from service WATCH and LIST requests from its -// internal cache and updating its cache in the background based on the given -// configuration. -func NewCacherFromConfig(config CacherConfig) *Cacher { - watchCache := newWatchCache(config.CacheCapacity) - listerWatcher := newCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc) - - // Give this error when it is constructed rather than when you get the - // first watch item, because it's much easier to track down that way. - if obj, ok := config.Type.(runtime.Object); ok { - if err := runtime.CheckCodec(config.Storage.Codec(), obj); err != nil { - panic("storage codec doesn't seem to match given type: " + err.Error()) - } - } - - cacher := &Cacher{ - usable: sync.RWMutex{}, - storage: config.Storage, - watchCache: watchCache, - reflector: cache.NewReflector(listerWatcher, config.Type, watchCache, 0), - watcherIdx: 0, - watchers: make(map[int]*cacheWatcher), - versioner: config.Versioner, - keyFunc: config.KeyFunc, - stopped: false, - // We need to (potentially) stop both: - // - wait.Until go-routine - // - reflector.ListAndWatch - // and there are no guarantees on the order that they will stop. - // So we will be simply closing the channel, and synchronizing on the WaitGroup. - stopCh: make(chan struct{}), - stopWg: sync.WaitGroup{}, - } - // See startCaching method for explanation and where this is unlocked. - cacher.usable.Lock() - watchCache.SetOnEvent(cacher.processEvent) - - stopCh := cacher.stopCh - cacher.stopWg.Add(1) - go func() { - defer cacher.stopWg.Done() - wait.Until( - func() { - if !cacher.isStopped() { - cacher.startCaching(stopCh) - } - }, time.Second, stopCh, - ) - }() - return cacher -} - -func (c *Cacher) startCaching(stopChannel <-chan struct{}) { - // The 'usable' lock is always 'RLock'able when it is safe to use the cache. - // It is safe to use the cache after a successful list until a disconnection. - // We start with usable (write) locked. The below OnReplace function will - // unlock it after a successful list. The below defer will then re-lock - // it when this function exits (always due to disconnection), only if - // we actually got a successful list. This cycle will repeat as needed. - successfulList := false - c.watchCache.SetOnReplace(func() { - successfulList = true - c.usable.Unlock() - }) - defer func() { - if successfulList { - c.usable.Lock() - } - }() - - c.terminateAllWatchers() - // Note that since onReplace may be not called due to errors, we explicitly - // need to retry it on errors under lock. - // Also note that startCaching is called in a loop, so there's no need - // to have another loop here. 
- if err := c.reflector.ListAndWatch(stopChannel); err != nil { - glog.Errorf("unexpected ListAndWatch error: %v", err) - } -} - -// Implements storage.Interface. -func (c *Cacher) Backends(ctx context.Context) []string { - return c.storage.Backends(ctx) -} - -// Implements storage.Interface. -func (c *Cacher) Versioner() Versioner { - return c.storage.Versioner() -} - -// Implements storage.Interface. -func (c *Cacher) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error { - return c.storage.Create(ctx, key, obj, out, ttl) -} - -// Implements storage.Interface. -func (c *Cacher) Delete(ctx context.Context, key string, out runtime.Object, preconditions *Preconditions) error { - return c.storage.Delete(ctx, key, out, preconditions) -} - -// Implements storage.Interface. -func (c *Cacher) Watch(ctx context.Context, key string, resourceVersion string, filter FilterFunc) (watch.Interface, error) { - watchRV, err := ParseWatchResourceVersion(resourceVersion) - if err != nil { - return nil, err - } - - // Do NOT allow Watch to start when the underlying structures are not propagated. - c.usable.RLock() - defer c.usable.RUnlock() - - // We explicitly use thread unsafe version and do locking ourself to ensure that - // no new events will be processed in the meantime. The watchCache will be unlocked - // on return from this function. - // Note that we cannot do it under Cacher lock, to avoid a deadlock, since the - // underlying watchCache is calling processEvent under its lock. - c.watchCache.RLock() - defer c.watchCache.RUnlock() - initEvents, err := c.watchCache.GetAllEventsSinceThreadUnsafe(watchRV) - if err != nil { - // To match the uncached watch implementation, once we have passed authn/authz/admission, - // and successfully parsed a resource version, other errors must fail with a watch event of type ERROR, - // rather than a directly returned error. - return newErrWatcher(err), nil - } - - c.Lock() - defer c.Unlock() - watcher := newCacheWatcher(watchRV, initEvents, filterFunction(key, c.keyFunc, filter), forgetWatcher(c, c.watcherIdx)) - c.watchers[c.watcherIdx] = watcher - c.watcherIdx++ - return watcher, nil -} - -// Implements storage.Interface. -func (c *Cacher) WatchList(ctx context.Context, key string, resourceVersion string, filter FilterFunc) (watch.Interface, error) { - return c.Watch(ctx, key, resourceVersion, filter) -} - -// Implements storage.Interface. -func (c *Cacher) Get(ctx context.Context, key string, objPtr runtime.Object, ignoreNotFound bool) error { - return c.storage.Get(ctx, key, objPtr, ignoreNotFound) -} - -// Implements storage.Interface. -func (c *Cacher) GetToList(ctx context.Context, key string, filter FilterFunc, listObj runtime.Object) error { - return c.storage.GetToList(ctx, key, filter, listObj) -} - -// Implements storage.Interface. -func (c *Cacher) List(ctx context.Context, key string, resourceVersion string, filter FilterFunc, listObj runtime.Object) error { - if resourceVersion == "" { - // If resourceVersion is not specified, serve it from underlying - // storage (for backward compatibility). - return c.storage.List(ctx, key, resourceVersion, filter, listObj) - } - - // If resourceVersion is specified, serve it from cache. - // It's guaranteed that the returned value is at least that - // fresh as the given resourceVersion. 
- - listRV, err := ParseListResourceVersion(resourceVersion) - if err != nil { - return err - } - - // To avoid situation when List is processed before the underlying - // watchCache is propagated for the first time, we acquire and immediately - // release the 'usable' lock. - // We don't need to hold it all the time, because watchCache is thread-safe - // and it would complicate already very difficult locking pattern. - c.usable.RLock() - c.usable.RUnlock() - - // List elements from cache, with at least 'listRV'. - listPtr, err := meta.GetItemsPtr(listObj) - if err != nil { - return err - } - listVal, err := conversion.EnforcePtr(listPtr) - if err != nil || listVal.Kind() != reflect.Slice { - return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind()) - } - filterFunc := filterFunction(key, c.keyFunc, filter) - - objs, readResourceVersion, err := c.watchCache.WaitUntilFreshAndList(listRV) - if err != nil { - return fmt.Errorf("failed to wait for fresh list: %v", err) - } - for _, obj := range objs { - object, ok := obj.(runtime.Object) - if !ok { - return fmt.Errorf("non runtime.Object returned from storage: %v", obj) - } - if filterFunc(object) { - listVal.Set(reflect.Append(listVal, reflect.ValueOf(object).Elem())) - } - } - if c.versioner != nil { - if err := c.versioner.UpdateList(listObj, readResourceVersion); err != nil { - return err - } - } - return nil -} - -// Implements storage.Interface. -func (c *Cacher) GuaranteedUpdate(ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool, preconditions *Preconditions, tryUpdate UpdateFunc) error { - return c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate) -} - -// Implements storage.Interface. -func (c *Cacher) Codec() runtime.Codec { - return c.storage.Codec() -} - -func (c *Cacher) processEvent(event watchCacheEvent) { - c.Lock() - defer c.Unlock() - for _, watcher := range c.watchers { - watcher.add(event) - } -} - -func (c *Cacher) terminateAllWatchers() { - c.Lock() - defer c.Unlock() - for key, watcher := range c.watchers { - delete(c.watchers, key) - watcher.stop() - } -} - -func (c *Cacher) isStopped() bool { - c.stopLock.RLock() - defer c.stopLock.RUnlock() - return c.stopped -} - -func (c *Cacher) Stop() { - c.stopLock.Lock() - c.stopped = true - c.stopLock.Unlock() - close(c.stopCh) - c.stopWg.Wait() -} - -func forgetWatcher(c *Cacher, index int) func(bool) { - return func(lock bool) { - if lock { - c.Lock() - defer c.Unlock() - } - // It's possible that the watcher is already not in the map (e.g. in case of - // simulaneous Stop() and terminateAllWatchers(), but it doesn't break anything. - delete(c.watchers, index) - } -} - -func filterFunction(key string, keyFunc func(runtime.Object) (string, error), filter FilterFunc) FilterFunc { - return func(obj runtime.Object) bool { - objKey, err := keyFunc(obj) - if err != nil { - glog.Errorf("invalid object for filter: %v", obj) - return false - } - if !hasPathPrefix(objKey, key) { - return false - } - return filter(obj) - } -} - -// Returns resource version to which the underlying cache is synced. -func (c *Cacher) LastSyncResourceVersion() (uint64, error) { - // To avoid situation when LastSyncResourceVersion is processed before the - // underlying watchCache is propagated, we acquire 'usable' lock. 
- c.usable.RLock() - defer c.usable.RUnlock() - - c.RLock() - defer c.RUnlock() - - resourceVersion := c.reflector.LastSyncResourceVersion() - if resourceVersion == "" { - return 0, nil - } - return strconv.ParseUint(resourceVersion, 10, 64) -} - -// cacherListerWatcher opaques storage.Interface to expose cache.ListerWatcher. -type cacherListerWatcher struct { - storage Interface - resourcePrefix string - newListFunc func() runtime.Object -} - -func newCacherListerWatcher(storage Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher { - return &cacherListerWatcher{ - storage: storage, - resourcePrefix: resourcePrefix, - newListFunc: newListFunc, - } -} - -// Implements cache.ListerWatcher interface. -func (lw *cacherListerWatcher) List(options api.ListOptions) (runtime.Object, error) { - list := lw.newListFunc() - if err := lw.storage.List(context.TODO(), lw.resourcePrefix, "", Everything, list); err != nil { - return nil, err - } - return list, nil -} - -// Implements cache.ListerWatcher interface. -func (lw *cacherListerWatcher) Watch(options api.ListOptions) (watch.Interface, error) { - return lw.storage.WatchList(context.TODO(), lw.resourcePrefix, options.ResourceVersion, Everything) -} - -// cacherWatch implements watch.Interface to return a single error -type errWatcher struct { - result chan watch.Event -} - -func newErrWatcher(err error) *errWatcher { - // Create an error event - errEvent := watch.Event{Type: watch.Error} - switch err := err.(type) { - case runtime.Object: - errEvent.Object = err - case *errors.StatusError: - errEvent.Object = &err.ErrStatus - default: - errEvent.Object = &unversioned.Status{ - Status: unversioned.StatusFailure, - Message: err.Error(), - Reason: unversioned.StatusReasonInternalError, - Code: http.StatusInternalServerError, - } - } - - // Create a watcher with room for a single event, populate it, and close the channel - watcher := &errWatcher{result: make(chan watch.Event, 1)} - watcher.result <- errEvent - close(watcher.result) - - return watcher -} - -// Implements watch.Interface. -func (c *errWatcher) ResultChan() <-chan watch.Event { - return c.result -} - -// Implements watch.Interface. -func (c *errWatcher) Stop() { - // no-op -} - -// cacherWatch implements watch.Interface -type cacheWatcher struct { - sync.Mutex - input chan watchCacheEvent - result chan watch.Event - filter FilterFunc - stopped bool - forget func(bool) -} - -func newCacheWatcher(resourceVersion uint64, initEvents []watchCacheEvent, filter FilterFunc, forget func(bool)) *cacheWatcher { - watcher := &cacheWatcher{ - input: make(chan watchCacheEvent, 10), - result: make(chan watch.Event, 10), - filter: filter, - stopped: false, - forget: forget, - } - go watcher.process(initEvents, resourceVersion) - return watcher -} - -// Implements watch.Interface. -func (c *cacheWatcher) ResultChan() <-chan watch.Event { - return c.result -} - -// Implements watch.Interface. -func (c *cacheWatcher) Stop() { - c.forget(true) - c.stop() -} - -func (c *cacheWatcher) stop() { - c.Lock() - defer c.Unlock() - if !c.stopped { - c.stopped = true - close(c.input) - } -} - -var timerPool sync.Pool - -func (c *cacheWatcher) add(event watchCacheEvent) { - // Try to send the event immediately, without blocking. - select { - case c.input <- event: - return - default: - } - - // OK, block sending, but only for up to 5 seconds. - // cacheWatcher.add is called very often, so arrange - // to reuse timers instead of constantly allocating. 
- const timeout = 5 * time.Second - t, ok := timerPool.Get().(*time.Timer) - if ok { - t.Reset(timeout) - } else { - t = time.NewTimer(timeout) - } - defer timerPool.Put(t) - - select { - case c.input <- event: - stopped := t.Stop() - if !stopped { - // Consume triggered (but not yet received) timer event - // so that future reuse does not get a spurious timeout. - <-t.C - } - case <-t.C: - // This means that we couldn't send event to that watcher. - // Since we don't want to block on it infinitely, - // we simply terminate it. - c.forget(false) - c.stop() - } -} - -func (c *cacheWatcher) sendWatchCacheEvent(event watchCacheEvent) { - curObjPasses := event.Type != watch.Deleted && c.filter(event.Object) - oldObjPasses := false - if event.PrevObject != nil { - oldObjPasses = c.filter(event.PrevObject) - } - if !curObjPasses && !oldObjPasses { - // Watcher is not interested in that object. - return - } - - object, err := api.Scheme.Copy(event.Object) - if err != nil { - glog.Errorf("unexpected copy error: %v", err) - return - } - switch { - case curObjPasses && !oldObjPasses: - c.result <- watch.Event{Type: watch.Added, Object: object} - case curObjPasses && oldObjPasses: - c.result <- watch.Event{Type: watch.Modified, Object: object} - case !curObjPasses && oldObjPasses: - c.result <- watch.Event{Type: watch.Deleted, Object: object} - } -} - -func (c *cacheWatcher) process(initEvents []watchCacheEvent, resourceVersion uint64) { - defer utilruntime.HandleCrash() - - for _, event := range initEvents { - c.sendWatchCacheEvent(event) - } - defer close(c.result) - defer c.Stop() - for { - event, ok := <-c.input - if !ok { - return - } - // only send events newer than resourceVersion - if event.ResourceVersion > resourceVersion { - c.sendWatchCacheEvent(event) - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/storage/doc.go b/vendor/k8s.io/kubernetes/pkg/storage/doc.go deleted file mode 100644 index dca0d5b70..000000000 --- a/vendor/k8s.io/kubernetes/pkg/storage/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Interfaces for database-related operations. -package storage diff --git a/vendor/k8s.io/kubernetes/pkg/storage/errors.go b/vendor/k8s.io/kubernetes/pkg/storage/errors.go deleted file mode 100644 index 61b3cba52..000000000 --- a/vendor/k8s.io/kubernetes/pkg/storage/errors.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package storage - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/util/validation/field" -) - -const ( - ErrCodeKeyNotFound int = iota + 1 - ErrCodeKeyExists - ErrCodeResourceVersionConflicts - ErrCodeInvalidObj - ErrCodeUnreachable -) - -var errCodeToMessage = map[int]string{ - ErrCodeKeyNotFound: "key not found", - ErrCodeKeyExists: "key exists", - ErrCodeResourceVersionConflicts: "resource version conflicts", - ErrCodeInvalidObj: "invalid object", - ErrCodeUnreachable: "server unreachable", -} - -func NewKeyNotFoundError(key string, rv int64) *StorageError { - return &StorageError{ - Code: ErrCodeKeyNotFound, - Key: key, - ResourceVersion: rv, - } -} - -func NewKeyExistsError(key string, rv int64) *StorageError { - return &StorageError{ - Code: ErrCodeKeyExists, - Key: key, - ResourceVersion: rv, - } -} - -func NewResourceVersionConflictsError(key string, rv int64) *StorageError { - return &StorageError{ - Code: ErrCodeResourceVersionConflicts, - Key: key, - ResourceVersion: rv, - } -} - -func NewUnreachableError(key string, rv int64) *StorageError { - return &StorageError{ - Code: ErrCodeUnreachable, - Key: key, - ResourceVersion: rv, - } -} - -func NewInvalidObjError(key, msg string) *StorageError { - return &StorageError{ - Code: ErrCodeInvalidObj, - Key: key, - AdditionalErrorMsg: msg, - } -} - -type StorageError struct { - Code int - Key string - ResourceVersion int64 - AdditionalErrorMsg string -} - -func (e *StorageError) Error() string { - return fmt.Sprintf("StorageError: %s, Code: %d, Key: %s, ResourceVersion: %d, AdditionalErrorMsg: %s", - errCodeToMessage[e.Code], e.Code, e.Key, e.ResourceVersion, e.AdditionalErrorMsg) -} - -// IsNotFound returns true if and only if err is "key" not found error. -func IsNotFound(err error) bool { - return isErrCode(err, ErrCodeKeyNotFound) -} - -// IsNodeExist returns true if and only if err is an node already exist error. -func IsNodeExist(err error) bool { - return isErrCode(err, ErrCodeKeyExists) -} - -// IsUnreachable returns true if and only if err indicates the server could not be reached. -func IsUnreachable(err error) bool { - return isErrCode(err, ErrCodeUnreachable) -} - -// IsTestFailed returns true if and only if err is a write conflict. -func IsTestFailed(err error) bool { - return isErrCode(err, ErrCodeResourceVersionConflicts, ErrCodeInvalidObj) -} - -// IsInvalidUID returns true if and only if err is invalid UID error -func IsInvalidObj(err error) bool { - return isErrCode(err, ErrCodeInvalidObj) -} - -func isErrCode(err error, codes ...int) bool { - if err == nil { - return false - } - if e, ok := err.(*StorageError); ok { - for _, code := range codes { - if e.Code == code { - return true - } - } - } - return false -} - -// InvalidError is generated when an error caused by invalid API object occurs -// in the storage package. -type InvalidError struct { - Errs field.ErrorList -} - -func (e InvalidError) Error() string { - return e.Errs.ToAggregate().Error() -} - -// IsInvalidError returns true if and only if err is an InvalidError. -func IsInvalidError(err error) bool { - _, ok := err.(InvalidError) - return ok -} - -func NewInvalidError(errors field.ErrorList) InvalidError { - return InvalidError{errors} -} - -// InternalError is generated when an error occurs in the storage package, i.e., -// not from the underlying storage backend (e.g., etcd). 
-type InternalError struct { - Reason string -} - -func (e InternalError) Error() string { - return e.Reason -} - -// IsInternalError returns true if and only if err is an InternalError. -func IsInternalError(err error) bool { - _, ok := err.(InternalError) - return ok -} - -func NewInternalError(reason string) InternalError { - return InternalError{reason} -} - -func NewInternalErrorf(format string, a ...interface{}) InternalError { - return InternalError{fmt.Sprintf(format, a)} -} diff --git a/vendor/k8s.io/kubernetes/pkg/storage/interfaces.go b/vendor/k8s.io/kubernetes/pkg/storage/interfaces.go deleted file mode 100644 index 89290e29a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/storage/interfaces.go +++ /dev/null @@ -1,171 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package storage - -import ( - "golang.org/x/net/context" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/watch" -) - -// Versioner abstracts setting and retrieving metadata fields from database response -// onto the object ot list. -type Versioner interface { - // UpdateObject sets storage metadata into an API object. Returns an error if the object - // cannot be updated correctly. May return nil if the requested object does not need metadata - // from database. - UpdateObject(obj runtime.Object, resourceVersion uint64) error - // UpdateList sets the resource version into an API list object. Returns an error if the object - // cannot be updated correctly. May return nil if the requested object does not need metadata - // from database. - UpdateList(obj runtime.Object, resourceVersion uint64) error - // ObjectResourceVersion returns the resource version (for persistence) of the specified object. - // Should return an error if the specified object does not have a persistable version. - ObjectResourceVersion(obj runtime.Object) (uint64, error) -} - -// ResponseMeta contains information about the database metadata that is associated with -// an object. It abstracts the actual underlying objects to prevent coupling with concrete -// database and to improve testability. -type ResponseMeta struct { - // TTL is the time to live of the node that contained the returned object. It may be - // zero or negative in some cases (objects may be expired after the requested - // expiration time due to server lag). - TTL int64 - // The resource version of the node that contained the returned object. - ResourceVersion uint64 -} - -// FilterFunc is a predicate which takes an API object and returns true -// if and only if the object should remain in the set. -type FilterFunc func(obj runtime.Object) bool - -// Everything is a FilterFunc which accepts all objects. -func Everything(runtime.Object) bool { - return true -} - -// Pass an UpdateFunc to Interface.GuaranteedUpdate to make an update -// that is guaranteed to succeed. -// See the comment for GuaranteedUpdate for more details. 
-type UpdateFunc func(input runtime.Object, res ResponseMeta) (output runtime.Object, ttl *uint64, err error) - -// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. -type Preconditions struct { - // Specifies the target UID. - UID *types.UID `json:"uid,omitempty"` -} - -// NewUIDPreconditions returns a Preconditions with UID set. -func NewUIDPreconditions(uid string) *Preconditions { - u := types.UID(uid) - return &Preconditions{UID: &u} -} - -// Interface offers a common interface for object marshaling/unmarshling operations and -// hides all the storage-related operations behind it. -type Interface interface { - // Returns list of servers addresses of the underyling database. - // TODO: This method is used only in a single place. Consider refactoring and getting rid - // of this method from the interface. - Backends(ctx context.Context) []string - - // Returns Versioner associated with this interface. - Versioner() Versioner - - // Create adds a new object at a key unless it already exists. 'ttl' is time-to-live - // in seconds (0 means forever). If no error is returned and out is not nil, out will be - // set to the read value from database. - Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error - - // Delete removes the specified key and returns the value that existed at that spot. - // If key didn't exist, it will return NotFound storage error. - Delete(ctx context.Context, key string, out runtime.Object, preconditions *Preconditions) error - - // Watch begins watching the specified key. Events are decoded into API objects, - // and any items passing 'filter' are sent down to returned watch.Interface. - // resourceVersion may be used to specify what version to begin watching, - // which should be the current resourceVersion, and no longer rv+1 - // (e.g. reconnecting without missing any updates). - Watch(ctx context.Context, key string, resourceVersion string, filter FilterFunc) (watch.Interface, error) - - // WatchList begins watching the specified key's items. Items are decoded into API - // objects and any item passing 'filter' are sent down to returned watch.Interface. - // resourceVersion may be used to specify what version to begin watching, - // which should be the current resourceVersion, and no longer rv+1 - // (e.g. reconnecting without missing any updates). - WatchList(ctx context.Context, key string, resourceVersion string, filter FilterFunc) (watch.Interface, error) - - // Get unmarshals json found at key into objPtr. On a not found error, will either - // return a zero object of the requested type, or an error, depending on ignoreNotFound. - // Treats empty responses and nil response nodes exactly like a not found error. - Get(ctx context.Context, key string, objPtr runtime.Object, ignoreNotFound bool) error - - // GetToList unmarshals json found at key and opaque it into *List api object - // (an object that satisfies the runtime.IsList definition). - GetToList(ctx context.Context, key string, filter FilterFunc, listObj runtime.Object) error - - // List unmarshalls jsons found at directory defined by key and opaque them - // into *List api object (an object that satisfies runtime.IsList definition). - // The returned contents may be delayed, but it is guaranteed that they will - // be have at least 'resourceVersion'. 
- List(ctx context.Context, key string, resourceVersion string, filter FilterFunc, listObj runtime.Object) error - - // GuaranteedUpdate keeps calling 'tryUpdate()' to update key 'key' (of type 'ptrToType') - // retrying the update until success if there is index conflict. - // Note that object passed to tryUpdate may change across invocations of tryUpdate() if - // other writers are simultaneously updating it, so tryUpdate() needs to take into account - // the current contents of the object when deciding how the update object should look. - // If the key doesn't exist, it will return NotFound storage error if ignoreNotFound=false - // or zero value in 'ptrToType' parameter otherwise. - // If the object to update has the same value as previous, it won't do any update - // but will return the object in 'ptrToType' parameter. - // - // Example: - // - // s := /* implementation of Interface */ - // err := s.GuaranteedUpdate( - // "myKey", &MyType{}, true, - // func(input runtime.Object, res ResponseMeta) (runtime.Object, *uint64, error) { - // // Before each incovation of the user defined function, "input" is reset to - // // current contents for "myKey" in database. - // curr := input.(*MyType) // Guaranteed to succeed. - // - // // Make the modification - // curr.Counter++ - // - // // Return the modified object - return an error to stop iterating. Return - // // a uint64 to alter the TTL on the object, or nil to keep it the same value. - // return cur, nil, nil - // } - // }) - GuaranteedUpdate(ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool, precondtions *Preconditions, tryUpdate UpdateFunc) error - - // Codec provides access to the underlying codec being used by the implementation. - Codec() runtime.Codec -} - -// Config interface allows storage tiers to generate the proper storage.interface -// and reduce the dependencies to encapsulate storage. -type Config interface { - // Creates the Interface base on ConfigObject - NewStorage() (Interface, error) - - // This function is used to enforce membership, and return the underlying type - GetType() string -} diff --git a/vendor/k8s.io/kubernetes/pkg/storage/util.go b/vendor/k8s.io/kubernetes/pkg/storage/util.go deleted file mode 100644 index 6f208f833..000000000 --- a/vendor/k8s.io/kubernetes/pkg/storage/util.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package storage - -import ( - "fmt" - "strconv" - "strings" - - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/api/validation" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util/validation/field" -) - -type SimpleUpdateFunc func(runtime.Object) (runtime.Object, error) - -// SimpleUpdateFunc converts SimpleUpdateFunc into UpdateFunc -func SimpleUpdate(fn SimpleUpdateFunc) UpdateFunc { - return func(input runtime.Object, _ ResponseMeta) (runtime.Object, *uint64, error) { - out, err := fn(input) - return out, nil, err - } -} - -// ParseWatchResourceVersion takes a resource version argument and converts it to -// the etcd version we should pass to helper.Watch(). Because resourceVersion is -// an opaque value, the default watch behavior for non-zero watch is to watch -// the next value (if you pass "1", you will see updates from "2" onwards). -func ParseWatchResourceVersion(resourceVersion string) (uint64, error) { - if resourceVersion == "" || resourceVersion == "0" { - return 0, nil - } - version, err := strconv.ParseUint(resourceVersion, 10, 64) - if err != nil { - return 0, NewInvalidError(field.ErrorList{ - // Validation errors are supposed to return version-specific field - // paths, but this is probably close enough. - field.Invalid(field.NewPath("resourceVersion"), resourceVersion, err.Error()), - }) - } - return version, nil -} - -// ParseListResourceVersion takes a resource version argument and converts it to -// the etcd version. -func ParseListResourceVersion(resourceVersion string) (uint64, error) { - if resourceVersion == "" { - return 0, nil - } - version, err := strconv.ParseUint(resourceVersion, 10, 64) - return version, err -} - -func NamespaceKeyFunc(prefix string, obj runtime.Object) (string, error) { - meta, err := meta.Accessor(obj) - if err != nil { - return "", err - } - name := meta.GetName() - if msgs := validation.IsValidPathSegmentName(name); len(msgs) != 0 { - return "", fmt.Errorf("invalid name: %v", msgs) - } - return prefix + "/" + meta.GetNamespace() + "/" + name, nil -} - -func NoNamespaceKeyFunc(prefix string, obj runtime.Object) (string, error) { - meta, err := meta.Accessor(obj) - if err != nil { - return "", err - } - name := meta.GetName() - if msgs := validation.IsValidPathSegmentName(name); len(msgs) != 0 { - return "", fmt.Errorf("invalid name: %v", msgs) - } - return prefix + "/" + name, nil -} - -// hasPathPrefix returns true if the string matches pathPrefix exactly, or if is prefixed with pathPrefix at a path segment boundary -func hasPathPrefix(s, pathPrefix string) bool { - // Short circuit if s doesn't contain the prefix at all - if !strings.HasPrefix(s, pathPrefix) { - return false - } - - pathPrefixLength := len(pathPrefix) - - if len(s) == pathPrefixLength { - // Exact match - return true - } - if strings.HasSuffix(pathPrefix, "/") { - // pathPrefix already ensured a path segment boundary - return true - } - if s[pathPrefixLength:pathPrefixLength+1] == "/" { - // The next character in s is a path segment boundary - // Check this instead of normalizing pathPrefix to avoid allocating on every call - return true - } - return false -} diff --git a/vendor/k8s.io/kubernetes/pkg/storage/watch_cache.go b/vendor/k8s.io/kubernetes/pkg/storage/watch_cache.go deleted file mode 100644 index 3e5ce5d73..000000000 --- a/vendor/k8s.io/kubernetes/pkg/storage/watch_cache.go +++ /dev/null @@ -1,331 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package storage - -import ( - "fmt" - "sort" - "strconv" - "sync" - "time" - - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/client/cache" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" - "k8s.io/kubernetes/pkg/watch" -) - -const ( - // MaximumListWait determines how long we're willing to wait for a - // list if a client specified a resource version in the future. - MaximumListWait = 60 * time.Second -) - -// watchCacheEvent is a single "watch event" that is send to users of -// watchCache. Additionally to a typical "watch.Event" it contains -// the previous value of the object to enable proper filtering in the -// upper layers. -type watchCacheEvent struct { - Type watch.EventType - Object runtime.Object - PrevObject runtime.Object - ResourceVersion uint64 -} - -// watchCacheElement is a single "watch event" stored in a cache. -// It contains the resource version of the object and the object -// itself. -type watchCacheElement struct { - resourceVersion uint64 - watchCacheEvent watchCacheEvent -} - -// watchCache implements a Store interface. -// However, it depends on the elements implementing runtime.Object interface. -// -// watchCache is a "sliding window" (with a limited capacity) of objects -// observed from a watch. -type watchCache struct { - sync.RWMutex - - // Condition on which lists are waiting for the fresh enough - // resource version. - cond *sync.Cond - - // Maximum size of history window. - capacity int - - // cache is used a cyclic buffer - its first element (with the smallest - // resourceVersion) is defined by startIndex, its last element is defined - // by endIndex (if cache is full it will be startIndex + capacity). - // Both startIndex and endIndex can be greater than buffer capacity - - // you should always apply modulo capacity to get an index in cache array. - cache []watchCacheElement - startIndex int - endIndex int - - // store will effectively support LIST operation from the "end of cache - // history" i.e. from the moment just after the newest cached watched event. - // It is necessary to effectively allow clients to start watching at now. - store cache.Store - - // ResourceVersion up to which the watchCache is propagated. - resourceVersion uint64 - - // This handler is run at the end of every successful Replace() method. - onReplace func() - - // This handler is run at the end of every Add/Update/Delete method - // and additionally gets the previous value of the object. - onEvent func(watchCacheEvent) - - // for testing timeouts. 
- clock util.Clock -} - -func newWatchCache(capacity int) *watchCache { - wc := &watchCache{ - capacity: capacity, - cache: make([]watchCacheElement, capacity), - startIndex: 0, - endIndex: 0, - store: cache.NewStore(cache.MetaNamespaceKeyFunc), - resourceVersion: 0, - clock: util.RealClock{}, - } - wc.cond = sync.NewCond(wc.RLocker()) - return wc -} - -func (w *watchCache) Add(obj interface{}) error { - object, resourceVersion, err := objectToVersionedRuntimeObject(obj) - if err != nil { - return err - } - event := watch.Event{Type: watch.Added, Object: object} - - f := func(obj runtime.Object) error { return w.store.Add(obj) } - return w.processEvent(event, resourceVersion, f) -} - -func (w *watchCache) Update(obj interface{}) error { - object, resourceVersion, err := objectToVersionedRuntimeObject(obj) - if err != nil { - return err - } - event := watch.Event{Type: watch.Modified, Object: object} - - f := func(obj runtime.Object) error { return w.store.Update(obj) } - return w.processEvent(event, resourceVersion, f) -} - -func (w *watchCache) Delete(obj interface{}) error { - object, resourceVersion, err := objectToVersionedRuntimeObject(obj) - if err != nil { - return err - } - event := watch.Event{Type: watch.Deleted, Object: object} - - f := func(obj runtime.Object) error { return w.store.Delete(obj) } - return w.processEvent(event, resourceVersion, f) -} - -func objectToVersionedRuntimeObject(obj interface{}) (runtime.Object, uint64, error) { - object, ok := obj.(runtime.Object) - if !ok { - return nil, 0, fmt.Errorf("obj does not implement runtime.Object interface: %v", obj) - } - meta, err := meta.Accessor(object) - if err != nil { - return nil, 0, err - } - resourceVersion, err := parseResourceVersion(meta.GetResourceVersion()) - if err != nil { - return nil, 0, err - } - return object, resourceVersion, nil -} - -func parseResourceVersion(resourceVersion string) (uint64, error) { - if resourceVersion == "" { - return 0, nil - } - return strconv.ParseUint(resourceVersion, 10, 64) -} - -func (w *watchCache) processEvent(event watch.Event, resourceVersion uint64, updateFunc func(runtime.Object) error) error { - w.Lock() - defer w.Unlock() - previous, exists, err := w.store.Get(event.Object) - if err != nil { - return err - } - var prevObject runtime.Object - if exists { - prevObject = previous.(runtime.Object) - } - watchCacheEvent := watchCacheEvent{event.Type, event.Object, prevObject, resourceVersion} - if w.onEvent != nil { - w.onEvent(watchCacheEvent) - } - w.updateCache(resourceVersion, watchCacheEvent) - w.resourceVersion = resourceVersion - w.cond.Broadcast() - return updateFunc(event.Object) -} - -// Assumes that lock is already held for write. -func (w *watchCache) updateCache(resourceVersion uint64, event watchCacheEvent) { - if w.endIndex == w.startIndex+w.capacity { - // Cache is full - remove the oldest element. - w.startIndex++ - } - w.cache[w.endIndex%w.capacity] = watchCacheElement{resourceVersion, event} - w.endIndex++ -} - -func (w *watchCache) List() []interface{} { - w.RLock() - defer w.RUnlock() - return w.store.List() -} - -func (w *watchCache) WaitUntilFreshAndList(resourceVersion uint64) ([]interface{}, uint64, error) { - startTime := w.clock.Now() - go func() { - // Wake us up when the time limit has expired. The docs - // promise that time.After (well, NewTimer, which it calls) - // will wait *at least* the duration given. 
Since this go - // routine starts sometime after we record the start time, and - // it will wake up the loop below sometime after the broadcast, - // we don't need to worry about waking it up before the time - // has expired accidentally. - <-w.clock.After(MaximumListWait) - w.cond.Broadcast() - }() - - w.RLock() - defer w.RUnlock() - for w.resourceVersion < resourceVersion { - if w.clock.Since(startTime) >= MaximumListWait { - return nil, 0, fmt.Errorf("time limit exceeded while waiting for resource version %v (current value: %v)", resourceVersion, w.resourceVersion) - } - w.cond.Wait() - } - return w.store.List(), w.resourceVersion, nil -} - -func (w *watchCache) ListKeys() []string { - w.RLock() - defer w.RUnlock() - return w.store.ListKeys() -} - -func (w *watchCache) Get(obj interface{}) (interface{}, bool, error) { - w.RLock() - defer w.RUnlock() - return w.store.Get(obj) -} - -func (w *watchCache) GetByKey(key string) (interface{}, bool, error) { - w.RLock() - defer w.RUnlock() - return w.store.GetByKey(key) -} - -func (w *watchCache) Replace(objs []interface{}, resourceVersion string) error { - version, err := parseResourceVersion(resourceVersion) - if err != nil { - return err - } - - w.Lock() - defer w.Unlock() - - w.startIndex = 0 - w.endIndex = 0 - if err := w.store.Replace(objs, resourceVersion); err != nil { - return err - } - w.resourceVersion = version - if w.onReplace != nil { - w.onReplace() - } - w.cond.Broadcast() - return nil -} - -func (w *watchCache) SetOnReplace(onReplace func()) { - w.Lock() - defer w.Unlock() - w.onReplace = onReplace -} - -func (w *watchCache) SetOnEvent(onEvent func(watchCacheEvent)) { - w.Lock() - defer w.Unlock() - w.onEvent = onEvent -} - -func (w *watchCache) GetAllEventsSinceThreadUnsafe(resourceVersion uint64) ([]watchCacheEvent, error) { - size := w.endIndex - w.startIndex - oldest := w.resourceVersion - if size > 0 { - oldest = w.cache[w.startIndex%w.capacity].resourceVersion - } - if resourceVersion == 0 { - // resourceVersion = 0 means that we don't require any specific starting point - // and we would like to start watching from ~now. - // However, to keep backward compatibility, we additionally need to return the - // current state and only then start watching from that point. - // - // TODO: In v2 api, we should stop returning the current state - #13969. - allItems := w.store.List() - result := make([]watchCacheEvent, len(allItems)) - for i, item := range allItems { - result[i] = watchCacheEvent{Type: watch.Added, Object: item.(runtime.Object)} - } - return result, nil - } - if resourceVersion < oldest-1 { - return nil, errors.NewGone(fmt.Sprintf("too old resource version: %d (%d)", resourceVersion, oldest-1)) - } - - // Binary search the smallest index at which resourceVersion is greater than the given one. 
- f := func(i int) bool { - return w.cache[(w.startIndex+i)%w.capacity].resourceVersion > resourceVersion - } - first := sort.Search(size, f) - result := make([]watchCacheEvent, size-first) - for i := 0; i < size-first; i++ { - result[i] = w.cache[(w.startIndex+first+i)%w.capacity].watchCacheEvent - } - return result, nil -} - -func (w *watchCache) GetAllEventsSince(resourceVersion uint64) ([]watchCacheEvent, error) { - w.RLock() - defer w.RUnlock() - return w.GetAllEventsSinceThreadUnsafe(resourceVersion) -} - -func (w *watchCache) Resync() error { - // Nothing to do - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/types/doc.go b/vendor/k8s.io/kubernetes/pkg/types/doc.go deleted file mode 100644 index 239a9a5f5..000000000 --- a/vendor/k8s.io/kubernetes/pkg/types/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package types implements various generic types used throughout kubernetes. -package types diff --git a/vendor/k8s.io/kubernetes/pkg/types/namespacedname.go b/vendor/k8s.io/kubernetes/pkg/types/namespacedname.go deleted file mode 100644 index 895d7c5be..000000000 --- a/vendor/k8s.io/kubernetes/pkg/types/namespacedname.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package types - -// NamespacedName comprises a resource name, with a mandatory namespace, -// rendered as "<namespace>/<name>". Being a type captures intent and -// helps make sure that UIDs, namespaced names and non-namespaced names -// do not get conflated in code. For most use cases, namespace and name -// will already have been format validated at the API entry point, so we -// don't do that here. Where that's not the case (e.g. in testing), -// consider using NamespacedNameOrDie() in testing.go in this package. - -type NamespacedName struct { - Namespace string - Name string -} - -// String returns the general purpose string representation -func (n NamespacedName) String() string { - return n.Namespace + "/" + n.Name -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/clock.go b/vendor/k8s.io/kubernetes/pkg/util/clock.go deleted file mode 100644 index 474cbb68d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/clock.go +++ /dev/null @@ -1,218 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "sync" - "time" -) - -// Clock allows for injecting fake or real clocks into code that -// needs to do arbitrary things based on time. -type Clock interface { - Now() time.Time - Since(time.Time) time.Duration - After(d time.Duration) <-chan time.Time - Sleep(d time.Duration) - Tick(d time.Duration) <-chan time.Time -} - -var ( - _ = Clock(RealClock{}) - _ = Clock(&FakeClock{}) - _ = Clock(&IntervalClock{}) -) - -// RealClock really calls time.Now() -type RealClock struct{} - -// Now returns the current time. -func (RealClock) Now() time.Time { - return time.Now() -} - -// Since returns time since the specified timestamp. -func (RealClock) Since(ts time.Time) time.Duration { - return time.Since(ts) -} - -// Same as time.After(d). -func (RealClock) After(d time.Duration) <-chan time.Time { - return time.After(d) -} - -func (RealClock) Tick(d time.Duration) <-chan time.Time { - return time.Tick(d) -} - -func (RealClock) Sleep(d time.Duration) { - time.Sleep(d) -} - -// FakeClock implements Clock, but returns an arbitrary time. -type FakeClock struct { - lock sync.RWMutex - time time.Time - - // waiters are waiting for the fake time to pass their specified time - waiters []fakeClockWaiter -} - -type fakeClockWaiter struct { - targetTime time.Time - stepInterval time.Duration - skipIfBlocked bool - destChan chan<- time.Time -} - -func NewFakeClock(t time.Time) *FakeClock { - return &FakeClock{ - time: t, - } -} - -// Now returns f's time. -func (f *FakeClock) Now() time.Time { - f.lock.RLock() - defer f.lock.RUnlock() - return f.time -} - -// Since returns time since the time in f. -func (f *FakeClock) Since(ts time.Time) time.Duration { - f.lock.RLock() - defer f.lock.RUnlock() - return f.time.Sub(ts) -} - -// Fake version of time.After(d). -func (f *FakeClock) After(d time.Duration) <-chan time.Time { - f.lock.Lock() - defer f.lock.Unlock() - stopTime := f.time.Add(d) - ch := make(chan time.Time, 1) // Don't block! - f.waiters = append(f.waiters, fakeClockWaiter{ - targetTime: stopTime, - destChan: ch, - }) - return ch -} - -func (f *FakeClock) Tick(d time.Duration) <-chan time.Time { - f.lock.Lock() - defer f.lock.Unlock() - tickTime := f.time.Add(d) - ch := make(chan time.Time, 1) // hold one tick - f.waiters = append(f.waiters, fakeClockWaiter{ - targetTime: tickTime, - stepInterval: d, - skipIfBlocked: true, - destChan: ch, - }) - - return ch -} - -// Move clock by Duration, notify anyone that's called After or Tick -func (f *FakeClock) Step(d time.Duration) { - f.lock.Lock() - defer f.lock.Unlock() - f.setTimeLocked(f.time.Add(d)) -} - -// Sets the time. -func (f *FakeClock) SetTime(t time.Time) { - f.lock.Lock() - defer f.lock.Unlock() - f.setTimeLocked(t) -} - -// Actually changes the time and checks any waiters. f must be write-locked. 
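// Waiters whose target time has been reached receive the new time on their
// channel: Tick-style waiters (skipIfBlocked) drop the send instead of
// blocking and are re-armed stepInterval into the future, while one-shot
// After waiters are removed. Waiters not yet due are kept for a later Step
// or SetTime.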
-func (f *FakeClock) setTimeLocked(t time.Time) { - f.time = t - newWaiters := make([]fakeClockWaiter, 0, len(f.waiters)) - for i := range f.waiters { - w := &f.waiters[i] - if !w.targetTime.After(t) { - - if w.skipIfBlocked { - select { - case w.destChan <- t: - default: - } - } else { - w.destChan <- t - } - - if w.stepInterval > 0 { - for !w.targetTime.After(t) { - w.targetTime = w.targetTime.Add(w.stepInterval) - } - newWaiters = append(newWaiters, *w) - } - - } else { - newWaiters = append(newWaiters, f.waiters[i]) - } - } - f.waiters = newWaiters -} - -// Returns true if After has been called on f but not yet satisfied (so you can -// write race-free tests). -func (f *FakeClock) HasWaiters() bool { - f.lock.RLock() - defer f.lock.RUnlock() - return len(f.waiters) > 0 -} - -func (f *FakeClock) Sleep(d time.Duration) { - f.Step(d) -} - -// IntervalClock implements Clock, but each invocation of Now steps the clock forward the specified duration -type IntervalClock struct { - Time time.Time - Duration time.Duration -} - -// Now returns i's time. -func (i *IntervalClock) Now() time.Time { - i.Time = i.Time.Add(i.Duration) - return i.Time -} - -// Since returns time since the time in i. -func (i *IntervalClock) Since(ts time.Time) time.Duration { - return i.Time.Sub(ts) -} - -// Unimplemented, will panic. -// TODO: make interval clock use FakeClock so this can be implemented. -func (*IntervalClock) After(d time.Duration) <-chan time.Time { - panic("IntervalClock doesn't implement After") -} - -// Unimplemented, will panic. -// TODO: make interval clock use FakeClock so this can be implemented. -func (*IntervalClock) Tick(d time.Duration) <-chan time.Time { - panic("IntervalClock doesn't implement Tick") -} - -func (*IntervalClock) Sleep(d time.Duration) { - panic("IntervalClock doesn't implement Sleep") -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/crypto/crypto.go b/vendor/k8s.io/kubernetes/pkg/util/crypto/crypto.go deleted file mode 100644 index f43664369..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/crypto/crypto.go +++ /dev/null @@ -1,190 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package crypto - -import ( - "bytes" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "math/big" - "net" - "os" - "path/filepath" - "time" -) - -// ShouldGenSelfSignedCerts returns false if the certificate or key files already exists, -// otherwise returns true. -func ShouldGenSelfSignedCerts(certPath, keyPath string) bool { - if canReadFile(certPath) || canReadFile(keyPath) { - return false - } - - return true -} - -// If the file represented by path exists and -// readable, returns true otherwise returns false. -func canReadFile(path string) bool { - f, err := os.Open(path) - if err != nil { - return false - } - - defer f.Close() - - return true -} - -// GenerateSelfSignedCert creates a self-signed certificate and key for the given host. 
-// Host may be an IP or a DNS name -// You may also specify additional subject alt names (either ip or dns names) for the certificate -// The certificate will be created with file mode 0644. The key will be created with file mode 0600. -// If the certificate or key files already exist, they will be overwritten. -// Any parent directories of the certPath or keyPath will be created as needed with file mode 0755. -func GenerateSelfSignedCert(host, certPath, keyPath string, alternateIPs []net.IP, alternateDNS []string) error { - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return err - } - - template := x509.Certificate{ - SerialNumber: big.NewInt(1), - Subject: pkix.Name{ - CommonName: fmt.Sprintf("%s@%d", host, time.Now().Unix()), - }, - NotBefore: time.Now(), - NotAfter: time.Now().Add(time.Hour * 24 * 365), - - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - BasicConstraintsValid: true, - IsCA: true, - } - - if ip := net.ParseIP(host); ip != nil { - template.IPAddresses = append(template.IPAddresses, ip) - } else { - template.DNSNames = append(template.DNSNames, host) - } - - template.IPAddresses = append(template.IPAddresses, alternateIPs...) - template.DNSNames = append(template.DNSNames, alternateDNS...) - - derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) - if err != nil { - return err - } - - // Generate cert - certBuffer := bytes.Buffer{} - if err := pem.Encode(&certBuffer, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { - return err - } - - // Generate key - keyBuffer := bytes.Buffer{} - if err := pem.Encode(&keyBuffer, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil { - return err - } - - // Write cert - if err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil { - return err - } - if err := ioutil.WriteFile(certPath, certBuffer.Bytes(), os.FileMode(0644)); err != nil { - return err - } - - // Write key - if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil { - return err - } - if err := ioutil.WriteFile(keyPath, keyBuffer.Bytes(), os.FileMode(0600)); err != nil { - return err - } - - return nil -} - -// CertPoolFromFile returns an x509.CertPool containing the certificates in the given PEM-encoded file. -// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates -func CertPoolFromFile(filename string) (*x509.CertPool, error) { - certs, err := certificatesFromFile(filename) - if err != nil { - return nil, err - } - pool := x509.NewCertPool() - for _, cert := range certs { - pool.AddCert(cert) - } - return pool, nil -} - -// certificatesFromFile returns the x509.Certificates contained in the given PEM-encoded file. 
-// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates -func certificatesFromFile(file string) ([]*x509.Certificate, error) { - if len(file) == 0 { - return nil, errors.New("error reading certificates from an empty filename") - } - pemBlock, err := ioutil.ReadFile(file) - if err != nil { - return nil, err - } - certs, err := CertsFromPEM(pemBlock) - if err != nil { - return nil, fmt.Errorf("error reading %s: %s", file, err) - } - return certs, nil -} - -// CertsFromPEM returns the x509.Certificates contained in the given PEM-encoded byte array -// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates -func CertsFromPEM(pemCerts []byte) ([]*x509.Certificate, error) { - ok := false - certs := []*x509.Certificate{} - for len(pemCerts) > 0 { - var block *pem.Block - block, pemCerts = pem.Decode(pemCerts) - if block == nil { - break - } - // Only use PEM "CERTIFICATE" blocks without extra headers - if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { - continue - } - - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return certs, err - } - - certs = append(certs, cert) - ok = true - } - - if !ok { - return certs, errors.New("could not read any certificates") - } - return certs, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/deployment/deployment.go b/vendor/k8s.io/kubernetes/pkg/util/deployment/deployment.go deleted file mode 100644 index 0442da429..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/deployment/deployment.go +++ /dev/null @@ -1,478 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package deployment - -import ( - "fmt" - "strconv" - "time" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/extensions" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/util/errors" - "k8s.io/kubernetes/pkg/util/integer" - intstrutil "k8s.io/kubernetes/pkg/util/intstr" - labelsutil "k8s.io/kubernetes/pkg/util/labels" - podutil "k8s.io/kubernetes/pkg/util/pod" - rsutil "k8s.io/kubernetes/pkg/util/replicaset" - "k8s.io/kubernetes/pkg/util/wait" -) - -const ( - // The revision annotation of a deployment's replica sets which records its rollout sequence - RevisionAnnotation = "deployment.kubernetes.io/revision" - - // Here are the possible rollback event reasons - RollbackRevisionNotFound = "DeploymentRollbackRevisionNotFound" - RollbackTemplateUnchanged = "DeploymentRollbackTemplateUnchanged" - RollbackDone = "DeploymentRollback" -) - -// GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and ReplicaSetList from client interface. 
-// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. -// The third returned value is the new replica set, and it may be nil if it doesn't exist yet. -func GetAllReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, *extensions.ReplicaSet, error) { - rsList, err := listReplicaSets(deployment, c) - if err != nil { - return nil, nil, nil, err - } - podList, err := listPods(deployment, c) - if err != nil { - return nil, nil, nil, err - } - oldRSes, allOldRSes, err := FindOldReplicaSets(deployment, rsList, podList) - if err != nil { - return nil, nil, nil, err - } - newRS, err := FindNewReplicaSet(deployment, rsList) - if err != nil { - return nil, nil, nil, err - } - return oldRSes, allOldRSes, newRS, nil -} - -// GetOldReplicaSets returns the old replica sets targeted by the given Deployment; get PodList and ReplicaSetList from client interface. -// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. -func GetOldReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) { - rsList, err := listReplicaSets(deployment, c) - if err != nil { - return nil, nil, err - } - podList, err := listPods(deployment, c) - if err != nil { - return nil, nil, err - } - return FindOldReplicaSets(deployment, rsList, podList) -} - -// GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface. -// Returns nil if the new replica set doesn't exist yet. -func GetNewReplicaSet(deployment *extensions.Deployment, c clientset.Interface) (*extensions.ReplicaSet, error) { - rsList, err := listReplicaSets(deployment, c) - if err != nil { - return nil, err - } - return FindNewReplicaSet(deployment, rsList) -} - -// listReplicaSets lists all RSes the given deployment targets with the given client interface. -func listReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]extensions.ReplicaSet, error) { - return ListReplicaSets(deployment, - func(namespace string, options api.ListOptions) ([]extensions.ReplicaSet, error) { - rsList, err := c.Extensions().ReplicaSets(namespace).List(options) - return rsList.Items, err - }) -} - -// listReplicaSets lists all Pods the given deployment targets with the given client interface. -func listPods(deployment *extensions.Deployment, c clientset.Interface) (*api.PodList, error) { - return ListPods(deployment, - func(namespace string, options api.ListOptions) (*api.PodList, error) { - return c.Core().Pods(namespace).List(options) - }) -} - -// TODO: switch this to full namespacers -type rsListFunc func(string, api.ListOptions) ([]extensions.ReplicaSet, error) -type podListFunc func(string, api.ListOptions) (*api.PodList, error) - -// ListReplicaSets returns a slice of RSes the given deployment targets. -func ListReplicaSets(deployment *extensions.Deployment, getRSList rsListFunc) ([]extensions.ReplicaSet, error) { - // TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. 
the replica set's selector - // should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830; - // or use controllerRef, see https://github.com/kubernetes/kubernetes/issues/2210 - namespace := deployment.Namespace - selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) - if err != nil { - return nil, err - } - options := api.ListOptions{LabelSelector: selector} - return getRSList(namespace, options) -} - -// ListPods returns a list of pods the given deployment targets. -func ListPods(deployment *extensions.Deployment, getPodList podListFunc) (*api.PodList, error) { - namespace := deployment.Namespace - selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) - if err != nil { - return nil, err - } - options := api.ListOptions{LabelSelector: selector} - return getPodList(namespace, options) -} - -// equalIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash] -// We ignore pod-template-hash because the hash result would be different upon podTemplateSpec API changes -// (e.g. the addition of a new field will cause the hash code to change) -// Note that we assume input podTemplateSpecs contain non-empty labels -func equalIgnoreHash(template1, template2 api.PodTemplateSpec) (bool, error) { - // The podTemplateSpec must have a non-empty label so that label selectors can find them. - // This is checked by validation (of resources contain a podTemplateSpec). - if len(template1.Labels) == 0 || len(template2.Labels) == 0 { - return false, fmt.Errorf("Unexpected empty labels found in given template") - } - hash1 := template1.Labels[extensions.DefaultDeploymentUniqueLabelKey] - hash2 := template2.Labels[extensions.DefaultDeploymentUniqueLabelKey] - // compare equality ignoring pod-template-hash - template1.Labels[extensions.DefaultDeploymentUniqueLabelKey] = hash2 - result := api.Semantic.DeepEqual(template1, template2) - template1.Labels[extensions.DefaultDeploymentUniqueLabelKey] = hash1 - return result, nil -} - -// FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template). -func FindNewReplicaSet(deployment *extensions.Deployment, rsList []extensions.ReplicaSet) (*extensions.ReplicaSet, error) { - newRSTemplate := GetNewReplicaSetTemplate(deployment) - for i := range rsList { - equal, err := equalIgnoreHash(rsList[i].Spec.Template, newRSTemplate) - if err != nil { - return nil, err - } - if equal { - // This is the new ReplicaSet. - return &rsList[i], nil - } - } - // new ReplicaSet does not exist. - return nil, nil -} - -// FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given PodList and slice of RSes. -// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets. -func FindOldReplicaSets(deployment *extensions.Deployment, rsList []extensions.ReplicaSet, podList *api.PodList) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) { - // Find all pods whose labels match deployment.Spec.Selector, and corresponding replica sets for pods in podList. 
- // All pods and replica sets are labeled with pod-template-hash to prevent overlapping - oldRSs := map[string]extensions.ReplicaSet{} - allOldRSs := map[string]extensions.ReplicaSet{} - newRSTemplate := GetNewReplicaSetTemplate(deployment) - for _, pod := range podList.Items { - podLabelsSelector := labels.Set(pod.ObjectMeta.Labels) - for _, rs := range rsList { - rsLabelsSelector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector) - if err != nil { - return nil, nil, fmt.Errorf("invalid label selector: %v", err) - } - // Filter out replica set that has the same pod template spec as the deployment - that is the new replica set. - equal, err := equalIgnoreHash(rs.Spec.Template, newRSTemplate) - if err != nil { - return nil, nil, err - } - if equal { - continue - } - allOldRSs[rs.ObjectMeta.Name] = rs - if rsLabelsSelector.Matches(podLabelsSelector) { - oldRSs[rs.ObjectMeta.Name] = rs - } - } - } - requiredRSs := []*extensions.ReplicaSet{} - for key := range oldRSs { - value := oldRSs[key] - requiredRSs = append(requiredRSs, &value) - } - allRSs := []*extensions.ReplicaSet{} - for key := range allOldRSs { - value := allOldRSs[key] - allRSs = append(allRSs, &value) - } - return requiredRSs, allRSs, nil -} - -func WaitForReplicaSetUpdated(c clientset.Interface, desiredGeneration int64, namespace, name string) error { - return wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { - rs, err := c.Extensions().ReplicaSets(namespace).Get(name) - if err != nil { - return false, err - } - return rs.Status.ObservedGeneration >= desiredGeneration, nil - }) -} - -func WaitForPodsHashPopulated(c clientset.Interface, desiredGeneration int64, namespace, name string) error { - return wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) { - rs, err := c.Extensions().ReplicaSets(namespace).Get(name) - if err != nil { - return false, err - } - return rs.Status.ObservedGeneration >= desiredGeneration && - rs.Status.FullyLabeledReplicas == rs.Spec.Replicas, nil - }) -} - -// LabelPodsWithHash labels all pods in the given podList with the new hash label. -// The returned bool value can be used to tell if all pods are actually labeled. -func LabelPodsWithHash(podList *api.PodList, rs *extensions.ReplicaSet, c clientset.Interface, namespace, hash string) (bool, error) { - allPodsLabeled := true - for _, pod := range podList.Items { - // Only label the pod that doesn't already have the new hash - if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash { - if _, podUpdated, err := podutil.UpdatePodWithRetries(c.Core().Pods(namespace), &pod, - func(podToUpdate *api.Pod) error { - // Precondition: the pod doesn't contain the new hash in its label. - if podToUpdate.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash { - return errors.ErrPreconditionViolated - } - podToUpdate.Labels = labelsutil.AddLabel(podToUpdate.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash) - return nil - }); err != nil { - return false, fmt.Errorf("error in adding template hash label %s to pod %+v: %s", hash, pod, err) - } else if podUpdated { - glog.V(4).Infof("Labeled %s %s/%s of %s %s/%s with hash %s.", pod.Kind, pod.Namespace, pod.Name, rs.Kind, rs.Namespace, rs.Name, hash) - } else { - // If the pod wasn't updated but didn't return error when we try to update it, we've hit "pod not found" or "precondition violated" error. 
- // Then we can't say all pods are labeled - allPodsLabeled = false - } - } - } - return allPodsLabeled, nil -} - -// Returns the desired PodTemplateSpec for the new ReplicaSet corresponding to the given ReplicaSet. -func GetNewReplicaSetTemplate(deployment *extensions.Deployment) api.PodTemplateSpec { - // newRS will have the same template as in deployment spec, plus a unique label in some cases. - newRSTemplate := api.PodTemplateSpec{ - ObjectMeta: deployment.Spec.Template.ObjectMeta, - Spec: deployment.Spec.Template.Spec, - } - newRSTemplate.ObjectMeta.Labels = labelsutil.CloneAndAddLabel( - deployment.Spec.Template.ObjectMeta.Labels, - extensions.DefaultDeploymentUniqueLabelKey, - podutil.GetPodTemplateSpecHash(newRSTemplate)) - return newRSTemplate -} - -// SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment. -func SetFromReplicaSetTemplate(deployment *extensions.Deployment, template api.PodTemplateSpec) *extensions.Deployment { - deployment.Spec.Template.ObjectMeta = template.ObjectMeta - deployment.Spec.Template.Spec = template.Spec - deployment.Spec.Template.ObjectMeta.Labels = labelsutil.CloneAndRemoveLabel( - deployment.Spec.Template.ObjectMeta.Labels, - extensions.DefaultDeploymentUniqueLabelKey) - return deployment -} - -// Returns the sum of Replicas of the given replica sets. -func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 { - totalReplicaCount := int32(0) - for _, rs := range replicaSets { - if rs != nil { - totalReplicaCount += rs.Spec.Replicas - } - } - return totalReplicaCount -} - -// GetActualReplicaCountForReplicaSets returns the sum of actual replicas of the given replica sets. -func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 { - totalReplicaCount := int32(0) - for _, rs := range replicaSets { - if rs != nil { - totalReplicaCount += rs.Status.Replicas - } - } - return totalReplicaCount -} - -// GetAvailablePodsForReplicaSets returns the number of available pods (listed from clientset) corresponding to the given replica sets. -func GetAvailablePodsForReplicaSets(c clientset.Interface, deployment *extensions.Deployment, rss []*extensions.ReplicaSet, minReadySeconds int32) (int32, error) { - podList, err := listPods(deployment, c) - if err != nil { - return 0, err - } - return CountAvailablePodsForReplicaSets(podList, rss, minReadySeconds) -} - -// CountAvailablePodsForReplicaSets returns the number of available pods corresponding to the given pod list and replica sets. -// Note that the input pod list should be the pods targeted by the deployment of input replica sets. -func CountAvailablePodsForReplicaSets(podList *api.PodList, rss []*extensions.ReplicaSet, minReadySeconds int32) (int32, error) { - rsPods, err := filterPodsMatchingReplicaSets(rss, podList) - if err != nil { - return 0, err - } - return countAvailablePods(rsPods, minReadySeconds), nil -} - -// GetAvailablePodsForDeployment returns the number of available pods (listed from clientset) corresponding to the given deployment. 
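// A pod counts as available when it is still active and its Ready condition
// has held for at least minReadySeconds (see IsPodAvailable below).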
-func GetAvailablePodsForDeployment(c clientset.Interface, deployment *extensions.Deployment, minReadySeconds int32) (int32, error) { - podList, err := listPods(deployment, c) - if err != nil { - return 0, err - } - return countAvailablePods(podList.Items, minReadySeconds), nil -} - -func countAvailablePods(pods []api.Pod, minReadySeconds int32) int32 { - availablePodCount := int32(0) - for _, pod := range pods { - if IsPodAvailable(&pod, minReadySeconds) { - availablePodCount++ - } - } - return availablePodCount -} - -func IsPodAvailable(pod *api.Pod, minReadySeconds int32) bool { - if !controller.IsPodActive(*pod) { - return false - } - // Check if we've passed minReadySeconds since LastTransitionTime - // If so, this pod is ready - for _, c := range pod.Status.Conditions { - // we only care about pod ready conditions - if c.Type == api.PodReady && c.Status == api.ConditionTrue { - // 2 cases that this ready condition is valid (passed minReadySeconds, i.e. the pod is available): - // 1. minReadySeconds == 0, or - // 2. LastTransitionTime (is set) + minReadySeconds (>0) < current time - minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second - if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(time.Now()) { - return true - } - } - } - return false -} - -// filterPodsMatchingReplicaSets filters the given pod list and only return the ones targeted by the input replicasets -func filterPodsMatchingReplicaSets(replicaSets []*extensions.ReplicaSet, podList *api.PodList) ([]api.Pod, error) { - rsPods := []api.Pod{} - for _, rs := range replicaSets { - matchingFunc, err := rsutil.MatchingPodsFunc(rs) - if err != nil { - return nil, err - } - if matchingFunc == nil { - continue - } - rsPods = append(rsPods, podutil.Filter(podList, matchingFunc)...) - } - return rsPods, nil -} - -// Revision returns the revision number of the input replica set -func Revision(rs *extensions.ReplicaSet) (int64, error) { - v, ok := rs.Annotations[RevisionAnnotation] - if !ok { - return 0, nil - } - return strconv.ParseInt(v, 10, 64) -} - -func IsRollingUpdate(deployment *extensions.Deployment) bool { - return deployment.Spec.Strategy.Type == extensions.RollingUpdateDeploymentStrategyType -} - -// NewRSNewReplicas calculates the number of replicas a deployment's new RS should have. -// When one of the followings is true, we're rolling out the deployment; otherwise, we're scaling it. -// 1) The new RS is saturated: newRS's replicas == deployment's replicas -// 2) Max number of pods allowed is reached: deployment's replicas + maxSurge == all RSs' replicas -func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) (int32, error) { - switch deployment.Spec.Strategy.Type { - case extensions.RollingUpdateDeploymentStrategyType: - // Check if we can scale up. - maxSurge, err := intstrutil.GetValueFromIntOrPercent(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(deployment.Spec.Replicas), true) - if err != nil { - return 0, err - } - // Find the total number of pods - currentPodCount := GetReplicaCountForReplicaSets(allRSs) - maxTotalPods := deployment.Spec.Replicas + int32(maxSurge) - if currentPodCount >= maxTotalPods { - // Cannot scale up. - return newRS.Spec.Replicas, nil - } - // Scale up. - scaleUpCount := maxTotalPods - currentPodCount - // Do not exceed the number of desired replicas. 
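		// For example: 10 desired replicas with maxSurge 25% rounds up to a
		// surge of 3, so maxTotalPods is 13. If all replica sets currently
		// declare 11 pods and the new RS declares 8, scaleUpCount is
		// min(13-11, 10-8) = 2 and the new RS is scaled to 10.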
- scaleUpCount = int32(integer.IntMin(int(scaleUpCount), int(deployment.Spec.Replicas-newRS.Spec.Replicas))) - return newRS.Spec.Replicas + scaleUpCount, nil - case extensions.RecreateDeploymentStrategyType: - return deployment.Spec.Replicas, nil - default: - return 0, fmt.Errorf("deployment type %v isn't supported", deployment.Spec.Strategy.Type) - } -} - -// Polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration. -// Returns error if polling timesout. -func WaitForObservedDeployment(getDeploymentFunc func() (*extensions.Deployment, error), desiredGeneration int64, interval, timeout time.Duration) error { - // TODO: This should take clientset.Interface when all code is updated to use clientset. Keeping it this way allows the function to be used by callers who have client.Interface. - return wait.Poll(interval, timeout, func() (bool, error) { - deployment, err := getDeploymentFunc() - if err != nil { - return false, err - } - return deployment.Status.ObservedGeneration >= desiredGeneration, nil - }) -} - -// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one -// step. For example: -// -// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1) -// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1) -// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) -// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1) -// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) -// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1) -func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) { - surge, err := intstrutil.GetValueFromIntOrPercent(maxSurge, int(desired), true) - if err != nil { - return 0, 0, err - } - unavailable, err := intstrutil.GetValueFromIntOrPercent(maxUnavailable, int(desired), false) - if err != nil { - return 0, 0, err - } - - if surge == 0 && unavailable == 0 { - // Validation should never allow the user to explicitly use zero values for both maxSurge - // maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero. - // If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the - // theory that surge might not work due to quota. - unavailable = 1 - } - - return int32(surge), int32(unavailable), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/doc.go b/vendor/k8s.io/kubernetes/pkg/util/doc.go deleted file mode 100644 index cd3f0823e..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package util implements various utility functions used in both testing and implementation -// of Kubernetes. 
Package util may not depend on any other package in the Kubernetes -// package tree. -package util diff --git a/vendor/k8s.io/kubernetes/pkg/util/errors/doc.go b/vendor/k8s.io/kubernetes/pkg/util/errors/doc.go deleted file mode 100644 index b0af0f055..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/errors/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package errors implements various utility functions and types around errors. -package errors diff --git a/vendor/k8s.io/kubernetes/pkg/util/errors/errors.go b/vendor/k8s.io/kubernetes/pkg/util/errors/errors.go deleted file mode 100644 index df3adaf3e..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/errors/errors.go +++ /dev/null @@ -1,156 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package errors - -import ( - "errors" - "fmt" -) - -// Aggregate represents an object that contains multiple errors, but does not -// necessarily have singular semantic meaning. -type Aggregate interface { - error - Errors() []error -} - -// NewAggregate converts a slice of errors into an Aggregate interface, which -// is itself an implementation of the error interface. If the slice is empty, -// this returns nil. -func NewAggregate(errlist []error) Aggregate { - if len(errlist) == 0 { - return nil - } - return aggregate(errlist) -} - -// This helper implements the error and Errors interfaces. Keeping it private -// prevents people from making an aggregate of 0 errors, which is not -// an error, but does satisfy the error interface. -type aggregate []error - -// Error is part of the error interface. -func (agg aggregate) Error() string { - if len(agg) == 0 { - // This should never happen, really. - return "" - } - if len(agg) == 1 { - return agg[0].Error() - } - result := fmt.Sprintf("[%s", agg[0].Error()) - for i := 1; i < len(agg); i++ { - result += fmt.Sprintf(", %s", agg[i].Error()) - } - result += "]" - return result -} - -// Errors is part of the Aggregate interface. -func (agg aggregate) Errors() []error { - return []error(agg) -} - -// Matcher is used to match errors. Returns true if the error matches. -type Matcher func(error) bool - -// FilterOut removes all errors that match any of the matchers from the input -// error. If the input is a singular error, only that error is tested. If the -// input implements the Aggregate interface, the list of errors will be -// processed recursively. 
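// A Matcher is simply a func(error) bool; for example,
// func(err error) bool { return err == io.EOF } matches (and thus filters
// out) io.EOF.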
-// -// This can be used, for example, to remove known-OK errors (such as io.EOF or -// os.PathNotFound) from a list of errors. -func FilterOut(err error, fns ...Matcher) error { - if err == nil { - return nil - } - if agg, ok := err.(Aggregate); ok { - return NewAggregate(filterErrors(agg.Errors(), fns...)) - } - if !matchesError(err, fns...) { - return err - } - return nil -} - -// matchesError returns true if any Matcher returns true -func matchesError(err error, fns ...Matcher) bool { - for _, fn := range fns { - if fn(err) { - return true - } - } - return false -} - -// filterErrors returns any errors (or nested errors, if the list contains -// nested Errors) for which all fns return false. If no errors -// remain a nil list is returned. The resulting silec will have all -// nested slices flattened as a side effect. -func filterErrors(list []error, fns ...Matcher) []error { - result := []error{} - for _, err := range list { - r := FilterOut(err, fns...) - if r != nil { - result = append(result, r) - } - } - return result -} - -// Flatten takes an Aggregate, which may hold other Aggregates in arbitrary -// nesting, and flattens them all into a single Aggregate, recursively. -func Flatten(agg Aggregate) Aggregate { - result := []error{} - if agg == nil { - return nil - } - for _, err := range agg.Errors() { - if a, ok := err.(Aggregate); ok { - r := Flatten(a) - if r != nil { - result = append(result, r.Errors()...) - } - } else { - if err != nil { - result = append(result, err) - } - } - } - return NewAggregate(result) -} - -// AggregateGoroutines runs the provided functions in parallel, stuffing all -// non-nil errors into the returned Aggregate. -// Returns nil if all the functions complete successfully. -func AggregateGoroutines(funcs ...func() error) Aggregate { - errChan := make(chan error, len(funcs)) - for _, f := range funcs { - go func(f func() error) { errChan <- f() }(f) - } - errs := make([]error, 0) - for i := 0; i < cap(errChan); i++ { - if err := <-errChan; err != nil { - errs = append(errs, err) - } - } - return NewAggregate(errs) -} - -// ErrPreconditionViolated is returned when the precondition is violated -var ErrPreconditionViolated = errors.New("precondition is violated") diff --git a/vendor/k8s.io/kubernetes/pkg/util/flag/flags.go b/vendor/k8s.io/kubernetes/pkg/util/flag/flags.go deleted file mode 100644 index 94b9f733f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/flag/flags.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package flag - -import ( - goflag "flag" - "strings" - - "github.com/golang/glog" - "github.com/spf13/pflag" -) - -// WordSepNormalizeFunc changes all flags that contain "_" separators -func WordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - if strings.Contains(name, "_") { - return pflag.NormalizedName(strings.Replace(name, "_", "-", -1)) - } - return pflag.NormalizedName(name) -} - -// WarnWordSepNormalizeFunc changes and warns for flags that contain "_" separators -func WarnWordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - if strings.Contains(name, "_") { - nname := strings.Replace(name, "_", "-", -1) - glog.Warningf("%s is DEPRECATED and will be removed in a future version. Use %s instead.", name, nname) - - return pflag.NormalizedName(nname) - } - return pflag.NormalizedName(name) -} - -// InitFlags normalizes and parses the command line flags -func InitFlags() { - pflag.CommandLine.SetNormalizeFunc(WordSepNormalizeFunc) - pflag.CommandLine.AddGoFlagSet(goflag.CommandLine) - pflag.Parse() -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/flag/tristate.go b/vendor/k8s.io/kubernetes/pkg/util/flag/tristate.go deleted file mode 100644 index a9359695f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/flag/tristate.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package flag - -import ( - "fmt" - "strconv" -) - -// Tristate is a flag compatible with flags and pflags that -// keeps track of whether it had a value supplied or not. -type Tristate int - -const ( - Unset Tristate = iota // 0 - True - False -) - -func (f *Tristate) Default(value bool) { - *f = triFromBool(value) -} - -func (f Tristate) String() string { - b := boolFromTri(f) - return fmt.Sprintf("%t", b) -} - -func (f Tristate) Value() bool { - b := boolFromTri(f) - return b -} - -func (f *Tristate) Set(value string) error { - boolVal, err := strconv.ParseBool(value) - if err != nil { - return err - } - - *f = triFromBool(boolVal) - return nil -} - -func (f Tristate) Provided() bool { - if f != Unset { - return true - } - return false -} - -func (f *Tristate) Type() string { - return "tristate" -} - -func boolFromTri(t Tristate) bool { - if t == True { - return true - } else { - return false - } -} - -func triFromBool(b bool) Tristate { - if b { - return True - } else { - return False - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/backoff.go b/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/backoff.go deleted file mode 100644 index 1898c55c9..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/flowcontrol/backoff.go +++ /dev/null @@ -1,149 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package flowcontrol - -import ( - "sync" - "time" - - "k8s.io/kubernetes/pkg/util" - "k8s.io/kubernetes/pkg/util/integer" -) - -type backoffEntry struct { - backoff time.Duration - lastUpdate time.Time -} - -type Backoff struct { - sync.Mutex - Clock util.Clock - defaultDuration time.Duration - maxDuration time.Duration - perItemBackoff map[string]*backoffEntry -} - -func NewFakeBackOff(initial, max time.Duration, tc *util.FakeClock) *Backoff { - return &Backoff{ - perItemBackoff: map[string]*backoffEntry{}, - Clock: tc, - defaultDuration: initial, - maxDuration: max, - } -} - -func NewBackOff(initial, max time.Duration) *Backoff { - return &Backoff{ - perItemBackoff: map[string]*backoffEntry{}, - Clock: util.RealClock{}, - defaultDuration: initial, - maxDuration: max, - } -} - -// Get the current backoff Duration -func (p *Backoff) Get(id string) time.Duration { - p.Lock() - defer p.Unlock() - var delay time.Duration - entry, ok := p.perItemBackoff[id] - if ok { - delay = entry.backoff - } - return delay -} - -// move backoff to the next mark, capping at maxDuration -func (p *Backoff) Next(id string, eventTime time.Time) { - p.Lock() - defer p.Unlock() - entry, ok := p.perItemBackoff[id] - if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { - entry = p.initEntryUnsafe(id) - } else { - delay := entry.backoff * 2 // exponential - entry.backoff = time.Duration(integer.Int64Min(int64(delay), int64(p.maxDuration))) - } - entry.lastUpdate = p.Clock.Now() -} - -// Reset forces clearing of all backoff data for a given key. -func (p *Backoff) Reset(id string) { - p.Lock() - defer p.Unlock() - delete(p.perItemBackoff, id) -} - -// Returns True if the elapsed time since eventTime is smaller than the current backoff window -func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool { - p.Lock() - defer p.Unlock() - entry, ok := p.perItemBackoff[id] - if !ok { - return false - } - if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { - return false - } - return p.Clock.Now().Sub(eventTime) < entry.backoff -} - -// Returns True if time since lastupdate is less than the current backoff window. -func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool { - p.Lock() - defer p.Unlock() - entry, ok := p.perItemBackoff[id] - if !ok { - return false - } - if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { - return false - } - return eventTime.Sub(entry.lastUpdate) < entry.backoff -} - -// Garbage collect records that have aged past maxDuration. Backoff users are expected -// to invoke this periodically. 
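// Entries whose lastUpdate is more than 2*maxDuration in the past are
// dropped; younger entries are kept so Get and IsInBackOffSince can still
// consult them.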
-func (p *Backoff) GC() { - p.Lock() - defer p.Unlock() - now := p.Clock.Now() - for id, entry := range p.perItemBackoff { - if now.Sub(entry.lastUpdate) > p.maxDuration*2 { - // GC when entry has not been updated for 2*maxDuration - delete(p.perItemBackoff, id) - } - } -} - -func (p *Backoff) DeleteEntry(id string) { - p.Lock() - defer p.Unlock() - delete(p.perItemBackoff, id) -} - -// Take a lock on *Backoff, before calling initEntryUnsafe -func (p *Backoff) initEntryUnsafe(id string) *backoffEntry { - entry := &backoffEntry{backoff: p.defaultDuration} - p.perItemBackoff[id] = entry - return entry -} - -// After 2*maxDuration we restart the backoff factor to the beginning -func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool { - return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go b/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go deleted file mode 100644 index 95fd32abe..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package hash - -import ( - "hash" - - "github.com/davecgh/go-spew/spew" -) - -// DeepHashObject writes specified object to hash using the spew library -// which follows pointers and prints actual values of the nested objects -// ensuring the hash does not change when a pointer changes. -func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) { - hasher.Reset() - printer := spew.ConfigState{ - Indent: " ", - SortKeys: true, - DisableMethods: true, - SpewKeys: true, - } - printer.Fprintf(hasher, "%#v", objectToWrite) -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/integer/integer.go b/vendor/k8s.io/kubernetes/pkg/util/integer/integer.go deleted file mode 100644 index c51cd952d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/integer/integer.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package integer - -func IntMax(a, b int) int { - if b > a { - return b - } - return a -} - -func IntMin(a, b int) int { - if b < a { - return b - } - return a -} - -func Int64Max(a, b int64) int64 { - if b > a { - return b - } - return a -} - -func Int64Min(a, b int64) int64 { - if b < a { - return b - } - return a -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/intstr/deep_copy_generated.go b/vendor/k8s.io/kubernetes/pkg/util/intstr/deep_copy_generated.go deleted file mode 100644 index 29aef0223..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/intstr/deep_copy_generated.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package intstr - -import ( - conversion "k8s.io/kubernetes/pkg/conversion" -) - -func DeepCopy_intstr_IntOrString(in IntOrString, out *IntOrString, c *conversion.Cloner) error { - out.Type = in.Type - out.IntVal = in.IntVal - out.StrVal = in.StrVal - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.pb.go deleted file mode 100644 index ef39cd586..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.pb.go +++ /dev/null @@ -1,347 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/util/intstr/generated.proto -// DO NOT EDIT! - -/* - Package intstr is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/util/intstr/generated.proto - - It has these top-level messages: - IntOrString -*/ -package intstr - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. 
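// The blank assignments below keep the proto, fmt and math imports
// referenced even if nothing else in the generated code uses them directly.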
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -func (m *IntOrString) Reset() { *m = IntOrString{} } -func (*IntOrString) ProtoMessage() {} - -func init() { - proto.RegisterType((*IntOrString)(nil), "k8s.io.kubernetes.pkg.util.intstr.IntOrString") -} -func (m *IntOrString) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *IntOrString) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Type)) - data[i] = 0x10 - i++ - i = encodeVarintGenerated(data, i, uint64(m.IntVal)) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.StrVal))) - i += copy(data[i:], m.StrVal) - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *IntOrString) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Type)) - n += 1 + sovGenerated(uint64(m.IntVal)) - l = len(m.StrVal) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *IntOrString) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IntOrString: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IntOrString: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Type |= (Type(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IntVal", wireType) - } - m.IntVal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.IntVal |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field StrVal", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StrVal = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.proto b/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.proto deleted file mode 100644 index 32ad1b6b1..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/intstr/generated.proto +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.util.intstr; - -// Package-wide variables from generator "generated". -option go_package = "intstr"; - -// IntOrString is a type that can hold an int32 or a string. When used in -// JSON or YAML marshalling and unmarshalling, it produces or consumes the -// inner type. This allows you to have, for example, a JSON field that can -// accept a name or number. -// TODO: Rename to Int32OrString -// -// +gencopy=true -// +protobuf=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -message IntOrString { - optional int64 type = 1; - - optional int32 intVal = 2; - - optional string strVal = 3; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/util/json/json.go b/vendor/k8s.io/kubernetes/pkg/util/json/json.go deleted file mode 100644 index 1ff8cc0d4..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/json/json.go +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package json - -import ( - "bytes" - "encoding/json" - "io" -) - -// NewEncoder delegates to json.NewEncoder -// It is only here so this package can be a drop-in for common encoding/json uses -func NewEncoder(w io.Writer) *json.Encoder { - return json.NewEncoder(w) -} - -// Marshal delegates to json.Marshal -// It is only here so this package can be a drop-in for common encoding/json uses -func Marshal(v interface{}) ([]byte, error) { - return json.Marshal(v) -} - -// Unmarshal unmarshals the given data -// If v is a *map[string]interface{}, numbers are converted to int64 or float64 -func Unmarshal(data []byte, v interface{}) error { - switch v := v.(type) { - case *map[string]interface{}: - // Build a decoder from the given data - decoder := json.NewDecoder(bytes.NewBuffer(data)) - // Preserve numbers, rather than casting to float64 automatically - decoder.UseNumber() - // Run the decode - if err := decoder.Decode(v); err != nil { - return err - } - // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertMapNumbers(*v) - - default: - return json.Unmarshal(data, v) - } -} - -// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. 
-// values which are map[string]interface{} or []interface{} are recursively visited -func convertMapNumbers(m map[string]interface{}) error { - var err error - for k, v := range m { - switch v := v.(type) { - case json.Number: - m[k], err = convertNumber(v) - case map[string]interface{}: - err = convertMapNumbers(v) - case []interface{}: - err = convertSliceNumbers(v) - } - if err != nil { - return err - } - } - return nil -} - -// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. -// values which are map[string]interface{} or []interface{} are recursively visited -func convertSliceNumbers(s []interface{}) error { - var err error - for i, v := range s { - switch v := v.(type) { - case json.Number: - s[i], err = convertNumber(v) - case map[string]interface{}: - err = convertMapNumbers(v) - case []interface{}: - err = convertSliceNumbers(v) - } - if err != nil { - return err - } - } - return nil -} - -// convertNumber converts a json.Number to an int64 or float64, or returns an error -func convertNumber(n json.Number) (interface{}, error) { - // Attempt to convert to an int64 first - if i, err := n.Int64(); err == nil { - return i, nil - } - // Return a float64 (default json.Decode() behavior) - // An overflow will return an error - return n.Float64() -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/doc.go b/vendor/k8s.io/kubernetes/pkg/util/jsonpath/doc.go deleted file mode 100644 index 6bdf4ac59..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// package jsonpath is a template engine using jsonpath syntax, -// which can be seen at http://goessner.net/articles/JsonPath/. -// In addition, it has {range} {end} function to iterate list and slice. -package jsonpath diff --git a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/jsonpath.go b/vendor/k8s.io/kubernetes/pkg/util/jsonpath/jsonpath.go deleted file mode 100644 index 7a402af49..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/jsonpath.go +++ /dev/null @@ -1,486 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package jsonpath - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strings" - - "k8s.io/kubernetes/third_party/golang/template" -) - -type JSONPath struct { - name string - parser *Parser - stack [][]reflect.Value //push and pop values in different scopes - cur []reflect.Value //current scope values - beginRange int - inRange int - endRange int -} - -func New(name string) *JSONPath { - return &JSONPath{ - name: name, - beginRange: 0, - inRange: 0, - endRange: 0, - } -} - -// Parse parse the given template, return error -func (j *JSONPath) Parse(text string) (err error) { - j.parser, err = Parse(j.name, text) - return -} - -// Execute bounds data into template and write the result -func (j *JSONPath) Execute(wr io.Writer, data interface{}) error { - fullResults, err := j.FindResults(data) - if err != nil { - return err - } - for ix := range fullResults { - if err := j.PrintResults(wr, fullResults[ix]); err != nil { - return err - } - } - return nil -} - -func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) { - if j.parser == nil { - return nil, fmt.Errorf("%s is an incomplete jsonpath template", j.name) - } - - j.cur = []reflect.Value{reflect.ValueOf(data)} - nodes := j.parser.Root.Nodes - fullResult := [][]reflect.Value{} - for i := 0; i < len(nodes); i++ { - node := nodes[i] - results, err := j.walk(j.cur, node) - if err != nil { - return nil, err - } - - //encounter an end node, break the current block - if j.endRange > 0 && j.endRange <= j.inRange { - j.endRange -= 1 - break - } - //encounter a range node, start a range loop - if j.beginRange > 0 { - j.beginRange -= 1 - j.inRange += 1 - for k, value := range results { - j.parser.Root.Nodes = nodes[i+1:] - if k == len(results)-1 { - j.inRange -= 1 - } - nextResults, err := j.FindResults(value.Interface()) - if err != nil { - return nil, err - } - fullResult = append(fullResult, nextResults...) 
- } - break - } - fullResult = append(fullResult, results) - } - return fullResult, nil -} - -// PrintResults write the results into writer -func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error { - for i, r := range results { - text, err := j.evalToText(r) - if err != nil { - return err - } - if i != len(results)-1 { - text = append(text, ' ') - } - if _, err = wr.Write(text); err != nil { - return err - } - } - return nil -} - -// walk visits tree rooted at the given node in DFS order -func (j *JSONPath) walk(value []reflect.Value, node Node) ([]reflect.Value, error) { - switch node := node.(type) { - case *ListNode: - return j.evalList(value, node) - case *TextNode: - return []reflect.Value{reflect.ValueOf(node.Text)}, nil - case *FieldNode: - return j.evalField(value, node) - case *ArrayNode: - return j.evalArray(value, node) - case *FilterNode: - return j.evalFilter(value, node) - case *IntNode: - return j.evalInt(value, node) - case *FloatNode: - return j.evalFloat(value, node) - case *WildcardNode: - return j.evalWildcard(value, node) - case *RecursiveNode: - return j.evalRecursive(value, node) - case *UnionNode: - return j.evalUnion(value, node) - case *IdentifierNode: - return j.evalIdentifier(value, node) - default: - return value, fmt.Errorf("unexpected Node %v", node) - } -} - -// evalInt evaluates IntNode -func (j *JSONPath) evalInt(input []reflect.Value, node *IntNode) ([]reflect.Value, error) { - result := make([]reflect.Value, len(input)) - for i := range input { - result[i] = reflect.ValueOf(node.Value) - } - return result, nil -} - -// evalFloat evaluates FloatNode -func (j *JSONPath) evalFloat(input []reflect.Value, node *FloatNode) ([]reflect.Value, error) { - result := make([]reflect.Value, len(input)) - for i := range input { - result[i] = reflect.ValueOf(node.Value) - } - return result, nil -} - -// evalList evaluates ListNode -func (j *JSONPath) evalList(value []reflect.Value, node *ListNode) ([]reflect.Value, error) { - var err error - curValue := value - for _, node := range node.Nodes { - curValue, err = j.walk(curValue, node) - if err != nil { - return curValue, err - } - } - return curValue, nil -} - -// evalIdentifier evaluates IdentifierNode -func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) ([]reflect.Value, error) { - results := []reflect.Value{} - switch node.Name { - case "range": - j.stack = append(j.stack, j.cur) - j.beginRange += 1 - results = input - case "end": - if j.endRange < j.inRange { //inside a loop, break the current block - j.endRange += 1 - break - } - // the loop is about to end, pop value and continue the following execution - if len(j.stack) > 0 { - j.cur, j.stack = j.stack[len(j.stack)-1], j.stack[:len(j.stack)-1] - } else { - return results, fmt.Errorf("not in range, nothing to end") - } - default: - return input, fmt.Errorf("unrecognized identifier %v", node.Name) - } - return results, nil -} - -// evalArray evaluates ArrayNode -func (j *JSONPath) evalArray(input []reflect.Value, node *ArrayNode) ([]reflect.Value, error) { - result := []reflect.Value{} - for _, value := range input { - - value, isNil := template.Indirect(value) - if isNil { - continue - } - if value.Kind() != reflect.Array && value.Kind() != reflect.Slice { - return input, fmt.Errorf("%v is not array or slice", value.Type()) - } - params := node.Params - if !params[0].Known { - params[0].Value = 0 - } - if params[0].Value < 0 { - params[0].Value += value.Len() - } - if !params[1].Known { - params[1].Value = 
value.Len() - } - - if params[1].Value < 0 { - params[1].Value += value.Len() - } - - sliceLength := value.Len() - if params[1].Value != params[0].Value { // if you're requesting zero elements, allow it through. - if params[0].Value >= sliceLength { - return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[0].Value, sliceLength) - } - if params[1].Value > sliceLength { - return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[1].Value-1, sliceLength) - } - } - - if !params[2].Known { - value = value.Slice(params[0].Value, params[1].Value) - } else { - value = value.Slice3(params[0].Value, params[1].Value, params[2].Value) - } - for i := 0; i < value.Len(); i++ { - result = append(result, value.Index(i)) - } - } - return result, nil -} - -// evalUnion evaluates UnionNode -func (j *JSONPath) evalUnion(input []reflect.Value, node *UnionNode) ([]reflect.Value, error) { - result := []reflect.Value{} - for _, listNode := range node.Nodes { - temp, err := j.evalList(input, listNode) - if err != nil { - return input, err - } - result = append(result, temp...) - } - return result, nil -} - -func (j *JSONPath) findFieldInValue(value *reflect.Value, node *FieldNode) (reflect.Value, error) { - t := value.Type() - var inlineValue *reflect.Value - for ix := 0; ix < t.NumField(); ix++ { - f := t.Field(ix) - jsonTag := f.Tag.Get("json") - parts := strings.Split(jsonTag, ",") - if len(parts) == 0 { - continue - } - if parts[0] == node.Value { - return value.Field(ix), nil - } - if len(parts[0]) == 0 { - val := value.Field(ix) - inlineValue = &val - } - } - if inlineValue != nil { - if inlineValue.Kind() == reflect.Struct { - // handle 'inline' - match, err := j.findFieldInValue(inlineValue, node) - if err != nil { - return reflect.Value{}, err - } - if match.IsValid() { - return match, nil - } - } - } - return value.FieldByName(node.Value), nil -} - -// evalField evaluates filed of struct or key of map. 
-func (j *JSONPath) evalField(input []reflect.Value, node *FieldNode) ([]reflect.Value, error) { - results := []reflect.Value{} - // If there's no input, there's no output - if len(input) == 0 { - return results, nil - } - for _, value := range input { - var result reflect.Value - value, isNil := template.Indirect(value) - if isNil { - continue - } - - if value.Kind() == reflect.Struct { - var err error - if result, err = j.findFieldInValue(&value, node); err != nil { - return nil, err - } - } else if value.Kind() == reflect.Map { - mapKeyType := value.Type().Key() - nodeValue := reflect.ValueOf(node.Value) - // node value type must be convertible to map key type - if !nodeValue.Type().ConvertibleTo(mapKeyType) { - return results, fmt.Errorf("%s is not convertible to %s", nodeValue, mapKeyType) - } - result = value.MapIndex(nodeValue.Convert(mapKeyType)) - } - if result.IsValid() { - results = append(results, result) - } - } - if len(results) == 0 { - return results, fmt.Errorf("%s is not found", node.Value) - } - return results, nil -} - -// evalWildcard extract all contents of the given value -func (j *JSONPath) evalWildcard(input []reflect.Value, node *WildcardNode) ([]reflect.Value, error) { - results := []reflect.Value{} - for _, value := range input { - value, isNil := template.Indirect(value) - if isNil { - continue - } - - kind := value.Kind() - if kind == reflect.Struct { - for i := 0; i < value.NumField(); i++ { - results = append(results, value.Field(i)) - } - } else if kind == reflect.Map { - for _, key := range value.MapKeys() { - results = append(results, value.MapIndex(key)) - } - } else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String { - for i := 0; i < value.Len(); i++ { - results = append(results, value.Index(i)) - } - } - } - return results, nil -} - -// evalRecursive visit the given value recursively and push all of them to result -func (j *JSONPath) evalRecursive(input []reflect.Value, node *RecursiveNode) ([]reflect.Value, error) { - result := []reflect.Value{} - for _, value := range input { - results := []reflect.Value{} - value, isNil := template.Indirect(value) - if isNil { - continue - } - - kind := value.Kind() - if kind == reflect.Struct { - for i := 0; i < value.NumField(); i++ { - results = append(results, value.Field(i)) - } - } else if kind == reflect.Map { - for _, key := range value.MapKeys() { - results = append(results, value.MapIndex(key)) - } - } else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String { - for i := 0; i < value.Len(); i++ { - results = append(results, value.Index(i)) - } - } - if len(results) != 0 { - result = append(result, value) - output, err := j.evalRecursive(results, node) - if err != nil { - return result, err - } - result = append(result, output...) 
- } - } - return result, nil -} - -// evalFilter filter array according to FilterNode -func (j *JSONPath) evalFilter(input []reflect.Value, node *FilterNode) ([]reflect.Value, error) { - results := []reflect.Value{} - for _, value := range input { - value, _ = template.Indirect(value) - - if value.Kind() != reflect.Array && value.Kind() != reflect.Slice { - return input, fmt.Errorf("%v is not array or slice and cannot be filtered", value) - } - for i := 0; i < value.Len(); i++ { - temp := []reflect.Value{value.Index(i)} - lefts, err := j.evalList(temp, node.Left) - - //case exists - if node.Operator == "exists" { - if len(lefts) > 0 { - results = append(results, value.Index(i)) - } - continue - } - - if err != nil { - return input, err - } - - var left, right interface{} - if len(lefts) != 1 { - return input, fmt.Errorf("can only compare one element at a time") - } - left = lefts[0].Interface() - - rights, err := j.evalList(temp, node.Right) - if err != nil { - return input, err - } - if len(rights) != 1 { - return input, fmt.Errorf("can only compare one element at a time") - } - right = rights[0].Interface() - - pass := false - switch node.Operator { - case "<": - pass, err = template.Less(left, right) - case ">": - pass, err = template.Greater(left, right) - case "==": - pass, err = template.Equal(left, right) - case "!=": - pass, err = template.NotEqual(left, right) - case "<=": - pass, err = template.LessEqual(left, right) - case ">=": - pass, err = template.GreaterEqual(left, right) - default: - return results, fmt.Errorf("unrecognized filter operator %s", node.Operator) - } - if err != nil { - return results, err - } - if pass { - results = append(results, value.Index(i)) - } - } - } - return results, nil -} - -// evalToText translates reflect value to corresponding text -func (j *JSONPath) evalToText(v reflect.Value) ([]byte, error) { - iface, ok := template.PrintableValue(v) - if !ok { - return nil, fmt.Errorf("can't print type %s", v.Type()) - } - var buffer bytes.Buffer - fmt.Fprint(&buffer, iface) - return buffer.Bytes(), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/node.go b/vendor/k8s.io/kubernetes/pkg/util/jsonpath/node.go deleted file mode 100644 index ddf015c04..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/node.go +++ /dev/null @@ -1,239 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package jsonpath - -import "fmt" - -// NodeType identifies the type of a parse tree node. 
-type NodeType int - -// Type returns itself and provides an easy default implementation -func (t NodeType) Type() NodeType { - return t -} - -func (t NodeType) String() string { - return NodeTypeName[t] -} - -const ( - NodeText NodeType = iota - NodeArray - NodeList - NodeField - NodeIdentifier - NodeFilter - NodeInt - NodeFloat - NodeWildcard - NodeRecursive - NodeUnion -) - -var NodeTypeName = map[NodeType]string{ - NodeText: "NodeText", - NodeArray: "NodeArray", - NodeList: "NodeList", - NodeField: "NodeField", - NodeIdentifier: "NodeIdentifier", - NodeFilter: "NodeFilter", - NodeInt: "NodeInt", - NodeFloat: "NodeFloat", - NodeWildcard: "NodeWildcard", - NodeRecursive: "NodeRecursive", - NodeUnion: "NodeUnion", -} - -type Node interface { - Type() NodeType - String() string -} - -// ListNode holds a sequence of nodes. -type ListNode struct { - NodeType - Nodes []Node // The element nodes in lexical order. -} - -func newList() *ListNode { - return &ListNode{NodeType: NodeList} -} - -func (l *ListNode) append(n Node) { - l.Nodes = append(l.Nodes, n) -} - -func (l *ListNode) String() string { - return fmt.Sprintf("%s", l.Type()) -} - -// TextNode holds plain text. -type TextNode struct { - NodeType - Text string // The text; may span newlines. -} - -func newText(text string) *TextNode { - return &TextNode{NodeType: NodeText, Text: text} -} - -func (t *TextNode) String() string { - return fmt.Sprintf("%s: %s", t.Type(), t.Text) -} - -// FieldNode holds field of struct -type FieldNode struct { - NodeType - Value string -} - -func newField(value string) *FieldNode { - return &FieldNode{NodeType: NodeField, Value: value} -} - -func (f *FieldNode) String() string { - return fmt.Sprintf("%s: %s", f.Type(), f.Value) -} - -// IdentifierNode holds an identifier -type IdentifierNode struct { - NodeType - Name string -} - -func newIdentifier(value string) *IdentifierNode { - return &IdentifierNode{ - NodeType: NodeIdentifier, - Name: value, - } -} - -func (f *IdentifierNode) String() string { - return fmt.Sprintf("%s: %s", f.Type(), f.Name) -} - -// ParamsEntry holds param information for ArrayNode -type ParamsEntry struct { - Value int - Known bool //whether the value is known when parse it -} - -// ArrayNode holds start, end, step information for array index selection -type ArrayNode struct { - NodeType - Params [3]ParamsEntry //start, end, step -} - -func newArray(params [3]ParamsEntry) *ArrayNode { - return &ArrayNode{ - NodeType: NodeArray, - Params: params, - } -} - -func (a *ArrayNode) String() string { - return fmt.Sprintf("%s: %v", a.Type(), a.Params) -} - -// FilterNode holds operand and operator information for filter -type FilterNode struct { - NodeType - Left *ListNode - Right *ListNode - Operator string -} - -func newFilter(left, right *ListNode, operator string) *FilterNode { - return &FilterNode{ - NodeType: NodeFilter, - Left: left, - Right: right, - Operator: operator, - } -} - -func (f *FilterNode) String() string { - return fmt.Sprintf("%s: %s %s %s", f.Type(), f.Left, f.Operator, f.Right) -} - -// IntNode holds integer value -type IntNode struct { - NodeType - Value int -} - -func newInt(num int) *IntNode { - return &IntNode{NodeType: NodeInt, Value: num} -} - -func (i *IntNode) String() string { - return fmt.Sprintf("%s: %d", i.Type(), i.Value) -} - -// FloatNode holds float value -type FloatNode struct { - NodeType - Value float64 -} - -func newFloat(num float64) *FloatNode { - return &FloatNode{NodeType: NodeFloat, Value: num} -} - -func (i *FloatNode) String() string { - 
return fmt.Sprintf("%s: %f", i.Type(), i.Value) -} - -// WildcardNode means a wildcard -type WildcardNode struct { - NodeType -} - -func newWildcard() *WildcardNode { - return &WildcardNode{NodeType: NodeWildcard} -} - -func (i *WildcardNode) String() string { - return fmt.Sprintf("%s", i.Type()) -} - -// RecursiveNode means a recursive descent operator -type RecursiveNode struct { - NodeType -} - -func newRecursive() *RecursiveNode { - return &RecursiveNode{NodeType: NodeRecursive} -} - -func (r *RecursiveNode) String() string { - return fmt.Sprintf("%s", r.Type()) -} - -// UnionNode is union of ListNode -type UnionNode struct { - NodeType - Nodes []*ListNode -} - -func newUnion(nodes []*ListNode) *UnionNode { - return &UnionNode{NodeType: NodeUnion, Nodes: nodes} -} - -func (u *UnionNode) String() string { - return fmt.Sprintf("%s", u.Type()) -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/parser.go b/vendor/k8s.io/kubernetes/pkg/util/jsonpath/parser.go deleted file mode 100644 index bd1f5ecd4..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/jsonpath/parser.go +++ /dev/null @@ -1,427 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package jsonpath - -import ( - "fmt" - "regexp" - "strconv" - "strings" - "unicode" - "unicode/utf8" -) - -const eof = -1 - -const ( - leftDelim = "{" - rightDelim = "}" -) - -type Parser struct { - Name string - Root *ListNode - input string - cur *ListNode - pos int - start int - width int -} - -// Parse parsed the given text and return a node Parser. -// If an error is encountered, parsing stops and an empty -// Parser is returned with the error -func Parse(name, text string) (*Parser, error) { - p := NewParser(name) - err := p.Parse(text) - if err != nil { - p = nil - } - return p, err -} - -func NewParser(name string) *Parser { - return &Parser{ - Name: name, - } -} - -// parseAction parsed the expression inside delimiter -func parseAction(name, text string) (*Parser, error) { - p, err := Parse(name, fmt.Sprintf("%s%s%s", leftDelim, text, rightDelim)) - // when error happens, p will be nil, so we need to return here - if err != nil { - return p, err - } - p.Root = p.Root.Nodes[0].(*ListNode) - return p, nil -} - -func (p *Parser) Parse(text string) error { - p.input = text - p.Root = newList() - p.pos = 0 - return p.parseText(p.Root) -} - -// consumeText return the parsed text since last cosumeText -func (p *Parser) consumeText() string { - value := p.input[p.start:p.pos] - p.start = p.pos - return value -} - -// next returns the next rune in the input. -func (p *Parser) next() rune { - if int(p.pos) >= len(p.input) { - p.width = 0 - return eof - } - r, w := utf8.DecodeRuneInString(p.input[p.pos:]) - p.width = w - p.pos += p.width - return r -} - -// peek returns but does not consume the next rune in the input. -func (p *Parser) peek() rune { - r := p.next() - p.backup() - return r -} - -// backup steps back one rune. Can only be called once per call of next. 
-func (p *Parser) backup() { - p.pos -= p.width -} - -func (p *Parser) parseText(cur *ListNode) error { - for { - if strings.HasPrefix(p.input[p.pos:], leftDelim) { - if p.pos > p.start { - cur.append(newText(p.consumeText())) - } - return p.parseLeftDelim(cur) - } - if p.next() == eof { - break - } - } - // Correctly reached EOF. - if p.pos > p.start { - cur.append(newText(p.consumeText())) - } - return nil -} - -// parseLeftDelim scans the left delimiter, which is known to be present. -func (p *Parser) parseLeftDelim(cur *ListNode) error { - p.pos += len(leftDelim) - p.consumeText() - newNode := newList() - cur.append(newNode) - cur = newNode - return p.parseInsideAction(cur) -} - -func (p *Parser) parseInsideAction(cur *ListNode) error { - prefixMap := map[string]func(*ListNode) error{ - rightDelim: p.parseRightDelim, - "[?(": p.parseFilter, - "..": p.parseRecursive, - } - for prefix, parseFunc := range prefixMap { - if strings.HasPrefix(p.input[p.pos:], prefix) { - return parseFunc(cur) - } - } - - switch r := p.next(); { - case r == eof || isEndOfLine(r): - return fmt.Errorf("unclosed action") - case r == ' ': - p.consumeText() - case r == '@' || r == '$': //the current object, just pass it - p.consumeText() - case r == '[': - return p.parseArray(cur) - case r == '"': - return p.parseQuote(cur) - case r == '.': - return p.parseField(cur) - case r == '+' || r == '-' || unicode.IsDigit(r): - p.backup() - return p.parseNumber(cur) - case isAlphaNumeric(r): - p.backup() - return p.parseIdentifier(cur) - default: - return fmt.Errorf("unrecognized character in action: %#U", r) - } - return p.parseInsideAction(cur) -} - -// parseRightDelim scans the right delimiter, which is known to be present. -func (p *Parser) parseRightDelim(cur *ListNode) error { - p.pos += len(rightDelim) - p.consumeText() - cur = p.Root - return p.parseText(cur) -} - -// parseIdentifier scans build-in keywords, like "range" "end" -func (p *Parser) parseIdentifier(cur *ListNode) error { - var r rune - for { - r = p.next() - if isTerminator(r) { - p.backup() - break - } - } - value := p.consumeText() - cur.append(newIdentifier(value)) - return p.parseInsideAction(cur) -} - -// parseRecursive scans the recursive desent operator .. -func (p *Parser) parseRecursive(cur *ListNode) error { - p.pos += len("..") - p.consumeText() - cur.append(newRecursive()) - if r := p.peek(); isAlphaNumeric(r) { - return p.parseField(cur) - } - return p.parseInsideAction(cur) -} - -// parseNumber scans number -func (p *Parser) parseNumber(cur *ListNode) error { - r := p.peek() - if r == '+' || r == '-' { - r = p.next() - } - for { - r = p.next() - if r != '.' 
&& !unicode.IsDigit(r) { - p.backup() - break - } - } - value := p.consumeText() - i, err := strconv.Atoi(value) - if err == nil { - cur.append(newInt(i)) - return p.parseInsideAction(cur) - } - d, err := strconv.ParseFloat(value, 64) - if err == nil { - cur.append(newFloat(d)) - return p.parseInsideAction(cur) - } - return fmt.Errorf("cannot parse number %s", value) -} - -// parseArray scans array index selection -func (p *Parser) parseArray(cur *ListNode) error { -Loop: - for { - switch p.next() { - case eof, '\n': - return fmt.Errorf("unterminated array") - case ']': - break Loop - } - } - text := p.consumeText() - text = string(text[1 : len(text)-1]) - if text == "*" { - text = ":" - } - - //union operator - strs := strings.Split(text, ",") - if len(strs) > 1 { - union := []*ListNode{} - for _, str := range strs { - parser, err := parseAction("union", fmt.Sprintf("[%s]", strings.Trim(str, " "))) - if err != nil { - return err - } - union = append(union, parser.Root) - } - cur.append(newUnion(union)) - return p.parseInsideAction(cur) - } - - // dict key - reg := regexp.MustCompile(`^'([^']*)'$`) - value := reg.FindStringSubmatch(text) - if value != nil { - parser, err := parseAction("arraydict", fmt.Sprintf(".%s", value[1])) - if err != nil { - return err - } - for _, node := range parser.Root.Nodes { - cur.append(node) - } - return p.parseInsideAction(cur) - } - - //slice operator - reg = regexp.MustCompile(`^(-?[\d]*)(:-?[\d]*)?(:[\d]*)?$`) - value = reg.FindStringSubmatch(text) - if value == nil { - return fmt.Errorf("invalid array index %s", text) - } - value = value[1:] - params := [3]ParamsEntry{} - for i := 0; i < 3; i++ { - if value[i] != "" { - if i > 0 { - value[i] = value[i][1:] - } - if i > 0 && value[i] == "" { - params[i].Known = false - } else { - var err error - params[i].Known = true - params[i].Value, err = strconv.Atoi(value[i]) - if err != nil { - return fmt.Errorf("array index %s is not a number", value[i]) - } - } - } else { - if i == 1 { - params[i].Known = true - params[i].Value = params[0].Value + 1 - } else { - params[i].Known = false - params[i].Value = 0 - } - } - } - cur.append(newArray(params)) - return p.parseInsideAction(cur) -} - -// parseFilter scans filter inside array selection -func (p *Parser) parseFilter(cur *ListNode) error { - p.pos += len("[?(") - p.consumeText() -Loop: - for { - switch p.next() { - case eof, '\n': - return fmt.Errorf("unterminated filter") - case ')': - break Loop - } - } - if p.next() != ']' { - return fmt.Errorf("unclosed array expect ]") - } - reg := regexp.MustCompile(`^([^!<>=]+)([!<>=]+)(.+?)$`) - text := p.consumeText() - text = string(text[:len(text)-2]) - value := reg.FindStringSubmatch(text) - if value == nil { - parser, err := parseAction("text", text) - if err != nil { - return err - } - cur.append(newFilter(parser.Root, newList(), "exists")) - } else { - leftParser, err := parseAction("left", value[1]) - if err != nil { - return err - } - rightParser, err := parseAction("right", value[3]) - if err != nil { - return err - } - cur.append(newFilter(leftParser.Root, rightParser.Root, value[2])) - } - return p.parseInsideAction(cur) -} - -// parseQuote unquotes string inside double quote -func (p *Parser) parseQuote(cur *ListNode) error { -Loop: - for { - switch p.next() { - case eof, '\n': - return fmt.Errorf("unterminated quoted string") - case '"': - break Loop - } - } - value := p.consumeText() - s, err := strconv.Unquote(value) - if err != nil { - return fmt.Errorf("unquote string %s error %v", value, err) - } - 
cur.append(newText(s)) - return p.parseInsideAction(cur) -} - -// parseField scans a field until a terminator -func (p *Parser) parseField(cur *ListNode) error { - p.consumeText() - var r rune - for { - r = p.next() - if isTerminator(r) { - p.backup() - break - } - } - value := p.consumeText() - if value == "*" { - cur.append(newWildcard()) - } else { - cur.append(newField(value)) - } - return p.parseInsideAction(cur) -} - -// isTerminator reports whether the input is at valid termination character to appear after an identifier. -func isTerminator(r rune) bool { - if isSpace(r) || isEndOfLine(r) { - return true - } - switch r { - case eof, '.', ',', '[', ']', '$', '@', '{', '}': - return true - } - return false -} - -// isSpace reports whether r is a space character. -func isSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -// isEndOfLine reports whether r is an end-of-line character. -func isEndOfLine(r rune) bool { - return r == '\r' || r == '\n' -} - -// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore. -func isAlphaNumeric(r rune) bool { - return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/labels/doc.go b/vendor/k8s.io/kubernetes/pkg/util/labels/doc.go deleted file mode 100644 index 0746d878d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/labels/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package labels provides utilities to work with Kubernetes labels. -package labels diff --git a/vendor/k8s.io/kubernetes/pkg/util/labels/labels.go b/vendor/k8s.io/kubernetes/pkg/util/labels/labels.go deleted file mode 100644 index 624d5ad68..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/labels/labels.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package labels - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api/unversioned" -) - -// Clones the given map and returns a new map with the given key and value added. -// Returns the given map, if labelKey is empty. -func CloneAndAddLabel(labels map[string]string, labelKey string, labelValue uint32) map[string]string { - if labelKey == "" { - // Don't need to add a label. - return labels - } - // Clone. 
- newLabels := map[string]string{} - for key, value := range labels { - newLabels[key] = value - } - newLabels[labelKey] = fmt.Sprintf("%d", labelValue) - return newLabels -} - -// CloneAndRemoveLabel clones the given map and returns a new map with the given key removed. -// Returns the given map, if labelKey is empty. -func CloneAndRemoveLabel(labels map[string]string, labelKey string) map[string]string { - if labelKey == "" { - // Don't need to add a label. - return labels - } - // Clone. - newLabels := map[string]string{} - for key, value := range labels { - newLabels[key] = value - } - delete(newLabels, labelKey) - return newLabels -} - -// AddLabel returns a map with the given key and value added to the given map. -func AddLabel(labels map[string]string, labelKey string, labelValue string) map[string]string { - if labelKey == "" { - // Don't need to add a label. - return labels - } - if labels == nil { - labels = make(map[string]string) - } - labels[labelKey] = labelValue - return labels -} - -// Clones the given selector and returns a new selector with the given key and value added. -// Returns the given selector, if labelKey is empty. -func CloneSelectorAndAddLabel(selector *unversioned.LabelSelector, labelKey string, labelValue uint32) *unversioned.LabelSelector { - if labelKey == "" { - // Don't need to add a label. - return selector - } - - // Clone. - newSelector := new(unversioned.LabelSelector) - - // TODO(madhusudancs): Check if you can use deepCopy_extensions_LabelSelector here. - newSelector.MatchLabels = make(map[string]string) - if selector.MatchLabels != nil { - for key, val := range selector.MatchLabels { - newSelector.MatchLabels[key] = val - } - } - newSelector.MatchLabels[labelKey] = fmt.Sprintf("%d", labelValue) - - if selector.MatchExpressions != nil { - newMExps := make([]unversioned.LabelSelectorRequirement, len(selector.MatchExpressions)) - for i, me := range selector.MatchExpressions { - newMExps[i].Key = me.Key - newMExps[i].Operator = me.Operator - if me.Values != nil { - newMExps[i].Values = make([]string, len(me.Values)) - copy(newMExps[i].Values, me.Values) - } else { - newMExps[i].Values = nil - } - } - newSelector.MatchExpressions = newMExps - } else { - newSelector.MatchExpressions = nil - } - - return newSelector -} - -// AddLabelToSelector returns a selector with the given key and value added to the given selector's MatchLabels. -func AddLabelToSelector(selector *unversioned.LabelSelector, labelKey string, labelValue string) *unversioned.LabelSelector { - if labelKey == "" { - // Don't need to add a label. - return selector - } - if selector.MatchLabels == nil { - selector.MatchLabels = make(map[string]string) - } - selector.MatchLabels[labelKey] = labelValue - return selector -} - -// SelectorHasLabel checks if the given selector contains the given label key in its MatchLabels -func SelectorHasLabel(selector *unversioned.LabelSelector, labelKey string) bool { - return len(selector.MatchLabels[labelKey]) > 0 -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/line_delimiter.go b/vendor/k8s.io/kubernetes/pkg/util/line_delimiter.go deleted file mode 100644 index b48478df8..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/line_delimiter.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "bytes" - "io" - "strings" -) - -// A Line Delimiter is a filter that will -type LineDelimiter struct { - output io.Writer - delimiter []byte - buf bytes.Buffer -} - -// NewLineDelimiter allocates a new io.Writer that will split input on lines -// and bracket each line with the delimiter string. This can be useful in -// output tests where it is difficult to see and test trailing whitespace. -func NewLineDelimiter(output io.Writer, delimiter string) *LineDelimiter { - return &LineDelimiter{output: output, delimiter: []byte(delimiter)} -} - -// Write writes buf to the LineDelimiter ld. The only errors returned are ones -// encountered while writing to the underlying output stream. -func (ld *LineDelimiter) Write(buf []byte) (n int, err error) { - return ld.buf.Write(buf) -} - -// Flush all lines up until now. This will assume insert a linebreak at the current point of the stream. -func (ld *LineDelimiter) Flush() (err error) { - lines := strings.Split(ld.buf.String(), "\n") - for _, line := range lines { - if _, err = ld.output.Write(ld.delimiter); err != nil { - return - } - if _, err = ld.output.Write([]byte(line)); err != nil { - return - } - if _, err = ld.output.Write(ld.delimiter); err != nil { - return - } - if _, err = ld.output.Write([]byte("\n")); err != nil { - return - } - } - return -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/logs.go b/vendor/k8s.io/kubernetes/pkg/util/logs.go deleted file mode 100644 index c79c4903d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/logs.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "flag" - "log" - "time" - - "github.com/golang/glog" - "github.com/spf13/pflag" - "k8s.io/kubernetes/pkg/util/wait" -) - -var logFlushFreq = pflag.Duration("log-flush-frequency", 5*time.Second, "Maximum number of seconds between log flushes") - -// TODO(thockin): This is temporary until we agree on log dirs and put those into each cmd. -func init() { - flag.Set("logtostderr", "true") -} - -// GlogWriter serves as a bridge between the standard log package and the glog package. -type GlogWriter struct{} - -// Write implements the io.Writer interface. -func (writer GlogWriter) Write(data []byte) (n int, err error) { - glog.Info(string(data)) - return len(data), nil -} - -// InitLogs initializes logs the way we want for kubernetes. -func InitLogs() { - log.SetOutput(GlogWriter{}) - log.SetFlags(0) - // The default glog flush interval is 30 seconds, which is frighteningly long. 
- go wait.Until(glog.Flush, *logFlushFreq, wait.NeverStop) -} - -// FlushLogs flushes logs immediately. -func FlushLogs() { - glog.Flush() -} - -// NewLogger creates a new log.Logger which sends logs to glog.Info. -func NewLogger(prefix string) *log.Logger { - return log.New(GlogWriter{}, prefix, 0) -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/net/http.go b/vendor/k8s.io/kubernetes/pkg/util/net/http.go deleted file mode 100644 index 68073776a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/net/http.go +++ /dev/null @@ -1,233 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package net - -import ( - "crypto/tls" - "fmt" - "io" - "net" - "net/http" - "net/url" - "os" - "strconv" - "strings" - - "github.com/golang/glog" - "golang.org/x/net/http2" -) - -// IsProbableEOF returns true if the given error resembles a connection termination -// scenario that would justify assuming that the watch is empty. -// These errors are what the Go http stack returns back to us which are general -// connection closure errors (strongly correlated) and callers that need to -// differentiate probable errors in connection behavior between normal "this is -// disconnected" should use the method. -func IsProbableEOF(err error) bool { - if uerr, ok := err.(*url.Error); ok { - err = uerr.Err - } - switch { - case err == io.EOF: - return true - case err.Error() == "http: can't write HTTP request on broken connection": - return true - case strings.Contains(err.Error(), "connection reset by peer"): - return true - case strings.Contains(strings.ToLower(err.Error()), "use of closed network connection"): - return true - } - return false -} - -var defaultTransport = http.DefaultTransport.(*http.Transport) - -// SetOldTransportDefaults applies the defaults from http.DefaultTransport -// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset -func SetOldTransportDefaults(t *http.Transport) *http.Transport { - if t.Proxy == nil || isDefault(t.Proxy) { - // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings - // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY - t.Proxy = NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) - } - if t.Dial == nil { - t.Dial = defaultTransport.Dial - } - if t.TLSHandshakeTimeout == 0 { - t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout - } - return t -} - -// SetTransportDefaults applies the defaults from http.DefaultTransport -// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset -func SetTransportDefaults(t *http.Transport) *http.Transport { - t = SetOldTransportDefaults(t) - // Allow HTTP2 clients but default off for now - if s := os.Getenv("ENABLE_HTTP2"); len(s) > 0 { - if err := http2.ConfigureTransport(t); err != nil { - glog.Warningf("Transport failed http2 configuration: %v", err) - } - } - return t -} - -type RoundTripperWrapper interface { - http.RoundTripper - WrappedRoundTripper() http.RoundTripper -} - 
-type DialFunc func(net, addr string) (net.Conn, error) - -func Dialer(transport http.RoundTripper) (DialFunc, error) { - if transport == nil { - return nil, nil - } - - switch transport := transport.(type) { - case *http.Transport: - return transport.Dial, nil - case RoundTripperWrapper: - return Dialer(transport.WrappedRoundTripper()) - default: - return nil, fmt.Errorf("unknown transport type: %v", transport) - } -} - -func TLSClientConfig(transport http.RoundTripper) (*tls.Config, error) { - if transport == nil { - return nil, nil - } - - switch transport := transport.(type) { - case *http.Transport: - return transport.TLSClientConfig, nil - case RoundTripperWrapper: - return TLSClientConfig(transport.WrappedRoundTripper()) - default: - return nil, fmt.Errorf("unknown transport type: %v", transport) - } -} - -func FormatURL(scheme string, host string, port int, path string) *url.URL { - return &url.URL{ - Scheme: scheme, - Host: net.JoinHostPort(host, strconv.Itoa(port)), - Path: path, - } -} - -func GetHTTPClient(req *http.Request) string { - if userAgent, ok := req.Header["User-Agent"]; ok { - if len(userAgent) > 0 { - return userAgent[0] - } - } - return "unknown" -} - -// Extracts and returns the clients IP from the given request. -// Looks at X-Forwarded-For header, X-Real-Ip header and request.RemoteAddr in that order. -// Returns nil if none of them are set or is set to an invalid value. -func GetClientIP(req *http.Request) net.IP { - hdr := req.Header - // First check the X-Forwarded-For header for requests via proxy. - hdrForwardedFor := hdr.Get("X-Forwarded-For") - if hdrForwardedFor != "" { - // X-Forwarded-For can be a csv of IPs in case of multiple proxies. - // Use the first valid one. - parts := strings.Split(hdrForwardedFor, ",") - for _, part := range parts { - ip := net.ParseIP(strings.TrimSpace(part)) - if ip != nil { - return ip - } - } - } - - // Try the X-Real-Ip header. - hdrRealIp := hdr.Get("X-Real-Ip") - if hdrRealIp != "" { - ip := net.ParseIP(hdrRealIp) - if ip != nil { - return ip - } - } - - // Fallback to Remote Address in request, which will give the correct client IP when there is no proxy. - // Remote Address in Go's HTTP server is in the form host:port so we need to split that first. - host, _, err := net.SplitHostPort(req.RemoteAddr) - if err == nil { - return net.ParseIP(host) - } - - // Fallback if Remote Address was just IP. 
- return net.ParseIP(req.RemoteAddr) -} - -var defaultProxyFuncPointer = fmt.Sprintf("%p", http.ProxyFromEnvironment) - -// isDefault checks to see if the transportProxierFunc is pointing to the default one -func isDefault(transportProxier func(*http.Request) (*url.URL, error)) bool { - transportProxierPointer := fmt.Sprintf("%p", transportProxier) - return transportProxierPointer == defaultProxyFuncPointer -} - -// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if -// no matching CIDRs are found -func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { - // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it - noProxyEnv := os.Getenv("NO_PROXY") - noProxyRules := strings.Split(noProxyEnv, ",") - - cidrs := []*net.IPNet{} - for _, noProxyRule := range noProxyRules { - _, cidr, _ := net.ParseCIDR(noProxyRule) - if cidr != nil { - cidrs = append(cidrs, cidr) - } - } - - if len(cidrs) == 0 { - return delegate - } - - return func(req *http.Request) (*url.URL, error) { - host := req.URL.Host - // for some urls, the Host is already the host, not the host:port - if net.ParseIP(host) == nil { - var err error - host, _, err = net.SplitHostPort(req.URL.Host) - if err != nil { - return delegate(req) - } - } - - ip := net.ParseIP(host) - if ip == nil { - return delegate(req) - } - - for _, cidr := range cidrs { - if cidr.Contains(ip) { - return nil, nil - } - } - - return delegate(req) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/net/interface.go b/vendor/k8s.io/kubernetes/pkg/util/net/interface.go deleted file mode 100644 index cdf5ddb54..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/net/interface.go +++ /dev/null @@ -1,278 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package net - -import ( - "bufio" - "encoding/hex" - "fmt" - "io" - "net" - "os" - - "strings" - - "github.com/golang/glog" -) - -type Route struct { - Interface string - Destination net.IP - Gateway net.IP - // TODO: add more fields here if needed -} - -func getRoutes(input io.Reader) ([]Route, error) { - routes := []Route{} - if input == nil { - return nil, fmt.Errorf("input is nil") - } - scanner := bufio.NewReader(input) - for { - line, err := scanner.ReadString('\n') - if err == io.EOF { - break - } - //ignore the headers in the route info - if strings.HasPrefix(line, "Iface") { - continue - } - fields := strings.Fields(line) - routes = append(routes, Route{}) - route := &routes[len(routes)-1] - route.Interface = fields[0] - ip, err := parseIP(fields[1]) - if err != nil { - return nil, err - } - route.Destination = ip - ip, err = parseIP(fields[2]) - if err != nil { - return nil, err - } - route.Gateway = ip - } - return routes, nil -} - -func parseIP(str string) (net.IP, error) { - if str == "" { - return nil, fmt.Errorf("input is nil") - } - bytes, err := hex.DecodeString(str) - if err != nil { - return nil, err - } - //TODO add ipv6 support - if len(bytes) != net.IPv4len { - return nil, fmt.Errorf("only IPv4 is supported") - } - bytes[0], bytes[1], bytes[2], bytes[3] = bytes[3], bytes[2], bytes[1], bytes[0] - return net.IP(bytes), nil -} - -func isInterfaceUp(intf *net.Interface) bool { - if intf == nil { - return false - } - if intf.Flags&net.FlagUp != 0 { - glog.V(4).Infof("Interface %v is up", intf.Name) - return true - } - return false -} - -//getFinalIP method receives all the IP addrs of a Interface -//and returns a nil if the address is Loopback, Ipv6, link-local or nil. -//It returns a valid IPv4 if an Ipv4 address is found in the array. 
-func getFinalIP(addrs []net.Addr) (net.IP, error) { - if len(addrs) > 0 { - for i := range addrs { - glog.V(4).Infof("Checking addr %s.", addrs[i].String()) - ip, _, err := net.ParseCIDR(addrs[i].String()) - if err != nil { - return nil, err - } - //Only IPv4 - //TODO : add IPv6 support - if ip.To4() != nil { - if !ip.IsLoopback() && !ip.IsLinkLocalMulticast() && !ip.IsLinkLocalUnicast() { - glog.V(4).Infof("IP found %v", ip) - return ip, nil - } else { - glog.V(4).Infof("Loopback/link-local found %v", ip) - } - } else { - glog.V(4).Infof("%v is not a valid IPv4 address", ip) - } - - } - } - return nil, nil -} - -func getIPFromInterface(intfName string, nw networkInterfacer) (net.IP, error) { - intf, err := nw.InterfaceByName(intfName) - if err != nil { - return nil, err - } - if isInterfaceUp(intf) { - addrs, err := nw.Addrs(intf) - if err != nil { - return nil, err - } - glog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs) - finalIP, err := getFinalIP(addrs) - if err != nil { - return nil, err - } - if finalIP != nil { - glog.V(4).Infof("valid IPv4 address for interface %q found as %v.", intfName, finalIP) - return finalIP, nil - } - } - - return nil, nil -} - -func flagsSet(flags net.Flags, test net.Flags) bool { - return flags&test != 0 -} - -func flagsClear(flags net.Flags, test net.Flags) bool { - return flags&test == 0 -} - -func chooseHostInterfaceNativeGo() (net.IP, error) { - intfs, err := net.Interfaces() - if err != nil { - return nil, err - } - i := 0 - var ip net.IP - for i = range intfs { - if flagsSet(intfs[i].Flags, net.FlagUp) && flagsClear(intfs[i].Flags, net.FlagLoopback|net.FlagPointToPoint) { - addrs, err := intfs[i].Addrs() - if err != nil { - return nil, err - } - if len(addrs) > 0 { - for _, addr := range addrs { - if addrIP, _, err := net.ParseCIDR(addr.String()); err == nil { - if addrIP.To4() != nil { - ip = addrIP.To4() - if !ip.IsLinkLocalMulticast() && !ip.IsLinkLocalUnicast() { - break - } - } - } - } - if ip != nil { - // This interface should suffice. - break - } - } - } - } - if ip == nil { - return nil, fmt.Errorf("no acceptable interface from host") - } - glog.V(4).Infof("Choosing interface %s (IP %v) as default", intfs[i].Name, ip) - return ip, nil -} - -//ChooseHostInterface is a method used fetch an IP for a daemon. -//It uses data from /proc/net/route file. -//For a node with no internet connection ,it returns error -//For a multi n/w interface node it returns the IP of the interface with gateway on it. 
-func ChooseHostInterface() (net.IP, error) { - inFile, err := os.Open("/proc/net/route") - if err != nil { - if os.IsNotExist(err) { - return chooseHostInterfaceNativeGo() - } - return nil, err - } - defer inFile.Close() - var nw networkInterfacer = networkInterface{} - return chooseHostInterfaceFromRoute(inFile, nw) -} - -type networkInterfacer interface { - InterfaceByName(intfName string) (*net.Interface, error) - Addrs(intf *net.Interface) ([]net.Addr, error) -} - -type networkInterface struct{} - -func (_ networkInterface) InterfaceByName(intfName string) (*net.Interface, error) { - intf, err := net.InterfaceByName(intfName) - if err != nil { - return nil, err - } - return intf, nil -} - -func (_ networkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) { - addrs, err := intf.Addrs() - if err != nil { - return nil, err - } - return addrs, nil -} - -func chooseHostInterfaceFromRoute(inFile io.Reader, nw networkInterfacer) (net.IP, error) { - routes, err := getRoutes(inFile) - if err != nil { - return nil, err - } - zero := net.IP{0, 0, 0, 0} - var finalIP net.IP - for i := range routes { - //find interface with gateway - if routes[i].Destination.Equal(zero) { - glog.V(4).Infof("Default route transits interface %q", routes[i].Interface) - finalIP, err := getIPFromInterface(routes[i].Interface, nw) - if err != nil { - return nil, err - } - if finalIP != nil { - glog.V(4).Infof("Choosing IP %v ", finalIP) - return finalIP, nil - } - } - } - glog.V(4).Infof("No valid IP found") - if finalIP == nil { - return nil, fmt.Errorf("Unable to select an IP.") - } - return nil, nil -} - -// If bind-address is usable, return it directly -// If bind-address is not usable (unset, 0.0.0.0, or loopback), we will use the host's default -// interface. -func ChooseBindAddress(bindAddress net.IP) (net.IP, error) { - if bindAddress == nil || bindAddress.IsUnspecified() || bindAddress.IsLoopback() { - hostIP, err := ChooseHostInterface() - if err != nil { - return nil, err - } - bindAddress = hostIP - } - return bindAddress, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/net/sets/README.md b/vendor/k8s.io/kubernetes/pkg/util/net/sets/README.md deleted file mode 100644 index b0f238a26..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/net/sets/README.md +++ /dev/null @@ -1,17 +0,0 @@ -This package contains hand-coded set implementations that should be similar to -the autogenerated ones in `pkg/util/sets`. - -We can't simply use net.IPNet as a map-key in Go (because it contains a -`[]byte`). - -We could use the same workaround we use here (a string representation as the -key) to autogenerate sets. If we do that, or decide on an alternate approach, -we should replace the implementations in this package with the autogenerated -versions. - -It is expected that callers will alias this import as `netsets` -i.e. `import netsets "k8s.io/kubernetes/pkg/util/net/sets"` - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/pkg/util/net/sets/README.md?pixel)]() diff --git a/vendor/k8s.io/kubernetes/pkg/util/net/sets/ipnet.go b/vendor/k8s.io/kubernetes/pkg/util/net/sets/ipnet.go deleted file mode 100644 index db117f63e..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/net/sets/ipnet.go +++ /dev/null @@ -1,119 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sets - -import ( - "net" - "strings" -) - -type IPNet map[string]*net.IPNet - -func ParseIPNets(specs ...string) (IPNet, error) { - ipnetset := make(IPNet) - for _, spec := range specs { - spec = strings.TrimSpace(spec) - _, ipnet, err := net.ParseCIDR(spec) - if err != nil { - return nil, err - } - k := ipnet.String() // In case of normalization - ipnetset[k] = ipnet - } - return ipnetset, nil -} - -// Insert adds items to the set. -func (s IPNet) Insert(items ...*net.IPNet) { - for _, item := range items { - s[item.String()] = item - } -} - -// Delete removes all items from the set. -func (s IPNet) Delete(items ...*net.IPNet) { - for _, item := range items { - delete(s, item.String()) - } -} - -// Has returns true if and only if item is contained in the set. -func (s IPNet) Has(item *net.IPNet) bool { - _, contained := s[item.String()] - return contained -} - -// HasAll returns true if and only if all items are contained in the set. -func (s IPNet) HasAll(items ...*net.IPNet) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true -} - -// Difference returns a set of objects that are not in s2 -// For example: -// s1 = {a1, a2, a3} -// s2 = {a1, a2, a4, a5} -// s1.Difference(s2) = {a3} -// s2.Difference(s1) = {a4, a5} -func (s IPNet) Difference(s2 IPNet) IPNet { - result := make(IPNet) - for k, i := range s { - _, found := s2[k] - if found { - continue - } - result[k] = i - } - return result -} - -// StringSlice returns a []string with the String representation of each element in the set. -// Order is undefined. -func (s IPNet) StringSlice() []string { - a := make([]string, 0, len(s)) - for k := range s { - a = append(a, k) - } - return a -} - -// IsSuperset returns true if and only if s1 is a superset of s2. -func (s1 IPNet) IsSuperset(s2 IPNet) bool { - for k := range s2 { - _, found := s1[k] - if !found { - return false - } - } - return true -} - -// Equal returns true if and only if s1 is equal (as a set) to s2. -// Two sets are equal if their membership is identical. -// (In practice, this means same elements, order doesn't matter) -func (s1 IPNet) Equal(s2 IPNet) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) -} - -// Len returns the size of the set. -func (s IPNet) Len() int { - return len(s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/net/util.go b/vendor/k8s.io/kubernetes/pkg/util/net/util.go deleted file mode 100644 index 92d5d0b98..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/net/util.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package net - -import ( - "net" - "reflect" -) - -// IPNetEqual checks if the two input IPNets are representing the same subnet. -// For example, -// 10.0.0.1/24 and 10.0.0.0/24 are the same subnet. -// 10.0.0.1/24 and 10.0.0.0/25 are not the same subnet. -func IPNetEqual(ipnet1, ipnet2 *net.IPNet) bool { - if ipnet1 == nil || ipnet2 == nil { - return false - } - if reflect.DeepEqual(ipnet1.Mask, ipnet2.Mask) && ipnet1.Contains(ipnet2.IP) && ipnet2.Contains(ipnet1.IP) { - return true - } - return false -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/pod/doc.go b/vendor/k8s.io/kubernetes/pkg/util/pod/doc.go deleted file mode 100644 index 3bad7d0b7..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/pod/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package pod provides utilities to work with Kubernetes pod and pod templates. -package pod diff --git a/vendor/k8s.io/kubernetes/pkg/util/pod/pod.go b/vendor/k8s.io/kubernetes/pkg/util/pod/pod.go deleted file mode 100644 index 9c57aaebb..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/pod/pod.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package pod - -import ( - "fmt" - "hash/adler32" - "time" - - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" - errorsutil "k8s.io/kubernetes/pkg/util/errors" - hashutil "k8s.io/kubernetes/pkg/util/hash" - "k8s.io/kubernetes/pkg/util/wait" -) - -func GetPodTemplateSpecHash(template api.PodTemplateSpec) uint32 { - podTemplateSpecHasher := adler32.New() - hashutil.DeepHashObject(podTemplateSpecHasher, template) - return podTemplateSpecHasher.Sum32() -} - -// TODO: use client library instead when it starts to support update retries -// see https://github.com/kubernetes/kubernetes/issues/21479 -type updatePodFunc func(pod *api.Pod) error - -// UpdatePodWithRetries updates a pod with given applyUpdate function. Note that pod not found error is ignored. -// The returned bool value can be used to tell if the pod is actually updated. 
-func UpdatePodWithRetries(podClient unversionedcore.PodInterface, pod *api.Pod, applyUpdate updatePodFunc) (*api.Pod, bool, error) { - var err error - var podUpdated bool - oldPod := pod - if err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { - pod, err = podClient.Get(oldPod.Name) - if err != nil { - return false, err - } - // Apply the update, then attempt to push it to the apiserver. - if err = applyUpdate(pod); err != nil { - return false, err - } - if pod, err = podClient.Update(pod); err == nil { - // Update successful. - return true, nil - } - // TODO: don't retry on perm-failed errors and handle them gracefully - // Update could have failed due to conflict error. Try again. - return false, nil - }); err == nil { - // When there's no error, we've updated this pod. - podUpdated = true - } - - // Handle returned error from wait poll - if err == wait.ErrWaitTimeout { - err = fmt.Errorf("timed out trying to update pod: %+v", oldPod) - } - // Ignore the pod not found error, but the pod isn't updated. - if errors.IsNotFound(err) { - glog.V(4).Infof("%s %s/%s is not found, skip updating it.", oldPod.Kind, oldPod.Namespace, oldPod.Name) - err = nil - } - // Ignore the precondition violated error, but the pod isn't updated. - if err == errorsutil.ErrPreconditionViolated { - glog.V(4).Infof("%s %s/%s precondition doesn't hold, skip updating it.", oldPod.Kind, oldPod.Namespace, oldPod.Name) - err = nil - } - - // If the error is non-nil the returned pod cannot be trusted; if podUpdated is false, the pod isn't updated; - // if the error is nil and podUpdated is true, the returned pod contains the applied update. - return pod, podUpdated, err -} - -// Filter uses the input function f to filter the given pod list, and return the filtered pods -func Filter(podList *api.PodList, f func(api.Pod) bool) []api.Pod { - pods := make([]api.Pod, 0) - for _, p := range podList.Items { - if f(p) { - pods = append(pods, p) - } - } - return pods -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/replicaset/replicaset.go b/vendor/k8s.io/kubernetes/pkg/util/replicaset/replicaset.go deleted file mode 100644 index e5dd26517..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/replicaset/replicaset.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package replicaset - -import ( - "fmt" - "time" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/extensions" - unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned" - "k8s.io/kubernetes/pkg/labels" - errorsutil "k8s.io/kubernetes/pkg/util/errors" - labelsutil "k8s.io/kubernetes/pkg/util/labels" - podutil "k8s.io/kubernetes/pkg/util/pod" - "k8s.io/kubernetes/pkg/util/wait" -) - -// TODO: use client library instead when it starts to support update retries -// see https://github.com/kubernetes/kubernetes/issues/21479 -type updateRSFunc func(rs *extensions.ReplicaSet) error - -// UpdateRSWithRetries updates a RS with given applyUpdate function. Note that RS not found error is ignored. -// The returned bool value can be used to tell if the RS is actually updated. -func UpdateRSWithRetries(rsClient unversionedextensions.ReplicaSetInterface, rs *extensions.ReplicaSet, applyUpdate updateRSFunc) (*extensions.ReplicaSet, bool, error) { - var err error - var rsUpdated bool - oldRs := rs - if err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) { - rs, err = rsClient.Get(oldRs.Name) - if err != nil { - return false, err - } - // Apply the update, then attempt to push it to the apiserver. - if err = applyUpdate(rs); err != nil { - return false, err - } - if rs, err = rsClient.Update(rs); err == nil { - // Update successful. - return true, nil - } - // TODO: don't retry on perm-failed errors and handle them gracefully - // Update could have failed due to conflict error. Try again. - return false, nil - }); err == nil { - // When there's no error, we've updated this RS. - rsUpdated = true - } - - // Handle returned error from wait poll - if err == wait.ErrWaitTimeout { - err = fmt.Errorf("timed out trying to update RS: %+v", oldRs) - } - // Ignore the RS not found error, but the RS isn't updated. - if errors.IsNotFound(err) { - glog.V(4).Infof("%s %s/%s is not found, skip updating it.", oldRs.Kind, oldRs.Namespace, oldRs.Name) - err = nil - } - // Ignore the precondition violated error, but the RS isn't updated. - if err == errorsutil.ErrPreconditionViolated { - glog.V(4).Infof("%s %s/%s precondition doesn't hold, skip updating it.", oldRs.Kind, oldRs.Namespace, oldRs.Name) - err = nil - } - - // If the error is non-nil the returned RS cannot be trusted; if rsUpdated is false, the contoller isn't updated; - // if the error is nil and rsUpdated is true, the returned RS contains the applied update. 
- return rs, rsUpdated, err -} - -// GetPodTemplateSpecHash returns the pod template hash of a ReplicaSet's pod template space -func GetPodTemplateSpecHash(rs extensions.ReplicaSet) string { - meta := rs.Spec.Template.ObjectMeta - meta.Labels = labelsutil.CloneAndRemoveLabel(meta.Labels, extensions.DefaultDeploymentUniqueLabelKey) - return fmt.Sprintf("%d", podutil.GetPodTemplateSpecHash(api.PodTemplateSpec{ - ObjectMeta: meta, - Spec: rs.Spec.Template.Spec, - })) -} - -// MatchingPodsFunc returns a filter function for pods with matching labels -func MatchingPodsFunc(rs *extensions.ReplicaSet) (func(api.Pod) bool, error) { - if rs == nil { - return nil, nil - } - selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector) - if err != nil { - return nil, fmt.Errorf("invalid label selector: %v", err) - } - return func(pod api.Pod) bool { - podLabelsSelector := labels.Set(pod.ObjectMeta.Labels) - return selector.Matches(podLabelsSelector) - }, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/resource_container_linux.go b/vendor/k8s.io/kubernetes/pkg/util/resource_container_linux.go deleted file mode 100644 index 8d166045e..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/resource_container_linux.go +++ /dev/null @@ -1,49 +0,0 @@ -// +build linux - -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "os" - "syscall" - - "github.com/opencontainers/runc/libcontainer/cgroups/fs" - "github.com/opencontainers/runc/libcontainer/configs" -) - -// Creates resource-only containerName if it does not already exist and moves -// the current process to it. -// -// containerName must be an absolute container name. -func RunInResourceContainer(containerName string) error { - manager := fs.Manager{ - Cgroups: &configs.Cgroup{ - Parent: "/", - Name: containerName, - Resources: &configs.Resources{ - AllowAllDevices: true, - }, - }, - } - - return manager.Apply(os.Getpid()) -} - -func ApplyRLimitForSelf(maxOpenFiles uint64) { - syscall.Setrlimit(syscall.RLIMIT_NOFILE, &syscall.Rlimit{Max: maxOpenFiles, Cur: maxOpenFiles}) -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/resource_container_unsupported.go b/vendor/k8s.io/kubernetes/pkg/util/resource_container_unsupported.go deleted file mode 100644 index a8ee51927..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/resource_container_unsupported.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build !linux - -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "errors" -) - -func RunInResourceContainer(containerName string) error { - return errors.New("resource-only containers unsupported in this platform") -} - -func ApplyRLimitForSelf(maxOpenFiles uint64) error { - return errors.New("SetRLimit unsupported in this platform") -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/runner.go b/vendor/k8s.io/kubernetes/pkg/util/runner.go deleted file mode 100644 index 60645cf0e..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/runner.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "sync" -) - -// Runner is an abstraction to make it easy to start and stop groups of things that can be -// described by a single function which waits on a channel close to exit. -type Runner struct { - lock sync.Mutex - loopFuncs []func(stop chan struct{}) - stop *chan struct{} -} - -// NewRunner makes a runner for the given function(s). The function(s) should loop until -// the channel is closed. -func NewRunner(f ...func(stop chan struct{})) *Runner { - return &Runner{loopFuncs: f} -} - -// Start begins running. -func (r *Runner) Start() { - r.lock.Lock() - defer r.lock.Unlock() - if r.stop == nil { - c := make(chan struct{}) - r.stop = &c - for i := range r.loopFuncs { - go r.loopFuncs[i](*r.stop) - } - } -} - -// Stop stops running. -func (r *Runner) Stop() { - r.lock.Lock() - defer r.lock.Unlock() - if r.stop != nil { - close(*r.stop) - r.stop = nil - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/runtime/runtime.go b/vendor/k8s.io/kubernetes/pkg/util/runtime/runtime.go deleted file mode 100644 index 641846a2d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/runtime/runtime.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -import ( - "fmt" - "runtime" - - "github.com/golang/glog" -) - -// For testing, bypass HandleCrash. -var ReallyCrash bool - -// PanicHandlers is a list of functions which will be invoked when a panic happens. -var PanicHandlers = []func(interface{}){logPanic} - -//TODO search the public functions -// HandleCrash simply catches a crash and logs an error. Meant to be called via defer. 
-// Additional context-specific handlers can be provided, and will be called in case of panic -func HandleCrash(additionalHandlers ...func(interface{})) { - if ReallyCrash { - return - } - if r := recover(); r != nil { - for _, fn := range PanicHandlers { - fn(r) - } - for _, fn := range additionalHandlers { - fn(r) - } - } -} - -// logPanic logs the caller tree when a panic occurs. -func logPanic(r interface{}) { - callers := getCallers(r) - glog.Errorf("Recovered from panic: %#v (%v)\n%v", r, r, callers) -} - -func getCallers(r interface{}) string { - callers := "" - for i := 0; true; i++ { - _, file, line, ok := runtime.Caller(i) - if !ok { - break - } - callers = callers + fmt.Sprintf("%v:%v\n", file, line) - } - - return callers -} - -// ErrorHandlers is a list of functions which will be invoked when an unreturnable -// error occurs. -var ErrorHandlers = []func(error){logError} - -// HandlerError is a method to invoke when a non-user facing piece of code cannot -// return an error and needs to indicate it has been ignored. Invoking this method -// is preferable to logging the error - the default behavior is to log but the -// errors may be sent to a remote server for analysis. -func HandleError(err error) { - // this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead - if err == nil { - return - } - - for _, fn := range ErrorHandlers { - fn(err) - } -} - -// logError prints an error with the call stack of the location it was reported -func logError(err error) { - glog.ErrorDepth(2, err) -} - -// GetCaller returns the caller of the function that calls it. -func GetCaller() string { - var pc [1]uintptr - runtime.Callers(3, pc[:]) - f := runtime.FuncForPC(pc[0]) - if f == nil { - return fmt.Sprintf("Unable to find caller") - } - return f.Name() -} - -// RecoverFromPanic replaces the specified error with an error containing the -// original error, and the call tree when a panic occurs. This enables error -// handlers to handle errors and panics the same way. -func RecoverFromPanic(err *error) { - if r := recover(); r != nil { - callers := getCallers(r) - - *err = fmt.Errorf( - "recovered from panic %q. (err=%v) Call stack:\n%v", - r, - *err, - callers) - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/sets/doc.go b/vendor/k8s.io/kubernetes/pkg/util/sets/doc.go deleted file mode 100644 index a27cb62f5..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/sets/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by set-gen. Do not edit it manually! - -// Package sets has auto-generated set types. -package sets diff --git a/vendor/k8s.io/kubernetes/pkg/util/slice/slice.go b/vendor/k8s.io/kubernetes/pkg/util/slice/slice.go deleted file mode 100644 index f32dbabcf..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/slice/slice.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package slice provides utility methods for common operations on slices. -package slice - -import ( - "sort" - - utilrand "k8s.io/kubernetes/pkg/util/rand" -) - -// CopyStrings copies the contents of the specified string slice -// into a new slice. -func CopyStrings(s []string) []string { - c := make([]string, len(s)) - copy(c, s) - return c -} - -// SortStrings sorts the specified string slice in place. It returns the same -// slice that was provided in order to facilitate method chaining. -func SortStrings(s []string) []string { - sort.Strings(s) - return s -} - -// ShuffleStrings copies strings from the specified slice into a copy in random -// order. It returns a new slice. -func ShuffleStrings(s []string) []string { - shuffled := make([]string, len(s)) - perm := utilrand.Perm(len(s)) - for i, j := range perm { - shuffled[j] = s[i] - } - return shuffled -} - -// Int64Slice attaches the methods of Interface to []int64, -// sorting in increasing order. -type Int64Slice []int64 - -func (p Int64Slice) Len() int { return len(p) } -func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// Sorts []int64 in increasing order -func SortInts64(a []int64) { sort.Sort(Int64Slice(a)) } diff --git a/vendor/k8s.io/kubernetes/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/kubernetes/pkg/util/strategicpatch/patch.go deleted file mode 100644 index 676713bc8..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/strategicpatch/patch.go +++ /dev/null @@ -1,1243 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package strategicpatch - -import ( - "fmt" - "reflect" - "sort" - - "k8s.io/kubernetes/pkg/util/json" - forkedjson "k8s.io/kubernetes/third_party/forked/json" - - "github.com/davecgh/go-spew/spew" - "github.com/ghodss/yaml" -) - -// An alternate implementation of JSON Merge Patch -// (https://tools.ietf.org/html/rfc7386) which supports the ability to annotate -// certain fields with metadata that indicates whether the elements of JSON -// lists should be merged or replaced. -// -// For more information, see the PATCH section of docs/devel/api-conventions.md. -// -// Some of the content of this package was borrowed with minor adaptations from -// evanphx/json-patch and openshift/origin. 
- -const ( - directiveMarker = "$patch" - deleteDirective = "delete" - replaceDirective = "replace" - mergeDirective = "merge" -) - -// IsPreconditionFailed returns true if the provided error indicates -// a precondition failed. -func IsPreconditionFailed(err error) bool { - _, ok := err.(errPreconditionFailed) - return ok -} - -type errPreconditionFailed struct { - message string -} - -func newErrPreconditionFailed(target map[string]interface{}) errPreconditionFailed { - s := fmt.Sprintf("precondition failed for: %v", target) - return errPreconditionFailed{s} -} - -func (err errPreconditionFailed) Error() string { - return err.message -} - -type errConflict struct { - message string -} - -func newErrConflict(patch, current string) errConflict { - s := fmt.Sprintf("patch:\n%s\nconflicts with changes made from original to current:\n%s\n", patch, current) - return errConflict{s} -} - -func (err errConflict) Error() string { - return err.message -} - -// IsConflict returns true if the provided error indicates -// a conflict between the patch and the current configuration. -func IsConflict(err error) bool { - _, ok := err.(errConflict) - return ok -} - -var errBadJSONDoc = fmt.Errorf("Invalid JSON document") -var errNoListOfLists = fmt.Errorf("Lists of lists are not supported") - -// The following code is adapted from github.com/openshift/origin/pkg/util/jsonmerge. -// Instead of defining a Delta that holds an original, a patch and a set of preconditions, -// the reconcile method accepts a set of preconditions as an argument. - -// PreconditionFunc asserts that an incompatible change is not present within a patch. -type PreconditionFunc func(interface{}) bool - -// RequireKeyUnchanged returns a precondition function that fails if the provided key -// is present in the patch (indicating that its value has changed). -func RequireKeyUnchanged(key string) PreconditionFunc { - return func(patch interface{}) bool { - patchMap, ok := patch.(map[string]interface{}) - if !ok { - return true - } - - // The presence of key means that its value has been changed, so the test fails. - _, ok = patchMap[key] - return !ok - } -} - -// Deprecated: Use the synonym CreateTwoWayMergePatch, instead. -func CreateStrategicMergePatch(original, modified []byte, dataStruct interface{}) ([]byte, error) { - return CreateTwoWayMergePatch(original, modified, dataStruct) -} - -// CreateTwoWayMergePatch creates a patch that can be passed to StrategicMergePatch from an original -// document and a modified document, which are passed to the method as json encoded content. It will -// return a patch that yields the modified document when applied to the original document, or an error -// if either of the two documents is invalid. -func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, fns ...PreconditionFunc) ([]byte, error) { - originalMap := map[string]interface{}{} - if len(original) > 0 { - if err := json.Unmarshal(original, &originalMap); err != nil { - return nil, errBadJSONDoc - } - } - - modifiedMap := map[string]interface{}{} - if len(modified) > 0 { - if err := json.Unmarshal(modified, &modifiedMap); err != nil { - return nil, errBadJSONDoc - } - } - - t, err := getTagStructType(dataStruct) - if err != nil { - return nil, err - } - - patchMap, err := diffMaps(originalMap, modifiedMap, t, false, false) - if err != nil { - return nil, err - } - - // Apply the preconditions to the patch, and return an error if any of them fail. 
- for _, fn := range fns { - if !fn(patchMap) { - return nil, newErrPreconditionFailed(patchMap) - } - } - - return json.Marshal(patchMap) -} - -// Returns a (recursive) strategic merge patch that yields modified when applied to original. -func diffMaps(original, modified map[string]interface{}, t reflect.Type, ignoreChangesAndAdditions, ignoreDeletions bool) (map[string]interface{}, error) { - patch := map[string]interface{}{} - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - - for key, modifiedValue := range modified { - originalValue, ok := original[key] - if !ok { - // Key was added, so add to patch - if !ignoreChangesAndAdditions { - patch[key] = modifiedValue - } - - continue - } - - if key == directiveMarker { - originalString, ok := originalValue.(string) - if !ok { - return nil, fmt.Errorf("invalid value for special key: %s", directiveMarker) - } - - modifiedString, ok := modifiedValue.(string) - if !ok { - return nil, fmt.Errorf("invalid value for special key: %s", directiveMarker) - } - - if modifiedString != originalString { - patch[directiveMarker] = modifiedValue - } - - continue - } - - if reflect.TypeOf(originalValue) != reflect.TypeOf(modifiedValue) { - // Types have changed, so add to patch - if !ignoreChangesAndAdditions { - patch[key] = modifiedValue - } - - continue - } - - // Types are the same, so compare values - switch originalValueTyped := originalValue.(type) { - case map[string]interface{}: - modifiedValueTyped := modifiedValue.(map[string]interface{}) - fieldType, _, _, err := forkedjson.LookupPatchMetadata(t, key) - if err != nil { - return nil, err - } - - patchValue, err := diffMaps(originalValueTyped, modifiedValueTyped, fieldType, ignoreChangesAndAdditions, ignoreDeletions) - if err != nil { - return nil, err - } - - if len(patchValue) > 0 { - patch[key] = patchValue - } - - continue - case []interface{}: - modifiedValueTyped := modifiedValue.([]interface{}) - fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, key) - if err != nil { - return nil, err - } - - if fieldPatchStrategy == mergeDirective { - patchValue, err := diffLists(originalValueTyped, modifiedValueTyped, fieldType.Elem(), fieldPatchMergeKey, ignoreChangesAndAdditions, ignoreDeletions) - if err != nil { - return nil, err - } - - if len(patchValue) > 0 { - patch[key] = patchValue - } - - continue - } - } - - if !ignoreChangesAndAdditions { - if !reflect.DeepEqual(originalValue, modifiedValue) { - // Values are different, so add to patch - patch[key] = modifiedValue - } - } - } - - if !ignoreDeletions { - // Add nils for deleted values - for key := range original { - _, found := modified[key] - if !found { - patch[key] = nil - } - } - } - - return patch, nil -} - -// Returns a (recursive) strategic merge patch that yields modified when applied to original, -// for a pair of lists with merge semantics. 
-func diffLists(original, modified []interface{}, t reflect.Type, mergeKey string, ignoreChangesAndAdditions, ignoreDeletions bool) ([]interface{}, error) { - if len(original) == 0 { - if len(modified) == 0 || ignoreChangesAndAdditions { - return nil, nil - } - - return modified, nil - } - - elementType, err := sliceElementType(original, modified) - if err != nil { - return nil, err - } - - var patch []interface{} - - if elementType.Kind() == reflect.Map { - patch, err = diffListsOfMaps(original, modified, t, mergeKey, ignoreChangesAndAdditions, ignoreDeletions) - } else if !ignoreChangesAndAdditions { - patch, err = diffListsOfScalars(original, modified) - } - - if err != nil { - return nil, err - } - - return patch, nil -} - -// Returns a (recursive) strategic merge patch that yields modified when applied to original, -// for a pair of lists of scalars with merge semantics. -func diffListsOfScalars(original, modified []interface{}) ([]interface{}, error) { - if len(modified) == 0 { - // There is no need to check the length of original because there is no way to create - // a patch that deletes a scalar from a list of scalars with merge semantics. - return nil, nil - } - - patch := []interface{}{} - - originalScalars := uniqifyAndSortScalars(original) - modifiedScalars := uniqifyAndSortScalars(modified) - originalIndex, modifiedIndex := 0, 0 - -loopB: - for ; modifiedIndex < len(modifiedScalars); modifiedIndex++ { - for ; originalIndex < len(originalScalars); originalIndex++ { - originalString := fmt.Sprintf("%v", original[originalIndex]) - modifiedString := fmt.Sprintf("%v", modified[modifiedIndex]) - if originalString >= modifiedString { - if originalString != modifiedString { - patch = append(patch, modified[modifiedIndex]) - } - - continue loopB - } - // There is no else clause because there is no way to create a patch that deletes - // a scalar from a list of scalars with merge semantics. - } - - break - } - - // Add any remaining items found only in modified - for ; modifiedIndex < len(modifiedScalars); modifiedIndex++ { - patch = append(patch, modified[modifiedIndex]) - } - - return patch, nil -} - -var errNoMergeKeyFmt = "map: %v does not contain declared merge key: %s" -var errBadArgTypeFmt = "expected a %s, but received a %s" - -// Returns a (recursive) strategic merge patch that yields modified when applied to original, -// for a pair of lists of maps with merge semantics. 
-func diffListsOfMaps(original, modified []interface{}, t reflect.Type, mergeKey string, ignoreChangesAndAdditions, ignoreDeletions bool) ([]interface{}, error) { - patch := make([]interface{}, 0) - - originalSorted, err := sortMergeListsByNameArray(original, t, mergeKey, false) - if err != nil { - return nil, err - } - - modifiedSorted, err := sortMergeListsByNameArray(modified, t, mergeKey, false) - if err != nil { - return nil, err - } - - originalIndex, modifiedIndex := 0, 0 - -loopB: - for ; modifiedIndex < len(modifiedSorted); modifiedIndex++ { - modifiedMap, ok := modifiedSorted[modifiedIndex].(map[string]interface{}) - if !ok { - t := reflect.TypeOf(modifiedSorted[modifiedIndex]) - return nil, fmt.Errorf(errBadArgTypeFmt, "map[string]interface{}", t.Kind().String()) - } - - modifiedValue, ok := modifiedMap[mergeKey] - if !ok { - return nil, fmt.Errorf(errNoMergeKeyFmt, modifiedMap, mergeKey) - } - - for ; originalIndex < len(originalSorted); originalIndex++ { - originalMap, ok := originalSorted[originalIndex].(map[string]interface{}) - if !ok { - t := reflect.TypeOf(originalSorted[originalIndex]) - return nil, fmt.Errorf(errBadArgTypeFmt, "map[string]interface{}", t.Kind().String()) - } - - originalValue, ok := originalMap[mergeKey] - if !ok { - return nil, fmt.Errorf(errNoMergeKeyFmt, originalMap, mergeKey) - } - - // Assume that the merge key values are comparable strings - originalString := fmt.Sprintf("%v", originalValue) - modifiedString := fmt.Sprintf("%v", modifiedValue) - if originalString >= modifiedString { - if originalString == modifiedString { - // Merge key values are equal, so recurse - patchValue, err := diffMaps(originalMap, modifiedMap, t, ignoreChangesAndAdditions, ignoreDeletions) - if err != nil { - return nil, err - } - - originalIndex++ - if len(patchValue) > 0 { - patchValue[mergeKey] = modifiedValue - patch = append(patch, patchValue) - } - } else if !ignoreChangesAndAdditions { - // Item was added, so add to patch - patch = append(patch, modifiedMap) - } - - continue loopB - } - - if !ignoreDeletions { - // Item was deleted, so add delete directive - patch = append(patch, map[string]interface{}{mergeKey: originalValue, directiveMarker: deleteDirective}) - } - } - - break - } - - if !ignoreDeletions { - // Delete any remaining items found only in original - for ; originalIndex < len(originalSorted); originalIndex++ { - originalMap, ok := originalSorted[originalIndex].(map[string]interface{}) - if !ok { - t := reflect.TypeOf(originalSorted[originalIndex]) - return nil, fmt.Errorf(errBadArgTypeFmt, "map[string]interface{}", t.Kind().String()) - } - - originalValue, ok := originalMap[mergeKey] - if !ok { - return nil, fmt.Errorf(errNoMergeKeyFmt, originalMap, mergeKey) - } - - patch = append(patch, map[string]interface{}{mergeKey: originalValue, directiveMarker: deleteDirective}) - } - } - - if !ignoreChangesAndAdditions { - // Add any remaining items found only in modified - for ; modifiedIndex < len(modifiedSorted); modifiedIndex++ { - patch = append(patch, modifiedSorted[modifiedIndex]) - } - } - - return patch, nil -} - -// Deprecated: StrategicMergePatchData is deprecated. Use the synonym StrategicMergePatch, -// instead, which follows the naming convention of evanphx/json-patch. -func StrategicMergePatchData(original, patch []byte, dataStruct interface{}) ([]byte, error) { - return StrategicMergePatch(original, patch, dataStruct) -} - -// StrategicMergePatch applies a strategic merge patch. 
The patch and the original document -// must be json encoded content. A patch can be created from an original and a modified document -// by calling CreateStrategicMergePatch. -func StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte, error) { - if original == nil { - original = []byte("{}") - } - - if patch == nil { - patch = []byte("{}") - } - - originalMap := map[string]interface{}{} - err := json.Unmarshal(original, &originalMap) - if err != nil { - return nil, errBadJSONDoc - } - - patchMap := map[string]interface{}{} - err = json.Unmarshal(patch, &patchMap) - if err != nil { - return nil, errBadJSONDoc - } - - t, err := getTagStructType(dataStruct) - if err != nil { - return nil, err - } - - result, err := mergeMap(originalMap, patchMap, t) - if err != nil { - return nil, err - } - - return json.Marshal(result) -} - -func getTagStructType(dataStruct interface{}) (reflect.Type, error) { - if dataStruct == nil { - return nil, fmt.Errorf(errBadArgTypeFmt, "struct", "nil") - } - - t := reflect.TypeOf(dataStruct) - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - - if t.Kind() != reflect.Struct { - return nil, fmt.Errorf(errBadArgTypeFmt, "struct", t.Kind().String()) - } - - return t, nil -} - -var errBadPatchTypeFmt = "unknown patch type: %s in map: %v" - -// Merge fields from a patch map into the original map. Note: This may modify -// both the original map and the patch because getting a deep copy of a map in -// golang is highly non-trivial. -func mergeMap(original, patch map[string]interface{}, t reflect.Type) (map[string]interface{}, error) { - if v, ok := patch[directiveMarker]; ok { - if v == replaceDirective { - // If the patch contains "$patch: replace", don't merge it, just use the - // patch directly. Later on, we can add a single level replace that only - // affects the map that the $patch is in. - delete(patch, directiveMarker) - return patch, nil - } - - if v == deleteDirective { - // If the patch contains "$patch: delete", don't merge it, just return - // an empty map. - return map[string]interface{}{}, nil - } - - return nil, fmt.Errorf(errBadPatchTypeFmt, v, patch) - } - - // nil is an accepted value for original to simplify logic in other places. - // If original is nil, replace it with an empty map and then apply the patch. - if original == nil { - original = map[string]interface{}{} - } - - // Start merging the patch into the original. - for k, patchV := range patch { - // If the value of this key is null, delete the key if it exists in the - // original. Otherwise, skip it. - if patchV == nil { - if _, ok := original[k]; ok { - delete(original, k) - } - - continue - } - - _, ok := original[k] - if !ok { - // If it's not in the original document, just take the patch value. - original[k] = patchV - continue - } - - // If the data type is a pointer, resolve the element. - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - - // If they're both maps or lists, recurse into the value. - originalType := reflect.TypeOf(original[k]) - patchType := reflect.TypeOf(patchV) - if originalType == patchType { - // First find the fieldPatchStrategy and fieldPatchMergeKey. 
- fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, k) - if err != nil { - return nil, err - } - - if originalType.Kind() == reflect.Map && fieldPatchStrategy != replaceDirective { - typedOriginal := original[k].(map[string]interface{}) - typedPatch := patchV.(map[string]interface{}) - var err error - original[k], err = mergeMap(typedOriginal, typedPatch, fieldType) - if err != nil { - return nil, err - } - - continue - } - - if originalType.Kind() == reflect.Slice && fieldPatchStrategy == mergeDirective { - elemType := fieldType.Elem() - typedOriginal := original[k].([]interface{}) - typedPatch := patchV.([]interface{}) - var err error - original[k], err = mergeSlice(typedOriginal, typedPatch, elemType, fieldPatchMergeKey) - if err != nil { - return nil, err - } - - continue - } - } - - // If originalType and patchType are different OR the types are both - // maps or slices but we're just supposed to replace them, just take - // the value from patch. - original[k] = patchV - } - - return original, nil -} - -// Merge two slices together. Note: This may modify both the original slice and -// the patch because getting a deep copy of a slice in golang is highly -// non-trivial. -func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey string) ([]interface{}, error) { - if len(original) == 0 && len(patch) == 0 { - return original, nil - } - - // All the values must be of the same type, but not a list. - t, err := sliceElementType(original, patch) - if err != nil { - return nil, err - } - - // If the elements are not maps, merge the slices of scalars. - if t.Kind() != reflect.Map { - // Maybe in the future add a "concat" mode that doesn't - // uniqify. - both := append(original, patch...) - return uniqifyScalars(both), nil - } - - if mergeKey == "" { - return nil, fmt.Errorf("cannot merge lists without merge key for type %s", elemType.Kind().String()) - } - - // First look for any special $patch elements. - patchWithoutSpecialElements := []interface{}{} - replace := false - for _, v := range patch { - typedV := v.(map[string]interface{}) - patchType, ok := typedV[directiveMarker] - if ok { - if patchType == deleteDirective { - mergeValue, ok := typedV[mergeKey] - if ok { - _, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) - if err != nil { - return nil, err - } - - if found { - // Delete the element at originalKey. - original = append(original[:originalKey], original[originalKey+1:]...) - } - } else { - return nil, fmt.Errorf("delete patch type with no merge key defined") - } - } else if patchType == replaceDirective { - replace = true - // Continue iterating through the array to prune any other $patch elements. - } else if patchType == mergeDirective { - return nil, fmt.Errorf("merging lists cannot yet be specified in the patch") - } else { - return nil, fmt.Errorf(errBadPatchTypeFmt, patchType, typedV) - } - } else { - patchWithoutSpecialElements = append(patchWithoutSpecialElements, v) - } - } - - if replace { - return patchWithoutSpecialElements, nil - } - - patch = patchWithoutSpecialElements - - // Merge patch into original. - for _, v := range patch { - // Because earlier we confirmed that all the elements are maps. - typedV := v.(map[string]interface{}) - mergeValue, ok := typedV[mergeKey] - if !ok { - return nil, fmt.Errorf(errNoMergeKeyFmt, typedV, mergeKey) - } - - // If we find a value with this merge key value in original, merge the - // maps. Otherwise append onto original. 
- originalMap, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) - if err != nil { - return nil, err - } - - if found { - var mergedMaps interface{} - var err error - // Merge into original. - mergedMaps, err = mergeMap(originalMap, typedV, elemType) - if err != nil { - return nil, err - } - - original[originalKey] = mergedMaps - } else { - original = append(original, v) - } - } - - return original, nil -} - -// This method no longer panics if any element of the slice is not a map. -func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{}) (map[string]interface{}, int, bool, error) { - for k, v := range m { - typedV, ok := v.(map[string]interface{}) - if !ok { - return nil, 0, false, fmt.Errorf("value for key %v is not a map.", k) - } - - valueToMatch, ok := typedV[key] - if ok && valueToMatch == value { - return typedV, k, true, nil - } - } - - return nil, 0, false, nil -} - -// This function takes a JSON map and sorts all the lists that should be merged -// by key. This is needed by tests because in JSON, list order is significant, -// but in Strategic Merge Patch, merge lists do not have significant order. -// Sorting the lists allows for order-insensitive comparison of patched maps. -func sortMergeListsByName(mapJSON []byte, dataStruct interface{}) ([]byte, error) { - var m map[string]interface{} - err := json.Unmarshal(mapJSON, &m) - if err != nil { - return nil, err - } - - newM, err := sortMergeListsByNameMap(m, reflect.TypeOf(dataStruct)) - if err != nil { - return nil, err - } - - return json.Marshal(newM) -} - -func sortMergeListsByNameMap(s map[string]interface{}, t reflect.Type) (map[string]interface{}, error) { - newS := map[string]interface{}{} - for k, v := range s { - if k != directiveMarker { - fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, k) - if err != nil { - return nil, err - } - - // If v is a map or a merge slice, recurse. - if typedV, ok := v.(map[string]interface{}); ok { - var err error - v, err = sortMergeListsByNameMap(typedV, fieldType) - if err != nil { - return nil, err - } - } else if typedV, ok := v.([]interface{}); ok { - if fieldPatchStrategy == mergeDirective { - var err error - v, err = sortMergeListsByNameArray(typedV, fieldType.Elem(), fieldPatchMergeKey, true) - if err != nil { - return nil, err - } - } - } - } - - newS[k] = v - } - - return newS, nil -} - -func sortMergeListsByNameArray(s []interface{}, elemType reflect.Type, mergeKey string, recurse bool) ([]interface{}, error) { - if len(s) == 0 { - return s, nil - } - - // We don't support lists of lists yet. - t, err := sliceElementType(s) - if err != nil { - return nil, err - } - - // If the elements are not maps... - if t.Kind() != reflect.Map { - // Sort the elements, because they may have been merged out of order. - return uniqifyAndSortScalars(s), nil - } - - // Elements are maps - if one of the keys of the map is a map or a - // list, we may need to recurse into it. - newS := []interface{}{} - for _, elem := range s { - if recurse { - typedElem := elem.(map[string]interface{}) - newElem, err := sortMergeListsByNameMap(typedElem, elemType) - if err != nil { - return nil, err - } - - newS = append(newS, newElem) - } else { - newS = append(newS, elem) - } - } - - // Sort the maps. 
- newS = sortMapsBasedOnField(newS, mergeKey) - return newS, nil -} - -func sortMapsBasedOnField(m []interface{}, fieldName string) []interface{} { - mapM := mapSliceFromSlice(m) - ss := SortableSliceOfMaps{mapM, fieldName} - sort.Sort(ss) - newS := sliceFromMapSlice(ss.s) - return newS -} - -func mapSliceFromSlice(m []interface{}) []map[string]interface{} { - newM := []map[string]interface{}{} - for _, v := range m { - vt := v.(map[string]interface{}) - newM = append(newM, vt) - } - - return newM -} - -func sliceFromMapSlice(s []map[string]interface{}) []interface{} { - newS := []interface{}{} - for _, v := range s { - newS = append(newS, v) - } - - return newS -} - -type SortableSliceOfMaps struct { - s []map[string]interface{} - k string // key to sort on -} - -func (ss SortableSliceOfMaps) Len() int { - return len(ss.s) -} - -func (ss SortableSliceOfMaps) Less(i, j int) bool { - iStr := fmt.Sprintf("%v", ss.s[i][ss.k]) - jStr := fmt.Sprintf("%v", ss.s[j][ss.k]) - return sort.StringsAreSorted([]string{iStr, jStr}) -} - -func (ss SortableSliceOfMaps) Swap(i, j int) { - tmp := ss.s[i] - ss.s[i] = ss.s[j] - ss.s[j] = tmp -} - -func uniqifyAndSortScalars(s []interface{}) []interface{} { - s = uniqifyScalars(s) - - ss := SortableSliceOfScalars{s} - sort.Sort(ss) - return ss.s -} - -func uniqifyScalars(s []interface{}) []interface{} { - // Clever algorithm to uniqify. - length := len(s) - 1 - for i := 0; i < length; i++ { - for j := i + 1; j <= length; j++ { - if s[i] == s[j] { - s[j] = s[length] - s = s[0:length] - length-- - j-- - } - } - } - - return s -} - -type SortableSliceOfScalars struct { - s []interface{} -} - -func (ss SortableSliceOfScalars) Len() int { - return len(ss.s) -} - -func (ss SortableSliceOfScalars) Less(i, j int) bool { - iStr := fmt.Sprintf("%v", ss.s[i]) - jStr := fmt.Sprintf("%v", ss.s[j]) - return sort.StringsAreSorted([]string{iStr, jStr}) -} - -func (ss SortableSliceOfScalars) Swap(i, j int) { - tmp := ss.s[i] - ss.s[i] = ss.s[j] - ss.s[j] = tmp -} - -// Returns the type of the elements of N slice(s). If the type is different, -// another slice or undefined, returns an error. -func sliceElementType(slices ...[]interface{}) (reflect.Type, error) { - var prevType reflect.Type - for _, s := range slices { - // Go through elements of all given slices and make sure they are all the same type. - for _, v := range s { - currentType := reflect.TypeOf(v) - if prevType == nil { - prevType = currentType - // We don't support lists of lists yet. - if prevType.Kind() == reflect.Slice { - return nil, errNoListOfLists - } - } else { - if prevType != currentType { - return nil, fmt.Errorf("list element types are not identical: %v", fmt.Sprint(slices)) - } - prevType = currentType - } - } - } - - if prevType == nil { - return nil, fmt.Errorf("no elements in any of the given slices") - } - - return prevType, nil -} - -// HasConflicts returns true if the left and right JSON interface objects overlap with -// different values in any key. All keys are required to be strings. Since patches of the -// same Type have congruent keys, this is valid for multiple patch types. This method -// supports JSON merge patch semantics. 
-func HasConflicts(left, right interface{}) (bool, error) { - switch typedLeft := left.(type) { - case map[string]interface{}: - switch typedRight := right.(type) { - case map[string]interface{}: - for key, leftValue := range typedLeft { - rightValue, ok := typedRight[key] - if !ok { - return false, nil - } - return HasConflicts(leftValue, rightValue) - } - - return false, nil - default: - return true, nil - } - case []interface{}: - switch typedRight := right.(type) { - case []interface{}: - if len(typedLeft) != len(typedRight) { - return true, nil - } - - for i := range typedLeft { - return HasConflicts(typedLeft[i], typedRight[i]) - } - - return false, nil - default: - return true, nil - } - case string, float64, bool, int, int64, nil: - return !reflect.DeepEqual(left, right), nil - default: - return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) - } -} - -// MergingMapsHaveConflicts returns true if the left and right JSON interface -// objects overlap with different values in any key. All keys are required to be -// strings. Since patches of the same Type have congruent keys, this is valid -// for multiple patch types. This method supports strategic merge patch semantics. -func MergingMapsHaveConflicts(left, right map[string]interface{}, dataStruct interface{}) (bool, error) { - t, err := getTagStructType(dataStruct) - if err != nil { - return true, err - } - - return mergingMapFieldsHaveConflicts(left, right, t, "", "") -} - -func mergingMapFieldsHaveConflicts( - left, right interface{}, - fieldType reflect.Type, - fieldPatchStrategy, fieldPatchMergeKey string, -) (bool, error) { - switch leftType := left.(type) { - case map[string]interface{}: - switch rightType := right.(type) { - case map[string]interface{}: - leftMarker, okLeft := leftType[directiveMarker] - rightMarker, okRight := rightType[directiveMarker] - // if one or the other has a directive marker, - // then we need to consider that before looking at the individual keys, - // since a directive operates on the whole map. - if okLeft || okRight { - // if one has a directive marker and the other doesn't, - // then we have a conflict, since one is deleting or replacing the whole map, - // and the other is doing things to individual keys. - if okLeft != okRight { - return true, nil - } - - // if they both have markers, but they are not the same directive, - // then we have a conflict because they're doing different things to the map. - if leftMarker != rightMarker { - return true, nil - } - } - - // Check the individual keys. 
- return mapsHaveConflicts(leftType, rightType, fieldType) - default: - return true, nil - } - case []interface{}: - switch rightType := right.(type) { - case []interface{}: - return slicesHaveConflicts(leftType, rightType, fieldType, fieldPatchStrategy, fieldPatchMergeKey) - default: - return true, nil - } - case string, float64, bool, int, int64, nil: - return !reflect.DeepEqual(left, right), nil - default: - return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) - } -} - -func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, structType reflect.Type) (bool, error) { - for key, leftValue := range typedLeft { - if key != directiveMarker { - if rightValue, ok := typedRight[key]; ok { - fieldType, fieldPatchStrategy, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(structType, key) - if err != nil { - return true, err - } - - if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, - fieldType, fieldPatchStrategy, fieldPatchMergeKey); hasConflicts { - return true, err - } - } - } - } - - return false, nil -} - -func slicesHaveConflicts( - typedLeft, typedRight []interface{}, - fieldType reflect.Type, - fieldPatchStrategy, fieldPatchMergeKey string, -) (bool, error) { - elementType, err := sliceElementType(typedLeft, typedRight) - if err != nil { - return true, err - } - - valueType := fieldType.Elem() - if fieldPatchStrategy == mergeDirective { - // Merging lists of scalars have no conflicts by definition - // So we only need to check further if the elements are maps - if elementType.Kind() != reflect.Map { - return false, nil - } - - // Build a map for each slice and then compare the two maps - leftMap, err := sliceOfMapsToMapOfMaps(typedLeft, fieldPatchMergeKey) - if err != nil { - return true, err - } - - rightMap, err := sliceOfMapsToMapOfMaps(typedRight, fieldPatchMergeKey) - if err != nil { - return true, err - } - - return mapsOfMapsHaveConflicts(leftMap, rightMap, valueType) - } - - // Either we don't have type information, or these are non-merging lists - if len(typedLeft) != len(typedRight) { - return true, nil - } - - // Sort scalar slices to prevent ordering issues - // We have no way to sort non-merging lists of maps - if elementType.Kind() != reflect.Map { - typedLeft = uniqifyAndSortScalars(typedLeft) - typedRight = uniqifyAndSortScalars(typedRight) - } - - // Compare the slices element by element in order - // This test will fail if the slices are not sorted - for i := range typedLeft { - if hasConflicts, err := mergingMapFieldsHaveConflicts(typedLeft[i], typedRight[i], valueType, "", ""); hasConflicts { - return true, err - } - } - - return false, nil -} - -func sliceOfMapsToMapOfMaps(slice []interface{}, mergeKey string) (map[string]interface{}, error) { - result := make(map[string]interface{}, len(slice)) - for _, value := range slice { - typedValue, ok := value.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("invalid element type in merging list:%v", slice) - } - - mergeValue, ok := typedValue[mergeKey] - if !ok { - return nil, fmt.Errorf("cannot find merge key `%s` in merging list element:%v", mergeKey, typedValue) - } - - result[fmt.Sprintf("%s", mergeValue)] = typedValue - } - - return result, nil -} - -func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, structType reflect.Type) (bool, error) { - for key, leftValue := range typedLeft { - if rightValue, ok := typedRight[key]; ok { - if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, structType, "", ""); 
hasConflicts { - return true, err - } - } - - return false, nil -} - -// CreateThreeWayMergePatch reconciles a modified configuration with an original configuration, -// while preserving any changes or deletions made to the original configuration in the interim, -// and not overridden by the current configuration. All three documents must be passed to the -// method as json encoded content. It will return a strategic merge patch, or an error if any -// of the documents is invalid, or if there are any preconditions that fail against the modified -// configuration, or, if overwrite is false and there are conflicts between the modified and current -// configurations. Conflicts are defined as keys changed differently from original to modified -// than from original to current. In other words, a conflict occurs if modified changes any key -// in a way that is different from how it is changed in current (e.g., deleting it, changing its -// value). -func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct interface{}, overwrite bool, fns ...PreconditionFunc) ([]byte, error) { - originalMap := map[string]interface{}{} - if len(original) > 0 { - if err := json.Unmarshal(original, &originalMap); err != nil { - return nil, errBadJSONDoc - } - } - - modifiedMap := map[string]interface{}{} - if len(modified) > 0 { - if err := json.Unmarshal(modified, &modifiedMap); err != nil { - return nil, errBadJSONDoc - } - } - - currentMap := map[string]interface{}{} - if len(current) > 0 { - if err := json.Unmarshal(current, &currentMap); err != nil { - return nil, errBadJSONDoc - } - } - - t, err := getTagStructType(dataStruct) - if err != nil { - return nil, err - } - - // The patch is the difference from current to modified without deletions, plus deletions - // from original to modified. To find it, we compute deletions, which are the deletions from - // original to modified, and delta, which is the difference from current to modified without - // deletions, and then apply delta to deletions as a patch, which should be strictly additive. - deltaMap, err := diffMaps(currentMap, modifiedMap, t, false, true) - if err != nil { - return nil, err - } - - deletionsMap, err := diffMaps(originalMap, modifiedMap, t, true, false) - if err != nil { - return nil, err - } - - patchMap, err := mergeMap(deletionsMap, deltaMap, t) - if err != nil { - return nil, err - } - - // Apply the preconditions to the patch, and return an error if any of them fail. - for _, fn := range fns { - if !fn(patchMap) { - return nil, newErrPreconditionFailed(patchMap) - } - } - - // If overwrite is false, and the patch contains any keys that were changed differently, - // then return a conflict error.
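Editor's note: the snippet below is a usage sketch, not part of the diff. It shows how the CreateThreeWayMergePatch API documented above is typically driven; the strategicpatch import path and the choice of v1.Pod as the schema-carrying dataStruct are assumptions for illustration, while the function signature matches the removed code.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/util/strategicpatch"
)

func main() {
	original := []byte(`{"metadata":{"labels":{"app":"web"}}}`)                     // last applied configuration
	modified := []byte(`{"metadata":{"labels":{"app":"web-v2"}}}`)                  // desired configuration
	current := []byte(`{"metadata":{"labels":{"app":"web","tier":"frontend"}}}`)    // live object

	// overwrite=false makes the call fail with a conflict error if current
	// changed a key differently than modified did, as described above.
	patch, err := strategicpatch.CreateThreeWayMergePatch(original, modified, current, v1.Pod{}, false)
	if err != nil {
		fmt.Println("conflict or bad document:", err)
		return
	}
	fmt.Println(string(patch)) // e.g. {"metadata":{"labels":{"app":"web-v2"}}}
}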
- if !overwrite { - changedMap, err := diffMaps(originalMap, currentMap, t, false, false) - if err != nil { - return nil, err - } - - hasConflicts, err := MergingMapsHaveConflicts(patchMap, changedMap, dataStruct) - if err != nil { - return nil, err - } - - if hasConflicts { - return nil, newErrConflict(toYAMLOrError(patchMap), toYAMLOrError(changedMap)) - } - } - - return json.Marshal(patchMap) -} - -func toYAMLOrError(v interface{}) string { - y, err := toYAML(v) - if err != nil { - return err.Error() - } - - return y -} - -func toYAML(v interface{}) (string, error) { - y, err := yaml.Marshal(v) - if err != nil { - return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, spew.Sdump(v)) - } - - return string(y), nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/string_flag.go b/vendor/k8s.io/kubernetes/pkg/util/string_flag.go deleted file mode 100644 index 4208bf5f6..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/string_flag.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -// StringFlag is a string flag compatible with flags and pflags that keeps track of whether it had a value supplied or not. -type StringFlag struct { - // If Set has been invoked this value is true - provided bool - // The exact value provided on the flag - value string -} - -func NewStringFlag(defaultVal string) StringFlag { - return StringFlag{value: defaultVal} -} - -func (f *StringFlag) Default(value string) { - f.value = value -} - -func (f StringFlag) String() string { - return f.value -} - -func (f StringFlag) Value() string { - return f.value -} - -func (f *StringFlag) Set(value string) error { - f.value = value - f.provided = true - - return nil -} - -func (f StringFlag) Provided() bool { - return f.provided -} - -func (f *StringFlag) Type() string { - return "string" -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/trace.go b/vendor/k8s.io/kubernetes/pkg/util/trace.go deleted file mode 100644 index ed9da94b3..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/trace.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package util - -import ( - "bytes" - "fmt" - "time" - - "github.com/golang/glog" -) - -type traceStep struct { - stepTime time.Time - msg string -} - -type Trace struct { - name string - startTime time.Time - steps []traceStep -} - -func NewTrace(name string) *Trace { - return &Trace{name, time.Now(), nil} -} - -func (t *Trace) Step(msg string) { - if t.steps == nil { - // traces almost always have less than 6 steps, do this to avoid more than a single allocation - t.steps = make([]traceStep, 0, 6) - } - t.steps = append(t.steps, traceStep{time.Now(), msg}) -} - -func (t *Trace) Log() { - endTime := time.Now() - var buffer bytes.Buffer - - buffer.WriteString(fmt.Sprintf("Trace %q (started %v):\n", t.name, t.startTime)) - lastStepTime := t.startTime - for _, step := range t.steps { - buffer.WriteString(fmt.Sprintf("[%v] [%v] %v\n", step.stepTime.Sub(t.startTime), step.stepTime.Sub(lastStepTime), step.msg)) - lastStepTime = step.stepTime - } - buffer.WriteString(fmt.Sprintf("[%v] [%v] END\n", endTime.Sub(t.startTime), endTime.Sub(lastStepTime))) - glog.Info(buffer.String()) -} - -func (t *Trace) LogIfLong(threshold time.Duration) { - if time.Since(t.startTime) >= threshold { - t.Log() - } -} - -func (t *Trace) TotalTime() time.Duration { - return time.Since(t.startTime) -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/util.go b/vendor/k8s.io/kubernetes/pkg/util/util.go deleted file mode 100644 index 4826a448b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/util.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "fmt" - "os" - "reflect" - "regexp" -) - -// Takes a list of strings and compiles them into a list of regular expressions -func CompileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) { - regexps := []*regexp.Regexp{} - for _, regexpStr := range regexpStrings { - r, err := regexp.Compile(regexpStr) - if err != nil { - return []*regexp.Regexp{}, err - } - regexps = append(regexps, r) - } - return regexps, nil -} - -// Detects if using systemd as the init system -// Please note that simply reading /proc/1/cmdline can be misleading because -// some installation of various init programs can automatically make /sbin/init -// a symlink or even a renamed version of their main program. -// TODO(dchen1107): realiably detects the init system using on the system: -// systemd, upstart, initd, etc. -func UsingSystemdInitSystem() bool { - if _, err := os.Stat("/run/systemd/system"); err == nil { - return true - } - - return false -} - -// Tests whether all pointer fields in a struct are nil. This is useful when, -// for example, an API struct is handled by plugins which need to distinguish -// "no plugin accepted this spec" from "this spec is empty". -// -// This function is only valid for structs and pointers to structs. Any other -// type will cause a panic. Passing a typed nil pointer will return true. 
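Editor's note: a minimal usage sketch (not part of the diff) for the Trace helper removed above, assuming the vendored import path k8s.io/kubernetes/pkg/util; the handler name is hypothetical.

package main

import (
	"time"

	"k8s.io/kubernetes/pkg/util"
)

func handleRequest() {
	t := util.NewTrace("handleRequest")       // records the start time
	defer t.LogIfLong(250 * time.Millisecond) // only log traces slower than 250ms

	// ... validate the request ...
	t.Step("validated input")

	// ... write the response ...
	t.Step("wrote response")
}

func main() {
	handleRequest()
}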
-func AllPtrFieldsNil(obj interface{}) bool { - v := reflect.ValueOf(obj) - if !v.IsValid() { - panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj)) - } - if v.Kind() == reflect.Ptr { - if v.IsNil() { - return true - } - v = v.Elem() - } - for i := 0; i < v.NumField(); i++ { - if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() { - return false - } - } - return true -} - -func FileExists(filename string) (bool, error) { - if _, err := os.Stat(filename); os.IsNotExist(err) { - return false, nil - } else if err != nil { - return false, err - } - return true, nil -} - -// borrowed from ioutil.ReadDir -// ReadDir reads the directory named by dirname and returns -// a list of directory entries, minus those with lstat errors -func ReadDirNoExit(dirname string) ([]os.FileInfo, []error, error) { - if dirname == "" { - dirname = "." - } - - f, err := os.Open(dirname) - if err != nil { - return nil, nil, err - } - defer f.Close() - - names, err := f.Readdirnames(-1) - list := make([]os.FileInfo, 0, len(names)) - errs := make([]error, 0, len(names)) - for _, filename := range names { - fip, lerr := os.Lstat(dirname + "/" + filename) - if os.IsNotExist(lerr) { - // File disappeared between readdir + stat. - // Just treat it as if it didn't exist. - continue - } - - list = append(list, fip) - errs = append(errs, lerr) - } - - return list, errs, nil -} - -// IntPtr returns a pointer to an int -func IntPtr(i int) *int { - o := i - return &o -} - -// Int32Ptr returns a pointer to an int32 -func Int32Ptr(i int32) *int32 { - o := i - return &o -} - -// IntPtrDerefOr dereference the int ptr and returns it i not nil, -// else returns def. -func IntPtrDerefOr(ptr *int, def int) int { - if ptr != nil { - return *ptr - } - return def -} - -// Int32PtrDerefOr dereference the int32 ptr and returns it i not nil, -// else returns def. -func Int32PtrDerefOr(ptr *int32, def int32) int32 { - if ptr != nil { - return *ptr - } - return def -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/uuid.go b/vendor/k8s.io/kubernetes/pkg/util/uuid.go deleted file mode 100644 index 7e1396f12..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/uuid.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "sync" - - "github.com/pborman/uuid" - "k8s.io/kubernetes/pkg/types" -) - -var uuidLock sync.Mutex -var lastUUID uuid.UUID - -func NewUUID() types.UID { - uuidLock.Lock() - defer uuidLock.Unlock() - result := uuid.NewUUID() - // The UUID package is naive and can generate identical UUIDs if the - // time interval is quick enough. - // The UUID uses 100 ns increments so it's short enough to actively - // wait for a new value. 
- for uuid.Equal(lastUUID, result) == true { - result = uuid.NewUUID() - } - lastUUID = result - return types.UID(result.String()) -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/validation/field/errors.go b/vendor/k8s.io/kubernetes/pkg/util/validation/field/errors.go deleted file mode 100644 index 203f7cc8f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/validation/field/errors.go +++ /dev/null @@ -1,228 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package field - -import ( - "encoding/json" - "fmt" - "strings" - - utilerrors "k8s.io/kubernetes/pkg/util/errors" -) - -// Error is an implementation of the 'error' interface, which represents a -// field-level validation error. -type Error struct { - Type ErrorType - Field string - BadValue interface{} - Detail string -} - -var _ error = &Error{} - -// Error implements the error interface. -func (v *Error) Error() string { - return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody()) -} - -// ErrorBody returns the error message without the field name. This is useful -// for building nice-looking higher-level error reporting. -func (v *Error) ErrorBody() string { - var s string - switch v.Type { - case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal: - s = fmt.Sprintf("%s", v.Type) - default: - var bad string - badBytes, err := json.Marshal(v.BadValue) - if err != nil { - bad = err.Error() - } else { - bad = string(badBytes) - } - s = fmt.Sprintf("%s: %s", v.Type, bad) - } - if len(v.Detail) != 0 { - s += fmt.Sprintf(": %s", v.Detail) - } - return s -} - -// ErrorType is a machine readable value providing more detail about why -// a field is invalid. These values are expected to match 1-1 with -// CauseType in api/types.go. -type ErrorType string - -// TODO: These values are duplicated in api/types.go, but there's a circular dep. Fix it. -const ( - // ErrorTypeNotFound is used to report failure to find a requested value - // (e.g. looking up an ID). See NotFound(). - ErrorTypeNotFound ErrorType = "FieldValueNotFound" - // ErrorTypeRequired is used to report required values that are not - // provided (e.g. empty strings, null values, or empty arrays). See - // Required(). - ErrorTypeRequired ErrorType = "FieldValueRequired" - // ErrorTypeDuplicate is used to report collisions of values that must be - // unique (e.g. unique IDs). See Duplicate(). - ErrorTypeDuplicate ErrorType = "FieldValueDuplicate" - // ErrorTypeInvalid is used to report malformed values (e.g. failed regex - // match, too long, out of bounds). See Invalid(). - ErrorTypeInvalid ErrorType = "FieldValueInvalid" - // ErrorTypeNotSupported is used to report unknown values for enumerated - // fields (e.g. a list of valid values). See NotSupported(). 
- ErrorTypeNotSupported ErrorType = "FieldValueNotSupported" - // ErrorTypeForbidden is used to report valid (as per formatting rules) - // values which would be accepted under some conditions, but which are not - // permitted by the current conditions (such as security policy). See - // Forbidden(). - ErrorTypeForbidden ErrorType = "FieldValueForbidden" - // ErrorTypeTooLong is used to report that the given value is too long. - // This is similar to ErrorTypeInvalid, but the error will not include the - // too-long value. See TooLong(). - ErrorTypeTooLong ErrorType = "FieldValueTooLong" - // ErrorTypeInternal is used to report other errors that are not related - // to user input. See InternalError(). - ErrorTypeInternal ErrorType = "InternalError" -) - -// String converts a ErrorType into its corresponding canonical error message. -func (t ErrorType) String() string { - switch t { - case ErrorTypeNotFound: - return "Not found" - case ErrorTypeRequired: - return "Required value" - case ErrorTypeDuplicate: - return "Duplicate value" - case ErrorTypeInvalid: - return "Invalid value" - case ErrorTypeNotSupported: - return "Unsupported value" - case ErrorTypeForbidden: - return "Forbidden" - case ErrorTypeTooLong: - return "Too long" - case ErrorTypeInternal: - return "Internal error" - default: - panic(fmt.Sprintf("unrecognized validation error: %q", string(t))) - } -} - -// NotFound returns a *Error indicating "value not found". This is -// used to report failure to find a requested value (e.g. looking up an ID). -func NotFound(field *Path, value interface{}) *Error { - return &Error{ErrorTypeNotFound, field.String(), value, ""} -} - -// Required returns a *Error indicating "value required". This is used -// to report required values that are not provided (e.g. empty strings, null -// values, or empty arrays). -func Required(field *Path, detail string) *Error { - return &Error{ErrorTypeRequired, field.String(), "", detail} -} - -// Duplicate returns a *Error indicating "duplicate value". This is -// used to report collisions of values that must be unique (e.g. names or IDs). -func Duplicate(field *Path, value interface{}) *Error { - return &Error{ErrorTypeDuplicate, field.String(), value, ""} -} - -// Invalid returns a *Error indicating "invalid value". This is used -// to report malformed values (e.g. failed regex match, too long, out of bounds). -func Invalid(field *Path, value interface{}, detail string) *Error { - return &Error{ErrorTypeInvalid, field.String(), value, detail} -} - -// NotSupported returns a *Error indicating "unsupported value". -// This is used to report unknown values for enumerated fields (e.g. a list of -// valid values). -func NotSupported(field *Path, value interface{}, validValues []string) *Error { - detail := "" - if validValues != nil && len(validValues) > 0 { - detail = "supported values: " + strings.Join(validValues, ", ") - } - return &Error{ErrorTypeNotSupported, field.String(), value, detail} -} - -// Forbidden returns a *Error indicating "forbidden". This is used to -// report valid (as per formatting rules) values which would be accepted under -// some conditions, but which are not permitted by current conditions (e.g. -// security policy). -func Forbidden(field *Path, detail string) *Error { - return &Error{ErrorTypeForbidden, field.String(), "", detail} -} - -// TooLong returns a *Error indicating "too long". This is used to -// report that the given value is too long. 
This is similar to -// Invalid, but the returned error will not include the too-long -// value. -func TooLong(field *Path, value interface{}, maxLength int) *Error { - return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d characters", maxLength)} -} - -// InternalError returns a *Error indicating "internal error". This is used -// to signal that an error was found that was not directly related to user -// input. The err argument must be non-nil. -func InternalError(field *Path, err error) *Error { - return &Error{ErrorTypeInternal, field.String(), nil, err.Error()} -} - -// ErrorList holds a set of Errors. It is plausible that we might one day have -// non-field errors in this same umbrella package, but for now we don't, so -// we can keep it simple and leave ErrorList here. -type ErrorList []*Error - -// NewErrorTypeMatcher returns an errors.Matcher that returns true -// if the provided error is a Error and has the provided ErrorType. -func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher { - return func(err error) bool { - if e, ok := err.(*Error); ok { - return e.Type == t - } - return false - } -} - -// ToAggregate converts the ErrorList into an errors.Aggregate. -func (list ErrorList) ToAggregate() utilerrors.Aggregate { - errs := make([]error, len(list)) - for i := range list { - errs[i] = list[i] - } - return utilerrors.NewAggregate(errs) -} - -func fromAggregate(agg utilerrors.Aggregate) ErrorList { - errs := agg.Errors() - list := make(ErrorList, len(errs)) - for i := range errs { - list[i] = errs[i].(*Error) - } - return list -} - -// Filter removes items from the ErrorList that match the provided fns. -func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList { - err := utilerrors.FilterOut(list.ToAggregate(), fns...) - if err == nil { - return nil - } - // FilterOut takes an Aggregate and returns an Aggregate - return fromAggregate(err.(utilerrors.Aggregate)) -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/validation/field/path.go b/vendor/k8s.io/kubernetes/pkg/util/validation/field/path.go deleted file mode 100644 index 30ff5a8f7..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/validation/field/path.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package field - -import ( - "bytes" - "fmt" - "strconv" -) - -// Path represents the path from some root to a particular field. -type Path struct { - name string // the name of this field or "" if this is an index - index string // if name == "", this is a subscript (index or map key) of the previous element - parent *Path // nil if this is the root element -} - -// NewPath creates a root Path object. -func NewPath(name string, moreNames ...string) *Path { - r := &Path{name: name, parent: nil} - for _, anotherName := range moreNames { - r = &Path{name: anotherName, parent: r} - } - return r -} - -// Root returns the root element of this Path. 
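Editor's note: a sketch (not part of the diff) of building field paths and validation errors with the field package removed above; the spec values are hypothetical.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/validation/field"
)

func main() {
	// spec.containers[0].name
	path := field.NewPath("spec", "containers").Index(0).Child("name")

	allErrs := field.ErrorList{
		field.Invalid(path, "Bad_Name", "must be a DNS-1123 label"),
		field.Required(field.NewPath("spec", "restartPolicy"), ""),
	}

	// Aggregate the list into a single error for callers.
	fmt.Println(allErrs.ToAggregate())
}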
-func (p *Path) Root() *Path { - for ; p.parent != nil; p = p.parent { - // Do nothing. - } - return p -} - -// Child creates a new Path that is a child of the method receiver. -func (p *Path) Child(name string, moreNames ...string) *Path { - r := NewPath(name, moreNames...) - r.Root().parent = p - return r -} - -// Index indicates that the previous Path is to be subscripted by an int. -// This sets the same underlying value as Key. -func (p *Path) Index(index int) *Path { - return &Path{index: strconv.Itoa(index), parent: p} -} - -// Key indicates that the previous Path is to be subscripted by a string. -// This sets the same underlying value as Index. -func (p *Path) Key(key string) *Path { - return &Path{index: key, parent: p} -} - -// String produces a string representation of the Path. -func (p *Path) String() string { - // make a slice to iterate - elems := []*Path{} - for ; p != nil; p = p.parent { - elems = append(elems, p) - } - - // iterate, but it has to be backwards - buf := bytes.NewBuffer(nil) - for i := range elems { - p := elems[len(elems)-1-i] - if p.parent != nil && len(p.name) > 0 { - // This is either the root or it is a subscript. - buf.WriteString(".") - } - if len(p.name) > 0 { - buf.WriteString(p.name) - } else { - fmt.Fprintf(buf, "[%s]", p.index) - } - } - return buf.String() -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/util/validation/validation.go deleted file mode 100644 index 9361a4bfa..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/validation/validation.go +++ /dev/null @@ -1,262 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "fmt" - "math" - "net" - "regexp" - "strings" -) - -const qnameCharFmt string = "[A-Za-z0-9]" -const qnameExtCharFmt string = "[-A-Za-z0-9_.]" -const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" + qnameCharFmt -const qualifiedNameMaxLength int = 63 - -var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$") - -// IsQualifiedName tests whether the value passed is what Kubernetes calls a -// "qualified name". This is a format used in various places throughout the -// system. If the value is not valid, a list of error strings is returned. -// Otherwise an empty list (or nil) is returned. -func IsQualifiedName(value string) []string { - var errs []string - parts := strings.Split(value, "/") - var name string - switch len(parts) { - case 1: - name = parts[0] - case 2: - var prefix string - prefix, name = parts[0], parts[1] - if len(prefix) == 0 { - errs = append(errs, "prefix part "+EmptyError()) - } else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 { - errs = append(errs, prefixEach(msgs, "prefix part ")...) - } - default: - return append(errs, RegexError(qualifiedNameFmt, "MyName", "my.name", "123-abc")+ - " with an optional DNS subdomain prefix and '/' (e.g. 
'example.com/MyName'") - } - - if len(name) == 0 { - errs = append(errs, "name part "+EmptyError()) - } else if len(name) > qualifiedNameMaxLength { - errs = append(errs, "name part "+MaxLenError(qualifiedNameMaxLength)) - } - if !qualifiedNameRegexp.MatchString(name) { - errs = append(errs, "name part "+RegexError(qualifiedNameFmt, "MyName", "my.name", "123-abc")) - } - return errs -} - -const labelValueFmt string = "(" + qualifiedNameFmt + ")?" -const LabelValueMaxLength int = 63 - -var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$") - -// IsValidLabelValue tests whether the value passed is a valid label value. If -// the value is not valid, a list of error strings is returned. Otherwise an -// empty list (or nil) is returned. -func IsValidLabelValue(value string) []string { - var errs []string - if len(value) > LabelValueMaxLength { - errs = append(errs, MaxLenError(LabelValueMaxLength)) - } - if !labelValueRegexp.MatchString(value) { - errs = append(errs, RegexError(labelValueFmt, "MyValue", "my_value", "12345")) - } - return errs -} - -const DNS1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" -const DNS1123LabelMaxLength int = 63 - -var dns1123LabelRegexp = regexp.MustCompile("^" + DNS1123LabelFmt + "$") - -// IsDNS1123Label tests for a string that conforms to the definition of a label in -// DNS (RFC 1123). -func IsDNS1123Label(value string) []string { - var errs []string - if len(value) > DNS1123LabelMaxLength { - errs = append(errs, MaxLenError(DNS1123LabelMaxLength)) - } - if !dns1123LabelRegexp.MatchString(value) { - errs = append(errs, RegexError(DNS1123LabelFmt, "my-name", "123-abc")) - } - return errs -} - -const DNS1123SubdomainFmt string = DNS1123LabelFmt + "(\\." + DNS1123LabelFmt + ")*" -const DNS1123SubdomainMaxLength int = 253 - -var dns1123SubdomainRegexp = regexp.MustCompile("^" + DNS1123SubdomainFmt + "$") - -// IsDNS1123Subdomain tests for a string that conforms to the definition of a -// subdomain in DNS (RFC 1123). -func IsDNS1123Subdomain(value string) []string { - var errs []string - if len(value) > DNS1123SubdomainMaxLength { - errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) - } - if !dns1123SubdomainRegexp.MatchString(value) { - errs = append(errs, RegexError(DNS1123SubdomainFmt, "example.com")) - } - return errs -} - -const DNS952LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" -const DNS952LabelMaxLength int = 24 - -var dns952LabelRegexp = regexp.MustCompile("^" + DNS952LabelFmt + "$") - -// IsDNS952Label tests for a string that conforms to the definition of a label in -// DNS (RFC 952). -func IsDNS952Label(value string) []string { - var errs []string - if len(value) > DNS952LabelMaxLength { - errs = append(errs, MaxLenError(DNS952LabelMaxLength)) - } - if !dns952LabelRegexp.MatchString(value) { - errs = append(errs, RegexError(DNS952LabelFmt, "my-name", "abc-123")) - } - return errs -} - -const CIdentifierFmt string = "[A-Za-z_][A-Za-z0-9_]*" - -var cIdentifierRegexp = regexp.MustCompile("^" + CIdentifierFmt + "$") - -// IsCIdentifier tests for a string that conforms the definition of an identifier -// in C. This checks the format, but not the length. -func IsCIdentifier(value string) bool { - return cIdentifierRegexp.MatchString(value) -} - -// IsValidPortNum tests that the argument is a valid, non-zero port number. -func IsValidPortNum(port int) bool { - return 0 < port && port < 65536 -} - -// Now in libcontainer UID/GID limits is 0 ~ 1<<31 - 1 -// TODO: once we have a type for UID/GID we should make these that type. 
-const ( - minUserID = 0 - maxUserID = math.MaxInt32 - minGroupID = 0 - maxGroupID = math.MaxInt32 -) - -// IsValidGroupId tests that the argument is a valid gids. -func IsValidGroupId(gid int64) bool { - return minGroupID <= gid && gid <= maxGroupID -} - -// IsValidUserId tests that the argument is a valid uids. -func IsValidUserId(uid int64) bool { - return minUserID <= uid && uid <= maxUserID -} - -const doubleHyphensFmt string = ".*(--).*" - -var doubleHyphensRegexp = regexp.MustCompile("^" + doubleHyphensFmt + "$") - -const IdentifierNoHyphensBeginEndFmt string = "[a-z0-9]([a-z0-9-]*[a-z0-9])*" - -var identifierNoHyphensBeginEndRegexp = regexp.MustCompile("^" + IdentifierNoHyphensBeginEndFmt + "$") - -const atLeastOneLetterFmt string = ".*[a-z].*" - -var atLeastOneLetterRegexp = regexp.MustCompile("^" + atLeastOneLetterFmt + "$") - -// IsValidPortName check that the argument is valid syntax. It must be non empty and no more than 15 characters long -// It must contains at least one letter [a-z] and it must contains only [a-z0-9-]. -// Hypens ('-') cannot be leading or trailing character of the string and cannot be adjacent to other hyphens. -// Although RFC 6335 allows upper and lower case characters but case is ignored for comparison purposes: (HTTP -// and http denote the same service). -func IsValidPortName(port string) bool { - if len(port) < 1 || len(port) > 15 { - return false - } - if doubleHyphensRegexp.MatchString(port) { - return false - } - if identifierNoHyphensBeginEndRegexp.MatchString(port) && atLeastOneLetterRegexp.MatchString(port) { - return true - } - return false -} - -// IsValidIP tests that the argument is a valid IP address. -func IsValidIP(value string) bool { - return net.ParseIP(value) != nil -} - -const percentFmt string = "[0-9]+%" - -var percentRegexp = regexp.MustCompile("^" + percentFmt + "$") - -func IsValidPercent(percent string) bool { - return percentRegexp.MatchString(percent) -} - -const HTTPHeaderNameFmt string = "[-A-Za-z0-9]+" - -var httpHeaderNameRegexp = regexp.MustCompile("^" + HTTPHeaderNameFmt + "$") - -// IsHTTPHeaderName checks that a string conforms to the Go HTTP library's -// definition of a valid header field name (a stricter subset than RFC7230). -func IsHTTPHeaderName(value string) bool { - return httpHeaderNameRegexp.MatchString(value) -} - -// MaxLenError returns a string explanation of a "string too long" validation -// failure. -func MaxLenError(length int) string { - return fmt.Sprintf("must be no more than %d characters", length) -} - -// RegexError returns a string explanation of a regex validation failure. -func RegexError(fmt string, examples ...string) string { - s := "must match the regex " + fmt - if len(examples) == 0 { - return s - } - s += " (e.g. " - for i := range examples { - if i > 0 { - s += " or " - } - s += "'" + examples[i] + "'" - } - return s + ")" -} - -// EmptyError returns a string explanation of a "must not be empty" validation -// failure. -func EmptyError() string { - return "must be non-empty" -} - -func prefixEach(msgs []string, prefix string) []string { - for i := range msgs { - msgs[i] = prefix + msgs[i] - } - return msgs -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/wait/doc.go b/vendor/k8s.io/kubernetes/pkg/util/wait/doc.go deleted file mode 100644 index 240397a22..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/wait/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package wait provides tools for polling or listening for changes -// to a condition. -package wait diff --git a/vendor/k8s.io/kubernetes/pkg/util/wait/wait.go b/vendor/k8s.io/kubernetes/pkg/util/wait/wait.go deleted file mode 100644 index b56560e75..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/wait/wait.go +++ /dev/null @@ -1,268 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package wait - -import ( - "errors" - "math/rand" - "time" - - "k8s.io/kubernetes/pkg/util/runtime" -) - -// For any test of the style: -// ... -// <- time.After(timeout): -// t.Errorf("Timed out") -// The value for timeout should effectively be "forever." Obviously we don't want our tests to truly lock up forever, but 30s -// is long enough that it is effectively forever for the things that can slow down a run on a heavily contended machine -// (GC, seeks, etc), but not so long as to make a developer ctrl-c a test run if they do happen to break that test. -var ForeverTestTimeout = time.Second * 30 - -// NeverStop may be passed to Until to make it never stop. -var NeverStop <-chan struct{} = make(chan struct{}) - -// Forever is syntactic sugar on top of Until -func Forever(f func(), period time.Duration) { - Until(f, period, NeverStop) -} - -// Until loops until stop channel is closed, running f every period. -// Until is syntactic sugar on top of JitterUntil with zero jitter -// factor, with sliding = true (which means the timer for period -// starts after the f completes). -func Until(f func(), period time.Duration, stopCh <-chan struct{}) { - JitterUntil(f, period, 0.0, true, stopCh) -} - -// NonSlidingUntil loops until stop channel is closed, running f every -// period. NonSlidingUntil is syntactic sugar on top of JitterUntil -// with zero jitter factor, with sliding = false (meaning the timer for -// period starts at the same time as the function starts). -func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) { - JitterUntil(f, period, 0.0, false, stopCh) -} - -// JitterUntil loops until stop channel is closed, running f every period. -// If jitterFactor is positive, the period is jittered before every run of f. -// If jitterFactor is not positive, the period is unchanged. -// Catches any panics, and keeps going. f may not be invoked if -// stop channel is already closed. Pass NeverStop to Until if you -// don't want it stop. 
-func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) { - select { - case <-stopCh: - return - default: - } - - for { - jitteredPeriod := period - if jitterFactor > 0.0 { - jitteredPeriod = Jitter(period, jitterFactor) - } - - var t *time.Timer - if !sliding { - t = time.NewTimer(jitteredPeriod) - } - - func() { - defer runtime.HandleCrash() - f() - }() - - if sliding { - t = time.NewTimer(jitteredPeriod) - } else { - // The timer we created could already have fired, so be - // careful and check stopCh first. - select { - case <-stopCh: - return - default: - } - } - - select { - case <-stopCh: - return - case <-t.C: - } - } -} - -// Jitter returns a time.Duration between duration and duration + maxFactor * duration, -// to allow clients to avoid converging on periodic behavior. If maxFactor is 0.0, a -// suggested default value will be chosen. -func Jitter(duration time.Duration, maxFactor float64) time.Duration { - if maxFactor <= 0.0 { - maxFactor = 1.0 - } - wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration)) - return wait -} - -// ErrWaitTimeout is returned when the condition exited without success -var ErrWaitTimeout = errors.New("timed out waiting for the condition") - -// ConditionFunc returns true if the condition is satisfied, or an error -// if the loop should be aborted. -type ConditionFunc func() (done bool, err error) - -// Backoff is parameters applied to a Backoff function. -type Backoff struct { - Duration time.Duration - Factor float64 - Jitter float64 - Steps int -} - -// ExponentialBackoff repeats a condition check up to steps times, increasing the wait -// by multipling the previous duration by factor. If jitter is greater than zero, -// a random amount of each duration is added (between duration and duration*(1+jitter)). -// If the condition never returns true, ErrWaitTimeout is returned. All other errors -// terminate immediately. -func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { - duration := backoff.Duration - for i := 0; i < backoff.Steps; i++ { - if i != 0 { - adjusted := duration - if backoff.Jitter > 0.0 { - adjusted = Jitter(duration, backoff.Jitter) - } - time.Sleep(adjusted) - duration = time.Duration(float64(duration) * backoff.Factor) - } - if ok, err := condition(); err != nil || ok { - return err - } - } - return ErrWaitTimeout -} - -// Poll tries a condition func until it returns true, an error, or the timeout -// is reached. condition will always be invoked at least once but some intervals -// may be missed if the condition takes too long or the time window is too short. -// If you want to Poll something forever, see PollInfinite. -// Poll always waits the interval before the first check of the condition. -func Poll(interval, timeout time.Duration, condition ConditionFunc) error { - return pollInternal(poller(interval, timeout), condition) -} - -func pollInternal(wait WaitFunc, condition ConditionFunc) error { - done := make(chan struct{}) - defer close(done) - return WaitFor(wait, condition, done) -} - -// PollImmediate is identical to Poll, except that it performs the first check -// immediately, not waiting interval beforehand. 
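Editor's note: a sketch (not part of the diff) of the polling helpers removed above, waiting for a hypothetical condition with a bounded timeout.

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/wait"
)

func main() {
	attempts := 0

	// Poll checks the condition every second, for at most ten seconds.
	err := wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) {
		attempts++
		return attempts >= 3, nil // pretend the resource becomes ready on the third check
	})
	if err != nil {
		fmt.Println("gave up:", err) // wait.ErrWaitTimeout when the condition never succeeds
		return
	}
	fmt.Println("ready after", attempts, "attempts")

	// Until runs a function periodically until the stop channel is closed.
	stop := make(chan struct{})
	go wait.Until(func() { fmt.Println("tick") }, 2*time.Second, stop)
	time.Sleep(5 * time.Second)
	close(stop)
}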
-func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error { - return pollImmediateInternal(poller(interval, timeout), condition) -} - -func pollImmediateInternal(wait WaitFunc, condition ConditionFunc) error { - done, err := condition() - if err != nil { - return err - } - if done { - return nil - } - return pollInternal(wait, condition) -} - -// PollInfinite polls forever. -func PollInfinite(interval time.Duration, condition ConditionFunc) error { - done := make(chan struct{}) - defer close(done) - return WaitFor(poller(interval, 0), condition, done) -} - -// WaitFunc creates a channel that receives an item every time a test -// should be executed and is closed when the last test should be invoked. -type WaitFunc func(done <-chan struct{}) <-chan struct{} - -// WaitFor gets a channel from wait(), and then invokes fn once for every value -// placed on the channel and once more when the channel is closed. If fn -// returns an error the loop ends and that error is returned, and if fn returns -// true the loop ends and nil is returned. ErrWaitTimeout will be returned if -// the channel is closed without fn ever returning true. -func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { - c := wait(done) - for { - _, open := <-c - ok, err := fn() - if err != nil { - return err - } - if ok { - return nil - } - if !open { - break - } - } - return ErrWaitTimeout -} - -// poller returns a WaitFunc that will send to the channel every -// interval until timeout has elapsed and then close the channel. -// Over very short intervals you may receive no ticks before -// the channel is closed. If timeout is 0, the channel -// will never be closed. -func poller(interval, timeout time.Duration) WaitFunc { - return WaitFunc(func(done <-chan struct{}) <-chan struct{} { - ch := make(chan struct{}) - - go func() { - defer close(ch) - - tick := time.NewTicker(interval) - defer tick.Stop() - - var after <-chan time.Time - if timeout != 0 { - // time.After is more convenient, but it - // potentially leaves timers around much longer - // than necessary if we exit early. - timer := time.NewTimer(timeout) - after = timer.C - defer timer.Stop() - } - - for { - select { - case <-tick.C: - // If the consumer isn't ready for this signal drop it and - // check the other channels. - select { - case ch <- struct{}{}: - default: - } - case <-after: - return - case <-done: - return - } - } - }() - - return ch - }) -} diff --git a/vendor/k8s.io/kubernetes/pkg/util/yaml/decoder.go b/vendor/k8s.io/kubernetes/pkg/util/yaml/decoder.go deleted file mode 100644 index a48a6ce86..000000000 --- a/vendor/k8s.io/kubernetes/pkg/util/yaml/decoder.go +++ /dev/null @@ -1,247 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package yaml - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "strings" - "unicode" - - "github.com/ghodss/yaml" - "github.com/golang/glog" -) - -// ToJSON converts a single YAML document into a JSON document -// or returns an error. If the document appears to be JSON the -// YAML decoding path is not used (so that error messages are -// JSON specific). -func ToJSON(data []byte) ([]byte, error) { - if hasJSONPrefix(data) { - return data, nil - } - return yaml.YAMLToJSON(data) -} - -// YAMLToJSONDecoder decodes YAML documents from an io.Reader by -// separating individual documents. It first converts the YAML -// body to JSON, then unmarshals the JSON. -type YAMLToJSONDecoder struct { - scanner *bufio.Scanner -} - -// NewYAMLToJSONDecoder decodes YAML documents from the provided -// stream in chunks by converting each document (as defined by -// the YAML spec) into its own chunk, converting it to JSON via -// yaml.YAMLToJSON, and then passing it to json.Decoder. -func NewYAMLToJSONDecoder(r io.Reader) *YAMLToJSONDecoder { - scanner := bufio.NewScanner(r) - scanner.Split(splitYAMLDocument) - return &YAMLToJSONDecoder{ - scanner: scanner, - } -} - -// Decode reads a YAML document as JSON from the stream or returns -// an error. The decoding rules match json.Unmarshal, not -// yaml.Unmarshal. -func (d *YAMLToJSONDecoder) Decode(into interface{}) error { - if d.scanner.Scan() { - data, err := yaml.YAMLToJSON(d.scanner.Bytes()) - if err != nil { - return err - } - return json.Unmarshal(data, into) - } - err := d.scanner.Err() - if err == nil { - err = io.EOF - } - return err -} - -// YAMLDecoder reads chunks of objects and returns ErrShortBuffer if -// the data is not sufficient. -type YAMLDecoder struct { - r io.ReadCloser - scanner *bufio.Scanner - remaining []byte -} - -// NewDocumentDecoder decodes YAML documents from the provided -// stream in chunks by converting each document (as defined by -// the YAML spec) into its own chunk. io.ErrShortBuffer will be -// returned if the entire buffer could not be read to assist -// the caller in framing the chunk. -func NewDocumentDecoder(r io.ReadCloser) io.ReadCloser { - scanner := bufio.NewScanner(r) - scanner.Split(splitYAMLDocument) - return &YAMLDecoder{ - r: r, - scanner: scanner, - } -} - -// Read reads the previous slice into the buffer, or attempts to read -// the next chunk. -// TODO: switch to readline approach. -func (d *YAMLDecoder) Read(data []byte) (n int, err error) { - left := len(d.remaining) - if left == 0 { - // return the next chunk from the stream - if !d.scanner.Scan() { - err := d.scanner.Err() - if err == nil { - err = io.EOF - } - return 0, err - } - out := d.scanner.Bytes() - d.remaining = out - left = len(out) - } - - // fits within data - if left <= len(data) { - copy(data, d.remaining) - d.remaining = nil - return len(d.remaining), nil - } - - // caller will need to reread - copy(data, d.remaining[:left]) - d.remaining = d.remaining[left:] - return len(data), io.ErrShortBuffer -} - -func (d *YAMLDecoder) Close() error { - return d.r.Close() -} - -const yamlSeparator = "\n---" - -// splitYAMLDocument is a bufio.SplitFunc for splitting YAML streams into individual documents. 
-func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err error) { - if atEOF && len(data) == 0 { - return 0, nil, nil - } - sep := len([]byte(yamlSeparator)) - if i := bytes.Index(data, []byte(yamlSeparator)); i >= 0 { - // We have a potential document terminator - i += sep - after := data[i:] - if len(after) == 0 { - // we can't read any more characters - if atEOF { - return len(data), data[:len(data)-sep], nil - } - return 0, nil, nil - } - if j := bytes.IndexByte(after, '\n'); j >= 0 { - return i + j + 1, data[0 : i-sep], nil - } - return 0, nil, nil - } - // If we're at EOF, we have a final, non-terminated line. Return it. - if atEOF { - return len(data), data, nil - } - // Request more data. - return 0, nil, nil -} - -// decoder is a convenience interface for Decode. -type decoder interface { - Decode(into interface{}) error -} - -// YAMLOrJSONDecoder attempts to decode a stream of JSON documents or -// YAML documents by sniffing for a leading { character. -type YAMLOrJSONDecoder struct { - r io.Reader - bufferSize int - - decoder decoder -} - -// NewYAMLOrJSONDecoder returns a decoder that will process YAML documents -// or JSON documents from the given reader as a stream. bufferSize determines -// how far into the stream the decoder will look to figure out whether this -// is a JSON stream (has whitespace followed by an open brace). -func NewYAMLOrJSONDecoder(r io.Reader, bufferSize int) *YAMLOrJSONDecoder { - return &YAMLOrJSONDecoder{ - r: r, - bufferSize: bufferSize, - } -} - -// Decode unmarshals the next object from the underlying stream into the -// provide object, or returns an error. -func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { - if d.decoder == nil { - buffer, isJSON := GuessJSONStream(d.r, d.bufferSize) - if isJSON { - glog.V(4).Infof("decoding stream as JSON") - d.decoder = json.NewDecoder(buffer) - } else { - glog.V(4).Infof("decoding stream as YAML") - d.decoder = NewYAMLToJSONDecoder(buffer) - } - } - err := d.decoder.Decode(into) - if jsonDecoder, ok := d.decoder.(*json.Decoder); ok { - if syntax, ok := err.(*json.SyntaxError); ok { - data, readErr := ioutil.ReadAll(jsonDecoder.Buffered()) - if readErr != nil { - glog.V(4).Infof("reading stream failed: %v", readErr) - } - js := string(data) - start := strings.LastIndex(js[:syntax.Offset], "\n") + 1 - line := strings.Count(js[:start], "\n") - return fmt.Errorf("json: line %d: %s", line, syntax.Error()) - } - } - return err -} - -// GuessJSONStream scans the provided reader up to size, looking -// for an open brace indicating this is JSON. It will return the -// bufio.Reader it creates for the consumer. -func GuessJSONStream(r io.Reader, size int) (io.Reader, bool) { - buffer := bufio.NewReaderSize(r, size) - b, _ := buffer.Peek(size) - return buffer, hasJSONPrefix(b) -} - -var jsonPrefix = []byte("{") - -// hasJSONPrefix returns true if the provided buffer appears to start with -// a JSON open brace. -func hasJSONPrefix(buf []byte) bool { - return hasPrefix(buf, jsonPrefix) -} - -// Return true if the first non-whitespace bytes in buf is -// prefix. 
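Editor's note: a sketch (not part of the diff) of decoding a stream that may be either YAML or JSON with the decoder removed above; the manifest literal is hypothetical.

package main

import (
	"bytes"
	"fmt"

	utilyaml "k8s.io/kubernetes/pkg/util/yaml"
)

func main() {
	manifest := []byte("apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: example\n")

	// The 4096-byte buffer is only used to sniff whether the stream is JSON.
	decoder := utilyaml.NewYAMLOrJSONDecoder(bytes.NewReader(manifest), 4096)

	var obj map[string]interface{}
	if err := decoder.Decode(&obj); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(obj["kind"]) // ConfigMap
}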
-func hasPrefix(buf []byte, prefix []byte) bool { - trim := bytes.TrimLeftFunc(buf, unicode.IsSpace) - return bytes.HasPrefix(trim, prefix) -} diff --git a/vendor/k8s.io/kubernetes/pkg/version/doc.go b/vendor/k8s.io/kubernetes/pkg/version/doc.go deleted file mode 100644 index c0397829d..000000000 --- a/vendor/k8s.io/kubernetes/pkg/version/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package version supplies version information collected at build time to -// kubernetes components. -package version diff --git a/vendor/k8s.io/kubernetes/pkg/version/semver.go b/vendor/k8s.io/kubernetes/pkg/version/semver.go deleted file mode 100644 index 1b5a845ad..000000000 --- a/vendor/k8s.io/kubernetes/pkg/version/semver.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package version - -import ( - "strings" - "unicode" - - "github.com/blang/semver" - "github.com/golang/glog" -) - -func Parse(gitversion string) (semver.Version, error) { - // optionally trim leading spaces then one v - var seen bool - gitversion = strings.TrimLeftFunc(gitversion, func(ch rune) bool { - if seen { - return false - } - if ch == 'v' { - seen = true - return true - } - return unicode.IsSpace(ch) - }) - - return semver.Make(gitversion) -} - -func MustParse(gitversion string) semver.Version { - v, err := Parse(gitversion) - if err != nil { - glog.Fatalf("failed to parse semver from gitversion %q: %v", gitversion, err) - } - return v -} diff --git a/vendor/k8s.io/kubernetes/pkg/version/version.go b/vendor/k8s.io/kubernetes/pkg/version/version.go deleted file mode 100644 index b8ac0d6c3..000000000 --- a/vendor/k8s.io/kubernetes/pkg/version/version.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package version - -import ( - "fmt" - "runtime" - - "github.com/prometheus/client_golang/prometheus" -) - -// Info contains versioning information. -// TODO: Add []string of api versions supported? It's still unclear -// how we'll want to distribute that information. -type Info struct { - Major string `json:"major"` - Minor string `json:"minor"` - GitVersion string `json:"gitVersion"` - GitCommit string `json:"gitCommit"` - GitTreeState string `json:"gitTreeState"` - BuildDate string `json:"buildDate"` - GoVersion string `json:"goVersion"` - Compiler string `json:"compiler"` - Platform string `json:"platform"` -} - -// Get returns the overall codebase version. It's for detecting -// what code a binary was built from. -func Get() Info { - // These variables typically come from -ldflags settings and in - // their absence fallback to the settings in pkg/version/base.go - return Info{ - Major: gitMajor, - Minor: gitMinor, - GitVersion: gitVersion, - GitCommit: gitCommit, - GitTreeState: gitTreeState, - BuildDate: buildDate, - GoVersion: runtime.Version(), - Compiler: runtime.Compiler, - Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), - } -} - -// String returns info as a human-friendly version string. -func (info Info) String() string { - return info.GitVersion -} - -func init() { - buildInfo := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Name: "kubernetes_build_info", - Help: "A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running.", - }, - []string{"major", "minor", "gitVersion", "gitCommit", "gitTreeState", "buildDate", "goVersion", "compiler", "platform"}, - ) - info := Get() - buildInfo.WithLabelValues(info.Major, info.Minor, info.GitVersion, info.GitCommit, info.GitTreeState, info.BuildDate, info.GoVersion, info.Compiler, info.Platform).Set(1) - - prometheus.MustRegister(buildInfo) -} diff --git a/vendor/k8s.io/kubernetes/pkg/watch/doc.go b/vendor/k8s.io/kubernetes/pkg/watch/doc.go deleted file mode 100644 index fd9b437e1..000000000 --- a/vendor/k8s.io/kubernetes/pkg/watch/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package watch contains a generic watchable interface, and a fake for -// testing code that uses the watch interface. -package watch diff --git a/vendor/k8s.io/kubernetes/pkg/watch/versioned/decoder.go b/vendor/k8s.io/kubernetes/pkg/watch/versioned/decoder.go deleted file mode 100644 index 2d13ca809..000000000 --- a/vendor/k8s.io/kubernetes/pkg/watch/versioned/decoder.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package versioned - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/runtime/serializer/streaming" - "k8s.io/kubernetes/pkg/watch" -) - -// Decoder implements the watch.Decoder interface for io.ReadClosers that -// have contents which consist of a series of watchEvent objects encoded -// with the given streaming decoder. The internal objects will be then -// decoded by the embedded decoder. -type Decoder struct { - decoder streaming.Decoder - embeddedDecoder runtime.Decoder -} - -// NewDecoder creates an Decoder for the given writer and codec. -func NewDecoder(decoder streaming.Decoder, embeddedDecoder runtime.Decoder) *Decoder { - return &Decoder{ - decoder: decoder, - embeddedDecoder: embeddedDecoder, - } -} - -// Decode blocks until it can return the next object in the reader. Returns an error -// if the reader is closed or an object can't be decoded. -func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) { - var got Event - res, _, err := d.decoder.Decode(nil, &got) - if err != nil { - return "", nil, err - } - if res != &got { - return "", nil, fmt.Errorf("unable to decode to versioned.Event") - } - switch got.Type { - case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error): - default: - return "", nil, fmt.Errorf("got invalid watch event type: %v", got.Type) - } - - obj, err := runtime.Decode(d.embeddedDecoder, got.Object.Raw) - if err != nil { - return "", nil, fmt.Errorf("unable to decode watch event: %v", err) - } - return watch.EventType(got.Type), obj, nil -} - -// Close closes the underlying r. -func (d *Decoder) Close() { - d.decoder.Close() -} diff --git a/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.pb.go b/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.pb.go deleted file mode 100644 index c1cbbd8ba..000000000 --- a/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.pb.go +++ /dev/null @@ -1,342 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/watch/versioned/generated.proto -// DO NOT EDIT! - -/* - Package versioned is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/watch/versioned/generated.proto - - It has these top-level messages: - Event -*/ -package versioned - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -func (m *Event) Reset() { *m = Event{} } -func (m *Event) String() string { return proto.CompactTextString(m) } -func (*Event) ProtoMessage() {} - -func init() { - proto.RegisterType((*Event)(nil), "k8s.io.kubernetes.pkg.watch.versioned.Event") -} -func (m *Event) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Event) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Object.Size())) - n1, err := m.Object.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *Event) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Object.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Event) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Event: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - 
if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Object.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.proto b/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.proto deleted file mode 100644 index 5bb702210..000000000 --- a/vendor/k8s.io/kubernetes/pkg/watch/versioned/generated.proto +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.kubernetes.pkg.watch.versioned; - -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "versioned"; - -// Event represents a single event to a watched resource. -// -// +protobuf=true -message Event { - optional string type = 1; - - // Object is: - // * If Type is Added or Modified: the new state of the object. - // * If Type is Deleted: the state of the object immediately before deletion. - // * If Type is Error: *api.Status is recommended; other types may make sense - // depending on context. - optional k8s.io.kubernetes.pkg.runtime.RawExtension object = 2; -} - diff --git a/vendor/k8s.io/kubernetes/pkg/watch/versioned/register.go b/vendor/k8s.io/kubernetes/pkg/watch/versioned/register.go deleted file mode 100644 index feaea3b6b..000000000 --- a/vendor/k8s.io/kubernetes/pkg/watch/versioned/register.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package versioned - -import ( - "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/watch" -) - -// WatchEventKind is name reserved for serializing watch events. -const WatchEventKind = "WatchEvent" - -// AddToGroupVersion registers the watch external and internal kinds with the scheme, and ensures the proper -// conversions are in place. 
-func AddToGroupVersion(scheme *runtime.Scheme, groupVersion unversioned.GroupVersion) { - scheme.AddKnownTypeWithName(groupVersion.WithKind(WatchEventKind), &Event{}) - scheme.AddKnownTypeWithName( - unversioned.GroupVersion{Group: groupVersion.Group, Version: runtime.APIVersionInternal}.WithKind(WatchEventKind), - &InternalEvent{}, - ) - scheme.AddConversionFuncs( - Convert_versioned_Event_to_watch_Event, - Convert_versioned_InternalEvent_to_versioned_Event, - Convert_watch_Event_to_versioned_Event, - Convert_versioned_Event_to_versioned_InternalEvent, - ) -} - -func Convert_watch_Event_to_versioned_Event(in *watch.Event, out *Event, s conversion.Scope) error { - out.Type = string(in.Type) - switch t := in.Object.(type) { - case *runtime.Unknown: - // TODO: handle other fields on Unknown and detect type - out.Object.Raw = t.Raw - case nil: - default: - out.Object.Object = in.Object - } - return nil -} - -func Convert_versioned_InternalEvent_to_versioned_Event(in *InternalEvent, out *Event, s conversion.Scope) error { - return Convert_watch_Event_to_versioned_Event((*watch.Event)(in), out, s) -} - -func Convert_versioned_Event_to_watch_Event(in *Event, out *watch.Event, s conversion.Scope) error { - out.Type = watch.EventType(in.Type) - if in.Object.Object != nil { - out.Object = in.Object.Object - } else if in.Object.Raw != nil { - // TODO: handle other fields on Unknown and detect type - out.Object = &runtime.Unknown{ - Raw: in.Object.Raw, - ContentType: runtime.ContentTypeJSON, - } - } - return nil -} - -func Convert_versioned_Event_to_versioned_InternalEvent(in *Event, out *InternalEvent, s conversion.Scope) error { - return Convert_versioned_Event_to_watch_Event(in, (*watch.Event)(out), s) -} - -// InternalEvent makes watch.Event versioned -type InternalEvent watch.Event - -func (e *InternalEvent) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind } -func (e *Event) GetObjectKind() unversioned.ObjectKind { return unversioned.EmptyObjectKind } diff --git a/vendor/k8s.io/kubernetes/pkg/watch/versioned/types.go b/vendor/k8s.io/kubernetes/pkg/watch/versioned/types.go deleted file mode 100644 index ba608aeab..000000000 --- a/vendor/k8s.io/kubernetes/pkg/watch/versioned/types.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package versioned contains the versioned types for watch. This is the first -// serialization version unless otherwise noted. -package versioned - -import ( - "k8s.io/kubernetes/pkg/runtime" -) - -// Event represents a single event to a watched resource. -// -// +protobuf=true -type Event struct { - Type string `json:"type" protobuf:"bytes,1,opt,name=type"` - - // Object is: - // * If Type is Added or Modified: the new state of the object. - // * If Type is Deleted: the state of the object immediately before deletion. - // * If Type is Error: *api.Status is recommended; other types may make sense - // depending on context. 
- Object runtime.RawExtension `json:"object" protobuf:"bytes,2,opt,name=object"` -} diff --git a/vendor/k8s.io/kubernetes/pkg/watch/watch.go b/vendor/k8s.io/kubernetes/pkg/watch/watch.go deleted file mode 100644 index e8fca0a62..000000000 --- a/vendor/k8s.io/kubernetes/pkg/watch/watch.go +++ /dev/null @@ -1,137 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package watch - -import ( - "sync" - - "k8s.io/kubernetes/pkg/runtime" -) - -// Interface can be implemented by anything that knows how to watch and report changes. -type Interface interface { - // Stops watching. Will close the channel returned by ResultChan(). Releases - // any resources used by the watch. - Stop() - - // Returns a chan which will receive all the events. If an error occurs - // or Stop() is called, this channel will be closed, in which case the - // watch should be completely cleaned up. - ResultChan() <-chan Event -} - -// EventType defines the possible types of events. -type EventType string - -const ( - Added EventType = "ADDED" - Modified EventType = "MODIFIED" - Deleted EventType = "DELETED" - Error EventType = "ERROR" -) - -// Event represents a single event to a watched resource. -type Event struct { - Type EventType - - // Object is: - // * If Type is Added or Modified: the new state of the object. - // * If Type is Deleted: the state of the object immediately before deletion. - // * If Type is Error: *api.Status is recommended; other types may make sense - // depending on context. - Object runtime.Object -} - -type emptyWatch chan Event - -// NewEmptyWatch returns a watch interface that returns no results and is closed. -// May be used in certain error conditions where no information is available but -// an error is not warranted. -func NewEmptyWatch() Interface { - ch := make(chan Event) - close(ch) - return emptyWatch(ch) -} - -// Stop implements Interface -func (w emptyWatch) Stop() { -} - -// ResultChan implements Interface -func (w emptyWatch) ResultChan() <-chan Event { - return chan Event(w) -} - -// FakeWatcher lets you test anything that consumes a watch.Interface; threadsafe. -type FakeWatcher struct { - result chan Event - Stopped bool - sync.Mutex -} - -func NewFake() *FakeWatcher { - return &FakeWatcher{ - result: make(chan Event), - } -} - -// Stop implements Interface.Stop(). -func (f *FakeWatcher) Stop() { - f.Lock() - defer f.Unlock() - if !f.Stopped { - close(f.result) - f.Stopped = true - } -} - -// Reset prepares the watcher to be reused. -func (f *FakeWatcher) Reset() { - f.Lock() - defer f.Unlock() - f.Stopped = false - f.result = make(chan Event) -} - -func (f *FakeWatcher) ResultChan() <-chan Event { - return f.result -} - -// Add sends an add event. -func (f *FakeWatcher) Add(obj runtime.Object) { - f.result <- Event{Added, obj} -} - -// Modify sends a modify event. -func (f *FakeWatcher) Modify(obj runtime.Object) { - f.result <- Event{Modified, obj} -} - -// Delete sends a delete event. 
-func (f *FakeWatcher) Delete(lastValue runtime.Object) { - f.result <- Event{Deleted, lastValue} -} - -// Error sends an Error event. -func (f *FakeWatcher) Error(errValue runtime.Object) { - f.result <- Event{Error, errValue} -} - -// Action sends an event of the requested type, for table-based testing. -func (f *FakeWatcher) Action(action EventType, obj runtime.Object) { - f.result <- Event{action, obj} -} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/gcp.go b/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/gcp.go deleted file mode 100644 index 1efbb20f1..000000000 --- a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/gcp.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package gcp - -import ( - "net/http" - "time" - - "github.com/golang/glog" - "golang.org/x/net/context" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - - "k8s.io/kubernetes/pkg/client/restclient" -) - -func init() { - if err := restclient.RegisterAuthProviderPlugin("gcp", newGCPAuthProvider); err != nil { - glog.Fatalf("Failed to register gcp auth plugin: %v", err) - } -} - -type gcpAuthProvider struct { - tokenSource oauth2.TokenSource - persister restclient.AuthProviderConfigPersister -} - -func newGCPAuthProvider(_ string, gcpConfig map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) { - ts, err := newCachedTokenSource(gcpConfig["access-token"], gcpConfig["expiry"], persister) - if err != nil { - return nil, err - } - return &gcpAuthProvider{ts, persister}, nil -} - -func (g *gcpAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper { - return &oauth2.Transport{ - Source: g.tokenSource, - Base: rt, - } -} - -func (g *gcpAuthProvider) Login() error { return nil } - -type cachedTokenSource struct { - source oauth2.TokenSource - accessToken string - expiry time.Time - persister restclient.AuthProviderConfigPersister -} - -func newCachedTokenSource(accessToken, expiry string, persister restclient.AuthProviderConfigPersister) (*cachedTokenSource, error) { - var expiryTime time.Time - if parsedTime, err := time.Parse(time.RFC3339Nano, expiry); err == nil { - expiryTime = parsedTime - } - ts, err := google.DefaultTokenSource(context.Background(), "https://www.googleapis.com/auth/cloud-platform") - if err != nil { - return nil, err - } - return &cachedTokenSource{ - source: ts, - accessToken: accessToken, - expiry: expiryTime, - persister: persister, - }, nil -} - -func (t *cachedTokenSource) Token() (*oauth2.Token, error) { - tok := &oauth2.Token{ - AccessToken: t.accessToken, - TokenType: "Bearer", - Expiry: t.expiry, - } - if tok.Valid() && !tok.Expiry.IsZero() { - return tok, nil - } - tok, err := t.source.Token() - if err != nil { - return nil, err - } - if t.persister != nil { - cached := map[string]string{ - "access-token": tok.AccessToken, - "expiry": tok.Expiry.Format(time.RFC3339Nano), - } - if err := t.persister.Persist(cached); err != 
nil { - glog.V(4).Infof("Failed to persist token: %v", err) - } - } - return tok, nil -} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/OWNERS b/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/OWNERS deleted file mode 100644 index ecf334993..000000000 --- a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/OWNERS +++ /dev/null @@ -1,2 +0,0 @@ -assignees: - - bobbyrullo diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/oidc.go b/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/oidc.go deleted file mode 100644 index 3ad279c10..000000000 --- a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/oidc.go +++ /dev/null @@ -1,270 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package oidc - -import ( - "encoding/base64" - "errors" - "fmt" - "net/http" - "strings" - "time" - - "github.com/coreos/go-oidc/jose" - "github.com/coreos/go-oidc/oauth2" - "github.com/coreos/go-oidc/oidc" - "github.com/golang/glog" - - "k8s.io/kubernetes/pkg/client/restclient" - "k8s.io/kubernetes/pkg/util/wait" -) - -const ( - cfgIssuerUrl = "idp-issuer-url" - cfgClientID = "client-id" - cfgClientSecret = "client-secret" - cfgCertificateAuthority = "idp-certificate-authority" - cfgCertificateAuthorityData = "idp-certificate-authority-data" - cfgExtraScopes = "extra-scopes" - cfgIDToken = "id-token" - cfgRefreshToken = "refresh-token" -) - -var ( - backoff = wait.Backoff{ - Duration: 1 * time.Second, - Factor: 2, - Jitter: .1, - Steps: 5, - } -) - -func init() { - if err := restclient.RegisterAuthProviderPlugin("oidc", newOIDCAuthProvider); err != nil { - glog.Fatalf("Failed to register oidc auth plugin: %v", err) - } -} - -func newOIDCAuthProvider(_ string, cfg map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) { - issuer := cfg[cfgIssuerUrl] - if issuer == "" { - return nil, fmt.Errorf("Must provide %s", cfgIssuerUrl) - } - - clientID := cfg[cfgClientID] - if clientID == "" { - return nil, fmt.Errorf("Must provide %s", cfgClientID) - } - - clientSecret := cfg[cfgClientSecret] - if clientSecret == "" { - return nil, fmt.Errorf("Must provide %s", cfgClientSecret) - } - - var certAuthData []byte - var err error - if cfg[cfgCertificateAuthorityData] != "" { - certAuthData, err = base64.StdEncoding.DecodeString(cfg[cfgCertificateAuthorityData]) - if err != nil { - return nil, err - } - } - - clientConfig := restclient.Config{ - TLSClientConfig: restclient.TLSClientConfig{ - CAFile: cfg[cfgCertificateAuthority], - CAData: certAuthData, - }, - } - - trans, err := restclient.TransportFor(&clientConfig) - if err != nil { - return nil, err - } - hc := &http.Client{Transport: trans} - - providerCfg, err := oidc.FetchProviderConfig(hc, strings.TrimSuffix(issuer, "/")) - if err != nil { - return nil, fmt.Errorf("error fetching provider config: %v", err) - } - - scopes := strings.Split(cfg[cfgExtraScopes], ",") - oidcCfg := oidc.ClientConfig{ - HTTPClient: hc, - Credentials: 
oidc.ClientCredentials{ - ID: clientID, - Secret: clientSecret, - }, - ProviderConfig: providerCfg, - Scope: append(scopes, oidc.DefaultScope...), - } - - client, err := oidc.NewClient(oidcCfg) - if err != nil { - return nil, fmt.Errorf("error creating OIDC Client: %v", err) - } - - oClient := &oidcClient{client} - - var initialIDToken jose.JWT - if cfg[cfgIDToken] != "" { - initialIDToken, err = jose.ParseJWT(cfg[cfgIDToken]) - if err != nil { - return nil, err - } - } - - return &oidcAuthProvider{ - initialIDToken: initialIDToken, - refresher: &idTokenRefresher{ - client: oClient, - cfg: cfg, - persister: persister, - }, - }, nil -} - -type oidcAuthProvider struct { - refresher *idTokenRefresher - initialIDToken jose.JWT -} - -func (g *oidcAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper { - at := &oidc.AuthenticatedTransport{ - TokenRefresher: g.refresher, - RoundTripper: rt, - } - at.SetJWT(g.initialIDToken) - return &roundTripper{ - wrapped: at, - refresher: g.refresher, - } -} - -func (g *oidcAuthProvider) Login() error { - return errors.New("not yet implemented") -} - -type OIDCClient interface { - refreshToken(rt string) (oauth2.TokenResponse, error) - verifyJWT(jwt jose.JWT) error -} - -type roundTripper struct { - refresher *idTokenRefresher - wrapped *oidc.AuthenticatedTransport -} - -func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - var res *http.Response - var err error - firstTime := true - wait.ExponentialBackoff(backoff, func() (bool, error) { - if !firstTime { - var jwt jose.JWT - jwt, err = r.refresher.Refresh() - if err != nil { - return true, nil - } - r.wrapped.SetJWT(jwt) - } else { - firstTime = false - } - - res, err = r.wrapped.RoundTrip(req) - if err != nil { - return true, nil - } - if res.StatusCode == http.StatusUnauthorized { - return false, nil - } - return true, nil - }) - return res, err -} - -type idTokenRefresher struct { - cfg map[string]string - client OIDCClient - persister restclient.AuthProviderConfigPersister - intialIDToken jose.JWT -} - -func (r *idTokenRefresher) Verify(jwt jose.JWT) error { - claims, err := jwt.Claims() - if err != nil { - return err - } - - now := time.Now() - exp, ok, err := claims.TimeClaim("exp") - switch { - case err != nil: - return fmt.Errorf("failed to parse 'exp' claim: %v", err) - case !ok: - return errors.New("missing required 'exp' claim") - case exp.Before(now): - return fmt.Errorf("token already expired at: %v", exp) - } - - return nil -} - -func (r *idTokenRefresher) Refresh() (jose.JWT, error) { - rt, ok := r.cfg[cfgRefreshToken] - if !ok { - return jose.JWT{}, errors.New("No valid id-token, and cannot refresh without refresh-token") - } - - tokens, err := r.client.refreshToken(rt) - if err != nil { - return jose.JWT{}, fmt.Errorf("could not refresh token: %v", err) - } - jwt, err := jose.ParseJWT(tokens.IDToken) - if err != nil { - return jose.JWT{}, err - } - - if tokens.RefreshToken != "" && tokens.RefreshToken != rt { - r.cfg[cfgRefreshToken] = tokens.RefreshToken - } - r.cfg[cfgIDToken] = jwt.Encode() - - err = r.persister.Persist(r.cfg) - if err != nil { - return jose.JWT{}, fmt.Errorf("could not perist new tokens: %v", err) - } - - return jwt, r.client.verifyJWT(jwt) -} - -type oidcClient struct { - client *oidc.Client -} - -func (o *oidcClient) refreshToken(rt string) (oauth2.TokenResponse, error) { - oac, err := o.client.OAuthClient() - if err != nil { - return oauth2.TokenResponse{}, err - } - - return oac.RequestToken(oauth2.GrantTypeRefreshToken, rt) -} 
- -func (o *oidcClient) verifyJWT(jwt jose.JWT) error { - return o.client.VerifyJWT(jwt) -} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/plugins.go b/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/plugins.go deleted file mode 100644 index 2b422ddda..000000000 --- a/vendor/k8s.io/kubernetes/plugin/pkg/client/auth/plugins.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package plugins - -import ( - // Initialize all known client auth plugins. - _ "k8s.io/kubernetes/plugin/pkg/client/auth/gcp" - _ "k8s.io/kubernetes/plugin/pkg/client/auth/oidc" -) diff --git a/vendor/k8s.io/kubernetes/third_party/forked/json/fields.go b/vendor/k8s.io/kubernetes/third_party/forked/json/fields.go deleted file mode 100644 index 1d17270ee..000000000 --- a/vendor/k8s.io/kubernetes/third_party/forked/json/fields.go +++ /dev/null @@ -1,501 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package json is forked from the Go standard library to enable us to find the -// field of a struct that a given JSON key maps to. -package json - -import ( - "bytes" - "fmt" - "reflect" - "sort" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// Finds the patchStrategy and patchMergeKey struct tag fields on a given -// struct field given the struct type and the JSON name of the field. -// TODO: fix the returned errors to be introspectable. -func LookupPatchMetadata(t reflect.Type, jsonField string) (reflect.Type, string, string, error) { - if t.Kind() == reflect.Map { - return t.Elem(), "", "", nil - } - if t.Kind() != reflect.Struct { - return nil, "", "", fmt.Errorf("merging an object in json but data type is not map or struct, instead is: %s", - t.Kind().String()) - } - jf := []byte(jsonField) - // Find the field that the JSON library would use. - var f *field - fields := cachedTypeFields(t) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, jf) { - f = ff - break - } - // Do case-insensitive comparison. - if f == nil && ff.equalFold(ff.nameBytes, jf) { - f = ff - } - } - if f != nil { - // Find the reflect.Value of the most preferential struct field. - tjf := t.Field(f.index[0]) - // we must navigate down all the anonymously included structs in the chain - for i := 1; i < len(f.index); i++ { - tjf = tjf.Type.Field(f.index[i]) - } - patchStrategy := tjf.Tag.Get("patchStrategy") - patchMergeKey := tjf.Tag.Get("patchMergeKey") - return tjf.Type, patchStrategy, patchMergeKey, nil - } - return nil, "", "", fmt.Errorf("unable to find api field in struct %s for the json field %q", t.Name(), jsonField) -} - -// A field represents a single field found in a struct. 
-type field struct { - name string - nameBytes []byte // []byte(name) - equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent - - tag bool - // index is the sequence of indexes from the containing type fields to this field. - // it is a slice because anonymous structs will need multiple navigation steps to correctly - // resolve the proper fields - index []int - typ reflect.Type - omitEmpty bool - quoted bool -} - -func (f field) String() string { - return fmt.Sprintf("{name: %s, type: %v, tag: %v, index: %v, omitEmpty: %v, quoted: %v}", f.name, f.typ, f.tag, f.index, f.omitEmpty, f.quoted) -} - -func fillField(f field) field { - f.nameBytes = []byte(f.name) - f.equalFold = foldFunc(f.nameBytes) - return f -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from json tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that JSON should recognize for the given type. -// The algorithm is breadth-first search over the set of structs to include - the top struct -// and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - tag := sf.Tag.Get("json") - if tag == "-" { - continue - } - name, opts := parseTag(tag) - if !isValidTag(name) { - name = "" - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. - if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = sf.Name - } - fields = append(fields, fillField(field{ - name: name, - tag: tagged, - index: index, - typ: ft, - omitEmpty: opts.Contains("omitempty"), - quoted: opts.Contains("string"), - })) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. 
- // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with JSON tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// JSON tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. -func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. - f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. 
- default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -const ( - caseMask = ^byte(0x20) // Mask to ignore case in ASCII. - kelvin = '\u212a' - smallLongEss = '\u017f' -) - -// foldFunc returns one of four different case folding equivalence -// functions, from most general (and slow) to fastest: -// -// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 -// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') -// 3) asciiEqualFold, no special, but includes non-letters (including _) -// 4) simpleLetterEqualFold, no specials, no non-letters. -// -// The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'ſ' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign -// See http://play.golang.org/p/tTxjOc0OGo -// -// The returned function is specialized for matching against s and -// should only be given s. It's not curried for performance reasons. -func foldFunc(s []byte) func(s, t []byte) bool { - nonLetter := false - special := false // special letter - for _, b := range s { - if b >= utf8.RuneSelf { - return bytes.EqualFold - } - upper := b & caseMask - if upper < 'A' || upper > 'Z' { - nonLetter = true - } else if upper == 'K' || upper == 'S' { - // See above for why these letters are special. - special = true - } - } - if special { - return equalFoldRight - } - if nonLetter { - return asciiEqualFold - } - return simpleLetterEqualFold -} - -// equalFoldRight is a specialization of bytes.EqualFold when s is -// known to be all ASCII (including punctuation), but contains an 's', -// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. -// See comments on foldFunc. -func equalFoldRight(s, t []byte) bool { - for _, sb := range s { - if len(t) == 0 { - return false - } - tb := t[0] - if tb < utf8.RuneSelf { - if sb != tb { - sbUpper := sb & caseMask - if 'A' <= sbUpper && sbUpper <= 'Z' { - if sbUpper != tb&caseMask { - return false - } - } else { - return false - } - } - t = t[1:] - continue - } - // sb is ASCII and t is not. t must be either kelvin - // sign or long s; sb must be s, S, k, or K. - tr, size := utf8.DecodeRune(t) - switch sb { - case 's', 'S': - if tr != smallLongEss { - return false - } - case 'k', 'K': - if tr != kelvin { - return false - } - default: - return false - } - t = t[size:] - - } - if len(t) > 0 { - return false - } - return true -} - -// asciiEqualFold is a specialization of bytes.EqualFold for use when -// s is all ASCII (but may contain non-letters) and contains no -// special-folding letters. -// See comments on foldFunc. -func asciiEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, sb := range s { - tb := t[i] - if sb == tb { - continue - } - if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { - if sb&caseMask != tb&caseMask { - return false - } - } else { - return false - } - } - return true -} - -// simpleLetterEqualFold is a specialization of bytes.EqualFold for -// use when s is all ASCII letters (no underscores, etc) and also -// doesn't contain 'k', 'K', 's', or 'S'. -// See comments on foldFunc. -func simpleLetterEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, b := range s { - if b&caseMask != t[i]&caseMask { - return false - } - } - return true -} - -// tagOptions is the string following a comma in a struct field's "json" -// tag, or the empty string. It does not include the leading comma. 
-type tagOptions string - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. -func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/vendor/k8s.io/kubernetes/third_party/forked/reflect/LICENSE b/vendor/k8s.io/kubernetes/third_party/forked/reflect/LICENSE deleted file mode 100644 index 744875676..000000000 --- a/vendor/k8s.io/kubernetes/third_party/forked/reflect/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/kubernetes/third_party/golang/template/exec.go b/vendor/k8s.io/kubernetes/third_party/golang/template/exec.go deleted file mode 100644 index 739fd3509..000000000 --- a/vendor/k8s.io/kubernetes/third_party/golang/template/exec.go +++ /dev/null @@ -1,94 +0,0 @@ -//This package is copied from Go library text/template. -//The original private functions indirect and printableValue -//are exported as public functions. -package template - -import ( - "fmt" - "reflect" -) - -var Indirect = indirect -var PrintableValue = printableValue - -var ( - errorType = reflect.TypeOf((*error)(nil)).Elem() - fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() -) - -// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. -// We indirect through pointers and empty interfaces (only) because -// non-empty interfaces have methods we might need. 
-func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { - for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() { - if v.IsNil() { - return v, true - } - if v.Kind() == reflect.Interface && v.NumMethod() > 0 { - break - } - } - return v, false -} - -// printableValue returns the, possibly indirected, interface value inside v that -// is best for a call to formatted printer. -func printableValue(v reflect.Value) (interface{}, bool) { - if v.Kind() == reflect.Ptr { - v, _ = indirect(v) // fmt.Fprint handles nil. - } - if !v.IsValid() { - return "<no value>", true - } - - if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) { - if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) { - v = v.Addr() - } else { - switch v.Kind() { - case reflect.Chan, reflect.Func: - return nil, false - } - } - } - return v.Interface(), true -} - -// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero. -func canBeNil(typ reflect.Type) bool { - switch typ.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return true - } - return false -} - -// isTrue reports whether the value is 'true', in the sense of not the zero of its type, -// and whether the value has a meaningful truth value. -func isTrue(val reflect.Value) (truth, ok bool) { - if !val.IsValid() { - // Something like var x interface{}, never set. It's a form of nil. - return false, true - } - switch val.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - truth = val.Len() > 0 - case reflect.Bool: - truth = val.Bool() - case reflect.Complex64, reflect.Complex128: - truth = val.Complex() != 0 - case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface: - truth = !val.IsNil() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - truth = val.Int() != 0 - case reflect.Float32, reflect.Float64: - truth = val.Float() != 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - truth = val.Uint() != 0 - case reflect.Struct: - truth = true // Struct values are always true. - default: - return - } - return truth, true -} diff --git a/vendor/k8s.io/kubernetes/third_party/golang/template/funcs.go b/vendor/k8s.io/kubernetes/third_party/golang/template/funcs.go deleted file mode 100644 index 27a008b0a..000000000 --- a/vendor/k8s.io/kubernetes/third_party/golang/template/funcs.go +++ /dev/null @@ -1,599 +0,0 @@ -//This package is copied from Go library text/template. -//The original private functions eq, ge, gt, le, lt, and ne -//are exported as public functions. -package template - -import ( - "bytes" - "errors" - "fmt" - "io" - "net/url" - "reflect" - "strings" - "unicode" - "unicode/utf8" -) - -var Equal = eq -var GreaterEqual = ge -var Greater = gt -var LessEqual = le -var Less = lt -var NotEqual = ne - -// FuncMap is the type of the map defining the mapping from names to functions. -// Each function must have either a single return value, or two return values of -// which the second has type error. In that case, if the second (error) -// return value evaluates to non-nil during execution, execution terminates and -// Execute returns that error. 
-type FuncMap map[string]interface{} - -var builtins = FuncMap{ - "and": and, - "call": call, - "html": HTMLEscaper, - "index": index, - "js": JSEscaper, - "len": length, - "not": not, - "or": or, - "print": fmt.Sprint, - "printf": fmt.Sprintf, - "println": fmt.Sprintln, - "urlquery": URLQueryEscaper, - - // Comparisons - "eq": eq, // == - "ge": ge, // >= - "gt": gt, // > - "le": le, // <= - "lt": lt, // < - "ne": ne, // != -} - -var builtinFuncs = createValueFuncs(builtins) - -// createValueFuncs turns a FuncMap into a map[string]reflect.Value -func createValueFuncs(funcMap FuncMap) map[string]reflect.Value { - m := make(map[string]reflect.Value) - addValueFuncs(m, funcMap) - return m -} - -// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values. -func addValueFuncs(out map[string]reflect.Value, in FuncMap) { - for name, fn := range in { - v := reflect.ValueOf(fn) - if v.Kind() != reflect.Func { - panic("value for " + name + " not a function") - } - if !goodFunc(v.Type()) { - panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut())) - } - out[name] = v - } -} - -// AddFuncs adds to values the functions in funcs. It does no checking of the input - -// call addValueFuncs first. -func addFuncs(out, in FuncMap) { - for name, fn := range in { - out[name] = fn - } -} - -// goodFunc checks that the function or method has the right result signature. -func goodFunc(typ reflect.Type) bool { - // We allow functions with 1 result or 2 results where the second is an error. - switch { - case typ.NumOut() == 1: - return true - case typ.NumOut() == 2 && typ.Out(1) == errorType: - return true - } - return false -} - -// findFunction looks for a function in the template, and global map. -func findFunction(name string) (reflect.Value, bool) { - if fn := builtinFuncs[name]; fn.IsValid() { - return fn, true - } - return reflect.Value{}, false -} - -// Indexing. - -// index returns the result of indexing its first argument by the following -// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each -// indexed item must be a map, slice, or array. -func index(item interface{}, indices ...interface{}) (interface{}, error) { - v := reflect.ValueOf(item) - for _, i := range indices { - index := reflect.ValueOf(i) - var isNil bool - if v, isNil = indirect(v); isNil { - return nil, fmt.Errorf("index of nil pointer") - } - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.String: - var x int64 - switch index.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x = index.Int() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x = int64(index.Uint()) - default: - return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type()) - } - if x < 0 || x >= int64(v.Len()) { - return nil, fmt.Errorf("index out of range: %d", x) - } - v = v.Index(int(x)) - case reflect.Map: - if !index.IsValid() { - index = reflect.Zero(v.Type().Key()) - } - if !index.Type().AssignableTo(v.Type().Key()) { - return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type()) - } - if x := v.MapIndex(index); x.IsValid() { - v = x - } else { - v = reflect.Zero(v.Type().Elem()) - } - default: - return nil, fmt.Errorf("can't index item of type %s", v.Type()) - } - } - return v.Interface(), nil -} - -// Length - -// length returns the length of the item, with an error if it has no defined length. 
-func length(item interface{}) (int, error) { - v, isNil := indirect(reflect.ValueOf(item)) - if isNil { - return 0, fmt.Errorf("len of nil pointer") - } - switch v.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: - return v.Len(), nil - } - return 0, fmt.Errorf("len of type %s", v.Type()) -} - -// Function invocation - -// call returns the result of evaluating the first argument as a function. -// The function must return 1 result, or 2 results, the second of which is an error. -func call(fn interface{}, args ...interface{}) (interface{}, error) { - v := reflect.ValueOf(fn) - typ := v.Type() - if typ.Kind() != reflect.Func { - return nil, fmt.Errorf("non-function of type %s", typ) - } - if !goodFunc(typ) { - return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut()) - } - numIn := typ.NumIn() - var dddType reflect.Type - if typ.IsVariadic() { - if len(args) < numIn-1 { - return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1) - } - dddType = typ.In(numIn - 1).Elem() - } else { - if len(args) != numIn { - return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn) - } - } - argv := make([]reflect.Value, len(args)) - for i, arg := range args { - value := reflect.ValueOf(arg) - // Compute the expected type. Clumsy because of variadics. - var argType reflect.Type - if !typ.IsVariadic() || i < numIn-1 { - argType = typ.In(i) - } else { - argType = dddType - } - if !value.IsValid() && canBeNil(argType) { - value = reflect.Zero(argType) - } - if !value.Type().AssignableTo(argType) { - return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType) - } - argv[i] = value - } - result := v.Call(argv) - if len(result) == 2 && !result[1].IsNil() { - return result[0].Interface(), result[1].Interface().(error) - } - return result[0].Interface(), nil -} - -// Boolean logic. - -func truth(a interface{}) bool { - t, _ := isTrue(reflect.ValueOf(a)) - return t -} - -// and computes the Boolean AND of its arguments, returning -// the first false argument it encounters, or the last argument. -func and(arg0 interface{}, args ...interface{}) interface{} { - if !truth(arg0) { - return arg0 - } - for i := range args { - arg0 = args[i] - if !truth(arg0) { - break - } - } - return arg0 -} - -// or computes the Boolean OR of its arguments, returning -// the first true argument it encounters, or the last argument. -func or(arg0 interface{}, args ...interface{}) interface{} { - if truth(arg0) { - return arg0 - } - for i := range args { - arg0 = args[i] - if truth(arg0) { - break - } - } - return arg0 -} - -// not returns the Boolean negation of its argument. -func not(arg interface{}) (truth bool) { - truth, _ = isTrue(reflect.ValueOf(arg)) - return !truth -} - -// Comparison. - -// TODO: Perhaps allow comparison between signed and unsigned integers. 
- -var ( - errBadComparisonType = errors.New("invalid type for comparison") - errBadComparison = errors.New("incompatible types for comparison") - errNoComparison = errors.New("missing argument for comparison") -) - -type kind int - -const ( - invalidKind kind = iota - boolKind - complexKind - intKind - floatKind - integerKind - stringKind - uintKind -) - -func basicKind(v reflect.Value) (kind, error) { - switch v.Kind() { - case reflect.Bool: - return boolKind, nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return intKind, nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return uintKind, nil - case reflect.Float32, reflect.Float64: - return floatKind, nil - case reflect.Complex64, reflect.Complex128: - return complexKind, nil - case reflect.String: - return stringKind, nil - } - return invalidKind, errBadComparisonType -} - -// eq evaluates the comparison a == b || a == c || ... -func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) { - v1 := reflect.ValueOf(arg1) - k1, err := basicKind(v1) - if err != nil { - return false, err - } - if len(arg2) == 0 { - return false, errNoComparison - } - for _, arg := range arg2 { - v2 := reflect.ValueOf(arg) - k2, err := basicKind(v2) - if err != nil { - return false, err - } - truth := false - if k1 != k2 { - // Special case: Can compare integer values regardless of type's sign. - switch { - case k1 == intKind && k2 == uintKind: - truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint() - case k1 == uintKind && k2 == intKind: - truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int()) - default: - return false, errBadComparison - } - } else { - switch k1 { - case boolKind: - truth = v1.Bool() == v2.Bool() - case complexKind: - truth = v1.Complex() == v2.Complex() - case floatKind: - truth = v1.Float() == v2.Float() - case intKind: - truth = v1.Int() == v2.Int() - case stringKind: - truth = v1.String() == v2.String() - case uintKind: - truth = v1.Uint() == v2.Uint() - default: - panic("invalid kind") - } - } - if truth { - return true, nil - } - } - return false, nil -} - -// ne evaluates the comparison a != b. -func ne(arg1, arg2 interface{}) (bool, error) { - // != is the inverse of ==. - equal, err := eq(arg1, arg2) - return !equal, err -} - -// lt evaluates the comparison a < b. -func lt(arg1, arg2 interface{}) (bool, error) { - v1 := reflect.ValueOf(arg1) - k1, err := basicKind(v1) - if err != nil { - return false, err - } - v2 := reflect.ValueOf(arg2) - k2, err := basicKind(v2) - if err != nil { - return false, err - } - truth := false - if k1 != k2 { - // Special case: Can compare integer values regardless of type's sign. - switch { - case k1 == intKind && k2 == uintKind: - truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint() - case k1 == uintKind && k2 == intKind: - truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int()) - default: - return false, errBadComparison - } - } else { - switch k1 { - case boolKind, complexKind: - return false, errBadComparisonType - case floatKind: - truth = v1.Float() < v2.Float() - case intKind: - truth = v1.Int() < v2.Int() - case stringKind: - truth = v1.String() < v2.String() - case uintKind: - truth = v1.Uint() < v2.Uint() - default: - panic("invalid kind") - } - } - return truth, nil -} - -// le evaluates the comparison <= b. -func le(arg1, arg2 interface{}) (bool, error) { - // <= is < or ==. 
-	lessThan, err := lt(arg1, arg2)
-	if lessThan || err != nil {
-		return lessThan, err
-	}
-	return eq(arg1, arg2)
-}
-
-// gt evaluates the comparison a > b.
-func gt(arg1, arg2 interface{}) (bool, error) {
-	// > is the inverse of <=.
-	lessOrEqual, err := le(arg1, arg2)
-	if err != nil {
-		return false, err
-	}
-	return !lessOrEqual, nil
-}
-
-// ge evaluates the comparison a >= b.
-func ge(arg1, arg2 interface{}) (bool, error) {
-	// >= is the inverse of <.
-	lessThan, err := lt(arg1, arg2)
-	if err != nil {
-		return false, err
-	}
-	return !lessThan, nil
-}
-
-// HTML escaping.
-
-var (
-	htmlQuot = []byte("&#34;") // shorter than "&quot;"
-	htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
-	htmlAmp  = []byte("&amp;")
-	htmlLt   = []byte("&lt;")
-	htmlGt   = []byte("&gt;")
-)
-
-// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
-func HTMLEscape(w io.Writer, b []byte) {
-	last := 0
-	for i, c := range b {
-		var html []byte
-		switch c {
-		case '"':
-			html = htmlQuot
-		case '\'':
-			html = htmlApos
-		case '&':
-			html = htmlAmp
-		case '<':
-			html = htmlLt
-		case '>':
-			html = htmlGt
-		default:
-			continue
-		}
-		w.Write(b[last:i])
-		w.Write(html)
-		last = i + 1
-	}
-	w.Write(b[last:])
-}
-
-// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
-func HTMLEscapeString(s string) string {
-	// Avoid allocation if we can.
-	if strings.IndexAny(s, `'"&<>`) < 0 {
-		return s
-	}
-	var b bytes.Buffer
-	HTMLEscape(&b, []byte(s))
-	return b.String()
-}
-
-// HTMLEscaper returns the escaped HTML equivalent of the textual
-// representation of its arguments.
-func HTMLEscaper(args ...interface{}) string {
-	return HTMLEscapeString(evalArgs(args))
-}
-
-// JavaScript escaping.
-
-var (
-	jsLowUni = []byte(`\u00`)
-	hex      = []byte("0123456789ABCDEF")
-
-	jsBackslash = []byte(`\\`)
-	jsApos      = []byte(`\'`)
-	jsQuot      = []byte(`\"`)
-	jsLt        = []byte(`\x3C`)
-	jsGt        = []byte(`\x3E`)
-)
-
-// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
-func JSEscape(w io.Writer, b []byte) {
-	last := 0
-	for i := 0; i < len(b); i++ {
-		c := b[i]
-
-		if !jsIsSpecial(rune(c)) {
-			// fast path: nothing to do
-			continue
-		}
-		w.Write(b[last:i])
-
-		if c < utf8.RuneSelf {
-			// Quotes, slashes and angle brackets get quoted.
-			// Control characters get written as \u00XX.
-			switch c {
-			case '\\':
-				w.Write(jsBackslash)
-			case '\'':
-				w.Write(jsApos)
-			case '"':
-				w.Write(jsQuot)
-			case '<':
-				w.Write(jsLt)
-			case '>':
-				w.Write(jsGt)
-			default:
-				w.Write(jsLowUni)
-				t, b := c>>4, c&0x0f
-				w.Write(hex[t : t+1])
-				w.Write(hex[b : b+1])
-			}
-		} else {
-			// Unicode rune.
-			r, size := utf8.DecodeRune(b[i:])
-			if unicode.IsPrint(r) {
-				w.Write(b[i : i+size])
-			} else {
-				fmt.Fprintf(w, "\\u%04X", r)
-			}
-			i += size - 1
-		}
-		last = i + 1
-	}
-	w.Write(b[last:])
-}
-
-// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
-func JSEscapeString(s string) string {
-	// Avoid allocation if we can.
-	if strings.IndexFunc(s, jsIsSpecial) < 0 {
-		return s
-	}
-	var b bytes.Buffer
-	JSEscape(&b, []byte(s))
-	return b.String()
-}
-
-func jsIsSpecial(r rune) bool {
-	switch r {
-	case '\\', '\'', '"', '<', '>':
-		return true
-	}
-	return r < ' ' || utf8.RuneSelf <= r
-}
-
-// JSEscaper returns the escaped JavaScript equivalent of the textual
-// representation of its arguments.
-func JSEscaper(args ...interface{}) string {
-	return JSEscapeString(evalArgs(args))
-}
-
-// URLQueryEscaper returns the escaped value of the textual representation of
-// its arguments in a form suitable for embedding in a URL query.
-func URLQueryEscaper(args ...interface{}) string {
-	return url.QueryEscape(evalArgs(args))
-}
-
-// evalArgs formats the list of arguments into a string. It is therefore equivalent to
-//	fmt.Sprint(args...)
-// except that each argument is indirected (if a pointer), as required,
-// using the same rules as the default string evaluation during template
-// execution.
-func evalArgs(args []interface{}) string {
-	ok := false
-	var s string
-	// Fast path for simple common case.
-	if len(args) == 1 {
-		s, ok = args[0].(string)
-	}
-	if !ok {
-		for i, arg := range args {
-			a, ok := printableValue(reflect.ValueOf(arg))
-			if ok {
-				args[i] = a
-			} // else let fmt do its thing
-		}
-		s = fmt.Sprint(args...)
-	}
-	return s
-}